import matplotlib.pyplot as plt

# The helper functions used below (initialize_parameters, forward_propagation,
# compute_cost, backward_propagation, update_parameters, and their
# *_with_regularization / *_with_dropout variants) are assumed to be defined
# elsewhere, e.g. imported from the assignment's reg_utils-style helper module.

def model(X, Y, learning_rate = 0.3, num_iterations = 30000, print_cost = True, lambd = 0, keep_prob = 1):
    """
    Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.

    Arguments:
    X -- input data, of shape (input size, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (output size, number of examples)
    learning_rate -- learning rate of the optimization
    num_iterations -- number of iterations of the optimization loop
    print_cost -- if True, print the cost every 10000 iterations
    lambd -- regularization hyperparameter, scalar
    keep_prob -- probability of keeping a neuron active during drop-out, scalar

    Returns:
    parameters -- parameters learned by the model. They can then be used to predict.
    """

    grads = {}
    costs = []                            # to keep track of the cost
    m = X.shape[1]                        # number of examples
    layers_dims = [X.shape[0], 20, 3, 1]

    # Initialize parameters dictionary.
    parameters = initialize_parameters(layers_dims)

    # Loop (gradient descent)
    for i in range(0, num_iterations):

        # Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
        if keep_prob == 1:
            a3, cache = forward_propagation(X, parameters)
        elif keep_prob < 1:
            a3, cache = forward_propagation_with_dropout(X, parameters, keep_prob)

        # Cost function
        if lambd == 0:
            cost = compute_cost(a3, Y)
        else:
            cost = compute_cost_with_regularization(a3, Y, parameters, lambd)

        # Backward propagation.
        assert (lambd == 0 or keep_prob == 1)  # it is possible to use both L2 regularization and dropout,
                                               # but this assignment will only explore one at a time
        if lambd == 0 and keep_prob == 1:
            grads = backward_propagation(X, Y, cache)
        elif lambd != 0:
            grads = backward_propagation_with_regularization(X, Y, cache, lambd)
        elif keep_prob < 1:
            grads = backward_propagation_with_dropout(X, Y, cache, keep_prob)

        # Update parameters.
        parameters = update_parameters(parameters, grads, learning_rate)

        # Print the loss every 10000 iterations
        if print_cost and i % 10000 == 0:
            print("Cost after iteration {}: {}".format(i, cost))
        if print_cost and i % 1000 == 0:
            costs.append(cost)

    # Plot the cost
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('iterations (x1,000)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    return parameters
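
# A minimal usage sketch, assuming the assignment's data loader (here called
# load_2D_dataset) and the helper functions noted above are in scope; these
# names are assumptions and are not part of this gist.
#
# train_X, train_Y, test_X, test_Y = load_2D_dataset()
#
# # Baseline: no L2 regularization, no dropout
# parameters = model(train_X, train_Y)
#
# # With L2 regularization (lambd > 0 adds a weight-decay penalty to the cost)
# parameters = model(train_X, train_Y, lambd = 0.7)
#
# # With dropout (keep 86% of hidden units active on each forward pass)
# parameters = model(train_X, train_Y, keep_prob = 0.86)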