Created
September 15, 2017 11:43
-
-
Save giuseppebonaccorso/967159b13cbd50a7a725a0f70a560517 to your computer and use it in GitHub Desktop.
Quickprop example
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
from sklearn.datasets import make_classification
import numpy as np

# Seed NumPy's global RNG so dataset sampling and weight init are reproducible
np.random.seed(1000)
def sigmoid(arg):
    """Logistic sigmoid 1 / (1 + exp(-arg)), applied elementwise.

    Works on scalars and NumPy arrays alike; output lies in (0, 1).
    """
    denominator = 1.0 + np.exp(-arg)
    return 1.0 / denominator
# Create a random binary-classification dataset with nb_features
# informative features and no redundant ones.
nb_samples = 500
nb_features = 2

X, Y = make_classification(n_samples=nb_samples,
                           n_features=nb_features,
                           n_informative=nb_features,
                           n_redundant=0,
                           random_state=4)

# Preprocess the dataset: append a constant bias column of ones to X
# (so the bias is learned as an extra weight) and reshape the labels
# into an (nb_samples, 1) column vector.
Xt = np.ones(shape=(nb_samples, nb_features + 1))
# Fix: slice was hard-coded as 0:2; tie it to nb_features so the
# script stays correct if the feature count changes.
Xt[:, 0:nb_features] = X
Yt = Y.reshape((nb_samples, 1))
# Initial parameter values.
# W holds the weights (including the bias term) being learned; the
# remaining buffers carry the previous gradient (dL_prev), previous
# weight step (dW_prec), and previous weights (W_prev) that the
# Quickprop-style secant update below needs.
# NOTE(review): "dW_prec" looks like a typo for "dW_prev"; kept as-is
# because the training loop refers to it by this exact name.
W = np.random.uniform(-0.01, 0.01, size=(1, nb_features + 1))
dL = np.zeros((1, nb_features + 1))
dL_prev = np.random.uniform(-0.001, 0.001, size=(1, nb_features + 1))
dW_prec = np.ones((1, nb_features + 1))
W_prev = np.zeros((1, nb_features + 1))

# Hyperparameters: step size and L1 convergence threshold on the weights
learning_rate = 0.0001
tolerance = 1e-6
# Run the training process: iterate the Quickprop-style update until the
# L1 change in the weight vector drops below `tolerance`.
i = 0
while np.linalg.norm(W_prev - W, ord=1) > tolerance:
    i += 1
    # Forward pass: sigmoid of the linear model over all samples
    sZ = sigmoid(np.dot(Xt, W.T))
    # Gradient of the log-likelihood w.r.t. the weights, summed over samples
    dL = np.sum((Yt - sZ) * Xt, axis=0)
    # Quickprop secant step: delta = g_t / (g_{t-1} - g_t).
    # Fix: guard the denominator — when a gradient component is unchanged
    # between iterations, the original code divided by zero, producing
    # inf/NaN weights and potentially a non-terminating loop.
    denom = dL_prev - dL
    denom = np.where(denom == 0.0, np.finfo(float).eps, denom)
    delta = dL / denom
    dL_prev = dL.copy()
    # Scale the previous step by the secant factor and keep it for next time
    dW = dW_prec * delta
    dW_prec = dW.copy()
    W_prev = W.copy()
    W += learning_rate * dW
print('Converged after {} iterations'.format(i))
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Thank you for sharing your code with others, but I am having some difficulty understanding it. Could you please help me understand the code?