Coursera Neural Networks assignment 1 sample code (ported from Octave)
# helpers.py
import scipy.io

# Metadata keys that scipy.io.loadmat adds to every result; they are not
# part of the dataset itself.
UNWANTED_KEYS = ('__globals__', '__header__', '__version__')


def load_dataset(path):
    """Load a .mat file and strip scipy's metadata keys."""
    data = scipy.io.loadmat(path)
    for key in UNWANTED_KEYS:
        del data[key]
    return data
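A note on how this loader is used: the driver at the bottom of the perceptron file calls learn_perceptron(**data), so the .mat file is expected to contain variables named exactly after that function's parameters (neg_examples_nobias, pos_examples_nobias, w_init, w_gen_feas). A quick sanity check, assuming the Datasets/dataset1.mat path used there:

import scipy.io

raw = scipy.io.loadmat('Datasets/dataset1.mat')
print(sorted(raw.keys()))
# loadmat always adds '__globals__', '__header__' and '__version__' alongside
# the file's own variables; load_dataset() strips exactly those three so the
# resulting dict can be unpacked straight into keyword arguments.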
import numpy
from numpy.linalg import norm

# %% Learns the weights of a perceptron and displays the results.
# function [w] = learn_perceptron(neg_examples_nobias,pos_examples_nobias,w_init,w_gen_feas)
# %%
# % Learns the weights of a perceptron for a 2-dimensional dataset and plots
# % the perceptron at each iteration where an iteration is defined as one
# % full pass through the data. If a generously feasible weight vector
# % is provided then the visualization will also show the distance
# % of the learned weight vectors to the generously feasible weight vector.
# % Required Inputs:
# %   neg_examples_nobias - The num_neg_examples x 2 matrix for the examples with target 0.
# %       num_neg_examples is the number of examples for the negative class.
# %   pos_examples_nobias - The num_pos_examples x 2 matrix for the examples with target 1.
# %       num_pos_examples is the number of examples for the positive class.
# %   w_init - A 3-dimensional initial weight vector. The last element is the bias.
# %   w_gen_feas - A generously feasible weight vector.
# % Returns:
# %   w - The learned weight vector.
# %%
def learn_perceptron(neg_examples_nobias, pos_examples_nobias, w_init=None, w_gen_feas=[]):
    # %Bookkeeping
    # num_neg_examples = size(neg_examples_nobias,1);
    # num_pos_examples = size(pos_examples_nobias,1);
    num_neg_examples = neg_examples_nobias.shape[0]
    num_pos_examples = pos_examples_nobias.shape[0]
    # num_err_history = [];
    # w_dist_history = [];
    num_err_history = []
    w_dist_history = []
    # %Here we add a column of ones to the examples in order to allow us to learn
    # %bias parameters.
    # neg_examples = [neg_examples_nobias,ones(num_neg_examples,1)];
    # pos_examples = [pos_examples_nobias,ones(num_pos_examples,1)];
    neg_examples = numpy.hstack((neg_examples_nobias, numpy.ones((num_neg_examples, 1))))
    pos_examples = numpy.hstack((pos_examples_nobias, numpy.ones((num_pos_examples, 1))))
    # %If weight vectors have not been provided, initialize them appropriately.
    # if (~exist('w_init','var') || isempty(w_init))
    #     w = randn(3,1);
    # else
    #     w = w_init;
    # end
    #
    # if (~exist('w_gen_feas','var'))
    #     w_gen_feas = [];
    # end
    if w_init is None:
        w = numpy.random.randn(3, 1)  # randn lives in numpy.random, not at the numpy top level
    else:
        w = w_init
    # %Find the data points that the perceptron has incorrectly classified
    # %and record the number of errors it makes.
    # iter = 0;
    i = 0  # 'iter' is a Python builtin, hence 'i'
    # NOTE: num_errs starts at 1 just to enter the loop; eval_perceptron
    # resets it on the first pass.
    num_errs = 1
    # %Iterate until the perceptron has correctly classified all points.
    # while (num_errs > 0)
    while num_errs > 0:
        # [neg_mistakes, pos_mistakes] = eval_perceptron(neg_examples,pos_examples,w);
        neg_mistakes, pos_mistakes = eval_perceptron(neg_examples, pos_examples, w)
        # num_errs = size(neg_mistakes,1) + size(pos_mistakes,1);
        num_errs = len(neg_mistakes) + len(pos_mistakes)
        # num_err_history(end+1) = num_errs;
        num_err_history.append(num_errs)
        # fprintf('Number of errors in iteration %d:\t%d\n',i,num_errs);
        # fprintf(['weights:\t', mat2str(w), '\n']);
        print('Number of errors in iteration %d:\t%d' % (i, num_errs))
        print('weights:\t' + str(w))
        # plot_perceptron(neg_examples, pos_examples, neg_mistakes, pos_mistakes, num_err_history, w, w_dist_history);
        # TODO: plotting -- a matplotlib sketch of plot_perceptron is given below.
        # key = input('<Press enter to continue, q to quit.>', 's');
        key = input('<Press enter to continue, q to quit.>')  # raw_input under Python 2
        # if (key == 'q')
        #     return;
        # end
        if key == 'q':
            return w
        # %If a generously feasible weight vector exists, record the distance
        # %to it from the initial weight vector.
        # if (length(w_gen_feas) ~= 0)
        #     w_dist_history(end+1) = norm(w - w_gen_feas);
        # end
        if len(w_gen_feas) != 0:  # len() works for both the list default and an ndarray
            w_dist_history.append(norm(w - w_gen_feas))
        # %Update the weights of the perceptron.
        w = update_weights(neg_examples, pos_examples, w)
        i += 1
    return w
# NOTE: The Octave original primes the loop with one eval/print/plot pass and
# then repeats the whole body inside 'while'; the port folds both copies into
# the single loop above, so the duplicated Octave block is omitted here.
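# The plot_perceptron call is still a TODO in this port. Below is a minimal
# matplotlib stand-in (an addition, not part of the original assignment code);
# it guesses at the Octave layout: data and decision boundary on the left,
# error count per iteration on the right. w_dist_history is ignored here.
def plot_perceptron(neg_examples, pos_examples, neg_mistakes, pos_mistakes,
                    num_err_history, w, w_dist_history):
    import matplotlib.pyplot as plt
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
    ax1.scatter(neg_examples[:, 0], neg_examples[:, 1], marker='o', label='target 0')
    ax1.scatter(pos_examples[:, 0], pos_examples[:, 1], marker='s', label='target 1')
    # Circle the misclassified points.
    for examples, mistakes in ((neg_examples, neg_mistakes), (pos_examples, pos_mistakes)):
        if len(mistakes) != 0:
            ax1.scatter(examples[mistakes, 0], examples[mistakes, 1],
                        s=120, facecolors='none', edgecolors='r')
    # Decision boundary: w0*x + w1*y + w2 = 0, i.e. y = -(w0*x + w2) / w1.
    xs = numpy.array(ax1.get_xlim())
    if w[1] != 0:
        ax1.plot(xs, -(w[0] * xs + w[2]) / w[1], 'k-')
    ax1.legend()
    ax1.set_title('Classifier')
    ax2.plot(num_err_history, marker='.')
    ax2.set_xlabel('Iteration')
    ax2.set_title('Number of errors')
    plt.show()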
# %WRITE THE CODE TO COMPLETE THIS FUNCTION
# function [w] = update_weights(neg_examples, pos_examples, w_current)
def update_weights(neg_examples, pos_examples, w_current):
    # %%
    # % Updates the weights of the perceptron for incorrectly classified points
    # % using the perceptron update algorithm. This function makes one sweep
    # % over the dataset.
    # % Inputs:
    # %   neg_examples - The num_neg_examples x 3 matrix for the examples with target 0.
    # %       num_neg_examples is the number of examples for the negative class.
    # %   pos_examples - The num_pos_examples x 3 matrix for the examples with target 1.
    # %       num_pos_examples is the number of examples for the positive class.
    # %   w_current - A 3-dimensional weight vector, the last element is the bias.
    # % Returns:
    # %   w - The weight vector after one pass through the dataset using the perceptron
    # %       learning rule.
    # %%
    # w = w_current;
    w = w_current
    # num_neg_examples = size(neg_examples,1);
    # num_pos_examples = size(pos_examples,1);
    num_neg_examples = neg_examples.shape[0]
    num_pos_examples = pos_examples.shape[0]
    # for i=1:num_neg_examples
    #     this_case = neg_examples(i,:);
    #     x = this_case'; %Hint
    #     activation = this_case*w;
    #     if (activation >= 0)
    #         %YOUR CODE HERE
    #     end
    # end
    for i in range(num_neg_examples):
        this_case = neg_examples[i, :]
        # We can't just use this_case.T, since it is a one-dimensional array
        # at this point; reshape it into a column vector (this is the %Hint).
        x = this_case.reshape(this_case.shape + (1,))
        activation = numpy.dot(this_case, w)
        if activation >= 0:
            # TODO: YOUR CODE HERE -- see the sketch after this listing.
            pass  # placeholder so the stub is valid Python
    # for i=1:num_pos_examples
    #     this_case = pos_examples(i,:);
    #     x = this_case';
    #     activation = this_case*w;
    #     if (activation < 0)
    #         %YOUR CODE HERE
    #     end
    # end
    for i in range(num_pos_examples):
        this_case = pos_examples[i, :]
        x = this_case.reshape(this_case.shape + (1,))
        activation = numpy.dot(this_case, w)
        if activation < 0:
            # TODO: YOUR CODE HERE -- see the sketch after this listing.
            pass  # placeholder so the stub is valid Python
    return w
# function [neg_mistakes, pos_mistakes] = eval_perceptron(neg_examples, pos_examples, w)
def eval_perceptron(neg_examples, pos_examples, w):
    # %%
    # % Evaluates the perceptron using a given weight vector. Here, evaluation
    # % refers to finding the data points that the perceptron incorrectly classifies.
    # % Inputs:
    # %   neg_examples - The num_neg_examples x 3 matrix for the examples with target 0.
    # %       num_neg_examples is the number of examples for the negative class.
    # %   pos_examples - The num_pos_examples x 3 matrix for the examples with target 1.
    # %       num_pos_examples is the number of examples for the positive class.
    # %   w - A 3-dimensional weight vector, the last element is the bias.
    # % Returns:
    # %   neg_mistakes - A vector containing the indices of the negative examples that have been
    # %       incorrectly classified as positive.
    # %   pos_mistakes - A vector containing the indices of the positive examples that have been
    # %       incorrectly classified as negative.
    # %%
    # num_neg_examples = size(neg_examples,1);
    # num_pos_examples = size(pos_examples,1);
    num_neg_examples = neg_examples.shape[0]
    num_pos_examples = pos_examples.shape[0]
    # neg_mistakes = [];
    # pos_mistakes = [];
    neg_mistakes = []
    pos_mistakes = []
    # for i=1:num_neg_examples
    #     x = neg_examples(i,:)';
    #     activation = x'*w;
    #     if (activation >= 0)
    #         neg_mistakes = [neg_mistakes;i];
    #     end
    # end
    for i in range(num_neg_examples):
        x = neg_examples[i, :]
        activation = numpy.dot(x, w)
        if activation >= 0:
            neg_mistakes.append(i)
    # for i=1:num_pos_examples
    #     x = pos_examples(i,:)';
    #     activation = x'*w;
    #     if (activation < 0)
    #         pos_mistakes = [pos_mistakes;i];
    #     end
    # end
    for i in range(num_pos_examples):
        x = pos_examples[i, :]
        activation = numpy.dot(x, w)
        if activation < 0:
            pos_mistakes.append(i)
    return neg_mistakes, pos_mistakes
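# Aside (an addition, not from the original port): since the activation of
# every example is just a matrix product, both loops in eval_perceptron()
# could be vectorized. A sketch, assuming w has shape (3, 1):
#
#     neg_mistakes = numpy.nonzero(numpy.dot(neg_examples, w).ravel() >= 0)[0].tolist()
#     pos_mistakes = numpy.nonzero(numpy.dot(pos_examples, w).ravel() < 0)[0].tolist()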
# Driver: load the assignment's first dataset and run the perceptron on it.
import helpers

data = helpers.load_dataset('Datasets/dataset1.mat')
learn_perceptron(**data)
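The %YOUR CODE HERE stubs in update_weights are the assignment's deliverable, so they are left blank above. For reference, here is a minimal sketch of the classic perceptron learning rule those stubs are asking for; the name update_weights_completed is hypothetical, and the unit learning rate is an assumption (it matches the lectures), not the graded solution:

import numpy

def update_weights_completed(neg_examples, pos_examples, w_current):
    # One sweep of the classic perceptron rule with learning rate 1:
    # subtract x from w for a negative example scored as positive,
    # add x to w for a positive example scored as negative.
    w = w_current.copy()
    for i in range(neg_examples.shape[0]):
        this_case = neg_examples[i, :]
        x = this_case.reshape(this_case.shape + (1,))  # column vector, per the hint
        if numpy.dot(this_case, w) >= 0:  # misclassified as positive
            w = w - x
    for i in range(pos_examples.shape[0]):
        this_case = pos_examples[i, :]
        x = this_case.reshape(this_case.shape + (1,))
        if numpy.dot(this_case, w) < 0:   # misclassified as negative
            w = w + x
    return w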