@nyk510
Last active October 14, 2015 00:36
PRML Chapter 5: Implementing a Neural Network ref: http://qiita.com/nykergoto/items/a6bd77b198814699a843
For a two-layer network with D inputs and M tanh hidden units, the k-th output, with an identity output activation f, is

\begin{align}
y_k&=f\left(\sum_{j=1}^M w_{kj}^{(2)} \tanh\left(\sum_{i=1}^D w_{ji}^{(1)}x_i+w_{j0}^{(1)}\right)+w_{k0}^{(2)}\right)\\
&=\sum_{j=1}^M w_{kj}^{(2)} \tanh\left(\sum_{i=1}^D w_{ji}^{(1)}x_i+w_{j0}^{(1)}\right)+w_{k0}^{(2)}
\end{align}
Training minimizes the sum-of-squares error over the N training points (the factor of 1/2 is the PRML convention, and matches the gradient steps taken in the code below):

E(w)=\frac{1}{2}\sum_{n=1}^N \|y_n-t_n\|_2^2
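The gradients come from backpropagation. For reference, the deltas below are the standard two-layer results from PRML §5.3 with identity output units; they are exactly what the Fit method in the code computes:

\begin{align}
\delta_k&=y_k-t_k\\
\delta_j&=(1-z_j^2)\sum_{k} w_{kj}^{(2)}\delta_k\\
\frac{\partial E_n}{\partial w_{ji}^{(1)}}&=\delta_j x_i,\qquad
\frac{\partial E_n}{\partial w_{kj}^{(2)}}=\delta_k z_j
\end{align}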
#coding:utf-8
import numpy as np
import matplotlib.pyplot as plt
#import multiprocessing as mp  # uncomment for the parallel version below
import seaborn as sns  # imported for its nicer default plot style
class NeuralNet():
    def __init__(self, input=1, output=1, hidden=3):
        np.random.seed(71)  # seed before drawing the weights, for reproducibility
        self.input = input
        self.output = output
        self.hidden = hidden
        # weights drawn uniformly from [-1, 1); row 0 of each matrix holds the biases
        self.w_1 = np.random.rand(self.input + 1, self.hidden) * 2. - 1.
        self.w_2 = np.random.rand(self.hidden + 1, self.output) * 2. - 1.

    def predict(self, x):
        """
        Predict the target value.
        x: input value to predict for, numpy.ndarray-like.
        return: predicted value, numpy.ndarray-like.
        """
        A = self.w_1[1:].dot(x) + self.w_1[0]      # hidden-layer pre-activations
        Z = np.tanh(A)                             # hidden-layer outputs
        Y = self.w_2[1:].T.dot(Z.T) + self.w_2[0]  # linear output layer
        self.Z = Z                                 # cached for backpropagation
        return Y[0]
    def Fit(self, X, t, epoch=1000, eta=.1):
        """
        Fit the parameters by stochastic gradient descent.
        X: training inputs, numpy.ndarray-like.
        t: target values, ndarray-like.
        epoch: number of passes over the whole dataset.
        eta: learning rate for each gradient step.
        return: None
        """
        self.X = X
        self.t = t
        self.eta = eta
        error_list = []
        for n in range(epoch):
            if n % 50 == 0:
                error_list.append([n + 1, self._compute_error()])
            for x_i, t_i in zip(X, t):
                # backpropagation: output-layer and hidden-layer deltas
                delta_k = self.predict(np.array((x_i))) - t_i
                delta_j = (1 - self.Z ** 2) * self.w_2[1:].dot(delta_k)
                # the bias terms are updated by delta_j / delta_k directly,
                # since the bias input is fixed at x_0 = 1
                self.w_1[0] = self.w_1[0] - eta * delta_j
                self.w_2[0] = self.w_2[0] - eta * delta_k
                self.w_1[1:] = self.w_1[1:] - eta * np.outer(np.array((x_i)), delta_j)
                self.w_2[1:] = self.w_2[1:] - eta * np.outer(self.Z, delta_k)
        self.errors = np.array(error_list)
        return
    def _compute_error(self):
        """
        Compute the loss (sum-of-squares error) over the whole dataset.
        return: total error.
        """
        error = 0.
        for X_n, t_n in zip(self.X, self.t):
            error = error + np.sum((self.predict(np.array((X_n))) - t_n) ** 2)
        return error
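As a quick sanity check on the backprop updates above, one can compare the analytic gradient of a single weight against a central finite difference of the error. This is a minimal sketch, not part of the original gist: check_gradient is a hypothetical helper that assumes the NeuralNet class exactly as defined above.

def check_gradient(net, x, t, eps=1e-5):
    """Hypothetical helper: compare the analytic gradient of one
    first-layer weight with a central finite difference of
    E = 0.5 * (y - t)^2 on a single sample."""
    # analytic gradient from one backprop pass (same formulas as in Fit)
    y = net.predict(np.array(x))
    delta_k = y - t
    delta_j = (1 - net.Z ** 2) * net.w_2[1:].dot(delta_k)
    analytic = np.outer(np.array(x), delta_j)[0, 0]  # dE/dw_1[1, 0]

    # numerical gradient by perturbing the same weight
    def loss():
        return 0.5 * np.sum((net.predict(np.array(x)) - t) ** 2)
    net.w_1[1, 0] += eps
    e_plus = loss()
    net.w_1[1, 0] -= 2 * eps
    e_minus = loss()
    net.w_1[1, 0] += eps  # restore the original weight
    return analytic, (e_plus - e_minus) / (2 * eps)

# the two returned numbers should agree closely, e.g.:
# print(check_gradient(NeuralNet(input=1, output=1, hidden=3), 0.3, 0.5))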
# four test targets: a sine, a quadratic, the absolute value, and a step function
target_val_list = []
x_train = np.linspace(-1, 1, 50)
target_val_list.append(np.sin(x_train * np.pi))
target_val_list.append(x_train ** 2)
target_val_list.append(np.abs(x_train))
target_val_list.append((np.sign(x_train) + 1) * .5)

def subcalc(p):  # p = 0, 1, ...
    """Fit a fresh network to test function p; return its predictions and error curve."""
    target_val = target_val_list[p]
    neural_net = NeuralNet(input=1, output=1, hidden=3)
    neural_net.Fit(x_train, target_val)
    pred = []
    for x in x_train:
        pred.append(neural_net.predict(np.array(x)))
    return [pred, neural_net.errors]

proc = len(target_val_list)
# parallel version:
# pool = mp.Pool(proc)
# callback = pool.map(subcalc, range(proc))
callback = []
for i in range(len(target_val_list)):
    callback.append(subcalc(i))
pred_list = []
error_list = []
for i in range(proc):
    pred_list.append(callback[i][0])
    error_list.append(callback[i][1])

# left column: target values (dots) vs. predictions (dashed);
# right column: training error on log-log axes
test_num = len(target_val_list)
for i, (pred, target_val) in enumerate(zip(pred_list, target_val_list)):
    plt.subplot(test_num, 2, i * 2 + 1)
    plt.plot(x_train, target_val, "o")
    plt.plot(x_train, pred, "--")
    plt.subplot(test_num, 2, i * 2 + 2)
    plt.plot(error_list[i][:, 0], error_list[i][:, 1])
    plt.yscale("log")
    plt.xscale("log")
plt.show()