// Library Module Implementing Layers of Perceptrons for CY2D7
// Dr Richard Mitchell 18/12/06 ... 17/3/08 ...12/8/09
// Adapted by
#include "mlplayer.h"
#include <math.h>
#include <stdlib.h>		// for rand() and RAND_MAX
#include <iomanip>
void dcopy (int num, const double fromarray[], double toarray[]) {
	// copy num doubles from the fromarray to the toarray
	for (int ct=0; ct<num; ct++) {
		toarray[ct] = fromarray[ct];
	}
}
double myrand (void) {
	// return a random number in the range -1..1
	// do so by calling the rand function from the C standard library
	return -1.0 + (2.0 * rand() / RAND_MAX);
}
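// Note: without a prior call to srand(), rand() behaves as if seeded with
// srand(1), so every run starts from the same pseudo-random initial weights;
// call srand() once at start-up if different weights are wanted each run.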
// Implementation of LinActLayer *****************************
LinActLayer::LinActLayer (int numIns, int numOuts) {
	// constructor for Layer of linearly activated nodes
	// it is passed the numbers of inputs and of outputs
	// there are numOuts neurons in the layer
	// each neuron has an output, a delta and an error -
	// so have an array of outputs, deltas and errors
	// each neuron has numIns+1 weights (the first being the bias weight)
	// so have large array of weights, and of weight changes
	int ct;
	numInputs = numIns;						// store number of inputs
	numNodes = numOuts;						// and of outputs in object
	numWeights = (numInputs + 1) * numNodes;	// for convenience also calc number of weights
	outputs = new double [numNodes];			// get space for array of outputs
	deltas = new double [numNodes];			// and deltas
	errors = new double [numNodes];			// and errors
	weights = new double [numWeights];			// get space for weights
	deltaWeights = new double [numWeights];	// and change in weights
	for (ct=0; ct<numWeights; ct++) {
		weights[ct] = myrand();				// initialise weights randomly
		deltaWeights[ct] = 0;				// initialise deltaWeights to 0
	}
	for (ct=0; ct<numNodes; ct++) {			// initialise outputs, errors and deltas
		outputs[ct] = 0;
		deltas[ct] = 0;
		errors[ct] = 0;
	}
}
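// Weight layout used throughout this module: weights are stored node by node,
// each node contributing its bias weight followed by its numInputs input weights.
// For example, with numInputs = 2 and numNodes = 2 the six weights are
//   [ bias0, w0_in0, w0_in1, bias1, w1_in0, w1_in1 ]
// so node n's bias is weights[(numInputs+1)*n] and its weight for input i
// is weights[(numInputs+1)*n + 1 + i].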
LinActLayer::~LinActLayer() {
	// destructor ... returns all 'new' memory to heap
	delete [] weights;			// return array of weights to heap
	delete [] deltaWeights;	// return deltaWeights to heap
	delete [] outputs;			// return outputs array
	delete [] deltas;
	delete [] errors;
}
void LinActLayer::CalcOutputs(const double ins[]) {
	// calculate sum of weighted inputs ins for each neuron
	// store in array of outputs
	int weightct = 0;									// counter for which weight
														// note each weight is used in order
	for (int nodect=0; nodect < numNodes; nodect++) {	// for each neuron in layer
		outputs[nodect] = weights[weightct++];			// output = bias +
		for (int inct=0; inct<numInputs; inct++)		// each input * assoc. weight
			outputs[nodect] += ins[inct] * weights[weightct++];
	}
}
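// Worked example: with numInputs = 2, each node has 3 weights, so node n computes
//   outputs[n] = weights[3n] + ins[0]*weights[3n+1] + ins[1]*weights[3n+2]
// i.e. a purely linear weighted sum plus bias.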
void LinActLayer::TestNetwork(const double ins[], double outs[]) {
	// calculate network outputs given the inputs ins
	// and return outputs in the array in outs
	CalcOutputs (ins);					// calculate the weighted sum of ins
	dcopy (numNodes, outputs, outs);	// and copy from outputs to outs array
}
void LinActLayer::CalcErrors (const double targets[]) {
	// calculate errors : being difference between targets and actual outputs
	// store in errors array
	for (int i = 0; i < numNodes; i++) {
		errors[i] = targets[i] - outputs[i];	// each neuron's error is its target minus its output
	}
}
void LinActLayer::ErrorsToDeltas (void) {
	// calculate deltas from the errors
	dcopy (numNodes, errors, deltas);	// for LinActLayer, deltas are the same as errors
}
void LinActLayer::ChangeAWeight(int weightct, double inp, double delta, const double learnparas[]) {
	// change the weightct'th weight in the layer's weight array
	// using the associated deltaWeight (the weight change last time), scaled by momentum,
	// plus the current delta, input inp and learning rate
	// learnparas = [learning rate, momentum]
	deltaWeights[weightct] = (deltaWeights[weightct] * learnparas[1]) + (delta * inp * learnparas[0]);
	weights[weightct] += deltaWeights[weightct];	// add the change to the weight
}
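// This is the delta rule with momentum: writing r for the learning rate and
// m for the momentum term,
//   deltaW(t) = m * deltaW(t-1) + r * delta * input
//   w(t+1)    = w(t) + deltaW(t)
// so past weight changes smooth the current update.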
void LinActLayer::ChangeAllWeights (const double ins[], const double learnparas[]) {
	// Change all weights in layer - using inputs ins and [learning rate, momentum]
	// for each neuron, change bias weight (inherent input 1) and then other weights using ins
	double k;			// input associated with the current weight
	int w = 0, z;		// w: current node; z: current input
	// A single flat loop is used rather than nested loops, as it more closely
	// reflects the in-memory layout of the weights array, as covered in lectures.
	for (int i = 0; i < numNodes * (numInputs + 1); i++) {
		if (i % (numInputs + 1) == 0) {
			// this is a bias weight
			z = 0;						// reset input counter
			k = 1;						// bias has an inherent input of 1
			w = i / (numInputs + 1);	// the node we are on
		}
		else {
			// this is an input weight, so use the next input
			k = ins[z++];
		}
		ChangeAWeight(i, k, deltas[w], learnparas);	// apply the weight change
	}
}
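// For comparison, an equivalent nested-loop form of the same update - an
// illustrative sketch only, excluded from compilation:
#if 0
void LinActLayer::ChangeAllWeights (const double ins[], const double learnparas[]) {
	int weightct = 0;									// weights visited in storage order
	for (int nodect = 0; nodect < numNodes; nodect++) {
		ChangeAWeight(weightct++, 1, deltas[nodect], learnparas);	// bias weight
		for (int inct = 0; inct < numInputs; inct++)				// then input weights
			ChangeAWeight(weightct++, ins[inct], deltas[nodect], learnparas);
	}
}
#endif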
void LinActLayer::FindDeltas (const double targets[]) {
	// find the deltas using targets and object's outputs
	// first calculate errors (targets - outputs) and thence deltas
	CalcErrors(targets);	// find the errors
	ErrorsToDeltas();		// hence find the deltas
}
void LinActLayer::TrainNetwork (const double ins[], double outs[], double targets[], const double learnparas[]) {
	// pass inputs ins to network, calculating outputs and putting them in outs
	// then find errors and deltas, using targets
	// and then adjust weights by applying the delta rule
	// where learnparas[0] is learning rate; learnparas[1] is momentum
	TestNetwork(ins, outs);				// pass data to network
	FindDeltas(targets);				// find errors and thence deltas
	ChangeAllWeights(ins, learnparas);	// and change all the weights
}
void LinActLayer::SetTheWeights (const double initWt[]) {
	// set the weights of the layer to the values in initWt
	// do so by copying from initWt into object's weights
	dcopy (numWeights, initWt, weights);	// copy from initWt to weights
}
int LinActLayer::HowManyWeights (void) {
	// return the number of weights in layer
	return numWeights;
}
void LinActLayer::ReturnTheWeights (double theWts[]) {
	// return in theWts the current value of the weights in the layer
	dcopy (numWeights, weights, theWts);	// copy the layer's weights to array theWts
}
void LinActLayer::WeightedDeltas (double wdeltas[]) {
	// return weighted sum of deltas (used in back propagation)
	// wdeltas is an array whose size is the number of outputs of the previous layer (ie numInputs)
	for (int ic = 0; ic < numInputs; ic++) {			// ic: input counter
		wdeltas[ic] = 0;								// initialise the sum to zero
		for (int nc = 0; nc < numNodes; nc++) {			// nc: node counter
			int w = ic + 1 + (numInputs + 1) * nc;		// index of node nc's weight for input ic
			wdeltas[ic] += deltas[nc] * weights[w];		// add the current delta * weight to the total
		}
	}
}
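// Worked example: with numInputs = 2 and numNodes = 2, wdeltas[0] sums
//   deltas[0]*weights[1] + deltas[1]*weights[4]
// that is, the two weights attached to input 0, skipping the bias weights
// at indices 0 and 3.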
// Implementation of SigActLayer *****************************
SigActLayer::SigActLayer (int numIns, int numOuts)
		:LinActLayer (numIns, numOuts) {
	// just use inherited constructor - no extra variables to initialise
}
SigActLayer::~SigActLayer() {
	// destructor - does not need to do anything other than call inherited destructor
}
void SigActLayer::CalcOutputs(const double ins[]) {
	// Calculate Outputs being Sigmoid (WeightedSum of ins)
	LinActLayer::CalcOutputs(ins);		// first form the weighted sums
	for (int i = 0; i < numNodes; i++) {
		outputs[i] = 1.0 / (1.0 + exp(-outputs[i]));	// then apply the sigmoidal function
	}
}
void SigActLayer::ErrorsToDeltas (void) {
	// Calculate the Deltas for the layer - processing Errors
	// Deltas are Outputs * (1 - Outputs) * Errors
	for (int i = 0; i < numNodes; i++) {
		deltas[i] = outputs[i] * (1 - outputs[i]) * errors[i];	// scale each error by the sigmoid derivative
	}
}
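// The Outputs * (1 - Outputs) factor is the derivative of the sigmoid:
// for s(x) = 1 / (1 + exp(-x)), s'(x) = s(x) * (1 - s(x)), which is why the
// deltas can be computed from the stored outputs alone.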
// Implementation of SigActHidLayer *****************************
SigActHidLayer::SigActHidLayer (int numIns, int numOuts, LinActLayer *tonextlayer)
		:SigActLayer (numIns, numOuts) {
	// construct a hidden layer with numIns inputs and numOuts outputs
	// where (a pointer to) its next layer is in tonextlayer
	// use inherited constructor for hidden layer
	// and attach the pointer to the next layer that is passed
	nextlayer = tonextlayer;
}
SigActHidLayer::~SigActHidLayer() {
	delete nextlayer;	// remove next layer, then the inherited destructor is called automatically
}
void SigActHidLayer::TestNetwork (const double ins[], double outs[]) {
	SigActLayer::CalcOutputs(ins);			// pass the inputs through this hidden layer
	nextlayer->TestNetwork(outputs, outs);	// then feed its outputs into the next layer
}
void SigActHidLayer::FindDeltas (const double targets[]) {
	// find all deltas in the network
	// do so by finding errors and deltas in the next layer
	// and then the errors and deltas in this layer
	nextlayer->FindDeltas(targets);		// find errors and deltas in next layer
	nextlayer->WeightedDeltas(errors);	// this layer's errors are the weighted sum of the next layer's deltas
	SigActLayer::ErrorsToDeltas();		// turn the errors of this hidden layer into deltas
}
void SigActHidLayer::ChangeAllWeights (const double ins[], const double learnparas[]) {
	// Change all weights in network - do this layer then next layer
	SigActLayer::ChangeAllWeights(ins, learnparas);
	nextlayer->ChangeAllWeights(outputs, learnparas);
}
void SigActHidLayer::SetTheWeights (const double initWt[]) {
	// load all weights in network using values in initWt
	// initWt[0..numWeights-1] are weights for this layer
	// rest are weights for next layer
	SigActLayer::SetTheWeights(initWt);				// first numWeights values are for this layer
	nextlayer->SetTheWeights(&initWt[numWeights]);	// the rest are for the next layer
}
int SigActHidLayer::HowManyWeights (void) {
	// return the number of weights in network
	// these are those in this layer + those in nextlayer
	// (found by calling HowManyWeights rather than reading nextlayer's numWeights directly)
	return numWeights + nextlayer->HowManyWeights();
}
void SigActHidLayer::ReturnTheWeights (double theWts[]) {
	// return the weights of the network into theWts
	// theWts[0..numWeights-1] are weights for this layer
	// rest are weights for next layer
	SigActLayer::ReturnTheWeights(theWts);				// copy this layer's weights into theWts
	nextlayer->ReturnTheWeights(&theWts[numWeights]);	// then the next layer's, starting after them
}
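// Minimal usage sketch: builds a 2-3-1 network and trains it on XOR data.
// This is illustrative only: it assumes mlplayer.h declares TestNetwork,
// FindDeltas and ChangeAllWeights as virtual (as the layered calls above
// require), and the MLPLAYER_DEMO guard and the training data are hypothetical.
#ifdef MLPLAYER_DEMO
#include <iostream>
int main (void) {
	const double ins[4][2] = {{0,0}, {0,1}, {1,0}, {1,1}};
	double targets[4][1] = {{0}, {1}, {1}, {0}};		// XOR targets
	const double learnparas[2] = {0.2, 0.7};			// [learning rate, momentum]
	// hidden layer of 3 sigmoidal nodes feeding 1 sigmoidal output node
	SigActHidLayer *net = new SigActHidLayer (2, 3, new SigActLayer (3, 1));
	double outs[1];
	for (int epoch = 0; epoch < 10000; epoch++)			// train over the data set
		for (int p = 0; p < 4; p++)
			net->TrainNetwork (ins[p], outs, targets[p], learnparas);
	for (int p = 0; p < 4; p++) {						// show what was learnt
		net->TestNetwork (ins[p], outs);
		std::cout << ins[p][0] << " " << ins[p][1] << " -> " << outs[0] << "\n";
	}
	delete net;											// also deletes the output layer
	return 0;
}
#endif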