main.cpp
//2017-05-11 fix bug: the index was wrong before in m32_conv0 = convolute_mat2(&Feature0Kernel[31][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
//2017-05-05 32x32x32 feature straight connection
//2017-05-05 FIX so the kernel weights are updated directly after each pixel step (before, all steps were summed up together, which did not work properly). The kernel patches now train much, much faster and better;
//the kernel features now adapt and look much more like they correspond to the training images.
//Add dropout on the fully connected HiddenNodes to prevent overtraining
//2017-01-17 fix bug in make_SumChangeFeature0Weights: replace c_m1 with m1_conv0
//Asks whether to load any previously rerun training pass
//Reruns the fully connected weights (10 times); locks kernel layer 0 after (2) reruns and kernel layer 1 after (4) reruns
//Show all kernels in 3 windows
//In this example there are 6 output nodes; the training image set is explained at the start of the program.
float pre_debug_diff = 0.0;
float debug_diff = 0.0;
int load_kernels;
#include <opencv2/highgui/highgui.hpp> // OpenCV window I/O
#include <opencv2/imgproc/imgproc.hpp> // Gaussian Blur
#include <stdio.h>
#include <raspicam/raspicam_cv.h>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat, Scalar)
#include <cstdlib>
#include <ctime>
#include <math.h> // exp
#include <stdlib.h>// exit(0);
#include <iostream>
const float MAX_SumChangeFeatureWeights = 10000.0f;
const int NUM_OF_KERNEL_LAYER = 3;
const int FRAME_WIDTH = 64;//Training image size
const int FRAME_HEIGHT = 48;//Training image size
const int FEATURE2 = 32;//Number of learned features (or filter kernels) in the third convolution
const int FEATURE1 = 32;//Number of learned features (or filter kernels) in the second convolution
const int FEATURE0 = 32;//Number of learned features (or filter kernels) in the first convolution
const int FE1KSIZE = 49;//7x7 size of the learned kernel
const int FE1KSIZESQR = 7;//Square root of FE1KSIZE, the side length of the learned kernel
const int PADDING_SIZE_TO_T1P = (FE1KSIZESQR-1)/2;//How many padding pixels will be added on each side, from the T1 to the T1P image
const int FE0KSIZE = 49;//7x7 size of the learned kernel
const int FE0KSIZESQR = 7;//Square root of FE0KSIZE, the side length of the learned kernel
const int PADDING_SIZE_TO_T0P = (FE0KSIZESQR-1)/2;//How many padding pixels will be added on each side, from the T0 to the T0P image
const int FE2KSIZE = 121;//11x11 size of the learned kernel
const int FE2KSIZESQR = 11;//Square root of FE2KSIZE, the side length of the learned kernel
const int PADDING_SIZE_TO_T2P = (FE2KSIZESQR-1)/2;//How many padding pixels will be added on each side, from the T2 to the T2P image
int NUMBER_OF_RERUN = 10;
int rerun_fully_connected = 0;
const int NN_CONVOLUTED_FRAMES = FEATURE2;//Number of convoluted frames fed into the fully connected network (equals FEATURE2)
const float MAX_PIXEL_START_DEFAULT = -10;
const int POLC2_H = 6;
const int POLC2_W = 8;
const int C2M_H = POLC2_H * 2;//12
const int C2M_W = POLC2_W * 2;//16
const int POL_C_M_HIGHT = C2M_H + PADDING_SIZE_TO_T2P*2;//12 + 11-1 = 22
const int POL_C_M_WIDTH = C2M_W + PADDING_SIZE_TO_T2P*2;//16 + 11-1 = 26
const int T_FRAME_HEIGHT = C2M_H * 2; //24
const int T_FRAME_WIDTH = C2M_W * 2; //32
const int POL_C0_M_HIGHT = T_FRAME_HEIGHT + FE1KSIZESQR-1;//24 + 7-1 = 30
const int POL_C0_M_WIDTH = T_FRAME_WIDTH + FE1KSIZESQR-1;//32 + 7-1 = 38
const int T0_FRAME_HEIGHT = T_FRAME_HEIGHT * 2; //48
const int T0_FRAME_WIDTH = T_FRAME_WIDTH * 2; //64
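//Size flow through the network (width x height), derived from the constants above:
//  64x48 input --pad +3/side--> 70x54 --conv 7x7--> 64x48 --2x2 pool--> 32x24
//  --pad +3/side--> 38x30 --conv 7x7--> 32x24 --2x2 pool--> 16x12
//  --pad +5/side--> 26x22 --conv 11x11--> 16x12 --2x2 pool--> 8x6
//  so 8*6*32 = 1536 values are finally flattened into the Input[] array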
using namespace std;
using namespace cv;
const char PIXEL_BYTES = 1;//1 byte per pixel, grayscale
const string trackbarWindowName = "Trackbars";
int ker_gain0 = 100;
int ker_gain1 = 100;
int ker_gain2 = 100;
int momentum_fully = 100;
int momentum0 = 100;
int momentum1 = 100;
int momentum2 = 100;
int H_MIN = 0;
int H_MAX = 1000;
int M_MAX = 100;
void on_trackbar( int, void* )
{//This function gets called whenever a
// trackbar position is changed
}
void createTrackbars(){
//create window for trackbars
namedWindow(trackbarWindowName,0);
//create memory to store trackbar name on window
char TrackbarName[50];
sprintf( TrackbarName, "Kernel Gain proportion ");
createTrackbar( "ker_gain0 ", trackbarWindowName, &ker_gain0, H_MAX, on_trackbar );
createTrackbar( "ker_gain1 ", trackbarWindowName, &ker_gain1, H_MAX, on_trackbar );
createTrackbar( "ker_gain2 ", trackbarWindowName, &ker_gain2, H_MAX, on_trackbar );
createTrackbar( "momentum_fully ", trackbarWindowName, &momentum_fully, M_MAX, on_trackbar );
createTrackbar( "momentum0 ", trackbarWindowName, &momentum0, M_MAX, on_trackbar );
createTrackbar( "momentum1 ", trackbarWindowName, &momentum1, M_MAX, on_trackbar );
createTrackbar( "momentum2 ", trackbarWindowName, &momentum2, M_MAX, on_trackbar );
}
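//Note (a sketch of the intent, not code from this file): the trackbars only store
//raw integers; adjust_ker_gain_by_trackbars(), declared further down, presumably
//rescales them to the float training parameters, e.g. something like
//  LearningRateKer0 = const_LearningRateKer0 * (float)ker_gain0 / 100.0f;
//so that the default slider value of 100 would map to a gain of 1.0.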
FILE *fp2;
// ====================================================================
// == Neural Network Configuration - customized per network
// ====================================================================
const int OutputNodes = 6;//Number of categories
const int TrainingPicturesAtOneCategory = 250;//How many pictures there will be in one category
const int PatternCount = TrainingPicturesAtOneCategory * OutputNodes;//Number of total training images
const int InputNodes = POLC2_W * POLC2_H * NN_CONVOLUTED_FRAMES * PIXEL_BYTES;//8*6*32*1 = 1536 input nodes
const int HiddenNodes = 100;//20
int dropoutHidden[HiddenNodes];
float LearningRate = 0.025;//0.025
//float LearningRateKer = 5.3;//2.225 ,20.0
//float LearningRateKer1 = 3.0;//1.225 , 20.0
//float LearningRateKer0 = 0.8;//2.225 ,20.0
const float const_LearningRateKer_high_gain = 4;//2.225 ,20.0
const float const_LearningRateKer1_high_gain = 10;//1.225 , 20.0
const float const_LearningRateKer0_high_gain = 20;//2.225 ,20.0
#define USE_HIGH_GAIN_AT_START
//const int repeat_same = 1;
//const float USE_HIGH_GAIN_ERROR = 100.0f;
const float USE_HIGH_GAIN_ERROR = 1000.0f;
//const float const_LearningRateKer = 2;//2.0
//const float const_LearningRateKer1 = 0.5;//1.0
//const float const_LearningRateKer0 = 0.4;//0.4
const float const_LearningRateKer = 5;//2.0
const float const_LearningRateKer1 = 2;//1.0
const float const_LearningRateKer0 = 0.8;//0.4
int train_feature_kernel_layer;
float LearningRateKer = 5;//2.225 ,20.0
float LearningRateKer1 = 2;//1.225 , 20.0
float LearningRateKer0 = 0.8;//2.225 ,20.0
//Things regarding dynamic auto adjustment gain of individual kernels
//#define REPORT_KERNEL_AUTO_ADJ
float kernel_gain_level[NUM_OF_KERNEL_LAYER][FEATURE2];
const int DO_KERNEL_GAIN_CALC = 1;//Do kernel gain calculation
int check_kernel_gain = 0;//Counter up to DO_KERNEL_GAIN_CALC
const float FEAT0_TYP_GAIN = 0.7;//0.5
const float FEAT1_TYP_GAIN = 0.6;//0.5
const float FEAT2_TYP_GAIN = 0.8;
//
const float C_moment = 0.9;
const float C_momentKer2 = 0.8;//0.85
const float C_momentKer1 = 0.9;//0.85
const float C_momentKer0 = 0.9;//0.85
float Momentum = 0.9;
float MomentumKer2 = 0.8;//0.85
float MomentumKer1 = 0.9;//0.85
float MomentumKer0 = 0.9;//0.85
const float InitialWeightMax = 0.04;
const float KERMULINIT = 15;
const float Success = 1.0;
#define USE_NORMALIZER
//#define USE_LOCK_KERNEL_LAYER
#define USE_LOCK_KER_0_1_AFTER_RERUN_NR
#define LOCK_KER_0_AFTER_RERUN_NR 4
#define LOCK_KER_1_AFTER_RERUN_NR 6
#define USE_VERIFICATION //Run a verification image set verXXX.JPG after each training cycle to test whether overfitting occurs
#define USE_BACK_PROP_OFF_AT_PADDING
//#define USE_KER_HAT_GAIN
//const float accetp_increase_verification_error = 0.0;
const float accetp_increase_ver_part_error = 0.10f;//0.05 = 5%
float Input[InputNodes] = {};//This is the input array of the fully connected neural network (after the convolution stages, the final-layer matrices are merged into this vector array)
float Target[PatternCount][OutputNodes] = {};
// ====================================================================
// == End Neural Network Configuration
// ====================================================================
// ====================================================================
// == Neural Network variables
// ====================================================================
int i, j, p, q, r;
int ReportEvery1000;
int RandomizedIndex[PatternCount];
long TrainingCycle;
float Rando;
const float START_MIN_ERROR_LEVEL = 100000;
float Error_level;
float min_Error_level = START_MIN_ERROR_LEVEL;
float min_Verification_error_level = START_MIN_ERROR_LEVEL;
int verification = 0;
int overfitted = 0;
float Accum;
float Hidden[HiddenNodes];
float Output[OutputNodes];
float HiddenWeights[InputNodes+1][HiddenNodes];
float OutputWeights[HiddenNodes+1][OutputNodes];
float HiddenDelta[HiddenNodes];
float OutputDelta[OutputNodes];
float ChangeHiddenWeights[InputNodes+1][HiddenNodes];
float ChangeOutputWeights[HiddenNodes+1][OutputNodes];
float InputDelta[InputNodes];//This is the input node layer of the fully connected network; backwards from this layer the network is no longer fully connected, only shared weights
//Convolutional part of the NN shared weight (Not fully connected)
float ChangeFeatureWeights[FE2KSIZE+1][FEATURE2];//these are the shared weights for the convolutional features
float SumChangeFeatureWeights[FE2KSIZE+1][FEATURE2];//this will be summed into the shared weights for the convolutional features
float ChangeFeature1Weights[FE1KSIZE+1][FEATURE1];//these are the shared weights for the convolutional features
float SumChangeFeature1Weights[FE1KSIZE+1][FEATURE1];//this will be summed into the shared weights for the convolutional features
float ChangeFeature0Weights[FE0KSIZE+1][FEATURE0];//these are the shared weights for the convolutional features
float SumChangeFeature0Weights[FE0KSIZE+1][FEATURE0];//this will be summed into the shared weights for the convolutional features
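//Note: in each kernel array the extra slot at index FE0KSIZE/FE1KSIZE/FE2KSIZE
//holds the bias weight, which convolute_mat2() adds after the element-wise sum.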
//kernel show
//Mat layer_0_kernels(Height, Width, CV_32F);
Mat layer_0_kernels(FE0KSIZESQR * FEATURE0, FE0KSIZESQR, CV_32F);
Mat layer_1_kernels(FE1KSIZESQR * FEATURE1, FE1KSIZESQR, CV_32F);
Mat layer_2_kernels(FE2KSIZESQR * FEATURE2, FE2KSIZESQR, CV_32F);
//
float pol_c_m_delta[FEATURE1][POL_C_M_HIGHT * POL_C_M_WIDTH];//These deltas are needed to make it possible to calculate gradients and train the earlier kernel layers
float pol_c0_m_delta[FEATURE0][POL_C0_M_HIGHT * POL_C0_M_WIDTH];//These deltas are needed to make it possible to calculate gradients and train the first kernel layer
Mat m1_0_padded(T0_FRAME_HEIGHT+(PADDING_SIZE_TO_T0P*2),T0_FRAME_WIDTH+(PADDING_SIZE_TO_T0P*2),CV_32F);//This is the first-stage image with padding pixels added around the source image
Mat m1_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);//Layer-0 convolution outputs, one Mat per feature kernel
Mat m2_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m3_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m4_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m5_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m6_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m7_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m8_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m9_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m10_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m11_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m12_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m13_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m14_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m15_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m16_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m17_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m18_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m19_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m20_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m21_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m22_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m23_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m24_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m25_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m26_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m27_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m28_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m29_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m30_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m31_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat m32_conv0(T0_FRAME_HEIGHT +(PADDING_SIZE_TO_T1P*2),T0_FRAME_WIDTH +(PADDING_SIZE_TO_T1P*2),CV_32F);
Mat pol_c0_m1_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);//After first pooling, before padding
Mat pol_c0_m2_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m3_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m4_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m5_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m6_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m7_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m8_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m9_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);//After first pooling, before padding
Mat pol_c0_m10_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m11_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m12_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m13_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m14_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m15_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m16_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m17_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);//After first pooling, before padding
Mat pol_c0_m18_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m19_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m20_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m21_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m22_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m23_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m24_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m25_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);//After first pooling, before padding
Mat pol_c0_m26_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m27_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m28_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m29_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m30_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m31_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m32_unpad(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c0_m1(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);//Now stage 2 of padding
Mat pol_c0_m2(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat pol_c0_m3(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat pol_c0_m4(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat pol_c0_m5(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat pol_c0_m6(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat pol_c0_m7(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat pol_c0_m8(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat pol_c0_m9(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);//Now stage 2 of padding
Mat pol_c0_m10(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat pol_c0_m11(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat pol_c0_m12(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat pol_c0_m13(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat pol_c0_m14(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat pol_c0_m15(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat pol_c0_m16(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat pol_c0_m17(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);//Now stage 2 of padding
Mat pol_c0_m18(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat pol_c0_m19(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat pol_c0_m20(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat pol_c0_m21(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat pol_c0_m22(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat pol_c0_m23(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat pol_c0_m24(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat pol_c0_m25(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);//Now stage 2 of padding
Mat pol_c0_m26(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat pol_c0_m27(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat pol_c0_m28(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat pol_c0_m29(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat pol_c0_m30(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat pol_c0_m31(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat pol_c0_m32(POL_C0_M_HIGHT,POL_C0_M_WIDTH,CV_32F);
Mat c_m1(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);//After the second convolution stage
Mat c_m2(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m3(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m4(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m5(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m6(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m7(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m8(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m9(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m10(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m11(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m12(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m13(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m14(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m15(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m16(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m17(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);//After the second convolution stage
Mat c_m18(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m19(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m20(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m21(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m22(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m23(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m24(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m25(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m26(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m27(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m28(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m29(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m30(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m31(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat c_m32(T_FRAME_HEIGHT,T_FRAME_WIDTH,CV_32F);
Mat pol_c_m1_unpad(C2M_H,C2M_W,CV_32F);//After second pooling, before padding
Mat pol_c_m2_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m3_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m4_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m5_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m6_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m7_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m8_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m9_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m10_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m11_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m12_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m13_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m14_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m15_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m16_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m17_unpad(C2M_H,C2M_W,CV_32F);//After second pooling, before padding
Mat pol_c_m18_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m19_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m20_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m21_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m22_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m23_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m24_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m25_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m26_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m27_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m28_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m29_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m30_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m31_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m32_unpad(C2M_H,C2M_W,CV_32F);
Mat pol_c_m1(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);//Now stage 3 of padding
Mat pol_c_m2(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m3(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m4(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m5(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m6(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m7(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m8(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m9(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m10(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m11(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m12(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m13(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m14(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m15(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m16(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m17(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);//Now stage 3 of padding
Mat pol_c_m18(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m19(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m20(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m21(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m22(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m23(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m24(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m25(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m26(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m27(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m28(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m29(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m30(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m31(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat pol_c_m32(POL_C_M_HIGHT,POL_C_M_WIDTH,CV_32F);
Mat c2_m1_n(C2M_H,C2M_W,CV_32F);//After the third convolution stage
Mat c2_m2_n(C2M_H,C2M_W,CV_32F);
Mat c2_m3_n(C2M_H,C2M_W,CV_32F);
Mat c2_m4_n(C2M_H,C2M_W,CV_32F);
Mat c2_m1_e(C2M_H,C2M_W,CV_32F);
Mat c2_m2_e(C2M_H,C2M_W,CV_32F);
Mat c2_m3_e(C2M_H,C2M_W,CV_32F);
Mat c2_m4_e(C2M_H,C2M_W,CV_32F);
Mat c2_m1_w(C2M_H,C2M_W,CV_32F);
Mat c2_m2_w(C2M_H,C2M_W,CV_32F);
Mat c2_m3_w(C2M_H,C2M_W,CV_32F);
Mat c2_m4_w(C2M_H,C2M_W,CV_32F);
Mat c2_m1_s(C2M_H,C2M_W,CV_32F);
Mat c2_m2_s(C2M_H,C2M_W,CV_32F);
Mat c2_m3_s(C2M_H,C2M_W,CV_32F);
Mat c2_m4_s(C2M_H,C2M_W,CV_32F);
Mat c3_m1_n(C2M_H,C2M_W,CV_32F);//After the third convolution stage
Mat c3_m2_n(C2M_H,C2M_W,CV_32F);
Mat c3_m3_n(C2M_H,C2M_W,CV_32F);
Mat c3_m4_n(C2M_H,C2M_W,CV_32F);
Mat c3_m1_e(C2M_H,C2M_W,CV_32F);
Mat c3_m2_e(C2M_H,C2M_W,CV_32F);
Mat c3_m3_e(C2M_H,C2M_W,CV_32F);
Mat c3_m4_e(C2M_H,C2M_W,CV_32F);
Mat c3_m1_w(C2M_H,C2M_W,CV_32F);
Mat c3_m2_w(C2M_H,C2M_W,CV_32F);
Mat c3_m3_w(C2M_H,C2M_W,CV_32F);
Mat c3_m4_w(C2M_H,C2M_W,CV_32F);
Mat c3_m1_s(C2M_H,C2M_W,CV_32F);
Mat c3_m2_s(C2M_H,C2M_W,CV_32F);
Mat c3_m3_s(C2M_H,C2M_W,CV_32F);
Mat c3_m4_s(C2M_H,C2M_W,CV_32F);
Mat pol_c2_m1_n(POLC2_H,POLC2_W,CV_32F);//After the third pooling stage; all this data will be put into the Input[] array
Mat pol_c2_m2_n(POLC2_H,POLC2_W,CV_32F);
Mat pol_c2_m3_n(POLC2_H,POLC2_W,CV_32F);
Mat pol_c2_m4_n(POLC2_H,POLC2_W,CV_32F);
Mat pol_c2_m1_e(POLC2_H,POLC2_W,CV_32F);
Mat pol_c2_m2_e(POLC2_H,POLC2_W,CV_32F);
Mat pol_c2_m3_e(POLC2_H,POLC2_W,CV_32F);
Mat pol_c2_m4_e(POLC2_H,POLC2_W,CV_32F);
Mat pol_c2_m1_w(POLC2_H,POLC2_W,CV_32F);
Mat pol_c2_m2_w(POLC2_H,POLC2_W,CV_32F);
Mat pol_c2_m3_w(POLC2_H,POLC2_W,CV_32F);
Mat pol_c2_m4_w(POLC2_H,POLC2_W,CV_32F);
Mat pol_c2_m1_s(POLC2_H,POLC2_W,CV_32F);
Mat pol_c2_m2_s(POLC2_H,POLC2_W,CV_32F);
Mat pol_c2_m3_s(POLC2_H,POLC2_W,CV_32F);
Mat pol_c2_m4_s(POLC2_H,POLC2_W,CV_32F);
Mat pol_c3_m1_n(POLC2_H,POLC2_W,CV_32F);//After the third pooling stage; all this data will be put into the Input[] array
Mat pol_c3_m2_n(POLC2_H,POLC2_W,CV_32F);
Mat pol_c3_m3_n(POLC2_H,POLC2_W,CV_32F);
Mat pol_c3_m4_n(POLC2_H,POLC2_W,CV_32F);
Mat pol_c3_m1_e(POLC2_H,POLC2_W,CV_32F);
Mat pol_c3_m2_e(POLC2_H,POLC2_W,CV_32F);
Mat pol_c3_m3_e(POLC2_H,POLC2_W,CV_32F);
Mat pol_c3_m4_e(POLC2_H,POLC2_W,CV_32F);
Mat pol_c3_m1_w(POLC2_H,POLC2_W,CV_32F);
Mat pol_c3_m2_w(POLC2_H,POLC2_W,CV_32F);
Mat pol_c3_m3_w(POLC2_H,POLC2_W,CV_32F);
Mat pol_c3_m4_w(POLC2_H,POLC2_W,CV_32F);
Mat pol_c3_m1_s(POLC2_H,POLC2_W,CV_32F);
Mat pol_c3_m2_s(POLC2_H,POLC2_W,CV_32F);
Mat pol_c3_m3_s(POLC2_H,POLC2_W,CV_32F);
Mat pol_c3_m4_s(POLC2_H,POLC2_W,CV_32F);
// ====================================================================
// == End Neural Network variables
// ====================================================================
// ====================================================================
// == Regarding File read
// ====================================================================
char filename[20];
char filename2[20];
// ====================================================================
// == END Regarding File read
// ====================================================================
//declare prototypes
void toTerminal(void);
void adjust_ker_gain_by_trackbars(void);
float Feature0Kernel[FEATURE0][FE0KSIZE+1];
float Feature1Kernel[FEATURE1][FE1KSIZE+1];
float Feature2Kernel[FEATURE2][FE2KSIZE+1];
float Feature0domeKernel[FEATURE0][FE0KSIZE+1];
float Feature1domeKernel[FEATURE1][FE1KSIZE+1];
float Feature2domeKernel[FEATURE2][FE2KSIZE+1];
float kernel0_dome_gain[FE0KSIZE+1];
float kernel1_dome_gain[FE1KSIZE+1];
float kernel2_dome_gain[FE2KSIZE+1];
float abs_value(float signed_value)
{
if(signed_value < 0)
{
signed_value = -signed_value;
}
return signed_value;//Equivalent to fabsf() from <math.h>
}
void randomize_dropoutHid(void)
{
int drop_out_part = HiddenNodes/2;//1/2= 50% hidden nodes
// int dropoutHidden[HiddenNodes];
srand (static_cast <unsigned> (time(0)));//Seed the randomizer
// q = rand() % PatternCount;
for(int i=0; i<HiddenNodes; i++)
{
dropoutHidden[i] = 0;//reset
}
int check_how_many_dropout = 0;
if(verification == 0)
{
for(int k=0; k<HiddenNodes; k++) //Iterate at most HiddenNodes times, then give up trying to reach drop_out_part
{
for(int i=0; i<(drop_out_part-check_how_many_dropout); i++)
{
int r=0;
r = rand() % HiddenNodes;//Pick a random hidden node index in [0, HiddenNodes-1]
dropoutHidden[r] = 1;//add a dropout node
}
check_how_many_dropout = 0;
for(int j=0; j<HiddenNodes; j++)
{
check_how_many_dropout += dropoutHidden[j];
}
if(check_how_many_dropout >= drop_out_part)
{
break;
}
}
// printf("check_how_many_dropout =%d\n", check_how_many_dropout);
}
}
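//Minimal sketch (an illustration, not the actual hidden-layer code, which lives
//elsewhere in the program) of how the dropout mask is meant to be consumed in
//the forward pass during training:
//  for(int h = 0; h < HiddenNodes; h++)
//  {
//      if(dropoutHidden[h] == 1) { Hidden[h] = 0.0f; continue; }//dropped node
//      //...otherwise accumulate the weighted sum over InputNodes and apply the sigmoid as usual
//  }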
void local_normalizing(Mat gray)
{
#define BLUR_FLT_NUMERATOR 2
#define BLUR_FLT_DENOMINATOR 20
Mat float_gray, blur, num, den, store_gray;
store_gray = gray;//Keep a reference to the caller's buffer (Mat assignment shares data) so the result can be written back below
// convert to floating-point image
gray.convertTo(float_gray, CV_32F, 1.0/255.0);
// numerator = img - gauss_blur(img)
cv::GaussianBlur(float_gray, blur, Size(0,0), BLUR_FLT_NUMERATOR, BLUR_FLT_NUMERATOR);
num = float_gray - blur;
// denominator = sqrt(gauss_blur(img^2))
cv::GaussianBlur(num.mul(num), blur, Size(0,0), BLUR_FLT_DENOMINATOR, BLUR_FLT_DENOMINATOR);
cv::pow(blur, 0.5, den);
// output = numerator / denominator
gray = num / den;
// normalize output into [0,1]
cv::normalize(gray, gray, 0.0, 1.0, NORM_MINMAX, -1);
// Display
//namedWindow("demo", CV_WINDOW_AUTOSIZE );
gray.convertTo(store_gray, CV_8U, 255);
//imshow("demo", gray);
}
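//The filter above computes, per pixel, approximately
//  out = (I - G_s1(I)) / sqrt(G_s2((I - G_s1(I))^2))
//where G_s is a Gaussian blur with sigma s: the image minus its local mean,
//divided by the local standard deviation, then rescaled into [0,1].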
void print_mat_size(Mat image)
{
int image_channels2 = image.channels();
int image_nRows2 = image.rows;
int image_nCols2 = image.cols * image_channels2;
int image_pix_nCols2 = image.cols;
printf("image matrix consist of channels: %d\n", image_channels2);//Print out only to understanding the image Mat matrix layout. in this case channels it 3 bytes for image Huge, Sat and Value byte
printf("image matrix consist of nRows: %d\n", image_nRows2);
printf("image matrix consist of nCols: %d\n", image_nCols2);
printf("image matrix consist of pixels on one row: %d\n", image_pix_nCols2);
uchar* uchar_pointer_zero;
float* float_pointer_zero;
uchar_pointer_zero = image.ptr<uchar>(0);
float_pointer_zero = image.ptr<float>(0);
printf("Float pixel %f\n", *float_pointer_zero);
printf("uchar pixel %d\n", *uchar_pointer_zero);
}
Mat convolute_mat2(float* kernel, int kernel_cols, int kernel_rows, Mat image, int src_n_colums)
{
//Second Convolution layer
//This function slides the convolution filter kernel over the input image and returns a slightly smaller image, whose size depends on the kernel dimensions
float* pointer_zero;
float* pointer_index;
pointer_zero = image.ptr<float>(0);
int nRows = image.rows;
int nCols = image.cols;
int nhoriz_steps = 0;
int nverti_steps = 0;
nhoriz_steps = nCols - kernel_cols + 1;
nverti_steps = nRows - kernel_rows + 1;
float matrix_sum = 0.0f;
pointer_index = pointer_zero;
float* kernel_index = kernel;
Mat conv_img(nverti_steps, nhoriz_steps, CV_32F);//Mat xxx(height, width, type)
float* conv_ptr_zero;
float* conv_ptr_index;
conv_ptr_zero = conv_img.ptr<float>(0);
conv_ptr_index = conv_ptr_zero;
for(int i=0;i<nverti_steps;i++)
{
for(int j=0;j<nhoriz_steps;j++)
{
for(int rk=0;rk < kernel_rows; rk++)
{
for(int ck=0;ck < kernel_cols; ck++)
{
pointer_index = pointer_zero + i * src_n_colums + j + ck + rk * src_n_colums;
//(pointer_zero + i * src_n_colums + j) points at the upper left corner where the kernel is currently located
//and (+ ck + rk * src_n_colums) adds the offset of the walk through the kernel matrix
matrix_sum = matrix_sum + ((float) *pointer_index) * ((float) *kernel_index);
kernel_index++;
}
}
matrix_sum = matrix_sum + *kernel_index;//Add bias node weight from kernel
//matrix_sum = 1.0/(1.0 + exp(-matrix_sum));//Sigmoid function
//The sigmoid is applied later, after max pooling (see sigmoid_mat)
*conv_ptr_index = matrix_sum;
matrix_sum = 0.0f;
kernel_index = kernel;
conv_ptr_index++;
}
}
return conv_img;
}
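//Worked size example: convolving the 54x70 (rows x cols) padded layer-0 input
//with a 7x7 kernel gives nverti_steps = 54-7+1 = 48 and nhoriz_steps = 70-7+1 = 64,
//so the convolution exactly undoes the padding added by padded0_image().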
Mat padded_image(Mat image)
{
float* ptr_src_zero;
float* ptr_src_index;
ptr_src_zero = image.ptr<float>(0);
int nRows = image.rows;
int nCols = image.cols;
Mat padded_img(nRows + 2*PADDING_SIZE_TO_T2P , nCols + 2*PADDING_SIZE_TO_T2P, CV_32F);//Mat xxx(height, width, type)
float* ptr_dst_zero;
float* ptr_dst_index;
ptr_dst_zero = padded_img.ptr<float>(0);
ptr_src_index = ptr_src_zero;
ptr_dst_index = ptr_dst_zero;
int srcRows = padded_img.rows;
int srcCols = padded_img.cols;
for(int i=0;i<srcRows;i++)
{
for(int j=0;j<srcCols;j++)
{
if(i < PADDING_SIZE_TO_T2P || i > (srcRows - PADDING_SIZE_TO_T2P - 1) || j < PADDING_SIZE_TO_T2P || j > (srcCols - PADDING_SIZE_TO_T2P - 1))
{
//Up padding OR Down Padding OR Left padding OR Right padding
*ptr_dst_index = 0.5f;//Padding pixels are filled with the constant 0.5
}
else
{
//No padding, inside padding frame, use data from src area
*ptr_dst_index = *ptr_src_index;
ptr_src_index++;
}
ptr_dst_index++;
}
}
return padded_img;
}
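//Worked size example: a 12x16 (rows x cols) pooled map comes out as
//12+2*5 = 22 rows by 16+2*5 = 26 cols, i.e. POL_C_M_HIGHT x POL_C_M_WIDTH.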
Mat padded1_image(Mat image)
{
float* ptr_src_zero;
float* ptr_src_index;
ptr_src_zero = image.ptr<float>(0);
int nRows = image.rows;
int nCols = image.cols;
Mat padded_img(nRows + 2*PADDING_SIZE_TO_T1P , nCols + 2*PADDING_SIZE_TO_T1P, CV_32F);//Mat xxx(height, width, type)
float* ptr_dst_zero;
float* ptr_dst_index;
ptr_dst_zero = padded_img.ptr<float>(0);
ptr_src_index = ptr_src_zero;
ptr_dst_index = ptr_dst_zero;
int srcRows = padded_img.rows;
int srcCols = padded_img.cols;
for(int i=0;i<srcRows;i++)
{
for(int j=0;j<srcCols;j++)
{
if(i < PADDING_SIZE_TO_T1P || i > (srcRows - PADDING_SIZE_TO_T1P - 1) || j < PADDING_SIZE_TO_T1P || j > (srcCols - PADDING_SIZE_TO_T1P - 1))
{
//Up padding OR Down Padding OR Left padding OR Right padding
*ptr_dst_index = 0.5f;//Padding pixels are filled with the constant 0.5
}
else
{
//No padding, inside padding frame, use data from src area
*ptr_dst_index = *ptr_src_index;
ptr_src_index++;
}
ptr_dst_index++;
}
}
return padded_img;
}
Mat padded0_image(Mat image)
{
float* ptr_src_zero;
float* ptr_src_index;
ptr_src_zero = image.ptr<float>(0);
int nRows = image.rows;
int nCols = image.cols;
Mat padded_img(nRows + 2*PADDING_SIZE_TO_T0P , nCols + 2*PADDING_SIZE_TO_T0P, CV_32F);//Mat xxx(height, width, type)
float* ptr_dst_zero;
float* ptr_dst_index;
ptr_dst_zero = padded_img.ptr<float>(0);
ptr_src_index = ptr_src_zero;
ptr_dst_index = ptr_dst_zero;
int srcRows = padded_img.rows;
int srcCols = padded_img.cols;
for(int i=0;i<srcRows;i++)
{
for(int j=0;j<srcCols;j++)
{
if(i < PADDING_SIZE_TO_T0P || i > (srcRows - PADDING_SIZE_TO_T0P - 1) || j < PADDING_SIZE_TO_T0P || j > (srcCols - PADDING_SIZE_TO_T0P - 1))
{
//Up padding OR Down Padding OR Left padding OR Right padding
*ptr_dst_index = 0.5f;//Padding pixels are filled with the constant 0.5
}
else
{
//No padding, inside padding frame, use data from src area
*ptr_dst_index = *ptr_src_index;
ptr_src_index++;
}
ptr_dst_index++;
}
}
return padded_img;
}
void sigmoid_mat(Mat image)
{
float* ptr_src_index;
ptr_src_index = image.ptr<float>(0);
int nRows = image.rows;
int nCols = image.cols;
for(int i=0;i<nRows;i++)
{
for(int j=0;j<nCols;j++)
{
*ptr_src_index = 1.0/(1.0 + exp(-(*ptr_src_index)));//Sigmoid function
ptr_src_index++;
}
}
}
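//Math note: with s = sigmoid(x), the derivative needed for backpropagation is
//ds/dx = s*(1-s), so it can be computed from the stored activations alone.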
Mat max_pooling(Mat image, int src_n_colums)//Pooling 2x2 to 1x1 ratio
{
float* pointer_zero;
float* pointer_index;
pointer_zero = image.ptr<float>(0);
pointer_index = pointer_zero;
int nRows = image.rows;
int nCols = image.cols;
float* pol_ptr_zero;
float* pol_ptr_index;
Mat pol_img(nRows/2, nCols/2, CV_32F);//Mat xxx(height, width, type);
pol_ptr_zero = pol_img.ptr<float>(0);
pol_ptr_index = pol_ptr_zero;
int pol_Rows = pol_img.rows;
int pol_Cols = pol_img.cols;
float max_pixel = MAX_PIXEL_START_DEFAULT;
float pix =0;
for(int i=0;i<pol_Rows;i++)
{
for(int j=0;j<pol_Cols;j++)
{
for(int rk=0;rk < 2; rk++)
{
for(int ck=0;ck < 2; ck++)
{
pointer_index = pointer_zero + i*2 * src_n_colums + j*2 + ck + rk * src_n_colums;
pix = *pointer_index;
if(pix > max_pixel)
{
max_pixel = pix;
}
}
}
*pol_ptr_index = max_pixel;
pol_ptr_index++;
max_pixel = MAX_PIXEL_START_DEFAULT;
pix = 0;
}
}
return pol_img;
}
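//Worked example: a 48x64 convolution output pools down to 24x32; each output
//pixel is the max of one non-overlapping 2x2 block, e.g.
//  | 0.1 0.7 |
//  | 0.3 0.2 |  ->  0.7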
//inline Mat convolute_all_stage(Mat m1, Mat all_convoluted_frames,int src_img_colums);
Mat convolute_all_stage(Mat m1, Mat all_convoluted_frames,int src_img_colums)
{
Mat m1_cloned;
m1_cloned = m1.clone();
m1_0_padded = padded0_image(m1_cloned);
//Add conv0, pool0, padding0
m1_conv0 = convolute_mat2(&Feature0Kernel[0][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image //BUG FIX: &Feature1Kernel[0][0] changed to &Feature0Kernel[0][0]
m2_conv0 = convolute_mat2(&Feature0Kernel[1][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m3_conv0 = convolute_mat2(&Feature0Kernel[2][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m4_conv0 = convolute_mat2(&Feature0Kernel[3][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m5_conv0 = convolute_mat2(&Feature0Kernel[4][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m6_conv0 = convolute_mat2(&Feature0Kernel[5][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m7_conv0 = convolute_mat2(&Feature0Kernel[6][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m8_conv0 = convolute_mat2(&Feature0Kernel[7][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m9_conv0 = convolute_mat2(&Feature0Kernel[8][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m10_conv0 = convolute_mat2(&Feature0Kernel[9][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m11_conv0 = convolute_mat2(&Feature0Kernel[10][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m12_conv0 = convolute_mat2(&Feature0Kernel[11][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m13_conv0 = convolute_mat2(&Feature0Kernel[12][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m14_conv0 = convolute_mat2(&Feature0Kernel[13][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m15_conv0 = convolute_mat2(&Feature0Kernel[14][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m16_conv0 = convolute_mat2(&Feature0Kernel[15][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m17_conv0 = convolute_mat2(&Feature0Kernel[16][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m18_conv0 = convolute_mat2(&Feature0Kernel[17][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m19_conv0 = convolute_mat2(&Feature0Kernel[18][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m20_conv0 = convolute_mat2(&Feature0Kernel[19][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m21_conv0 = convolute_mat2(&Feature0Kernel[20][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m22_conv0 = convolute_mat2(&Feature0Kernel[21][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m23_conv0 = convolute_mat2(&Feature0Kernel[22][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m24_conv0 = convolute_mat2(&Feature0Kernel[23][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m25_conv0 = convolute_mat2(&Feature0Kernel[24][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m26_conv0 = convolute_mat2(&Feature0Kernel[25][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m27_conv0 = convolute_mat2(&Feature0Kernel[26][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m28_conv0 = convolute_mat2(&Feature0Kernel[27][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m29_conv0 = convolute_mat2(&Feature0Kernel[28][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m30_conv0 = convolute_mat2(&Feature0Kernel[29][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m31_conv0 = convolute_mat2(&Feature0Kernel[30][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
m32_conv0 = convolute_mat2(&Feature0Kernel[31][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
pol_c0_m1_unpad = max_pooling(m1_conv0, int (m1_conv0.cols));//2x2 max pooling of each layer-0 convolution output
pol_c0_m2_unpad = max_pooling(m2_conv0, int (m2_conv0.cols));
pol_c0_m3_unpad = max_pooling(m3_conv0, int (m3_conv0.cols));
pol_c0_m4_unpad = max_pooling(m4_conv0, int (m4_conv0.cols));
pol_c0_m5_unpad = max_pooling(m5_conv0, int (m5_conv0.cols));
pol_c0_m6_unpad = max_pooling(m6_conv0, int (m6_conv0.cols));
pol_c0_m7_unpad = max_pooling(m7_conv0, int (m7_conv0.cols));
pol_c0_m8_unpad = max_pooling(m8_conv0, int (m8_conv0.cols));
pol_c0_m9_unpad = max_pooling(m9_conv0, int (m9_conv0.cols));
pol_c0_m10_unpad = max_pooling(m10_conv0, int (m10_conv0.cols));
pol_c0_m11_unpad = max_pooling(m11_conv0, int (m11_conv0.cols));
pol_c0_m12_unpad = max_pooling(m12_conv0, int (m12_conv0.cols));
pol_c0_m13_unpad = max_pooling(m13_conv0, int (m13_conv0.cols));
pol_c0_m14_unpad = max_pooling(m14_conv0, int (m14_conv0.cols));
pol_c0_m15_unpad = max_pooling(m15_conv0, int (m15_conv0.cols));
pol_c0_m16_unpad = max_pooling(m16_conv0, int (m16_conv0.cols));
pol_c0_m17_unpad = max_pooling(m17_conv0, int (m17_conv0.cols));
pol_c0_m18_unpad = max_pooling(m18_conv0, int (m18_conv0.cols));
pol_c0_m19_unpad = max_pooling(m19_conv0, int (m19_conv0.cols));
pol_c0_m20_unpad = max_pooling(m20_conv0, int (m20_conv0.cols));
pol_c0_m21_unpad = max_pooling(m21_conv0, int (m21_conv0.cols));
pol_c0_m22_unpad = max_pooling(m22_conv0, int (m22_conv0.cols));
pol_c0_m23_unpad = max_pooling(m23_conv0, int (m23_conv0.cols));
pol_c0_m24_unpad = max_pooling(m24_conv0, int (m24_conv0.cols));
pol_c0_m25_unpad = max_pooling(m25_conv0, int (m25_conv0.cols));
pol_c0_m26_unpad = max_pooling(m26_conv0, int (m26_conv0.cols));
pol_c0_m27_unpad = max_pooling(m27_conv0, int (m27_conv0.cols));
pol_c0_m28_unpad = max_pooling(m28_conv0, int (m28_conv0.cols));
pol_c0_m29_unpad = max_pooling(m29_conv0, int (m29_conv0.cols));
pol_c0_m30_unpad = max_pooling(m30_conv0, int (m30_conv0.cols));
pol_c0_m31_unpad = max_pooling(m31_conv0, int (m31_conv0.cols));
pol_c0_m32_unpad = max_pooling(m32_conv0, int (m32_conv0.cols));
sigmoid_mat(pol_c0_m1_unpad);
sigmoid_mat(pol_c0_m2_unpad);
sigmoid_mat(pol_c0_m3_unpad);
sigmoid_mat(pol_c0_m4_unpad);
sigmoid_mat(pol_c0_m5_unpad);
sigmoid_mat(pol_c0_m6_unpad);
sigmoid_mat(pol_c0_m7_unpad);
sigmoid_mat(pol_c0_m8_unpad);
sigmoid_mat(pol_c0_m9_unpad);
sigmoid_mat(pol_c0_m10_unpad);
sigmoid_mat(pol_c0_m11_unpad);
sigmoid_mat(pol_c0_m12_unpad);
sigmoid_mat(pol_c0_m13_unpad);
sigmoid_mat(pol_c0_m14_unpad);
sigmoid_mat(pol_c0_m15_unpad);
sigmoid_mat(pol_c0_m16_unpad);
sigmoid_mat(pol_c0_m17_unpad);
sigmoid_mat(pol_c0_m18_unpad);
sigmoid_mat(pol_c0_m19_unpad);
sigmoid_mat(pol_c0_m20_unpad);
sigmoid_mat(pol_c0_m21_unpad);
sigmoid_mat(pol_c0_m22_unpad);
sigmoid_mat(pol_c0_m23_unpad);
sigmoid_mat(pol_c0_m24_unpad);
sigmoid_mat(pol_c0_m25_unpad);
sigmoid_mat(pol_c0_m26_unpad);
sigmoid_mat(pol_c0_m27_unpad);
sigmoid_mat(pol_c0_m28_unpad);
sigmoid_mat(pol_c0_m29_unpad);
sigmoid_mat(pol_c0_m30_unpad);
sigmoid_mat(pol_c0_m31_unpad);
sigmoid_mat(pol_c0_m32_unpad);
pol_c0_m1 = padded1_image(pol_c0_m1_unpad);
pol_c0_m2 = padded1_image(pol_c0_m2_unpad);
pol_c0_m3 = padded1_image(pol_c0_m3_unpad);
pol_c0_m4 = padded1_image(pol_c0_m4_unpad);
pol_c0_m5 = padded1_image(pol_c0_m5_unpad);
pol_c0_m6 = padded1_image(pol_c0_m6_unpad);
pol_c0_m7 = padded1_image(pol_c0_m7_unpad);
pol_c0_m8 = padded1_image(pol_c0_m8_unpad);
pol_c0_m9 = padded1_image(pol_c0_m9_unpad);
pol_c0_m10 = padded1_image(pol_c0_m10_unpad);
pol_c0_m11 = padded1_image(pol_c0_m11_unpad);
pol_c0_m12 = padded1_image(pol_c0_m12_unpad);
pol_c0_m13 = padded1_image(pol_c0_m13_unpad);
pol_c0_m14 = padded1_image(pol_c0_m14_unpad);
pol_c0_m15 = padded1_image(pol_c0_m15_unpad);
pol_c0_m16 = padded1_image(pol_c0_m16_unpad);
pol_c0_m17 = padded1_image(pol_c0_m17_unpad);
pol_c0_m18 = padded1_image(pol_c0_m18_unpad);
pol_c0_m19 = padded1_image(pol_c0_m19_unpad);
pol_c0_m20 = padded1_image(pol_c0_m20_unpad);
pol_c0_m21 = padded1_image(pol_c0_m21_unpad);
pol_c0_m22 = padded1_image(pol_c0_m22_unpad);
pol_c0_m23 = padded1_image(pol_c0_m23_unpad);
pol_c0_m24 = padded1_image(pol_c0_m24_unpad);
pol_c0_m25 = padded1_image(pol_c0_m25_unpad);
pol_c0_m26 = padded1_image(pol_c0_m26_unpad);
pol_c0_m27 = padded1_image(pol_c0_m27_unpad);
pol_c0_m28 = padded1_image(pol_c0_m28_unpad);
pol_c0_m29 = padded1_image(pol_c0_m29_unpad);
pol_c0_m30 = padded1_image(pol_c0_m30_unpad);
pol_c0_m31 = padded1_image(pol_c0_m31_unpad);
pol_c0_m32 = padded1_image(pol_c0_m32_unpad);
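//At this point each of the 32 layer-0 feature maps has been pooled, squashed
//and re-padded to 30x38 (rows x cols), ready for the 7x7 layer-1 convolution.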
//--------------------------
c_m1 = convolute_mat2(&Feature1Kernel[0][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m1, int (pol_c0_m1.cols));// Make a convolution of the image
c_m2 = convolute_mat2(&Feature1Kernel[1][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m2, int (pol_c0_m2.cols));// Make a convolution of the image
c_m3 = convolute_mat2(&Feature1Kernel[2][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m3, int (pol_c0_m3.cols));// Make a convolution of the image
c_m4 = convolute_mat2(&Feature1Kernel[3][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m4, int (pol_c0_m4.cols));// Make a convolution of the image
c_m5 = convolute_mat2(&Feature1Kernel[4][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m5, int (pol_c0_m5.cols));// Make a convolution of the image
c_m6 = convolute_mat2(&Feature1Kernel[5][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m6, int (pol_c0_m6.cols));// Make a convolution of the image
c_m7 = convolute_mat2(&Feature1Kernel[6][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m7, int (pol_c0_m7.cols));// Make a convolution of the image
c_m8 = convolute_mat2(&Feature1Kernel[7][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m8, int (pol_c0_m8.cols));// Make a convolution of the image
c_m9 = convolute_mat2(&Feature1Kernel[8][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m9, int (pol_c0_m9.cols));// Make a convolution of the image
c_m10 = convolute_mat2(&Feature1Kernel[9][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m10, int (pol_c0_m10.cols));// Make a convolution of the image
c_m11 = convolute_mat2(&Feature1Kernel[10][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m11, int (pol_c0_m11.cols));// Make a convolution of the image
c_m12 = convolute_mat2(&Feature1Kernel[11][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m12, int (pol_c0_m12.cols));// Make a convolution of the image
c_m13 = convolute_mat2(&Feature1Kernel[12][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m13, int (pol_c0_m13.cols));// Make a convolution of the image
c_m14 = convolute_mat2(&Feature1Kernel[13][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m14, int (pol_c0_m14.cols));// Make a convolution of the image
c_m15 = convolute_mat2(&Feature1Kernel[14][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m15, int (pol_c0_m15.cols));// Make a convolution of the image
c_m16 = convolute_mat2(&Feature1Kernel[15][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m16, int (pol_c0_m16.cols));// Make a convolution of the image
c_m17 = convolute_mat2(&Feature1Kernel[16][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m17, int (pol_c0_m17.cols));// Make a convolution of the image
c_m18 = convolute_mat2(&Feature1Kernel[17][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m18, int (pol_c0_m18.cols));// Make a convolution of the image
c_m19 = convolute_mat2(&Feature1Kernel[18][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m19, int (pol_c0_m19.cols));// Make a convolution of the image
c_m20 = convolute_mat2(&Feature1Kernel[19][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m20, int (pol_c0_m20.cols));// Make a convolution of the image
c_m21 = convolute_mat2(&Feature1Kernel[20][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m21, int (pol_c0_m21.cols));// Make a convolution of the image
c_m22 = convolute_mat2(&Feature1Kernel[21][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m22, int (pol_c0_m22.cols));// Make a convolution of the image
c_m23 = convolute_mat2(&Feature1Kernel[22][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m23, int (pol_c0_m23.cols));// Make a convolution of the image
c_m24 = convolute_mat2(&Feature1Kernel[23][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m24, int (pol_c0_m24.cols));// Make a convolution of the image
c_m25 = convolute_mat2(&Feature1Kernel[24][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m25, int (pol_c0_m25.cols));// Make a convolution of the image
c_m26 = convolute_mat2(&Feature1Kernel[25][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m26, int (pol_c0_m26.cols));// Make a convolution of the image
c_m27 = convolute_mat2(&Feature1Kernel[26][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m27, int (pol_c0_m27.cols));// Make a convolution of the image
c_m28 = convolute_mat2(&Feature1Kernel[27][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m28, int (pol_c0_m28.cols));// Make a convolution of the image
c_m29 = convolute_mat2(&Feature1Kernel[28][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m29, int (pol_c0_m29.cols));// Make a convolution of the image
c_m30 = convolute_mat2(&Feature1Kernel[29][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m30, int (pol_c0_m30.cols));// Make a convolution of the image
c_m31 = convolute_mat2(&Feature1Kernel[30][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m31, int (pol_c0_m31.cols));// Make a convolution of the image
c_m32 = convolute_mat2(&Feature1Kernel[31][0], FE1KSIZESQR, FE1KSIZESQR, pol_c0_m32, int (pol_c0_m32.cols));// Make a convolution of the image
pol_c_m1_unpad = max_pooling(c_m1, int (c_m1.cols));//2x2 max pooling of each layer-1 convolution output
pol_c_m2_unpad = max_pooling(c_m2, int (c_m2.cols));
pol_c_m3_unpad = max_pooling(c_m3, int (c_m3.cols));
pol_c_m4_unpad = max_pooling(c_m4, int (c_m4.cols));
pol_c_m5_unpad = max_pooling(c_m5, int (c_m5.cols));
pol_c_m6_unpad = max_pooling(c_m6, int (c_m6.cols));
pol_c_m7_unpad = max_pooling(c_m7, int (c_m7.cols));
pol_c_m8_unpad = max_pooling(c_m8, int (c_m8.cols));
pol_c_m9_unpad = max_pooling(c_m9, int (c_m9.cols));
pol_c_m10_unpad = max_pooling(c_m10, int (c_m10.cols));
pol_c_m11_unpad = max_pooling(c_m11, int (c_m11.cols));
pol_c_m12_unpad = max_pooling(c_m12, int (c_m12.cols));
pol_c_m13_unpad = max_pooling(c_m13, int (c_m13.cols));
pol_c_m14_unpad = max_pooling(c_m14, int (c_m14.cols));
pol_c_m15_unpad = max_pooling(c_m15, int (c_m15.cols));
pol_c_m16_unpad = max_pooling(c_m16, int (c_m16.cols));
pol_c_m17_unpad = max_pooling(c_m17, int (c_m17.cols));
pol_c_m18_unpad = max_pooling(c_m18, int (c_m18.cols));
pol_c_m19_unpad = max_pooling(c_m19, int (c_m19.cols));
pol_c_m20_unpad = max_pooling(c_m20, int (c_m20.cols));
pol_c_m21_unpad = max_pooling(c_m21, int (c_m21.cols));
pol_c_m22_unpad = max_pooling(c_m22, int (c_m22.cols));
pol_c_m23_unpad = max_pooling(c_m23, int (c_m23.cols));
pol_c_m24_unpad = max_pooling(c_m24, int (c_m24.cols));
pol_c_m25_unpad = max_pooling(c_m25, int (c_m25.cols));
pol_c_m26_unpad = max_pooling(c_m26, int (c_m26.cols));
pol_c_m27_unpad = max_pooling(c_m27, int (c_m27.cols));
pol_c_m28_unpad = max_pooling(c_m28, int (c_m28.cols));
pol_c_m29_unpad = max_pooling(c_m29, int (c_m29.cols));
pol_c_m30_unpad = max_pooling(c_m30, int (c_m30.cols));
pol_c_m31_unpad = max_pooling(c_m31, int (c_m31.cols));
pol_c_m32_unpad = max_pooling(c_m32, int (c_m32.cols));
sigmoid_mat(pol_c_m1_unpad);
sigmoid_mat(pol_c_m2_unpad);
sigmoid_mat(pol_c_m3_unpad);
sigmoid_mat(pol_c_m4_unpad);
sigmoid_mat(pol_c_m5_unpad);
sigmoid_mat(pol_c_m6_unpad);
sigmoid_mat(pol_c_m7_unpad);
sigmoid_mat(pol_c_m8_unpad);
sigmoid_mat(pol_c_m9_unpad);
sigmoid_mat(pol_c_m10_unpad);
sigmoid_mat(pol_c_m11_unpad);
sigmoid_mat(pol_c_m12_unpad);
sigmoid_mat(pol_c_m13_unpad);
sigmoid_mat(pol_c_m14_unpad);
sigmoid_mat(pol_c_m15_unpad);
sigmoid_mat(pol_c_m16_unpad);
sigmoid_mat(pol_c_m17_unpad);
sigmoid_mat(pol_c_m18_unpad);
sigmoid_mat(pol_c_m19_unpad);
sigmoid_mat(pol_c_m20_unpad);
sigmoid_mat(pol_c_m21_unpad);
sigmoid_mat(pol_c_m22_unpad);
sigmoid_mat(pol_c_m23_unpad);
sigmoid_mat(pol_c_m24_unpad);
sigmoid_mat(pol_c_m25_unpad);
sigmoid_mat(pol_c_m26_unpad);
sigmoid_mat(pol_c_m27_unpad);
sigmoid_mat(pol_c_m28_unpad);
sigmoid_mat(pol_c_m29_unpad);
sigmoid_mat(pol_c_m30_unpad);
sigmoid_mat(pol_c_m31_unpad);
sigmoid_mat(pol_c_m32_unpad);
pol_c_m1 = padded_image(pol_c_m1_unpad);
pol_c_m2 = padded_image(pol_c_m2_unpad);
pol_c_m3 = padded_image(pol_c_m3_unpad);
pol_c_m4 = padded_image(pol_c_m4_unpad);
pol_c_m5 = padded_image(pol_c_m5_unpad);
pol_c_m6 = padded_image(pol_c_m6_unpad);
pol_c_m7 = padded_image(pol_c_m7_unpad);
pol_c_m8 = padded_image(pol_c_m8_unpad);
pol_c_m9 = padded_image(pol_c_m9_unpad);
pol_c_m10 = padded_image(pol_c_m10_unpad);
pol_c_m11 = padded_image(pol_c_m11_unpad);
pol_c_m12 = padded_image(pol_c_m12_unpad);
pol_c_m13 = padded_image(pol_c_m13_unpad);
pol_c_m14 = padded_image(pol_c_m14_unpad);
pol_c_m15 = padded_image(pol_c_m15_unpad);
pol_c_m16 = padded_image(pol_c_m16_unpad);
pol_c_m17 = padded_image(pol_c_m17_unpad);
pol_c_m18 = padded_image(pol_c_m18_unpad);
pol_c_m19 = padded_image(pol_c_m19_unpad);
pol_c_m20 = padded_image(pol_c_m20_unpad);
pol_c_m21 = padded_image(pol_c_m21_unpad);
pol_c_m22 = padded_image(pol_c_m22_unpad);
pol_c_m23 = padded_image(pol_c_m23_unpad);
pol_c_m24 = padded_image(pol_c_m24_unpad);
pol_c_m25 = padded_image(pol_c_m25_unpad);
pol_c_m26 = padded_image(pol_c_m26_unpad);
pol_c_m27 = padded_image(pol_c_m27_unpad);
pol_c_m28 = padded_image(pol_c_m28_unpad);
pol_c_m29 = padded_image(pol_c_m29_unpad);
pol_c_m30 = padded_image(pol_c_m30_unpad);
pol_c_m31 = padded_image(pol_c_m31_unpad);
pol_c_m32 = padded_image(pol_c_m32_unpad);
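//==================================
//NOTE: each first-layer feature map goes through the same chain above,
//2x2 max pooling -> sigmoid squashing -> padding, so every pol_c_mX is a
//padded, squashed pooling result ready as input for the second
//convolution layer below.
//==================================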
c2_m1_n = convolute_mat2(&Feature2Kernel[0][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m1, int (pol_c_m1.cols));// Make a convolution of the image
c2_m2_n = convolute_mat2(&Feature2Kernel[1][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m2, int (pol_c_m2.cols));// Make a convolution of the image
c2_m3_n = convolute_mat2(&Feature2Kernel[2][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m3, int (pol_c_m3.cols));// Make a convolution of the image
c2_m4_n = convolute_mat2(&Feature2Kernel[3][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m4, int (pol_c_m4.cols));// Make a convolution of the image
c2_m1_e = convolute_mat2(&Feature2Kernel[4][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m5, int (pol_c_m5.cols));// Make a convolution of the image
c2_m2_e = convolute_mat2(&Feature2Kernel[5][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m6, int (pol_c_m6.cols));// Make a convolution of the image
c2_m3_e = convolute_mat2(&Feature2Kernel[6][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m7, int (pol_c_m7.cols));// Make a convolution of the image
c2_m4_e = convolute_mat2(&Feature2Kernel[7][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m8, int (pol_c_m8.cols));// Make a convolution of the image
c2_m1_w = convolute_mat2(&Feature2Kernel[8][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m9, int (pol_c_m9.cols));// Make a convolution of the image
c2_m2_w = convolute_mat2(&Feature2Kernel[9][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m10, int (pol_c_m10.cols));// Make a convolution of the image
c2_m3_w = convolute_mat2(&Feature2Kernel[10][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m11, int (pol_c_m11.cols));// Make a convolution of the image
c2_m4_w = convolute_mat2(&Feature2Kernel[11][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m12, int (pol_c_m12.cols));// Make a convolution of the image
c2_m1_s = convolute_mat2(&Feature2Kernel[12][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m13, int (pol_c_m13.cols));// Make a convolution of the image
c2_m2_s = convolute_mat2(&Feature2Kernel[13][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m14, int (pol_c_m14.cols));// Make a convolution of the image
c2_m3_s = convolute_mat2(&Feature2Kernel[14][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m15, int (pol_c_m15.cols));// Make a convolution of the image
c2_m4_s = convolute_mat2(&Feature2Kernel[15][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m16, int (pol_c_m16.cols));// Make a convolution of the image
c3_m1_n = convolute_mat2(&Feature2Kernel[16][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m17, int (pol_c_m17.cols));// Make a convolution of the image
c3_m2_n = convolute_mat2(&Feature2Kernel[17][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m18, int (pol_c_m18.cols));// Make a convolution of the image
c3_m3_n = convolute_mat2(&Feature2Kernel[18][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m19, int (pol_c_m19.cols));// Make a convolution of the image
c3_m4_n = convolute_mat2(&Feature2Kernel[19][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m20, int (pol_c_m20.cols));// Make a convolution of the image
c3_m1_e = convolute_mat2(&Feature2Kernel[20][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m21, int (pol_c_m21.cols));// Make a convolution of the image
c3_m2_e = convolute_mat2(&Feature2Kernel[21][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m22, int (pol_c_m22.cols));// Make a convolution of the image
c3_m3_e = convolute_mat2(&Feature2Kernel[22][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m23, int (pol_c_m23.cols));// Make a convolution of the image
c3_m4_e = convolute_mat2(&Feature2Kernel[23][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m24, int (pol_c_m24.cols));// Make a convolution of the image
c3_m1_w = convolute_mat2(&Feature2Kernel[24][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m25, int (pol_c_m25.cols));// Make a convolution of the image
c3_m2_w = convolute_mat2(&Feature2Kernel[25][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m26, int (pol_c_m26.cols));// Make a convolution of the image
c3_m3_w = convolute_mat2(&Feature2Kernel[26][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m27, int (pol_c_m27.cols));// Make a convolution of the image
c3_m4_w = convolute_mat2(&Feature2Kernel[27][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m28, int (pol_c_m28.cols));// Make a convolution of the image
c3_m1_s = convolute_mat2(&Feature2Kernel[28][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m29, int (pol_c_m29.cols));// Make a convolution of the image
c3_m2_s = convolute_mat2(&Feature2Kernel[29][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m30, int (pol_c_m30.cols));// Make a convolution of the image
c3_m3_s = convolute_mat2(&Feature2Kernel[30][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m31, int (pol_c_m31.cols));// Make a convolution of the image
c3_m4_s = convolute_mat2(&Feature2Kernel[31][0], FE2KSIZESQR, FE2KSIZESQR, pol_c_m32, int (pol_c_m32.cols));// Make a convolution of the image
pol_c2_m1_n = max_pooling(c2_m1_n, int (c2_m1_n.cols));//2x2 max pooling of c2_m1_n
pol_c2_m2_n = max_pooling(c2_m2_n, int (c2_m2_n.cols));//2x2 max pooling of c2_m2_n
pol_c2_m3_n = max_pooling(c2_m3_n, int (c2_m3_n.cols));//2x2 max pooling of c2_m3_n
pol_c2_m4_n = max_pooling(c2_m4_n, int (c2_m4_n.cols));//2x2 max pooling of c2_m4_n
pol_c2_m1_e = max_pooling(c2_m1_e, int (c2_m1_e.cols));//2x2 max pooling of c2_m1_e
pol_c2_m2_e = max_pooling(c2_m2_e, int (c2_m2_e.cols));//2x2 max pooling of c2_m2_e
pol_c2_m3_e = max_pooling(c2_m3_e, int (c2_m3_e.cols));//2x2 max pooling of c2_m3_e
pol_c2_m4_e = max_pooling(c2_m4_e, int (c2_m4_e.cols));//2x2 max pooling of c2_m4_e
pol_c2_m1_w = max_pooling(c2_m1_w, int (c2_m1_w.cols));//2x2 max pooling of c2_m1_w
pol_c2_m2_w = max_pooling(c2_m2_w, int (c2_m2_w.cols));//2x2 max pooling of c2_m2_w
pol_c2_m3_w = max_pooling(c2_m3_w, int (c2_m3_w.cols));//2x2 max pooling of c2_m3_w
pol_c2_m4_w = max_pooling(c2_m4_w, int (c2_m4_w.cols));//2x2 max pooling of c2_m4_w
pol_c2_m1_s = max_pooling(c2_m1_s, int (c2_m1_s.cols));//2x2 max pooling of c2_m1_s
pol_c2_m2_s = max_pooling(c2_m2_s, int (c2_m2_s.cols));//2x2 max pooling of c2_m2_s
pol_c2_m3_s = max_pooling(c2_m3_s, int (c2_m3_s.cols));//2x2 max pooling of c2_m3_s
pol_c2_m4_s = max_pooling(c2_m4_s, int (c2_m4_s.cols));//2x2 max pooling of c2_m4_s
pol_c3_m1_n = max_pooling(c3_m1_n, int (c3_m1_n.cols));//2x2 max pooling of c3_m1_n
pol_c3_m2_n = max_pooling(c3_m2_n, int (c3_m2_n.cols));//2x2 max pooling of c3_m2_n
pol_c3_m3_n = max_pooling(c3_m3_n, int (c3_m3_n.cols));//2x2 max pooling of c3_m3_n
pol_c3_m4_n = max_pooling(c3_m4_n, int (c3_m4_n.cols));//2x2 max pooling of c3_m4_n
pol_c3_m1_e = max_pooling(c3_m1_e, int (c3_m1_e.cols));//2x2 max pooling of c3_m1_e
pol_c3_m2_e = max_pooling(c3_m2_e, int (c3_m2_e.cols));//2x2 max pooling of c3_m2_e
pol_c3_m3_e = max_pooling(c3_m3_e, int (c3_m3_e.cols));//2x2 max pooling of c3_m3_e
pol_c3_m4_e = max_pooling(c3_m4_e, int (c3_m4_e.cols));//2x2 max pooling of c3_m4_e
pol_c3_m1_w = max_pooling(c3_m1_w, int (c3_m1_w.cols));//2x2 max pooling of c3_m1_w
pol_c3_m2_w = max_pooling(c3_m2_w, int (c3_m2_w.cols));//2x2 max pooling of c3_m2_w
pol_c3_m3_w = max_pooling(c3_m3_w, int (c3_m3_w.cols));//2x2 max pooling of c3_m3_w
pol_c3_m4_w = max_pooling(c3_m4_w, int (c3_m4_w.cols));//2x2 max pooling of c3_m4_w
pol_c3_m1_s = max_pooling(c3_m1_s, int (c3_m1_s.cols));//2x2 max pooling of c3_m1_s
pol_c3_m2_s = max_pooling(c3_m2_s, int (c3_m2_s.cols));//2x2 max pooling of c3_m2_s
pol_c3_m3_s = max_pooling(c3_m3_s, int (c3_m3_s.cols));//2x2 max pooling of c3_m3_s
pol_c3_m4_s = max_pooling(c3_m4_s, int (c3_m4_s.cols));//2x2 max pooling of c3_m4_s
sigmoid_mat(pol_c2_m1_n);
sigmoid_mat(pol_c2_m2_n);
sigmoid_mat(pol_c2_m3_n);
sigmoid_mat(pol_c2_m4_n);
sigmoid_mat(pol_c2_m1_e);
sigmoid_mat(pol_c2_m2_e);
sigmoid_mat(pol_c2_m3_e);
sigmoid_mat(pol_c2_m4_e);
sigmoid_mat(pol_c2_m1_w);
sigmoid_mat(pol_c2_m2_w);
sigmoid_mat(pol_c2_m3_w);
sigmoid_mat(pol_c2_m4_w);
sigmoid_mat(pol_c2_m1_s);
sigmoid_mat(pol_c2_m2_s);
sigmoid_mat(pol_c2_m3_s);
sigmoid_mat(pol_c2_m4_s);
sigmoid_mat(pol_c3_m1_n);
sigmoid_mat(pol_c3_m2_n);
sigmoid_mat(pol_c3_m3_n);
sigmoid_mat(pol_c3_m4_n);
sigmoid_mat(pol_c3_m1_e);
sigmoid_mat(pol_c3_m2_e);
sigmoid_mat(pol_c3_m3_e);
sigmoid_mat(pol_c3_m4_e);
sigmoid_mat(pol_c3_m1_w);
sigmoid_mat(pol_c3_m2_w);
sigmoid_mat(pol_c3_m3_w);
sigmoid_mat(pol_c3_m4_w);
sigmoid_mat(pol_c3_m1_s);
sigmoid_mat(pol_c3_m2_s);
sigmoid_mat(pol_c3_m3_s);
sigmoid_mat(pol_c3_m4_s);
//End code part 1
//Begin code part 2
float* all_conv_ptr_index;
all_conv_ptr_index = all_convoluted_frames.ptr<float>(0);
//The 32 pooled source Mats, listed once in frame order; the copy loop walks
//them one after another into all_convoluted_frames.
Mat* const pooled_sources[NN_CONVOLUTED_FRAMES] =
{
&pol_c2_m1_n, &pol_c2_m2_n, &pol_c2_m3_n, &pol_c2_m4_n,
&pol_c2_m1_e, &pol_c2_m2_e, &pol_c2_m3_e, &pol_c2_m4_e,
&pol_c2_m1_w, &pol_c2_m2_w, &pol_c2_m3_w, &pol_c2_m4_w,
&pol_c2_m1_s, &pol_c2_m2_s, &pol_c2_m3_s, &pol_c2_m4_s,
&pol_c3_m1_n, &pol_c3_m2_n, &pol_c3_m3_n, &pol_c3_m4_n,
&pol_c3_m1_e, &pol_c3_m2_e, &pol_c3_m3_e, &pol_c3_m4_e,
&pol_c3_m1_w, &pol_c3_m2_w, &pol_c3_m3_w, &pol_c3_m4_w,
&pol_c3_m1_s, &pol_c3_m2_s, &pol_c3_m3_s, &pol_c3_m4_s
};
for(int nFrames = 0; nFrames<NN_CONVOLUTED_FRAMES; nFrames++)
{
float* pointer_zero = pooled_sources[nFrames]->ptr<float>(0);
for(int Frows = 0;Frows<pol_c2_m1_n.rows; Frows++)//all pooled maps share the pol_c2_m1_n size
{
for(int Fcols = 0;Fcols<pol_c2_m1_n.cols; Fcols++)
{
float* pointer_index = pointer_zero + Fcols + Frows * pol_c2_m1_n.cols;
*all_conv_ptr_index = *pointer_index;
all_conv_ptr_index++;
}
}
}
//==================================
//For evaluation and visualize
//==================================
imshow("c_m1", c_m1);
imshow("pol_c_m1_unpad", pol_c_m1_unpad);
imshow("m1_0_padded", m1_0_padded);
imshow("m1", m1);
//==================================
//End evaluation and visualize
//==================================
return all_convoluted_frames;
}
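//==================================
//NOTE: all_convoluted_frames is the flattened input vector for the fully
//connected net; the copy loop above lays the 32 pooled maps out frame by
//frame, so element (col + row*cols + frame*rows*cols) lines up with the
//InputDelta indexing used later in training_neural_net().
//==================================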
void load_training_images(Mat img, Mat convl_ima, int src_img_colums, int training_image)// Mat is a specific OpenCV pointer structure, so here img in fact acts as a pointer
{
//Load the training image set
Mat color_img;
//===================================================================================================================
//== Variables regarding grayscale pixel access; values are picked up and put into the Neural Network input matrix
//===================================================================================================================
int convl_ima_channels = convl_ima.channels();
int convl_ima_nRows = convl_ima.rows;
int convl_ima_nCols = convl_ima.cols * convl_ima_channels;
int convl_ima_pix_nCols = convl_ima.cols;
float* convl_ima_pointer_zero;
float* convl_ima_pointer_index;
convl_ima_pointer_zero = convl_ima.ptr<float>(0);
float convl_ima_data;
//===================================================================================================================
//== END Variables regarding grayscale pixel data pick-up into the Neural Network input matrix
//===================================================================================================================
if(verification == 0)
{
sprintf(filename, "pos%d.JPG", training_image);//Assigne a filename "pos" with index number added
}
else
{
sprintf(filename, "ver%d.JPG", training_image);//Assigne a filename "ver" with index number added
}
color_img = imread( filename, 1 );
if ( !color_img.data )
{
printf("\n");
printf("==================================================\n");
printf("No image data Error! Probably not find pos%d.JPG or ver%d \n", training_image, training_image);
printf("==================================================\n");
printf("\n");
//return -1;
}
imshow("Color Image", color_img);
cvtColor(color_img,img,CV_BGR2GRAY);
#ifdef USE_NORMALIZER
local_normalizing(img);
#endif
imshow("Gray Image", img);
img.convertTo(img, CV_32F, 1.0/255.0);//Convert pixels from 0..255 char to float 0..1
Mat img_truncated(img, Rect(0, 0, T0_FRAME_WIDTH, T0_FRAME_HEIGHT));// Rect(<start_x>, <start_y>, <width>, <height>)
convl_ima = convolute_all_stage(img_truncated, convl_ima, src_img_colums);
//waitKey(100);
waitKey(10);
for (int byte_counter = 0; byte_counter< InputNodes; byte_counter++) //InputNodes should match the number of elements in convl_ima
{
convl_ima_pointer_index = convl_ima_pointer_zero + byte_counter;
convl_ima_data = *convl_ima_pointer_index;
Input[byte_counter] = convl_ima_data;//Sigmoid already done in convolution function
}
imshow("Convolution image", convl_ima);
}
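//==================================
//NOTE summary of load_training_images(): imread (color) -> cvtColor to
//grayscale -> optional local_normalizing -> convertTo float 0..1 ->
//truncate to T0_FRAME_WIDTH x T0_FRAME_HEIGHT -> convolute_all_stage ->
//copy the flattened convolution result into Input[] (sigmoid already
//applied inside the convolution stage).
//==================================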
void load_training_target(void)//Load the training target answer for the corresponding training image
{
//Target[PatternCount][OutputNodes]
for(int tp = 0;tp<PatternCount;tp++)
{
for(int categorys = 0;categorys<OutputNodes;categorys++)
{
if(tp < (categorys+1) * TrainingPicturesAtOneCategory && tp >= (categorys) * TrainingPicturesAtOneCategory)
{
Target[tp][categorys] = 1.0;//Picture positive for this node
}
else
{
Target[tp][categorys] = 0.0;//Picture negative for this node
}
}
}
printf("=======================================================\n");
printf("***** Success to load all training target answers *****\n");
printf("=======================================================\n");
}
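//==================================
//NOTE worked example: with a hypothetical TrainingPicturesAtOneCategory of
//5, the loop above produces one-hot targets
//  tp 0..4 -> Target[tp][0] = 1.0, all other output nodes 0.0
//  tp 5..9 -> Target[tp][1] = 1.0, all other output nodes 0.0
//and so on: consecutive blocks of training pictures map to consecutive
//output nodes.
//==================================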
void setup()
{
srand (static_cast <unsigned> (time(0)));//Seed the randomizer
ReportEvery1000 = 1;
for( p = 0 ; p < PatternCount ; p++ )
{
RandomizedIndex[p] = p ;
}
}
float limit_value(float max_min, float data)
{
if(data > max_min)
{
data = max_min;
}
if(data < -max_min)
{
data = -max_min;
}
return data;
}
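//NOTE: limit_value() clamps data symmetrically to [-max_min, +max_min]; it
//is used below to bound the SumChangeFeature*Weights accumulators at
//+/-MAX_SumChangeFeatureWeights before each new contribution is added.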
void make_kernel_dome_gain(void)
{
float kernel_region_gain = 0;
int rampR;
int rampC;
for(int kerR=0;kerR<FE2KSIZESQR;kerR++)
{
for(int kerC=0;kerC<FE2KSIZESQR;kerC++)
{
if(kerR <= FE2KSIZESQR/2)
{
rampR = kerR;
}
else
{
rampR = FE2KSIZESQR - kerR;
}
if(kerC <= FE2KSIZESQR/2)
{
rampC = kerC;
}
else
{
rampC = FE2KSIZESQR - kerC;
}
kernel_region_gain = (float)(rampC + rampR) / (float)FE2KSIZESQR;
kernel2_dome_gain[kerR*FE2KSIZESQR + kerC] = kernel_region_gain;
}
}
for(int kerR=0;kerR<FE1KSIZESQR;kerR++)
{
for(int kerC=0;kerC<FE1KSIZESQR;kerC++)
{
if(kerR <= FE1KSIZESQR/2)
{
rampR = kerR;
}
else
{
rampR = FE1KSIZESQR - kerR;
}
if(kerC <= FE1KSIZESQR/2)
{
rampC = kerC;
}
else
{
rampC = FE1KSIZESQR - kerC;
}
kernel_region_gain = (float)(rampC + rampR) / (float)FE1KSIZESQR;
kernel1_dome_gain[kerR*FE1KSIZESQR + kerC] = kernel_region_gain;
}
}
for(int kerR=0;kerR<FE0KSIZESQR;kerR++)
{
for(int kerC=0;kerC<FE0KSIZESQR;kerC++)
{
if(kerR <= FE0KSIZESQR/2)
{
rampR = kerR;
}
else
{
rampR = FE0KSIZESQR - kerR;
}
if(kerC <= FE0KSIZESQR/2)
{
rampC = kerC;
}
else
{
rampC = FE0KSIZESQR - kerC;
}
kernel_region_gain = (float)(rampC + rampR) / (float)FE0KSIZESQR;
kernel0_dome_gain[kerR*FE0KSIZESQR + kerC] = kernel_region_gain;
}
}
}
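//==================================
//NOTE worked example: with FE0KSIZESQR = 7 the ramp along one axis is
//0,1,2,3,3,2,1, so kernel0_dome_gain peaks at (3+3)/7 ~ 0.86 near the
//kernel centre and falls off towards the corners. A minimal sketch that
//prints the dome (hypothetical debug helper, never called by this program):
//==================================
static void print_dome_gain0(void)
{
for(int kerR=0;kerR<FE0KSIZESQR;kerR++)
{
for(int kerC=0;kerC<FE0KSIZESQR;kerC++)
{
printf("%5.2f ", kernel0_dome_gain[kerR*FE0KSIZESQR + kerC]);
}
printf("\n");
}
}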
void do_dome_gain(void)
{
//float domeKernel[FEATURE0][FE0KSIZE+1];
//float kernel0_dome_gain[FE0KSIZE+1];
for(int feature=0;feature < FEATURE0; feature++)
{
for(int kerR=0;kerR<FE0KSIZESQR;kerR++)
{
for(int kerC=0;kerC<FE0KSIZESQR;kerC++)
{
Feature0domeKernel[feature][kerR*FE0KSIZESQR + kerC] = Feature0Kernel[feature][kerR*FE0KSIZESQR + kerC] * kernel0_dome_gain[kerR*FE0KSIZESQR + kerC];
}
}
}
}
void make_SumChangeFeatureWeights(float* back_trk_ptr_zero, float* pol_c_ptr_zeroAside, int col, int row,int go_t_f_ker,float delta, int FirstLayerFeature_nr)
{
float* back_track_ptr;
float* pol_c_ptr_indexAside;
float max_pixel = MAX_PIXEL_START_DEFAULT;
int this_bkt_max;
for(int bkt=0;bkt<4;bkt++)
{
if(bkt<2)
{
back_track_ptr = back_trk_ptr_zero + col*2+bkt + row*2* c2_m1_n.cols;
}
else
{
back_track_ptr = back_trk_ptr_zero + col*2+bkt-2 + row*2* c2_m1_n.cols + c2_m1_n.cols;
}
if(*back_track_ptr > max_pixel)
{
this_bkt_max = bkt;
max_pixel = *back_track_ptr;
}
}
#ifdef USE_KER_HAT_GAIN
float kernel_region_gain = 0;
int rampR;
int rampC;
#else
float kernel_region_gain = 1.0;
#endif
for(int kerR=0;kerR<FE2KSIZESQR;kerR++)
{
for(int kerC=0;kerC<FE2KSIZESQR;kerC++)
{
#ifdef USE_KER_HAT_GAIN
if(kerR <= FE2KSIZESQR/2)
{
rampR = kerR;
}
else
{
rampR = FE2KSIZESQR - kerR;
}
if(kerC <= FE2KSIZESQR/2)
{
rampC = kerC;
}
else
{
rampC = FE2KSIZESQR - kerC;
}
kernel_region_gain = (float)(rampC + rampR) / (float)FE2KSIZESQR;
#endif
int offset_add;
if(this_bkt_max < 2)
{
offset_add = this_bkt_max + col + kerC + row*2*pol_c_m1.cols + kerR*pol_c_m1.cols;
}
else
{
offset_add = (this_bkt_max-2) + pol_c_m1.cols + col + kerC + row*2*pol_c_m1.cols + kerR*pol_c_m1.cols;
}
pol_c_ptr_indexAside = pol_c_ptr_zeroAside + offset_add;
#ifdef USE_BACK_PROP_OFF_AT_PADDING
int store_delta = 1;
if(col*2 <= PADDING_SIZE_TO_T2P && kerC <= PADDING_SIZE_TO_T2P)
{
store_delta = 0;
}
if(col*2 >= (POL_C_M_WIDTH - PADDING_SIZE_TO_T2P) && kerC >= PADDING_SIZE_TO_T2P)
{
store_delta = 0;
}
if(row*2 <= PADDING_SIZE_TO_T2P && kerR <= PADDING_SIZE_TO_T2P)
{
store_delta = 0;
}
if(row*2 >= (POL_C_M_HIGHT - PADDING_SIZE_TO_T2P) && kerR >= PADDING_SIZE_TO_T2P)
{
store_delta = 0;
}
if(store_delta == 1)
{
pol_c_m_delta[FirstLayerFeature_nr][offset_add] += Feature2Kernel[go_t_f_ker][kerR*FE2KSIZESQR + kerC] * delta * kernel_region_gain;//here pol_c_m_delta[][] is like the "Accum"
}
#else
pol_c_m_delta[FirstLayerFeature_nr][offset_add] += Feature2Kernel[go_t_f_ker][kerR*FE2KSIZESQR + kerC] * delta * kernel_region_gain;//here pol_c_m_delta[][] is like the "Accum"
#endif
//====================================================
//Add up the sum weight calculation with respect to delta, kernel weight and input pixel (the input pixel is served through pointer *pol_c_ptr_indexAside)
//====================================================
SumChangeFeatureWeights[kerR*FE2KSIZESQR + kerC][go_t_f_ker] = limit_value(MAX_SumChangeFeatureWeights, SumChangeFeatureWeights[kerR*FE2KSIZESQR + kerC][go_t_f_ker]);
SumChangeFeatureWeights[kerR*FE2KSIZESQR + kerC][go_t_f_ker] += (*pol_c_ptr_indexAside) * delta;
//====================================================
}
}
//Add up the sum of the kernel bias weight calculation also here, with respect to delta and the kernel bias weight (the bias node input is always 1).
SumChangeFeatureWeights[FE2KSIZE][go_t_f_ker] += delta;//bias node
//End bias weights
}
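//==================================
//NOTE: in the bkt loop above, bkt = 0..3 scans the 2x2 pooling window
//(0,1 = top row, 2,3 = bottom row) of the unpooled map and remembers which
//pixel won the max pooling, so the kernel gradient and the backpropagated
//delta are routed only through that winning pixel. The
//USE_BACK_PROP_OFF_AT_PADDING blocks then stop deltas from being stored for
//kernel taps that fall inside the padded border. A minimal sketch of the
//same 2x2 argmax (hypothetical helper, never called by this program):
//==================================
static int argmax_2x2(const float* src_zero, int col, int row, int src_cols)
{
int best_bkt = 0;
float max_pixel = MAX_PIXEL_START_DEFAULT;
for(int bkt=0;bkt<4;bkt++)
{
//bkt & 1 selects the column, bkt >> 1 selects the row of the 2x2 window
const float* p = src_zero + col*2 + (bkt & 1) + (row*2 + (bkt >> 1)) * src_cols;
if(*p > max_pixel)
{
best_bkt = bkt;
max_pixel = *p;
}
}
return best_bkt;//0 = top-left, 1 = top-right, 2 = bottom-left, 3 = bottom-right
}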
void make_SumChangeFeature1Weights(float* back_trk_ptr_zero, float* pol_c_ptr_zeroAside, int col, int row,int go_t_f_ker,float delta, int FirstLayerFeature_nr)
{
float* back_track_ptr;
float* pol_c_ptr_indexAside;
float max_pixel = MAX_PIXEL_START_DEFAULT;
int this_bkt_max;
//Example:
//*back_track_ptr = c_m1
//*pol_c_ptr_zeroAside = pol_c0_m1
// col and row come from pol_c_m1
for(int bkt=0;bkt<4;bkt++)
{
if(bkt<2)
{
back_track_ptr = back_trk_ptr_zero + col*2+bkt + row*2* c_m1.cols;
}
else
{
back_track_ptr = back_trk_ptr_zero + col*2+bkt-2 + row*2* c_m1.cols + c_m1.cols;
}
if(*back_track_ptr > max_pixel)
{
this_bkt_max = bkt;
max_pixel = *back_track_ptr;
}
}
#ifdef USE_KER_HAT_GAIN
float kernel_region_gain = 0;
int rampR;
int rampC;
#else
float kernel_region_gain = 1.0;
#endif
for(int kerR=0;kerR<FE1KSIZESQR;kerR++)
{
for(int kerC=0;kerC<FE1KSIZESQR;kerC++)
{
#ifdef USE_KER_HAT_GAIN
if(kerR <= FE1KSIZESQR/2)
{
rampR = kerR;
}
else
{
rampR = FE1KSIZESQR - kerR;
}
if(kerC <= FE1KSIZESQR/2)
{
rampC = kerC;
}
else
{
rampC = FE1KSIZESQR - kerC;
}
kernel_region_gain = (float)(rampC + rampR) / (float)FE1KSIZESQR;
#endif
int offset_add;
if(this_bkt_max < 2)
{
offset_add = this_bkt_max + col + kerC + row*2*pol_c0_m1.cols + kerR*pol_c0_m1.cols;
}
else
{
offset_add = (this_bkt_max-2) + pol_c0_m1.cols + col + kerC + row*2*pol_c0_m1.cols + kerR*pol_c0_m1.cols;
}
pol_c_ptr_indexAside = pol_c_ptr_zeroAside + offset_add;
#ifdef USE_BACK_PROP_OFF_AT_PADDING
int store_delta = 1;
if(col*2 <= PADDING_SIZE_TO_T0P && kerC <= PADDING_SIZE_TO_T0P)
{
store_delta = 0;
}
if(col*2 >= (POL_C0_M_WIDTH - PADDING_SIZE_TO_T0P) && kerC >= PADDING_SIZE_TO_T0P)
{
store_delta = 0;
}
if(row*2 <= PADDING_SIZE_TO_T0P && kerR <= PADDING_SIZE_TO_T0P)
{
store_delta = 0;
}
if(row*2 >= (POL_C0_M_HIGHT - PADDING_SIZE_TO_T0P) && kerR >= PADDING_SIZE_TO_T0P)
{
store_delta = 0;
}
if(store_delta == 1)
{
pol_c0_m_delta[FirstLayerFeature_nr][offset_add] += Feature1Kernel[go_t_f_ker][kerR*FE1KSIZESQR + kerC] * delta * kernel_region_gain;//here pol_c0_m_delta[][] is like the "Accum"
}
#else
pol_c0_m_delta[FirstLayerFeature_nr][offset_add] += Feature1Kernel[go_t_f_ker][kerR*FE1KSIZESQR + kerC] * delta * kernel_region_gain;//here pol_c0_m_delta[][] is like the "Accum"
#endif
//====================================================
//Add up the sum weight calculation with respect to delta, kernel weight and input pixel (the input pixel is served through pointer *pol_c_ptr_indexAside)
//====================================================
SumChangeFeature1Weights[kerR*FE1KSIZESQR + kerC][go_t_f_ker] = limit_value(MAX_SumChangeFeatureWeights, SumChangeFeature1Weights[kerR*FE1KSIZESQR + kerC][go_t_f_ker]);
SumChangeFeature1Weights[kerR*FE1KSIZESQR + kerC][go_t_f_ker] += (*pol_c_ptr_indexAside) * delta;
//====================================================
}
}
//Add up the sum of the kernel bias weight calculation also here, with respect to delta and the kernel bias weight (the bias node input is always 1).
SumChangeFeature1Weights[FE1KSIZE][go_t_f_ker] += delta;//bias node
//End bias weights
}
void make_SumChangeFeature0Weights(float* back_trk_ptr_zero, float* pol_c_ptr_zeroAside, int col, int row,int go_t_f_ker,float delta, int FirstLayerFeature_nr)
{
float* back_track_ptr;
float* pol_c_ptr_indexAside;
float max_pixel = MAX_PIXEL_START_DEFAULT;
int this_bkt_max;
for(int bkt=0;bkt<4;bkt++)
{
if(bkt<2)
{
back_track_ptr = back_trk_ptr_zero + col*2+bkt + row*2* m1_conv0.cols;
}
else
{
back_track_ptr = back_trk_ptr_zero + col*2+bkt-2 + row*2* m1_conv0.cols + m1_conv0.cols;
}
if(*back_track_ptr > max_pixel)
{
this_bkt_max = bkt;
max_pixel = *back_track_ptr;
}
}
for(int kerR=0;kerR<FE0KSIZESQR;kerR++)
{
for(int kerC=0;kerC<FE0KSIZESQR;kerC++)
{
int offset_add;
if(this_bkt_max < 2)
{
offset_add = this_bkt_max + col + kerC + row*2*m1_0_padded.cols + kerR*m1_0_padded.cols;
}
else
{
offset_add = (this_bkt_max-2) + m1_0_padded.cols + col + kerC + row*2*m1_0_padded.cols + kerR*m1_0_padded.cols;
}
pol_c_ptr_indexAside = pol_c_ptr_zeroAside + offset_add;
//====================================================
//Add up the sum weight calculation with respect to delta, kernel weight and input pixel (the input pixel is served through pointer *pol_c_ptr_indexAside)
//====================================================
SumChangeFeature0Weights[kerR*FE0KSIZESQR + kerC][go_t_f_ker] = limit_value(MAX_SumChangeFeatureWeights, SumChangeFeature0Weights[kerR*FE0KSIZESQR + kerC][go_t_f_ker]);
SumChangeFeature0Weights[kerR*FE0KSIZESQR + kerC][go_t_f_ker] += (*pol_c_ptr_indexAside) * delta;
//====================================================
}
}
//Add up the sum of the kernel bias weight calculation also here, with respect to delta and the kernel bias weight (the bias node input is always 1).
SumChangeFeature0Weights[FE0KSIZE][go_t_f_ker] += delta;//bias node
//End bias weights
}
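//==================================
//NOTE: make_SumChangeFeatureWeights, make_SumChangeFeature1Weights and
//make_SumChangeFeature0Weights are the same routine specialized per kernel
//layer; only the kernel size constants, the source Mats and the accumulator
//arrays differ. The layer 0 version has no dome/hat gain and no padding
//mask, and accumulates no delta map because layer 0 reads the input image
//directly.
//==================================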
//End part 2
//Part 3
void show_kernel(void)
{
//Show all three kernel layers
//layer_0_kernels
float* ptr_src_index0;
ptr_src_index0 = layer_0_kernels.ptr<float>(0);
float* ptr_src_index1;
ptr_src_index1 = layer_1_kernels.ptr<float>(0);
float* ptr_src_index2;
ptr_src_index2 = layer_2_kernels.ptr<float>(0);
//Make show mat of all kernels in layer 0
int count_kernel_number = 0;
int row_counter = 0;
for(int row=0;row<layer_0_kernels.rows;row++)
{
for(int col=0;col<layer_0_kernels.cols;col++)
{
//Feature0Kernel[FEATURE0][FE0KSIZE+1]
*ptr_src_index0 = Feature0Kernel[count_kernel_number][col + row_counter * layer_0_kernels.cols];
ptr_src_index0++;
}
if(row_counter < FE0KSIZESQR-1)
{
row_counter++;
}
else
{
count_kernel_number++;
row_counter = 0;
}
}
//Make show mat of all kernels in layer 1
count_kernel_number = 0;
row_counter = 0;
for(int row=0;row<layer_1_kernels.rows;row++)
{
for(int col=0;col<layer_1_kernels.cols;col++)
{
//Feature1Kernel[FEATURE1][FE1KSIZE+1]
*ptr_src_index1 = Feature1Kernel[count_kernel_number][col + row_counter * layer_1_kernels.cols];
ptr_src_index1++;
}
if(row_counter < FE1KSIZESQR-1)
{
row_counter++;
}
else
{
count_kernel_number++;
row_counter = 0;
}
}
//Make show mat of all kernels in layer 2
count_kernel_number = 0;
row_counter = 0;
for(int row=0;row<layer_2_kernels.rows;row++)
{
for(int col=0;col<layer_2_kernels.cols;col++)
{
//Feature2Kernel[FEATURE2][FE2KSIZE+1]
*ptr_src_index2 = Feature2Kernel[count_kernel_number][col + row_counter * layer_2_kernels.cols];
ptr_src_index2++;
}
if(row_counter < FE2KSIZESQR-1)
{
row_counter++;
}
else
{
count_kernel_number++;
row_counter = 0;
}
}
sigmoid_mat(layer_0_kernels);
sigmoid_mat(layer_1_kernels);
sigmoid_mat(layer_2_kernels);
imshow("K0", layer_0_kernels);
imshow("K1", layer_1_kernels);
imshow("K2", layer_2_kernels);
}
void init_fully_connected(void)
{
/******************************************************************
* Initialize HiddenWeights and ChangeHiddenWeights
******************************************************************/
for( i = 0 ; i < HiddenNodes ; i++ ) {
for( j = 0 ; j <= InputNodes ; j++ ) {
ChangeHiddenWeights[j][i] = 0.0 ;
Rando = (float) (rand() % 65535) / 65536;
HiddenWeights[j][i] = 2.0 * ( Rando - 0.5 ) * InitialWeightMax ;
}
}
/******************************************************************
* Initialize OutputWeights and ChangeOutputWeights
******************************************************************/
for( i = 0 ; i < OutputNodes ; i ++ ) {
for( j = 0 ; j <= HiddenNodes ; j++ ) {
ChangeOutputWeights[j][i] = 0.0 ;
Rando = (float) (rand() % 65535) / 65536;
OutputWeights[j][i] = 2.0 * ( Rando - 0.5 ) * InitialWeightMax ;
}
}
printf("Initial the fully connected Neural Network with random values \n");
}
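//NOTE: Rando = (rand() % 65535) / 65536 is roughly uniform in [0, 1), so
//2.0*(Rando - 0.5)*InitialWeightMax draws each start weight roughly
//uniformly from [-InitialWeightMax, +InitialWeightMax]. The same draw as a
//minimal sketch (hypothetical helper, never called by this program):
static float random_weight(float initial_weight_max)
{
float rando = (float) (rand() % 65535) / 65536;//roughly uniform in [0,1)
return 2.0f * (rando - 0.5f) * initial_weight_max;
}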
void locking_kernel_training(void)
{
#ifdef USE_LOCK_KER_0_1_AFTER_RERUN_NR
if(rerun_fully_connected > LOCK_KER_0_AFTER_RERUN_NR-1)
{
LearningRateKer0 = 0.0f;//
printf("Kernel layer 0 is locked\n");
}
if(rerun_fully_connected > LOCK_KER_1_AFTER_RERUN_NR-1)
{
LearningRateKer1 = 0.0f;//
printf("Kernel layer 1 is locked\n");
}
#endif
}
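//NOTE: "locking" a kernel layer here only zeroes its learning rate; the
//weight-change sums for that layer are presumably still accumulated but
//scaled to zero in the update step, so the layer-0/1 kernels stay frozen
//while the fully connected part keeps training.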
void training_neural_net(Mat m1, Mat all_convoluted_frames, int m1_img_colums)
{
//Initial neural net weights
srand (static_cast <unsigned> (time(0)));//Seed the randomizer
setup();
/******************************************************************
* Initialize the shared kernel weights (Feature2/1/0Kernel)
******************************************************************/
if(load_kernels == 0)
{
//Kernel 2 init
for(int go_t_f_ker = 0;go_t_f_ker < FEATURE2;go_t_f_ker++)
{
for(int kernel_index=0;kernel_index<(FE2KSIZE+1);kernel_index++)//FE2KSIZE+1, +1 is for the bias weight
{
Rando = (float) (rand() % 65535) / 65536;
Feature2Kernel[go_t_f_ker][kernel_index] = 2.0 * ( Rando - 0.5 ) * InitialWeightMax * KERMULINIT;
}
}
//************
//Kernel 1 init
for(int go_t_f_ker = 0;go_t_f_ker < FEATURE1;go_t_f_ker++)
{
for(int kernel_index=0;kernel_index<(FE1KSIZE+1);kernel_index++)//FE1KSIZE+1, +1 is for the bias weight
{
Rando = (float) (rand() % 65535) / 65536;
Feature1Kernel[go_t_f_ker][kernel_index] = 2.0 * ( Rando - 0.5 ) * InitialWeightMax * KERMULINIT;
}
}
//************
//Kernel 0 init
for(int go_t_f_ker = 0;go_t_f_ker < FEATURE0;go_t_f_ker++)
{
for(int kernel_index=0;kernel_index<(FE0KSIZE+1);kernel_index++)//FE0KSIZE+1, +1 is for the bias weight
{
Rando = (float) (rand() % 65535) / 65536;
Feature0Kernel[go_t_f_ker][kernel_index] = 2.0 * ( Rando - 0.5 ) * InitialWeightMax * KERMULINIT;
}
}
//************
}
init_fully_connected();
printf("Initial/Untrained Outputs: \n");
toTerminal();
//End Init
int train_kernel = 0;
/******************************************************************
* Begin training
******************************************************************/
for( TrainingCycle = 1 ; TrainingCycle < 40000 ; TrainingCycle++) {
//randomize_dropoutHid();
/******************************************************************
* Randomize order of training patterns
******************************************************************/
for( p = 0 ; p < PatternCount ; p++) {
q = rand() % PatternCount;
r = RandomizedIndex[p] ;
RandomizedIndex[p] = RandomizedIndex[q] ;
RandomizedIndex[q] = r ;
}
Error_level = 0.0 ;
/******************************************************************
* Cycle through each training pattern in the randomized order
******************************************************************/
for( q = 0 ; q < PatternCount ; q++ ) {
p = RandomizedIndex[q];
// show_kernel();
//for(int repeat_i=0;repeat_i<repeat_same;repeat_i++)
//{
// if(verification == 1)
// {
// if(repeat_i>0)
// {
// break;
// }
// }
randomize_dropoutHid();
show_kernel();
/*********************************
* Load a training image
*********************************/
load_training_images(m1, all_convoluted_frames, m1_img_colums, p);
/******************************************************************
* Compute hidden layer activations
******************************************************************/
for( i = 0 ; i < HiddenNodes ; i++ ) {
Accum = HiddenWeights[InputNodes][i] ;
for( j = 0 ; j < InputNodes ; j++ ) {
Accum += Input[j] * HiddenWeights[j][i] ;
}
if(dropoutHidden[i] == 0)
{
Hidden[i] = 1.0/(1.0 + exp(-Accum)) ;
}
else
{
Hidden[i] = 0.0f;// Hidden node
}
}
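//NOTE: dropoutHidden[] (re-randomized per pattern by randomize_dropoutHid()
//above) zeroes a random subset of hidden activations; the matching
//HiddenDelta entries are zeroed in the backpropagation below, so a dropped
//node neither fires nor learns on this pattern.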
/******************************************************************
* Compute output layer activations and calculate errors
******************************************************************/
for( i = 0 ; i < OutputNodes ; i++ ) {
Accum = OutputWeights[HiddenNodes][i] ;
for( j = 0 ; j < HiddenNodes ; j++ ) {
Accum += Hidden[j] * OutputWeights[j][i] ;
}
Output[i] = 1.0/(1.0 + exp(-Accum)) ;
OutputDelta[i] = (Target[p][i] - Output[i]) * Output[i] * (1.0 - Output[i]) ;
Error_level += 0.5 * (Target[p][i] - Output[i]) * (Target[p][i] - Output[i]) ;
}
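//NOTE: OutputDelta above uses the sigmoid derivative,
//d/dx sigmoid(x) = sigmoid(x)*(1 - sigmoid(x)) = Output*(1 - Output),
//so each delta is the output error scaled by the local slope of the
//activation; HiddenDelta and InputDelta below follow the same pattern.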
if(verification == 0)
{
//Start training
/******************************************************************
* Backpropagate errors to hidden layer
******************************************************************/
for( i = 0 ; i < HiddenNodes ; i++ ) {
Accum = 0.0 ;
for( j = 0 ; j < OutputNodes ; j++ ) {
Accum += OutputWeights[i][j] * OutputDelta[j] ;
}
if(dropoutHidden[i] == 0)
{
HiddenDelta[i] = Accum * Hidden[i] * (1.0 - Hidden[i]) ;
}
else
{
HiddenDelta[i] = 0.0f;// Hidden node
}
}
/******************************************************************
* Backpropagate errors to Input layer
******************************************************************/
for( i = 0 ; i < InputNodes ; i++ ) {
Accum = 0.0 ;
for( j = 0 ; j < HiddenNodes ; j++ ) {
Accum += HiddenWeights[i][j] * HiddenDelta[j] ;
}
InputDelta[i] = Accum * Input[i] * (1.0 - Input[i]) ;
}
/******************************************************************
* Update pol_c_mX-->Shared kernel Weights
******************************************************************/
float* conv_ptr_zero;
float* conv_ptr_index;
conv_ptr_zero = all_convoluted_frames.ptr<float>(0);
conv_ptr_index = conv_ptr_zero;
float delta;
float* back_trk_ptr_zero;
float* pol_c_ptr_zeroAside;
float* pol_c_ptr_zeroBside;
float sumOfKerData = 0.0;
float normalizeKerData = 0.0;
//----------------------------------------
//Clear the whole pol_c_m_delta[][] vector
//----------------------------------------
for(int feat1=0;feat1<FEATURE1;feat1++)
{
for(int clear_p_c_m_delta=0;clear_p_c_m_delta<(POL_C_M_HIGHT * POL_C_M_WIDTH);clear_p_c_m_delta++)
{
pol_c_m_delta[feat1][clear_p_c_m_delta] = 0.0;
}
}
//----------------------------------------
//----------------------------------------
//----------------------------------------
//Clear the whole pol_c0_m_delta[][] vector
//----------------------------------------
for(int feat0=0;feat0<FEATURE0;feat0++)
{
for(int clear_p_c0_m_delta=0;clear_p_c0_m_delta<(POL_C0_M_HIGHT * POL_C0_M_WIDTH);clear_p_c0_m_delta++)
{
pol_c0_m_delta[feat0][clear_p_c0_m_delta] = 0.0;
}
}
//----------------------------------------
//----------------------------------------
for(int go_t_f_ker = 0;go_t_f_ker < FEATURE2;go_t_f_ker++)
{
for(int row = 0; row < pol_c2_m1_n.rows; row++ )
{
for(int col =0; col < pol_c2_m1_n.cols; col++)
{
//all_convoluted_frames
delta = InputDelta[col + row * pol_c2_m1_n.cols + pol_c2_m1_n.rows * pol_c2_m1_n.cols * go_t_f_ker];
for(int clear_sum_p=0;clear_sum_p<(FE2KSIZE+1);clear_sum_p++)//FE2KSIZE+1, +1 is for the bias weight
{
SumChangeFeatureWeights[clear_sum_p][go_t_f_ker] = 0.0;
}
//==========
//Source Mats listed once in go_t_f_ker order: the backtrack source is the
//unpooled c2_/c3_ map of the feature, the A-side input is the matching
//pol_c_mX, and FirstLayerFeature_nr always equals go_t_f_ker.
static Mat* const back_trk_srcs[FEATURE2] =
{
&c2_m1_n, &c2_m2_n, &c2_m3_n, &c2_m4_n,
&c2_m1_e, &c2_m2_e, &c2_m3_e, &c2_m4_e,
&c2_m1_w, &c2_m2_w, &c2_m3_w, &c2_m4_w,
&c2_m1_s, &c2_m2_s, &c2_m3_s, &c2_m4_s,
&c3_m1_n, &c3_m2_n, &c3_m3_n, &c3_m4_n,
&c3_m1_e, &c3_m2_e, &c3_m3_e, &c3_m4_e,
&c3_m1_w, &c3_m2_w, &c3_m3_w, &c3_m4_w,
&c3_m1_s, &c3_m2_s, &c3_m3_s, &c3_m4_s
};
static Mat* const pol_c_srcs[FEATURE2] =
{
&pol_c_m1, &pol_c_m2, &pol_c_m3, &pol_c_m4,
&pol_c_m5, &pol_c_m6, &pol_c_m7, &pol_c_m8,
&pol_c_m9, &pol_c_m10, &pol_c_m11, &pol_c_m12,
&pol_c_m13, &pol_c_m14, &pol_c_m15, &pol_c_m16,
&pol_c_m17, &pol_c_m18, &pol_c_m19, &pol_c_m20,
&pol_c_m21, &pol_c_m22, &pol_c_m23, &pol_c_m24,
&pol_c_m25, &pol_c_m26, &pol_c_m27, &pol_c_m28,
&pol_c_m29, &pol_c_m30, &pol_c_m31, &pol_c_m32
};
int FirstLayerFeature_nr = go_t_f_ker;
back_trk_ptr_zero = back_trk_srcs[go_t_f_ker]->ptr<float>(0);
pol_c_ptr_zeroAside = pol_c_srcs[go_t_f_ker]->ptr<float>(0);
make_SumChangeFeatureWeights(back_trk_ptr_zero, pol_c_ptr_zeroAside, col, row, go_t_f_ker, delta, FirstLayerFeature_nr);
//************
//==================================================================================
//Make the last step in kernel weight calculation
sumOfKerData = 0.0;
normalizeKerData = 0.0;
for(int kernel_index=0;kernel_index<(FE2KSIZE+1);kernel_index++)//FE2KSIZE+1, +1 is for the bias weight
{
SumChangeFeatureWeights[kernel_index][go_t_f_ker] = LearningRateKer * SumChangeFeatureWeights[kernel_index][go_t_f_ker] + MomentumKer2 * ChangeFeatureWeights[kernel_index][go_t_f_ker];
ChangeFeatureWeights[kernel_index][go_t_f_ker] = SumChangeFeatureWeights[kernel_index][go_t_f_ker];//store as the previous change for the momentum term
Feature2Kernel[go_t_f_ker][kernel_index] += ChangeFeatureWeights[kernel_index][go_t_f_ker];
sumOfKerData += Feature2Kernel[go_t_f_ker][kernel_index];
}
normalizeKerData = sumOfKerData / (FE2KSIZE+1);//mean over all kernel weights incl. the bias weight
for(int kernel_index=0;kernel_index<(FE2KSIZE+1);kernel_index++)//FE2KSIZE+1, +1 is for the bias weight
{
Feature2Kernel[go_t_f_ker][kernel_index] -= normalizeKerData;//subtract the mean so the kernel stays zero-mean
}
//Now the shared weights updated
//==================================================================================
//*********************
}//end col for loop
}//end row for loop
}//End FEATURE2 for loop
// printf("Delta = %f\n", delta);
//Make the last step in the backpropagation of the second feature kernel layer:
// pol_c_m_delta[FirstLayerFeature_nr][offset_add] += Feature2Kernel[go_t_f_ker][kerR*FE2KSIZESQR + kerC] * delta;//here pol_c_m_delta[][] is like the "Accum"
//then the end stage is done outside that function, analogous to:
// InputDelta[i] = Accum * Input[i] * (1.0 - Input[i]) ;
//----------------------------------------
//backprop last step in the pol_c_m_delta[][] vector
//----------------------------------------
float pixel_input_second_layer;
float* pol_c_m_zero;
float* pol_c_m_index;
//Lookup table over the 32 pooled first-layer feature maps; replaces the hand-written
//32-branch if(feat1==n) chain with the same mat-to-index mapping (pol_c_m1 -> feat1 0, ... pol_c_m32 -> feat1 31).
cv::Mat* const pol_c_m_tbl[FEATURE1] =
{
&pol_c_m1, &pol_c_m2, &pol_c_m3, &pol_c_m4, &pol_c_m5, &pol_c_m6, &pol_c_m7, &pol_c_m8,
&pol_c_m9, &pol_c_m10, &pol_c_m11, &pol_c_m12, &pol_c_m13, &pol_c_m14, &pol_c_m15, &pol_c_m16,
&pol_c_m17, &pol_c_m18, &pol_c_m19, &pol_c_m20, &pol_c_m21, &pol_c_m22, &pol_c_m23, &pol_c_m24,
&pol_c_m25, &pol_c_m26, &pol_c_m27, &pol_c_m28, &pol_c_m29, &pol_c_m30, &pol_c_m31, &pol_c_m32
};
for(int feat1=0;feat1<FEATURE1;feat1++)
{
pol_c_m_zero = pol_c_m_tbl[feat1]->ptr<float>(0);
for(int pol_c_m_delta_indx=0;pol_c_m_delta_indx<(POL_C_M_HIGHT * POL_C_M_WIDTH);pol_c_m_delta_indx++)
{
pol_c_m_index = pol_c_m_zero + pol_c_m_delta_indx;
pixel_input_second_layer = *pol_c_m_index;
pol_c_m_delta[feat1][pol_c_m_delta_indx] = pol_c_m_delta[feat1][pol_c_m_delta_indx] * pixel_input_second_layer * (1.0 - pixel_input_second_layer);//Scale the accumulated delta by the logistic derivative y*(1-y)
}
}
//----------------------------------------
//----------------------------------------
//Lookup tables: entry n holds the mats the original 32-branch if(go_t_f_ker==n) chain selected
//manually for kernel number n (c_m1..c_m32 and pol_c0_m1..pol_c0_m32 in order).
cv::Mat* const c_m_tbl[FEATURE1] =
{
&c_m1, &c_m2, &c_m3, &c_m4, &c_m5, &c_m6, &c_m7, &c_m8,
&c_m9, &c_m10, &c_m11, &c_m12, &c_m13, &c_m14, &c_m15, &c_m16,
&c_m17, &c_m18, &c_m19, &c_m20, &c_m21, &c_m22, &c_m23, &c_m24,
&c_m25, &c_m26, &c_m27, &c_m28, &c_m29, &c_m30, &c_m31, &c_m32
};
cv::Mat* const pol_c0_m_tbl[FEATURE0] =
{
&pol_c0_m1, &pol_c0_m2, &pol_c0_m3, &pol_c0_m4, &pol_c0_m5, &pol_c0_m6, &pol_c0_m7, &pol_c0_m8,
&pol_c0_m9, &pol_c0_m10, &pol_c0_m11, &pol_c0_m12, &pol_c0_m13, &pol_c0_m14, &pol_c0_m15, &pol_c0_m16,
&pol_c0_m17, &pol_c0_m18, &pol_c0_m19, &pol_c0_m20, &pol_c0_m21, &pol_c0_m22, &pol_c0_m23, &pol_c0_m24,
&pol_c0_m25, &pol_c0_m26, &pol_c0_m27, &pol_c0_m28, &pol_c0_m29, &pol_c0_m30, &pol_c0_m31, &pol_c0_m32
};
for(int go_t_f_ker = 0;go_t_f_ker < FEATURE1;go_t_f_ker++)
{
for(int row = 0; row < pol_c_m1_unpad.rows; row++ )
{
for(int col =0; col < pol_c_m1_unpad.cols; col++)
{
//all_convoluted_frames
delta = pol_c_m_delta[go_t_f_ker][(col + PADDING_SIZE_TO_T1P) + (row + PADDING_SIZE_TO_T1P) * pol_c_m1.cols];
for(int clear_sum_p=0;clear_sum_p<(FE1KSIZE+1);clear_sum_p++)//FE1KSIZE+1, +1 is for the bias weight
{
SumChangeFeature1Weights[clear_sum_p][go_t_f_ker] = 0.0;
}
//==========
int FirstLayerFeature_nr = go_t_f_ker;//Tracks the kernel number (the original notes it is unused at the very first conv layer)
back_trk_ptr_zero = c_m_tbl[go_t_f_ker]->ptr<float>(0);
pol_c_ptr_zeroAside = pol_c0_m_tbl[go_t_f_ker]->ptr<float>(0);
make_SumChangeFeature1Weights(back_trk_ptr_zero, pol_c_ptr_zeroAside, col, row, go_t_f_ker, delta, FirstLayerFeature_nr);
sumOfKerData = 0.0;
normalizeKerData = 0.0;
for(int kernel_index=0;kernel_index<(FE1KSIZE+1);kernel_index++)//FE1KSIZE+1, +1 is for the bias weight
{
// printf("SumChangeFeature1Weights[kernel_index][go_t_f_ker] %1.10f\n", SumChangeFeature1Weights[kernel_index][go_t_f_ker]);
SumChangeFeature1Weights[kernel_index][go_t_f_ker] = LearningRateKer1 * SumChangeFeature1Weights[kernel_index][go_t_f_ker] + MomentumKer1 * ChangeFeature1Weights[kernel_index][go_t_f_ker];
ChangeFeature1Weights[kernel_index][go_t_f_ker] = SumChangeFeature1Weights[kernel_index][go_t_f_ker];//Remember this update so it feeds the momentum term next step
Feature1Kernel[go_t_f_ker][kernel_index] += ChangeFeature1Weights[kernel_index][go_t_f_ker];
sumOfKerData += Feature1Kernel[go_t_f_ker][kernel_index];
}
normalizeKerData = sumOfKerData / (FE1KSIZE+1);//Mean of all weights in this kernel (incl. bias)
for(int kernel_index=0;kernel_index<(FE1KSIZE+1);kernel_index++)//FE1KSIZE+1, +1 is for the bias weight
{
Feature1Kernel[go_t_f_ker][kernel_index] += -normalizeKerData;//Keep the kernel zero-mean
}
}//end col for loop
}//end row for loop
}//End FEATURE1 for loop
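//Note: SumChangeFeature1Weights is cleared and applied once per (row,col) position above, so the
//shared kernel weights are nudged after every pixel step rather than summed once per whole image.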
//End part 3
//Part 4
//Make the last step of the backpropagation into the first feature kernel layer.
//Same pattern as above, one layer down: the deltas accumulated in pol_c0_m_delta[][]
//(the "Accum") are finished below with
// InputDelta[i] = Accum * Input[i] * (1.0 - Input[i]);
//----------------------------------------
//backprop last step applied over the pol_c0_m_delta[][] vector
//----------------------------------------
float pixel_input_first_layer;
float* pol_c0_m_zero;
float* pol_c0_m_index;
for(int feat0=0;feat0<FEATURE0;feat0++)
{
pol_c0_m_zero = pol_c0_m_tbl[feat0]->ptr<float>(0);//Same lookup table as declared above, replacing the 32-branch if(feat0==n) chain
for(int pol_c0_m_delta_indx=0;pol_c0_m_delta_indx<(POL_C0_M_HIGHT * POL_C0_M_WIDTH);pol_c0_m_delta_indx++)
{
pol_c0_m_index = pol_c0_m_zero + pol_c0_m_delta_indx;
pixel_input_first_layer = *pol_c0_m_index;
pol_c0_m_delta[feat0][pol_c0_m_delta_indx] = pol_c0_m_delta[feat0][pol_c0_m_delta_indx] * pixel_input_first_layer * (1.0 - pixel_input_first_layer);//Scale the accumulated delta by the logistic derivative y*(1-y)
}
}
//----------------------------------------
//----------------------------------------
//Lookup table: entry n holds the first-layer convolution output the original 32-branch
//if(go_t_f_ker==n) chain selected manually (m1_conv0..m32_conv0 in order).
cv::Mat* const conv0_tbl[FEATURE0] =
{
&m1_conv0, &m2_conv0, &m3_conv0, &m4_conv0, &m5_conv0, &m6_conv0, &m7_conv0, &m8_conv0,
&m9_conv0, &m10_conv0, &m11_conv0, &m12_conv0, &m13_conv0, &m14_conv0, &m15_conv0, &m16_conv0,
&m17_conv0, &m18_conv0, &m19_conv0, &m20_conv0, &m21_conv0, &m22_conv0, &m23_conv0, &m24_conv0,
&m25_conv0, &m26_conv0, &m27_conv0, &m28_conv0, &m29_conv0, &m30_conv0, &m31_conv0, &m32_conv0
};
for(int go_t_f_ker = 0;go_t_f_ker < FEATURE0;go_t_f_ker++)
{
for(int row = 0; row < pol_c0_m1_unpad.rows; row++ )
{
for(int col =0; col < pol_c0_m1_unpad.cols; col++)
{
//all_convoluted_frames
delta = pol_c0_m_delta[go_t_f_ker][(col + PADDING_SIZE_TO_T0P) + (row + PADDING_SIZE_TO_T0P) * pol_c0_m1.cols];
for(int clear_sum_p=0;clear_sum_p<(FE0KSIZE+1);clear_sum_p++)//FE0KSIZE+1, +1 is for the bias weight
{
SumChangeFeature0Weights[clear_sum_p][go_t_f_ker] = 0.0;
}
//==========
int FirstLayerFeature_nr = go_t_f_ker;//The original notes this is unused at the first conv layer; kept in step with go_t_f_ker
back_trk_ptr_zero = conv0_tbl[go_t_f_ker]->ptr<float>(0);
pol_c_ptr_zeroAside = m1_0_padded.ptr<float>(0);//The first conv layer always reads the single padded input image
make_SumChangeFeature0Weights(back_trk_ptr_zero, pol_c_ptr_zeroAside, col, row, go_t_f_ker, delta, FirstLayerFeature_nr);
sumOfKerData = 0.0;
normalizeKerData = 0.0;
for(int kernel_index=0;kernel_index<(FE0KSIZE+1);kernel_index++)//FE0KSIZE+1, +1 is for the bias weight
{
// printf("SumChangeFeature0Weights[kernel_index][go_t_f_ker] %1.10f\n", SumChangeFeature0Weights[kernel_index][go_t_f_ker]);
SumChangeFeature0Weights[kernel_index][go_t_f_ker] = LearningRateKer0 * SumChangeFeature0Weights[kernel_index][go_t_f_ker] + MomentumKer0 * ChangeFeature0Weights[kernel_index][go_t_f_ker];
ChangeFeature0Weights[kernel_index][go_t_f_ker] = SumChangeFeature0Weights[kernel_index][go_t_f_ker];//Remember this update so it feeds the momentum term next step
Feature0Kernel[go_t_f_ker][kernel_index] += ChangeFeature0Weights[kernel_index][go_t_f_ker];
sumOfKerData += Feature0Kernel[go_t_f_ker][kernel_index];
}
normalizeKerData = sumOfKerData / (FE0KSIZE+1);//Mean of all weights in this kernel (incl. bias)
for(int kernel_index=0;kernel_index<(FE0KSIZE+1);kernel_index++)//FE0KSIZE+1, +1 is for the bias weight
{
Feature0Kernel[go_t_f_ker][kernel_index] += -normalizeKerData;//Keep the kernel zero-mean
}
//==================================================================================
//Now the shared weights are updated
//==================================================================================
}//end col for loop
}//end row for loop
}//End FEATURE0 for loop
//==========================================
//Kernel auto gain correction
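//Idea: rescale each kernel so its mean absolute weight sits at a fixed target gain.
//If mean|w| = m, every weight is multiplied by FEATx_TYP_GAIN / m; for example m = 0.02
//against a target of 0.1 would scale the kernel by 5 (numbers illustrative only, not from the source).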
if(check_kernel_gain > DO_KERNEL_GAIN_CALC)
{
check_kernel_gain = 0;
//Calc all stage 0 kernels
#ifdef REPORT_KERNEL_AUTO_ADJ
printf("Do the kernel gain calculation all kernel 0 \n");
#endif
for(int go_t_f_ker = 0;go_t_f_ker < FEATURE0;go_t_f_ker++)
{
kernel_gain_level[0][go_t_f_ker] = 0;
float absolut_value = 0;
for(int kernel_index=0;kernel_index<(FE0KSIZE+1);kernel_index++)//FE0KSIZE+1, +1 is for the bias weight
{
absolut_value = abs_value(Feature0Kernel[go_t_f_ker][kernel_index]);
kernel_gain_level[0][go_t_f_ker] += absolut_value;
}
kernel_gain_level[0][go_t_f_ker] = kernel_gain_level[0][go_t_f_ker] / (FE0KSIZE+1);//Mean absolute weight
#ifdef REPORT_KERNEL_AUTO_ADJ
printf("kernel_gain_level[0][%d] = %f\n", go_t_f_ker, kernel_gain_level[0][go_t_f_ker]);
#endif
if(kernel_gain_level[0][go_t_f_ker] == 0.0)
{
kernel_gain_level[0][go_t_f_ker] = 0.0000001;//Protect against division by zero
}
kernel_gain_level[0][go_t_f_ker] = FEAT0_TYP_GAIN / kernel_gain_level[0][go_t_f_ker];
#ifdef REPORT_KERNEL_AUTO_ADJ
printf("kernel_gain_level[0][%d] after inverting = %f\n", go_t_f_ker, kernel_gain_level[0][go_t_f_ker]);
#endif
for(int kernel_index=0;kernel_index<(FE0KSIZE+1);kernel_index++)//FE0KSIZE+1, +1 is for the bias weight
{
Feature0Kernel[go_t_f_ker][kernel_index] = Feature0Kernel[go_t_f_ker][kernel_index] * kernel_gain_level[0][go_t_f_ker];
}
}
//--------------------------
//Calc all stage 1 kernels
#ifdef REPORT_KERNEL_AUTO_ADJ
printf("Do the kernel gain calculation all kernel 1 \n");
#endif
for(int go_t_f_ker = 0;go_t_f_ker < FEATURE1;go_t_f_ker++)
{
kernel_gain_level[1][go_t_f_ker] = 0;
float absolut_value = 0;
for(int kernel_index=0;kernel_index<(FE1KSIZE+1);kernel_index++)//FE1KSIZE+1, +1 is for the bias weight
{
absolut_value = abs_value(Feature1Kernel[go_t_f_ker][kernel_index]);
kernel_gain_level[1][go_t_f_ker] += absolut_value;
}
kernel_gain_level[1][go_t_f_ker] = kernel_gain_level[1][go_t_f_ker] / (FE1KSIZE+1);//Mean absolute weight
#ifdef REPORT_KERNEL_AUTO_ADJ
printf("kernel_gain_level[1][%d] = %f\n", go_t_f_ker, kernel_gain_level[1][go_t_f_ker]);
#endif
if(kernel_gain_level[1][go_t_f_ker] == 0.0)
{
kernel_gain_level[1][go_t_f_ker] = 0.0000001;//Protect against division by zero
}
kernel_gain_level[1][go_t_f_ker] = FEAT1_TYP_GAIN / kernel_gain_level[1][go_t_f_ker];
#ifdef REPORT_KERNEL_AUTO_ADJ
printf("kernel_gain_level[1][%d] after inverting = %f\n", go_t_f_ker, kernel_gain_level[1][go_t_f_ker]);
#endif
for(int kernel_index=0;kernel_index<(FE1KSIZE+1);kernel_index++)//FE1KSIZE+1, +1 is for the bias weight
{
Feature1Kernel[go_t_f_ker][kernel_index] = Feature1Kernel[go_t_f_ker][kernel_index] * kernel_gain_level[1][go_t_f_ker];
}
}
//--------------------------
//Calc all stage 2 kernels
#ifdef REPORT_KERNEL_AUTO_ADJ
printf("Do the kernel gain calculation all kernel 2 \n");
#endif
for(int go_t_f_ker = 0;go_t_f_ker < FEATURE2;go_t_f_ker++)
{
kernel_gain_level[2][go_t_f_ker] = 0;
float absolut_value = 0;
for(int kernel_index=0;kernel_index<(FE2KSIZE+1);kernel_index++)//FE2KSIZE+1, +1 is for the bias weight
{
absolut_value = abs_value(Feature2Kernel[go_t_f_ker][kernel_index]);
kernel_gain_level[2][go_t_f_ker] += absolut_value;
}
kernel_gain_level[2][go_t_f_ker] = kernel_gain_level[2][go_t_f_ker] / (FE2KSIZE+1);//Mean absolute weight
#ifdef REPORT_KERNEL_AUTO_ADJ
printf("kernel_gain_level[2][%d] = %f\n", go_t_f_ker, kernel_gain_level[2][go_t_f_ker]);
#endif
if(kernel_gain_level[2][go_t_f_ker] == 0.0)
{
kernel_gain_level[2][go_t_f_ker] = 0.0000001;//Protect against division by zero
}
kernel_gain_level[2][go_t_f_ker] = FEAT2_TYP_GAIN / kernel_gain_level[2][go_t_f_ker];
#ifdef REPORT_KERNEL_AUTO_ADJ
printf("kernel_gain_level[2][%d] after inverting = %f\n", go_t_f_ker, kernel_gain_level[2][go_t_f_ker]);
#endif
for(int kernel_index=0;kernel_index<(FE2KSIZE+1);kernel_index++)//FE2KSIZE+1, +1 is for the bias weight
{
Feature2Kernel[go_t_f_ker][kernel_index] = Feature2Kernel[go_t_f_ker][kernel_index] * kernel_gain_level[2][go_t_f_ker];
}
}
//--------------------------
}
else
{
check_kernel_gain++;
}
//==========================================
/******************************************************************
* Update Inner-->Hidden Weights
******************************************************************/
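//dropoutHidden[i] == 0 marks hidden node i as active for this pattern; when a node is dropped,
//its bias and incoming weight updates are skipped, which is how dropout is realized here.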
for( i = 0 ; i < HiddenNodes ; i++ )
{
ChangeHiddenWeights[InputNodes][i] = LearningRate * HiddenDelta[i] + Momentum * ChangeHiddenWeights[InputNodes][i] ;
if(dropoutHidden[i] == 0)
{
HiddenWeights[InputNodes][i] += ChangeHiddenWeights[InputNodes][i] ;
for( j = 0 ; j < InputNodes ; j++ )
{
ChangeHiddenWeights[j][i] = LearningRate * Input[j] * HiddenDelta[i] + Momentum * ChangeHiddenWeights[j][i];
HiddenWeights[j][i] += ChangeHiddenWeights[j][i] ;
}
}
}
/******************************************************************
* Update Hidden-->Output Weights
******************************************************************/
for( i = 0 ; i < OutputNodes ; i ++ )
{
ChangeOutputWeights[HiddenNodes][i] = LearningRate * OutputDelta[i] + Momentum * ChangeOutputWeights[HiddenNodes][i] ;
OutputWeights[HiddenNodes][i] += ChangeOutputWeights[HiddenNodes][i] ;
for( j = 0 ; j < HiddenNodes ; j++ )
{
if(dropoutHidden[j] == 0)//Gate on hidden node j (the source of this weight); a dropped hidden node keeps its outgoing weights frozen
{
ChangeOutputWeights[j][i] = LearningRate * Hidden[j] * OutputDelta[i] + Momentum * ChangeOutputWeights[j][i] ;
OutputWeights[j][i] += ChangeOutputWeights[j][i] ;
}
}
}
//End training
} //End if(verification == 0)
#ifdef USE_TRACKBAR_GAIN_ADJUSTMENTS
adjust_ker_gain_by_trackbars();
#endif // USE_TRACKBAR_GAIN_ADJUSTMENTS
//}//repeat_same
}
/******************************************************************
* END OF HERE LOOP Cycle through each training pattern in the randomized order
******************************************************************/
/******************************************************************
* Every cycle, send status data to the terminal for display
******************************************************************/
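//A verification turn reuses the forward pass on the held-out picture set; TrainingCycle is
//decremented below so that turn does not count as a training cycle.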
if(verification == 1)
{
TrainingCycle--;//Not increase number of TrainingCycle when verify
// if((min_Verification_error_level + accetp_increase_verification_error) > Error_level)
if((min_Verification_error_level + (accetp_increase_ver_part_error * Error_level)) > Error_level)
{
printf("Verification test error is OK\n");
if(min_Verification_error_level > Error_level)
{
min_Verification_error_level = Error_level;//Only update the stored minimum when the verification error actually improves
printf("Verification test error dropped to %f\n", min_Verification_error_level);
}
else
{
printf("Verification test error is %f\n", Error_level);
}
overfitted = 0;
}
else
{
overfitted = 1;
printf("STATUS the Neural Network is overtrained (or the verification picture set is not suffisient\n");
}
verification = 0;//Next turn run training
}
else
{
printf ("TrainingCycle: %d\n", TrainingCycle);
printf ("* Error_level = %f\n", Error_level);
printf("LearningRateKer0 %f\n", LearningRateKer0);
printf("LearningRateKer1 %f\n", LearningRateKer1);
printf("LearningRateKer2 %f\n", LearningRateKer);
if(min_Error_level > Error_level && overfitted == 0)
{
#ifdef USE_VERIFICATION
verification = 1;//Next turn run verification
#endif
min_Error_level = Error_level;
fp2 = fopen("Feature2Kernel.dat", "w+");
fwrite(Feature2Kernel, sizeof Feature2Kernel[0][0], sizeof(Feature2Kernel) / (sizeof Feature2Kernel[0][0]) , fp2);
fclose(fp2);
fp2 = fopen("Feature1Kernel.dat", "w+");
fwrite(Feature1Kernel, sizeof Feature1Kernel[0][0], sizeof(Feature1Kernel) / (sizeof Feature1Kernel[0][0]) , fp2);
fclose(fp2);
fp2 = fopen("Feature0Kernel.dat", "w+");
fwrite(Feature0Kernel, sizeof Feature0Kernel[0][0], sizeof(Feature0Kernel) / (sizeof Feature0Kernel[0][0]) , fp2);
fclose(fp2);
fp2 = fopen("HiddenWeights.dat", "w+");
fwrite(HiddenWeights, sizeof HiddenWeights[0][0], sizeof(HiddenWeights) / (sizeof HiddenWeights[0][0]) , fp2);
fclose(fp2);
fp2 = fopen("OutputWeights.dat", "w+");
fwrite(OutputWeights, sizeof OutputWeights[0][0], sizeof(OutputWeights) / (sizeof OutputWeights[0][0]) , fp2);
fclose(fp2);
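//fwrite idiom used for every save here: element size = sizeof arr[0][0] and
//count = sizeof(arr) / sizeof arr[0][0], so one call dumps the whole 2-D weight array;
//a small helper wrapping this pattern is sketched after this function.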
sprintf(filename, "Feature2Kernel_%d.dat", rerun_fully_connected);//Assigne a filename with index number added
fp2 = fopen(filename, "w+");
fwrite(Feature2Kernel, sizeof Feature2Kernel[0][0], sizeof(Feature2Kernel) / (sizeof Feature2Kernel[0][0]) , fp2);
fclose(fp2);
sprintf(filename, "Feature1Kernel_%d.dat", rerun_fully_connected);//Assigne a filename with index number added
fp2 = fopen(filename, "w+");
fwrite(Feature1Kernel, sizeof Feature1Kernel[0][0], sizeof(Feature1Kernel) / (sizeof Feature1Kernel[0][0]) , fp2);
fclose(fp2);
sprintf(filename, "Feature0Kernel_%d.dat", rerun_fully_connected);//Assigne a filename with index number added
fp2 = fopen(filename, "w+");
fwrite(Feature0Kernel, sizeof Feature0Kernel[0][0], sizeof(Feature0Kernel) / (sizeof Feature0Kernel[0][0]) , fp2);
fclose(fp2);
sprintf(filename, "HiddenWeights_%d.dat", rerun_fully_connected);//Assigne a filename with index number added
fp2 = fopen(filename, "w+");
fwrite(HiddenWeights, sizeof HiddenWeights[0][0], sizeof(HiddenWeights) / (sizeof HiddenWeights[0][0]) , fp2);
fclose(fp2);
sprintf(filename, "OutputWeights_%d.dat", rerun_fully_connected);//Assigne a filename with index number added
fp2 = fopen(filename, "w+");
fwrite(OutputWeights, sizeof OutputWeights[0][0], sizeof(OutputWeights) / (sizeof OutputWeights[0][0]) , fp2);
fclose(fp2);
}
else
{
verification = 0;//Skip verification Next turn run training
printf("The Error was increased this training! skip verification and train again until error drops \n");
min_Verification_error_level = Error_level;
}
}
toTerminal();
#ifndef USE_TRACKBAR_GAIN_ADJUSTMENTS
#ifdef USE_LOCK_KERNEL_LAYER
if(train_feature_kernel_layer<NUM_OF_KERNEL_LAYER-1)
{
train_feature_kernel_layer++;
}
else
{
train_feature_kernel_layer = 0;
}
switch(train_feature_kernel_layer)
{
case 0:
LearningRateKer = 0;//
LearningRateKer1 = 0;//
LearningRateKer0 = const_LearningRateKer0;//
printf("Train kernel layer 1\n");
printf("Lock kernel layer 2 and 3\n");
break;
case 1:
LearningRateKer = 0;//
LearningRateKer1 = const_LearningRateKer1;//
LearningRateKer0 = 0;//
printf("Train kernel layer 2\n");
printf("Lock kernel layer 1 and 3\n");
break;
case 2:
LearningRateKer = const_LearningRateKer;//
LearningRateKer1 = 0;//
LearningRateKer0 = 0;//
printf("Train kernel layer 3\n");
printf("Lock kernel layer 1 and 2\n");
break;
//default:
}
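/* The switch above rotates training through the three kernel layers:
 train_feature_kernel_layer cycles 0 -> 1 -> 2 -> 0 ..., and on each pass
 exactly one of LearningRateKer0/LearningRateKer1/LearningRateKer is
 nonzero, so the other two kernel layers stay frozen while the fully
 connected layers keep training on every pass. */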
#else
LearningRateKer = const_LearningRateKer;//
LearningRateKer1 = const_LearningRateKer1;//
LearningRateKer0 = const_LearningRateKer0;//
#endif
#ifdef USE_HIGH_GAIN_AT_START
if(Error_level > USE_HIGH_GAIN_ERROR && rerun_fully_connected == 0)
{
LearningRateKer = const_LearningRateKer_high_gain;//
LearningRateKer1 = const_LearningRateKer1_high_gain;//
LearningRateKer0 = const_LearningRateKer0_high_gain;//
printf("Now using High gain\n");
}
#endif
#endif // USE_TRACKBAR_GAIN_ADJUSTMENTS
/******************************************************************
* If error rate is less than pre-determined threshold then end
******************************************************************/
if( min_Error_level < Success || overfitted == 1)
{
if(overfitted == 1)
{
printf("Stop trained because overfitting occure\n");
}
if(rerun_fully_connected != NUMBER_OF_RERUN)
{
init_fully_connected();
}
rerun_fully_connected++;
min_Error_level = START_MIN_ERROR_LEVEL;
min_Verification_error_level = START_MIN_ERROR_LEVEL;
overfitted = 0;
printf("rerun_fully_connected = %d\n", rerun_fully_connected);
}
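/* A finished run (min error below Success, or overfitting detected) starts
 the next rerun: init_fully_connected() re-initializes only the fully
 connected weights while the learned kernels are kept, so each rerun
 retrains the classifier on top of the same feature extractor. */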
#ifndef USE_TRACKBAR_GAIN_ADJUSTMENTS
locking_kernel_training();
#endif // USE_TRACKBAR_GAIN_ADJUSTMENTS
// if(rerun_fully_connected > NUMBER_OF_RERUN || overfitted == 1) break ;
if(rerun_fully_connected > NUMBER_OF_RERUN) break ;
}
printf("\n");
printf ("TrainingCycle: %d", TrainingCycle);
printf ("Trained min Error_level = %f\n", min_Error_level);
printf ("Latest Error_level test (verification or trainied) = %f\n", Error_level);
toTerminal();
printf ("\n");
printf ("Training Set Solved! \n");
printf ("--------\n");
printf ("\n");
printf("Would you like to start running your Neural Network feed forward <Y>/<N> \n");
int yes = 0;
char answer_char;
while(yes==0)
{
answer_char = getchar();
if(answer_char == 'Y' || answer_char == 'y')
{
yes=1;
}
}
}
void enter_rerun_fully_connected_number(void)
{
printf("Enter a number of what rerun_fully_connected number you will load\n");
printf("Enter an integer betwheen 0 to %d\n", NUMBER_OF_RERUN);
scanf("%d", &rerun_fully_connected);
printf("rerun_fully_connected = %d\n", rerun_fully_connected);
if(rerun_fully_connected == 0)
{
LearningRateKer = const_LearningRateKer_high_gain;//
LearningRateKer1 = const_LearningRateKer1_high_gain;//
LearningRateKer0 = const_LearningRateKer0_high_gain;//
printf("Now using High gain\n");
}
else
{
LearningRateKer = const_LearningRateKer;//
LearningRateKer1 = const_LearningRateKer1;//
LearningRateKer0 = const_LearningRateKer0;//
printf("Using normal gain\n");
}
locking_kernel_training();
}
void adjust_ker_gain_by_trackbars(void)
{
static int pre_ker_gain0=0;
static int pre_ker_gain1=0;
static int pre_ker_gain2=0;
static int pre_mom_full =0;
static int pre_mom_k0 =0;
static int pre_mom_k1 =0;
static int pre_mom_k2 =0;
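// Each trackbar value is treated as a percentage (multiplied by 0.01f),
// so a value of 100 reproduces the compile-time constant rate and a value
// of 0 freezes the corresponding layer completely.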
if((pre_ker_gain0 != ker_gain0) || (pre_ker_gain1 != ker_gain1) || (pre_ker_gain2 != ker_gain2))
{
// printf("Change kernel gain by trackbars\n");
LearningRateKer = const_LearningRateKer * (((float) ker_gain2) * 0.01f);//
LearningRateKer1 = const_LearningRateKer1 * (((float) ker_gain1) * 0.01f);//
LearningRateKer0 = const_LearningRateKer0 * (((float) ker_gain0) * 0.01f);//
// printf("LearningRateKer0 %f\n", LearningRateKer0);
// printf("LearningRateKer1 %f\n", LearningRateKer1);
// printf("LearningRateKer2 %f\n", LearningRateKer);
}
if((pre_mom_k0 != momentum0) || (pre_mom_k1 != momentum1) || (pre_mom_k2 != momentum2) || (pre_mom_full != momentum_fully))
{
Momentum = C_moment * (((float) momentum_fully) * 0.01f);//
MomentumKer0 = C_momentKer0 * (((float) momentum0) * 0.01f);//
MomentumKer1 = C_momentKer1 * (((float) momentum1) * 0.01f);//
MomentumKer2 = C_momentKer2 * (((float) momentum2) * 0.01f);//
}
pre_ker_gain0 = ker_gain0;
pre_ker_gain1 = ker_gain1;
pre_ker_gain2 = ker_gain2;
pre_mom_full = momentum_fully;
pre_mom_k0 = momentum0;
pre_mom_k1 = momentum1;
pre_mom_k2 = momentum2;
}
int main(void)
{
createTrackbars();
make_kernel_dome_gain();
Mat image, src, src_BGR;
raspicam::RaspiCam_Cv Camera;
Camera.set( CV_CAP_PROP_FRAME_WIDTH, FRAME_WIDTH);
Camera.set( CV_CAP_PROP_FRAME_HEIGHT, FRAME_HEIGHT);
Camera.set( CV_CAP_PROP_FORMAT, CV_8UC1 );
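//The camera is configured for 64x48 (FRAME_WIDTH x FRAME_HEIGHT) frames
//in 8-bit single-channel (grayscale) format, CV_8UC1.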
//Camera.set( CV_CAP_PROP_FORMAT, CV_8U );
//Open camera
cout<<"Opening Camera..."<<endl;
if (!Camera.open()) {cerr<<"Error opening the camera"<<endl;return -1;}
Camera.grab();
Camera.retrieve (src);
#ifdef USE_NORMALIZER
local_normalizing(src);
#endif
waitKey(1);
src.convertTo(image, CV_32F, 1.0/255.0);//Convert pixels from 0..255 char to float 0..1
Mat m1(image, Rect(0, 0, T0_FRAME_WIDTH, T0_FRAME_HEIGHT));// Rect(<start_x>, <start_y>, <width>, <height>)
m1_0_padded = padded0_image(m1);
Mat t_m1_conv0 = convolute_mat2(&Feature1Kernel[0][0], FE0KSIZESQR, FE0KSIZESQR, m1_0_padded, int (m1_0_padded.cols));// Make a convolution of the image
Mat t_pol_c0_m1_unpad = max_pooling(t_m1_conv0, int (t_m1_conv0.cols));//Pooling c_m1 max pooling
sigmoid_mat(t_pol_c0_m1_unpad);
Mat t_pol_c0_m1 = padded1_image(t_pol_c0_m1_unpad);
int src_img_colums = t_pol_c0_m1.cols;
int m1_img_colums = t_pol_c0_m1.cols;
Mat t1 = convolute_mat2(&Feature1Kernel[0][0], FE1KSIZESQR, FE1KSIZESQR, t_pol_c0_m1, int (t_pol_c0_m1.cols));// Make a convolution of the image
Mat t2 = max_pooling(t1, int (t1.cols));//Max pooling (halves width and height)
Mat t2_pad = padded_image(t2);
Mat t3 = convolute_mat2(&Feature2Kernel[0][0], FE2KSIZESQR, FE2KSIZESQR, t2_pad, int (t2_pad.cols));// Make a convolution of the image
Mat t4 = max_pooling(t3, int (t3.cols));//Max pooling (halves width and height)
Mat all_convoluted_frames(t4.rows * NN_CONVOLUTED_FRAMES, t4.cols, CV_32F);//Mat xxx(height, width, type);
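/* The convolutions above run once on the first frame, mainly to establish
 the matrix sizes reused for every later frame. Derived from the constants
 at the top of this file, the size flow per feature map is:
   stage 0: 64x48 -> pad -> 7x7 conv  -> 64x48 -> max pool -> 32x24
   stage 1: 32x24 -> pad -> 7x7 conv  -> 32x24 -> max pool -> 16x12
   stage 2: 16x12 -> pad -> 11x11 conv -> 16x12 -> max pool -> 8x6
 all_convoluted_frames stacks the 32 final 8x6 feature maps vertically
 into one 192-row x 8-column CV_32F image. */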
printf("How many picture it will be on each training category = %d\n", TrainingPicturesAtOneCategory);
for(int categorys = 0;categorys<OutputNodes;categorys++)
{
printf("Output Node %d will correspond to picture set pos%d.JPG to pos%d.JPG \n", categorys, categorys*TrainingPicturesAtOneCategory, (categorys+1)*TrainingPicturesAtOneCategory-1);
}
printf("Note you must have all this picture set pos0.JPG to pos%d.JPG in this path\n", OutputNodes*TrainingPicturesAtOneCategory-1);
printf("If some of pos...JPG picture are missing program will not start\n");
load_training_images(m1, all_convoluted_frames, m1_img_colums, 0);
load_training_target();//Load the training target answers for the corresponding training images
int answer_character;
printf("Would you like to traning the neural network <Y>/<N> \n");
answer_character = getchar();
if(answer_character == 'Y' || answer_character == 'y')
{
getchar();
printf("Would you like to load stored Feature2Kernel <Y>/<N> \n");
answer_character = getchar();
if(answer_character == 'Y' || answer_character == 'y')
{
load_kernels = 1;
enter_rerun_fully_connected_number();
sprintf(filename, "Feature0Kernel_%d.dat", rerun_fully_connected);
fp2 = fopen(filename, "r");
if (fp2 == NULL)
{
printf("Error while opening file Feature0Kernel_%d.dat", rerun_fully_connected);
exit(0);
}
fread(Feature0Kernel, sizeof Feature0Kernel[0][0], sizeof(Feature0Kernel) / (sizeof Feature0Kernel[0][0]) , fp2);
fclose(fp2);
sprintf(filename, "Feature2Kernel_%d.dat", rerun_fully_connected);
fp2 = fopen(filename, "r");
if (fp2 == NULL)
{
printf("Error while opening file Feature2Kernel_%d.dat", rerun_fully_connected);
exit(0);
}
fread(Feature2Kernel, sizeof Feature2Kernel[0][0], sizeof(Feature2Kernel) / (sizeof Feature2Kernel[0][0]) , fp2);
fclose(fp2);
sprintf(filename, "Feature1Kernel_%d.dat", rerun_fully_connected);
fp2 = fopen(filename, "r");
if (fp2 == NULL)
{
printf("Error while opening file Feature1Kernel_%d.dat", rerun_fully_connected);
exit(0);
}
fread(Feature1Kernel, sizeof Feature1Kernel[0][0], sizeof(Feature1Kernel) / (sizeof Feature1Kernel[0][0]) , fp2);
fclose(fp2);
}
else
{
load_kernels = 0;
}
training_neural_net(m1, all_convoluted_frames, m1_img_colums);
//Store training neural network weights into files
if(load_kernels == 0)
{
fp2 = fopen("Feature2Kernel.dat", "w+");
fwrite(Feature2Kernel, sizeof Feature2Kernel[0][0], sizeof(Feature2Kernel) / (sizeof Feature2Kernel[0][0]) , fp2);
fclose(fp2);
fp2 = fopen("Feature1Kernel.dat", "w+");
fwrite(Feature1Kernel, sizeof Feature1Kernel[0][0], sizeof(Feature1Kernel) / (sizeof Feature1Kernel[0][0]) , fp2);
fclose(fp2);
fp2 = fopen("Feature0Kernel.dat", "w+");
fwrite(Feature0Kernel, sizeof Feature0Kernel[0][0], sizeof(Feature0Kernel) / (sizeof Feature0Kernel[0][0]) , fp2);
fclose(fp2);
}
fp2 = fopen("HiddenWeights.dat", "w+");
fwrite(HiddenWeights, sizeof HiddenWeights[0][0], sizeof(HiddenWeights) / (sizeof HiddenWeights[0][0]) , fp2);
fclose(fp2);
fp2 = fopen("OutputWeights.dat", "w+");
fwrite(OutputWeights, sizeof OutputWeights[0][0], sizeof(OutputWeights) / (sizeof OutputWeights[0][0]) , fp2);
fclose(fp2);
}
else
{
//Load training neural network weights from files
fp2 = fopen("Feature2Kernel.dat", "r");
if (fp2 == NULL)
{
puts("Error while opening file Feature2Kernel.dat");
exit(0);
}
fread(Feature2Kernel, sizeof Feature2Kernel[0][0], sizeof(Feature2Kernel) / (sizeof Feature2Kernel[0][0]) , fp2);
fclose(fp2);
fp2 = fopen("Feature1Kernel.dat", "r");
if (fp2 == NULL)
{
puts("Error while opening file Feature1Kernel.dat");
exit(0);
}
fread(Feature1Kernel, sizeof Feature1Kernel[0][0], sizeof(Feature1Kernel) / (sizeof Feature1Kernel[0][0]) , fp2);
fclose(fp2);
fp2 = fopen("Feature0Kernel.dat", "r");
if (fp2 == NULL)
{
puts("Error while opening file Feature0Kernel.dat");
exit(0);
}
fread(Feature0Kernel, sizeof Feature0Kernel[0][0], sizeof(Feature0Kernel) / (sizeof Feature0Kernel[0][0]) , fp2);
fclose(fp2);
fp2 = fopen("HiddenWeights.dat", "r");
if (fp2 == NULL)
{
puts("Error while opening file HiddenWeights.dat");
exit(0);
}
fread(HiddenWeights, sizeof HiddenWeights[0][0], sizeof(HiddenWeights) / (sizeof HiddenWeights[0][0]) , fp2);
fclose(fp2);
fp2 = fopen("OutputWeights.dat", "r");
if (fp2 == NULL)
{
puts("Error while opening file OutputWeights.dat");
exit(0);
}
fread(OutputWeights, sizeof OutputWeights[0][0], sizeof(OutputWeights) / (sizeof OutputWeights[0][0]) , fp2);
fclose(fp2);
}
//Run the Neural Network on real-time video
show_kernel();
//===================================================================================================================
//== Access the gray-image pixel data and copy the values into the Neural Network input matrix
//===================================================================================================================
int image_channels2 = all_convoluted_frames.channels();
int image_nRows2 = all_convoluted_frames.rows;
int image_nCols2 = all_convoluted_frames.cols * image_channels2;
int image_pix_nCols2 = all_convoluted_frames.cols;
float* image_pointer2_zero;
float* image_pointer2_index;
image_pointer2_zero = all_convoluted_frames.ptr<float>(0);
printf("all_convoluted_frames image matrix consist of channels: %d\n", image_channels2);//Print out only to understanding the image Mat matrix layout. in this case channels it 3 bytes for image Huge, Sat and Value byte
printf("all_convoluted_frames image matrix consist of nRows: %d\n", image_nRows2);
printf("all_convoluted_frames image matrix consist of nCols: %d\n", image_nCols2);
printf("all_convoluted_frames image matrix consist of pixels on one row: %d\n", image_pix_nCols2);
//===================================================================================================================
//== END Variables regarding image pixel data access and transfer into the Neural Network matrix
//===================================================================================================================
Mat labeling(80,80, CV_8UC3, Scalar::all(128));
while(1)
{
Camera.grab();
Camera.retrieve (src);
imshow("src", src);
//C++: void warpAffine(InputArray src, OutputArray dst, InputArray M, Size dsize, int flags=INTER_LINEAR, int borderMode=BORDER_CONSTANT, const Scalar& borderValue=Scalar())
#ifdef USE_NORMALIZER
local_normalizing(src);
#endif
// convert to floating-point image
src.convertTo(image, CV_32F, 1.0/255.0);//Convert pixels from 0..255 char to float 0..1
all_convoluted_frames = convolute_all_stage(m1, all_convoluted_frames, src_img_colums);
//================================
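// The stacked feature maps are copied into Input[] by walking a raw float
// pointer row-major from element 0. This relies on all_convoluted_frames
// being stored continuously in memory (cv::Mat::isContinuous() can verify
// that for a Mat allocated in one piece).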
for (int byte_counter = 0; byte_counter< InputNodes; byte_counter++) //InputNodes should match (image_nRows2 * image_nCols2)
{
image_pointer2_index = image_pointer2_zero + byte_counter;
Input[byte_counter] = (float) *image_pointer2_index;//Sigmoid already done in convolution
}
/******************************************************************
* Compute hidden layer activations
******************************************************************/
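// Hidden[i] = sigmoid(bias_i + sum_j Input[j] * HiddenWeights[j][i]),
// with sigmoid(x) = 1/(1 + exp(-x)); the bias is stored in the extra
// row HiddenWeights[InputNodes][i].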
for( i = 0 ; i < HiddenNodes ; i++ ) {
Accum = HiddenWeights[InputNodes][i] ;
for( j = 0 ; j < InputNodes ; j++ ) {
Accum += Input[j] * HiddenWeights[j][i] ;
}
// if(dropoutHidden[i] == 0)
// {
Hidden[i] = 1.0/(1.0 + exp(-Accum)) ;
// }
// else
// {
// Hidden[i] = 0.0f;
// }
}
/******************************************************************
* Compute output layer activations and calculate errors
******************************************************************/
char Highest_rate = 0;
string highest = "x";
std::string::iterator It = highest.begin();
float compare_outpnodes = 0.0f;
for( i = 0 ; i < OutputNodes ; i++ ) {
Accum = OutputWeights[HiddenNodes][i] ;
for( j = 0 ; j < HiddenNodes ; j++ ) {
Accum += Hidden[j] * OutputWeights[j][i] ;
}
Output[i] = 1.0/(1.0 + exp(-Accum)) ;
Error_level += 0.5 * (Target[p][i] - Output[i]) * (Target[p][i] - Output[i]) ;
printf (" Output Node %d = %f\n", i, Output[i]);
if(compare_outpnodes < Output[i])
{
compare_outpnodes = Output[i];
Highest_rate = i;
}
}
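// Highest_rate now holds the argmax over the output nodes; node 5 is
// mapped to the character "x" below instead of a digit.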
waitKey(1);
//CvPoint num_pos = (15,100);
if(Highest_rate == 5)
{
highest = "x";
}
else
{
*It = Highest_rate + '0';//Convert the winning node index to its ASCII digit
}
labeling.setTo(cv::Scalar(128,128,128));
cv::putText(labeling, highest, cvPoint(15,60), CV_FONT_HERSHEY_PLAIN, 4, cvScalar(0,255,0),3);
//void putText(Mat& img, const string& text, Point org, int fontFace, double fontScale, Scalar color, int thickness=1, int lineType=8, bool bottomLeftOrigin=false )
imshow("lable", labeling);
//================================
imshow("ALL", all_convoluted_frames);
imshow("camera", image);
waitKey(1);
}
return 0;
}
void toTerminal()
{
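/* Re-evaluates the network on every stored training pattern p with the
 current weights: recomputes the hidden activations (honoring the dropout
 mask) and the output activations. */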
for( p = 0 ; p < PatternCount ; p++ ) {
/******************************************************************
* Compute hidden layer activations
******************************************************************/
for( i = 0 ; i < HiddenNodes ; i++ )
{
Accum = HiddenWeights[InputNodes][i] ;
for( j = 0 ; j < InputNodes ; j++ )
{
Accum += Input[j] * HiddenWeights[j][i] ;
}
if(dropoutHidden[i] == 0)
{
Hidden[i] = 1.0/(1.0 + exp(-Accum)) ;
}
else
{
Hidden[i] = 0.0f;
}
}
/******************************************************************
* Compute output layer activations and calculate errors
******************************************************************/
for( i = 0 ; i < OutputNodes ; i++ ) {
Accum = OutputWeights[HiddenNodes][i] ;
for( j = 0 ; j < HiddenNodes ; j++ ) {
Accum += Hidden[j] * OutputWeights[j][i] ;
}
Output[i] = 1.0/(1.0 + exp(-Accum)) ;
}
}
}