BP-Neural Network
% BP neural network for function fitting
clc
clear
[filename, pathname] = uigetfile('*.xls');   % select the source data file
file = [pathname filename];                  % build the full file path
x = xlsread(file);                           % read the spreadsheet into a matrix
% Build training and test sets
P1 = x(1:100,1:14)';     % training inputs, one sample per column
T1 = x(1:100,15)';       % training targets
P2 = x(101:340,1:14)';   % test inputs, one sample per column
T2 = x(101:340,15)';     % test targets
%---------------------------------------------------
% Normalization
[PN1,minp,maxp,TN1,mint,maxt] = premnmx(P1,T1);
PN2 = tramnmx(P2,minp,maxp);
TN2 = tramnmx(T2,mint,maxt);
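% Note: premnmx scales each row of P1 and T1 to [-1,1] and returns the
% per-row min/max; tramnmx applies that same scaling to the test data so
% both sets share one normalization.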
%---------------------------------------------------
% Network parameters
NodeNum = 30;    % number of hidden-layer neurons
TypeNum = 1;     % output dimension
TF1 = 'tansig'; TF2 = 'purelin';   % transfer functions (default choice)
%TF1 = 'tansig';  TF2 = 'logsig';
%TF1 = 'logsig';  TF2 = 'purelin';
%TF1 = 'tansig';  TF2 = 'tansig';
%TF1 = 'logsig';  TF2 = 'logsig';
%TF1 = 'purelin'; TF2 = 'purelin';
net = newff(minmax(PN1),[NodeNum TypeNum],{TF1 TF2});
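% newff (legacy syntax) builds a two-layer feedforward network: input ranges
% taken from minmax(PN1), NodeNum hidden neurons using TF1, and TypeNum
% output neurons using TF2.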
%---------------------------------------------------
% Training parameters
%net.trainFcn = 'traingd';    % gradient descent
%net.trainFcn = 'traingdm';   % gradient descent with momentum
%
%net.trainFcn = 'traingda';   % gradient descent with adaptive learning rate
%net.trainFcn = 'traingdx';   % gradient descent with momentum and adaptive learning rate
%
% (preferred for large networks)
%net.trainFcn = 'trainrp';    % RPROP (resilient backpropagation), smallest memory footprint
%
% Conjugate gradient algorithms
%net.trainFcn = 'traincgf';   % Fletcher-Reeves update
%net.trainFcn = 'traincgp';   % Polak-Ribiere update, slightly more memory than Fletcher-Reeves
%net.trainFcn = 'traincgb';   % Powell-Beale restarts, slightly more memory than Polak-Ribiere
% (preferred for large networks)
%net.trainFcn = 'trainscg';   % scaled conjugate gradient, memory comparable to Fletcher-Reeves, far less computation than the three above
%
%net.trainFcn = 'trainbfg';   % quasi-Newton (BFGS), more computation and memory than conjugate gradient, but faster convergence
%net.trainFcn = 'trainoss';   % one-step secant, less computation and memory than BFGS, slightly more than conjugate gradient
%
% (preferred for medium-sized networks)
net.trainFcn = 'trainlm';     % Levenberg-Marquardt, largest memory demand, fastest convergence
%
%net.trainFcn = 'trainbr';    % Bayesian regularization
%
% Five representative choices: 'traingdx', 'trainrp', 'trainscg', 'trainoss', 'trainlm'
%---------------------%
net.trainParam.show = 20;         % display interval during training
net.trainParam.lr = 0.3;          % learning rate     - traingd, traingdm
net.trainParam.mc = 0.95;         % momentum constant - traingdm, traingdx
net.trainParam.mem_reduc = 1;     % compute the Jacobian in blocks to save memory (Levenberg-Marquardt only)
net.trainParam.epochs = 1000;     % maximum number of training epochs
net.trainParam.goal = 1e-8;       % performance goal (minimum MSE)
net.trainParam.min_grad = 1e-20;  % minimum gradient
net.trainParam.time = inf;        % maximum training time
%---------------------------------------------------
% Training
net = train(net,PN1,TN1);
%---------------------------------------------------
% Testing
YN1 = sim(net,PN1);            % network output on the training set
YN2 = sim(net,PN2);            % network output on the test set
MSE1 = mean((TN1-YN1).^2)      % training MSE (normalized scale)
MSE2 = mean((TN2-YN2).^2)      % test MSE (normalized scale)
%---------------------------------------------------
% De-normalization
Y2 = postmnmx(YN2,mint,maxt);  % map test outputs back to the original scale
%---------------------------------------------------
% Plot the results
plot(1:length(T2),T2,'r+:',1:length(Y2),Y2,'bo:')
title('+ = actual value, o = predicted value')
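The premnmx/tramnmx/postmnmx functions and the minmax-based newff call above come from older Neural Network Toolbox releases and are deprecated in newer MATLAB versions. Below is a minimal sketch of the same pipeline with the newer API, assuming the same P1/T1/P2 split as above; the names ps_in and ps_out are illustrative.

% Sketch of the same pipeline with the newer toolbox API (ps_in/ps_out are
% illustrative names; adjust to your data layout).
[PN1, ps_in]  = mapminmax(P1);            % normalize inputs to [-1,1]
[TN1, ps_out] = mapminmax(T1);            % normalize targets to [-1,1]
PN2 = mapminmax('apply', P2, ps_in);      % reuse the training normalization
net = feedforwardnet(30, 'trainlm');      % 30 hidden neurons, Levenberg-Marquardt
net.layers{1}.transferFcn = 'tansig';
net.layers{2}.transferFcn = 'purelin';
net.trainParam.epochs = 1000;
net.trainParam.goal = 1e-8;
net = train(net, PN1, TN1);
YN2 = net(PN2);                           % outputs on the test inputs
Y2 = mapminmax('reverse', YN2, ps_out);   % back to the original target scale

Note that feedforwardnet applies mapminmax to inputs and targets by default through its process functions, so the explicit normalization in this sketch mainly mirrors the structure of the original script.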