The BP Algorithm for Neural Networks (Points: 200)

  • Thread starter: gxc418gxc

gxc418gxc (unregistered, unconfirmed guest):
Curves are generated by a four-bar linkage; numeric features are then abstracted from them to build a database. Later, given some new curve shapes, the corresponding four-bar linkage has to be found from those curves. So although the project involves curves, the real focus is the BP network algorithm.

Does anyone have a detailed BP neural-network algorithm, or an improved BP algorithm? Could you upload the code? Thank you. I'm willing to donate my entire stash of 500 points to the comrade who provides the code and is willing to give a little extra guidance so that I can understand it!
 
I downloaded this back in 2003; I don't know whether it still works:
Page: http://free.cx001.com/lianhc/others.htm
Source: http://free.cx001.com/lianhc/zip/neural1.rar

The code is not very long, so I'll just paste it here (C++Builder):

#include <vcl.h>
#pragma hdrstop
#include <algorithm.h>
#include <vector.h>
#include <math.h>
#include <stdio.h>
#include <alloc.h>
#include "Bpnn.h"

#pragma package(smart_init)
#pragma resource "*.dfm"
////
////
#define MAXLAYERS 5 // maximum number of layers
#define MAXNNUM 20 // maximum number of neurons per layer
#define MAXTrainDataNum 200 //135 = 9*15; iris.txt holds 150 records in total
#define MAXTestDataNum 20 //15 // 150/15
#define MAXLEARNNUM 2000 // maximum number of training epochs


double W[MAXLAYERS][MAXLAYERS][MAXNNUM][MAXNNUM]; // weight matrix
double dW[MAXLAYERS][MAXLAYERS][MAXNNUM][MAXNNUM]; // weight deltas
double d[MAXLAYERS][MAXNNUM]; // intermediate term (the delta) used when computing dW
double TH[MAXLAYERS][MAXNNUM]; // threshold matrix
double dTH[MAXLAYERS][MAXNNUM]; // threshold deltas
double learnRatio[MAXLAYERS];// learning rate, stored per layer


double Sum[MAXLAYERS][MAXNNUM];// weighted input sum of each layer -> activation value
double Out[MAXLAYERS][MAXNNUM];// output of each layer
//double FinalOut[MAXNNUM];// for classification
int NNum[MAXLAYERS];// number of neurons in each layer

double trainData[MAXTrainDataNum][MAXNNUM+MAXNNUM];// training data: inputs + expected outputs
double testData[MAXTestDataNum][MAXNNUM+MAXNNUM]; // test data: inputs + expected outputs
/////
double err[MAXLEARNNUM]; // global error per epoch
double ERR;// global error limit
double singleErr;
int learnN;
double sum;


int i,j,k,t,n,pren,postn,randT; // loop counters
int I,H,O,L;// indices for the input, hidden, and output layers
int LAYERS=3;// number of layers
int trainDataNum=135;// iris.txt holds 150 records in total
int testDataNum=15; // 15 of the 150 records are held out for testing
int randList[MAXTrainDataNum];// for random ordering of the training set

/////
TForm1 *Form1;

__fastcall TForm1::TForm1(TComponent* Owner)
: TForm(Owner)
{
}

void __fastcall TForm1::readBtnClick(TObject *Sender)
{
char nouse;
char outStr[15];
FILE *inputFile;
inputFile=fopen("iris.txt","rb");

///for each class, the first 45 records are training data, the last 5 are test data
for ( i=0;i<3;i++)
{
for( j=i*50;j<(i*50+45);j++)//trainData
{
for( k=0;k<4;k++)
{
fscanf(inputFile, "%lf", &trainData[j-i*5][k]);
fread(&nouse, 1, 1, inputFile); // skip the separator character
}

fscanf(inputFile, "%s", outStr);

if(CompareStr(outStr,"Iris-setosa")==0)
{trainData[j-i*5][4]=1;
trainData[j-i*5][5]=0;
trainData[j-i*5][6]=0;}
else if(CompareStr(outStr,"Iris-versicolor")==0)
{trainData[j-i*5][4]=0;
trainData[j-i*5][5]=1;
trainData[j-i*5][6]=0;}
else if(CompareStr(outStr,"Iris-virginica")==0)
{trainData[j-i*5][4]=0;
trainData[j-i*5][5]=0;
trainData[j-i*5][6]=1;}

}

for( j=i*50+45;j<(i*50+50);j++)//testData
{
for( k=0;k<4;k++)
{
fscanf(inputFile, "%lf", &testData[j-(i+1)*45][k]);
fread(&nouse, 1, 1, inputFile); // skip the separator character
}

fscanf(inputFile, "%s", outStr);
//ListBox1->Items->Append(outStr);

if(CompareStr(outStr,"Iris-setosa")==0)
{testData[j-(i+1)*45][4]=1;
testData[j-(i+1)*45][5]=0;
testData[j-(i+1)*45][6]=0;}
else if(CompareStr(outStr,"Iris-versicolor")==0)
{testData[j-(i+1)*45][4]=0;
testData[j-(i+1)*45][5]=1;
testData[j-(i+1)*45][6]=0;}
else if(CompareStr(outStr,"Iris-virginica")==0)
{testData[j-(i+1)*45][4]=0;
testData[j-(i+1)*45][5]=0;
testData[j-(i+1)*45][6]=1;}
}
}

fclose(inputFile);

/* for( int j=0;j<135;j++)
ListBox1->Items->Append( IntToStr(j)
+" "+FloatToStr(trainData[j][0])
+" "+FloatToStr(trainData[j][1])
+" "+FloatToStr(trainData[j][2])
+" "+FloatToStr(trainData[j][3])
+" "+FloatToStr(trainData[j][4])
+" "+FloatToStr(trainData[j][5])
+" "+FloatToStr(trainData[j][6])); */

}
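The parser above assumes iris.txt is in the standard UCI iris.data format: four comma-separated numeric attributes followed by the class label, one record per line, for example:

5.1,3.5,1.4,0.2,Iris-setosa
7.0,3.2,4.7,1.4,Iris-versicolor
6.3,3.3,6.0,2.5,Iris-virginica

The one-byte fread() after each fscanf() is what consumes the comma separator.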


void __fastcall TForm1::init()
{
I=0;
H=1;
O=2;
NNum[I]=4; // number of neurons in the input layer
NNum[H]=18; // hidden layer
NNum[O]=3; // output layer

learnN=0;
sum=0;
ERR=0.005;

randomize();
//begin initialize weights and thresholds

for(L=1;L<LAYERS;L++ )
{
for(n=0;n<NNum[L];n++) // current layer
{
for( pren=0;pren<NNum[L-1];pren++ ) // previous layer
{
W[L-1][L][pren][n]=myRand(); //rand[-0.1,0.1]
}

TH[L][n]=myRand(); //rand[-0.1,0.1]
}

}
//end initialize weights and thresholds
//begin learnRatio
for(L=0;L<LAYERS;L++ )
learnRatio[L]=0.006;//0.5? learning rate; note it is stored per layer
//end learnRatio


}


void __fastcall TForm1::trainBtnClick(TObject *Sender)
{
init();

do
{
randListF();//shuffle the training order; the result is randList[0..trainDataNum-1]

for( t=0;t<trainDataNum;t++)
{
randT=randList[t];/// the randomly ordered training sample

forward(randT); //use randT, not the loop index t
backward(randT); //use randT

//an improved algorithm would plug in here (see the momentum sketch after this function)
}

error();

learnN=learnN+1;
}
while( (learnN<MAXLEARNNUM) && (err[learnN-1]>ERR) ) ;

}
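Since the question asked for an improved BP algorithm: the most common improvement is a momentum term, which reuses a fraction of the previous update. Below is a minimal sketch of how the final update loop in backward() could be rewritten; prevdW, prevdTH, and the value of alpha are assumptions I am introducing here, not part of the original code:

//begin sketch: momentum update (would replace the final W/TH loop in backward())
static double prevdW[MAXLAYERS][MAXLAYERS][MAXNNUM][MAXNNUM]; //previous deltas (assumed extra storage)
static double prevdTH[MAXLAYERS][MAXNNUM];
const double alpha=0.9; //momentum factor (assumed value)

for(L=LAYERS-1;L>0;L--)
{
for( n=0;n<NNum[L];n++)
{
for(pren=0;pren<NNum[L-1];pren++)
{
double delta=dW[L-1][L][pren][n]+alpha*prevdW[L-1][L][pren][n];
W[L-1][L][pren][n]+=delta; //gradient step plus a fraction of the previous step
prevdW[L-1][L][pren][n]=delta;
}
double deltaTH=dTH[L][n]+alpha*prevdTH[L][n];
TH[L][n]-=deltaTH; //same sign convention as the threshold update in backward()
prevdTH[L][n]=deltaTH;
}
}
//end sketch: momentum update

Momentum smooths successive steps, so training tends to cross flat error regions faster and oscillate less in narrow valleys; typical alpha values are 0.5 to 0.9.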


void __fastcall TForm1::forward(int trainID)
{ // note: the global loop variables are reused here
//output of the input layer
for(n=0;n<NNum[0];n++)
Out[0][n]=trainData[trainID][n];
//begin outputs of the remaining layers
for(L=1;L<LAYERS;L++) // which layer
{

for(n=0;n<NNum[L];n++) // which neuron of this layer
{
sum=0.0;

for(pren=0;pren<NNum[L-1];pren++) // which neuron of the previous layer
{
sum=sum+Out[L-1][pren]*W[L-1][L][pren][n];
}

Sum[L][n]=sum-TH[L][n];/// subtract the threshold -> activation value
Out[L][n]=f(Sum[L][n]);// activation function
}

}
//end outputs of the remaining layers
}
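For reference, forward() computes, for every non-input layer L and neuron n:

Sum[L][n] = (sum over pren of Out[L-1][pren]*W[L-1][L][pren][n]) - TH[L][n]
Out[L][n] = f(Sum[L][n])

that is, the weighted sum of the previous layer's outputs minus the neuron's threshold, pushed through the sigmoid.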

void __fastcall TForm1::backward(int trainID)
{ // note: the global loop variables are reused here
//begin layer LAYERS-1 (the output layer)
for(n=0;n<NNum[LAYERS-1];n++)
d[LAYERS-1][n]=( trainData[trainID][NNum[0]+n] //expected output
-Out[LAYERS-1][n] )*df(Sum[LAYERS-1][n]);
for(n=0;n<NNum[LAYERS-1];n++)
{
dTH[LAYERS-1][n]=learnRatio[LAYERS-1]*d[LAYERS-1][n]*1; // the *1 is a placeholder for the constant bias input
for(pren=0;pren<NNum[LAYERS-2];pren++)
{
dW[LAYERS-2][LAYERS-1][pren][n]=dTH[LAYERS-1][n]*Out[LAYERS-2][pren];
}
}
//end layer LAYERS-1

//begin layers LAYERS-2 down to 1
for(L=LAYERS-2;L>0;L--)
{
for( n=0;n<NNum[L];n++)
{
/// compute d[L][n]
sum=0.0;
for(postn=0;postn<NNum[L+1];postn++)
sum+=d[L+1][postn]*W[L][L+1][n][postn];
d[L][n]=sum*df(Sum[L][n]);
//
dTH[L][n]=learnRatio[L]*d[L][n];
for(pren=0;pren<NNum[L-1];pren++)
{
dW[L-1][L][pren][n]=dTH[L][n]*Out[L-1][pren];
}

}
}
//end layers LAYERS-2 down to 1

//begin apply the new W and TH, from layer LAYERS-1 down to layer 1
for(L=LAYERS-1;L>0;L--)
{
for( n=0;n<NNum[L];n++)
{
for(pren=0;pren<NNum[L-1];pren++)
{
W[L-1][L][pren][n]+=dW[L-1][L][pren][n];
}
TH[L][n]-=dTH[L][n]; // minus: Sum is (weighted sum) - TH, so the threshold must move opposite to d
}
}
//end apply the new W and TH


}
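For reference, the deltas computed above are the standard delta-rule quantities:

output layer: d[LAYERS-1][n] = (target[n] - Out[LAYERS-1][n]) * df(Sum[LAYERS-1][n])
hidden layers: d[L][n] = df(Sum[L][n]) * (sum over postn of d[L+1][postn]*W[L][L+1][n][postn])
weight step: dW[L-1][L][pren][n] = learnRatio[L] * d[L][n] * Out[L-1][pren]

The thresholds are updated with the opposite sign because Sum subtracts TH, i.e. each threshold acts as a weight on a constant -1 input.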

double __fastcall TForm1::f(double x)
{ // activation function (logistic sigmoid)
return( 1/(1+exp(-x)));
}

double __fastcall TForm1::df(double x)
{ // derivative of the activation function
return( f(x)*(1-f(x)) );
}
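A quick note on df(): with f(x) = 1/(1+exp(-x)) we get f'(x) = exp(-x)/(1+exp(-x))^2 = f(x)*(1-f(x)), which is why the derivative can be written in terms of f itself. Since Out[L][n] already stores f(Sum[L][n]), an easy micro-optimization would be to use Out[L][n]*(1-Out[L][n]) instead of calling f() twice more.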

void __fastcall TForm1::error()
{
err[learnN]=0;

for( t=0;t<trainDataNum;t++)
{ // no need for randList here
forward(t); //get the outputs

singleErr=0;

for (n=0;n<NNum[LAYERS-1];n++)
{
singleErr+=(Out[LAYERS-1][n]-trainData[t][NNum[0]+n])
*(Out[LAYERS-1][n]-trainData[t][NNum[0]+n])/2.0;
}

err[learnN]+=singleErr;

}
err[learnN]=err[learnN]/(trainDataNum+0.0+NNum[LAYERS-1]); // normalize by (sample count + output count)
//
}
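So the stopping quantity is

err[learnN] = ( sum over all samples t and outputs n of (Out[LAYERS-1][n] - target[n])^2 / 2 ) / (trainDataNum + NNum[LAYERS-1])

With 135 samples and 3 outputs the divisor is 138. Note this is not the usual per-sample mean squared error (which would divide by 135, or by 135*3); the threshold ERR=0.005 is compared against exactly this normalization.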



double __fastcall TForm1::myRand()
{
return( (rand()+0.0)/0x7FFFU/5-0.1 );// rand()/0x7FFF is in [0,1]; /5 scales to [0,0.2]; -0.1 shifts to [-0.1,0.1]
}

void __fastcall TForm1::randListF() //randListF function
{
//refill randList with a random permutation of 0..trainDataNum-1
for(i=0;i<trainDataNum;i++)
randList[i]=i;

vector<int> v(randList, randList+trainDataNum);
random_shuffle(v.begin(), v.end());

for(i=0;i<trainDataNum;i++)
randList[i]=v.at(i);
/* int arr[10] = {0,1,2,3,4,5,6,7,8,9};
vector<int> v(arr, arr+10);
random_shuffle(v.begin(), v.end());
for(i=0;i<10;i++)
ListBox1->Items->Append(IntToStr(v.at(i)));
*/
}
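A side note on randListF(): random_shuffle() was deprecated in C++11 and removed in C++17, so on a modern compiler the same shuffling would be done with std::shuffle. A minimal sketch (not needed for C++Builder-era compilers):

#include <algorithm>
#include <random>
#include <vector>

void shuffleList(std::vector<int> &v)
{
static std::mt19937 rng(std::random_device{}()); //seeded Mersenne Twister engine
std::shuffle(v.begin(), v.end(), rng); //unbiased Fisher-Yates shuffle
}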


void __fastcall TForm1::showErrorClick(TObject *Sender)
{
DecisionGraph1->Legend->Visible=false;
Series1->Clear();

for(i=0;i<learnN-1;i++)
Series1->Add(err[i+1],"",clRed);
// Series1->Add(0.99,"",clRed);

//Series1->AddArray(err,learnN-1);
}



void __fastcall TForm1::testBtnClick(TObject *Sender)
{
//use testData

String str;
double tmpOut;// for classification
int flag;// for classification
int count=0;

ListBox1->Clear();

for(t=0;t<testDataNum;t++)
{
flag=1;
str="";
//output of the input layer
for(n=0;n<NNum[0];n++)
Out[0][n]=testData[t][n];
//begin outputs of the remaining layers
for(L=1;L<LAYERS;L++) // which layer
{

for(n=0;n<NNum[L];n++) // which neuron of this layer
{
sum=0.0;

for(pren=0;pren<NNum[L-1];pren++) // which neuron of the previous layer
{
sum=sum+Out[L-1][pren]*W[L-1][L][pren][n];
}

Sum[L][n]=sum-TH[L][n];/// subtract the threshold -> activation value
Out[L][n]=f(Sum[L][n]);// activation function
}

}
//end outputs of the remaining layers

//begin classification
for(n=0;n<NNum[LAYERS-1];n++)
{
if(Out[LAYERS-1][n]>=0.5)
tmpOut=1;
else
tmpOut=0;
/////
if( tmpOut==testData[t][NNum[0]+n] )
flag=flag*1;
else
flag=flag*0;

str+=FloatToStrF(Out[LAYERS-1][n],ffFixed,7,2)+" ";
//(FloatToStr(Out[LAYERS-1][n])).SubString(1,3)+" ";
}

str+="->";
for(n=0;n<NNum[LAYERS-1];n++)
{
str+=FloatToStr(testData[t][NNum[0]+n])+" ";
}
if(flag==1) {str+=" R";count++;} // R = right
else str+=" W"; // W = wrong

ListBox1->Items->Append(IntToStr(t+1)+": "+str);
//end classification
}

if(count==testDataNum) str="100";
else str=(FloatToStr((count)/(testDataNum+0.0))).SubString(3,2);// take the two digits after "0." as the percentage

ListBox1->Items->Append(" ");
ListBox1->Items->Append(" Accuracy: "+str+"%");

}
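From the button handlers, the intended workflow appears to be: readBtn loads iris.txt (135 training + 15 test records), trainBtn runs init() and then trains until either MAXLEARNNUM epochs have passed or the global error falls below ERR, showError plots the per-epoch error curve, and testBtn runs the 15 held-out samples through the network and prints the accuracy.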
 