Improved BP Neural Network Algorithm (C Source Code)
BP Neural Network, Detailed Steps (C# implementation fragments)
```csharp
// Fragments recovered from the C# implementation; elided code is marked with "..."

// ★ Normalize the data for sample isamp
for (int i = 0; i < inNum; i++)
{
    x[i] = p[isamp, i] / in_rate;
}
for (int i = 0; i < outNum; i++)
{
    yd[i] = t[isamp, i] / in_rate;
}

// Compute the hidden layer's input and output
for (int j = 0; j < hideNum; j++)
{
    o1[j] = 0.0;
    ...
}

// Compute the hidden-layer error (the pp array holds it)
for (int j = 0; j < hideNum; j++)
{
    ...
}

// Update V; V is the weight matrix between the hidden and output layers
for (int j = 0; j < hideNum; j++)
{
    v[j, k] += rate * qq[k] * x1[j];
}

// Weight-update fragment
w[i] += dw[i];

// Simulation (prediction) function
public double[] sim(double[] psim)
{
    for (int i = 0; i < inNum; i++)
        x[i] = psim[i] / in_rate;   // in_rate is the normalization coefficient
    for (int j = 0; j < hideNum; j++)
    {
        o1[j] = 0.0;
        ...
    }
}

// Fragments of the search for the largest absolute value pMax
    pMax = Math.Abs(p[isamp, i]);
    }
}
for (int j = 0; j < outNum; j++)
{
    if (Math.Abs(t[isamp, j]) > pMax)
        ...
```
BP Algorithm Code Implementation
The BP algorithm (Backpropagation Algorithm) is a commonly used neural-network training algorithm, mainly used for model training in supervised learning.
Its core idea is to update the network's weights and biases by propagating errors backwards, so that the network's output approaches the target output.
During backpropagation, the gradient of the loss with respect to every connection weight and bias is computed, and the parameters are updated so as to minimize the loss function.
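Written out for sigmoid layers trained on squared error (a standard formulation, with learning rate $\eta$ and $\odot$ the element-wise product), the per-layer error terms and updates are:

$$\delta^{(L)} = \big(a^{(L)} - y\big)\odot a^{(L)}\odot\big(1-a^{(L)}\big),\qquad \delta^{(l)} = \big(W^{(l)}\big)^{\top}\delta^{(l+1)}\odot a^{(l)}\odot\big(1-a^{(l)}\big)$$
$$W^{(l)} \leftarrow W^{(l)} - \eta\,\delta^{(l+1)}\big(a^{(l)}\big)^{\top},\qquad b^{(l)} \leftarrow b^{(l)} - \eta\,\delta^{(l+1)}$$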
The following is a sample implementation of the BP algorithm:
```python
import numpy as np

class NeuralNetwork:
    def __init__(self, layers):
        self.layers = layers
        self.weights = []
        self.biases = []
        self.activations = []
        # Initialize weights and biases
        for i in range(1, len(layers)):
            self.weights.append(np.random.randn(layers[i], layers[i - 1]))
            self.biases.append(np.random.randn(layers[i], 1))

    def sigmoid(self, z):
        return 1 / (1 + np.exp(-z))

    def sigmoid_derivative(self, a):
        # a is already a sigmoid activation, so sigma'(z) = a * (1 - a)
        return a * (1 - a)

    def forward_propagate(self, X):
        # Forward pass: store the activation of every layer (input included)
        self.activations = [X]
        activation = X
        for w, b in zip(self.weights, self.biases):
            z = np.dot(w, activation) + b
            activation = self.sigmoid(z)
            self.activations.append(activation)
        return activation

    def backward_propagate(self, X, y, output):
        deltas = [None] * len(self.layers)
        deltas[-1] = output - y
        # Backward pass: compute each layer's error (gradient)
        for i in reversed(range(1, len(self.layers) - 1)):
            deltas[i] = np.dot(self.weights[i].T, deltas[i + 1]) * \
                self.sigmoid_derivative(self.activations[i])
        # Update weights and biases (learning rate 0.1)
        for i in range(len(self.layers) - 1):
            self.weights[i] -= 0.1 * np.dot(deltas[i + 1], self.activations[i].T)
            self.biases[i] -= 0.1 * np.sum(deltas[i + 1], axis=1, keepdims=True)

    def train(self, X, y, epochs):
        for epoch in range(epochs):
            output = self.forward_propagate(X)
            self.backward_propagate(X, y, output)

    def predict(self, X):
        return np.round(self.forward_propagate(X))
```
The code above uses numpy to implement a simple multi-layer neural network; any number of layers and any number of neurons per layer can be configured.
Improved BP Neural Network Algorithm
The following is a simple improved BP neural network algorithm in C, with comments added in the code. The program trains the network to fit the function y = 2x², i.e. twice the square of the input value.
```c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define INPUT_LAYER_SIZE 1    // number of input-layer nodes
#define HIDDEN_LAYER_SIZE 10  // number of hidden-layer nodes
#define OUTPUT_LAYER_SIZE 1   // number of output-layer nodes
#define LEARNING_RATE 0.1     // learning rate
#define MAX_EPOCHS 10000      // training iterations (value assumed; the original #define was lost)

double sigmoid(double x)
{
    return 1 / (1 + exp(-x));
}

double sigmoid_derivative(double x)
{
    // x is already a sigmoid output, so the derivative is x * (1 - x)
    return x * (1 - x);
}

// Training function
void train(double input[INPUT_LAYER_SIZE], double expected_output[OUTPUT_LAYER_SIZE],
           double w_ih[INPUT_LAYER_SIZE][HIDDEN_LAYER_SIZE],
           double w_ho[HIDDEN_LAYER_SIZE][OUTPUT_LAYER_SIZE],
           double *b_h, double *b_o)
{
    // Forward pass
    double hidden_layer_activations[HIDDEN_LAYER_SIZE];
    double output_layer_activations[OUTPUT_LAYER_SIZE];
    for (int i = 0; i < HIDDEN_LAYER_SIZE; i++) {
        double activation = 0;
        for (int j = 0; j < INPUT_LAYER_SIZE; j++) {
            activation += input[j] * w_ih[j][i];
        }
        activation += *b_h;
        hidden_layer_activations[i] = sigmoid(activation);
    }
    for (int i = 0; i < OUTPUT_LAYER_SIZE; i++) {
        double activation = 0;
        for (int j = 0; j < HIDDEN_LAYER_SIZE; j++) {
            activation += hidden_layer_activations[j] * w_ho[j][i];
        }
        activation += *b_o;
        output_layer_activations[i] = sigmoid(activation);
    }

    // Backward pass
    double output_layer_errors[OUTPUT_LAYER_SIZE];
    double hidden_layer_errors[HIDDEN_LAYER_SIZE];
    for (int i = 0; i < OUTPUT_LAYER_SIZE; i++) {
        output_layer_errors[i] = (expected_output[i] - output_layer_activations[i]) *
                                 sigmoid_derivative(output_layer_activations[i]);
    }
    for (int i = 0; i < HIDDEN_LAYER_SIZE; i++) {
        double error = 0;
        for (int j = 0; j < OUTPUT_LAYER_SIZE; j++) {
            error += output_layer_errors[j] * w_ho[i][j];
        }
        hidden_layer_errors[i] = error * sigmoid_derivative(hidden_layer_activations[i]);
    }

    // Update weights and biases
    for (int i = 0; i < HIDDEN_LAYER_SIZE; i++) {
        for (int j = 0; j < OUTPUT_LAYER_SIZE; j++) {
            w_ho[i][j] += LEARNING_RATE * output_layer_errors[j] * hidden_layer_activations[i];
        }
    }
    for (int i = 0; i < INPUT_LAYER_SIZE; i++) {
        for (int j = 0; j < HIDDEN_LAYER_SIZE; j++) {
            w_ih[i][j] += LEARNING_RATE * hidden_layer_errors[j] * input[i];
        }
    }
    *b_o += LEARNING_RATE * output_layer_errors[0];
    *b_h += LEARNING_RATE * hidden_layer_errors[0];
}

// Test function
double test(double input[INPUT_LAYER_SIZE],
            double w_ih[INPUT_LAYER_SIZE][HIDDEN_LAYER_SIZE],
            double w_ho[HIDDEN_LAYER_SIZE][OUTPUT_LAYER_SIZE],
            double b_h, double b_o)
{
    double hidden_layer_activations[HIDDEN_LAYER_SIZE];
    double output_layer_activations[OUTPUT_LAYER_SIZE];
    for (int i = 0; i < HIDDEN_LAYER_SIZE; i++) {
        double activation = 0;
        for (int j = 0; j < INPUT_LAYER_SIZE; j++) {
            activation += input[j] * w_ih[j][i];
        }
        activation += b_h;
        hidden_layer_activations[i] = sigmoid(activation);
    }
    for (int i = 0; i < OUTPUT_LAYER_SIZE; i++) {
        double activation = 0;
        for (int j = 0; j < HIDDEN_LAYER_SIZE; j++) {
            activation += hidden_layer_activations[j] * w_ho[j][i];
        }
        activation += b_o;
        output_layer_activations[i] = sigmoid(activation);
    }
    return output_layer_activations[0];
}

int main(void)
{
    // Initialize weights and biases
    double w_ih[INPUT_LAYER_SIZE][HIDDEN_LAYER_SIZE];
    double w_ho[HIDDEN_LAYER_SIZE][OUTPUT_LAYER_SIZE];
    double b_h = 0;
    double b_o = 0;
    for (int i = 0; i < INPUT_LAYER_SIZE; i++) {
        for (int j = 0; j < HIDDEN_LAYER_SIZE; j++) {
            w_ih[i][j] = ((double) rand() / RAND_MAX) * 2 - 1; // random number in [-1, 1]
        }
    }
    for (int i = 0; i < HIDDEN_LAYER_SIZE; i++) {
        for (int j = 0; j < OUTPUT_LAYER_SIZE; j++) {
            w_ho[i][j] = ((double) rand() / RAND_MAX) * 2 - 1; // random number in [-1, 1]
        }
    }

    // Train the model
    for (int epoch = 0; epoch < MAX_EPOCHS; epoch++) {
        double input = ((double) rand() / RAND_MAX) * 10; // random number in [0, 10]
        double expected_output = 2 * pow(input, 2);       // y = 2x^2
        train(&input, &expected_output, w_ih, w_ho, &b_h, &b_o);
    }

    // Test the model
    double input = 5;
    double output = test(&input, w_ih, w_ho, b_h, b_o);
    printf("Input: %.2f, Output: %.2f\n", input, output);
    return 0;
}
```
This code implements a BP neural network with one input layer, one hidden layer, and one output layer, using the sigmoid activation function and the mean-squared-error loss.
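Note that a sigmoid output layer can only produce values in (0, 1), while the target y = 2x² reaches 200 on [0, 10], so as written the outputs cannot reach the targets. One minimal remedy is to train on scaled targets and unscale the predictions; a sketch, where OUT_SCALE is an assumed constant (the maximum of y = 2x² on [0, 10]):

```c
/* Sketch only: scale targets into the sigmoid's (0, 1) range and back.
   OUT_SCALE = 200 is an assumption, not part of the original program. */
#define OUT_SCALE 200.0

double normalize_target(double y)   { return y / OUT_SCALE; }
double denormalize_output(double o) { return o * OUT_SCALE; }
```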
BP Neural Network C Program Code
This BP neural network C program uses file-related operations to record the learning and test data involved.
权值用伪随机数函数随机产生(范围是(0,0.5))采用结构体及链表来实现神经网络的结构分为实例结构体、层结构体和网络结构体数据结构的设计参照了《人工神经网络原理》(马锐编著,北京:机械工业出版社,2010,7)一书学习算法的优化也参照该书采用学习效率自适应调整算法优化源程序的学习算法,以减少学习次数由于能力和知识有限,该程序存在较大漏洞误差,在调整学习率时,不好掌握调节系数初始权值的限定范围适中,则程序的学习次数将明显减少在随机赋初始权值(0,0.5)时,学习次数可调节至135,但对测试数据的判别效果不理想,没有采用#include<stdio.h>#include<stdlib.h>#include<math.h>#include<malloc.h>#define TRUE 1#define FALSE 0#define NUM_LAYERS 4#define NUM 20 //训练实例个数#define N 2 //输入层单元数#define M 2 //输出层单元数int Units[NUM_LAYERS] = {N,3,3,M}; //每层单元数FILE *fp,*fb;typedef struct //训练实例{float x[N];float y[M];}TRAIN;typedef struct //网络层结构{int Units; //该层中单元的个数float *Output; //第i 个单元的输出float *Error; //第i 个单元的校正误差float **Weight; //第i 个单元的连接权值typedef struct //网络{LAYER **Layer; //隐层定义LAYER *Inputlayer; //输入层LAYER *Outputlayer; //输出层float Error; //允许误差float Eta; //学习率}NET;//初始化伪随机数发生器void InitializeRandoms(){srand(4711);return;}//产生随机实数并规范化float RandomReal() //产生(-0.5,0.5)之间的随机数{return (float)((rand()%100)/200.0);}//初始化训练数据void InitializeTrainingData(TRAIN *training){int i,j;char filename[20];printf("\n请输入训练实例的数据文件名: \n");gets(filename);fb = fopen(filename,"r");fprintf(fp,"\n\n--Saving initialization training datas ...\n");for(i=0;i<NUM;i++){for(j=0;j<N;j++){fscanf(fb,"%f",&(training+i)->x[j]);fprintf(fp,"%10.4f",(training+i)->x[j]);}for(j=0;j<M;j++){fscanf(fb,"%f",&(training+i)->y[j]);fprintf(fp,"%10.4f",(training+i)->y[j]);fprintf(fp,"\n");}fclose(fb);return;}//应用程序初始化void InitializeApplication(NET *Net){Net->Eta = (float)0.3;Net->Error = (float)0.0001;fp = fopen("BPResultData.txt","w+");return;}//应用程序关闭时终止打开的文件void FinalizeApplication(NET *Net){fclose(fp);return;}//分配内存,建立网络void GenerateNetwork(NET *Net){int l,i;Net->Layer = (LAYER **)calloc(NUM_LAYERS,sizeof(LAYER *));for(l=0;l<NUM_LAYERS;l++){Net->Layer[l] = (LAYER *)malloc(sizeof(LAYER));Net->Layer[l]->Units = Units[l];Net->Layer[l]->Output = (float *) calloc(Units[l]+1,sizeof(float));Net->Layer[l]->Error = (float *) calloc(Units[l]+1,sizeof(float));Net->Layer[l]->Weight = (float **)calloc(Units[l]+1,sizeof(float *));Net->Layer[l]->Output[0] = 1;if(l != 0)for(i=1;i <= Units[l];i++) //下标从"1"开始Net->Layer[l]->Weight[i] = (float *)calloc(Units[l-1]+1,sizeof(float));}Net->Inputlayer = Net->Layer[0];Net->Outputlayer = Net->Layer[NUM_LAYERS - 1];return;}//产生随机实数作为初始连接权值void RandomWeights(NET *Net){int l,i,j;for(l=1;l<NUM_LAYERS;l++)for(i=1;i <= Net->Layer[l]->Units;i++)for(j=0;j <= Net->Layer[l-1]->Units;j++)Net->Layer[l]->Weight[i][j] = RandomReal();return;}//设置输入层的输出值void SetInput(NET *Net,float *Input){int i;for(i=1;i <= Net->Inputlayer->Units;i++)Net->Inputlayer->Output[i] = Input[i-1]; //输入层采用u(x) = xreturn;}//设置输出层的输出值void GetOutput(NET *Net,float *Output){int i;for(i=1;i <= Net->Outputlayer->Units;i++)Output[i-1] = (float)(1/(1 + exp(-Net->Outputlayer->Output[i]))); //输出层采用f(x)=1/(1+e^(-x))return;}//层间顺传播void PropagateLayer(NET *Net,LAYER *Lower,LAYER *Upper){int i,j;float sum;for(i=1;i <= Upper->Units;i++){sum = 0;for(j=1;j <= Lower->Units;j++)sum += (Upper->Weight[i][j] * Lower->Output[j]);Upper->Output[i] = (float)(1/(1 + exp(-sum)));}return;}//整个网络所有层间的顺传播void PropagateNet(NET *Net){int l;for(l=0;l < NUM_LAYERS-1;l++)PropagateLayer(Net,Net->Layer[l],Net->Layer[l+1]);return;}//计算输出层误差void ComputeOutputError(NET *Net,float *target){int i;float Out,Err;for(i=1;i <= Net->Outputlayer->Units;i++){Out = Net->Outputlayer->Output[i];Err = target[i-1] - Out;Net->Outputlayer->Error[i] = Out*(1-Out)*Err;}return;}//层间逆传播void BackpropagateLayer(NET *Net,LAYER *Upper,LAYER *Lower) {int i,j;float Out,Err;for(i=1;i <= 
Lower->Units;i++){Out = Lower->Output[i];Err = 0;for(j=1;j <= Upper->Units;j++)Err += (Upper->Weight[j][i] * Upper->Error[j]);Lower->Error[i] = Out*(1-Out)*Err;}return;}//整个网络所有层间的逆传播void BackpropagateNet(NET *Net){int l;for(l=NUM_LAYERS-1;l>1;l--)BackpropagateLayer(Net,Net->Layer[l],Net->Layer[l-1]);return;}//权值调整void AdjustWeights(NET *Net){int l,i,j;float Out,Err;for(l=1;l<NUM_LAYERS;l++)for(i=1;i <= Net->Layer[l]->Units;i++)for(j=0;j <= Net->Layer[l-1]->Units;j++){Out = Net->Layer[l-1]->Output[j];Err = Net->Layer[l]->Error[i];Net->Layer[l]->Weight[i][j] += (Net->Eta*Err*Out);}return;}//网络处理过程void SimulateNet(NET *Net,float *Input,float *Output,float *target,int TrainOrNot) {SetInput(Net,Input); //输入数据PropagateNet(Net); //模式顺传播GetOutput(Net,Output); //形成输出ComputeOutputError(Net,target); //计算输出误差if(TrainOrNot){BackpropagateNet(Net); //误差逆传播AdjustWeights(Net); //调整权值}return;}//训练过程void TrainNet(NET *Net,TRAIN *training){int l,i,j,k;int count=0,flag=0;float Output[M],outputfront[M],ERR,err,sum;do{flag = 0;sum = 0;ERR = 0;if(count >= 1)for(j=0;j<M;j++)outputfront[j]=Output[j];SimulateNet(Net,(training+(count%NUM))->x,Output,(training+(count%NUM))->y,TRUE);if(count >= 1){k = count%NUM;for(i=1;i <= Net->Outputlayer->Units;i++){sum += Net->Outputlayer->Error[i];err = (training+k-1)->y[i-1] - outputfront[i-1];ERR += (outputfront[i-1] * (1 - outputfront[i-1]) * err);}if(sum <= ERR)Net->Eta = (float)(0.9999 * Net->Eta);elseNet->Eta = (float)(1.0015 * Net->Eta);}if(count >= NUM){for(k=1;k <= M;k++)if(Net->Outputlayer->Error[k] > Net->Error){ flag=1; break; }if(k>M)flag=0;}count++;}while(flag || count <= NUM);fprintf(fp,"\n\n\n");fprintf(fp,"--training results ... \n");fprintf(fp,"training times: %d\n",count);fprintf(fp,"\n*****the final weights*****\n");for(l=1;l<NUM_LAYERS;l++){for(i=1;i <= Net->Layer[l]->Units;i++){for(j=1;j <= Net->Layer[l-1]->Units;j++)fprintf(fp,"%15.6f",Net->Layer[l]->Weight[i][j]);fprintf(fp,"\n");}fprintf(fp,"\n\n");}}//评估过程void EvaluateNet(NET *Net){int i;printf("\n\n(");fprintf(fp,"\n\n(");for(i=1;i <= Net->Inputlayer->Units;i++){printf(" %.4f",Net->Inputlayer->Output[i]);fprintf(fp,"%10.4f",Net->Inputlayer->Output[i]);}printf(")\t");fprintf(fp,")\t");for(i=1;i <= Net->Outputlayer->Units;i++){if(fabs(Net->Outputlayer->Output[i] - 1.0) <= 0.0499){printf("肯定是第%d 类, ",i);fprintf(fp,"肯定是第%d 类, ",i);}if(fabs(Net->Outputlayer->Output[i] - 0.9) <= 0.0499){printf("几乎是第%d 类, ",i);fprintf(fp,"几乎是第%d 类, ",i);}if(fabs(Net->Outputlayer->Output[i] - 0.8) <= 0.0499){printf("极是第%d 类, ",i);fprintf(fp,"极是第%d 类, ",i);}if(fabs(Net->Outputlayer->Output[i] - 0.7) <= 0.0499){printf("很是第%d 类, ",i);fprintf(fp,"很是第%d 类, ",i);}if(fabs(Net->Outputlayer->Output[i] - 0.6) <= 0.0499){printf("相当是第%d 类, ",i);fprintf(fp,"相当是第%d 类, ",i);}if(fabs(Net->Outputlayer->Output[i] - 0.5) <= 0.0499){printf("差不多是第%d 类, ",i);fprintf(fp,"差不多是第%d 类, ",i);}if(fabs(Net->Outputlayer->Output[i] - 0.4) <= 0.0499){printf("比较像是第%d 类, ",i);fprintf(fp,"比较像是第%d 类, ",i);}if(fabs(Net->Outputlayer->Output[i] - 0.3) <= 0.0499){printf("有些像是第%d 类, ",i);fprintf(fp,"有些像是第%d 类, ",i);}if(fabs(Net->Outputlayer->Output[i] - 0.2) <= 0.0499){printf("有点像是第%d 类, ",i);fprintf(fp,"有点像是第%d 类, ",i);}if(fabs(Net->Outputlayer->Output[i] - 0.1) <= 0.0499){printf("稍稍像是第%d 类, ",i);fprintf(fp,"稍稍像是第%d 类, ",i);}if(Net->Outputlayer->Output[i] <= 0.0499){printf("肯定不是第%d 类, ",i);fprintf(fp,"肯定不是第%d 类, ",i);}}printf("\n\n");fprintf(fp,"\n\n\n");return;}//测试过程void TestNet(NET *Net){TRAIN Testdata;float Output[M];int i,j,flag=0;char 
select;fprintf(fp,"\n\n--Saving test datas ...\n");do{printf("\n请输入测试数据(x1 x2 ... y1 y2 ...): \n");for(j=0;j<N;j++){scanf("%f",&Testdata.x[j]);fprintf(fp,"%10.4f",Testdata.x[j]);}for(j=0;j<M;j++){scanf("%f",&Testdata.y[j]);fprintf(fp,"%10.4f",Testdata.y[j]);}fprintf(fp,"\n");SimulateNet(Net,Testdata.x,Output,Testdata.y,FALSE);fprintf(fp,"\n--NET Output and Error of the Test Data ....\n");for(i=1;i <= Net->Outputlayer->Units;i++)fprintf(fp,"%10.6f %10.6f\n",Net->Outputlayer->Output[i],Net->Outputlayer->Error[i]);EvaluateNet(Net);printf("\n继续测试?(y/n):\n");getchar();scanf("%c",&select);printf("\n");if((select == 'y')||(select == 'Y'))flag = 1;elseflag=0;}while(flag);return;}//主函数void main(){TRAIN TrainingData[NUM];NET Net;InitializeRandoms(); //初始化伪随机数发生器GenerateNetwork(&Net); //建立网络RandomWeights(&Net); //形成初始权值InitializeApplication(&Net); //应用程序初始化,准备运行InitializeTrainingData(TrainingData); //记录训练数据TrainNet(&Net,TrainingData); //开始训练TestNet(&Net);FinalizeApplication(&Net); //程序关闭,完成善后工作return;}。
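The learning-rate self-adaptation mentioned above lives in TrainNet(): the summed output-layer error signal of the current step is compared with that of the previous step, and Eta is scaled by 0.9999 or 1.0015 accordingly. Isolated as a standalone sketch (the function and parameter names here are made up; the factors come from TrainNet()):

```c
/* Mirror of the adaptive-rate rule in TrainNet(): compare the current
   error signal with the previous one and nudge the learning rate. */
void adapt_eta(float *eta, float current_signal, float previous_signal)
{
    if (current_signal <= previous_signal)
        *eta = (float)(0.9999 * (*eta));   /* damp the rate slightly */
    else
        *eta = (float)(1.0015 * (*eta));   /* grow the rate slightly */
}
```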
Improved BP Neural Network Algorithm: C Source Code
#include "stdio.h"#include "stdlib.h"#include "time.h"#include "math.h"/*********************************************inpoints 为输入神经元个数,可改变outpoints为输出神经元个数defaultpoints为隐层神经元个数datagrough为样本数据个数****************************************************以下数据定义可以修改*****/#define A 0#define a 1#define b 1#define c 1#define ALFA 0.85#define BETA 0.2 //学习率0~1#define Total 20000#define inpoints 9#define outpoints 5#define defaultpoints 28#define datagrough 44#define forecastdata 4/**********定义所需变量********/double InpointData[datagrough][inpoints],OutpointData[datagrough][outpoints]; /* 输入输出数据 */double InpointData_MAX[inpoints],InpointData_MIN[inpoints]; /* 每个因素最大数据 */double OutpointData_MAX[outpoints],OutpointData_MIN[outpoints]; /* 每个因素最小数据 */doublew[defaultpoints][inpoints],limen[defaultpoints],v[outpoints][defaultpoints]; /* 连接权值、阈值 */doubledlta_w[defaultpoints][inpoints],dlta_limen[defaultpoints],dlta_v[outpoints][defaultpoints]; /* 连接权、阈值修正值 */doubledefaultOutpoint[defaultpoints],Outpoint_dp[outpoints],Outpoint_ep[datagrough];/**************************读数据文件******************************/void ReadData(){FILE *fp1,*fp2;int i,j;if((fp1=fopen("D:\\data\\训练输入.txt","r"))==NULL){printf("1can not open the file\n");exit(0);}for(i=0;i<datagrough;i++)for(j=0;j<inpoints;j++)fscanf(fp1,"%lf",&InpointData[i][j]);fclose(fp1);if((fp2=fopen("D:\\data\\训练输出.txt","r"))==NULL){printf("2can not open the file\n");exit(0);}for(i=0;i<datagrough;i++)for(j=0;j<outpoints;j++)fscanf(fp2,"%lf",&OutpointData[i][j]);fclose(fp2);}/*****************************************************//*****************************************归一化******************************************************/void unitary(){int i,j;int k=0;for(j=0;j<inpoints;j++) //找出每列的最大、最小值存放在数组InpointData_MAX[j]、InpointData_MIN[j]中{InpointData_MAX[j]=InpointData[0][j];InpointData_MIN[j]=InpointData[0][j];for(i=0;i<datagrough;i++)if(InpointData_MAX[j]<InpointData[i][j])InpointData_MAX[j]=InpointData[i][j];else if(InpointData_MIN[j]>InpointData[i][j])InpointData_MIN[j]=InpointData[i][j];}for(j=0;j<outpoints;j++) //找出每列的最大、最小值存放在数组OutpointData_MAX[j]、OutpointData_MIN[j]中{OutpointData_MAX[j]=OutpointData[0][j];OutpointData_MIN[j]=OutpointData[0][j];for(i=0;i<datagrough;i++)if(OutpointData_MAX[j]<OutpointData[i][j])OutpointData_MAX[j]=OutpointData[i][j];else if(OutpointData_MIN[j]>OutpointData[i][j])OutpointData_MIN[j]=OutpointData[i][j];}/***************将数据归一处理,处理之后的数据全部在[0,1]之间*************************/for(j=0;j<inpoints;j++)for(i=0;i<datagrough;i++)if(InpointData_MAX[j]==0)InpointData[i][j]=0;elseInpointData[i][j]=(InpointData[i][j]-InpointData_MIN[j]+A)/(InpointData_MAX[j]-InpointData_MIN[j]+A);for(j=0;j<outpoints;j++)for(i=0;i<datagrough;i++)if(OutpointData_MAX[j]==0)OutpointData[i][j]=0;elseOutpointData[i][j]=(OutpointData[i][j]-OutpointData_MIN[j]+A)/(OutpointData_MAX [j]-OutpointData_MIN[j]+A);}/*****************************************************//*********************初始化,随机赋初值**************************/void Initialization(){int i,j;srand((unsigned)time(NULL)); //头文件名 #include <time.h>for(i=0;i<defaultpoints;i++) //给输入层到隐层的连接权赋随机值LianJie_w[i][j],这些值在[0,1]for(j=0;j<inpoints;j++){w[i][j]=(rand()*2.0/RAND_MAX-1)/2;dlta_w[i][j]=0;}for(i=0;i<defaultpoints;i++){limen[i]=(rand()*2.0/RAND_MAX-1)/2;dlta_limen[i]=0;}for(i=0;i<outpoints;i++) //给隐层到输出层的连接权赋初值for(j=0;j<defaultpoints;j++){v[i][j]=(rand()*2.0/RAND_MAX-1)/2;dlta_v[i][j]=0;}}/**********************求单样本的计算输出误差*******************************/ void out_sub1(int t){int i,j;double defaultInpoint[defaultpoints];double 
Outpoint_y[outpoints];Outpoint_ep[t]=0;for(i=0;i<defaultpoints;i++){double sum=0;for(j=0;j<inpoints;j++)sum+=w[i][j]*InpointData[t][j];defaultInpoint[i]=sum+limen[i];defaultOutpoint[i]=1/(a+b*exp(-1*c*defaultInpoint[i]));//求O[i] }for(j=0;j<outpoints;j++)//求Y[i]{Outpoint_y[j]=0;for(i=0;i<defaultpoints;i++)Outpoint_y[j]+=v[j][i]*defaultOutpoint[i];Outpoint_dp[j]=OutpointData[t][j]-Outpoint_y[j];Outpoint_ep[t]+=Outpoint_dp[j]*Outpoint_dp[j]/2;}}/*****************************反算权值******************************************/void out_sub2(int t){int i,j,k;double s;for(i=0;i<defaultpoints;i++){s=0;for(j=0;j<outpoints;j++){dlta_v[j][i]=ALFA*dlta_v[j][i]+BETA*Outpoint_dp[j]*defaultOutpoint[i]; //s+=v[j][i]*Outpoint_dp[j];v[j][i]+=dlta_v[j][i];}dlta_limen[i]=ALFA*dlta_limen[i]+BETA*defaultOutpoint[i]*(1-defaultOutpoint[i]) *s;//limen[i]+=dlta_limen[i];for(k=0;k<inpoints;k++){dlta_w[i][k]=ALFA*dlta_w[i][k]+BETA*defaultOutpoint[i]*(1-defaultOutpoint[i])*s *InpointData[t][k];//w[i][k]=w[i][k]+dlta_w[i][k];}}}/*******************************************************/void forecast(){int i,j,t,k=0;double e,e1[forecastdata]={0}; //训练误差double sss;double InputData_x[forecastdata][inpoints],tp[forecastdata][outpoints];doubledefInpoint,defOutpoint[defaultpoints],y[forecastdata][outpoints];//y[forecastda ta][outpoints]为网络检验输出FILE *fp1,*fp3;if((fp1=fopen("D:\\data\\预测输入.txt","r"))==NULL) //检验数据输入{printf("3can not open the file\n");exit(0);}for(i=0;i<forecastdata;i++)for(j=0;j<inpoints;j++)fscanf(fp1,"%lf",&InputData_x[i][j]);fclose(fp1);if((fp3=fopen("D:\\data\\预测输出.txt","r"))==NULL) //实际检验结果输出{printf("31can not open the file\n");exit(0);}for(i=0;i<forecastdata;i++)for(j=0;j<outpoints;j++)fscanf(fp3,"%lf",&tp[i][j]);fclose(fp3);for(j=0;j<inpoints;j++) // 检验数据归一化for(i=0;i<forecastdata;i++)if(InpointData_MAX[j]==0)InputData_x[i][j]=0;elseInputData_x[i][j]=(InputData_x[i][j]-InpointData_MIN[j]+A)/(InpointData_MAX[j]-InpointData_MIN[j]+A);for(j=0;j<outpoints;j++)for(i=0;i<forecastdata;i++)if(OutpointData_MAX[j]==0)tp[i][j]=0;elsetp[i][j]=(tp[i][j]-OutpointData_MIN[j]+A)/(OutpointData_MAX[j]-OutpointData_MIN [j]+A);do{Initialization(); //初始化连接权值w[i][j],limen[i],v[k][i]k=0;do{e=0;for(t=0;t<datagrough;t++){out_sub1(t); //正向计算网络输出out_sub2(t); //反向计算,修正权值e+=Outpoint_ep[t]; //计算输出误差}k++;}while((k<Total)&&(e>0.1));sss=0; //中间参数for(t=0;t<forecastdata;t++){e1[t]=0;for(i=0;i<defaultpoints;i++){double sum=0;for(j=0;j<inpoints;j++)sum+=w[i][j]*InputData_x[t][j];defInpoint=sum+limen[i];defOutpoint[i]=1/(a+b*exp(-1*c*defInpoint));}for(j=0;j<outpoints;j++){y[t][j]=0;for(i=0;i<defaultpoints;i++)y[t][j]+=v[j][i]*defOutpoint[i];e1[t]+=(y[t][j]-tp[t][j])*(y[t][j]-tp[t][j])/2;y[t][j]=y[t][j]*(OutpointData_MAX[j]-OutpointData_MIN[j]+A)+OutpointData_MI N[j]-A;}sss+=e1[t];}sss=sss/forecastdata;printf(" %lf %lf\n",e,sss);}while(sss>0.12);}/********************************************************/void main(){int i,j,k;FILE *fp2;ReadData(); //读训练数据:输入和输出unitary(); //归一化,将输入输出数据归一,结果在[0,1]中forecast(); //检验误差if((fp2=fopen("D:\\data\\计算权值.txt","w"))==NULL) //文件输出训练好的权值{printf("6can not open the file\n");exit(0);}for(i=0;i<defaultpoints;i++){for(k=0;k<inpoints;k++)fprintf(fp2," %lf ",w[i][k]);fprintf(fp2,"\n");}fprintf(fp2,"\n");for(i=0;i<defaultpoints;i++)fprintf(fp2," %lf ",limen[i]);fprintf(fp2,"\n\n");for(i=0;i<defaultpoints;i++){for(j=0;j<outpoints;j++)fprintf(fp2," %lf ",v[j][i]);fprintf(fp2,"\n");}fclose(fp2);}。
BP Neural Network Algorithm Improvement
Information Science. Huang Shangqing¹, Zhao Zhiyong², Sun Libo³ (1. School of Statistics and Mathematics, Zhejiang Gongshang University, Hangzhou, Zhejiang 310018; 2. School of Automation, Huazhong University of Science and Technology, Wuhan, Hubei 430074; 3. School of Information Science and Engineering, Northeastern University, Shenyang, Liaoning 110819). DOI: 10.16660/j.cnki.1674-098X.2017.20.146
[Excerpt] This paper selects 50,000 samples from the training set for training and 10,000 samples from the test set for testing. Since each sample is a 32×32 matrix, it is converted into a 784-dimensional feature vector and grouped according to the sample-grouping scheme of the previous section; with min_batch = 20 and … = 5, it computes …
… the variants are batch gradient descent, stochastic gradient descent, and stochastic mini-batch gradient descent. Although all of these improve the convergence of the BP algorithm, none of them accounts for the redundancy among samples and their internal correlations, which makes the training process laborious.
Taking the above into consideration, this paper makes the following improvements. For a given input sample … For ease of discussion, the hidden-layer activation function is chosen to be the sigmoid f(x) = 1/(1 + exp(-x)) …
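For reference, the mini-batch update that these gradient-descent variants implement can be written in the standard form (B is one group of min_batch samples, η the learning rate, E_i the per-sample error):

$$w \leftarrow w - \frac{\eta}{|B|}\sum_{i \in B} \nabla_w E_i(w)$$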
Neural Network BP Algorithm: C Language Implementation
/************************************************ Back Propagation Algorithm************************************************/ #include "stdio.h"#include "stdlib.h"#include "math.h"/************************************************ The Definition of User Data************************************************/ #define MAXINPUT 1#define MAXHIDE 3#define MAXOUTPUT 1#define MAX 1#define MIN -1#define T 100#define CA 4double a=0.8;double b=0.05;double k=0;double error=0;int t=0;double sout[MAXOUTPUT];double shide[MAXHIDE];double m=2;double howchange[MAXHIDE][MAXOUTPUT];double ihwchange[MAXINPUT][MAXHIDE];double CatalogueOut[CA][MAXOUTPUT];double CatalogueIn[CA][MAXINPUT];/************************************************ The Definition of Data Structure************************************************/ struct theBP{double input[MAXINPUT];double hide[MAXHIDE];double output[MAXOUTPUT];double ihw[MAXINPUT][MAXHIDE];double how[MAXHIDE][MAXOUTPUT];};struct theBP bpa;/************************************************ Definition of Prototype************************************************/ void WeightInitial();void InitialError();void InPutCatalogue();void CalculateOut(int k);void CalculateError(int k);void ReverseHideError();void CalculateChange();void CalculateNewWeight();void Test();void TestCalculateOut();void camain();void main(){WeightInitial();// InitialError();InPutCatalogue();//doint m=0;while(1){printf("请选择要进行的操作\n");printf("0----------------学习\n");printf("1----------------测试\n");printf("2----------------退出\n");scanf("%d",&m);switch(m){case 0:camain();break;case 1:Test();break;case 2:exit(0);}//while((error)>k);;}}void camain(){for(t=0;t<T;t++){for(int k=0;k<CA;k++){CalculateOut(k);CalculateError(k);ReverseHideError();CalculateChange();CalculateNewWeight();}for(k=0;k<CA;k++){CalculateOut(k);}}}/************************************************Function:initial the weight************************************************/void WeightInitial(){//产生输入层到隐藏层的权值for(int i=0;i<MAXINPUT;i++){for(int j=0;j<MAXHIDE;j++){bpa.ihw[i][j]=0.3;//((double)rand()/(double)(RAND_MAX))*(MAX-MIN)+MIN;}}//产生从隐藏层到输出层的权值for(i=0;i<MAXHIDE;i++){for(int j=0;j<MAXOUTPUT;j++){bpa.how[i][j]=0.2;//((double)rand()/(double)(RAND_MAX))*(MAX-MIN)+MIN;}}}/************************************************Function:input the Catalogue************************************************/void InPutCatalogue(){for(int k=0;k<CA;k++){printf("请输入第%d个样本的输入值:\n",k);for(int i=0;i<MAXINPUT;i++){scanf("%lf",&bpa.input[i]);CatalogueIn[k][i]=bpa.input[i];}printf("请输入第%d个样本的输出值:\n",k);for(i=0;i<MAXOUTPUT;i++){scanf("%lf",&CatalogueOut[k][i]);}}}/************************************************Function:calculate the out************************************************/void CalculateOut(int k){//计算隐藏层的输出for(int j=0;j<MAXHIDE;j++){double sum2=0;for(int i=0;i<MAXINPUT;i++){bpa.input[i]=CatalogueIn[k][i];sum2+=bpa.ihw[i][j]*bpa.input[i];//计算输入}bpa.hide[j]=1/(1+exp(-sum2));//计算输出}//计算每输出层个单元的输入和输出for(j=0;j<MAXOUTPUT;j++){double sum3=0;for(int i=0;i<MAXHIDE;i++){sum3+=bpa.how[i][j]*bpa.hide[i];//计算输入}bpa.output[j]=m*sum3;//计算输出bpa.output[j]=1/(1+exp(-sum3))printf("第%d个样本的最后输出%lf\n",k,bpa.output[j]);}}void TestCalculateOut(){//计算隐藏层的输出for(int j=0;j<MAXHIDE;j++){double sum1=0;for(int i=0;i<MAXINPUT;i++){sum1=sum1+bpa.ihw[i][j]*bpa.input[i];//计算输入}bpa.hide[j]=1/(1+exp(-sum1));//计算输出}//计算每输出层个单元的输入和输出for(j=0;j<MAXOUTPUT;j++){double sum2=0;for(int 
i=0;i<MAXHIDE;i++){sum2=sum2+bpa.how[i][j]*bpa.hide[i];//计算输入}bpa.output[j]=m*sum2;//计算输出bpa.output[j]=1/(1+exp(sum2))printf("最后输出%lf\n",bpa.output[j]);}}/************************************************Function:对输出层Calculate************************************************/void CalculateError(int k){double temp=0;error=0;for(int i=0;i<MAXOUTPUT;i++){temp=(CatalogueOut[k][i]-bpa.output[i])*(CatalogueOut[k][i]-bpa.output[i]);error=(0.5)*temp+error;}for(i=0;i<MAXOUTPUT;i++){sout[i]=(CatalogueOut[k][i]-bpa.output[i])*bpa.output[i]*(1-bpa.output[i]);}}/************************************************Function: 从后向前对隐藏层************************************************/void ReverseHideError(){for(int i=0;i<MAXHIDE;i++){double sum=0;for(int j=0;j<MAXOUTPUT;j++){sum+=sout[j]*bpa.how[i][j];}shide[i]=(bpa.hide[i])*(1-bpa.hide[i])*sum;}}/************************************************Function:Calculate the 权值的变化量************************************************/void CalculateChange(){int j=0;//隐藏层到输出层for(int i=0;i<MAXHIDE;i++){for(j=0;j<MAXOUTPUT;j++){howchange[i][j]=a*(howchange[i][j])+b*(sout[i])*(bpa.hide[i]);// }}//对输入层到隐藏层for(i=0;i<MAXINPUT;i++){for(j=0;j<MAXHIDE;j++){ihwchange[i][j]=a*(ihwchange[i][j])+b*(shide[i])*(bpa.input[i]);// }}}/************************************************Function:Calculate the 新的权值************************************************/void CalculateNewWeight(){int j=0;//隐藏层到输出层for(int i=0;i<MAXHIDE;i++){for(j=0;j<MAXOUTPUT;j++){bpa.how[i][j]=bpa.how[i][j]+howchange[i][j];}}//对输入层到隐藏层for(i=0;i<MAXINPUT;i++){for(j=0;j<MAXHIDE;j++){bpa.ihw[i][j]=bpa.ihw[i][j]+ihwchange[i][j];}}}void Test(){printf("请输入测试数据的输入值:\n");for(int i=0;i<MAXINPUT;i++){scanf("%lf",&bpa.input[i]);}TestCalculateOut();}。
BP Neural Network C++ Source with Training and Test Data
```cpp
#ifndef _BP_
#define _BP_
#include <iostream>
#include <cmath>
#include <cstdlib>
#include <string>
#include <ctime>
using namespace std;

class BP
{
    double a = 1.7159;
    double b = 2.0/3.0;
    ...
};

// Body of the derivative of the activation a*tanh(b*x); the enclosing
// function signature was lost in extraction:
    return -a*b*(pow(tanh(b*x),2)-1);
}

double BP::turn(double* in, double* out) // in: input data; out: expected output; returns the error of one training pass
{
    for(int i=0; i<midNodeCount; i++) // compute hidden-layer output values
    {
        inMid[i] = 0.0;
        ...
    }
    ...
    inOut[i] = 0.0;
    for(int j=0; j<midNodeCount; j++)
        inOut[i] += outWeight[i][j]*Omid[j];
    Oout[i] = sigmoid(inOut[i]-midThreshold[i]);
    }
    double e = 0.0;
    ...
    // momentum update of the input-to-hidden weight deltas
    for(int j=0; j<inNodeCount; j++)
    {
        detaMidW[i][j] = detaMidW[i][j]*alpha + studyRate*detaMid[i]*in[j];
    }
    ...
}
#endif
```
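The constants a = 1.7159 and b = 2/3 in the class above belong to the scaled tanh activation, and the recovered derivative fragment matches it, since:

$$\varphi(x) = a\tanh(bx), \qquad \varphi'(x) = ab\big(1-\tanh^2(bx)\big) = -ab\big(\tanh^2(bx)-1\big)$$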
BP Neural Network in C, Part 2
Part 1 of this C implementation introduced the data structures the program defines, its execution flow, and the basic skeleton (see the earlier post).
Two key functions, computO(i) and backUpdate(i), were left unanalyzed; the parameter i refers to the i-th sample. This part analyzes how both are implemented.
The BP network output function computO(i) is responsible for running the BP network on the input of sample i to predict its output.
Recall the basic BP network model (see the earlier post), its formula (1), and the activation-function formula (2). In the BP network designed earlier, the input-to-hidden weights are stored in w[Neuron][In], the hidden-to-output weights in v[Out][Neuron], the array o[Neuron] records each neuron's output through the activation function, and the network's prediction for a sample is kept in OutputData[Out].
From this, the following reference implementation follows:
```c
void computO(int var)
{
    int i, j;
    double sum;
    /* neuron (hidden-layer) outputs */
    for (i = 0; i < Neuron; ++i) {
        sum = 0;
        for (j = 0; j < In; ++j)
            sum += w[i][j] * d_in[var][j];
        o[i] = 1 / (1 + exp(-1 * sum));
    }
    /* hidden layer to output layer */
    for (i = 0; i < Out; ++i) {
        sum = 0;
        for (j = 0; j < Neuron; ++j)
            sum += v[i][j] * o[j];
        OutputData[i] = sum;
    }
}
```
The feedback-learning function backUpdate(i) compares the predicted output with the sample's true output and corrects the weights involved in the network; this is the key to the whole BP implementation.
The crux is obtaining the correction terms for w[Neuron][In] and v[Out][Neuron]. How to derive them is answered in the mathematical-analysis part of the basic-model post; for the network designed here, the corrections are stored in the structures dw[Neuron][In] and dv[Out][Neuron].
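A sketch of what backUpdate(i) can look like under the structures just described is given below. It assumes the desired outputs are stored in d_out[][Out] alongside d_in, and uses a hypothetical learning-rate constant `rate`; the actual post's code may differ in details:

```c
void backUpdate(int var)
{
    int i, j;
    double err[Neuron];                     /* error propagated back to each hidden neuron */
    for (i = 0; i < Neuron; ++i)
        err[i] = 0;
    for (i = 0; i < Out; ++i) {
        /* linear output layer (matching computO), so no derivative factor here */
        double delta = d_out[var][i] - OutputData[i];
        for (j = 0; j < Neuron; ++j) {
            err[j] += delta * v[i][j];      /* accumulate error for the hidden layer */
            dv[i][j] = rate * delta * o[j]; /* hidden-to-output correction */
            v[i][j] += dv[i][j];
        }
    }
    for (i = 0; i < Neuron; ++i) {
        double g = err[i] * o[i] * (1 - o[i]);  /* sigmoid derivative */
        for (j = 0; j < In; ++j) {
            dw[i][j] = rate * g * d_in[var][j]; /* input-to-hidden correction */
            w[i][j] += dw[i][j];
        }
    }
}
```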
C Implementation of the BP Neural Network Algorithm
The following is a C implementation of a BP neural network; the detailed comments should help in following the code's logic:
```c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define INPUT_SIZE 2
#define HIDDEN_SIZE 2
#define OUTPUT_SIZE 1
#define LEARNING_RATE 0.1
#define MAX_ITERATIONS 100000  /* value assumed; the original #define was lost */

/* neural-network structure */
typedef struct {
    double input[INPUT_SIZE];
    double hidden[HIDDEN_SIZE];
    double output[OUTPUT_SIZE];
    double weights_ih[INPUT_SIZE][HIDDEN_SIZE];
    double weights_ho[HIDDEN_SIZE][OUTPUT_SIZE];
} NeuralNetwork;

/* activation function */
double sigmoid(double x)
{
    return 1 / (1 + exp(-x));
}

/* create the neural network */
NeuralNetwork* create_neural_network(void)
{
    NeuralNetwork* nn = (NeuralNetwork*)malloc(sizeof(NeuralNetwork));
    /* initialize the weights */
    for (int i = 0; i < INPUT_SIZE; i++) {
        for (int j = 0; j < HIDDEN_SIZE; j++) {
            nn->weights_ih[i][j] = (double)rand() / RAND_MAX * 2 - 1;
        }
    }
    for (int i = 0; i < HIDDEN_SIZE; i++) {
        for (int j = 0; j < OUTPUT_SIZE; j++) {
            nn->weights_ho[i][j] = (double)rand() / RAND_MAX * 2 - 1;
        }
    }
    return nn;
}

/* forward propagation */
void forward(NeuralNetwork* nn)
{
    /* hidden-layer outputs */
    for (int i = 0; i < HIDDEN_SIZE; i++) {
        double sum = 0;
        for (int j = 0; j < INPUT_SIZE; j++) {
            sum += nn->input[j] * nn->weights_ih[j][i];
        }
        nn->hidden[i] = sigmoid(sum);
    }
    /* output-layer outputs */
    for (int i = 0; i < OUTPUT_SIZE; i++) {
        double sum = 0;
        for (int j = 0; j < HIDDEN_SIZE; j++) {
            sum += nn->hidden[j] * nn->weights_ho[j][i];
        }
        nn->output[i] = sigmoid(sum);
    }
}

void backpropagation(NeuralNetwork* nn, double target)
{
    /* output-layer error */
    double output_error[OUTPUT_SIZE];
    for (int i = 0; i < OUTPUT_SIZE; i++) {
        double delta = target - nn->output[i];
        output_error[i] = nn->output[i] * (1 - nn->output[i]) * delta;
    }
    /* update hidden-to-output weights */
    for (int i = 0; i < HIDDEN_SIZE; i++) {
        for (int j = 0; j < OUTPUT_SIZE; j++) {
            nn->weights_ho[i][j] += LEARNING_RATE * nn->hidden[i] * output_error[j];
        }
    }
    /* hidden-layer error */
    double hidden_error[HIDDEN_SIZE];
    for (int i = 0; i < HIDDEN_SIZE; i++) {
        double delta = 0;
        for (int j = 0; j < OUTPUT_SIZE; j++) {
            delta += output_error[j] * nn->weights_ho[i][j];
        }
        hidden_error[i] = nn->hidden[i] * (1 - nn->hidden[i]) * delta;
    }
    /* update input-to-hidden weights */
    for (int i = 0; i < INPUT_SIZE; i++) {
        for (int j = 0; j < HIDDEN_SIZE; j++) {
            nn->weights_ih[i][j] += LEARNING_RATE * nn->input[i] * hidden_error[j];
        }
    }
}

void train(NeuralNetwork* nn, double input[][2], double target[], int num_examples)
{
    int iteration = 0;
    double error = 0;
    while (iteration < MAX_ITERATIONS) {
        error = 0;
        for (int i = 0; i < num_examples; i++) {
            for (int j = 0; j < INPUT_SIZE; j++) {
                nn->input[j] = input[i][j];
            }
            forward(nn);
            backpropagation(nn, target[i]);
            error += fabs(target[i] - nn->output[0]);
        }
        /* stop once the error is within tolerance */
        if (error < 0.01) {
            break;
        }
        iteration++;
    }
    if (iteration == MAX_ITERATIONS) {
        printf("Training failed! Error: %.8lf\n", error);
    }
}

void predict(NeuralNetwork* nn, double input[])
{
    for (int i = 0; i < INPUT_SIZE; i++) {
        nn->input[i] = input[i];
    }
    forward(nn);
    printf("Prediction: %.8lf\n", nn->output[0]);
}

int main(void)
{
    NeuralNetwork* nn = create_neural_network();
    double input[4][2] = {
        {0, 0},
        {0, 1},
        {1, 0},
        {1, 1}
    };
    double target[4] = {0, 1, 1, 0};  /* XOR; the last value was truncated in the source and is assumed */
    train(nn, input, target, 4);
    predict(nn, input[0]);
    predict(nn, input[1]);
    predict(nn, input[2]);
    predict(nn, input[3]);
    free(nn);
    return 0;
}
```
The code above implements a simple BP neural network with one input layer, one hidden layer, and one output layer.
BP Neural Network, Detailed Steps (C# implementation fragments, continued)
```csharp
// ★ Find the largest absolute value pMax in p and t
double pMax = 0.0;
// sampleNum is the total number of samples
for (int isamp = 0; isamp < sampleNum; isamp++)
{
    // inNum is the number of input-layer nodes (i.e. neurons)
    for (int i = 0; i < inNum; i++)
    {
        if (Math.Abs(p[isamp, i]) > pMax)
            pMax = Math.Abs(p[isamp, i]);
    }
    for (int j = 0; j < outNum; j++)
    {
        if (Math.Abs(t[isamp, j]) > pMax)
            pMax = Math.Abs(t[isamp, j]);
    }
}

// Initialize v
for (int i = 0; i < hideNum; i++)
{
    for (int j = 0; j < outNum; j++)
    {
        v[i, j] = (R.NextDouble() * 2 - 1.0) / 2;
    }
}
rate = 0.8;
e = 0.0;
in_rate = 1.0;

// Training function
public void train(double[,] p, double[,] t)

// Field declarations
public double[,] dv;  // weight matrix V
public double rate;   // learning rate
public double[] b1;   // hidden-layer thresholds
public double[] b2;   // output-layer thresholds
public double[] db1;  // hidden-layer threshold deltas
public double[] db2;  // output-layer threshold deltas
double[] pp;          // hidden-layer errors
double[] qq;          // output-layer errors

Console.WriteLine("Hidden-layer node count: " + hideNum);
Console.WriteLine("Output-layer node count: " + outNum);
Console.ReadLine();
// Size these matrices appropriately
```
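The fragments above normalize every input and output by a single coefficient (in_rate) derived from the largest absolute value in the data. A self-contained C sketch of that scheme, with hypothetical names:

```c
#include <math.h>

/* Scale all values by the largest absolute value found in the data,
   so everything lands in [-1, 1]. Mirrors the pMax/in_rate fragments above. */
void normalize_by_max_abs(double *data, int n, double *scale_out)
{
    double max_abs = 0.0;
    for (int i = 0; i < n; i++)
        if (fabs(data[i]) > max_abs)
            max_abs = fabs(data[i]);
    if (max_abs == 0.0)
        max_abs = 1.0;               /* avoid division by zero */
    for (int i = 0; i < n; i++)
        data[i] /= max_abs;
    *scale_out = max_abs;            /* keep the scale to denormalize later */
}
```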
Neural Network BP Algorithm Program in C (2019 compilation)
1 1 -1 1 -1 1 0 1 0 1神经网络BP算法(C程序)文件输入输出目录为: F:\BP\训练样本文件名: 训练样本.txt值为:1 1 -1 1 -1 1 0 1 0 1 输出文件名为: 阈值.txt 权值.txt=========================#include "stdlib.h"#include "math.h"#include "conio.h"#include "stdio.h"#define N 2 /*/学习样本个数*/#define IN 3 /*/输入层神经元数目*/#define HN 3 /*/隐层神经元数目*/#define ON 2 /*/输出层神经元数目*/#define Z 20 /*旧权值保存,每次study的权值都保存下来*/ double P[IN]; /*单个样本输入数据*/double T[ON]; /*单个样本输出数据*/double W[HN][IN]; /*/输入层至隐层权值*/double V[ON][HN]; /*/隐层至输出层权值*/double X[HN]; /*/隐层的输入*/double Y[ON]; /*/输出层的输入*/double H[HN]; /*/隐层的输出*/double O[ON]; /*/输出层的输出*/double YU_HN[HN]; /*/隐层的阈值*/double YU_ON[ON]; /*/输出层的阈值*/double err m[N]; /*/第m个样本的总误差*/double a; /*/输出层至隐层的学习效率*/double b; /*/隐层至输入层学习效率*/double alpha; /*/动量因子, 改进型bp算法使用*/ double d err[ON];FILE *fp;/*定义一个放学习样本的结构*/struct {double input[IN];double teach[ON];}Study_Data[N];/*改进型bp算法用来保存每次计算的权值*/struct {double old_W[HN][IN];double old_V[ON][HN];}Old_WV[Z];显示开始界面int Start_Show(){clrscr();printf("\n ***********************\n");printf(" * Welcome to use *\n");printf(" * this program of *\n");printf(" * calculating the BP *\n");printf(" * model! *\n");printf(" * Happy every day! *\n");printf(" ***********************\n");printf("\n\n Before starting, please read the follows carefully:\n\n");printf(" 1.Please ensure the Path of the '训练样本.txt'(xunlianyangben.txt) is \n correct, like 'F:\BP\训练样本.txt'!\n");printf(" 2.The calculating results will be saved in the Path of 'F:\\BP\\'!\n");printf(" 3.The program will load 10 datas when running from 'F:\\BP\\训练样本.txt'!\n");printf(" 4.The program of BP can study itself for no more than 30000 times.\n And surpassing the number, the program will be ended by itself in\n preventing running infinitely because of error!\n");printf("\n\n\n");printf("Now press any key to start...\n");getch();getch();clrscr();}显示结束界面int End_Show(){printf("\n\n---------------------------------------------------\n");printf("The program has reached the end successfully!\n\n Press any key to exit!\n\n");printf("\n ***********************\n");printf(" * This is the end *\n");printf(" * can calculate the BP*\n");printf(" * model! *\n");printf(" ***********************\n");printf(" * Thanks for using! *\n");printf(" * Happy every day! 
*\n");printf(" ***********************\n");getch();exit(0);}获取训练样本GetTrainingData() /*OK*/{ int m,i,j;int datr;if((fp=fopen("f:\\bp\\训练样本.txt","r"))==NULL) /*读取训练样本*/{printf("Cannot open file and strike any key exit!");getch();exit(1);}m=0;i=0;j=0;while(fscanf(fp,"%d",&datr)!=EOF){ j++;if(j<=(N*IN)) /*N为学习样本个数;IN为输入层神经元数目*/{if(i<IN){Study_Data[m].input[i]=datr;/*printf("\nthe Study_Datat[%d].input[%d]=%f\n",m,i,Study_Data[m].input[i]);getch();*/ /*use to check the loaded training datas*/}if(m==(N-1)&&i==(IN-1)){m=0;i=-1;}if(i==(IN-1)){m++;else if((N*IN)<J&&J<=(N*(IN+ON))){if(i<ON){Study_Data[m].teach[i]=datr;/*printf("\nThe Study_Data[%d].teach[%d]=%f",m,i,Study_Data[m].teach[i]);getch();*/ /*use to check the loaded training datas*/}if(m==(N-1)&&i==(ON-1))printf("\n");if(i==(ON-1)){m++;i=-1;}}i++;}fclose(fp);printf("\nThere are [%d] datats that have been loaded successfully!\n",j);/*show the data which has been loaded!*/printf("\nShow the data which has been loaded as follows:\n");for(m=0;m<N;M++){for(i=0;i<IN;I++){printf("\nStudy_Data[%d].input[%d]=%f",m,i,Study_Data[m].input[i]);}for(j=0;j<ON;J++){printf("\nStudy_Data[%d].teach[%d]=%f",m,j,Study_Data[m].teach[j]);}}printf("\n\nPress any key to start calculating...");getch();return 1;}/*///////////////////////////////////*//*初始化权、阈值子程序*//*///////////////////////////////////*/initial(){int i;int ii;int j;/*隐层权、阈值初始化*/for(i=0;i<HN;i++){for(j=1;j<IN;j++){W[i][j]=(double)((rand()/32767.0)*2-1); /*初始化输入层到隐层的权值, 随机模拟0 和1 -1 */ printf("w[%d][%d]=%f\n",i,j,W[i][j]);}}for(ii=0;ii<ON;II++){for(jj=0;jj<HN;JJ++){V[ii][jj]= (double)((rand()/32767.0)*2-1); /*初始化隐层到输出层的权值, 随机模拟0 和1 -1*/ printf("V[%d][%d]=%f\n",ii,jj,V[ii][jj]);}}for(k=0;k<HN;K++){YU_HN[k] = (double)((rand()/32767.0)*2-1); /*隐层阈值初始化,-0.01 ~ 0.01 之间*/printf("YU_HN[%d]=%f\n",k,YU_HN[k]);}for(kk=0;kk<ON;KK++){YU_ON[kk] = (double)((rand()/32767.0)*2-1); /*输出层阈值初始化,-0.01 ~ 0.01 之间*/}return 1;}/*子程序initial()结束*//*//////////////////////////////////////////*//*第m个学习样本输入子程序*//*/////////////////////////////////////////*/input_P(int m){ int i,j;for(i=0;i<IN;I++){P[i]=Study_Data[m].input[i];printf("P[%d]=%f\n",i,P[i]);}/*获得第m个样本的数据*/return 1;}/*子程序input_P(m)结束*//*/////////////////////////////////////////*//*第m个样本教师信号子程序*//*/////////////////////////////////////////*/input_T(int m)for(k=0;k<ON;k++)T[k]=Study_Data[m].teach[k];return 1;}/*子程序input_T(m)结束*/H_I_O(){double sigma;int i,j;for(j=0;j<HN;j++){sigma=0;for(i=0;i<IN;i++){sigma+=W[j][i]*P[i];/*求隐层内积*/}X[j]=sigma-YU_HN[i];/*求隐层净输入, 为什么减隐层的阀值*/ H[j]=1.0/(1.0+exp(-X[j]));/*求隐层输出siglon算法*/}return 1;}/*子程序H_I_O()结束*/O_I_O(){int k;int j;double sigma;for(k=0;k<ON;k++){sigma=0.0;for(j=0;j<HN;j++){sigma+=V[k][j]*H[k];}Y[k]=sigma-YU_ON[k];O[k]=1.0/(1.0+exp(-Y[k]));}return 1;}int Err_O_H(int m){int k;double abs_err[ON];double sqr_err=0;for (k=0;k<ON;k++){abs_err[k]=T[k]-O[k];sqr_err+=(abs_err[k])*(abs_err[k]);err_m[m]=sqr_err/2;}return 1;}double e_err[HN];int Err_H_I(){int j,k;double sigma;for(j=0;j<HN;j++){sigma=0.0;for(k=0;k<ON;k++){sigma+=d_err[k]*V[k][j];}e_err[j]=sigma*H[j]*(1-H[j]);}return 1;}saveWV(int m){int i;int ii;int j;int jj;for(i=0;i<HN;i++){for(j=0;j<IN;j++){Old_WV[m].old_W[i][j] = W[i][j];}}for(ii=0;ii<ON;ii++){for(jj=0;jj<HN;jj++){Old_WV[m].old_V[ii][jj] = V[ii][jj];}}return 1;}int Delta_O_H(int n) /*(int m,int n)*/ {int k,j;for (k=0;k<ON;k++){for (j=0;j<HN;j++){V[k][j]=V[k][j]+a*d_err[k]*H[j];}YU_ON[k]+=a*d_err[k];}}else if(n>1){for (k=0;k<ON;k++){for 
(j=0;j<HN;j++){V[k][j]=V[k][j]+a*d_err[k]*H[j]+alpha*(V[k][j]-Old_WV[(n-1)].old_V[k][j]);}YU_ON[k]+=a*d_err[k];}}return 1;}Delta_H_I(int n) /*(int m,int n)*/{ int i,j;if(n<=1) /*n<=1*/{for (j=0;j<HN;j++){for (i=0;i<IN;i++){W[j][i]=W[j][i]+b*e_err[j]*P[i];}YU_HN[j]+=b*e_err[j];}}else if(n>1){for(j=0;j<HN;j++){for(i=0;i<IN;i++){W[j][i]=W[j][i]+b*e_err[j]*P[i]+alpha*(W[j][i]-Old_WV[(n-1)].old_W[j][i]);}YU_HN[j]+=b*e_err[j];return 1;}double Err_Sum(){int m;double total_err=0;for(m=0;m<N;m++){total_err+=err_m[m];}return total_err;}void savequan(){ int i,j,k;int ii,jj,kk;if((fp=fopen("f:\\bp\\权值.txt","a"))==NULL) /*save the result at f:\hsz\bpc\*.txt*/{printf("Cannot open file strike any key exit!");getch();exit(1);}fprin tf(fp,"Save the result of “权值”(quanzhi) as follows:\n");for(i=0;i<HN;i++){for(j=0;j<IN;j++)fprintf(fp,"W[%d][%d]=%f\n",i,j,W[i][j]);}fprintf(fp,"\n");for(ii=0;ii<ON;ii++){for(jj=0;jj<HN;jj++)fprintf(fp,"V[%d][%d]=%f\n",ii,jj,V[ii][jj]);}fclose(fp);printf("\nThe result of “权值.txt”(quanzhi) has been saved successfully!\nPress any key to continue..."); getch();if((fp=fopen("f:\\bp\\阈值.txt","a"))==NULL) /*save the result at f:\hsz\bpc\*/{printf("Cannot open file strike any key exit!");getch();exit(1);}fprintf(fp,"Save the result of “输出层的阈值”(huozhi) as follows:\n");for(k=0;k<ON;K++)fprintf(fp,"\nSave the result of “隐层的阈值为”(huozhi) as follows:\n");for(kk=0;kk<HN;KK++)fprintf(fp,"YU_HN[%d]=%f\n",kk,YU_HN[kk]);fclose(fp);printf("\nThe result of “阈值.txt”(huozhi) has been saved successfully!\nPress any key to continue..."); getch ();}/**********************//**程序入口, 即主程序**//**********************/void main(){double Pre_error;double sum_err;int study;int flag;flag=30000;a=0.7;b=0.7;alpha=0.9;study=0;Pre_error=0.0001;/*实际值为Pre_error=0.0001;*/Start_Show(); /*调用函数, 显示开始界面*/GetTrainingData();initial ();do{int m;++study;for(m=0;m<N;m++){input_P(m);input_T(m);H_I_O();O_I_O();Err_O_H(m);Err_H_I();saveWV(m); /****************/Delta_O_H(m); /*(m,study)*/Delta_H_I(m); /*(m,study)*/}sum_err=Err_Sum();printf("sum_err=%f\n",sum_err);printf("Pre_error=%f\n\n",Pre_error);if(study>flag){printf("\n*******************************\n");printf("*****************************\n");getch();break;}} while (sum_err>Pre_error);printf("\n****************\n");printf("\nThe program have studyed for [%d] times!\n",study); printf("\n****************\n");savequan(); /*save the results, 保存计算权值*/End_Show();}==========================权值.txt{Save the result of “权值”(quanzhi) as follows:W[0][0]=0.350578W[0][1]=-1.008697W[0][2]=-0.962250W[1][0]=0.055661W[1][1]=-0.372367W[1][2]=-0.890795W[2][0]=0.129752W[2][1]=-0.332591W[2][2]=-0.521561V[0][0]=-2.932654V[0][1]=-3.720583V[0][2]=-2.648183V[1][0]=2.938970V[1][1]=1.633281V[1][2]=1.944077}阈值.txt{Save the result of “输出层的阈值”(huozhi) as follows:YU_ON[0]=-4.226843YU_ON[1]=1.501791Save the result of “隐层的阈值为”(huozhi) as follows:YU_HN[0]=-0.431459YU_HN[1]=0.452127YU_HN[2]=0.258449}==================================。
(Complete Version) Artificial Neural Network BP Algorithm C Program with Plotting
```c
int TrainBp(bp_nn *bp, float x[COUT][IN_COUT], int y[COUT][OUT_COUT])
{
    // Train the bp network on samples x with desired outputs y
    double f = (*bp).b;  // accuracy-control parameter
    double a = (*bp).a;  // learning rate
    int h = (*bp).h;     // number of hidden-layer nodes
    int i, j, k, n, d;
    double temp;
    double e = f + 1;
    double c;
    double Ep[10000];
    for (i = 0; i < IN_COUT; i++)  // copy the weight matrices out of the struct
        ...
    ...
        for (k = 0; k < h; k++)
            v[j][k] = v[j][k] + a * x[i][j] * ChgH[k];
        }
        if (n % 10 == 0)
            printf("Error: %f\n", Ep[n]);
    }
    printf("Total iterations: %d\n", n);
}

// Fragment of the weight initialization:
            (*bp).v[i][j] = rand() / (double)(RAND_MAX);
    for (i = 0; i < (*bp).h; i++)
        for (j = 0; j < OUT_COUT; j++)
            (*bp).w[i][j] = rand() / (double)(RAND_MAX);
    return 1;
```
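The Ep[] array above records the per-epoch error, which is what makes this version plottable. One way to get that curve out for plotting, sketched under the assumption that Ep and the epoch count n are available (the file name is made up):

```c
#include <stdio.h>

/* Dump the per-epoch training error to a CSV file so it can be plotted
   with any external tool. `ep` / `epochs` mirror Ep[] and n above. */
int save_error_curve(const double *ep, int epochs, const char *path)
{
    FILE *fp = fopen(path, "w");
    if (fp == NULL)
        return -1;
    fprintf(fp, "epoch,error\n");
    for (int i = 0; i < epochs; i++)
        fprintf(fp, "%d,%f\n", i, ep[i]);
    fclose(fp);
    return 0;
}
```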
C Implementation of the BP Neural Network Algorithm
```c
// BP neural network algorithm, C version
// Compiles without syntax errors under VS2010 and can be run directly
// Simple comments added
// Learning exchanges welcome
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>

#define N_Out 2     // dimension of the output vector
#define N_In 3      // dimension of the input vector
#define N_Sample 6  // number of samples

// BP artificial neural network
typedef struct {
    int LayerNum;         // number of hidden-layer nodes
    double v[N_In][50];   // hidden-layer weight matrix; at most 50 hidden nodes
    double w[50][N_Out];  // output-layer weight matrix
    double StudyRate;     // learning rate
    double Accuracy;      // accuracy-control parameter
    int MaxLoop;          // maximum number of iterations
} BPNet;

// Sigmoid function
double fnet(double net)
{
    return 1 / (1 + exp(-net));
}

// Initialization
int InitBpNet(BPNet *BP);
// Train the BP network on samples x with desired outputs y
int TrainBpNet(BPNet *BP, double x[N_Sample][N_In], int y[N_Sample][N_Out]);
// Use the BP network
int UseBpNet(BPNet *BP);

// Main function
int main()
{
    // training samples
    double x[N_Sample][N_In] = { {0.8,0.5,0}, {0.9,0.7,0.3}, {1,0.8,0.5},
                                 {0,0.2,0.3}, {0.2,0.1,1.3}, {0.2,0.7,0.8} };
    // desired outputs
    int y[N_Sample][N_Out] = { {0,1}, {0,1}, {0,1}, {1,1}, {1,0}, {1,0} };
    BPNet BP;
    InitBpNet(&BP);        // initialize the BP network structure
    TrainBpNet(&BP, x, y); // train the BP network
    UseBpNet(&BP);         // test the BP network
    return 1;
}

// Use the BP network
int UseBpNet(BPNet *BP)
{
    double Input[N_In];
    double Out1[50];
    double Out2[N_Out]; // Out1: hidden-layer output; Out2: output-layer output
    // keep running until the program is interrupted
    while (1) {
        printf("Enter 3 numbers:\n");
        int i, j;
        for (i = 0; i < N_In; i++)
            scanf_s("%lf", &Input[i]);
        double Tmp;
        for (i = 0; i < (*BP).LayerNum; i++) {
            Tmp = 0;
            for (j = 0; j < N_In; j++)
                Tmp += Input[j] * (*BP).v[j][i];
            Out1[i] = fnet(Tmp);
        }
        for (i = 0; i < N_Out; i++) {
            Tmp = 0;
            for (j = 0; j < (*BP).LayerNum; j++)
                Tmp += Out1[j] * (*BP).w[j][i];
            Out2[i] = fnet(Tmp);
        }
        printf("Result: ");
        for (i = 0; i < N_Out; i++)
            printf("%.3f ", Out2[i]);
        printf("\n");
    }
    return 1;
}

// Train the BP network on samples x with desired outputs y
int TrainBpNet(BPNet *BP, double x[N_Sample][N_In], int y[N_Sample][N_Out])
{
    double f = (*BP).Accuracy;         // accuracy-control parameter
    double a = (*BP).StudyRate;        // learning rate
    int LayerNum = (*BP).LayerNum;     // number of hidden-layer nodes
    double v[N_In][50], w[50][N_Out];  // weight matrices
    double ChgH[50], ChgO[N_Out];      // correction matrices
    double Out1[50], Out2[N_Out];      // hidden- and output-layer outputs
    int MaxLoop = (*BP).MaxLoop;       // maximum number of iterations
    int i, j, k, n;
    double Tmp;
    for (i = 0; i < N_In; i++)  // copy the weight matrices out of the struct
        for (j = 0; j < LayerNum; j++)
            v[i][j] = (*BP).v[i][j];
    for (i = 0; i < LayerNum; i++)
        for (j = 0; j < N_Out; j++)
            w[i][j] = (*BP).w[i][j];
    double e = f + 1;
    // train the network on each sample
    for (n = 0; e > f && n < MaxLoop; n++) {
        e = 0;
        for (i = 0; i < N_Sample; i++) {
            // hidden-layer output vector
            for (k = 0; k < LayerNum; k++) {
                Tmp = 0;
                for (j = 0; j < N_In; j++)
                    Tmp = Tmp + x[i][j] * v[j][k];
                Out1[k] = fnet(Tmp);
            }
            // output-layer output vector
            for (k = 0; k < N_Out; k++) {
                Tmp = 0;
                for (j = 0; j < LayerNum; j++)
                    Tmp = Tmp + Out1[j] * w[j][k];
                Out2[k] = fnet(Tmp);
            }
            // output-layer weight corrections
            for (j = 0; j < N_Out; j++)
                ChgO[j] = Out2[j] * (1 - Out2[j]) * (y[i][j] - Out2[j]);
            // output error
            for (j = 0; j < N_Out; j++)
                e = e + (y[i][j] - Out2[j]) * (y[i][j] - Out2[j]);
            // hidden-layer weight corrections
            for (j = 0; j < LayerNum; j++) {
                Tmp = 0;
                for (k = 0; k < N_Out; k++)
                    Tmp = Tmp + w[j][k] * ChgO[k];
                ChgH[j] = Tmp * Out1[j] * (1 - Out1[j]);
            }
            // update the output-layer weight matrix
            for (j = 0; j < LayerNum; j++)
                for (k = 0; k < N_Out; k++)
                    w[j][k] = w[j][k] + a * Out1[j] * ChgO[k];
            for (j = 0; j < N_In; j++)
                for (k = 0; k < LayerNum; k++)
                    v[j][k] = v[j][k] + a * x[i][j] * ChgH[k];
        }
        if (n % 10 == 0)
            printf("Error: %f\n", e);
    }
    printf("Total iterations: %d\n", n);
    printf("Adjusted hidden-layer weight matrix:\n");
    for (i = 0; i < N_In; i++) {
        for (j = 0; j < LayerNum; j++)
            printf("%f ", v[i][j]);
        printf("\n");
    }
    printf("Adjusted output-layer weight matrix:\n");
    for (i = 0; i < LayerNum; i++) {
        for (j = 0; j < N_Out; j++)
            printf("%f ", w[i][j]);
        printf("\n");
    }
    // copy the results back into the struct
    for (i = 0; i < N_In; i++)
        for (j = 0; j < LayerNum; j++)
            (*BP).v[i][j] = v[i][j];
    for (i = 0; i < LayerNum; i++)
        for (j = 0; j < N_Out; j++)
            (*BP).w[i][j] = w[i][j];
    printf("BP network training finished!\n");
    return 1;
}

// Initialization
int InitBpNet(BPNet *BP)
{
    printf("Enter the number of hidden-layer nodes (at most 50):\n");
    scanf_s("%d", &(*BP).LayerNum);
    printf("Enter the learning rate:\n");
    scanf_s("%lf", &(*BP).StudyRate); // (*BP).StudyRate is a double, so %lf is required
    printf("Enter the accuracy-control parameter:\n");
    scanf_s("%lf", &(*BP).Accuracy);
    printf("Enter the maximum number of iterations:\n");
    scanf_s("%d", &(*BP).MaxLoop);
    int i, j;
    srand((unsigned)time(NULL));
    for (i = 0; i < N_In; i++)
        for (j = 0; j < (*BP).LayerNum; j++)
            (*BP).v[i][j] = rand() / (double)(RAND_MAX);
    for (i = 0; i < (*BP).LayerNum; i++)
        for (j = 0; j < N_Out; j++)
            (*BP).w[i][j] = rand() / (double)(RAND_MAX);
    return 1;
}
```
C++ Implementation of a BP Neural Network
C++实现神经BP神经⽹络本⽂实例为⼤家分享了C++实现神经BP神经⽹络的具体代码,供⼤家参考,具体内容如下BP.h#pragma once#include<vector>#include<stdlib.h>#include<time.h>#include<cmath>#include<iostream>using std::vector;using std::exp;using std::cout;using std::endl;class BP{private:int studyNum;//允许学习次数double h;//学习率double allowError;//允许误差vector<int> layerNum;//每层的节点数,不包括常量节点1vector<vector<vector<double>>> w;//权重vector<vector<vector<double>>> dw;//权重增量vector<vector<double>> b;//偏置vector<vector<double>> db;//偏置增量vector<vector<vector<double>>> a;//节点值vector<vector<double>> x;//输⼊vector<vector<double>> y;//期望输出void iniwb();//初始化w与bvoid inidwdb();//初始化dw与dbdouble sigmoid(double z);//激活函数void forward();//前向传播void backward();//后向传播double Error();//计算误差public:BP(vector<int>const& layer_num, vector<vector<double>>const & input_a0,vector<vector<double>> const & output_y, double hh = 0.5, double allerror = 0.001, int studynum = 1000); BP();void setLayerNumInput(vector<int>const& layer_num, vector<vector<double>> const & input);void setOutputy(vector<vector<double>> const & output_y);void setHErrorStudyNum(double hh, double allerror,int studynum);void run();//运⾏BP神经⽹络vector<double> predict(vector<double>& input);//使⽤已经学习好的神经⽹络进⾏预测~BP();};BP.cpp#include "BP.h"BP::BP(vector<int>const& layer_num, vector<vector<double>>const & input,vector<vector<double>> const & output_y, double hh, double allerror,int studynum){layerNum = layer_num;x = input;//输⼊多少个节点的数据,每个节点有多少份数据y = output_y;h = hh;allowError = allerror;a.resize(layerNum.size());//有这么多层⽹络节点for (int i = 0; i < layerNum.size(); i++){a[i].resize(layerNum[i]);//每层⽹络节点有这么多个节点for (int j = 0; j < layerNum[i]; j++)a[i][j].resize(input[0].size());}a[0] = input;studyNum = studynum;}BP::BP(){a = {};y = {};h = 0;allowError = 0;}BP::~BP(){}void BP::setLayerNumInput(vector<int>const& layer_num, vector<vector<double>> const & input) {layerNum = layer_num;x = input;a.resize(layerNum.size());//有这么多层⽹络节点for (int i = 0; i < layerNum.size(); i++){a[i].resize(layerNum[i]);//每层⽹络节点有这么多个节点for (int j = 0; j < layerNum[i]; j++)a[i][j].resize(input[0].size());}a[0] = input;}void BP::setOutputy(vector<vector<double>> const & output_y){y = output_y;}void BP::setHErrorStudyNum(double hh, double allerror,int studynum){h = hh;allowError = allerror;studyNum = studynum;}//初始化权重矩阵void BP::iniwb(){w.resize(layerNum.size() - 1);b.resize(layerNum.size() - 1);srand((unsigned)time(NULL));//节点层数层数for (int l = 0; l < layerNum.size() - 1; l++){w[l].resize(layerNum[l + 1]);b[l].resize(layerNum[l + 1]);//对应后层的节点for (int j = 0; j < layerNum[l + 1]; j++){w[l][j].resize(layerNum[l]);b[l][j] = -1 + 2 * (rand() / RAND_MAX);//对应前层的节点for (int k = 0; k < layerNum[l]; k++)w[l][j][k] = -1 + 2 * (rand() / RAND_MAX);}}}void BP::inidwdb(){dw.resize(layerNum.size() - 1);db.resize(layerNum.size() - 1);//节点层数层数for (int l = 0; l < layerNum.size() - 1; l++){dw[l].resize(layerNum[l + 1]);db[l].resize(layerNum[l + 1]);//对应后层的节点for (int j = 0; j < layerNum[l + 1]; j++){dw[l][j].resize(layerNum[l]);db[l][j] = 0;for (int k = 0; k < layerNum[l]; k++)w[l][j][k] = 0;}}}//激活函数double BP::sigmoid(double z){return 1.0 / (1 + exp(-z));}void BP::forward(){for (int l = 1; l < layerNum.size(); l++){for (int i = 0; i < layerNum[l]; i++){for (int j = 0; j < x[0].size(); j++){a[l][i][j] = 0;//第l层第i个节点第j个数据样本//计算变量节点乘权值的和for (int k = 0; k < layerNum[l - 1]; k++)a[l][i][j] += a[l - 1][k][j] * w[l - 1][i][k];//加上节点偏置a[l][i][j] += b[l - 1][i];a[l][i][j] = sigmoid(a[l][i][j]);}}}}void BP::backward(){int xNum = x[0].size();//样本个数//daP第l层da,daB第l+1层davector<double> daP, daB;for (int 
j = 0; j < xNum; j++){//处理最后⼀层的dwdaP.clear();daP.resize(layerNum[layerNum.size() - 1]);for (int i = 0, l = layerNum.size() - 1; i < layerNum[l]; i++){daP[i] = a[l][i][j] - y[i][j];for (int k = 0; k < layerNum[l - 1]; k++)dw[l - 1][i][k] += daP[i] * a[l][i][j] * (1 - a[l][i][j])*a[l - 1][k][j]; db[l - 1][i] += daP[i] * a[l][i][j] * (1 - a[l][i][j]);}//处理剩下层的权重w的增量Dwfor (int l = layerNum.size() - 2; l > 0; l--){daB = daP;daP.clear();daP.resize(layerNum[l]);for (int k = 0; k < layerNum[l]; k++){daP[k] = 0;for (int i = 0; i < layerNum[l + 1]; i++)daP[k] += daB[i] * a[l + 1][i][j] * (1 - a[l + 1][i][j])*w[l][i][k]; //dwfor (int i = 0; i < layerNum[l - 1]; i++)dw[l - 1][k][i] += daP[k] * a[l][k][j] * (1 - a[l][k][j])*a[l - 1][i][j]; //dbdb[l-1][k] += daP[k] * a[l][k][j] * (1 - a[l][k][j]);}}}//计算dw与db平均值{//对应后层的节点for (int j = 0; j < layerNum[l + 1]; j++){db[l][j] = db[l][j] / xNum;//对应前层的节点for (int k = 0; k < layerNum[l]; k++)w[l][j][k] = w[l][j][k] / xNum;}}//更新参数w与bfor (int l = 0; l < layerNum.size() - 1; l++){for (int j = 0; j < layerNum[l + 1]; j++){b[l][j] = b[l][j] - h * db[l][j];//对应前层的节点for (int k = 0; k < layerNum[l]; k++)w[l][j][k] = w[l][j][k] - h * dw[l][j][k];}}}double BP::Error(){int l = layerNum.size() - 1;double temp = 0, error = 0;for (int i = 0; i < layerNum[l]; i++)for (int j = 0; j < x[0].size(); j++){temp = a[l][i][j] - y[i][j];error += temp * temp;}error = error / x[0].size();//求对每⼀组样本的误差平均 error = error / 2;cout << error << endl;return error;}//运⾏神经⽹络void BP::run(){iniwb();inidwdb();int i = 0;for (; i < studyNum; i++){forward();if (Error() <= allowError){cout << "Study Success!" << endl;break;}backward();}if (i == 10000)cout << "Study Failed!" << endl;}vector<double> BP::predict(vector<double>& input) {vector<vector<double>> a1;a1.resize(layerNum.size());for (int l = 0; l < layerNum.size(); l++)a1[l].resize(layerNum[l]);a1[0] = input;for (int l = 1; l < layerNum.size(); l++)for (int i = 0; i < layerNum[l]; i++){a1[l][i] = 0;//第l层第i个节点第j个数据样本//计算变量节点乘权值的和for (int k = 0; k < layerNum[l - 1]; k++)a1[l][i] += a1[l - 1][k] * w[l - 1][i][k];//加上节点偏置a1[l][i] = sigmoid(a1[l][i]);}return a1[layerNum.size() - 1];}验证程序:#include"BP.h"int main(){vector<int> layer_num = { 1, 10, 1 };vector<vector<double>> input_a0 = { { 1,2,3,4,5,6,7,8,9,10 } };vector<vector<double>> output_y = { {0,0,0,0,1,1,1,1,1,1} };BP bp(layer_num, input_a0,output_y,0.6,0.001, 2000);bp.run();for (int j = 0; j < 30; j++){vector<double> input = { 0.5*j };vector<double> output = bp.predict(input);for (auto i : output)cout << "j:" << 0.5*j <<" pridict:" << i << " ";cout << endl;}system("pause");return 0;}输出:以上就是本⽂的全部内容,希望对⼤家的学习有所帮助,也希望⼤家多多⽀持。
BP Neural Network C Code (Runs Directly in VC++)
```c
/* Recovered fragments; elided code is marked with "..." */

/* Normalize the input data */
for (i = 0; i < In; i++)
    for (j = 0; j < Data; j++)
        d_in[j][i] = (d_in[j][i] - Minin[i] + 1) / (Maxin[i] - Minin[i] + 1);
/* Normalize the output data */
for (i = 0; i < Out; i++)
    ...

void computO(int var) /* var selects one sample */
{
    int i, j;
    double sum, y;
    for (i = 0; i < Neuron; ++i) /* value of each neuron */
    {
        ...
    }
}

void trainNetwork()
{
    int i, c = 0;
    do {
        e = 0;
        for (i = 0; i < Data; ++i)
            ...
    } while (...);
}

/* There is only one output here; with several outputs, denormalize each one */
return sum * (Maxout[0] - Minout[0] + 1) + Minout[0] - 1; /* denormalize back to the original range */
}

void writeNeuron()
{
    FILE *fp1;
    int i, j;
    if ((fp1 = fopen("neuron.txt", "w")) == NULL)
    {
        ...
    }
    for (i = 0; i < Neuron; ++i)
        for (j = 0; j < Out; ++j) {
            fprintf(fp1, "%lf ", v[j][i]);
        }
    fclose(fp1);
}

/* Fragments of main(): */
writeNeuron();
return 0;
/* test data: 1 */
```
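The normalization used in these fragments and the denormalization in the return statement form a matching pair; written out, for a raw value x on [min, max]:

$$x' = \frac{x - x_{\min} + 1}{x_{\max} - x_{\min} + 1}, \qquad x = x'\,(x_{\max} - x_{\min} + 1) + x_{\min} - 1$$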
BP Neural Network Source Code (C++)
#pragma hdrstop#include <stdio.h>#include <iostream.h>const A=30.0;const B=10.0;const MAX=500;//最大训练次数const COEF=0.0035; //网络的学习效率const BCOEF=0.001;//网络的阀值调整效率const ERROR=0.002; // 网络训练中的允许误差const ACCURACY=0.0005;//网络要求精度double sample[41][4]={{0,0,0,0},{5,1,4,19.020},{5,3,3,14.150}, {5,5,2,14.360},{5,3,3,14.150},{5,3,2,15.390},{5,3,2,15.390},{5,5,1,19.680},{5,1,2,21.060},{5,3,3,14.150},{5,5,4,12.680},{5,5,2,14.360},{5,1,3,19.610},{5,3,4,13.650},{5,5,5,12.430},{5,1,4,19.020},{5,1,4,19.020},{5,3,5,13.390},{5,5,4,12.680},{5,1,3,19.610},{5,3,2,15.390},{1,3,1,11.110},{1,5,2,6.521},{1,1,3,10.190},{1,3,4,6.043},{1,5,5,5.242},{1,5,3,5.724},{1,1,4,9.766},{1,3,5,5.870},{1,5,4,5.406},{1,1,3,10.190},{1,1,5,9.545},{1,3,4,6.043},{1,5,3,5.724},{1,1,2,11.250},{1,3,1,11.110},{1,3,3,6.380},{1,5,2,6.521},{1,1,1,16.000},{1,3,2,7.219},{1,5,3,5.724}};double w[4][10][10],wc[4][10][10],b[4][10],bc[4][10];double o[4][10],netin[4][10],d[4][10],differ;//单个样本的误差double is; //全体样本均方差int count,a;void netout(int m, int n);//计算网络隐含层和输出层的输出void calculd(int m,int n); //计算网络的反向传播误差void calcalwc(int m,int n);//计算网络权值的调整量void calcaulbc(int m,int n); //计算网络阀值的调整量void changew(int m,int n); //调整网络权值void changeb(int m,int n);//调整网络阀值void clearwc(int m,int n);//清除网络权值变化量wcvoid clearbc(int m,int n);//清除网络阀值变化量bcvoid initialw(void);//初始化NN网络权值Wvoid initialb(void); //初始化NN网络阀值void calculdiffer(void);//计算NN网络单个样本误差void calculis(void);//计算NN网络全体样本误差void trainNN(void);//训练NN网络/*计算NN网络隐含层和输出层的输出*/void netout(int m,int n){int i,j,k;//隐含层各节点的的输出for (j=1,i=2;j<=m;j++) //m为隐含层节点个数{netin[i][j]=0.0;for(k=1;k<=3;k++)//隐含层的每个节点均有三个输入变量netin[i][j]=netin[i][j]+o[i-1][k]*w[i][k][j];netin[i][j]=netin[i][j]-b[i][j];o[i][j]=A/(1+exp(-netin[i][j]/B));}//输出层各节点的输出for (j=1,i=3;j<=n;j++){netin[i][j]=0.0;for (k=1;k<=m;k++)netin[i][j]=netin[i][j]+o[i-1][k]*w[i][k][j];netin[i][j]=netin[i][j]-b[i][j];o[i][j]=A/(1+exp(-netin[i][j]/B)) ;}}/*计算NN网络的反向传播误差*/void calculd(int m,int n){int i,j,k;double t;a=count-1;d[3][1]=(o[3][1]-sample[a][3])*(A/B)*exp(-netin[3][1]/B)/pow(1+exp(-netin[3][1]/B),2);//隐含层的误差for (j=1,i=2;j<=m;j++){t=0.00;for (k=1;k<=n;k++)t=t+w[i+1][j][k]*d[i+1][k];d[i][j]=t*(A/B)*exp(-netin[i][j]/B)/pow(1+exp(-netin[i][j]/B),2);}}/*计算网络权值W的调整量*/void calculwc(int m,int n){int i,j,k;// 输出层(第三层)与隐含层(第二层)之间的连接权值的调整for (i=1,k=3;i<=m;i++){for (j=1;j<=n;j++)wc[k][i][j]=-COEF*d[k][j]*o[k-1][i]+0.5*wc[k][i][j];}// printf("\n");}//隐含层与输入层之间的连接权值的调整for (i=1,k=2;i<=m;i++){for (j=1;j<=m;j++){wc[k][i][j]=-COEF*d[k][j]*o[k-1][i]+0.5*wc[k][i][j];}//printf("\n");}}/*计算网络阀值的调整量*/void calculbc(int m,int n){int j;for (j=1;j<=m;j++){bc[2][j]=BCOEF*d[2][j];}for (j=1;j<=n;j++){bc[3][j]=BCOEF*d[3][j];}}/*调整网络权值*/void changw(int m,int n){int i,j;for (i=1;i<=3;i++)for (j=1;j<=m;j++){w[2][i][j]=0.9*w[2][i][j]+wc[2][i][j];//为了保证系统有较好的鲁棒性,计算权值时乘惯性系数0.9 printf("w[2][%d][%d]=%f\n",i,j,w[2][i][j]);}for (i=1;i<=m;i++)for (j=1;j<=n;j++){w[3][i][j]=0.9*w[3][i][j]+wc[3][i][j];printf("w[3][%d][%d]=%f\n",i,j,w[3][i][j]);}/*调整网络阀值*/void changb(int m,int n){int j;for (j=1;j<=m;j++)b[2][j]=b[2][j]+bc[2][j];for (j=1;j<=n;j++)b[3][j]=b[3][j]+bc[3][j];}/*清除网络权值变化量wc*/ void clearwc(void){for (int i=0;i<4;i++)for (int j=0;j<10;j++)for (int k=0;k<10;k++)wc[i][j][k]=0.00;}/*清除网络阀值变化量*/ void clearbc(void){for (int i=0;i<4;i++)for (int j=0;j<10;j++)bc[i][j]=0.00;}/*初始化网络权值W*/void initialw(void){int i,j,k,x;double weight;for (i=0;i<4;i++)for (j=0;j<10;j++)for (k=0;k<10;k++){randomize();x=100+random(400);weight=(double)x/5000.00; w[i][j][k]=weight;}}/*初始化网络阀值*/void initialb(void){int i,j,x;double 
fazhi;for (i=0;i<4;i++)for (j=0;j<10;j++){randomize();for (int k=0;k<12;k++){x=100+random(400);}fazhi=(double)x/50000.00;b[i][j]=fazhi;}}/*计算网络单个样本误差*/void calculdiffer(void){a=count-1;differ=0.5*(o[3][1]-sample[a][3])*(o[3][1]-sample[a][3]);}void calculis(void){int i;is=0.0;for (i=0;i<=19;i++){o[1][1]=sample[i][0];o[1][2]=sample[i][1];o[1][3]=sample[i][2];netout(8,1);is=is+(o[3][1]-sample[i][3])*(o[3][1]-sample[i][3]);}is=is/20;}/*训练网络*/void trainNN(void){long int time;int i,x[4];initialw();initialb();for (time=1;time<=MAX;time++){count=0;while(count<=40){o[1][1]=sample[count][0];o[1][2]=sample[count][1];o[1][3]=sample[count][2];count=count+1;clearwc();clearbc();netout(8,1);calculdiffer();while(differ>ERROR){calculd(8,1);calculwc(8,1);calculbc(8,1);changw(8,1);changb(8,1);netout(8,1);calculdiffer();}}printf("This is %d times training NN...\n",time);calculis();printf("is==%f\n",is);if (is<ACCURACY) break;}}//--------------------------------------------------------------------------- #pragma argsusedint main(int argc, char* argv[]){double result;int m,test[4];char ch='y';cout<<"Please wait for the train of NN:"<<endl;trainNN();cout<<"Now,this modular network can work for you."<<endl; while(ch=='y' || ch=='Y'){cout<<"Please input data to be tested."<<endl;for (m=1;m<=3;m++)cin>>test[m];ch=getchar();o[1][1]=test[1];o[1][2]=test[2];o[1][3]=test[3];netout(8,1);result=o[3][1];printf("Final result is %f.\n",result); printf("Still test?[Yes] or [No]\n"); ch=getchar();}return 0;}。
下面是另一份纯C语言实现:带动量项的BP网络,从文本文件读取训练样本,先归一化再训练,并用单独的检验样本评估预测误差。前半部分是参数定义、数据读取、归一化和初始化(反向修正、检验和主函数见后面的代码):

```c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>

/*********************************************
inpoints      为输入神经元个数,可改变
outpoints     为输出神经元个数
defaultpoints 为隐层神经元个数
datagrough    为样本数据个数
以下数据定义可以修改
*********************************************/
#define A 0              //归一化公式中的平移常数
#define a 1
#define b 1
#define c 1              //隐层激活函数1/(a+b*exp(-c*x))的参数
#define ALFA 0.85        //动量因子
#define BETA 0.2         //学习率0~1
#define Total 20000      //最大训练轮数
#define inpoints 9
#define outpoints 5
#define defaultpoints 28
#define datagrough 44
#define forecastdata 4   //检验(预测)样本个数

/**********定义所需变量********/
double InpointData[datagrough][inpoints],OutpointData[datagrough][outpoints]; /*输入输出数据*/
double InpointData_MAX[inpoints],InpointData_MIN[inpoints];     /*每个输入因素的最大、最小值*/
double OutpointData_MAX[outpoints],OutpointData_MIN[outpoints]; /*每个输出因素的最大、最小值*/
double w[defaultpoints][inpoints],limen[defaultpoints],v[outpoints][defaultpoints]; /*连接权值、阈值*/
double dlta_w[defaultpoints][inpoints],dlta_limen[defaultpoints],dlta_v[outpoints][defaultpoints]; /*连接权、阈值修正值*/
double defaultOutpoint[defaultpoints],Outpoint_dp[outpoints],Outpoint_ep[datagrough];

/**************************读数据文件******************************/
void ReadData()
{
    FILE *fp1,*fp2;
    int i,j;
    if ((fp1=fopen("D:\\data\\训练输入.txt","r"))==NULL)
    {
        printf("1can not open the file\n");
        exit(0);
    }
    for (i=0;i<datagrough;i++)
        for (j=0;j<inpoints;j++)
            fscanf(fp1,"%lf",&InpointData[i][j]);
    fclose(fp1);
    if ((fp2=fopen("D:\\data\\训练输出.txt","r"))==NULL)
    {
        printf("2can not open the file\n");
        exit(0);
    }
    for (i=0;i<datagrough;i++)
        for (j=0;j<outpoints;j++)
            fscanf(fp2,"%lf",&OutpointData[i][j]);
    fclose(fp2);
}

/**************归一化,处理之后的数据全部在[0,1]之间**************/
void unitary()
{
    int i,j;
    for (j=0;j<inpoints;j++)  //找出每列的最大、最小值,存放在InpointData_MAX[j]、InpointData_MIN[j]中
    {
        InpointData_MAX[j]=InpointData[0][j];
        InpointData_MIN[j]=InpointData[0][j];
        for (i=0;i<datagrough;i++)
            if (InpointData_MAX[j]<InpointData[i][j])
                InpointData_MAX[j]=InpointData[i][j];
            else if (InpointData_MIN[j]>InpointData[i][j])
                InpointData_MIN[j]=InpointData[i][j];
    }
    for (j=0;j<outpoints;j++)  //找出每列的最大、最小值,存放在OutpointData_MAX[j]、OutpointData_MIN[j]中
    {
        OutpointData_MAX[j]=OutpointData[0][j];
        OutpointData_MIN[j]=OutpointData[0][j];
        for (i=0;i<datagrough;i++)
            if (OutpointData_MAX[j]<OutpointData[i][j])
                OutpointData_MAX[j]=OutpointData[i][j];
            else if (OutpointData_MIN[j]>OutpointData[i][j])
                OutpointData_MIN[j]=OutpointData[i][j];
    }
    for (j=0;j<inpoints;j++)
        for (i=0;i<datagrough;i++)
            if (InpointData_MAX[j]==InpointData_MIN[j])  //整列数值相同时直接置0,避免除零
                InpointData[i][j]=0;
            else
                InpointData[i][j]=(InpointData[i][j]-InpointData_MIN[j]+A)
                                  /(InpointData_MAX[j]-InpointData_MIN[j]+A);
    for (j=0;j<outpoints;j++)
        for (i=0;i<datagrough;i++)
            if (OutpointData_MAX[j]==OutpointData_MIN[j])
                OutpointData[i][j]=0;
            else
                OutpointData[i][j]=(OutpointData[i][j]-OutpointData_MIN[j]+A)
                                   /(OutpointData_MAX[j]-OutpointData_MIN[j]+A);
}

/*********************初始化,随机赋初值**************************/
void Initialization()
{
    int i,j;
    srand((unsigned)time(NULL));
    for (i=0;i<defaultpoints;i++)   //输入层到隐层的连接权赋[-0.5,0.5]内的随机值
        for (j=0;j<inpoints;j++)
        {
            w[i][j]=(rand()*2.0/RAND_MAX-1)/2;
            dlta_w[i][j]=0;
        }
    for (i=0;i<defaultpoints;i++)   //隐层阈值赋随机初值
    {
        limen[i]=(rand()*2.0/RAND_MAX-1)/2;
        dlta_limen[i]=0;
    }
    for (i=0;i<outpoints;i++)       //隐层到输出层的连接权赋初值
        for (j=0;j<defaultpoints;j++)
        {
            v[i][j]=(rand()*2.0/RAND_MAX-1)/2;
            dlta_v[i][j]=0;
        }
}
```
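unitary() 采用极差变换做归一化(源码中 A=0 时即标准的 min-max 归一化),把每一列数据线性压缩到 [0,1]:

$$x'=\frac{x-x_{\min}+A}{x_{\max}-x_{\min}+A}$$

检验输出时再按逆变换 $y=y'\,(x_{\max}-x_{\min}+A)+x_{\min}-A$ 还原到原量纲。隐层激活函数取 $1/(a+b\,e^{-cx})$,在源码默认 a=b=c=1 时就是标准的Logistic函数。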
后半部分是单个样本的前向计算与反向修正,以及检验和主函数:

```c
/**********************求单个样本的计算输出及误差*******************************/
void out_sub1(int t)
{
    int i,j;
    double defaultInpoint[defaultpoints];
    double Outpoint_y[outpoints];
    Outpoint_ep[t]=0;
    for (i=0;i<defaultpoints;i++)   //求隐层输出O[i]
    {
        double sum=0;
        for (j=0;j<inpoints;j++)
            sum+=w[i][j]*InpointData[t][j];
        defaultInpoint[i]=sum+limen[i];
        defaultOutpoint[i]=1/(a+b*exp(-1*c*defaultInpoint[i]));
    }
    for (j=0;j<outpoints;j++)       //求网络输出Y[j],输出层为线性单元
    {
        Outpoint_y[j]=0;
        for (i=0;i<defaultpoints;i++)
            Outpoint_y[j]+=v[j][i]*defaultOutpoint[i];
        Outpoint_dp[j]=OutpointData[t][j]-Outpoint_y[j];
        Outpoint_ep[t]+=Outpoint_dp[j]*Outpoint_dp[j]/2;   //累计该样本的平方误差
    }
}

/*****************************反向修正权值******************************************/
void out_sub2(int t)
{
    int i,j,k;
    double s;
    for (i=0;i<defaultpoints;i++)
    {
        s=0;
        for (j=0;j<outpoints;j++)
        {
            s+=v[j][i]*Outpoint_dp[j];   //先用修正前的v累加反传到隐节点i的误差
            dlta_v[j][i]=ALFA*dlta_v[j][i]+BETA*Outpoint_dp[j]*defaultOutpoint[i]; //带动量项的修正量
            v[j][i]+=dlta_v[j][i];
        }
        dlta_limen[i]=ALFA*dlta_limen[i]+BETA*defaultOutpoint[i]*(1-defaultOutpoint[i])*s;
        limen[i]+=dlta_limen[i];
        for (k=0;k<inpoints;k++)
        {
            dlta_w[i][k]=ALFA*dlta_w[i][k]
                         +BETA*defaultOutpoint[i]*(1-defaultOutpoint[i])*s*InpointData[t][k];
            w[i][k]+=dlta_w[i][k];
        }
    }
}

/*******************************检验(预测)******************************************/
void forecast()
{
    int i,j,t,k=0;
    double e;                         //训练总误差
    double e1[forecastdata]={0};      //各检验样本的误差
    double sss;                       //检验样本的平均误差
    double InputData_x[forecastdata][inpoints],tp[forecastdata][outpoints];
    double defInpoint,defOutpoint[defaultpoints];
    double y[forecastdata][outpoints]; //y为网络检验输出
    FILE *fp1,*fp3;
    if ((fp1=fopen("D:\\data\\预测输入.txt","r"))==NULL)   //检验数据输入
    {
        printf("3can not open the file\n");
        exit(0);
    }
    for (i=0;i<forecastdata;i++)
        for (j=0;j<inpoints;j++)
            fscanf(fp1,"%lf",&InputData_x[i][j]);
    fclose(fp1);
    if ((fp3=fopen("D:\\data\\预测输出.txt","r"))==NULL)   //实际检验结果
    {
        printf("31can not open the file\n");
        exit(0);
    }
    for (i=0;i<forecastdata;i++)
        for (j=0;j<outpoints;j++)
            fscanf(fp3,"%lf",&tp[i][j]);
    fclose(fp3);
    for (j=0;j<inpoints;j++)          //检验数据按训练集的最大、最小值归一化
        for (i=0;i<forecastdata;i++)
            if (InpointData_MAX[j]==InpointData_MIN[j])
                InputData_x[i][j]=0;
            else
                InputData_x[i][j]=(InputData_x[i][j]-InpointData_MIN[j]+A)
                                  /(InpointData_MAX[j]-InpointData_MIN[j]+A);
    for (j=0;j<outpoints;j++)
        for (i=0;i<forecastdata;i++)
            if (OutpointData_MAX[j]==OutpointData_MIN[j])
                tp[i][j]=0;
            else
                tp[i][j]=(tp[i][j]-OutpointData_MIN[j]+A)
                         /(OutpointData_MAX[j]-OutpointData_MIN[j]+A);
    do
    {
        Initialization();             //初始化连接权值w[i][j]、limen[i]、v[k][i]
        k=0;
        do                            //训练:最多Total轮,或总误差降到0.1以下
        {
            e=0;
            for (t=0;t<datagrough;t++)
            {
                out_sub1(t);          //正向计算网络输出
                out_sub2(t);          //反向计算,修正权值
                e+=Outpoint_ep[t];    //累计输出误差
            }
            k++;
        } while ((k<Total)&&(e>0.1));
        sss=0;
        for (t=0;t<forecastdata;t++)  //用检验样本计算网络输出及误差
        {
            e1[t]=0;
            for (i=0;i<defaultpoints;i++)
            {
                double sum=0;
                for (j=0;j<inpoints;j++)
                    sum+=w[i][j]*InputData_x[t][j];
                defInpoint=sum+limen[i];
                defOutpoint[i]=1/(a+b*exp(-1*c*defInpoint));
            }
            for (j=0;j<outpoints;j++)
            {
                y[t][j]=0;
                for (i=0;i<defaultpoints;i++)
                    y[t][j]+=v[j][i]*defOutpoint[i];
                e1[t]+=(y[t][j]-tp[t][j])*(y[t][j]-tp[t][j])/2;
                //反归一化,把预测值还原到原量纲
                y[t][j]=y[t][j]*(OutpointData_MAX[j]-OutpointData_MIN[j]+A)+OutpointData_MIN[j]-A;
            }
            sss+=e1[t];
        }
        sss=sss/forecastdata;         //检验样本平均误差
        printf(" %lf %lf\n",e,sss);
    } while (sss>0.12);               //检验误差不达标则重新初始化再训练
}

/********************************************************/
int main(void)
{
    int i,j,k;
    FILE *fp2;
    ReadData();    //读训练数据:输入和输出
    unitary();     //归一化,将输入输出数据归一,结果在[0,1]中
    forecast();    //训练网络并检验误差
    if ((fp2=fopen("D:\\data\\计算权值.txt","w"))==NULL)   //文件输出训练好的权值
    {
        printf("6can not open the file\n");
        exit(0);
    }
    for (i=0;i<defaultpoints;i++)
    {
        for (k=0;k<inpoints;k++)
            fprintf(fp2," %lf ",w[i][k]);
        fprintf(fp2,"\n");
    }
    fprintf(fp2,"\n");
    for (i=0;i<defaultpoints;i++)
        fprintf(fp2," %lf ",limen[i]);
    fprintf(fp2,"\n\n");
    for (i=0;i<defaultpoints;i++)
    {
        for (j=0;j<outpoints;j++)
            fprintf(fp2," %lf ",v[j][i]);
        fprintf(fp2,"\n");
    }
    fclose(fp2);
    return 0;
}
```
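out_sub2 中的反向修正使用带动量项的梯度下降(ALFA 为动量因子 α,BETA 为学习率 β)。输出层为线性单元,故输出层误差直接取 $d_j=t_j-y_j$,各修正量为:

$$\Delta v_{ji}\leftarrow\alpha\,\Delta v_{ji}+\beta\,d_j\,o_i,\qquad \Delta w_{ik}\leftarrow\alpha\,\Delta w_{ik}+\beta\,o_i(1-o_i)\Bigl(\sum_j v_{ji}\,d_j\Bigr)x_k$$

程序用 fscanf 按 "%lf" 读取空白分隔的数字,因此数据文件只需按行列顺序存放数值:训练输入.txt 为 44×9 个数,训练输出.txt 为 44×5 个数;检验用的预测输入.txt、预测输出.txt 分别为 4×9 和 4×5 个数。下面是一个假设性的示意程序,用随机数生成占位训练数据,便于在没有真实样本时先打通整个流程(文件名和维度沿用上面源码中的定义,并假定 D:\data 目录已存在,随机内容并非真实样本):

```c
#include <stdio.h>
#include <stdlib.h>

/* 假设性示例:生成与上述程序维度一致的占位训练数据文件 */
int main(void)
{
    const int rows = 44, incols = 9, outcols = 5;   /* 对应 datagrough、inpoints、outpoints */
    int i, j;
    FILE *fin  = fopen("D:\\data\\训练输入.txt", "w");
    FILE *fout = fopen("D:\\data\\训练输出.txt", "w");
    if (fin == NULL || fout == NULL) {
        printf("can not open the file\n");
        return 1;
    }
    for (i = 0; i < rows; i++) {
        for (j = 0; j < incols; j++)
            fprintf(fin, "%f ", (double)rand() / RAND_MAX);   /* 随机占位输入 */
        fprintf(fin, "\n");
        for (j = 0; j < outcols; j++)
            fprintf(fout, "%f ", (double)rand() / RAND_MAX);  /* 随机占位输出 */
        fprintf(fout, "\n");
    }
    fclose(fin);
    fclose(fout);
    return 0;
}
```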