c语言 pso粒子群优化算法源代码
pso优化bp算法python代码
pso优化bp算法python代码PSO优化BP算法Python代码是一种利用粒子群优化算法优化BP 算法的Python代码。
通过将粒子群优化算法与BP算法相结合,可以更好地解决BP算法在训练过程中容易陷入局部最优解的问题,从而提高模型的准确性和收敛速度。
# Example: PSO-optimized BP neural network (2-3-1, sigmoid) solving XOR.
# PSO searches the flattened weight/bias vector instead of backprop, which
# helps avoid the local minima plain BP training falls into.
import numpy as np
import random


class BPNN:
    """Minimal feed-forward net: n_input -> n_hidden -> n_output, sigmoid.

    Parameters are set externally by the optimizer, so no backprop code
    is needed here.
    """

    def __init__(self, n_input, n_hidden, n_output):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.n_output = n_output
        # Random initial parameters; the optimizer overwrites these.
        self.w1 = np.random.rand(n_input, n_hidden)
        self.b1 = np.random.rand(n_hidden)
        self.w2 = np.random.rand(n_hidden, n_output)
        self.b2 = np.random.rand(n_output)

    def sigmoid(self, x):
        return 1.0 / (1.0 + np.exp(-x))

    def forward(self, x):
        """Propagate a single sample through both layers."""
        z1 = self.sigmoid(np.dot(x, self.w1) + self.b1)
        return self.sigmoid(np.dot(z1, self.w2) + self.b2)

    def predict(self, X):
        """Row-wise forward pass over a batch X of shape (n_samples, n_input)."""
        Y = np.zeros((X.shape[0], self.n_output))
        for i in range(X.shape[0]):
            Y[i] = self.forward(X[i])
        return Y


class Particle:
    """One PSO particle: a candidate parameter vector plus its velocity."""

    def __init__(self, dim):
        self.position = np.random.rand(dim)
        self.velocity = np.random.rand(dim)
        self.fitness = float('inf')
        self.best_position = self.position.copy()
        self.best_fitness = float('inf')

    def update_fitness(self, fitness):
        # Track the personal best (minimization).
        self.fitness = fitness
        if fitness < self.best_fitness:
            self.best_fitness = fitness
            self.best_position = self.position.copy()


class PSO:
    """Global-best PSO minimizing `func` over the box [lb, ub]^dim.

    `func` must accept a single 1-D position array; bind extra data with a
    lambda/closure. (The original passed loss_function directly, but that
    function takes three arguments, so every call raised TypeError.)
    """

    def __init__(self, func, dim, n_particles, max_iter, lb, ub,
                 w=0.729, c1=1.49445, c2=1.49445):
        self.func = func
        self.dim = dim
        self.n_particles = n_particles
        self.max_iter = max_iter
        self.lb = lb
        self.ub = ub
        self.w = w            # inertia weight
        self.c1 = c1          # cognitive acceleration factor
        self.c2 = c2          # social acceleration factor
        self.particles = [Particle(dim) for _ in range(n_particles)]
        self.gbest_position = np.zeros(dim)
        self.gbest_fitness = float('inf')

    def optimize(self):
        """Run max_iter sweeps, updating gbest_position/gbest_fitness in place."""
        for _ in range(self.max_iter):
            for p in self.particles:
                p.velocity = (self.w * p.velocity
                              + self.c1 * random.random() * (p.best_position - p.position)
                              + self.c2 * random.random() * (self.gbest_position - p.position))
                # np.clip handles per-dimension array bounds correctly; the
                # original masked assignment (pos[pos < lb] = lb) raises a
                # shape mismatch when lb/ub are arrays.
                p.position = np.clip(p.position + p.velocity, self.lb, self.ub)
                fitness = self.func(p.position)
                p.update_fitness(fitness)
                if fitness < self.gbest_fitness:
                    self.gbest_fitness = fitness
                    self.gbest_position = p.position.copy()


def loss_function(theta, X, Y):
    """MSE of a 2-3-1 BPNN whose parameters are unpacked from theta.

    theta layout (13 values): w1 = theta[0:6], b1 = theta[6:9],
    w2 = theta[9:12], b2 = theta[12:13]. The original used dim=12, which
    left theta[12:] empty and crashed on reshape.
    """
    n_input, n_hidden, n_output = 2, 3, 1
    nn = BPNN(n_input, n_hidden, n_output)
    nn.w1 = theta[0:6].reshape(n_input, n_hidden)
    nn.b1 = theta[6:9].reshape(n_hidden)
    nn.w2 = theta[9:12].reshape(n_hidden, n_output)
    nn.b2 = theta[12:].reshape(n_output)
    return np.mean((nn.predict(X) - Y) ** 2)


if __name__ == "__main__":
    # XOR training data.
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    Y = np.array([[0], [1], [1], [0]])
    DIM = 13  # 6 (w1) + 3 (b1) + 3 (w2) + 1 (b2)
    lb = np.array([-5.0] * DIM)
    ub = np.array([5.0] * DIM)
    pso = PSO(lambda th: loss_function(th, X, Y), dim=DIM,
              n_particles=20, max_iter=100, lb=lb, ub=ub)
    pso.optimize()
    print('Global best fitness:', pso.gbest_fitness)
    print('Global best position:', pso.gbest_position)
    # Rebuild the net from the best parameters and show its predictions.
    theta = pso.gbest_position
    nn = BPNN(2, 3, 1)
    nn.w1 = theta[0:6].reshape(2, 3)
    nn.b1 = theta[6:9].reshape(3)
    nn.w2 = theta[9:12].reshape(3, 1)
    nn.b2 = theta[12:].reshape(1)
    print('Predicted output:', nn.predict(X))
粒子群优化算法 简洁 c语言
粒子群优化算法的C代码如下,运行后为什么提示:d:\program files\microsoft visual studio\vc98\include\eh.h(32) : fatal error C1189: #error : "eh.h is only for C++!"费解了。
由于学matlab,我C语言长时间未碰,有点生疏。
还请各位多多指导,真的谢谢了!#include<iostream>#include<fstream>#include<cmath>#include<ctime>#include<string>#include<iomanip>#include<cstdlib>using namespace std;#include"ran_number.h"const double pi=3.1415926;const int num=60; /*粒子群规模*/const int T=2000; /*最大迭代次数*///**************适应度函数*************//double func(double x){double f=x*sin(10*x*pi)+2.0;f=1.0/f;return f;}//----------------主程序----------------//void main(int argc,char* argv[]){double ran; //随机变量RandomNumber rand;double c1=2.05,c2=2.05; //学习因子double d1=0.2,d2=0.7; //控制因子double ws=0.9,we=0.4; //最大最小惯性权重double x_down=-1.0,x_up=2.0; //粒子空间上下界double w; //惯性权重int m=2;double Vmax; //粒子最大速度double x[num]; //粒子位置(控制参数)double v[num]; //粒子速度double g[num]; //粒子适应度double pbest[num]; //粒子个体最好值double pbest_x[num]; //粒子个体最好位值double gbest; //粒子群体最好值double gbest_x; //粒子群体最好位置Vmax=(x_up-x_down)/8;//--初始化粒子群:位置、速度、个体最优值for(int i=0;i<num;i++){pbest[i]=0.0;ran=rand.fRandom();x[i]=x_down+(x_up-x_down)*ran;v[i]=Vmax*(2*ran-1); //初始化粒子速度pbest[i]=func(x[i]);pbest_x[i]=x[i];}gbest=pbest[0];gbest_x=x[0];//--------------第一次迭代得到的群体最优----------//for(i=0;i<num;i++) // for 粒子数循环{if(pbest[i]<gbest){gbest=pbest[i];gbest_x=pbest_x[i];}}//---------------共T次迭代过程---------------//while(m<=T) //迭代次数循环{w=(ws-we-d1)*exp(1/(1+d2*(m-1)/t)); //惯性权重for(i=0;i<num;i++) //粒子数循环{g[i]=0.0;ran=rand.fRandom();v[i]=w*v[i]+c1*ran*(pbest_x[i]-x[i])+c2*ran*(gbest_x-x[i]);x[i]+=v[i];if(x[i]<x_down){x[i]=x_down;} //保证粒子位置不能超越下界if(x[i]>x_up){x[i]=x_up;} //保证粒子位置不能超出上界g[i]=func(x[i]);if(pbest[i]>g[i]){pbest[i]=g[i];pbest_x[i]=x[i];}if(pbest[i]<gbest){gbest=pbest[i];gbest_x=pbest_x[i];}printf("第%d次迭代的群体最优值及最优位置为%lf %lf ",m,gbest,gbest_x); //输出第m次迭代的群体最优值及最优位置} //结束for循环m=m+1;} //结束while循环printf("全局最优解及全局最优位置为%lf %lf ",gbest,gbest_x);下面为改正点:注意: vc在处理.c 文件是按C 语言编译的,所以假如有以下文件: 1.c#include <fstream>int main(int argc, char* argv[]){return 0;}那么编译时就会出错:fatal error C1189: #error : "eh.h is only for C++!"这是因为fstream标准库要求用到eh.h文件,而Exception Handling的实现需要c++支持。
混沌粒子群优化算法C源程序
chaos(ps,psbest,d,xmax,xmin);
}
cout<<"收敛结果是"<<psbest<<endl;
}
#define M 20 //粒子的个数
#define D 500 //迭代次数
#define rmax RAND_MAX
typedef struct particle //粒子的结构(包含n维的位置x,速度v,最优位置p,适应度pbest)
{
double x[N];
if((pts[i].x[j]>xmax[j])||(pts[i].x[j]<xmin[j]))
pts[i].x[j]=(xmax[j]-xmin[j])*(double)rand()/(double)RAND_MAX+xmin[j];
}
if(f(pts[i].x)>pts[i].pbest)
for(int i=0;(i<D)&&(psbest<fmin);i++)
{
update(pts,ps,psbest,k,xmax,xmin,vmax,vmin); //更新粒子群
p=1-1/(1+log(k));
if(p>=((double)rand()/(double)RAND_MAX)) //满足混沌搜索的条件
#include<iostream.h>
#include <math.h>
#include<stdlib.h>
pso算法代码
pso算法代码PSO算法简介粒子群优化(Particle Swarm Optimization,PSO)算法是一种基于群体智能的优化算法,它模拟了鸟群或鱼群等生物在搜索过程中的行为,通过不断地调整个体位置和速度来寻找最优解。
PSO算法具有收敛速度快、易于实现、适用范围广等特点,在多目标优化、非线性优化、组合优化等领域得到了广泛应用。
# PSO flow: 1) init swarm  2) evaluate fitness  3) update personal/global
# bests  4) update velocities and positions  5) repeat until max_iter.
# Demo problem: minimize y = x^2 over [-5, 5].
import random

pop_size = 50   # number of particles
max_iter = 100  # number of iterations
w = 0.6         # inertia weight
c1 = 1.5        # cognitive learning factor
c2 = 1.5        # social learning factor


def fitness(x):
    """Objective to minimize."""
    return x ** 2


class Particle:
    """One particle: position, velocity, and its personal best."""

    def __init__(self):
        self.position = random.uniform(-5, 5)
        self.velocity = random.uniform(-1, 1)
        self.pbest_pos = self.position
        self.pbest_val = fitness(self.position)


def pso():
    """Run the swarm and return (best position, best fitness).

    BUG FIX vs the original: keep ONE persistent swarm and update it every
    iteration. The original constructed a fresh Particle inside the inner
    loop, so every velocity/position update was immediately discarded and
    no actual search took place.
    """
    swarm = [Particle() for _ in range(pop_size)]
    best = min(swarm, key=lambda p: p.pbest_val)
    gbest_pos, gbest_val = best.pbest_pos, best.pbest_val
    for _ in range(max_iter):
        for p in swarm:
            r1, r2 = random.uniform(0, 1), random.uniform(0, 1)
            p.velocity = (w * p.velocity
                          + c1 * r1 * (p.pbest_pos - p.position)
                          + c2 * r2 * (gbest_pos - p.position))
            p.position += p.velocity
            val = fitness(p.position)
            if val < p.pbest_val:       # personal best
                p.pbest_val = val
                p.pbest_pos = p.position
            if val < gbest_val:         # global best
                gbest_val = val
                gbest_pos = p.position
    return gbest_pos, gbest_val


if __name__ == '__main__':
    gbest_pos, gbest_val = pso()
    print("The minimum value of y=x^2 in [-5,5] is:", gbest_val)
    print("The optimal solution is:", gbest_pos)

# Parameter-tuning notes: the inertia weight w is usually in [0, 1].
# Small w -> small velocity changes, stronger local exploitation (risk of
# getting stuck in a local optimum); large w -> larger velocity changes,
# easier escape from local optima.
C++实现粒子群优化算法代码
// C++ implementation of a basic particle swarm optimizer (PSO).
// Minimizes fitness() (sphere function by default) over [low, high]^dim.
#include <iostream>
#include <cmath>
#include <ctime>
#include <cstdlib>
using namespace std;

const int pNum = 10;         // swarm size
const int generation = 100;  // number of iterations
const int dim = 2;           // search-space dimension
const double low = -10;      // lower bound per coordinate
const double high = 10;      // upper bound per coordinate
const double vMax = 10;      // velocity clamp
const double w = 0.5;        // inertia weight
const double c1 = 2;         // cognitive acceleration factor
const double c2 = 2;         // social acceleration factor

double p[pNum][dim];         // particle positions
double pv[pNum][dim];        // particle velocities
double pBest[pNum][dim];     // per-particle best positions
double pFitness[pNum];       // per-particle best fitness values
double gFitness;             // global best fitness
double gBest[dim];           // global best position

// Objective to minimize; replace with the problem-specific function.
double fitness(double p[])
{
    return p[0] * p[0] + p[1] * p[1];
}

// Randomly place particles and velocities, then seed personal/global bests.
void initialize()
{
    for (int i = 0; i < pNum; i++)
    {
        for (int j = 0; j < dim; j++)
        {
            p[i][j] = low + (high - low) * 1.0 * rand() / RAND_MAX;
            // BUG FIX: the original wrote pBest[i][j] = p[1][j], seeding
            // every personal best with particle 1's start position.
            pBest[i][j] = p[i][j];
            pv[i][j] = -vMax + 2.0 * vMax * rand() / RAND_MAX;
        }
    }
    for (int i = 0; i < pNum; i++)
        pFitness[i] = fitness(p[i]);
    gFitness = pFitness[0];
    for (int i = 0; i < dim; i++)
        gBest[i] = pBest[0][i];
    for (int i = 1; i < pNum; i++)
    {
        if (gFitness > pFitness[i])
        {
            gFitness = pFitness[i];
            for (int j = 0; j < dim; j++)
                gBest[j] = pBest[i][j];
        }
    }
}

// One PSO step: clamped velocity update, then clamped position update.
void update()
{
    for (int i = 0; i < pNum; i++)
    {
        for (int j = 0; j < dim; j++)
        {
            pv[i][j] = w * pv[i][j]
                     + c1 * (1.0 * rand() / RAND_MAX) * (pBest[i][j] - p[i][j])
                     + c2 * (1.0 * rand() / RAND_MAX) * (gBest[j] - p[i][j]);
            if (pv[i][j] < -vMax) pv[i][j] = -vMax;
            if (pv[i][j] > vMax)  pv[i][j] = vMax;
            p[i][j] += pv[i][j];
            if (p[i][j] > high) p[i][j] = high;
            if (p[i][j] < low)  p[i][j] = low;
        }
    }
}

// Refresh each particle's personal best (evaluate fitness only once).
void get_pBest()
{
    for (int i = 0; i < pNum; i++)
    {
        const double f = fitness(p[i]);
        if (pFitness[i] > f)
        {
            pFitness[i] = f;
            for (int j = 0; j < dim; j++)
                pBest[i][j] = p[i][j];
        }
    }
}

// Refresh the global best from the personal bests.
void get_gBest()
{
    for (int i = 0; i < pNum; i++)
    {
        if (pFitness[i] < gFitness)
        {
            for (int j = 0; j < dim; j++)
                gBest[j] = pBest[i][j];
            gFitness = pFitness[i];
        }
    }
}

int main()
{
    srand(time(0));
    initialize();
    for (int n = 0; n < generation; n++)
    {
        update();
        get_pBest();
        get_gBest();
    }
    cout << "gBest is (";
    for (int i = 0; i < dim - 1; i++)
        cout << gBest[i] << ",";
    cout << gBest[dim - 1] << ")" << endl;
    cout << "the best fitness is " << gFitness << endl;
    return 0;
}
原始pso算法程序
基本粒子群优化算法Matlab源程序网上找到的别人做的很简单的程序,写的很精练很工整,据说优化效果不是很理想,由于没有时间去做很多测试,所以就先摆在这里,但愿以后能用得上。
下面是主函数的源程序,优化函数则以m文件的形式放在fitness.m 里面,对不同的优化函数只要修改fitness.m就可以了通用性很强。
%------ Basic Particle Swarm Optimization (PSO) -- main.m ---------------
%------ Purpose : solve an unconstrained minimization problem
%------ Note    : the objective lives in fitness.m (appended below as a
%------           reference copy -- keep it in its own file); editing
%------           fitness.m is all that is needed for other objectives.
%------ Initial housekeeping -------------------------------------------
clear all;
clc;
format long;
%------ Algorithm parameters -------------------------------------------
c1 = 1.4962;    % cognitive learning factor
c2 = 1.4962;    % social learning factor
w = 0.7298;     % inertia weight
MaxDT = 1000;   % maximum number of iterations
D = 10;         % search-space dimension (number of unknowns)
N = 40;         % swarm size
eps = 10^(-6);  % target precision (useful when the optimum is known)
%------ Random initial positions and velocities ------------------------
for i = 1:N
    for j = 1:D
        x(i,j) = randn;   % random initial position
        v(i,j) = randn;   % random initial velocity
    end
end
%------ Evaluate the swarm; seed Pi (personal) and Pg (global) bests ---
for i = 1:N
    p(i) = fitness(x(i,:), D);
    y(i,:) = x(i,:);
end
pg = x(1,:);              % pg holds the global best position
for i = 2:N
    if fitness(x(i,:), D) < fitness(pg, D)
        pg = x(i,:);
    end
end
%------ Main loop: iterate the PSO update equations --------------------
for t = 1:MaxDT
    for i = 1:N
        v(i,:) = w*v(i,:) + c1*rand*(y(i,:)-x(i,:)) + c2*rand*(pg-x(i,:));
        x(i,:) = x(i,:) + v(i,:);
        if fitness(x(i,:), D) < p(i)      % personal best improved?
            p(i) = fitness(x(i,:), D);
            y(i,:) = x(i,:);
        end
        if p(i) < fitness(pg, D)          % global best improved?
            pg = y(i,:);
        end
    end
    Pbest(t) = fitness(pg, D);            % convergence history
end
%------ Report the result ----------------------------------------------
disp('*************************************************************')
disp('函数的全局最优位置为:')
Solution = pg'
disp('最后得到的优化极值为:')
Result = fitness(pg, D)
disp('*************************************************************')
%------ End of algorithm -- DreamSun GL & HF ---------------------------

% ---- fitness.m (place in its own file) -------------------------------
% Sphere function: sum of squares of the D components of x.
function result = fitness(x, D)
sum = 0;
for i = 1:D
    sum = sum + x(i)^2;
end
result = sum;
基于粒子群优化 算法的计算卸载源代码
基于粒子群优化算法的计算卸载源代码计算卸载是一种将计算任务从本地设备转移到云端或其他远程计算资源的技术。
通过将计算任务卸载到云端,可以利用云端的强大计算能力和资源来完成计算任务,从而减轻本地设备的计算负担,提高计算效率和性能。
粒子群优化(Particle Swarm Optimization, PSO)算法是一种启发式全局优化算法,它模拟了鸟群或鱼群的社会行为。
在 PSO 算法中,每个粒子代表一个潜在的解,粒子在搜索空间中飞行,并根据自身的经验和群体的经验来调整自己的位置和速度,以寻找最优解。
# PSO-based computation-offloading example: a simple PSO that minimizes
# objective_function (a sphere surrogate for task cost) over a box.
import random


def objective_function(x):
    """Cost surrogate for the offloading decision vector x (len >= 2)."""
    return x[0] ** 2 + x[1] ** 2


def pso(objective_func, lb, ub, dim, n_particles, max_iter,
        w=0.5, c1=2, c2=2):
    """Minimize objective_func over the box [lb, ub] (per-dimension lists).

    Returns the best position found (list of length dim).

    Fixes vs the scraped original: positions and velocities are now
    per-particle vectors of length dim (the original mixed scalar
    positions with a flat velocity list of length n_particles*dim and
    referenced the undefined name `particle_pos`), positions are clamped
    into the box, and personal bests are re-evaluated after each move
    (the original evaluated the never-updated initial bests forever).
    """
    positions = [[random.uniform(lb[d], ub[d]) for d in range(dim)]
                 for _ in range(n_particles)]
    velocities = [[random.uniform(-0.1, 0.1) for _ in range(dim)]
                  for _ in range(n_particles)]
    pbest_pos = [p[:] for p in positions]
    pbest_val = [objective_func(p) for p in positions]
    gbest_val = min(pbest_val)
    gbest_pos = pbest_pos[pbest_val.index(gbest_val)][:]

    for _ in range(max_iter):
        for j in range(n_particles):
            for d in range(dim):
                inertia = w * velocities[j][d]
                cognitive = c1 * random.random() * (pbest_pos[j][d] - positions[j][d])
                social = c2 * random.random() * (gbest_pos[d] - positions[j][d])
                velocities[j][d] = inertia + cognitive + social
                # Move and clamp back into the search box.
                positions[j][d] = min(max(positions[j][d] + velocities[j][d],
                                          lb[d]), ub[d])
            val = objective_func(positions[j])
            if val < pbest_val[j]:
                pbest_val[j] = val
                pbest_pos[j] = positions[j][:]
                if val < gbest_val:
                    gbest_val = val
                    gbest_pos = positions[j][:]
    return gbest_pos


if __name__ == "__main__":
    lb = [-10, -10]
    ub = [10, 10]
    best_pos = pso(objective_function, lb, ub, dim=2, n_particles=10,
                   max_iter=100)
    print("最佳位置:", best_pos)
    print("最佳解:", objective_function(best_pos))
【优秀作业】粒子群优化算法
【优秀作业】粒子群优化算法粒子群优化算法一、概述粒子群优化算法(Particle Swarm Optimization,PSO)的思想来源于对鸟捕食行为的模仿,最初,Reynolds.Heppner 等科学家研究的是鸟类飞行的美学和那些能使鸟群同时突然改变方向,分散,聚集的定律上,这些都依赖于鸟的努力来维持群体中个体间最佳距离来实现同步。
而社会生物学家 E.O.Wilson 参考鱼群的社会行为认为从理论上说,在搜寻食物的过程中,尽管食物的分配不可知,群中的个体可以从群中其它个体的发现以及以往的经验中获益。
粒子群从这种模型中得到启发并用于解决优化问题。
如果我们把一个优化问题看作是在空中觅食的鸟群,那么粒子群中每个优化问题的潜在解都是搜索空间的一只鸟,称之为“粒子”(Particle),“食物”就是优化问题的最优解。
每个粒子都有一个由优化问题决定的适应度用来评价粒子的“好坏”程度,每个粒子还有一个速度决定它们飞翔的方向和距离,它根据自己的飞行经验和同伴的飞行经验来调整自己的飞行。
粒子群初始化为一群随机粒子(随机解),然后通过迭代的方式寻找最优解,在每一次的迭代中,粒子通过跟踪两个“极值”来更新自己,第一个是粒子本身所经历过的最好位置,称为个体极值即;另一个是整个群体经历过的最好位置称为全局极值。
每个粒子通过上述的两个极值不断更新自己,从而产生新一代的群体。
二、粒子群算法 算法的描述如下:假设搜索空间是 $D$ 维的,并且群体中有 $N$ 个粒子。
那么群体中的第 $i$ 个粒子可以表示为一个 $D$ 维的向量 $X_i=(x_{i1},x_{i2},\dots,x_{iD})$,即第 $i$ 个粒子在 $D$ 维搜索空间中的位置是 $X_i$,它所经历的“最好”位置记作 $P_i=(p_{i1},p_{i2},\dots,p_{iD})$。
粒子的每个位置代表要求的一个潜在解,把它代入目标函数就可以得到它的适应度值,用来评判粒子的“好坏”程度。
整个群体迄今为止搜索到的最优位置记作 $P_g=(p_{g1},p_{g2},\dots,p_{gD})$,其中 $g$ 是最优粒子位置的索引。
$w$($w\ge 0$)为惯性权重(inertia weight),$P_i$ 为第 $i$ 个粒子到第 $t$ 代为止搜索到的历史最优解,$P_g$ 为整个粒子群到目前为止搜索到的最优解,$X_i$、$V_i$ 分别是第 $i$ 个粒子当前的位置和飞行速度,$c_1$、$c_2$ 为非负的常数,称为加速度因子,$r_1$、$r_2$ 是 $[0,1]$ 之间的随机数。
粒子群算法(PSO)源代码
粒子群算法(PSO)源代码%%#################################################################### %%#### Particle swarm optimization%%#### With linkage operator%%#### Deepak devicharan july 2003%%#################################################################### %%## to apply this to different equations do the following%%## generate initial particles in a search space close to actual soln%%## fool around with no of iterations, no of particles, learning rates%%## for a truly generic PSO do the following%%## increase the number of particles , increase the variance%%## i.e let the particles cover a larger area of the search space%%## then fool around as always with the above thins%declare the parameters of the optimizationmax_iterations = 1000;no_of_particles = 50;dimensions = 1;delta_min = -0.003;delta_max = 0.003;c1 = 1.3;c2 = 1.3;%initialise the particles and teir velocity componentsfor count_x = 1:no_of_particlesfor count_y = 1:dimensionsparticle_position(count_x,count_y) = rand*10;particle_velocity(count_x,count_y) = rand;p_best(count_x,count_y) = particle_position(count_x,count_y);endend%initialize the p_best_fitness arrayfor count = 1:no_of_particlesp_best_fitness(count) = -1000;end%particle_position%particle_velocity%main particle swrm routinefor count = 1:max_iterations%find the fitness of each particle%change fitness function as per equation requiresd and dimensionsfor count_x = 1:no_of_particles%x = particle_position(count_x,1);%y = particle_position(count_x,2);%z = particle_position(count_x,3);%soln = x^2 - 3*y*x + z;%x = particle_position(count_x);%soln = x^2-2*x+1;x = particle_position(count_x);soln = x-7;if soln~=0current_fitness(count_x) = 1/abs(soln);elsecurrent_fitness =1000;endend%decide on p_best etc for each particlefor count_x = 1:no_of_particlesif current_fitness(count_x) > p_best_fitness(count_x)p_best_fitness(count_x) = current_fitness(count_x);for count_y = 1:dimensionsp_best(count_x,count_y) = particle_position(count_x,count_y);endendend%decide on the 
global best among all the particles[g_best_val,g_best_index] = max(current_fitness);%g_best contains the position of teh global bestfor count_y = 1:dimensionsg_best(count_y) = particle_position(g_best_index,count_y);end%update the position and velocity compponentsfor count_x = 1:no_of_particlesfor count_y = 1:dimensionsp_current(count_y) = particle_position(count_x,count_y);endfor count_y = 1:dimensionsparticle_velocity(count_y) = particle_velocity(count_y) + c1*rand*(p_best(count_y)-p_current(count_y)) + c2*rand*(g_best(count_y)-p_current(count_y)); particle_positon(count_x,count_y) = p_current(count_y) +particle_velocity(count_y);endendendg_bestcurrent_fitness(g_best_index)clear all, clc % pso exampleiter = 1000; % number of algorithm iterationsnp = 2; % number of model parametersns = 10; % number of sets of model parametersWmax = 0.9; % maximum inertial weightWmin = 0.4; % minimum inertial weightc1 = 2.0; % parameter in PSO methodologyc2 = 2.0; % parameter in PSO methodologyPmax = [10 10]; % maximum model parameter valuePmin = [-10 -10]; % minimum model parameter valueVmax = [1 1]; % maximum change in model parameterVmin = [-1 -1]; % minimum change in model parametermodelparameters(1:np,1:ns) = 0; % set all model parameter estimates for all model parameter sets to zeromodelparameterchanges(1:np,1:ns) = 0; % set all change in model parameter estimates for all model parameter sets to zerobestmodelparameters(1:np,1:ns) = 0; % set best model parameter estimates for all model parameter sets to zerosetbestcostfunction(1:ns) = 1e6; % set best cost function of each model parameter set to a large numberglobalbestparameters(1:np) = 0; % set best model parameter values for all model parameter sets to zerobestparameters = globalbestparameters'; % best model parameter values for all model parameter sets (to plot)globalbestcostfunction = 1e6; % set best cost function for all model parameter sets to a large numberi = 0; % indicates ith algorithm iterationj = 0; % indicates jth 
set of model parametersk = 0; % indicates kth model parameterfor k = 1:np % initializationfor j = 1:nsmodelparameters(k,j) = (Pmax(k)-Pmin(k))*rand(1) + Pmin(k); % randomly distribute model parametersmodelparameterchanges(k,j) = (Vmax(k)-Vmin(k))*rand(1) + Vmin(k); % randomly distribute change in model parametersendendfor i = 2:iterfor j = 1:nsx = modelparameters(:,j);% calculate cost functioncostfunction = 105*(x(2)-x(1)^2)^2 + (1-x(1))^2;if costfunction < setbestcostfunction(j) % best cost function for jth set of model parameters bestmodelparameters(:,j) = modelparameters(:,j);setbestcostfunction(j) = costfunction;endif costfunction < globalbestcostfunction % best cost function for all sets of model parameters globalbestparameters = modelparameters(:,j);bestparameters(:,i) = globalbestparameters;globalbestcostfunction(i) = costfunction;elsebestparameters(:,i) = bestparameters(:,i-1);globalbestcostfunction(i) = globalbestcostfunction(i-1);endendW = Wmax - i*(Wmax-Wmin)/iter; % compute inertial weightfor j = 1:ns % update change in model parameters and model parametersfor k = 1:npmodelparameterchanges(k,j) = W*modelparameterchanges(k,j) + c1*rand(1)*(bestmodelparameters(k,j)-modelparameters(k,j))...+ c2*rand(1)*(globalbestparameters(k) - modelparameters(k,j));if modelparameterchanges(k,j) < -Vmax(k), modelparameters(k,j) = modelparameters(k,j) - Vmax(k); endif modelparameterchanges(k,j) > Vmax(k), modelparameters(k,j) = modelparameters(k,j) + Vmax(k); endif modelparameterchanges(k,j) > -Vmax(k) & modelparameterchanges(k,j) < Vmax(k), modelparameters(k,j) = modelparameters(k,j) + modelparameterchanges(k,j); end if modelparameters(k,j) < Pmin(k), modelparameters(k,j) = Pmin(k); endif modelparameters(k,j) > Pmax(k), modelparameters(k,j) = Pmax(k); endendendiendbp = bestparameters; index = linspace(1,iter,iter);figure; semilogy(globalbestcostfunction,'k');set(gca,'FontName','Arial','Fontsize',14); axis tight;xlabel('iteration'); ylabel('cost function');figure; q = 
plot(index,bp(1,,'k-',index,bp(2,,'k:');set(gca,'FontName','Arial','Fontsize',14); axis tight;legend(q,'x_1','x_2'); xlabel('iteration'); ylabel('parameter')。
clpso算法代码
clpso算法代码CLPSO算法是一种基于粒子群优化的算法,主要用于解决各种优化问题。
function [gBest, gBestValue] = CLPSO(fitnessFunction, nParticles, nIterations, nDimensions, xmin, xmax)
% CLPSO  Particle-swarm minimizer with constriction-style coefficients.
%   NOTE(review): despite the name, this body implements a standard
%   global-best PSO (w = 0.7298, c1 = c2 = 1.49618, the usual
%   constriction values), not comprehensive-learning PSO.
%   fitnessFunction : objective handle taking a 1 x nDimensions row vector
%   nParticles      : swarm size
%   nIterations     : number of sweeps
%   nDimensions     : problem dimension
%   xmin, xmax      : search-space bounds
%   gBest/gBestValue: best position and fitness found

% --- initialization ---
particlePositions = repmat(xmin, nParticles, nDimensions) + ...
    rand(nParticles, nDimensions) .* repmat(xmax - xmin, nParticles, 1);
particleVelocities = zeros(nParticles, nDimensions);
pBestPositions = particlePositions;
pBestValues = inf(nParticles, 1);
gBest = zeros(1, nDimensions);
gBestValue = inf;

% --- main loop ---
for it = 1:nIterations
    % Velocity and position updates; positions clamped to [xmin, xmax].
    for k = 1:nParticles
        particleVelocities(k,:) = 0.7298 * particleVelocities(k,:) ...
            + 1.49618 * rand(1, nDimensions) .* (pBestPositions(k,:) - particlePositions(k,:)) ...
            + 1.49618 * rand(1, nDimensions) .* (gBest - particlePositions(k,:));
        particlePositions(k,:) = particlePositions(k,:) + particleVelocities(k,:);
        particlePositions(k,:) = max(particlePositions(k,:), xmin);
        particlePositions(k,:) = min(particlePositions(k,:), xmax);
    end
    % Personal and global best updates.
    for k = 1:nParticles
        currentFitness = fitnessFunction(particlePositions(k,:));
        if currentFitness < pBestValues(k)
            pBestPositions(k,:) = particlePositions(k,:);
            pBestValues(k) = currentFitness;
        end
        if currentFitness < gBestValue
            gBest = particlePositions(k,:);
            gBestValue = currentFitness;
        end
    end
end
end
粒子群算法(PSO)程序(C#语言)
// Ultra-compact stochastic PSO in C# (velocity-free variant: each particle
// jumps toward its personal best and the global best with random weights).
using System;
using System.Linq;
using System.Collections.Generic;

class MyPSO {
    const int NUM = 40;            // swarm size
    const int DIM = 30;            // number of dimensions
    const double c1 = 1.8;         // cognitive factor
    const double c2 = 1.8;         // social factor
    static double xmin = -100.0;   // lower position bound
    static double xmax = 100.0;    // upper position bound
    static double[] gbestx = new double[DIM];  // global best position
    static double gbestf;                      // global best fitness
    static Random rand = new Random();

    class particle {               // one particle
        public double[] x = new double[DIM];      // current position
        public double[] bestx = new double[DIM];  // personal best position
        public double f;                          // current fitness
        public double bestf;                      // personal best fitness
    }

    // FIX: swarm and f1 must be static — Main is static, and the original
    // instance members would not compile when referenced from it.
    static particle[] swarm = new particle[NUM];

    // Test objective: sphere function.
    static double f1(double[] x) {
        return x.Sum(a => a * a);
    }

    static void Main(string[] args) {
        for (int i = 0; i < DIM; i++)     // random initial global best
            gbestx[i] = rand.NextDouble() * (xmax - xmin) + xmin;
        gbestf = double.MaxValue;
        for (int i = 0; i < NUM; i++) {   // initialize the swarm
            particle p1 = new particle();
            for (int j = 0; j < DIM; j++)
                p1.x[j] = rand.NextDouble() * (xmax - xmin) + xmin;
            p1.f = f1(p1.x);
            p1.bestf = double.MaxValue;
            // FIX: seed the personal best with the start position; the
            // original left bestx all zeros, silently biasing the search
            // toward the origin.
            p1.x.CopyTo(p1.bestx, 0);
            swarm[i] = p1;
        }
        for (int t = 0; t < 5000; t++) {
            for (int i = 0; i < NUM; i++) {
                particle p1 = swarm[i];
                for (int j = 0; j < DIM; j++)   // evolution equation
                    p1.x[j] += c1 * rand.NextDouble() * (p1.bestx[j] - p1.x[j])
                             + c2 * rand.NextDouble() * (gbestx[j] - p1.x[j]);
                p1.f = f1(p1.x);
                if (p1.f < p1.bestf) {          // update personal best
                    p1.x.CopyTo(p1.bestx, 0);
                    p1.bestf = p1.f;
                }
                if (p1.f < gbestf) {            // update global best, then
                    p1.x.CopyTo(gbestx, 0);     // scatter the best particle
                    for (int j = 0; j < DIM; j++)  // to keep exploring
                        p1.x[j] = rand.NextDouble() * (xmax - xmin) + xmin;
                    gbestf = p1.f;
                }
            }
        }
        Console.WriteLine("{0}", gbestf);
    }
}
pso程序的源代码
pso程序的源代码建议:看代码之前,请先弄明白pso是怎么回事。
然后请对应着来:程序中用Agent代表一只鸟,PSO代表鸟群。
阅读源代码,不要顺着看,先看main(),然后按照出现的东西的顺序,一个一个得来,呵呵,纯粹是建议。
// PSO.cpp : 定义控制台应用程序的入口点。
//粒子群优化算法基本程序//你可以使用本代码,如果感到对你有用的话,请通知作者,作者会很高兴。
//通讯地址:fashionxu@//by FashionXu//本程序在vc++.net 2003下面通过,你如果要在vc6.0下面使用,请查阅相关资料修改,或者联系作者#include "stdafx.h"#include "iostream"#define _USE_MATH_DEFINES#include "math.h"#includeconst int iAgentDim=20;//优化函数的维数const double iRangL=-30;//函数的取值范围const double iRangR=30;const int iPSONum=20;//粒子数int iStep=10000;//跌代次数//下面的值,要具体程序中具体的修改,根据你优化的函数来修改double w=0.9;//惯性系数const double delta1=1;//1.494;//加速度const double delta2=1;//1.494;#define rnd(low,uper)((rand()/(double)RAND_MAX)*((uper)-(low))+(low)) //这个东西,返回low ,uper之间的一个值double gbest[iAgentDim];//global best fitness保留全局最优值的坐标using namespace std;class Agent//这个类表示单个的粒子,也就是一只鸟:){public:double dpos[iAgentDim]; //位置,也就是各个维数的值double dpbest[iAgentDim]; //维护一个“自己”找到的最优值的解 double dv[iAgentDim]; //速度double m_dFitness;//agent's fitness 当前算出的一个值double m_dBestfitness;//agent's best fitness 自己已经找到的最好值Agent()//初始化{srand( (unsigned)(time( NULL )+rand()) );int i=0;for(;i {dpos[i]=rnd(iRangL,iRangR);dv[i]=dpbest[i]=dpos[i];}}void UpdateFitness()/*calculate the fitness and find out the best fitness,record*/{double sum1=0;double sum2=0;/*Ackley Funtion*/for (int i=0;i {sum1+=(dpos [i]*dpos [i]);sum2+=cos(2*M_PI*dpos [i]);}m_dFitness=(-20*exp(-0.2*(sqrt((1.0/(double)iAgentDim *sum1))))-exp( (1.0/(double)iAgentDim )*sum2)+20+M_E);//The Rastrigin function//int i=0;//for (;i //{// sum1+=(dpos [i]*dpos [i])-3.0*cos(2*M_PI*dpos [i]);//}//m_dFitness=3.0*iAgentDim+sum1;//找到一个更好的值后,更新 m_dBestfitnessif (m_dFitness {m_dBestfitness=m_dFitness;int i=0;for(;i {dpbest[i]=dpos[i];}}}void UpdatePos()//agent moving{int i=0;for(;i {// basi psodv[i]=w*dv[i]+delta1*rnd(0,1)*(dpbest[i]-dpos[i])+delta2*rnd(0,1)*( gbest[i]-dpos[i]);dpos[i]+=dv[i];}}};class PSO//这是粒子群,也就是鸟群了{private:Agent agents[iPSONum];double m_dBestFitness;//鸟群找到的最优值int m_iTempPos;public:void Init();void Search();};void PSO::Search(){int k=0;while( k {m_iTempPos=999;int i;for(i=0;i {//此处是找找鸟群中有没有更好的解,如果有,记录下来if (m_dBestFitness>agents[i].m_dBestfitness 
){m_dBestFitness=agents[i].m_dBestfitness;m_iTempPos=i;//找到到的最好解的位置}}if (m_iTempPos!=999){int j;for(j=0;j {gbest[j]=agents[m_iTempPos].dpos[j];//记录全局最优解的各个坐标}}//printf("The best is %f \n",m_dBestFitness);//下一次跌代for(i=0;i {agents[i].UpdatePos();agents[i].UpdateFitness ();}k++;}printf("The best result is: %2.15f after %d step. \n",m_dBestFit ness,k);{for (int i=0;i printf(" %2.15f ",gbest[i]);}}void PSO::Init()//初始化,{int i=0;m_dBestFitness=100000;srand( (unsigned)(time( NULL )+rand()) );for(;i {agents[i].m_dBestfitness =100000;//将m_dBestfitness赋值为一个大的值,目的是找最小值,agents[i].UpdateFitness();}}int main(int argc, char* argv[]){PSO pso;pso.Init ();pso.Search();printf("\n");char c;scanf("%c",&c);return 0;}。
pso算法代码
PSO算法代码1. PSO算法简介1.1 什么是PSO算法粒子群优化算法(Particle Swarm Optimization,简称PSO)是一种基于群体智能的随机优化算法。
它通过模拟鸟群觅食时个体间的信息共享和协同行为,在搜索空间中寻找最优解。
1.2 PSO算法原理PSO算法基于群体智能和其自适应能力,具有快速、全局搜索和简单的特点。
其基本原理如下: 1. 初始化粒子群,设置群体大小、位置和速度范围等参数。
2. 评估每个粒子的适应度,根据适应度确定个体最优解。
3. 更新全局最优解和个体最优解。
4. 根据公式更新粒子的速度和位置。
5. 重复步骤2-4,直到满足停止条件。
# PSO implementation following the standard flow: init swarm -> evaluate ->
# update personal/global bests -> update velocity/position -> repeat.
# Demo application: minimize f(x) = x^2 on roughly [-5, 5].
import random


def f(x):
    """Fitness function; adapt to the concrete problem."""
    return x ** 2


class Particle:
    """One particle: position, velocity, and personal best (lists of len dim)."""

    def __init__(self, dim):
        self.position = [random.uniform(-5, 5) for _ in range(dim)]
        self.velocity = [random.uniform(-1, 1) for _ in range(dim)]
        # BUG FIX: copy the list. The original aliased best_position to
        # position, so the in-place position updates below silently
        # corrupted every recorded personal best.
        self.best_position = self.position[:]
        self.best_fitness = f(self.position[0])


class PSO:
    """Global-best PSO over `size` particles for `max_iter` sweeps."""

    def __init__(self, dim, size, max_iter):
        self.dim = dim
        self.size = size
        self.max_iter = max_iter
        self.population = [Particle(dim) for _ in range(size)]
        # BUG FIX: copy here too, for the same aliasing reason.
        self.global_best_position = self.population[0].best_position[:]
        self.global_best_fitness = self.population[0].best_fitness

    def update_particle(self, particle):
        """One velocity/position/best update for a single particle."""
        for i in range(self.dim):
            particle.velocity[i] = (particle.velocity[i]
                                    + 2 * random.random() * (particle.best_position[i] - particle.position[i])
                                    + 2 * random.random() * (self.global_best_position[i] - particle.position[i]))
        for i in range(self.dim):
            particle.position[i] = particle.position[i] + particle.velocity[i]
        fitness = f(particle.position[0])
        if fitness < particle.best_fitness:
            particle.best_position = particle.position[:]   # snapshot, not alias
            particle.best_fitness = fitness
        if fitness < self.global_best_fitness:
            self.global_best_position = particle.position[:]
            self.global_best_fitness = fitness

    def optimize(self):
        """Run max_iter sweeps over the whole population."""
        for _ in range(self.max_iter):
            for particle in self.population:
                self.update_particle(particle)


if __name__ == '__main__':
    pso = PSO(dim=1, size=50, max_iter=100)
    pso.optimize()
    print("Global Best:", pso.global_best_position, pso.global_best_fitness)
粒子群优化算法(PSO)附代码
粒子群优化算法(PSO)附代码PSO算法的基本思想是通过对群体中每个粒子的速度进行随机扰动,并根据当前位置和速度的信息来更新粒子的位置。
每个粒子记住自己曾经达到的最优位置,同时也会记住整个群体中达到的最优位置。
通过不断迭代,群体中的每个粒子会逐渐收敛到最优解附近。
# Object-oriented PSO using numpy: a swarm of Particle objects driven by a
# PSO class; 50 particles, 10 dimensions, 100 iterations in the demo.
import random
import numpy as np


class Particle:
    """One candidate solution: position, velocity, and personal best."""

    def __init__(self, dim, min_bound, max_bound):
        self.position = np.zeros(dim)
        self.velocity = np.zeros(dim)
        self.best_position = np.zeros(dim)
        self.min_bound = min_bound
        self.max_bound = max_bound

    def initialize(self):
        """Scatter position and velocity uniformly inside the bounds."""
        for i in range(len(self.position)):
            self.position[i] = random.uniform(self.min_bound, self.max_bound)
            self.velocity[i] = random.uniform(self.min_bound, self.max_bound)
        # BUG FIX: the scraped original read `self.position.copy` (no call),
        # which stored a bound method instead of an array snapshot.
        self.best_position = self.position.copy()

    def update_velocity(self, global_best_position, c1, c2, w):
        """Standard PSO velocity rule: inertia w, acceleration factors c1/c2."""
        r1 = random.uniform(0, 1)
        r2 = random.uniform(0, 1)
        self.velocity = (w * self.velocity
                         + c1 * r1 * (self.best_position - self.position)
                         + c2 * r2 * (global_best_position - self.position))

    def update_position(self):
        """Move, then clamp each coordinate back into [min_bound, max_bound]."""
        self.position = self.position + self.velocity
        for i in range(len(self.position)):
            if self.position[i] < self.min_bound:
                self.position[i] = self.min_bound
            elif self.position[i] > self.max_bound:
                self.position[i] = self.max_bound


class PSO:
    """Global-best PSO over num_particles particles for max_iter sweeps."""

    def __init__(self, num_particles, dim, min_bound, max_bound, max_iter):
        self.num_particles = num_particles
        self.dim = dim
        self.min_bound = min_bound
        self.max_bound = max_bound
        self.max_iter = max_iter
        self.particles = []

    def initialize_particles(self):
        """Create and randomly initialize the swarm."""
        for _ in range(self.num_particles):
            particle = Particle(self.dim, self.min_bound, self.max_bound)
            particle.initialize()   # BUG FIX: the call parentheses were missing
            self.particles.append(particle)

    def optimize(self, c1, c2, w):
        """Return (best position, best fitness) after max_iter iterations."""
        global_best_position = None
        global_best_fitness = float('inf')
        for _ in range(self.max_iter):
            for particle in self.particles:
                fitness = self.evaluate_fitness(particle.position)
                if fitness < self.evaluate_fitness(particle.best_position):
                    particle.best_position = particle.position.copy()
                if fitness < global_best_fitness:
                    global_best_fitness = fitness
                    global_best_position = particle.position.copy()
                particle.update_velocity(global_best_position, c1, c2, w)
                particle.update_position()   # BUG FIX: parentheses were missing
        return global_best_position, global_best_fitness

    def evaluate_fitness(self, position):
        """Sphere function; replace for the real problem."""
        return np.sum(position ** 2)


if __name__ == "__main__":
    num_particles = 50
    dim = 10
    min_bound = -10
    max_bound = 10
    max_iter = 100
    pso = PSO(num_particles, dim, min_bound, max_bound, max_iter)
    pso.initialize_particles()   # BUG FIX: parentheses were missing
    global_best_position, global_best_fitness = pso.optimize(c1=2, c2=2, w=0.8)
    print("Global best position:", global_best_position)
    print("Global best fitness:", global_best_fitness)
PSO优化算法代码
int c1=2; //加速因子
int c2=2; //加速因子
double w=1; //惯性权重
double Wmax=1; //最大惯性权重
double Wmin=0.6; //最小惯性权重
for(i=0; i<P_num; i++) //计算每个粒子的适应度
{
particle_fit[i] = fitness(particle[i]);
particle_loc_fit[i] = particle_fit[i];
}
gfit = particle_loc_fit[0]; //找出全局最优
for(i=0,j=-1; i<P_num; i++) //更新全局变量
{
if(particle_loc_fit[i]<gfit)
{
gfit = particle_loc_fit[i];
j = i;
}
}
if(j != -1)
particle_loc_best[i][j] = particle[i][j]; //将当前最优结果写入局部最优集合
particle_v[i][j] = -V_max+2*V_max*1.0*rand()/RAND_MAX; //速度
}
}
}
}
}
}
// Recompute fitnesses and refresh the personal/global bests.
// NOTE(review): this copy of renew_var() is cut off mid-body by interleaved
// article text; a fuller copy appears further down in the file.
void renew_var()
{
int i, j;
for(i=0; i<P_num; i++) //compute each particle's fitness
{
particle_fit[i] = fitness(particle[i]);
tlpso算法代码
tlpso算法是一种基于粒子群优化算法(PSO)的改进算法,常被应用于解决高维、非线性的优化问题。
# Topology-learning PSO (TLPSO): standard PSO plus an occasional extra
# learning step towards the best particle in a distance-based neighbourhood.
# (Reformatted from a scraped one-line listing.)
import numpy as np


class TLPso:
    """TLPSO optimiser.

    Args:
        func: objective function to minimise, called as func(position).
        dim: number of decision variables.
        num_particles: swarm size.
        max_iter: number of iterations.
        w, c1, c2, c3: inertia, cognitive, social and local learning factors.
        p: probability of performing the topology-learning step.
    """

    def __init__(self, func, dim, num_particles, max_iter,
                 w=0.9, c1=2.0, c2=2.0, c3=1.5, p=0.2):
        self.func = func
        self.dim = dim
        self.num_particles = num_particles
        self.max_iter = max_iter
        self.w = w
        self.c1 = c1
        self.c2 = c2
        self.c3 = c3
        self.p = p
        self.gbest = np.zeros(dim)
        self.gbest_val = np.inf
        self.pbest = np.zeros((num_particles, dim))
        self.pbest_val = np.ones(num_particles) * np.inf
        self.velocity = np.zeros((num_particles, dim))
        self.position = np.random.uniform(-10, 10, (num_particles, dim))

    def run(self):
        """Iterate the swarm, tracking personal and global bests in place."""
        for i in range(self.max_iter):
            for j in range(self.num_particles):
                # evaluate objective function
                fitness = self.func(self.position[j])
                # update pbest (row assignment copies into the pbest array)
                if fitness < self.pbest_val[j]:
                    self.pbest_val[j] = fitness
                    self.pbest[j] = self.position[j]
                # update gbest
                if fitness < self.gbest_val:
                    self.gbest_val = fitness
                    # FIX: the original kept a *view* of position[j] here, so
                    # later position updates silently mutated gbest; snapshot.
                    self.gbest = self.position[j].copy()
                # update velocity: inertia + cognitive + social + random peer
                rho1 = np.random.uniform(0, 1)
                rho2 = np.random.uniform(0, 1)
                rho3 = np.random.uniform(0, 1)
                phi1 = self.c1 * rho1
                phi2 = self.c2 * rho2
                phi3 = self.c3 * rho3
                self.velocity[j] = (
                    self.w * self.velocity[j]
                    + phi1 * (self.pbest[j] - self.position[j])
                    + phi2 * (self.gbest - self.position[j])
                    + phi3 * (self.pbest[np.random.randint(self.num_particles)]
                              - self.position[j]))
                # apply velocity limits
                self.velocity[j][self.velocity[j] > 1] = 1
                self.velocity[j][self.velocity[j] < -1] = -1
                # update position
                self.position[j] = self.position[j] + self.velocity[j]
                # apply position limits
                self.position[j][self.position[j] > 10] = 10
                self.position[j][self.position[j] < -10] = -10
                # apply topology learning with probability p
                if np.random.uniform(0, 1) < self.p:
                    neighbors = self.get_neighbors(j)
                    best_neighbor = self.get_best_neighbor(neighbors)
                    # FIX: guard the empty-neighbourhood case — the original
                    # indexed position[None] and raised a broadcast error
                    # whenever no particle was within the radius.
                    if best_neighbor is not None:
                        rho4 = np.random.uniform(0, 1)
                        phi4 = self.c1 * rho4
                        self.velocity[j] = (
                            self.w * self.velocity[j]
                            + phi4 * (self.position[best_neighbor]
                                      - self.position[j]))
                        # apply velocity limits
                        self.velocity[j][self.velocity[j] > 1] = 1
                        self.velocity[j][self.velocity[j] < -1] = -1
                        # update position
                        self.position[j] = self.position[j] + self.velocity[j]
                        # apply position limits
                        self.position[j][self.position[j] > 10] = 10
                        self.position[j][self.position[j] < -10] = -10

    def get_neighbors(self, i):
        """Indices of particles within Euclidean distance 1 of particle i."""
        neighbors = []
        for j in range(self.num_particles):
            if i != j:
                dist = np.linalg.norm(self.position[i] - self.position[j])
                if dist < 1:
                    neighbors.append(j)
        return neighbors

    def get_best_neighbor(self, neighbors):
        """Neighbour index with the lowest personal-best value, or None."""
        best_neighbor = None
        best_val = np.inf
        for neighbor in neighbors:
            if self.pbest_val[neighbor] < best_val:
                best_val = self.pbest_val[neighbor]
                best_neighbor = neighbor
        return best_neighbor
- 1、下载文档前请自行甄别文档内容的完整性,平台不提供额外的编辑、内容补充、找答案等附加服务。
- 2、"仅部分预览"的文档,不可在线预览部分如存在完整性等问题,可反馈申请退款(可完整预览的文档不适用该条件!)。
- 3、如文档侵犯您的权益,请联系客服反馈,我们会尽快为您处理(人工客服工作时间:9:00-18:30)。
// NOTE(review): orphaned interior of initial() — positions are seeded
// uniformly in [low, high] and velocities in [-V_max, V_max].
particle[i][j] = low+(high-low)*1.0*rand()/RAND_MAX; //initialize the swarm
particle_loc_best[i][j] = particle[i][j]; //seed the personal-best set with the initial positions
particle_v[i][j] = -V_max+2*V_max*1.0*rand()/RAND_MAX; //velocity
}
}
for(i=0; i<P_num; i++) //compute each particle's fitness
{
particle_fit[i] = fitness(particle[i]);
particle_loc_fit[i] = particle_fit[i];
}
gfit = particle_loc_fit[0]; //start the global-best search
double particle_fit[P_num]; //current-generation fitness of each particle (NOTE(review): displaced file-scope declaration)
/*
 * Sphere benchmark: f(a) = sum_i a[i]^2, global minimum 0 at the origin.
 * FIX(review): the closing brace of this function was missing in the scraped
 * source; restored so the declarations that follow stay at file scope.
 */
double Sphere(double a[])
{
    int i;
    double sum=0.0;
    for(i=0; i<dim; i++)
    {
        sum+=a[i]*a[i];
    }
    return sum;
}
double particle_glo_best[dim]; //global best position vector
double gfit; //global best fitness, computed from the global best vector
double particle_v[P_num][dim]; //current velocity vector of each particle
double particle[P_num][dim]; //the swarm (position vectors)
double particle_loc_best[P_num][dim]; //personal best vector of each particle
double particle_loc_fit[P_num]; //personal best fitness, computed from the personal best vector
}
}
}
}
// Recompute fitnesses, refresh personal/global bests, then update velocities.
void renew_var()
{
int i, j;
for(i=0; i<P_num; i++) //compute each particle's fitness
{
particle_fit[i] = fitness(particle[i]);
// NOTE(review): the body of this if (copying the new fitness/position into
// the personal-best arrays) was displaced elsewhere in this scraped file.
if(particle_fit[i] < particle_loc_fit[i]) //update the personal best
}
}
for(i=0; i<P_num; i++) //update each particle's velocity
{
for(j=0; j<dim; j++)
{
particle_v[i][j]=w*particle_v[i][j]+
c1*1.0*rand()/RAND_MAX*(particle_loc_best[i][j]-particle[i][j])+
// NOTE(review): the social term (c2 * rand * (gbest - x)) and the V_max
// clamping that complete this statement appear further down in the file.
}
/* Objective wrapper used by the optimiser: switch the benchmark here. */
double fitness(double a[]) /* fitness function */
{
    double value = Rastrigin(a);
    return value;
}
// Seed the swarm before the main loop.
// NOTE(review): this scraped copy only sets every velocity to -V_max; the
// position / personal-best / fitness initialisation that main() relies on
// appears as a displaced fragment earlier in this file — confirm against the
// intact source before compiling.
void initial()
{
int i,j;
for(i=0; i<P_num; i++) //randomly generate the particles
{
for(j=0; j<dim; j++)
{
particle_v[i][j] = -V_max;
}
}
}
} // NOTE(review): stray brace left over from the scramble
/*
 * Entry point: append all output to result.txt, seed the RNG, build the
 * swarm, run iter_num update rounds, then report the best fitness found.
 */
int main()
{
    int iter;

    freopen("result.txt","a+",stdout); /* redirect stdout into the log file */
    srand((unsigned)time(NULL));
    initial();
    for(iter = 0; iter < iter_num; iter++)
    {
        renew_particle(); /* move the swarm */
        renew_var();      /* refresh fitnesses, bests and velocities */
    }
    printf("粒子个数:%d\n",P_num);
    printf("维度为:%d\n",dim);
    printf("最优值为%.10lf\n", gfit);
    return 0;
}
// NOTE(review): the first four lines are the displaced tail of another
// function — copying particle j's personal best into the global-best vector.
{
particle_glo_best[i] = particle_loc_best[j][i];
}
}
// Move every particle one step along its velocity; the loop body continues
// in a displaced fragment below (the function is cut here by a macro block).
void renew_particle()
{
int i,j;
for(i=0; i<P_num; i++) //update each particle's position
{
for(j=0; j<dim; j++)
#define low -100 //lower edge of the search domain
#define high 100 //upper edge of the search domain
#define iter_num 1000 //number of optimisation iterations
#define V_max 20 //velocity is clamped to [-V_max, V_max]
#define c1 2 //cognitive acceleration factor (weights pbest - x)
#define c2 2 //social acceleration factor (weights gbest - x)
#define w 0.5 //inertia weight
#define alp 1 //step scale applied to the velocity in the position update
* Company: Dalian University Of Technology
*
* =====================================================================================
*/
//粒子群PSO算法
/*
* =====================================================================================
*
* Filename: particle.c
*
* Description:
*
* Version: 1.0
// NOTE(review): displaced personal-best update — the missing body of the
// `if(particle_fit[i] < particle_loc_fit[i])` in renew_var() above.
{
particle_loc_fit[i] = particle_fit[i];
// NOTE(review): the next line is corrupted by the scrape — it should read
// `for(j=0; j<dim; j++) particle_loc_best[i][j] = particle[i][j];`.
for(j=0; j<dim; j++_loc_best[i][j] = particle[i][j];
}
}
}
// NOTE(review): two variants of the global-best scan appear to be merged
// here; the `for(i=0,j=-1; ...)` header looks vestigial.
for(i=0,j=-1; i<P_num; i++) //update the global-best bookkeeping
j=0;
for(i=1; i<P_num; i++)
{
if(particle_loc_fit[i]<gfit)
{
gfit = particle_loc_fit[i];
j = i;
}
}
for(i=0; i<dim; i++) //update the global best vector
}
// Rosenbrock benchmark: sum_i 100*(a[i+1]-a[i]^2)^2 + (a[i]-1)^2.
double Rosenbrock(double a[])
{
int i;
double sum=0.0;
for(i=0;i<dim-1; i++)
{
sum+= 100*(a[i+1]-a[i]*a[i])*(a[i+1]-a[i]*a[i])+(a[i]-1)*(a[i]-1);
// NOTE(review): the next six lines are a displaced piece of renew_var() —
// the social velocity term and the V_max clamping — spliced into this
// function by the scrape; they do not belong to Rosenbrock.
c2*1.0*rand()/RAND_MAX*(particle_glo_best[j]-particle[i][j]);
if(particle_v[i][j] > V_max)
{
particle_v[i][j] = V_max;
}
if(particle_v[i][j] < -V_max)
}
return sum;
}
/*
 * Rastrigin benchmark: f(a) = sum_i (a[i]^2 - 10*cos(2*PI*a[i]) + 10);
 * highly multimodal, global minimum 0 at the origin.
 * FIX(review): restored the closing brace lost in the scraped source (the
 * line that followed was an unrelated displaced fragment).
 */
double Rastrigin(double a[])
{
    int i;
    double sum=0.0;
    for(i=0;i<dim;i++)
    {
        sum+=a[i]*a[i]-10.0*cos(2*PI*a[i])+10.0;
    }
    return sum;
}
// NOTE(review): displaced tail of the global-best update in renew_var().
{
if(particle_loc_fit[i]<gfit)
{
gfit = particle_loc_fit[i];
j = i;
}
}
if(j != -1) //a better personal best was found — promote it to global best
{
for(i=0; i<dim; i++) //update the global best vector
{
particle_glo_best[i] = particle_loc_best[j][i];
* Created: 2012年03月17日 15时27分13秒
* Revision: none
* Compiler: gcc
*
* Author: MaZheng (/mazheng1989), mazheng19891019@
// NOTE(review): displaced interior of renew_particle() — step each position
// by alp * velocity, then clamp it back into [low, high].
{
particle[i][j] += alp*particle_v[i][j];
if(particle[i][j] > high)
{
particle[i][j] = high;
}
if(particle[i][j] < low)
{
particle[i][j] = low;
#include<stdio.h>
#include<math.h>
#include<time.h>
#include<stdlib.h>
#define PI 3.141592653589 /* used by the Rastrigin benchmark */
#define P_num 200 //number of particles in the swarm
#define dim 50 //dimensionality of the search space