Demo entry 5865610

Submitted by anonymous on Aug 24, 2016 at 06:59
Language: Matlab. Code size: 4.0 kB.
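
The snippet below implements a small multilayer perceptron in MATLAB: one hidden layer of four logistic units and a single logistic output unit, trained with backpropagation to learn the XOR function.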

function mlp()
  trainingdata = [0 0; 0 1; 1 0; 1 1];       % training input data
  t = [0 1 1 0];                             % desired outputs (XOR)
  [number, dimension] = size(trainingdata);  % training set size (samples / input dimension)
  accumulate_error = zeros(1, 2.5e3);        % accumulated squared error per epoch
  eta = 0.8;                                 % learning rate (0.8)
  threshold = 0.005;                         % convergence criterion
  wd1 = 0;  wd2 = 0;
  bd1 = 0;  bd2 = 0;                         % initialize weight/bias correction terms
  iteration = 0;                             % iteration counter
  hidden_unitnum = 4;                        % number of hidden-layer units
  w1 = randn(hidden_unitnum, 2) - 0.5;       % layer-1 weights: 4 neurons, each taking 2 inputs
  w2 = randn(1, hidden_unitnum) - 0.5;       % layer-2 weights: 1 neuron taking 4 inputs
  b1 = randn(hidden_unitnum, 1) - 0.5;       % layer-1 biases
  b2 = randn(1, 1) - 0.5;                    % layer-2 bias
  while 1
      temp = 0;
      iteration = iteration + 1;
      for i = 1:number
          % forward propagation
          a0 = trainingdata(i, :)';                       % i-th sample as a column vector
          n1 = w1*a0 + b1;
          a1 = Logistic(n1);                              % hidden-layer output
          n2 = w2*a1 + b2;
          a2 = Logistic(n2);                              % output-layer output
          a = a2;
          e = t(i) - a;                                   % residual
          accumulate_error(iteration) = temp + abs(e)^2;  % accumulate squared error
          temp = accumulate_error(iteration);             % update running total
          s2 = F(a2)*e;                                   % output-layer delta
          s1 = F(a1)*w2'*s2;                              % hidden-layer delta
          % update weights and biases
          wd1 = eta .* s1*a0';
          wd2 = eta .* s2*a1';
          w1 = w1 + wd1;
          w2 = w2 + wd2;
          bd1 = eta .* s1;
          bd2 = eta .* s2;
          b1 = b1 + bd1;
          b2 = b2 + bd2;
      end
      if accumulate_error(iteration) <= threshold || iteration > 2.5e3
          break;
      end
  end
  plot(accumulate_error, 'm');
  grid on;
  xlabel('training iterations');
  ylabel('error');
  disp(['computed error = ', num2str(accumulate_error(iteration))]);
  disp(['iterations = ', num2str(iteration)]);
  % test the trained network on the four input patterns
  for i = 1:4
      a0 = [0 0; 0 1; 1 0; 1 1]';
      n1 = w1*a0(:, i) + b1;
      a1 = Logistic(n1);
      n2 = w2*a1 + b2;
      a2 = Logistic(n2);
      a = a2;
      A = a0(:, i)';
      disp([num2str(A), ' = ', num2str(a)])
  end
  m = 0;  % flag (unused)
% sigmoid (logistic) activation function
function [a] = Logistic(n)
a = 1./(1 + exp(-n));

% Thanks to classmate Zhao Lin for the reminder: this function builds the diagonal
% Jacobian of the sigmoid, diag(a.*(1-a)), used when computing the delta values
function [result] = F(a)
[r, c] = size(a);
result = zeros(r, r);
for i = 1:r
    result(i, i) = (1 - a(i))*a(i);
end
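
A minimal way to try the snippet, assuming the code above is saved as mlp.m on the MATLAB path (exact values vary from run to run because the weights are initialized randomly):

  % Train the network and show the results: this plots the error curve,
  % prints the final error and iteration count, and then prints the output
  % for each of the four XOR input patterns, e.g.
  %   0  0 = 0.02...
  %   0  1 = 0.97...
  %   1  0 = 0.97...
  %   1  1 = 0.03...
  mlp();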

