首页 > 代码库 > "如何用70行Java代码实现深度神经网络算法" 的delphi版本

"如何用70行Java代码实现深度神经网络算法" 的delphi版本

 http://blog.csdn.net/hustjoyboy/article/details/50721535

"如何用70行Java代码实现深度神经网络算法" 的delphi版本

技术分享

=====ann.pas源程序===================================

 

{

by 阿甘 2016.2.23


参考自此篇文档
如何用70行Java代码实现深度神经网络算法
http://geek.csdn.net/news/detail/56086
原文中的代码作者:fourinone
原文中的代码是用java写的,现移植为delphi 供参考
略作修改:权重不含输出层
}


unit ann;


interface


type
  Tdbarr=array of double;  // dynamic vector of doubles (used for network outputs)
  // Simple multi-layer perceptron trained with backpropagation plus momentum.
  // Delphi port of the "70 lines of Java deep neural network" example.
  TBpDeep=class
  private
    layer:array of array of double;       // node activations, one sub-array per layer
    layerErr:array of array of double;    // backpropagated error terms, per layer
    layer_weight:array of array of array of double;        // weights; the extra last row of each layer holds the bias ("intercept") weights
    layer_weight_delta:array of array of array of double;  // momentum term for each weight
    mobp,rate:double;  // mobp = momentum coefficient, rate = learning rate
    procedure updateWeight(tar:array of double);  // backprop step toward target vector tar
  public
    constructor Create(layernum:array of integer;rate,mobp:double);  // layernum = node count per layer, input..output
    function computeout(input:array of double):Tdbarr;  // forward pass; returns the output layer
    procedure train(input,tar:array of double);         // one forward + backward training step
  end;


implementation


// Builds a fully connected feed-forward network.
// layernum: node count per layer (input..output); rate: learning rate; mobp: momentum.
constructor TBpDeep.Create(layernum:array of integer;rate,mobp:double);
var
  a,i,j,k:integer;
begin
  self.rate:=rate;
  self.mobp:=mobp;
  a:=length(layernum);
  if a<2 then exit; // need at least an input and an output layer; otherwise the object stays unusable
  setlength(layer,a);
  setlength(layerErr,a);
  setlength(layer_weight,a-1);
  setlength(layer_weight_delta,a-1);
  Randomize;
  for k:=0 to a-1 do
  begin
    setlength(layer[k],layernum[k]);
    setlength(layerErr[k],layernum[k]);
    if k+1<a then
    begin
      // +1 extra row per layer holds the bias ("intercept") weights feeding the next layer
      setlength(layer_weight[k],layernum[k]+1,layernum[k+1]);
      setlength(layer_weight_delta[k],layernum[k]+1,layernum[k+1]);
      // FIX: include the bias row (index layernum[k]) in the random initialization.
      // The original loop stopped at layernum[k]-1, leaving every bias weight at the
      // dynamic-array default of 0, although the Java original randomizes them all.
      for j:=0 to layernum[k] do
      for i:=0 to layernum[k+1]-1 do
        layer_weight[k][j][i]:=Random; // random initial weight in [0,1)
    end;
  end;
end;


// Forward pass: propagate the input through every layer with sigmoid activation.
// Returns the activations of the final (output) layer.
function TBpDeep.computeout(input:array of double):Tdbarr;
var
  src,node,prev:integer;
  acc:double;
begin
  for src:=1 to high(layer) do
    for node:=0 to high(layer[src]) do
    begin
      // seed the sum with the bias weight kept in the extra row of layer_weight
      acc:=layer_weight[src-1][length(layer[src-1])][node];
      for prev:=0 to high(layer[src-1]) do
      begin
        if src=1 then layer[src-1][prev]:=input[prev]; // copy the input into layer 0 on the first pass
        acc:=acc+layer_weight[src-1][prev][node]*layer[src-1][prev];
      end;
      layer[src][node]:=1/(1+exp(-acc)); // sigmoid activation
    end;
  // NOTE: returns a reference to the internal output layer, not a copy
  result:=Tdbarr(layer[high(layer)]);
end;


// Backpropagation: compute per-node error terms from the target vector,
// then adjust every weight (including the bias rows) with momentum.
procedure TBpDeep.updateWeight(tar:array of double);
var
  i,j,k:integer;
  z:double;
begin
  k:=high(layer);
  // output-layer error: sigmoid derivative * (target - output)
  for j:=0 to high(layererr[k]) do layerErr[k][j]:=layer[k][j]*(1-layer[k][j])*(tar[j]-layer[k][j]);
  while k>0 do
  begin
    dec(k);
    for j:=0 to high(layererr[k]) do
    begin
      z:=0;
      for i:=0 to high(layererr[k+1]) do
      begin
        // FIX: accumulate the backpropagated error over ALL next-layer nodes.
        // The published listing had "if z+k>0 then z:=layerErr[k+1][i]*..."
        // which overwrote z each iteration, so only the last term survived
        // and the gradient was wrong; the Java original sums the terms.
        z:=z+layerErr[k+1][i]*layer_weight[k][j][i];
        layer_weight_delta[k][j][i]:=mobp*layer_weight_delta[k][j][i]+rate*layerErr[k+1][i]*layer[k][j];// hidden-layer momentum update
        layer_weight[k][j][i]:=layer_weight[k][j][i]+layer_weight_delta[k][j][i];// apply hidden-layer weight change
        if j=high(layererr[k]) then
        begin
          // bias ("intercept") weights live in the extra row j+1 = layernum[k]
          layer_weight_delta[k][j+1][i]:=mobp*layer_weight_delta[k][j+1][i]+rate*layerErr[k+1][i];// bias momentum update
          layer_weight[k][j+1][i]:=layer_weight[k][j+1][i]+layer_weight_delta[k][j+1][i];// apply bias weight change
        end;
      end;
      layerErr[k][j]:=z*layer[k][j]*(1-layer[k][j]);// store this node's error term
    end;
  end;
end;


// One online-training step: forward pass, then weight update toward tar.
procedure TBpDeep.train(input,tar:array of double);
begin
  computeout(input);  // forward propagate (the returned output is not needed here)
  updateWeight(tar);  // backpropagate against the target vector
end;


end.

========================================

测试代码:

 

procedure TForm1.bttrainClick(Sender: TObject);// train the network
var
  epoch,sample:integer;
const
  data:array[0..3] of array[0..1] of double=((1,2),(2,2),(1,1),(2,1));
  tar:array[0..3] of array[0..1] of double=((1,0),(0,1),(0,1),(1,0));
begin
  // 500 epochs over the four hard-coded training samples
  for epoch:=0 to 499 do
    for sample:=0 to high(data) do
      bp.train(data[sample], tar[sample]);
end;


procedure TForm1.btcomputeClick(Sender: TObject);// classify one point
var
  rst:Tdbarr;
  x:array[0..1] of double;
begin
  // Read the 2-D input from the edit boxes, run a forward pass, show both outputs.
  x[0]:=strtofloat(xx.Text);
  x[1]:=strtofloat(yy.Text);
  rst:=bp.computeout(x);
  // FIX: the published listing used typographic quotes (U+2018) around the
  // separator, which is not valid Pascal; use a plain single-quoted space.
  memo1.Lines.Append(floattostr(rst[0])+' '+floattostr(rst[1]));
end;


procedure TForm1.FormCreate(Sender: TObject);// initialization
begin
  // Topology 2-10-2 (2 inputs, one hidden layer of 10 nodes, 2 outputs),
  // learning rate 0.15, momentum 0.8.
  bp:=TBpdeep.Create([2,10,2],0.15,0.8);
end;

 

=====================================

 

"如何用70行Java代码实现深度神经网络算法" 的delphi版本