Neuron Net
NeuroNet.mqh
//+------------------------------------------------------------------+
//|                                                     NeuroNet.mqh |
//|                                              Copyright 2019, DNG |
//|                                             https://www.mql5.com |
//+------------------------------------------------------------------+
#property copyright "Copyright 2019, DNG"
#property link      "https://www.mql5.com"
#property version   "1.00"
//+------------------------------------------------------------------+
//| Include standard libraries                                       |
//+------------------------------------------------------------------+
#include <Arrays\ArrayDouble.mqh>
#include <Arrays\ArrayInt.mqh>
#include <Arrays\ArrayObj.mqh>
#include <OpenCL\OpenCL.mqh>
//+------------------------------------------------------------------+
//| Defines                                                          |
//+------------------------------------------------------------------+
#define lr 0.001
#define momentum 0.5
//---
#define b1 0.99
#define b2 0.9999
//---
double eta=lr;
//string com;
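//+------------------------------------------------------------------+
//| Editor's note (sketch): lr is the base learning rate (copied     |
//| into eta), and b1/b2 are the Adam beta1/beta2 decay rates. The   |
//| bias-corrected Adam step size used by the updateInputWeights()   |
//| methods further down is                                          |
//|    lt = eta*sqrt(1-pow(b2,t))/(1-pow(b1,t))                      |
//| where t counts the update iterations.                            |
//+------------------------------------------------------------------+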
//+------------------------------------------------------------------+
//| Object type identifiers (returned by Type())                     |
//+------------------------------------------------------------------+
#define defArrayConnects 0x7782
#define defLayer 0x7787
#define defArrayLayer 0x7788
#define defNet 0x7790
#define defConnect 0x7781
#define defNeuronBase 0x7783
#define defNeuron 0x7784
#define defNeuronConv 0x7785
#define defNeuronProof 0x7786
#define defNeuronLSTM 0x7791
#define defBufferDouble 0x7882
#define defNeuronBaseOCL 0x7883
#define defNeuronConvOCL 0x7885
#define defNeuronProofOCL 0x7886
#define defNeuronAttentionOCL 0x7887
//#define defNeuronLSTMOCL 0x7884
//+------------------------------------------------------------------+
//| OpenCL kernel indices and their argument positions               |
//+------------------------------------------------------------------+
#define def_k_FeedForward 0
#define def_k_ff_matrix_w 0
#define def_k_ff_matrix_i 1
#define def_k_ff_matrix_o 2
#define def_k_ff_inputs 3
#define def_k_ff_activation 4
//---
#define def_k_CalcOutputGradient 1
#define def_k_cog_matrix_t 0
#define def_k_cog_matrix_o 1
#define def_k_cog_matrix_ig 2
#define def_k_cog_activation 3
//---
#define def_k_CalcHiddenGradient 2
#define def_k_chg_matrix_w 0
#define def_k_chg_matrix_g 1
#define def_k_chg_matrix_o 2
#define def_k_chg_matrix_ig 3
#define def_k_chg_outputs 4
#define def_k_chg_activation 5
//---
#define def_k_UpdateWeightsMomentum 3
#define def_k_uwm_matrix_w 0
#define def_k_uwm_matrix_g 1
#define def_k_uwm_matrix_i 2
#define def_k_uwm_matrix_dw 3
#define def_k_uwm_inputs 4
#define def_k_uwm_learning_rates 5
#define def_k_uwm_momentum 6
//---
#define def_k_UpdateWeightsAdam 4
#define def_k_uwa_matrix_w 0
#define def_k_uwa_matrix_g 1
#define def_k_uwa_matrix_i 2
#define def_k_uwa_matrix_m 3
#define def_k_uwa_matrix_v 4
#define def_k_uwa_inputs 5
#define def_k_uwa_l 6
#define def_k_uwa_b1 7
#define def_k_uwa_b2 8
//---
#define def_k_FeedForwardProof 5
#define def_k_ffp_matrix_i 0
#define def_k_ffp_matrix_o 1
#define def_k_ffp_inputs 2
#define def_k_ffp_window 3
#define def_k_ffp_step 4
//---
#define def_k_CalcInputGradientProof 6
#define def_k_cigp_matrix_i 0
#define def_k_cigp_matrix_g 1
#define def_k_cigp_matrix_o 2
#define def_k_cigp_matrix_ig 3
#define def_k_cigp_outputs 4
#define def_k_cigp_window 5
#define def_k_cigp_step 6
//---
#define def_k_FeedForwardConv 7
#define def_k_ffc_matrix_w 0
#define def_k_ffc_matrix_i 1
#define def_k_ffc_matrix_o 2
#define def_k_ffc_inputs 3
#define def_k_ffc_step 4
#define def_k_ffc_window_in 5
#define def_k_ffc_window_out 6
#define def_k_ffc_activation 7
//---
#define def_k_CalcHiddenGradientConv 8
#define def_k_chgc_matrix_w 0
#define def_k_chgc_matrix_g 1
#define def_k_chgc_matrix_o 2
#define def_k_chgc_matrix_ig 3
#define def_k_chgc_outputs 4
#define def_k_chgc_step 5
#define def_k_chgc_window_in 6
#define def_k_chgc_window_out 7
#define def_k_chgc_activation 8
//---
#define def_k_UpdateWeightsConvMomentum 9
#define def_k_uwcm_matrix_w 0
#define def_k_uwcm_matrix_g 1
#define def_k_uwcm_matrix_i 2
#define def_k_uwcm_matrix_dw 3
#define def_k_uwcm_inputs 4
#define def_k_uwcm_learning_rates 5
#define def_k_uwcm_momentum 6
#define def_k_uwcm_window_in 7
#define def_k_uwcm_window_out 8
#define def_k_uwcm_step 9
//---
#define def_k_UpdateWeightsConvAdam 10
#define def_k_uwca_matrix_w 0
#define def_k_uwca_matrix_g 1
#define def_k_uwca_matrix_i 2
#define def_k_uwca_matrix_m 3
#define def_k_uwca_matrix_v 4
#define def_k_uwca_inputs 5
#define def_k_uwca_l 6
#define def_k_uwca_b1 7
#define def_k_uwca_b2 8
#define def_k_uwca_window_in 9
#define def_k_uwca_window_out 10
#define def_k_uwca_step 11
//---
#define def_k_AttentionScore 11
#define def_k_as_querys 0
#define def_k_as_keys 1
#define def_k_as_score 2
#define def_k_as_dimension 3
//---
#define def_k_AttentionOut 12
#define def_k_aout_scores 0
#define def_k_aout_values 1
#define def_k_aout_inputs 2
#define def_k_aout_out 3
//---
#define def_k_MatrixSum 13
#define def_k_sum_matrix1 0
#define def_k_sum_matrix2 1
#define def_k_sum_matrix_out 2
#define def_k_sum_dimension 3
#define def_k_sum_multiplyer 4
//---
#define def_k_AttentionGradients 14
#define def_k_ag_querys 0
#define def_k_ag_querys_g 1
#define def_k_ag_keys 2
#define def_k_ag_keys_g 3
#define def_k_ag_values 4
#define def_k_ag_values_g 5
#define def_k_ag_scores 6
#define def_k_ag_gradient 7
//---
#define def_k_Normilize 15
#define def_k_norm_buffer 0
#define def_k_norm_dimension 1
//---
#define def_k_NormilizeWeights 16
//+------------------------------------------------------------------+
//| OpenCL program source                                            |
//+------------------------------------------------------------------+
#resource "NeuroNet.cl" as string cl_program
//+------------------------------------------------------------------+
//| Activation function types                                        |
//+------------------------------------------------------------------+
enum ENUM_ACTIVATION
  {
   None=-1,
   TANH,
   SIGMOID,
   LReLU
  };
//+------------------------------------------------------------------+
//| Weight-update (optimization) methods                             |
//+------------------------------------------------------------------+
enum ENUM_OPTIMIZATION
  {
   SGD,
   ADAM
  };
//+------------------------------------------------------------------+
//| CConnection: a single weight with its SGD and Adam state         |
//+------------------------------------------------------------------+
class CConnection : public CObject
  {
public:
   double weight;
   double deltaWeight;
   double mt;   // Adam first moment
   double vt;   // Adam second moment
   CConnection(double w) { weight=w; deltaWeight=0; mt=0; vt=0; }
   ~CConnection() {};
   //--- methods for working with files
   virtual bool Save(int const file_handle);
   virtual bool Load(int const file_handle);
   virtual int Type(void) const { return defConnect; }
  };
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CConnection::Save(int file_handle)
  {
   if(file_handle==INVALID_HANDLE)
      return false;
//---
   if(FileWriteDouble(file_handle,weight)<=0)
      return false;
   if(FileWriteDouble(file_handle,deltaWeight)<=0)
      return false;
   if(FileWriteDouble(file_handle,mt)<=0)
      return false;
   if(FileWriteDouble(file_handle,vt)<=0)
      return false;
//---
   return true;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CConnection::Load(int file_handle)
  {
   if(file_handle==INVALID_HANDLE)
      return false;
//---
   weight=FileReadDouble(file_handle);
   deltaWeight=FileReadDouble(file_handle);
   mt=FileReadDouble(file_handle);
   vt=FileReadDouble(file_handle);
//---
   return true;
  }
//+------------------------------------------------------------------+
//| CArrayCon: array of CConnection objects                          |
//+------------------------------------------------------------------+
class CArrayCon : public CArrayObj
  {
public:
   CArrayCon(void) {};
   ~CArrayCon(void) {};
   //---
   virtual bool CreateElement(int const index);
   virtual void IncreaseTotal() { m_data_total++; }
   virtual int Type(void) const { return defArrayConnects; }
  };
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CArrayCon::CreateElement(int const index)
  {
   if(index<0 || index>=m_data_max)
      return false;
//---
   double weight=(MathRand()+1)/32768.0-0.5;
   if(weight==0)
      weight=0.001;
   m_data[index]=new CConnection(weight);
   if(CheckPointer(m_data[index])==POINTER_INVALID)
      return false;
//---
   return (true);
  }
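//--- Editor's note: MathRand() returns 0..32767, so the initial
//--- weight above is uniform in (-0.5, 0.5]; an exact zero is nudged
//--- to 0.001 so no connection starts completely dead.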
//+------------------------------------------------------------------+
//| Forward declaration                                              |
//+------------------------------------------------------------------+
class CLayer;
//+------------------------------------------------------------------+
//| CNeuronBase: base class of all neuron types                      |
//+------------------------------------------------------------------+
class CNeuronBase : public CObject
  {
protected:
   double outputVal;
   double prevVal;
   uint m_myIndex;
   double gradient;
   CArrayCon *Connections;
   ENUM_ACTIVATION activation;
   ENUM_OPTIMIZATION optimization;
   int t;   // Adam iteration counter
   //---
   virtual bool feedForward(CLayer *prevLayer) { return false; }
   virtual bool calcHiddenGradients(CLayer *&nextLayer) { return false; }
   virtual bool updateInputWeights(CLayer *&prevLayer) { return false; }
   virtual double activationFunction(double x);
   virtual double SigmoidFunction(double x) { return MathPow(1+exp(-x),-1); }
   virtual double TanhFunction(double x) { return tanh(x); }
   virtual CLayer *getOutputLayer(void) { return NULL; }

public:
   CNeuronBase(void);
   ~CNeuronBase(void);
   virtual bool Init(uint numOutputs, uint myIndex, ENUM_OPTIMIZATION optimization_type);
   virtual void SetActivationFunction(ENUM_ACTIVATION value) { activation=value; }
   //---
   //static double eta;
   static double alpha;
   //---
   virtual void setOutputVal(double val) { prevVal=outputVal; outputVal=val; }
   virtual double getOutputVal() { return outputVal; }
   virtual double getPrevVal() { return prevVal; }
   virtual void setGradient(double val) { gradient=val; }
   virtual double getGradient() { return gradient; }
   virtual CArrayCon *getConnections() { return Connections; }
   virtual double activationFunctionDerivative(double x);
   virtual double SigmoidFunctionDerivative(double x) { return x*(1-x); }
   virtual double TanhFunctionDerivative(double x) { return (1+x)*(1-x); }
   //---
   virtual bool feedForward(CObject *&SourceObject);
   virtual bool calcHiddenGradients(CObject *&TargetObject);
   virtual bool updateInputWeights(CObject *&SourceObject);
   //---
   virtual bool Save(int const file_handle);
   virtual bool Load(int const file_handle)
     {
      activation=(ENUM_ACTIVATION)FileReadInteger(file_handle,INT_VALUE);
      optimization=(ENUM_OPTIMIZATION)FileReadInteger(file_handle,INT_VALUE);
      t=FileReadInteger(file_handle,INT_VALUE);
      return(Connections.Load(file_handle));
     }
   //---
   virtual int Type(void) const { return defNeuronBase; }
  };
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
//double CNeuronBase::eta=0.0000001; // net learning rate
double CNeuronBase::alpha=0.8;       // momentum
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
CNeuronBase::CNeuronBase(void) :
   outputVal(1),
   gradient(0),
   activation(TANH),
   t(1),
   optimization(SGD)
  {
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
CNeuronBase::~CNeuronBase(void)
  {
   if(CheckPointer(Connections)!=POINTER_INVALID)
      delete Connections;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuronBase::Init(uint numOutputs,uint myIndex,ENUM_OPTIMIZATION optimization_type)
  {
   if(CheckPointer(Connections)==POINTER_INVALID)
     {
      Connections=new CArrayCon();
      if(CheckPointer(Connections)==POINTER_INVALID)
         return false;
     }
//---
   if(Connections.Reserve(fmax(numOutputs,1)))
      for(uint c=0; c<numOutputs; c++)
        {
         if(!Connections.CreateElement(c))
            return false;
         Connections.IncreaseTotal();
        }
//---
   m_myIndex=myIndex;
   optimization=optimization_type;
   return true;
  }
//+------------------------------------------------------------------+
//| CNeuron: fully connected neuron                                  |
//+------------------------------------------------------------------+
class CNeuron : public CNeuronBase
  {
private:
   virtual bool feedForward(CLayer *prevLayer);
   virtual bool calcHiddenGradients(CLayer *&nextLayer);
   virtual bool updateInputWeights(CLayer *&prevLayer);

public:
   CNeuron(void) {};
   ~CNeuron(void) { Connections.Shutdown(); }
   //---
   virtual bool calcOutputGradients(double targetVals);
   virtual double sumDOW(CLayer *&nextLayer);
   virtual int Type(void) const { return defNeuron; }
  };
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuron::updateInputWeights(CLayer *&prevLayer)
  {
   if(CheckPointer(prevLayer)==POINTER_INVALID)
      return false;
//---
   double lt=eta*sqrt(1-pow(b2,t))/(1-pow(b1,t));   // Adam bias-corrected step size
   int total=prevLayer.Total();
   for(int n=0; n<total && !IsStopped(); n++)
     {
      CNeuron *neuron=prevLayer.At(n);
      CConnection *con=neuron.Connections.At(m_myIndex);
      if(CheckPointer(con)==POINTER_INVALID)
         continue;
      if(optimization==SGD)
         con.weight+=con.deltaWeight=(gradient!=0 ? eta*neuron.getOutputVal()*gradient : 0)+(con.deltaWeight!=0 ? alpha*con.deltaWeight : 0);
      else
        {
         con.mt=b1*con.mt+(1-b1)*gradient;
         con.vt=b2*con.vt+(1-b2)*pow(gradient,2)+0.00000001;
         con.weight+=con.deltaWeight=lt*con.mt/sqrt(con.vt);
        }
     }
//---
   return true;
  }
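//+------------------------------------------------------------------+
//| Editor's sketch of the two update rules implemented above:       |
//|   SGD : dw = eta*out_prev*grad + alpha*dw_prev                   |
//|   Adam: m  = b1*m + (1-b1)*grad                                  |
//|         v  = b2*v + (1-b2)*grad^2                                |
//|         dw = eta*sqrt(1-b2^t)/(1-b1^t) * m/sqrt(v)               |
//| The small 1e-8 added to vt plays the role of Adam's epsilon and  |
//| keeps the division stable when gradients vanish.                 |
//+------------------------------------------------------------------+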
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
double CNeuron::sumDOW(CLayer *&nextLayer)
  {
   double sum=0.0;
   int total=nextLayer.Total()-1;   // skip the bias neuron
   for(int n=0; n<total; n++)
     {
      CConnection *con=Connections.At(n);
      if(CheckPointer(con)==POINTER_INVALID)
         continue;
      double weight=con.weight;
      if(weight!=0)
        {
         CNeuron *neuron=nextLayer.At(n);
         sum+=weight*neuron.gradient;
        }
     }
//---
   return sum;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuron::calcHiddenGradients(CLayer *&nextLayer)
  {
   double targetVal=sumDOW(nextLayer)+outputVal;
   return calcOutputGradients(targetVal);
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuron::calcOutputGradients(double targetVal)
  {
   double delta=(targetVal>1 ? 1 : targetVal<-1 ? -1 : targetVal)-outputVal;   // target clamped to [-1,1]
   gradient=(delta!=0 ? delta*activationFunctionDerivative(outputVal) : 0);
   return true;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuron::feedForward(CLayer *prevLayer)
  {
   if(CheckPointer(prevLayer)==POINTER_INVALID || prevLayer.Type()!=defLayer)
      return false;
//---
   double sum=0.0;
   int total=prevLayer.Total();
   for(int n=0; n<total && !IsStopped(); n++)
     {
      CNeuron *temp=prevLayer.At(n);
      double val=temp.getOutputVal();
      if(val!=0)
        {
         CConnection *con=temp.Connections.At(m_myIndex);
         if(CheckPointer(con)==POINTER_INVALID)
            continue;
         sum+=val*con.weight;
        }
     }
   outputVal=activationFunction(MathMin(MathMax(sum,-18),18));
//---
   return true;
  }
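//--- Editor's note: the pre-activation sum is clamped to [-18,18]
//--- before the activation call; beyond that range tanh() and the
//--- sigmoid are already saturated, and exp(-x) could overflow.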
//+------------------------------------------------------------------+
//| COpenCLMy: thin wrapper over the standard COpenCL class          |
//+------------------------------------------------------------------+
class COpenCLMy : public COpenCL
  {
public:
   COpenCLMy(void) {};
   ~COpenCLMy(void) {};
   template<typename T>
   int AddBufferFromArray(T &data[],const uint data_array_offset,const uint data_array_count,const uint flags);
  };
//+------------------------------------------------------------------+
//| CLayer: array of neurons forming one network layer               |
//+------------------------------------------------------------------+
class CLayer : public CArrayObj
  {
private:
   uint iOutputs;
   int iFileHandle;
   COpenCLMy *OpenCL;

public:
   CLayer(uint outputs=0, int handle=INVALID_HANDLE, COpenCLMy *OpenCL=NULL);
   ~CLayer(void) {};
   //---
   virtual bool CreateElement(int const index);
   virtual void IncreaseTotal() { m_data_total++; }
   virtual int Type(void) const { return defLayer; }
   virtual bool Load(const int file_handle);
  };
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CLayer::CreateElement(int index)
  {
   if(index>=m_data_max)
      return false;
//---
   bool result=false;
   CNeuronBase *temp=NULL;
   CNeuronProof *temp_p=NULL;
   CNeuronBaseOCL *temp_ocl=NULL;
   CNeuronConvOCL *temp_con_ocl=NULL;
   CNeuronAttentionOCL *temp_at_ocl=NULL;
   if(iFileHandle<=0)
     {
      temp=new CNeuron();
      if(CheckPointer(temp)==POINTER_INVALID || !temp.Init(iOutputs,index,SGD))
         return false;
      result=true;
     }
   else
     {
      int type=FileReadInteger(iFileHandle);
      switch(type)
        {
         case defNeuron:
            temp=new CNeuron();
            if(CheckPointer(temp)==POINTER_INVALID)
               break;
            result=temp.Init(iOutputs,index,ADAM);
            break;
         case defNeuronProof:
            temp_p=new CNeuronProof();
            if(CheckPointer(temp_p)==POINTER_INVALID)
               break;
            if(temp_p.Init(iOutputs,index,1,1,1,ADAM))
              {
               temp=temp_p;
               result=true;
              }
            break;
         case defNeuronConv:
            temp_p=new CNeuronConv();
            if(CheckPointer(temp_p)==POINTER_INVALID)
               break;
            if(temp_p.Init(iOutputs,index,1,1,1,ADAM))
              {
               temp=temp_p;
               result=true;
              }
            break;
         case defNeuronLSTM:
            temp_p=new CNeuronLSTM();
            if(CheckPointer(temp_p)==POINTER_INVALID)
               break;
            if(temp_p.Init(iOutputs,index,1,1,1,ADAM))
              {
               temp=temp_p;
               result=true;
              }
            break;
         case defNeuronBaseOCL:
            if(CheckPointer(OpenCL)==POINTER_INVALID)
               return false;
            temp_ocl=new CNeuronBaseOCL();
            if(CheckPointer(temp_ocl)==POINTER_INVALID)
               break;
            if(temp_ocl.Init(iOutputs,index,OpenCL,1,ADAM))
              {
               m_data[index]=temp_ocl;
               return true;
              }
            break;
         case defNeuronConvOCL:
            if(CheckPointer(OpenCL)==POINTER_INVALID)
               return false;
            temp_con_ocl=new CNeuronConvOCL();
            if(CheckPointer(temp_con_ocl)==POINTER_INVALID)
               break;
            if(temp_con_ocl.Init(iOutputs,index,OpenCL,1,1,1,1,ADAM))
              {
               m_data[index]=temp_con_ocl;
               return true;
              }
            break;
         case defNeuronAttentionOCL:
            if(CheckPointer(OpenCL)==POINTER_INVALID)
               return false;
            temp_at_ocl=new CNeuronAttentionOCL();
            if(CheckPointer(temp_at_ocl)==POINTER_INVALID)
               break;
            if(temp_at_ocl.Init(iOutputs,index,OpenCL,1,1,ADAM))
              {
               m_data[index]=temp_at_ocl;
               return true;
              }
            break;
         default:
            result=false;
            break;
        }
     }
   if(result)
      m_data[index]=temp;
//---
   return (result);
  }
//+------------------------------------------------------------------+
//| CArrayLayer: array of CLayer objects                             |
//+------------------------------------------------------------------+
class CArrayLayer : public CArrayObj
  {
public:
   CArrayLayer(void) {};
   ~CArrayLayer(void) {};
   //---
   virtual bool CreateElement(uint neurons, uint outputs);
   virtual int Type(void) const { return defArrayLayer; }
  };
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CArrayLayer::CreateElement(uint neurons, uint outputs)
  {
   if(neurons<=0)
      return false;
//---
   CLayer *layer=new CLayer(outputs);
   if(CheckPointer(layer)==POINTER_INVALID)
      return false;
//---
   if(!layer.Reserve(neurons+1))
      return false;
   for(uint i=0; i<=neurons; i++)
     {
      if(!layer.CreateElement(i))
         return false;
      layer.IncreaseTotal();
     }
//---
   return (Add(layer));
  }
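//--- Editor's note: neurons+1 elements are created on purpose; the
//--- extra element at the end of a fully connected layer acts as the
//--- bias neuron (its output keeps the constructor value 1), which
//--- is why sumDOW() and backProp() iterate only to Total()-1.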
//+------------------------------------------------------------------+
//| CNeuronProof: subsampling (pooling) neuron                       |
//+------------------------------------------------------------------+
class CNeuronProof : public CNeuronBase
  {
protected:
   CLayer *OutputLayer;
   int iWindow;
   int iStep;

   virtual bool feedForward(CLayer *prevLayer);
   virtual bool calcHiddenGradients(CLayer *&nextLayer);

public:
   CNeuronProof(void) {};
   ~CNeuronProof(void);
   virtual bool Init(uint numOutputs,uint myIndex,int window, int step, int units_count, ENUM_OPTIMIZATION optimization_type);
   //---
   virtual CLayer *getOutputLayer(void) { return OutputLayer; }
   virtual bool calcInputGradients(CLayer *prevLayer);
   virtual bool calcInputGradients(CNeuronBase *prevNeuron, uint index);
   //--- methods for working with files
   virtual bool Save(int const file_handle);
   virtual bool Load(int const file_handle);
   virtual int Type(void) const { return defNeuronProof; }
  };
//+------------------------------------------------------------------+
//| CNeuronConv: convolutional neuron                                |
//+------------------------------------------------------------------+
class CNeuronConv : public CNeuronProof
  {
protected:
   double param;   // PReLU parameter
   virtual bool feedForward(CLayer *prevLayer);
   virtual bool calcHiddenGradients(CLayer *&nextLayer);
   virtual double activationFunction(double x);
   virtual bool updateInputWeights(CLayer *&prevLayer);

public:
   CNeuronConv() : param(0.01) { };
   ~CNeuronConv(void) { };
   //---
   virtual bool calcInputGradients(CLayer *prevLayer);
   virtual bool calcInputGradients(CNeuronBase *prevNeuron, uint index);
   virtual double activationFunctionDerivative(double x);
   virtual int Type(void) const { return defNeuronConv; }
   //--- methods for working with files
   virtual bool Save(int const file_handle);
   virtual bool Load(int const file_handle);
  };
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuronBase::feedForward(CObject *&SourceObject)
  {
   bool result=false;
//---
   if(CheckPointer(SourceObject)==POINTER_INVALID)
      return result;
//---
   CLayer *temp_l;
   CNeuronProof *temp_n;
   switch(SourceObject.Type())
     {
      case defLayer:
         temp_l=SourceObject;
         result=feedForward(temp_l);
         break;
      case defNeuronConv:
      case defNeuronProof:
      case defNeuronLSTM:
         temp_n=SourceObject;
         result=feedForward(temp_n.getOutputLayer());
         break;
     }
//---
   return result;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuronBase::updateInputWeights(CObject *&SourceObject)
  {
   bool result=false;
//---
   if(CheckPointer(SourceObject)==POINTER_INVALID)
      return result;
//---
   CLayer *temp_l;
   CNeuronProof *temp_n;
   switch(SourceObject.Type())
     {
      case defLayer:
         temp_l=SourceObject;
         result=updateInputWeights(temp_l);
         break;
      case defNeuronConv:
      case defNeuronProof:
      case defNeuronLSTM:
         temp_n=SourceObject;
         temp_l=temp_n.getOutputLayer();
         result=updateInputWeights(temp_l);
         break;
     }
//---
   return result;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuronConv::feedForward(CLayer *prevLayer)
  {
   bool result=false;
//---
   if(CheckPointer(prevLayer)==POINTER_INVALID)
      return result;
//---
   int total=prevLayer.Total()-iWindow+1;
   CNeuron *temp;
   CConnection *con;
   result=true;
   for(int i=0; (i<total && result); i+=iStep)
     {
      double sum=0;
      for(int j=0; (j<iWindow && result); j++)
        {
         temp=prevLayer.At(i+j);
         con=Connections.At(j);
         if(CheckPointer(temp)==POINTER_INVALID || CheckPointer(con)==POINTER_INVALID)
            return false;
         double val=temp.getOutputVal();
         sum+=val*con.weight;
        }
      temp=OutputLayer.At(i/iStep);
      if(CheckPointer(temp)==POINTER_INVALID)
         return false;
      temp.setOutputVal(activationFunction(sum));
     }
//---
   return result;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
double CNeuronConv::activationFunction(double x)
  {
   if(x>=0)
      return x;
   return param*x;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuronBase::calcHiddenGradients(CObject *&TargetObject)
  {
   bool result=false;
//---
   if(CheckPointer(TargetObject)==POINTER_INVALID)
      return result;
//---
   CLayer *temp_l;
   CNeuronProof *temp_n;
   switch(TargetObject.Type())
     {
      case defLayer:
         temp_l=TargetObject;
         result=calcHiddenGradients(temp_l);
         break;
      case defNeuronConv:
      case defNeuronProof:
      case defNeuronLSTM:
         switch(Type())
           {
            case defNeuron:
               temp_n=TargetObject;
               result=temp_n.calcInputGradients(GetPointer(this),m_myIndex);
               break;
            case defNeuronLSTM:
               temp_n=TargetObject;
               temp_l=getOutputLayer();
               if(!temp_n.calcInputGradients(temp_l))
                 {
                  result=false;
                  break;
                 }
               result=calcHiddenGradients(temp_l);
               break;
            default:
               //temp_n=GetPointer(this);
               temp_l=/*temp_n*/getOutputLayer();
               temp_n=TargetObject;
               result=temp_n.calcInputGradients(temp_l);
               break;
           }
         break;
     }
//---
   return result;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuronConv::calcHiddenGradients(CLayer *&nextLayer)
  {
   if(CheckPointer(nextLayer)==POINTER_INVALID || CheckPointer(OutputLayer)==POINTER_INVALID || OutputLayer.Total()<=0)
      return false;
//---
   gradient=0;
   int total=OutputLayer.Total();
   CNeuron *temp;
   for(int i=0; i<total; i++)
     {
      temp=OutputLayer.At(i);
      if(CheckPointer(temp)==POINTER_INVALID)
         return false;
      temp.setGradient(temp.sumDOW(nextLayer)*activationFunctionDerivative(temp.getOutputVal()));
     }
   return true;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
double CNeuronConv::activationFunctionDerivative(double x)
  {
   if(x>=0)
      return 1;
   return param;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuronConv::updateInputWeights(CLayer *&prevLayer)
  {
   if(CheckPointer(prevLayer)==POINTER_INVALID || CheckPointer(OutputLayer)==POINTER_INVALID)
      return false;
//---
   CConnection *con;
   double lt=eta*sqrt(1-pow(b2,t))/(1-pow(b1,t));
   for(int n=0; n<iWindow && !IsStopped(); n++)
     {
      con=Connections.At(n);
      if(CheckPointer(con)==POINTER_INVALID)
         continue;
      double delta=0;
      int total_i=OutputLayer.Total();
      CNeuron *prev, *out;
      for(int i=0; i<total_i; i++)
        {
         prev=prevLayer.At(n*iStep+i);
         out=OutputLayer.At(total_i-i-1);
         if(CheckPointer(prev)==POINTER_INVALID || CheckPointer(out)==POINTER_INVALID)
            continue;
         delta+=prev.getOutputVal()*out.getGradient();
        }
      if(optimization==SGD)
         con.weight+=con.deltaWeight=(delta!=0 ? eta*delta : 0)+(con.deltaWeight!=0 ? alpha*con.deltaWeight : 0);
      else
        {
         con.mt=b1*con.mt+(1-b1)*delta;
         con.vt=b2*con.vt+(1-b2)*pow(delta,2)+0.00000001;
         con.weight+=con.deltaWeight=lt*con.mt/sqrt(con.vt);
         t++;
        }
     }
//---
   return true;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuronProof::Init(uint numOutputs,uint myIndex,int window,int step,int units_count,ENUM_OPTIMIZATION optimization_type)
  {
   iWindow=window;
   iStep=step;
   if(!CNeuronBase::Init(window,myIndex,optimization_type))
      return false;
   OutputLayer=new CLayer(numOutputs);
   if(CheckPointer(OutputLayer)==POINTER_INVALID)
      return false;
   if(OutputLayer.Reserve(units_count))
      for(int i=0; i<units_count; i++)
        {
         if(!OutputLayer.CreateElement(i))
            return false;
         OutputLayer.IncreaseTotal();
        }
//---
   if(Type()==defNeuronProof)
     {
      if(CheckPointer(Connections)!=POINTER_INVALID)
         Connections.Clear();
     }
//---
   return true;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
CNeuronProof::~CNeuronProof(void)
  {
   delete OutputLayer;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuronProof::feedForward(CLayer *prevLayer)
  {
   if(CheckPointer(prevLayer)==POINTER_INVALID)
      return false;
//---
   int total=prevLayer.Total()-iWindow+1;
   CNeuron *temp;
   for(int i=0; i<=total; i+=iStep)
     {
      double sum=0;
      for(int j=0; j<iWindow; j++)
        {
         temp=prevLayer.At(i+j);
         if(CheckPointer(temp)==POINTER_INVALID)
            continue;
         sum+=temp.getOutputVal();
        }
      temp=OutputLayer.At(i/iStep);
      if(CheckPointer(temp)==POINTER_INVALID)
         return false;
      temp.setOutputVal(sum/iWindow);   // average pooling over the window
     }
//---
   return true;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuronProof::calcHiddenGradients(CLayer *&nextLayer)
  {
   if(CheckPointer(nextLayer)==POINTER_INVALID || CheckPointer(OutputLayer)==POINTER_INVALID || OutputLayer.Total()<=0)
      return false;
//---
   gradient=0;
   int total=OutputLayer.Total();
   CNeuron *temp;
   for(int i=0; i<total; i++)
     {
      temp=OutputLayer.At(i);
      if(CheckPointer(temp)==POINTER_INVALID)
         return false;
      temp.setGradient(temp.sumDOW(nextLayer));
     }
//---
   return true;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuronProof::calcInputGradients(CLayer *prevLayer)
  {
   if(CheckPointer(prevLayer)==POINTER_INVALID || CheckPointer(OutputLayer)==POINTER_INVALID || CheckPointer(prevLayer.At(0))==POINTER_INVALID)
      return false;
//---
   if(prevLayer.At(0).Type()!=defNeuron)
     {
      CNeuronProof *temp=prevLayer.At(m_myIndex);
      if(CheckPointer(temp)==POINTER_INVALID)
         return false;
      prevLayer=temp.getOutputLayer();
      if(CheckPointer(prevLayer)==POINTER_INVALID)
         return false;
     }
//---
   CNeuronBase *prevNeuron, *outputNeuron;
   int total=prevLayer.Total();
   for(int i=0; i<total; i++)
     {
      prevNeuron=prevLayer.At(i);
      if(CheckPointer(prevNeuron)==POINTER_INVALID)
         continue;
      double prev_gradient=0;
      int start=i-iWindow+iStep;
      start=(start-start%iStep)/iStep;
      double stop=(i-i%iStep)/iStep+1;
      for(int out=(int)fmax(0,start); out<(int)fmin(OutputLayer.Total(),stop); out++)
        {
         outputNeuron=OutputLayer.At(out);
         if(CheckPointer(outputNeuron)==POINTER_INVALID)
            continue;
         prev_gradient+=outputNeuron.getGradient()/iWindow;
        }
      prevNeuron.setGradient(prev_gradient);
     }
//---
   return true;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuronProof::calcInputGradients(CNeuronBase *prevNeuron,uint index)
  {
   if(CheckPointer(prevNeuron)==POINTER_INVALID || CheckPointer(OutputLayer)==POINTER_INVALID)
      return false;
//---
   if(prevNeuron.Type()!=defNeuron)
     {
      CNeuronProof *temp=prevNeuron;
      return calcInputGradients(temp.getOutputLayer());
     }
//---
   CNeuronBase *outputNeuron;
   double prev_gradient=0;
   int start=(int)index-iWindow+iStep;
   start=(start-start%iStep)/iStep;
   double stop=(index-index%iStep)/iStep+1;
   for(int out=(int)fmax(0,start); out<(int)fmin(OutputLayer.Total(),stop); out++)
     {
      outputNeuron=OutputLayer.At(out);
      if(CheckPointer(outputNeuron)==POINTER_INVALID)
         continue;
      prev_gradient+=outputNeuron.getGradient()/iWindow;
     }
   prevNeuron.setGradient(prev_gradient);
//---
   return true;
  }
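//+------------------------------------------------------------------+
//| Editor's sketch of the start/stop arithmetic above: an input at  |
//| position index feeds every pooling output whose window covers    |
//| it. With iWindow=3, iStep=1, index=5:                            |
//|   start = (5-3+1) = 3, then 3/1 = 3                              |
//|   stop  = 5/1+1   = 6                                            |
//| so outputs 3, 4 and 5 (windows 3..5, 4..6, 5..7) each pass       |
//| 1/iWindow of their gradient back to input 5.                     |
//+------------------------------------------------------------------+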
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuronConv::calcInputGradients(CLayer *prevLayer)
  {
   if(CheckPointer(prevLayer)==POINTER_INVALID || CheckPointer(OutputLayer)==POINTER_INVALID)
      return false;
//---
   if(prevLayer.At(0).Type()!=defNeuron)
     {
      CNeuronProof *temp=prevLayer.At(m_myIndex);
      if(CheckPointer(temp)==POINTER_INVALID)
         return false;
      prevLayer=temp.getOutputLayer();
      if(CheckPointer(prevLayer)==POINTER_INVALID)
         return false;
     }
//---
   CNeuronBase *prevNeuron, *outputNeuron;
   CConnection *con;
   int total=prevLayer.Total();
   for(int i=0; i<total; i++)
     {
      prevNeuron=prevLayer.At(i);
      if(CheckPointer(prevNeuron)==POINTER_INVALID)
         continue;
      double prev_gradient=0;
      int start=i-iWindow+iStep;
      start=(start-start%iStep)/iStep;
      double stop=(i-i%iStep)/iStep+1;
      for(int out=(int)fmax(0,start); out<(int)fmin(OutputLayer.Total(),stop); out++)
        {
         outputNeuron=OutputLayer.At(out);
         int c=((int)fmin(OutputLayer.Total(),stop)-out-1)*iStep+i%iStep;
         con=Connections.At(c);
         if(CheckPointer(outputNeuron)==POINTER_INVALID || CheckPointer(con)==POINTER_INVALID)
            continue;
         prev_gradient+=outputNeuron.getGradient()*prevNeuron.activationFunctionDerivative(prevNeuron.getOutputVal())*con.weight;
        }
      prevNeuron.setGradient(prev_gradient);
     }
//---
   return true;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuronConv::calcInputGradients(CNeuronBase *prevNeuron,uint index)
  {
   if(CheckPointer(prevNeuron)==POINTER_INVALID || CheckPointer(OutputLayer)==POINTER_INVALID)
      return false;
//---
   if(prevNeuron.Type()!=defNeuron)
     {
      CNeuronProof *temp=prevNeuron;
      return calcInputGradients(temp.getOutputLayer());
     }
//---
   CNeuronBase *outputNeuron;
   CConnection *con;
   double prev_gradient=0;
   int start=(int)index-iWindow+iStep;
   start=(start-start%iStep)/iStep;
   double stop=(index-index%iStep)/iStep+1;
   for(int out=(int)fmax(0,start); out<(int)fmin(OutputLayer.Total(),stop); out++)
     {
      outputNeuron=OutputLayer.At(out);
      int c=(int)(((int)fmin(OutputLayer.Total(),stop)-out-1)*iStep+index%iStep);
      con=Connections.At(c);
      if(CheckPointer(outputNeuron)==POINTER_INVALID || CheckPointer(con)==POINTER_INVALID)
         continue;
      prev_gradient+=outputNeuron.getGradient()*activationFunctionDerivative(outputNeuron.getOutputVal())*con.weight;
     }
   prevNeuron.setGradient(prev_gradient);
//---
   return true;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuronBase::Save(int file_handle)
  {
   if(file_handle==INVALID_HANDLE)
      return false;
   if(FileWriteInteger(file_handle,Type())<INT_VALUE)
      return false;
//---
   if(FileWriteInteger(file_handle,(int)activation,INT_VALUE)<INT_VALUE)
      return false;
//---
   if(FileWriteInteger(file_handle,(int)optimization,INT_VALUE)<INT_VALUE)
      return false;
//---
   if(FileWriteInteger(file_handle,t,INT_VALUE)<INT_VALUE)
      return false;
//---
   return Connections.Save(file_handle);
  }
//+------------------------------------------------------------------+
//| CLayerDescription: describes one layer for the CNet constructor  |
//+------------------------------------------------------------------+
class CLayerDescription : public CObject
  {
public:
   CLayerDescription(void);
   ~CLayerDescription(void) {};
   //---
   int type;
   int count;
   int window;
   int window_out;
   int step;
   ENUM_ACTIVATION activation;
   ENUM_OPTIMIZATION optimization;
  };
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
CLayerDescription::CLayerDescription(void) :
   count(0),
   window(1),
   step(1),
   activation(TANH),
   optimization(SGD)
  {}
//+------------------------------------------------------------------+
//| CNet: the neural network                                         |
//+------------------------------------------------------------------+
class CNet
  {
protected:
   void backPropOCL(CArrayDouble *targetVals);

public:
   CNet(CArrayObj *Description);
   ~CNet(void);
   bool feedForward(CArrayDouble *inputVals);
   void backProp(CArrayDouble *targetVals);
   void getResults(CArrayDouble *&resultVals);
   double getRecentAverageError() { return recentAverageError; }
   bool Save(string file_name, double error, double undefine, double forecast, datetime time, bool common=true);
   bool Load(string file_name, double &error, double &undefine, double &forecast, datetime &time, bool common=true);
   //---
   static double recentAverageSmoothingFactor;
   virtual int Type(void) const { return defNet; }

private:
   CArrayLayer *layers;
   COpenCLMy *opencl;
   double recentAverageError;
  };
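//+------------------------------------------------------------------+
//| Usage sketch (editor's example, not part of the library): build  |
//| a three-layer description and run one training pass. The counts, |
//| windows and the 20-element input are illustrative only.          |
//+------------------------------------------------------------------+
//  CArrayObj *topology=new CArrayObj();
//  CLayerDescription *desc=new CLayerDescription();
//  desc.type=defNeuron;     desc.count=20;            // input layer
//  topology.Add(desc);
//  desc=new CLayerDescription();
//  desc.type=defNeuronConv; desc.count=8;             // hidden conv layer
//  desc.window=5; desc.step=1; desc.optimization=ADAM;
//  topology.Add(desc);
//  desc=new CLayerDescription();
//  desc.type=defNeuron;     desc.count=3;             // output layer
//  desc.optimization=ADAM;  desc.activation=TANH;
//  topology.Add(desc);
//  CNet *net=new CNet(topology);
//  CArrayDouble *inputs=new CArrayDouble();           // fill with 20 values
//  CArrayDouble *targets=new CArrayDouble();          // fill with 3 values
//  CArrayDouble *outputs=NULL;
//  if(net.feedForward(inputs))
//    {
//     net.getResults(outputs);
//     net.backProp(targets);
//    }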
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
double CNet::recentAverageSmoothingFactor=10000.0;   // number of training samples to average over
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
CNet::CNet(CArrayObj *Description)
  {
   if(CheckPointer(Description)==POINTER_INVALID)
      return;
//---
   int total=Description.Total();
   if(total<=0)
      return;
//---
   layers=new CArrayLayer();
   if(CheckPointer(layers)==POINTER_INVALID)
      return;
//---
   CLayer *temp;
   CLayerDescription *desc=NULL, *next=NULL, *prev=NULL;
   CNeuronBase *neuron=NULL;
   CNeuronProof *neuron_p=NULL;
   int output_count=0;
   int temp_count=0;
//---
   next=Description.At(1);
   if(CheckPointer(next)==POINTER_INVALID)
      return;
   if(next.type==defNeuron || next.type==defNeuronBaseOCL || next.type==defNeuronConvOCL || next.type==defNeuronAttentionOCL)
     {
      opencl=new COpenCLMy();
      if(CheckPointer(opencl)!=POINTER_INVALID && !opencl.Initialize(cl_program,true))
         delete opencl;
     }
   else
     {
      if(CheckPointer(opencl)!=POINTER_INVALID)
         delete opencl;
     }
//---
   for(int i=0; i<total; i++)
     {
      prev=desc;
      desc=Description.At(i);
      if((i+1)<total)
        {
         next=Description.At(i+1);
         if(CheckPointer(next)==POINTER_INVALID)
            return;
        }
      else
         next=NULL;
      int outputs=(next==NULL || (next.type!=defNeuron && next.type!=defNeuronBaseOCL) ? 0 : next.count);
      temp=new CLayer(outputs);
      int neurons=(desc.count+(desc.type==defNeuron || desc.type==defNeuronBaseOCL ? 1 : 0));   // +1 bias neuron
      if(CheckPointer(opencl)!=POINTER_INVALID)
        {
         CNeuronBaseOCL *neuron_ocl=NULL;
         CNeuronConvOCL *neuron_conv_ocl=NULL;
         CNeuronAttentionOCL *neuron_attention_ocl=NULL;
         switch(desc.type)
           {
            case defNeuron:
            case defNeuronBaseOCL:
               neuron_ocl=new CNeuronBaseOCL();
               if(CheckPointer(neuron_ocl)==POINTER_INVALID)
                 {
                  delete temp;
                  return;
                 }
               if(!neuron_ocl.Init(outputs,0,opencl,desc.count,desc.optimization))
                 {
                  delete neuron_ocl;
                  delete temp;
                  return;
                 }
               neuron_ocl.SetActivationFunction(desc.activation);
               if(!temp.Add(neuron_ocl))
                 {
                  delete neuron_ocl;
                  delete temp;
                  return;
                 }
               neuron_ocl=NULL;
               break;
            case defNeuronConvOCL:
               neuron_conv_ocl=new CNeuronConvOCL();
               if(CheckPointer(neuron_conv_ocl)==POINTER_INVALID)
                 {
                  delete temp;
                  return;
                 }
               if(!neuron_conv_ocl.Init(outputs,0,opencl,desc.window,desc.step,desc.window_out,desc.count,desc.optimization))
                 {
                  delete neuron_conv_ocl;
                  delete temp;
                  return;
                 }
               neuron_conv_ocl.SetActivationFunction(desc.activation);
               if(!temp.Add(neuron_conv_ocl))
                 {
                  delete neuron_conv_ocl;
                  delete temp;
                  return;
                 }
               neuron_conv_ocl=NULL;
               break;
            case defNeuronAttentionOCL:
               neuron_attention_ocl=new CNeuronAttentionOCL();
               if(CheckPointer(neuron_attention_ocl)==POINTER_INVALID)
                 {
                  delete temp;
                  return;
                 }
               if(!neuron_attention_ocl.Init(outputs,0,opencl,desc.window,desc.count,desc.optimization))
                 {
                  delete neuron_attention_ocl;
                  delete temp;
                  return;
                 }
               neuron_attention_ocl.SetActivationFunction(desc.activation);
               if(!temp.Add(neuron_attention_ocl))
                 {
                  delete neuron_attention_ocl;
                  delete temp;
                  return;
                 }
               neuron_attention_ocl=NULL;
               break;
            default:
               delete temp;
               return;
           }
        }
      else
         for(int n=0; n<neurons; n++)
           {
            switch(desc.type)
              {
               case defNeuron:
                  neuron=new CNeuron();
                  if(CheckPointer(neuron)==POINTER_INVALID)
                    {
                     delete temp;
                     delete layers;
                     return;
                    }
                  neuron.Init(outputs,n,desc.optimization);
                  neuron.SetActivationFunction(desc.activation);
                  break;
               case defNeuronConv:
                  neuron_p=new CNeuronConv();
                  if(CheckPointer(neuron_p)==POINTER_INVALID)
                    {
                     delete temp;
                     delete layers;
                     return;
                    }
                  if(CheckPointer(prev)!=POINTER_INVALID)
                    {
                     if(prev.type==defNeuron)
                       {
                        temp_count=(int)((prev.count-desc.window)%desc.step);
                        output_count=(int)((prev.count-desc.window-temp_count)/desc.step+(temp_count==0 ? 1 : 2));
                       }
                     else
                        if(n==0)
                          {
                           temp_count=(int)((output_count-desc.window)%desc.step);
                           output_count=(int)((output_count-desc.window-temp_count)/desc.step+(temp_count==0 ? 1 : 2));
                          }
                    }
                  if(neuron_p.Init(outputs,n,desc.window,desc.step,output_count,desc.optimization))
                     neuron=neuron_p;
                  break;
               case defNeuronProof:
                  neuron_p=new CNeuronProof();
                  if(CheckPointer(neuron_p)==POINTER_INVALID)
                    {
                     delete temp;
                     delete layers;
                     return;
                    }
                  if(CheckPointer(prev)!=POINTER_INVALID)
                    {
                     if(prev.type==defNeuron)
                       {
                        temp_count=(int)((prev.count-desc.window)%desc.step);
                        output_count=(int)((prev.count-desc.window-temp_count)/desc.step+(temp_count==0 ? 1 : 2));
                       }
                     else
                        if(n==0)
                          {
                           temp_count=(int)((output_count-desc.window)%desc.step);
                           output_count=(int)((output_count-desc.window-temp_count)/desc.step+(temp_count==0 ? 1 : 2));
                          }
                    }
                  if(neuron_p.Init(outputs,n,desc.window,desc.step,output_count,desc.optimization))
                     neuron=neuron_p;
                  break;
               case defNeuronLSTM:
                  neuron_p=new CNeuronLSTM();
                  if(CheckPointer(neuron_p)==POINTER_INVALID)
                    {
                     delete temp;
                     delete layers;
                     return;
                    }
                  output_count=(next!=NULL ? next.window : desc.step);
                  if(neuron_p.Init(outputs,n,desc.window,1,output_count,desc.optimization))
                     neuron=neuron_p;
                  break;
              }
            if(!temp.Add(neuron))
              {
               delete temp;
               delete layers;
               return;
              }
            neuron=NULL;
           }
      if(!layers.Add(temp))
        {
         delete temp;
         delete layers;
         return;
        }
     }
//---
   if(CheckPointer(opencl)==POINTER_INVALID)
      return;
//--- create kernels
   opencl.SetKernelsCount(17);
   opencl.KernelCreate(def_k_FeedForward,"FeedForward");
   opencl.KernelCreate(def_k_CalcOutputGradient,"CalcOutputGradient");
   opencl.KernelCreate(def_k_CalcHiddenGradient,"CalcHiddenGradient");
   opencl.KernelCreate(def_k_UpdateWeightsMomentum,"UpdateWeightsMomentum");
   opencl.KernelCreate(def_k_UpdateWeightsAdam,"UpdateWeightsAdam");
   opencl.KernelCreate(def_k_AttentionGradients,"AttentionIsideGradients");   // (sic) must match the kernel name in NeuroNet.cl
   opencl.KernelCreate(def_k_AttentionOut,"AttentionOut");
   opencl.KernelCreate(def_k_AttentionScore,"AttentionScore");
   opencl.KernelCreate(def_k_CalcHiddenGradientConv,"CalcHiddenGradientConv");
   opencl.KernelCreate(def_k_CalcInputGradientProof,"CalcInputGradientProof");
   opencl.KernelCreate(def_k_FeedForwardConv,"FeedForwardConv");
   opencl.KernelCreate(def_k_FeedForwardProof,"FeedForwardProof");
   opencl.KernelCreate(def_k_MatrixSum,"SumMatrix");
   opencl.KernelCreate(def_k_UpdateWeightsConvAdam,"UpdateWeightsConvAdam");
   opencl.KernelCreate(def_k_UpdateWeightsConvMomentum,"UpdateWeightsConvMomentum");
   opencl.KernelCreate(def_k_Normilize,"Normalize");
   opencl.KernelCreate(def_k_NormilizeWeights,"NormalizeWeights");
//---
   return;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNet::feedForward(CArrayDouble *inputVals)
  {
   if(CheckPointer(layers)==POINTER_INVALID || CheckPointer(inputVals)==POINTER_INVALID || layers.Total()<=1)
      return false;
//---
   CLayer *previous=NULL;
   CLayer *current=layers.At(0);
   int total=MathMin(current.Total(),inputVals.Total());
   CNeuronBase *neuron=NULL;
   if(CheckPointer(opencl)==POINTER_INVALID)
     {
      for(int i=0; i<total; i++)
        {
         neuron=current.At(i);
         if(CheckPointer(neuron)==POINTER_INVALID)
            return false;
         neuron.setOutputVal(inputVals.At(i)+(i%2==0 ? sin(i) : cos(i)));
        }
     }
   else
     {
      CNeuronBaseOCL *neuron_ocl=current.At(0);
      double array[];
      int total_data=inputVals.Total();
      if(ArrayResize(array,total_data)<0)
         return false;
      for(int d=0; d<total_data; d++)
         array[d]=inputVals.At(d)+(d%2==0 ? sin(d) : cos(d));
      if(!opencl.BufferWrite(neuron_ocl.getOutputIndex(),array,0,0,total_data))
         return false;
     }
//---
   CObject *temp=NULL;
   for(int l=1; l<layers.Total(); l++)
     {
      previous=current;
      current=layers.At(l);
      if(CheckPointer(current)==POINTER_INVALID)
         return false;
      //---
      if(CheckPointer(opencl)!=POINTER_INVALID)
        {
         CNeuronBaseOCL *current_ocl=current.At(0);
         if(!current_ocl.FeedForward(previous.At(0)))
            return false;
         continue;
        }
      //---
      total=current.Total();
      if(current.At(0).Type()==defNeuron)
         total--;   // skip the bias neuron
      //---
      for(int n=0; n<total; n++)
        {
         neuron=current.At(n);
         if(CheckPointer(neuron)==POINTER_INVALID)
            return false;
         if(previous.At(0).Type()==defNeuron)
           {
            temp=previous;
            if(!neuron.feedForward(temp))
               return false;
            continue;
           }
         if(neuron.Type()==defNeuron)
           {
            if(n==0)
              {
               CLayer *temp_l=new CLayer(total);
               if(CheckPointer(temp_l)==POINTER_INVALID)
                  return false;
               CNeuronProof *proof=NULL;
               for(int p=0; p<previous.Total(); p++)
                 {
                  proof=previous.At(p);
                  if(CheckPointer(proof)==POINTER_INVALID)
                     return false;
                  temp_l.AddArray(proof.getOutputLayer());
                 }
               temp=temp_l;
              }
            if(!neuron.feedForward(temp))
               return false;
            if(n==total-1)
              {
               CLayer *temp_l=temp;
               temp_l.FreeMode(false);
               temp_l.Shutdown();
               delete temp_l;
              }
            continue;
           }
         temp=previous.At(n);
         if(CheckPointer(temp)==POINTER_INVALID)
            return false;
         if(!neuron.feedForward(temp))
            return false;
        }
     }
//---
   return true;
  }
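//--- Editor's note: both input branches above mix a deterministic
//--- sin(i)/cos(i) offset into the raw values, giving every input
//--- element a position-dependent signature before the first layer
//--- sees it.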
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
void CNet::backProp(CArrayDouble *targetVals)
  {
   if(CheckPointer(targetVals)==POINTER_INVALID || CheckPointer(layers)==POINTER_INVALID)
      return;
   if(CheckPointer(opencl)!=POINTER_INVALID)
     {
      backPropOCL(targetVals);
      return;
     }
//---
   CLayer *outputLayer=layers.At(layers.Total()-1);
   if(CheckPointer(outputLayer)==POINTER_INVALID)
      return;
//--- output gradients and RMSE
   double error=0.0;
   int total=outputLayer.Total()-1;
   for(int n=0; n<total && !IsStopped(); n++)
     {
      CNeuron *neuron=outputLayer.At(n);
      double target=targetVals.At(n);
      double delta=(target>1 ? 1 : target<-1 ? -1 : target)-neuron.getOutputVal();
      error+=delta*delta;
      neuron.calcOutputGradients(targetVals.At(n));
     }
   error/=total;
   error=sqrt(error);
   recentAverageError+=(error-recentAverageError)/recentAverageSmoothingFactor;
//--- hidden gradients
   CNeuronBase *neuron=NULL;
   CObject *temp=NULL;
   for(int layerNum=layers.Total()-2; layerNum>0; layerNum--)
     {
      CLayer *hiddenLayer=layers.At(layerNum);
      CLayer *nextLayer=layers.At(layerNum+1);
      total=hiddenLayer.Total();
      for(int n=0; n<total && !IsStopped(); ++n)
        {
         neuron=hiddenLayer.At(n);
         if(nextLayer.At(0).Type()==defNeuron)
           {
            temp=nextLayer;
            neuron.calcHiddenGradients(temp);
            continue;
           }
         if(neuron.Type()==defNeuron)
           {
            double g=0;
            for(int i=0; i<nextLayer.Total(); i++)
              {
               temp=nextLayer.At(i);
               neuron.calcHiddenGradients(temp);
               g+=neuron.getGradient();
              }
            neuron.setGradient(g);
            continue;
           }
         temp=nextLayer.At(n);
         neuron.calcHiddenGradients(temp);
        }
     }
//--- weight updates
   for(int layerNum=layers.Total()-1; layerNum>0; layerNum--)
     {
      CLayer *layer=layers.At(layerNum);
      CLayer *prevLayer=layers.At(layerNum-1);
      total=layer.Total()-(layer.At(0).Type()==defNeuron ? 1 : 0);
      int n_conv=0;
      for(int n=0; n<total && !IsStopped(); n++)
        {
         neuron=layer.At(n);
         if(CheckPointer(neuron)==POINTER_INVALID)
            return;
         if(neuron.Type()==defNeuronProof)
            continue;   // pooling layers have no weights to update
         switch(prevLayer.At(0).Type())
           {
            case defNeuron:
               temp=prevLayer;
               neuron.updateInputWeights(temp);
               break;
            case defNeuronConv:
            case defNeuronProof:
            case defNeuronLSTM:
               if(neuron.Type()==defNeuron)
                 {
                  for(n_conv=0; n_conv<prevLayer.Total(); n_conv++)
                    {
                     temp=prevLayer.At(n_conv);
                     neuron.updateInputWeights(temp);
                    }
                 }
               else
                 {
                  temp=prevLayer.At(n);
                  neuron.updateInputWeights(temp);
                 }
               break;
            default:
               temp=NULL;
               break;
           }
        }
     }
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
void CNet::backPropOCL(CArrayDouble *targetVals)
  {
   if(CheckPointer(targetVals)==POINTER_INVALID || CheckPointer(layers)==POINTER_INVALID || CheckPointer(opencl)==POINTER_INVALID)
      return;
   CLayer *currentLayer=layers.At(layers.Total()-1);
   if(CheckPointer(currentLayer)==POINTER_INVALID)
      return;
//---
   double error=0.0;
   int total=targetVals.Total();
   double result[];
   CNeuronBaseOCL *neuron=currentLayer.At(0);
   if(neuron.getOutputVal(result)<total)
      return;
   for(int n=0; n<total && !IsStopped(); n++)
     {
      double target=targetVals.At(n);
      double delta=(target==0 ? 0 : (target>1 ? 1 : target<-1 ? -1 : target)-result[n]);
      error+=MathPow(delta,2);
     }
   error/=total;
   error=sqrt(error);
   recentAverageError+=(error-recentAverageError)/recentAverageSmoothingFactor;

   if(!neuron.calcOutputGradients(targetVals))
      return;
//--- Calc Hidden Gradients
   CObject *temp=NULL;
   total=layers.Total();
   for(int layerNum=total-2; layerNum>0; layerNum--)
     {
      CLayer *nextLayer=currentLayer;
      currentLayer=layers.At(layerNum);
      neuron=currentLayer.At(0);
      if(!neuron.calcHiddenGradients(nextLayer.At(0)))
         return;
     }
//---
   CLayer *prevLayer=layers.At(total-1);
   for(int layerNum=total-1; layerNum>0; layerNum--)
     {
      currentLayer=prevLayer;
      prevLayer=layers.At(layerNum-1);
      neuron=currentLayer.At(0);
      if(!neuron.UpdateInputWeights(prevLayer.At(0)))
         break;
     }
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
void CNet::getResults(CArrayDouble *&resultVals)
  {
   if(CheckPointer(resultVals)==POINTER_INVALID)
     {
      resultVals=new CArrayDouble();
      if(CheckPointer(resultVals)==POINTER_INVALID)
         return;
     }
//---
   resultVals.Clear();
   if(CheckPointer(layers)==POINTER_INVALID || layers.Total()<=0)
      return;
//---
   CLayer *output=layers.At(layers.Total()-1);
   if(CheckPointer(output)==POINTER_INVALID)
      return;
//---
   if(CheckPointer(opencl)!=POINTER_INVALID && output.At(0).Type()==defNeuronBaseOCL)
     {
      CNeuronBaseOCL *temp=output.At(0);
      temp.getOutputVal(resultVals);
      return;
     }
   CNeuronBase *neuron=NULL;
   CLayer *temp=NULL;
   int total=output.Total();
   if(output.At(0).Type()==defNeuron)
      total--;
//---
   for(int i=0; i<total; i++)
     {
      neuron=output.At(i);
      if(CheckPointer(neuron)==POINTER_INVALID)
         continue;
      if(neuron.Type()==defNeuron)
        {
         resultVals.Add(neuron.getOutputVal());
         continue;
        }
      CNeuronProof *n=neuron;
      temp=n.getOutputLayer();
      for(int ii=0; ii<temp.Total(); ii++)
        {
         neuron=temp.At(ii);
         if(CheckPointer(neuron)==POINTER_INVALID)
            continue;
         resultVals.Add(neuron.getOutputVal());
        }
     }
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNet::Save(string file_name,double error,double undefine,double forecast,datetime time,bool common=true)
  {
   if(MQLInfoInteger(MQL_OPTIMIZATION) || MQLInfoInteger(MQL_TESTER) || MQLInfoInteger(MQL_FORWARD))
      return true;
   if(file_name==NULL)
      return false;
//---
   int handle=FileOpen(file_name,(common ? FILE_COMMON : 0)|FILE_BIN|FILE_WRITE);
   if(handle==INVALID_HANDLE)
      return false;
//---
   if(FileWriteDouble(handle,error)<=0 || FileWriteDouble(handle,undefine)<=0 || FileWriteDouble(handle,forecast)<=0 || FileWriteLong(handle,(long)time)<=0)
     {
      FileClose(handle);
      return false;
     }
   bool result=layers.Save(handle);
   FileFlush(handle);
   FileClose(handle);
//---
   return result;
  }
1979 //+------------------------------------------------------------------+
1980 //| |
1981 //+------------------------------------------------------------------+
1982 bool CNet::Load(string file_name,double &error,double &undefine,double &forecast,datetime &time,bool common=true)
1983  {
 1984  if(MQLInfoInteger(MQL_OPTIMIZATION) || MQLInfoInteger(MQL_TESTER) || MQLInfoInteger(MQL_FORWARD))
1985  return false;
1986 //---
1987  if(file_name==NULL)
1988  return false;
1989 //---
1990  Print(file_name);
1991  int handle=FileOpen(file_name,(common ? FILE_COMMON : 0)|FILE_BIN|FILE_READ);
1992  if(handle==INVALID_HANDLE)
1993  return false;
1994 //---
1995  error=FileReadDouble(handle);
1996  undefine=FileReadDouble(handle);
1997  forecast=FileReadDouble(handle);
1998  time=(datetime)FileReadLong(handle);
1999 //---
2000  if(CheckPointer(layers)!=POINTER_INVALID)
2001  layers.Clear();
2002  else
2003  layers=new CArrayLayer();
2004  int i=0,num;
2005 //---
2006  if(CheckPointer(opencl)==POINTER_INVALID)
2007  {
2008  opencl=new COpenCLMy();
2009  if(CheckPointer(opencl)!=POINTER_INVALID && !opencl.Initialize(cl_program,true))
2010  delete opencl;
2011  else
2012  {
2013  //--- create kernels
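 //--- every name passed to KernelCreate() must match an entry point in the cl_program
 //--- source, and SetKernelsCount() above must cover every kernel index used below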
2014  opencl.SetKernelsCount(17);
2015  opencl.KernelCreate(def_k_FeedForward,"FeedForward");
2016  opencl.KernelCreate(def_k_CalcOutputGradient,"CalcOutputGradient");
2017  opencl.KernelCreate(def_k_CalcHiddenGradient,"CalcHiddenGradient");
2018  opencl.KernelCreate(def_k_UpdateWeightsMomentum,"UpdateWeightsMomentum");
2019  opencl.KernelCreate(def_k_UpdateWeightsAdam,"UpdateWeightsAdam");
2020  opencl.KernelCreate(def_k_AttentionGradients,"AttentionIsideGradients");
2021  opencl.KernelCreate(def_k_AttentionOut,"AttentionOut");
2022  opencl.KernelCreate(def_k_AttentionScore,"AttentionScore");
2023  opencl.KernelCreate(def_k_CalcHiddenGradientConv,"CalcHiddenGradientConv");
2024  opencl.KernelCreate(def_k_CalcInputGradientProof,"CalcInputGradientProof");
2025  opencl.KernelCreate(def_k_FeedForwardConv,"FeedForwardConv");
2026  opencl.KernelCreate(def_k_FeedForwardProof,"FeedForwardProof");
2027  opencl.KernelCreate(def_k_MatrixSum,"SumMatrix");
2028  opencl.KernelCreate(def_k_UpdateWeightsConvAdam,"UpdateWeightsConvAdam");
2029  opencl.KernelCreate(def_k_UpdateWeightsConvMomentum,"UpdateWeightsConvMomentum");
2030  opencl.KernelCreate(def_k_Normilize,"Normalize");
2031  opencl.KernelCreate(def_k_NormilizeWeights,"NormalizeWeights");
2032  }
2033  }
2034 //--- check
2035 //--- read and check start marker - 0xFFFFFFFFFFFFFFFF
2036  long temp=FileReadLong(handle);
2037  if(temp==-1)
2038  {
2039  //--- read and check array type
2040  if(FileReadInteger(handle,INT_VALUE)!=layers.Type())
2041  {
2042  FileClose(handle);
2043  return(false);
2044  }
2045  }
2046  else
2047  {
2048  FileClose(handle);
2049  return(false);
2050  }
2051 //--- read array length
2052  num=FileReadInteger(handle,INT_VALUE);
2053 //--- read array
2054  if(num!=0)
2055  {
2056  for(i=0; i<num; i++)
2057  {
2058  //--- create new element
2059  CLayer *Layer=new CLayer(0,handle,opencl);
2060  if(!Layer.Load(handle))
2061  break;
2062  if(!layers.Add(Layer))
2063  break;
2064  }
2065  }
2066  FileClose(handle);
2067 //--- result
2068  return (layers.Total()==num);
2069  }
2070 //+------------------------------------------------------------------+
2071 //| |
2072 //+------------------------------------------------------------------+
2073 bool CNeuronProof::Save(const int file_handle)
2074  {
2075  if(!CNeuronBase::Save(file_handle) || !OutputLayer.Save(file_handle))
2076  return false;
2077  if(FileWriteInteger(file_handle,iWindow,INT_VALUE)<INT_VALUE)
2078  return false;
2079  if(FileWriteInteger(file_handle,iStep,INT_VALUE)<INT_VALUE)
2080  return false;
2081 //---
2082  return true;
2083  }
2084 //+------------------------------------------------------------------+
2085 //| |
2086 //+------------------------------------------------------------------+
2087 bool CNeuronProof::Load(const int file_handle)
2088  {
2089  if(!CNeuronBase::Load(file_handle) || !OutputLayer.Load(file_handle))
2090  return false;
2091  iWindow=FileReadInteger(file_handle,INT_VALUE);
2092  iStep=FileReadInteger(file_handle,INT_VALUE);
2093 //---
2094  return true;
2095  }
2096 //+------------------------------------------------------------------+
2097 //| |
2098 //+------------------------------------------------------------------+
2099 bool CNeuronConv::Save(const int file_handle)
2100  {
2101  if(!CNeuronProof::Save(file_handle))
2102  return false;
2103  if(FileWriteDouble(file_handle,param)<8)
2104  return false;
2105 //---
2106  return true;
2107  }
2108 //+------------------------------------------------------------------+
2109 //| |
2110 //+------------------------------------------------------------------+
2111 bool CNeuronConv::Load(const int file_handle)
2112  {
2113  if(!CNeuronProof::Load(file_handle))
2114  return false;
2115  param=FileReadDouble(file_handle);
2116 //---
2117  return true;
2118  }
2119 //+------------------------------------------------------------------+
2123 //+------------------------------------------------------------------+
 2124 class CNeuronLSTM : public CNeuronProof
 2125  {
 2126 protected:
 2127  CLayer *ForgetGate;
 2128  CLayer *InputGate;
 2129  CLayer *OutputGate;
 2130  CLayer *NewContent;
 2131  CArrayDouble *Memory;
2132  CArrayDouble *PrevMemory;
2133  CArrayDouble *Input;
2134  CArrayDouble *InputGradient;
2135  //---
2136  virtual bool feedForward(CLayer *prevLayer);
2137  virtual bool calcHiddenGradients(CLayer *&nextLayer);
2138  virtual bool updateInputWeights(CLayer *&prevLayer);
2139  virtual bool updateInputWeights(CLayer *gate, CArrayDouble *input_data);
2140  virtual bool InitLayer(CLayer *layer, int numUnits, int numOutputs, ENUM_OPTIMIZATION optimization_type);
2142  virtual CArrayDouble *CalculateGate(CLayer *gate, CArrayDouble *sequence);
2143 
2144 public:CNeuronLSTM(void);~CNeuronLSTM(void);
2147  virtual bool Init(uint numOutputs,uint myIndex,int window, int step, int units_count, ENUM_OPTIMIZATION optimization_type);
2149  //---
2150  virtual CLayer *getOutputLayer(void) { return OutputLayer; }
2151  virtual bool calcInputGradients(CLayer *prevLayer) ;
2152  virtual bool calcInputGradients(CNeuronBase *prevNeuron, uint index) ;
2153  //--- methods for working with files
2154  virtual bool Save(int const file_handle);
2155  virtual bool Load(int const file_handle);
2156  virtual int Type(void) const { return defNeuronLSTM; }
2157  };
2158 //+------------------------------------------------------------------+
2159 //| |
2160 //+------------------------------------------------------------------+
 2161 CNeuronLSTM::CNeuronLSTM(void)
 2162  {
2163  ForgetGate = new CLayer();
2164  InputGate = new CLayer();
2165  OutputGate = new CLayer();
2166  NewContent = new CLayer();
2167  Memory = new CArrayDouble();
2168  PrevMemory = new CArrayDouble();
2169  Input = new CArrayDouble();
2170  InputGradient = new CArrayDouble();
2171  }
2172 //+------------------------------------------------------------------+
2173 //| |
2174 //+------------------------------------------------------------------+
 2175 CNeuronLSTM::~CNeuronLSTM(void)
 2176  {
2177  if(CheckPointer(ForgetGate)!=POINTER_INVALID)
2178  delete ForgetGate;
2179  if(CheckPointer(InputGate)!=POINTER_INVALID)
2180  delete InputGate;
2181  if(CheckPointer(OutputGate)!=POINTER_INVALID)
2182  delete OutputGate;
2183  if(CheckPointer(NewContent)!=POINTER_INVALID)
2184  delete NewContent;
2185  if(CheckPointer(Memory)!=POINTER_INVALID)
2186  delete Memory;
2187  if(CheckPointer(PrevMemory)!=POINTER_INVALID)
2188  delete PrevMemory;
2189  if(CheckPointer(Input)!=POINTER_INVALID)
2190  delete Input;
2191  if(CheckPointer(InputGradient)!=POINTER_INVALID)
2192  delete InputGradient;
2193  }
2194 //+------------------------------------------------------------------+
2195 //| |
2196 //+------------------------------------------------------------------+
2197 bool CNeuronLSTM::Init(uint numOutputs,uint myIndex,int window,int step,int units_count, ENUM_OPTIMIZATION optimization_type)
2198  {
2199  if(units_count<=0)
2200  return false;
2201 //--- Init Layers
2202  if(!CNeuronProof::Init(numOutputs,myIndex,window,step,units_count,optimization_type))
2203  return false;
2204  if(!InitLayer(ForgetGate,units_count,window+units_count,optimization_type))
2205  return false;
2206  if(!InitLayer(InputGate,units_count,window+units_count,optimization_type))
2207  return false;
2208  if(!InitLayer(OutputGate,units_count,window+units_count,optimization_type))
2209  return false;
2210  if(!InitLayer(NewContent,units_count,window+units_count,optimization_type))
2211  return false;
2212  if(!Memory.Reserve(units_count))
2213  return false;
2214  if(!PrevMemory.Reserve(units_count))
2215  return false;
2216  CNeuron *temp;
2217  for(int i=0; i<units_count; i++)
2218  {
2219  if(!Memory.Add(0))
2220  return false;
2221  if(!PrevMemory.Add(0))
2222  return false;
2223  temp=OutputLayer.At(i);
2224  temp.setOutputVal(0);
2225  }
2226 //---
2227  return true;
2228  }
2229 //+------------------------------------------------------------------+
2230 //| |
2231 //+------------------------------------------------------------------+
2232 bool CNeuronLSTM::InitLayer(CLayer *layer,int numUnits, int numOutputs, ENUM_OPTIMIZATION optimization_type)
2233  {
2234  if(CheckPointer(layer)==POINTER_INVALID)
2235  {
2236  layer=new CLayer(numOutputs);
2237  if(CheckPointer(layer)==POINTER_INVALID)
2238  return false;
2239  }
2240  else
2241  layer.Clear();
2242  if(!layer.Reserve(numUnits))
2243  return false;
2244 //---
2245  CNeuron *temp;
2246  for(int i=0; i<numUnits; i++)
2247  {
2248  temp=new CNeuron();
2249  if(CheckPointer(temp)==POINTER_INVALID)
2250  return false;
2251  if(!temp.Init(numOutputs+1,i,optimization_type))
2252  return false;
2253  if(!layer.Add(temp))
2254  return false;
2255  }
2256 //---
2257  return true;
2258  }
2259 //+------------------------------------------------------------------+
2260 //| |
2261 //+------------------------------------------------------------------+
 2262 bool CNeuronLSTM::feedForward(CLayer *prevLayer)
 2263  {
2264  if(CheckPointer(prevLayer)==POINTER_INVALID || prevLayer.Total()<=0)
2265  return false;
2266  CNeuronBase *temp;
2267  CConnection *temp_con;
2268  if(CheckPointer(Input)==POINTER_INVALID)
2269  {
2270  Input=new CArrayDouble();
2271  if(CheckPointer(Input)==POINTER_INVALID)
2272  return false;
2273  }
2274  else
2275  Input.Clear();
2276 //--- Concatenate input sequence
2277  int total=prevLayer.Total();
2278  if(!Input.Reserve(total+OutputLayer.Total()))
2279  return false;
2280  for(int i=0; i<total; i++)
2281  {
2282  temp=prevLayer.At(i);
2283  if(CheckPointer(temp)==POINTER_INVALID || !Input.Add(temp.getOutputVal()))
2284  return false;
2285  }
2286  total=OutputLayer.Total();
2287  for(int i=0; i<total; i++)
2288  {
2289  temp=OutputLayer.At(i);
2290  if(CheckPointer(temp)==POINTER_INVALID || !Input.Add(temp.getOutputVal()))
2291  return false;
2292  }
2293  int total_data=Input.Total();
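 //--- gate activations computed below: f=sigma(Wf*z), i=sigma(Wi*z), o=sigma(Wo*z)
 //--- and candidate content c~=tanh(Wc*z), where z is the concatenated [input,h(prev)]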
 2294 //--- Calculate forget gate
2295  CArrayDouble *forget_gate=CalculateGate(ForgetGate,Input);
2296  if(CheckPointer(forget_gate)==POINTER_INVALID)
2297  return false;
 2298 //--- Calculate input gate
2299  CArrayDouble *input_gate=CalculateGate(InputGate,Input);
2300  if(CheckPointer(input_gate)==POINTER_INVALID)
2301  return false;
 2302 //--- Calculate output gate
2303  CArrayDouble *output_gate=CalculateGate(OutputGate,Input);
2304  if(CheckPointer(output_gate)==POINTER_INVALID)
2305  return false;
 2306 //--- Calculate new content
2307  CArrayDouble *new_content=new CArrayDouble();
2308  if(CheckPointer(new_content)==POINTER_INVALID)
2309  return false;
2310  total=NewContent.Total();
2311  for(int i=0; i<total; i++)
2312  {
2313  temp=NewContent.At(i);
2314  if(CheckPointer(temp)==POINTER_INVALID)
2315  return false;
2316  double val=0;
2317  for(int c=0; c<total_data; c++)
2318  {
2319  temp_con=temp.Connections.At(c);
2320  if(CheckPointer(temp_con)==POINTER_INVALID)
2321  return false;
2322  val+=temp_con.weight*Input.At(c);
2323  }
2324  val=TanhFunction(val);
2325  temp.setOutputVal(val);
2326  if(!new_content.Add(val))
2327  return false;
2328  }
 2329 //--- Calculate output sequences
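 //--- memory update C=f*C(prev)+i*c~ and hidden state h=o*tanh(C)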
2330  for(int i=0; i<total; i++)
2331  {
2332  if(PrevMemory.Total()<=i)
2333  PrevMemory.Add(Memory.At(i));
2334  else
2335  PrevMemory.Update(i,Memory.At(i));
2336  double value=Memory.At(i)*forget_gate.At(i)+new_content.At(i)*input_gate.At(i);
2337  if(!Memory.Update(i,value))
2338  return false;
2339  temp=OutputLayer.At(i);
2340  value=TanhFunction(value)*output_gate.At(i);
2341  temp.setOutputVal(value);
2342  }
2343 //---
2344  delete forget_gate;
2345  delete input_gate;
2346  delete new_content;
2347  delete output_gate;
2348 //---
2349  return true;
2350  }
2351 //+------------------------------------------------------------------+
2352 //| |
2353 //+------------------------------------------------------------------+
2354 CArrayDouble *CNeuronLSTM::CalculateGate(CLayer *gate,CArrayDouble *sequence)
2355  {
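 //--- computes sigma(W*sequence) for every neuron of the gate layer;
 //--- a sequence element equal to DBL_MAX marks the bias input and is read as 1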
2356  CNeuronBase *temp;
2357  CConnection *temp_con;
 2358  CArrayDouble *result=new CArrayDouble();
 2359  if(CheckPointer(gate)==POINTER_INVALID || CheckPointer(sequence)==POINTER_INVALID || CheckPointer(result)==POINTER_INVALID)
 2360    {
     delete result;
     return NULL;
    }
2361  int total=gate.Total();
2362  int total_data=sequence.Total();
2363  for(int i=0; i<total; i++)
2364  {
2365  temp=gate.At(i);
2366  if(CheckPointer(temp)==POINTER_INVALID)
2367  {
2368  delete result;
2369  return NULL;
2370  }
2371  double val=0;
2372  for(int c=0; c<total_data; c++)
2373  {
2374  temp_con=temp.Connections.At(c);
2375  if(CheckPointer(temp_con)==POINTER_INVALID)
2376  {
2377  delete result;
2378  return NULL;
2379  }
2380  val+=temp_con.weight*(sequence.At(c)==DBL_MAX ? 1 : sequence.At(c));
2381  }
2382  val=SigmoidFunction(val);
2383  temp.setOutputVal(val);
2384  if(!result.Add(val))
2385  {
2386  delete result;
2387  return NULL;
2388  }
2389  }
2390 //---
2391  return result;
2392  }
2393 //+------------------------------------------------------------------+
2394 //| |
2395 //+------------------------------------------------------------------+
 2396 bool CNeuronLSTM::calcHiddenGradients(CLayer *&nextLayer)
 2397  {
2398  if(CheckPointer(InputGradient)==POINTER_INVALID)
2399  {
2400  InputGradient=new CArrayDouble();
2401  if(CheckPointer(InputGradient)==POINTER_INVALID)
2402  return false;
2403  }
2404  else
2405  InputGradient.Clear();
2406 //---
2407  int total=OutputLayer.Total();
2408  CNeuron *temp;
2409  CArrayDouble *MemoryGradient=new CArrayDouble();
2410  CNeuron *gate;
2411  CConnection *con;
2412 //---
2413  if(nextLayer!=OutputLayer)
2414  for(int i=0; i<total; i++)
2415  {
2416  temp=OutputLayer.At(i);
2417  if(CheckPointer(temp)==POINTER_INVALID)
2418  return false;
2419  temp.setGradient(temp.sumDOW(nextLayer));
2420  }
 2421 //--- Calculate memory and output gate gradients
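 //--- memory gradient dC=dh*o*tanh'(C); the output gate gradient dh*tanh(C)*sigma'(o)
 //--- is recovered as dh*h*sigma'(o)/o, guarded against division by zero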
2422  if(CheckPointer(MemoryGradient)==POINTER_INVALID)
2423  return false;
2424  if(!MemoryGradient.Reserve(total))
2425  return false;
2426  for(int i=0; i<total; i++)
2427  {
2428  temp=OutputLayer.At(i);
2429  gate=OutputGate.At(i);
2430  if(CheckPointer(gate)==POINTER_INVALID)
2431  return false;
2432  double value=temp.getGradient()*gate.getOutputVal();
2433  value=TanhFunctionDerivative(Memory.At(i))*value;
2434  if(i>=MemoryGradient.Total())
2435  {
2436  if(!MemoryGradient.Add(value))
2437  return false;
2438  }
2439  else
2440  {
2441  value=MemoryGradient.At(i)+value;
2442  if(!MemoryGradient.Update(i,value))
2443  return false;
2444  }
2445  gate.setGradient(gate.getOutputVal()!=0 && temp.getGradient()!=0 ? temp.getGradient()*temp.getOutputVal()*SigmoidFunctionDerivative(gate.getOutputVal())/gate.getOutputVal() : 0);
 2446  //--- Calculate gate and new-content gradients
2447  gate=ForgetGate.At(i);
2448  if(CheckPointer(gate)==POINTER_INVALID)
2449  return false;
2450  gate.setGradient(gate.getOutputVal()!=0 && value!=0? value*SigmoidFunctionDerivative(gate.getOutputVal()) : 0);
2451  gate=InputGate.At(i);
2452  temp=NewContent.At(i);
2453  if(CheckPointer(gate)==POINTER_INVALID)
2454  return false;
2455  gate.setGradient(gate.getOutputVal()!=0 && value!=0 ? value*temp.getOutputVal()*SigmoidFunctionDerivative(gate.getOutputVal()) : 0);
2456  temp.setGradient(temp.getOutputVal()!=0 && value!=0 ? value*gate.getOutputVal()*TanhFunctionDerivative(temp.getOutputVal()) : 0);
2457  }
 2458 //--- Calculate input gradients
2459  int total_inp=temp.getConnections().Total();
2460  for(int n=0; n<total_inp; n++)
2461  {
2462  double value=0;
2463  for(int i=0; i<total; i++)
2464  {
2465  temp=ForgetGate.At(i);
2466  con=temp.getConnections().At(n);
2467  value+=temp.getGradient()*con.weight;
2468  //---
2469  temp=InputGate.At(i);
2470  con=temp.getConnections().At(n);
2471  value+=temp.getGradient()*con.weight;
2472  //---
2473  temp=OutputGate.At(i);
2474  con=temp.getConnections().At(n);
2475  value+=temp.getGradient()*con.weight;
2476  //---
2477  temp=NewContent.At(i);
2478  con=temp.getConnections().At(n);
2479  value+=temp.getGradient()*con.weight;
2480  }
 2481  if(n>=InputGradient.Total())
2482  {
2483  if(!InputGradient.Add(value))
2484  return false;
2485  }
2486  else
2487  if(!InputGradient.Update(n,value))
2488  return false;
2489  }
 2490 //--- Calculate gradients for the previous state
2491  int shift=total_inp-total;
2492  for(int i=0; i<total; i++)
2493  {
2494  temp=OutputLayer.At(i);
2495  if(CheckPointer(temp)==POINTER_INVALID)
2496  return false;
2497  temp.setGradient(InputGradient.At(shift+i));
2498  }
 2499 //--- Calculate memory and output gate gradients (recurrent part)
2500  for(int i=0; i<total; i++)
2501  {
2502  temp=OutputLayer.At(i);
2503  gate=OutputGate.At(i);
2504  if(CheckPointer(gate)==POINTER_INVALID)
2505  return false;
2506  double value=temp.getGradient()*gate.getPrevVal();
2507  value=MemoryGradient.At(i)+TanhFunctionDerivative(PrevMemory.At(i))*value;
2508  if(!MemoryGradient.Update(i,value))
2509  return false;
2510  gate.setGradient(gate.getGradient()+(gate.getPrevVal()!=0 && temp.getGradient()!=0 ? temp.getGradient()*temp.getPrevVal()*SigmoidFunctionDerivative(gate.getPrevVal())/gate.getPrevVal() : 0));
 2511  //--- Calculate gate and new-content gradients
2512  gate=ForgetGate.At(i);
2513  if(CheckPointer(gate)==POINTER_INVALID)
2514  return false;
2515  gate.setGradient(gate.getGradient()+(gate.getPrevVal()!=0 && value!=0? value*SigmoidFunctionDerivative(gate.getPrevVal()) : 0));
2516  gate=InputGate.At(i);
2517  temp=NewContent.At(i);
2518  if(CheckPointer(gate)==POINTER_INVALID)
2519  return false;
2520  gate.setGradient(gate.getGradient()+(gate.getPrevVal()!=0 && value!=0 ? value*temp.getPrevVal()*SigmoidFunctionDerivative(gate.getPrevVal()) : 0));
2521  temp.setGradient(temp.getGradient()+(temp.getPrevVal()!=0 && value!=0 ? value*gate.getPrevVal()*TanhFunctionDerivative(temp.getPrevVal()) : 0));
2522  }
2523 //---
2524  delete MemoryGradient;
2525 //---
2526  return true;
2527  }
2528 //+------------------------------------------------------------------+
2529 //| |
2530 //+------------------------------------------------------------------+
 2531 bool CNeuronLSTM::updateInputWeights(CLayer *&prevLayer)
 2532  {
2533  if(CheckPointer(prevLayer)==POINTER_INVALID || CheckPointer(Input)==POINTER_INVALID)
2534  return false;
 2535 //---
 2536  if(!updateInputWeights(ForgetGate,Input) || !updateInputWeights(InputGate,Input) ||
 2537     !updateInputWeights(OutputGate,Input) || !updateInputWeights(NewContent,Input))
 2538  {
2539  return false;
2540  }
2541 //---
2542  return true;
2543  }
2544 //+------------------------------------------------------------------+
2545 //| |
2546 //+------------------------------------------------------------------+
2547 bool CNeuronLSTM::updateInputWeights(CLayer *gate,CArrayDouble *input_data)
2548  {
2549  if(CheckPointer(gate)==POINTER_INVALID || CheckPointer(input_data)==POINTER_INVALID)
2550  return false;
2551  CNeuronBase *neuron;
2552  CConnection *con;
2553  int total_n=gate.Total();
2554  int total_data=input_data.Total();
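 //--- Adam step: m=b1*m+(1-b1)*g, v=b2*v+(1-b2)*g^2, w+=lt*m/sqrt(v), with the
 //--- bias-corrected rate lt=eta*sqrt(1-b2^t)/(1-b1^t)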
2555  double lt=eta*sqrt(1-pow(b2,t))/(1-pow(b1,t));
2556  for(int n=0; n<total_n; n++)
2557  {
2558  neuron=gate.At(n);
2559  if(CheckPointer(neuron)==POINTER_INVALID)
2560  return false;
2561  for(int i=0; i<total_data; i++)
2562  {
2563  con=neuron.getConnections().At(i);
2564  if(CheckPointer(con)==POINTER_INVALID)
2565  return false;
2566  double data=input_data.At(i);
2567  double g=neuron.getGradient();
2568  if(optimization==SGD)
2569  con.weight+=con.deltaWeight=(g!=0 && data!=0 ? eta*g*(data!=DBL_MAX ? data : 1) : 0)+alpha*con.deltaWeight;
2570  else
2571  {
2572  con.mt=b1*con.mt+(1-b1)*g;
2573  con.vt=b2*con.vt+(1-b2)*pow(g,2)+0.00000001;
2574  con.weight+=con.deltaWeight=lt*con.mt/sqrt(con.vt);
 2576  }
 2577  }
 2578  }
 //--- advance the Adam step counter once per update call rather than per connection
  if(optimization!=SGD)
     t++;
2579 //---
2580  return true;
2581  }
2582 //+------------------------------------------------------------------+
2583 //| |
2584 //+------------------------------------------------------------------+
2585 bool CNeuronLSTM::calcInputGradients(CNeuronBase *prevNeuron,uint index)
2586  {
2587  if(CheckPointer(prevNeuron)==POINTER_INVALID || CheckPointer(InputGradient)==POINTER_INVALID || InputGradient.Total()<=(int)index)
2588  return false;
2589 //---
2590  prevNeuron.setGradient(InputGradient.At(index));
2591 //---
2592  return true;
2593  }
2594 //+------------------------------------------------------------------+
2595 //| |
2596 //+------------------------------------------------------------------+
 2597 bool CNeuronLSTM::calcInputGradients(CLayer *prevLayer)
 2598  {
2599  if(CheckPointer(prevLayer)==POINTER_INVALID)
2600  return false;
2601 //---
2602  int total=prevLayer.Total();
2603  if(total<=0)
2604  return false;
2605  CNeuronBase *neuron;
2606  bool result=true;
2607  for(int i=0; (i<total && result); i++)
2608  {
2609  neuron=prevLayer.At(i);
2610  if(CheckPointer(neuron)==POINTER_INVALID)
2611  {
2612  result=false;
2613  break;
2614  }
2615  result=calcInputGradients(neuron,i);
2616  }
2617 //---
2618  return result;
2619  }
2620 //+------------------------------------------------------------------+
2621 //| |
2622 //+------------------------------------------------------------------+
2623 bool CNeuronLSTM::Save(const int file_handle)
2624  {
2625  if(!CNeuronProof::Save(file_handle))
2626  return false;
2627  if(!ForgetGate.Save(file_handle))
2628  return false;
2629  if(!InputGate.Save(file_handle))
2630  return false;
2631  if(!OutputGate.Save(file_handle))
2632  return false;
2633  if(!NewContent.Save(file_handle))
2634  return false;
2635  if(!Memory.Save(file_handle))
2636  return false;
2637 //---
2638  return true;
2639  }
2640 //+------------------------------------------------------------------+
2641 //| |
2642 //+------------------------------------------------------------------+
2643 bool CNeuronLSTM::Load(const int file_handle)
2644  {
2645  if(!CNeuronProof::Load(file_handle))
2646  return false;
2647  if(!ForgetGate.Load(file_handle))
2648  return false;
2649  if(!InputGate.Load(file_handle))
2650  return false;
2651  if(!OutputGate.Load(file_handle))
2652  return false;
2653  if(!NewContent.Load(file_handle))
2654  return false;
2655  if(!Memory.Load(file_handle))
2656  return false;
2657 //---
2658  return true;
2659  }
2660 //+------------------------------------------------------------------+
2661 //| |
2662 //+------------------------------------------------------------------+
 2663 double CNeuronBase::activationFunction(double x)
 2664  {
2665  switch(activation)
2666  {
2667  case TANH:
2668  return TanhFunction(x);
2670  case SIGMOID:
2671  return SigmoidFunction(x);
2673  }
2674 //---
2675  return x;
2676  }
2677 //+------------------------------------------------------------------+
2678 //| |
2679 //+------------------------------------------------------------------+
 2680 double CNeuronBase::activationFunctionDerivative(double x)
 2681  {
2682  switch(activation)
2683  {
2684  case TANH:
2685  return TanhFunctionDerivative(x);
2687  case SIGMOID:
2688  return SigmoidFunctionDerivative(x);
2690  }
2691 //---
2692  return 1;
2693  }
2694 //+------------------------------------------------------------------+
2695 //| |
2696 //+------------------------------------------------------------------+
2697 template<typename T>
2698 int COpenCLMy::AddBufferFromArray(T &data[],const uint data_array_offset,const uint data_array_count,const uint flags)
2699  {
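 //--- reuse the first free slot in m_buffers; grow the array when none is free,
 //--- then wrap the host array into an OpenCL buffer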
2700  int result=-1;
2701  for(int i=0; i<m_buffers_total; i++)
2702  {
2703  if(m_buffers[i]!=INVALID_HANDLE)
2704  continue;
2705  result=i;
2706  break;
2707  }
2708 //---
2709  if(result<0)
2710  {
2711  if(ArrayResize(m_buffers,m_buffers_total+1)>0)
2712  {
2713  m_buffers_total=ArraySize(m_buffers);
2714  result=m_buffers_total-1;
2715  m_buffers[result]=INVALID_HANDLE;
2716  }
2717  else
2718  return result;
2719  }
2720 //---
2721  if(!BufferFromArray(result,data,data_array_offset,data_array_count,flags))
2722  return -1;
2723 //---
2724  return result;
2725  }
2726 //+------------------------------------------------------------------+
2727 //| |
2728 //+------------------------------------------------------------------+
2729 CLayer::CLayer(uint outputs=0,int handle=-1, COpenCLMy *opencl=NULL)
2730  {
2731  iOutputs=outputs;
2732  iFileHandle=handle;
2733  OpenCL=opencl;
2734  }
2735 //+------------------------------------------------------------------+
2736 //| |
2737 //+------------------------------------------------------------------+
2738 bool CLayer::Load(const int file_handle)
2739  {
2740  iFileHandle=file_handle;
2741  if(!CArrayObj::Load(file_handle))
2742  return false;
2743  if(CheckPointer(m_data[0])==POINTER_INVALID)
2744  return false;
2745 //---
2746  if(m_data[0].Type()==defNeuronBaseOCL || m_data[0].Type()==defNeuronConvOCL || m_data[0].Type()==defNeuronAttentionOCL)
2747  {
2748  CNeuronBaseOCL *temp=m_data[0];
2749  iOutputs=temp.getConnections();
2750  }
2751  else
2752  {
2753  CNeuronBase *temp=m_data[0];
2754  iOutputs=temp.getConnections().Total();
2755  }
2756 //---
2757  return true;
2758  }
2759 //+------------------------------------------------------------------+
2760 //| |
2761 //+------------------------------------------------------------------+
 2762 CNet::~CNet(void)
 2763  {
2764  if(CheckPointer(layers)!=POINTER_INVALID)
2765  delete layers;
2766  if(CheckPointer(opencl)!=POINTER_INVALID)
2767  {
2768  opencl.Shutdown();
2769  delete opencl;
2770  }
2771  }
2772 //+------------------------------------------------------------------+
2776 //+------------------------------------------------------------------+
2777 class CBufferDouble : public CArrayDouble
2778  {
2779 protected:
 2780  COpenCLMy *OpenCL;
 2781  int m_myIndex;
 2782 public:CBufferDouble(void);~CBufferDouble(void);
2785 //---
2786  virtual bool BufferInit(uint count, double value);
2787  virtual bool BufferCreate(COpenCLMy *opencl);
2788  virtual bool BufferFree(void);
2789  virtual bool BufferRead(void);
2790  virtual bool BufferWrite(void);
2791  virtual bool BufferSet(int index) { if(!OpenCL.BufferFree(m_myIndex)) return false; m_myIndex=index; return true; }
2792  virtual int GetData(double &values[]);
2793  virtual int GetData(CArrayDouble *values);
2794  virtual int GetIndex(void) { return m_myIndex; }
2795 //---
2796  virtual int Type(void) const { return defBufferDouble; }
2797  virtual void BufferToCSV(const string file_name);
2798  };
2799 //+------------------------------------------------------------------+
2800 //| |
2801 //+------------------------------------------------------------------+
2802 CBufferDouble::CBufferDouble(void) : m_myIndex(-1)
2803  {
2804  OpenCL=NULL;
2805  }
2806 //+------------------------------------------------------------------+
2807 //| |
2808 //+------------------------------------------------------------------+
 2809 CBufferDouble::~CBufferDouble(void)
 2810  {
2811  if(CheckPointer(OpenCL)!=POINTER_INVALID && m_myIndex>=0)
2812  {
2813  if(OpenCL.BufferFree(m_myIndex))
2814  {
2815  m_myIndex=-1;
2816  OpenCL=NULL;
2817  }
2818  }
2819  }
2820 //+------------------------------------------------------------------+
2821 //| |
2822 //+------------------------------------------------------------------+
 2823 bool CBufferDouble::BufferCreate(COpenCLMy *opencl)
 2824  {
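 //--- release any buffer already bound to this object before binding it to a new context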
2825  if(CheckPointer(OpenCL)!=POINTER_INVALID && m_myIndex>=0)
2826  {
2827  if(OpenCL.BufferFree(m_myIndex))
2828  {
2829  m_myIndex=-1;
2830  OpenCL=NULL;
2831  }
2832  else
2833  return false;
2834  }
2835 //---
2836  if(CheckPointer(opencl)==POINTER_INVALID)
2837  return false;
2838  if((m_myIndex=opencl.AddBufferFromArray(m_data,0,m_data_total,CL_MEM_READ_WRITE|CL_MEM_ALLOC_HOST_PTR))<0)
2839  return false;
2840  OpenCL=opencl;
2841 //---
2842  return true;
2843  }
2844 //+------------------------------------------------------------------+
2845 //| |
2846 //+------------------------------------------------------------------+
 2847 bool CBufferDouble::BufferFree(void)
 2848  {
2849  if(CheckPointer(OpenCL)!=POINTER_INVALID && m_myIndex>=0)
2850  if(OpenCL.BufferFree(m_myIndex))
2851  {
2852  m_myIndex=-1;
2853  OpenCL=NULL;
2854  return true;
2855  }
2856 //---
2857  return false;
2858  }
2859 //+------------------------------------------------------------------+
2860 //| |
2861 //+------------------------------------------------------------------+
 2862 bool CBufferDouble::BufferRead(void)
 2863  {
2864  if(CheckPointer(OpenCL)==POINTER_INVALID || m_myIndex<0)
2865  return false;
2866 //---
2867  if(!OpenCL.BufferRead(m_myIndex,m_data,0,0,m_data_total))
2868  return false;
2869 //---
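 //--- replace NaN/Inf values returned by the device with a small constant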
2870  for(int i=0;i<m_data_total;i++)
2871  {
2872  if(!MathIsValidNumber(m_data[i]))
2873  m_data[i]=1e-5;
2874  }
2875 //---
2876  return true;
2877  }
2878 //+------------------------------------------------------------------+
2879 //| |
2880 //+------------------------------------------------------------------+
 2881 bool CBufferDouble::BufferWrite(void)
 2882  {
2883  if(CheckPointer(OpenCL)==POINTER_INVALID || m_myIndex<0)
2884  return false;
2885 //---
2886  return OpenCL.BufferWrite(m_myIndex,m_data,0,0,m_data_total);
2887  }
2888 //+------------------------------------------------------------------+
2889 //| |
2890 //+------------------------------------------------------------------+
2891 bool CBufferDouble::BufferInit(uint count,double value)
2892  {
2893  if(!Reserve(count))
2894  return false;
2895  m_data_total=(int)fmin(ArrayInitialize(m_data,value),count);
2896 //---
 2897  return m_data_total==(int)count;
2898  }
2899 //+------------------------------------------------------------------+
2900 //| |
2901 //+------------------------------------------------------------------+
2902 int CBufferDouble::GetData(double &values[])
2903  {
2904  if(!BufferRead())
 2905  return -1;
2906  return ArrayCopy(values,m_data,0,0,m_data_total);
2907  }
2908 //+------------------------------------------------------------------+
2909 //| |
2910 //+------------------------------------------------------------------+
2911 int CBufferDouble::GetData(CArrayDouble *values)
2912  {
2913  if(!BufferRead())
2914  return -1;
2915  values.Clear();
2916  if(!values.AddArray(GetPointer(this)))
2917  return -1;
2918  return m_data_total;
2919  }
2920 //+------------------------------------------------------------------+
2925 //+------------------------------------------------------------------+
2926 class CNeuronBaseOCL : public CObject
2927  {
2928 protected:
 2929  COpenCLMy *OpenCL;
 2930  CBufferDouble *Output;
 2931  CBufferDouble *PrevOutput;
 2932  CBufferDouble *Weights;
 2933  CBufferDouble *DeltaWeights;
 2934  CBufferDouble *Gradient;
 2935  CBufferDouble *FirstMomentum;
 2936  CBufferDouble *SecondMomentum;
 2937 //---
 2938  //const double eta;
 2939  const double alpha;
 2940  int t;
 2941 //---
 2942  ENUM_ACTIVATION activation;
 2943  ENUM_OPTIMIZATION optimization;
 2945 //---
2947  virtual bool feedForward(CNeuronBaseOCL *NeuronOCL);
2948 
2950  virtual bool updateInputWeights(CNeuronBaseOCL *NeuronOCL);
2951 
2952 public:CNeuronBaseOCL(void);~CNeuronBaseOCL(void);
2955  virtual bool Init(uint numOutputs, uint myIndex, COpenCLMy *open_cl, uint numNeurons, ENUM_OPTIMIZATION optimization_type);
2957  virtual void SetActivationFunction(ENUM_ACTIVATION value) { activation=value; }
2958 //---
2959  virtual int getOutputIndex(void) { return Output.GetIndex(); }
2960  virtual int getPrevOutIndex(void) { return PrevOutput.GetIndex(); }
2961  virtual int getGradientIndex(void) { return Gradient.GetIndex(); }
2962  virtual int getWeightsIndex(void) { return Weights.GetIndex(); }
2963  virtual int getDeltaWeightsIndex(void) { return DeltaWeights.GetIndex(); }
2964  virtual int getFirstMomentumIndex(void) { return FirstMomentum.GetIndex(); }
2965  virtual int getSecondMomentumIndex(void) { return SecondMomentum.GetIndex();}
2966 //---
2967  virtual int getOutputVal(double &values[]) { return Output.GetData(values); }
2968  virtual int getOutputVal(CArrayDouble *values) { return Output.GetData(values); }
2969  virtual int getPrevVal(double &values[]) { return PrevOutput.GetData(values); }
2970  virtual int getGradient(double &values[]) { return Gradient.GetData(values); }
2971  virtual int getWeights(double &values[]) { return Weights.GetData(values); }
2972  virtual int Neurons(void) { return Output.Total(); }
2973  virtual int Activation(void) { return (int)activation; }
2974  virtual int getConnections(void) { return (CheckPointer(Weights)!=POINTER_INVALID ? Weights.Total()/(Gradient.Total()) : 0); }
2975 //---
2976  virtual bool FeedForward(CObject *SourceObject);
2977  virtual bool calcHiddenGradients(CObject *TargetObject);
2978  virtual bool UpdateInputWeights(CObject *SourceObject);
2979  virtual bool calcHiddenGradients(CNeuronBaseOCL *NeuronOCL);
2982  virtual bool calcOutputGradients(CArrayDouble *Target);
2983 //---
2985  virtual bool Save(int const file_handle);
2986  virtual bool Load(int const file_handle);
2987  //---
2988  virtual int Type(void) const { return defNeuronBaseOCL; }
2989  };
2990 //+------------------------------------------------------------------+
2991 //| |
2992 //+------------------------------------------------------------------+
 2993 CNeuronBaseOCL::CNeuronBaseOCL(void) : alpha(momentum), // const alpha seeded from the momentum define
 2994  activation(TANH),
2995  optimization(SGD),
2996  t(1)
2997  {
2998  OpenCL=NULL;
2999  Output=new CBufferDouble();
3000  PrevOutput=new CBufferDouble();
 3001  Weights=new CBufferDouble();
 3002  DeltaWeights=new CBufferDouble();
 3003  Gradient=new CBufferDouble();
 3004  FirstMomentum=new CBufferDouble();
 3005  SecondMomentum=new CBufferDouble();
 3006  }
3007 //+------------------------------------------------------------------+
3008 //| |
3009 //+------------------------------------------------------------------+
 3010 CNeuronBaseOCL::~CNeuronBaseOCL(void)
 3011  {
3012  if(CheckPointer(Output)!=POINTER_INVALID)
3013  delete Output;
3014  if(CheckPointer(PrevOutput)!=POINTER_INVALID)
3015  delete PrevOutput;
3016  if(CheckPointer(Weights)!=POINTER_INVALID)
3017  delete Weights;
3018  if(CheckPointer(DeltaWeights)!=POINTER_INVALID)
3019  delete DeltaWeights;
3020  if(CheckPointer(Gradient)!=POINTER_INVALID)
3021  delete Gradient;
3022  if(CheckPointer(FirstMomentum)!=POINTER_INVALID)
3023  delete FirstMomentum;
3024  if(CheckPointer(SecondMomentum)!=POINTER_INVALID)
3025  delete SecondMomentum;
3026  OpenCL=NULL;
3027  }
3028 //+------------------------------------------------------------------+
3029 //| |
3030 //+------------------------------------------------------------------+
3031 bool CNeuronBaseOCL::Init(uint numOutputs,uint myIndex,COpenCLMy *open_cl,uint numNeurons, ENUM_OPTIMIZATION optimization_type)
3032  {
3033  if(CheckPointer(open_cl)==POINTER_INVALID || numNeurons<=0)
3034  return false;
3035  OpenCL=open_cl;
3036  optimization=optimization_type;
3037 //---
3038  if(CheckPointer(Output)==POINTER_INVALID)
3039  {
3040  Output=new CBufferDouble();
3041  if(CheckPointer(Output)==POINTER_INVALID)
3042  return false;
3043  }
3044  if(!Output.BufferInit(numNeurons,1.0))
3045  return false;
3046  if(!Output.BufferCreate(OpenCL))
3047  return false;
3048 //---
3049  if(CheckPointer(PrevOutput)==POINTER_INVALID)
3050  {
3051  PrevOutput=new CBufferDouble();
3052  if(CheckPointer(PrevOutput)==POINTER_INVALID)
3053  return false;
3054  }
3055  if(!PrevOutput.BufferInit(numNeurons,1.0))
3056  return false;
 3057  if(!PrevOutput.BufferCreate(OpenCL))
 3058  return false;
3059 //---
3060  if(CheckPointer(Gradient)==POINTER_INVALID)
3061  {
3062  Gradient=new CBufferDouble();
3063  if(CheckPointer(Gradient)==POINTER_INVALID)
3064  return false;
3065  }
3066  if(!Gradient.BufferInit(numNeurons+1,0.0))
3067  return false;
 3068  if(!Gradient.BufferCreate(OpenCL))
 3069  return false;
3070 //---
3071  if(numOutputs>0)
3072  {
3073  if(CheckPointer(Weights)==POINTER_INVALID)
3074  {
3075  Weights=new CBufferDouble();
3076  if(CheckPointer(Weights)==POINTER_INVALID)
3077  return false;
3078  }
3079  int count=(int)((numNeurons+1)*numOutputs);
3080  if(!Weights.Reserve(count))
3081  return false;
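 //--- random initialization: uniform weights in (-0.5,0.5], zero remapped to 0.001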
3082  for(int i=0;i<count;i++)
3083  {
3084  double weigh=(MathRand()+1)/32768.0-0.5;
3085  if(weigh==0)
3086  weigh=0.001;
3087  if(!Weights.Add(weigh))
3088  return false;
3089  }
 3090  if(!Weights.BufferCreate(OpenCL))
 3091  return false;
3092  //---
3093  if(optimization==SGD)
3094  {
3095  if(CheckPointer(DeltaWeights)==POINTER_INVALID)
 3096  {
 3097  DeltaWeights=new CBufferDouble();
 3098  if(CheckPointer(DeltaWeights)==POINTER_INVALID)
 3099  return false;
 3100  }
 3101  if(!DeltaWeights.BufferInit(count,0))
 3102  return false;
 3103  if(!DeltaWeights.BufferCreate(OpenCL))
 3104  return false;
 3105  if(CheckPointer(FirstMomentum)!=POINTER_INVALID)
 3106  delete FirstMomentum;
 3107  if(CheckPointer(SecondMomentum)!=POINTER_INVALID)
 3108  delete SecondMomentum;
3109  }
3110  else
3111  {
 3112  if(CheckPointer(DeltaWeights)!=POINTER_INVALID)
 3113  delete DeltaWeights;
3114 //---
3115  if(CheckPointer(FirstMomentum)==POINTER_INVALID)
 3116  {
 3117  FirstMomentum=new CBufferDouble();
 3118  if(CheckPointer(FirstMomentum)==POINTER_INVALID)
 3119  return false;
 3120  }
 3121  if(!FirstMomentum.BufferInit(count,0))
 3122  return false;
 3123  if(!FirstMomentum.BufferCreate(OpenCL))
 3124  return false;
3125 //---
3126  if(CheckPointer(SecondMomentum)==POINTER_INVALID)
 3127  {
 3128  SecondMomentum=new CBufferDouble();
 3129  if(CheckPointer(SecondMomentum)==POINTER_INVALID)
 3130  return false;
 3131  }
 3132  if(!SecondMomentum.BufferInit(count,0))
 3133  return false;
 3134  if(!SecondMomentum.BufferCreate(OpenCL))
 3135  return false;
3136  }
3137  }
3138  else
3139  {
3140  if(CheckPointer(Weights)!=POINTER_INVALID)
3141  delete Weights;
3142  if(CheckPointer(DeltaWeights)!=POINTER_INVALID)
3143  delete DeltaWeights;
3144  }
3145 //---
3146  return true;
3147  }
3148 //+------------------------------------------------------------------+
3149 //| |
3150 //+------------------------------------------------------------------+
3151 bool CNeuronBaseOCL::FeedForward(CObject *SourceObject)
3152  {
3153  if(CheckPointer(SourceObject)==POINTER_INVALID)
3154  return false;
3155 //---
3156  CNeuronBaseOCL *temp=NULL;
3157  switch(SourceObject.Type())
3158  {
3159  case defNeuronBaseOCL:
3160  case defNeuronConvOCL:
3161  case defNeuronAttentionOCL:
3162  temp=SourceObject;
3163  return feedForward(temp);
3164  break;
3165  }
3166 //---
3167  return false;
3168  }
3169 //+------------------------------------------------------------------+
3170 //| |
3171 //+------------------------------------------------------------------+
 3172 bool CNeuronBaseOCL::feedForward(CNeuronBaseOCL *NeuronOCL)
 3173  {
3174  if(CheckPointer(OpenCL)==POINTER_INVALID || CheckPointer(NeuronOCL)==POINTER_INVALID)
3175  return false;
3176  uint global_work_offset[1]={0};
3177  uint global_work_size[1];
3178  global_work_size[0]=Output.Total();
3179  OpenCL.SetArgumentBuffer(def_k_FeedForward,def_k_ff_matrix_w,NeuronOCL.getWeightsIndex());
3180  OpenCL.SetArgumentBuffer(def_k_FeedForward,def_k_ff_matrix_i,NeuronOCL.getOutputIndex());
 3181  OpenCL.SetArgumentBuffer(def_k_FeedForward,def_k_ff_matrix_o,getOutputIndex());
 3182  OpenCL.SetArgument(def_k_FeedForward,def_k_ff_inputs,NeuronOCL.Neurons());
 3183  OpenCL.SetArgument(def_k_FeedForward,def_k_ff_activation,(int)activation);
3184  //Comment(com+"\n Feedforward");
3185  if(!OpenCL.Execute(def_k_FeedForward,1,global_work_offset,global_work_size))
3186  {
3187  printf("Error of execution kernel FeedForward: %d",GetLastError());
3188  return false;
3189  }
3191  if(!Output.BufferRead())
3192  {
3193  NeuronOCL.Weights.BufferToCSV("weights.csv");
3194  NeuronOCL.Output.BufferToCSV("inputs.csv");
3195  Output.BufferToCSV("output.csv");
3196  return false;
3197  }
3198 //---
3199  return true;
3200  }
3201 //+------------------------------------------------------------------+
3202 //| |
3203 //+------------------------------------------------------------------+
 3204 bool CNeuronBaseOCL::calcHiddenGradients(CNeuronBaseOCL *NeuronOCL)
 3205  {
3206  if(CheckPointer(OpenCL)==POINTER_INVALID || CheckPointer(NeuronOCL)==POINTER_INVALID)
3207  return false;
3208  uint global_work_offset[1]={0};
3209  uint global_work_size[1];
3210  global_work_size[0]=Neurons()+1;
 3211  OpenCL.SetArgumentBuffer(def_k_CalcHiddenGradient,def_k_chg_matrix_w,getWeightsIndex());
 3212  OpenCL.SetArgumentBuffer(def_k_CalcHiddenGradient,def_k_chg_matrix_g,NeuronOCL.getGradientIndex());
 3213  OpenCL.SetArgumentBuffer(def_k_CalcHiddenGradient,def_k_chg_matrix_o,getOutputIndex());
 3214  OpenCL.SetArgumentBuffer(def_k_CalcHiddenGradient,def_k_chg_matrix_ig,getGradientIndex());
 3215  OpenCL.SetArgument(def_k_CalcHiddenGradient,def_k_chg_outputs,NeuronOCL.Neurons());
 3216  OpenCL.SetArgument(def_k_CalcHiddenGradient,def_k_chg_activation,(int)activation);
3217  //Comment(com+"\n Calc Hidden Gradient");
3218  if(!OpenCL.Execute(def_k_CalcHiddenGradient,1,global_work_offset,global_work_size))
3219  {
3220  printf("Error of execution kernel CalcHiddenGradient: %d",GetLastError());
3221  return false;
3222  }
3223  if(!Gradient.BufferRead())
3224  {
3225  //Weights.BufferToCSV("weights.csv");
3226  //NeuronOCL.Gradient.BufferToCSV("gradient_p.csv");
3227  //Output.BufferToCSV("output.csv");
3228  //Gradient.BufferToCSV("gradient.csv");
3229  return false;
3230  }
3231 //---
3232  return true;
3233  }
3234 //+------------------------------------------------------------------+
3235 //| |
3236 //+------------------------------------------------------------------+
3237 bool CNeuronBaseOCL::calcOutputGradients(CArrayDouble *Target)
3238  {
3239  if(CheckPointer(OpenCL)==POINTER_INVALID || CheckPointer(Target)==POINTER_INVALID)
3240  return false;
3241  uint global_work_offset[1]={0};
3242  uint global_work_size[1];
3243  global_work_size[0]=Target.Total();
3244  for(uint i=0;i<global_work_size[0];i++)
3245  if(!Gradient.Update(i,Target.At(i)))
3246  return false;
 3247  Gradient.BufferWrite();
 3248  OpenCL.SetArgumentBuffer(def_k_CalcOutputGradient,def_k_cog_matrix_t,getGradientIndex());
 3249  OpenCL.SetArgumentBuffer(def_k_CalcOutputGradient,def_k_cog_matrix_o,getOutputIndex());
 3250  OpenCL.SetArgumentBuffer(def_k_CalcOutputGradient,def_k_cog_matrix_ig,getGradientIndex());
 3251  OpenCL.SetArgument(def_k_CalcOutputGradient,def_k_cog_activation,(int)activation);
 3252  ResetLastError();
3253  //Comment(com+"\n Calc Output Gradient");
3254  if(!OpenCL.Execute(def_k_CalcOutputGradient,1,global_work_offset,global_work_size))
3255  {
3256  printf("Error of execution kernel CalcOutputGradient: %d",GetLastError());
3257  return false;
3258  }
3259  Gradient.BufferRead();
3260 //---
3261  return true;
3262  }
3263 //+------------------------------------------------------------------+
3264 //| |
3265 //+------------------------------------------------------------------+
 3266 bool CNeuronBaseOCL::updateInputWeights(CNeuronBaseOCL *NeuronOCL)
 3267  {
3268  if(CheckPointer(OpenCL)==POINTER_INVALID || CheckPointer(NeuronOCL)==POINTER_INVALID)
3269  return false;
3270  uint global_work_offset[2]={0,0};
3271  uint global_work_size[2];
3272  global_work_size[0]=Neurons();
3273  global_work_size[1]=NeuronOCL.Neurons();
3274  if(optimization==SGD)
3275  {
 3276  OpenCL.SetArgumentBuffer(def_k_UpdateWeightsMomentum,def_k_uwm_matrix_w,NeuronOCL.getWeightsIndex());
 3277  OpenCL.SetArgumentBuffer(def_k_UpdateWeightsMomentum,def_k_uwm_matrix_g,getGradientIndex());
 3278  OpenCL.SetArgumentBuffer(def_k_UpdateWeightsMomentum,def_k_uwm_matrix_i,NeuronOCL.getOutputIndex());
 3279  OpenCL.SetArgumentBuffer(def_k_UpdateWeightsMomentum,def_k_uwm_matrix_dw,NeuronOCL.getDeltaWeightsIndex());
 3280  OpenCL.SetArgument(def_k_UpdateWeightsMomentum,def_k_uwm_inputs,NeuronOCL.Neurons());
 3281  OpenCL.SetArgument(def_k_UpdateWeightsMomentum,def_k_uwm_learning_rates,eta);
 3282  OpenCL.SetArgument(def_k_UpdateWeightsMomentum,def_k_uwm_momentum,alpha);
 3283  ResetLastError();
3284  if(!OpenCL.Execute(def_k_UpdateWeightsMomentum,2,global_work_offset,global_work_size))
3285  {
3286  printf("Error of execution kernel UpdateWeightsMomentum: %d",GetLastError());
3287  return false;
3288  }
3289  }
3290  else
3291  {
3292  if(!OpenCL.SetArgumentBuffer(def_k_UpdateWeightsAdam,def_k_uwa_matrix_w,NeuronOCL.getWeightsIndex()))
3293  return false;
 3294  if(!OpenCL.SetArgumentBuffer(def_k_UpdateWeightsAdam,def_k_uwa_matrix_g,getGradientIndex()))
 3295  return false;
3296  if(!OpenCL.SetArgumentBuffer(def_k_UpdateWeightsAdam,def_k_uwa_matrix_i,NeuronOCL.getOutputIndex()))
3297  return false;
3298  if(!OpenCL.SetArgumentBuffer(def_k_UpdateWeightsAdam,def_k_uwa_matrix_m,NeuronOCL.getFirstMomentumIndex()))
3299  return false;
3300  if(!OpenCL.SetArgumentBuffer(def_k_UpdateWeightsAdam,def_k_uwa_matrix_v,NeuronOCL.getSecondMomentumIndex()))
3301  return false;
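 //--- bias-corrected Adam learning rate lt=eta*sqrt(1-b2^t)/(1-b1^t)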
3302  double lt=eta*sqrt(1-pow(b2,t))/(1-pow(b1,t));
3303  if(!OpenCL.SetArgument(def_k_UpdateWeightsAdam,def_k_uwa_inputs,NeuronOCL.Neurons()))
3304  return false;
3305  if(!OpenCL.SetArgument(def_k_UpdateWeightsAdam,def_k_uwa_l,lt))
3306  return false;
3307  if(!OpenCL.SetArgument(def_k_UpdateWeightsAdam,def_k_uwa_b1,b1))
3308  return false;
3309  if(!OpenCL.SetArgument(def_k_UpdateWeightsAdam,def_k_uwa_b2,b2))
3310  return false;
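 //--- each work item of the Adam kernel covers four inputs, so scale the work size
 //--- down by 4 and round up to keep the remainder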
3311  uint rest=global_work_size[1]%4;
3312  global_work_size[1]=(global_work_size[1]-rest)/4 + (rest>0 ? 1 : 0);
3313  //Comment(com+"\n UpdateWeightsAdam");
3314  ResetLastError();
3315  if(!OpenCL.Execute(def_k_UpdateWeightsAdam,2,global_work_offset,global_work_size))
3316  {
3317  printf("Error of execution kernel UpdateWeightsAdam: %d",GetLastError());
3318  return false;
3319  }
3320  t++;
3321  }
3322 //---
3323  return NeuronOCL.Weights.BufferRead();
3324  }
3325 //+------------------------------------------------------------------+
3326 //| |
3327 //+------------------------------------------------------------------+
3328 bool CNeuronBaseOCL::calcHiddenGradients(CObject *TargetObject)
3329  {
3330  if(CheckPointer(TargetObject)==POINTER_INVALID)
3331  return false;
3332 //---
3333  CNeuronBaseOCL *temp=NULL;
3334  CNeuronAttentionOCL *at=NULL;
3335  CNeuronConvOCL *conv=NULL;
3336  switch(TargetObject.Type())
3337  {
3338  case defNeuronBaseOCL:
3339  temp=TargetObject;
3340  return calcHiddenGradients(temp);
3341  break;
3342  case defNeuronConvOCL:
3343  conv=TargetObject;
3344  temp=GetPointer(this);
3345  return conv.calcInputGradients(temp);
3346  break;
3347  case defNeuronAttentionOCL:
3348  at=TargetObject;
3349  temp=GetPointer(this);
3350  return at.calcInputGradients(temp);
3351  break;
3352  }
3353 //---
3354  return false;
3355  }
3356 //+------------------------------------------------------------------+
3357 //| |
3358 //+------------------------------------------------------------------+
3359 bool CNeuronBaseOCL::UpdateInputWeights(CObject *SourceObject)
3360  {
3361  if(CheckPointer(SourceObject)==POINTER_INVALID)
3362  return false;
3363 //---
3364  CNeuronBaseOCL *temp=NULL;
3365  switch(SourceObject.Type())
3366  {
3367  case defNeuronBaseOCL:
3368  case defNeuronConvOCL:
3369  case defNeuronAttentionOCL:
3370  temp=SourceObject;
3371  return updateInputWeights(temp);
3372  break;
3373  }
3374 //---
3375  return false;
3376  }
3377 //+------------------------------------------------------------------+
3378 //| |
3379 //+------------------------------------------------------------------+
3380 bool CNeuronBaseOCL::Save(const int file_handle)
3381  {
3382  if(file_handle==INVALID_HANDLE)
3383  return false;
3384  if(FileWriteInteger(file_handle,Type())<INT_VALUE)
3385  return false;
3386 //---
3387  if(FileWriteInteger(file_handle,(int)activation,INT_VALUE)<INT_VALUE)
3388  return false;
3389  if(FileWriteInteger(file_handle,(int)optimization,INT_VALUE)<INT_VALUE)
3390  return false;
3391  if(FileWriteInteger(file_handle,(int)t,INT_VALUE)<INT_VALUE)
3392  return false;
3393 //---
3394  if(CheckPointer(Output)==POINTER_INVALID || !Output.BufferRead() || !Output.Save(file_handle))
3395  return false;
3396  if(CheckPointer(PrevOutput)==POINTER_INVALID || !PrevOutput.BufferRead() || !PrevOutput.Save(file_handle))
3397  return false;
3398  if(CheckPointer(Gradient)==POINTER_INVALID || !Gradient.BufferRead() || !Gradient.Save(file_handle))
3399  return false;
3400 //---
3401  if(CheckPointer(Weights)==POINTER_INVALID)
3402  {
3403  FileWriteInteger(file_handle,0);
3404  return true;
3405  }
3406  else
3407  FileWriteInteger(file_handle,1);
3408 //---
3409  if(CheckPointer(Weights)==POINTER_INVALID || !Weights.BufferRead() || !Weights.Save(file_handle))
3410  return false;
3411  if(optimization==SGD)
3412  {
3413  if(CheckPointer(DeltaWeights)==POINTER_INVALID || !DeltaWeights.BufferRead() || !DeltaWeights.Save(file_handle))
3414  return false;
3415  }
3416  else
3417  {
3418  if(CheckPointer(FirstMomentum)==POINTER_INVALID || !FirstMomentum.BufferRead() || !FirstMomentum.Save(file_handle))
3419  return false;
3420  if(CheckPointer(SecondMomentum)==POINTER_INVALID || !SecondMomentum.BufferRead() || !SecondMomentum.Save(file_handle))
3421  return false;
3422  }
3423 //---
3424  return true;
3425  }
3426 //+------------------------------------------------------------------+
3427 //| |
3428 //+------------------------------------------------------------------+
3429 bool CNeuronBaseOCL::Load(const int file_handle)
3430  {
3431  if(file_handle==INVALID_HANDLE)
3432  return false;
3433 //---
3434  activation=(ENUM_ACTIVATION)FileReadInteger(file_handle,INT_VALUE);
3435  optimization=(ENUM_OPTIMIZATION)FileReadInteger(file_handle,INT_VALUE);
3436  t=FileReadInteger(file_handle,INT_VALUE);
3437  if(CheckPointer(Output)==POINTER_INVALID)
3438  {
3439  Output=new CBufferDouble();
3440  if(CheckPointer(Output)==POINTER_INVALID)
3441  return false;
3442  }
3443  if(Output.GetIndex()>=0)
3444  Output.BufferFree();
3445  if(!Output.Load(file_handle))
3446  return false;
3447  if(!Output.BufferCreate(OpenCL))
3448  return false;
3449 //---
3450  if(CheckPointer(PrevOutput)==POINTER_INVALID)
3451  {
3452  PrevOutput=new CBufferDouble();
3453  if(CheckPointer(PrevOutput)==POINTER_INVALID)
3454  return false;
3455  }
 3456  if(PrevOutput.GetIndex()>=0)
 3457  PrevOutput.BufferFree();
 3458  if(!PrevOutput.Load(file_handle))
 3459  return false;
 3460  if(!PrevOutput.BufferCreate(OpenCL))
 3461  return false;
3462 //---
3463  if(CheckPointer(Gradient)==POINTER_INVALID)
3464  {
3465  Gradient=new CBufferDouble();
3466  if(CheckPointer(Gradient)==POINTER_INVALID)
3467  return false;
3468  }
3469  if(Gradient.GetIndex()>=0)
3470  Gradient.BufferFree();
3471  if(!Gradient.Load(file_handle))
3472  return false;
 3473  if(!Gradient.BufferCreate(OpenCL))
 3474  return false;
3475 //---
3476  if(FileReadInteger(file_handle)==0)
3477  return true;
3478 //---
3479  if(CheckPointer(Weights)==POINTER_INVALID)
3480  {
3481  Weights=new CBufferDouble();
3482  if(CheckPointer(Weights)==POINTER_INVALID)
3483  return false;
3484  }
3485  if(Weights.GetIndex()>=0)
3486  Weights.BufferFree();
3487  if(!Weights.Load(file_handle))
3488  return false;
 3489  if(!Weights.BufferCreate(OpenCL))
 3490  return false;
3491 //---
3492  if(optimization==SGD)
3493  {
3494  if(CheckPointer(DeltaWeights)==POINTER_INVALID)
 3495  {
 3496  DeltaWeights=new CBufferDouble();
 3497  if(CheckPointer(DeltaWeights)==POINTER_INVALID)
 3498  return false;
 3499  }
 3500  if(DeltaWeights.GetIndex()>=0)
 3501  DeltaWeights.BufferFree();
 3502  if(!DeltaWeights.Load(file_handle))
 3503  return false;
 3504  if(!DeltaWeights.BufferCreate(OpenCL))
 3505  return false;
3506  }
3507  else
3508  {
3509  if(CheckPointer(FirstMomentum)==POINTER_INVALID)
 3510  {
 3511  FirstMomentum=new CBufferDouble();
 3512  if(CheckPointer(FirstMomentum)==POINTER_INVALID)
 3513  return false;
 3514  }
 3515  if(FirstMomentum.GetIndex()>=0)
 3516  FirstMomentum.BufferFree();
 3517  if(!FirstMomentum.Load(file_handle))
 3518  return false;
 3519  if(!FirstMomentum.BufferCreate(OpenCL))
 3520  return false;
3521 //---
3522  if(CheckPointer(SecondMomentum)==POINTER_INVALID)
 3523  {
 3524  SecondMomentum=new CBufferDouble();
 3525  if(CheckPointer(SecondMomentum)==POINTER_INVALID)
 3526  return false;
 3527  }
 3528  if(SecondMomentum.GetIndex()>=0)
 3529  SecondMomentum.BufferFree();
 3530  if(!SecondMomentum.Load(file_handle))
 3531  return false;
 3532  if(!SecondMomentum.BufferCreate(OpenCL))
 3533  return false;
3534 
3535  }
3536 //---
3537  return true;
3538  }
3539 //+------------------------------------------------------------------+
3542 //+------------------------------------------------------------------+
 3543 class CNeuronProofOCL : public CNeuronBaseOCL
 3544  {
3545 protected:
3546  uint iWindow;
3547  uint iStep;
3548 
3549  virtual bool feedForward(CNeuronBaseOCL *NeuronOCL);
3550 
3551 public:CNeuronProofOCL(void) : iWindow(2), iStep(1) {};~CNeuronProofOCL(void);
3554  virtual bool Init(uint numOutputs,uint myIndex,COpenCLMy *open_cl,int window, int step, int units_count, ENUM_OPTIMIZATION optimization_type);
3556  //---
3557  virtual bool calcInputGradients(CNeuronBaseOCL *NeuronOCL);
3558  //--- methods for working with files
3559  virtual bool Save(int const file_handle);
3560  virtual bool Load(int const file_handle);
3561  virtual int Type(void) const { return defNeuronProofOCL; }
3562  };
3563 //+------------------------------------------------------------------+
3564 //| |
3565 //+------------------------------------------------------------------+
3566 bool CNeuronProofOCL::Init(uint numOutputs,uint myIndex,COpenCLMy *open_cl, int window,int step, int units_count, ENUM_OPTIMIZATION optimization_type)
3567  {
3568  iWindow=window;
3569  iStep=step;
3570  if(!CNeuronBaseOCL::Init(numOutputs,myIndex,open_cl, units_count, optimization_type))
3571  return false;
3572 //---
3573  return true;
3574  }
3575 //+------------------------------------------------------------------+
3576 //| |
3577 //+------------------------------------------------------------------+
 3578 CNeuronProofOCL::~CNeuronProofOCL(void)
 3579  {
3580  }
3581 //+------------------------------------------------------------------+
3582 //| |
3583 //+------------------------------------------------------------------+
 3584 bool CNeuronProofOCL::feedForward(CNeuronBaseOCL *NeuronOCL)
 3585  {
3586  if(CheckPointer(OpenCL)==POINTER_INVALID || CheckPointer(NeuronOCL)==POINTER_INVALID)
3587  return false;
3588  uint global_work_offset[1]={0};
3589  uint global_work_size[1];
3590  global_work_size[0]=Output.Total();
3591  OpenCL.SetArgumentBuffer(def_k_FeedForwardProof,def_k_ffp_matrix_i,NeuronOCL.getOutputIndex());
 3592  OpenCL.SetArgumentBuffer(def_k_FeedForwardProof,def_k_ffp_matrix_o,getOutputIndex());
 3593  OpenCL.SetArgument(def_k_FeedForwardProof,def_k_ffp_inputs,NeuronOCL.Neurons());
 3594  OpenCL.SetArgument(def_k_FeedForwardProof,def_k_ffp_window,(int)iWindow);
 3595  OpenCL.SetArgument(def_k_FeedForwardProof,def_k_ffp_step,(int)iStep);
3596  //Comment(com+"\n FeedforwardProof");
3597  if(!OpenCL.Execute(def_k_FeedForwardProof,1,global_work_offset,global_work_size))
3598  {
3599  printf("Error of execution kernel FeedForwardProof: %d",GetLastError());
3600  return false;
3601  }
3602 //---
3603  return Output.BufferRead();
3604  }
3605 //+------------------------------------------------------------------+
3606 //| |
3607 //+------------------------------------------------------------------+
 3608 bool CNeuronProofOCL::calcInputGradients(CNeuronBaseOCL *NeuronOCL)
 3609  {
3610  if(CheckPointer(OpenCL)==POINTER_INVALID || CheckPointer(NeuronOCL)==POINTER_INVALID)
3611  return false;
3612  uint global_work_offset[1]={0};
3613  uint global_work_size[1];
3614  global_work_size[0]=NeuronOCL.Neurons();
 3615  OpenCL.SetArgumentBuffer(def_k_CalcInputGradientProof,def_k_cigp_matrix_i,NeuronOCL.getOutputIndex());
 3616  OpenCL.SetArgumentBuffer(def_k_CalcInputGradientProof,def_k_cigp_matrix_g,getGradientIndex());
 3617  OpenCL.SetArgumentBuffer(def_k_CalcInputGradientProof,def_k_cigp_matrix_o,getOutputIndex());
 3618  OpenCL.SetArgumentBuffer(def_k_CalcInputGradientProof,def_k_cigp_matrix_ig,NeuronOCL.getGradientIndex());
 3619  OpenCL.SetArgument(def_k_CalcInputGradientProof,def_k_cigp_outputs,Neurons());
 3620  OpenCL.SetArgument(def_k_CalcInputGradientProof,def_k_cigp_window,(int)iWindow);
 3621  OpenCL.SetArgument(def_k_CalcInputGradientProof,def_k_cigp_step,(int)iStep);
 3622  //Comment(com+"\n CalcInputGradientProof");
3623  if(!OpenCL.Execute(def_k_CalcInputGradientProof,1,global_work_offset,global_work_size))
3624  {
3625  printf("Error of execution kernel CalcInputGradientProof: %d",GetLastError());
3626  return false;
3627  }
3628 //---
3629  double temp[];
3630  return NeuronOCL.getGradient(temp)>0;
3631  }
3632 //+------------------------------------------------------------------+
3636 //+------------------------------------------------------------------+
 3637 class CNeuronConvOCL : public CNeuronProofOCL
 3638  {
3639 protected:
3640  uint iWindowOut;
3641 //---
 3642  CBufferDouble *WeightsConv;
 3643  CBufferDouble *DeltaWeightsConv;
 3644  CBufferDouble *FirstMomentumConv;
 3645  CBufferDouble *SecondMomentumConv;
 3646 //---
3647  virtual bool feedForward(CNeuronBaseOCL *NeuronOCL);
3648  virtual bool updateInputWeights(CNeuronBaseOCL *NeuronOCL);
3649 
3650 public:CNeuronConvOCL(void) : iWindowOut(1) { activation=LReLU; }~CNeuronConvOCL(void);
3653  virtual bool Init(uint numOutputs,uint myIndex,COpenCLMy *open_cl,uint window, uint step, uint window_out, uint units_count, ENUM_OPTIMIZATION optimization_type);
3655 //---
3656  virtual bool SetGradientIndex(int index) { return Gradient.BufferSet(index); }
3657  //---
3658  virtual bool calcInputGradients(CNeuronBaseOCL *NeuronOCL);
3659  virtual int Type(void) const { return defNeuronConvOCL; }
3660  //--- methods for working with files
3661  virtual bool Save(int const file_handle);
3662  virtual bool Load(int const file_handle);
3663  };
3664 //+------------------------------------------------------------------+
3665 //| |
3666 //+------------------------------------------------------------------+
 3667 CNeuronConvOCL::~CNeuronConvOCL(void)
 3668  {
 3669  if(CheckPointer(WeightsConv)!=POINTER_INVALID)
 3670  delete WeightsConv;
 3671  if(CheckPointer(DeltaWeightsConv)!=POINTER_INVALID)
 3672  delete DeltaWeightsConv;
 3673  if(CheckPointer(FirstMomentumConv)!=POINTER_INVALID)
 3674  delete FirstMomentumConv;
 3675  if(CheckPointer(SecondMomentumConv)!=POINTER_INVALID)
 3676  delete SecondMomentumConv;
3677  }
3678 //+------------------------------------------------------------------+
3679 //| |
3680 //+------------------------------------------------------------------+
3681 bool CNeuronConvOCL::Init(uint numOutputs,uint myIndex,COpenCLMy *open_cl,uint window_in,uint step,uint window_out,uint units_count,ENUM_OPTIMIZATION optimization_type)
3682  {
3683  if(window_out<=0)
3684  return false;
3685  if(!CNeuronProofOCL::Init(numOutputs,myIndex,open_cl,window_in,step,units_count*window_out,optimization_type))
3686  return false;
3687 //---
3688  iWindowOut=fmax(window_out,1);
3689 //---
3690  if(CheckPointer(WeightsConv)==POINTER_INVALID)
3691  {
3692  WeightsConv=new CBufferDouble();
3693  if(CheckPointer(WeightsConv)==POINTER_INVALID)
3694  return false;
3695  }
3696  int count=(int)((iWindow+1)*iWindowOut);
3697  if(!WeightsConv.Reserve(count))
3698  return false;
3699  for(int i=0;i<count;i++)
3700  {
3701  double weigh=(MathRand()+1)/32768.0-0.5;
3702  if(weigh==0)
3703  weigh=0.001;
3704  if(!WeightsConv.Add(weigh))
3705  return false;
3706  }
 3707  if(!WeightsConv.BufferCreate(OpenCL))
 3708  return false;
3709 //---
3710  if(optimization==SGD)
3711  {
3712  if(CheckPointer(DeltaWeightsConv)==POINTER_INVALID)
 3713  {
 3714  DeltaWeightsConv=new CBufferDouble();
 3715  if(CheckPointer(DeltaWeightsConv)==POINTER_INVALID)
 3716  return false;
 3717  }
 3718  if(!DeltaWeightsConv.BufferInit(count,0.0))
 3719  return false;
 3720  if(!DeltaWeightsConv.BufferCreate(OpenCL))
 3721  return false;
3722  }
3723  else
3724  {
3725  if(CheckPointer(FirstMomentumConv)==POINTER_INVALID)
 3726  {
 3727  FirstMomentumConv=new CBufferDouble();
 3728  if(CheckPointer(FirstMomentumConv)==POINTER_INVALID)
 3729  return false;
 3730  }
 3731  if(!FirstMomentumConv.BufferInit(count,0.0))
 3732  return false;
 3733  if(!FirstMomentumConv.BufferCreate(OpenCL))
 3734  return false;
3735 //---
3736  if(CheckPointer(SecondMomentumConv)==POINTER_INVALID)
 3737  {
 3738  SecondMomentumConv=new CBufferDouble();
 3739  if(CheckPointer(SecondMomentumConv)==POINTER_INVALID)
 3740  return false;
 3741  }
 3742  if(!SecondMomentumConv.BufferInit(count,0.0))
 3743  return false;
 3744  if(!SecondMomentumConv.BufferCreate(OpenCL))
 3745  return false;
3746 
3747  }
3748 //---
3749  return true;
3750  }
3751 //+------------------------------------------------------------------+
3752 //| |
3753 //+------------------------------------------------------------------+
 3754 bool CNeuronConvOCL::feedForward(CNeuronBaseOCL *NeuronOCL)
 3755  {
3756  if(CheckPointer(OpenCL)==POINTER_INVALID || CheckPointer(NeuronOCL)==POINTER_INVALID)
3757  return false;
3758  uint global_work_offset[1]={0};
3759  uint global_work_size[1];
3760  global_work_size[0]=Output.Total()/iWindowOut;
3762  OpenCL.SetArgumentBuffer(def_k_FeedForwardConv,def_k_ffc_matrix_i,NeuronOCL.getOutputIndex());
3764  OpenCL.SetArgument(def_k_FeedForwardConv,def_k_ffc_inputs,NeuronOCL.Neurons());
3769  if(!OpenCL.Execute(def_k_FeedForwardConv,1,global_work_offset,global_work_size))
3770  {
3771  printf("Error of execution kernel FeedForwardProof: %d",GetLastError());
3772  return false;
3773  }
3774 //---
3775  return Output.BufferRead();
3776  }
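// Work-size note for feedForward() above: Output.Total()/iWindowOut equals the number
// of window positions (units), so one OpenCL work item runs per unit and each item
// fills iWindowOut output values.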
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuronConvOCL::calcInputGradients(CNeuronBaseOCL *NeuronOCL)
  {
   if(CheckPointer(OpenCL)==POINTER_INVALID || CheckPointer(NeuronOCL)==POINTER_INVALID)
      return false;
   uint global_work_offset[1]={0};
   uint global_work_size[1];
   global_work_size[0]=NeuronOCL.Neurons();
   OpenCL.SetArgumentBuffer(def_k_CalcHiddenGradientConv,def_k_chgc_matrix_w,WeightsConv.GetIndex());
   OpenCL.SetArgumentBuffer(def_k_CalcHiddenGradientConv,def_k_chgc_matrix_g,Gradient.GetIndex());
   OpenCL.SetArgumentBuffer(def_k_CalcHiddenGradientConv,def_k_chgc_matrix_o,NeuronOCL.getOutputIndex());
   OpenCL.SetArgumentBuffer(def_k_CalcHiddenGradientConv,def_k_chgc_matrix_ig,NeuronOCL.getGradientIndex());
   OpenCL.SetArgument(def_k_CalcHiddenGradientConv,def_k_chgc_outputs,Output.Total());
   OpenCL.SetArgument(def_k_CalcHiddenGradientConv,def_k_chgc_step,iStep);
   OpenCL.SetArgument(def_k_CalcHiddenGradientConv,def_k_chgc_window_in,iWindow);
   OpenCL.SetArgument(def_k_CalcHiddenGradientConv,def_k_chgc_window_out,iWindowOut);
   OpenCL.SetArgument(def_k_CalcHiddenGradientConv,def_k_chgc_activation,NeuronOCL.Activation());
   if(!OpenCL.Execute(def_k_CalcHiddenGradientConv,1,global_work_offset,global_work_size))
     {
      printf("Error of execution kernel CalcHiddenGradientConv: %d",GetLastError());
      return false;
     }
//---
   double temp[];
   return NeuronOCL.getGradient(temp)>0;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuronConvOCL::updateInputWeights(CNeuronBaseOCL *NeuronOCL)
  {
   if(CheckPointer(OpenCL)==POINTER_INVALID || CheckPointer(NeuronOCL)==POINTER_INVALID)
      return false;
   uint global_work_offset[1]={0};
   uint global_work_size[1];
   global_work_size[0]=WeightsConv.Total();
   if(optimization==SGD)
     {
      OpenCL.SetArgumentBuffer(def_k_UpdateWeightsConvMomentum,def_k_uwcm_matrix_w,WeightsConv.GetIndex());
      OpenCL.SetArgumentBuffer(def_k_UpdateWeightsConvMomentum,def_k_uwcm_matrix_g,Gradient.GetIndex());
      OpenCL.SetArgumentBuffer(def_k_UpdateWeightsConvMomentum,def_k_uwcm_matrix_i,NeuronOCL.getOutputIndex());
      OpenCL.SetArgumentBuffer(def_k_UpdateWeightsConvMomentum,def_k_uwcm_matrix_dw,DeltaWeightsConv.GetIndex());
      OpenCL.SetArgument(def_k_UpdateWeightsConvMomentum,def_k_uwcm_inputs,NeuronOCL.Neurons());
      OpenCL.SetArgument(def_k_UpdateWeightsConvMomentum,def_k_uwcm_learning_rates,eta);
      OpenCL.SetArgument(def_k_UpdateWeightsConvMomentum,def_k_uwcm_momentum,alpha);
      OpenCL.SetArgument(def_k_UpdateWeightsConvMomentum,def_k_uwcm_window_in,iWindow);
      OpenCL.SetArgument(def_k_UpdateWeightsConvMomentum,def_k_uwcm_window_out,iWindowOut);
      OpenCL.SetArgument(def_k_UpdateWeightsConvMomentum,def_k_uwcm_step,iStep);
      ResetLastError();
      if(!OpenCL.Execute(def_k_UpdateWeightsConvMomentum,1,global_work_offset,global_work_size))
        {
         printf("Error of execution kernel UpdateWeightsConvMomentum: %d",GetLastError());
         return false;
        }
     }
   else
     {
      if(!OpenCL.SetArgumentBuffer(def_k_UpdateWeightsConvAdam,def_k_uwca_matrix_w,WeightsConv.GetIndex()))
         return false;
      if(!OpenCL.SetArgumentBuffer(def_k_UpdateWeightsConvAdam,def_k_uwca_matrix_g,Gradient.GetIndex()))
         return false;
      if(!OpenCL.SetArgumentBuffer(def_k_UpdateWeightsConvAdam,def_k_uwca_matrix_i,NeuronOCL.getOutputIndex()))
         return false;
      if(!OpenCL.SetArgumentBuffer(def_k_UpdateWeightsConvAdam,def_k_uwca_matrix_m,FirstMomentumConv.GetIndex()))
         return false;
      if(!OpenCL.SetArgumentBuffer(def_k_UpdateWeightsConvAdam,def_k_uwca_matrix_v,SecondMomentumConv.GetIndex()))
         return false;
      double lt=eta*sqrt(1-pow(b2,t))/(1-pow(b1,t));
      if(!OpenCL.SetArgument(def_k_UpdateWeightsConvAdam,def_k_uwa_inputs,NeuronOCL.Neurons()))
         return false;
      if(!OpenCL.SetArgument(def_k_UpdateWeightsConvAdam,def_k_uwca_l,lt))
         return false;
      if(!OpenCL.SetArgument(def_k_UpdateWeightsConvAdam,def_k_uwca_b1,b1))
         return false;
      if(!OpenCL.SetArgument(def_k_UpdateWeightsConvAdam,def_k_uwca_b2,b2))
         return false;
      OpenCL.SetArgument(def_k_UpdateWeightsConvAdam,def_k_uwca_window_in,iWindow);
      OpenCL.SetArgument(def_k_UpdateWeightsConvAdam,def_k_uwca_window_out,iWindowOut);
      OpenCL.SetArgument(def_k_UpdateWeightsConvAdam,def_k_uwca_step,iStep);
      ResetLastError();
      if(!OpenCL.Execute(def_k_UpdateWeightsConvAdam,1,global_work_offset,global_work_size))
        {
         printf("Error of execution kernel UpdateWeightsConvAdam: %d",GetLastError());
         return false;
        }
      t++;
     }
//---
   if(WeightsConv.BufferRead())
     {
      //---
      global_work_size[0]=1;
      OpenCL.SetArgumentBuffer(def_k_NormilizeWeights,def_k_norm_buffer,WeightsConv.GetIndex());
      OpenCL.SetArgument(def_k_NormilizeWeights,def_k_norm_dimension,WeightsConv.Total());
      if(!OpenCL.Execute(def_k_NormilizeWeights,1,global_work_offset,global_work_size))
        {
         printf("Error of execution kernel Normalize: %d",GetLastError());
         return false;
        }
     }
   else
     {
      return false;
     }
   return WeightsConv.BufferRead();
  }
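// Bias-correction check for the Adam rate lt above: with b1=0.99 and b2=0.9999 the
// first update (t=1) gives lt=eta*sqrt(1-0.9999)/(1-0.99)=eta*0.01/0.01=eta, and
// lt tends back to eta as t grows large, since both correction terms approach 1.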
//+------------------------------------------------------------------+
//| Class CNeuronAttentionOCL: Self-Attention layer for GPU          |
//| calculation                                                      |
//+------------------------------------------------------------------+
class CNeuronAttentionOCL : public CNeuronBaseOCL
  {
protected:
   CNeuronConvOCL    *Querys;        ///< Convolution layer for Querys
   CNeuronConvOCL    *Keys;          ///< Convolution layer for Keys
   CNeuronConvOCL    *Values;        ///< Convolution layer for Values
   CBufferDouble     *Scores;        ///< Buffer for Scores matrix
   CNeuronBaseOCL    *AttentionOut;  ///< Layer of Self-Attention output
   CNeuronConvOCL    *FF1;           ///< Convolution layer for first layer of Feed Forward block
   CNeuronConvOCL    *FF2;           ///< Convolution layer for second layer of Feed Forward block
//---
   uint              iWindow;
   uint              iUnits;
//---
   virtual bool      feedForward(CNeuronBaseOCL *prevLayer);
   virtual bool      updateInputWeights(CNeuronBaseOCL *prevLayer);

public:
                     CNeuronAttentionOCL(void) : iWindow(1), iUnits(0) {};
                    ~CNeuronAttentionOCL(void);
   virtual bool      Init(uint numOutputs,uint myIndex,COpenCLMy *open_cl,uint window,uint units_count,ENUM_OPTIMIZATION optimization_type);
//---
   virtual bool      calcInputGradients(CNeuronBaseOCL *prevLayer);
   //---
   virtual int       Type(void) const { return defNeuronAttentionOCL; }
   //--- methods for working with files
   virtual bool      Save(int const file_handle);
   virtual bool      Load(int const file_handle);
  };
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
CNeuronAttentionOCL::~CNeuronAttentionOCL(void)
  {
   if(CheckPointer(Querys)!=POINTER_INVALID)
      delete Querys;
   if(CheckPointer(Keys)!=POINTER_INVALID)
      delete Keys;
   if(CheckPointer(Values)!=POINTER_INVALID)
      delete Values;
   if(CheckPointer(Scores)!=POINTER_INVALID)
      delete Scores;
   if(CheckPointer(AttentionOut)!=POINTER_INVALID)
      delete AttentionOut;
   if(CheckPointer(FF1)!=POINTER_INVALID)
      delete FF1;
   if(CheckPointer(FF2)!=POINTER_INVALID)
      delete FF2;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuronAttentionOCL::Init(uint numOutputs,uint myIndex,COpenCLMy *open_cl,uint window,uint units_count,ENUM_OPTIMIZATION optimization_type)
  {
   if(!CNeuronBaseOCL::Init(numOutputs,myIndex,open_cl,units_count*window,optimization_type))
      return false;
//---
   if(CheckPointer(Querys)==POINTER_INVALID)
     {
      Querys=new CNeuronConvOCL();
      if(CheckPointer(Querys)==POINTER_INVALID)
         return false;
      if(!Querys.Init(0,0,open_cl,window,window,window,units_count,optimization_type))
         return false;
      Querys.SetActivationFunction(None);
     }
//---
   if(CheckPointer(Keys)==POINTER_INVALID)
     {
      Keys=new CNeuronConvOCL();
      if(CheckPointer(Keys)==POINTER_INVALID)
         return false;
      if(!Keys.Init(0,1,open_cl,window,window,window,units_count,optimization_type))
         return false;
      Keys.SetActivationFunction(None);
     }
//---
   if(CheckPointer(Values)==POINTER_INVALID)
     {
      Values=new CNeuronConvOCL();
      if(CheckPointer(Values)==POINTER_INVALID)
         return false;
      if(!Values.Init(0,2,open_cl,window,window,window,units_count,optimization_type))
         return false;
      Values.SetActivationFunction(None);
     }
//---
   if(CheckPointer(Scores)==POINTER_INVALID)
     {
      Scores=new CBufferDouble();
      if(CheckPointer(Scores)==POINTER_INVALID)
         return false;
     }
   if(!Scores.BufferInit(units_count*units_count,0.0))
      return false;
   if(!Scores.BufferCreate(OpenCL))
      return false;
//---
   if(CheckPointer(AttentionOut)==POINTER_INVALID)
     {
      AttentionOut=new CNeuronBaseOCL();
      if(CheckPointer(AttentionOut)==POINTER_INVALID)
         return false;
      if(!AttentionOut.Init(0,3,open_cl,window*units_count,optimization_type))
         return false;
      AttentionOut.SetActivationFunction(None);
     }
//---
   if(CheckPointer(FF1)==POINTER_INVALID)
     {
      FF1=new CNeuronConvOCL();
      if(CheckPointer(FF1)==POINTER_INVALID)
         return false;
      if(!FF1.Init(0,4,open_cl,window,window,window*2,units_count,optimization_type))
         return false;
      FF1.SetActivationFunction(LReLU);
     }
//---
   if(CheckPointer(FF2)==POINTER_INVALID)
     {
      FF2=new CNeuronConvOCL();
      if(CheckPointer(FF2)==POINTER_INVALID)
         return false;
      if(!FF2.Init(0,5,open_cl,window*2,window*2,window,units_count,optimization_type))
         return false;
      FF2.SetActivationFunction(None);
      FF2.SetGradientIndex(Gradient.GetIndex());
     }
//---
   iWindow=window;
   iUnits=units_count;
//---
   return true;
  }
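// Dimension summary of Init() above: Querys, Keys and Values each map units_count
// blocks of window inputs to window outputs; Scores holds units_count*units_count
// elements; FF1 expands window -> 2*window and FF2 projects 2*window back to window,
// the usual Transformer feed-forward shape.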
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuronAttentionOCL::feedForward(CNeuronBaseOCL *prevLayer)
  {
   if(CheckPointer(prevLayer)==POINTER_INVALID)
      return false;
//---
     {
      uint global_work_offset[1]={0};
      uint global_work_size[1];
      global_work_size[0]=1;
      OpenCL.SetArgumentBuffer(def_k_Normilize,def_k_norm_buffer,prevLayer.getOutputIndex());
      OpenCL.SetArgument(def_k_Normilize,def_k_norm_dimension,prevLayer.Neurons());
      if(!OpenCL.Execute(def_k_Normilize,1,global_work_offset,global_work_size))
        {
         printf("Error of execution kernel Normalize: %d",GetLastError());
         return false;
        }
      if(!prevLayer.Output.BufferRead())
         return false;
     }
//---
   if(CheckPointer(Querys)==POINTER_INVALID || !Querys.FeedForward(prevLayer))
      return false;
   if(CheckPointer(Keys)==POINTER_INVALID || !Keys.FeedForward(prevLayer))
      return false;
   if(CheckPointer(Values)==POINTER_INVALID || !Values.FeedForward(prevLayer))
      return false;
//---
     {
      uint global_work_offset[1]={0};
      uint global_work_size[1];
      global_work_size[0]=iUnits;
      OpenCL.SetArgumentBuffer(def_k_AttentionScore,def_k_as_querys,Querys.getOutputIndex());
      OpenCL.SetArgumentBuffer(def_k_AttentionScore,def_k_as_keys,Keys.getOutputIndex());
      OpenCL.SetArgumentBuffer(def_k_AttentionScore,def_k_as_score,Scores.GetIndex());
      OpenCL.SetArgument(def_k_AttentionScore,def_k_as_dimension,iWindow);
      if(!OpenCL.Execute(def_k_AttentionScore,1,global_work_offset,global_work_size))
        {
         printf("Error of execution kernel AttentionScore: %d",GetLastError());
         return false;
        }
      if(!Scores.BufferRead())
         return false;
     }
//---
     {
      uint global_work_offset[2]={0,0};
      uint global_work_size[2];
      global_work_size[0]=iUnits;
      global_work_size[1]=iWindow;
      OpenCL.SetArgumentBuffer(def_k_AttentionOut,def_k_aout_scores,Scores.GetIndex());
      OpenCL.SetArgumentBuffer(def_k_AttentionOut,def_k_aout_inputs,prevLayer.getOutputIndex());
      OpenCL.SetArgumentBuffer(def_k_AttentionOut,def_k_aout_values,Values.getOutputIndex());
      OpenCL.SetArgumentBuffer(def_k_AttentionOut,def_k_aout_out,AttentionOut.getOutputIndex());
      //Comment(com+"\n Attention Out");
      if(!OpenCL.Execute(def_k_AttentionOut,2,global_work_offset,global_work_size))
        {
         printf("Error of execution kernel Attention Out: %d",GetLastError());
         return false;
        }
      double temp[];
      if(!AttentionOut.getOutputVal(temp))
         return false;
     }
//---
     {
      uint global_work_offset[1]={0};
      uint global_work_size[1];
      global_work_size[0]=1;
      OpenCL.SetArgumentBuffer(def_k_Normilize,def_k_norm_buffer,AttentionOut.getOutputIndex());
      OpenCL.SetArgument(def_k_Normilize,def_k_norm_dimension,AttentionOut.Neurons());
      if(!OpenCL.Execute(def_k_Normilize,1,global_work_offset,global_work_size))
        {
         printf("Error of execution kernel Normalize: %d",GetLastError());
         return false;
        }
      double temp[];
      if(!AttentionOut.getOutputVal(temp))
         return false;
     }
//---
   if(!FF1.FeedForward(AttentionOut))
      return false;
   if(!FF2.FeedForward(FF1))
      return false;
//---
     {
      uint global_work_offset[1]={0};
      uint global_work_size[1];
      global_work_size[0]=iUnits;
      OpenCL.SetArgumentBuffer(def_k_MatrixSum,def_k_sum_matrix1,AttentionOut.getOutputIndex());
      OpenCL.SetArgumentBuffer(def_k_MatrixSum,def_k_sum_matrix2,FF2.getOutputIndex());
      OpenCL.SetArgumentBuffer(def_k_MatrixSum,def_k_sum_matrix_out,Output.GetIndex());
      OpenCL.SetArgument(def_k_MatrixSum,def_k_sum_dimension,iWindow);
      OpenCL.SetArgument(def_k_MatrixSum,def_k_sum_multiplyer,0.5);
      if(!OpenCL.Execute(def_k_MatrixSum,1,global_work_offset,global_work_size))
        {
         printf("Error of execution kernel MatrixSum: %d",GetLastError());
         return false;
        }
      if(!Output.BufferRead())
         return false;
     }
//---
   return true;
  }
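// Pipeline reading of feedForward() above (the kernel bodies live in the companion
// OpenCL program, so the softmax/scaling details are an assumption): AttentionScore
// presumably computes Score=softmax(Q*K'/sqrt(dimension)) per unit, AttentionOut mixes
// the Values by those scores, and the result is normalized, run through FF1/FF2 and
// averaged with the attention output via MatrixSum with multiplyer 0.5.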
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuronAttentionOCL::calcInputGradients(CNeuronBaseOCL *prevLayer)
  {
   if(CheckPointer(prevLayer)==POINTER_INVALID)
      return false;
//---
   if(!FF2.calcInputGradients(FF1))
      return false;
   if(!FF1.calcInputGradients(AttentionOut))
      return false;
//---
     {
      uint global_work_offset[1]={0};
      uint global_work_size[1];
      global_work_size[0]=iUnits;
      OpenCL.SetArgumentBuffer(def_k_MatrixSum,def_k_sum_matrix1,AttentionOut.getGradientIndex());
      OpenCL.SetArgumentBuffer(def_k_MatrixSum,def_k_sum_matrix2,Gradient.GetIndex());
      OpenCL.SetArgumentBuffer(def_k_MatrixSum,def_k_sum_matrix_out,AttentionOut.getGradientIndex());
      OpenCL.SetArgument(def_k_MatrixSum,def_k_sum_dimension,iWindow);
      OpenCL.SetArgument(def_k_MatrixSum,def_k_sum_multiplyer,0.5);
      if(!OpenCL.Execute(def_k_MatrixSum,1,global_work_offset,global_work_size))
        {
         printf("Error of execution kernel MatrixSum: %d",GetLastError());
         return false;
        }
      double temp[];
      if(AttentionOut.getGradient(temp)<=0)
         return false;
     }
//---
     {
      uint global_work_offset[2]={0,0};
      uint global_work_size[2];
      global_work_size[0]=iUnits;
      global_work_size[1]=iWindow;
      OpenCL.SetArgumentBuffer(def_k_AttentionGradients,def_k_ag_querys,Querys.getOutputIndex());
      OpenCL.SetArgumentBuffer(def_k_AttentionGradients,def_k_ag_querys_g,Querys.getGradientIndex());
      OpenCL.SetArgumentBuffer(def_k_AttentionGradients,def_k_ag_keys,Keys.getOutputIndex());
      OpenCL.SetArgumentBuffer(def_k_AttentionGradients,def_k_ag_keys_g,Keys.getGradientIndex());
      OpenCL.SetArgumentBuffer(def_k_AttentionGradients,def_k_ag_values,Values.getOutputIndex());
      OpenCL.SetArgumentBuffer(def_k_AttentionGradients,def_k_ag_values_g,Values.getGradientIndex());
      OpenCL.SetArgumentBuffer(def_k_AttentionGradients,def_k_ag_scores,Scores.GetIndex());
      OpenCL.SetArgumentBuffer(def_k_AttentionGradients,def_k_ag_gradient,AttentionOut.getGradientIndex());
      if(!OpenCL.Execute(def_k_AttentionGradients,2,global_work_offset,global_work_size))
        {
         printf("Error of execution kernel AttentionGradients: %d",GetLastError());
         return false;
        }
      double temp[];
      if(Keys.getGradient(temp)<=0)
         return false;
     }
//---
   if(!Querys.calcInputGradients(prevLayer))
      return false;
//---
     {
      uint global_work_offset[1]={0};
      uint global_work_size[1];
      global_work_size[0]=iUnits;
      OpenCL.SetArgumentBuffer(def_k_MatrixSum,def_k_sum_matrix1,AttentionOut.getGradientIndex());
      OpenCL.SetArgumentBuffer(def_k_MatrixSum,def_k_sum_matrix2,prevLayer.getGradientIndex());
      OpenCL.SetArgumentBuffer(def_k_MatrixSum,def_k_sum_matrix_out,AttentionOut.getGradientIndex());
      OpenCL.SetArgument(def_k_MatrixSum,def_k_sum_dimension,iWindow);
      OpenCL.SetArgument(def_k_MatrixSum,def_k_sum_multiplyer,1.0);
      if(!OpenCL.Execute(def_k_MatrixSum,1,global_work_offset,global_work_size))
        {
         printf("Error of execution kernel MatrixSum: %d",GetLastError());
         return false;
        }
      double temp[];
      if(AttentionOut.getGradient(temp)<=0)
         return false;
     }
//---
   if(!Keys.calcInputGradients(prevLayer))
      return false;
//---
     {
      uint global_work_offset[1]={0};
      uint global_work_size[1];
      global_work_size[0]=iUnits;
      OpenCL.SetArgumentBuffer(def_k_MatrixSum,def_k_sum_matrix1,AttentionOut.getGradientIndex());
      OpenCL.SetArgumentBuffer(def_k_MatrixSum,def_k_sum_matrix2,prevLayer.getGradientIndex());
      OpenCL.SetArgumentBuffer(def_k_MatrixSum,def_k_sum_matrix_out,AttentionOut.getGradientIndex());
      OpenCL.SetArgument(def_k_MatrixSum,def_k_sum_dimension,iWindow);
      OpenCL.SetArgument(def_k_MatrixSum,def_k_sum_multiplyer,1.0);
      if(!OpenCL.Execute(def_k_MatrixSum,1,global_work_offset,global_work_size))
        {
         printf("Error of execution kernel MatrixSum: %d",GetLastError());
         return false;
        }
      double temp[];
      if(AttentionOut.getGradient(temp)<=0)
         return false;
     }
//---
   if(!Values.calcInputGradients(prevLayer))
      return false;
//---
     {
      uint global_work_offset[1]={0};
      uint global_work_size[1];
      global_work_size[0]=iUnits;
      OpenCL.SetArgumentBuffer(def_k_MatrixSum,def_k_sum_matrix1,AttentionOut.getGradientIndex());
      OpenCL.SetArgumentBuffer(def_k_MatrixSum,def_k_sum_matrix2,prevLayer.getGradientIndex());
      OpenCL.SetArgumentBuffer(def_k_MatrixSum,def_k_sum_matrix_out,prevLayer.getGradientIndex());
      OpenCL.SetArgument(def_k_MatrixSum,def_k_sum_dimension,iWindow);
      OpenCL.SetArgument(def_k_MatrixSum,def_k_sum_multiplyer,0.1);
      if(!OpenCL.Execute(def_k_MatrixSum,1,global_work_offset,global_work_size))
        {
         printf("Error of execution kernel MatrixSum: %d",GetLastError());
         return false;
        }
      double temp[];
      if(prevLayer.getGradient(temp)<=0)
         return false;
     }
//---
     {
      uint global_work_offset[1]={0};
      uint global_work_size[1];
      global_work_size[0]=1;
      OpenCL.SetArgumentBuffer(def_k_Normilize,def_k_norm_buffer,prevLayer.getGradientIndex());
      OpenCL.SetArgument(def_k_Normilize,def_k_norm_dimension,prevLayer.Neurons());
      if(!OpenCL.Execute(def_k_Normilize,1,global_work_offset,global_work_size))
        {
         printf("Error of execution kernel Normalize: %d",GetLastError());
         return false;
        }
      double temp[];
      if(prevLayer.getGradient(temp)<=0)
         return false;
     }
//---
   return true;
  }
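// Gradient-accumulation reading of calcInputGradients() above: the Querys, Keys and
// Values branches each deposit their input gradients into prevLayer and appear to be
// folded into AttentionOut's gradient with multiplyer 1.0; the last MatrixSum writes
// the combined gradient into prevLayer scaled by 0.1 before the normalization pass.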
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuronAttentionOCL::updateInputWeights(CNeuronBaseOCL *prevLayer)
  {
   if(!Querys.UpdateInputWeights(prevLayer))
      return false;
   if(!Keys.UpdateInputWeights(prevLayer))
      return false;
   if(!Values.UpdateInputWeights(prevLayer))
      return false;
   if(!FF1.UpdateInputWeights(AttentionOut))
      return false;
   if(!FF2.UpdateInputWeights(FF1))
      return false;
//---
   return true;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
void CBufferDouble::BufferToCSV(const string file_name)
  {
   BufferRead();
   int h=FileOpen(file_name,FILE_CSV|FILE_WRITE);
   if(h==INVALID_HANDLE)
      return;
//---
   for(int i=0;i<m_data_total;i++)
      FileWrite(h,m_data[i]);
   FileFlush(h);
   FileClose(h);
  }
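// Usage sketch (file name illustrative, called from inside a layer class where the
// buffer is accessible): dump a buffer for offline inspection; without the FILE_COMMON
// flag, FileOpen() places the CSV under the terminal's MQL5\Files folder.
/*
   WeightsConv.BufferToCSV("conv_weights.csv");
*/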
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuronAttentionOCL::Save(const int file_handle)
  {
   if(!CNeuronBaseOCL::Save(file_handle))
      return false;
   if(CheckPointer(Querys)==POINTER_INVALID || !Querys.Save(file_handle))
      return false;
   if(CheckPointer(Keys)==POINTER_INVALID || !Keys.Save(file_handle))
      return false;
   if(CheckPointer(Values)==POINTER_INVALID || !Values.Save(file_handle))
      return false;
   if(CheckPointer(Scores)==POINTER_INVALID || !Scores.Save(file_handle))
      return false;
   if(CheckPointer(AttentionOut)==POINTER_INVALID || !AttentionOut.Save(file_handle))
      return false;
   if(CheckPointer(FF1)==POINTER_INVALID || !FF1.Save(file_handle))
      return false;
   if(CheckPointer(FF2)==POINTER_INVALID || !FF2.Save(file_handle))
      return false;
   if(FileWriteInteger(file_handle,iWindow,INT_VALUE)<INT_VALUE)
      return false;
   if(FileWriteInteger(file_handle,iUnits,INT_VALUE)<INT_VALUE)
      return false;
//---
   return true;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuronAttentionOCL::Load(const int file_handle)
  {
   if(!CNeuronBaseOCL::Load(file_handle))
      return false;
//---
   if(CheckPointer(Querys)==POINTER_INVALID)
      Querys=new CNeuronConvOCL();
   if(FileReadInteger(file_handle,INT_VALUE)!=defNeuronConvOCL || !Querys.Load(file_handle))
      return false;
//---
   if(CheckPointer(Keys)==POINTER_INVALID)
      Keys=new CNeuronConvOCL();
   if(FileReadInteger(file_handle,INT_VALUE)!=defNeuronConvOCL || !Keys.Load(file_handle))
      return false;
//---
   if(CheckPointer(Values)==POINTER_INVALID)
      Values=new CNeuronConvOCL();
   if(FileReadInteger(file_handle,INT_VALUE)!=defNeuronConvOCL || !Values.Load(file_handle))
      return false;
//---
   if(CheckPointer(Scores)==POINTER_INVALID)
      Scores=new CBufferDouble();
   if(Scores.GetIndex()>=0)
      Scores.BufferFree();
   if(!Scores.Load(file_handle))
      return false;
   if(!Scores.BufferCreate(OpenCL))
      return false;
//---
   if(CheckPointer(AttentionOut)==POINTER_INVALID)
      AttentionOut=new CNeuronBaseOCL();
   if(FileReadInteger(file_handle,INT_VALUE)!=defNeuronBaseOCL || !AttentionOut.Load(file_handle))
      return false;
   if(CheckPointer(FF1)==POINTER_INVALID)
      FF1=new CNeuronConvOCL();
   if(FileReadInteger(file_handle,INT_VALUE)!=defNeuronConvOCL || !FF1.Load(file_handle))
      return false;
   if(CheckPointer(FF2)==POINTER_INVALID)
      FF2=new CNeuronConvOCL();
   if(FileReadInteger(file_handle,INT_VALUE)!=defNeuronConvOCL || !FF2.Load(file_handle))
      return false;
   iWindow=FileReadInteger(file_handle);
   iUnits=FileReadInteger(file_handle);
//---
   return true;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuronConvOCL::Load(const int file_handle)
  {
   if(!CNeuronProofOCL::Load(file_handle))
      return false;
//---
   if(CheckPointer(WeightsConv)==POINTER_INVALID)
      WeightsConv=new CBufferDouble();
   if(WeightsConv.GetIndex()>=0)
      WeightsConv.BufferFree();
   if(!WeightsConv.Load(file_handle))
      return false;
   if(!WeightsConv.BufferCreate(OpenCL))
      return false;
//---
   if(optimization==SGD)
     {
      if(CheckPointer(DeltaWeightsConv)==POINTER_INVALID)
         DeltaWeightsConv=new CBufferDouble();
      if(DeltaWeightsConv.GetIndex()>=0)
         DeltaWeightsConv.BufferFree();
      if(!DeltaWeightsConv.Load(file_handle))
         return false;
      if(!DeltaWeightsConv.BufferCreate(OpenCL))
         return false;
     }
   else
     {
      if(CheckPointer(FirstMomentumConv)==POINTER_INVALID)
         FirstMomentumConv=new CBufferDouble();
      if(FirstMomentumConv.GetIndex()>=0)
         FirstMomentumConv.BufferFree();
      if(!FirstMomentumConv.Load(file_handle))
         return false;
      if(!FirstMomentumConv.BufferCreate(OpenCL))
         return false;
      //---
      if(CheckPointer(SecondMomentumConv)==POINTER_INVALID)
         SecondMomentumConv=new CBufferDouble();
      if(SecondMomentumConv.GetIndex()>=0)
         SecondMomentumConv.BufferFree();
      if(!SecondMomentumConv.Load(file_handle))
         return false;
      if(!SecondMomentumConv.BufferCreate(OpenCL))
         return false;
     }
   iWindowOut=FileReadInteger(file_handle);
//---
   return true;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuronConvOCL::Save(const int file_handle)
  {
   if(!CNeuronProofOCL::Save(file_handle))
      return false;
//---
   if(CheckPointer(WeightsConv)==POINTER_INVALID || !WeightsConv.Save(file_handle))
      return false;
   if(optimization==SGD && (CheckPointer(DeltaWeightsConv)==POINTER_INVALID || !DeltaWeightsConv.BufferRead() || !DeltaWeightsConv.Save(file_handle)))
      return false;
   if(optimization==ADAM && (CheckPointer(FirstMomentumConv)==POINTER_INVALID || !FirstMomentumConv.BufferRead() || !FirstMomentumConv.Save(file_handle)))
      return false;
   if(optimization==ADAM && (CheckPointer(SecondMomentumConv)==POINTER_INVALID || !SecondMomentumConv.BufferRead() || !SecondMomentumConv.Save(file_handle)))
      return false;
   if(FileWriteInteger(file_handle,iWindowOut,INT_VALUE)<INT_VALUE)
      return false;
//---
   return true;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuronProofOCL::Save(const int file_handle)
  {
   if(!CNeuronBaseOCL::Save(file_handle))
      return false;
   if(FileWriteInteger(file_handle,iWindow,INT_VALUE)<INT_VALUE)
      return false;
   if(FileWriteInteger(file_handle,iStep,INT_VALUE)<INT_VALUE)
      return false;
//---
   return true;
  }
//+------------------------------------------------------------------+
//|                                                                  |
//+------------------------------------------------------------------+
bool CNeuronProofOCL::Load(const int file_handle)
  {
   if(!CNeuronBaseOCL::Load(file_handle))
      return false;
   iWindow=FileReadInteger(file_handle);
   iStep=FileReadInteger(file_handle);
//---
   return true;
  }
CNeuronConv::calcHiddenGradients
virtual bool calcHiddenGradients(CLayer *&nextLayer)
Method to transfer gradient to previous layer.
Definition: NeuroNet.mqh:1008
defNeuronLSTM
#define defNeuronLSTM
LSTM Neuron.
Definition: NeuroNet.mqh:65
b2
#define b2
Second momentum multiplier of Adam optimization.
Definition: NeuroNet.mqh:38
defBufferDouble
#define defBufferDouble
Data Buffer OpenCL.
Definition: NeuroNet.mqh:70
CNet::CNet
CNet(CArrayObj *Description)
Constructor.
Definition: NeuroNet.mqh:1391
CNeuronBase::Save
virtual bool Save(int const file_handle)
Save method.
Definition: NeuroNet.mqh:1308
CNeuronBase::TanhFunction
virtual double TanhFunction(double x)
Calculating .
Definition: NeuroNet.mqh:427
def_k_ag_values
#define def_k_ag_values
Matrix of Values.
Definition: NeuroNet.mqh:270
def_k_ff_matrix_o
#define def_k_ff_matrix_o
Output tensor.
Definition: NeuroNet.mqh:93
def_k_chgc_step
#define def_k_chgc_step
Step size.
Definition: NeuroNet.mqh:196
def_k_uwa_matrix_g
#define def_k_uwa_matrix_g
Adam Tensor of gradients at current layer.
Definition: NeuroNet.mqh:131
CNeuronBaseOCL::Neurons
virtual int Neurons(void)
Get number of neurons in layer.
Definition: NeuroNet.mqh:2972
def_k_ffc_matrix_i
#define def_k_ffc_matrix_i
Inputs tesor.
Definition: NeuroNet.mqh:178
CNeuronBase::updateInputWeights
virtual bool updateInputWeights(CLayer *&prevLayer)
Method for updating weights.
Definition: NeuroNet.mqh:424
CNeuronAttentionOCL::FF2
CNeuronConvOCL * FF2
Convolution layer for second layer of Feed Forward block.
Definition: NeuroNet.mqh:3899
CConnection::Type
virtual int Type(void) const
Identificator of class.
Definition: NeuroNet.mqh:331
def_k_CalcOutputGradient
#define def_k_CalcOutputGradient
Index of Output gradients calculation kernel (CalcOutputGradient)
Definition: NeuroNet.mqh:101
def_k_uwa_matrix_w
#define def_k_uwa_matrix_w
Adam Weights matrix (m+1)*n, where m - number of neurons in previous layer and n - number of neurons ...
Definition: NeuroNet.mqh:130
def_k_norm_dimension
#define def_k_norm_dimension
Dimension of matrix.
Definition: NeuroNet.mqh:283
CNeuronBaseOCL::getWeights
virtual int getWeights(double &values[])
Get values of weights matrix buffer.
Definition: NeuroNet.mqh:2971
CLayer::IncreaseTotal
virtual void IncreaseTotal()
Method for increase number of items in layer.
Definition: NeuroNet.mqh:653
CNeuronLSTM::CalculateGate
virtual CArrayDouble * CalculateGate(CLayer *gate, CArrayDouble *sequence)
Method of calculation gate iteration.
Definition: NeuroNet.mqh:2354
def_k_ffс_window_out
#define def_k_ffс_window_out
Size of output window.
Definition: NeuroNet.mqh:183
CNeuronLSTM::calcInputGradients
virtual bool calcInputGradients(CLayer *prevLayer)
Method to transfer gradients to previous layer.
Definition: NeuroNet.mqh:2597
def_k_ag_values_g
#define def_k_ag_values_g
Matrix of Values' Gradients.
Definition: NeuroNet.mqh:271
def_k_CalcHiddenGradientConv
#define def_k_CalcHiddenGradientConv
Index of the kernel of the convolution neuron to transfer gradient to previous layer (CalcHiddenGradi...
Definition: NeuroNet.mqh:190
CArrayLayer::Type
virtual int Type(void) const
Identificator of class.
Definition: NeuroNet.mqh:777
def_k_cigp_outputs
#define def_k_cigp_outputs
Number of outputs.
Definition: NeuroNet.mqh:164
CNeuronBase::t
int t
Count of iterations.
Definition: NeuroNet.mqh:420
def_k_chgc_activation
#define def_k_chgc_activation
Activation type (ENUM_ACTIVATION)
Definition: NeuroNet.mqh:199
def_k_chgc_matrix_ig
#define def_k_chgc_matrix_ig
Tensor of gradients at previous layer.
Definition: NeuroNet.mqh:194
CNeuronAttentionOCL::updateInputWeights
virtual bool updateInputWeights(CNeuronBaseOCL *prevLayer)
Method for updating weights.
Definition: NeuroNet.mqh:4275
CBufferDouble::~CBufferDouble
~CBufferDouble(void)
Destructor.
Definition: NeuroNet.mqh:2809
CNeuronAttentionOCL::CNeuronAttentionOCL
CNeuronAttentionOCL(void)
Constructor.
Definition: NeuroNet.mqh:3908
CNeuronProof::getOutputLayer
virtual CLayer * getOutputLayer(void)
Method for getting a pointer to the resulting neural layer. Not used in fully connected neural networ...
Definition: NeuroNet.mqh:823
def_k_uwcm_learning_rates
#define def_k_uwcm_learning_rates
Learning rates.
Definition: NeuroNet.mqh:211
CNeuronBaseOCL::getFirstMomentumIndex
virtual int getFirstMomentumIndex(void)
Get index of first momentum matrix buffer (Adam)
Definition: NeuroNet.mqh:2964
CNeuronBase::setGradient
virtual void setGradient(double val)
Set gradient value to neuron.
Definition: NeuroNet.mqh:441
CNeuronAttentionOCL::Type
virtual int Type(void) const
Identificator of class.
Definition: NeuroNet.mqh:3914
CNeuronProof::OutputLayer
CLayer * OutputLayer
Layer of output data. Used for connection with next layer.
Definition: NeuroNet.mqh:810
CArrayLayer::CArrayLayer
CArrayLayer(void)
Constructor.
Definition: NeuroNet.mqh:773
def_k_MatrixSum
#define def_k_MatrixSum
Index of the kernel for calculation Sum of 2 matrix with multiplyer (SumMatrix)
Definition: NeuroNet.mqh:253
ENUM_ACTIVATION
ENUM_ACTIVATION
Enum of activation formula used
Definition: NeuroNet.mqh:298
CNeuronBaseOCL::SecondMomentum
CBufferDouble * SecondMomentum
Buffer of second momentum matrix (ADAM)
Definition: NeuroNet.mqh:2936
CNeuronBase::Load
virtual bool Load(int const file_handle)
Definition: NeuroNet.mqh:453
def_k_ffp_inputs
#define def_k_ffp_inputs
Number of inputs.
Definition: NeuroNet.mqh:151
CNeuronConvOCL::SetGradientIndex
virtual bool SetGradientIndex(int index)
Method for change index of gradient buffer.
Definition: NeuroNet.mqh:3656
CNeuronProofOCL::CNeuronProofOCL
CNeuronProofOCL(void)
Constructor.
Definition: NeuroNet.mqh:3552
def_k_cigp_matrix_ig
#define def_k_cigp_matrix_ig
Tensor of gradients at previous layer.
Definition: NeuroNet.mqh:163
CNeuronBaseOCL::Weights
CBufferDouble * Weights
Buffer of weights matrix.
Definition: NeuroNet.mqh:2932
def_k_AttentionGradients
#define def_k_AttentionGradients
Index of the kernel for gradients calculation process (AttentionIsideGradients)
Definition: NeuroNet.mqh:265
COpenCLMy
Class for working with OpenCL.
Definition: NeuroNet.mqh:628
CNeuronConvOCL::SecondMomentumConv
CBufferDouble * SecondMomentumConv
Matrix of second momentum to previous layer (ADAM)
Definition: NeuroNet.mqh:3645
CBufferDouble::BufferInit
virtual bool BufferInit(uint count, double value)
Method for buffer initialization.
Definition: NeuroNet.mqh:2891
CNeuronAttentionOCL::iWindow
uint iWindow
Window size.
Definition: NeuroNet.mqh:3901
CNeuronBase::Type
virtual int Type(void) const
Identificator of class.
Definition: NeuroNet.mqh:459
CArrayCon
Array of connections to anothe neuron.
Definition: NeuroNet.mqh:373
TANH
@ TANH
Use for activation neuron.
Definition: NeuroNet.mqh:300
CNeuronBaseOCL::getDeltaWeightsIndex
virtual int getDeltaWeightsIndex(void)
Get index of delta weights matrix buffer (SGD)
Definition: NeuroNet.mqh:2963
CNeuronBase::~CNeuronBase
~CNeuronBase(void)
Destructor.
Definition: NeuroNet.mqh:480
def_k_uwca_matrix_g
#define def_k_uwca_matrix_g
Tensor of gradients at current layer.
Definition: NeuroNet.mqh:219
CNeuronBaseOCL::Activation
virtual int Activation(void)
Get type of activation function.
Definition: NeuroNet.mqh:2973
CLayer::CreateElement
virtual bool CreateElement(int const index)
Method for creating new element in layer.
Definition: NeuroNet.mqh:660
ADAM
@ ADAM
Adam.
Definition: NeuroNet.mqh:310
def_k_uwca_matrix_m
#define def_k_uwca_matrix_m
Matrix of first momentum.
Definition: NeuroNet.mqh:221
None
@ None
Without activation formula.
Definition: NeuroNet.mqh:299
def_k_sum_matrix1
#define def_k_sum_matrix1
First matrix.
Definition: NeuroNet.mqh:254
CConnection::~CConnection
~CConnection()
Destructor.
Definition: NeuroNet.mqh:327
CBufferDouble::OpenCL
COpenCLMy * OpenCL
Object for working with OpenCL.
Definition: NeuroNet.mqh:2780
CNeuronBaseOCL
The base class of neuron for GPU calculation.
Definition: NeuroNet.mqh:2927
CLayerDescription::CLayerDescription
CLayerDescription(void)
Constructor.
Definition: NeuroNet.mqh:1348
CBufferDouble::BufferWrite
virtual bool BufferWrite(void)
Method for writing buffer data to GPU.
Definition: NeuroNet.mqh:2881
CBufferDouble::BufferFree
virtual bool BufferFree(void)
Method for deleting buffer from GPU.
Definition: NeuroNet.mqh:2847
CNeuronProof::~CNeuronProof
~CNeuronProof(void)
Destructor.
Definition: NeuroNet.mqh:1104
def_k_uwca_b2
#define def_k_uwca_b2
Second momentum multiplier.
Definition: NeuroNet.mqh:226
CNeuronAttentionOCL::Init
virtual bool Init(uint numOutputs, uint myIndex, COpenCLMy *open_cl, uint window, uint units_count, ENUM_OPTIMIZATION optimization_type)
Method of initialization class.
Definition: NeuroNet.mqh:3942
CNeuron::Type
virtual int Type(void) const
Identificator of class.
Definition: NeuroNet.mqh:527
CNet::backProp
void backProp(CArrayDouble *targetVals)
Back propagation method.
Definition: NeuroNet.mqh:1745
CNeuronBase::feedForward
virtual bool feedForward(CLayer *prevLayer)
Feed Forward method.
Definition: NeuroNet.mqh:422
CNeuronConvOCL::CNeuronConvOCL
CNeuronConvOCL(void)
Constructor.
Definition: NeuroNet.mqh:3651
def_k_ff_inputs
#define def_k_ff_inputs
Number of inputs.
Definition: NeuroNet.mqh:94
CNeuronLSTM::feedForward
virtual bool feedForward(CLayer *prevLayer)
Feed Forward method. Detailed description on the link.
Definition: NeuroNet.mqh:2262
def_k_cog_matrix_o
#define def_k_cog_matrix_o
Output tensor.
Definition: NeuroNet.mqh:103
CNeuronConvOCL::calcInputGradients
virtual bool calcInputGradients(CNeuronBaseOCL *NeuronOCL)
Method to transfer gradients to previous layer.
Definition: NeuroNet.mqh:3780
CNeuronConvOCL::WeightsConv
CBufferDouble * WeightsConv
Matrix of weights to previous layer.
Definition: NeuroNet.mqh:3642
CNeuronBase::alpha
static double alpha
Multiplier to momentum in SGD optimization.
Definition: NeuroNet.mqh:436
CLayer::CLayer
CLayer(uint outputs=0, int handle=INVALID_HANDLE, COpenCLMy *OpenCL=NULL)
Constructor.
Definition: NeuroNet.mqh:2729
def_k_as_querys
#define def_k_as_querys
Matrix of Querys.
Definition: NeuroNet.mqh:242
CNeuronBase::optimization
ENUM_OPTIMIZATION optimization
Optimization method (ENUM_OPTIMIZATION)
Definition: NeuroNet.mqh:419
CNeuronConv::activationFunctionDerivative
virtual double activationFunctionDerivative(double x)
Calculate derivative of activation function.
Definition: NeuroNet.mqh:1028
CNeuronAttentionOCL::Values
CNeuronConvOCL * Values
Convolution layer for Values.
Definition: NeuroNet.mqh:3895
def_k_sum_matrix2
#define def_k_sum_matrix2
Second matrix.
Definition: NeuroNet.mqh:255
CNeuronProof::Init
virtual bool Init(uint numOutputs, uint myIndex, int window, int step, int units_count, ENUM_OPTIMIZATION optimization_type)
Method of initialization class.
Definition: NeuroNet.mqh:1076
CNeuronBase::CNeuronBase
CNeuronBase(void)
Constructor.
Definition: NeuroNet.mqh:469
def_k_uwcm_matrix_g
#define def_k_uwcm_matrix_g
Tensor of gradients at current layer.
Definition: NeuroNet.mqh:207
CNeuronBaseOCL::feedForward
virtual bool feedForward(CNeuronBaseOCL *NeuronOCL)
Feed Forward method of calling kernel FeedForward().
Definition: NeuroNet.mqh:3172
CConnection::weight
double weight
Current weight.
Definition: NeuroNet.mqh:321
CNeuronConv::~CNeuronConv
~CNeuronConv(void)
Destructor.
Definition: NeuroNet.mqh:846
CNeuronBase::getPrevVal
virtual double getPrevVal()
Return result of feed forward operations at previous iteration.
Definition: NeuroNet.mqh:440
CNet::recentAverageSmoothingFactor
static double recentAverageSmoothingFactor
Smoothing factor of average error.
Definition: NeuroNet.mqh:1376
CNeuronAttentionOCL::calcInputGradients
virtual bool calcInputGradients(CNeuronBaseOCL *prevLayer)
Method to transfer gradients to previous layer.
Definition: NeuroNet.mqh:4136
def_k_CalcInputGradientProof
#define def_k_CalcInputGradientProof
Index of the kernel of the Pooling neuron to transfer gradient to previous layer (CalcInputGradientPr...
Definition: NeuroNet.mqh:159
CNeuronBase::getOutputVal
virtual double getOutputVal()
Return result of feed forward operations.
Definition: NeuroNet.mqh:439
CNeuronConv::Save
virtual bool Save(int const file_handle)
Save method.
Definition: NeuroNet.mqh:2099
CNet::Type
virtual int Type(void) const
Identificator of class.
Definition: NeuroNet.mqh:1377
CNeuron
Class of neuron for full connected layers.
Definition: NeuroNet.mqh:515
def_k_ffp_window
#define def_k_ffp_window
Size of input window.
Definition: NeuroNet.mqh:152
CNeuronProofOCL::~CNeuronProofOCL
~CNeuronProofOCL(void)
Destructor.
Definition: NeuroNet.mqh:3578
defNeuronConv
#define defNeuronConv
Convolution neuron.
Definition: NeuroNet.mqh:63
def_k_FeedForward
#define def_k_FeedForward
Index of FeedForward kernel.
Definition: NeuroNet.mqh:90
CNeuronLSTM::Type
virtual int Type(void) const
Identificator of class.
Definition: NeuroNet.mqh:2156
CNeuronLSTM::calcHiddenGradients
virtual bool calcHiddenGradients(CLayer *&nextLayer)
Method to transfer gradient to previous layer.
Definition: NeuroNet.mqh:2396
CLayerDescription::optimization
ENUM_OPTIMIZATION optimization
Type of optimization method (ENUM_OPTIMIZATION)
Definition: NeuroNet.mqh:1343
def_k_uwca_matrix_i
#define def_k_uwca_matrix_i
Inputs tesor.
Definition: NeuroNet.mqh:220
def_k_cigp_window
#define def_k_cigp_window
Size of input window.
Definition: NeuroNet.mqh:165
CConnection
Class of connection to anothe neuron.
Definition: NeuroNet.mqh:319
SGD
@ SGD
Stochastic gradient descent.
Definition: NeuroNet.mqh:309
CNeuronBase::Init
virtual bool Init(uint numOutputs, uint myIndex, ENUM_OPTIMIZATION optimization_type)
Method of initialization class.
Definition: NeuroNet.mqh:488
b1
#define b1
First momentum multiplier of Adam optimization.
Definition: NeuroNet.mqh:36
def_k_uwm_matrix_dw
#define def_k_uwm_matrix_dw
SGD Matrix of delta weights in last correction.
Definition: NeuroNet.mqh:124
def_k_uwm_matrix_i
#define def_k_uwm_matrix_i
SGD Inputs tesor.
Definition: NeuroNet.mqh:123
CNeuronBase::getOutputLayer
virtual CLayer * getOutputLayer(void)
Method for getting a pointer to the resulting neural layer. Not used in fully connected neural networ...
Definition: NeuroNet.mqh:428
CNeuronBaseOCL::Init
virtual bool Init(uint numOutputs, uint myIndex, COpenCLMy *open_cl, uint numNeurons, ENUM_OPTIMIZATION optimization_type)
Method of initialization class.
Definition: NeuroNet.mqh:3031
CArrayCon::CArrayCon
CArrayCon(void)
Constructor.
Definition: NeuroNet.mqh:375
defNeuronAttentionOCL
#define defNeuronAttentionOCL
Attention neuron OpenCL.
Definition: NeuroNet.mqh:74
def_k_ag_querys_g
#define def_k_ag_querys_g
Matrix of Querys' Gradients.
Definition: NeuroNet.mqh:267
def_k_chgc_outputs
#define def_k_chgc_outputs
Number of outputs.
Definition: NeuroNet.mqh:195
CNet::feedForward
bool feedForward(CArrayDouble *inputVals)
Feed Forward method.
Definition: NeuroNet.mqh:1640
COpenCLMy::AddBufferFromArray
int AddBufferFromArray(T &data[], const uint data_array_offset, const uint data_array_count, const uint flags)
Method for creating OpenCL buffer from array.
Definition: NeuroNet.mqh:2698
def_k_uwca_window_in
#define def_k_uwca_window_in
Size of input window.
Definition: NeuroNet.mqh:227
def_k_uwcm_step
#define def_k_uwcm_step
Step size.
Definition: NeuroNet.mqh:215
CNeuron::CNeuron
CNeuron(void)
Constructor.
Definition: NeuroNet.mqh:522
CNeuronBaseOCL::getGradientIndex
virtual int getGradientIndex(void)
Get index of gradient buffer.
Definition: NeuroNet.mqh:2961
defNeuronBaseOCL
#define defNeuronBaseOCL
Neuron Base OpenCL.
Definition: NeuroNet.mqh:71
CNeuron::calcOutputGradients
virtual bool calcOutputGradients(double targetVals)
Method of output gradients calculation.
Definition: NeuroNet.mqh:590
def_k_uwcm_matrix_dw
#define def_k_uwcm_matrix_dw
Matrix of delta weights in last correction.
Definition: NeuroNet.mqh:209
def_k_sum_matrix_out
#define def_k_sum_matrix_out
Output matrix.
Definition: NeuroNet.mqh:256
CNeuronLSTM::InitLayer
virtual bool InitLayer(CLayer *layer, int numUnits, int numOutputs, ENUM_OPTIMIZATION optimization_type)
Method of gate initialization.
Definition: NeuroNet.mqh:2232
CNeuronBaseOCL::Load
virtual bool Load(int const file_handle)
Load method.
Definition: NeuroNet.mqh:3429
def_k_chg_outputs
#define def_k_chg_outputs
Number of outputs.
Definition: NeuroNet.mqh:112
CNeuronLSTM::InputGradient
CArrayDouble * InputGradient
Gradient on previous layer.
Definition: NeuroNet.mqh:2134
CNet::~CNet
~CNet(void)
Destructor.
Definition: NeuroNet.mqh:2762
CNeuronAttentionOCL::Querys
CNeuronConvOCL * Querys
Convolution layer for Querys.
Definition: NeuroNet.mqh:3893
lr
#define lr
learning rate
Definition: NeuroNet.mqh:32
SIGMOID
@ SIGMOID
Use fo activation neuron.
Definition: NeuroNet.mqh:301
def_k_uwa_b1
#define def_k_uwa_b1
Adam First momentum multiplier.
Definition: NeuroNet.mqh:137
CNeuronLSTM::NewContent
CLayer * NewContent
Object of new content.
Definition: NeuroNet.mqh:2130
CConnection::vt
double vt
Second moment in Adam optimization.
Definition: NeuroNet.mqh:324
def_k_as_score
#define def_k_as_score
Matrix of Scores.
Definition: NeuroNet.mqh:244
CArrayCon::Type
virtual int Type(void) const
Identificator of class.
Definition: NeuroNet.mqh:380
def_k_ffc_step
#define def_k_ffc_step
Step size.
Definition: NeuroNet.mqh:181
CNeuronBaseOCL::FirstMomentum
CBufferDouble * FirstMomentum
Buffer of first momentum matrix (ADAM)
Definition: NeuroNet.mqh:2935
def_k_aout_inputs
#define def_k_aout_inputs
Inputs tesor.
Definition: NeuroNet.mqh:250
COpenCLMy::~COpenCLMy
~COpenCLMy(void)
Destructor.
Definition: NeuroNet.mqh:631
CLayerDescription::window
int window
Size of input window.
Definition: NeuroNet.mqh:1339
CNeuron::~CNeuron
~CNeuron(void)
Destructor.
Definition: NeuroNet.mqh:523
CLayerDescription::activation
ENUM_ACTIVATION activation
Type of activation function (ENUM_ACTIVATION)
Definition: NeuroNet.mqh:1342
CConnection::CConnection
CConnection(double w)
Constructor.
Definition: NeuroNet.mqh:326
CNeuronLSTM::OutputGate
CLayer * OutputGate
Object of output gate.
Definition: NeuroNet.mqh:2129
def_k_aout_scores
#define def_k_aout_scores
Matrix of Scores.
Definition: NeuroNet.mqh:248
CNeuronProof::Load
virtual bool Load(int const file_handle)
Load method.
Definition: NeuroNet.mqh:2087
CBufferDouble::BufferCreate
virtual bool BufferCreate(COpenCLMy *opencl)
Method for creating new buffer.
Definition: NeuroNet.mqh:2823
def_k_ffp_step
#define def_k_ffp_step
Step size.
Definition: NeuroNet.mqh:153
def_k_cigp_matrix_o
#define def_k_cigp_matrix_o
Output tensor.
Definition: NeuroNet.mqh:162
CNeuronConvOCL::updateInputWeights
virtual bool updateInputWeights(CNeuronBaseOCL *NeuronOCL)
Method for updating weights.
Definition: NeuroNet.mqh:3808
def_k_ffc_matrix_o
#define def_k_ffc_matrix_o
Output tensor.
Definition: NeuroNet.mqh:179
CNeuronBase::SigmoidFunctionDerivative
virtual double SigmoidFunctionDerivative(double x)
Calculate derivative of Sigmoid function.
Definition: NeuroNet.mqh:445
def_k_uwcm_matrix_w
#define def_k_uwcm_matrix_w
Weights matrix (m+1)*n, where m - input window and n - output window.
Definition: NeuroNet.mqh:206
CNeuronLSTM::Memory
CArrayDouble * Memory
Memory array.
Definition: NeuroNet.mqh:2131
def_k_ff_matrix_w
#define def_k_ff_matrix_w
Weights matrix (m+1)*n, where m - number of neurons in layer and n - number of outputs (neurons in ne...
Definition: NeuroNet.mqh:91
CNeuronProofOCL::Type
virtual int Type(void) const
Identificator of class.
Definition: NeuroNet.mqh:3561
CNeuronBaseOCL::getPrevOutIndex
virtual int getPrevOutIndex(void)
Get index of previous iteration output buffer.
Definition: NeuroNet.mqh:2960
CLayer::~CLayer
~CLayer(void)
Destructor.
Definition: NeuroNet.mqh:650
def_k_ffc_matrix_w
#define def_k_ffc_matrix_w
Weights matrix (m+1)*n, where m - input window and n - output window.
Definition: NeuroNet.mqh:177
CNeuronBaseOCL::getOutputIndex
virtual int getOutputIndex(void)
Get index of output buffer.
Definition: NeuroNet.mqh:2959
CConnection::deltaWeight
double deltaWeight
last delta of weight used in SGD optimization
Definition: NeuroNet.mqh:322
CBufferDouble
Class of OpenCL buffer data. Used for transfer data from CPU to GPU and back.
Definition: NeuroNet.mqh:2778
CNeuronProof::iWindow
int iWindow
Input window size.
Definition: NeuroNet.mqh:811
def_k_ffp_matrix_i
#define def_k_ffp_matrix_i
Inputs tesor.
Definition: NeuroNet.mqh:149
def_k_uwcm_window_in
#define def_k_uwcm_window_in
Size of input window.
Definition: NeuroNet.mqh:213
CNeuronConvOCL::~CNeuronConvOCL
~CNeuronConvOCL(void)
Destructor.
Definition: NeuroNet.mqh:3667
CLayerDescription::count
int count
Number of neurons.
Definition: NeuroNet.mqh:1338
CNeuronBaseOCL::Gradient
CBufferDouble * Gradient
Buffer of gradient tenzor.
Definition: NeuroNet.mqh:2934
CNeuronLSTM::PrevMemory
CArrayDouble * PrevMemory
Ravious iteration memory array.
Definition: NeuroNet.mqh:2132
CNeuronBase::Connections
CArrayCon * Connections
Array of connections with neurons in next layer.
Definition: NeuroNet.mqh:417
def_k_FeedForwardConv
#define def_k_FeedForwardConv
Index of the kernel of the convolution neuron for Feed forward process (FeedForwardConv)
Definition: NeuroNet.mqh:176
CArrayLayer::CreateElement
virtual bool CreateElement(uint neurons, uint outputs)
Method for creating new element.
Definition: NeuroNet.mqh:782
CNeuronProof::calcHiddenGradients
virtual bool calcHiddenGradients(CLayer *&nextLayer)
Method to transfer gradient to previous layer.
Definition: NeuroNet.mqh:1139
CNeuronAttentionOCL::iUnits
uint iUnits
Number of units.
Definition: NeuroNet.mqh:3902
CNeuronAttentionOCL::Save
virtual bool Save(int const file_handle)
Save method.
Definition: NeuroNet.mqh:4308
def_k_uwm_matrix_g
#define def_k_uwm_matrix_g
SGD Tensor of gradients at current layer.
Definition: NeuroNet.mqh:122
defArrayConnects
#define defArrayConnects
Array of connections.
Definition: NeuroNet.mqh:52
CNeuronBaseOCL::Output
CBufferDouble * Output
Buffer of Output tenzor.
Definition: NeuroNet.mqh:2930
def_k_as_dimension
#define def_k_as_dimension
Dimension of Key.
Definition: NeuroNet.mqh:245
CNeuronProof::CNeuronProof
CNeuronProof(void)
Constructor.
Definition: NeuroNet.mqh:818
CNeuronBase::activationFunction
virtual double activationFunction(double x)
Method to calculate activation function.
Definition: NeuroNet.mqh:2663
CNeuronBase::setOutputVal
virtual void setOutputVal(double val)
Set the output value.
Definition: NeuroNet.mqh:438
def_k_uwm_learning_rates
#define def_k_uwm_learning_rates
SGD Learning rates.
Definition: NeuroNet.mqh:126
CNeuronProofOCL::iWindow
uint iWindow
Input window size.
Definition: NeuroNet.mqh:3546
CNeuronBaseOCL::PrevOutput
CBufferDouble * PrevOutput
Buffer of previous iteration Output tenzor.
Definition: NeuroNet.mqh:2931
def_k_UpdateWeightsAdam
#define def_k_UpdateWeightsAdam
Index Adam optomization Update weights kernel (UpdateWeightsAdam)
Definition: NeuroNet.mqh:129
CNeuronConv::updateInputWeights
virtual bool updateInputWeights(CLayer *&prevLayer)
Method for updating weights.
Definition: NeuroNet.mqh:1037
def_k_uwm_inputs
#define def_k_uwm_inputs
SGD Number of inputs.
Definition: NeuroNet.mqh:125
def_k_ag_querys
#define def_k_ag_querys
Matrix of Querys.
Definition: NeuroNet.mqh:266
def_k_cog_matrix_t
#define def_k_cog_matrix_t
Target tensor.
Definition: NeuroNet.mqh:102
CNeuronConvOCL::FirstMomentumConv
CBufferDouble * FirstMomentumConv
Matrix of first momentum to previous layer (ADAM)
Definition: NeuroNet.mqh:3644
def_k_uwcm_momentum
#define def_k_uwcm_momentum
Momentum multiplier.
Definition: NeuroNet.mqh:212
def_k_ag_keys
#define def_k_ag_keys
Matrix of Keys.
Definition: NeuroNet.mqh:268
CNeuronBaseOCL::OpenCL
COpenCLMy * OpenCL
Object for working with OpenCL.
Definition: NeuroNet.mqh:2929
CNeuronProofOCL::calcInputGradients
virtual bool calcInputGradients(CNeuronBaseOCL *NeuronOCL)
Method to transfer gradients to previous layer.
Definition: NeuroNet.mqh:3608
CNeuronLSTM::ForgetGate
CLayer * ForgetGate
Object of forget gate.
Definition: NeuroNet.mqh:2127
CNeuronLSTM::updateInputWeights
virtual bool updateInputWeights(CLayer *&prevLayer)
Method for updating weights.
Definition: NeuroNet.mqh:2531
LReLU
@ LReLU
For activation neuron use LReLU.
Definition: NeuroNet.mqh:302
CNet::Load
bool Load(string file_name, double &error, double &undefine, double &forecast, datetime &time, bool common=true)
Load method.
Definition: NeuroNet.mqh:1982
CNeuronProof
Class of pooling layer.
Definition: NeuroNet.mqh:808
def_k_uwca_step
#define def_k_uwca_step
Step size.
Definition: NeuroNet.mqh:229
def_k_ffc_window_in
#define def_k_ffc_window_in
Size of input window.
Definition: NeuroNet.mqh:182
def_k_UpdateWeightsConvAdam
#define def_k_UpdateWeightsConvAdam
Index of the kernel of the convolution neuron to update weights Adam (UpdateWeightsConvAdam)
Definition: NeuroNet.mqh:217
defNeuronConvOCL
#define defNeuronConvOCL
Conolution neuron OpenCL.
Definition: NeuroNet.mqh:72
CNeuronAttentionOCL::Scores
CBufferDouble * Scores
Buffer for Scores matrix.
Definition: NeuroNet.mqh:3896
CNeuronBaseOCL::Type
virtual int Type(void) const
Identificator of class.
Definition: NeuroNet.mqh:2988
CNeuronLSTM::CNeuronLSTM
CNeuronLSTM(void)
Constructor.
Definition: NeuroNet.mqh:2161
CNeuronConvOCL::iWindowOut
uint iWindowOut
Size of out window.
Definition: NeuroNet.mqh:3640
CNeuronBase::activationFunctionDerivative
virtual double activationFunctionDerivative(double x)
Calculate derivative of activation function.
Definition: NeuroNet.mqh:2680
def_k_chg_matrix_o
#define def_k_chg_matrix_o
Output tensor.
Definition: NeuroNet.mqh:110
CNeuronAttentionOCL::FF1
CNeuronConvOCL * FF1
Convolution layer for first layer of Feed Forward block.
Definition: NeuroNet.mqh:3898
CNet::Save
bool Save(string file_name, double error, double undefine, double forecast, datetime time, bool common=true)
Save method.
Definition: NeuroNet.mqh:1957
def_k_sum_multiplyer
#define def_k_sum_multiplyer
Multiplyer for output.
Definition: NeuroNet.mqh:258
CBufferDouble::GetData
virtual int GetData(double &values[])
Read data from buffer to array.
Definition: NeuroNet.mqh:2902
def_k_UpdateWeightsConvMomentum
#define def_k_UpdateWeightsConvMomentum
Index of the kernel of the convolution neuron to update weights SGD (UpdateWeightsConvMomentum)
Definition: NeuroNet.mqh:205
CNeuronBaseOCL::DeltaWeights
CBufferDouble * DeltaWeights
Buffer of last delta weights matrix (SGD)
Definition: NeuroNet.mqh:2933
def_k_ag_keys_g
#define def_k_ag_keys_g
Matrix of Keys' Gradients.
Definition: NeuroNet.mqh:269
CNeuronConv::Load
virtual bool Load(int const file_handle)
Load method.
Definition: NeuroNet.mqh:2111
CBufferDouble::BufferToCSV
virtual void BufferToCSV(const string file_name)
Save buffer data to CSV file.
Definition: NeuroNet.mqh:4293
momentum
#define momentum
momentum for SGD optimization
Definition: NeuroNet.mqh:33
CNeuronProofOCL::Save
virtual bool Save(int const file_handle)
Save method.
Definition: NeuroNet.mqh:4458
CNeuronBaseOCL::UpdateInputWeights
virtual bool UpdateInputWeights(CObject *SourceObject)
Dispatch method for defining the subroutine for updating weights.
Definition: NeuroNet.mqh:3359
defLayer
#define defLayer
Layer of neurons.
Definition: NeuroNet.mqh:53
CBufferDouble::GetIndex
virtual int GetIndex(void)
Get buffer index.
Definition: NeuroNet.mqh:2794
defConnect
#define defConnect
Connection.
Definition: NeuroNet.mqh:60
CNeuronBase::activation
ENUM_ACTIVATION activation
Activation type (ENUM_ACTIVATION)
Definition: NeuroNet.mqh:418
def_k_uwca_l
#define def_k_uwca_l
Learning rates.
Definition: NeuroNet.mqh:224
CNeuronProofOCL::feedForward
virtual bool feedForward(CNeuronBaseOCL *NeuronOCL)
Feed Forward method.
Definition: NeuroNet.mqh:3584
CNet::getResults
void getResults(CArrayDouble *&resultVals)
Method to get results of feed forward process.
Definition: NeuroNet.mqh:1904
CBufferDouble::Type
virtual int Type(void) const
Identificator of class.
Definition: NeuroNet.mqh:2796
def_k_FeedForwardProof
#define def_k_FeedForwardProof
Index of the kernel of the Pooling neuron for Feed forward process (FeedForwardProof)
Definition: NeuroNet.mqh:148
CNeuronBaseOCL::SetActivationFunction
virtual void SetActivationFunction(ENUM_ACTIVATION value)
Set the type of activation function (ENUM_ACTIVATION)
Definition: NeuroNet.mqh:2957
CNeuronBaseOCL::getWeightsIndex
virtual int getWeightsIndex(void)
Get index of weights matrix buffer.
Definition: NeuroNet.mqh:2962
CNeuronBase::calcHiddenGradients
virtual bool calcHiddenGradients(CLayer *&nextLayer)
Method to transfer gradient to previous layer.
Definition: NeuroNet.mqh:423
CNeuronAttentionOCL
Class of Self-Attention layer GPU calculation.
Definition: NeuroNet.mqh:3891
CNeuronLSTM::Save
virtual bool Save(int const file_handle)
Save method.
Definition: NeuroNet.mqh:2623
def_k_cigp_step
#define def_k_cigp_step
Step size.
Definition: NeuroNet.mqh:166
def_k_uwm_momentum
#define def_k_uwm_momentum
SGD Momentum multiplier.
Definition: NeuroNet.mqh:127
defNeuronProofOCL
#define defNeuronProofOCL
Proof neuron OpenCL.
Definition: NeuroNet.mqh:73
CNet::getRecentAverageError
double getRecentAverageError()
Method to check quality of study.
Definition: NeuroNet.mqh:1370
CNeuronProof::calcInputGradients
virtual bool calcInputGradients(CLayer *prevLayer)
Method to transfer gradients to previous layer.
Definition: NeuroNet.mqh:1160
def_k_uwca_b1
#define def_k_uwca_b1
First momentum multiplier.
Definition: NeuroNet.mqh:225
CConnection::mt
double mt
First moment in Adam optimization.
Definition: NeuroNet.mqh:323
def_k_ff_activation
#define def_k_ff_activation
Activation type (ENUM_ACTIVATION)
Definition: NeuroNet.mqh:95
CNeuronBaseOCL::getOutputVal
virtual int getOutputVal(CArrayDouble *values)
Get values of output buffer.
Definition: NeuroNet.mqh:2968
CNeuronBaseOCL::calcOutputGradients
virtual bool calcOutputGradients(CArrayDouble *Target)
Method of output gradients calculation by calling kernel CalcOutputGradient().
Definition: NeuroNet.mqh:3237
CNeuronBaseOCL::Save
virtual bool Save(int const file_handle)
Save method.
Definition: NeuroNet.mqh:3380
def_k_uwcm_matrix_i
#define def_k_uwcm_matrix_i
Inputs tesor.
Definition: NeuroNet.mqh:208
CNeuronProof::Save
virtual bool Save(int const file_handle)
Save method.
Definition: NeuroNet.mqh:2073
def_k_ag_scores
#define def_k_ag_scores
Matrix of Scores.
Definition: NeuroNet.mqh:272
CLayerDescription::~CLayerDescription
~CLayerDescription(void)
Destructor.
Definition: NeuroNet.mqh:1335
def_k_UpdateWeightsMomentum
#define def_k_UpdateWeightsMomentum
Index SGD optomization Update weights kernel (UpdateWeightsMomentum)
Definition: NeuroNet.mqh:120
def_k_AttentionScore
#define def_k_AttentionScore
Index of the kernel of the attention neuron to calculate score matrix (AttentionScore)
Definition: NeuroNet.mqh:241
def_k_chg_matrix_ig
#define def_k_chg_matrix_ig
Tensor of gradients at previous layer.
Definition: NeuroNet.mqh:111
CNeuronBaseOCL::getSecondMomentumIndex
virtual int getSecondMomentumIndex(void)
Get index of Second momentum matrix buffer (Adam)
Definition: NeuroNet.mqh:2965
CNeuronProofOCL::Load
virtual bool Load(int const file_handle)
Load method.
Definition: NeuroNet.mqh:4472
def_k_Normilize
#define def_k_Normilize
Index of the kernel for matrix normalization (Normalize)
Definition: NeuroNet.mqh:281
def_k_uwa_inputs
#define def_k_uwa_inputs
Adam Number of inputs.
Definition: NeuroNet.mqh:135
CLayer::Type
virtual int Type(void) const
Identificator of class.
Definition: NeuroNet.mqh:654
CNeuronProof::iStep
int iStep
Size of step.
Definition: NeuroNet.mqh:812
def_k_uwa_b2
#define def_k_uwa_b2
Adam Second momentum multiplier.
Definition: NeuroNet.mqh:138
CNeuronProofOCL
Class of pooling layer GPU calculation.
Definition: NeuroNet.mqh:3544
CNeuronBaseOCL::~CNeuronBaseOCL
~CNeuronBaseOCL(void)
Destructor.
Definition: NeuroNet.mqh:3010
CNeuronBase::getGradient
virtual double getGradient()
Return gradient of neuron.
Definition: NeuroNet.mqh:442
CNeuronBaseOCL::getOutputVal
virtual int getOutputVal(double &values[])
Get values of output buffer.
Definition: NeuroNet.mqh:2967
CNeuronConv::param
double param
Definition: NeuroNet.mqh:839
CNeuronLSTM
Class of recurrent LSTM unit.
Definition: NeuroNet.mqh:2125
CNeuronProofOCL::iStep
uint iStep
Size of step.
Definition: NeuroNet.mqh:3547
CArrayLayer::~CArrayLayer
~CArrayLayer(void)
Destructor.
Definition: NeuroNet.mqh:774
def_k_ffc_inputs
#define def_k_ffc_inputs
Number of inputs.
Definition: NeuroNet.mqh:180
CConnection::Save
virtual bool Save(int const file_handle)
Save method.
Definition: NeuroNet.mqh:336
CNeuronAttentionOCL::~CNeuronAttentionOCL
~CNeuronAttentionOCL(void)
Destructor.
Definition: NeuroNet.mqh:3922
def_k_uwca_window_out
#define def_k_uwca_window_out
Size of output window.
Definition: NeuroNet.mqh:228
CNeuronBaseOCL::t
int t
Count of iterations.
Definition: NeuroNet.mqh:2940
def_k_uwa_matrix_i
#define def_k_uwa_matrix_i
Adam Inputs tesor.
Definition: NeuroNet.mqh:132
CNeuronLSTM::~CNeuronLSTM
~CNeuronLSTM(void)
Destructor.
Definition: NeuroNet.mqh:2175
def_k_sum_dimension
#define def_k_sum_dimension
Dimension of matrix.
Definition: NeuroNet.mqh:257
CNeuronBaseOCL::activation
ENUM_ACTIVATION activation
Activation type (ENUM_ACTIVATION)
Definition: NeuroNet.mqh:2943
CNeuronAttentionOCL::feedForward
virtual bool feedForward(CNeuronBaseOCL *prevLayer)
Feed Forward method.
Definition: NeuroNet.mqh:4028
CNeuron::sumDOW
virtual double sumDOW(CLayer *&nextLayer)
A method for collecting gradients from the next layer.
Definition: NeuroNet.mqh:560
def_k_ffc_activation
#define def_k_ffc_activation
Activation type (ENUM_ACTIVATION)
Definition: NeuroNet.mqh:184
CNeuronBase::SigmoidFunction
virtual double SigmoidFunction(double x)
Calculating Sigmoid .
Definition: NeuroNet.mqh:426
CNeuronLSTM::Load
virtual bool Load(int const file_handle)
Load method.
Definition: NeuroNet.mqh:2643
CNeuronBase::getConnections
virtual CArrayCon * getConnections()
Method to get access to array of connections.
Definition: NeuroNet.mqh:443
def_k_cog_matrix_ig
#define def_k_cog_matrix_ig
Tensor of gradients at previous layer.
Definition: NeuroNet.mqh:104
CNeuronConvOCL::Load
virtual bool Load(int const file_handle)
Load method.
Definition: NeuroNet.mqh:4385
ENUM_OPTIMIZATION
ENUM_OPTIMIZATION
Enum of optimization method used
Definition: NeuroNet.mqh:308
CNeuronBaseOCL::CNeuronBaseOCL
CNeuronBaseOCL(void)
Constructor.
Definition: NeuroNet.mqh:2993
CNeuronConvOCL
Class of convolution layer GPU calculation.
Definition: NeuroNet.mqh:3638
CBufferDouble::CBufferDouble
CBufferDouble(void)
Constructor.
Definition: NeuroNet.mqh:2802
def_k_uwm_matrix_w
#define def_k_uwm_matrix_w
SGD Weights matrix (m+1)*n, where m - number of neurons in previous layer and n - number of neurons i...
Definition: NeuroNet.mqh:121
def_k_uwa_matrix_v
#define def_k_uwa_matrix_v
Adam Matrix of seconfd momentum.
Definition: NeuroNet.mqh:134
defNeuronProof
#define defNeuronProof
Proof neuron.
Definition: NeuroNet.mqh:64
CNeuronConvOCL::Save
virtual bool Save(int const file_handle)
Save method.
Definition: NeuroNet.mqh:4437
CNeuronBase::gradient
double gradient
Current gradient of neuron.
Definition: NeuroNet.mqh:416
CNeuronBase::m_myIndex
uint m_myIndex
Index of neuron in layer.
Definition: NeuroNet.mqh:415
CNeuronBaseOCL::alpha
const double alpha
Multiplier to momentum in SGD optimization.
Definition: NeuroNet.mqh:2939
CNeuronConv::activationFunction
virtual double activationFunction(double x)
Method to calculate activation function.
Definition: NeuroNet.mqh:950
CLayerDescription
Class of layer description. Used to describe the structure of a neural network from the main program.
Definition: NeuroNet.mqh:1332
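Note: a hedged usage sketch of describing one layer with this class; only the fields listed in this reference (type, step, window_out) are confirmed, the rest of the interface is assumed.

// Hypothetical description of one convolution layer built in the main program.
CLayerDescription *desc=new CLayerDescription();
desc.type=defNeuronConvOCL;   // object type identifier define
desc.step=2;                  // step size
desc.window_out=8;            // size of output window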
def_k_CalcHiddenGradient
#define def_k_CalcHiddenGradient
Index of Hidden gradients calculation kernel (CalcHiddenGradient)
Definition: NeuroNet.mqh:107
def_k_uwca_matrix_v
#define def_k_uwca_matrix_v
Matrix of second momentum.
Definition: NeuroNet.mqh:222
CNeuronBaseOCL::updateInputWeights
virtual bool updateInputWeights(CNeuronBaseOCL *NeuronOCL)
Method for updating weights.
Definition: NeuroNet.mqh:3266
CBufferDouble::BufferRead
virtual bool BufferRead(void)
Method for reading buffer data from GPU.
Definition: NeuroNet.mqh:2862
def_k_chgc_matrix_w
#define def_k_chgc_matrix_w
Weights matrix (m+1)*n, where m - input window and n - output window.
Definition: NeuroNet.mqh:191
CNeuronProofOCL::Init
virtual bool Init(uint numOutputs, uint myIndex, COpenCLMy *open_cl, int window, int step, int units_count, ENUM_OPTIMIZATION optimization_type)
Method of initialization class.
Definition: NeuroNet.mqh:3566
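Note: a hedged initialization example for the signature above; the parameter values and the ADAM member of ENUM_OPTIMIZATION are assumptions.

// Sub-sampling (proof) layer over 32 units with window 4 and step 4.
COpenCLMy *opencl=new COpenCLMy();
CNeuronProofOCL *proof=new CNeuronProofOCL();
if(!proof.Init(0,0,opencl,4,4,32,ADAM))
   Print("proof layer initialization failed");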
CNeuronLSTM::InputGate
CLayer * InputGate
Object of input gate.
Definition: NeuroNet.mqh:2128
def_k_cigp_matrix_i
#define def_k_cigp_matrix_i
Inputs tensor.
Definition: NeuroNet.mqh:160
CNeuronBaseOCL::FeedForward
virtual bool FeedForward(CObject *SourceObject)
Dispatch method that selects the feed-forward subroutine for the source object type.
Definition: NeuroNet.mqh:3151
COpenCLMy::COpenCLMy
COpenCLMy(void)
Constructor.
Definition: NeuroNet.mqh:630
CNeuronConvOCL::feedForward
virtual bool feedForward(CNeuronBaseOCL *NeuronOCL)
Feed Forward method.
Definition: NeuroNet.mqh:3754
CNeuronLSTM::getOutputLayer
virtual CLayer * getOutputLayer(void)
Method for getting a pointer to the resulting neural layer. Not used in fully connected neural networks.
Definition: NeuroNet.mqh:2150
CNeuronProof::feedForward
virtual bool feedForward(CLayer *prevLayer)
Feed Forward method.
Definition: NeuroNet.mqh:1111
def_k_uwa_l
#define def_k_uwa_l
Adam Learning rates.
Definition: NeuroNet.mqh:136
CBufferDouble::m_myIndex
int m_myIndex
Index of buffer.
Definition: NeuroNet.mqh:2781
CArrayLayer
Class of layers collection in Neural Net.
Definition: NeuroNet.mqh:771
def_k_chgc_window_in
#define def_k_chgc_window_in
Size of input window.
Definition: NeuroNet.mqh:197
CNeuronConv::feedForward
virtual bool feedForward(CLayer *prevLayer)
Feed Forward method.
Definition: NeuroNet.mqh:916
CNeuronConvOCL::Init
virtual bool Init(uint numOutputs, uint myIndex, COpenCLMy *open_cl, uint window, uint step, uint window_out, uint units_count, ENUM_OPTIMIZATION optimization_type)
Method of initialization class.
Definition: NeuroNet.mqh:3681
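Note: as above, a hedged call illustrating the parameter order; the values and the ADAM enum member are assumptions.

// Convolution layer: input window 8, step 2, 4 output filters, 32 units.
COpenCLMy *opencl=new COpenCLMy();
CNeuronConvOCL *conv=new CNeuronConvOCL();
if(!conv.Init(0,1,opencl,8,2,4,32,ADAM))
   Print("convolution layer initialization failed");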
defNeuron
#define defNeuron
Fully connected neuron.
Definition: NeuroNet.mqh:62
CArrayCon::~CArrayCon
~CArrayCon(void)
Destructor.
Definition: NeuroNet.mqh:376
def_k_ffp_matrix_o
#define def_k_ffp_matrix_o
Output tensor.
Definition: NeuroNet.mqh:150
def_k_chgc_window_out
#define def_k_chgc_window_out
Size of output window.
Definition: NeuroNet.mqh:198
CNeuronConv::Type
virtual int Type(void) const
Identifier of class.
Definition: NeuroNet.mqh:851
CNet::backPropOCL
void backPropOCL(CArrayDouble *targetVals)
Back propagation method for GPU calculation.
Definition: NeuroNet.mqh:1853
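Note: a hedged sketch of one GPU training step around this method; how the network was constructed and run forward is assumed and not shown.

// Sketch: one GPU backprop step for a network that has already run forward.
void TrainStep(CNet *net)
  {
   CArrayDouble *targets=new CArrayDouble();
   targets.Add(1.0);           // desired value for a single output neuron
   net.backPropOCL(targets);   // gradients and weight updates on the OpenCL device
   delete targets;
  }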
CLayer::Load
virtual bool Load(const int file_handle)
Load method.
Definition: NeuroNet.mqh:2738
CNeuronBase
The base class of neuron.
Definition: NeuroNet.mqh:411
def_k_uwca_matrix_w
#define def_k_uwca_matrix_w
Weights matrix (m+1)*n, where m - input window and n - output window.
Definition: NeuroNet.mqh:218
CNet
The main class of the neural network. Contains basic methods for the functioning of a neural network.
Definition: NeuroNet.mqh:1361
CNeuronBase::TanhFunctionDerivative
virtual double TanhFunctionDerivative(double x)
Calculate derivative of hyperbolic tangent: d/dx tanh(x) = 1 - tanh(x)^2.
Definition: NeuroNet.mqh:446
def_k_NormilizeWeights
#define def_k_NormilizeWeights
Index of the kernel for weights matrix normalization (NormalizeWeights)
Definition: NeuroNet.mqh:285
def_k_aout_values
#define def_k_aout_values
Matrix of Values.
Definition: NeuroNet.mqh:249
def_k_chg_matrix_g
#define def_k_chg_matrix_g
Tensor of gradients at current layer.
Definition: NeuroNet.mqh:109
CBufferDouble::BufferSet
virtual bool BufferSet(int index)
Change buffer index number.
Definition: NeuroNet.mqh:2791
CConnection::Load
virtual bool Load(int const file_handle)
Load method.
Definition: NeuroNet.mqh:355
def_k_cog_activation
#define def_k_cog_activation
Activation type (ENUM_ACTIVATION)
Definition: NeuroNet.mqh:105
def_k_ag_gradient
#define def_k_ag_gradient
Matrix of Gradients from previous iteration.
Definition: NeuroNet.mqh:273
def_k_norm_buffer
#define def_k_norm_buffer
In/Out Matrix.
Definition: NeuroNet.mqh:282
def_k_chg_matrix_w
#define def_k_chg_matrix_w
Weights matrix (m+1)*n, where m - number of neurons in previous layer and n - number of neurons in current layer.
Definition: NeuroNet.mqh:108
CNeuronProof::Type
virtual int Type(void) const
Identifier of class.
Definition: NeuroNet.mqh:829
def_k_as_keys
#define def_k_as_keys
Matrix of Keys.
Definition: NeuroNet.mqh:243
CLayerDescription::type
int type
Type of neurons in layer (one of the object type identifier defines).
Definition: NeuroNet.mqh:1335
def_k_ff_matrix_i
#define def_k_ff_matrix_i
Inputs tensor.
Definition: NeuroNet.mqh:92
def_k_chgc_matrix_o
#define def_k_chgc_matrix_o
Output tensor.
Definition: NeuroNet.mqh:193
def_k_AttentionOut
#define def_k_AttentionOut
Index of the Attention Neuron Output calculation kernel (AttentionOut)
Definition: NeuroNet.mqh:247
CNeuronConvOCL::DeltaWeightsConv
CBufferDouble * DeltaWeightsConv
Matrix of delta weights to previous layer (SGD)
Definition: NeuroNet.mqh:3643
CNeuronBase::outputVal
double outputVal
Output value.
Definition: NeuroNet.mqh:413
CNeuronConv::CNeuronConv
CNeuronConv()
Constructor.
Definition: NeuroNet.mqh:845
CNeuronBaseOCL::optimization
ENUM_OPTIMIZATION optimization
Optimization method (ENUM_OPTIMIZATION)
Definition: NeuroNet.mqh:2944
CLayerDescription::step
int step
Step size.
Definition: NeuroNet.mqh:1341
CNeuronBaseOCL::getConnections
virtual int getConnections(void)
Get number of connections from one neuron to the next layer.
Definition: NeuroNet.mqh:2974
CNeuronConvOCL::Type
virtual int Type(void) const
Identifier of class.
Definition: NeuroNet.mqh:3659
CNeuronBaseOCL::calcHiddenGradients
virtual bool calcHiddenGradients(CObject *TargetObject)
Dispatch method that selects the subroutine for transferring the gradient to the previous layer.
Definition: NeuroNet.mqh:3328
def_k_uwcm_window_out
#define def_k_uwcm_window_out
Size of output window.
Definition: NeuroNet.mqh:214
eta
double eta
Learning rate for SGD optimization.
Definition: NeuroNet.mqh:41
CNeuronBase::SetActivationFunction
virtual void SetActivationFunction(ENUM_ACTIVATION value)
Set the type of activation function (ENUM_ACTIVATION)
Definition: NeuroNet.mqh:433
def_k_chg_activation
#define def_k_chg_activation
Activation type (ENUM_ACTIVATION)
Definition: NeuroNet.mqh:113
CNeuronConv
Class of convolution layer.
Definition: NeuroNet.mqh:837
CNeuronBase::prevVal
double prevVal
Previous output value.
Definition: NeuroNet.mqh:414
def_k_chgc_matrix_g
#define def_k_chgc_matrix_g
Tensor of gradients at current layer.
Definition: NeuroNet.mqh:192
CNeuronConv::calcInputGradients
virtual bool calcInputGradients(CLayer *prevLayer)
Method to transfer gradients to previous layer.
Definition: NeuroNet.mqh:1231
CNeuronBaseOCL::getPrevVal
virtual int getPrevVal(double &values[])
Get values of previous iteration output buffer.
Definition: NeuroNet.mqh:2969
CNeuronBaseOCL::getGradient
virtual int getGradient(double &values[])
Get values of gradient buffer.
Definition: NeuroNet.mqh:2970
def_k_aout_out
#define def_k_aout_out
Output tensor.
Definition: NeuroNet.mqh:251
CLayer
Class of neurons collection in one layer of Neural Net.
Definition: NeuroNet.mqh:641
CNeuronAttentionOCL::AttentionOut
CNeuronBaseOCL * AttentionOut
Layer of Self-Attention Out.
Definition: NeuroNet.mqh:3897
CArrayCon::CreateElement
virtual bool CreateElement(int const index)
Method for creating a new element by index.
Definition: NeuroNet.mqh:385
CNeuronAttentionOCL::Load
virtual bool Load(int const file_handle)
Load method.
Definition: NeuroNet.mqh:4336
CNeuronAttentionOCL::Keys
CNeuronConvOCL * Keys
Convolution layer for Keys.
Definition: NeuroNet.mqh:3894
CNeuronLSTM::Init
virtual bool Init(uint numOutputs, uint myIndex, int window, int step, int units_count, ENUM_OPTIMIZATION optimization_type)
Unit initialization method. See the linked article for a detailed description.
Definition: NeuroNet.mqh:2197
defNet
#define defNet
Neuron Net.
Definition: NeuroNet.mqh:55
CArrayCon::IncreaseTotal
virtual void IncreaseTotal()
Increase number of elements in array.
Definition: NeuroNet.mqh:379
CNeuronLSTM::Input
CArrayDouble * Input
Input data.
Definition: NeuroNet.mqh:2133
def_k_cigp_matrix_g
#define def_k_cigp_matrix_g
Tensor of gradients at current layer.
Definition: NeuroNet.mqh:161
def_k_uwa_matrix_m
#define def_k_uwa_matrix_m
Adam Matrix of first momentum.
Definition: NeuroNet.mqh:133
defArrayLayer
#define defArrayLayer
Array of layers.
Definition: NeuroNet.mqh:54
CNeuronBaseOCL::m_myIndex
int m_myIndex
Index of neuron in layer.
Definition: NeuroNet.mqh:2942
defNeuronBase
#define defNeuronBase
Neuron base type.
Definition: NeuroNet.mqh:61
CLayerDescription::window_out
int window_out
Size of output window.
Definition: NeuroNet.mqh:1340
def_k_uwcm_inputs
#define def_k_uwcm_inputs
Number of inputs.
Definition: NeuroNet.mqh:210