Neuron Net
Macros | Functions
Convolution layer's neuron Update weights kernels

Describes the optimization process for the Neuron of convolution layer. More...

Macros

#define def_k_UpdateWeightsConvMomentum   9
 Index of the kernel of the convolution neuron to update weights SGD (UpdateWeightsConvMomentum) More...
 
#define def_k_uwcm_matrix_w   0
 Weights matrix (m+1)*n, where m - input window and n - output window. More...
 
#define def_k_uwcm_matrix_g   1
 Tensor of gradients at current layer. More...
 
#define def_k_uwcm_matrix_i   2
 Inputs tensor. More...
 
#define def_k_uwcm_matrix_dw   3
 Matrix of delta weights in last correction. More...
 
#define def_k_uwcm_inputs   4
 Number of inputs. More...
 
#define def_k_uwcm_learning_rates   5
 Learning rates. More...
 
#define def_k_uwcm_momentum   6
 Momentum multiplier. More...
 
#define def_k_uwcm_window_in   7
 Size of input window. More...
 
#define def_k_uwcm_window_out   8
 Size of output window. More...
 
#define def_k_uwcm_step   9
 Step size. More...
 
#define def_k_UpdateWeightsConvAdam   10
 Index of the kernel of the convolution neuron to update weights Adam (UpdateWeightsConvAdam) More...
 
#define def_k_uwca_matrix_w   0
 Weights matrix (m+1)*n, where m - input window and n - output window. More...
 
#define def_k_uwca_matrix_g   1
 Tensor of gradients at current layer. More...
 
#define def_k_uwca_matrix_i   2
 Inputs tensor. More...
 
#define def_k_uwca_matrix_m   3
 Matrix of first momentum. More...
 
#define def_k_uwca_matrix_v   4
 Matrix of second momentum. More...
 
#define def_k_uwca_inputs   5
 Number of inputs. More...
 
#define def_k_uwca_l   6
 Learning rates. More...
 
#define def_k_uwca_b1   7
 First momentum multiplier. More...
 
#define def_k_uwca_b2   8
 Second momentum multiplier. More...
 
#define def_k_uwca_window_in   9
 Size of input window. More...
 
#define def_k_uwca_window_out   10
 Size of output window. More...
 
#define def_k_uwca_step   11
 Step size. More...
 

Functions

__kernel void UpdateWeightsConvMomentum (__global double *matrix_w, __global double *matrix_g, __global double *matrix_i, __global double *matrix_dw, int inputs, double learning_rates, double momentum, int window_in, int window_out, int step)
 Describes the process of SGD optimization weights for the Convolution Neuron (CNeuronConvOCL). More...
 
__kernel void UpdateWeightsConvAdam (__global double *matrix_w, __global const double *matrix_g, __global const double *matrix_i, __global double *matrix_m, __global double *matrix_v, const int inputs, const double l, const double b1, const double b2, int window_in, int window_out, int step)
 Describes the process of Adam optimization weights for the Convolution Neuron (CNeuronConvOCL). More...
 

Detailed Description

Describes the optimization process for the Neuron of convolution layer.

Macro Definition Documentation

◆ def_k_UpdateWeightsConvAdam

#define def_k_UpdateWeightsConvAdam   10

Index of the kernel of the convolution neuron to update weights Adam (UpdateWeightsConvAdam)

Definition at line 217 of file NeuroNet.mqh.

◆ def_k_UpdateWeightsConvMomentum

#define def_k_UpdateWeightsConvMomentum   9

Index of the kernel of the convolution neuron to update weights SGD (UpdateWeightsConvMomentum)

Definition at line 205 of file NeuroNet.mqh.

◆ def_k_uwca_b1

#define def_k_uwca_b1   7

First momentum multiplier.

Definition at line 225 of file NeuroNet.mqh.

◆ def_k_uwca_b2

#define def_k_uwca_b2   8

Second momentum multiplier.

Definition at line 226 of file NeuroNet.mqh.

◆ def_k_uwca_inputs

#define def_k_uwca_inputs   5

Number of inputs.

Definition at line 223 of file NeuroNet.mqh.

◆ def_k_uwca_l

#define def_k_uwca_l   6

Learning rates.

Definition at line 224 of file NeuroNet.mqh.

◆ def_k_uwca_matrix_g

#define def_k_uwca_matrix_g   1

Tensor of gradients at current layer.

Definition at line 219 of file NeuroNet.mqh.

◆ def_k_uwca_matrix_i

#define def_k_uwca_matrix_i   2

Inputs tensor.

Definition at line 220 of file NeuroNet.mqh.

◆ def_k_uwca_matrix_m

#define def_k_uwca_matrix_m   3

Matrix of first momentum.

Definition at line 221 of file NeuroNet.mqh.

◆ def_k_uwca_matrix_v

#define def_k_uwca_matrix_v   4

Matrix of second momentum.

Definition at line 222 of file NeuroNet.mqh.

◆ def_k_uwca_matrix_w

#define def_k_uwca_matrix_w   0

Weights matrix (m+1)*n, where m - input window and n - output window.

Definition at line 218 of file NeuroNet.mqh.

◆ def_k_uwca_step

#define def_k_uwca_step   11

Step size.

Definition at line 229 of file NeuroNet.mqh.

◆ def_k_uwca_window_in

#define def_k_uwca_window_in   9

Size of input window.

Definition at line 227 of file NeuroNet.mqh.

◆ def_k_uwca_window_out

#define def_k_uwca_window_out   10

Size of output window.

Definition at line 228 of file NeuroNet.mqh.

◆ def_k_uwcm_inputs

#define def_k_uwcm_inputs   4

Number of inputs.

Definition at line 210 of file NeuroNet.mqh.

◆ def_k_uwcm_learning_rates

#define def_k_uwcm_learning_rates   5

Learning rates.

Definition at line 211 of file NeuroNet.mqh.

◆ def_k_uwcm_matrix_dw

#define def_k_uwcm_matrix_dw   3

Matrix of delta weights in last correction.

Definition at line 209 of file NeuroNet.mqh.

◆ def_k_uwcm_matrix_g

#define def_k_uwcm_matrix_g   1

Tensor of gradients at current layer.

Definition at line 207 of file NeuroNet.mqh.

◆ def_k_uwcm_matrix_i

#define def_k_uwcm_matrix_i   2

Inputs tensor.

Definition at line 208 of file NeuroNet.mqh.

◆ def_k_uwcm_matrix_w

#define def_k_uwcm_matrix_w   0

Weights matrix (m+1)*n, where m - input window and n - output window.

Definition at line 206 of file NeuroNet.mqh.

◆ def_k_uwcm_momentum

#define def_k_uwcm_momentum   6

Momentum multiplier.

Definition at line 212 of file NeuroNet.mqh.

◆ def_k_uwcm_step

#define def_k_uwcm_step   9

Step size.

Definition at line 215 of file NeuroNet.mqh.

◆ def_k_uwcm_window_in

#define def_k_uwcm_window_in   7

Size of input window.

Definition at line 213 of file NeuroNet.mqh.

◆ def_k_uwcm_window_out

#define def_k_uwcm_window_out   8

Size of output window.

Definition at line 214 of file NeuroNet.mqh.

Function Documentation

◆ UpdateWeightsConvAdam()

/// Adam weight update for the Convolution Neuron (CNeuronConvOCL).
/// One work item per weight: global id i indexes a (window_in+1)*window_out
/// weight matrix; shift==window_in selects the bias column of a filter.
///
/// @param[in,out] matrix_w   Weights matrix (m+1)*n, where m - input window and n - output window
/// @param[in]     matrix_g   Tensor of gradients at current layer
/// @param[in]     matrix_i   Inputs tensor
/// @param[in,out] matrix_m   Matrix of first momentum
/// @param[in,out] matrix_v   Matrix of second momentum
/// @param[in]     inputs     Number of inputs
/// @param[in]     l          Learning rate
/// @param[in]     b1         First momentum multiplier
/// @param[in]     b2         Second momentum multiplier
/// @param[in]     window_in  Size of input window
/// @param[in]     window_out Size of output window
/// @param[in]     step       Step size (stride between window positions)
__kernel void UpdateWeightsConvAdam(__global double *matrix_w,
                                    __global const double *matrix_g,
                                    __global const double *matrix_i,
                                    __global double *matrix_m,
                                    __global double *matrix_v,
                                    const int inputs,
                                    const double l,
                                    const double b1,
                                    const double b2,
                                    int window_in,
                                    int window_out,
                                    int step)
  {
   const int i=get_global_id(0);
   const int shift=i%(window_in+1);             // position inside the window; window_in == bias slot
   const int shift_out=(i-shift)/(window_in+1); // output filter index
// Number of window positions over the input sequence.
// NOTE(review): this count differs from UpdateWeightsConvMomentum, which
// uses (inputs-window_in) — confirm which formula is intended.
   int total=(inputs-(window_in-step))%step;
   total=(inputs-(window_in-step)-total)/step+(total>0 ? 1 : 0);
   double grad=0;
   for(int t=0;t<total;t++)
     {
      // FIX: bound must match the element actually read below,
      // matrix_i[shift+t*step]; the original tested shift+t*window_in,
      // which is wrong whenever step != window_in (early truncation of the
      // gradient sum, or an out-of-bounds read).
      if(shift!=window_in && (shift+t*step)>=inputs)
         break;
      // Bias column multiplies the gradient by 1; weight columns by the input.
      grad+=matrix_g[t*window_out+shift_out]*(shift==window_in ? 1 : matrix_i[shift+t*step]);
     }
   double mt=b1*matrix_m[i]+(1-b1)*grad;      // first-moment EMA
   double vt=b2*matrix_v[i]+(1-b2)*grad*grad; // second-moment EMA (grad*grad instead of pow())
// No bias correction (1-b1^t, 1-b2^t) and no epsilon term here; the vt>0
// test guards the division instead, preserving the original behavior.
   double delta=(vt>0 ? l*mt/sqrt(vt) : 0);
   matrix_w[i]+=delta;
   matrix_m[i]=mt;
   matrix_v[i]=vt;
  }

◆ UpdateWeightsConvMomentum()

/// SGD-with-momentum weight update for the Convolution Neuron (CNeuronConvOCL).
/// One work item per weight: global id i indexes a (window_in+1)*window_out
/// weight matrix; shift==window_in selects the bias column of a filter.
///
/// @param[in,out] matrix_w       Weights matrix (m+1)*n, where m - input window and n - output window
/// @param[in]     matrix_g       Tensor of gradients at current layer
/// @param[in]     matrix_i       Inputs tensor
/// @param[in,out] matrix_dw      Matrix of delta weights from the last correction
/// @param[in]     inputs         Number of inputs
/// @param[in]     learning_rates Learning rate
/// @param[in]     momentum       Momentum multiplier
/// @param[in]     window_in      Size of input window
/// @param[in]     window_out     Size of output window
/// @param[in]     step           Step size (stride between window positions)
__kernel void UpdateWeightsConvMomentum(__global double *matrix_w,
                                        __global double *matrix_g,
                                        __global double *matrix_i,
                                        __global double *matrix_dw,
                                        int inputs,
                                        double learning_rates,
                                        double momentum,
                                        int window_in,
                                        int window_out,
                                        int step)
  {
   const int i=get_global_id(0);
   const int shift=i%(window_in+1);             // position inside the window; window_in == bias slot
   const int shift_out=(i-shift)/(window_in+1); // output filter index
// Number of window positions over the input sequence.
// NOTE(review): this count differs from UpdateWeightsConvAdam, which uses
// (inputs-(window_in-step)) — confirm which formula is intended.
   int total=(inputs-window_in)%step;
   total=(inputs-window_in-total)/step+(total>0 ? 1 : 0);
   double grad=0;
   for(int t=0;t<total;t++)
     {
      // FIX: bound must match the element actually read below,
      // matrix_i[shift+t*step]; the original tested shift+t*window_in,
      // which is wrong whenever step != window_in (early truncation of the
      // gradient sum, or an out-of-bounds read).
      if(shift!=window_in && (shift+t*step)>=inputs)
         break;
      // Bias column multiplies the gradient by 1; weight columns by the input.
      grad+=matrix_g[t*window_out+shift_out]*(shift==window_in ? 1 : matrix_i[shift+t*step]);
     }
// Classic momentum: new delta = lr*grad + momentum*previous delta.
   double delta=learning_rates*grad+momentum*matrix_dw[i];
   matrix_dw[i]=delta;
   matrix_w[i]+=delta;
  }
b2
#define b2
Second momentum multiplier of Adam optimization.
Definition: NeuroNet.mqh:38
b1
#define b1
First momentum multiplier of Adam optimization.
Definition: NeuroNet.mqh:36
momentum
#define momentum
momentum for SGD optimization
Definition: NeuroNet.mqh:33