![]() |
Neuron Net
|
Describes the process of optimization weights for the Neuron Base. More...
Macros | |
#define | def_k_UpdateWeightsMomentum 3 |
Index SGD optimization Update weights kernel (UpdateWeightsMomentum) More... | |
#define | def_k_uwm_matrix_w 0 |
SGD Weights matrix (m+1)*n, where m - number of neurons in previous layer and n - number of neurons in current layer. More... | |
#define | def_k_uwm_matrix_g 1 |
SGD Tensor of gradients at current layer. More... | |
#define | def_k_uwm_matrix_i 2 |
SGD Inputs tensor. More... | |
#define | def_k_uwm_matrix_dw 3 |
SGD Matrix of delta weights in last correction. More... | |
#define | def_k_uwm_inputs 4 |
SGD Number of inputs. More... | |
#define | def_k_uwm_learning_rates 5 |
SGD Learning rates. More... | |
#define | def_k_uwm_momentum 6 |
SGD Momentum multiplier. More... | |
#define | def_k_UpdateWeightsAdam 4 |
Index Adam optimization Update weights kernel (UpdateWeightsAdam) More... | |
#define | def_k_uwa_matrix_w 0 |
Adam Weights matrix (m+1)*n, where m - number of neurons in previous layer and n - number of neurons in current layer. More... | |
#define | def_k_uwa_matrix_g 1 |
Adam Tensor of gradients at current layer. More... | |
#define | def_k_uwa_matrix_i 2 |
Adam Inputs tensor. More... | |
#define | def_k_uwa_matrix_m 3 |
Adam Matrix of first momentum. More... | |
#define | def_k_uwa_matrix_v 4 |
Adam Matrix of second momentum. More... | |
#define | def_k_uwa_inputs 5 |
Adam Number of inputs. More... | |
#define | def_k_uwa_l 6 |
Adam Learning rates. More... | |
#define | def_k_uwa_b1 7 |
Adam First momentum multiplier. More... | |
#define | def_k_uwa_b2 8 |
Adam Second momentum multiplier. More... | |
Functions | |
__kernel void | UpdateWeightsMomentum (__global double *matrix_w, __global double *matrix_g, __global double *matrix_i, __global double *matrix_dw, int inputs, double learning_rates, double momentum) |
Describes the process of SGD optimization weights for the Neuron Base (CNeuronBaseOCL). More... | |
__kernel void | UpdateWeightsAdam (__global double *matrix_w, __global const double *matrix_g, __global const double *matrix_i, __global double *matrix_m, __global double *matrix_v, const int inputs, const double l, const double b1, const double b2) |
Describes the process of Adam optimization weights for the Neuron Base (CNeuronBaseOCL). More... | |
virtual bool | CNeuronBaseOCL::updateInputWeights (CNeuronBaseOCL *NeuronOCL) |
Describes the process of optimization weights for the Neuron Base.
Detailed description on the link. For Adam optimization look the link.
#define def_k_UpdateWeightsAdam 4 |
Index Adam optimization Update weights kernel (UpdateWeightsAdam)
Definition at line 129 of file NeuroNet.mqh.
#define def_k_UpdateWeightsMomentum 3 |
Index SGD optimization Update weights kernel (UpdateWeightsMomentum)
Definition at line 120 of file NeuroNet.mqh.
#define def_k_uwa_b1 7 |
Adam First momentum multiplier.
Definition at line 137 of file NeuroNet.mqh.
#define def_k_uwa_b2 8 |
Adam Second momentum multiplier.
Definition at line 138 of file NeuroNet.mqh.
#define def_k_uwa_inputs 5 |
Adam Number of inputs.
Definition at line 135 of file NeuroNet.mqh.
#define def_k_uwa_l 6 |
Adam Learning rates.
Definition at line 136 of file NeuroNet.mqh.
#define def_k_uwa_matrix_g 1 |
Adam Tensor of gradients at current layer.
Definition at line 131 of file NeuroNet.mqh.
#define def_k_uwa_matrix_i 2 |
Adam Inputs tensor.
Definition at line 132 of file NeuroNet.mqh.
#define def_k_uwa_matrix_m 3 |
Adam Matrix of first momentum.
Definition at line 133 of file NeuroNet.mqh.
#define def_k_uwa_matrix_v 4 |
Adam Matrix of second momentum.
Definition at line 134 of file NeuroNet.mqh.
#define def_k_uwa_matrix_w 0 |
Adam Weights matrix (m+1)*n, where m - number of neurons in previous layer and n - number of neurons in current layer.
Definition at line 130 of file NeuroNet.mqh.
#define def_k_uwm_inputs 4 |
SGD Number of inputs.
Definition at line 125 of file NeuroNet.mqh.
#define def_k_uwm_learning_rates 5 |
SGD Learning rates.
Definition at line 126 of file NeuroNet.mqh.
#define def_k_uwm_matrix_dw 3 |
SGD Matrix of delta weights in last correction.
Definition at line 124 of file NeuroNet.mqh.
#define def_k_uwm_matrix_g 1 |
SGD Tensor of gradients at current layer.
Definition at line 122 of file NeuroNet.mqh.
#define def_k_uwm_matrix_i 2 |
SGD Inputs tensor.
Definition at line 123 of file NeuroNet.mqh.
#define def_k_uwm_matrix_w 0 |
SGD Weights matrix (m+1)*n, where m - number of neurons in previous layer and n - number of neurons in current layer.
Definition at line 121 of file NeuroNet.mqh.
#define def_k_uwm_momentum 6 |
SGD Momentum multiplier.
Definition at line 127 of file NeuroNet.mqh.
|
protectedvirtual |
Method for updating weights. Calling one of kernels UpdateWeightsMomentum() or UpdateWeightsAdam() in depends of optimization type (ENUM_OPTIMIZATION).
NeuronOCL | Pointer to previous layer. |
Reimplemented in CNeuronAttentionOCL, and CNeuronConvOCL.
Definition at line 3266 of file NeuroNet.mqh.
__kernel void UpdateWeightsAdam | ( | __global double * | matrix_w, |
__global const double * | matrix_g, | ||
__global const double * | matrix_i, | ||
__global double * | matrix_m, | ||
__global double * | matrix_v, | ||
const int | inputs, | ||
const double | l, | ||
const double | b1, | ||
const double | b2 | ||
) |
Describes the process of Adam optimization weights for the Neuron Base (CNeuronBaseOCL).
Detailed description on the link.
[in,out] | matrix_w | Weights matrix (m+1)*n, where m - number of neurons in previous layer and n - number of neurons in current layer |
[in] | matrix_g | Tensor of gradients at current layer |
[in] | matrix_i | Inputs tensor |
[in,out] | matrix_m | Matrix of first momentum |
[in,out] | matrix_v | Matrix of second momentum |
inputs | Number of inputs | |
l | Learning rates | |
b1 | First momentum multiplier | |
b2 | Second momentum multiplier |
Definition at line 189 of file NeuroNet.cl.
__kernel void UpdateWeightsMomentum | ( | __global double * | matrix_w, |
__global double * | matrix_g, | ||
__global double * | matrix_i, | ||
__global double * | matrix_dw, | ||
int | inputs, | ||
double | learning_rates, | ||
double | momentum | ||
) |
Describes the process of SGD optimization weights for the Neuron Base (CNeuronBaseOCL).
Detailed description on the link.
[in,out] | matrix_w | Weights matrix (m+1)*n, where m - number of neurons in previous layer and n - number of neurons in current layer |
[in] | matrix_g | Tensor of gradients at current layer |
[in] | matrix_i | Inputs tensor |
[in,out] | matrix_dw | Matrix of delta weights in last correction |
inputs | Number of inputs | |
learning_rates | Learning rates | |
momentum | Momentum multiplier |
Definition at line 168 of file NeuroNet.cl.