Discussing the article: "Neural networks made easy (Part 52): Research with optimism and distribution correction" - page 2

 
Dmitriy Gizlyk #:

I noticed a typo in the code of the Study.mq5 Expert Advisor

The files in the article have been updated.

 
Dmitry, why does this network open all trades with exactly 1 lot during training, in all tests, and never try to change the lot? It does not try to set fractional lots and does not want to set more than 1 lot either. The instrument is EURUSD. The training parameters are the same as yours.
 
Viktor Kudriavtsev #:
Dmitry, why does this network open all trades with exactly 1 lot during training, in all tests, and never try to change the lot? It does not try to set fractional lots and does not want to set more than 1 lot either. The instrument is EURUSD. The training parameters are the same as yours.

In the last Actor layer, we use sigmoid as the activation function, which limits the values to the range [0,1]. For TP and SL, we use a multiplier to adjust the values. The lot size is not adjusted. Therefore, 1 lot is the maximum possible value.

//--- layer 9
   if(!(descr = new CLayerDescription()))
      return false;
   descr.type = defNeuronSoftActorCritic;
   descr.count = NActions;
   descr.window_out = 32;
   descr.optimization = ADAM;
   descr.activation = SIGMOID;
   if(!actor.Add(descr))
     {
      delete descr;
      return false;
     }
 
Dmitriy Gizlyk #:

In the last Actor layer, we use sigmoid as the activation function, which limits the values to the range [0,1]. For TP and SL we use a multiplier to adjust the values. The lot size is not adjusted. Therefore, 1 lot is the maximum possible value.

Understood, thank you.
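Side note for anyone who wants the Actor to size positions beyond 1 lot: since the sigmoid bounds every action component to [0,1], the volume can be given its own multiplier, exactly as TP and SL are. Below is a minimal sketch (not from the article), assuming a hypothetical MaxLot input parameter and the usual CSymbolInfo object Symb from the Expert Advisors:

//--- hypothetical upper bound for the position volume (not part of the original EA)
input double MaxLot = 0.5;
//+------------------------------------------------------------------+
//| Scale a sigmoid-bounded action component [0,1] to a valid volume |
//+------------------------------------------------------------------+
double ActionToLot(double action, double max_lot, double min_lot, double step_lot)
  {
   double lot = action * max_lot;   // same idea as temp[1] * MaxTP for the take profit
   if(lot < min_lot)
      return 0.0;                   // treat values below the minimum lot as "no trade"
   return min_lot + MathRound((lot - min_lot) / step_lot) * step_lot;
  }

In OnTick() it would replace the direct rounding of temp[0] and temp[3], for example: double buy_lot = ActionToLot(temp[0], MaxLot, Symb.LotsMin(), Symb.LotsStep());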

 
ENJOY <3

//+------------------------------------------------------------------+
//| Expert tick function                                             |
//+------------------------------------------------------------------+
void OnTick()
  {
//---
   if(!IsNewBar())
      return;
//---
   int bars = CopyRates(Symb.Name(), TimeFrame, iTime(Symb.Name(), TimeFrame, 1), HistoryBars, Rates);
   if(!ArraySetAsSeries(Rates, true))
      return;
//---
   RSI.Refresh();
   CCI.Refresh();
   ATR.Refresh();
   MACD.Refresh();
   Symb.Refresh();
   Symb.RefreshRates();
//--- describe the last HistoryBars bars and indicator values
   float atr = 0;
   for(int b = 0; b < (int)HistoryBars; b++)
     {
      float open = (float)Rates[b].open;
      float rsi = (float)RSI.Main(b);
      float cci = (float)CCI.Main(b);
      atr = (float)ATR.Main(b);
      float macd = (float)MACD.Main(b);
      float sign = (float)MACD.Signal(b);
      if(rsi == EMPTY_VALUE || cci == EMPTY_VALUE || atr == EMPTY_VALUE || macd == EMPTY_VALUE || sign == EMPTY_VALUE)
         continue;
      //---
      int shift = b * BarDescr;
      sState.state[shift] = (float)(Rates[b].close - open);
      sState.state[shift + 1] = (float)(Rates[b].high - open);
      sState.state[shift + 2] = (float)(Rates[b].low - open);
      sState.state[shift + 3] = (float)(Rates[b].tick_volume / 1000.0f);
      sState.state[shift + 4] = rsi;
      sState.state[shift + 5] = cci;
      sState.state[shift + 6] = atr;
      sState.state[shift + 7] = macd;
      sState.state[shift + 8] = sign;
     }
   bState.AssignArray(sState.state);
//--- account state
   sState.account[0] = (float)AccountInfoDouble(ACCOUNT_BALANCE);
   sState.account[1] = (float)AccountInfoDouble(ACCOUNT_EQUITY);
//--- open positions
   double buy_value = 0, sell_value = 0, buy_profit = 0, sell_profit = 0;
   double position_discount = 0;
   double multiplyer = 1.0 / (60.0 * 60.0 * 10.0);
   int total = PositionsTotal();
   datetime current = TimeCurrent();
   for(int i = 0; i < total; i++)
     {
      if(PositionGetSymbol(i) != Symb.Name())
         continue;
      double profit = PositionGetDouble(POSITION_PROFIT);
      switch((int)PositionGetInteger(POSITION_TYPE))
        {
         case POSITION_TYPE_BUY:
            buy_value += PositionGetDouble(POSITION_VOLUME);
            buy_profit += profit;
            break;
         case POSITION_TYPE_SELL:
            sell_value += PositionGetDouble(POSITION_VOLUME);
            sell_profit += profit;
            break;
        }
      position_discount += profit - (current - PositionGetInteger(POSITION_TIME)) * multiplyer * MathAbs(profit);
     }
   sState.account[2] = (float)buy_value;
   sState.account[3] = (float)sell_value;
   sState.account[4] = (float)buy_profit;
   sState.account[5] = (float)sell_profit;
   sState.account[6] = (float)position_discount;
   sState.account[7] = (float)Rates[0].time;
//--- account description buffer
   bAccount.Clear();
   bAccount.Add((float)((sState.account[0] - PrevBalance) / PrevBalance));
   bAccount.Add((float)(sState.account[1] / PrevBalance));
   bAccount.Add((float)((sState.account[1] - PrevEquity) / PrevEquity));
   bAccount.Add(sState.account[2]);
   bAccount.Add(sState.account[3]);
   bAccount.Add((float)(sState.account[4] / PrevBalance));
   bAccount.Add((float)(sState.account[5] / PrevBalance));
   bAccount.Add((float)(sState.account[6] / PrevBalance));
//--- time stamp harmonics (year, month, week, day)
   double x = (double)Rates[0].time / (double)(D'2024.01.01' - D'2023.01.01');
   bAccount.Add((float)MathSin(x != 0 ? 2.0 * M_PI * x : 0));
   x = (double)Rates[0].time / (double)PeriodSeconds(PERIOD_MN1);
   bAccount.Add((float)MathCos(x != 0 ? 2.0 * M_PI * x : 0));
   x = (double)Rates[0].time / (double)PeriodSeconds(PERIOD_W1);
   bAccount.Add((float)MathSin(x != 0 ? 2.0 * M_PI * x : 0));
   x = (double)Rates[0].time / (double)PeriodSeconds(PERIOD_D1);
   bAccount.Add((float)MathSin(x != 0 ? 2.0 * M_PI * x : 0));
//---
   if(bAccount.GetIndex() >= 0 && !bAccount.BufferWrite())
      return;
//--- Actor feed forward
   if(!Actor.feedForward(GetPointer(bState), 1, false, GetPointer(bAccount)))
      return;
//---
   PrevBalance = sState.account[0];
   PrevEquity = sState.account[1];
//---
   vector<float> temp;
   Actor.getResults(temp);
   float delta = MathAbs(ActorResult - temp).Sum();
   ActorResult = temp;
//--- net the opposite volumes against each other
   double min_lot = Symb.LotsMin();
   double step_lot = Symb.LotsStep();
   double stops = MathMax(Symb.StopsLevel(), 1) * Symb.Point();
   if(temp[0] >= temp[3])
     {
      temp[0] -= temp[3];
      temp[3] = 0;
     }
   else
     {
      temp[3] -= temp[0];
      temp[0] = 0;
     }
//--- buy control
   if(temp[0] < min_lot || (temp[1] * MaxTP * Symb.Point()) <= stops || (temp[2] * MaxSL * Symb.Point()) <= stops)
     {
      if(buy_value > 0)
         CloseByDirection(POSITION_TYPE_BUY);
     }
   else
     {
      double buy_lot = min_lot + MathRound((double)(temp[0] - min_lot) / step_lot) * step_lot;
      double buy_tp = NormalizeDouble(Symb.Ask() + temp[1] * MaxTP * Symb.Point(), Symb.Digits());
      double buy_sl = NormalizeDouble(Symb.Ask() - temp[2] * MaxSL * Symb.Point(), Symb.Digits());
      if(buy_value > 0)
         TrailPosition(POSITION_TYPE_BUY, buy_sl, buy_tp);
      if(buy_value != buy_lot)
        {
         if(buy_value > buy_lot)
            ClosePartial(POSITION_TYPE_BUY, buy_value - buy_lot);
         else
            Trade.Buy(buy_lot - buy_value, Symb.Name(), Symb.Ask(), buy_sl, buy_tp);
        }
     }
//--- sell control
   if(temp[3] < min_lot || (temp[4] * MaxTP * Symb.Point()) <= stops || (temp[5] * MaxSL * Symb.Point()) <= stops)
     {
      if(sell_value > 0)
         CloseByDirection(POSITION_TYPE_SELL);
     }
   else
     {
      double sell_lot = min_lot + MathRound((double)(temp[3] - min_lot) / step_lot) * step_lot;
      double sell_tp = NormalizeDouble(Symb.Bid() - temp[4] * MaxTP * Symb.Point(), Symb.Digits());
      double sell_sl = NormalizeDouble(Symb.Bid() + temp[5] * MaxSL * Symb.Point(), Symb.Digits());
      if(sell_value > 0)
         TrailPosition(POSITION_TYPE_SELL, sell_sl, sell_tp);
      if(sell_value != sell_lot)
        {
         if(sell_value > sell_lot)
            ClosePartial(POSITION_TYPE_SELL, sell_value - sell_lot);
         else
            Trade.Sell(sell_lot - sell_value, Symb.Name(), Symb.Bid(), sell_sl, sell_tp);
        }
     }
//--- calculate the initial reward (relative balance change)
   float iRewards = bAccount[0];
   vector<float> log_prob;
   Actor.GetLogProbs(log_prob);
//--- normalize ATR to the range [0, 1]
   float minATR = -100.0f; // adjust these values based on your data
   float maxATR = 100.0f;
   float norm_atr = (atr - minATR) / (maxATR - minATR);
//--- define weights for normalization
   float minWeight = 0.0f;
   float maxWeight = 1.0f;
//--- normalize the initial reward and the normalized ATR based on the weights
   float norm_iRewards = (iRewards - minWeight) / (maxWeight - minWeight);
   float norm_norm_atr = (norm_atr - minWeight) / (maxWeight - minWeight);
//--- calculate the penalty for having no trades (computed but not used further in this snippet)
   double penalty = (buy_value + sell_value) == 0 ? (norm_norm_atr + atr / (PrevBalance + LogProbMultiplier)) : 0.0;
//--- store actions and log probabilities
   for(ulong i = 0; i < temp.Size(); i++)
     {
      sState.action[i] = temp[i];
      sState.log_prob[i] = log_prob[i];
     }
//--- increment based on the balance logarithm
   float iRewards_increment = (float)MathLog(PrevBalance);
//--- adjust the reward based on the initial norm_iRewards
   if(norm_iRewards != 0)
     {
      norm_iRewards += norm_norm_atr + iRewards_increment;
     }
//--- apply Z-score normalization to norm_iRewards
   float meanRewards = -10.0f; // calculate the mean of rewards over time
   float stdRewards = 10.0f;   // calculate the standard deviation of rewards over time
   float normalized_iRewards = (norm_iRewards - meanRewards) / stdRewards;
//--- apply a power transformation to enhance the data distribution (Box-Cox-style transform)
   float power = 0.5f; // adjust the power parameter as needed
   float transformed_reward = (float)((pow(1.0 + normalized_iRewards, power) - 1.0) / power);
//--- rescale and apply logarithmic compression
   float ZReward = (float)((transformed_reward - LogProbMultiplier) / (5.0 - LogProbMultiplier));
   float Mreward = (float)MathLog10(ZReward + 1);
   float reward = (Mreward - (-1.0f)) / (0.4f - (-1.0f));
//--- apply a square root transformation to the scaled reward
   float SRQTreward = (float)sqrt(reward);
//--- print the rewards and other information
   Print("Buy Value: ", buy_value);
   Print("Sell Value: ", sell_value);
   Print("Actions: ", temp);
   Print("Actions Size: ", temp.Size());
   Print("iRewards: ", iRewards);
   Print("Normalized ATR: ", norm_norm_atr);
   Print("Normalized iRewards: ", normalized_iRewards);
   Print("Transformed Reward: ", transformed_reward);
   Print("Trajectory Reward: ", ZReward);
   Print("MATHLOG: ", Mreward);
   Print("Scaled Reward: ", reward);
   Print("SQRT Reward: ", SRQTreward);
//--- add the state and the normalized reward to the trajectory
   if(!Base.Add(sState, reward))
     {
      ExpertRemove(); // stop the EA when adding to Base fails
     }
  }
//+------------------------------------------------------------------+
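The reward shaping at the end of OnTick() above chains several ad-hoc transformations: min-max scaling of the ATR, a Z-score with a fixed mean and standard deviation, a Box-Cox-style power transform, rescaling, log10 compression and a final mapping to roughly [0,1]. A minimal sketch that isolates this chain in one helper, keeping the same hand-picked constants (they come from the snippet above, not from the original article):

//+------------------------------------------------------------------+
//| Reward shaping chain from the snippet above, isolated            |
//+------------------------------------------------------------------+
float ShapeReward(float base_reward, float atr, double prev_balance, float log_prob_mult)
  {
   float norm_atr = (atr - (-100.0f)) / (100.0f - (-100.0f));    // min-max scaling of ATR
   float r = base_reward;
   if(r != 0.0f)
      r += norm_atr + (float)MathLog(prev_balance);              // add ATR and log-balance terms
   float z = (r - (-10.0f)) / 10.0f;                             // Z-score with fixed mean/std
   // note: 1 + z can turn negative, making MathPow return NaN (the snippet has the same issue)
   float t = (float)((MathPow(1.0 + z, 0.5) - 1.0) / 0.5);       // Box-Cox-style power transform
   float s = (t - log_prob_mult) / (5.0f - log_prob_mult);       // rescale
   float m = (float)MathLog10(s + 1.0f);                         // log10 compression
   return (m + 1.0f) / 1.4f;                                     // map back to roughly [0, 1]
  }

The fixed mean, standard deviation and scaling bounds are the weakest part of this construction: they only hold if the raw reward actually stays within the assumed range, so it is worth logging the intermediate values (as the snippet does) before trusting the result.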



//+------------------------------------------------------------------+
//| Create the Actor and Critic network descriptions                 |
//+------------------------------------------------------------------+
bool CreateDescriptions(CArrayObj *actor, CArrayObj *critic)
  {
//---
   CLayerDescription *descr;
//---
   if(!actor)
     {
      actor = new CArrayObj();
      if(!actor)
         return false;
     }
   if(!critic)
     {
      critic = new CArrayObj();
      if(!critic)
         return false;
     }
//--- Actor
   actor.Clear();
//--- Input layer
   if(!(descr = new CLayerDescription()))
      return false;
   descr.type = defNeuronBaseOCL;
   int prev_count = descr.count = (HistoryBars * BarDescr);
   descr.window = 0;
   descr.activation = None;
   descr.optimization = ADAM;
   if(!actor.Add(descr))
     {
      delete descr;
      return false;
     }
//--- layer 1
   if(!(descr = new CLayerDescription()))
      return false;
   descr.type = defNeuronConvOCL;
   prev_count = descr.count = prev_count - 1;
   descr.window = 7;
   descr.step = 3;
   descr.window_out = 8;
   descr.activation = LReLU;
   descr.optimization = ADAM;
   if(!actor.Add(descr))
     {
      delete descr;
      return false;
     }
//--- layer 2
   if(!(descr = new CLayerDescription()))
      return false;
   descr.type = defNeuronConvOCL;
   prev_count = descr.count = prev_count - 1;
   descr.window = 5;
   descr.step = 2;
   descr.window_out = 8;
   descr.activation = LReLU;
   descr.optimization = ADAM;
   if(!actor.Add(descr))
     {
      delete descr;
      return false;
     }
//--- layer 3
   if(!(descr = new CLayerDescription()))
      return false;
   descr.type = defNeuronConvOCL;
   prev_count = descr.count = prev_count - 1;
   descr.window = 3;
   descr.step = 1;
   descr.window_out = 8;
   descr.activation = LReLU;
   descr.optimization = ADAM;
   if(!actor.Add(descr))
     {
      delete descr;
      return false;
     }
//--- layer 4
   if(!(descr = new CLayerDescription()))
      return false;
   descr.type = defNeuronBaseOCL;
   descr.count = 1024;
   descr.activation = LReLU;
   descr.optimization = ADAM;
   if(!actor.Add(descr))
     {
      delete descr;
      return false;
     }
//--- layer 5
   if(!(descr = new CLayerDescription()))
      return false;
   descr.type = defNeuronBaseOCL;
   prev_count = descr.count = 512;
   descr.activation = LReLU;
   descr.optimization = ADAM;
   if(!actor.Add(descr))
     {
      delete descr;
      return false;
     }
//--- layer 6
   if(!(descr = new CLayerDescription()))
      return false;
   descr.type = defNeuronConvOCL;
   prev_count = descr.count = prev_count - 1;
   descr.window = 6;
   descr.step = 2;
   descr.window_out = 8;
   descr.activation = LReLU;
   descr.optimization = ADAM;
   if(!actor.Add(descr))
     {
      delete descr;
      return false;
     }
//--- layer 7
   if(!(descr = new CLayerDescription()))
      return false;
   descr.type = defNeuronConvOCL;
   prev_count = descr.count = prev_count - 1;
   descr.window = 4;
   descr.step = 2;
   descr.window_out = 8;
   descr.activation = LReLU;
   descr.optimization = ADAM;
   if(!actor.Add(descr))
     {
      delete descr;
      return false;
     }
//--- layer 8
   if(!(descr = new CLayerDescription()))
      return false;
   descr.type = defNeuronConvOCL;
   prev_count = descr.count = prev_count - 1;
   descr.window = 2;
   descr.step = 1;
   descr.window_out = 8;
   descr.activation = LReLU;
   descr.optimization = ADAM;
   if(!actor.Add(descr))
     {
      delete descr;
      return false;
     }
//--- layer 9
   if(!(descr = new CLayerDescription()))
      return false;
   descr.type = defNeuronConvOCL;
   prev_count = descr.count = prev_count;
   descr.window = 8;
   descr.step = 8;
   descr.window_out = 8;
   descr.activation = LReLU;
   descr.optimization = ADAM;
   if(!actor.Add(descr))
     {
      delete descr;
      return false;
     }
//--- layer 10
   if(!(descr = new CLayerDescription()))
      return false;
   descr.type = defNeuronBatchNormOCL;
   descr.count = prev_count;
   descr.batch = 1000;
   descr.activation = None;
   descr.optimization = ADAM;
   if(!actor.Add(descr))
     {
      delete descr;
      return false;
     }
//--- layer 11
   if(!(descr = new CLayerDescription()))
      return false;
   descr.type = defNeuronBaseOCL;
   descr.count = 1024;
   descr.activation = LReLU;
   descr.optimization = ADAM;
   if(!actor.Add(descr))
     {
      delete descr;
      return false;
     }
//--- layer 12
   if(!(descr = new CLayerDescription()))
      return false;
   descr.type = defNeuronBaseOCL;
   prev_count = descr.count = 512;
   descr.activation = LReLU;
   descr.optimization = ADAM;
   if(!actor.Add(descr))
     {
      delete descr;
      return false;
     }
//--- layer 13
   if(!(descr = new CLayerDescription()))
      return false;
   descr.type = defNeuronConcatenate;
   descr.count = LatentCount;
   descr.window = prev_count;
   descr.step = AccountDescr;
   descr.optimization = ADAM;
   descr.activation = SIGMOID;
   if(!actor.Add(descr))
     {
      delete descr;
      return false;
     }
//--- layer 14
   if(!(descr = new CLayerDescription()))
      return false;
   descr.type = defNeuronBaseOCL;
   descr.count = 1024;
   descr.activation = LReLU;
   descr.optimization = ADAM;
   if(!actor.Add(descr))
     {
      delete descr;
      return false;
     }
//--- layer 15
   if(!(descr = new CLayerDescription()))
      return false;
   descr.type = defNeuronBaseOCL;
   prev_count = descr.count = 1024;
   descr.activation = LReLU;
   descr.optimization = ADAM;
   if(!actor.Add(descr))
     {
      delete descr;
      return false;
     }
//--- layer 16
   if(!(descr = new CLayerDescription()))
      return false;
   descr.type = defNeuronBaseOCL;
   descr.count = prev_count;
   descr.activation = LReLU;
   descr.optimization = ADAM;
   if(!actor.Add(descr))
     {
      delete descr;
      return false;
     }
//--- layer 17
   if(!(descr = new CLayerDescription()))
      return false;
   descr.type = defNeuronSoftActorCritic;
   descr.count = NActions;
   descr.window_out = 32;
   descr.optimization = ADAM;
   descr.activation = SIGMOID;
   if(!actor.Add(descr))
     {
      delete descr;
      return false;
     }
//--- Critic
   critic.Clear();
//--- Input layer
   if(!(descr = new CLayerDescription()))
      return false;
   descr.type = defNeuronBaseOCL;
   prev_count = descr.count = LatentCount;
   descr.window = 0;
   descr.activation = None;
   descr.optimization = ADAM;
   if(!critic.Add(descr))
     {
      delete descr;
      return false;
     }
//--- layer 1
   if(!(descr = new CLayerDescription()))
      return false;
   descr.type = defNeuronConcatenate;
   descr.count = 1024;
   descr.window = prev_count;
   descr.step = 6;
   descr.optimization = ADAM;
   descr.activation = LReLU;
   if(!critic.Add(descr))
     {
      delete descr;
      return false;
     }
//--- layer 2
   if(!(descr = new CLayerDescription()))
      return false;
   descr.type = defNeuronBaseOCL;
   descr.count = 1024;
   descr.activation = LReLU;
   descr.optimization = ADAM;
   if(!critic.Add(descr))
     {
      delete descr;
      return false;
     }
//--- layer 3
   if(!(descr = new CLayerDescription()))
      return false;
   descr.type = defNeuronBaseOCL;
   descr.count = 1024;
   descr.activation = LReLU;
   descr.optimization = ADAM;
   if(!critic.Add(descr))
     {
      delete descr;
      return false;
     }
//--- layer 4
   if(!(descr = new CLayerDescription()))
      return false;
   descr.type = defNeuronBaseOCL;
   descr.count = 1024;
   descr.activation = LReLU;
   descr.optimization = ADAM;
   if(!critic.Add(descr))
     {
      delete descr;
      return false;
     }
//--- layer 5
   if(!(descr = new CLayerDescription()))
      return false;
   descr.type = defNeuronBaseOCL;
   descr.count = 1;
   descr.optimization = ADAM;
   descr.activation = None;
   if(!critic.Add(descr))
     {
      delete descr;
      return false;
     }
//---
   return true;
  }
 
Thanks