# Copyright 2023, MetaQuotes Ltd.
# https://www.mql5.com

# python libraries
import MetaTrader5 as mt5
import tensorflow as tf
import numpy as np
import pandas as pd
import tf2onnx
from tensorflow.keras import layers, callbacks
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score 
from sklearn.preprocessing import MinMaxScaler
# input parameters
inp_model_name = "LSTM.15m."
symbol = "EURUSD"
# history window: 120 M15 bars per input sample
inp_history_size = 120
window_size = inp_history_size
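# with the M15 timeframe this window covers 120 * 15 min = 30 hours of history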
batch_size = 32
epochs = 300
patience = 20



if not mt5.initialize():
    print("initialize() failed, error code =",mt5.last_error())
    quit()

# we will save the generated ONNX file next to this script to use it as a resource
from sys import argv
data_path = argv[0]
last_index = data_path.rfind("\\") + 1  # Windows path separator; keep the script directory
data_path = data_path[0:last_index]
print("data path to save onnx model", data_path)

# set start and end dates for history data
from datetime import timedelta, datetime
#end_date = datetime.now()
end_date = datetime(2024, 2, 1, 0)

# print start and end dates
print("data end date = ",end_date)
# from March 11 back to February 1 is about 40 days; minus weekends that is
# roughly 38 days * 24 h = 912 H1 bars, i.e. 912 * 4 = 3648 M15 bars
barras = 912 * 2 * 2
# copy 2520 M15 bars starting 'barras' bars back from the current bar
eurusd_rates = mt5.copy_rates_from_pos(symbol, mt5.TIMEFRAME_M15, barras, 2520)
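
# copy_rates_from_pos returns None on failure, so fail fast before building the DataFrame
if eurusd_rates is None:
    print("copy_rates_from_pos() failed, error code =", mt5.last_error())
    mt5.shutdown()
    quit()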
print(eurusd_rates)

# create dataframe
df = pd.DataFrame(eurusd_rates)

###############################################################################################

from scipy.signal import butter, lfilter

# low-pass filter parameters
cutoff_frequency = 0.1  # cutoff as a fraction of the Nyquist frequency
order = 1

# Butterworth low-pass filter
def butter_lowpass_filter(data, cutoff, fs, order):
    nyquist = 0.5 * fs
    normal_cutoff = cutoff / nyquist
    b, a = butter(order, normal_cutoff, btype='low', analog=False)
    print("filter coefficients - b:", b)
    print("filter coefficients - a:", a)
    y = lfilter(b, a, data)
    return y
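
# note: lfilter is a causal (forward-only) filter, so the smoothed series lags
# the raw prices; scipy.signal.filtfilt would remove that lag, but it also looks
# ahead, which would leak future information into the training data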

scaled = MinMaxScaler(feature_range=(0, 1))

# close prices as a single-column array for scaling
valores_df2 = df[['close']].values


data_escalada = scaled.fit_transform(valores_df2)

print(data_escalada)

filtered_data_low = butter_lowpass_filter(data_escalada, cutoff_frequency, fs=1, order=order)
print("filtered_data_low",filtered_data_low)

filtered_data_low_unscaled = scaled.inverse_transform(filtered_data_low)

print(filtered_data_low_unscaled)

suavizados_original_scale = pd.DataFrame(filtered_data_low_unscaled)
print("smoothed data, original scale", suavizados_original_scale)

df['suavizado'] = suavizados_original_scale.values
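
# df now carries the smoothed close in the 'suavizado' column; the model below
# is trained on this filtered series rather than on the raw close prices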

###############################################################################################

# keep only the smoothed close series
data = df.filter(['suavizado']).values
data = pd.DataFrame(data)
print(data)
# check columns in 'data'
print(data.columns)

# if the 'suavizado' column survived, use it as the target; otherwise fall back
# to the first column (rebuilding the DataFrame from .values renames it to 0)
if 'suavizado' in data.columns:
    data['target'] = data['suavizado']
else:
    data['target'] = data.iloc[:, 0]

# feature column: the smoothed close (column 0)
x_features = data[[0]]
# Target variable
y_target = data['target']

data5 = x_features.values

# scale data
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(data5)
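# keep this fitted scaler: it is reused later to inverse-transform the model's
# predictions back to the original price scale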

# training size is 80% of the data
training_size = int(len(scaled_data)*0.80) 
print("Training_size:",training_size)
train_data_initial = scaled_data[0:training_size,:]
test_data_initial = scaled_data[training_size:,:1]
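
# the split is chronological (no shuffling), so the model is validated on bars
# that come strictly after the training period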

# split a univariate sequence into samples
def split_sequence(sequence, n_steps):
    X, y = list(), list()
    for i in range(len(sequence)):
        # find the end of this pattern
        end_ix = i + n_steps
        # check if we are beyond the sequence
        if end_ix > len(sequence) - 1:
            break
        # gather input and output parts of the pattern
        seq_x, seq_y = sequence[i:end_ix], sequence[end_ix]
        X.append(seq_x)
        y.append(seq_y)
    return np.array(X), np.array(y)
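
# example: with n_steps=3 the sequence [1, 2, 3, 4, 5] yields
#   X = [[1, 2, 3], [2, 3, 4]] and y = [4, 5]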

# split into samples
time_step = inp_history_size
x_train, y_train = split_sequence(train_data_initial, time_step)
x_test, y_test = split_sequence(test_data_initial, time_step)

# reshape input to be [samples, time steps, features] which is required for LSTM
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1)
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], 1)

# define model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv1D, MaxPooling1D, Dropout, LSTM
from tensorflow.keras.metrics import RootMeanSquaredError as rmse
model = Sequential()
model.add(Conv1D(filters=256, kernel_size=2, activation='relu', padding='same', input_shape=(inp_history_size, 1)))
model.add(MaxPooling1D(pool_size=2))
model.add(LSTM(100, return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(100, return_sequences=False))
model.add(Dropout(0.3))
# sigmoid keeps the output inside the (0, 1) range of the MinMax-scaled target
model.add(Dense(units=1, activation='sigmoid'))
model.compile(optimizer='adam', loss='mse', metrics=[rmse()])
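
# print the layer stack and parameter counts as a quick sanity check
model.summary()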

# set up early stopping (monitors val_loss by default)
early_stopping = callbacks.EarlyStopping(
    min_delta=0.0001,
    patience=patience,
    restore_best_weights=True,
)
# train for up to 300 epochs; early stopping usually ends training sooner
history = model.fit(x_train, y_train, epochs=epochs, validation_data=(x_test, y_test), batch_size=batch_size, callbacks=[early_stopping], verbose=2)

# evaluate training data
train_loss, train_rmse = model.evaluate(x_train, y_train, batch_size=batch_size)
print(f"train_loss={train_loss:.3f}")
print(f"train_rmse={train_rmse:.3f}")

# evaluate testing data
test_loss, test_rmse = model.evaluate(x_test, y_test, batch_size=batch_size)
print(f"test_loss={test_loss:.3f}")
print(f"test_rmse={test_rmse:.3f}")



# prediction on the testing data
test_predict = model.predict(x_test)
plot_y_test = y_test.reshape(-1, 1)

# calculate metrics
from sklearn import metrics
# transform data back to real price values
value1 = scaler.inverse_transform(plot_y_test)
value2 = scaler.inverse_transform(test_predict)
# calculate scores
score = np.sqrt(metrics.mean_squared_error(value1, value2))
print("RMSE         : {}".format(score))
print("MSE          :", metrics.mean_squared_error(value1, value2))
print("R2 score     :", metrics.r2_score(value1, value2))

r2 = metrics.r2_score(value1, value2)

# embed the symbol, window size and R2 score in the output file name
final_data = symbol + "." + str(inp_history_size) + "." + str(round(r2, 2)) + ".onnx"

# save model to ONNX (from_keras returns the model proto and external tensor storage)
output_path = data_path + inp_model_name + final_data
onnx_model, _ = tf2onnx.convert.from_keras(model, output_path=output_path)
print(f"saved model to {output_path}")

# finish
mt5.shutdown()