# Copyright 2023, MetaQuotes Ltd.
# https://www.mql5.com

# python libraries
import tensorflow as tf
import numpy as np
import pandas as pd
import tf2onnx
from tensorflow.keras import callbacks


symbol="EURUSD"

inp_history_size = 30*9

# we will save the generated ONNX file next to this script to use as a resource
import os
from sys import argv

data_path = os.path.dirname(os.path.abspath(argv[0])) + os.sep
print("data path to save onnx model:", data_path)

csv_file = "seasonal_feb_concat_v2.csv"

if os.path.exists(csv_file):
    rates = pd.read_csv(csv_file)
else:
    print(f"Error: file not found - {csv_file}")
    quit()

print(rates)

####################################################################################################################################

df2=pd.DataFrame(rates)

# Convert the 'time' column to datetime format
df2['time'] = pd.to_datetime(df2['time'])

# Set the 'time' column as the index
df2.set_index('time', inplace=True)

########################################################################################################
# keep the raw close series for reference
df2345 = df2['close']
print("df2345", df2345)

from sklearn.preprocessing import MinMaxScaler
from scipy.signal import butter, lfilter

# low-pass filter parameters
cutoff_frequency = 0.01  # cutoff as a fraction of the Nyquist frequency
order = 4

# apply a Butterworth low-pass filter to the series
def butter_lowpass_filter(data, cutoff, fs, order):
    nyquist = 0.5 * fs
    normal_cutoff = cutoff / nyquist
    b, a = butter(order, normal_cutoff, btype='low', analog=False)
    print("filter coefficients - b:", b)
    print("filter coefficients - a:", a)
    y = lfilter(b, a, data)
    return y


filtered_data_low = butter_lowpass_filter(df2['close'], cutoff_frequency, fs=1, order=order)
print("filtered_data_low", filtered_data_low)

suavizados_original_scale = pd.DataFrame(filtered_data_low)
print("smoothed, original scale", suavizados_original_scale)

# add the smoothed series as a new column ('suavizado' is referenced below)
df2['suavizado'] = suavizados_original_scale.values

print("df2 with smoothed column", df2)
####################################################################################################################

# Resample the data to 1-hour bars: take the first Bid/Ask value in each bar
# and build OHLC aggregates for both the raw and the smoothed close
ohlc_data = df2.resample('1H').agg({'Bid_Bid': 'first', 'Ask_Ask': 'first', 'close': 'ohlc', 'suavizado': 'ohlc'})

# Flatten the multi-level columns
ohlc_data.columns = ohlc_data.columns.map('_'.join)

# Rename the OHLC columns
ohlc_data.rename(columns={'close_open': 'open', 'close_high': 'high', 'close_low': 'low', 'close_close': 'close', 'suavizado_open':'s_open', 'suavizado_high':'s_high', 'suavizado_low':'s_low', 'suavizado_close':'s_close'}, inplace=True)

# Drop NaN rows if any
ohlc_data.dropna(inplace=True)

# Reset the index to have a separate 'Time' column
ohlc_data.reset_index(inplace=True)

# Display the resulting OHLC data
print("ohlc", ohlc_data)
#############################################
###########

# create dataframe
dff3 = pd.DataFrame(ohlc_data)

# get the smoothed close prices only; .filter keeps the column name, so the
# 's_close' branch below is the one actually taken
data3 = dff3.filter(['s_close'])
print(data3)
# check columns in 'data3'
print(data3.columns)

# if 's_close' exists in the columns, use it; otherwise fall back to the first column
if 's_close' in data3.columns:
    result = data3['s_close']
else:
    result = data3.iloc[:, 0]

###########################################################################################################
# create dataframe
df4 = pd.DataFrame(result)
print("df4", df4)
############################################################################################################# noise reduction with low-pass / high-pass filters

# get close prices only
data5 = df4.values

# scale data to the (0, 1) range (MinMaxScaler was imported above)
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(data5)
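
# The MQL5 consumer of the ONNX model must reproduce this exact scaling at
# inference time. A minimal sketch of persisting the fitted parameters; the
# output filename is an assumption, not part of the original workflow.
np.savetxt(data_path + "minmax_scaler_params.txt",
           np.column_stack([scaler.data_min_, scaler.data_max_]),
           header="data_min data_max")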

# training size is 80% of the data
training_size = int(len(scaled_data)*0.80) 
print("Training_size:",training_size)
train_data_initial = scaled_data[0:training_size,:]
test_data_initial = scaled_data[training_size:,:1]

# split a univariate sequence into supervised (X, y) samples
def split_sequence(sequence, n_steps):
    X, y = list(), list()
    for i in range(len(sequence)):
        # find the end of this window
        end_ix = i + n_steps
        # stop once the window would run past the end of the sequence
        if end_ix > len(sequence) - 1:
            break
        # gather the input window and the value that follows it
        seq_x, seq_y = sequence[i:end_ix], sequence[end_ix]
        X.append(seq_x)
        y.append(seq_y)
    return np.array(X), np.array(y)
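
# A quick illustration of what split_sequence produces (purely for clarity;
# it does not affect training): a toy sequence of length 5 with n_steps=3
# yields two windows, [0,1,2] -> 3 and [1,2,3] -> 4.
_toy_X, _toy_y = split_sequence(np.arange(5).reshape(-1, 1), 3)
print("toy X shape:", _toy_X.shape)  # (2, 3, 1)
print("toy y shape:", _toy_y.shape)  # (2, 1)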

# split into samples
time_step = inp_history_size
x_train, y_train = split_sequence(train_data_initial, time_step)
x_test, y_test = split_sequence(test_data_initial, time_step)

# reshape input to [samples, time steps, features] as required by Conv1D/LSTM
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1)
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], 1)

# define the model: Conv1D feature extractor followed by stacked LSTMs
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv1D, MaxPooling1D, Dropout, LSTM
from tensorflow.keras.metrics import RootMeanSquaredError as rmse

model = Sequential()
model.add(Conv1D(filters=256, kernel_size=2, activation='relu', padding='same', input_shape=(inp_history_size, 1)))
model.add(MaxPooling1D(pool_size=2))
model.add(LSTM(100, return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(100, return_sequences=False))
model.add(Dropout(0.3))
# sigmoid output matches the (0, 1) range of the MinMax-scaled target
model.add(Dense(units=1, activation='sigmoid'))
model.compile(optimizer='adam', loss='mse', metrics=[rmse()])

# Set up early stopping
early_stopping = callbacks.EarlyStopping(
    min_delta=0.0001,
    patience=25,
    restore_best_weights=True,
)
# train the model; early stopping usually halts well before the 100-epoch limit
history = model.fit(x_train, y_train, epochs=100, validation_data=(x_test, y_test), batch_size=32, callbacks=[early_stopping], verbose=2)
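
# Report the best validation loss observed; with restore_best_weights=True the
# model already holds the weights from that epoch. (Added for convenience;
# not part of the original script.)
best_epoch = int(np.argmin(history.history['val_loss'])) + 1
print(f"best epoch: {best_epoch}, best val_loss: {min(history.history['val_loss']):.6f}")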

# evaluate training data
train_loss, train_rmse = model.evaluate(x_train,y_train, batch_size = 32)
print(f"train_loss={train_loss:.3f}")
print(f"train_rmse={train_rmse:.3f}")

# evaluate testing data
test_loss, test_rmse = model.evaluate(x_test,y_test, batch_size = 32)
print(f"test_loss={test_loss:.3f}")
print(f"test_rmse={test_rmse:.3f}")

# predict on the testing data
test_predict = model.predict(x_test)
plot_y_test = y_test.reshape(-1, 1)

# calculate metrics
from sklearn import metrics
# transform predictions back to the original price scale
value1 = scaler.inverse_transform(plot_y_test)
value2 = scaler.inverse_transform(test_predict)
# compute scores on the de-scaled values
score = np.sqrt(metrics.mean_squared_error(value1, value2))
print("RMSE         : {}".format(score))
print("MSE          :", metrics.mean_squared_error(value1, value2))
print("R2 score     :", metrics.r2_score(value1, value2))

r2 = round(metrics.r2_score(value1, value2), 2)

inp_model_name = str(symbol) + "_LSTM_" + str(inp_history_size) + "_1h_filtered_seasonal" + str(r2) + ".onnx"

# save the model to ONNX; from_keras returns (model_proto, external_tensor_storage)
output_path = data_path + inp_model_name
onnx_model, _ = tf2onnx.convert.from_keras(model, output_path=output_path)
print(f"saved model to {output_path}")