import MetaTrader5 as mt5
import pandas as pd
import numpy as np
from datetime import datetime
import random
import matplotlib.pyplot as plt
from catboost import CatBoostClassifier
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import mixture

mt5.initialize()

MA_PERIODS = [5, 15, 25, 50, 75,
              100, 125, 150, 200, 250, 300, 350, 400, 450, 500]

SYMBOL = 'GBPUSD'
MARKUP = 0.00010
TIMEFRAME = mt5.TIMEFRAME_H1
START_DATE = datetime(2017, 1, 1)
TSTART_DATE = datetime(2015, 1, 1)
FULL_DATE = datetime(2015, 1, 1)
STOP_DATE = datetime(2022, 1, 1)
hours = [5]        # trading hours allowed by time_filter (overwritten globally by exploratory_analysis)
n_components = 75  # number of GMM mixture components


def get_prices(START, STOP):
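    """Load close prices for SYMBOL from MetaTrader 5 and add one feature per
    period in MA_PERIODS: the deviation of the close price from its moving average."""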
    prices = pd.DataFrame(mt5.copy_rates_range(SYMBOL, TIMEFRAME, START, STOP),
                          columns=['time', 'close']).set_index('time')
    prices.index = pd.to_datetime(prices.index, unit='s')
    prices = prices.dropna()
    pr = prices.copy()

    count = 0
    for i in MA_PERIODS:
        # feature: deviation of the close price from its i-period moving average
        prices[str(count)] = pr['close'] - pr['close'].rolling(i).mean()
        count += 1

    return prices.dropna()


def time_filter(data, count):
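    """Return True if the bar at position `count` falls into the allowed trading hours."""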
    # filter by hour
    global hours
    if data.index[count].hour not in hours:
        return False

    # filter by day of week
    # days = [2]
    # if data.index[count].dayofweek not in days:
    #     return False

    return True


def add_labels(dataset, min, max, filter=time_filter):
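    """Label every bar by the price move over a random horizon of min..max bars:
    1.0 = sell (price falls by more than MARKUP), 0.0 = buy (price rises by more
    than MARKUP), 2.0 = no trade (filtered out or move within the markup).
    Rows labelled 2.0 are dropped."""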
    labels = []
    for i in range(dataset.shape[0]-max):
        rand = random.randint(min, max)
        curr_pr = dataset['close'][i]
        future_pr = dataset['close'][i + rand]
        if filter(dataset, i):
            if future_pr + MARKUP < curr_pr:
                labels.append(1.0)
            elif future_pr - MARKUP > curr_pr:
                labels.append(0.0)
            else:
                labels.append(2.0)
        else:
            labels.append(2.0)
    dataset = dataset.iloc[:len(labels)].copy()
    dataset['labels'] = labels
    dataset = dataset.dropna()
    dataset = dataset.drop(
        dataset[dataset.labels == 2].index)

    return dataset


def tester(dataset, markup=0.0, plot=False, filter=time_filter):
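    """Walk through the labelled bars, opening a position when flat (subject to the
    time filter) and closing it when the signal flips. Returns the R^2 of a linear
    fit to the cumulative-profit curve, signed by the slope of that fit."""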
    last_deal = 2  # 2 = flat, 0 = long, 1 = short
    last_price = 0.0
    report = [0.0]
    for i in range(dataset.shape[0]):
        pred = dataset['labels'][i]
        if last_deal == 2 and filter(dataset, i):
            last_price = dataset['close'][i]
            last_deal = 0 if pred <= 0.5 else 1
            continue
        if last_deal == 0 and pred > 0.5:
            last_deal = 2
            report.append(report[-1] - markup +
                          (dataset['close'][i] - last_price))
            continue
        if last_deal == 1 and pred < 0.5:
            last_deal = 2
            report.append(report[-1] - markup +
                          (last_price - dataset['close'][i]))

    y = np.array(report).reshape(-1, 1)
    X = np.arange(len(report)).reshape(-1, 1)
    lr = LinearRegression()
    lr.fit(X, y)

    # sign of the equity-curve slope: +1 for an upward curve, -1 for a downward one
    sign = 1 if lr.coef_[0][0] >= 0 else -1

    if plot:
        plt.plot(report)
        plt.plot(lr.predict(X))
        plt.title("Strategy performance")
        plt.xlabel("number of trades")
        plt.ylabel("cumulative profit in pips")
        plt.show()

    return lr.score(X, y) * sign


def brute_force(samples, gmm):
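    """Sample a synthetic dataset from the fitted GMM, train a CatBoost classifier
    on it and evaluate the model on real prices (TSTART_DATE..STOP_DATE) with the
    tester. Returns [signed R^2, model]."""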
    # # alternative: make the dataset and fit a separate GMM per class (not used)
    # pr = get_prices(START_DATE, STOP_DATE)
    # pr = add_labels(pr, min=5, max=25, filter=time_filter)
    # # tester(pr, MARKUP, plot=True, filter=time_filter)
    # pr = pr[pr.columns[1:]]
    # # perform GMM clusterization over the dataset
    # prUp = pr[pr['labels'] == 0]
    # prDwn = pr[pr['labels'] == 1]
    # gmmUp = mixture.GaussianMixture(
    #     n_components=n_components, covariance_type='full', n_init=1).fit(prUp[prUp.columns[:-1]])
    # gmmDwn = mixture.GaussianMixture(
    #     n_components=n_components, covariance_type='full', n_init=1).fit(prDwn[prDwn.columns[:-1]])

    # # sample new datasets from both mixtures
    # generatedUp = gmmUp.sample(samples)
    # generatedDwn = gmmDwn.sample(samples)

    # genUp = pd.DataFrame(generatedUp[0])
    # genUp['labels'] = 0
    # genDwn = pd.DataFrame(generatedDwn[0])
    # genDwn['labels'] = 1

    # gen = pd.concat([genUp, genDwn])

    # sample new dataset
    generated = gmm.sample(samples)
    # make labels
    gen = pd.DataFrame(generated[0])
    gen.rename(columns={gen.columns[-1]: "labels"}, inplace=True)
    gen.loc[gen['labels'] >= 0.5, 'labels'] = 1
    gen.loc[gen['labels'] < 0.5, 'labels'] = 0

    X = gen[gen.columns[:-1]]
    y = gen[gen.columns[-1]]

    # train/test split
    train_X, test_X, train_y, test_y = train_test_split(
        X, y, train_size=0.5, test_size=0.5, shuffle=True)
    # learn with train and validation subsets
    model = CatBoostClassifier(iterations=1000,
                               depth=6,
                               learning_rate=0.01,
                               custom_loss=['Accuracy'],
                               eval_metric='Accuracy',
                               verbose=False,
                               use_best_model=True,
                               task_type='CPU')
    model.fit(train_X, train_y, eval_set=(test_X, test_y),
              early_stopping_rounds=25, plot=False)
    # test on new data
    pr_tst = get_prices(TSTART_DATE, STOP_DATE)
    X = pr_tst[pr_tst.columns[1:]]
    X.columns = [''] * len(X.columns)

    # test the learned model
    p = model.predict_proba(X)
    p2 = [x[0] < 0.5 for x in p]
    pr2 = pr_tst.iloc[:len(p2)].copy()
    pr2['labels'] = p2
    R2 = tester(pr2, MARKUP, plot=False, filter=time_filter)

    return [R2, model]


def test_model(result):
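    """Run the trained model over the full FULL_DATE..STOP_DATE range and plot the
    resulting equity curve via the tester."""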
    pr_tst = get_prices(FULL_DATE, STOP_DATE)
    X = pr_tst[pr_tst.columns[1:]]
    X.columns = [''] * len(X.columns)

    # test the learned model
    p = result[1].predict_proba(X)
    p2 = [x[0] < 0.5 for x in p]
    pr2 = pr_tst.iloc[:len(p2)].copy()
    pr2['labels'] = p2
    R2 = tester(pr2, MARKUP, plot=True, filter=time_filter)
    return R2

def exploratory_analysis():
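    """Estimate model performance separately for every trading hour (0-23) and plot
    the signed R^2 values by hour."""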
    h = [x for x in range(24)]
    result = pd.DataFrame()
    for _h in h:
        global hours 
        hours = [_h]
        pr = get_prices(START_DATE, STOP_DATE)
        pr = add_labels(pr, min=15, max=15, filter=time_filter)
        gmm = mixture.GaussianMixture(
            n_components=n_components, covariance_type='full', n_init=1).fit(pr[pr.columns[1:]])

        # iterative learning
        res = []
        iterations = 10
        for i in range(iterations):
            res.append(brute_force(10000, gmm))
            print('Iteration: ', i, 'R^2: ', res[-1][0], ' hour= ', _h)
        
        r = pd.DataFrame([x[0] for x in res], index=np.full(iterations, _h))
        result = pd.concat([result, r])

    plt.scatter(result.index, result, c = result.index)
    plt.xticks(np.arange(0, 24, 1))
    plt.title("Performance by hour")
    plt.xlabel("hours")
    plt.ylabel("R^2 estimation")
    plt.show()
    return result

def deals_frequency_analyzer():
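    """Estimate model performance for different fixed deal lifetimes (1-24 bars) and
    plot the signed R^2 values by lifetime."""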
    freq = [x for x in range(1,25)]
    result = pd.DataFrame()
    for _h in freq:
        pr = get_prices(START_DATE, STOP_DATE)
        pr = add_labels(pr, min=_h, max=_h, filter=time_filter)
        gmm = mixture.GaussianMixture(
            n_components=n_components, covariance_type='full', n_init=1).fit(pr[pr.columns[1:]])

        # iterative learning
        res = []
        iterations = 5
        for i in range(iterations):
            res.append(brute_force(10000, gmm))
            print('Iteration: ', i, 'R^2: ', res[-1][0], ' deal lifetime = ', _h)
        
        r = pd.DataFrame([x[0] for x in res], index=np.full(iterations, _h))
        result = pd.concat([result, r])

    plt.scatter(result.index, result, c = result.index)
    plt.xticks(np.arange(1, len(freq)+1, 1))
    plt.title("Performance by deal lifetime")
    plt.xlabel("deal lifetime, bars")
    plt.ylabel("R^2 estimation")
    plt.show()
    return result


# run the exploratory studies: performance by trading hour and by deal lifetime
hourly_result = exploratory_analysis()
lifetime_result = deals_frequency_analyzer()

hours = [5]
# make dataset
pr = get_prices(START_DATE, STOP_DATE)
pr = add_labels(pr, min=15, max=15, filter=time_filter)
tester(pr, MARKUP, plot=True, filter=time_filter)

# perform GMM clusterization over the dataset
# gmm = mixture.BayesianGaussianMixture(n_components=n_components, covariance_type='full').fit(pr[pr.columns[1:]])
gmm = mixture.GaussianMixture(
    n_components=n_components, covariance_type='full', n_init=1).fit(pr[pr.columns[1:]])

# iterative learning
res = []
for i in range(20):
    res.append(brute_force(10000, gmm))
    print('Iteration: ', i, 'R^2: ', res[-1][0])

# test best model
res.sort(key=lambda x: x[0])  # sort by R^2 only; comparing the models on ties would fail
test_model(res[-1])

# export best model to mql
# export_model_to_MQL_code(res[-1][1])