import pandas as pd
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer
from transformers import TextDataset, DataCollatorForLanguageModeling
from transformers import Trainer, TrainingArguments

# Run on a GPU when available, otherwise fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(f"Using device: {device}")

# Load the price data (the first column, presumably a date or index, is
# dropped below).
df = pd.read_csv('llm_data.csv')

# Hold out the last 10 rows for testing and serialize each remaining row of
# prices into a space-separated "sentence" for language-model training.
sentences = [' '.join(map(str, prices)) for prices in df.iloc[:-10, 1:].values]
with open('train.txt', 'w') as f:
    for sentence in sentences:
        f.write(sentence + '\n')
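
# Optional sanity check: inspect one serialized row before training.
print(f"Example training sentence: {sentences[0]}")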

# GPT-2's byte-level BPE tokenizer splits the numeric strings into subword
# tokens, just as it would ordinary text.
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')

# TextDataset chunks train.txt into fixed-length blocks of 60 tokens.
# (TextDataset is deprecated in recent transformers releases, which recommend
# the datasets library instead, but it still works for this small script.)
train_dataset = TextDataset(tokenizer=tokenizer,
                            file_path="train.txt",
                            block_size=60)

# mlm=False selects the causal language-modeling objective GPT-2 uses; the
# collator simply copies the inputs as labels (the model shifts them itself).
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

# Start from the pretrained GPT-2 weights and fine-tune them on the price data.
model = GPT2LMHeadModel.from_pretrained('gpt2')

# Note: load_best_model_at_end is omitted because it requires an eval dataset
# and matching evaluation/save strategies; with neither configured, Trainer
# raises an error at construction time.
training_args = TrainingArguments(output_dir="./gpt2_stock",
                                  overwrite_output_dir=True,
                                  num_train_epochs=3,
                                  per_device_train_batch_size=32,
                                  save_steps=10_000,
                                  save_total_limit=2)

trainer = Trainer(model=model,
                  args=training_args,
                  data_collator=data_collator,
                  train_dataset=train_dataset)
trainer.train()
trainer.save_model("./gpt2_stock")
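
# trainer.save_model() stores the model weights and config, but not the
# tokenizer (it was never passed to the Trainer), so save it explicitly;
# the fine-tuned checkpoint can then be reloaded later, e.g. via
# GPT2LMHeadModel.from_pretrained("./gpt2_stock").
tokenizer.save_pretrained("./gpt2_stock")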

# Prompt with the first 19 prices of the held-out last row and let the model
# generate a continuation of the sequence.
prompt = ' '.join(map(str, df.iloc[:, 1:20].values[-1]))

model.to(device)
model.eval()
input_ids = tokenizer.encode(prompt, return_tensors='pt').to(device)
output_ids = model.generate(input_ids,
                            do_sample=True,
                            max_length=200,
                            pad_token_id=tokenizer.eos_token_id)
generated = tokenizer.decode(output_ids[0], skip_special_tokens=True)
print(f"Test generation: {generated}")