
# Training corpus for the toy n-gram model: a small-talk dialogue followed by
# arithmetic facts. '<eos>' marks sentence/utterance boundaries; tokenize()
# later lowercases and strips punctuation, so casing here is cosmetic.
corpus = """

Hey, how have you been doing lately? <eos>  
Hello, I’ve been doing quite well, thank you! I’ve just been really busy with my work and projects. <eos>  
That’s great to hear! What exactly has been keeping you occupied these days? <eos>  
I’m currently working on a big project at my job that has a very tight deadline, so it has been quite hectic. <eos>  
That sounds pretty stressful! How are you managing to handle all of that? <eos>  
Hi, I’m trying my best to stay organized and make sure I take breaks whenever I can to clear my mind. <eos>  
That’s a smart approach! Have you had any time to relax and unwind at all? <eos>  
Not really, to be honest, but I am planning to take a weekend trip soon to get away for a bit. <eos>  
That sounds like a fantastic idea! Where are you thinking of going for your trip? <eos>  
I’m considering going to the beach because I really need some sun and a nice sandy place to relax. <eos>  
That would be so refreshing! Do you have a favorite beach spot that you like to visit? <eos>  
Yes, I absolutely love going to the beach that’s near my hometown. It’s always been my favorite. <eos>  
Nice! How often do you get the chance to visit that beach? <eos>  
Not nearly as often as I would like, maybe just once a year if I’m lucky enough to find the time. <eos>  
I understand completely; life can get really busy sometimes. What do you usually enjoy doing when you’re there? <eos>  
I love swimming in the ocean, reading a good book, and just relaxing by the water while listening to the waves. <eos>  
That sounds absolutely perfect! Have you read any good books lately? <eos>  
Yes, I just finished reading a really good mystery novel that kept me guessing the whole time. <eos>  
I love a good mystery! What was the title of the book you just finished? <eos>  
The book was called "The Silent Patient," and I would highly recommend it to anyone who enjoys that genre. <eos>  
I’ve heard a lot of great things about that book! I’ll definitely add it to my reading list for sure. <eos>  
You really should! It has such an amazing twist at the end that you won’t see coming. <eos>  
I love a good twist in a story! Do you read often, or do you find it hard to make time for it? <eos>  
I try to read a little bit every night before I go to bed to help me relax. <eos>  
That’s a wonderful habit! I usually end up watching TV instead of reading. <eos>  
What shows are you currently watching? <eos>  
I’ve been really into a lot of crime dramas lately; they are always so captivating. <eos>  
Those shows can be really gripping! Do you have any favorites that you would recommend? <eos>  
I really enjoy "Mindhunter" and "True Detective." They are both so well done! <eos>  
Both of those are excellent shows! I’ve seen "Mindhunter," and I thought it was amazing! <eos>  
Right? The psychological aspects and character development are just fascinating to watch. <eos>  
I completely agree! Do you also like watching documentaries in your free time? <eos>  
Yes, I especially enjoy true crime documentaries; they always tell such interesting stories. <eos>  
Same here! There are so many fascinating documentaries out there to watch. <eos>  
Absolutely! I could easily binge-watch them for hours without getting bored. <eos>  
What was the last documentary you watched that you found particularly interesting? <eos>  
I recently watched "The Staircase," and it was so intense and thought-provoking. <eos>  
I’ve heard really good things about that documentary! I’ll definitely check it out when I have some time. <eos>  
You should absolutely do that! I think you won’t regret it at all; it’s very compelling. <eos>  
Thanks for the recommendation! I’m always looking for something new to watch. <eos>  
No problem! I’m glad to share. Let me know what you think once you watch it! <eos>  
Will do! It’s always nice to have someone to discuss these things with. <eos>  
I agree! It makes watching shows and reading books much more enjoyable when you can talk about them. <eos>  
Definitely! Let’s keep sharing recommendations. <eos>  
For sure! I’m looking forward to it!  <eos>
2 + 2 = 4 <eos>
3 + 5 = 8 <eos>
10 + 7 = 17 <eos>
20 - 5 = 15 <eos>
18 - 7 = 11 <eos>
50 - 25 = 25 <eos>
4 * 3 = 12 <eos>
6 * 6 = 36 <eos>
7 * 5 = 35 <eos>
8 * 4 = 32 <eos>
15 / 3 = 5 <eos>
12 / 4 = 3 <eos>
9 / 3 = 3 <eos>
25 + 30 = 55 <eos>
45 - 15 = 30 <eos>
100 / 10 = 10 <eos>
40 * 2 = 80 <eos>
81 / 9 = 9 <eos>
16 + 24 = 40 <eos>
9 * 7 = 63 <eos>
72 / 8 = 9 <eos>
13 + 14 = 27 <eos>
90 - 33 = 57 <eos>
22 * 3 = 66 <eos>
54 / 6 = 9 <eos>
27 + 32 = 59 <eos>
80 / 5 = 16 <eos>
11 * 9 = 99 <eos>
64 - 14 = 50 <eos>
5 + 8 = 13 <eos>
3 * 7 = 21 <eos>
49 / 7 = 7 <eos>
19 + 23 = 42 <eos>
77 - 28 = 49 <eos>
48 / 4 = 12 <eos>
9 * 8 = 72 <eos>
35 + 17 = 52 <eos>
88 / 8 = 11 <eos>
50 - 30 = 20 <eos>
4 * 11 = 44 <eos>
36 / 6 = 6 <eos>
5 + 10 = 15 <eos>
7 * 4 = 28 <eos>
72 - 18 = 54 <eos>
80 / 10 = 8 <eos>
60 * 2 = 120 <eos>
2 + 3 = 5 <eos>
25 - 10 = 15 <eos>
14 * 2 = 28 <eos>
28 / 4 = 7 <eos>
90 + 10 = 100 <eos>
56 - 36 = 20 <eos>
3 * 12 = 36 <eos>
45 / 5 = 9 <eos>
33 + 66 = 99 <eos>
100 - 50 = 50 <eos>
8 * 9 = 72 <eos>
10 / 2 = 5 <eos>
14 + 6 = 20 <eos>
21 - 9 = 12 <eos>
6 * 7 = 42 <eos>
35 / 5 = 7 <eos>
15 + 15 = 30 <eos>
8 - 3 = 5 <eos>
16 / 2 = 8 <eos>
50 * 1 = 50 <eos>
2^3 = 8 <eos>
5^2 = 25 <eos>
10^0 = 1 <eos>
3^4 = 81 <eos>
4^2 = 16 <eos>
6 + 12 = 18 <eos>
2 * 5 = 10 <eos>
9 - 3 = 6 <eos>
20 / 4 = 5 <eos>
2 + 7 = 9 <eos>
8^3 = 512 <eos>
15^2 = 225 <eos>
30 - 15 = 15 <eos>
25 + 5 = 30 <eos>
12 / 3 = 4 <eos>
7 * 6 = 42 <eos>
100 - 40 = 60 <eos>
9 + 11 = 20 <eos>
16 / 4 = 4 <eos>
5^3 = 125 <eos>
18 - 6 = 12 <eos>
4 * 4 = 16 <eos>
36 / 6 = 6 <eos>
20^2 = 400 <eos>
8 + 16 = 24 <eos>
11 * 2 = 22 <eos>
14 - 7 = 7 <eos>
5 + 9 = 14 <eos>
50 / 5 = 10 <eos>
6^2 = 36 <eos>
7 + 3 = 10 <eos>
12 - 8 = 4 <eos>
30 + 20 = 50 <eos>
45 / 9 = 5 <eos>
13 * 3 = 39 <eos>
3^5 = 243 <eos>
2^6 = 64 <eos>
27 - 9 = 18 <eos>
35 + 15 = 50 <eos>
10 / 2 = 5 <eos>
21 + 6 = 27 <eos>
4^3 = 64 <eos>
30 - 10 = 20 <eos>
5 * 5 = 25 <eos>
48 / 12 = 4 <eos>
10 + 10 = 20 <eos>
3 * 4 = 12 <eos>
100 / 25 = 4 <eos>
8 - 5 = 3 <eos>
14 + 11 = 25 <eos>
12^2 = 144 <eos>
50 - 20 = 30 <eos>
9 + 8 = 17 <eos>
15 / 3 = 5 <eos>
24 - 8 = 16 <eos>
2 * 9 = 18 <eos>
30^2 = 900 <eos>
4 + 9 = 13 <eos>
18 / 2 = 9 <eos>
8 * 7 = 56 <eos>
16 + 16 = 32 <eos>
6 - 2 = 4 <eos>
75 - 25 = 50 <eos>
5 * 4 = 20 <eos>
10^3 = 1000 <eos>
9^2 = 81 <eos>
3 + 6 = 9 <eos>
15 - 5 = 10 <eos>
12 * 5 = 60 <eos>
18 / 6 = 3 <eos>
7 - 1 = 6 <eos>
5 + 15 = 20 <eos>
10 * 3 = 30 <eos>
20 / 2 = 10 <eos>
50 + 30 = 80 <eos>
2^5 = 32 <eos>
4 + 4 = 8 <eos>
27 + 3 = 30 <eos>
10 - 7 = 3 <eos>
30 / 3 = 10 <eos>
14 * 2 = 28 <eos>
8^2 = 64 <eos>
12 - 4 = 8 <eos>
6 + 3 = 9 <eos>
40 / 8 = 5 <eos>
22 + 2 = 24 <eos>
33 - 13 = 20 <eos>
5^4 = 625 <eos>
2 + 6 = 8 <eos>

"""

import math
import re
import random


# Model configuration.
ModelName = 'AgGPT-3'  # Display name used in log messages and the chat prompt.
output_length = 15 # Set the maximum number of words to generate
creativity = 0.2  # Set the creativity level from 0 (rigid) to 1 (creative)

def mat_mul(A, B):
    """Multiply two matrices (nested lists) and return the product A @ B."""
    inner_dim = len(B)
    out_cols = len(B[0])
    product = []
    for row_a in A:
        product.append(
            [sum(row_a[k] * B[k][c] for k in range(inner_dim)) for c in range(out_cols)]
        )
    return product

def softmax(x):
    """Return the softmax distribution over a 1-D list of scores.

    Subtracts the maximum score before exponentiating for numerical
    stability. Fix: the original recomputed max(x) inside the
    comprehension for every element (O(n^2)); it is now hoisted so the
    function runs in O(n). Results are unchanged.
    """
    highest = max(x)
    exp_x = [math.exp(v - highest) for v in x]
    sum_exp_x = sum(exp_x)
    return [e / sum_exp_x for e in exp_x]

def self_attention(Q, K, V):
    """Unscaled dot-product attention over nested-list matrices.

    Q: query rows; K: key rows; V: value rows (K and V must have the
    same number of rows). Returns one output row per query row.

    Bug fix: the output loop previously iterated range(len(V)), but
    attention_weights has one row per QUERY; when len(Q) != len(V) the
    original raised IndexError (or produced the wrong number of rows).
    It now iterates range(len(Q)).
    """
    # Raw similarity scores: dot product of every query with every key.
    scores = []
    for i in range(len(Q)):
        row = []
        for j in range(len(K)):
            score = sum(Q[i][idx] * K[j][idx] for idx in range(len(Q[i])))
            row.append(score)
        scores.append(row)

    # Normalize each query's scores into a distribution over keys.
    attention_weights = [softmax(row) for row in scores]

    # Weighted sum of value rows — one output row per query.
    output = []
    for i in range(len(Q)):
        weighted_sum = [sum(attention_weights[i][k] * V[k][j] for k in range(len(V)))
                        for j in range(len(V[0]))]
        output.append(weighted_sum)

    return output

def multi_head_attention(Q, K, V, num_heads):
    """Split Q/K/V into heads, attend per head, and concatenate per position.

    Bug fix: the original used outputs.extend(...), stacking each head's
    rows along the SEQUENCE axis (producing num_heads * seq_len rows).
    Multi-head attention concatenates head outputs along the FEATURE
    axis, keeping exactly one row per input position; that is done here.

    NOTE(review): if d_model is not divisible by num_heads, the trailing
    d_model % num_heads features are dropped by the slicing — unchanged
    from the original (e.g. d_model=3, num_heads=2 ignores feature 2).
    """
    d_model = len(Q[0])
    head_size = d_model // num_heads
    head_outputs = []

    for head in range(num_heads):
        # Slice out this head's feature columns from each row.
        q_head = [row[head * head_size:(head + 1) * head_size] for row in Q]
        k_head = [row[head * head_size:(head + 1) * head_size] for row in K]
        v_head = [row[head * head_size:(head + 1) * head_size] for row in V]
        head_outputs.append(self_attention(q_head, k_head, v_head))

    # Feature-wise concatenation: one row per sequence position.
    return [sum((h[i] for h in head_outputs), []) for i in range(len(Q))]

def positional_encoding(seq_len, d_model):
    """Sinusoidal positional encodings, one row of length d_model per position.

    Follows the canonical transformer formula:
        PE(pos, 2k)   = sin(pos / 10000^(2k / d_model))
        PE(pos, 2k+1) = cos(pos / 10000^(2k / d_model))

    Bug fix: the original used exponent i/d_model for odd i, so each
    sin/cos pair had DIFFERENT frequencies; the exponent now uses the
    pair index 2*(i//2) so each pair shares one frequency.
    """
    encoding = []
    for pos in range(seq_len):
        row = []
        for i in range(d_model):
            angle = pos / (10000 ** ((2 * (i // 2)) / d_model))
            row.append(math.sin(angle) if i % 2 == 0 else math.cos(angle))
        encoding.append(row)
    return encoding

def add_positional_encoding(embeddings, positional_encodings):
    """Element-wise sum of token embeddings and their positional-encoding rows."""
    combined = []
    for i, emb_row in enumerate(embeddings):
        combined.append([emb_row[j] + positional_encodings[i][j] for j in range(len(emb_row))])
    return combined

def feed_forward_network(x):
    """Toy position-wise feed-forward layer with fixed (untrained) weights.

    W1 is an identity-like (input_dim x 4) matrix followed by ReLU;
    W2 is an all-ones (4 x 2) matrix, so each output is the sum of the
    ReLU'd hidden units. Biases are zero throughout.
    """
    input_dim = len(x[0])
    hidden_dim = 4
    output_dim = 2
    W1 = [[1 if i == j else 0 for j in range(hidden_dim)] for i in range(input_dim)]
    b1 = [0] * hidden_dim
    W2 = [[1 for _ in range(output_dim)] for _ in range(hidden_dim)]
    b2 = [0] * output_dim

    # Hidden layer: linear map through W1, then ReLU.
    hidden = []
    for row in x:
        hidden.append([max(0, sum(row[k] * W1[k][j] for k in range(input_dim)) + b1[j])
                       for j in range(hidden_dim)])

    # Output layer: linear map through W2.
    output = []
    for h_row in hidden:
        output.append([sum(h_row[k] * W2[k][j] for k in range(hidden_dim)) + b2[j]
                       for j in range(output_dim)])
    return output

def tokenize(text):
    """Lowercase *text*, drop . , ! ? punctuation, and split on whitespace."""
    lowered = text.lower()
    without_punct = re.sub(r'[.,!?]', '', lowered)
    return without_punct.split()

def embed_tokens(tokens):
    """Assign each token a fresh random 3-dimensional embedding vector.

    NOTE(review): vectors are re-randomized on every call, so a token
    has no stable representation across calls — fine for this toy demo.
    """
    vectors = []
    for _ in tokens:
        vectors.append([random.random() for _ in range(3)])
    return vectors

def build_ngram_models(corpus):
    """Build word-level bigram and trigram follower tables from *corpus*.

    Returns {"bigram_model": {word: [followers...]},
             "trigram_model": {"w1 w2": [followers...]}}.
    Follower lists keep duplicates so random.choice later samples in
    proportion to corpus frequency.

    Improvement: replaces the index-based loops and manual
    membership-test-then-append with zip over shifted views and
    dict.setdefault; behavior is unchanged.
    """
    words = tokenize(corpus)
    bigram_model = {}
    trigram_model = {}

    # Bigrams: each word maps to every word observed immediately after it.
    for w1, w2 in zip(words, words[1:]):
        bigram_model.setdefault(w1, []).append(w2)

    # Trigrams: each adjacent pair maps to its observed successors.
    for w1, w2, w3 in zip(words, words[1:], words[2:]):
        trigram_model.setdefault(f"{w1} {w2}", []).append(w3)

    return {"bigram_model": bigram_model, "trigram_model": trigram_model}

def predict_next_word(text, models):
    """Sample a continuation word for *text* from the n-gram tables.

    Prefers a trigram match on the last two words, falls back to a
    bigram match on the last word, and returns '' when no n-gram
    continuation is known (or *text* tokenizes to nothing).
    """
    bigram_model = models["bigram_model"]
    trigram_model = models["trigram_model"]
    words = tokenize(text)

    if not words:
        return ''

    if len(words) >= 2:
        last_bigram = f"{words[-2]} {words[-1]}"
        if last_bigram in trigram_model:
            return random.choice(trigram_model[last_bigram])

    last_word = words[-1]
    if last_word in bigram_model:
        return random.choice(bigram_model[last_word])

    return ''

def predict_next_word_with_attention(text, ngram_models):
    """Run the toy attention pipeline over *text*, then return an n-gram prediction.

    NOTE(review): the attention and feed-forward outputs are computed
    but never used — the returned word comes solely from
    predict_next_word. The pipeline is kept (and kept in the same
    order) because embed_tokens consumes the random stream before the
    n-gram sampling does.
    """
    bigram_model, trigram_model = ngram_models["bigram_model"], ngram_models["trigram_model"]
    tokens = tokenize(text)
    d_model = 3
    num_heads = 2

    # Embed tokens and add positional information.
    token_vectors = embed_tokens(tokens)
    position_table = positional_encoding(len(tokens), d_model)
    encoded = add_positional_encoding(token_vectors, position_table)

    # Self-attention + feed-forward pass; results are discarded.
    attended = multi_head_attention(encoded, encoded, encoded, num_heads)
    feed_forward_network(attended)

    return predict_next_word(text, ngram_models)

def clean_user_input(text):
    """Remove punctuation/special characters, then strip whitespace and lowercase."""
    without_symbols = re.sub(r'[<>,./;\'"\[\]{}|=_+`~!@#$%^&*()?\-]', '', text)
    return without_symbols.strip().lower()

def print_progress(progress, total):
    """Render an in-place 40-character ASCII progress bar on stdout.

    Uses a carriage return (no newline) so successive calls overwrite
    the same terminal line.
    """
    bar_length = 40
    filled_length = int(bar_length * progress // total)
    percent = (progress / total) * 100
    bar = '=' * filled_length + '-' * (bar_length - filled_length)
    print(f'\r[{bar}] {percent:.2f}% Complete', end='')

def train_model(corpus):
    """Normalize *corpus*, build the n-gram tables, and show a progress bar.

    Returns the dict produced by build_ngram_models.
    """
    print('\nTraining for ' + ModelName + ' has begun.')
    # Collapse newlines to spaces so n-grams can span line breaks.
    flattened = re.sub(r'[\r\n]+', ' ', corpus.strip())
    print_progress(0, 3)
    # Drop sentence punctuation up front (tokenize strips it again anyway).
    flattened = re.sub(r'[.,!?]', '', flattened)
    print_progress(1, 3)
    ngram_models = build_ngram_models(flattened)
    print_progress(2, 3)
    print_progress(3, 3)
    print('\nTraining complete.')
    return ngram_models


def correct_text(text):
    """Lightly capitalize and punctuate a generated sentence.

    - Capitalizes the first letter and any letter following '. '.
    - Appends '?' when a question word is present, otherwise '.'.
    - Uppercases the standalone pronoun 'i' and its contractions.
    Returns '' for empty input.

    Bug fix: the original indexed text[0] unconditionally, raising
    IndexError on empty or whitespace-only input; that case now returns
    '' early.
    """
    text = text.strip()
    if not text:
        return ''
    text = text[0].upper() + text[1:]

    if not re.search(r'[.!?]$', text):
        if re.search(r'\b(?:how|when|what|why|where|who|is|are|can|do|does|will|shall)\b', text, re.IGNORECASE):
            text += '?'
        else:
            text += '.'

    # Capitalize the first letter after a sentence-ending '. '.
    text = re.sub(r'(?<=\.\s)(\w)', lambda x: x.group().upper(), text)
    text = re.sub(r'\bi\b', 'I', text)
    text = re.sub(r'\b(i\'m|i\'ve|i\'d|i\'ll)\b', lambda x: x.group().capitalize(), text)

    return text

def is_sentence_complete(sentence, corpus):
    """Return True when *sentence* contains an end-of-sequence marker.

    *corpus* is unused but retained for call-site compatibility.

    Simplification: the original tested both 'eos' and '<eos>' as
    substrings, but '<eos>' contains 'eos', so the second test was
    redundant; the empty-string guard is likewise subsumed (an empty
    string contains no 'eos'). Behavior is unchanged.
    """
    return 'eos' in sentence.lower()

def predict_sentence_with_attention(input_text, ngram_models, output_length, creativity, recent_history_length=3):
    """Generate a reply by iteratively extending the user's (cleaned) input.

    input_text: raw user message; punctuation is stripped before use.
    ngram_models: dict from build_ngram_models ("bigram_model"/"trigram_model").
    output_length: maximum number of prediction iterations.
    creativity: probability in [0, 1] of appending the sampled n-gram word
        instead of the most frequent bigram follower.
    recent_history_length: size of the sliding window of recent words used
        as a crude repetition guard.
    Returns the generated continuation with the echoed input removed.
    """
    cleaned_input = clean_user_input(input_text)
    sentence = cleaned_input
    recent_history = []

    for _ in range(output_length):
        prediction = predict_next_word_with_attention(sentence, ngram_models)
        if not prediction:
            break
        # Repetition guard: a recently used word burns the iteration
        # without extending the sentence.
        if prediction in recent_history:
            continue  
        
        recent_history.append(prediction)
        if len(recent_history) > recent_history_length:
            recent_history.pop(0) 

        if random.random() < creativity: 
            # "Creative" path: keep the randomly sampled n-gram word.
            sentence += ' ' + prediction
        else:
            # Rigid path: most frequent bigram follower of the current last
            # word; if there is none, the sentence is left unchanged.
            next_words = ngram_models["bigram_model"].get(sentence.split()[-1], [])
            if next_words:
                most_likely_word = max(set(next_words), key=next_words.count)
                sentence += ' ' + most_likely_word
        
        # NOTE(review): reads the module-level `corpus` global, not a
        # parameter — confirm that is intentional.
        if is_sentence_complete(sentence, corpus):
            break
    
    sentence = correct_text(sentence)

    # Strip the echoed user input so only the generated continuation remains.
    if cleaned_input in sentence:
        sentence = sentence.replace(cleaned_input, '', 1).strip()
    
    return sentence


def _chat_loop(models):
    """Interactive REPL: read a message, print the model's reply; 'exit' quits."""
    while True:
        input_text = input('Type a message: ').strip()
        if input_text.lower() == 'exit':
            break
        predicted_sentence = predict_sentence_with_attention(input_text, models, output_length, creativity)
        print(f'{ModelName}:', predicted_sentence)


# Training still runs at import time (unchanged from the original), but the
# blocking interactive loop now only starts when the file is run as a script.
ngram_models = train_model(corpus)

if __name__ == '__main__':
    _chat_loop(ngram_models)
