File size: 1,536 Bytes
a717421
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
# Install required packages first:
# pip install torch transformers safetensors

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# -----------------------------
# 1️⃣ Load the trained model
# -----------------------------
model_path = "./mini_gpt_safetensor"  # directory the trained checkpoint was saved to

print("📥 Loading model and tokenizer...")
tokenizer = AutoTokenizer.from_pretrained(model_path)
# GPT-style checkpoints ship without a pad token; alias it to EOS so padding works.
tokenizer.pad_token = tokenizer.eos_token

# device_map="auto" lets the loader place weights on the best available device.
model = AutoModelForCausalLM.from_pretrained(model_path, device_map="auto")

# -----------------------------
# 2️⃣ Generate text
# -----------------------------
def generate_text(prompt, max_length=50):
    """Generate a sampled continuation of *prompt* with the loaded model.

    Args:
        prompt: Text to continue.
        max_length: Total sequence length budget (prompt + generated tokens).

    Returns:
        The decoded prompt-plus-continuation string, special tokens stripped.
    """
    # Tokenize the prompt, keeping the attention mask: pad_token was aliased
    # to eos_token above, so without the mask generate() cannot tell padding
    # from real tokens and warns about a missing attention mask.
    encoding = tokenizer(prompt, return_tensors="pt").to(model.device)

    # Inference only — no_grad avoids building autograd state.
    with torch.no_grad():
        output_ids = model.generate(
            input_ids=encoding.input_ids,
            attention_mask=encoding.attention_mask,  # fixes dropped-mask defect
            max_length=max_length,
            do_sample=True,      # for randomness
            top_k=50,            # sample from top 50 tokens
            top_p=0.95,          # nucleus sampling
            temperature=0.7,
            num_return_sequences=1,
            pad_token_id=tokenizer.eos_token_id,  # silences "pad token not set" warning
        )

    # Decode the single returned sequence back to text.
    output_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    return output_text

# -----------------------------
# 3️⃣ Test generation
# -----------------------------
prompt = "Hello, I am training a mini GPT model"
# Run one sample generation to sanity-check the loaded model.
result = generate_text(prompt, max_length=50)
print("\n📝 Generated text:")
print(result)