import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Insert your research topic here
RESEARCH_TOPIC = """
"""


def load_model(model_path):
    # Load the model in half precision and let device_map="auto" place it
    # across the available devices.
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        torch_dtype=torch.float16,
        device_map="auto",
    )
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    return model, tokenizer


def generate_response(model, tokenizer, topic):
    topic = topic.strip()
    # The prompt format assumes a chat-style model trained on USER:/ASSISTANT: turns.
    prompt = f"USER: Research Topic: \"{topic}\"\nLet's think step by step:\nASSISTANT:"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=2500,
        temperature=0.7,
        top_p=0.9,
        repetition_penalty=1.1,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,  # silences the pad-token warning on models without one
    )
    # The decoded output echoes the prompt, so keep only the assistant's reply.
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response.split("ASSISTANT:")[-1].strip()


def run():
    model_path = "./"  # Path to the directory containing your model weight files
    model, tokenizer = load_model(model_path)
    result = generate_response(model, tokenizer, RESEARCH_TOPIC)
    print(result)


if __name__ == "__main__":
    run()
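
# ---------------------------------------------------------------------------
# Usage sketch (assumptions: the script is saved as "generate.py", a
# hypothetical name, and the weight files for a USER:/ASSISTANT:-style chat
# model sit in the working directory alongside it):
#
#   pip install torch transformers accelerate
#   python generate.py
#
# accelerate is required for device_map="auto"; fill in RESEARCH_TOPIC at the
# top of the file before running.
# ---------------------------------------------------------------------------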