hongzhouyu commited on
Commit
4e752b5
·
verified ·
1 Parent(s): 1a5bba5

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +33 -11
README.md CHANGED
@@ -34,27 +34,49 @@ You can use FineMedLM in the same way as `Llama-3.1-8B-Instruct`:
34
  ```python
35
  from transformers import AutoModelForCausalLM, AutoTokenizer
36
 
37
- model = AutoModelForCausalLM.from_pretrained("hongzhouyu/FineMedLM")
38
- tokenizer = AutoTokenizer.from_pretrained("hongzhouyu/FineMedLM")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
 
40
- prompt = "How do the interactions between neuronal activity, gonadal hormones, and neurotrophins influence axon regeneration post-injury, and what are the potential therapeutic implications of this research? Please think step by step."
41
  messages = [
42
- {"role": "system", "content": "You are a helpful professional doctor."},
43
  {"role": "user", "content": prompt}
44
  ]
 
45
  text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
46
- model_inputs = tokenizer([text], return_tensors="pt")
47
 
 
 
 
48
  generated_ids = model.generate(
49
  model_inputs.input_ids,
50
- max_new_tokens=4096
 
51
  )
52
- generated_ids = [
53
- output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
54
- ]
55
- response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
56
 
57
- print(response)
 
58
  ```
59
 
60
  # <span>Citation</span>
 
34
  ```python
35
  from transformers import AutoModelForCausalLM, AutoTokenizer
36
 
37
+ main_model_name = "hongzhouyu/FineMedLM"
38
+ model = AutoModelForCausalLM.from_pretrained(main_model_name, device_map="auto")
39
+ tokenizer = AutoTokenizer.from_pretrained(main_model_name)
40
+
41
+ # Construct the input
42
+ prompt = (
43
+ """The following are multiple choice questions (with answers) about health. Think step by step and then finish your answer with "the answer is (X)" where X is the correct letter choice.
44
+
45
+
46
+ Question:
47
+ Polio can be eradicated by which of the following?
48
+ Options:
49
+ A. Herbal remedies
50
+ B. Use of antibiotics
51
+ C. Regular intake of vitamins
52
+ D. Administration of tetanus vaccine
53
+ E. Attention to sewage control and hygiene
54
+ F. Natural immunity acquired through exposure
55
+ G. Use of antiviral drugs
56
+ Answer: Let's think step by step.
57
+ """
58
+ )
59
+
60
 
 
61
  messages = [
62
+ {"role": "system", "content": "You are a helpful professional doctor. The user will give you a medical question, and you should answer it in a professional way."},
63
  {"role": "user", "content": prompt}
64
  ]
65
+
66
  text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
67
+ print(text)
68
 
69
+ model_inputs = tokenizer(text, return_tensors="pt").to(model.device)
70
+
71
+ print("-----start generate-----")
72
  generated_ids = model.generate(
73
  model_inputs.input_ids,
74
+ max_new_tokens=2048,
75
+ eos_token_id=tokenizer.eos_token_id
76
  )
 
 
 
 
77
 
78
+ answer = tokenizer.decode(generated_ids[0], skip_special_tokens=False)
79
+ print(answer)
80
  ```
81
 
82
  # <span>Citation</span>