Transformers
Safetensors
English
Japanese
text-generation-inference
unsloth
llama
trl
Inference Endpoints
rlcgn589 committed on
Commit
ee2684c
·
verified ·
1 Parent(s): d63b7ed

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +1 -1
README.md CHANGED
@@ -113,7 +113,7 @@ for dt in tqdm(datasets):
113
 
114
  inputs = tokenizer([prompt], return_tensors = "pt").to(model.device)
115
 
116
- outputs = model.generate(**inputs, max_new_tokens = 512, use_cache = True, do_sample=False, repetition_penalty=1.2)
117
  prediction = tokenizer.decode(outputs[0], skip_special_tokens=True).split('\n### 回答')[-1]
118
 
119
  results.append({"task_id": dt["task_id"], "input": input, "output": prediction})
 
113
 
114
  inputs = tokenizer([prompt], return_tensors = "pt").to(model.device)
115
 
116
+ outputs = model.generate(**inputs, max_new_tokens = 1024, use_cache = True, do_sample=False, repetition_penalty=1.2)
117
  prediction = tokenizer.decode(outputs[0], skip_special_tokens=True).split('\n### 回答')[-1]
118
 
119
  results.append({"task_id": dt["task_id"], "input": input, "output": prediction})