import os
import json
import random
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForCausalLM
from arguments import get_args
random.seed(1234)


def load_data(datapath):
    """Load data from a JSON file."""
    print("loading data from %s" % datapath)
    with open(datapath, "r", encoding="utf-8") as f:
        data_list = json.load(f)
    return data_list
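
# Illustrative sketch (added for clarity, not part of the original data spec): judging by
# the fields accessed below, each entry in the loaded JSON list is expected to look
# roughly like this:
#
#   {
#       "messages": [{"role": "user", "content": "..."},
#                    {"role": "assistant", "content": "..."}, ...],
#       "ctxs": [{"title": "...", "text": "..."}, ...]
#   }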


def reformat_question(turn_list, dataset_name):
    """Reformat the question based on dataset type and keep only the last 7 turns."""
    ## only take the latest 7 turns
    _turn_list = turn_list[-7:]
    ## drop leading assistant turns so the truncated history starts with a user turn
    idx = -6
    while _turn_list[0]['role'] != 'user':
        _turn_list = turn_list[idx:]
        idx += 1
    turn_list = _turn_list
    assert turn_list[-1]['role'] == 'user'
    assert turn_list[0]['role'] == 'user'

    long_answer_dataset_list = ["doc2dial", "quac", "qrecc", "inscit", "doqa_movies", "doqa_travel", "doqa_cooking", "hybridial", "convfinqa"]
    if dataset_name in long_answer_dataset_list:
        for item in turn_list:
            if item['role'] == 'user':
                ## only needs to be added on the first user turn
                item['content'] = 'Please give a full and complete answer for the question: ' + item['content']
                break
    else:
        raise Exception("please input a correct dataset name!")

    return turn_list
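
# Example (illustrative only): for any of the supported datasets, reformat_question keeps
# the turn structure intact and only prefixes the first user turn of the truncated history,
# e.g.
#   [{"role": "user", "content": "Who directed the film?"}]
# becomes
#   [{"role": "user", "content": "Please give a full and complete answer for the question: Who directed the film?"}]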


def get_inputs_hf(data_list, dataset_name, num_ctx):
    """
    Get inputs formatted for HuggingFace chat template.
    Returns a list of message lists (chat format).
    """
    system = "You are a helpful AI assistant that gives concise and detailed answers to the user's questions based on the given contexts. You should indicate when the answer cannot be found in any of the contexts. You should only respond with the answer."

    prompt_list = []
    for item in data_list:
        turn_list = item['messages']
        turn_list = reformat_question(turn_list, dataset_name)

        ctx_list = ["title: " + ctx["title"] + ", context: " + ctx["text"]
                    if ctx["title"] else "context: " + ctx["text"] for ctx in item['ctxs'][:num_ctx]]
        context = "\n\n".join(ctx_list)
        turn_list[0]["content"] = f"{system}\n\n{context}\n\n{turn_list[0]['content']}"

        # Clean consecutive assistant turns
        cleaned_turn_list = []
        for turn in turn_list:
            try:
                if turn["role"] != "assistant":
                    cleaned_turn_list.append(turn)
                else:
                    if cleaned_turn_list[-1]["role"] == "assistant":
                        cleaned_turn_list[-1]["content"] += ". " + turn["content"]
                    else:
                        cleaned_turn_list.append(turn)
            except Exception as ex:
                print(str(ex.args))
                import pdb; pdb.set_trace()

        prompt_list.append(cleaned_turn_list)

    return prompt_list
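
# Sketch of a resulting prompt_list entry (illustrative; the exact text depends on the
# retrieved passages): a standard chat-format message list whose first user turn carries
# the system instruction, the concatenated contexts, and the reformatted question.
#
#   [
#       {"role": "user", "content": "You are a helpful AI assistant ...\n\n"
#                                   "title: ..., context: ...\n\n"
#                                   "Please give a full and complete answer for the question: ..."},
#       {"role": "assistant", "content": "..."},
#       {"role": "user", "content": "..."}
#   ]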


def get_input_datapath(args):
    """Get the input data path based on the eval_dataset."""
    if args.eval_dataset == "doc2dial":
        input_datapath = os.path.join(args.data_folder, args.doc2dial_path)
    elif args.eval_dataset == "convfinqa":
        input_datapath = os.path.join(args.data_folder, args.convfinqa_path)
    elif args.eval_dataset == "quac":
        input_datapath = os.path.join(args.data_folder, args.quac_path)
    elif args.eval_dataset == "qrecc":
        input_datapath = os.path.join(args.data_folder, args.qrecc_path)
    elif args.eval_dataset == "doqa_cooking":
        input_datapath = os.path.join(args.data_folder, args.doqa_cooking_path)
    elif args.eval_dataset == "doqa_travel":
        input_datapath = os.path.join(args.data_folder, args.doqa_travel_path)
    elif args.eval_dataset == "doqa_movies":
        input_datapath = os.path.join(args.data_folder, args.doqa_movies_path)
    elif args.eval_dataset == "inscit":
        input_datapath = os.path.join(args.data_folder, args.inscit_path)
    elif args.eval_dataset == "hybridial":
        input_datapath = os.path.join(args.data_folder, args.hybridial_path)
    else:
        raise Exception("please input a correct eval_dataset name!")
    return input_datapath


def get_prompt_list(args):
    """Get prompt list for the given dataset."""
    input_datapath = get_input_datapath(args)
    data_list = load_data(input_datapath)
    print("number of samples in the dataset:", len(data_list))

    # Apply limit if specified
    if args.limit is not None:
        data_list = data_list[:args.limit]
        print(f"limited to {args.limit} samples")

    prompt_list = get_inputs_hf(data_list, args.eval_dataset, num_ctx=args.num_ctx)
    return prompt_list


def run_inference(args, tokenizer, model):
    """Run inference for a given dataset."""
    # Get output filepath
    model_name = args.model_id.replace('/', '_')
    os.makedirs(os.path.join(args.output_folder, model_name), exist_ok=True)
    output_filepath = os.path.join(args.output_folder, model_name, f"{args.eval_dataset}.txt")

    # Check for existing results
    existing_count = 0
    if os.path.exists(output_filepath):
        with open(output_filepath, "r") as f:
            lines = f.readlines()
        if len(lines) >= args.expected_samples:
            print(f"Skipping as results exist ({len(lines)} samples)", "\n\n")
            return
        else:
            existing_count = len(lines)
            print(f"Resuming from {existing_count} existing samples")

    # Get prompt list
    prompt_list = get_prompt_list(args)

    # Run generation
    output_list = []
    with open(output_filepath, "a", encoding='utf-8') as f:
        for idx, messages in enumerate(tqdm(prompt_list, desc=f"Generating for {args.eval_dataset}")):
            if idx < existing_count:
                continue
            try:
                # Apply chat template
                text = tokenizer.apply_chat_template(
                    messages,
                    tokenize=False,
                    add_generation_prompt=True
                )
                # Generate
                model_inputs = tokenizer([text], return_tensors="pt").to(args.device)
                generated_ids = model.generate(
                    model_inputs.input_ids,
                    max_new_tokens=args.max_tokens,
                    stop_strings=args.stop_strings,
                    tokenizer=tokenizer
                )
                # Decode only the newly generated tokens
                generated_ids = [
                    output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
                ]
                response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
                generated_text = response.strip().replace("\n", " ").strip(" <extra_id_1>")
                output_list.append(generated_text)
                f.write(generated_text + "\n")
            except Exception as ex:
                print(f"Error at index {idx}: {str(ex)}")
                break

    print(f"Generated {len(output_list)} responses for {args.eval_dataset}")


def main():
    """Main function to run HuggingFace model inference."""
    args = get_args()
    print(f"Evaluating model: {args.model_id}")
    print(f"Dataset: {args.eval_dataset}")
    print(f"Device: {args.device}")
    print(f"Num contexts: {args.num_ctx}")
    print(f"Max tokens: {args.max_tokens}")

    # Load tokenizer and model
    tokenizer = AutoTokenizer.from_pretrained(args.model_id, stop_strings=args.stop_strings)
    model = AutoModelForCausalLM.from_pretrained(args.model_id)
    model.to(args.device)

    # Run inference
    run_inference(args, tokenizer, model)
    print("Inference completed!")


if __name__ == "__main__":
    main()
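
# Example invocation (illustrative only): the actual flag names are defined by
# arguments.get_args(), so the flags below are assumptions inferred from the attribute
# names used above, and <hf_model_id> / the paths are placeholders.
#
#   python <this_script.py> --model_id <hf_model_id> --eval_dataset doc2dial \
#       --data_folder <data_dir> --output_folder <output_dir> \
#       --num_ctx 5 --max_tokens 64 --device cuda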