ChatRAG-Hi / evaluation / run_generation_hf.py

import os
import json
import random
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForCausalLM
from arguments import get_args
random.seed(1234)

def load_data(datapath):
    """Load data from a JSON file."""
    print(f"loading data from {datapath}")
    with open(datapath, "r", encoding="utf-8") as f:
        data_list = json.load(f)
    return data_list
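
# Each record in the JSON file is expected to follow the ChatRAG-style layout
# consumed below (field names inferred from the accesses in get_inputs_hf; the
# authoritative schema lives with the dataset itself), e.g.:
#
#   {
#     "messages": [{"role": "user", "content": "..."},
#                  {"role": "assistant", "content": "..."}, ...],
#     "ctxs": [{"title": "...", "text": "..."}, ...]
#   }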

def reformat_question(turn_list, dataset_name):
    """Reformat question based on dataset type and keep last 7 turns."""
    ## only take the latest 7 turns
    _turn_list = turn_list[-7:]
    idx = -6
    # if the 7-turn window opens on an assistant turn, shrink it from the
    # left until it starts on a user turn
    while _turn_list[0]['role'] != 'user':
        _turn_list = turn_list[idx:]
        idx += 1
    turn_list = _turn_list

    assert turn_list[-1]['role'] == 'user'
    assert turn_list[0]['role'] == 'user'

    long_answer_dataset_list = ["doc2dial", "quac", "qrecc", "inscit", "doqa_movies", "doqa_travel", "doqa_cooking", "hybridial", "convfinqa"]
    if dataset_name in long_answer_dataset_list:
        for item in turn_list:
            if item['role'] == 'user':
                ## only need to add it on the first user turn
                item['content'] = 'Please give a full and complete answer for the question: ' + item['content']
                break
    else:
        raise Exception("please input a correct dataset name!")

    return turn_list
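
# Worked example of the trimming logic above: with eight alternating turns
# ending on a user turn, turn_list[-7:] opens on an assistant turn, so the
# while loop shrinks the window to turn_list[-6:], which opens on a user turn
# and still ends on the current user question.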

def get_inputs_hf(data_list, dataset_name, num_ctx):
    """
    Get inputs formatted for HuggingFace chat template.
    Returns a list of message lists (chat format).
    """
    system = "You are a helpful AI assistant that gives concise and detailed answers to the user's questions based on the given contexts. You should indicate when the answer cannot be found in any of the contexts. You should only respond with the answer."

    prompt_list = []
    for item in data_list:
        turn_list = item['messages']
        turn_list = reformat_question(turn_list, dataset_name)

        # Prepend the title to each retrieved context when one exists
        ctx_list = ["title: " + ctx["title"] + ", context: " + ctx["text"]
                    if ctx["title"] else "context: " + ctx["text"]
                    for ctx in item['ctxs'][:num_ctx]]
        context = "\n\n".join(ctx_list)

        # Fold the system prompt and contexts into the first user turn
        turn_list[0]["content"] = f"{system}\n\n{context}\n\n{turn_list[0]['content']}"

        # Merge consecutive assistant turns, which some chat templates reject
        cleaned_turn_list = []
        for turn in turn_list:
            try:
                if turn["role"] != "assistant":
                    cleaned_turn_list.append(turn)
                else:
                    if cleaned_turn_list[-1]["role"] == "assistant":
                        cleaned_turn_list[-1]["content"] += ". " + turn["content"]
                    else:
                        cleaned_turn_list.append(turn)
            except Exception as ex:
                print(str(ex.args))
                import pdb; pdb.set_trace()

        prompt_list.append(cleaned_turn_list)
    return prompt_list
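
# Each resulting entry is a chat-format message list whose first user turn
# carries the system prompt and contexts, roughly:
#
#   [{"role": "user", "content": "You are a helpful AI assistant...\n\n"
#                                "title: ..., context: ...\n\n<question>"},
#    {"role": "assistant", "content": "..."},
#    {"role": "user", "content": "<follow-up question>"}]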

def get_input_datapath(args):
    """Get the input data path based on the eval_dataset."""
    if args.eval_dataset == "doc2dial":
        input_datapath = os.path.join(args.data_folder, args.doc2dial_path)
    elif args.eval_dataset == "convfinqa":
        input_datapath = os.path.join(args.data_folder, args.convfinqa_path)
    elif args.eval_dataset == "quac":
        input_datapath = os.path.join(args.data_folder, args.quac_path)
    elif args.eval_dataset == "qrecc":
        input_datapath = os.path.join(args.data_folder, args.qrecc_path)
    elif args.eval_dataset == "doqa_cooking":
        input_datapath = os.path.join(args.data_folder, args.doqa_cooking_path)
    elif args.eval_dataset == "doqa_travel":
        input_datapath = os.path.join(args.data_folder, args.doqa_travel_path)
    elif args.eval_dataset == "doqa_movies":
        input_datapath = os.path.join(args.data_folder, args.doqa_movies_path)
    elif args.eval_dataset == "inscit":
        input_datapath = os.path.join(args.data_folder, args.inscit_path)
    elif args.eval_dataset == "hybridial":
        input_datapath = os.path.join(args.data_folder, args.hybridial_path)
    else:
        raise Exception("please input a correct eval_dataset name!")
    return input_datapath
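
# The chain above is equivalent to a table lookup; a minimal alternative
# sketch with the same behaviour (not what this file ships):
#
#   path_attr = {"doc2dial": "doc2dial_path", "convfinqa": "convfinqa_path",
#                ...}.get(args.eval_dataset)
#   if path_attr is None:
#       raise Exception("please input a correct eval_dataset name!")
#   input_datapath = os.path.join(args.data_folder, getattr(args, path_attr))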

def get_prompt_list(args):
    """Get prompt list for the given dataset."""
    input_datapath = get_input_datapath(args)
    data_list = load_data(input_datapath)
    print("number of samples in the dataset:", len(data_list))

    # Apply limit if specified
    if args.limit is not None:
        data_list = data_list[:args.limit]
        print(f"limited to {args.limit} samples")

    prompt_list = get_inputs_hf(data_list, args.eval_dataset, num_ctx=args.num_ctx)
    return prompt_list

def run_inference(args, tokenizer, model):
    """Run inference for a given dataset."""
    # Get output filepath
    model_name = args.model_id.replace('/', '_')
    os.makedirs(os.path.join(args.output_folder, model_name), exist_ok=True)
    output_filepath = os.path.join(args.output_folder, model_name, f"{args.eval_dataset}.txt")

    # Check for existing results; the output file holds one response per
    # line, so its line count tells us where to resume
    existing_count = 0
    if os.path.exists(output_filepath):
        with open(output_filepath, "r") as f:
            lines = f.readlines()
        if len(lines) >= args.expected_samples:
            print(f"Skipping as results exist ({len(lines)} samples)\n\n")
            return
        else:
            existing_count = len(lines)
            print(f"Resuming from {existing_count} existing samples")

    # Get prompt list
    prompt_list = get_prompt_list(args)

    # Run generation
    output_list = []
    with open(output_filepath, "a", encoding='utf-8') as f:
        for idx, messages in enumerate(tqdm(prompt_list, desc=f"Generating for {args.eval_dataset}")):
            if idx < existing_count:
                continue
            try:
                # Apply chat template
                text = tokenizer.apply_chat_template(
                    messages,
                    tokenize=False,
                    add_generation_prompt=True
                )

                # Generate (unpack the encoding so the attention mask is
                # passed along with the input ids)
                model_inputs = tokenizer([text], return_tensors="pt").to(args.device)
                generated_ids = model.generate(
                    **model_inputs,
                    max_new_tokens=args.max_tokens,
                    stop_strings=args.stop_strings,
                    tokenizer=tokenizer
                )

                # Decode only the newly generated tokens
                generated_ids = [
                    output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
                ]
                response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]

                # Flatten to one line and drop any <extra_id_1> stop marker;
                # str.strip() treats its argument as a character set, so the
                # marker is removed as a substring instead
                generated_text = response.strip().replace("\n", " ").replace("<extra_id_1>", "").strip()
                output_list.append(generated_text)
                f.write(generated_text + "\n")
            except Exception as ex:
                print(f"Error at index {idx}: {str(ex)}")
                break

    print(f"Generated {len(output_list)} responses for {args.eval_dataset}")

def main():
    """Main function to run HuggingFace model inference."""
    args = get_args()

    print(f"Evaluating model: {args.model_id}")
    print(f"Dataset: {args.eval_dataset}")
    print(f"Device: {args.device}")
    print(f"Num contexts: {args.num_ctx}")
    print(f"Max tokens: {args.max_tokens}")

    # Load tokenizer and model (stop_strings is a generate-time argument,
    # so it is passed in run_inference rather than here)
    tokenizer = AutoTokenizer.from_pretrained(args.model_id)
    model = AutoModelForCausalLM.from_pretrained(args.model_id)
    model.to(args.device)

    # Run inference
    run_inference(args, tokenizer, model)
    print("Inference completed!")


if __name__ == "__main__":
    main()
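
# Example invocation (a sketch: the actual flag names are defined in
# arguments.py, which is not shown here, so these flags are assumptions
# mirroring the attribute names used above):
#
#   python run_generation_hf.py \
#       --model_id <hf-model-id> \
#       --eval_dataset doc2dial \
#       --data_folder <path-to-data> \
#       --output_folder <path-to-outputs> \
#       --num_ctx 5 \
#       --max_tokens 256 \
#       --device cuda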