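"""Evaluation script for conversational QA benchmarks.

Computes F1 scores for general QA datasets (Doc2Dial, QuAC, QReCC, INSCIT,
HybriDial, DoQA), exact-match accuracy for ConvFinQA, and answerable /
unanswerable accuracy, then aggregates per-model scores into a CSV.
"""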
import argparse
import copy
import io
import json
import os
import re
import sys

import pandas as pd

from arguments import get_args_scores
from evaluation_utils import quac_correct_retrieved_instance_idx_list, unanswerable_keyphrases
from metrics import F1Metric


def compute_f1_score(predicted_answers, groundtruth_answer, exp_name="default"):
    """Evaluate F1 score between predictions and ground truth answers."""
    print(len(predicted_answers), len(groundtruth_answer))
    if len(predicted_answers) != len(groundtruth_answer):
        # Align lengths when the prediction file is shorter than the ground truth.
        groundtruth_answer = groundtruth_answer[:len(predicted_answers)]

    guess_list = []
    for guess in predicted_answers:
        guess = guess.strip()
        if "</s>" in guess:
            guess = guess.replace("</s>", "")
        guess_list.append(guess)

    answer_list = []
    for answer in groundtruth_answer:
        answer_list.append(answer)

    assert len(guess_list) == len(answer_list), \
        "lengths of guess and answer are different!"

    precision, recall, f1 = F1Metric.compute_all_pairs(guess_list, answer_list)
    print('Method: %s; Precision: %.4f; recall: %.4f; f1: %.4f' % (
        exp_name, precision, recall, f1))

    return f1


def load_groundtruth_file(data_file):
    """Load ground truth answers from a JSON file."""
    with open(data_file, "r") as f:
        examples = json.load(f)

    data = []
    for instance in examples:
        if "answers" in instance:
            answers = instance["answers"]
        elif "answer" in instance:
            if type(instance["answer"]) is str:
                answers = [instance["answer"]]
            elif type(instance["answer"]) is list:
                answers = instance["answer"]
            else:
                answers = [str(instance["answer"])]
        else:
            raise ValueError("need to have answer or answers")
        data.append(answers)

    return data


def load_prediction(data_file):
    """Load predictions from a text file (one prediction per line)."""
    data = []
    with open(data_file, "r") as f:
        for line in f.readlines():
            if "_on" in data_file or "_medium" in data_file or "_high" in data_file:
                # For these output variants, keep only the first 300 characters
                # of each prediction.
                data.append(line.strip()[:300])
            else:
                data.append(line.strip())
    return data


def evaluate_f1(ground_truth_file, prediction_file):
    """Evaluate F1 score for general QA datasets."""
    groundtruth_answers = load_groundtruth_file(ground_truth_file)

    if "inscit" in ground_truth_file:
        # For INSCIT, drop the "cannot answer" response from each reference list.
        groundtruth_answers_update = []
        for answers in groundtruth_answers:
            answers_update = []
            for ans in answers:
                if ans != "Sorry. I cannot find the answer based on the context.":
                    answers_update.append(ans)
            assert len(answers_update) > 0
            groundtruth_answers_update.append(copy.deepcopy(answers_update))
        groundtruth_answers = groundtruth_answers_update

    predicted_answers = load_prediction(prediction_file)

    if "quac" in prediction_file or "doqa" in prediction_file:
        # Normalize any unanswerable phrasing to the canonical "cannot answer" response.
        predicted_answers_new = []
        for pred in predicted_answers:
            pred = pred.lower()
            for keyphrase in unanswerable_keyphrases:
                if keyphrase in pred:
                    pred = "Sorry. I cannot find the answer based on the context."
                    break
            predicted_answers_new.append(pred)
        predicted_answers = predicted_answers_new

    f1_score = compute_f1_score(predicted_answers, groundtruth_answers)
    return f1_score


def evaluate_convfinqa(ground_truth_file, prediction_file):
    """
    Evaluate the ConvFinQA dataset with special numeric matching logic.

    The model produces a long free-form answer, while the gold answer for
    ConvFinQA is either an arithmetic formula or the final executed number.
    An output that contains either the executed number or the arithmetic
    formula is counted as correct.
    """

    def _is_float(string):
        try:
            float(string)
            return True
        except ValueError:
            return False

    with open(ground_truth_file, "r") as f:
        gold_list = json.load(f)

    groundtruth_answers = [item['exe_answer'] for item in gold_list]
    groundtruth_answers_formula = [item['answers'][0] for item in gold_list]

    question_list = [item['messages'][-1]['content'] for item in gold_list]
    predicted_answers = load_prediction(prediction_file)

    print(len(predicted_answers), len(groundtruth_answers))
    if len(predicted_answers) != len(groundtruth_answers):
        groundtruth_answers = groundtruth_answers[:len(predicted_answers)]

    count_exact_match = 0
    for question, pred, gold, gold_formula in zip(question_list, predicted_answers,
                                                  groundtruth_answers, groundtruth_answers_formula):

        original_pred = pred
        # Strip thousands separators and currency/scale tokens before matching.
        original_pred = original_pred.replace(",", "")
        original_pred = original_pred.replace("$", "").replace("million", "").replace("billion", "")

        # Drop parenthesized single words, e.g. "(decrease)".
        pattern = r'\((\b\w+\b)\)'
        original_pred = re.sub(pattern, '', original_pred)

        original_pred = " ".join(original_pred.split())

        if str(gold) in original_pred:
            count_exact_match += 1
        elif str(gold_formula) in original_pred:
            count_exact_match += 1
        elif _is_float(gold) and (str(round(float(gold), 3)) in original_pred or
                                  str(round(float(gold), 2)) in original_pred):
            count_exact_match += 1
        elif "percent" in question and (str(float(gold)*100) in original_pred or
                                        str(round(float(gold)*100, 1)) in original_pred or
                                        str(round(float(gold)*100, 2)) in original_pred):
            count_exact_match += 1
        elif str(gold).endswith(".0") and str(int(gold)) in original_pred:
            count_exact_match += 1
        elif "decrease" in original_pred and _is_float(gold) and gold < 0 and (
                str(-1 * gold) in original_pred):
            # A negative gold value may be expressed as a positive number plus "decrease".
            count_exact_match += 1

    accuracy = count_exact_match / len(predicted_answers)
    print("accuracy of exact match: %.4f" % accuracy)
    return accuracy


def separate_cannot_answer(ground_truth_file, prediction_file):
    """Separate answerable and unanswerable questions."""
    with open(ground_truth_file, "r") as f:
        groundtruth_answers = json.load(f)

    predicted_answers = load_prediction(prediction_file)
    print(len(predicted_answers), len(groundtruth_answers))
    if len(predicted_answers) != len(groundtruth_answers):
        groundtruth_answers = groundtruth_answers[:len(predicted_answers)]

    if "quac" in prediction_file:
        """
        For answerable cases, we want to make sure the retrieved context list contains the gold chunk.
        For the QuAC dataset, we use the top-5 retrieved contexts as inputs;
        quac_correct_retrieved_instance_idx_list is the list of instance indices
        whose top-5 retrieved contexts contain the gold answer.
        """
        answerable_instance_idx_list = quac_correct_retrieved_instance_idx_list
    else:
        answerable_instance_idx_list = None

    # Normalize any unanswerable phrasing to the canonical "cannot answer" response.
    predicted_answers_new = []
    for pred in predicted_answers:
        pred = pred.lower()
        for keyphrase in unanswerable_keyphrases:
            if keyphrase in pred:
                pred = "Sorry. I cannot find the answer based on the context."
                break
        predicted_answers_new.append(pred)
    predicted_answers = predicted_answers_new

    cannot_answer_idx_list = []
    answerable_idx_list = []
    if answerable_instance_idx_list:
        count_idx = 0
    for idx, item in enumerate(groundtruth_answers):
        if 'answers' in item:
            answer = item["answers"][0]
        else:
            answer = item['answer']
        noanswer_response = "Sorry. I cannot find the answer based on the context."

        if answer == noanswer_response:
            cannot_answer_idx_list.append(idx)
            continue

        if answerable_instance_idx_list:
            # Only keep answerable cases whose retrieved contexts contain the gold chunk.
            if count_idx in answerable_instance_idx_list:
                answerable_idx_list.append(idx)
            count_idx += 1
        else:
            answerable_idx_list.append(idx)

    print("number of cannot answer cases: %d (out of %d)" % (len(cannot_answer_idx_list), len(groundtruth_answers)))
    print("number of answerable cases: %d (out of %d)" % (len(answerable_idx_list), len(groundtruth_answers)))

    return predicted_answers, cannot_answer_idx_list, answerable_idx_list


def get_cannot_answer_and_answerable_acc(predicted_answers, cannot_answer_idx_list, answerable_idx_list):
    """Calculate accuracy for answerable and unanswerable questions."""
    noanswer_count = 0
    for idx in cannot_answer_idx_list:
        prediction = predicted_answers[idx]
        prediction = prediction.lower()
        if "sorry" in prediction and "cannot find the answer" in prediction:
            noanswer_count += 1
    cannot_answer_acc = noanswer_count / len(cannot_answer_idx_list) if len(cannot_answer_idx_list) > 0 else 0.0
    print("accuracy of cannot answer cases: %.4f" % cannot_answer_acc)

    answerable_count = 0
    for idx in answerable_idx_list:
        prediction = predicted_answers[idx]
        prediction = prediction.lower()
        if "sorry" in prediction and "cannot find the answer" in prediction:
            # Predicting "cannot answer" for an answerable question counts as wrong.
            continue
        answerable_count += 1
    answerable_acc = answerable_count / len(answerable_idx_list) if len(answerable_idx_list) > 0 else 0.0
    print("accuracy of answerable cases: %.4f" % answerable_acc)


def evaluate_cannot_answer_acc(ground_truth_file, prediction_file):
    """Evaluate accuracy for answerable and unanswerable questions."""
    predicted_answers, cannot_answer_idx_list, answerable_idx_list = \
        separate_cannot_answer(ground_truth_file, prediction_file)

    get_cannot_answer_and_answerable_acc(predicted_answers, cannot_answer_idx_list, answerable_idx_list)


def get_dataset_config(args):
    """
    Return the configuration for all datasets using paths from args.

    Args:
        args: Arguments object with dataset path configurations

    Returns:
        dict: Dataset configuration mapping
    """
    return {
        'doc2dial': {
            'file_suffix': 'doc2dial',
            'ground_truth_path': args.doc2dial_path,
            'eval_function': evaluate_f1
        },
        'quac': {
            'file_suffix': 'quac',
            'ground_truth_path': args.quac_path,
            'eval_function': evaluate_f1
        },
        'qrecc': {
            'file_suffix': 'qrecc',
            'ground_truth_path': args.qrecc_path,
            'eval_function': evaluate_f1
        },
        'inscit': {
            'file_suffix': 'inscit',
            'ground_truth_path': args.inscit_path,
            'eval_function': evaluate_f1
        },
        'hybridial': {
            'file_suffix': 'hybridial',
            'ground_truth_path': args.hybridial_path,
            'eval_function': evaluate_f1
        },
        'doqa_cooking': {
            'file_suffix': 'doqa_cooking',
            'ground_truth_path': args.doqa_cooking_path,
            'eval_function': evaluate_f1
        },
        'doqa_travel': {
            'file_suffix': 'doqa_travel',
            'ground_truth_path': args.doqa_travel_path,
            'eval_function': evaluate_f1
        },
        'doqa_movies': {
            'file_suffix': 'doqa_movies',
            'ground_truth_path': args.doqa_movies_path,
            'eval_function': evaluate_f1
        },
        'convfinqa': {
            'file_suffix': 'convfinqa',
            'ground_truth_path': args.convfinqa_path,
            'eval_function': evaluate_convfinqa
        }
    }


def evaluate_single_dataset(dataset_name, prediction_file, ground_truth_file, eval_function):
    """
    Evaluate a single dataset and return the score.

    Args:
        dataset_name: Name of the dataset
        prediction_file: Path to prediction file
        ground_truth_file: Path to ground truth file
        eval_function: Function to use for evaluation

    Returns:
        float: Evaluation score
    """
    print("-" * 80)
    print(f"Evaluating {dataset_name}")
    print(f"Prediction file: {prediction_file}")
    print(f"Ground truth file: {ground_truth_file}")

    if not os.path.exists(prediction_file):
        print(f"Warning: Prediction file not found: {prediction_file}")
        return None

    if not os.path.exists(ground_truth_file):
        print(f"Warning: Ground truth file not found: {ground_truth_file}")
        return None

    try:
        # Capture stdout so the evaluation function's printout can be parsed as a fallback.
        buffer = io.StringIO()
        sys.stdout = buffer
        score_value = eval_function(ground_truth_file, prediction_file)
        sys.stdout = sys.__stdout__

        # Prefer the value returned by the evaluation function.
        if score_value is not None:
            return float(score_value)

        # Fallback: parse the score from the captured output.
        output = buffer.getvalue()
        if "f1:" in output:
            score = output.split("f1:")[-1].strip()
        elif "accuracy of exact match:" in output:
            score = output.split("accuracy of exact match:")[-1].strip()
        else:
            print(f"Warning: Could not parse score from output: {output}")
            return None

        return float(score)
    except Exception as e:
        print(f"Error evaluating {dataset_name}: {e}")
        sys.stdout = sys.__stdout__
        return None


def evaluate_single_model(model_name, results_dir, data_path, datasets, args):
    """
    Evaluate a single model across all specified datasets.

    Args:
        model_name: Name of the model
        results_dir: Directory containing model results
        data_path: Path to ground truth data
        datasets: List of dataset names to evaluate
        args: Arguments object with configuration

    Returns:
        dict: Dictionary mapping dataset names to scores
    """
    print(f"\n{'='*80}")
    print(f"Evaluating Model: {model_name}")
    print(f"{'='*80}\n")

    output_dir = os.path.join(results_dir, model_name)
    dataset_config = get_dataset_config(args)
    scores = {'model': model_name}

    for dataset_name in datasets:
        if dataset_name not in dataset_config:
            print(f"Warning: Unknown dataset {dataset_name}, skipping...")
            continue

        config = dataset_config[dataset_name]
        prediction_file = os.path.join(output_dir, f"{config['file_suffix']}.txt")
        ground_truth_file = os.path.join(data_path, config['ground_truth_path'])

        score = evaluate_single_dataset(
            dataset_name,
            prediction_file,
            ground_truth_file,
            config['eval_function']
        )

        scores[dataset_name] = score

    return scores


def evaluate_all_models(results_dir, data_path, datasets, args, output_csv=None):
    """
    Evaluate all models in the results directory.

    Args:
        results_dir: Directory containing model results (subdirectories for each model)
        data_path: Path to ground truth data directory
        datasets: List of dataset names to evaluate
        args: Arguments object with configuration
        output_csv: Path to output CSV file (default: <results_dir>/scores.csv)

    Returns:
        pd.DataFrame: DataFrame containing all evaluation scores
    """
    model_names = [d for d in os.listdir(results_dir)
                   if os.path.isdir(os.path.join(results_dir, d))]

    if not model_names:
        print(f"Warning: No model directories found in {results_dir}")
        return pd.DataFrame()

    print(f"\nFound {len(model_names)} model(s): {model_names}\n")

    all_scores = []
    for model_name in model_names:
        scores = evaluate_single_model(model_name, results_dir, data_path, datasets, args)
        all_scores.append(scores)

    df = pd.DataFrame(all_scores)

    # Average over the dataset columns, ignoring missing scores.
    numeric_cols = [col for col in df.columns if col != 'model']
    df['average'] = df[numeric_cols].mean(axis=1, skipna=True)

    if output_csv is None:
        output_csv = os.path.join(results_dir, 'scores.csv')

    df.to_csv(output_csv, index=False)
    print(f"\nScores saved to: {output_csv}")
    print("\nFinal Results:")
    print(df.to_string(index=False))

    return df


def main():
    """Main function to run the evaluation pipeline."""
    args = get_args_scores()

    df = evaluate_all_models(
        results_dir=args.results_dir,
        data_path=args.data_path,
        datasets=args.datasets,
        args=args,
        output_csv=args.output_csv
    )

    return df


if __name__ == "__main__":
    main()