""" |
ChatRAG-Hi inference and evaluation pipeline wrapper.

This script provides a unified interface to:

1. Run inference for all datasets using HuggingFace models
2. Evaluate all predictions and generate scores
"""
|
|
import os
import argparse
import subprocess
from typing import List, Optional

import pandas as pd
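# Example invocation (the wrapper's filename is not fixed by this file, so substitute
# your own; the flags are defined in get_args() below):
#
#   python run_pipeline.py --mode full \
#       --model-id <hf-model-id-or-path> \
#       --data-folder <path/to/data> \
#       --output-folder <path/to/outputs>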
|
|
ALL_DATASETS = [
    'doc2dial', 'quac', 'qrecc', 'inscit',
    'hybridial',
    'doqa_cooking', 'doqa_travel', 'doqa_movies',
    'convfinqa'
]
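# ALL_DATASETS doubles as the default for --datasets; a subset can be selected at the
# CLI (e.g. `--datasets doc2dial quac`), and --all-datasets restores the full list.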
|
|
def run_inference_for_dataset(
    model_id: str,
    dataset: str,
    data_folder: str,
    output_folder: str,
    device: str = 'cuda',
    num_ctx: int = 5,
    max_tokens: int = 64,
    expected_samples: int = 500,
    limit: Optional[int] = None
) -> bool:
    """
    Run inference for a single dataset

    Args:
        model_id: Model identifier or path
        dataset: Dataset name
        data_folder: Path to data folder
        output_folder: Path to output folder
        device: Device to run on (cuda/cpu)
        num_ctx: Number of contexts
        max_tokens: Maximum number of tokens to generate
        expected_samples: Expected number of samples
        limit: Limit number of samples to process

    Returns:
        bool: True if successful, False otherwise
    """
    print(f"\n{'='*80}")
    print(f"Running inference for dataset: {dataset}")
    print(f"{'='*80}\n")

    # Shell out to the generation script; the flags mirror its CLI.
    cmd = [
        'python', 'run_generation_hf.py',
        '--model-id', model_id,
        '--data-folder', data_folder,
        '--output-folder', output_folder,
        '--eval-dataset', dataset,
        '--device', device,
        '--num-ctx', str(num_ctx),
        '--max-tokens', str(max_tokens),
        '--expected-samples', str(expected_samples)
    ]

    if limit is not None:
        cmd.extend(['--limit', str(limit)])

    try:
        # check=True raises CalledProcessError on a non-zero exit; output is not
        # captured, so the child's logs stream straight to the console.
        subprocess.run(cmd, check=True)
        print(f"✓ Inference completed for {dataset}")
        return True
    except subprocess.CalledProcessError as e:
        print(f"✗ Error running inference for {dataset}: {e}")
        return False
    except Exception as e:
        print(f"✗ Unexpected error for {dataset}: {e}")
        return False
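# Quick single-dataset smoke test (hypothetical model id and paths):
#   run_inference_for_dataset('my-org/my-model', 'doc2dial', 'data/', 'outputs/', limit=5)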
|
|
def run_inference_for_all_datasets(
    model_id: str,
    datasets: List[str],
    data_folder: str,
    output_folder: str,
    device: str = 'cuda',
    num_ctx: int = 5,
    max_tokens: int = 64,
    expected_samples: int = 500,
    limit: Optional[int] = None
) -> dict:
    """
    Run inference for all specified datasets

    Args:
        model_id: Model identifier or path
        datasets: List of dataset names
        data_folder: Path to data folder
        output_folder: Path to output folder
        device: Device to run on (cuda/cpu)
        num_ctx: Number of contexts
        max_tokens: Maximum number of tokens to generate
        expected_samples: Expected number of samples
        limit: Limit number of samples to process

    Returns:
        dict: Dictionary mapping dataset names to success status
    """
    print(f"\n{'#'*80}")
    print(f"# Running Inference for Model: {model_id}")
    print(f"# Total Datasets: {len(datasets)}")
    print(f"{'#'*80}\n")

    results = {}
    for dataset in datasets:
        success = run_inference_for_dataset(
            model_id=model_id,
            dataset=dataset,
            data_folder=data_folder,
            output_folder=output_folder,
            device=device,
            num_ctx=num_ctx,
            max_tokens=max_tokens,
            expected_samples=expected_samples,
            limit=limit
        )
        results[dataset] = success

    print(f"\n{'='*80}")
    print("Inference Summary:")
    print(f"{'='*80}")
    successful = sum(1 for v in results.values() if v)
    print(f"✓ Successful: {successful}/{len(datasets)}")
    print(f"✗ Failed: {len(datasets) - successful}/{len(datasets)}")

    if successful < len(datasets):
        print("\nFailed datasets:")
        for dataset, success in results.items():
            if not success:
                print(f"  - {dataset}")

    return results
|
|
def run_evaluation(
    results_dir: str,
    data_path: str,
    datasets: List[str],
    output_csv: Optional[str] = None
) -> pd.DataFrame:
    """
    Run evaluation for all models and datasets

    Args:
        results_dir: Directory containing model results
        data_path: Path to ground truth data
        datasets: List of dataset names to evaluate
        output_csv: Path to output CSV file

    Returns:
        pd.DataFrame: Evaluation results
    """
    print(f"\n{'#'*80}")
    print("# Running Evaluation")
    print(f"# Results Directory: {results_dir}")
    print(f"# Data Path: {data_path}")
    print(f"{'#'*80}\n")

    # get_scores.py receives each dataset name as a separate value for --datasets.
    cmd = [
        'python', 'get_scores.py',
        '--results-dir', results_dir,
        '--data-path', data_path,
        '--datasets'
    ] + datasets

    if output_csv:
        cmd.extend(['--output-csv', output_csv])

    try:
        subprocess.run(cmd, check=True)
        print("\n✓ Evaluation completed successfully")

        # Read the scores back: either from the explicit --output-csv path or from
        # the scores.csv that get_scores.py is expected to write into results_dir.
        if output_csv:
            csv_path = output_csv
        else:
            csv_path = os.path.join(results_dir, 'scores.csv')

        if os.path.exists(csv_path):
            df = pd.read_csv(csv_path)
            return df
        else:
            print(f"Warning: Output CSV not found at {csv_path}")
            return pd.DataFrame()

    except subprocess.CalledProcessError as e:
        print(f"✗ Error running evaluation: {e}")
        return pd.DataFrame()
    except Exception as e:
        print(f"✗ Unexpected error during evaluation: {e}")
        return pd.DataFrame()
|
|
def run_full_pipeline(
    model_id: str,
    data_folder: str,
    output_folder: str,
    datasets: List[str] = ALL_DATASETS,
    device: str = 'cuda',
    num_ctx: int = 5,
    max_tokens: int = 64,
    expected_samples: int = 500,
    limit: Optional[int] = None,
    skip_inference: bool = False,
    skip_evaluation: bool = False,
    output_csv: Optional[str] = None
) -> pd.DataFrame:
    """
    Run the complete pipeline: inference + evaluation

    Args:
        model_id: Model identifier or path
        data_folder: Path to data folder
        output_folder: Path to output folder
        datasets: List of dataset names
        device: Device to run on (cuda/cpu)
        num_ctx: Number of contexts
        max_tokens: Maximum number of tokens to generate
        expected_samples: Expected number of samples
        limit: Limit number of samples to process
        skip_inference: Skip inference step
        skip_evaluation: Skip evaluation step
        output_csv: Path to output CSV file

    Returns:
        pd.DataFrame: Evaluation results
    """
    print(f"\n{'#'*80}")
    print("# ChatRAG-Hi Full Evaluation Pipeline")
    print(f"{'#'*80}\n")
    print(f"Model: {model_id}")
    print(f"Datasets: {', '.join(datasets)}")
    print(f"Device: {device}")
    print(f"Skip Inference: {skip_inference}")
    print(f"Skip Evaluation: {skip_evaluation}")

    # Step 1: inference (per-dataset success/failure is reported by the helper).
    if not skip_inference:
        run_inference_for_all_datasets(
            model_id=model_id,
            datasets=datasets,
            data_folder=data_folder,
            output_folder=output_folder,
            device=device,
            num_ctx=num_ctx,
            max_tokens=max_tokens,
            expected_samples=expected_samples,
            limit=limit
        )
    else:
        print("\n⊘ Skipping inference step")

    # Step 2: evaluation.
    if not skip_evaluation:
        eval_results = run_evaluation(
            results_dir=output_folder,
            data_path=data_folder,
            datasets=datasets,
            output_csv=output_csv
        )
        return eval_results
    else:
        print("\n⊘ Skipping evaluation step")
        return pd.DataFrame()
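# run_full_pipeline can also be called programmatically; a minimal sketch with
# hypothetical model id and paths:
#   scores = run_full_pipeline('my-org/my-model', 'data/', 'outputs/',
#                              datasets=['doc2dial'], limit=10)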
|
|
def get_args():
    """Parse command line arguments"""
    parser = argparse.ArgumentParser(
        description="Comprehensive wrapper for ChatRAG-Hi inference and evaluation"
    )

    parser.add_argument('--mode', type=str, choices=['inference', 'evaluation', 'full'],
                        default='full',
                        help='Pipeline mode: inference only, evaluation only, or full pipeline')

    parser.add_argument('--model-id', type=str, required=True,
                        help='Model identifier or path')

    parser.add_argument('--data-folder', type=str, required=True,
                        help='Path to data folder containing ground truth JSON files')
    parser.add_argument('--output-folder', type=str, required=True,
                        help='Path to output folder for predictions and scores')

    parser.add_argument('--datasets', type=str, nargs='+',
                        default=ALL_DATASETS,
                        help='List of datasets to process')
    parser.add_argument('--all-datasets', action='store_true',
                        help='Process all available datasets')

    parser.add_argument('--device', type=str, default='cuda',
                        help='Device to run on: cpu or cuda')
    parser.add_argument('--num-ctx', type=int, default=5,
                        help='Number of contexts')
    parser.add_argument('--max-tokens', type=int, default=64,
                        help='Maximum number of tokens to generate')
    parser.add_argument('--expected-samples', type=int, default=500,
                        help='Expected number of samples per dataset')
    parser.add_argument('--limit', type=int, default=None,
                        help='Limit number of samples to process (for testing)')

    parser.add_argument('--output-csv', type=str, default=None,
                        help='Path to output CSV file for scores')

    args = parser.parse_args()

    if args.all_datasets:
        args.datasets = ALL_DATASETS

    return args
|
|
def main():
    """Main entry point"""
    args = get_args()

    os.makedirs(args.output_folder, exist_ok=True)

    # Map --mode onto the skip flags consumed by run_full_pipeline.
    skip_inference = (args.mode == 'evaluation')
    skip_evaluation = (args.mode == 'inference')

    results = run_full_pipeline(
        model_id=args.model_id,
        data_folder=args.data_folder,
        output_folder=args.output_folder,
        datasets=args.datasets,
        device=args.device,
        num_ctx=args.num_ctx,
        max_tokens=args.max_tokens,
        expected_samples=args.expected_samples,
        limit=args.limit,
        skip_inference=skip_inference,
        skip_evaluation=skip_evaluation,
        output_csv=args.output_csv
    )

    if not results.empty and args.mode != 'inference':
        print(f"\n{'='*80}")
        print("Final Evaluation Results:")
        print(f"{'='*80}\n")
        print(results.to_string(index=False))
        print(f"\n{'='*80}\n")
|
|
if __name__ == "__main__":
    main()
|