# turblimp-evaluations / own_evaluation.py
# Author: stefan-it
# Commit e2ee063 — "feat: add evaluation script" (3.27 kB)
import csv
import json
import numpy as np
import torch
import os
from minicons import scorer
from pathlib import Path
def load_sentences(filepath):
    """Read acceptable/unacceptable sentence pairs from a semicolon-delimited CSV.

    The first row is treated as a header and skipped; every following row
    contributes ``[row[0], row[1]]`` as a [good_sentence, bad_sentence] pair.
    """
    with open(filepath, 'r', encoding='utf-8') as handle:
        rows = csv.reader(handle, delimiter=';')
        next(rows)  # drop the header row
        return [[row[0], row[1]] for row in rows]
def compute_score(data, model, mode):
    """Score a batch of sentences with a minicons scorer.

    Args:
        data: list of sentences (here: a [good, bad] pair).
        model: a minicons scorer exposing ``sequence_score``.
        mode: ``'ilm'`` for incremental (causal) LMs, ``'mlm'`` for masked LMs.

    Returns:
        A list with one summed log-probability score per sentence.

    Raises:
        ValueError: if ``mode`` is neither ``'ilm'`` nor ``'mlm'``.
    """
    if mode == 'ilm':
        return model.sequence_score(data, reduction=lambda x: x.sum(0).item())
    if mode == 'mlm':
        # PLL variant that masks within-word continuation tokens left-to-right
        # (presumably chosen per the minicons PLL_metric options — see docs).
        return model.sequence_score(
            data,
            reduction=lambda x: x.sum(0).item(),
            PLL_metric='within_word_l2r',
        )
    # Previously an unrecognized mode fell through both branches and crashed
    # with UnboundLocalError on `return score`; fail explicitly instead.
    raise ValueError(f"Unknown mode: {mode!r} (expected 'ilm' or 'mlm')")
def process_files(model, mode, model_name, root_dir="./data/base"):
    """Benchmark ``model`` on every CSV dataset under ``root_dir``.

    For each dataset, scores every [good, bad] sentence pair, then writes one
    JSON summary line (mean score difference, accuracy %, pair count) per
    dataset to ``results-<model_name>.jsonl`` in the current directory.

    Args:
        model: a minicons scorer compatible with :func:`compute_score`.
        mode: scoring mode passed to :func:`compute_score` ('ilm' or 'mlm').
        model_name: HF Hub id; '/' is replaced by '-' in the output filename.
        root_dir: directory containing the benchmark CSVs (default keeps the
            original hard-coded ``./data/base`` behavior).
    """
    root_folder = Path(root_dir)
    # NOTE: endswith("csv") (no dot) intentionally kept from the original;
    # it also matches names like "foocsv", not just "*.csv".
    file_names = sorted(str(f) for f in root_folder.iterdir() if f.name.endswith("csv"))
    print(f"Perform benchmarking on the following {len(file_names)} datasets:\n*", "\n* ".join(file_names))
    out_path = f'results-{model_name.replace("/", "-")}.jsonl'
    # Context manager guarantees the results file is closed even if a dataset
    # raises something the per-file handler below does not catch.
    with open(out_path, "wt") as f_out:
        for file_path in file_names:
            try:
                pairs = load_sentences(file_path)
                if not pairs:
                    # Guard: an empty dataset previously crashed with
                    # ZeroDivisionError (silently masked by the broad except).
                    print(f"Skipping {file_path}: no sentence pairs")
                    continue
                correct = 0
                diff_total = 0.0
                for good, bad in pairs:
                    score = compute_score([good, bad], model, mode)
                    if score[0] > score[1]:
                        correct += 1
                    diff_total += score[0] - score[1]
                summary = {
                    'file_name': file_path,
                    'mean_difference': diff_total / len(pairs),
                    'accuracy': correct / len(pairs) * 100,
                    'total_pairs': len(pairs),
                    'model_name': model_name,
                }
                f_out.write(json.dumps(summary) + "\n")
                print(summary)
            except Exception as e:
                # Best-effort per dataset: report and move on to the next file.
                print(f"Error processing {file_path}: {str(e)}")
                continue
# Turkish masked-LM checkpoints (dbmdz models on the HF Hub) to benchmark.
# NOTE(review): the ELECTRA entries use the *-generator variants — presumably
# because only the generator exposes a masked-LM head; confirm if extending.
mlm_model_names = [
    "dbmdz/electra-small-turkish-cased-generator",
    "dbmdz/electra-base-turkish-cased-generator",
    "dbmdz/electra-base-turkish-mc4-cased-generator",
    "dbmdz/electra-base-turkish-mc4-uncased-generator",
    "dbmdz/bert-base-turkish-cased",
    "dbmdz/bert-base-turkish-uncased",
    "dbmdz/bert-base-turkish-128k-cased",
    "dbmdz/bert-base-turkish-128k-uncased",
    "dbmdz/distilbert-base-turkish-cased",
    "dbmdz/convbert-base-turkish-cased",
    "dbmdz/convbert-base-turkish-mc4-cased",
    "dbmdz/convbert-base-turkish-mc4-uncased",
]
# Entry point: evaluate every checkpoint on all benchmark datasets.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
mode = 'mlm'
for checkpoint in mlm_model_names:
    # Each model is loaded fresh and scored over the full benchmark suite.
    mlm_scorer = scorer.MaskedLMScorer(checkpoint, device)
    process_files(model=mlm_scorer, mode=mode, model_name=checkpoint)