stefan-it committed on
Commit
e2ee063
·
1 Parent(s): 6bfad66

feat: add evaluation script

Browse files
Files changed (1) hide show
  1. own_evaluation.py +105 -0
own_evaluation.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import json
3
+ import numpy as np
4
+ import torch
5
+ import os
6
+
7
+ from minicons import scorer
8
+ from pathlib import Path
9
+
10
+
11
def load_sentences(filepath):
    """Read a semicolon-delimited CSV of minimal sentence pairs.

    The file is expected to have a header row followed by data rows whose
    first column is the acceptable ("good") sentence and whose second
    column is the unacceptable ("bad") sentence.

    Args:
        filepath: Path to the CSV file.

    Returns:
        A list of ``[good_sentence, bad_sentence]`` pairs.
    """
    sentence_pairs = []
    with open(filepath, 'r', encoding='utf-8') as file:
        reader = csv.reader(file, delimiter=';')
        # Skip the header; the None default avoids StopIteration on an
        # empty file (a bare next(reader) would raise).
        next(reader, None)
        for row in reader:
            # Skip blank or malformed rows (e.g. a trailing newline yields
            # an empty row, which would raise IndexError below).
            if len(row) < 2:
                continue
            sentence_pairs.append([row[0], row[1]])
    return sentence_pairs
21
+
22
def compute_score(data, model, mode):
    """Score a batch of sentences with a minicons scorer.

    Args:
        data: Sequence of sentences to score (here a ``[good, bad]`` pair).
        model: A minicons scorer instance (incremental or masked LM).
        mode: ``'ilm'`` for an incremental (causal) LM scorer, ``'mlm'``
            for a masked LM scorer (uses the within-word L2R PLL metric).

    Returns:
        Per-sentence summed log-probability scores, as returned by
        ``model.sequence_score``.

    Raises:
        ValueError: If ``mode`` is neither ``'ilm'`` nor ``'mlm'``
            (previously this path crashed with an UnboundLocalError).
    """
    if mode == 'ilm':
        return model.sequence_score(data, reduction=lambda x: x.sum(0).item())
    if mode == 'mlm':
        return model.sequence_score(data, reduction=lambda x: x.sum(0).item(),
                                    PLL_metric='within_word_l2r')
    raise ValueError(f"Unknown mode: {mode!r} (expected 'ilm' or 'mlm')")
28
+
29
def process_files(model, mode, model_name):
    """Benchmark ``model`` on every CSV dataset under ``./data/base``.

    For each dataset, computes the accuracy (share of pairs where the
    good sentence outscores the bad one) and the mean score difference,
    prints the summary, and appends it as one JSON line to
    ``results-<model_name>.jsonl`` (slashes in the name replaced by '-').

    Args:
        model: A minicons scorer instance.
        mode: Scoring mode forwarded to ``compute_score`` ('ilm' or 'mlm').
        model_name: Hugging Face model identifier; used for the output
            file name and recorded in each summary line.
    """
    root_folder = Path("./data/base")

    file_names = sorted(str(file) for file in root_folder.iterdir() if file.name.endswith("csv"))

    print(f"Perform benchmarking on the following {len(file_names)} datasets:\n*", "\n* ".join(file_names))

    # `with` guarantees the results file is closed even if an unexpected
    # error escapes the per-file handler (the original handle was only
    # closed on the happy path).
    with open(f'results-{model_name.replace("/", "-")}.jsonl', "wt") as f_out:
        for file_path in file_names:
            try:
                pairs = load_sentences(file_path)
                differences = 0
                correct = 0

                for pair in pairs:
                    score = compute_score(pair, model, mode)
                    if score[0] > score[1]:
                        correct += 1
                    # NOTE(review): accumulated over *all* pairs — this
                    # matches the division by len(pairs) below, but the
                    # flattened original is ambiguous about whether this
                    # line sat inside the `if`; confirm intent.
                    differences += score[0] - score[1]

                summary = {
                    'file_name': file_path,
                    'mean_difference': differences / len(pairs),
                    'accuracy': correct / len(pairs) * 100,
                    'total_pairs': len(pairs),
                    'model_name': model_name,
                }
                f_out.write(json.dumps(summary) + "\n")
                print(summary)

            except Exception as e:
                # Best-effort: report the failure and move on to the next
                # dataset (also covers empty datasets -> ZeroDivisionError).
                print(f"Error processing {file_path}: {str(e)}")
                continue
77
+
78
# Turkish masked-LM checkpoints to benchmark (Hugging Face model ids).
mlm_model_names = [
    "dbmdz/electra-small-turkish-cased-generator",
    "dbmdz/electra-base-turkish-cased-generator",
    "dbmdz/electra-base-turkish-mc4-cased-generator",
    "dbmdz/electra-base-turkish-mc4-uncased-generator",
    "dbmdz/bert-base-turkish-cased",
    "dbmdz/bert-base-turkish-uncased",
    "dbmdz/bert-base-turkish-128k-cased",
    "dbmdz/bert-base-turkish-128k-uncased",
    "dbmdz/distilbert-base-turkish-cased",
    "dbmdz/convbert-base-turkish-cased",
    "dbmdz/convbert-base-turkish-mc4-cased",
    "dbmdz/convbert-base-turkish-mc4-uncased",
]

# Use a GPU when available; minicons moves the model to this device.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# All checkpoints above are masked LMs.
mode = 'mlm'

if __name__ == '__main__':
    # Guard so importing this module (e.g. to reuse load_sentences) does
    # not trigger model downloads and the full benchmark run.
    for model_name in mlm_model_names:
        model = scorer.MaskedLMScorer(model_name, device)

        process_files(
            model=model,
            mode=mode,
            model_name=model_name,
        )