Datasets:

Modalities:
Text
Formats:
json
Languages:
Hindi
ArXiv:
Libraries:
Datasets
pandas
License:
ravirajoshi commited on
Commit
e20ef19
·
verified ·
1 Parent(s): dea540b

Upload 10 files

Browse files
evaluation/README.md ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## Overview
2
+
3
+ The evaluation system consists of three main components:
4
+
5
+ 1. **`run_generation_hf.py`**: Runs inference for individual datasets
6
+ 2. **`get_scores.py`**: Modular evaluation script that calculates scores
7
+ 3. **`run_all_evaluation.py`**: Comprehensive wrapper for running full pipelines
8
+
9
+ ## Inference Step Customization
10
+
11
+ **The inference step must be modified by users based on their specific model requirements.**
12
+
13
+ As the model landscape continuously expands and evolves, the inference scripts provided are **reference implementations** that need to be adapted for your use case. Different models have different:
14
+ - Loading mechanisms
15
+ - Tokenization requirements
16
+ - Generation parameters
17
+ - API interfaces
18
+ - Memory requirements
19
+
20
+ ### Sample Inference Implementations
21
+
22
+ We provide two sample inference scripts - `run_generation_hf.py` and `run_generation_vllm.py`
23
+
24
+ ### How to Customize
25
+
26
+ 1. **Choose or create an inference script** that matches your model's requirements
27
+ 2. **Modify the model loading** section to work with your specific model
28
+ 3. **Adjust generation parameters** (temperature, top_p, max_tokens, etc.)
29
+ 4. **Update the prompt formatting** if your model uses a different template
30
+
31
+ For comprehensive examples of different usage patterns, see **[`example_usage.sh`](./example_usage.sh)**, which includes:
32
+ - Full pipeline execution
33
+ - Inference-only runs
34
+ - Evaluation-only runs
35
+
36
+ **After generating predictions, the evaluation step (`get_scores.py`) remains the same across all models.**
evaluation/arguments.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import argparse
3
+ import os
4
+
5
+ def get_args():
6
+ parser = argparse.ArgumentParser(description="ChatQA-HF")
7
+
8
+ ## model
9
+ parser.add_argument('--model-id', type=str, default='', help='model id')
10
+ parser.add_argument('--model-folder', type=str, default='', help='path to the folder containing the model')
11
+ parser.add_argument('--model-name', type=str, default='', help='name of the model')
12
+
13
+ ## dataset path
14
+ parser.add_argument('--data-folder', type=str, default='', help='path to the datafolder of ChatRAG Bench')
15
+ parser.add_argument('--output-folder', type=str, default='', help='path to the datafolder of ChatRAG Bench')
16
+ parser.add_argument('--eval-dataset', type=str, default='')
17
+ parser.add_argument('--doc2dial-path', type=str, default='doc2dial/test.json')
18
+ parser.add_argument('--convfinqa-path', type=str, default='convfinqa/test.json')
19
+ parser.add_argument('--quac-path', type=str, default='quac/test.json')
20
+ parser.add_argument('--qrecc-path', type=str, default='qrecc/test.json')
21
+ parser.add_argument('--doqa-cooking-path', type=str, default='doqa_cooking/test.json')
22
+ parser.add_argument('--doqa-travel-path', type=str, default='doqa_travel/test.json')
23
+ parser.add_argument('--doqa-movies-path', type=str, default='doqa_movies/test.json')
24
+ parser.add_argument('--hybridial-path', type=str, default='hybridial/test.json')
25
+ parser.add_argument('--inscit-path', type=str, default='inscit/test.json')
26
+
27
+ ## others
28
+ parser.add_argument('--out-seq-len', type=int, default=64)
29
+ parser.add_argument('--num-ctx', type=int, default=5)
30
+ parser.add_argument('--max-tokens', type=int, default=64)
31
+ parser.add_argument('--expected-samples', type=int, default=500, help='expected number of samples in dataset for completion check')
32
+ parser.add_argument('--stop-strings', type=str, nargs='+', default=["<|endoftext|>", "<extra_id_1>", "<extra_id_1>User"], help='stop strings for generation')
33
+ parser.add_argument('--device', type=str, default='cpu', help='device to run on: cpu or cuda')
34
+ parser.add_argument('--limit', type=int, default=None, help='limit the number of samples to process')
35
+
36
+ args = parser.parse_args()
37
+
38
+ return args
39
+
40
def get_args_scores():
    """Parse CLI arguments for the score-computation step.

    Returns:
        argparse.Namespace with result/ground-truth locations and the
        list of datasets to evaluate.
    """
    parser = argparse.ArgumentParser(description="ChatRAG Evaluation Scores")

    # Directory paths
    parser.add_argument('--results-dir', type=str, required=True,
                        help='Directory containing model prediction results (subdirectories for each model)')
    parser.add_argument('--data-path', type=str, required=True,
                        help='Path to ground truth data directory containing JSON files')
    parser.add_argument('--output-csv', type=str, default=None,
                        help='Output CSV file path (default: <results_dir>/scores.csv)')

    # Which datasets to score
    parser.add_argument('--datasets', type=str, nargs='+',
                        default=['doc2dial', 'quac', 'qrecc', 'topiocqa', 'inscit',
                                 'coqa', 'hybridial', 'sqa', 'doqa_cooking',
                                 'doqa_travel', 'doqa_movies', 'convfinqa'],
                        help='List of datasets to evaluate')

    # Per-dataset ground-truth files (relative to --data-path); they all
    # follow the same "<name>/test.json" layout.
    for dataset in ('doc2dial', 'convfinqa', 'quac', 'qrecc', 'doqa_cooking',
                    'doqa_travel', 'doqa_movies', 'hybridial', 'inscit'):
        parser.add_argument(f"--{dataset.replace('_', '-')}-path",
                            type=str, default=f'{dataset}/test.json')

    return parser.parse_args()
evaluation/dataset.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import json
3
+
4
+
5
def load_data(datapath):
    """Load a JSON dataset file and return its parsed contents.

    Args:
        datapath: Path to a JSON file (a list of benchmark samples).

    Returns:
        The deserialized JSON object (typically a list of dicts).
    """
    print("loading data from %s" % datapath)
    # Explicit UTF-8: the benchmark files contain Hindi text, and the
    # platform/locale default encoding (e.g. cp1252 on Windows) would
    # raise UnicodeDecodeError on them.
    with open(datapath, "r", encoding="utf-8") as f:
        data_list = json.load(f)

    return data_list
11
+
12
+
13
def reformat_question(turn_list, dataset_name):
    """Render a conversation into a single prompt string.

    Keeps only the last 7 turns, prepends the long-answer instruction to
    the first remaining user turn, and serializes the dialogue as
    "User: ..." / "Assistant: ..." paragraphs ending with a trailing
    "Assistant:" cue for generation.

    Args:
        turn_list: List of {'role': 'user'|'assistant', 'content': str}
            dicts; the final turn must be from the user.
        dataset_name: One of the ChatRAG-Hi dataset names.

    Returns:
        The formatted prompt string.

    Raises:
        Exception: If dataset_name is not a supported dataset.
    """
    ## only take the latest 7 turns; copy the dicts so we never mutate the
    ## caller's data (the previous in-place edit stacked the instruction
    ## prefix when the same conversation was formatted more than once)
    turn_list = [dict(item) for item in turn_list[-7:]]
    assert turn_list[-1]['role'] == 'user'

    # ChatRAG-Hi available datasets - all use long answer format
    long_answer_dataset_list = ["doc2dial", "quac", "qrecc", "inscit", "doqa_movies", "doqa_travel", "doqa_cooking", "hybridial", "convfinqa"]

    if dataset_name in long_answer_dataset_list:
        for item in turn_list:
            if item['role'] == 'user':
                ## only needs to add it on the first user turn
                item['content'] = 'Please give a full and complete answer for the question. ' + item['content']
                break
    else:
        raise Exception(f"Dataset '{dataset_name}' not supported in ChatRAG-Hi! Available datasets: {long_answer_dataset_list}")

    question = ""
    for item in turn_list:
        if item["role"] == "user":
            question += "User: " + item["content"] + "\n\n"
        else:
            assert item["role"] == "assistant"
            question += "Assistant: " + item["content"] + "\n\n"

    question += "Assistant:"

    return question
42
+
43
+
44
def get_inputs(data_list, dataset_name, tokenizer, num_ctx, max_output_len, max_seq_length=4096):
    """Build the full model prompts (system + contexts + dialogue) per sample.

    Args:
        data_list: Samples, each with 'messages' (turn dicts) and 'ctxs'
            (retrieved contexts carrying 'title' and 'text').
        dataset_name: Dataset identifier understood by reformat_question.
        tokenizer: Object exposing encode()/decode(); used only to measure
            and truncate the context so the prompt fits the token budget.
        num_ctx: Number of top retrieved contexts to include.
        max_output_len: Token budget reserved for the generated answer.
        max_seq_length: Total sequence-length budget (default 4096).

    Returns:
        List of prompt strings, one per sample.
    """
    system = "System: This is a chat between a user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions based on the context. The assistant should also indicate when the answer cannot be found in the context."

    prompts = []
    for sample in data_list:
        question = reformat_question(sample['messages'], dataset_name)

        context = "\n\n".join(
            f"title: {ctx['title']}, source: {ctx['text']}"
            for ctx in sample['ctxs'][:num_ctx]
        )

        ctx_ids = tokenizer.encode(context)
        q_ids = tokenizer.encode(question)
        sys_ids = tokenizer.encode(system)

        # Trim only the context (never the question or the system prompt)
        # when the assembled prompt would not leave max_output_len tokens
        # of headroom within max_seq_length.
        if len(ctx_ids) + len(q_ids) + len(sys_ids) + max_output_len >= max_seq_length:
            keep = max_seq_length - max_output_len - len(q_ids) - len(sys_ids)
            ctx_ids = ctx_ids[:keep]
            context = tokenizer.decode(ctx_ids, skip_special_tokens=True)

        prompts.append(system + "\n\n" + context + "\n\n" + question)

    return prompts
69
+
evaluation/evaluation_utils.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+
2
+
3
 + ## an index list of the samples where the correct context is found in the top-5 retrieved contexts
4
+ quac_correct_retrieved_instance_idx_list = [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 124, 125, 126, 127, 128, 129, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 152, 153, 156, 157, 158, 159, 161, 162, 163, 164, 165, 166, 167, 169, 170, 171, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 188, 189, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 216, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 243, 245, 246, 248, 249, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 284, 285, 287, 289, 290, 291, 292, 293, 294, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 335, 336, 337, 338, 339, 340, 341, 342, 344, 345, 346, 347, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 362, 363, 364, 365, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 415, 417, 419, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 437, 438, 440, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 466, 
467, 468, 469, 470, 471, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 484, 485, 486, 488, 489, 491, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 509, 510, 511, 512, 514, 515, 518, 519, 520, 521, 522, 523, 524, 527, 528, 529, 530, 531, 532, 533, 534, 535, 538, 539, 540, 541, 542, 543, 544, 547, 548, 549, 550, 551, 552, 554, 555, 557, 558, 560, 561, 562, 563, 564, 565, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 582, 583, 584, 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595, 596, 597, 598, 599, 600, 601, 602, 603, 604, 605, 608, 609, 610, 611, 612, 613, 614, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 637, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 652, 653, 655, 656, 658, 659, 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 670, 671, 672, 673, 674, 675, 676, 677, 678, 679, 680, 681, 682, 683, 684, 685, 686, 687, 688, 689, 692, 693, 695, 699, 700, 701, 703, 704, 705, 707, 708, 709, 711, 712, 715, 716, 717, 718, 719, 720, 721, 723, 724, 726, 728, 732, 733, 734, 738, 739, 740, 741, 742, 743, 744, 748, 749, 751, 752, 753, 754, 755, 756, 757, 758, 759, 760, 761, 762, 763, 764, 765, 766, 767, 768, 769, 770, 773, 774, 775, 776, 777, 778, 781, 782, 783, 784, 785, 786, 787, 788, 789, 790, 791, 792, 793, 794, 795, 796, 797, 798, 799, 800, 801, 802, 804, 805, 806, 807, 809, 811, 812, 813, 814, 815, 816, 817, 818, 819, 820, 822, 823, 824, 825, 827, 828, 829, 830, 831, 832, 833, 834, 835, 836, 837, 838, 839, 840, 841, 842, 843, 844, 845, 846, 848, 849, 850, 852, 853, 854, 855, 856, 857, 858, 859, 860, 861, 862, 863, 865, 866, 867, 868, 869, 870, 871, 872, 873, 874, 876, 877, 878, 879, 880, 883, 884, 885, 886, 889, 890, 891, 892, 893, 894, 895, 896, 897, 898, 902, 903, 904, 905, 906, 907, 908, 909, 910, 911, 912, 913, 914, 915, 916, 917, 919, 920, 921, 922, 923, 924, 925, 926, 927, 928, 929, 930, 931, 932, 933, 934, 935, 936, 938, 944, 945, 
946, 947, 948, 949, 950, 951, 952, 953, 954, 955, 956, 959, 960, 961, 962, 963, 964, 965, 966, 967, 968, 969, 972, 974, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, 995, 998, 999, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020, 1022, 1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1067, 1068, 1069, 1070, 1071, 1073, 1074, 1075, 1076, 1077, 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, 1093, 1094, 1095, 1096, 1098, 1099, 1100, 1102, 1103, 1105, 1106, 1107, 1109, 1110, 1113, 1114, 1118, 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1130, 1131, 1133, 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150, 1151, 1152, 1155, 1156, 1157, 1158, 1159, 1160, 1161, 1162, 1163, 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, 1183, 1184, 1185, 1186, 1187, 1188, 1189, 1191, 1192, 1193, 1194, 1195, 1196, 1197, 1198, 1199, 1200, 1201, 1203, 1204, 1205, 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1221, 1223, 1225, 1226, 1227, 1228, 1229, 1230, 1232, 1233, 1234, 1235, 1237, 1239, 1240, 1241, 1242, 1243, 1244, 1245, 1246, 1247, 1248, 1249, 1250, 1251, 1252, 1254, 1255, 1256, 1257, 1258, 1259, 1260, 1261, 1262, 1263, 1264, 1265, 1267, 1270, 1271, 1272, 1273, 1274, 1275, 1276, 1277, 1278, 1279, 1280, 1283, 1284, 1285, 1287, 1288, 1289, 1291, 1292, 1293, 1294, 1295, 1296, 1297, 1298, 1299, 1300, 1301, 1302, 1303, 1309, 1310, 1311, 1312, 1313, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1321, 1322, 1323, 1324, 1325, 1326, 1330, 1331, 1332, 1333, 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1342, 1343, 1345, 1346, 1347, 1348, 1349, 1350, 
1351, 1352, 1353, 1354, 1356, 1357, 1358, 1359, 1360, 1361, 1362, 1363, 1364, 1365, 1366, 1367, 1368, 1369, 1370, 1371, 1372, 1373, 1374, 1375, 1376, 1377, 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1392, 1393, 1394, 1395, 1396, 1397, 1398, 1399, 1402, 1403, 1404, 1405, 1407, 1408, 1410, 1411, 1412, 1413, 1414, 1415, 1416, 1417, 1418, 1419, 1420, 1421, 1422, 1423, 1424, 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432, 1433, 1434, 1435, 1437, 1438, 1439, 1440, 1441, 1442, 1443, 1444, 1445, 1447, 1448, 1449, 1451, 1452, 1453, 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, 1462, 1463, 1467, 1468, 1469, 1471, 1472, 1474, 1475, 1476, 1477, 1478, 1479, 1481, 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489, 1490, 1491, 1492, 1495, 1498, 1504, 1505, 1507, 1508, 1509, 1512, 1513, 1516, 1517, 1518, 1519, 1520, 1521, 1522, 1523, 1524, 1525, 1527, 1528, 1529, 1530, 1531, 1532, 1533, 1536, 1537, 1541, 1542, 1543, 1544, 1545, 1546, 1547, 1549, 1550, 1551, 1552, 1553, 1554, 1555, 1556, 1557, 1559, 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, 1568, 1569, 1570, 1571, 1572, 1573, 1574, 1575, 1576, 1577, 1578, 1579, 1580, 1581, 1583, 1584, 1585, 1586, 1587, 1588, 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1597, 1598, 1599, 1600, 1601, 1602, 1603, 1604, 1608, 1609, 1611, 1612, 1613, 1614, 1615, 1616, 1618, 1619, 1620, 1621, 1622, 1623, 1624, 1625, 1626, 1627, 1628, 1629, 1630, 1631, 1632, 1633, 1634, 1635, 1639, 1640, 1641, 1642, 1643, 1645, 1646, 1647, 1648, 1649, 1650, 1651, 1652, 1653, 1655, 1656, 1657, 1661, 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, 1670, 1671, 1672, 1673, 1674, 1678, 1679, 1681, 1682, 1683, 1686, 1687, 1688, 1689, 1690, 1691, 1694, 1695, 1696, 1697, 1699, 1700, 1701, 1702, 1703, 1704, 1706, 1707, 1708, 1709, 1710, 1711, 1712, 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, 1723, 1724, 1725, 1726, 1729, 1730, 1731, 1732, 1733, 1734, 1735, 1736, 1737, 1738, 1739, 1740, 1741, 1742, 1743, 1744, 1747, 1748, 1749, 1750, 1751, 
1752, 1753, 1754, 1755, 1756, 1758, 1759, 1760, 1762, 1763, 1764, 1765, 1766, 1767, 1768, 1769, 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777, 1779, 1780, 1782, 1783, 1785, 1786, 1787, 1791, 1792, 1793, 1794, 1795, 1796, 1797, 1798, 1800, 1801, 1802, 1803, 1804, 1806, 1807, 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1816, 1819, 1820, 1821, 1822, 1823, 1824, 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832, 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1843, 1844, 1845, 1847, 1848, 1849, 1850, 1851, 1852, 1853, 1854, 1855, 1856, 1857, 1858, 1860, 1863, 1864, 1867, 1868, 1869, 1870, 1871, 1872, 1873, 1874, 1875, 1876, 1878, 1879, 1880, 1881, 1882, 1883, 1884, 1888, 1889, 1890, 1891, 1892, 1893, 1894, 1895, 1896, 1897, 1898, 1899, 1900, 1903, 1904, 1906, 1907, 1908, 1910, 1911, 1912, 1914, 1915, 1917, 1920, 1922, 1923, 1924, 1925, 1926, 1927, 1928, 1929, 1930, 1931, 1932, 1933, 1934, 1935, 1936, 1937, 1938, 1939, 1940, 1941, 1942, 1944, 1946, 1947, 1948, 1950, 1951, 1952, 1955, 1956, 1957, 1958, 1959, 1965, 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1975, 1976, 1980, 1981, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2014, 2015, 2017, 2018, 2019, 2020, 2021, 2023, 2025, 2026, 2027, 2028, 2029, 2033, 2036, 2037, 2038, 2039, 2040, 2041, 2042, 2043, 2044, 2045, 2046, 2047, 2048, 2049, 2050, 2051, 2052, 2053, 2054, 2055, 2056, 2057, 2058, 2060, 2061, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069, 2070, 2071, 2073, 2074, 2075, 2078, 2079, 2083, 2084, 2086, 2087, 2088, 2089, 2090, 2091, 2092, 2093, 2094, 2095, 2096, 2097, 2098, 2099, 2101, 2102, 2103, 2104, 2105, 2106, 2107, 2109, 2110, 2112, 2113, 2114, 2115, 2116, 2117, 2120, 2123, 2124, 2125, 2126, 2127, 2128, 2129, 2132, 2134, 2138, 2139, 2140, 2142, 2143, 2144, 2145, 2146, 2147, 2148, 2149, 2150, 2151, 2152, 2153, 2154, 2155, 2156, 2157, 2159, 2160, 2161, 2162, 2163, 2164, 
2165, 2166, 2167, 2168, 2169, 2170, 2171, 2172, 2174, 2177, 2179, 2180, 2181, 2182, 2183, 2184, 2185, 2186, 2187, 2188, 2189, 2191, 2192, 2193, 2196, 2198, 2199, 2201, 2202, 2205, 2206, 2207, 2209, 2212, 2213, 2218, 2220, 2221, 2222, 2223, 2224, 2225, 2226, 2227, 2228, 2229, 2230, 2231, 2232, 2233, 2234, 2235, 2236, 2237, 2238, 2239, 2240, 2242, 2243, 2244, 2245, 2246, 2247, 2248, 2249, 2250, 2255, 2256, 2257, 2258, 2259, 2260, 2261, 2262, 2264, 2265, 2266, 2267, 2268, 2269, 2270, 2271, 2272, 2273, 2274, 2275, 2276, 2277, 2278, 2279, 2281, 2282, 2283, 2284, 2285, 2287, 2288, 2289, 2290, 2291, 2292, 2293, 2294, 2295, 2299, 2301, 2302, 2303, 2304, 2305, 2306, 2308, 2309, 2311, 2312, 2313, 2314, 2315, 2317, 2318, 2319, 2320, 2321, 2322, 2323, 2324, 2325, 2326, 2327, 2330, 2331, 2332, 2333, 2334, 2335, 2336, 2337, 2338, 2339, 2340, 2342, 2343, 2344, 2346, 2348, 2349, 2350, 2351, 2352, 2353, 2354, 2355, 2356, 2357, 2358, 2361, 2362, 2364, 2365, 2366, 2367, 2368, 2369, 2370, 2371, 2379, 2384, 2385, 2386, 2387, 2388, 2389, 2391, 2392, 2393, 2394, 2395, 2396, 2397, 2398, 2399, 2400, 2401, 2402, 2403, 2404, 2405, 2406, 2407, 2408, 2409, 2410, 2411, 2412, 2413, 2414, 2419, 2420, 2421, 2424, 2425, 2426, 2427, 2428, 2429, 2430, 2431, 2432, 2434, 2435, 2436, 2437, 2438, 2439, 2440, 2441, 2442, 2443, 2444, 2445, 2446, 2447, 2448, 2449, 2450, 2451, 2452, 2453, 2454, 2455, 2456, 2457, 2458, 2459, 2460, 2462, 2463, 2464, 2466, 2467, 2469, 2470, 2472, 2473, 2475, 2476, 2477, 2478, 2481, 2482, 2483, 2484, 2485, 2486, 2487, 2488, 2490, 2491, 2492, 2493, 2494, 2495, 2496, 2497, 2498, 2499, 2500, 2501, 2502, 2503, 2504, 2505, 2506, 2507, 2508, 2509, 2511, 2512, 2514, 2515, 2516, 2517, 2518, 2519, 2520, 2521, 2522, 2523, 2524, 2525, 2526, 2527, 2528, 2529, 2530, 2531, 2532, 2533, 2534, 2535, 2536, 2537, 2541, 2542, 2543, 2544, 2545, 2546, 2548, 2551, 2552, 2553, 2555, 2556, 2557, 2558, 2559, 2560, 2561, 2562, 2563, 2564, 2565, 2566, 2572, 2573, 2574, 2575, 2576, 2577, 2578, 2579, 2580, 
2581, 2582, 2584, 2585, 2586, 2587, 2588, 2589, 2590, 2591, 2593, 2594, 2595, 2596, 2599, 2600, 2601, 2606, 2607, 2608, 2609, 2610, 2611, 2612, 2613, 2615, 2616, 2618, 2619, 2620, 2623, 2624, 2625, 2626, 2627, 2628, 2629, 2630, 2631, 2633, 2634, 2635, 2636, 2637, 2638, 2639, 2640, 2641, 2642, 2643, 2644, 2645, 2646, 2647, 2648, 2649, 2650, 2651, 2652, 2653, 2655, 2656, 2657, 2658, 2659, 2660, 2661, 2662, 2663, 2664, 2666, 2667, 2668, 2669, 2670, 2671, 2673, 2674, 2675, 2676, 2677, 2678, 2679, 2680, 2681, 2682, 2684, 2687, 2688, 2691, 2692, 2693, 2694, 2695, 2697, 2698, 2699, 2700, 2701, 2702, 2703, 2704, 2705, 2706, 2707, 2708, 2709, 2710, 2711, 2712, 2713, 2714, 2715, 2716, 2717, 2719, 2721, 2722, 2723, 2724, 2725, 2726, 2727, 2728, 2729, 2730, 2731, 2732, 2737, 2738, 2739, 2740, 2741, 2742, 2744, 2745, 2746, 2747, 2748, 2749, 2750, 2751, 2752, 2755, 2756, 2757, 2758, 2759, 2760, 2761, 2762, 2763, 2764, 2768, 2769, 2771, 2773, 2774, 2775, 2776, 2777, 2778, 2783, 2784, 2785, 2786, 2787, 2788, 2789, 2790, 2791, 2792, 2793, 2794, 2795, 2796, 2797, 2799, 2800, 2801, 2802, 2803, 2804, 2805, 2806, 2807, 2808, 2809, 2810, 2811, 2812, 2813, 2814, 2815, 2816, 2817, 2818, 2819, 2822, 2823, 2824, 2825, 2826, 2827, 2829, 2830, 2831, 2832, 2834, 2835, 2836, 2837, 2838, 2839, 2841, 2842, 2843, 2845, 2846, 2848, 2849, 2850, 2852, 2853, 2854, 2855, 2856, 2857, 2858, 2859, 2860, 2861, 2862, 2863, 2864, 2865, 2866, 2867, 2868, 2869, 2870, 2871, 2872, 2873, 2875, 2876, 2877, 2878, 2879, 2880, 2881, 2882, 2883, 2884, 2885, 2886, 2887, 2888, 2889, 2890, 2891, 2893, 2894, 2895, 2896, 2897, 2899, 2900, 2901, 2902, 2903, 2904, 2905, 2906, 2907, 2908, 2909, 2910, 2911, 2913, 2914, 2915, 2916, 2917, 2918, 2919, 2920, 2922, 2923, 2924, 2925, 2926, 2928, 2929, 2930, 2931, 2932, 2933, 2934, 2935, 2936, 2937, 2940, 2943, 2944, 2945, 2947, 2948, 2949, 2950, 2951, 2952, 2953, 2954, 2955, 2957, 2959, 2961, 2962, 2963, 2964, 2965, 2966, 2967, 2969, 2970, 2971, 2972, 2973, 2974, 2975, 2976, 2977, 
2978, 2979, 2980, 2981, 2982, 2983, 2984, 2985, 2986, 2987, 2988, 2989, 2990, 2991, 2992, 2993, 2994, 2995, 2998, 2999, 3000, 3001, 3003, 3004, 3005, 3006, 3007, 3008, 3009, 3010, 3011, 3012, 3015, 3016, 3017, 3018, 3019, 3020, 3021, 3022, 3023, 3024, 3025, 3026, 3027, 3028, 3029, 3030, 3031, 3032, 3033, 3037, 3040, 3041, 3043, 3044, 3045, 3046, 3047, 3049, 3050, 3051, 3052, 3053, 3054, 3055, 3056, 3057, 3058, 3059, 3060, 3061, 3062, 3063, 3064, 3065, 3066, 3067, 3068, 3069, 3070, 3071, 3072, 3073, 3074, 3075, 3076, 3077, 3078, 3079, 3080, 3082, 3083, 3084, 3086, 3087, 3089, 3090, 3091, 3093, 3094, 3095, 3096, 3097, 3098, 3099, 3101, 3102, 3103, 3104, 3105, 3106, 3107, 3110, 3111, 3115, 3116, 3118, 3119, 3120, 3121, 3122, 3123, 3124, 3125, 3126, 3127, 3128, 3129, 3130, 3131, 3132, 3133, 3134, 3135, 3136, 3137, 3138, 3139, 3140, 3141, 3142, 3144, 3145, 3146, 3147, 3148, 3149, 3150, 3151, 3152, 3153, 3154, 3155, 3156, 3157, 3158, 3159, 3160, 3161, 3162, 3163, 3164, 3165, 3166, 3167, 3168, 3169, 3171, 3172, 3175, 3176, 3177, 3178, 3179, 3180, 3181, 3182, 3183, 3185, 3186, 3187, 3189, 3190, 3191, 3192, 3193, 3194, 3195, 3196, 3197, 3198, 3199, 3200, 3201, 3203, 3204, 3205, 3206, 3207, 3208, 3209, 3210, 3211, 3212, 3214, 3215, 3216, 3217, 3218, 3220, 3221, 3222, 3223, 3225, 3226, 3227, 3228, 3229, 3230, 3231, 3232, 3233, 3234, 3235, 3236, 3237, 3238, 3239, 3240, 3241, 3242, 3243, 3244, 3245, 3246, 3247, 3248, 3249, 3254, 3255, 3256, 3257, 3258, 3259, 3260, 3261, 3262, 3263, 3264, 3265, 3266, 3267, 3268, 3269, 3270, 3271, 3273, 3274, 3275, 3276, 3277, 3278, 3279, 3280, 3281, 3282, 3283, 3285, 3286, 3287, 3288, 3289, 3290, 3291, 3292, 3293, 3294, 3295, 3296, 3297, 3298, 3299, 3300, 3301, 3302, 3303, 3304, 3305, 3306, 3307, 3308, 3309, 3310, 3311, 3313, 3314, 3316, 3317, 3318, 3319, 3320, 3321, 3323, 3325, 3326, 3327, 3328, 3329, 3330, 3331, 3332, 3333, 3334, 3335, 3336, 3337, 3338, 3339, 3340, 3341, 3342, 3343, 3344, 3345, 3346, 3351, 3353, 3354, 3355, 3356, 3359, 3363, 
3365, 3366, 3367, 3368, 3369, 3370, 3371, 3372, 3373, 3374, 3375, 3377, 3379, 3380, 3381, 3382, 3383, 3384, 3387, 3388, 3389, 3390, 3391, 3392, 3393, 3394, 3395, 3401, 3402, 3403, 3406, 3407, 3408, 3409, 3410, 3412, 3413, 3414, 3415, 3416, 3417, 3418, 3419, 3420, 3422, 3423, 3424, 3425, 3426, 3427, 3428, 3430, 3431, 3432, 3433, 3434, 3436, 3437, 3438, 3439, 3440, 3441, 3442, 3443, 3444, 3445, 3447, 3448, 3449, 3450, 3451, 3452, 3453, 3454, 3456, 3457, 3459, 3460, 3461, 3462, 3463, 3464, 3465, 3467, 3469, 3470, 3471, 3472, 3473, 3474, 3475, 3476, 3477, 3478, 3479, 3480, 3481, 3482, 3483, 3484, 3485, 3486, 3487, 3488, 3489, 3490, 3491, 3492, 3493, 3494, 3498, 3499, 3503, 3504, 3505, 3508, 3509, 3510, 3511, 3512, 3513, 3514, 3515, 3516, 3517, 3518, 3519, 3520, 3521, 3522, 3523, 3524, 3525, 3526, 3527, 3528, 3529, 3530, 3531, 3532, 3533, 3534, 3535, 3536, 3537, 3538, 3539, 3540, 3541, 3542, 3543, 3544, 3545, 3548, 3549, 3550, 3551, 3552, 3553, 3554, 3555, 3556, 3557, 3558, 3559, 3560, 3561, 3562, 3563, 3564, 3565, 3566, 3567, 3568, 3569, 3570, 3571, 3572, 3573, 3574, 3575, 3576, 3577, 3578, 3579, 3582, 3583, 3584, 3586, 3587, 3588, 3589, 3590, 3591, 3592, 3593, 3594, 3595, 3596, 3597, 3598, 3600, 3601, 3602, 3604, 3609, 3610, 3611, 3612, 3614, 3615, 3616, 3617, 3618, 3619, 3622, 3623, 3625, 3626, 3628, 3630, 3631, 3632, 3633, 3634, 3635, 3636, 3637, 3638, 3639, 3640, 3641, 3643, 3644, 3645, 3646, 3647, 3648, 3649, 3650, 3651, 3652, 3653, 3654, 3655, 3656, 3657, 3659, 3660, 3661, 3662, 3663, 3664, 3665, 3666, 3670, 3671, 3672, 3673, 3674, 3675, 3676, 3677, 3678, 3679, 3680, 3682, 3683, 3684, 3685, 3686, 3687, 3688, 3689, 3690, 3691, 3692, 3693, 3694, 3695, 3696, 3697, 3698, 3699, 3700, 3704, 3705, 3706, 3708, 3709, 3710, 3711, 3712, 3713, 3714, 3716, 3718, 3719, 3720, 3722, 3724, 3726, 3727, 3730, 3731, 3732, 3734, 3735, 3736, 3737, 3738, 3739, 3740, 3741, 3742, 3744, 3745, 3746, 3747, 3748, 3749, 3753, 3754, 3755, 3756, 3757, 3758, 3759, 3760, 3762, 3763, 3764, 3765, 
3766, 3770, 3771, 3772, 3773, 3774, 3775, 3776, 3777, 3778, 3779, 3780, 3781, 3782, 3783, 3784, 3785, 3786, 3787, 3788, 3790, 3791, 3792, 3793, 3794, 3795, 3796, 3797, 3798, 3799, 3800, 3802, 3803, 3804, 3805, 3806, 3807, 3808, 3809, 3810, 3811, 3812, 3813, 3814, 3815, 3816, 3817, 3818, 3819, 3820, 3821, 3822, 3826, 3828, 3829, 3830, 3831, 3833, 3834, 3835, 3836, 3837, 3838, 3839, 3840, 3841, 3842, 3843, 3845, 3846, 3850, 3851, 3852, 3853, 3854, 3856, 3857, 3858, 3859, 3860, 3862, 3863, 3864, 3865, 3866, 3867, 3868, 3869, 3871, 3873, 3874, 3876, 3877, 3878, 3879, 3881, 3882, 3886, 3887, 3888, 3889, 3890, 3891, 3892, 3893, 3894, 3895, 3896, 3897, 3898, 3899, 3900, 3901, 3902, 3903, 3904, 3905, 3906, 3907, 3908, 3911, 3912, 3913, 3915, 3916, 3917, 3918, 3919, 3921, 3922, 3923, 3924, 3925, 3926, 3927, 3929, 3930, 3931, 3932, 3933, 3934, 3935, 3938, 3939, 3940, 3941, 3943, 3944, 3945, 3946, 3947, 3948, 3949, 3950, 3951, 3952, 3953, 3954, 3956, 3957, 3958, 3959, 3960, 3963, 3965, 3969, 3970, 3971, 3972, 3973, 3974, 3975, 3976, 3978, 3981, 3982, 3983, 3984, 3985, 3986, 3987, 3988, 3989, 3990, 3991, 3992, 3995, 3996, 4000, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008, 4009, 4010, 4011, 4012, 4013, 4014, 4015, 4016, 4017, 4018, 4019, 4020, 4021, 4022, 4023, 4024, 4025, 4026, 4027, 4028, 4029, 4030, 4040, 4043, 4045, 4047, 4049, 4050, 4051, 4052, 4053, 4054, 4055, 4056, 4057, 4059, 4060, 4064, 4065, 4066, 4067, 4073, 4074, 4075, 4076, 4077, 4078, 4079, 4080, 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, 4094, 4095, 4096, 4097, 4098, 4099, 4100, 4101, 4102, 4103, 4104, 4105, 4106, 4107, 4108, 4109, 4110, 4111, 4112, 4113, 4116, 4117, 4120, 4122, 4123, 4124, 4125, 4126, 4127, 4128, 4129, 4130, 4131, 4132, 4133, 4134, 4135, 4136, 4137, 4138, 4139, 4140, 4146, 4147, 4148, 4149, 4150, 4153, 4154, 4155, 4156, 4157, 4158, 4159, 4160, 4161, 4162, 4163, 4164, 4165, 4166, 4168, 4169, 4170, 4171, 4172, 4174, 4175, 4177, 4178, 4179, 4180, 4181, 4184, 
4186, 4188, 4189, 4190, 4191, 4193, 4194, 4195, 4196, 4197, 4198, 4199, 4200, 4201, 4202, 4203, 4205, 4206, 4207, 4208, 4209, 4210, 4211, 4212, 4213, 4214, 4216, 4218, 4219, 4220, 4221, 4222, 4223, 4224, 4225, 4226, 4227, 4228, 4230, 4231, 4232, 4233, 4234, 4235, 4236, 4237, 4238, 4239, 4240, 4241, 4242, 4243, 4246, 4247, 4248, 4249, 4250, 4252, 4253, 4255, 4257, 4258, 4259, 4260, 4261, 4262, 4263, 4264, 4265, 4266, 4267, 4268, 4269, 4270, 4271, 4272, 4273, 4274, 4275, 4276, 4277, 4278, 4279, 4281, 4282, 4283, 4284, 4285, 4288, 4289, 4290, 4291, 4292, 4294, 4295, 4296, 4297, 4298, 4300, 4301, 4303, 4305, 4306, 4307, 4308, 4309, 4310, 4311, 4312, 4313, 4314, 4315, 4316, 4317, 4318, 4319, 4320, 4321, 4322, 4323, 4324, 4325, 4326, 4327, 4329, 4330, 4331, 4332, 4333, 4334, 4335, 4339, 4340, 4341, 4342, 4343, 4345, 4346, 4347, 4349, 4350, 4352, 4353, 4355, 4356, 4357, 4358, 4359, 4360, 4361, 4362, 4363, 4364, 4365, 4366, 4367, 4368, 4370, 4371, 4372, 4373, 4374, 4376, 4377, 4378, 4379, 4380, 4381, 4382, 4383, 4384, 4385, 4387, 4388, 4389, 4390, 4391, 4392, 4393, 4394, 4395, 4396, 4397, 4398, 4400, 4402, 4403, 4404, 4405, 4406, 4407, 4408, 4409, 4411, 4412, 4413, 4414, 4415, 4416, 4417, 4418, 4419, 4420, 4421, 4422, 4423, 4424, 4425, 4426, 4427, 4428, 4429, 4430, 4431, 4432, 4433, 4434, 4435, 4436, 4437, 4438, 4439, 4440, 4442, 4444, 4445, 4446, 4447, 4449, 4450, 4451, 4452, 4453, 4454, 4455, 4456, 4457, 4458, 4462, 4463, 4464, 4465, 4466, 4467, 4469, 4470, 4471, 4472, 4473, 4474, 4475, 4476, 4477, 4478, 4479, 4480, 4481, 4482, 4483, 4484, 4485, 4486, 4487, 4488, 4489, 4490, 4491, 4492, 4493, 4494, 4495, 4496, 4497, 4498, 4499, 4500, 4501, 4502, 4505, 4506, 4507, 4508, 4509, 4510, 4511, 4512, 4513, 4514, 4515, 4516, 4517, 4518, 4519, 4520, 4521, 4522, 4523, 4524, 4525, 4526, 4527, 4528, 4529, 4530, 4531, 4532, 4533, 4534, 4535, 4536, 4537, 4538, 4539, 4540, 4541, 4542, 4543, 4544, 4545, 4546, 4547, 4548, 4552, 4553, 4554, 4555, 4559, 4561, 4562, 4563, 4565, 4566, 4567, 
4568, 4569, 4570, 4572, 4574, 4575, 4576, 4577, 4578, 4579, 4580, 4581, 4582, 4583, 4584, 4585, 4586, 4587, 4588, 4589, 4590, 4591, 4593, 4594, 4595, 4596, 4597, 4598, 4599, 4601, 4603, 4604, 4606, 4607, 4608, 4609, 4610, 4611, 4612, 4613, 4615, 4616, 4617, 4618, 4619, 4622, 4623, 4624, 4626, 4627, 4629, 4630, 4631, 4632, 4633, 4634, 4635, 4636, 4637, 4638, 4639, 4640, 4641, 4644, 4645, 4646, 4647, 4648, 4649, 4650, 4651, 4652, 4654, 4655, 4656, 4657, 4659, 4660, 4661, 4662, 4663, 4666, 4667, 4668, 4669, 4670, 4671, 4672, 4673, 4674, 4675, 4676, 4678, 4679, 4680, 4681, 4682, 4683, 4684, 4685, 4686, 4687, 4688, 4689, 4690, 4691, 4692, 4693, 4694, 4695, 4696, 4697, 4699, 4700, 4701, 4702, 4703, 4704, 4705, 4706, 4707, 4708, 4709, 4710, 4713, 4716, 4717, 4718, 4719, 4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, 4728, 4729, 4731, 4732, 4733, 4734, 4735, 4736, 4737, 4738, 4739, 4740, 4741, 4742, 4743, 4744, 4748, 4749, 4750, 4753, 4754, 4755, 4756, 4757, 4759, 4761, 4763, 4764, 4765, 4766, 4767, 4768, 4769, 4770, 4771, 4772, 4773, 4774, 4775, 4776, 4777, 4778, 4779, 4780, 4781, 4782, 4783, 4784, 4785, 4786, 4787, 4788, 4789, 4790, 4795, 4796, 4797, 4798, 4799, 4800, 4801, 4802, 4803, 4804, 4805, 4806, 4807, 4808, 4809, 4810, 4811, 4812, 4815, 4816, 4817, 4818, 4819, 4820, 4821, 4822, 4823, 4825, 4826, 4827, 4829, 4830, 4831, 4833, 4834, 4835, 4836, 4837, 4838, 4840, 4841, 4842, 4843, 4844, 4846, 4847, 4848, 4849, 4850, 4851, 4852, 4853, 4854, 4855, 4856, 4857, 4858, 4859, 4860, 4861, 4862, 4865, 4866, 4867, 4869, 4870, 4872, 4873, 4874, 4875, 4876, 4877, 4878, 4881, 4882, 4885, 4886, 4888, 4890, 4891, 4892, 4893, 4894, 4896, 4897, 4898, 4900, 4901, 4904, 4905, 4906, 4907, 4909, 4910, 4911, 4912, 4913, 4914, 4915, 4916, 4917, 4918, 4919, 4920, 4921, 4923, 4924, 4925, 4926, 4927, 4928, 4929, 4930, 4931, 4932, 4933, 4934, 4935, 4936, 4937, 4938, 4939, 4940, 4941, 4942, 4943, 4944, 4945, 4946, 4947, 4948, 4949, 4950, 4951, 4953, 4954, 4955, 4956, 4957, 4958, 4960, 4963, 
4964, 4965, 4966, 4967, 4968, 4969, 4970, 4971, 4972, 4973, 4974, 4977, 4978, 4979, 4981, 4982, 4983, 4984, 4985, 4988, 4989, 4993, 4994, 4995, 4996, 4997, 4998, 4999, 5000, 5001, 5002, 5003, 5004, 5005, 5006, 5010, 5011, 5016, 5019, 5020, 5021, 5022, 5023, 5024, 5025, 5026, 5027, 5028, 5029, 5030, 5031, 5032, 5033, 5034, 5035, 5036, 5037, 5038, 5039, 5040, 5041, 5044, 5045, 5046, 5047, 5048, 5049, 5051, 5052, 5053, 5054, 5055, 5057, 5058, 5059, 5060, 5061, 5062, 5063, 5064, 5065, 5066, 5067, 5068, 5069, 5070, 5071, 5072, 5073, 5074, 5075, 5076, 5077, 5080, 5081, 5082, 5083, 5084, 5085, 5086, 5087, 5088, 5090, 5091, 5092, 5093, 5094, 5095, 5096, 5097, 5098, 5099, 5100, 5101, 5102, 5103, 5104, 5105, 5106, 5107, 5108, 5113, 5115, 5118, 5120, 5123, 5124, 5126, 5127, 5128, 5129, 5130, 5134, 5135, 5136, 5137, 5138, 5139, 5140, 5141, 5142, 5143, 5144, 5145, 5146, 5147, 5148, 5149, 5150, 5151, 5152, 5153, 5154, 5155, 5156, 5157, 5158, 5162, 5163, 5164, 5165, 5166, 5167, 5168, 5170, 5171, 5172, 5173, 5174, 5175, 5176, 5177, 5178, 5179, 5180, 5181, 5182, 5183, 5185, 5186, 5187, 5188, 5189, 5191, 5192, 5193, 5194, 5195, 5196, 5197, 5198, 5199, 5200, 5201, 5202, 5203, 5204, 5205, 5206, 5208, 5210, 5211, 5212, 5213, 5214, 5215, 5216, 5217, 5218, 5221, 5222, 5223, 5224, 5225, 5226, 5227, 5228, 5229, 5230, 5231, 5232, 5233, 5234, 5235, 5237, 5240, 5241, 5244, 5245, 5246, 5247, 5248, 5249, 5250, 5251, 5252, 5253, 5254, 5255, 5256, 5257, 5258, 5260, 5261, 5262, 5263, 5264, 5265, 5266, 5267, 5268, 5269, 5270, 5271, 5272, 5273, 5278, 5280, 5281, 5282, 5283, 5284, 5285, 5286, 5287, 5288, 5289, 5290, 5291, 5293, 5294, 5302, 5303, 5304, 5305, 5306, 5307, 5308, 5309, 5310, 5311, 5312, 5313, 5314, 5315, 5316, 5317, 5318, 5319, 5320, 5321, 5322, 5323, 5324, 5325, 5326, 5327, 5328, 5338, 5340, 5342, 5343, 5344, 5345, 5346, 5347, 5348, 5349, 5350, 5351, 5352, 5353, 5354, 5356, 5357, 5358, 5360, 5361, 5362, 5363, 5364, 5365, 5366, 5367, 5369, 5370, 5371, 5372, 5373, 5374, 5375, 5376, 5377, 
5378, 5379, 5380, 5381, 5382, 5383, 5384, 5386, 5389, 5390, 5393, 5394, 5395, 5396, 5397, 5399, 5400, 5401, 5402, 5404, 5405, 5408, 5412, 5413, 5414, 5415, 5416, 5417, 5418, 5419, 5421, 5422, 5423, 5424, 5425, 5426, 5427, 5429, 5430, 5431, 5432, 5433, 5434, 5435, 5436, 5439, 5441, 5442, 5443, 5444, 5445, 5446, 5447, 5449, 5450, 5451, 5452, 5453, 5454, 5455, 5456, 5457, 5458, 5459, 5460, 5461, 5462, 5463, 5464, 5465, 5466, 5467, 5470, 5471, 5472, 5473, 5475, 5476, 5477, 5478, 5479, 5480, 5483, 5484, 5485, 5486, 5487, 5488, 5489, 5490, 5491, 5492, 5493, 5494, 5496, 5497, 5499, 5500, 5501, 5502, 5503, 5504, 5505, 5506, 5507, 5508, 5509, 5510, 5511, 5512, 5513, 5514, 5515, 5516, 5517, 5518, 5520, 5521, 5522, 5523, 5524, 5525, 5526, 5527, 5528, 5529, 5530, 5531, 5532, 5533, 5534, 5535, 5536, 5537, 5538, 5539, 5540, 5541, 5542, 5543, 5544, 5546, 5547, 5548, 5549, 5550, 5551, 5552, 5553, 5554, 5555, 5556, 5557, 5558, 5559, 5560, 5561, 5563, 5564, 5565, 5566, 5567, 5568, 5569, 5571, 5573, 5574, 5576, 5578, 5579, 5580, 5581, 5582, 5583, 5584, 5589, 5590, 5591, 5593, 5594, 5595, 5596, 5597, 5598, 5599, 5600, 5601, 5602, 5603, 5604, 5605, 5606, 5607, 5608, 5609, 5611, 5612, 5613, 5614, 5616, 5617, 5618, 5619, 5620, 5622, 5623, 5624, 5625, 5626, 5627, 5628, 5629, 5630, 5631, 5632, 5633, 5634, 5635, 5636, 5637, 5638, 5639, 5640, 5641, 5642, 5643, 5644, 5646, 5647, 5648, 5649, 5650, 5651, 5652, 5653, 5654, 5655, 5656, 5657, 5658, 5659, 5660, 5661, 5662, 5663, 5664, 5665, 5666, 5667, 5668, 5669, 5670, 5671, 5673, 5674, 5675, 5676, 5677, 5678, 5679, 5680, 5681, 5682, 5683, 5684, 5685, 5687, 5688, 5689, 5690, 5691, 5692, 5693, 5700, 5701, 5702, 5703, 5704, 5705, 5706, 5708, 5709, 5710, 5711, 5712, 5713, 5714, 5717, 5718, 5719, 5720, 5721, 5722, 5723, 5724, 5725, 5726, 5727, 5728, 5734, 5736, 5737, 5738, 5739, 5740, 5741, 5742, 5743, 5744, 5745, 5746, 5747, 5748, 5749, 5750, 5751, 5752, 5753, 5754, 5755, 5756, 5757, 5758, 5759, 5760, 5761, 5762, 5763, 5764, 5765, 5766, 5767, 5768, 
5769, 5771, 5772, 5776, 5777, 5778, 5779, 5780, 5781, 5782, 5783, 5784, 5785, 5786, 5787, 5788, 5789, 5790, 5791, 5792, 5794, 5795, 5796, 5797, 5798, 5799, 5801, 5802, 5804, 5807, 5808, 5809, 5813, 5814, 5815, 5816, 5819, 5821, 5822, 5823, 5824, 5825, 5826, 5827, 5828, 5829, 5832, 5834, 5835, 5836, 5837, 5839, 5840, 5841, 5842, 5843, 5844, 5845, 5846, 5847, 5848, 5850, 5851, 5852, 5853, 5856, 5859, 5862, 5863, 5864, 5865]
5
+
6
+
7
# English keyphrases that signal a model declined to answer; predictions
# containing any of these are normalized to a canonical "cannot answer"
# response before scoring (see get_scores.py).
# NOTE(review): "couldnot answer" has no space -- possibly intentional to
# catch a common model typo; confirm before "fixing".
unanswerable_keyphrases = ["cannot find", "can't find", "not able to", "unable to", "does not provide", "cannot provide", "cannot answer", "couldnot answer", "can't answer", "couldn't answer", "cannot be found", "cannot be determined", "do not have", "couldn't find", "no information", "does not mention", "doesn't mention", "not explicitly mentioned", "not explicitly explain", "can not find", "could not find", "does not specify", "doesn't provide", "doesn't specify", "there is no", "not mentioned", "don't have", "don't know", "does not include", "doesn't include", "does not contain", "doesn't contain", "not provided", "does not indicate", "doesn't indicate", "does not disclose", "doesn't disclose"]
evaluation/example_usage.sh ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash

# Sample invocations of the ChatRAG-Hi evaluation pipeline.
# Adjust MODEL_ID / paths for your setup before running.

MODEL_ID="nvidia/Nemotron-4-Mini-Hindi-4B-Instruct"
DATA_FOLDER="ChatRAG-Hi/data"
OUTPUT_FOLDER="ChatRAG-Hi/results"

# 1) Inference + scoring for every dataset (--limit 10 keeps the demo fast).
echo "Running full pipeline for all datasets"
python run_all_evaluation.py \
    --mode full \
    --model-id "$MODEL_ID" \
    --data-folder "$DATA_FOLDER" \
    --output-folder "$OUTPUT_FOLDER" \
    --all-datasets \
    --device cuda \
    --num-ctx 5 \
    --max-tokens 64 \
    --limit 10

# 2) Same full pipeline, restricted to three datasets.
echo "Evaluate specific dataset subset"
python run_all_evaluation.py \
    --mode full \
    --model-id "$MODEL_ID" \
    --data-folder "$DATA_FOLDER" \
    --output-folder "$OUTPUT_FOLDER" \
    --datasets doc2dial quac inscit \
    --device cuda

# 3) Scoring only, reusing predictions already on disk.
echo "Evaluation only (predictions already exist)"
python run_all_evaluation.py \
    --mode evaluation \
    --model-id "$MODEL_ID" \
    --data-folder "$DATA_FOLDER" \
    --output-folder "$OUTPUT_FOLDER" \
    --all-datasets


# 4) Direct scoring with get_scores.py, bypassing the wrapper.
echo "Direct evaluation with get_scores.py"
python get_scores.py \
    --results-dir "$OUTPUT_FOLDER" \
    --data-path "$DATA_FOLDER" \
    --datasets doc2dial quac qrecc inscit hybridial doqa_cooking doqa_travel doqa_movies convfinqa \
    --output-csv "${OUTPUT_FOLDER}/scores.csv"
evaluation/get_scores.py ADDED
@@ -0,0 +1,499 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ from evaluation_utils import quac_correct_retrieved_instance_idx_list
3
+ from evaluation_utils import unanswerable_keyphrases
4
+ from arguments import get_args_scores
5
+ import json
6
+ from metrics import F1Metric
7
+ import copy
8
+ import re
9
+ import pandas as pd
10
+ import os
11
+ import io
12
+ import sys
13
+ import argparse
14
+
15
def compute_f1_score(predicted_answers, groundtruth_answer, exp_name="default"):
    """Compute mean token-level F1 between predictions and gold answer lists.

    Args:
        predicted_answers: list of prediction strings.
        groundtruth_answer: list of gold-answer lists (one list per example).
        exp_name: label used only in the printed summary line.

    Returns:
        float: mean F1 as computed by F1Metric.compute_all_pairs.

    Note: the printed "f1:" line is parsed downstream by
    evaluate_single_dataset, so its exact format must not change.
    """
    print(len(predicted_answers), len(groundtruth_answer))
    if len(predicted_answers) != len(groundtruth_answer):
        # Score only as many gold answers as there are predictions.
        groundtruth_answer = groundtruth_answer[:len(predicted_answers)]

    # Strip whitespace and any leftover end-of-sequence markers; replace()
    # is a no-op when "</s>" is absent, matching the original guarded branch.
    guess_list = [guess.strip().replace("</s>", "") for guess in predicted_answers]
    answer_list = list(groundtruth_answer)

    assert len(guess_list) == len(answer_list), \
        "lengths of guess and answer are different!"

    precision, recall, f1 = F1Metric.compute_all_pairs(guess_list, answer_list)
    print('Method: %s; Precision: %.4f; recall: %.4f; f1: %.4f' % (
        exp_name, precision, recall, f1))

    return f1
40
+
41
+
42
def load_groundtruth_file(data_file):
    """Load the list of gold answers for every example in a JSON data file.

    Each example must carry either an "answers" list or a singular "answer"
    field; the latter is normalized to a one-element (or as-is) list.

    Args:
        data_file: path to a JSON file containing a list of example dicts.

    Returns:
        list: one list of gold answers per example.

    Raises:
        ValueError: if an example has neither "answer" nor "answers".
    """
    with open(data_file, "r") as f:
        examples = json.load(f)

    data = []
    for instance in examples:
        if "answers" in instance:
            answers = instance["answers"]
        elif "answer" in instance:
            answer = instance["answer"]
            # Normalize the singular field to a list of answers.
            # isinstance() replaces the non-idiomatic `type(...) is` checks.
            if isinstance(answer, str):
                answers = [answer]
            elif isinstance(answer, list):
                answers = answer
            else:
                # e.g. a numeric answer -- stringify it.
                answers = [str(answer)]
        else:
            raise ValueError("need to have answer or answers")
        data.append(answers)

    return data
63
+
64
+
65
def load_prediction(data_file):
    """Load one prediction per line from a text file.

    Files whose name contains "_on", "_medium" or "_high" (reasoning-mode
    outputs) are truncated to the first 300 characters per line.

    Args:
        data_file: path to the prediction text file.

    Returns:
        list[str]: stripped prediction strings, one per input line.
    """
    # Hoist the filename test out of the loop: it does not depend on the line.
    truncate = any(tag in data_file for tag in ("_on", "_medium", "_high"))
    data = []
    # Iterate the file directly instead of materializing readlines().
    with open(data_file, "r") as f:
        for line in f:
            line = line.strip()
            data.append(line[:300] if truncate else line)
    return data
75
+
76
+
77
def evaluate_f1(ground_truth_file, prediction_file):
    """Evaluate token-level F1 for the general QA datasets.

    Args:
        ground_truth_file: JSON file with gold answers ("answer"/"answers").
        prediction_file: text file with one prediction per line.

    Returns:
        float: mean F1 over the scored examples.
    """
    groundtruth_answers = load_groundtruth_file(ground_truth_file)

    # Special handling for inscit: drop the default "cannot answer" string
    # that was appended to every gold answer list of that dataset.
    if "inscit" in ground_truth_file:
        groundtruth_answers_update = []
        for answers in groundtruth_answers:
            answers_update = [
                ans for ans in answers
                if ans != "Sorry. I cannot find the answer based on the context."
            ]
            assert len(answers_update) > 0
            groundtruth_answers_update.append(copy.deepcopy(answers_update))
        groundtruth_answers = groundtruth_answers_update

    predicted_answers = load_prediction(prediction_file)

    # Special handling for quac and doqa (datasets with unanswerable
    # questions): map refusals onto one canonical Hindi refusal string.
    if "quac" in prediction_file or "doqa" in prediction_file:
        predicted_answers_new = []
        for pred in predicted_answers:
            pred = pred.lower()
            # FIX: the original looped over unanswerable_keyphrases here but
            # never used the loop variable -- only the fixed Hindi phrase
            # below was ever tested, so the loop was dead and is removed.
            # Presumably the English keyphrase list is not needed for Hindi
            # predictions (TODO confirm); behavior is unchanged.
            if "उत्तर नहीं" in pred:
                pred = "क्षमा करें, मैं संदर्भ के आधार पर उत्तर नहीं ढूँढ पा रहा हूँ।"
            predicted_answers_new.append(pred)
        predicted_answers = predicted_answers_new

    f1_score = compute_f1_score(predicted_answers, groundtruth_answers)
    return f1_score
110
+
111
+
112
def evaluate_convfinqa(ground_truth_file, prediction_file):
    """
    Evaluate ConvFinQA with numeric exact-match logic.

    The model emits a long free-form answer, while the ConvFinQA gold is
    either an arithmetic formula or the final executed number; an output
    containing either form counts as correct.

    Args:
        ground_truth_file: JSON file with 'exe_answer', 'answers', 'messages'.
        prediction_file: text file with one prediction per line.

    Returns:
        float: exact-match accuracy.
    """

    def _is_float(string):
        # True when `string` can be parsed as a float.
        try:
            float(string)
            return True
        except ValueError:
            return False

    with open(ground_truth_file, "r") as f:
        gold_list = json.load(f)

    groundtruth_answers = [item['exe_answer'] for item in gold_list]
    groundtruth_answers_formula = [item['answers'][0] for item in gold_list]

    # Last-turn question of each conversation.
    question_list = [item['messages'][-1]['content'] for item in gold_list]
    predicted_answers = load_prediction(prediction_file)

    print(len(predicted_answers), len(groundtruth_answers))
    if len(predicted_answers) != len(groundtruth_answers):
        groundtruth_answers = groundtruth_answers[:len(predicted_answers)]

    count_exact_match = 0
    for question, pred, gold, gold_formula in zip(question_list, predicted_answers,
                        groundtruth_answers, groundtruth_answers_formula):

        original_pred = pred
        # Convert 1,000,000 into 1000000
        original_pred = original_pred.replace(",", "")

        # Convert $10 million + $20 million into 10 + 20 (English and Hindi
        # units).  FIX: the original replaced "बिलियन " with a trailing
        # space, which missed occurrences at the end of the string.
        original_pred = original_pred.replace("$", "").replace("million", "").replace(
            "billion", "").replace("मिलियन", "").replace("बिलियन", "")

        # Convert 10 (2017) + 20 (2018) into 10 + 20
        pattern = r'\((\b\w+\b)\)'
        original_pred = re.sub(pattern, '', original_pred)

        # Collapse runs of whitespace to single spaces.
        original_pred = " ".join(original_pred.split())

        if str(gold) in original_pred:
            count_exact_match += 1
        elif str(gold_formula) in original_pred:
            count_exact_match += 1
        elif _is_float(gold) and (str(round(float(gold), 3)) in original_pred or
                                  str(round(float(gold), 2)) in original_pred):
            count_exact_match += 1
        elif "percent" in question and _is_float(gold) and (
                # FIX: guard with _is_float -- the original called
                # float(gold) unconditionally here and crashed on
                # non-numeric golds.
                str(float(gold) * 100) in original_pred or
                str(round(float(gold) * 100, 1)) in original_pred or
                str(round(float(gold) * 100, 2)) in original_pred):
            count_exact_match += 1
        elif str(gold).endswith(".0") and str(int(float(gold))) in original_pred:
            # Gold is an integer like 80.0 -- also accept the bare 80.
            # FIX: int(float(...)) so a string gold such as "80.0" does not
            # raise; identical result for float golds.
            count_exact_match += 1
        elif ("decrease" in original_pred and isinstance(gold, (int, float))
                and gold < 0 and str(-1 * gold) in original_pred):
            # Model says "a decrease of 10 million" while gold is -10.
            # FIX: isinstance guard -- the original compared `gold < 0`
            # directly, a TypeError when gold is a numeric string.
            count_exact_match += 1

    accuracy = count_exact_match / len(predicted_answers)
    print("accuracy of exact match: %.4f" % accuracy)
    return accuracy
184
+
185
+
186
def separate_cannot_answer(ground_truth_file, prediction_file):
    """Separate answerable and unanswerable questions.

    Args:
        ground_truth_file: JSON file with one gold record per example.
        prediction_file: text file with one prediction per line.

    Returns:
        tuple: (predicted_answers, cannot_answer_idx_list,
        answerable_idx_list) -- the normalized predictions plus the indices
        of the unanswerable and answerable examples.
    """
    # Load ground truth
    with open(ground_truth_file, "r") as f:
        groundtruth_answers = json.load(f)
    # Load prediction
    predicted_answers = load_prediction(prediction_file)
    print(len(predicted_answers), len(groundtruth_answers))
    # Score only the prefix covered by predictions (e.g. a truncated run).
    if len(predicted_answers) != len(groundtruth_answers):
        groundtruth_answers = groundtruth_answers[:len(predicted_answers)]

    if "quac" in prediction_file:
        """
        For answerable cases, we want to make sure the retrieved context list contains the gold chunk.
        For QuAC dataset, we use top-5 retrieved contexts as inputs, quac_correct_retrieved_instance_idx_list
        is the index list where the top-5 retrieved context contains the gold answer
        """
        answerable_instance_idx_list = quac_correct_retrieved_instance_idx_list
    else:
        answerable_instance_idx_list = None

    # Normalize any prediction containing an "I can't answer"-style keyphrase
    # to the canonical refusal string so it can be matched exactly below.
    predicted_answers_new = []
    for pred in predicted_answers:
        pred = pred.lower()
        for keyphrase in unanswerable_keyphrases:
            if keyphrase in pred:
                pred = "Sorry. I cannot find the answer based on the context."
                break
        predicted_answers_new.append(pred)
    predicted_answers = predicted_answers_new

    cannot_answer_idx_list = []
    answerable_idx_list = []
    if answerable_instance_idx_list:
        # count_idx counts only the answerable (non-refusal) gold instances;
        # presumably quac_correct_retrieved_instance_idx_list is indexed over
        # that same sub-sequence -- TODO confirm against how it was built.
        count_idx = 0
    for idx, item in enumerate(groundtruth_answers):
        if 'answers' in item:
            answer = item["answers"][0]
        else:
            answer = item['answer']
        noanswer_response = "Sorry. I cannot find the answer based on the context."

        # Gold marks the question as unanswerable.
        if answer == noanswer_response:
            cannot_answer_idx_list.append(idx)
            continue

        if answerable_instance_idx_list:
            # For QuAC keep only answerable cases whose top-5 retrieved
            # contexts actually contain the gold chunk.
            if count_idx in answerable_instance_idx_list:
                answerable_idx_list.append(idx)
            count_idx += 1
        else:
            answerable_idx_list.append(idx)

    print("number of cannot answer cases: %d (out of %d)" % (len(cannot_answer_idx_list), len(groundtruth_answers)))
    print("number of answerable cases: %d (out of %d)" % (len(answerable_idx_list), len(groundtruth_answers)))

    return predicted_answers, cannot_answer_idx_list, answerable_idx_list
243
+
244
+
245
def get_cannot_answer_and_answerable_acc(predicted_answers, cannot_answer_idx_list, answerable_idx_list):
    """Compute and print accuracy for unanswerable and answerable questions.

    A prediction counts as a refusal when it contains both "sorry" and
    "cannot find the answer" (case-insensitive), matching the canonical
    refusal string produced by separate_cannot_answer.

    Args:
        predicted_answers: list of prediction strings.
        cannot_answer_idx_list: indices of unanswerable questions.
        answerable_idx_list: indices of answerable questions.

    Returns:
        tuple: (cannot_answer_acc, answerable_acc).  The original computed
        these values but returned nothing; returning them is
        backward-compatible and lets callers use the numbers directly.
    """
    def _refused(prediction):
        # True when the model declined to answer.
        p = prediction.lower()
        return "sorry" in p and "cannot find the answer" in p

    # Unanswerable questions: the model should refuse.
    noanswer_count = sum(1 for idx in cannot_answer_idx_list
                         if _refused(predicted_answers[idx]))
    cannot_answer_acc = noanswer_count / len(cannot_answer_idx_list) if len(cannot_answer_idx_list) > 0 else 0.0
    print("accuracy of cannot answer cases: %.4f" % cannot_answer_acc)

    # Answerable questions: the model should NOT refuse.
    answerable_count = sum(1 for idx in answerable_idx_list
                           if not _refused(predicted_answers[idx]))
    answerable_acc = answerable_count / len(answerable_idx_list) if len(answerable_idx_list) > 0 else 0.0
    print("accuracy of answerable cases: %.4f" % answerable_acc)

    return cannot_answer_acc, answerable_acc
267
+
268
+
269
def evaluate_cannot_answer_acc(ground_truth_file, prediction_file):
    """Report accuracy on answerable vs. unanswerable questions.

    Thin wrapper: splits the examples into the two groups, then prints the
    per-group accuracies.
    """
    split = separate_cannot_answer(ground_truth_file, prediction_file)
    preds, cannot_idx, answerable_idx = split
    get_cannot_answer_and_answerable_acc(preds, cannot_idx, answerable_idx)
275
+
276
def get_dataset_config(args):
    """
    Return the evaluation configuration for every supported dataset.

    Args:
        args: parsed arguments carrying one `<dataset>_path` attribute per
            dataset (e.g. args.doc2dial_path).

    Returns:
        dict: dataset name -> {'file_suffix', 'ground_truth_path',
        'eval_function'} mapping.
    """
    # Every dataset uses file_suffix == its own name and, except ConvFinQA,
    # is scored with token-level F1 -- build the table instead of repeating
    # nine near-identical dict literals.
    f1_datasets = ['doc2dial', 'quac', 'qrecc', 'inscit', 'hybridial',
                   'doqa_cooking', 'doqa_travel', 'doqa_movies']
    config = {
        name: {
            'file_suffix': name,
            'ground_truth_path': getattr(args, f"{name}_path"),
            'eval_function': evaluate_f1,
        }
        for name in f1_datasets
    }
    # ConvFinQA uses numeric exact-match instead of F1.
    config['convfinqa'] = {
        'file_suffix': 'convfinqa',
        'ground_truth_path': args.convfinqa_path,
        'eval_function': evaluate_convfinqa,
    }
    return config
333
+
334
def evaluate_single_dataset(dataset_name, prediction_file, ground_truth_file, eval_function):
    """
    Evaluate a single dataset and return the score.

    Args:
        dataset_name: Name of the dataset (used in log messages only).
        prediction_file: Path to prediction file.
        ground_truth_file: Path to ground truth file.
        eval_function: callable (ground_truth_file, prediction_file) -> score;
            may instead print the score in a "f1: X" or
            "accuracy of exact match: X" line and return None.

    Returns:
        float or None: the score, or None when a file is missing or the
        evaluation fails.
    """
    print("-" * 80)
    print(f"Evaluating {dataset_name}")
    print(f"Prediction file: {prediction_file}")
    print(f"Ground truth file: {ground_truth_file}")

    if not os.path.exists(prediction_file):
        print(f"Warning: Prediction file not found: {prediction_file}")
        return None

    if not os.path.exists(ground_truth_file):
        print(f"Warning: Ground truth file not found: {ground_truth_file}")
        return None

    # Capture stdout so the score can be parsed from the eval function's
    # prints when it does not return a value directly.
    # FIX: the original reset stdout to sys.__stdout__ (wrong when stdout was
    # already wrapped, e.g. by an outer capture) and printed its error
    # message while stdout was still redirected, so the message vanished
    # into the buffer.  We save/restore the actual stream with try/finally
    # and only print after restoring.
    saved_stdout = sys.stdout
    buffer = io.StringIO()
    sys.stdout = buffer
    try:
        score_value = eval_function(ground_truth_file, prediction_file)
    except Exception as e:
        sys.stdout = saved_stdout
        print(f"Error evaluating {dataset_name}: {e}")
        return None
    finally:
        sys.stdout = saved_stdout

    try:
        # If the function already returns the score, use it.
        if score_value is not None:
            return float(score_value)

        # Otherwise, parse it from the captured output.
        output = buffer.getvalue()
        if "f1:" in output:
            score = output.split("f1:")[-1].strip()
        elif "accuracy of exact match:" in output:
            score = output.split("accuracy of exact match:")[-1].strip()
        else:
            print(f"Warning: Could not parse score from output: {output}")
            return None

        return float(score)
    except Exception as e:
        print(f"Error evaluating {dataset_name}: {e}")
        return None
386
+
387
+
388
def evaluate_single_model(model_name, results_dir, data_path, datasets, args):
    """
    Evaluate one model across all requested datasets.

    Args:
        model_name: Name of the model (its results subdirectory).
        results_dir: Directory containing model results.
        data_path: Path to ground truth data.
        datasets: List of dataset names to evaluate.
        args: Arguments object with configuration.

    Returns:
        dict: {'model': model_name, <dataset>: score-or-None, ...}
    """
    rule = '=' * 80
    print(f"\n{rule}")
    print(f"Evaluating Model: {model_name}")
    print(f"{rule}\n")

    model_output_dir = os.path.join(results_dir, model_name)
    dataset_config = get_dataset_config(args)
    scores = {'model': model_name}

    for name in datasets:
        cfg = dataset_config.get(name)
        if cfg is None:
            print(f"Warning: Unknown dataset {name}, skipping...")
            continue

        # Predictions live at <results>/<model>/<suffix>.txt; gold data is
        # resolved relative to data_path.
        scores[name] = evaluate_single_dataset(
            name,
            os.path.join(model_output_dir, f"{cfg['file_suffix']}.txt"),
            os.path.join(data_path, cfg['ground_truth_path']),
            cfg['eval_function'],
        )

    return scores
429
+
430
+
431
def evaluate_all_models(results_dir, data_path, datasets, args, output_csv=None):
    """
    Evaluate all models in the results directory.

    Args:
        results_dir: Directory containing model results (one subdirectory
            per model).
        data_path: Path to ground truth data directory.
        datasets: List of dataset names to evaluate.
        args: Arguments object with configuration.
        output_csv: Path to output CSV file (default: <results_dir>/scores.csv).

    Returns:
        pd.DataFrame: all evaluation scores plus an 'average' column
        (empty frame when no model directories exist).
    """
    # Each immediate subdirectory of results_dir is one model's output.
    model_names = [d for d in os.listdir(results_dir)
                   if os.path.isdir(os.path.join(results_dir, d))]

    if not model_names:
        print(f"Warning: No model directories found in {results_dir}")
        return pd.DataFrame()

    print(f"\nFound {len(model_names)} model(s): {model_names}\n")

    # FIX: the original pre-built an empty DataFrame with explicit columns
    # here and immediately discarded it when df was reassigned below; that
    # dead code is removed.
    all_scores = [evaluate_single_model(model_name, results_dir, data_path, datasets, args)
                  for model_name in model_names]
    df = pd.DataFrame(all_scores)

    # Average across the dataset columns, ignoring missing scores.
    numeric_cols = [col for col in df.columns if col != 'model']
    df['average'] = df[numeric_cols].mean(axis=1, skipna=True)

    # Save to CSV
    if output_csv is None:
        output_csv = os.path.join(results_dir, 'scores.csv')

    df.to_csv(output_csv, index=False)
    print(f"\nScores saved to: {output_csv}")
    print("\nFinal Results:")
    print(df.to_string(index=False))

    return df
482
+
483
def main():
    """Entry point: parse arguments and score every model directory.

    Returns:
        pd.DataFrame: the full score table produced by evaluate_all_models.
    """
    args = get_args_scores()
    return evaluate_all_models(
        results_dir=args.results_dir,
        data_path=args.data_path,
        datasets=args.datasets,
        args=args,
        output_csv=args.output_csv,
    )
497
+
498
# Run the full scoring pipeline when executed as a script.
if __name__ == "__main__":
    main()
evaluation/metrics.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # The following code is adapted from
3
+ # https://github.com/facebookresearch/ParlAI/blob/master/parlai/core/metrics.py,
4
+ # which is licensed under the MIT license. More details on the license can be
5
+ # found at https://github.com/facebookresearch/ParlAI/blob/master/LICENSE.
6
+
7
+ """Provides standard metric evaluations for dialog."""
8
+
9
+ from collections import Counter
10
+ from typing import List
11
+ import numpy as np
12
+ import re
13
+
14
# Matches the English articles as whole words.
re_art = re.compile(r'\b(a|an|the)\b')
# Matches the ASCII punctuation characters stripped before scoring.
re_punc = re.compile(r'[!"#$%&()*+,-./:;<=>?@\[\]\\^`{|}~_\']')


def normalize_answer(s):
    """
    Lower-case *s*, replace punctuation with spaces, drop English articles,
    and collapse runs of whitespace to single spaces.
    """
    lowered = s.lower()
    without_punc = re_punc.sub(' ', lowered)
    without_articles = re_art.sub(' ', without_punc)
    return ' '.join(without_articles.split())
27
+
28
+
29
class F1Metric:
    """
    Helper class which computes token-level F1.
    """

    @staticmethod
    def _prec_recall_f1_score(pred_items, gold_items):
        """
        Compute precision, recall and f1 given a set of gold and prediction items.

        :param pred_items: iterable of predicted values
        :param gold_items: iterable of gold values
        :return: tuple (p, r, f1) for precision, recall, f1
        """
        common = Counter(gold_items) & Counter(pred_items)
        num_same = sum(common.values())
        if num_same == 0:
            return 0, 0, 0
        precision = 1.0 * num_same / len(pred_items)
        recall = 1.0 * num_same / len(gold_items)
        f1 = (2 * precision * recall) / (precision + recall)
        return precision, recall, f1

    @staticmethod
    def compute_each_pair(guess: str, answer: str):
        """Score one guess against one gold answer.

        Returns (None, None, None) when the gold answer is empty (the pair
        should be skipped) and (0, 0, 0) when the guess is empty.
        """
        if answer == "":
            return None, None, None
        if guess == "":
            return 0, 0, 0
        g_tokens = normalize_answer(guess).split()
        a_tokens = normalize_answer(answer).split()

        precision, recall, f1 = F1Metric._prec_recall_f1_score(g_tokens, a_tokens)
        return precision, recall, f1

    @staticmethod
    def compute_all_pairs(guesses: List[str], answers: List[list]):
        """Mean precision/recall/F1 over all guesses; each guess is scored
        against its best-matching (highest-F1) gold answer.

        FIX: the original took `f1 = max(f1_list_tmp)` but appended the
        precision/recall left over from the *last* gold answer of the inner
        loop (or even stale values from a previous guess when every gold
        answer was empty), so the reported precision/recall did not
        correspond to the reported F1.  All three values now come from the
        same best-scoring gold answer; the mean F1 itself is unchanged.
        """
        assert len(guesses) == len(answers)
        precision_list, recall_list, f1_list = [], [], []
        for guess, answer in zip(guesses, answers):
            assert type(answer) == list
            best = None  # (precision, recall, f1) of the best gold answer
            for answer_each in answer:
                answer_each = answer_each.strip()
                if answer_each == "":
                    continue
                precision, recall, f1 = F1Metric.compute_each_pair(guess, answer_each)
                if f1 is None:
                    continue
                if best is None or f1 > best[2]:
                    best = (precision, recall, f1)

            # No usable gold answer for this guess: skip it entirely.
            if best is None:
                continue
            precision_list.append(best[0])
            recall_list.append(best[1])
            f1_list.append(best[2])

        return np.mean(precision_list), np.mean(recall_list), np.mean(f1_list)
evaluation/run_all_evaluation.py ADDED
@@ -0,0 +1,366 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ This script provides a unified interface to:
4
+ 1. Run inference for all datasets using HuggingFace models
5
+ 2. Evaluate all predictions and generate scores
6
+ """
7
+
8
+ import os
9
+ import sys
10
+ import argparse
11
+ import subprocess
12
+ from typing import List, Optional
13
+ import pandas as pd
14
+
15
# Every benchmark dataset this pipeline knows how to run and score; must
# stay in sync with the dataset configuration in get_scores.py.
ALL_DATASETS = [
    'doc2dial', 'quac', 'qrecc', 'inscit',
    'hybridial',
    'doqa_cooking', 'doqa_travel', 'doqa_movies',
    'convfinqa'
]
21
+
22
def run_inference_for_dataset(
    model_id: str,
    dataset: str,
    data_folder: str,
    output_folder: str,
    device: str = 'cuda',
    num_ctx: int = 5,
    max_tokens: int = 64,
    expected_samples: int = 500,
    limit: Optional[int] = None
) -> bool:
    """
    Run inference for a single dataset.

    Args:
        model_id: Model identifier or path
        dataset: Dataset name
        data_folder: Path to data folder
        output_folder: Path to output folder
        device: Device to run on (cuda/cpu)
        num_ctx: Number of contexts
        max_tokens: Maximum number of tokens to generate
        expected_samples: Expected number of samples
        limit: Limit number of samples to process

    Returns:
        bool: True if successful, False otherwise
    """
    print(f"\n{'='*80}")
    print(f"Running inference for dataset: {dataset}")
    print(f"{'='*80}\n")

    cmd = [
        # FIX: sys.executable instead of the bare 'python' so the child runs
        # under the same interpreter/venv as this script ('python' may
        # resolve to a different installation, or not exist at all).
        sys.executable, 'run_generation_hf.py',
        '--model-id', model_id,
        '--data-folder', data_folder,
        '--output-folder', output_folder,
        '--eval-dataset', dataset,
        '--device', device,
        '--num-ctx', str(num_ctx),
        '--max-tokens', str(max_tokens),
        '--expected-samples', str(expected_samples)
    ]

    if limit is not None:
        cmd.extend(['--limit', str(limit)])

    try:
        # The unused `result =` binding of the original is dropped; only the
        # exit status (via check=True) matters here.
        subprocess.run(cmd, check=True, capture_output=False, text=True)
        print(f"✓ Inference completed for {dataset}")
        return True
    except subprocess.CalledProcessError as e:
        print(f"✗ Error running inference for {dataset}: {e}")
        return False
    except Exception as e:
        # Broad catch is deliberate: this function reports success as a bool
        # so the caller can continue with the remaining datasets.
        print(f"✗ Unexpected error for {dataset}: {e}")
        return False
79
+
80
+
81
def run_inference_for_all_datasets(
    model_id: str,
    datasets: List[str],
    data_folder: str,
    output_folder: str,
    device: str = 'cuda',
    num_ctx: int = 5,
    max_tokens: int = 64,
    expected_samples: int = 500,
    limit: Optional[int] = None
) -> dict:
    """
    Run inference for every dataset in *datasets* and report per-dataset success.

    Args:
        model_id: Model identifier or path
        datasets: List of dataset names
        data_folder: Path to data folder
        output_folder: Path to output folder
        device: Device to run on (cuda/cpu)
        num_ctx: Number of contexts
        max_tokens: Maximum number of tokens to generate
        expected_samples: Expected number of samples
        limit: Limit number of samples to process

    Returns:
        dict: Dictionary mapping dataset names to success status
    """
    banner = '#' * 80
    print(f"\n{banner}")
    print(f"# Running Inference for Model: {model_id}")
    print(f"# Total Datasets: {len(datasets)}")
    print(f"{banner}\n")

    # One inference run per dataset, in the order given.
    results = {
        name: run_inference_for_dataset(
            model_id=model_id,
            dataset=name,
            data_folder=data_folder,
            output_folder=output_folder,
            device=device,
            num_ctx=num_ctx,
            max_tokens=max_tokens,
            expected_samples=expected_samples,
            limit=limit,
        )
        for name in datasets
    }

    # Print summary
    rule = '=' * 80
    print(f"\n{rule}")
    print("Inference Summary:")
    print(f"{rule}")
    successful = sum(1 for ok in results.values() if ok)
    print(f"✓ Successful: {successful}/{len(datasets)}")
    print(f"✗ Failed: {len(datasets) - successful}/{len(datasets)}")

    if successful < len(datasets):
        print("\nFailed datasets:")
        for name, ok in results.items():
            if not ok:
                print(f" - {name}")

    return results
144
+
145
+
146
def run_evaluation(
    results_dir: str,
    data_path: str,
    datasets: List[str],
    output_csv: Optional[str] = None
) -> pd.DataFrame:
    """
    Score predictions for all models/datasets by invoking get_scores.py.

    Args:
        results_dir: Directory containing model results
        data_path: Path to ground truth data
        datasets: List of dataset names to evaluate
        output_csv: Path to output CSV file

    Returns:
        pd.DataFrame: Evaluation results (empty frame on any failure)
    """
    hashes = '#' * 80
    print(f"\n{hashes}")
    print(f"# Running Evaluation")
    print(f"# Results Directory: {results_dir}")
    print(f"# Data Path: {data_path}")
    print(f"{hashes}\n")

    command = [
        'python', 'get_scores.py',
        '--results-dir', results_dir,
        '--data-path', data_path,
        '--datasets', *datasets,
    ]
    if output_csv:
        command.extend(['--output-csv', output_csv])

    try:
        subprocess.run(command, check=True, capture_output=False, text=True)
        print(f"\n✓ Evaluation completed successfully")

        # Scores land next to the predictions unless an explicit CSV path
        # was requested by the caller.
        csv_path = output_csv if output_csv else os.path.join(results_dir, 'scores.csv')
        if os.path.exists(csv_path):
            return pd.read_csv(csv_path)
        print(f"Warning: Output CSV not found at {csv_path}")
        return pd.DataFrame()
    except subprocess.CalledProcessError as e:
        print(f"✗ Error running evaluation: {e}")
        return pd.DataFrame()
    except Exception as e:
        print(f"✗ Unexpected error during evaluation: {e}")
        return pd.DataFrame()
203
+
204
def run_full_pipeline(
    model_id: str,
    data_folder: str,
    output_folder: str,
    datasets: Optional[List[str]] = None,
    device: str = 'cuda',
    num_ctx: int = 5,
    max_tokens: int = 64,
    expected_samples: int = 500,
    limit: Optional[int] = None,
    skip_inference: bool = False,
    skip_evaluation: bool = False,
    output_csv: Optional[str] = None
) -> pd.DataFrame:
    """
    Run the complete pipeline: inference + evaluation

    Args:
        model_id: Model identifier or path
        data_folder: Path to data folder
        output_folder: Path to output folder
        datasets: List of dataset names (defaults to ALL_DATASETS when None)
        device: Device to run on (cuda/cpu)
        num_ctx: Number of contexts
        max_tokens: Maximum number of tokens to generate
        expected_samples: Expected number of samples
        limit: Limit number of samples to process
        skip_inference: Skip inference step
        skip_evaluation: Skip evaluation step
        output_csv: Path to output CSV file

    Returns:
        pd.DataFrame: Evaluation results (empty frame when evaluation is skipped)
    """
    # Fix: the module-level ALL_DATASETS list was used directly as a default
    # argument (a shared mutable default). Use a None sentinel and copy so
    # callers cannot mutate ALL_DATASETS through this parameter.
    if datasets is None:
        datasets = list(ALL_DATASETS)

    print(f"\n{'#'*80}")
    print(f"# ChatRAG-Hi Full Evaluation Pipeline")
    print(f"{'#'*80}\n")
    print(f"Model: {model_id}")
    print(f"Datasets: {', '.join(datasets)}")
    print(f"Device: {device}")
    print(f"Skip Inference: {skip_inference}")
    print(f"Skip Evaluation: {skip_evaluation}")

    # Step 1: Run inference. Per-dataset failures are reported inside the
    # helper but do not abort the pipeline; evaluation still runs on
    # whatever predictions were produced.
    if not skip_inference:
        run_inference_for_all_datasets(
            model_id=model_id,
            datasets=datasets,
            data_folder=data_folder,
            output_folder=output_folder,
            device=device,
            num_ctx=num_ctx,
            max_tokens=max_tokens,
            expected_samples=expected_samples,
            limit=limit
        )
    else:
        print("\n⊘ Skipping inference step")

    # Step 2: Run evaluation (or return an empty frame when skipped).
    if skip_evaluation:
        print("\n⊘ Skipping evaluation step")
        return pd.DataFrame()

    return run_evaluation(
        results_dir=output_folder,
        data_path=data_folder,
        datasets=datasets,
        output_csv=output_csv
    )
275
+
276
def get_args():
    """Build and parse the command-line interface for the evaluation wrapper."""
    arg_parser = argparse.ArgumentParser(
        description="Comprehensive wrapper for ChatRAG-Hi inference and evaluation"
    )

    # Which stage(s) of the pipeline to execute.
    arg_parser.add_argument('--mode', type=str, choices=['inference', 'evaluation', 'full'],
                            default='full',
                            help='Pipeline mode: inference only, evaluation only, or full pipeline')

    # Model under evaluation.
    arg_parser.add_argument('--model-id', type=str, required=True,
                            help='Model identifier or path')

    # Input/output locations.
    arg_parser.add_argument('--data-folder', type=str, required=True,
                            help='Path to data folder containing ground truth JSON files')
    arg_parser.add_argument('--output-folder', type=str, required=True,
                            help='Path to output folder for predictions and scores')

    # Which datasets to run.
    arg_parser.add_argument('--datasets', type=str, nargs='+',
                            default=ALL_DATASETS,
                            help='List of datasets to process')
    arg_parser.add_argument('--all-datasets', action='store_true',
                            help='Process all available datasets')

    # Generation settings forwarded to the inference script.
    arg_parser.add_argument('--device', type=str, default='cuda',
                            help='Device to run on: cpu or cuda')
    arg_parser.add_argument('--num-ctx', type=int, default=5,
                            help='Number of contexts')
    arg_parser.add_argument('--max-tokens', type=int, default=64,
                            help='Maximum number of tokens to generate')
    arg_parser.add_argument('--expected-samples', type=int, default=500,
                            help='Expected number of samples per dataset')
    arg_parser.add_argument('--limit', type=int, default=None,
                            help='Limit number of samples to process (for testing)')

    # Score output location.
    arg_parser.add_argument('--output-csv', type=str, default=None,
                            help='Path to output CSV file for scores')

    parsed = arg_parser.parse_args()

    # --all-datasets overrides any explicit --datasets selection.
    if parsed.all_datasets:
        parsed.datasets = ALL_DATASETS

    return parsed
327
+
328
+
329
def main():
    """Entry point: parse CLI arguments and dispatch the requested pipeline mode."""
    args = get_args()

    # Make sure the destination folder exists before any subprocess writes to it.
    os.makedirs(args.output_folder, exist_ok=True)

    # mode == 'evaluation' skips inference; mode == 'inference' skips evaluation.
    results = run_full_pipeline(
        model_id=args.model_id,
        data_folder=args.data_folder,
        output_folder=args.output_folder,
        datasets=args.datasets,
        device=args.device,
        num_ctx=args.num_ctx,
        max_tokens=args.max_tokens,
        expected_samples=args.expected_samples,
        limit=args.limit,
        skip_inference=(args.mode == 'evaluation'),
        skip_evaluation=(args.mode == 'inference'),
        output_csv=args.output_csv
    )

    # Only print the score table when evaluation actually produced one.
    if args.mode != 'inference' and not results.empty:
        rule = '=' * 80
        print(f"\n{rule}")
        print("Final Evaluation Results:")
        print(f"{rule}\n")
        print(results.to_string(index=False))
        print(f"\n{rule}\n")
363
+
364
# Run the pipeline only when executed directly as a script (not on import).
if __name__ == "__main__":
    main()
366
+
evaluation/run_generation_hf.py ADDED
@@ -0,0 +1,210 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import random
4
+
5
+ from tqdm import tqdm
6
+ from transformers import AutoTokenizer, AutoModelForCausalLM
7
+ from arguments import get_args
8
+
9
+ random.seed(1234)
10
+
11
+
12
def load_data(datapath):
    """Read a JSON file and return the parsed list of samples."""
    print("loading data from %s" % datapath)
    with open(datapath, "r", encoding="utf-8") as handle:
        return json.load(handle)
18
+
19
+
20
def reformat_question(turn_list, dataset_name):
    """Trim a dialogue to its recent turns and mark it for a full answer.

    Keeps at most the latest 7 turns, then shrinks the window further until
    it opens with a user turn. For the supported long-answer datasets the
    first user turn is prefixed with an instruction asking for a complete
    answer (the turn dicts are mutated in place).
    """
    # Window of at most the latest 7 turns.
    window = turn_list[-7:]
    start = -6
    # Shrink the window until it begins with a user turn.
    while window[0]['role'] != 'user':
        window = turn_list[start:]
        start += 1
    turn_list = window

    # The conversation must both start and end on a user turn.
    assert turn_list[-1]['role'] == 'user'
    assert turn_list[0]['role'] == 'user'

    long_answer_datasets = ["doc2dial", "quac", "qrecc", "inscit", "doqa_movies",
                            "doqa_travel", "doqa_cooking", "hybridial", "convfinqa"]
    if dataset_name not in long_answer_datasets:
        raise Exception("please input a correct dataset name!")

    # Only the first user turn carries the long-answer instruction.
    for turn in turn_list:
        if turn['role'] == 'user':
            turn['content'] = 'Please give a full and complete answer for the question: ' + turn['content']
            break

    return turn_list
44
+
45
+
46
def get_inputs_hf(data_list, dataset_name, num_ctx):
    """
    Get inputs formatted for HuggingFace chat template.
    Returns a list of message lists (chat format).

    For each sample the system instruction and the top *num_ctx* retrieved
    contexts are folded into the first user turn, and consecutive assistant
    turns are merged into a single turn.

    Args:
        data_list: Samples, each with 'messages' (chat turns) and 'ctxs'
            (retrieved contexts with 'title' and 'text').
        dataset_name: Dataset name, forwarded to reformat_question.
        num_ctx: Number of contexts to prepend.

    Returns:
        list: One list of chat messages per sample.
    """
    system = "You are a helpful AI assistant that gives concise and detailed answers to the user's questions based on the given contexts. You should indicate when the answer cannot be found in any of the contexts. You should only respond with the answer."
    prompt_list = []

    for item in data_list:
        turn_list = item['messages']
        turn_list = reformat_question(turn_list, dataset_name)

        ctx_list = ["title: " + ctx["title"] + ", context: " + ctx["text"]
                    if ctx["title"] else "context: " + ctx["text"] for ctx in item['ctxs'][:num_ctx]]
        context = "\n\n".join(ctx_list)

        # Fold the system prompt and the contexts into the first user turn.
        turn_list[0]["content"] = f"{system}\n\n{context}\n\n{turn_list[0]['content']}"

        # Merge consecutive assistant turns into one turn.
        # Bug fix: this previously used a bare try/except that dropped into
        # pdb.set_trace() on IndexError, hanging any non-interactive run.
        cleaned_turn_list = []
        for turn in turn_list:
            if (turn["role"] == "assistant" and cleaned_turn_list
                    and cleaned_turn_list[-1]["role"] == "assistant"):
                cleaned_turn_list[-1]["content"] += ". " + turn["content"]
            else:
                cleaned_turn_list.append(turn)

        prompt_list.append(cleaned_turn_list)

    return prompt_list
82
+
83
+
84
def get_input_datapath(args):
    """Resolve the ground-truth JSON path for args.eval_dataset."""
    # Map each supported dataset to the args attribute holding its file path.
    path_attr_for = {
        "doc2dial": "doc2dial_path",
        "convfinqa": "convfinqa_path",
        "quac": "quac_path",
        "qrecc": "qrecc_path",
        "doqa_cooking": "doqa_cooking_path",
        "doqa_travel": "doqa_travel_path",
        "doqa_movies": "doqa_movies_path",
        "inscit": "inscit_path",
        "hybridial": "hybridial_path",
    }
    if args.eval_dataset not in path_attr_for:
        raise Exception("please input a correct eval_dataset name!")

    return os.path.join(args.data_folder, getattr(args, path_attr_for[args.eval_dataset]))
108
+
109
+
110
def get_prompt_list(args):
    """Load the dataset for args.eval_dataset and convert it into chat prompts."""
    data_list = load_data(get_input_datapath(args))
    print("number of samples in the dataset:", len(data_list))

    # Optionally truncate the dataset for quick smoke-test runs.
    if args.limit is not None:
        data_list = data_list[:args.limit]
        print(f"limited to {args.limit} samples")

    return get_inputs_hf(data_list, args.eval_dataset, num_ctx=args.num_ctx)
123
+
124
+
125
def run_inference(args, tokenizer, model):
    """Generate answers for every sample of ``args.eval_dataset``.

    Writes one generated answer per line to
    ``<output_folder>/<model_name>/<eval_dataset>.txt`` and resumes from any
    partial results already present in that file (one line == one sample).

    Args:
        args: Parsed CLI arguments (model_id, output_folder, eval_dataset,
            expected_samples, device, max_tokens, stop_strings, ...).
        tokenizer: HuggingFace tokenizer providing a chat template.
        model: HuggingFace causal LM already placed on ``args.device``.
    """
    # Get output filepath; '/' in the model id is replaced so it forms a
    # valid directory name.
    model_name = args.model_id.replace('/', '_')
    os.makedirs(os.path.join(args.output_folder, model_name), exist_ok=True)
    output_filepath = os.path.join(args.output_folder, model_name, f"{args.eval_dataset}.txt")

    # Check for existing results: skip entirely when complete, otherwise
    # record how many samples are already done so generation can resume.
    existing_count = 0
    if os.path.exists(output_filepath):
        with open(output_filepath, "r") as f:
            lines = f.readlines()
            if len(lines) >= args.expected_samples:
                print(f"Skipping as results exist ({len(lines)} samples)", "\n\n")
                return
            else:
                existing_count = len(lines)
                print(f"Resuming from {existing_count} existing samples")

    # Get prompt list (chat-format message lists, one per sample).
    prompt_list = get_prompt_list(args)

    # Run generation; the file is opened in append mode so previously
    # completed samples are preserved.
    output_list = []
    with open(output_filepath, "a", encoding='utf-8') as f:
        for idx, messages in enumerate(tqdm(prompt_list, desc=f"Generating for {args.eval_dataset}")):
            # Skip samples that are already present in the output file.
            if idx < existing_count:
                continue

            try:
                # Apply chat template to render the messages into a prompt string.
                text = tokenizer.apply_chat_template(
                    messages,
                    tokenize=False,
                    add_generation_prompt=True
                )

                # Generate; stops early on any of args.stop_strings.
                model_inputs = tokenizer([text], return_tensors="pt").to(args.device)
                generated_ids = model.generate(
                    model_inputs.input_ids,
                    max_new_tokens=args.max_tokens,
                    stop_strings=args.stop_strings,
                    tokenizer=tokenizer
                )

                # Decode only the newly generated tokens (drop the prompt prefix).
                generated_ids = [
                    output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
                ]
                response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
                # NOTE(review): str.strip(" <extra_id_1>") strips any of those
                # CHARACTERS from both ends, not the literal substring —
                # confirm this is the intended behavior.
                generated_text = response.strip().replace("\n", " ").strip(" <extra_id_1>")

                output_list.append(generated_text)
                f.write(generated_text + "\n")

            except Exception as ex:
                # Stop at the first failure so the output file stays resumable.
                print(f"Error at index {idx}: {str(ex)}")
                break

    print(f"Generated {len(output_list)} responses for {args.eval_dataset}")
186
+
187
+
188
def main():
    """Main function to run HuggingFace model inference.

    Loads the tokenizer and model named by ``args.model_id``, moves the model
    to ``args.device``, and runs generation over the selected dataset.
    """
    args = get_args()

    print(f"Evaluating model: {args.model_id}")
    print(f"Dataset: {args.eval_dataset}")
    print(f"Device: {args.device}")
    print(f"Num contexts: {args.num_ctx}")
    print(f"Max tokens: {args.max_tokens}")

    # Load tokenizer and model
    # NOTE(review): `stop_strings` is passed here as a tokenizer init kwarg
    # and again explicitly to model.generate() in run_inference — confirm the
    # tokenizer-level copy is actually needed.
    tokenizer = AutoTokenizer.from_pretrained(args.model_id, stop_strings=args.stop_strings)
    model = AutoModelForCausalLM.from_pretrained(args.model_id)
    model.to(args.device)

    # Run inference
    run_inference(args, tokenizer, model)

    print("Inference completed!")
209
# Run inference only when executed directly as a script (not on import).
if __name__ == "__main__":
    main()
evaluation/run_generation_vllm.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+
3
+ from transformers import AutoTokenizer
4
+ from vllm import LLM, SamplingParams
5
+ from arguments import get_args
6
+ from dataset import load_data, get_inputs
7
+ import torch
8
+ import os
9
+
10
def get_prompt_list(args):
    """Load the evaluation dataset and build one rendered prompt per sample."""
    ## tokenizer for the target model (used by get_inputs to render prompts)
    tokenizer = AutoTokenizer.from_pretrained(args.model_id)

    ## map each supported dataset to the args attribute holding its file path
    path_attr_for = {
        "doc2dial": "doc2dial_path",
        "convfinqa": "convfinqa_path",
        "quac": "quac_path",
        "qrecc": "qrecc_path",
        "doqa_cooking": "doqa_cooking_path",
        "doqa_travel": "doqa_travel_path",
        "doqa_movies": "doqa_movies_path",
        "coqa": "coqa_path",
        "sqa": "sqa_path",
        "topiocqa": "topiocqa_path",
        "inscit": "inscit_path",
        "hybridial": "hybridial_path",
    }
    if args.eval_dataset not in path_attr_for:
        raise Exception("please input a correct eval_dataset name!")
    input_datapath = os.path.join(args.data_folder, getattr(args, path_attr_for[args.eval_dataset]))

    data_list = load_data(input_datapath)
    print("number of samples in the dataset:", len(data_list))
    return get_inputs(data_list, args.eval_dataset, tokenizer, num_ctx=args.num_ctx, max_output_len=args.out_seq_len)
49
+
50
+
51
def main():
    """Run vLLM batch inference over the selected dataset and save outputs.

    Loads the model from ``args.model_folder/args.model_name``, generates one
    answer per prompt with deterministic decoding, and writes one answer per
    line to ``<output_folder>/<eval_dataset>_output.txt``.
    """
    args = get_args()

    ## bos token for llama-3, prepended manually to every prompt
    bos_token = "<|begin_of_text|>"

    ## get model_path
    model_path = os.path.join(args.model_folder, args.model_name)

    ## get prompt_list (already rendered to plain prompt strings)
    prompt_list = get_prompt_list(args)

    ## get output_datapath (one generated answer per line)
    output_datapath = os.path.join(args.output_folder, "%s_output.txt" % args.eval_dataset)

    ## deterministic decoding: temperature 0 with top-1 sampling
    sampling_params = SamplingParams(temperature=0, top_k=1, max_tokens=args.max_tokens)

    ## NOTE(review): tensor_parallel_size is hard-coded to 8 GPUs — confirm
    ## this matches the target machine before running.
    model_vllm = LLM(model_path, tensor_parallel_size=8)

    # Prompts are submitted one at a time; newlines in the answer are
    # flattened so each answer occupies exactly one line in the output file.
    output_list = []
    for prompt in prompt_list:
        prompt = bos_token + prompt
        output = model_vllm.generate([prompt], sampling_params)[0]
        generated_text = output.outputs[0].text
        generated_text = generated_text.strip().replace("\n", " ")

        # print("generated_text:", generated_text)
        output_list.append(generated_text)

    print("writing to %s" % output_datapath)
    with open(output_datapath, "w") as f:
        for output in output_list:
            f.write(output + "\n")
88
# Run inference only when executed directly as a script (not on import).
if __name__ == "__main__":
    main()