#%%
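# Evaluate pre-computed retrieval results for the WebQuestions (webq) test
# set: validate top docs against gold answers, then save recall@k and
# per-question results.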
import ast
import csv
import os
import pickle

from utils.retriever_utils import load_passages, validate, save_results

#%%
def load_data_with_pickle(file_path):
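    """Load a pickled object (here: pre-computed top docs) from disk."""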
    with open(file_path, 'rb') as f:
        return pickle.load(f)


def process_and_save_retrieval_results(top_docs, dataset_name, questions, question_answers, all_passages, num_threads, match_type, output_dir, output_no_text=False):
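    """Validate top_docs against the gold answers and write recall@k /
    per-question result files into output_dir."""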
    recall_outfile = os.path.join(output_dir, 'recall_at_k.csv')
    result_outfile = os.path.join(output_dir, 'results.json')
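
    # Flag, per question, which retrieved docs contain a gold answer and
    # write recall@k statistics to recall_outfile.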
    
    questions_doc_hits = validate(
        dataset_name,
        all_passages,
        question_answers,
        top_docs,
        num_threads,
        match_type,
        recall_outfile,
        use_wandb=False
    )
    
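    # Persist questions, answers, top docs and hit flags to results.json.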
    save_results(
        all_passages,
        questions,
        question_answers,
        top_docs,
        questions_doc_hits,
        result_outfile,
        output_no_text=output_no_text
    )
    
    return questions_doc_hits


#%%
if __name__ == '__main__':

    # --- Configuration ---
    dataset_name = 'webq'
    num_threads = 10
    output_no_text = False  # if True, omit passage text from results.json
    ctx_file = './corpus/wiki_webq_corpus.tsv'

    match_type = 'string'
    input_file_path = './data/webq-test.csv'

    # Each row is tab-separated: question \t stringified list of gold answers.
    with open(input_file_path, 'r', newline='') as file:
        query_data = csv.reader(file, delimiter='\t')
        # ast.literal_eval safely parses the answer list (avoids eval()).
        questions, question_answers = zip(
            *[(item[0], ast.literal_eval(item[1])) for item in query_data]
        )
    
    all_passages = load_passages(ctx_file)

    output_dir = './output/webq-test-result'

    # Pre-computed retrieval results: ranked top docs per question.
    top_docs_pkl_path = './output/result_str.pkl'
    top_docs = load_data_with_pickle(top_docs_pkl_path)

    os.makedirs(output_dir, exist_ok=True)
    questions_doc_hits = process_and_save_retrieval_results(
        top_docs,
        dataset_name,
        questions,
        question_answers,
        all_passages,
        num_threads,
        match_type,
        output_dir,
        output_no_text=output_no_text
    )

    print('Validation finished.')