                retrieval=retrieval, corpus=corpus, nlp_model=nlp_model,
                bm25_train_full=bm25_train_full, retrieve_number=retrieve_number)
        else:
            gene_exps = ep_generator(
                data["question"],
                list(set(selected_quest_compose) | set(selected_quest)),
                temp, que_to_s_dict_train, question_to_mid_dict, api_key, LLM_engine,
                retrieval=retrieval, corpus=corpus, nlp_model=nlp_model,
                bm25_train_full=bm25_train_full, retrieve_number=retrieve_number)
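        # Ground each LLM-drafted logical form (gene_exp) against the KB and choose
        # the final answer by majority vote over the grounded candidates.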
        two_hop_rela_dict = {}
        answer_candi = []
        removed_none_candi = []
        answer_to_grounded_dict = {}
        logger.info("gene_exps: {}".format(gene_exps))
        scouts = gene_exps[:6]
        for idx, gene_exp in enumerate(scouts):
            try:
                logger.info("gene_exp: {}".format(gene_exp))
                join_num = number_of_join(gene_exp)
                if join_num > 5:
                    continue
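                # Cap the candidate MIDs kept per entity mention; drafts with many
                # JOINs get a tighter cap so the cross-product of MID combinations
                # built below stays tractable.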
                if join_num > 3:
                    top_mid = 5
                else:
                    top_mid = 15
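                # Extract the entity mentions referenced in the draft and map each
                # one to a ranked list of candidate Freebase MIDs.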
                found_names = find_friend_name(gene_exp, data["question"])
                found_mids = from_fn_to_id_set(found_names, data["question"], name_to_id_dict, bm25_all_fns, all_fns)
                found_mids = [mids[:top_mid] for mids in found_mids]
                mid_combinations = list(itertools.product(*found_mids))
                logger.info("all_iters: {}".format(mid_combinations))
                for mid_iters in mid_combinations:
                    logger.info("mid_iters: {}".format(mid_iters))
                    replaced_exp = convz_fn_to_mids(gene_exp, found_names, mid_iters)
                    answer, two_hop_rela_dict, bounded_exp = bound_to_existed(
                        data["question"], replaced_exp, mid_iters, two_hop_rela_dict,
                        relationship_to_enti, hsearcher, rela_corpus, relationships)
                    answer_candi.append(answer)
                    if answer is not None:
                        answer_to_grounded_dict[tuple(answer)] = bounded_exp
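                # Majority vote over all non-None answers collected so far.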
                for ans in answer_candi:
                    if ans is not None:
                        removed_none_candi.append(ans)
                if not removed_none_candi:
                    answer = None
                else:
                    count_dict = Counter([tuple(candi) for candi in removed_none_candi])
                    logger.info("count_dict: {}".format(count_dict))
                    answer = max(count_dict, key=count_dict.get)
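            # If grounding fails partway through, fall back to a vote over whatever
            # candidates were collected before the exception.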
            except Exception:
                if not removed_none_candi:
                    answer = None
                else:
                    count_dict = Counter([tuple(candi) for candi in removed_none_candi])
                    logger.info("count_dict: {}".format(count_dict))
                    answer = max(count_dict, key=count_dict.get)
            answer_to_grounded_dict[None] = ""
            logger.info("predicted_answer: {}".format(answer))
            logger.info("label: {}".format(label))
            if answer is None:
                no_ans[idx] += 1
            elif set(answer) == set(label):
                correct[idx] += 1
            total[idx] += 1
            em_score = correct[idx] / total[idx]
            logger.info("================================================================")
            logger.info("consistent candidates number: {}".format(idx + 1))
            logger.info("em_score: {}".format(em_score))
            logger.info("correct: {}".format(correct[idx]))
            logger.info("total: {}".format(total[idx]))
            logger.info("no_ans: {}".format(no_ans[idx]))
            logger.info(" ")
            logger.info("================================================================")
def parse_args():
    parser = argparse.ArgumentParser(allow_abbrev=False)
    parser.add_argument('--shot_num', type=int, metavar='N',
                        default=40, help='number of shots used in the in-context demonstrations')
    parser.add_argument('--temperature', type=float, metavar='N',
                        default=0.3, help='sampling temperature of the LLM')
    parser.add_argument('--api_key', type=str, metavar='N',
                        default=None, help='API key used to access the LLM')
    parser.add_argument('--engine', type=str, metavar='N',
                        default="code-davinci-002", help='engine name of the LLM')
    parser.add_argument('--retrieval', action='store_true', help='whether to use retrieval-augmented KB-BINDER')
    parser.add_argument('--train_data_path', type=str, metavar='N',
                        default="data/GrailQA/grailqa_v1.0_train.json", help='training data path')
    parser.add_argument('--eva_data_path', type=str, metavar='N',
                        default="data/GrailQA/grailqa_v1.0_dev.json", help='evaluation data path')
    parser.add_argument('--fb_roles_path', type=str, metavar='N',
                        default="data/GrailQA/fb_roles", help='Freebase roles file path')
    parser.add_argument('--surface_map_path', type=str, metavar='N',
                        default="data/surface_map_file_freebase_complete_all_mention", help='surface map file path')
    args = parser.parse_args()
    return args
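# Example invocation (illustrative only; the script name is a placeholder, not taken
# from this file, and the numeric values match the argparse defaults above):
#   python kb_binder_eval.py --shot_num 40 --temperature 0.3 \
#       --api_key <YOUR_API_KEY> --engine code-davinci-002 --retrieval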