# NOTE(review): the four tokens below ("text", "stringlengths", "1", "93.6k")
# are dataset-extraction residue, not valid Python — commented out so the
# file parses; safe to delete.
def main():
    """Run few-shot KBQA evaluation over the dev set.

    Loads a spaCy pipeline, builds sparse (Lucene/BM25) and dense
    (Contriever/Faiss) searchers over a Freebase relation index, prepares
    the few-shot prompt and entity-name lookup tables, then delegates to
    ``all_combiner_evaluation``.

    NOTE(review): SOURCE arrived with all indentation stripped; nesting below
    is reconstructed from the statement syntax — confirm against the original.
    """
    args = parse_args()
    # spaCy is used only for lemma-level tokenization of training questions.
    nlp = spacy.load("en_core_web_sm")
    # Sparse and dense retrievers over the same Freebase relation index,
    # combined into a hybrid searcher.
    bm25_searcher = LuceneSearcher('contriever_fb_relation/index_relation_fb')
    query_encoder = AutoQueryEncoder(encoder_dir='facebook/contriever', pooling='mean')
    contriever_searcher = FaissSearcher('contriever_fb_relation/freebase_contriever_index', query_encoder)
    hsearcher = HybridSearcher(contriever_searcher, bm25_searcher)
    # Separate handle on the relation index, presumably for fetching stored
    # relation documents (vs. searching) — confirm in all_combiner_evaluation.
    rela_corpus = LuceneSearcher('contriever_fb_relation/index_relation_fb')
    dev_data = processed = process_file(args.eva_data_path) if False else process_file(args.eva_data_path)
    train_data = process_file(args.train_data_path)
    # question text -> gold s-expression, from the training split.
    que_to_s_dict_train = {data["question"]: data["s_expression"] for data in train_data}
    question_to_mid_dict = process_file_node(args.train_data_path)
    if not args.retrieval:
        # Fixed few-shot mode: pick exemplar questions up front.
        selected_quest_compose, selected_quest_compare, selected_quest = select_shot_prompt_train(train_data, args.shot_num)
    else:
        # Retrieval mode: exemplars are fetched per query later, not here.
        selected_quest_compose = []
        selected_quest_compare = []
        selected_quest = []
    all_ques = selected_quest_compose + selected_quest_compare
    corpus = [data["question"] for data in train_data]
    # Lemmatized training questions feed a BM25 index used for
    # per-question exemplar retrieval.
    tokenized_train_data = []
    for doc in corpus:
        nlp_doc = nlp(doc)
        tokenized_train_data.append([token.lemma_ for token in nlp_doc])
    bm25_train_full = BM25Okapi(tokenized_train_data)
    if not args.retrieval:
        # Build the question-type classification prompt from the shuffled
        # exemplars: each line labels a question Composition or Comparison.
        prompt_type = ''
        random.shuffle(all_ques)
        for que in all_ques:
            prompt_type = prompt_type + "Question: " + que + "\nType of the question: "
            if que in selected_quest_compose:
                prompt_type += "Composition\n"
            else:
                prompt_type += "Comparison\n"
    else:
        prompt_type = ''
    # fb_roles: space-separated triples; by the indexing below info[1] is the
    # relation and info[0]/info[2] its two argument types — TODO confirm format.
    with open(args.fb_roles_path) as f:
        lines = f.readlines()
    relationships = []
    entities_set = []
    relationship_to_enti = {}
    for line in lines:
        info = line.split(" ")
        relationships.append(info[1])
        entities_set.append(info[0])
        entities_set.append(info[2])
        relationship_to_enti[info[1]] = [info[0], info[2]]
    # surface map: tab-separated "name<TAB>score<TAB>mid"; builds
    # name -> {mid: score} for entity linking.
    with open(args.surface_map_path) as f:
        lines = f.readlines()
    name_to_id_dict = {}
    for line in lines:
        info = line.split("\t")
        name = info[0]
        score = float(info[1])
        mid = info[2].strip()
        if name in name_to_id_dict:
            name_to_id_dict[name][mid] = score
        else:
            name_to_id_dict[name] = {}
            name_to_id_dict[name][mid] = score
    all_fns = list(name_to_id_dict.keys())
    # BM25 over whitespace-tokenized surface names, for fuzzy name lookup.
    tokenized_all_fns = [fn.split() for fn in all_fns]
    bm25_all_fns = BM25Okapi(tokenized_all_fns)
    all_combiner_evaluation(dev_data, selected_quest_compose, selected_quest_compare, selected_quest, prompt_type,
                            hsearcher, rela_corpus, relationships, args.temperature, que_to_s_dict_train,
                            question_to_mid_dict, args.api_key, args.engine, name_to_id_dict, bm25_all_fns,
                            all_fns, relationship_to_enti, retrieval=args.retrieval, corpus=corpus, nlp_model=nlp,
                            bm25_train_full=bm25_train_full, retrieve_number=args.shot_num)


if __name__ == "__main__":
    main()
# <FILESEP>
import argparse, os, sys, time, gc, datetime
from models.module import cas_mvsnet_loss, cas_mvsnet_loss_kl
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from datasets import find_dataset_def
# from models import *
from models.cas_mvsnet import CascadeMVSNet
from tools.utils import *
import torch.distributed as dist
# Let cuDNN autotune convolution algorithms; fastest when input shapes are
# fixed across iterations.
cudnn.benchmark = True
# Command-line interface for the Cascade MVSNet trainer. Flags, defaults and
# choices are unchanged; only the copy-pasted help texts are corrected
# ('--device' and '--pseudopath' previously reused unrelated help strings).
parser = argparse.ArgumentParser(description='A PyTorch Implementation of Cascade Cost Volume MVSNet')
parser.add_argument('--mode', default='train', help='train or test', choices=['train', 'test', 'profile'])
parser.add_argument('--model', default='mvsnet', help='select model')
parser.add_argument('--device', default='cuda', help='select device')
parser.add_argument('--dataset', default='dtu_yao_st', help='select dataset')
parser.add_argument('--trainpath', help='train datapath')
parser.add_argument('--pseudopath', help='pseudo label datapath')
parser.add_argument('--testpath', help='test datapath')
parser.add_argument('--trainlist', help='train list')