# mfaq_light / train_valid_split.py
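"""Split the MFAQ data files into per-language train and validation sets.

Each JSONL file is streamed domain by domain: multilingual, multi-page,
and small domains go to train; the rest are deduplicated by answer and,
until the validation set holds roughly 2000 questions, routed to valid
with a full in-domain candidate list attached to every question.
"""
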
import os
import json
import tqdm
import functools
import collections
import multiprocessing
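# The two sklearn imports below are only used by the commented-out
# TF-IDF variant of format_to_valid kept further down for reference.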
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel


def extract_domains(filename):
    """Return (filename, list of unique domains found in the JSONL file)."""
    domains = set()
    with open(filename) as f:
        for line in f:
            record = json.loads(line.strip())
            domains.add(record["domain"])
    return filename, list(domains)


def filter_valid(questions):
    """Keep only the first question for each distinct answer."""
    answers = set()
    new_questions = []
    for question in questions:
        if question["answer"] not in answers:
            new_questions.append(question)
            answers.add(question["answer"])
    return new_questions


# Earlier variant of format_to_valid, kept for reference: it ranked the
# other answers by TF-IDF similarity to the gold answer and stored the
# median-similarity one as a hard negative.
# def format_to_valid(questions):
#     answers_txt = [e["answer"] for e in questions]
#     questions_txt = [e["question"] for e in questions]
#     vectorizer = TfidfVectorizer()
#     vectorizer.fit(answers_txt + questions_txt)
#     answer_vectors = vectorizer.transform(answers_txt)
#     for i, question in enumerate(questions):
#         similarities = linear_kernel(answer_vectors[[i]], answer_vectors).flatten()
#         answer_scores = [(j, sim) for j, sim in enumerate(similarities) if sim != 1]
#         answer_scores = sorted(answer_scores, key=lambda x: x[1], reverse=True)
#         sorted_answers = [questions[j]["answer"] for j, _ in answer_scores if questions[j]["answer"] != question["answer"]]
#         negative_answer = sorted_answers[len(sorted_answers) // 2]
#         assert question["answer"] not in sorted_answers
#         question["candidates"] = [question["answer"]] + sorted_answers
#         question["negative_example"] = negative_answer
#     return questions


def format_to_valid(questions):
    """Attach candidates to each question: its gold answer first,
    followed by every other answer from the same domain."""
    answers = [e["answer"] for e in questions]
    for question in questions:
        answer = question["answer"]
        candidates = [answer] + [e for e in answers if e != answer]
        question["candidates"] = candidates
    return questions


def format_to_train(questions):
    """Give each question the next question's answer as an in-domain
    negative (the answer list is shifted by one, wrapping around)."""
    answers_txt = [e["answer"] for e in questions]
    answers_shifted = answers_txt[1:] + [answers_txt[0]]
    for question, answer in zip(questions, answers_shifted):
        question["negative"] = answer
    return questions


def valid_train_split(filename, mapping=None):
    """Split one JSONL file into train and valid question lists, per domain.

    A domain is routed to valid only if it appears in a single language,
    spans a single page, the valid set is not yet full (~2000 questions),
    and it keeps at least 15 questions after answer deduplication;
    everything else goes to train.
    """
    train = []
    valid = []

    def flush(domain, domain_data):
        # Decide where the finished domain's questions go.
        form_questions = format_to_train(domain_data["questions"])
        if len(mapping[domain]) > 1:  # multilingual domain
            train.extend(form_questions)
        elif len(valid) > 2000:  # valid set is full
            train.extend(form_questions)
        elif len(domain_data["pages"]) > 1:  # multi-page domain
            train.extend(form_questions)
        elif len(domain_data["questions"]) < 15:  # too small
            train.extend(form_questions)
        else:
            questions = filter_valid(domain_data["questions"])
            if len(questions) < 15:  # too small after deduplication
                train.extend(form_questions)
            else:
                valid.extend(format_to_valid(questions))

    previous_domain = ""
    domain_data = {"questions": [], "pages": set()}
    with open(filename) as f:
        for line_txt in f:
            line = json.loads(line_txt.strip())
            domain = line["domain"]
            if domain != previous_domain and previous_domain != "":
                flush(previous_domain, domain_data)
                domain_data = {"questions": [], "pages": set()}
            domain_data["questions"].append(line)
            domain_data["pages"].add(line["domain_index"])
            previous_domain = domain
    # Flush the last domain, which the loop above never reaches.
    if previous_domain != "":
        flush(previous_domain, domain_data)
    return train, valid, filename


if __name__ == "__main__":
    # First pass: map every domain to the languages it appears in.
    domain_count = collections.defaultdict(list)
    data = [f"data/{e}" for e in os.listdir("data") if e.endswith(".json")]
    # with multiprocessing.Pool(os.cpu_count()) as p:
    with multiprocessing.Pool(1) as p:
        for filename, domains in tqdm.tqdm(p.imap_unordered(extract_domains, data)):
            # Files are named like data/<name>.<language>.json.
            language = filename.split(".")[1]
            for domain in domains:
                domain_count[domain].append(language)

    # Second pass: split each file and write the train/valid JSONL outputs.
    os.makedirs("data/train", exist_ok=True)
    os.makedirs("data/valid", exist_ok=True)
    with multiprocessing.Pool(os.cpu_count()) as p:
        fn = functools.partial(valid_train_split, mapping=domain_count)
        for train, valid, filename in tqdm.tqdm(p.imap_unordered(fn, data)):
            train_filename = filename.replace("data/", "data/train/")
            train_lines = [json.dumps(e, ensure_ascii=False) for e in train]
            with open(train_filename, "w") as f:
                f.write("\n".join(train_lines))
            valid_filename = filename.replace("data/", "data/valid/")
            valid_lines = [json.dumps(e, ensure_ascii=False) for e in valid]
            with open(valid_filename, "w") as f:
                f.write("\n".join(valid_lines))