import csv
import json
import os

import datasets

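# Data loading script for the GEM "wiki_auto_asset_turk" sentence
# simplification task: WikiAuto sentence pairs for training, evaluated on the
# ASSET and TURK test sets (plus extra benchmark and challenge splits).
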
_CITATION = """\ |
|
@inproceedings{jiang-etal-2020-neural, |
|
title = "Neural {CRF} Model for Sentence Alignment in Text Simplification", |
|
author = "Jiang, Chao and |
|
Maddela, Mounica and |
|
Lan, Wuwei and |
|
Zhong, Yang and |
|
Xu, Wei", |
|
booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
month = jul, |
|
year = "2020", |
|
address = "Online", |
|
publisher = "Association for Computational Linguistics", |
|
url = "https://www.aclweb.org/anthology/2020.acl-main.709", |
|
doi = "10.18653/v1/2020.acl-main.709", |
|
pages = "7943--7960", |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """\
WikiAuto provides a set of aligned sentences from English Wikipedia and Simple
English Wikipedia as a resource to train sentence simplification systems.

The authors first crowd-sourced a set of manual alignments between sentences in
a subset of the Simple English Wikipedia and their corresponding versions in
English Wikipedia (this corresponds to the manual config in this version of the
dataset), then trained a neural CRF system to predict these alignments.

The trained alignment prediction model was then applied to the other articles in
Simple English Wikipedia with an English counterpart to create a larger corpus
of aligned sentences (corresponding to the auto and auto_acl configs here).
"""

_URLs = {
    "train": "train.tsv",
    "validation": "valid.tsv",
    "test_turk": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_turk_detokenized.json",
    "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/wiki_auto_asset_turk_train_valid.zip",
    "test_contract": "benchmarks/contract-benchmark.tsv",
    "test_wiki": "benchmarks/wiki-benchmark.tsv",
}

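# The TSV paths above are relative and are expected to resolve against the
# dataset's base directory. ASSET ships as one file of source sentences plus
# ten files of crowd-sourced reference simplifications (indices 0-9).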
_URLs["test_asset_orig"] = "https://raw.githubusercontent.com/facebookresearch/asset/main/dataset/asset.test.orig"
for i in range(10):
    _URLs[f"test_asset_{i}"] = f"https://raw.githubusercontent.com/facebookresearch/asset/main/dataset/asset.test.simp.{i}"


class WikiAuto(datasets.GeneratorBasedBuilder):
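    """Builder for the GEM wiki_auto_asset_turk splits.

    Produces train/validation pairs from the WikiAuto TSV files, test splits
    from the ASSET and TURK references, two additional benchmark TSVs, and the
    GEM challenge sets.
    """
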
    VERSION = datasets.Version("1.0.0")
    DEFAULT_CONFIG_NAME = "wiki_auto_asset_turk"

    def _info(self):
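        # Every split uses the same flat schema; "references" holds the human
        # reference simplifications (none for train, ten for ASSET, one for
        # the TSV-based test splits, whatever the JSON provides otherwise).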
        features = datasets.Features(
            {
                "gem_id": datasets.Value("string"),
                "gem_parent_id": datasets.Value("string"),
                "source": datasets.Value("string"),
                "target": datasets.Value("string"),
                "references": [datasets.Value("string")],
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=datasets.info.SupervisedKeysData(
                input="source", output="target"
            ),
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        dl_dir = dl_manager.download_and_extract(_URLs)
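        # GEM challenge sets: 500-example random samples of train/validation,
        # plus ASSET and TURK test inputs perturbed by back-translation,
        # "butter fingers" typo noise (p=0.02 / p=0.05), and punctuation removal.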
        challenge_sets = [
            ("challenge_train_sample", "train_wiki_auto_asset_turk_RandomSample500.json"),
            ("challenge_validation_sample", "validation_wiki_auto_asset_turk_RandomSample500.json"),
            ("challenge_test_asset_backtranslation", "test_asset_wiki_auto_asset_turk_BackTranslation.json"),
            ("challenge_test_asset_bfp02", "test_asset_wiki_auto_asset_turk_ButterFingersPerturbation_p=0.02.json"),
            ("challenge_test_asset_bfp05", "test_asset_wiki_auto_asset_turk_ButterFingersPerturbation_p=0.05.json"),
            ("challenge_test_asset_nopunc", "test_asset_wiki_auto_asset_turk_WithoutPunctuation.json"),
            ("challenge_test_turk_backtranslation", "detok_test_turk_wiki_auto_asset_turk_BackTranslation.json"),
            ("challenge_test_turk_bfp02", "detok_test_turk_wiki_auto_asset_turk_ButterFingersPerturbation_p=0.02.json"),
            ("challenge_test_turk_bfp05", "detok_test_turk_wiki_auto_asset_turk_ButterFingersPerturbation_p=0.05.json"),
            ("challenge_test_turk_nopunc", "detok_test_turk_wiki_auto_asset_turk_WithoutPunctuation.json"),
        ]
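        # Standard splits first, then one SplitGenerator per challenge set.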
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": dl_dir["train"], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": dl_dir["validation"], "split": "validation"},
            ),
            datasets.SplitGenerator(
                name="test_asset",
                gen_kwargs={
                    "filepath": "",
                    "split": "test_asset",
                    "filepaths": [dl_dir["test_asset_orig"]]
                    + [dl_dir[f"test_asset_{i}"] for i in range(10)],
                },
            ),
            datasets.SplitGenerator(
                name="test_turk",
                gen_kwargs={"filepath": dl_dir["test_turk"], "split": "test_turk"},
            ),
            datasets.SplitGenerator(
                name="test_contract",
                gen_kwargs={"filepath": dl_dir["test_contract"], "split": "test_contract"},
            ),
            datasets.SplitGenerator(
                name="test_wiki",
                gen_kwargs={"filepath": dl_dir["test_wiki"], "split": "test_wiki"},
            ),
        ] + [
            datasets.SplitGenerator(
                name=challenge_split,
                gen_kwargs={
                    "filepath": os.path.join(
                        dl_dir["challenge_set"], "wiki_auto_asset_turk", filename
                    ),
                    "split": challenge_split,
                },
            )
            for challenge_split, filename in challenge_sets
        ]

    def _generate_examples(self, filepath, split, filepaths=None, lang=None):
        """Yields examples."""
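        # Each split has its own on-disk format: tab-separated source/target
        # pairs for train/validation, parallel plain-text files for ASSET,
        # detokenized JSON for TURK, TSV benchmarks for test_wiki and
        # test_contract, and GEM challenge-set JSON for everything else.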
        if split in ["train", "validation"]:
            keys = ["source", "target"]
            with open(filepath, encoding="utf-8") as f:
                for id_, line in enumerate(f):
                    values = line.strip().split("\t")
                    assert (
                        len(values) == 2
                    ), f"Expected 2 tab-separated fields, got {len(values)}: {line}"
                    example = dict(zip(keys, values))
                    example["gem_id"] = f"wiki_auto_asset_turk-{split}-{id_}"
                    example["gem_parent_id"] = example["gem_id"]
                    # Only evaluation splits carry references; train leaves them empty.
                    example["references"] = [] if split == "train" else [example["target"]]
                    yield id_, example
elif split == "test_turk": |
|
examples = json.load(open(filepath, encoding="utf-8")) |
|
for id_, example in enumerate(examples): |
|
example["gem_parent_id"] = example["gem_id"] |
|
for k in ["source_id", "target_id"]: |
|
if k in example: |
|
del example[k] |
|
yield id_, example |
|
elif split == "test_asset": |
|
files = [open(f_name, encoding="utf-8") for f_name in filepaths] |
|
for id_, lines in enumerate(zip(*files)): |
|
yield id_, { |
|
"gem_id": f"wiki_auto_asset_turk-{split}-{id_}", |
|
"gem_parent_id": f"wiki_auto_asset_turk-{split}-{id_}", |
|
"target": lines[1].strip(), |
|
"source": lines[0].strip(), |
|
"references": [line.strip() for line in lines[1:]], |
|
} |
|
elif split == "test_wiki" or split == "test_contract": |
|
with open(filepath, 'r') as f: |
|
reader = csv.DictReader(f, delimiter="\t") |
|
for id_, entry in enumerate(reader): |
|
yield id_, { |
|
"gem_id": f"wiki_auto_asset_turk-{split}-{id_}", |
|
"gem_parent_id": f"wiki_auto_asset_turk-{split}-{id_}", |
|
"target": entry["simple"], |
|
"source": entry["complex"], |
|
"references": [entry["simple"]], |
|
} |
|
        else:
            # Challenge sets: JSON files that are either a list of examples or a
            # single-key dict wrapping such a list.
            with open(filepath, encoding="utf-8") as f:
                examples = json.load(f)
            if isinstance(examples, dict):
                assert len(examples) == 1, "multiple entries found"
                examples = list(examples.values())[0]
            for id_, example in enumerate(examples):
                example["gem_parent_id"] = example["gem_id"]
                example["gem_id"] = f"wiki_auto_asset_turk-{split}-{id_}"
                for k in ["source_id", "target_id"]:
                    if k in example:
                        del example[k]
                yield id_, example
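

if __name__ == "__main__":
    # Minimal smoke test, not part of the loader itself. Assumptions: the
    # relative files in _URLs (train.tsv, valid.tsv, benchmarks/*) sit next to
    # this script and the remote GEM/ASSET URLs are reachable.
    dataset = datasets.load_dataset(os.path.abspath(__file__))
    for split_name, split in dataset.items():
        print(split_name, split.num_rows)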