|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""tatoeba-mt""" |
|
|
|
|
|
import os |
|
import csv |
|
import datasets |
|
|
|
|
|
_CITATION = """\ |
|
@inproceedings{tiedemann-2020-tatoeba, |
|
title = "The {T}atoeba {T}ranslation {C}hallenge {--} {R}ealistic Data Sets for Low Resource and Multilingual {MT}", |
|
author = {Tiedemann, J{\"o}rg}, |
|
booktitle = "Proceedings of the Fifth Conference on Machine Translation", |
|
month = nov, |
|
year = "2020", |
|
publisher = "Association for Computational Linguistics", |
|
url = "https://aclanthology.org/2020.wmt-1.139", |
|
pages = "1174--1182", |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
The Tatoeba Translation Challenge is a multilingual data set of |
|
machine translation benchmarks derived from user-contributed |
|
translations collected by [Tatoeba.org](https://tatoeba.org/) and |
|
provided as parallel corpus from [OPUS](https://opus.nlpl.eu/). This |
|
dataset includes test and development data sorted by language pair. It |
|
includes test sets for hundreds of language pairs and is continuously |
|
updated. Please, check the version number tag to refer to the release |
|
that your are using. |
|
""" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
_LanguagePairs = ['ja-zh_cn'] |
|
_LICENSE = "cc-by-2.0" |
|
_HOMEPAGE = "https://github.com/Helsinki-NLP/Tatoeba-Challenge/" |
|
|
|
class tatoebaMTConfig(datasets.BuilderConfig):
    """BuilderConfig for the tatoeba-mt dataset."""

    def __init__(self, language_pair, **kwargs):
        """Create a BuilderConfig for one tatoeba-mt language pair.

        Args:
            language_pair: the language pair to load, e.g. ``'ja-zh_cn'``.
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``
                (``name``, ``version``, ``description``, ...).
        """
        # NOTE: the original placed the docstring *after* this call, making it
        # a no-op string expression instead of the method docstring.
        super().__init__(**kwargs)
        self.language_pair = language_pair
|
|
|
|
|
|
|
class tatoebaMT(datasets.GeneratorBasedBuilder):
    """Builder for the Tatoeba Translation Challenge benchmark.

    The Tatoeba Translation Challenge is a multilingual data set of
    machine translation benchmarks derived from user-contributed
    translations collected at https://tatoeba.org/ and provided as
    parallel corpus from https://opus.nlpl.eu/. This dataset includes
    test and development data sorted by language pair. It includes
    test sets for hundreds of language pairs and is continuously
    updated. Please, check the version number tag to refer to the
    release that you are using.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = tatoebaMTConfig
    # One config per supported pair (currently only 'ja-zh_cn').
    BUILDER_CONFIGS = [
        tatoebaMTConfig(name=pair, description=_DESCRIPTION, language_pair=pair)
        for pair in _LanguagePairs
    ]

    def _info(self):
        """Return the DatasetInfo describing features, homepage and license."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                # Key casing ("sourceLang" vs "targetlang") is inconsistent but
                # kept as-is: changing it would break downstream consumers.
                "sourceLang": datasets.Value("string"),
                "targetlang": datasets.Value("string"),
                "sourceString": datasets.Value("string"),
                "targetString": datasets.Value("string")
            }),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE
        )

    def _split_generators(self, dl_manager):
        """Download the data file(s) and declare the dataset splits."""
        # Currently unused because only one pair exists; kept for when
        # _LanguagePairs grows and per-pair file paths are needed.
        lang_pair = self.config.language_pair

        files = {'train': 'train.csv'}
        output = []

        # Maps each logical name to the local path of the downloaded file.
        data_dir = dl_manager.download_and_extract(files)

        if 'train' in files:
            train = datasets.SplitGenerator(
                # Bug fix: the 'train' file was previously exposed as
                # datasets.Split.TEST; align the split name with the data.
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir["train"]
                }
            )
            output.append(train)

        return output

    def _generate_examples(self, filepath):
        """Yield (id, example) pairs read from a two-column CSV file.

        Column 0 is the Japanese source sentence, column 1 the Simplified
        Chinese target. Missing cells yield empty strings instead of raising.
        """
        with open(filepath, encoding="utf-8") as f:
            reader = csv.reader(f, delimiter=",", quotechar='"')
            for id_, row in enumerate(reader):
                yield id_, {
                    "sourceLang": 'ja',
                    "targetlang": 'zh_cn',
                    # Guard both columns: csv.reader yields [] for blank
                    # lines, which previously crashed on row[0].
                    "sourceString": row[0] if row else "",
                    "targetString": row[1] if len(row) > 1 else ""
                }