Tasks: Token Classification
Modalities: Text
Formats: parquet
Sub-tasks: named-entity-recognition
Size: 1M - 10M
Update files from the datasets library (from 1.16.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
wikiann.py  +31 -28  CHANGED
@@ -326,24 +326,24 @@ class Wikiann(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager):
         wikiann_dl_dir = dl_manager.download_and_extract(_DATA_URL)
         lang = self.config.name
-
+        lang_archive = os.path.join(wikiann_dl_dir, lang + ".tar.gz")

         return [
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
-                gen_kwargs={"filepath":
+                gen_kwargs={"filepath": "dev", "files": dl_manager.iter_archive(lang_archive)},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
-                gen_kwargs={"filepath":
+                gen_kwargs={"filepath": "test", "files": dl_manager.iter_archive(lang_archive)},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                gen_kwargs={"filepath":
+                gen_kwargs={"filepath": "train", "files": dl_manager.iter_archive(lang_archive)},
             ),
         ]

-    def _generate_examples(self, filepath):
+    def _generate_examples(self, filepath, files):
         """Reads line by line format of the NER dataset and generates examples.
         Input Format:
         en:rick B-PER
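In the hunk above, the splits stop receiving paths into an extracted directory: every SplitGenerator now gets the same lazy iterator over the language's tar.gz, plus the name of the archive member it should read ("dev", "test", or "train"). dl_manager.iter_archive yields (path inside the archive, binary file object) pairs, which is why the new _generate_examples below filters on path == filepath and decodes each line from bytes. As a rough standalone sketch of that contract using only the standard library — the helper name and the local archive path are illustrative assumptions, not part of the datasets API:

    import os
    import tarfile

    def iter_archive(archive_path):
        # Yield (member name, binary file object) pairs, mimicking what
        # dl_manager.iter_archive hands to _generate_examples. Contents
        # are bytes, so callers must decode lines themselves.
        with tarfile.open(archive_path) as tar:
            for member in tar.getmembers():
                if member.isfile():
                    yield member.name.lstrip("./"), tar.extractfile(member)

    # Illustrative local copy of one language archive:
    for name, f in iter_archive(os.path.join("downloads", "en.tar.gz")):
        if name == "dev":  # the same filtering the new _generate_examples performs
            print(name, f.readline().decode("utf-8"))

Because each split re-opens the archive through its own iterator, no split depends on the archive having been extracted to disk, which is what allows examples to be generated in a streaming fashion.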
@@ -365,27 +365,30 @@ class Wikiann(datasets.GeneratorBasedBuilder):
         Examples with the format listed above.
         """
         guid_index = 1
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                    splits = line.split("\t")
-                    # strip out en: prefix
-                    langs.append(splits[0].split(":")[0])
-                    tokens.append(":".join(splits[0].split(":")[1:]))
-                    if len(splits) > 1:
-                        ner_tags.append(splits[-1].replace("\n", ""))
+        for path, f in files:
+            if path == filepath:
+                tokens = []
+                ner_tags = []
+                langs = []
+                for line in f:
+                    line = line.decode("utf-8")
+                    if line == "" or line == "\n":
+                        if tokens:
+                            spans = self._get_spans(tokens, ner_tags)
+                            yield guid_index, {"tokens": tokens, "ner_tags": ner_tags, "langs": langs, "spans": spans}
+                            guid_index += 1
+                            tokens = []
+                            ner_tags = []
+                            langs = []
                     else:
-                        #
-
+                        # wikiann data is tab separated
+                        splits = line.split("\t")
+                        # strip out en: prefix
+                        langs.append(splits[0].split(":")[0])
+                        tokens.append(":".join(splits[0].split(":")[1:]))
+                        if len(splits) > 1:
+                            ner_tags.append(splits[-1].replace("\n", ""))
+                        else:
+                            # examples have no label in test set
+                            ner_tags.append("O")
+                break
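The second hunk leaves the on-disk line format untouched: one token per line as lang:token<TAB>tag (e.g. en:rick B-PER from the docstring), blank lines separating sentences, and unlabeled lines defaulting to the "O" tag. A small self-contained sketch of just that parsing logic, decoupled from the download machinery — the function name and driver code are illustrative only, though the sample lines follow the docstring's format:

    def parse_wikiann(lines):
        # Group tab-separated "lang:token\ttag" lines into sentences.
        tokens, ner_tags, langs = [], [], []
        for line in lines:
            if line.strip() == "":
                if tokens:  # a blank line closes the current sentence
                    yield {"tokens": tokens, "ner_tags": ner_tags, "langs": langs}
                    tokens, ner_tags, langs = [], [], []
            else:
                splits = line.rstrip("\n").split("\t")
                # Strip the language prefix; the token itself may contain ":".
                lang, _, token = splits[0].partition(":")
                langs.append(lang)
                tokens.append(token)
                # Lines with no label column (as in some test data) default to "O".
                ner_tags.append(splits[-1] if len(splits) > 1 else "O")
        if tokens:  # flush a final sentence that has no trailing blank line
            yield {"tokens": tokens, "ner_tags": ner_tags, "langs": langs}

    sample = ["en:rick\tB-PER\n", "en:and\tO\n", "en:morty\tB-PER\n", "\n"]
    print(next(parse_wikiann(sample)))
    # {'tokens': ['rick', 'and', 'morty'], 'ner_tags': ['B-PER', 'O', 'B-PER'], 'langs': ['en', 'en', 'en']}

From a user's point of view loading is unchanged, e.g. datasets.load_dataset("wikiann", "en"); the difference is that examples are now read straight out of the downloaded archive instead of requiring extraction first, in line with the streaming-oriented changes in the 1.16.0 release notes linked above.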