Tasks: Token Classification
Modalities: Text
Formats: parquet
Sub-tasks: named-entity-recognition
Size: 1M - 10M
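As a quick orientation, the dataset described by these tags can be pulled with the standard datasets API. A minimal sketch, assuming the hub id "wikiann" and using the English config as an example (each config is a language code):

from datasets import load_dataset

# Each WikiAnn config is a language code; "en" is used here only as an example.
dataset = load_dataset("wikiann", "en")

# Every example carries parallel lists of tokens, NER tags, and language codes.
print(dataset["train"][0])
# e.g. {'tokens': [...], 'ner_tags': [...], 'langs': [...]}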
Update files from the datasets library (from 1.5.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.5.0
wikiann.py CHANGED (+3, -3)
@@ -296,7 +296,7 @@ class Wikiann(datasets.GeneratorBasedBuilder):
             ner_tags = []
             langs = []
             for line in f:
-                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
+                if line == "" or line == "\n":
                     if tokens:
                         yield guid_index, {"tokens": tokens, "ner_tags": ner_tags, "langs": langs}
                         guid_index += 1
@@ -307,8 +307,8 @@ class Wikiann(datasets.GeneratorBasedBuilder):
                     # wikiann data is tab separated
                     splits = line.split("\t")
                     # strip out en: prefix
-                    langs.append(splits[0][:2])
-                    tokens.append(splits[0][3:])
+                    langs.append(splits[0].split(":")[0])
+                    tokens.append(":".join(splits[0].split(":")[1:]))
                     if len(splits) > 1:
                         ner_tags.append(splits[-1].replace("\n", ""))
                     else: