Languages: Filipino

holylovenia committed (verified)
Commit 75f70f1 · Parent(s): 13966f7

Upload wikitext_tl_39.py with huggingface_hub

Files changed (1):
  1. wikitext_tl_39.py +111 -0
wikitext_tl_39.py ADDED
@@ -0,0 +1,111 @@
+import os
+from pathlib import Path
+
+import datasets
+
+from seacrowd.utils import schemas
+from seacrowd.utils.configs import SEACrowdConfig
+from seacrowd.utils.constants import Licenses, Tasks
+
+_CITATION = """
+@article{cruz2019evaluating,
+  title={Evaluating Language Model Finetuning Techniques for Low-resource Languages},
+  author={Cruz, Jan Christian Blaise and Cheng, Charibeth},
+  journal={arXiv preprint arXiv:1907.00409},
+  year={2019}
+}
+"""
+
+_DATASETNAME = "wikitext_tl_39"
+
+_DESCRIPTION = """A benchmark Language Modeling dataset for Tagalog. The dataset construction was done similarly to that of the WikiText
+Long Term Dependency Language Modeling Dataset, with some differences, such as in how Wikipedia was scraped and how the vocabulary was
+created. The dataset contains 39 Million tokens in the training set.
+"""
+
+_HOMEPAGE = "https://huggingface.co/datasets/wikitext_tl39"
+
+_LANGUAGES = ["fil"]
+
+_LICENSE = Licenses.GPL_3_0.value
+
+_LOCAL = False
+
+_URLS = {
+    _DATASETNAME: "https://s3.us-east-2.amazonaws.com/blaisecruz.com/datasets/wikitext-tl-39/wikitext-tl-39.zip",
+}
+
+_SUPPORTED_TASKS = [Tasks.SELF_SUPERVISED_PRETRAINING]
+
+_SOURCE_VERSION = "1.0.0"
+
+_SEACROWD_VERSION = "2024.06.20"
+
+
+class WikiTextTL39Dataset(datasets.GeneratorBasedBuilder):
+    """Large scale, unlabeled text dataset with 39 Million tokens in the training set in Tagalog."""
+
+    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+    BUILDER_CONFIGS = [
+        SEACrowdConfig(
+            name=f"{_DATASETNAME}_source",
+            version=SOURCE_VERSION,
+            description=f"{_DATASETNAME} source schema",
+            schema="source",
+            subset_id=_DATASETNAME,
+        ),
+        SEACrowdConfig(
+            name=f"{_DATASETNAME}_seacrowd_ssp",
+            version=SEACROWD_VERSION,
+            description=f"{_DATASETNAME} SEACrowd schema",
+            schema="seacrowd_ssp",
+            subset_id=_DATASETNAME,
+        ),
+    ]
+
+    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+    def _info(self) -> datasets.DatasetInfo:
+        features = schemas.ssp_features
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager: datasets.DownloadManager) -> list[datasets.SplitGenerator]:
+        data_dir = dl_manager.download_and_extract(_URLS[_DATASETNAME])
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={"filepath": os.path.join(data_dir, "wikitext-tl-39", "train.txt"), "split": "train"},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={"filepath": os.path.join(data_dir, "wikitext-tl-39", "test.txt"), "split": "test"},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={"filepath": os.path.join(data_dir, "wikitext-tl-39", "valid.txt"), "split": "valid"},
+            ),
+        ]
+
+    def _generate_examples(self, filepath: Path, split: str) -> tuple[int, dict]:
+        with open(filepath, encoding="utf-8") as f:
+            for i, row in enumerate(f):
+                if row.strip():
+                    yield i, {
+                        "id": str(i),
+                        "text": row,
+                    }
+                else:
+                    yield i, {
+                        "id": str(i),
+                        "text": "",
+                    }
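
For context, a minimal sketch (not part of the commit) of how the uploaded dataloader might be exercised from a local checkout. It assumes `wikitext_tl_39.py` sits in the working directory and that the `seacrowd` package (which provides `schemas` and `SEACrowdConfig`) is installed; the config names come from `BUILDER_CONFIGS` above, and the `trust_remote_code` flag is only needed on `datasets` releases that gate script-based loaders.

import datasets

# Load the default source config; the SEACrowd schema is available as
# "wikitext_tl_39_seacrowd_ssp".
dataset = datasets.load_dataset(
    "wikitext_tl_39.py",            # local path to the script uploaded in this commit
    name="wikitext_tl_39_source",
    trust_remote_code=True,         # required by recent `datasets` versions for script loaders
)

# Each example follows the ssp schema: an "id" and a raw "text" line.
print(dataset["train"][0])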