Commit
·
a29e26b
1
Parent(s):
1ded71e
Revert to the previous version
Browse files — malayalam_wiki.py: +37 −23
malayalam_wiki.py
CHANGED
@@ -1,8 +1,10 @@
|
|
|
|
|
|
1 |
import re
|
2 |
|
3 |
import datasets
|
4 |
|
5 |
-
|
6 |
|
7 |
_DESCRIPTION = """\
|
8 |
Common Crawl - Malayalam.
|
@@ -18,7 +20,8 @@ _CITATION = """\
|
|
18 |
"""
|
19 |
|
20 |
_URLs = {
|
21 |
-
"
|
|
|
22 |
}
|
23 |
|
24 |
|
@@ -44,6 +47,11 @@ class MalayalamWiki(datasets.GeneratorBasedBuilder):
|
|
44 |
),
|
45 |
]
|
46 |
|
|
|
|
|
|
|
|
|
|
|
47 |
def remove_special_characters(self, txt):
|
48 |
chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\”\�Utrnle\_]'
|
49 |
unicode_ignore_regex = r'[\u200e\u200c\u200d]'
|
@@ -55,19 +63,6 @@ class MalayalamWiki(datasets.GeneratorBasedBuilder):
|
|
55 |
txt = re.sub(english_ignore_regex, '',txt) + " "
|
56 |
return txt
|
57 |
|
58 |
-
def getfilenames(self, baseurl):
|
59 |
-
wiki_data_files = []
|
60 |
-
for i in range(20):
|
61 |
-
zeros = '00000'
|
62 |
-
if i > 9:
|
63 |
-
zeros = '0000'
|
64 |
-
|
65 |
-
file1 = baseurl + zeros + str(i) + '_html_body.txt'
|
66 |
-
file2 = baseurl + zeros + str(i) + '_heading_para.txt'
|
67 |
-
wiki_data_files.append(file1)
|
68 |
-
wiki_data_files.append(file2)
|
69 |
-
return wiki_data_files
|
70 |
-
|
71 |
def _info(self):
|
72 |
return datasets.DatasetInfo(
|
73 |
description=_DESCRIPTION,
|
@@ -83,21 +78,40 @@ class MalayalamWiki(datasets.GeneratorBasedBuilder):
|
|
83 |
|
84 |
def _split_generators(self, dl_manager):
|
85 |
"""Returns SplitGenerators."""
|
86 |
-
|
87 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
88 |
return [
|
89 |
datasets.SplitGenerator(
|
90 |
name=datasets.Split.TRAIN,
|
91 |
gen_kwargs={
|
92 |
-
"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
93 |
},
|
94 |
)
|
95 |
]
|
96 |
|
97 |
-
def _generate_examples(self,
|
98 |
-
|
99 |
-
for file_id, file in enumerate(filepaths):
|
100 |
-
logger.info("generating examples from = %s", file)
|
101 |
with open(file, encoding="utf-8") as f:
|
102 |
for row_id, row in enumerate(f):
|
103 |
-
yield f"{file_id}_{row_id}", {"text": self.remove_special_characters(row).strip()}
|
|
|
1 |
+
import random
|
2 |
+
import os
|
3 |
import re
|
4 |
|
5 |
import datasets
|
6 |
|
7 |
+
from datasets.tasks import TextClassification
|
8 |
|
9 |
_DESCRIPTION = """\
|
10 |
Common Crawl - Malayalam.
|
|
|
20 |
"""
|
21 |
|
22 |
# Download URLs for the two Common Crawl Malayalam archives hosted by QBurst.
# _split_generators extracts each tarball and reads the plain-text files from
# the matching subdirectory ("malayalam_filtered_html_body" /
# "unfiltered_heading_and_para").
_URLs = {
    "malayalam_wiki_1": "https://calicut.qburst.in/commoncrawl/malayalam/2020-10/malayalam_filtered_html_body.tar.gz",
    "malayalam_wiki_2": "https://calicut.qburst.in/commoncrawl/malayalam/2020-10/unfiltered_heading_and_para.tar.gz"
}
|
26 |
|
27 |
|
|
|
47 |
),
|
48 |
]
|
49 |
|
50 |
+
|
51 |
+
def partition (self, list_in, n):
|
52 |
+
random.shuffle(list_in)
|
53 |
+
return [list_in[i::n] for i in range(n)]
|
54 |
+
|
55 |
def remove_special_characters(self, txt):
|
56 |
chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\”\�Utrnle\_]'
|
57 |
unicode_ignore_regex = r'[\u200e\u200c\u200d]'
|
|
|
63 |
txt = re.sub(english_ignore_regex, '',txt) + " "
|
64 |
return txt
|
65 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
66 |
def _info(self):
|
67 |
return datasets.DatasetInfo(
|
68 |
description=_DESCRIPTION,
|
|
|
78 |
|
79 |
def _split_generators(self, dl_manager):
    """Returns SplitGenerators."""
    # TODO: implement iter_archive() instead of download_and_extract
    extracted = dl_manager.download_and_extract(_URLs)

    # Gather every file from both extracted archives. Each archive key
    # maps to the subdirectory its tarball unpacks into; listdir output
    # is sorted so the combined ordering is stable before shuffling.
    all_files = []
    for key, subdir in (
        ("malayalam_wiki_1", "malayalam_filtered_html_body"),
        ("malayalam_wiki_2", "unfiltered_heading_and_para"),
    ):
        root = os.path.join(extracted[key], subdir)
        for name in sorted(os.listdir(root)):
            all_files.append(os.path.join(root, name))

    # Randomly partition the files into three groups, one per split.
    train_files, validation_files, test_files = self.partition(all_files, 3)

    return [
        datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={
                "filepath": train_files,
                "split": "train",
            },
        ),
        datasets.SplitGenerator(
            name=datasets.Split.VALIDATION,
            gen_kwargs={
                "filepath": validation_files,
                "split": "validation",
            },
        ),
        datasets.SplitGenerator(
            name=datasets.Split.TEST,
            gen_kwargs={
                "filepath": test_files,
                "split": "test",
            },
        ),
    ]
|
112 |
|
113 |
+
def _generate_examples(self, filepath, split=None):
    """Yield ``(key, example)`` pairs from the text files in *filepath*.

    Args:
        filepath: list of paths to plain-text files.
        split: split name ("train"/"validation"/"test"). Accepted because
            ``_split_generators`` passes ``"split"`` in ``gen_kwargs``;
            without this parameter generation raised
            ``TypeError: unexpected keyword argument 'split'``. The value
            is otherwise unused. Defaults to None for backward
            compatibility with direct callers.

    Yields:
        A unique string id ``"<file_index>_<row_index>"`` and a dict with
        the cleaned line under ``"text"``.
    """
    for file_id, file in enumerate(filepath):
        with open(file, encoding="utf-8") as f:
            for row_id, row in enumerate(f):
                # remove_special_characters appends a trailing space to
                # the cleaned text; strip() removes it along with the
                # newline from the raw line.
                yield f"{file_id}_{row_id}", {"text": self.remove_special_characters(row).strip()}
|