phongdtd committed on
Commit c7523f7 · 1 Parent(s): 727a08a

init commit

Files changed (3)
  1. .gitignore +2 -0
  2. VinDataVLSP.py +169 -0
  3. dataset_infos.json +51 -0
.gitignore ADDED
@@ -0,0 +1,2 @@
+ /venv/
+ /.idea
VinDataVLSP.py ADDED
@@ -0,0 +1,169 @@
+ # coding=utf-8
+ # Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """VinDataVLSP Dataset."""
+
+
+ import re
+
+ import pandas as pd
+ import datasets
+ from datasets.tasks import AutomaticSpeechRecognition
+
+ _DATA_URL = "https://drive.google.com/uc?export=download&id=1LZ05QBbicWPvisdIx6_B_QVnswIbNdqW"
+ _PROMPTS_URLS = {
+     "train": "https://drive.google.com/uc?export=download&id=1eOOvCDz0uOBBRzsHK7NALcGA70-XbQrd",
+     "test": "https://drive.google.com/uc?export=download&id=1r2wy5K0VL7wL_iMdtzMhGEy-_k3M2Gdv",
+     "validation": "https://drive.google.com/uc?export=download&id=1c0YsA4x1Up9qjDpsj1VKH_86m85cTi79",
+ }
+
+ _DESCRIPTION = """\
+ """
+
+ _LANGUAGES = {
+     "vi": {
+         "Language": "Vietnamese",
+         "Date": "2021-12-11",
+         "Size": "11 GB",
+         "Version": "vi_100h_2021-12-11",
+     },
+ }
+
+
+ class VinDataVLSPConfig(datasets.BuilderConfig):
+     """BuilderConfig for VinDataVLSP."""
+
+     def __init__(self, name, sub_version, **kwargs):
+         """
+         Args:
+           name: `string`, name of the config (here, the language id).
+           sub_version: `string`, version string of the data snapshot.
+           **kwargs: keyword arguments forwarded to super; `language`, `date`,
+             `size`, `val_hrs`, `total_hrs` and `num_of_voice` are popped off
+             and stored on the config.
+         """
+         self.sub_version = sub_version
+         self.language = kwargs.pop("language", None)
+         self.date_of_snapshot = kwargs.pop("date", None)
+         self.size = kwargs.pop("size", None)
+         self.validated_hr_total = kwargs.pop("val_hrs", None)
+         self.total_hr_total = kwargs.pop("total_hrs", None)
+         self.num_of_voice = kwargs.pop("num_of_voice", None)
+         description = ""
+         super(VinDataVLSPConfig, self).__init__(
+             name=name, version=datasets.Version("0.1.0", ""), description=description, **kwargs
+         )
+
+
+ class VinDataVLSP(datasets.GeneratorBasedBuilder):
+
+     DEFAULT_WRITER_BATCH_SIZE = 1000
+     BUILDER_CONFIGS = [
+         VinDataVLSPConfig(
+             name=lang_id,
+             language=_LANGUAGES[lang_id]["Language"],
+             sub_version=_LANGUAGES[lang_id]["Version"],
+             # date=_LANGUAGES[lang_id]["Date"],
+             # size=_LANGUAGES[lang_id]["Size"],
+             # val_hrs=_LANGUAGES[lang_id]["Validated_Hr_Total"],
+             # total_hrs=_LANGUAGES[lang_id]["Overall_Hr_Total"],
+             # num_of_voice=_LANGUAGES[lang_id]["Number_Of_Voice"],
+         )
+         for lang_id in _LANGUAGES.keys()
+     ]
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "file_path": datasets.Value("string"),
+                 "script": datasets.Value("string"),
+                 "audio": datasets.Audio(sampling_rate=16_000),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             task_templates=[
+                 AutomaticSpeechRecognition(audio_file_path_column="file_path", transcription_column="script")
+             ],
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         tsv_files = dl_manager.download(_PROMPTS_URLS)
+         archive = dl_manager.download(_DATA_URL)
+         path_to_clips = "VinDataVLSP"  # top-level clips folder inside the archive; no "./" so keys match iter_archive paths
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "tsv_file": tsv_files["train"],
+                     "audio_files": dl_manager.iter_archive(archive),
+                     "path_to_clips": path_to_clips,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "tsv_file": tsv_files["test"],
+                     "audio_files": dl_manager.iter_archive(archive),
+                     "path_to_clips": path_to_clips,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "tsv_file": tsv_files["validation"],
+                     "audio_files": dl_manager.iter_archive(archive),
+                     "path_to_clips": path_to_clips,
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, tsv_file, audio_files, path_to_clips):
+         """Yields examples."""
+         data_fields = list(self._info().features.keys())
+
+         # "audio" is a derived field, not a column of the TSV files
+         data_fields.remove("audio")
+         examples = {}
+
+         df = pd.read_csv(tsv_file, sep="\t", header=0)
+         df = df.dropna()
+         chars_to_ignore_regex = r'[,?.!\-;:"“%\'�]'
+
+         for file_path, script, duration in zip(df["file_path"], df["script"], df["duration"]):
+             # full path of the audio file inside the archive
+             audio_path = path_to_clips + "/" + file_path
+
+             # drop a leading speaker label (everything up to the first colon)
+             if ":" in script:
+                 colon_index = script.index(":")
+                 script = script[colon_index + 1:]
+             script = script.replace("\n", " ")
+             script = re.sub(chars_to_ignore_regex, "", script).lower()
+
+             examples[audio_path] = {
+                 "file_path": audio_path,
+                 "script": script,
+             }
+
+         for path, f in audio_files:
+             if path.startswith(path_to_clips):
+                 if path in examples:
+                     audio = {"path": path, "bytes": f.read()}
+                     yield path, {**examples[path], "audio": audio}
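
For context, a loading script like this is consumed through `datasets.load_dataset`. A minimal usage sketch, assuming the script is hosted as the Hub repo `phongdtd/VinDataVLSP` (the committer's namespace; the repo id is not stated in this commit) and that the Google Drive links above are still reachable:

```python
from datasets import load_dataset

# "vi" is the only config defined in _LANGUAGES above
ds = load_dataset("phongdtd/VinDataVLSP", "vi", split="train")

sample = ds[0]
print(sample["file_path"])               # clip path inside the downloaded archive
print(sample["script"])                  # lowercased transcript with punctuation stripped
print(sample["audio"]["sampling_rate"])  # 16000, as declared in _info()
```

The first call downloads the ~11 GB archive plus the three split TSVs, then streams the tar once per split via `iter_archive`, matching archive members against the paths built from the TSV.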
dataset_infos.json ADDED
@@ -0,0 +1,51 @@
+ {
+     "vi": {
+         "description": "Common Voice is Mozilla's initiative to help teach machines how real people speak.",
+         "citation": "@inproceedings{commonvoice:2020,\n author = {Ardila, R. and Branson, M. and Davis, K. and Henretty, M. and Kohler, M. and Meyer, J. and Morais, R. and Saunders, L. and Tyers, F. M. and Weber, G.},\n title = {Common Voice: A Massively-Multilingual Speech Corpus},\n booktitle = {Proceedings of the 12th Conference on Language Resources and Evaluation (LREC 2020)},\n pages = {4211--4215},\n year = 2020\n}\n",
+         "homepage": "https://commonvoice.mozilla.org/en/datasets",
+         "license": "https://github.com/common-voice/common-voice/blob/main/LICENSE",
+         "features": {
+             "file_path": {
+                 "dtype": "string",
+                 "id": null,
+                 "_type": "Value"
+             },
+             "script": {
+                 "dtype": "string",
+                 "id": null,
+                 "_type": "Value"
+             },
+             "duration": {
+                 "dtype": "float16",
+                 "id": null,
+                 "_type": "Value"
+             },
+             "audio": {
+                 "sampling_rate": 16000,
+                 "mono": true,
+                 "_storage_dtype": "struct",
+                 "id": null,
+                 "_type": "Audio"
+             }
+         },
+         "post_processed": null,
+         "supervised_keys": null,
+         "task_templates": [
+             {
+                 "task": "automatic-speech-recognition",
+                 "audio_file_path_column": "file_path",
+                 "transcription_column": "script"
+             }
+         ],
+         "builder_name": "custom_common_voice",
+         "config_name": "vi",
+         "version": {
+             "version_str": "0.2.0",
+             "description": "",
+             "major": 0,
+             "minor": 2,
+             "patch": 0
+         },
+         "post_processing_size": null
+     }
+ }
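
The `description`, `citation`, `homepage`, and `license` fields above are carried over from the Common Voice template, and the recorded version (0.2.0) differs from the 0.1.0 declared in VinDataVLSP.py; regenerating the file with `datasets-cli test ./VinDataVLSP.py --save_infos --all_configs` would bring it back in sync. A small sketch for spot-checking that drift, assuming a script-supporting `datasets` release and that VinDataVLSP.py sits in the working directory:

```python
from datasets import load_dataset_builder

# Instantiate the builder from the local script without downloading any data
builder = load_dataset_builder("./VinDataVLSP.py", "vi")
print(builder.info.features)  # should agree with the "features" block above
print(builder.info.version)   # 0.1.0 in the script vs 0.2.0 recorded here
```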