Datasets:
Tasks:
Automatic Speech Recognition
Formats:
parquet
Languages:
English
Size:
1M - 10M
ArXiv:
License:
Change validation set name
Browse files — peoples_speech.py (+8 −8)
peoples_speech.py
CHANGED
|
@@ -135,12 +135,12 @@ class PeoplesSpeech(datasets.GeneratorBasedBuilder):
|
|
| 135 |
]
|
| 136 |
|
| 137 |
n_files_train = self._get_n_files(dl_manager, split="train", config=self.config.name)
|
| 138 |
-
|
| 139 |
n_files_test = self._get_n_files(dl_manager, split="test", config="test")
|
| 140 |
|
| 141 |
urls = {
|
| 142 |
"train": [_DATA_URL.format(split="train", config=self.config.name, archive_id=i) for i in range(n_files_train)],
|
| 143 |
-
"
|
| 144 |
"test": [_DATA_URL.format(split="test", config="test", archive_id=i) for i in range(n_files_test)],
|
| 145 |
}
|
| 146 |
archive_paths = dl_manager.download(urls)
|
|
@@ -149,13 +149,13 @@ class PeoplesSpeech(datasets.GeneratorBasedBuilder):
|
|
| 149 |
local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else \
|
| 150 |
{
|
| 151 |
"train": [None] * len(archive_paths),
|
| 152 |
-
"
|
| 153 |
"test": [None] * len(archive_paths),
|
| 154 |
}
|
| 155 |
|
| 156 |
manifest_urls = {
|
| 157 |
"train": _MANIFEST_URL.format(split="train", config=self.config.name),
|
| 158 |
-
"
|
| 159 |
"test": _MANIFEST_URL.format(split="test", config="test"),
|
| 160 |
}
|
| 161 |
manifest_paths = dl_manager.download_and_extract(manifest_urls)
|
|
@@ -183,16 +183,16 @@ class PeoplesSpeech(datasets.GeneratorBasedBuilder):
|
|
| 183 |
datasets.SplitGenerator(
|
| 184 |
name=datasets.Split.VALIDATION,
|
| 185 |
gen_kwargs={
|
| 186 |
-
"local_extracted_archive_paths": local_extracted_archive_paths["
|
| 187 |
# use iter_archive here to access the files in the TAR archives:
|
| 188 |
-
"archives": [dl_manager.iter_archive(path) for path in archive_paths["
|
| 189 |
-
"manifest_path": manifest_paths["
|
| 190 |
},
|
| 191 |
),
|
| 192 |
datasets.SplitGenerator(
|
| 193 |
name=datasets.Split.TEST,
|
| 194 |
gen_kwargs={
|
| 195 |
-
"local_extracted_archive_paths": local_extracted_archive_paths["
|
| 196 |
# use iter_archive here to access the files in the TAR archives:
|
| 197 |
"archives": [dl_manager.iter_archive(path) for path in archive_paths["test"]],
|
| 198 |
"manifest_path": manifest_paths["test"],
|
|
|
|
| 135 |
]
|
| 136 |
|
| 137 |
n_files_train = self._get_n_files(dl_manager, split="train", config=self.config.name)
|
| 138 |
+
n_files_validation = self._get_n_files(dl_manager, split="validation", config="validation")
|
| 139 |
n_files_test = self._get_n_files(dl_manager, split="test", config="test")
|
| 140 |
|
| 141 |
urls = {
|
| 142 |
"train": [_DATA_URL.format(split="train", config=self.config.name, archive_id=i) for i in range(n_files_train)],
|
| 143 |
+
"validation": [_DATA_URL.format(split="validation", config="validation", archive_id=i) for i in range(n_files_validation)],
|
| 144 |
"test": [_DATA_URL.format(split="test", config="test", archive_id=i) for i in range(n_files_test)],
|
| 145 |
}
|
| 146 |
archive_paths = dl_manager.download(urls)
|
|
|
|
| 149 |
local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else \
|
| 150 |
{
|
| 151 |
"train": [None] * len(archive_paths),
|
| 152 |
+
"validation": [None] * len(archive_paths),
|
| 153 |
"test": [None] * len(archive_paths),
|
| 154 |
}
|
| 155 |
|
| 156 |
manifest_urls = {
|
| 157 |
"train": _MANIFEST_URL.format(split="train", config=self.config.name),
|
| 158 |
+
"validation": _MANIFEST_URL.format(split="validation", config="validation"),
|
| 159 |
"test": _MANIFEST_URL.format(split="test", config="test"),
|
| 160 |
}
|
| 161 |
manifest_paths = dl_manager.download_and_extract(manifest_urls)
|
|
|
|
| 183 |
datasets.SplitGenerator(
|
| 184 |
name=datasets.Split.VALIDATION,
|
| 185 |
gen_kwargs={
|
| 186 |
+
"local_extracted_archive_paths": local_extracted_archive_paths["validation"],
|
| 187 |
# use iter_archive here to access the files in the TAR archives:
|
| 188 |
+
"archives": [dl_manager.iter_archive(path) for path in archive_paths["validation"]],
|
| 189 |
+
"manifest_path": manifest_paths["validation"],
|
| 190 |
},
|
| 191 |
),
|
| 192 |
datasets.SplitGenerator(
|
| 193 |
name=datasets.Split.TEST,
|
| 194 |
gen_kwargs={
|
| 195 |
+
"local_extracted_archive_paths": local_extracted_archive_paths["test"],
|
| 196 |
# use iter_archive here to access the files in the TAR archives:
|
| 197 |
"archives": [dl_manager.iter_archive(path) for path in archive_paths["test"]],
|
| 198 |
"manifest_path": manifest_paths["test"],
|