Simplify Common Voice code (#3817)

* simplify common_voice
* one iter_archive per split

Commit from https://github.com/huggingface/datasets/commit/f5929980e7d857d491a20c35305ea01a24c25326
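The second bullet, "one iter_archive per split", matters because dl_manager.iter_archive returns a one-shot iterator over the files of a TAR archive: once one split's generator has walked it, a shared iterator is empty for every later split. A minimal sketch of the pitfall in plain Python, using a hypothetical iter_archive_stub rather than the real datasets implementation:

def iter_archive_stub():
    # stand-in that yields (path_within_archive, file_bytes) pairs,
    # the same shape dl_manager.iter_archive yields
    yield "clips/a.mp3", b"..."
    yield "clips/b.mp3", b"..."

shared = iter_archive_stub()
print(len(list(shared)))  # 2: a first pass ("train") consumes everything
print(len(list(shared)))  # 0: a second pass ("test") would see nothing

# hence the fix in this commit: one fresh call per split
for split in ["train", "test", "dev"]:
    assert len(list(iter_archive_stub())) == 2

This is why every SplitGenerator in the diff below builds its gen_kwargs with its own dl_manager.iter_archive(archive_path) instead of sharing a single iterator.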
common_voice.py CHANGED (+59 −90)
@@ -659,135 +659,100 @@ class CommonVoice(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-
+        # Download the TAR archive that contains the audio files:
         archive_path = dl_manager.download(_DATA_URL.format(self.config.name))
-        # [~20 deleted lines not recovered from the page extraction: the old
-        #  streaming vs. non-streaming setup that defined archive_iterator and extracted_dir]
-        path_to_data = os.path.join(extracted_dir, "cv-corpus-6.1-2020-12-11", self.config.name)
-        path_to_clips = os.path.join(path_to_data, "clips")
-        metadata_filepaths = {
-            split: os.path.join(path_to_data, f"{split}.tsv")
-            for split in ["train", "test", "dev", "other", "validated", "invalidated"]
-        }
+
+        # First we locate the data using the path within the archive:
+        path_to_data = "/".join(["cv-corpus-6.1-2020-12-11", self.config.name])
+        path_to_clips = "/".join([path_to_data, "clips"])
+        metadata_filepaths = {
+            split: "/".join([path_to_data, f"{split}.tsv"])
+            for split in ["train", "test", "dev", "other", "validated", "invalidated"]
+        }
+        # (Optional) In non-streaming mode, we can extract the archive locally to have actual local audio files:
+        local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else None
+
+        # To access the audio data from the TAR archives using the download manager,
+        # we have to use the dl_manager.iter_archive method.
+        #
+        # This is because dl_manager.download_and_extract
+        # doesn't work to stream TAR archives in streaming mode.
+        # (we have to stream the files of a TAR archive one by one)
+        #
+        # The iter_archive method returns an iterable of (path_within_archive, file_obj) for every
+        # file in the TAR archive.
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "streaming": dl_manager.is_streaming,
-                    "archive_iterator": archive_iterator,
-                    "filepath": metadata_filepaths["train"],
+                    "local_extracted_archive": local_extracted_archive,
+                    "archive_iterator": dl_manager.iter_archive(
+                        archive_path
+                    ),  # use iter_archive here to access the files in the TAR archives
+                    "metadata_filepath": metadata_filepaths["train"],
                     "path_to_clips": path_to_clips,
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "streaming": dl_manager.is_streaming,
-                    "archive_iterator": archive_iterator,
-                    "filepath": metadata_filepaths["test"],
+                    "local_extracted_archive": local_extracted_archive,
+                    "archive_iterator": dl_manager.iter_archive(
+                        archive_path
+                    ),  # use iter_archive here to access the files in the TAR archives
+                    "metadata_filepath": metadata_filepaths["test"],
                     "path_to_clips": path_to_clips,
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    "streaming": dl_manager.is_streaming,
-                    "archive_iterator": archive_iterator,
-                    "filepath": metadata_filepaths["dev"],
+                    "local_extracted_archive": local_extracted_archive,
+                    "archive_iterator": dl_manager.iter_archive(
+                        archive_path
+                    ),  # use iter_archive here to access the files in the TAR archives
+                    "metadata_filepath": metadata_filepaths["dev"],
                     "path_to_clips": path_to_clips,
                 },
             ),
             datasets.SplitGenerator(
                 name="other",
                 gen_kwargs={
-                    "streaming": dl_manager.is_streaming,
-                    "archive_iterator": archive_iterator,
-                    "filepath": metadata_filepaths["other"],
+                    "local_extracted_archive": local_extracted_archive,
+                    "archive_iterator": dl_manager.iter_archive(
+                        archive_path
+                    ),  # use iter_archive here to access the files in the TAR archives
+                    "metadata_filepath": metadata_filepaths["other"],
                     "path_to_clips": path_to_clips,
                 },
             ),
             datasets.SplitGenerator(
                 name="validated",
                 gen_kwargs={
-                    "streaming": dl_manager.is_streaming,
-                    "archive_iterator": archive_iterator,
-                    "filepath": metadata_filepaths["validated"],
+                    "local_extracted_archive": local_extracted_archive,
+                    "archive_iterator": dl_manager.iter_archive(
+                        archive_path
+                    ),  # use iter_archive here to access the files in the TAR archives
+                    "metadata_filepath": metadata_filepaths["validated"],
                     "path_to_clips": path_to_clips,
                 },
            ),
             datasets.SplitGenerator(
                 name="invalidated",
                 gen_kwargs={
-                    "streaming": dl_manager.is_streaming,
-                    "archive_iterator": archive_iterator,
-                    "filepath": metadata_filepaths["invalidated"],
+                    "local_extracted_archive": local_extracted_archive,
+                    "archive_iterator": dl_manager.iter_archive(
+                        archive_path
+                    ),  # use iter_archive here to access the files in the TAR archives
+                    "metadata_filepath": metadata_filepaths["invalidated"],
                     "path_to_clips": path_to_clips,
                 },
             ),
         ]
 
-    def _generate_examples(self, streaming, archive_iterator, filepath, path_to_clips):
+    def _generate_examples(self, local_extracted_archive, archive_iterator, metadata_filepath, path_to_clips):
         """Yields examples."""
-        if streaming:
-            yield from self._generate_examples_streaming(archive_iterator, filepath, path_to_clips)
-        else:
-            yield from self._generate_examples_non_streaming(filepath, path_to_clips)
-
-    def _generate_examples_non_streaming(self, filepath, path_to_clips):
-        """Yields examples in non-streaming mode."""
-        data_fields = list(self._info().features.keys())
-
-        # audio is not a header of the csv files
-        data_fields.remove("audio")
-        path_idx = data_fields.index("path")
-
-        with open(filepath, encoding="utf-8") as f:
-            lines = f.readlines()
-            headline = lines[0]
-
-            column_names = headline.strip().split("\t")
-            assert (
-                column_names == data_fields
-            ), f"The file should have {data_fields} as column names, but has {column_names}"
-
-            for id_, line in enumerate(lines[1:]):
-                field_values = line.strip().split("\t")
-
-                # set absolute path for mp3 audio file
-                field_values[path_idx] = os.path.join(path_to_clips, field_values[path_idx])
-
-                # if data is incomplete, fill with empty values
-                if len(field_values) < len(data_fields):
-                    field_values += (len(data_fields) - len(field_values)) * ["''"]
-
-                result = {key: value for key, value in zip(data_fields, field_values)}
-
-                # set audio feature
-                result["audio"] = field_values[path_idx]
-
-                yield id_, result
-
-    def _generate_examples_streaming(self, archive_iterator, filepath, path_to_clips):
-        """Yields examples in streaming mode."""
         data_fields = list(self._info().features.keys())
 
         # audio is not a header of the csv files
@@ -796,8 +761,10 @@ class CommonVoice(datasets.GeneratorBasedBuilder):
 
         all_field_values = {}
         metadata_found = False
+        # Here we iterate over all the files within the TAR archive:
         for path, f in archive_iterator:
-            if path == filepath:
+            # Parse the metadata CSV file
+            if path == metadata_filepath:
                 metadata_found = True
                 lines = f.readlines()
                 headline = lines[0].decode("utf-8")
@@ -811,11 +778,13 @@ class CommonVoice(datasets.GeneratorBasedBuilder):
                     # set full path for mp3 audio file
                     audio_path = "/".join([path_to_clips, field_values[path_idx]])
                     all_field_values[audio_path] = field_values
+            # Else, read the audio file and yield an example
             elif path.startswith(path_to_clips):
                 assert metadata_found, "Found audio clips before the metadata TSV file."
                 if not all_field_values:
                     break
                 if path in all_field_values:
+                    # retrieve the metadata corresponding to this audio file
                     field_values = all_field_values[path]
 
                     # if data is incomplete, fill with empty values
@@ -826,7 +795,7 @@ class CommonVoice(datasets.GeneratorBasedBuilder):
 
                     # set audio feature
                     result["audio"] = {"path": path, "bytes": f.read()}
-                    # set path to None
-                    result["path"] = None
+                    # set path to None if the audio file doesn't exist locally (i.e. in streaming mode)
+                    result["path"] = os.path.join(local_extracted_archive, path) if local_extracted_archive else None
 
                     yield path, result
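Downstream, the simplified loader gives exactly the behavior the last hunk encodes: "path" is a real local file only when the archive was extracted. A usage sketch, assuming this Common Voice 6.1 script and using "tr" purely as an illustrative config name:

from datasets import load_dataset

# Streaming mode: the audio bytes come straight from the TAR archive,
# so no local file exists and "path" is None.
streamed = load_dataset("common_voice", "tr", split="train", streaming=True)
example = next(iter(streamed))
print(example["path"])  # None
# example["audio"] holds the clip read from the archive; decoding the mp3
# on access requires an audio decoding backend to be installed.

# Non-streaming mode: the archive is extracted first (local_extracted_archive),
# so "path" points to an actual mp3 inside the extracted folder.
regular = load_dataset("common_voice", "tr", split="train")
print(regular[0]["path"])  # <extracted dir>/cv-corpus-6.1-2020-12-11/tr/clips/<clip>.mp3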