import datasets
from pathlib import Path
import random

_DESCRIPTION = "Custom dataset with audio (.wav) and phoneme (.txt) pairs, split by speaker."
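
# Expected on-disk layout under the directory passed as `data_dir`
# (inferred from the paths used below):
#   <data_dir>/wav/<speaker>/<utterance>.wav
#   <data_dir>/phonemized/<speaker>/<utterance>.txt
# Audio and phoneme files are paired by speaker and file stem.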

# Fixed seed so the speaker split is reproducible across runs
_SEED = 42

class CustomAudioPhonemeDataset(datasets.GeneratorBasedBuilder):
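    """Builder for speaker-split audio/phoneme data.

    Pairs each wav/<speaker>/<utterance>.wav with phonemized/<speaker>/<utterance>.txt
    and assigns whole speakers to train/validation/test (80/10/10).
    """
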
    VERSION = datasets.Version("1.0.5")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "audio": datasets.Audio(sampling_rate=16000),
                "phoneme": datasets.Sequence(datasets.Value("string")),
                "speaker": datasets.Value("string"),
            }),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.manual_dir

        # Get all speakers from 'wav'
        wav_dir = Path(data_dir) / 'wav'
        speakers = sorted([d.name for d in wav_dir.iterdir() if d.is_dir()])
        random.seed(_SEED)
        random.shuffle(speakers)

        # 80/10/10 speaker-level split; speakers left after train + val go to test
        N = len(speakers)
        n_train = int(0.8 * N)
        n_val = int(0.1 * N)

        train_speakers = set(speakers[:n_train])
        val_speakers = set(speakers[n_train:n_train + n_val])
        test_speakers = set(speakers[n_train + n_val:])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"dataset_folder": data_dir, "split_speakers": train_speakers},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"dataset_folder": data_dir, "split_speakers": val_speakers},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"dataset_folder": data_dir, "split_speakers": test_speakers},
            ),
        ]

    def _generate_examples(self, dataset_folder, split_speakers):
        dataset_folder = Path(dataset_folder)
        wav_dir = dataset_folder / 'wav'
        phoneme_dir = dataset_folder / 'phonemized'

        # Gather audio and phoneme files recursively, skipping macOS metadata files ('._' prefix)
        audio_files = sorted([p for p in wav_dir.rglob('*.wav') if not p.name.startswith('._')])
        phoneme_files = sorted([p for p in phoneme_dir.rglob('*.txt') if not p.name.startswith('._')])

        # Speaker is the immediate parent directory name
        # (assumes wav/<speaker>/<utterance>.wav and phonemized/<speaker>/<utterance>.txt)
        def get_speaker(path):
            return path.parent.name

        # Match by base name
        audio_map = {(get_speaker(p), p.stem): p for p in audio_files}
        phoneme_map = {(get_speaker(p), p.stem): p for p in phoneme_files}

        # Keep only (speaker, stem) pairs present in both maps, restricted to this split's speakers
        keys = set(audio_map.keys()) & set(phoneme_map.keys())
        keys = [k for k in keys if k[0] in split_speakers]

        for idx, (speaker, stem) in enumerate(sorted(keys)):
            audio_path = str(audio_map[(speaker, stem)])
            phoneme_path = str(phoneme_map[(speaker, stem)])

            with open(phoneme_path, 'r', encoding='utf-8') as f:
                phoneme = f.read().split()

            yield idx, {
                "audio": {"path": audio_path, "bytes": None},
                "phoneme": phoneme,
                "speaker": speaker,
            }
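
# Minimal usage sketch, not part of the loader itself. Assumes a `datasets`
# version that still supports script-based builders; the data_dir below is a
# placeholder for wherever the wav/ and phonemized/ folders live.
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, data_dir="/path/to/dataset")
    print(ds)
    sample = ds["train"][0]
    print(sample["speaker"], sample["phoneme"][:10])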