Datasets:
Enhance HyVoxPopuli dataset: update README, add validation script, improve dataset loader, and expand examples
Browse files- .gitignore +42 -0
- CONTRIBUTING.md +65 -0
- README.md +182 -59
- dataset_infos.json +13 -0
- examples/load_dataset.py +38 -0
- hyvoxpopuli.py +131 -59
- pyproject.toml +32 -0
- requirements.txt +14 -0
- scripts/validate_dataset.py +84 -0
- setup.py +30 -0
- tests/test_dataset.py +56 -0
.gitignore
ADDED
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Python
|
2 |
+
__pycache__/
|
3 |
+
*.py[cod]
|
4 |
+
*$py.class
|
5 |
+
*.so
|
6 |
+
.Python
|
7 |
+
build/
|
8 |
+
develop-eggs/
|
9 |
+
dist/
|
10 |
+
downloads/
|
11 |
+
eggs/
|
12 |
+
.eggs/
|
13 |
+
lib/
|
14 |
+
lib64/
|
15 |
+
parts/
|
16 |
+
sdist/
|
17 |
+
var/
|
18 |
+
wheels/
|
19 |
+
*.egg-info/
|
20 |
+
.installed.cfg
|
21 |
+
*.egg
|
22 |
+
MANIFEST
|
23 |
+
|
24 |
+
# Virtual Environment
|
25 |
+
venv/
|
26 |
+
env/
|
27 |
+
ENV/
|
28 |
+
|
29 |
+
# IDE
|
30 |
+
.idea/
|
31 |
+
.vscode/
|
32 |
+
*.swp
|
33 |
+
*.swo
|
34 |
+
|
35 |
+
# Dataset specific
|
36 |
+
data/**/*.tar.gz
|
37 |
+
data/**/*.wav
|
38 |
+
!data/samples/*.wav
|
39 |
+
|
40 |
+
# Logs
|
41 |
+
*.log
|
42 |
+
logs/
|
CONTRIBUTING.md
ADDED
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Contributing to HyVoxPopuli
|
2 |
+
|
3 |
+
We welcome contributions to the HyVoxPopuli dataset! This document provides guidelines for contributing to the project.
|
4 |
+
|
5 |
+
## Types of Contributions
|
6 |
+
|
7 |
+
### 1. Adding New Audio Data
|
8 |
+
|
9 |
+
When contributing new audio data:
|
10 |
+
- Ensure audio is recorded at 16kHz sampling rate
|
11 |
+
- Provide accurate transcriptions in Armenian
|
12 |
+
- Include speaker metadata (gender, speaker_id)
|
13 |
+
- Note any specific accent information if applicable
|
14 |
+
|
15 |
+
### 2. Improving Transcriptions
|
16 |
+
|
17 |
+
To improve existing transcriptions:
|
18 |
+
1. Identify the audio segment using its `audio_id`
|
19 |
+
2. Provide both the original and corrected transcription
|
20 |
+
3. Include justification for the correction
|
21 |
+
4. Mark if the correction is validated by a native speaker
|
22 |
+
|
23 |
+
### 3. Documentation Improvements
|
24 |
+
|
25 |
+
Help us improve:
|
26 |
+
- README.md clarity
|
27 |
+
- Usage examples
|
28 |
+
- Error reporting
|
29 |
+
- Best practices documentation
|
30 |
+
|
31 |
+
### 4. Bug Reports
|
32 |
+
|
33 |
+
When reporting issues:
|
34 |
+
- Use the GitHub issue tracker
|
35 |
+
- Include the specific audio_id if relevant
|
36 |
+
- Provide clear steps to reproduce the issue
|
37 |
+
- Include your environment details
|
38 |
+
|
39 |
+
## Submission Process
|
40 |
+
|
41 |
+
1. Fork the repository
|
42 |
+
2. Create a feature branch
|
43 |
+
3. Make your changes
|
44 |
+
4. Submit a pull request
|
45 |
+
|
46 |
+
## Quality Standards
|
47 |
+
|
48 |
+
- Audio files must be high quality, 16kHz
|
49 |
+
- Transcriptions must be verified by native speakers
|
50 |
+
- Code contributions should include tests
|
51 |
+
- Documentation should be clear and concise
|
52 |
+
|
53 |
+
## Communication
|
54 |
+
|
55 |
+
- Use GitHub Issues for bug reports and feature requests
|
56 |
+
- Join our community discussions [link to be added]
|
57 |
+
- Follow our code of conduct
|
58 |
+
|
59 |
+
## License
|
60 |
+
|
61 |
+
By contributing, you agree that your contributions will be licensed under the CC-BY-4.0 license.
|
62 |
+
|
63 |
+
## Questions?
|
64 |
+
|
65 |
+
Feel free to reach out to the maintainers for any questions about contributing.
|
README.md
CHANGED
@@ -1,103 +1,226 @@
|
|
1 |
---
|
2 |
-
annotations_creators:
|
|
|
3 |
language:
|
4 |
- hy
|
5 |
-
language_creators:
|
|
|
|
|
|
|
|
|
|
|
6 |
pretty_name: HyVoxPopuli
|
7 |
-
|
|
|
|
|
|
|
8 |
task_categories:
|
9 |
- automatic-speech-recognition
|
10 |
-
task_ids:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
11 |
---
|
12 |
|
13 |
-
#
|
14 |
-
|
15 |
-
## Table of Contents
|
16 |
-
- [Table of Contents](#table-of-contents)
|
17 |
-
- [Dataset Description](#dataset-description)
|
18 |
-
- [Dataset Summary](#dataset-summary)
|
19 |
-
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
|
20 |
-
- [Languages](#languages)
|
21 |
-
- [Dataset Structure](#dataset-structure)
|
22 |
-
- [Data Instances](#data-instances)
|
23 |
-
- [Data Fields](#data-fields)
|
24 |
-
- [Data Splits](#data-splits)
|
25 |
-
- [Dataset Creation](#dataset-creation)
|
26 |
-
- [Curation Rationale](#curation-rationale)
|
27 |
-
- [Source Data](#source-data)
|
28 |
-
- [Annotations](#annotations)
|
29 |
-
- [Personal and Sensitive Information](#personal-and-sensitive-information)
|
30 |
-
- [Considerations for Using the Data](#considerations-for-using-the-data)
|
31 |
-
- [Social Impact of Dataset](#social-impact-of-dataset)
|
32 |
-
- [Discussion of Biases](#discussion-of-biases)
|
33 |
-
- [Other Known Limitations](#other-known-limitations)
|
34 |
-
- [Additional Information](#additional-information)
|
35 |
-
- [Dataset Curators](#dataset-curators)
|
36 |
-
- [Licensing Information](#licensing-information)
|
37 |
-
- [Citation Information](#citation-information)
|
38 |
-
- [Contributions](#contributions)
|
39 |
|
40 |
## Dataset Description
|
41 |
|
42 |
### Dataset Summary
|
43 |
|
44 |
-
|
45 |
|
46 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
47 |
|
48 |
```python
|
49 |
from datasets import load_dataset
|
50 |
|
51 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
52 |
```
|
53 |
|
54 |
-
###
|
55 |
|
56 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
57 |
|
58 |
## Dataset Structure
|
59 |
|
60 |
### Data Instances
|
61 |
|
|
|
|
|
62 |
```python
|
63 |
{
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
}
|
77 |
```
|
78 |
|
79 |
### Data Fields
|
80 |
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
|
|
|
|
89 |
|
90 |
### Data Splits
|
91 |
|
92 |
-
|
|
|
|
|
|
|
|
|
|
|
93 |
|
94 |
## Dataset Creation
|
95 |
|
96 |
-
|
97 |
|
98 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
99 |
|
100 |
-
|
|
|
|
|
|
|
|
|
101 |
|
102 |
The raw data is collected from [Գրքասեր site ](https://grqaser.org/am)
|
103 |
|
|
|
1 |
---
|
2 |
+
annotations_creators:
|
3 |
+
- expert-generated
|
4 |
language:
|
5 |
- hy
|
6 |
+
language_creators:
|
7 |
+
- found
|
8 |
+
license:
|
9 |
+
- cc-by-4.0
|
10 |
+
multilinguality:
|
11 |
+
- monolingual
|
12 |
pretty_name: HyVoxPopuli
|
13 |
+
size_categories:
|
14 |
+
- 10K<n<100K
|
15 |
+
source_datasets:
|
16 |
+
- original
|
17 |
task_categories:
|
18 |
- automatic-speech-recognition
|
19 |
+
task_ids:
|
20 |
+
- speech-recognition
|
21 |
+
tags:
|
22 |
+
- speech
|
23 |
+
- armenian
|
24 |
+
- audio
|
25 |
+
- asr
|
26 |
+
paperswithcode_id: hyvoxpopuli
|
27 |
---
|
28 |
|
29 |
+
# HyVoxPopuli: Armenian Speech Recognition Dataset
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
30 |
|
31 |
## Dataset Description
|
32 |
|
33 |
### Dataset Summary
|
34 |
|
35 |
+
HyVoxPopuli is a high-quality Armenian speech recognition dataset designed for training and evaluating automatic speech recognition (ASR) models. The dataset contains carefully curated audio segments paired with their transcriptions in Armenian, making it valuable for developing ASR systems for the Armenian language.
|
36 |
|
37 |
+
The dataset includes:
|
38 |
+
- High-quality audio recordings at 16kHz sampling rate
|
39 |
+
- Expert-validated transcriptions
|
40 |
+
- Speaker metadata including gender and speaker ID
|
41 |
+
- Optional accent information where applicable
|
42 |
+
- Split into train, validation, and test sets
|
43 |
+
|
44 |
+
### Languages
|
45 |
+
|
46 |
+
The dataset contains Armenian (ISO 639-1 code: hy) speech data.
|
47 |
+
|
48 |
+
## Quick Start
|
49 |
+
|
50 |
+
### Loading the Dataset
|
51 |
|
52 |
```python
|
53 |
from datasets import load_dataset
|
54 |
|
55 |
+
# Load the dataset
|
56 |
+
dataset = load_dataset("Edmon02/hyvoxpopuli")
|
57 |
+
|
58 |
+
# Access specific splits
|
59 |
+
train_dataset = dataset["train"]
|
60 |
+
validation_dataset = dataset["validation"]
|
61 |
+
test_dataset = dataset["test"]
|
62 |
+
|
63 |
+
# Example: Print the first audio sample's transcription
|
64 |
+
print(train_dataset[0]["normalized_text"])
|
65 |
+
|
66 |
+
# Example: Load and process audio
|
67 |
+
import librosa
|
68 |
+
|
69 |
+
def process_audio(example):
|
70 |
+
audio = example["audio"]
|
71 |
+
array = audio["array"]
|
72 |
+
sampling_rate = audio["sampling_rate"]
|
73 |
+
|
74 |
+
# Example: Extract MFCC features
|
75 |
+
mfccs = librosa.feature.mfcc(y=array, sr=sampling_rate, n_mfcc=13)
|
76 |
+
return {"mfccs": mfccs}
|
77 |
+
|
78 |
+
# Process the dataset
|
79 |
+
processed_dataset = dataset.map(process_audio)
|
80 |
```
|
81 |
|
82 |
+
### Using with 🤗 Transformers
|
83 |
|
84 |
+
```python
|
85 |
+
from transformers import WhisperFeatureExtractor, WhisperTokenizer, WhisperProcessor
|
86 |
+
from transformers import WhisperForConditionalGeneration
|
87 |
+
|
88 |
+
# Initialize feature extractor, tokenizer and processor
|
89 |
+
feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-small")
|
90 |
+
tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-small", language="Armenian", task="transcribe")
|
91 |
+
processor = WhisperProcessor.from_pretrained("openai/whisper-small", language="Armenian", task="transcribe")
|
92 |
+
|
93 |
+
# Prepare the dataset
|
94 |
+
def prepare_dataset(example):
|
95 |
+
audio = example["audio"]
|
96 |
+
features = processor(
|
97 |
+
audio["array"],
|
98 |
+
sampling_rate=audio["sampling_rate"],
|
99 |
+
text=example["normalized_text"]
|
100 |
+
)
|
101 |
+
return features
|
102 |
+
|
103 |
+
processed_dataset = dataset.map(prepare_dataset)
|
104 |
+
```
|
105 |
|
106 |
## Dataset Structure
|
107 |
|
108 |
### Data Instances
|
109 |
|
110 |
+
Each instance in the dataset contains:
|
111 |
+
|
112 |
```python
|
113 |
{
|
114 |
+
'audio_id': 'segment_00000072',
|
115 |
+
'audio': {
|
116 |
+
'path': 'path/to/audio.wav',
|
117 |
+
'array': array([...], dtype=float32),
|
118 |
+
'sampling_rate': 16000
|
119 |
+
},
|
120 |
+
'raw_text': '...', # Original text
|
121 |
+
'normalized_text': '...', # Normalized transcription
|
122 |
+
'gender': 'female', # or 'male'
|
123 |
+
'speaker_id': 'speaker_2',
|
124 |
+
'is_gold_transcript': True,
|
125 |
+
'accent': 'None' # or specific accent identifier
|
126 |
}
|
127 |
```
|
128 |
|
129 |
### Data Fields
|
130 |
|
131 |
+
| Field Name | Type | Description |
|
132 |
+
|------------|------|-------------|
|
133 |
+
| `audio_id` | string | Unique identifier for the audio segment |
|
134 |
+
| `audio` | Audio | Audio data with 16kHz sampling rate |
|
135 |
+
| `raw_text` | string | Original orthographic transcription |
|
136 |
+
| `normalized_text` | string | Normalized transcription |
|
137 |
+
| `gender` | string | Speaker's gender ('male' or 'female') |
|
138 |
+
| `speaker_id` | string | Unique speaker identifier |
|
139 |
+
| `is_gold_transcript` | boolean | Indicates expert-validated transcription |
|
140 |
+
| `accent` | string | Accent identifier if applicable, 'None' otherwise |
|
141 |
|
142 |
### Data Splits
|
143 |
|
144 |
+
The dataset is divided into three splits:
|
145 |
+
- Train: Contains the main training data
|
146 |
+
- Validation (dev): Used for model validation during training
|
147 |
+
- Test: Reserved for final model evaluation
|
148 |
+
|
149 |
+
Each split is provided in a compressed tar.gz format with accompanying metadata in TSV files.
|
150 |
|
151 |
## Dataset Creation
|
152 |
|
153 |
+
## Considerations for Using the Data
|
154 |
|
155 |
+
### Intended Uses
|
156 |
+
|
157 |
+
This dataset is primarily intended for:
|
158 |
+
- Training and evaluating Armenian speech recognition models
|
159 |
+
- Developing multilingual speech processing systems
|
160 |
+
- Acoustic model training for Armenian language
|
161 |
+
- Speech processing research focusing on low-resource languages
|
162 |
+
|
163 |
+
### Limitations
|
164 |
+
|
165 |
+
- The dataset may have limited coverage of regional Armenian dialects
|
166 |
+
- Background noise levels may vary across recordings
|
167 |
+
- Speaker diversity may be limited in terms of age groups and accents
|
168 |
+
|
169 |
+
### Ethical Considerations
|
170 |
+
|
171 |
+
- The dataset contains only consented speech recordings
|
172 |
+
- Personal identifying information has been removed
|
173 |
+
- Usage should respect Armenian language and culture
|
174 |
+
|
175 |
+
## Additional Information
|
176 |
+
|
177 |
+
### Licensing Information
|
178 |
+
|
179 |
+
This dataset is released under the [Creative Commons Attribution 4.0 International License](https://creativecommons.org/licenses/by/4.0/).
|
180 |
+
|
181 |
+
### Citation Information
|
182 |
+
|
183 |
+
If you use this dataset in your research, please cite:
|
184 |
+
|
185 |
+
```bibtex
|
186 |
+
@dataset{hyvoxpopuli2023,
|
187 |
+
title = {HyVoxPopuli: Armenian Speech Recognition Dataset},
|
188 |
+
year = {2023},
|
189 |
+
publisher = {Hugging Face},
|
190 |
+
journal = {Hugging Face Datasets},
|
191 |
+
url = {https://huggingface.co/datasets/Edmon02/hyvoxpopuli}
|
192 |
+
}
|
193 |
+
```
|
194 |
+
|
195 |
+
### Contributions
|
196 |
+
|
197 |
+
Contributions to improve the dataset are welcome! Please check our [contribution guidelines](CONTRIBUTING.md) for more information.
|
198 |
+
|
199 |
+
To report issues or suggest improvements:
|
200 |
+
1. Open an issue on the [dataset repository](https://huggingface.co/datasets/Edmon02/hyvoxpopuli/issues)
|
201 |
+
2. Submit a pull request with your proposed changes
|
202 |
+
|
203 |
+
## Acknowledgements
|
204 |
+
|
205 |
+
Special thanks to all contributors and native speakers who helped validate the transcriptions and improve the dataset quality.
|
206 |
+
|
207 |
+
The audio data comes from carefully selected Armenian speech recordings, processed to ensure consistent audio quality with a 16kHz sampling rate. Each audio segment has been cleaned and normalized to maintain high quality standards for speech recognition tasks.
|
208 |
+
|
209 |
+
### Annotations
|
210 |
+
|
211 |
+
The dataset features:
|
212 |
+
- Expert-validated transcriptions marked with `is_gold_transcript=True`
|
213 |
+
- Normalized text that follows consistent transcription guidelines
|
214 |
+
- Speaker metadata including gender and unique identifier
|
215 |
+
- Optional accent annotations where relevant
|
216 |
+
|
217 |
+
### Quality Control
|
218 |
|
219 |
+
Quality assurance measures include:
|
220 |
+
1. Consistent 16kHz audio sampling rate across all recordings
|
221 |
+
2. Expert validation of transcriptions
|
222 |
+
3. Metadata validation for completeness and accuracy
|
223 |
+
4. Clear separation between train, validation, and test sets
|
224 |
|
225 |
The raw data is collected from [Գրքասեր site ](https://grqaser.org/am)
|
226 |
|
dataset_infos.json
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"annotations_creators": ["expert-generated"],
|
3 |
+
"language": ["hy"],
|
4 |
+
"language_creators": ["found"],
|
5 |
+
"license": ["cc-by-4.0"],
|
6 |
+
"multilinguality": ["monolingual"],
|
7 |
+
"pretty_name": "HyVoxPopuli",
|
8 |
+
"size_categories": ["10K<n<100K"],
|
9 |
+
"source_datasets": ["original"],
|
10 |
+
"task_categories": ["automatic-speech-recognition"],
|
11 |
+
"task_ids": ["speech-recognition"],
|
12 |
+
"paperswithcode_id": "hyvoxpopuli"
|
13 |
+
}
|
examples/load_dataset.py
ADDED
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Example script demonstrating how to load and use the HyVoxPopuli dataset."""
|
2 |
+
|
3 |
+
import torch
|
4 |
+
import torchaudio
|
5 |
+
from datasets import load_dataset
|
6 |
+
from transformers import WhisperProcessor, WhisperForConditionalGeneration
|
7 |
+
|
8 |
+
def main():
|
9 |
+
# Load the dataset
|
10 |
+
dataset = load_dataset("Edmon02/hyvoxpopuli", split="train[:5]")
|
11 |
+
print(f"Loaded {len(dataset)} examples")
|
12 |
+
|
13 |
+
# Load Whisper model and processor
|
14 |
+
processor = WhisperProcessor.from_pretrained("openai/whisper-small")
|
15 |
+
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
|
16 |
+
|
17 |
+
# Process an example
|
18 |
+
example = dataset[0]
|
19 |
+
print("\nExample metadata:")
|
20 |
+
print(f"Audio ID: {example['audio_id']}")
|
21 |
+
print(f"Speaker: {example['speaker_id']} (Gender: {example['gender']})")
|
22 |
+
print(f"Reference text: {example['normalized_text']}")
|
23 |
+
|
24 |
+
# Process audio with Whisper
|
25 |
+
input_features = processor(
|
26 |
+
example["audio"]["array"],
|
27 |
+
sampling_rate=example["audio"]["sampling_rate"],
|
28 |
+
return_tensors="pt"
|
29 |
+
).input_features
|
30 |
+
|
31 |
+
# Generate tokens
|
32 |
+
predicted_ids = model.generate(input_features)
|
33 |
+
transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
|
34 |
+
|
35 |
+
print(f"\nWhisper transcription: {transcription}")
|
36 |
+
|
37 |
+
if __name__ == "__main__":
|
38 |
+
main()
|
hyvoxpopuli.py
CHANGED
@@ -1,98 +1,170 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
from collections import defaultdict
|
|
|
2 |
import os
|
|
|
3 |
import json
|
4 |
import csv
|
5 |
|
6 |
import datasets
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
7 |
|
8 |
_BASE_DATA_DIR = "data/"
|
9 |
-
|
10 |
_AUDIO_ARCHIVE_PATH = _BASE_DATA_DIR + "{split}/{split}_dataset.tar.gz"
|
11 |
-
|
12 |
_METADATA_PATH = _BASE_DATA_DIR + "{split}.tsv"
|
13 |
|
14 |
class Hyvoxpopuli(datasets.GeneratorBasedBuilder):
|
15 |
-
"""The HyVoxPopuli dataset."""
|
16 |
-
|
17 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
18 |
features = datasets.Features(
|
19 |
{
|
20 |
"audio_id": datasets.Value("string"),
|
21 |
"audio": datasets.Audio(sampling_rate=16_000),
|
22 |
"raw_text": datasets.Value("string"),
|
23 |
"normalized_text": datasets.Value("string"),
|
24 |
-
"gender": datasets.Value("string"),
|
25 |
"speaker_id": datasets.Value("string"),
|
26 |
"is_gold_transcript": datasets.Value("bool"),
|
27 |
-
"accent": datasets.Value("string"),
|
28 |
}
|
29 |
)
|
30 |
return datasets.DatasetInfo(
|
31 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
32 |
)
|
33 |
|
34 |
-
def _split_generators(self, dl_manager):
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
|
|
|
|
|
|
|
|
|
|
48 |
meta_paths = dl_manager.download_and_extract(meta_urls)
|
49 |
audio_paths = dl_manager.download(audio_urls)
|
|
|
50 |
|
51 |
-
|
52 |
-
dl_manager.extract(audio_paths)
|
53 |
-
)
|
54 |
-
|
55 |
return [
|
56 |
datasets.SplitGenerator(
|
57 |
-
name=
|
58 |
-
gen_kwargs={
|
59 |
-
"audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["train"]],
|
60 |
-
"local_extracted_archives_paths": local_extracted_audio_paths["train"],
|
61 |
-
"metadata_paths": meta_paths["train"],
|
62 |
-
}
|
63 |
-
),
|
64 |
-
datasets.SplitGenerator(
|
65 |
-
name=datasets.Split.VALIDATION,
|
66 |
gen_kwargs={
|
67 |
-
"audio_archives": [dl_manager.iter_archive(
|
68 |
-
"
|
69 |
-
|
|
|
|
|
70 |
}
|
71 |
-
)
|
72 |
-
|
73 |
-
name=datasets.Split.TEST,
|
74 |
-
gen_kwargs={
|
75 |
-
"audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["test"]],
|
76 |
-
"local_extracted_archives_paths": local_extracted_audio_paths["test"],
|
77 |
-
"metadata_paths": meta_paths["test"],
|
78 |
-
}
|
79 |
-
),
|
80 |
]
|
81 |
|
82 |
-
def _generate_examples(
|
|
|
|
|
|
|
|
|
|
|
|
|
83 |
features = ["raw_text", "normalized_text", "speaker_id", "gender", "is_gold_transcript", "accent"]
|
84 |
|
85 |
-
|
86 |
-
with open(
|
87 |
-
metadata = {
|
88 |
|
89 |
-
|
|
|
90 |
for audio_filename, audio_file in audio_archive:
|
91 |
-
|
|
|
|
|
|
|
92 |
path = os.path.join(local_extracted_archive_path, audio_filename) if local_extracted_archive_path else audio_filename
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""HyVoxPopuli Dataset: A High-Quality Armenian Speech Recognition Dataset.
|
2 |
+
|
3 |
+
This module implements the HyVoxPopuli dataset loader for the Hugging Face datasets library.
|
4 |
+
The dataset contains Armenian speech recordings with expert-validated transcriptions.
|
5 |
+
"""
|
6 |
+
|
7 |
from collections import defaultdict
|
8 |
+
import logging
|
9 |
import os
|
10 |
+
from typing import Any, Dict, List, Iterator, Optional, Tuple
|
11 |
import json
|
12 |
import csv
|
13 |
|
14 |
import datasets
|
15 |
+
from datasets.tasks import AutomaticSpeechRecognition
|
16 |
+
|
17 |
+
logger = logging.getLogger(__name__)
|
18 |
+
|
19 |
+
# Configure logging
|
20 |
+
logging.basicConfig(
|
21 |
+
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
|
22 |
+
level=logging.INFO
|
23 |
+
)
|
24 |
+
|
25 |
+
_CITATION = """\
|
26 |
+
@dataset{hyvoxpopuli2023,
|
27 |
+
title = {HyVoxPopuli: Armenian Speech Recognition Dataset},
|
28 |
+
year = {2023},
|
29 |
+
publisher = {Hugging Face},
|
30 |
+
journal = {Hugging Face Datasets},
|
31 |
+
url = {https://huggingface.co/datasets/Edmon02/hyvoxpopuli}
|
32 |
+
}
|
33 |
+
"""
|
34 |
+
|
35 |
+
_DESCRIPTION = """\
|
36 |
+
HyVoxPopuli is a high-quality Armenian speech recognition dataset designed for training
|
37 |
+
and evaluating automatic speech recognition (ASR) models. The dataset contains carefully
|
38 |
+
curated audio segments paired with their transcriptions in Armenian.
|
39 |
+
|
40 |
+
Features:
|
41 |
+
- High-quality audio recordings at 16kHz sampling rate
|
42 |
+
- Expert-validated transcriptions
|
43 |
+
- Speaker metadata including gender and speaker ID
|
44 |
+
- Optional accent information where applicable
|
45 |
+
"""
|
46 |
+
|
47 |
+
_HOMEPAGE = "https://huggingface.co/datasets/Edmon02/hyvoxpopuli"
|
48 |
+
|
49 |
+
_LICENSE = "CC-BY-4.0"
|
50 |
|
51 |
_BASE_DATA_DIR = "data/"
|
|
|
52 |
_AUDIO_ARCHIVE_PATH = _BASE_DATA_DIR + "{split}/{split}_dataset.tar.gz"
|
|
|
53 |
_METADATA_PATH = _BASE_DATA_DIR + "{split}.tsv"
|
54 |
|
55 |
class Hyvoxpopuli(datasets.GeneratorBasedBuilder):
|
56 |
+
"""The HyVoxPopuli dataset: A high-quality Armenian speech recognition dataset."""
|
57 |
+
|
58 |
+
VERSION = datasets.Version("1.0.0")
|
59 |
+
BUILDER_CONFIGS = [
|
60 |
+
datasets.BuilderConfig(
|
61 |
+
name="default",
|
62 |
+
version=VERSION,
|
63 |
+
description="Default configuration for HyVoxPopuli dataset",
|
64 |
+
),
|
65 |
+
]
|
66 |
+
|
67 |
+
def _info(self) -> datasets.DatasetInfo:
|
68 |
+
"""Returns the dataset metadata."""
|
69 |
features = datasets.Features(
|
70 |
{
|
71 |
"audio_id": datasets.Value("string"),
|
72 |
"audio": datasets.Audio(sampling_rate=16_000),
|
73 |
"raw_text": datasets.Value("string"),
|
74 |
"normalized_text": datasets.Value("string"),
|
75 |
+
"gender": datasets.Value("string", id=None),
|
76 |
"speaker_id": datasets.Value("string"),
|
77 |
"is_gold_transcript": datasets.Value("bool"),
|
78 |
+
"accent": datasets.Value("string", id=None),
|
79 |
}
|
80 |
)
|
81 |
return datasets.DatasetInfo(
|
82 |
+
description=_DESCRIPTION,
|
83 |
+
features=features,
|
84 |
+
homepage=_HOMEPAGE,
|
85 |
+
license=_LICENSE,
|
86 |
+
citation=_CITATION,
|
87 |
+
task_templates=[
|
88 |
+
AutomaticSpeechRecognition(
|
89 |
+
audio_column="audio",
|
90 |
+
transcription_column="normalized_text",
|
91 |
+
)
|
92 |
+
],
|
93 |
)
|
94 |
|
95 |
+
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
|
96 |
+
"""Returns SplitGenerators."""
|
97 |
+
split_names = {
|
98 |
+
"train": str(datasets.Split.TRAIN),
|
99 |
+
"dev": str(datasets.Split.VALIDATION),
|
100 |
+
"test": str(datasets.Split.TEST)
|
101 |
+
}
|
102 |
+
|
103 |
+
# Prepare download URLs
|
104 |
+
audio_urls = {
|
105 |
+
split: [_AUDIO_ARCHIVE_PATH.format(split=split)]
|
106 |
+
for split in split_names.keys()
|
107 |
+
}
|
108 |
+
meta_urls = {
|
109 |
+
split: _METADATA_PATH.format(split=split)
|
110 |
+
for split in split_names.keys()
|
111 |
+
}
|
112 |
+
|
113 |
+
# Download and extract files
|
114 |
meta_paths = dl_manager.download_and_extract(meta_urls)
|
115 |
audio_paths = dl_manager.download(audio_urls)
|
116 |
+
local_extracted_audio_paths = dl_manager.extract(audio_paths)
|
117 |
|
118 |
+
# Create split generators
|
|
|
|
|
|
|
119 |
return [
|
120 |
datasets.SplitGenerator(
|
121 |
+
name=split_name,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
122 |
gen_kwargs={
|
123 |
+
"audio_archives": [dl_manager.iter_archive(path) for path in audio_paths[split]],
|
124 |
+
"local_extracted_archive_path": local_extracted_audio_paths[split][0]
|
125 |
+
if isinstance(local_extracted_audio_paths[split], list)
|
126 |
+
else local_extracted_audio_paths[split],
|
127 |
+
"metadata_path": meta_paths[split],
|
128 |
}
|
129 |
+
)
|
130 |
+
for split, split_name in split_names.items()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
131 |
]
|
132 |
|
133 |
+
def _generate_examples(
|
134 |
+
self,
|
135 |
+
audio_archives: List[Iterator[Tuple[str, Any]]],
|
136 |
+
local_extracted_archive_path: str,
|
137 |
+
metadata_path: str,
|
138 |
+
) -> Iterator[Tuple[str, Dict[str, Any]]]:
|
139 |
+
"""Yields examples as (key, example) tuples."""
|
140 |
features = ["raw_text", "normalized_text", "speaker_id", "gender", "is_gold_transcript", "accent"]
|
141 |
|
142 |
+
# Load metadata
|
143 |
+
with open(metadata_path, encoding="utf-8") as f:
|
144 |
+
metadata = {row["id"]: row for row in csv.DictReader(f, delimiter="\t")}
|
145 |
|
146 |
+
# Process audio files
|
147 |
+
for audio_archive in audio_archives:
|
148 |
for audio_filename, audio_file in audio_archive:
|
149 |
+
# Extract audio ID from filename
|
150 |
+
audio_id = os.path.splitext(os.path.basename(audio_filename))[0]
|
151 |
+
|
152 |
+
# Construct audio path
|
153 |
path = os.path.join(local_extracted_archive_path, audio_filename) if local_extracted_archive_path else audio_filename
|
154 |
+
|
155 |
+
try:
|
156 |
+
# Create example dictionary
|
157 |
+
example = {
|
158 |
+
"audio_id": audio_id,
|
159 |
+
"audio": {"path": path, "bytes": audio_file.read()},
|
160 |
+
}
|
161 |
+
|
162 |
+
# Add metadata fields
|
163 |
+
for feature in features:
|
164 |
+
example[feature] = metadata[audio_id][feature]
|
165 |
+
|
166 |
+
yield audio_id, example
|
167 |
+
|
168 |
+
except Exception as e:
|
169 |
+
logger.warning(f"Error processing audio file {audio_id}: {str(e)}")
|
170 |
+
continue
|
pyproject.toml
ADDED
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[tool.black]
|
2 |
+
line-length = 88
|
3 |
+
include = '\.pyx?$'
|
4 |
+
extend-exclude = '''
|
5 |
+
# A regex preceded with ^/ will apply only to files and directories
|
6 |
+
# in the root of the project.
|
7 |
+
^/external/
|
8 |
+
'''
|
9 |
+
|
10 |
+
[tool.isort]
|
11 |
+
profile = "black"
|
12 |
+
multi_line_output = 3
|
13 |
+
include_trailing_comma = true
|
14 |
+
force_grid_wrap = 0
|
15 |
+
use_parentheses = true
|
16 |
+
ensure_newline_before_comments = true
|
17 |
+
line_length = 88
|
18 |
+
|
19 |
+
[tool.pylint.messages_control]
|
20 |
+
disable = [
|
21 |
+
"C0111", # missing-docstring
|
22 |
+
"C0103", # invalid-name
|
23 |
+
"C0330", # bad-continuation
|
24 |
+
"C0326", # bad-whitespace
|
25 |
+
"W0621", # redefined-outer-name
|
26 |
+
"W0612", # unused-variable
|
27 |
+
"W0611", # unused-import
|
28 |
+
"R0903", # too-few-public-methods
|
29 |
+
]
|
30 |
+
|
31 |
+
[tool.pylint.format]
|
32 |
+
max-line-length = 88
|
requirements.txt
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Core dependencies
|
2 |
+
datasets>=2.0.0
|
3 |
+
librosa>=0.8.0
|
4 |
+
numpy>=1.19.0
|
5 |
+
torch>=1.7.0
|
6 |
+
torchaudio>=0.7.0
|
7 |
+
transformers>=4.30.0
|
8 |
+
|
9 |
+
# Development dependencies
|
10 |
+
black>=22.3.0
|
11 |
+
isort>=5.10.1
|
12 |
+
pylint>=2.15.0
|
13 |
+
pytest>=7.0.0
|
14 |
+
pytest-cov>=3.0.0
|
scripts/validate_dataset.py
ADDED
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Script to validate the HyVoxPopuli dataset structure and contents."""
|
2 |
+
|
3 |
+
import os
|
4 |
+
import json
|
5 |
+
import csv
|
6 |
+
from pathlib import Path
|
7 |
+
from typing import Dict, List
|
8 |
+
|
9 |
+
def validate_audio_files(data_dir: Path, split: str) -> List[str]:
|
10 |
+
"""Validate audio files for a given split."""
|
11 |
+
errors = []
|
12 |
+
audio_dir = data_dir / split / f"{split}_dataset.tar.gz"
|
13 |
+
|
14 |
+
if not audio_dir.exists():
|
15 |
+
errors.append(f"Missing audio archive for split {split}")
|
16 |
+
|
17 |
+
return errors
|
18 |
+
|
19 |
+
def validate_metadata(data_dir: Path, split: str) -> List[str]:
|
20 |
+
"""Validate metadata file for a given split."""
|
21 |
+
errors = []
|
22 |
+
metadata_file = data_dir / f"{split}.tsv"
|
23 |
+
|
24 |
+
if not metadata_file.exists():
|
25 |
+
errors.append(f"Missing metadata file for split {split}")
|
26 |
+
return errors
|
27 |
+
|
28 |
+
required_columns = {
|
29 |
+
"id", "raw_text", "normalized_text", "speaker_id",
|
30 |
+
"gender", "is_gold_transcript", "accent"
|
31 |
+
}
|
32 |
+
|
33 |
+
try:
|
34 |
+
with open(metadata_file, "r", encoding="utf-8") as f:
|
35 |
+
reader = csv.DictReader(f, delimiter="\t")
|
36 |
+
headers = set(reader.fieldnames or [])
|
37 |
+
missing_columns = required_columns - headers
|
38 |
+
if missing_columns:
|
39 |
+
errors.append(f"Missing required columns in {split}.tsv: {missing_columns}")
|
40 |
+
except Exception as e:
|
41 |
+
errors.append(f"Error reading {split}.tsv: {str(e)}")
|
42 |
+
|
43 |
+
return errors
|
44 |
+
|
45 |
+
def main():
|
46 |
+
"""Main validation function."""
|
47 |
+
data_dir = Path("data")
|
48 |
+
splits = ["train", "dev", "test"]
|
49 |
+
all_errors: Dict[str, List[str]] = {}
|
50 |
+
|
51 |
+
# Validate n_files.json
|
52 |
+
n_files_path = data_dir / "n_files.json"
|
53 |
+
if not n_files_path.exists():
|
54 |
+
all_errors["n_files"] = ["Missing n_files.json"]
|
55 |
+
else:
|
56 |
+
try:
|
57 |
+
with open(n_files_path, "r") as f:
|
58 |
+
n_files = json.load(f)
|
59 |
+
if not all(split in n_files for split in splits):
|
60 |
+
all_errors["n_files"] = ["Missing split information in n_files.json"]
|
61 |
+
except json.JSONDecodeError:
|
62 |
+
all_errors["n_files"] = ["Invalid JSON in n_files.json"]
|
63 |
+
|
64 |
+
# Validate each split
|
65 |
+
for split in splits:
|
66 |
+
errors = []
|
67 |
+
errors.extend(validate_audio_files(data_dir, split))
|
68 |
+
errors.extend(validate_metadata(data_dir, split))
|
69 |
+
if errors:
|
70 |
+
all_errors[split] = errors
|
71 |
+
|
72 |
+
# Print results
|
73 |
+
if all_errors:
|
74 |
+
print("\nValidation Errors:")
|
75 |
+
for category, errors in all_errors.items():
|
76 |
+
print(f"\n{category}:")
|
77 |
+
for error in errors:
|
78 |
+
print(f" - {error}")
|
79 |
+
exit(1)
|
80 |
+
else:
|
81 |
+
print("\nValidation successful! Dataset structure is correct.")
|
82 |
+
|
83 |
+
if __name__ == "__main__":
|
84 |
+
main()
|
setup.py
ADDED
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from setuptools import setup, find_packages
|
2 |
+
|
3 |
+
with open("README.md", "r", encoding="utf-8") as fh:
|
4 |
+
long_description = fh.read()
|
5 |
+
|
6 |
+
setup(
|
7 |
+
name="hyvoxpopuli",
|
8 |
+
version="1.0.0",
|
9 |
+
author="Edmon",
|
10 |
+
author_email="", # Add your email
|
11 |
+
description="A high-quality Armenian speech recognition dataset",
|
12 |
+
long_description=long_description,
|
13 |
+
long_description_content_type="text/markdown",
|
14 |
+
url="https://huggingface.co/datasets/Edmon02/hyvoxpopuli",
|
15 |
+
packages=find_packages(),
|
16 |
+
classifiers=[
|
17 |
+
"Programming Language :: Python :: 3",
|
18 |
+
"License :: OSI Approved :: Creative Commons Attribution 4.0 International License",
|
19 |
+
"Operating System :: OS Independent",
|
20 |
+
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
21 |
+
"Topic :: Multimedia :: Sound/Audio :: Speech",
|
22 |
+
],
|
23 |
+
python_requires=">=3.7",
|
24 |
+
install_requires=[
|
25 |
+
"datasets>=2.0.0",
|
26 |
+
"librosa>=0.8.0",
|
27 |
+
"torch>=1.7.0",
|
28 |
+
"torchaudio>=0.7.0",
|
29 |
+
],
|
30 |
+
)
|
tests/test_dataset.py
ADDED
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Tests for the HyVoxPopuli dataset."""
|
2 |
+
import os
|
3 |
+
import unittest
|
4 |
+
import tempfile
|
5 |
+
|
6 |
+
import datasets
|
7 |
+
import numpy as np
|
8 |
+
|
9 |
+
|
10 |
+
class TestHyVoxPopuli(unittest.TestCase):
|
11 |
+
"""Test cases for HyVoxPopuli dataset."""
|
12 |
+
|
13 |
+
@classmethod
|
14 |
+
def setUpClass(cls):
|
15 |
+
"""Set up test fixtures."""
|
16 |
+
try:
|
17 |
+
cls.dataset = datasets.load_dataset("Edmon02/hyvoxpopuli", split="train[:2]")
|
18 |
+
except Exception as e:
|
19 |
+
raise unittest.SkipTest(f"Failed to load dataset: {str(e)}")
|
20 |
+
|
21 |
+
def test_dataset_features(self):
|
22 |
+
"""Test if dataset has the correct features."""
|
23 |
+
expected_features = {
|
24 |
+
"audio_id", "audio", "raw_text", "normalized_text",
|
25 |
+
"gender", "speaker_id", "is_gold_transcript", "accent"
|
26 |
+
}
|
27 |
+
self.assertEqual(set(self.dataset.features.keys()), expected_features)
|
28 |
+
|
29 |
+
def test_audio_sampling_rate(self):
|
30 |
+
"""Test if audio sampling rate is correct."""
|
31 |
+
self.assertEqual(self.dataset[0]["audio"]["sampling_rate"], 16000)
|
32 |
+
|
33 |
+
def test_text_fields_not_empty(self):
|
34 |
+
"""Test if text fields are not empty."""
|
35 |
+
for example in self.dataset:
|
36 |
+
self.assertTrue(example["normalized_text"].strip())
|
37 |
+
if example["raw_text"]: # raw_text might be empty for some examples
|
38 |
+
self.assertTrue(example["raw_text"].strip())
|
39 |
+
|
40 |
+
def test_speaker_metadata(self):
|
41 |
+
"""Test if speaker metadata is valid."""
|
42 |
+
for example in self.dataset:
|
43 |
+
self.assertIn(example["gender"], ["male", "female"])
|
44 |
+
self.assertTrue(example["speaker_id"].strip())
|
45 |
+
|
46 |
+
def test_audio_array_valid(self):
|
47 |
+
"""Test if audio arrays are valid numpy arrays."""
|
48 |
+
for example in self.dataset:
|
49 |
+
audio_array = example["audio"]["array"]
|
50 |
+
self.assertIsInstance(audio_array, np.ndarray)
|
51 |
+
self.assertEqual(audio_array.dtype, np.float32)
|
52 |
+
self.assertTrue(len(audio_array) > 0)
|
53 |
+
|
54 |
+
|
55 |
+
if __name__ == "__main__":
|
56 |
+
unittest.main()
|