Edmon02 committed
Commit d410bf9 · Parent: cd0aea2

Remove obsolete files and scripts from the HyVoxPopuli dataset repository
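With the custom loading script (hyvoxpopuli.py) and the helper scripts removed, the repository is left to serve the dataset straight from the Parquet shards renamed to the repository root at the bottom of this diff. A minimal usage sketch, assuming the repository id stays Edmon02/hyvoxpopuli and the field names from the deleted loader carry over unchanged:

    from datasets import load_dataset

    # Parquet-backed loading; no custom builder script is involved after this commit.
    dataset = load_dataset("Edmon02/hyvoxpopuli", split="train")

    sample = dataset[0]
    print(sample["audio_id"], sample["speaker_id"], sample["normalized_text"])
    print(sample["audio"]["sampling_rate"])  # expected to be 16000 per the deleted dataset_info.json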
.github/workflows/ci.yml DELETED
@@ -1,62 +0,0 @@
-name: CI
-
-on:
-  push:
-    branches: [ main ]
-  pull_request:
-    branches: [ main ]
-
-jobs:
-  test:
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        python-version: [3.8, 3.9, "3.10"]
-
-    steps:
-    - uses: actions/checkout@v3
-
-    - name: Set up Python ${{ matrix.python-version }}
-      uses: actions/setup-python@v4
-      with:
-        python-version: ${{ matrix.python-version }}
-
-    - name: Install dependencies
-      run: |
-        python -m pip install --upgrade pip
-        pip install -r requirements.txt
-        pip install pytest pytest-cov black isort
-
-    - name: Check code formatting
-      run: |
-        black --check .
-        isort --check-only .
-
-    - name: Run tests with coverage
-      run: |
-        pytest tests/ --cov=. --cov-report=xml
-
-    - name: Upload coverage to Codecov
-      uses: codecov/codecov-action@v3
-      with:
-        file: ./coverage.xml
-        fail_ci_if_error: true
-
-  validate-dataset:
-    runs-on: ubuntu-latest
-    steps:
-    - uses: actions/checkout@v3
-
-    - name: Set up Python
-      uses: actions/setup-python@v4
-      with:
-        python-version: "3.10"
-
-    - name: Install dependencies
-      run: |
-        python -m pip install --upgrade pip
-        pip install -r requirements.txt
-
-    - name: Validate dataset
-      run: |
-        python scripts/validate_dataset.py

.gitignore CHANGED
@@ -19,13 +19,18 @@ wheels/
 *.egg-info/
 .installed.cfg
 *.egg
+*.py
+*.txt
+*.toml
 
 # Ignore original data files but include Parquet files
+temp_hf_repo/*
 data/*
-!data/parquet/
-data/parquet/*
-!data/parquet/*.parquet
-!data/parquet/dataset_info.json
+examples/*
+scripts/*
+tests/*
+.github/*
+becnchmarks/*
 MANIFEST
 
 # Virtual Environment
data/parquet/dataset_info.json DELETED
@@ -1,29 +0,0 @@
-{
-  "default": {
-    "features": {
-      "audio_id": "string",
-      "audio": {
-        "path": "string",
-        "array": "float32[]",
-        "sampling_rate": "int32"
-      },
-      "raw_text": "string",
-      "normalized_text": "string",
-      "gender": "string",
-      "speaker_id": "string",
-      "is_gold_transcript": "bool",
-      "accent": "string"
-    },
-    "splits": {
-      "train": {
-        "num_examples": 498
-      },
-      "validation": {
-        "num_examples": 62
-      },
-      "test": {
-        "num_examples": 63
-      }
-    }
-  }
-}

examples/load_dataset.py DELETED
@@ -1,38 +0,0 @@
-"""Example script demonstrating how to load and use the HyVoxPopuli dataset."""
-
-import torch
-import torchaudio
-from datasets import load_dataset
-from transformers import WhisperProcessor, WhisperForConditionalGeneration
-
-def main():
-    # Load the dataset
-    dataset = load_dataset("Edmon02/hyvoxpopuli", split="train[:5]")
-    print(f"Loaded {len(dataset)} examples")
-
-    # Load Whisper model and processor
-    processor = WhisperProcessor.from_pretrained("openai/whisper-small")
-    model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
-
-    # Process an example
-    example = dataset[0]
-    print("\nExample metadata:")
-    print(f"Audio ID: {example['audio_id']}")
-    print(f"Speaker: {example['speaker_id']} (Gender: {example['gender']})")
-    print(f"Reference text: {example['normalized_text']}")
-
-    # Process audio with Whisper
-    input_features = processor(
-        example["audio"]["array"],
-        sampling_rate=example["audio"]["sampling_rate"],
-        return_tensors="pt"
-    ).input_features
-
-    # Generate tokens
-    predicted_ids = model.generate(input_features)
-    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
-
-    print(f"\nWhisper transcription: {transcription}")
-
-if __name__ == "__main__":
-    main()

hyvoxpopuli.py DELETED
@@ -1,170 +0,0 @@
-"""HyVoxPopuli Dataset: A High-Quality Armenian Speech Recognition Dataset.
-
-This module implements the HyVoxPopuli dataset loader for the Hugging Face datasets library.
-The dataset contains Armenian speech recordings with expert-validated transcriptions.
-"""
-
-from collections import defaultdict
-import logging
-import os
-from typing import Any, Dict, List, Iterator, Optional, Tuple
-import json
-import csv
-
-import datasets
-from datasets.tasks import AutomaticSpeechRecognition
-
-logger = logging.getLogger(__name__)
-
-# Configure logging
-logging.basicConfig(
-    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
-    level=logging.INFO
-)
-
-_CITATION = """\
-@dataset{hyvoxpopuli2023,
-  title = {HyVoxPopuli: Armenian Speech Recognition Dataset},
-  year = {2023},
-  publisher = {Hugging Face},
-  journal = {Hugging Face Datasets},
-  url = {https://huggingface.co/datasets/Edmon02/hyvoxpopuli}
-}
-"""
-
-_DESCRIPTION = """\
-HyVoxPopuli is a high-quality Armenian speech recognition dataset designed for training
-and evaluating automatic speech recognition (ASR) models. The dataset contains carefully
-curated audio segments paired with their transcriptions in Armenian.
-
-Features:
-- High-quality audio recordings at 16kHz sampling rate
-- Expert-validated transcriptions
-- Speaker metadata including gender and speaker ID
-- Optional accent information where applicable
-"""
-
-_HOMEPAGE = "https://huggingface.co/datasets/Edmon02/hyvoxpopuli"
-
-_LICENSE = "CC-BY-4.0"
-
-_BASE_DATA_DIR = "data/"
-_AUDIO_ARCHIVE_PATH = _BASE_DATA_DIR + "{split}/{split}_dataset.tar.gz"
-_METADATA_PATH = _BASE_DATA_DIR + "{split}.tsv"
-
-class Hyvoxpopuli(datasets.GeneratorBasedBuilder):
-    """The HyVoxPopuli dataset: A high-quality Armenian speech recognition dataset."""
-
-    VERSION = datasets.Version("1.0.0")
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="default",
-            version=VERSION,
-            description="Default configuration for HyVoxPopuli dataset",
-        ),
-    ]
-
-    def _info(self) -> datasets.DatasetInfo:
-        """Returns the dataset metadata."""
-        features = datasets.Features(
-            {
-                "audio_id": datasets.Value("string"),
-                "audio": datasets.Audio(sampling_rate=16_000),
-                "raw_text": datasets.Value("string"),
-                "normalized_text": datasets.Value("string"),
-                "gender": datasets.Value("string", id=None),
-                "speaker_id": datasets.Value("string"),
-                "is_gold_transcript": datasets.Value("bool"),
-                "accent": datasets.Value("string", id=None),
-            }
-        )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-            task_templates=[
-                AutomaticSpeechRecognition(
-                    audio_column="audio",
-                    transcription_column="normalized_text",
-                )
-            ],
-        )
-
-    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
-        """Returns SplitGenerators."""
-        split_names = {
-            "train": str(datasets.Split.TRAIN),
-            "dev": str(datasets.Split.VALIDATION),
-            "test": str(datasets.Split.TEST)
-        }
-
-        # Prepare download URLs
-        audio_urls = {
-            split: [_AUDIO_ARCHIVE_PATH.format(split=split)]
-            for split in split_names.keys()
-        }
-        meta_urls = {
-            split: _METADATA_PATH.format(split=split)
-            for split in split_names.keys()
-        }
-
-        # Download and extract files
-        meta_paths = dl_manager.download_and_extract(meta_urls)
-        audio_paths = dl_manager.download(audio_urls)
-        local_extracted_audio_paths = dl_manager.extract(audio_paths)
-
-        # Create split generators
-        return [
-            datasets.SplitGenerator(
-                name=split_name,
-                gen_kwargs={
-                    "audio_archives": [dl_manager.iter_archive(path) for path in audio_paths[split]],
-                    "local_extracted_archive_path": local_extracted_audio_paths[split][0]
-                    if isinstance(local_extracted_audio_paths[split], list)
-                    else local_extracted_audio_paths[split],
-                    "metadata_path": meta_paths[split],
-                }
-            )
-            for split, split_name in split_names.items()
-        ]
-
-    def _generate_examples(
-        self,
-        audio_archives: List[Iterator[Tuple[str, Any]]],
-        local_extracted_archive_path: str,
-        metadata_path: str,
-    ) -> Iterator[Tuple[str, Dict[str, Any]]]:
-        """Yields examples as (key, example) tuples."""
-        features = ["raw_text", "normalized_text", "speaker_id", "gender", "is_gold_transcript", "accent"]
-
-        # Load metadata
-        with open(metadata_path, encoding="utf-8") as f:
-            metadata = {row["id"]: row for row in csv.DictReader(f, delimiter="\t")}
-
-        # Process audio files
-        for audio_archive in audio_archives:
-            for audio_filename, audio_file in audio_archive:
-                # Extract audio ID from filename
-                audio_id = os.path.splitext(os.path.basename(audio_filename))[0]
-
-                # Construct audio path
-                path = os.path.join(local_extracted_archive_path, audio_filename) if local_extracted_archive_path else audio_filename
-
-                try:
-                    # Create example dictionary
-                    example = {
-                        "audio_id": audio_id,
-                        "audio": {"path": path, "bytes": audio_file.read()},
-                    }
-
-                    # Add metadata fields
-                    for feature in features:
-                        example[feature] = metadata[audio_id][feature]
-
-                    yield audio_id, example
-
-                except Exception as e:
-                    logger.warning(f"Error processing audio file {audio_id}: {str(e)}")
-                    continue

pyproject.toml DELETED
@@ -1,32 +0,0 @@
-[tool.black]
-line-length = 88
-include = '\.pyx?$'
-extend-exclude = '''
-# A regex preceded with ^/ will apply only to files and directories
-# in the root of the project.
-^/external/
-'''
-
-[tool.isort]
-profile = "black"
-multi_line_output = 3
-include_trailing_comma = true
-force_grid_wrap = 0
-use_parentheses = true
-ensure_newline_before_comments = true
-line_length = 88
-
-[tool.pylint.messages_control]
-disable = [
-    "C0111",  # missing-docstring
-    "C0103",  # invalid-name
-    "C0330",  # bad-continuation
-    "C0326",  # bad-whitespace
-    "W0621",  # redefined-outer-name
-    "W0612",  # unused-variable
-    "W0611",  # unused-import
-    "R0903",  # too-few-public-methods
-]
-
-[tool.pylint.format]
-max-line-length = 88

requirements.txt DELETED
@@ -1,17 +0,0 @@
-# Core dependencies
-datasets>=2.0.0
-librosa>=0.8.0
-numpy>=1.19.0
-torch>=1.7.0
-torchaudio>=0.7.0
-transformers>=4.30.0
-jiwer>=2.5.0
-pesq>=0.0.3
-pystoi>=0.3.3
-
-# Development dependencies
-black>=22.3.0
-isort>=5.10.1
-pylint>=2.15.0
-pytest>=7.0.0
-pytest-cov>=3.0.0

scripts/convert_to_parquet.py DELETED
@@ -1,131 +0,0 @@
-"""Script to convert HyVoxPopuli dataset to Parquet format for Hugging Face."""
-import os
-import json
-import tarfile
-import pandas as pd
-from pathlib import Path
-from typing import Dict, Any
-import datasets
-from datasets import Dataset, Features, Value, Audio
-
-def read_tsv(file_path: str) -> pd.DataFrame:
-    """Read TSV file into a pandas DataFrame."""
-    return pd.read_csv(file_path, sep='\t', quoting=3)
-
-def process_audio_archive(archive_path: str) -> Dict[str, bytes]:
-    """Extract audio files from tar.gz archive."""
-    audio_files = {}
-    with tarfile.open(archive_path, 'r:gz') as tar:
-        for member in tar.getmembers():
-            if member.name.endswith('.wav'):
-                f = tar.extractfile(member)
-                if f is not None:
-                    audio_files[Path(member.name).stem] = f.read()
-    return audio_files
-
-def convert_split_to_parquet(split_name: str, base_dir: Path):
-    """Convert a single split to Parquet format."""
-    print(f"Processing {split_name} split...")
-
-    # Read metadata
-    tsv_path = base_dir / f"{split_name}.tsv"
-    df = read_tsv(str(tsv_path))
-
-    # Process audio files
-    archive_path = base_dir / split_name / f"{split_name}_dataset.tar.gz"
-    audio_files = process_audio_archive(str(archive_path))
-
-    # Create dataset
-    features = Features({
-        'audio_id': Value('string'),
-        'audio': Audio(sampling_rate=16000),
-        'raw_text': Value('string'),
-        'normalized_text': Value('string'),
-        'gender': Value('string'),
-        'speaker_id': Value('string'),
-        'is_gold_transcript': Value('bool'),
-        'accent': Value('string'),
-    })
-
-    examples = []
-    for _, row in df.iterrows():
-        audio_id = row['id']
-        if audio_id in audio_files:
-            example = {
-                'audio_id': audio_id,
-                'audio': {'bytes': audio_files[audio_id], 'path': f"{audio_id}.wav"},
-                'raw_text': row['raw_text'],
-                'normalized_text': row['normalized_text'],
-                'gender': row['gender'],
-                'speaker_id': row['speaker_id'],
-                'is_gold_transcript': row['is_gold_transcript'],
-                'accent': row['accent'],
-            }
-            examples.append(example)
-
-    # Create and save dataset
-    dataset = Dataset.from_list(examples, features=features)
-    parquet_dir = base_dir / "parquet"
-    parquet_dir.mkdir(exist_ok=True)
-    dataset.to_parquet(str(parquet_dir / f"{split_name}.parquet"))
-
-    return len(examples)
-
-def create_dataset_info(stats: Dict[str, int]):
-    """Create dataset info JSON file."""
-    info = {
-        "default": {
-            "features": {
-                "audio_id": "string",
-                "audio": {
-                    "path": "string",
-                    "array": "float32[]",
-                    "sampling_rate": "int32"
-                },
-                "raw_text": "string",
-                "normalized_text": "string",
-                "gender": "string",
-                "speaker_id": "string",
-                "is_gold_transcript": "bool",
-                "accent": "string"
-            },
-            "splits": {
-                "train": {"num_examples": stats["train"]},
-                "validation": {"num_examples": stats["validation"]},
-                "test": {"num_examples": stats["test"]}
-            }
-        }
-    }
-
-    return info
-
-def convert_to_parquet():
-    """Convert the dataset to Parquet format."""
-    base_dir = Path("data")
-
-    # Process each split
-    stats = {}
-    split_mapping = {
-        "train": "train",
-        "dev": "validation",
-        "test": "test"
-    }
-
-    for source_split, target_split in split_mapping.items():
-        stats[target_split] = convert_split_to_parquet(source_split, base_dir)
-
-    # Create and save dataset info
-    info = create_dataset_info(stats)
-
-    parquet_dir = base_dir / "parquet"
-    with open(parquet_dir / "dataset_info.json", "w", encoding="utf-8") as f:
-        json.dump(info, f, indent=2)
-
-    # Print statistics
-    print("\nConversion complete!")
-    print("Dataset statistics:")
-    for split, count in stats.items():
-        print(f"{split}: {count} examples")
-
-if __name__ == "__main__":
-    convert_to_parquet()

scripts/run_benchmarks.py DELETED
@@ -1,125 +0,0 @@
-#!/usr/bin/env python3
-"""
-Benchmark script for HyVoxPopuli dataset.
-Evaluates different ASR models on the dataset and computes relevant metrics.
-"""
-
-import argparse
-import json
-import logging
-from pathlib import Path
-from typing import Dict, List, Optional
-
-import torch
-import torchaudio
-from datasets import load_dataset
-from transformers import (
-    Wav2Vec2ForCTC,
-    Wav2Vec2Processor,
-    WhisperForConditionalGeneration,
-    WhisperProcessor,
-)
-
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-MODELS = {
-    "wav2vec2": "facebook/wav2vec2-large-xlsr-53",
-    "whisper": "openai/whisper-large-v3",
-}
-
-def load_model_and_processor(model_name: str):
-    """Load model and processor based on model name."""
-    if model_name == "wav2vec2":
-        model = Wav2Vec2ForCTC.from_pretrained(MODELS[model_name])
-        processor = Wav2Vec2Processor.from_pretrained(MODELS[model_name])
-    elif model_name == "whisper":
-        model = WhisperForConditionalGeneration.from_pretrained(MODELS[model_name])
-        processor = WhisperProcessor.from_pretrained(MODELS[model_name])
-    else:
-        raise ValueError(f"Unsupported model: {model_name}")
-
-    return model, processor
-
-def compute_metrics(predictions: List[str], references: List[str]) -> Dict[str, float]:
-    """Compute WER and CER metrics."""
-    from jiwer import cer, wer
-
-    metrics = {
-        "wer": wer(references, predictions),
-        "cer": cer(references, predictions)
-    }
-    return metrics
-
-def run_benchmark(model_name: str, split: str = "test"):
-    """Run benchmark on specified model and dataset split."""
-    logger.info(f"Running benchmark for {model_name} on {split} split")
-
-    # Load dataset
-    dataset = load_dataset("parquet", split=split)
-
-    # Load model and processor
-    model, processor = load_model_and_processor(model_name)
-    device = "cuda" if torch.cuda.is_available() else "cpu"
-    model = model.to(device)
-
-    predictions = []
-    references = []
-
-    for example in dataset:
-        # Process audio
-        audio_input = processor(example["audio"], return_tensors="pt", padding=True)
-
-        # Generate prediction
-        with torch.no_grad():
-            if model_name == "wav2vec2":
-                logits = model(audio_input.input_values.to(device)).logits
-                predicted_ids = torch.argmax(logits, dim=-1)
-                transcription = processor.decode(predicted_ids[0])
-            else:  # whisper
-                predicted_ids = model.generate(audio_input.input_features.to(device))
-                transcription = processor.decode(predicted_ids[0], skip_special_tokens=True)
-
-        predictions.append(transcription)
-        references.append(example["text"])
-
-    # Compute metrics
-    metrics = compute_metrics(predictions, references)
-
-    # Save results
-    output_dir = Path("benchmarks") / model_name
-    output_dir.mkdir(parents=True, exist_ok=True)
-
-    with open(output_dir / f"{split}_results.json", "w") as f:
-        json.dump({
-            "model": MODELS[model_name],
-            "split": split,
-            "metrics": metrics,
-            "num_examples": len(dataset),
-        }, f, indent=2)
-
-    logger.info(f"Results saved to {output_dir}/{split}_results.json")
-    return metrics
-
-def main():
-    parser = argparse.ArgumentParser(description="Run benchmarks on HyVoxPopuli dataset")
-    parser.add_argument(
-        "--model",
-        choices=["wav2vec2", "whisper"],
-        required=True,
-        help="Model to benchmark"
-    )
-    parser.add_argument(
-        "--split",
-        choices=["train", "dev", "test"],
-        default="test",
-        help="Dataset split to evaluate on"
-    )
-
-    args = parser.parse_args()
-    metrics = run_benchmark(args.model, args.split)
-    print(f"Results for {args.model} on {args.split} split:")
-    print(json.dumps(metrics, indent=2))
-
-if __name__ == "__main__":
-    main()

scripts/validate_dataset.py DELETED
@@ -1,84 +0,0 @@
-"""Script to validate the HyVoxPopuli dataset structure and contents."""
-
-import os
-import json
-import csv
-from pathlib import Path
-from typing import Dict, List
-
-def validate_audio_files(data_dir: Path, split: str) -> List[str]:
-    """Validate audio files for a given split."""
-    errors = []
-    audio_dir = data_dir / split / f"{split}_dataset.tar.gz"
-
-    if not audio_dir.exists():
-        errors.append(f"Missing audio archive for split {split}")
-
-    return errors
-
-def validate_metadata(data_dir: Path, split: str) -> List[str]:
-    """Validate metadata file for a given split."""
-    errors = []
-    metadata_file = data_dir / f"{split}.tsv"
-
-    if not metadata_file.exists():
-        errors.append(f"Missing metadata file for split {split}")
-        return errors
-
-    required_columns = {
-        "id", "raw_text", "normalized_text", "speaker_id",
-        "gender", "is_gold_transcript", "accent"
-    }
-
-    try:
-        with open(metadata_file, "r", encoding="utf-8") as f:
-            reader = csv.DictReader(f, delimiter="\t")
-            headers = set(reader.fieldnames or [])
-            missing_columns = required_columns - headers
-            if missing_columns:
-                errors.append(f"Missing required columns in {split}.tsv: {missing_columns}")
-    except Exception as e:
-        errors.append(f"Error reading {split}.tsv: {str(e)}")
-
-    return errors
-
-def main():
-    """Main validation function."""
-    data_dir = Path("data")
-    splits = ["train", "dev", "test"]
-    all_errors: Dict[str, List[str]] = {}
-
-    # Validate n_files.json
-    n_files_path = data_dir / "n_files.json"
-    if not n_files_path.exists():
-        all_errors["n_files"] = ["Missing n_files.json"]
-    else:
-        try:
-            with open(n_files_path, "r") as f:
-                n_files = json.load(f)
-                if not all(split in n_files for split in splits):
-                    all_errors["n_files"] = ["Missing split information in n_files.json"]
-        except json.JSONDecodeError:
-            all_errors["n_files"] = ["Invalid JSON in n_files.json"]
-
-    # Validate each split
-    for split in splits:
-        errors = []
-        errors.extend(validate_audio_files(data_dir, split))
-        errors.extend(validate_metadata(data_dir, split))
-        if errors:
-            all_errors[split] = errors
-
-    # Print results
-    if all_errors:
-        print("\nValidation Errors:")
-        for category, errors in all_errors.items():
-            print(f"\n{category}:")
-            for error in errors:
-                print(f"  - {error}")
-        exit(1)
-    else:
-        print("\nValidation successful! Dataset structure is correct.")
-
-if __name__ == "__main__":
-    main()

setup.py DELETED
@@ -1,30 +0,0 @@
-from setuptools import setup, find_packages
-
-with open("README.md", "r", encoding="utf-8") as fh:
-    long_description = fh.read()
-
-setup(
-    name="hyvoxpopuli",
-    version="1.0.0",
-    author="Edmon",
-    author_email="",  # Add your email
-    description="A high-quality Armenian speech recognition dataset",
-    long_description=long_description,
-    long_description_content_type="text/markdown",
-    url="https://huggingface.co/datasets/Edmon02/hyvoxpopuli",
-    packages=find_packages(),
-    classifiers=[
-        "Programming Language :: Python :: 3",
-        "License :: OSI Approved :: Creative Commons Attribution 4.0 International License",
-        "Operating System :: OS Independent",
-        "Topic :: Scientific/Engineering :: Artificial Intelligence",
-        "Topic :: Multimedia :: Sound/Audio :: Speech",
-    ],
-    python_requires=">=3.7",
-    install_requires=[
-        "datasets>=2.0.0",
-        "librosa>=0.8.0",
-        "torch>=1.7.0",
-        "torchaudio>=0.7.0",
-    ],
-)

data/parquet/test.parquet → test-00000-of-00001.parquet RENAMED
File without changes
tests/test_dataset.py DELETED
@@ -1,56 +0,0 @@
-"""Tests for the HyVoxPopuli dataset."""
-import os
-import unittest
-import tempfile
-
-import datasets
-import numpy as np
-
-
-class TestHyVoxPopuli(unittest.TestCase):
-    """Test cases for HyVoxPopuli dataset."""
-
-    @classmethod
-    def setUpClass(cls):
-        """Set up test fixtures."""
-        try:
-            cls.dataset = datasets.load_dataset("Edmon02/hyvoxpopuli", split="train[:2]")
-        except Exception as e:
-            raise unittest.SkipTest(f"Failed to load dataset: {str(e)}")
-
-    def test_dataset_features(self):
-        """Test if dataset has the correct features."""
-        expected_features = {
-            "audio_id", "audio", "raw_text", "normalized_text",
-            "gender", "speaker_id", "is_gold_transcript", "accent"
-        }
-        self.assertEqual(set(self.dataset.features.keys()), expected_features)
-
-    def test_audio_sampling_rate(self):
-        """Test if audio sampling rate is correct."""
-        self.assertEqual(self.dataset[0]["audio"]["sampling_rate"], 16000)
-
-    def test_text_fields_not_empty(self):
-        """Test if text fields are not empty."""
-        for example in self.dataset:
-            self.assertTrue(example["normalized_text"].strip())
-            if example["raw_text"]:  # raw_text might be empty for some examples
-                self.assertTrue(example["raw_text"].strip())
-
-    def test_speaker_metadata(self):
-        """Test if speaker metadata is valid."""
-        for example in self.dataset:
-            self.assertIn(example["gender"], ["male", "female"])
-            self.assertTrue(example["speaker_id"].strip())
-
-    def test_audio_array_valid(self):
-        """Test if audio arrays are valid numpy arrays."""
-        for example in self.dataset:
-            audio_array = example["audio"]["array"]
-            self.assertIsInstance(audio_array, np.ndarray)
-            self.assertEqual(audio_array.dtype, np.float32)
-            self.assertTrue(len(audio_array) > 0)
-
-
-if __name__ == "__main__":
-    unittest.main()

data/parquet/train.parquet → train-00000-of-00001.parquet RENAMED
File without changes
data/parquet/dev.parquet → validation-00000-of-00001.parquet RENAMED
File without changes
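The three renames above move the Parquet shards out of data/parquet/ into the repository root using the Hub's split-NNNNN-of-NNNNN naming, with dev.parquet becoming the validation split. A short sketch for checking that all three splits still resolve after the change, assuming the split sizes recorded in the deleted dataset_info.json (498 / 62 / 63) remain accurate:

    from datasets import load_dataset

    ds = load_dataset("Edmon02/hyvoxpopuli")
    for split in ("train", "validation", "test"):
        # Expected counts per the removed dataset_info.json: 498, 62, 63.
        print(split, ds[split].num_rows)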