# bofenghuang's picture
# Add files using upload-large-folder tool
# a02f7fa verified
#!/usr/bin/env python
# coding=utf-8
# Copyright 2024 Bofeng Huang
import os
from collections import defaultdict
import fire
import numpy as np
import pandas as pd
from tqdm import tqdm
from meta import SUBSET_NAMES_AND_PATHS
def _print_ds_info(df, duration_column_name="duration"):
print(f"#utterances: {df.shape[0]}")
durations = df["duration"]
print(
f"Duration statistics: tot {durations.sum() / 3600:.2f} h, "
f"mean {durations.mean():.2f} s, "
f"min {durations.min():.2f} s, "
f"max {durations.max():.2f} s"
)
print()
def main(output_file):
    """Write per-language markdown tables of audio hours at several WER cutoffs.

    For each language prefix found in SUBSET_NAMES_AND_PATHS (subset names are
    assumed to look like "<lang>-<split>" — TODO confirm against meta.py), one
    markdown table is written to ``output_file`` with a row per split and a
    column per WER cutoff (20/10/5/0). Each cell is the total duration, in
    hours, of the utterances whose "wer" field is at or below that cutoff.
    A final "total" row sums each column over the language's splits.

    Args:
        output_file: Path of the markdown file to (over)write.
    """
    wer_cutoffs = [20, 10, 5, 0]
    dataset_dir = os.path.dirname(os.path.abspath(__file__))

    # Group (split_name, manifest_path) pairs by language prefix,
    # e.g. "fr-train" -> key "fr".
    lang_manifests_dict = defaultdict(list)
    for subset_name, paths in SUBSET_NAMES_AND_PATHS.items():
        manifest_path = f'{dataset_dir}/{paths["dir"]}/{paths["text_file"]}'
        lang_manifests_dict[subset_name.split("-")[0]].append((subset_name, manifest_path))

    with open(output_file, "w") as f:
        for lang, manifest_files in lang_manifests_dict.items():
            f.write("\n" + lang + "\n" + "\n")
            f.write("| Split | 20% | 10% | 5% | 0% |" + "\n")
            f.write("| :--- | :---: | :---: | :---: | :---: |" + "\n")

            rows = []
            for split, manifest_file in tqdm(manifest_files):
                # Each manifest is a JSON-lines file expected to carry at
                # least "wer" and "duration" (seconds) columns.
                df = pd.read_json(manifest_file, lines=True)
                hours = [df[df["wer"] <= cutoff]["duration"].sum() / 3600 for cutoff in wer_cutoffs]
                rows.append([split] + hours)

            # Column-wise totals, derived from wer_cutoffs instead of
            # hard-coded indices so the row stays in sync with the header.
            rows.append(["total"] + [sum(row[i + 1] for row in rows) for i in range(len(wer_cutoffs))])

            for row in rows:
                f.write(f"| {row[0]} | " + " | ".join(f"{value:,.2f}" for value in row[1:]) + " |" + "\n")
# Expose ``main`` as a command-line interface via python-fire; the output
# file path is passed as a positional argument or ``--output_file`` flag.
if __name__ == "__main__":
    fire.Fire(main)