Are you going to prepare the Emilia-Yodas dataset?

#2
by kadirnar - opened

Can you share the sample code for this process? I want to do this for other languages and Emilia-Yodas.

Owner

Something like this should work for the base dataset + YODAS with all langs:

# /// script
# requires-python = ">=3.12"
# dependencies = ["huggingface_hub", "datasets", "torchcodec", "soundfile", "torch", "snac", "tqdm", "pyarrow"]
# ///
import argparse
import os
from fnmatch import fnmatch

import pyarrow as pa
import pyarrow.parquet as pq
import torch
from datasets import Audio, Features, disable_progress_bars, load_dataset
from huggingface_hub import list_repo_files
from snac import SNAC
from torch.utils.data import DataLoader
from tqdm import tqdm


def as_chunks(my_list, chunk_size):
    for i in range(0, len(my_list), chunk_size):
        yield my_list[i : i + chunk_size]


def convert(r):
    id = "_id" if "_id" in r["json"] else "id"

    return {
        "id": r["json"][id],
        "language": r["json"]["language"],
        "speaker": r["json"]["speaker"],
        "text": r["json"]["text"].strip(),
        "audio": r["mp3"].get_all_samples().data,
    }


def load_shard(data_files, num_workers, streaming=True):
    cols = ["json", "__key__", "__url__", "mp3"]
    feat = Features({"mp3": Audio(sampling_rate=24000, decode=True)})
    ds = load_dataset("amphion/Emilia-Dataset", data_files=data_files, streaming=streaming)["train"]
    ds = ds.map(convert, remove_columns=cols, features=feat)
    dl = DataLoader(
        ds, batch_size=256, num_workers=num_workers, pin_memory=True, collate_fn=lambda rows: rows
    )
    return dl


@torch.compile()
def encode(codec, audio):
    return codec.encode(audio)


@torch.inference_mode()
def process_shard(codec, schema, path, shard, num_workers, row_group_size, pbar):
    pbar.write(path)
    wr = pq.ParquetWriter(path, schema=schema)

    with torch.autocast(device_type="cuda"):
        for batch in load_shard(shard, num_workers, streaming=True):
            for row in batch:
                codes = encode(codec, row["audio"].to(device="cuda", non_blocking=True).unsqueeze(0))
                del row["audio"]
                row["c_12"] = codes[0].flatten().tolist()
                row["c_24"] = codes[1].flatten().tolist()
                row["c_48"] = codes[2].flatten().tolist()

            table = pa.Table.from_pylist(batch, schema=schema)
            wr.write_table(table, row_group_size=row_group_size)
            pbar.update(len(batch))

    wr.close()


def main(args) -> None:
    disable_progress_bars()  # so datasets don't spam us with per-shard bars
    torch.set_float32_matmul_precision("medium")  # gotta go fast

    codec = SNAC.from_pretrained("hubertsiuzdak/snac_24khz").to(device="cuda").eval()

    paths = list_repo_files("amphion/Emilia-Dataset", repo_type="dataset")
    pbar = tqdm(total=54_792_590, unit="rows")  # total taken from estimate on the hf repo

    schema = pa.schema(
        {
            "id": pa.string(),
            "language": pa.string(),
            "speaker": pa.string(),
            "text": pa.string(),
            "c_12": pa.list_(pa.int16()),
            "c_24": pa.list_(pa.int16()),
            "c_48": pa.list_(pa.int16()),
        }
    )

    for lang in ["DE", "EN", "FR", "JA", "KO", "ZH"]:
        os.makedirs(f"{args.path}/Emilia/{lang}", exist_ok=True)
        shards = list(as_chunks([p for p in paths if fnmatch(p, f"Emilia/{lang}/*.tar")], args.chunk))

        for i, shard in enumerate(shards):
            path = f"{args.path}/Emilia/{lang}/{lang}-{i:03d}-of-{len(shards) - 1:03d}.parquet"
            process_shard(codec, schema, path, shard, args.chunk, args.row_group_size, pbar)

        os.makedirs(f"{args.path}/Emilia-YODAS/{lang}", exist_ok=True)
        shards = list(as_chunks([p for p in paths if fnmatch(p, f"Emilia-YODAS/{lang}/*.tar")], args.chunk))

        for i, shard in enumerate(shards):
            path = f"{args.path}/Emilia-YODAS/{lang}/{lang}-{i:03d}-of-{len(shards) - 1:03d}.parquet"
            process_shard(codec, schema, path, shard, args.chunk, args.row_group_size, pbar)

    pbar.close()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--row-group-size", type=int, default=200, help="parquet row group size")
    parser.add_argument("--chunk", type=int, default=8, help="src shards per dst shard (concurrency)")
    parser.add_argument("--path", default=".", help="output path")
    args = parser.parse_args()

    main(args)
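
Since the "# /// script" block at the top is inline script metadata (PEP 723), you can run this directly with uv, e.g. "uv run prepare_emilia.py --path ./out" (filename assumed), and the dependencies will be resolved automatically.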

The output format is a bit different from what I have in this repo, but better: it's split into multiple shards so it can be read with n>1 workers afterward, and it keeps more of the original metadata than just the transcripts. I can also add dnsmos, duration, and phone_count if you want those.
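
For example, a minimal sketch of reading the sharded output in parallel (the glob is illustrative; point it at wherever the parquet shards land):

from datasets import load_dataset
from torch.utils.data import DataLoader

# stream the parquet shards; with multiple files, datasets can assign
# different shards to different DataLoader workers
ds = load_dataset("parquet", data_files="Emilia/EN/*.parquet", split="train", streaming=True)

# num_workers > 1 only parallelizes reads because the output is multi-shard
dl = DataLoader(ds, batch_size=32, num_workers=4, collate_fn=lambda rows: rows)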

Thanks. Have you tried training? I've tried with the Qwen3 and SmolLM3 models; however, I'm getting an error related to the SNAC model during the inference stage. Could there be an issue with the dataset?

Owner

Yes, I've trained on this dataset, and it roundtrips through SNAC as well. I would assume the issue is in the transformation from SNAC ids -> tokenizer -> SNAC ids for your architecture. This dataset isn't specialized to any particular frame layout or tokenizer offset; it's raw SNAC codes exactly as they come out of SNAC.
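
If you want to sanity-check a row yourself, here's a minimal sketch of decoding the raw codes back to audio (the column names are the ones in this dataset; the rest is stock SNAC usage):

import torch
from snac import SNAC

codec = SNAC.from_pretrained("hubertsiuzdak/snac_24khz").eval()

def decode_row(row):
    # SNAC expects a list of (batch, length) code tensors, one per codebook level
    codes = [
        torch.tensor(row[k], dtype=torch.long).unsqueeze(0)
        for k in ("c_12", "c_24", "c_48")
    ]
    with torch.inference_mode():
        return codec.decode(codes)  # (1, 1, samples) at 24 kHz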

Why is the text content empty? Is there an error related to Emilia, or is there a mistake in the SNAC tokenization process?

[screenshot]

Text 0%:

[screenshot]

I skipped the columns that are not text.

[screenshot]

Code:

import torch
from datasets import load_dataset
from transformers import AutoTokenizer
from huggingface_hub import snapshot_download
import os

def tokenize_for_llm(dataset_name, output_dataset_name, tokenizer_name="meta-llama/Llama-3.2-1B-Instruct"):
    tokeniser_length = 128256
    start_of_text = 128000
    end_of_text = 128009
    
    start_of_speech = tokeniser_length + 1
    end_of_speech = tokeniser_length + 2
    
    start_of_human = tokeniser_length + 3
    end_of_human = tokeniser_length + 4
    
    start_of_ai = tokeniser_length + 5
    end_of_ai = tokeniser_length + 6
    pad_token = tokeniser_length + 7
    
    print(f"Downloading dataset: {dataset_name}")
    snapshot_download(
        repo_id=dataset_name,
        repo_type="dataset",
        revision="main",
        max_workers=64,
    )
    
    ds = load_dataset(dataset_name, "Emilia", split="train")
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
    num_proc = os.cpu_count() - 2
    
    def create_llm_input_ids(example):
        text_field = "text"
        text_ids = tokenizer.encode(example[text_field], add_special_tokens=True)
        text_ids.append(end_of_text)
        example["text_tokens"] = text_ids
        
        c_12 = example["c_12"]
        c_24 = example["c_24"]
        c_48 = example["c_48"]

        all_codes = []
        max_len = len(c_12)

        # interleave the three SNAC streams frame by frame: each 12 Hz frame
        # contributes 7 tokens (1 coarse, 2 mid, 4 fine), with each slot
        # shifted into its own 4096-wide id range above the text vocab
        for i in range(max_len):
            all_codes.append(c_12[i] + 128266)
            
            if 2*i < len(c_24):
                all_codes.append(c_24[2*i] + 128266 + 4096)
            if 2*i + 1 < len(c_24):
                all_codes.append(c_24[2*i + 1] + 128266 + (4*4096))
            
            if 4*i < len(c_48):
                all_codes.append(c_48[4*i] + 128266 + (2*4096))
            if 4*i + 1 < len(c_48):
                all_codes.append(c_48[4*i + 1] + 128266 + (3*4096))
            if 4*i + 2 < len(c_48):
                all_codes.append(c_48[4*i + 2] + 128266 + (5*4096))
            if 4*i + 3 < len(c_48):
                all_codes.append(c_48[4*i + 3] + 128266 + (6*4096))
        
        input_ids = (
            [start_of_human]
            + example["text_tokens"]
            + [end_of_human]
            + [start_of_ai]
            + [start_of_speech]
            + all_codes
            + [end_of_speech]
            + [end_of_ai]
        )
        
        example["input_ids"] = input_ids
        example["labels"] = input_ids.copy()
        example["attention_mask"] = [1] * len(input_ids)
        
        return example
    
    def filter_valid_data(example):
        return (
            example.get("c_12") is not None and len(example["c_12"]) > 0 and
            example.get("c_24") is not None and len(example["c_24"]) > 0 and
            example.get("c_48") is not None and len(example["c_48"]) > 0 and
            example.get("text") is not None and len(example["text"].strip()) > 0
        )
    
    ds = ds.filter(filter_valid_data)
    
    ds = ds.map(create_llm_input_ids, num_proc=num_proc)
    
    columns_to_keep = ["input_ids", "labels", "attention_mask"]
    columns_to_remove = [col for col in ds.column_names if col not in columns_to_keep]
    
    if columns_to_remove:
        ds = ds.remove_columns(columns_to_remove)
    
    print(f"Pushing tokenized dataset to: {output_dataset_name}")
    ds.push_to_hub(output_dataset_name)
    
    print("LLM tokenization completed successfully!")
    return ds

if __name__ == "__main__":
    input_dataset = "nytopop/emilia-en-snac"
    output_dataset = "OpenSpeechHub/emilia-en-snac-llama3"
    tokenizer_name = "meta-llama/Llama-3.2-1B-Instruct"
    
    tokenize_for_llm(input_dataset, output_dataset, tokenizer_name)
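
A quick self-check for this interleaving, assuming the layout above (untokenize_speech is a hypothetical helper, not part of the dataset): invert the mapping and verify every recovered code lands in [0, 4096):

def untokenize_speech(speech_tokens, base=128266):
    # invert the offset scheme: slot = (token - base) // 4096;
    # slot 0 -> c_12, slots {1, 4} -> c_24, slots {2, 3, 5, 6} -> c_48
    c_12, c_24, c_48 = [], [], []
    streams = {0: c_12, 1: c_24, 4: c_24, 2: c_48, 3: c_48, 5: c_48, 6: c_48}
    for t in speech_tokens:
        slot, code = divmod(t - base, 4096)
        assert 0 <= slot <= 6, f"token {t} is outside the SNAC code range"
        streams[slot].append(code)
    # a well-formed sequence has stream lengths in a 1:2:4 ratio
    assert len(c_24) == 2 * len(c_12) and len(c_48) == 4 * len(c_12)
    return c_12, c_24, c_48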

Is there really such a text in the Emilia dataset?
[screenshot]

Owner

> Why is the text content empty? Is there an error related to Emilia, or is there a mistake in the SNAC tokenization process?

The "1.96k..2.21k 0%" bucket means that 0% of the rows in the whole dataset have a text field whose length falls between 1.96k and 2.21k characters. It shouldn't map to empty text columns.
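
If you want to verify locally instead of trusting the viewer, something like this counts rows in that bucket (streaming, so nothing is fully downloaded; select_columns just avoids decoding the heavy code columns):

from datasets import load_dataset

ds = load_dataset("nytopop/emilia-en-snac", "Emilia", split="train", streaming=True)
ds = ds.select_columns(["text"])

n = hits = 0
for row in ds.take(100_000):  # sample; drop .take() for a full pass
    n += 1
    hits += 1960 <= len(row["text"]) < 2210
print(f"{hits}/{n} rows in the 1.96k..2.21k text-length bucket")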

> Is there really such a text in the Emilia dataset?

I didn't add or filter any rows. Emilia is a machine-transcribed/processed dataset, so there are bound to be some weird mistakes in there. In theory that row might actually match its audio, but it looks like a mistake. Looking at the other low-frequency buckets of abnormally long text (1k+ characters), I can see several other rows with a similar pattern of repeated words. I think it's probably just a Whisper failure.
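
If those rows bother you, a cheap heuristic filter along these lines should catch most of the repeated-word failures (the 0.3 threshold is arbitrary):

from collections import Counter

def looks_degenerate(text, max_ratio=0.3):
    # flag transcripts dominated by a single repeated word (typical Whisper loop failure)
    words = text.split()
    if len(words) < 20:
        return False
    _, top = Counter(words).most_common(1)[0]
    return top / len(words) > max_ratio

ds = ds.filter(lambda row: not looks_degenerate(row["text"]))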

kadirnar changed discussion status to closed
