---
dataset_info:
  features:
    - name: input_ids
      sequence: int32
    - name: labels
      sequence: int64
    - name: attention_mask
      sequence: int8
  splits:
    - name: train
      num_bytes: 16014371080
      num_examples: 2236072
  download_size: 5191715192
  dataset_size: 16014371080
configs:
  - config_name: default
    data_files:
      - split: train
        path: data/train-*
---

<!-- Source datasets concatenated to build this tokenized corpus. -->

```python
dataset_paths = [
    # NOTE(review): "LLama" casing differs from the sibling repos ("Llama") —
    # verify this is the actual repo id before "fixing" it.
    "Closed-TTS/Emilia-All-Ja-LLama-3B-Tokenized",
    "Closed-TTS/Starrail-Voice-Ja-Llama-3B-Tokenized",
    "Closed-TTS/Genshin-Voice-Ja-Llama-3B-Tokenized",
    "Closed-TTS/2M-Belebele-Ja-Llama-3B-Tokenized",
    "Closed-TTS/Common-Voice-17-Ja-Llama-3B-Tokenized",
    # NOTE(review): this entry appeared twice in the original list; deduplicated
    # here. If double-weighting Common Voice 17 was intentional, restore the
    # second copy deliberately, with a comment saying so.
]
```