export HF_PROJECT="t5-small-24L-dutch-english"
export DATASET="yhavinga/mc4_nl_cleaned"        # Name of the dataset on the Hugging Face Hub
export DATASET_CONFIG="large_en_nl"             # Config of the dataset on the Hugging Face Hub
export DATASET_SPLIT="train"                    # Split to use for training the tokenizer and model
export CONFIG_NAME="../config/config-small-24L.json"
export TOKENIZER_NAME="yhavinga/net5-v1.1-base-cased-500"
export MODEL_PATH="${HOME}/data/${HF_PROJECT}"  # Path to the model
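# The --auth_token argument in the command below reads a Hugging Face access
# token from disk. A minimal sketch of producing that file, assuming
# huggingface_hub is installed and that (as in older versions of the CLI) the
# token is written to ~/.huggingface/token:
#
#   huggingface-cli login            # prompts for the token and stores it on disk
#   cat ~/.huggingface/token         # sanity check: should print the token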
python3 ../train/run_t5_mlm_flax_pmap.py \
  --output_dir="${MODEL_PATH}" \
  --model_type="t5" \
  --config_name="${CONFIG_NAME}" \
  --tokenizer_name="${TOKENIZER_NAME}" \
  --auth_token="$(cat ~/.huggingface/token)" \
  --preprocessing_num_workers="96" \
  --do_train --do_eval \
  --dataset_name="${DATASET}" \
  --dataset_config_name="${DATASET_CONFIG}" \
  --max_seq_length="512" \
  --per_device_train_batch_size="16" \
  --per_device_eval_batch_size="32" \
  --dtype="float32" \
  --optim="adafactor" \
  --learning_rate="0.005" \
  --lr_decay="linear" \
  --overwrite_output_dir \
  --num_train_epochs="1" \
  --logging_steps="100" \
  --save_steps="5000" \
  --eval_steps="5000" \
  --warmup_steps="20000" \
  --validation_split_count="15000" \
  --wandb_project="t5-small-24L-dutch-english" \
  --wandb_job_type="pmap"
# Optional arguments (uncomment and append to the command above, adding a
# trailing backslash to the preceding line): resume from the checkpoint in the
# output directory, or switch from the linear to an exponential LR schedule.
# --resume_from_checkpoint="${MODEL_PATH}"
# --lr_decay="exponential" \
# --lr_transition_steps="200000" \
# --lr_decay_rate="0.7" \
# --lr_staircase="false"
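# For reference: with pmap the per-device batch sizes above are replicated
# across all local accelerator devices on the host, so the effective global
# batch size is per_device_train_batch_size * device count. A minimal sketch
# of that arithmetic (the count of 8 devices is an assumption, e.g. a single
# TPU v3-8 host; the script itself may compute this slightly differently):
python3 - <<'EOF'
import jax

per_device_train_batch_size = 16            # value passed to the script above
devices = jax.local_device_count()          # e.g. 8 on a TPU v3-8
print("global train batch size:", per_device_train_batch_size * devices)
EOF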