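# Continued-pretraining config for Llama-3.2-1B on the amv1 text corpus, in
# litgpt's YAML config format. It initializes from the downloaded
# meta-llama/Llama-3.2-1B checkpoint and resumes automatically if a run
# already exists in out_dir.
# Usage sketch (assuming the stock litgpt CLI; the filename below is just
# whatever this file is saved as):
#   litgpt pretrain --config llama3-1b-amv1-4epoch-2k.yaml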
model_name: Llama-3.2-1B
out_dir: /home/aiops/zhuty/litgpt/out/pretrain/llama3-1b-amv1-4epoch-2k
precision: bf16-mixed
initial_checkpoint_dir: checkpoints/meta-llama/Llama-3.2-1B
resume: auto
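# Data: litgpt.data.TextFiles reads plain-text files from the train/val
# directories below. `add_eos` does not appear in stock TextFiles, so it is
# presumably a fork-specific flag that appends an EOS token after each document.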
data:
  class_path: litgpt.data.TextFiles
  init_args:
    train_data_path: /home/aiops/zhuty/cont_data/bio/amv1/train
    val_data_path: /home/aiops/zhuty/cont_data/bio/amv1/test
    seed: 42
    num_workers: 10
    add_eos: true
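# Training: 4 epochs at sequence length 2048, checkpointing every 2500 steps
# and at every epoch boundary. litgpt derives gradient accumulation from the
# batch settings: with global_batch_size 256 and micro_batch_size 1, each
# device accumulates 256 / num_devices micro-batches per optimizer step.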
train:
  save_interval: 2500
  save_interval_epochs: 1
  save_optimizer_state: false
  log_interval: 1
  global_batch_size: 256
  micro_batch_size: 1
  lr_warmup_fraction: 0.01
  epochs: 4
  max_seq_length: 2048
  max_norm: 1.0
  min_lr: 5.0e-06
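# Eval: validate every 20 steps on at most 100 batches, plus full validation
# passes before and after training. `evaluate_example`,
# `num_generation_examples`, and `calculate_exact_match` look fork-specific
# (generate from the first validation example; skip exact-match scoring).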
eval:
  interval: 20
  max_iters: 100
  initial_validation: true
  final_validation: true
  evaluate_example: first
  num_generation_examples: 1
  calculate_exact_match: false
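# Logging: report to the "mathcont" Weights & Biases project
# (logger_name: wandb is set below).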
log:
  project: mathcont
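# Optimizer: AdamW with betas (0.9, 0.95), the values commonly used for
# Llama-style pretraining. litgpt warms up for lr_warmup_fraction of the run,
# then decays from lr 5.0e-05 toward min_lr 5.0e-06 (set under `train` above).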
optimizer:
  class_path: torch.optim.AdamW
  init_args:
    lr: 5.0e-05
    weight_decay: 0.1
    betas:
      - 0.9
      - 0.95
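# Runtime: all visible GPUs on a single node, sharded with FSDP; the tokenizer
# comes from the same checkpoint directory as the initial weights. `compiler`
# and `executors` (sdpa, torchcompile, torch) are not stock litgpt options;
# the names match Thunder-style executors and are presumably fork-specific.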
devices: auto
num_nodes: 1
tokenizer_dir: checkpoints/meta-llama/Llama-3.2-1B
logger_name: wandb
seed: 42
compiler: torch
executors:
- sdpa
- torchcompile
- torch
strategy: fsdp