phi3m0128-wds-0.9-kendall-onof-ofif-corr-max-2-simpo-max1500-default/checkpoint-50/trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.04036326942482341,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008072653884964682,
      "grad_norm": 0.04381619393825531,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": 14.719314575195312,
      "logits/rejected": 15.156938552856445,
      "logps/chosen": -0.2856016755104065,
      "logps/rejected": -0.31895095109939575,
      "loss": 0.9242,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.42840251326560974,
      "rewards/margins": 0.050023891031742096,
      "rewards/rejected": -0.47842639684677124,
      "step": 10
    },
    {
      "epoch": 0.016145307769929364,
      "grad_norm": 0.05155143886804581,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": 14.559402465820312,
      "logits/rejected": 15.32939338684082,
      "logps/chosen": -0.2736968398094177,
      "logps/rejected": -0.3458033502101898,
      "loss": 0.9127,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.4105452597141266,
      "rewards/margins": 0.10815979540348053,
      "rewards/rejected": -0.5187050104141235,
      "step": 20
    },
    {
      "epoch": 0.024217961654894045,
      "grad_norm": 0.05071854218840599,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": 14.653738021850586,
      "logits/rejected": 15.168347358703613,
      "logps/chosen": -0.2985997200012207,
      "logps/rejected": -0.34624338150024414,
      "loss": 0.9141,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.44789963960647583,
      "rewards/margins": 0.07146544009447098,
      "rewards/rejected": -0.5193650722503662,
      "step": 30
    },
    {
      "epoch": 0.03229061553985873,
      "grad_norm": 0.052318744361400604,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": 14.621539115905762,
      "logits/rejected": 15.138806343078613,
      "logps/chosen": -0.27971988916397095,
      "logps/rejected": -0.360626757144928,
      "loss": 0.9313,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.4195798337459564,
      "rewards/margins": 0.12136033922433853,
      "rewards/rejected": -0.5409401655197144,
      "step": 40
    },
    {
      "epoch": 0.04036326942482341,
      "grad_norm": 0.06900553405284882,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": 14.308789253234863,
      "logits/rejected": 14.605737686157227,
      "logps/chosen": -0.2685723304748535,
      "logps/rejected": -0.323064386844635,
      "loss": 0.9076,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.40285855531692505,
      "rewards/margins": 0.08173803985118866,
      "rewards/rejected": -0.4845965802669525,
      "step": 50
    },
    {
      "epoch": 0.04036326942482341,
      "eval_logits/chosen": 14.528907775878906,
      "eval_logits/rejected": 15.016877174377441,
      "eval_logps/chosen": -0.2801212966442108,
      "eval_logps/rejected": -0.34862396121025085,
      "eval_loss": 0.9108895063400269,
      "eval_rewards/accuracies": 0.5544554591178894,
      "eval_rewards/chosen": -0.4201819598674774,
      "eval_rewards/margins": 0.10275395959615707,
      "eval_rewards/rejected": -0.5229359865188599,
      "eval_runtime": 30.01,
      "eval_samples_per_second": 26.691,
      "eval_steps_per_second": 3.366,
      "step": 50
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.2074278135739187e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}