phi3m0128-wds-0.75-kendall-onof-ofif-corr-max-2-simpo-max1500-default/checkpoint-50/trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.04440497335701599,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008880994671403197,
      "grad_norm": 0.045356735587120056,
      "learning_rate": 4.999451708687114e-06,
      "logits/chosen": 14.352750778198242,
      "logits/rejected": 14.82281494140625,
      "logps/chosen": -0.2592294216156006,
      "logps/rejected": -0.32852867245674133,
      "loss": 0.9315,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.38884416222572327,
      "rewards/margins": 0.10394889116287231,
      "rewards/rejected": -0.4927930235862732,
      "step": 10
    },
    {
      "epoch": 0.017761989342806393,
      "grad_norm": 0.05255189165472984,
      "learning_rate": 4.997807075247147e-06,
      "logits/chosen": 14.80792236328125,
      "logits/rejected": 15.017183303833008,
      "logps/chosen": -0.2825874388217926,
      "logps/rejected": -0.36373966932296753,
      "loss": 0.9318,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.4238811433315277,
      "rewards/margins": 0.12172831594944,
      "rewards/rejected": -0.5456094741821289,
      "step": 20
    },
    {
      "epoch": 0.02664298401420959,
      "grad_norm": 0.056027185171842575,
      "learning_rate": 4.9950668210706795e-06,
      "logits/chosen": 14.45336627960205,
      "logits/rejected": 15.261484146118164,
      "logps/chosen": -0.2638034522533417,
      "logps/rejected": -0.37216562032699585,
      "loss": 0.9211,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.3957051932811737,
      "rewards/margins": 0.1625431776046753,
      "rewards/rejected": -0.5582484006881714,
      "step": 30
    },
    {
      "epoch": 0.035523978685612786,
      "grad_norm": 0.06511653959751129,
      "learning_rate": 4.9912321481237616e-06,
      "logits/chosen": 14.59851360321045,
      "logits/rejected": 15.112770080566406,
      "logps/chosen": -0.28972768783569336,
      "logps/rejected": -0.36043626070022583,
      "loss": 0.941,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.43459147214889526,
      "rewards/margins": 0.10606291145086288,
      "rewards/rejected": -0.5406544208526611,
      "step": 40
    },
    {
      "epoch": 0.04440497335701599,
      "grad_norm": 0.0637577474117279,
      "learning_rate": 4.986304738420684e-06,
      "logits/chosen": 14.678179740905762,
      "logits/rejected": 15.114399909973145,
      "logps/chosen": -0.3033604919910431,
      "logps/rejected": -0.3262741267681122,
      "loss": 0.929,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.45504075288772583,
      "rewards/margins": 0.03437047079205513,
      "rewards/rejected": -0.4894111752510071,
      "step": 50
    },
    {
      "epoch": 0.04440497335701599,
      "eval_logits/chosen": 14.936029434204102,
      "eval_logits/rejected": 14.780267715454102,
      "eval_logps/chosen": -0.29386037588119507,
      "eval_logps/rejected": -0.3304942548274994,
      "eval_loss": 0.9458721876144409,
      "eval_rewards/accuracies": 0.49450549483299255,
      "eval_rewards/chosen": -0.4407905340194702,
      "eval_rewards/margins": 0.05495081841945648,
      "eval_rewards/rejected": -0.49574142694473267,
      "eval_runtime": 27.7436,
      "eval_samples_per_second": 26.24,
      "eval_steps_per_second": 3.28,
      "step": 50
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.2135594806843802e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
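
A minimal sketch of reading this state file, assuming the JSON above is saved locally under the hypothetical path "trainer_state.json". It only uses keys that appear in the file ("log_history", "loss", "eval_loss", "rewards/margins", "rewards/accuracies", "step"); the separation of training and evaluation entries relies on the fact that eval records carry "eval_loss" instead of "loss".

```python
import json

# Hypothetical local path; adjust to wherever the checkpoint was downloaded.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training log entries have a "loss" key; the evaluation entry has "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

for e in train_logs:
    print(f"step {e['step']:>4}: loss={e['loss']:.4f} "
          f"margin={e['rewards/margins']:.4f} "
          f"acc={e['rewards/accuracies']:.3f}")

for e in eval_logs:
    print(f"eval @ step {e['step']}: eval_loss={e['eval_loss']:.4f} "
          f"eval_acc={e['eval_rewards/accuracies']:.3f}")
```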