s1.1-direct-32B / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.896,
"eval_steps": 500,
"global_step": 30,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.096,
"grad_norm": 3.609203815460205,
"learning_rate": 3.3333333333333333e-06,
"loss": 0.4792,
"step": 1
},
{
"epoch": 0.192,
"grad_norm": 3.4065887928009033,
"learning_rate": 6.666666666666667e-06,
"loss": 0.4425,
"step": 2
},
{
"epoch": 0.288,
"grad_norm": 3.396181583404541,
"learning_rate": 1e-05,
"loss": 0.4887,
"step": 3
},
{
"epoch": 0.384,
"grad_norm": 1.740384578704834,
"learning_rate": 9.966191788709716e-06,
"loss": 0.3527,
"step": 4
},
{
"epoch": 0.48,
"grad_norm": 1.299846887588501,
"learning_rate": 9.86522435289912e-06,
"loss": 0.3075,
"step": 5
},
{
"epoch": 0.576,
"grad_norm": 1.0410711765289307,
"learning_rate": 9.698463103929542e-06,
"loss": 0.3255,
"step": 6
},
{
"epoch": 0.672,
"grad_norm": 0.9354220628738403,
"learning_rate": 9.468163201617063e-06,
"loss": 0.2772,
"step": 7
},
{
"epoch": 0.768,
"grad_norm": 0.684252917766571,
"learning_rate": 9.177439057064684e-06,
"loss": 0.2475,
"step": 8
},
{
"epoch": 0.864,
"grad_norm": 0.548189640045166,
"learning_rate": 8.83022221559489e-06,
"loss": 0.2523,
"step": 9
},
{
"epoch": 0.96,
"grad_norm": 0.5903598070144653,
"learning_rate": 8.43120818934367e-06,
"loss": 0.2553,
"step": 10
},
{
"epoch": 1.064,
"grad_norm": 1.125814437866211,
"learning_rate": 7.985792958513932e-06,
"loss": 0.4129,
"step": 11
},
{
"epoch": 1.16,
"grad_norm": 0.6294312477111816,
"learning_rate": 7.500000000000001e-06,
"loss": 0.2131,
"step": 12
},
{
"epoch": 1.256,
"grad_norm": 0.6567461490631104,
"learning_rate": 6.980398830195785e-06,
"loss": 0.2005,
"step": 13
},
{
"epoch": 1.3519999999999999,
"grad_norm": 0.5858513712882996,
"learning_rate": 6.434016163555452e-06,
"loss": 0.2022,
"step": 14
},
{
"epoch": 1.448,
"grad_norm": 0.6008303761482239,
"learning_rate": 5.8682408883346535e-06,
"loss": 0.1983,
"step": 15
},
{
"epoch": 1.544,
"grad_norm": 0.6003760099411011,
"learning_rate": 5.290724144552379e-06,
"loss": 0.1655,
"step": 16
},
{
"epoch": 1.6400000000000001,
"grad_norm": 0.5758850574493408,
"learning_rate": 4.7092758554476215e-06,
"loss": 0.1897,
"step": 17
},
{
"epoch": 1.736,
"grad_norm": 0.5658831000328064,
"learning_rate": 4.131759111665349e-06,
"loss": 0.1846,
"step": 18
},
{
"epoch": 1.8319999999999999,
"grad_norm": 0.4785068929195404,
"learning_rate": 3.5659838364445505e-06,
"loss": 0.1496,
"step": 19
},
{
"epoch": 1.928,
"grad_norm": 0.5116356015205383,
"learning_rate": 3.019601169804216e-06,
"loss": 0.1628,
"step": 20
},
{
"epoch": 2.032,
"grad_norm": 0.9877704381942749,
"learning_rate": 2.5000000000000015e-06,
"loss": 0.2728,
"step": 21
},
{
"epoch": 2.128,
"grad_norm": 0.51435786485672,
"learning_rate": 2.0142070414860704e-06,
"loss": 0.1391,
"step": 22
},
{
"epoch": 2.224,
"grad_norm": 0.4802136719226837,
"learning_rate": 1.5687918106563326e-06,
"loss": 0.1173,
"step": 23
},
{
"epoch": 2.32,
"grad_norm": 0.45264706015586853,
"learning_rate": 1.1697777844051105e-06,
"loss": 0.1175,
"step": 24
},
{
"epoch": 2.416,
"grad_norm": 0.506596565246582,
"learning_rate": 8.225609429353187e-07,
"loss": 0.139,
"step": 25
},
{
"epoch": 2.512,
"grad_norm": 0.5231615304946899,
"learning_rate": 5.318367983829393e-07,
"loss": 0.1215,
"step": 26
},
{
"epoch": 2.608,
"grad_norm": 0.4291784167289734,
"learning_rate": 3.015368960704584e-07,
"loss": 0.1056,
"step": 27
},
{
"epoch": 2.7039999999999997,
"grad_norm": 0.4940212666988373,
"learning_rate": 1.3477564710088097e-07,
"loss": 0.1158,
"step": 28
},
{
"epoch": 2.8,
"grad_norm": 0.4577478766441345,
"learning_rate": 3.3808211290284886e-08,
"loss": 0.1173,
"step": 29
},
{
"epoch": 2.896,
"grad_norm": 0.48457086086273193,
"learning_rate": 0.0,
"loss": 0.1155,
"step": 30
},
{
"epoch": 2.896,
"step": 30,
"total_flos": 14576698425344.0,
"train_loss": 0.2289695508778095,
"train_runtime": 1925.3202,
"train_samples_per_second": 1.558,
"train_steps_per_second": 0.016
}
],
"logging_steps": 1,
"max_steps": 30,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 14576698425344.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
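
The JSON above is the state file the Hugging Face Trainer writes alongside checkpoints: "log_history" holds one entry per logged step (loss, learning rate, gradient norm) plus a final summary entry with aggregate fields such as "train_loss" and "train_runtime". As a minimal sketch (not part of the original file), the Python below shows one way to load such a file and print the loss curve; the local path "trainer_state.json" is an assumption, adjust it to wherever the file is saved.

# Sketch: read a trainer_state.json and summarize the logged training run.
# Assumes the file shown above has been downloaded to the working directory.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step entries carry "loss"; the closing summary entry carries
# aggregate fields like "train_loss" and "train_runtime" instead.
step_logs = [e for e in state["log_history"] if "loss" in e]
summary = [e for e in state["log_history"] if "train_loss" in e]

print(f"global_step={state['global_step']}  epoch={state['epoch']}")
for entry in step_logs:
    print(f"step {entry['step']:>2}: loss={entry['loss']:.4f}  "
          f"lr={entry['learning_rate']:.2e}  grad_norm={entry['grad_norm']:.3f}")

if summary:
    s = summary[0]
    print(f"train_loss={s['train_loss']:.4f}  "
          f"runtime={s['train_runtime']:.1f}s  "
          f"samples/s={s['train_samples_per_second']:.3f}")

Run against this file, the final line would report the averaged training loss of roughly 0.229 over the 30 steps, matching the "train_loss" field in the summary entry.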