|
{ |
|
"best_metric": 0.810126582278481, |
|
"best_model_checkpoint": "deit-base-distilled-patch16-224-55-fold2/checkpoint-101", |
|
"epoch": 85.71428571428571, |
|
"eval_steps": 500, |
|
"global_step": 300, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.8571428571428571, |
|
"eval_accuracy": 0.5443037974683544, |
|
"eval_loss": 0.7763431072235107, |
|
"eval_runtime": 1.1332, |
|
"eval_samples_per_second": 69.717, |
|
"eval_steps_per_second": 2.647, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"eval_accuracy": 0.620253164556962, |
|
"eval_loss": 0.6779789328575134, |
|
"eval_runtime": 1.1183, |
|
"eval_samples_per_second": 70.64, |
|
"eval_steps_per_second": 2.683, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 2.857142857142857, |
|
"grad_norm": 2.252147674560547, |
|
"learning_rate": 1.6666666666666667e-05, |
|
"loss": 0.721, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 2.857142857142857, |
|
"eval_accuracy": 0.5316455696202531, |
|
"eval_loss": 0.6954271793365479, |
|
"eval_runtime": 1.1555, |
|
"eval_samples_per_second": 68.371, |
|
"eval_steps_per_second": 2.596, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"eval_accuracy": 0.620253164556962, |
|
"eval_loss": 0.6370497941970825, |
|
"eval_runtime": 1.1498, |
|
"eval_samples_per_second": 68.706, |
|
"eval_steps_per_second": 2.609, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 4.857142857142857, |
|
"eval_accuracy": 0.5949367088607594, |
|
"eval_loss": 0.6104753017425537, |
|
"eval_runtime": 1.1377, |
|
"eval_samples_per_second": 69.439, |
|
"eval_steps_per_second": 2.637, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 5.714285714285714, |
|
"grad_norm": 3.993246555328369, |
|
"learning_rate": 3.3333333333333335e-05, |
|
"loss": 0.6207, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"eval_accuracy": 0.6835443037974683, |
|
"eval_loss": 0.5798102021217346, |
|
"eval_runtime": 1.1138, |
|
"eval_samples_per_second": 70.93, |
|
"eval_steps_per_second": 2.694, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 6.857142857142857, |
|
"eval_accuracy": 0.7468354430379747, |
|
"eval_loss": 0.5703896880149841, |
|
"eval_runtime": 1.1441, |
|
"eval_samples_per_second": 69.05, |
|
"eval_steps_per_second": 2.622, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"eval_accuracy": 0.7088607594936709, |
|
"eval_loss": 0.5878576636314392, |
|
"eval_runtime": 1.1443, |
|
"eval_samples_per_second": 69.035, |
|
"eval_steps_per_second": 2.622, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 8.571428571428571, |
|
"grad_norm": 8.652019500732422, |
|
"learning_rate": 5e-05, |
|
"loss": 0.5427, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 8.857142857142858, |
|
"eval_accuracy": 0.6582278481012658, |
|
"eval_loss": 0.6726908087730408, |
|
"eval_runtime": 1.1277, |
|
"eval_samples_per_second": 70.055, |
|
"eval_steps_per_second": 2.66, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_accuracy": 0.6962025316455697, |
|
"eval_loss": 0.5840915441513062, |
|
"eval_runtime": 1.1335, |
|
"eval_samples_per_second": 69.694, |
|
"eval_steps_per_second": 2.647, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 10.857142857142858, |
|
"eval_accuracy": 0.6962025316455697, |
|
"eval_loss": 0.605922520160675, |
|
"eval_runtime": 1.1431, |
|
"eval_samples_per_second": 69.11, |
|
"eval_steps_per_second": 2.624, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 11.428571428571429, |
|
"grad_norm": 6.731996059417725, |
|
"learning_rate": 4.814814814814815e-05, |
|
"loss": 0.4775, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"eval_accuracy": 0.6075949367088608, |
|
"eval_loss": 1.0271003246307373, |
|
"eval_runtime": 1.1385, |
|
"eval_samples_per_second": 69.392, |
|
"eval_steps_per_second": 2.635, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 12.857142857142858, |
|
"eval_accuracy": 0.7088607594936709, |
|
"eval_loss": 0.6412355303764343, |
|
"eval_runtime": 1.1419, |
|
"eval_samples_per_second": 69.181, |
|
"eval_steps_per_second": 2.627, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"eval_accuracy": 0.6582278481012658, |
|
"eval_loss": 0.8064222931861877, |
|
"eval_runtime": 1.1524, |
|
"eval_samples_per_second": 68.552, |
|
"eval_steps_per_second": 2.603, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 14.285714285714286, |
|
"grad_norm": 9.111239433288574, |
|
"learning_rate": 4.62962962962963e-05, |
|
"loss": 0.4961, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 14.857142857142858, |
|
"eval_accuracy": 0.6582278481012658, |
|
"eval_loss": 0.5599769949913025, |
|
"eval_runtime": 1.1323, |
|
"eval_samples_per_second": 69.767, |
|
"eval_steps_per_second": 2.649, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"eval_accuracy": 0.6708860759493671, |
|
"eval_loss": 0.5889371633529663, |
|
"eval_runtime": 1.1493, |
|
"eval_samples_per_second": 68.735, |
|
"eval_steps_per_second": 2.61, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 16.857142857142858, |
|
"eval_accuracy": 0.6835443037974683, |
|
"eval_loss": 0.8380601406097412, |
|
"eval_runtime": 1.1402, |
|
"eval_samples_per_second": 69.284, |
|
"eval_steps_per_second": 2.631, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 17.142857142857142, |
|
"grad_norm": 9.676458358764648, |
|
"learning_rate": 4.4444444444444447e-05, |
|
"loss": 0.4391, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"eval_accuracy": 0.6962025316455697, |
|
"eval_loss": 0.6724734902381897, |
|
"eval_runtime": 1.1265, |
|
"eval_samples_per_second": 70.13, |
|
"eval_steps_per_second": 2.663, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 18.857142857142858, |
|
"eval_accuracy": 0.7215189873417721, |
|
"eval_loss": 0.5349910855293274, |
|
"eval_runtime": 1.1568, |
|
"eval_samples_per_second": 68.293, |
|
"eval_steps_per_second": 2.593, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"grad_norm": 5.236982822418213, |
|
"learning_rate": 4.259259259259259e-05, |
|
"loss": 0.413, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"eval_accuracy": 0.7088607594936709, |
|
"eval_loss": 0.6033266186714172, |
|
"eval_runtime": 1.1208, |
|
"eval_samples_per_second": 70.483, |
|
"eval_steps_per_second": 2.677, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 20.857142857142858, |
|
"eval_accuracy": 0.6835443037974683, |
|
"eval_loss": 0.7280053496360779, |
|
"eval_runtime": 1.1275, |
|
"eval_samples_per_second": 70.064, |
|
"eval_steps_per_second": 2.661, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 22.0, |
|
"eval_accuracy": 0.7341772151898734, |
|
"eval_loss": 0.6081877946853638, |
|
"eval_runtime": 1.1526, |
|
"eval_samples_per_second": 68.541, |
|
"eval_steps_per_second": 2.603, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 22.857142857142858, |
|
"grad_norm": 3.7129807472229004, |
|
"learning_rate": 4.074074074074074e-05, |
|
"loss": 0.336, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 22.857142857142858, |
|
"eval_accuracy": 0.759493670886076, |
|
"eval_loss": 0.6529555916786194, |
|
"eval_runtime": 1.1446, |
|
"eval_samples_per_second": 69.021, |
|
"eval_steps_per_second": 2.621, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 24.0, |
|
"eval_accuracy": 0.7088607594936709, |
|
"eval_loss": 0.6921886205673218, |
|
"eval_runtime": 1.1486, |
|
"eval_samples_per_second": 68.779, |
|
"eval_steps_per_second": 2.612, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 24.857142857142858, |
|
"eval_accuracy": 0.7088607594936709, |
|
"eval_loss": 0.6648610234260559, |
|
"eval_runtime": 1.1222, |
|
"eval_samples_per_second": 70.395, |
|
"eval_steps_per_second": 2.673, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 25.714285714285715, |
|
"grad_norm": 2.387436866760254, |
|
"learning_rate": 3.888888888888889e-05, |
|
"loss": 0.2745, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 26.0, |
|
"eval_accuracy": 0.7088607594936709, |
|
"eval_loss": 0.7311040163040161, |
|
"eval_runtime": 1.1344, |
|
"eval_samples_per_second": 69.64, |
|
"eval_steps_per_second": 2.645, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 26.857142857142858, |
|
"eval_accuracy": 0.7088607594936709, |
|
"eval_loss": 0.7192353010177612, |
|
"eval_runtime": 1.1573, |
|
"eval_samples_per_second": 68.263, |
|
"eval_steps_per_second": 2.592, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 28.0, |
|
"eval_accuracy": 0.7215189873417721, |
|
"eval_loss": 0.7408412098884583, |
|
"eval_runtime": 1.1424, |
|
"eval_samples_per_second": 69.154, |
|
"eval_steps_per_second": 2.626, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 28.571428571428573, |
|
"grad_norm": 6.9804277420043945, |
|
"learning_rate": 3.7037037037037037e-05, |
|
"loss": 0.2494, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 28.857142857142858, |
|
"eval_accuracy": 0.810126582278481, |
|
"eval_loss": 0.5842322111129761, |
|
"eval_runtime": 1.1414, |
|
"eval_samples_per_second": 69.212, |
|
"eval_steps_per_second": 2.628, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"eval_accuracy": 0.759493670886076, |
|
"eval_loss": 0.5948711037635803, |
|
"eval_runtime": 1.1375, |
|
"eval_samples_per_second": 69.448, |
|
"eval_steps_per_second": 2.637, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 30.857142857142858, |
|
"eval_accuracy": 0.7468354430379747, |
|
"eval_loss": 0.6884945034980774, |
|
"eval_runtime": 1.1212, |
|
"eval_samples_per_second": 70.462, |
|
"eval_steps_per_second": 2.676, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 31.428571428571427, |
|
"grad_norm": 2.36922287940979, |
|
"learning_rate": 3.518518518518519e-05, |
|
"loss": 0.2291, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 32.0, |
|
"eval_accuracy": 0.7088607594936709, |
|
"eval_loss": 0.874555766582489, |
|
"eval_runtime": 1.1468, |
|
"eval_samples_per_second": 68.887, |
|
"eval_steps_per_second": 2.616, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 32.857142857142854, |
|
"eval_accuracy": 0.7088607594936709, |
|
"eval_loss": 0.800529420375824, |
|
"eval_runtime": 1.1336, |
|
"eval_samples_per_second": 69.688, |
|
"eval_steps_per_second": 2.646, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 34.0, |
|
"eval_accuracy": 0.7341772151898734, |
|
"eval_loss": 0.7033743858337402, |
|
"eval_runtime": 1.1296, |
|
"eval_samples_per_second": 69.939, |
|
"eval_steps_per_second": 2.656, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 34.285714285714285, |
|
"grad_norm": 3.042140483856201, |
|
"learning_rate": 3.3333333333333335e-05, |
|
"loss": 0.2, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 34.857142857142854, |
|
"eval_accuracy": 0.7088607594936709, |
|
"eval_loss": 0.7047172784805298, |
|
"eval_runtime": 1.141, |
|
"eval_samples_per_second": 69.24, |
|
"eval_steps_per_second": 2.629, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 36.0, |
|
"eval_accuracy": 0.7341772151898734, |
|
"eval_loss": 0.8362169861793518, |
|
"eval_runtime": 1.1313, |
|
"eval_samples_per_second": 69.828, |
|
"eval_steps_per_second": 2.652, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 36.857142857142854, |
|
"eval_accuracy": 0.7468354430379747, |
|
"eval_loss": 0.8509089946746826, |
|
"eval_runtime": 1.1603, |
|
"eval_samples_per_second": 68.085, |
|
"eval_steps_per_second": 2.586, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 37.142857142857146, |
|
"grad_norm": 2.2762393951416016, |
|
"learning_rate": 3.148148148148148e-05, |
|
"loss": 0.1674, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 38.0, |
|
"eval_accuracy": 0.759493670886076, |
|
"eval_loss": 0.9237213134765625, |
|
"eval_runtime": 1.1366, |
|
"eval_samples_per_second": 69.505, |
|
"eval_steps_per_second": 2.639, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 38.857142857142854, |
|
"eval_accuracy": 0.759493670886076, |
|
"eval_loss": 0.7526997327804565, |
|
"eval_runtime": 1.1354, |
|
"eval_samples_per_second": 69.579, |
|
"eval_steps_per_second": 2.642, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"grad_norm": 4.063281059265137, |
|
"learning_rate": 2.962962962962963e-05, |
|
"loss": 0.1764, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"eval_accuracy": 0.7468354430379747, |
|
"eval_loss": 0.7904393076896667, |
|
"eval_runtime": 1.1403, |
|
"eval_samples_per_second": 69.279, |
|
"eval_steps_per_second": 2.631, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 40.857142857142854, |
|
"eval_accuracy": 0.759493670886076, |
|
"eval_loss": 0.7332668304443359, |
|
"eval_runtime": 1.1312, |
|
"eval_samples_per_second": 69.835, |
|
"eval_steps_per_second": 2.652, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 42.0, |
|
"eval_accuracy": 0.7341772151898734, |
|
"eval_loss": 0.7777941226959229, |
|
"eval_runtime": 1.1452, |
|
"eval_samples_per_second": 68.983, |
|
"eval_steps_per_second": 2.62, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 42.857142857142854, |
|
"grad_norm": 1.630341649055481, |
|
"learning_rate": 2.777777777777778e-05, |
|
"loss": 0.1706, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 42.857142857142854, |
|
"eval_accuracy": 0.7721518987341772, |
|
"eval_loss": 0.7341672778129578, |
|
"eval_runtime": 1.1386, |
|
"eval_samples_per_second": 69.385, |
|
"eval_steps_per_second": 2.635, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 44.0, |
|
"eval_accuracy": 0.7468354430379747, |
|
"eval_loss": 0.8143826127052307, |
|
"eval_runtime": 1.1491, |
|
"eval_samples_per_second": 68.751, |
|
"eval_steps_per_second": 2.611, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 44.857142857142854, |
|
"eval_accuracy": 0.759493670886076, |
|
"eval_loss": 0.8299198150634766, |
|
"eval_runtime": 1.1355, |
|
"eval_samples_per_second": 69.576, |
|
"eval_steps_per_second": 2.642, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 45.714285714285715, |
|
"grad_norm": 3.097490072250366, |
|
"learning_rate": 2.5925925925925925e-05, |
|
"loss": 0.1617, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 46.0, |
|
"eval_accuracy": 0.7468354430379747, |
|
"eval_loss": 1.0110950469970703, |
|
"eval_runtime": 1.179, |
|
"eval_samples_per_second": 67.003, |
|
"eval_steps_per_second": 2.544, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 46.857142857142854, |
|
"eval_accuracy": 0.759493670886076, |
|
"eval_loss": 0.8602201342582703, |
|
"eval_runtime": 1.1263, |
|
"eval_samples_per_second": 70.14, |
|
"eval_steps_per_second": 2.664, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 48.0, |
|
"eval_accuracy": 0.7341772151898734, |
|
"eval_loss": 0.8332175016403198, |
|
"eval_runtime": 1.1297, |
|
"eval_samples_per_second": 69.928, |
|
"eval_steps_per_second": 2.656, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 48.57142857142857, |
|
"grad_norm": 4.115355014801025, |
|
"learning_rate": 2.4074074074074074e-05, |
|
"loss": 0.1622, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 48.857142857142854, |
|
"eval_accuracy": 0.7468354430379747, |
|
"eval_loss": 0.8297150731086731, |
|
"eval_runtime": 1.1338, |
|
"eval_samples_per_second": 69.677, |
|
"eval_steps_per_second": 2.646, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"eval_accuracy": 0.759493670886076, |
|
"eval_loss": 0.881697952747345, |
|
"eval_runtime": 1.1442, |
|
"eval_samples_per_second": 69.042, |
|
"eval_steps_per_second": 2.622, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 50.857142857142854, |
|
"eval_accuracy": 0.759493670886076, |
|
"eval_loss": 0.8742266297340393, |
|
"eval_runtime": 1.1356, |
|
"eval_samples_per_second": 69.566, |
|
"eval_steps_per_second": 2.642, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 51.42857142857143, |
|
"grad_norm": 2.3206353187561035, |
|
"learning_rate": 2.2222222222222223e-05, |
|
"loss": 0.1437, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 52.0, |
|
"eval_accuracy": 0.759493670886076, |
|
"eval_loss": 1.0696383714675903, |
|
"eval_runtime": 1.1494, |
|
"eval_samples_per_second": 68.729, |
|
"eval_steps_per_second": 2.61, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 52.857142857142854, |
|
"eval_accuracy": 0.759493670886076, |
|
"eval_loss": 0.9411976337432861, |
|
"eval_runtime": 1.1462, |
|
"eval_samples_per_second": 68.926, |
|
"eval_steps_per_second": 2.617, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 54.0, |
|
"eval_accuracy": 0.7974683544303798, |
|
"eval_loss": 0.7411226630210876, |
|
"eval_runtime": 1.132, |
|
"eval_samples_per_second": 69.787, |
|
"eval_steps_per_second": 2.65, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 54.285714285714285, |
|
"grad_norm": 3.764564037322998, |
|
"learning_rate": 2.037037037037037e-05, |
|
"loss": 0.1492, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 54.857142857142854, |
|
"eval_accuracy": 0.759493670886076, |
|
"eval_loss": 0.9043496251106262, |
|
"eval_runtime": 1.1547, |
|
"eval_samples_per_second": 68.414, |
|
"eval_steps_per_second": 2.598, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 56.0, |
|
"eval_accuracy": 0.7848101265822784, |
|
"eval_loss": 0.7935737371444702, |
|
"eval_runtime": 1.1556, |
|
"eval_samples_per_second": 68.362, |
|
"eval_steps_per_second": 2.596, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 56.857142857142854, |
|
"eval_accuracy": 0.7721518987341772, |
|
"eval_loss": 0.8230910301208496, |
|
"eval_runtime": 1.1502, |
|
"eval_samples_per_second": 68.685, |
|
"eval_steps_per_second": 2.608, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 57.142857142857146, |
|
"grad_norm": 2.8445208072662354, |
|
"learning_rate": 1.8518518518518518e-05, |
|
"loss": 0.1279, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 58.0, |
|
"eval_accuracy": 0.7721518987341772, |
|
"eval_loss": 1.089436650276184, |
|
"eval_runtime": 1.168, |
|
"eval_samples_per_second": 67.635, |
|
"eval_steps_per_second": 2.568, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 58.857142857142854, |
|
"eval_accuracy": 0.7974683544303798, |
|
"eval_loss": 1.007054328918457, |
|
"eval_runtime": 1.1442, |
|
"eval_samples_per_second": 69.042, |
|
"eval_steps_per_second": 2.622, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"grad_norm": 2.42126202583313, |
|
"learning_rate": 1.6666666666666667e-05, |
|
"loss": 0.1317, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"eval_accuracy": 0.7721518987341772, |
|
"eval_loss": 0.9893307089805603, |
|
"eval_runtime": 1.1414, |
|
"eval_samples_per_second": 69.214, |
|
"eval_steps_per_second": 2.628, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 60.857142857142854, |
|
"eval_accuracy": 0.7468354430379747, |
|
"eval_loss": 1.0475964546203613, |
|
"eval_runtime": 1.1369, |
|
"eval_samples_per_second": 69.487, |
|
"eval_steps_per_second": 2.639, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 62.0, |
|
"eval_accuracy": 0.7848101265822784, |
|
"eval_loss": 0.8080578446388245, |
|
"eval_runtime": 1.1418, |
|
"eval_samples_per_second": 69.191, |
|
"eval_steps_per_second": 2.628, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 62.857142857142854, |
|
"grad_norm": 2.587399482727051, |
|
"learning_rate": 1.4814814814814815e-05, |
|
"loss": 0.1456, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 62.857142857142854, |
|
"eval_accuracy": 0.7468354430379747, |
|
"eval_loss": 0.8136084079742432, |
|
"eval_runtime": 1.1341, |
|
"eval_samples_per_second": 69.657, |
|
"eval_steps_per_second": 2.645, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 64.0, |
|
"eval_accuracy": 0.7848101265822784, |
|
"eval_loss": 0.9612722992897034, |
|
"eval_runtime": 1.1822, |
|
"eval_samples_per_second": 66.825, |
|
"eval_steps_per_second": 2.538, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 64.85714285714286, |
|
"eval_accuracy": 0.7848101265822784, |
|
"eval_loss": 0.9783176183700562, |
|
"eval_runtime": 1.1425, |
|
"eval_samples_per_second": 69.145, |
|
"eval_steps_per_second": 2.626, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 65.71428571428571, |
|
"grad_norm": 2.009462594985962, |
|
"learning_rate": 1.2962962962962962e-05, |
|
"loss": 0.119, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 66.0, |
|
"eval_accuracy": 0.7721518987341772, |
|
"eval_loss": 1.0225824117660522, |
|
"eval_runtime": 1.1642, |
|
"eval_samples_per_second": 67.858, |
|
"eval_steps_per_second": 2.577, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 66.85714285714286, |
|
"eval_accuracy": 0.7721518987341772, |
|
"eval_loss": 1.0809653997421265, |
|
"eval_runtime": 1.1413, |
|
"eval_samples_per_second": 69.221, |
|
"eval_steps_per_second": 2.629, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 68.0, |
|
"eval_accuracy": 0.7974683544303798, |
|
"eval_loss": 0.9605960249900818, |
|
"eval_runtime": 1.1307, |
|
"eval_samples_per_second": 69.871, |
|
"eval_steps_per_second": 2.653, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 68.57142857142857, |
|
"grad_norm": 2.1945383548736572, |
|
"learning_rate": 1.1111111111111112e-05, |
|
"loss": 0.1323, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 68.85714285714286, |
|
"eval_accuracy": 0.7848101265822784, |
|
"eval_loss": 0.9852112531661987, |
|
"eval_runtime": 1.1679, |
|
"eval_samples_per_second": 67.643, |
|
"eval_steps_per_second": 2.569, |
|
"step": 241 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"eval_accuracy": 0.759493670886076, |
|
"eval_loss": 0.8826334476470947, |
|
"eval_runtime": 1.1435, |
|
"eval_samples_per_second": 69.086, |
|
"eval_steps_per_second": 2.624, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 70.85714285714286, |
|
"eval_accuracy": 0.7468354430379747, |
|
"eval_loss": 0.8169065117835999, |
|
"eval_runtime": 1.1429, |
|
"eval_samples_per_second": 69.122, |
|
"eval_steps_per_second": 2.625, |
|
"step": 248 |
|
}, |
|
{ |
|
"epoch": 71.42857142857143, |
|
"grad_norm": 4.010135650634766, |
|
"learning_rate": 9.259259259259259e-06, |
|
"loss": 0.126, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 72.0, |
|
"eval_accuracy": 0.759493670886076, |
|
"eval_loss": 0.8814924359321594, |
|
"eval_runtime": 1.1672, |
|
"eval_samples_per_second": 67.681, |
|
"eval_steps_per_second": 2.57, |
|
"step": 252 |
|
}, |
|
{ |
|
"epoch": 72.85714285714286, |
|
"eval_accuracy": 0.7721518987341772, |
|
"eval_loss": 0.9870919585227966, |
|
"eval_runtime": 1.1576, |
|
"eval_samples_per_second": 68.247, |
|
"eval_steps_per_second": 2.592, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 74.0, |
|
"eval_accuracy": 0.7721518987341772, |
|
"eval_loss": 0.8927366733551025, |
|
"eval_runtime": 1.1442, |
|
"eval_samples_per_second": 69.044, |
|
"eval_steps_per_second": 2.622, |
|
"step": 259 |
|
}, |
|
{ |
|
"epoch": 74.28571428571429, |
|
"grad_norm": 2.5069663524627686, |
|
"learning_rate": 7.4074074074074075e-06, |
|
"loss": 0.1013, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 74.85714285714286, |
|
"eval_accuracy": 0.7468354430379747, |
|
"eval_loss": 0.8364698886871338, |
|
"eval_runtime": 1.1466, |
|
"eval_samples_per_second": 68.9, |
|
"eval_steps_per_second": 2.616, |
|
"step": 262 |
|
}, |
|
{ |
|
"epoch": 76.0, |
|
"eval_accuracy": 0.7468354430379747, |
|
"eval_loss": 0.8423016667366028, |
|
"eval_runtime": 1.1449, |
|
"eval_samples_per_second": 69.0, |
|
"eval_steps_per_second": 2.62, |
|
"step": 266 |
|
}, |
|
{ |
|
"epoch": 76.85714285714286, |
|
"eval_accuracy": 0.7468354430379747, |
|
"eval_loss": 0.8330836296081543, |
|
"eval_runtime": 1.1475, |
|
"eval_samples_per_second": 68.845, |
|
"eval_steps_per_second": 2.614, |
|
"step": 269 |
|
}, |
|
{ |
|
"epoch": 77.14285714285714, |
|
"grad_norm": 2.3882863521575928, |
|
"learning_rate": 5.555555555555556e-06, |
|
"loss": 0.1142, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 78.0, |
|
"eval_accuracy": 0.7721518987341772, |
|
"eval_loss": 0.8203690052032471, |
|
"eval_runtime": 1.1426, |
|
"eval_samples_per_second": 69.14, |
|
"eval_steps_per_second": 2.626, |
|
"step": 273 |
|
}, |
|
{ |
|
"epoch": 78.85714285714286, |
|
"eval_accuracy": 0.7721518987341772, |
|
"eval_loss": 0.8286263942718506, |
|
"eval_runtime": 1.1509, |
|
"eval_samples_per_second": 68.642, |
|
"eval_steps_per_second": 2.607, |
|
"step": 276 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"grad_norm": 2.164783239364624, |
|
"learning_rate": 3.7037037037037037e-06, |
|
"loss": 0.1287, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"eval_accuracy": 0.7721518987341772, |
|
"eval_loss": 0.8701770901679993, |
|
"eval_runtime": 1.1544, |
|
"eval_samples_per_second": 68.434, |
|
"eval_steps_per_second": 2.599, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 80.85714285714286, |
|
"eval_accuracy": 0.7721518987341772, |
|
"eval_loss": 0.9069644808769226, |
|
"eval_runtime": 1.1505, |
|
"eval_samples_per_second": 68.667, |
|
"eval_steps_per_second": 2.608, |
|
"step": 283 |
|
}, |
|
{ |
|
"epoch": 82.0, |
|
"eval_accuracy": 0.7721518987341772, |
|
"eval_loss": 0.9024941921234131, |
|
"eval_runtime": 1.1478, |
|
"eval_samples_per_second": 68.826, |
|
"eval_steps_per_second": 2.614, |
|
"step": 287 |
|
}, |
|
{ |
|
"epoch": 82.85714285714286, |
|
"grad_norm": 3.1293911933898926, |
|
"learning_rate": 1.8518518518518519e-06, |
|
"loss": 0.099, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 82.85714285714286, |
|
"eval_accuracy": 0.7721518987341772, |
|
"eval_loss": 0.8805702328681946, |
|
"eval_runtime": 1.1532, |
|
"eval_samples_per_second": 68.506, |
|
"eval_steps_per_second": 2.602, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 84.0, |
|
"eval_accuracy": 0.759493670886076, |
|
"eval_loss": 0.8637142777442932, |
|
"eval_runtime": 1.1416, |
|
"eval_samples_per_second": 69.202, |
|
"eval_steps_per_second": 2.628, |
|
"step": 294 |
|
}, |
|
{ |
|
"epoch": 84.85714285714286, |
|
"eval_accuracy": 0.759493670886076, |
|
"eval_loss": 0.8577674031257629, |
|
"eval_runtime": 1.1432, |
|
"eval_samples_per_second": 69.103, |
|
"eval_steps_per_second": 2.624, |
|
"step": 297 |
|
}, |
|
{ |
|
"epoch": 85.71428571428571, |
|
"grad_norm": 2.0774335861206055, |
|
"learning_rate": 0.0, |
|
"loss": 0.1141, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 85.71428571428571, |
|
"eval_accuracy": 0.7468354430379747, |
|
"eval_loss": 0.855110764503479, |
|
"eval_runtime": 1.1385, |
|
"eval_samples_per_second": 69.39, |
|
"eval_steps_per_second": 2.635, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 85.71428571428571, |
|
"step": 300, |
|
"total_flos": 2.9362240500074496e+18, |
|
"train_loss": 0.24900371452172598, |
|
"train_runtime": 1679.7368, |
|
"train_samples_per_second": 26.314, |
|
"train_steps_per_second": 0.179 |
|
}, |
|
{ |
|
"epoch": 85.71428571428571, |
|
"eval_accuracy": 0.810126582278481, |
|
"eval_loss": 0.5842322111129761, |
|
"eval_runtime": 1.1419, |
|
"eval_samples_per_second": 69.185, |
|
"eval_steps_per_second": 2.627, |
|
"step": 300 |
|
} |
|
], |
|
"logging_steps": 10, |
|
"max_steps": 300, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 100, |
|
"save_steps": 500, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 2.9362240500074496e+18, |
|
"train_batch_size": 32, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |