henggg committed
Commit 86cd44f · verified · 1 Parent(s): fc88c06

Upload 18 files

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+plan/sft-qwen2.5-7b-instruct-graph-planning-bs128/tokenizer.json filter=lfs diff=lfs merge=lfs -text
plan/sft-qwen2.5-7b-instruct-graph-planning-bs128/README.md ADDED
@@ -0,0 +1,61 @@
+---
+library_name: peft
+license: other
+base_model: /nas/shared/ma4agi/model/Qwen2.5-7B-Instruct
+tags:
+- llama-factory
+- lora
+- generated_from_trainer
+model-index:
+- name: sft-qwen2.5-7b-instruct-graph-planning-bs128
+  results: []
+---
+
+<!-- This model card has been generated automatically according to the information the Trainer had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+# sft-qwen2.5-7b-instruct-graph-planning-bs128
+
+This model is a fine-tuned version of [/nas/shared/ma4agi/model/Qwen2.5-7B-Instruct](https://huggingface.co//nas/shared/ma4agi/model/Qwen2.5-7B-Instruct) on the graph_planning_train dataset.
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 0.0001
+- train_batch_size: 2
+- eval_batch_size: 8
+- seed: 42
+- distributed_type: multi-GPU
+- num_devices: 4
+- gradient_accumulation_steps: 16
+- total_train_batch_size: 128
+- total_eval_batch_size: 32
+- optimizer: adamw_torch with betas=(0.9,0.999), epsilon=1e-08 and no additional optimizer arguments
+- lr_scheduler_type: cosine
+- num_epochs: 3.0
+
+### Training results
+
+
+
+### Framework versions
+
+- PEFT 0.12.0
+- Transformers 4.48.2
+- Pytorch 2.5.1+cu121
+- Datasets 3.2.0
+- Tokenizers 0.21.0
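
The card above describes a LoRA adapter, not standalone weights, so it must be attached to the base model at load time. A minimal loading sketch, assuming the public Hub id `Qwen/Qwen2.5-7B-Instruct` stands in for the local NAS path recorded in the card:

```python
# Sketch only (not part of this commit). "Qwen/Qwen2.5-7B-Instruct" is an
# assumption in place of the local /nas/... path; the adapter directory is
# the one uploaded here.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-7B-Instruct", torch_dtype=torch.bfloat16, device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-7B-Instruct")
# Attach the LoRA adapter from this repository.
model = PeftModel.from_pretrained(
    base, "plan/sft-qwen2.5-7b-instruct-graph-planning-bs128"
)
```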
plan/sft-qwen2.5-7b-instruct-graph-planning-bs128/adapter_config.json ADDED
@@ -0,0 +1,34 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "model/Qwen2.5-7B-Instruct",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 16,
+  "lora_dropout": 0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "o_proj",
+    "q_proj",
+    "v_proj",
+    "gate_proj",
+    "up_proj",
+    "k_proj",
+    "down_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": false
+}
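
For reference, the key fields of this adapter_config.json correspond to a `peft.LoraConfig` roughly like the sketch below (illustrative, not part of the commit):

```python
# Sketch: a peft.LoraConfig matching the key fields of adapter_config.json above.
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,                 # "r": 8
    lora_alpha=16,       # "lora_alpha": 16
    lora_dropout=0.0,    # "lora_dropout": 0
    bias="none",
    target_modules=[
        "o_proj", "q_proj", "v_proj", "gate_proj", "up_proj", "k_proj", "down_proj",
    ],
    task_type="CAUSAL_LM",
)
```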
plan/sft-qwen2.5-7b-instruct-graph-planning-bs128/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd4e8a00f97b4f8898d8132bf875ce230c5176b5110fc99efcd196b9f515cff8
+size 80792096
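
The pointer above stores only the object's SHA-256 and size; a downloaded `adapter_model.safetensors` can be checked against it with a sketch like:

```python
# Sketch: verify a downloaded adapter_model.safetensors against the LFS pointer above.
import hashlib

h = hashlib.sha256()
with open("adapter_model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == "cd4e8a00f97b4f8898d8132bf875ce230c5176b5110fc99efcd196b9f515cff8"
```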
plan/sft-qwen2.5-7b-instruct-graph-planning-bs128/added_tokens.json ADDED
@@ -0,0 +1,24 @@
+{
+  "</tool_call>": 151658,
+  "<tool_call>": 151657,
+  "<|box_end|>": 151649,
+  "<|box_start|>": 151648,
+  "<|endoftext|>": 151643,
+  "<|file_sep|>": 151664,
+  "<|fim_middle|>": 151660,
+  "<|fim_pad|>": 151662,
+  "<|fim_prefix|>": 151659,
+  "<|fim_suffix|>": 151661,
+  "<|im_end|>": 151645,
+  "<|im_start|>": 151644,
+  "<|image_pad|>": 151655,
+  "<|object_ref_end|>": 151647,
+  "<|object_ref_start|>": 151646,
+  "<|quad_end|>": 151651,
+  "<|quad_start|>": 151650,
+  "<|repo_name|>": 151663,
+  "<|video_pad|>": 151656,
+  "<|vision_end|>": 151653,
+  "<|vision_pad|>": 151654,
+  "<|vision_start|>": 151652
+}
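
These ids sit at the top of the 152,064-entry vocabulary recorded in the model config. A quick sanity check once the tokenizer in this directory is loaded (illustrative sketch, assuming the local path):

```python
# Sketch: the special tokens above resolve to the listed ids after loading the tokenizer.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("plan/sft-qwen2.5-7b-instruct-graph-planning-bs128")
assert tok.convert_tokens_to_ids("<|im_start|>") == 151644
assert tok.convert_tokens_to_ids("<|im_end|>") == 151645  # also the eos/stop token
```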
plan/sft-qwen2.5-7b-instruct-graph-planning-bs128/all_results.json ADDED
@@ -0,0 +1,9 @@
+{
+  "epoch": 2.997242140099283,
+  "num_input_tokens_seen": 69953200,
+  "total_flos": 2.976146663409713e+18,
+  "train_loss": 0.004280612113766934,
+  "train_runtime": 8852.4475,
+  "train_samples_per_second": 4.914,
+  "train_steps_per_second": 0.038
+}
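
These figures are mutually consistent with the run log further down (14,500 examples, 3 epochs, 339 optimization steps); a quick cross-check:

```python
# Sketch: cross-checking all_results.json against the running_log.txt below.
train_runtime = 8852.4475        # seconds, from all_results.json
samples = 14_500 * 3             # Num examples x Num Epochs, from running_log.txt
steps = 339                      # Total optimization steps, from running_log.txt
print(samples / train_runtime)   # ~4.91  -> train_samples_per_second: 4.914
print(steps / train_runtime)     # ~0.038 -> train_steps_per_second: 0.038
```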
plan/sft-qwen2.5-7b-instruct-graph-planning-bs128/llamaboard_config.yaml ADDED
@@ -0,0 +1,77 @@
+top.booster: auto
+top.checkpoint_path: []
+top.finetuning_type: lora
+top.model_name: Qwen2.5-7B-Instruct
+top.quantization_bit: none
+top.quantization_method: bitsandbytes
+top.rope_scaling: none
+top.template: qwen
+train.additional_target: ''
+train.apollo_rank: 16
+train.apollo_scale: 32
+train.apollo_target: all
+train.apollo_update_interval: 200
+train.badam_mode: layer
+train.badam_switch_interval: 50
+train.badam_switch_mode: ascending
+train.badam_update_ratio: 0.05
+train.batch_size: 2
+train.compute_type: bf16
+train.create_new_adapter: false
+train.cutoff_len: 4096
+train.dataset:
+- graph_planning_train
+train.dataset_dir: data
+train.ds_offload: false
+train.ds_stage: none
+train.extra_args: '{"optim": "adamw_torch"}'
+train.freeze_extra_modules: ''
+train.freeze_trainable_layers: 2
+train.freeze_trainable_modules: all
+train.galore_rank: 16
+train.galore_scale: 2
+train.galore_target: all
+train.galore_update_interval: 200
+train.gradient_accumulation_steps: 16
+train.learning_rate: 1e-4
+train.logging_steps: 1
+train.lora_alpha: 16
+train.lora_dropout: 0
+train.lora_rank: 8
+train.lora_target: ''
+train.loraplus_lr_ratio: 0
+train.lr_scheduler_type: cosine
+train.mask_history: false
+train.max_grad_norm: '1.0'
+train.max_samples: '100000'
+train.neat_packing: false
+train.neftune_alpha: 0
+train.num_train_epochs: '3.0'
+train.packing: false
+train.ppo_score_norm: false
+train.ppo_whiten_rewards: false
+train.pref_beta: 0.1
+train.pref_ftx: 0
+train.pref_loss: sigmoid
+train.report_to:
+- none
+train.resize_vocab: false
+train.reward_model: []
+train.save_steps: 100
+train.swanlab_api_key: ''
+train.swanlab_mode: cloud
+train.swanlab_project: llamafactory
+train.swanlab_run_name: ''
+train.swanlab_workspace: ''
+train.train_on_prompt: false
+train.training_stage: Supervised Fine-Tuning
+train.use_apollo: false
+train.use_badam: false
+train.use_dora: false
+train.use_galore: false
+train.use_llama_pro: false
+train.use_pissa: false
+train.use_rslora: false
+train.use_swanlab: false
+train.val_size: 0
+train.warmup_steps: 0
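
The effective batch size in the run name ("bs128") follows directly from these settings combined with the 4-GPU setup recorded in the README:

```python
# Sketch: how "bs128" follows from the LLaMA Board settings above.
per_device_batch_size = 2    # train.batch_size
gradient_accumulation = 16   # train.gradient_accumulation_steps
num_devices = 4              # from the README hyperparameters (multi-GPU)
assert per_device_batch_size * gradient_accumulation * num_devices == 128
```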
plan/sft-qwen2.5-7b-instruct-graph-planning-bs128/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
plan/sft-qwen2.5-7b-instruct-graph-planning-bs128/running_log.txt ADDED
@@ -0,0 +1,1044 @@
+[INFO|2025-02-10 23:10:14] tokenization_utils_base.py:2032 >> loading file merges.txt
+[INFO|2025-02-10 23:10:14] tokenization_utils_base.py:2032 >> loading file tokenizer.json
+[INFO|2025-02-10 23:10:14] tokenization_utils_base.py:2032 >> loading file added_tokens.json
+[INFO|2025-02-10 23:10:14] tokenization_utils_base.py:2032 >> loading file special_tokens_map.json
+[INFO|2025-02-10 23:10:14] tokenization_utils_base.py:2032 >> loading file tokenizer_config.json
+[INFO|2025-02-10 23:10:14] tokenization_utils_base.py:2032 >> loading file chat_template.jinja
+[INFO|2025-02-10 23:10:15] tokenization_utils_base.py:2304 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+[INFO|2025-02-10 23:10:15] configuration_utils.py:694 >> loading configuration file /nas/shared/ma4agi/model/Qwen2.5-7B-Instruct/config.json
+[INFO|2025-02-10 23:10:15] configuration_utils.py:768 >> Model config Qwen2Config {
+  "_name_or_path": "/nas/shared/ma4agi/model/Qwen2.5-7B-Instruct",
+  "architectures": [
+    "Qwen2ForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 151643,
+  "eos_token_id": 151645,
+  "hidden_act": "silu",
+  "hidden_size": 3584,
+  "initializer_range": 0.02,
+  "intermediate_size": 18944,
+  "max_position_embeddings": 32768,
+  "max_window_layers": 28,
+  "model_type": "qwen2",
+  "num_attention_heads": 28,
+  "num_hidden_layers": 28,
+  "num_key_value_heads": 4,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 1000000.0,
+  "sliding_window": null,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.48.2",
+  "use_cache": true,
+  "use_sliding_window": false,
+  "vocab_size": 152064
+}
+[INFO|2025-02-10 23:10:15] tokenization_utils_base.py:2032 >> loading file vocab.json
+[INFO|2025-02-10 23:10:15] tokenization_utils_base.py:2032 >> loading file merges.txt
+[INFO|2025-02-10 23:10:15] tokenization_utils_base.py:2032 >> loading file tokenizer.json
+[INFO|2025-02-10 23:10:15] tokenization_utils_base.py:2032 >> loading file added_tokens.json
+[INFO|2025-02-10 23:10:15] tokenization_utils_base.py:2032 >> loading file special_tokens_map.json
+[INFO|2025-02-10 23:10:15] tokenization_utils_base.py:2032 >> loading file tokenizer_config.json
+[INFO|2025-02-10 23:10:15] tokenization_utils_base.py:2032 >> loading file chat_template.jinja
+[INFO|2025-02-10 23:10:15] tokenization_utils_base.py:2304 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+[INFO|2025-02-10 23:10:15] logging.py:157 >> Add <|im_end|> to stop words.
+[INFO|2025-02-10 23:10:15] logging.py:157 >> Loading dataset graph_planning/graph_planning_train.json...
+[INFO|2025-02-10 23:10:23] configuration_utils.py:694 >> loading configuration file /nas/shared/ma4agi/model/Qwen2.5-7B-Instruct/config.json
+[INFO|2025-02-10 23:10:23] configuration_utils.py:768 >> Model config Qwen2Config {
+  "_name_or_path": "/nas/shared/ma4agi/model/Qwen2.5-7B-Instruct",
+  "architectures": [
+    "Qwen2ForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 151643,
+  "eos_token_id": 151645,
+  "hidden_act": "silu",
+  "hidden_size": 3584,
+  "initializer_range": 0.02,
+  "intermediate_size": 18944,
+  "max_position_embeddings": 32768,
+  "max_window_layers": 28,
+  "model_type": "qwen2",
+  "num_attention_heads": 28,
+  "num_hidden_layers": 28,
+  "num_key_value_heads": 4,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 1000000.0,
+  "sliding_window": null,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.48.2",
+  "use_cache": true,
+  "use_sliding_window": false,
+  "vocab_size": 152064
+}
+[INFO|2025-02-10 23:10:24] modeling_utils.py:3901 >> loading weights file /nas/shared/ma4agi/model/Qwen2.5-7B-Instruct/model.safetensors.index.json
+[INFO|2025-02-10 23:10:24] modeling_utils.py:1582 >> Instantiating Qwen2ForCausalLM model under default dtype torch.bfloat16.
+[INFO|2025-02-10 23:10:24] configuration_utils.py:1140 >> Generate config GenerationConfig {
+  "bos_token_id": 151643,
+  "eos_token_id": 151645
+}
+[INFO|2025-02-10 23:10:27] modeling_utils.py:4888 >> All model checkpoint weights were used when initializing Qwen2ForCausalLM.
+[INFO|2025-02-10 23:10:27] modeling_utils.py:4896 >> All the weights of Qwen2ForCausalLM were initialized from the model checkpoint at /nas/shared/ma4agi/model/Qwen2.5-7B-Instruct.
+If your task is similar to the task the model of the checkpoint was trained on, you can already use Qwen2ForCausalLM for predictions without further training.
+[INFO|2025-02-10 23:10:27] configuration_utils.py:1093 >> loading configuration file /nas/shared/ma4agi/model/Qwen2.5-7B-Instruct/generation_config.json
+[INFO|2025-02-10 23:10:27] configuration_utils.py:1140 >> Generate config GenerationConfig {
+  "bos_token_id": 151643,
+  "do_sample": true,
+  "eos_token_id": [
+    151645,
+    151643
+  ],
+  "pad_token_id": 151643,
+  "repetition_penalty": 1.05,
+  "temperature": 0.7,
+  "top_k": 20,
+  "top_p": 0.8
+}
+[INFO|2025-02-10 23:10:27] logging.py:157 >> Gradient checkpointing enabled.
+[INFO|2025-02-10 23:10:27] logging.py:157 >> Using torch SDPA for faster training and inference.
+[INFO|2025-02-10 23:10:27] logging.py:157 >> Upcasting trainable params to float32.
+[INFO|2025-02-10 23:10:27] logging.py:157 >> Fine-tuning method: LoRA
+[INFO|2025-02-10 23:10:27] logging.py:157 >> Found linear modules: o_proj,q_proj,gate_proj,v_proj,up_proj,k_proj,down_proj
+[INFO|2025-02-10 23:10:28] logging.py:157 >> trainable params: 20,185,088 || all params: 7,635,801,600 || trainable%: 0.2643
+[INFO|2025-02-10 23:10:28] trainer.py:741 >> Using auto half precision backend
+[INFO|2025-02-10 23:10:28] trainer.py:2369 >> ***** Running training *****
+[INFO|2025-02-10 23:10:28] trainer.py:2370 >> Num examples = 14,500
+[INFO|2025-02-10 23:10:28] trainer.py:2371 >> Num Epochs = 3
+[INFO|2025-02-10 23:10:28] trainer.py:2372 >> Instantaneous batch size per device = 2
+[INFO|2025-02-10 23:10:28] trainer.py:2375 >> Total train batch size (w. parallel, distributed & accumulation) = 128
+[INFO|2025-02-10 23:10:28] trainer.py:2376 >> Gradient Accumulation steps = 16
+[INFO|2025-02-10 23:10:28] trainer.py:2377 >> Total optimization steps = 339
+[INFO|2025-02-10 23:10:28] trainer.py:2378 >> Number of trainable parameters = 20,185,088
+[INFO|2025-02-10 23:10:56] logging.py:157 >> {'loss': 0.2055, 'learning_rate': 9.9998e-05, 'epoch': 0.01, 'throughput': 7338.97}
+[INFO|2025-02-10 23:11:22] logging.py:157 >> {'loss': 0.1902, 'learning_rate': 9.9991e-05, 'epoch': 0.02, 'throughput': 7493.38}
+[INFO|2025-02-10 23:11:47] logging.py:157 >> {'loss': 0.1421, 'learning_rate': 9.9981e-05, 'epoch': 0.03, 'throughput': 7812.96}
+[INFO|2025-02-10 23:12:15] logging.py:157 >> {'loss': 0.1102, 'learning_rate': 9.9966e-05, 'epoch': 0.04, 'throughput': 7668.61}
+[INFO|2025-02-10 23:12:40] logging.py:157 >> {'loss': 0.0801, 'learning_rate': 9.9946e-05, 'epoch': 0.04, 'throughput': 7740.76}
+[INFO|2025-02-10 23:13:07] logging.py:157 >> {'loss': 0.0574, 'learning_rate': 9.9923e-05, 'epoch': 0.05, 'throughput': 7743.47}
+[INFO|2025-02-10 23:13:31] logging.py:157 >> {'loss': 0.0401, 'learning_rate': 9.9895e-05, 'epoch': 0.06, 'throughput': 7830.06}
+[INFO|2025-02-10 23:14:00] logging.py:157 >> {'loss': 0.0295, 'learning_rate': 9.9863e-05, 'epoch': 0.07, 'throughput': 7781.82}
+[INFO|2025-02-10 23:14:23] logging.py:157 >> {'loss': 0.0262, 'learning_rate': 9.9826e-05, 'epoch': 0.08, 'throughput': 7819.12}
+[INFO|2025-02-10 23:14:51] logging.py:157 >> {'loss': 0.0263, 'learning_rate': 9.9785e-05, 'epoch': 0.09, 'throughput': 7807.27}
+[INFO|2025-02-10 23:15:15] logging.py:157 >> {'loss': 0.0220, 'learning_rate': 9.9740e-05, 'epoch': 0.10, 'throughput': 7850.91}
+[INFO|2025-02-10 23:15:39] logging.py:157 >> {'loss': 0.0202, 'learning_rate': 9.9691e-05, 'epoch': 0.11, 'throughput': 7850.63}
+[INFO|2025-02-10 23:16:08] logging.py:157 >> {'loss': 0.0202, 'learning_rate': 9.9638e-05, 'epoch': 0.11, 'throughput': 7823.47}
+[INFO|2025-02-10 23:16:38] logging.py:157 >> {'loss': 0.0178, 'learning_rate': 9.9580e-05, 'epoch': 0.12, 'throughput': 7772.01}
+[INFO|2025-02-10 23:17:02] logging.py:157 >> {'loss': 0.0158, 'learning_rate': 9.9518e-05, 'epoch': 0.13, 'throughput': 7802.71}
+[INFO|2025-02-10 23:17:29] logging.py:157 >> {'loss': 0.0159, 'learning_rate': 9.9451e-05, 'epoch': 0.14, 'throughput': 7786.47}
+[INFO|2025-02-10 23:17:56] logging.py:157 >> {'loss': 0.0147, 'learning_rate': 9.9381e-05, 'epoch': 0.15, 'throughput': 7807.80}
+[INFO|2025-02-10 23:18:22] logging.py:157 >> {'loss': 0.0138, 'learning_rate': 9.9306e-05, 'epoch': 0.16, 'throughput': 7814.87}
+[INFO|2025-02-10 23:18:46] logging.py:157 >> {'loss': 0.0128, 'learning_rate': 9.9227e-05, 'epoch': 0.17, 'throughput': 7835.96}
+[INFO|2025-02-10 23:19:13] logging.py:157 >> {'loss': 0.0124, 'learning_rate': 9.9144e-05, 'epoch': 0.18, 'throughput': 7833.94}
+[INFO|2025-02-10 23:19:39] logging.py:157 >> {'loss': 0.0116, 'learning_rate': 9.9056e-05, 'epoch': 0.19, 'throughput': 7845.67}
+[INFO|2025-02-10 23:20:05] logging.py:157 >> {'loss': 0.0109, 'learning_rate': 9.8964e-05, 'epoch': 0.19, 'throughput': 7844.79}
+[INFO|2025-02-10 23:20:32] logging.py:157 >> {'loss': 0.0106, 'learning_rate': 9.8869e-05, 'epoch': 0.20, 'throughput': 7843.06}
+[INFO|2025-02-10 23:20:59] logging.py:157 >> {'loss': 0.0099, 'learning_rate': 9.8768e-05, 'epoch': 0.21, 'throughput': 7837.07}
+[INFO|2025-02-10 23:21:24] logging.py:157 >> {'loss': 0.0109, 'learning_rate': 9.8664e-05, 'epoch': 0.22, 'throughput': 7848.70}
+[INFO|2025-02-10 23:21:48] logging.py:157 >> {'loss': 0.0084, 'learning_rate': 9.8556e-05, 'epoch': 0.23, 'throughput': 7866.16}
+[INFO|2025-02-10 23:22:15] logging.py:157 >> {'loss': 0.0081, 'learning_rate': 9.8443e-05, 'epoch': 0.24, 'throughput': 7874.35}
+[INFO|2025-02-10 23:22:41] logging.py:157 >> {'loss': 0.0078, 'learning_rate': 9.8326e-05, 'epoch': 0.25, 'throughput': 7877.49}
+[INFO|2025-02-10 23:23:09] logging.py:157 >> {'loss': 0.0081, 'learning_rate': 9.8205e-05, 'epoch': 0.26, 'throughput': 7855.15}
+[INFO|2025-02-10 23:23:36] logging.py:157 >> {'loss': 0.0087, 'learning_rate': 9.8080e-05, 'epoch': 0.26, 'throughput': 7862.08}
+[INFO|2025-02-10 23:24:02] logging.py:157 >> {'loss': 0.0079, 'learning_rate': 9.7951e-05, 'epoch': 0.27, 'throughput': 7859.19}
+[INFO|2025-02-10 23:24:28] logging.py:157 >> {'loss': 0.0086, 'learning_rate': 9.7817e-05, 'epoch': 0.28, 'throughput': 7858.66}
+[INFO|2025-02-10 23:24:55] logging.py:157 >> {'loss': 0.0079, 'learning_rate': 9.7680e-05, 'epoch': 0.29, 'throughput': 7854.06}
+[INFO|2025-02-10 23:25:21] logging.py:157 >> {'loss': 0.0069, 'learning_rate': 9.7538e-05, 'epoch': 0.30, 'throughput': 7844.67}
+[INFO|2025-02-10 23:25:48] logging.py:157 >> {'loss': 0.0064, 'learning_rate': 9.7393e-05, 'epoch': 0.31, 'throughput': 7832.92}
+[INFO|2025-02-10 23:26:13] logging.py:157 >> {'loss': 0.0060, 'learning_rate': 9.7243e-05, 'epoch': 0.32, 'throughput': 7834.75}
+[INFO|2025-02-10 23:26:41] logging.py:157 >> {'loss': 0.0067, 'learning_rate': 9.7089e-05, 'epoch': 0.33, 'throughput': 7845.98}
+[INFO|2025-02-10 23:27:07] logging.py:157 >> {'loss': 0.0063, 'learning_rate': 9.6932e-05, 'epoch': 0.34, 'throughput': 7852.67}
+[INFO|2025-02-10 23:27:36] logging.py:157 >> {'loss': 0.0058, 'learning_rate': 9.6770e-05, 'epoch': 0.34, 'throughput': 7846.29}
+[INFO|2025-02-10 23:28:05] logging.py:157 >> {'loss': 0.0061, 'learning_rate': 9.6604e-05, 'epoch': 0.35, 'throughput': 7836.02}
+[INFO|2025-02-10 23:28:29] logging.py:157 >> {'loss': 0.0055, 'learning_rate': 9.6434e-05, 'epoch': 0.36, 'throughput': 7840.93}
+[INFO|2025-02-10 23:28:56] logging.py:157 >> {'loss': 0.0050, 'learning_rate': 9.6260e-05, 'epoch': 0.37, 'throughput': 7842.20}
+[INFO|2025-02-10 23:29:22] logging.py:157 >> {'loss': 0.0060, 'learning_rate': 9.6082e-05, 'epoch': 0.38, 'throughput': 7833.15}
+[INFO|2025-02-10 23:29:48] logging.py:157 >> {'loss': 0.0048, 'learning_rate': 9.5901e-05, 'epoch': 0.39, 'throughput': 7835.19}
+[INFO|2025-02-10 23:30:14] logging.py:157 >> {'loss': 0.0047, 'learning_rate': 9.5715e-05, 'epoch': 0.40, 'throughput': 7841.66}
+[INFO|2025-02-10 23:30:41] logging.py:157 >> {'loss': 0.0053, 'learning_rate': 9.5525e-05, 'epoch': 0.41, 'throughput': 7848.48}
+[INFO|2025-02-10 23:31:05] logging.py:157 >> {'loss': 0.0044, 'learning_rate': 9.5332e-05, 'epoch': 0.41, 'throughput': 7851.65}
+[INFO|2025-02-10 23:31:31] logging.py:157 >> {'loss': 0.0043, 'learning_rate': 9.5134e-05, 'epoch': 0.42, 'throughput': 7850.33}
+[INFO|2025-02-10 23:31:57] logging.py:157 >> {'loss': 0.0041, 'learning_rate': 9.4933e-05, 'epoch': 0.43, 'throughput': 7853.40}
+[INFO|2025-02-10 23:32:26] logging.py:157 >> {'loss': 0.0044, 'learning_rate': 9.4728e-05, 'epoch': 0.44, 'throughput': 7850.51}
+[INFO|2025-02-10 23:32:52] logging.py:157 >> {'loss': 0.0040, 'learning_rate': 9.4519e-05, 'epoch': 0.45, 'throughput': 7847.60}
+[INFO|2025-02-10 23:33:18] logging.py:157 >> {'loss': 0.0045, 'learning_rate': 9.4306e-05, 'epoch': 0.46, 'throughput': 7853.35}
+[INFO|2025-02-10 23:33:43] logging.py:157 >> {'loss': 0.0028, 'learning_rate': 9.4089e-05, 'epoch': 0.47, 'throughput': 7845.79}
+[INFO|2025-02-10 23:34:08] logging.py:157 >> {'loss': 0.0034, 'learning_rate': 9.3869e-05, 'epoch': 0.48, 'throughput': 7853.45}
+[INFO|2025-02-10 23:34:36] logging.py:157 >> {'loss': 0.0042, 'learning_rate': 9.3645e-05, 'epoch': 0.49, 'throughput': 7852.19}
+[INFO|2025-02-10 23:34:59] logging.py:157 >> {'loss': 0.0028, 'learning_rate': 9.3417e-05, 'epoch': 0.49, 'throughput': 7868.08}
+[INFO|2025-02-10 23:35:24] logging.py:157 >> {'loss': 0.0035, 'learning_rate': 9.3185e-05, 'epoch': 0.50, 'throughput': 7875.29}
+[INFO|2025-02-10 23:35:48] logging.py:157 >> {'loss': 0.0029, 'learning_rate': 9.2950e-05, 'epoch': 0.51, 'throughput': 7884.45}
+[INFO|2025-02-10 23:36:13] logging.py:157 >> {'loss': 0.0028, 'learning_rate': 9.2710e-05, 'epoch': 0.52, 'throughput': 7890.10}
+[INFO|2025-02-10 23:36:37] logging.py:157 >> {'loss': 0.0027, 'learning_rate': 9.2468e-05, 'epoch': 0.53, 'throughput': 7895.72}
+[INFO|2025-02-10 23:37:02] logging.py:157 >> {'loss': 0.0022, 'learning_rate': 9.2221e-05, 'epoch': 0.54, 'throughput': 7896.89}
+[INFO|2025-02-10 23:37:34] logging.py:157 >> {'loss': 0.0027, 'learning_rate': 9.1971e-05, 'epoch': 0.55, 'throughput': 7889.78}
+[INFO|2025-02-10 23:38:02] logging.py:157 >> {'loss': 0.0025, 'learning_rate': 9.1718e-05, 'epoch': 0.56, 'throughput': 7887.26}
+[INFO|2025-02-10 23:38:27] logging.py:157 >> {'loss': 0.0025, 'learning_rate': 9.1461e-05, 'epoch': 0.56, 'throughput': 7894.29}
+[INFO|2025-02-10 23:38:57] logging.py:157 >> {'loss': 0.0022, 'learning_rate': 9.1200e-05, 'epoch': 0.57, 'throughput': 7878.69}
+[INFO|2025-02-10 23:39:23] logging.py:157 >> {'loss': 0.0023, 'learning_rate': 9.0935e-05, 'epoch': 0.58, 'throughput': 7879.18}
+[INFO|2025-02-10 23:39:50] logging.py:157 >> {'loss': 0.0024, 'learning_rate': 9.0668e-05, 'epoch': 0.59, 'throughput': 7877.08}
+[INFO|2025-02-10 23:40:18] logging.py:157 >> {'loss': 0.0015, 'learning_rate': 9.0396e-05, 'epoch': 0.60, 'throughput': 7867.14}
+[INFO|2025-02-10 23:40:48] logging.py:157 >> {'loss': 0.0033, 'learning_rate': 9.0122e-05, 'epoch': 0.61, 'throughput': 7856.50}
+[INFO|2025-02-10 23:41:16] logging.py:157 >> {'loss': 0.0021, 'learning_rate': 8.9843e-05, 'epoch': 0.62, 'throughput': 7860.92}
+[INFO|2025-02-10 23:41:41] logging.py:157 >> {'loss': 0.0013, 'learning_rate': 8.9562e-05, 'epoch': 0.63, 'throughput': 7863.29}
+[INFO|2025-02-10 23:42:04] logging.py:157 >> {'loss': 0.0016, 'learning_rate': 8.9277e-05, 'epoch': 0.64, 'throughput': 7866.76}
+[INFO|2025-02-10 23:42:29] logging.py:157 >> {'loss': 0.0019, 'learning_rate': 8.8988e-05, 'epoch': 0.64, 'throughput': 7870.01}
+[INFO|2025-02-10 23:42:56] logging.py:157 >> {'loss': 0.0017, 'learning_rate': 8.8696e-05, 'epoch': 0.65, 'throughput': 7870.22}
+[INFO|2025-02-10 23:43:23] logging.py:157 >> {'loss': 0.0015, 'learning_rate': 8.8401e-05, 'epoch': 0.66, 'throughput': 7872.22}
+[INFO|2025-02-10 23:43:51] logging.py:157 >> {'loss': 0.0017, 'learning_rate': 8.8103e-05, 'epoch': 0.67, 'throughput': 7866.00}
+[INFO|2025-02-10 23:44:17] logging.py:157 >> {'loss': 0.0010, 'learning_rate': 8.7801e-05, 'epoch': 0.68, 'throughput': 7866.60}
+[INFO|2025-02-10 23:44:42] logging.py:157 >> {'loss': 0.0018, 'learning_rate': 8.7496e-05, 'epoch': 0.69, 'throughput': 7869.35}
+[INFO|2025-02-10 23:45:05] logging.py:157 >> {'loss': 0.0010, 'learning_rate': 8.7188e-05, 'epoch': 0.70, 'throughput': 7871.98}
+[INFO|2025-02-10 23:45:31] logging.py:157 >> {'loss': 0.0014, 'learning_rate': 8.6877e-05, 'epoch': 0.71, 'throughput': 7877.09}
+[INFO|2025-02-10 23:45:56] logging.py:157 >> {'loss': 0.0010, 'learning_rate': 8.6562e-05, 'epoch': 0.71, 'throughput': 7874.92}
+[INFO|2025-02-10 23:46:27] logging.py:157 >> {'loss': 0.0013, 'learning_rate': 8.6245e-05, 'epoch': 0.72, 'throughput': 7863.86}
+[INFO|2025-02-10 23:46:50] logging.py:157 >> {'loss': 0.0012, 'learning_rate': 8.5924e-05, 'epoch': 0.73, 'throughput': 7864.79}
+[INFO|2025-02-10 23:47:14] logging.py:157 >> {'loss': 0.0013, 'learning_rate': 8.5600e-05, 'epoch': 0.74, 'throughput': 7871.03}
+[INFO|2025-02-10 23:47:40] logging.py:157 >> {'loss': 0.0011, 'learning_rate': 8.5273e-05, 'epoch': 0.75, 'throughput': 7869.86}
+[INFO|2025-02-10 23:48:09] logging.py:157 >> {'loss': 0.0037, 'learning_rate': 8.4943e-05, 'epoch': 0.76, 'throughput': 7865.40}
+[INFO|2025-02-10 23:48:36] logging.py:157 >> {'loss': 0.0007, 'learning_rate': 8.4611e-05, 'epoch': 0.77, 'throughput': 7860.70}
+[INFO|2025-02-10 23:49:00] logging.py:157 >> {'loss': 0.0008, 'learning_rate': 8.4275e-05, 'epoch': 0.78, 'throughput': 7865.68}
+[INFO|2025-02-10 23:49:28] logging.py:157 >> {'loss': 0.0006, 'learning_rate': 8.3936e-05, 'epoch': 0.79, 'throughput': 7857.75}
+[INFO|2025-02-10 23:49:53] logging.py:157 >> {'loss': 0.0010, 'learning_rate': 8.3594e-05, 'epoch': 0.79, 'throughput': 7861.64}
+[INFO|2025-02-10 23:50:18] logging.py:157 >> {'loss': 0.0010, 'learning_rate': 8.3249e-05, 'epoch': 0.80, 'throughput': 7864.33}
+[INFO|2025-02-10 23:50:43] logging.py:157 >> {'loss': 0.0008, 'learning_rate': 8.2902e-05, 'epoch': 0.81, 'throughput': 7862.08}
+[INFO|2025-02-10 23:51:08] logging.py:157 >> {'loss': 0.0014, 'learning_rate': 8.2552e-05, 'epoch': 0.82, 'throughput': 7866.62}
+[INFO|2025-02-10 23:51:33] logging.py:157 >> {'loss': 0.0010, 'learning_rate': 8.2199e-05, 'epoch': 0.83, 'throughput': 7869.38}
+[INFO|2025-02-10 23:51:59] logging.py:157 >> {'loss': 0.0007, 'learning_rate': 8.1843e-05, 'epoch': 0.84, 'throughput': 7872.25}
+[INFO|2025-02-10 23:52:23] logging.py:157 >> {'loss': 0.0008, 'learning_rate': 8.1484e-05, 'epoch': 0.85, 'throughput': 7875.39}
+[INFO|2025-02-10 23:52:51] logging.py:157 >> {'loss': 0.0011, 'learning_rate': 8.1123e-05, 'epoch': 0.86, 'throughput': 7868.64}
+[INFO|2025-02-10 23:53:21] logging.py:157 >> {'loss': 0.0009, 'learning_rate': 8.0759e-05, 'epoch': 0.86, 'throughput': 7865.85}
+[INFO|2025-02-10 23:53:44] logging.py:157 >> {'loss': 0.0006, 'learning_rate': 8.0392e-05, 'epoch': 0.87, 'throughput': 7870.61}
+[INFO|2025-02-10 23:54:08] logging.py:157 >> {'loss': 0.0009, 'learning_rate': 8.0023e-05, 'epoch': 0.88, 'throughput': 7876.56}
+[INFO|2025-02-10 23:54:08] trainer.py:3910 >> Saving model checkpoint to saves/Qwen2.5-7B-Instruct/lora/sft-qwen2.5-7b-instruct-graph-planning-bs128/checkpoint-100
+[INFO|2025-02-10 23:54:08] configuration_utils.py:694 >> loading configuration file /nas/shared/ma4agi/model/Qwen2.5-7B-Instruct/config.json
+[INFO|2025-02-10 23:54:08] configuration_utils.py:768 >> Model config Qwen2Config {
+  "architectures": [
+    "Qwen2ForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 151643,
+  "eos_token_id": 151645,
+  "hidden_act": "silu",
+  "hidden_size": 3584,
+  "initializer_range": 0.02,
+  "intermediate_size": 18944,
+  "max_position_embeddings": 32768,
+  "max_window_layers": 28,
+  "model_type": "qwen2",
+  "num_attention_heads": 28,
+  "num_hidden_layers": 28,
+  "num_key_value_heads": 4,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 1000000.0,
+  "sliding_window": null,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.48.2",
+  "use_cache": true,
+  "use_sliding_window": false,
+  "vocab_size": 152064
+}
+[INFO|2025-02-10 23:54:08] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Qwen2.5-7B-Instruct/lora/sft-qwen2.5-7b-instruct-graph-planning-bs128/checkpoint-100/tokenizer_config.json
+[INFO|2025-02-10 23:54:08] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Qwen2.5-7B-Instruct/lora/sft-qwen2.5-7b-instruct-graph-planning-bs128/checkpoint-100/special_tokens_map.json
+[INFO|2025-02-10 23:54:33] logging.py:157 >> {'loss': 0.0006, 'learning_rate': 7.9651e-05, 'epoch': 0.89, 'throughput': 7876.49}
+[INFO|2025-02-10 23:55:01] logging.py:157 >> {'loss': 0.0008, 'learning_rate': 7.9277e-05, 'epoch': 0.90, 'throughput': 7875.34}
+[INFO|2025-02-10 23:55:25] logging.py:157 >> {'loss': 0.0005, 'learning_rate': 7.8900e-05, 'epoch': 0.91, 'throughput': 7878.05}
+[INFO|2025-02-10 23:55:48] logging.py:157 >> {'loss': 0.0006, 'learning_rate': 7.8520e-05, 'epoch': 0.92, 'throughput': 7881.27}
+[INFO|2025-02-10 23:56:16] logging.py:157 >> {'loss': 0.0008, 'learning_rate': 7.8139e-05, 'epoch': 0.93, 'throughput': 7877.02}
+[INFO|2025-02-10 23:56:44] logging.py:157 >> {'loss': 0.0007, 'learning_rate': 7.7754e-05, 'epoch': 0.94, 'throughput': 7877.23}
+[INFO|2025-02-10 23:57:08] logging.py:157 >> {'loss': 0.0006, 'learning_rate': 7.7368e-05, 'epoch': 0.94, 'throughput': 7878.63}
+[INFO|2025-02-10 23:57:34] logging.py:157 >> {'loss': 0.0012, 'learning_rate': 7.6979e-05, 'epoch': 0.95, 'throughput': 7883.06}
+[INFO|2025-02-10 23:57:59] logging.py:157 >> {'loss': 0.0004, 'learning_rate': 7.6588e-05, 'epoch': 0.96, 'throughput': 7881.65}
+[INFO|2025-02-10 23:58:23] logging.py:157 >> {'loss': 0.0008, 'learning_rate': 7.6194e-05, 'epoch': 0.97, 'throughput': 7883.60}
+[INFO|2025-02-10 23:58:51] logging.py:157 >> {'loss': 0.0007, 'learning_rate': 7.5798e-05, 'epoch': 0.98, 'throughput': 7884.35}
+[INFO|2025-02-10 23:59:14] logging.py:157 >> {'loss': 0.0005, 'learning_rate': 7.5400e-05, 'epoch': 0.99, 'throughput': 7889.50}
+[INFO|2025-02-10 23:59:42] logging.py:157 >> {'loss': 0.0007, 'learning_rate': 7.5000e-05, 'epoch': 1.00, 'throughput': 7889.54}
+[INFO|2025-02-11 00:00:15] logging.py:157 >> {'loss': 0.0014, 'learning_rate': 7.4598e-05, 'epoch': 1.01, 'throughput': 7889.22}
+[INFO|2025-02-11 00:00:42] logging.py:157 >> {'loss': 0.0007, 'learning_rate': 7.4193e-05, 'epoch': 1.02, 'throughput': 7886.62}
+[INFO|2025-02-11 00:01:08] logging.py:157 >> {'loss': 0.0006, 'learning_rate': 7.3787e-05, 'epoch': 1.03, 'throughput': 7886.10}
+[INFO|2025-02-11 00:01:33] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 7.3378e-05, 'epoch': 1.04, 'throughput': 7886.46}
+[INFO|2025-02-11 00:01:56] logging.py:157 >> {'loss': 0.0004, 'learning_rate': 7.2967e-05, 'epoch': 1.04, 'throughput': 7889.90}
+[INFO|2025-02-11 00:02:21] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 7.2555e-05, 'epoch': 1.05, 'throughput': 7889.45}
+[INFO|2025-02-11 00:02:50] logging.py:157 >> {'loss': 0.0005, 'learning_rate': 7.2140e-05, 'epoch': 1.06, 'throughput': 7886.78}
+[INFO|2025-02-11 00:03:14] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 7.1724e-05, 'epoch': 1.07, 'throughput': 7893.28}
+[INFO|2025-02-11 00:03:40] logging.py:157 >> {'loss': 0.0005, 'learning_rate': 7.1306e-05, 'epoch': 1.08, 'throughput': 7896.08}
+[INFO|2025-02-11 00:04:05] logging.py:157 >> {'loss': 0.0004, 'learning_rate': 7.0886e-05, 'epoch': 1.09, 'throughput': 7901.06}
+[INFO|2025-02-11 00:04:28] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 7.0464e-05, 'epoch': 1.10, 'throughput': 7904.61}
+[INFO|2025-02-11 00:04:54] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 7.0040e-05, 'epoch': 1.11, 'throughput': 7904.82}
+[INFO|2025-02-11 00:05:22] logging.py:157 >> {'loss': 0.0005, 'learning_rate': 6.9615e-05, 'epoch': 1.11, 'throughput': 7905.63}
+[INFO|2025-02-11 00:05:49] logging.py:157 >> {'loss': 0.0005, 'learning_rate': 6.9188e-05, 'epoch': 1.12, 'throughput': 7904.23}
+[INFO|2025-02-11 00:06:16] logging.py:157 >> {'loss': 0.0005, 'learning_rate': 6.8759e-05, 'epoch': 1.13, 'throughput': 7903.09}
+[INFO|2025-02-11 00:06:46] logging.py:157 >> {'loss': 0.0004, 'learning_rate': 6.8329e-05, 'epoch': 1.14, 'throughput': 7898.37}
+[INFO|2025-02-11 00:07:13] logging.py:157 >> {'loss': 0.0004, 'learning_rate': 6.7897e-05, 'epoch': 1.15, 'throughput': 7897.68}
+[INFO|2025-02-11 00:07:41] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 6.7463e-05, 'epoch': 1.16, 'throughput': 7897.31}
+[INFO|2025-02-11 00:08:08] logging.py:157 >> {'loss': 0.0004, 'learning_rate': 6.7028e-05, 'epoch': 1.17, 'throughput': 7895.24}
+[INFO|2025-02-11 00:08:34] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 6.6592e-05, 'epoch': 1.18, 'throughput': 7895.50}
+[INFO|2025-02-11 00:09:01] logging.py:157 >> {'loss': 0.0009, 'learning_rate': 6.6154e-05, 'epoch': 1.19, 'throughput': 7896.09}
+[INFO|2025-02-11 00:09:29] logging.py:157 >> {'loss': 0.0014, 'learning_rate': 6.5715e-05, 'epoch': 1.19, 'throughput': 7892.01}
+[INFO|2025-02-11 00:09:54] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 6.5274e-05, 'epoch': 1.20, 'throughput': 7891.89}
+[INFO|2025-02-11 00:10:22] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 6.4833e-05, 'epoch': 1.21, 'throughput': 7888.92}
+[INFO|2025-02-11 00:10:49] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 6.4389e-05, 'epoch': 1.22, 'throughput': 7888.52}
+[INFO|2025-02-11 00:11:14] logging.py:157 >> {'loss': 0.0005, 'learning_rate': 6.3945e-05, 'epoch': 1.23, 'throughput': 7890.66}
+[INFO|2025-02-11 00:11:36] logging.py:157 >> {'loss': 0.0004, 'learning_rate': 6.3500e-05, 'epoch': 1.24, 'throughput': 7892.64}
+[INFO|2025-02-11 00:12:00] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 6.3053e-05, 'epoch': 1.25, 'throughput': 7894.92}
+[INFO|2025-02-11 00:12:29] logging.py:157 >> {'loss': 0.0004, 'learning_rate': 6.2605e-05, 'epoch': 1.26, 'throughput': 7892.49}
+[INFO|2025-02-11 00:12:54] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 6.2156e-05, 'epoch': 1.26, 'throughput': 7894.05}
+[INFO|2025-02-11 00:13:20] logging.py:157 >> {'loss': 0.0006, 'learning_rate': 6.1706e-05, 'epoch': 1.27, 'throughput': 7893.70}
+[INFO|2025-02-11 00:13:42] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 6.1255e-05, 'epoch': 1.28, 'throughput': 7896.62}
+[INFO|2025-02-11 00:14:09] logging.py:157 >> {'loss': 0.0004, 'learning_rate': 6.0803e-05, 'epoch': 1.29, 'throughput': 7895.09}
+[INFO|2025-02-11 00:14:34] logging.py:157 >> {'loss': 0.0005, 'learning_rate': 6.0350e-05, 'epoch': 1.30, 'throughput': 7897.58}
+[INFO|2025-02-11 00:15:00] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 5.9896e-05, 'epoch': 1.31, 'throughput': 7896.39}
+[INFO|2025-02-11 00:15:25] logging.py:157 >> {'loss': 0.0004, 'learning_rate': 5.9442e-05, 'epoch': 1.32, 'throughput': 7897.88}
+[INFO|2025-02-11 00:15:48] logging.py:157 >> {'loss': 0.0004, 'learning_rate': 5.8986e-05, 'epoch': 1.33, 'throughput': 7902.00}
+[INFO|2025-02-11 00:16:14] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 5.8530e-05, 'epoch': 1.34, 'throughput': 7900.57}
+[INFO|2025-02-11 00:16:41] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 5.8073e-05, 'epoch': 1.34, 'throughput': 7901.36}
+[INFO|2025-02-11 00:17:06] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 5.7616e-05, 'epoch': 1.35, 'throughput': 7898.31}
+[INFO|2025-02-11 00:17:30] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 5.7157e-05, 'epoch': 1.36, 'throughput': 7901.98}
+[INFO|2025-02-11 00:17:58] logging.py:157 >> {'loss': 0.0004, 'learning_rate': 5.6699e-05, 'epoch': 1.37, 'throughput': 7898.91}
+[INFO|2025-02-11 00:18:22] logging.py:157 >> {'loss': 0.0004, 'learning_rate': 5.6239e-05, 'epoch': 1.38, 'throughput': 7901.45}
+[INFO|2025-02-11 00:18:47] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 5.5779e-05, 'epoch': 1.39, 'throughput': 7900.71}
+[INFO|2025-02-11 00:19:14] logging.py:157 >> {'loss': 0.0004, 'learning_rate': 5.5319e-05, 'epoch': 1.40, 'throughput': 7898.98}
+[INFO|2025-02-11 00:19:41] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 5.4858e-05, 'epoch': 1.41, 'throughput': 7897.63}
+[INFO|2025-02-11 00:20:08] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 5.4396e-05, 'epoch': 1.41, 'throughput': 7897.71}
+[INFO|2025-02-11 00:20:33] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 5.3935e-05, 'epoch': 1.42, 'throughput': 7897.58}
+[INFO|2025-02-11 00:20:57] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 5.3472e-05, 'epoch': 1.43, 'throughput': 7900.98}
+[INFO|2025-02-11 00:21:26] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 5.3010e-05, 'epoch': 1.44, 'throughput': 7899.49}
+[INFO|2025-02-11 00:21:50] logging.py:157 >> {'loss': 0.0004, 'learning_rate': 5.2547e-05, 'epoch': 1.45, 'throughput': 7899.53}
+[INFO|2025-02-11 00:22:13] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 5.2085e-05, 'epoch': 1.46, 'throughput': 7901.89}
+[INFO|2025-02-11 00:22:39] logging.py:157 >> {'loss': 0.0004, 'learning_rate': 5.1621e-05, 'epoch': 1.47, 'throughput': 7902.03}
+[INFO|2025-02-11 00:23:05] logging.py:157 >> {'loss': 0.0016, 'learning_rate': 5.1158e-05, 'epoch': 1.48, 'throughput': 7901.05}
+[INFO|2025-02-11 00:23:32] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 5.0695e-05, 'epoch': 1.49, 'throughput': 7901.53}
+[INFO|2025-02-11 00:23:57] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 5.0232e-05, 'epoch': 1.49, 'throughput': 7901.79}
+[INFO|2025-02-11 00:24:22] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 4.9768e-05, 'epoch': 1.50, 'throughput': 7901.03}
+[INFO|2025-02-11 00:24:45] logging.py:157 >> {'loss': 0.0004, 'learning_rate': 4.9305e-05, 'epoch': 1.51, 'throughput': 7903.96}
+[INFO|2025-02-11 00:25:11] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 4.8842e-05, 'epoch': 1.52, 'throughput': 7905.12}
+[INFO|2025-02-11 00:25:33] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 4.8379e-05, 'epoch': 1.53, 'throughput': 7909.17}
+[INFO|2025-02-11 00:25:58] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 4.7915e-05, 'epoch': 1.54, 'throughput': 7909.02}
+[INFO|2025-02-11 00:26:29] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 4.7453e-05, 'epoch': 1.55, 'throughput': 7905.59}
+[INFO|2025-02-11 00:26:56] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 4.6990e-05, 'epoch': 1.56, 'throughput': 7905.47}
+[INFO|2025-02-11 00:27:22] logging.py:157 >> {'loss': 0.0027, 'learning_rate': 4.6528e-05, 'epoch': 1.56, 'throughput': 7904.70}
+[INFO|2025-02-11 00:27:47] logging.py:157 >> {'loss': 0.0004, 'learning_rate': 4.6065e-05, 'epoch': 1.57, 'throughput': 7905.80}
+[INFO|2025-02-11 00:28:12] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 4.5604e-05, 'epoch': 1.58, 'throughput': 7907.65}
+[INFO|2025-02-11 00:28:39] logging.py:157 >> {'loss': 0.0004, 'learning_rate': 4.5142e-05, 'epoch': 1.59, 'throughput': 7907.43}
+[INFO|2025-02-11 00:29:09] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 4.4681e-05, 'epoch': 1.60, 'throughput': 7902.86}
+[INFO|2025-02-11 00:29:35] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 4.4221e-05, 'epoch': 1.61, 'throughput': 7903.10}
+[INFO|2025-02-11 00:30:02] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 4.3761e-05, 'epoch': 1.62, 'throughput': 7904.08}
+[INFO|2025-02-11 00:30:28] logging.py:157 >> {'loss': 0.0004, 'learning_rate': 4.3301e-05, 'epoch': 1.63, 'throughput': 7905.25}
+[INFO|2025-02-11 00:30:50] logging.py:157 >> {'loss': 0.0004, 'learning_rate': 4.2843e-05, 'epoch': 1.64, 'throughput': 7908.78}
+[INFO|2025-02-11 00:31:13] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 4.2384e-05, 'epoch': 1.64, 'throughput': 7912.45}
+[INFO|2025-02-11 00:31:40] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 4.1927e-05, 'epoch': 1.65, 'throughput': 7912.95}
+[INFO|2025-02-11 00:32:06] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 4.1470e-05, 'epoch': 1.66, 'throughput': 7913.14}
+[INFO|2025-02-11 00:32:33] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 4.1014e-05, 'epoch': 1.67, 'throughput': 7914.08}
+[INFO|2025-02-11 00:33:02] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 4.0558e-05, 'epoch': 1.68, 'throughput': 7911.16}
+[INFO|2025-02-11 00:33:29] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 4.0104e-05, 'epoch': 1.69, 'throughput': 7911.74}
+[INFO|2025-02-11 00:33:56] logging.py:157 >> {'loss': 0.0004, 'learning_rate': 3.9650e-05, 'epoch': 1.70, 'throughput': 7911.05}
+[INFO|2025-02-11 00:34:21] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 3.9197e-05, 'epoch': 1.71, 'throughput': 7910.45}
+[INFO|2025-02-11 00:34:46] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 3.8745e-05, 'epoch': 1.71, 'throughput': 7911.47}
+[INFO|2025-02-11 00:35:10] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 3.8294e-05, 'epoch': 1.72, 'throughput': 7913.07}
+[INFO|2025-02-11 00:35:36] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 3.7844e-05, 'epoch': 1.73, 'throughput': 7912.61}
+[INFO|2025-02-11 00:36:01] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 3.7395e-05, 'epoch': 1.74, 'throughput': 7914.15}
+[INFO|2025-02-11 00:36:33] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 3.6947e-05, 'epoch': 1.75, 'throughput': 7908.90}
+[INFO|2025-02-11 00:36:56] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 3.6500e-05, 'epoch': 1.76, 'throughput': 7909.88}
+[INFO|2025-02-11 00:37:27] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 3.6055e-05, 'epoch': 1.77, 'throughput': 7904.23}
+[INFO|2025-02-11 00:37:27] trainer.py:3910 >> Saving model checkpoint to saves/Qwen2.5-7B-Instruct/lora/sft-qwen2.5-7b-instruct-graph-planning-bs128/checkpoint-200
+[INFO|2025-02-11 00:37:28] configuration_utils.py:694 >> loading configuration file /nas/shared/ma4agi/model/Qwen2.5-7B-Instruct/config.json
+[INFO|2025-02-11 00:37:28] configuration_utils.py:768 >> Model config Qwen2Config {
+  "architectures": [
+    "Qwen2ForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 151643,
+  "eos_token_id": 151645,
+  "hidden_act": "silu",
+  "hidden_size": 3584,
+  "initializer_range": 0.02,
+  "intermediate_size": 18944,
+  "max_position_embeddings": 32768,
+  "max_window_layers": 28,
+  "model_type": "qwen2",
+  "num_attention_heads": 28,
+  "num_hidden_layers": 28,
+  "num_key_value_heads": 4,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 1000000.0,
+  "sliding_window": null,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.48.2",
+  "use_cache": true,
+  "use_sliding_window": false,
+  "vocab_size": 152064
+}
+[INFO|2025-02-11 00:37:28] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Qwen2.5-7B-Instruct/lora/sft-qwen2.5-7b-instruct-graph-planning-bs128/checkpoint-200/tokenizer_config.json
+[INFO|2025-02-11 00:37:28] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Qwen2.5-7B-Instruct/lora/sft-qwen2.5-7b-instruct-graph-planning-bs128/checkpoint-200/special_tokens_map.json
+[INFO|2025-02-11 00:37:58] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 3.5611e-05, 'epoch': 1.78, 'throughput': 7898.91}
+[INFO|2025-02-11 00:38:25] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 3.5167e-05, 'epoch': 1.79, 'throughput': 7897.44}
+[INFO|2025-02-11 00:38:53] logging.py:157 >> {'loss': 0.0013, 'learning_rate': 3.4726e-05, 'epoch': 1.79, 'throughput': 7894.63}
+[INFO|2025-02-11 00:39:20] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 3.4285e-05, 'epoch': 1.80, 'throughput': 7893.32}
+[INFO|2025-02-11 00:39:45] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 3.3846e-05, 'epoch': 1.81, 'throughput': 7893.48}
+[INFO|2025-02-11 00:40:10] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 3.3408e-05, 'epoch': 1.82, 'throughput': 7897.32}
+[INFO|2025-02-11 00:40:34] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 3.2972e-05, 'epoch': 1.83, 'throughput': 7898.07}
+[INFO|2025-02-11 00:41:01] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 3.2537e-05, 'epoch': 1.84, 'throughput': 7898.41}
+[INFO|2025-02-11 00:41:27] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 3.2103e-05, 'epoch': 1.85, 'throughput': 7898.41}
+[INFO|2025-02-11 00:41:56] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 3.1671e-05, 'epoch': 1.86, 'throughput': 7897.79}
+[INFO|2025-02-11 00:42:22] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 3.1241e-05, 'epoch': 1.86, 'throughput': 7896.37}
+[INFO|2025-02-11 00:42:45] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 3.0812e-05, 'epoch': 1.87, 'throughput': 7897.48}
+[INFO|2025-02-11 00:43:13] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 3.0385e-05, 'epoch': 1.88, 'throughput': 7898.39}
+[INFO|2025-02-11 00:43:39] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 2.9960e-05, 'epoch': 1.89, 'throughput': 7897.60}
+[INFO|2025-02-11 00:44:03] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 2.9536e-05, 'epoch': 1.90, 'throughput': 7898.54}
+[INFO|2025-02-11 00:44:28] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 2.9114e-05, 'epoch': 1.91, 'throughput': 7900.99}
+[INFO|2025-02-11 00:44:55] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 2.8694e-05, 'epoch': 1.92, 'throughput': 7899.67}
+[INFO|2025-02-11 00:45:23] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 2.8276e-05, 'epoch': 1.93, 'throughput': 7898.82}
+[INFO|2025-02-11 00:45:50] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 2.7860e-05, 'epoch': 1.94, 'throughput': 7898.40}
+[INFO|2025-02-11 00:46:19] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 2.7445e-05, 'epoch': 1.94, 'throughput': 7895.44}
+[INFO|2025-02-11 00:46:46] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 2.7033e-05, 'epoch': 1.95, 'throughput': 7892.18}
+[INFO|2025-02-11 00:47:13] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 2.6622e-05, 'epoch': 1.96, 'throughput': 7890.65}
+[INFO|2025-02-11 00:47:36] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 2.6213e-05, 'epoch': 1.97, 'throughput': 7892.67}
+[INFO|2025-02-11 00:48:03] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 2.5807e-05, 'epoch': 1.98, 'throughput': 7892.10}
+[INFO|2025-02-11 00:48:31] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 2.5402e-05, 'epoch': 1.99, 'throughput': 7891.47}
+[INFO|2025-02-11 00:48:57] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 2.5000e-05, 'epoch': 2.00, 'throughput': 7891.65}
+[INFO|2025-02-11 00:49:32] logging.py:157 >> {'loss': 0.0011, 'learning_rate': 2.4600e-05, 'epoch': 2.01, 'throughput': 7890.98}
+[INFO|2025-02-11 00:49:59] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 2.4202e-05, 'epoch': 2.02, 'throughput': 7888.59}
+[INFO|2025-02-11 00:50:25] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 2.3806e-05, 'epoch': 2.03, 'throughput': 7889.79}
+[INFO|2025-02-11 00:50:54] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 2.3412e-05, 'epoch': 2.04, 'throughput': 7887.38}
+[INFO|2025-02-11 00:51:17] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 2.3021e-05, 'epoch': 2.04, 'throughput': 7889.45}
+[INFO|2025-02-11 00:51:46] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 2.2632e-05, 'epoch': 2.05, 'throughput': 7887.07}
+[INFO|2025-02-11 00:52:11] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 2.2246e-05, 'epoch': 2.06, 'throughput': 7889.59}
+[INFO|2025-02-11 00:52:43] logging.py:157 >> {'loss': 0.0025, 'learning_rate': 2.1861e-05, 'epoch': 2.07, 'throughput': 7885.55}
+[INFO|2025-02-11 00:53:13] logging.py:157 >> {'loss': 0.0013, 'learning_rate': 2.1480e-05, 'epoch': 2.08, 'throughput': 7881.47}
+[INFO|2025-02-11 00:53:37] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 2.1100e-05, 'epoch': 2.09, 'throughput': 7884.07}
+[INFO|2025-02-11 00:54:04] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 2.0723e-05, 'epoch': 2.10, 'throughput': 7884.69}
+[INFO|2025-02-11 00:54:29] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 2.0349e-05, 'epoch': 2.11, 'throughput': 7886.69}
+[INFO|2025-02-11 00:54:55] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.9977e-05, 'epoch': 2.11, 'throughput': 7886.90}
+[INFO|2025-02-11 00:55:19] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.9608e-05, 'epoch': 2.12, 'throughput': 7888.14}
+[INFO|2025-02-11 00:55:44] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 1.9241e-05, 'epoch': 2.13, 'throughput': 7889.13}
+[INFO|2025-02-11 00:56:09] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 1.8877e-05, 'epoch': 2.14, 'throughput': 7888.30}
+[INFO|2025-02-11 00:56:35] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.8516e-05, 'epoch': 2.15, 'throughput': 7887.77}
+[INFO|2025-02-11 00:56:59] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 1.8157e-05, 'epoch': 2.16, 'throughput': 7889.80}
+[INFO|2025-02-11 00:57:28] logging.py:157 >> {'loss': 0.0006, 'learning_rate': 1.7801e-05, 'epoch': 2.17, 'throughput': 7887.66}
+[INFO|2025-02-11 00:57:55] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.7448e-05, 'epoch': 2.18, 'throughput': 7886.91}
+[INFO|2025-02-11 00:58:18] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 1.7098e-05, 'epoch': 2.19, 'throughput': 7889.08}
+[INFO|2025-02-11 00:58:42] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 1.6751e-05, 'epoch': 2.19, 'throughput': 7890.61}
+[INFO|2025-02-11 00:59:08] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.6406e-05, 'epoch': 2.20, 'throughput': 7889.22}
+[INFO|2025-02-11 00:59:33] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.6064e-05, 'epoch': 2.21, 'throughput': 7890.88}
+[INFO|2025-02-11 00:59:58] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.5725e-05, 'epoch': 2.22, 'throughput': 7890.72}
+[INFO|2025-02-11 01:00:24] logging.py:157 >> {'loss': 0.0012, 'learning_rate': 1.5389e-05, 'epoch': 2.23, 'throughput': 7890.97}
+[INFO|2025-02-11 01:00:47] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.5057e-05, 'epoch': 2.24, 'throughput': 7892.37}
+[INFO|2025-02-11 01:01:11] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 1.4727e-05, 'epoch': 2.25, 'throughput': 7894.16}
+[INFO|2025-02-11 01:01:34] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.4400e-05, 'epoch': 2.26, 'throughput': 7894.39}
+[INFO|2025-02-11 01:02:01] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.4076e-05, 'epoch': 2.26, 'throughput': 7895.04}
+[INFO|2025-02-11 01:02:29] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.3755e-05, 'epoch': 2.27, 'throughput': 7894.62}
+[INFO|2025-02-11 01:02:57] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.3438e-05, 'epoch': 2.28, 'throughput': 7893.02}
+[INFO|2025-02-11 01:03:28] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.3123e-05, 'epoch': 2.29, 'throughput': 7891.68}
+[INFO|2025-02-11 01:03:52] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.2812e-05, 'epoch': 2.30, 'throughput': 7892.09}
+[INFO|2025-02-11 01:04:18] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 1.2504e-05, 'epoch': 2.31, 'throughput': 7894.29}
+[INFO|2025-02-11 01:04:46] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.2199e-05, 'epoch': 2.32, 'throughput': 7893.79}
+[INFO|2025-02-11 01:05:13] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 1.1897e-05, 'epoch': 2.33, 'throughput': 7894.54}
+[INFO|2025-02-11 01:05:37] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.1599e-05, 'epoch': 2.34, 'throughput': 7894.72}
+[INFO|2025-02-11 01:06:03] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 1.1304e-05, 'epoch': 2.34, 'throughput': 7893.77}
+[INFO|2025-02-11 01:06:29] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.1012e-05, 'epoch': 2.35, 'throughput': 7895.00}
+[INFO|2025-02-11 01:06:55] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.0723e-05, 'epoch': 2.36, 'throughput': 7895.05}
+[INFO|2025-02-11 01:07:22] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 1.0438e-05, 'epoch': 2.37, 'throughput': 7895.15}
+[INFO|2025-02-11 01:07:48] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.0157e-05, 'epoch': 2.38, 'throughput': 7895.90}
+[INFO|2025-02-11 01:08:14] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 9.8785e-06, 'epoch': 2.39, 'throughput': 7897.00}
+[INFO|2025-02-11 01:08:43] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 9.6037e-06, 'epoch': 2.40, 'throughput': 7893.16}
+[INFO|2025-02-11 01:09:10] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 9.3324e-06, 'epoch': 2.41, 'throughput': 7891.24}
+[INFO|2025-02-11 01:09:34] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 9.0646e-06, 'epoch': 2.41, 'throughput': 7893.30}
+[INFO|2025-02-11 01:10:00] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 8.8003e-06, 'epoch': 2.42, 'throughput': 7893.02}
+[INFO|2025-02-11 01:10:25] logging.py:157 >> {'loss': 0.0000, 'learning_rate': 8.5395e-06, 'epoch': 2.43, 'throughput': 7892.52}
+[INFO|2025-02-11 01:10:51] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 8.2823e-06, 'epoch': 2.44, 'throughput': 7892.63}
+[INFO|2025-02-11 01:11:17] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 8.0287e-06, 'epoch': 2.45, 'throughput': 7892.41}
+[INFO|2025-02-11 01:11:45] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 7.7786e-06, 'epoch': 2.46, 'throughput': 7889.96}
+[INFO|2025-02-11 01:12:10] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 7.5322e-06, 'epoch': 2.47, 'throughput': 7892.18}
+[INFO|2025-02-11 01:12:34] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 7.2895e-06, 'epoch': 2.48, 'throughput': 7893.12}
+[INFO|2025-02-11 01:13:03] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 7.0504e-06, 'epoch': 2.49, 'throughput': 7891.37}
+[INFO|2025-02-11 01:13:30] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 6.8150e-06, 'epoch': 2.49, 'throughput': 7891.23}
+[INFO|2025-02-11 01:13:58] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 6.5834e-06, 'epoch': 2.50, 'throughput': 7890.21}
+[INFO|2025-02-11 01:14:25] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 6.3554e-06, 'epoch': 2.51, 'throughput': 7889.96}
+[INFO|2025-02-11 01:14:53] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 6.1312e-06, 'epoch': 2.52, 'throughput': 7889.86}
+[INFO|2025-02-11 01:15:20] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 5.9108e-06, 'epoch': 2.53, 'throughput': 7889.91}
811
+
812
+ [INFO|2025-02-11 01:15:44] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 5.6941e-06, 'epoch': 2.54, 'throughput': 7891.05}
813
+
814
+ [INFO|2025-02-11 01:16:10] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 5.4813e-06, 'epoch': 2.55, 'throughput': 7890.97}
815
+
816
+ [INFO|2025-02-11 01:16:34] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 5.2723e-06, 'epoch': 2.56, 'throughput': 7891.95}
817
+
818
+ [INFO|2025-02-11 01:16:58] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 5.0671e-06, 'epoch': 2.56, 'throughput': 7893.32}
819
+
820
+ [INFO|2025-02-11 01:17:24] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 4.8657e-06, 'epoch': 2.57, 'throughput': 7893.98}
821
+
822
+ [INFO|2025-02-11 01:17:51] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 4.6683e-06, 'epoch': 2.58, 'throughput': 7892.90}
823
+
824
+ [INFO|2025-02-11 01:18:18] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 4.4748e-06, 'epoch': 2.59, 'throughput': 7892.36}
825
+
826
+ [INFO|2025-02-11 01:18:46] logging.py:157 >> {'loss': 0.0010, 'learning_rate': 4.2851e-06, 'epoch': 2.60, 'throughput': 7891.40}
827
+
828
+ [INFO|2025-02-11 01:19:11] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 4.0994e-06, 'epoch': 2.61, 'throughput': 7891.16}
829
+
830
+ [INFO|2025-02-11 01:19:35] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 3.9176e-06, 'epoch': 2.62, 'throughput': 7892.02}
831
+
832
+ [INFO|2025-02-11 01:19:59] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 3.7398e-06, 'epoch': 2.63, 'throughput': 7892.85}
833
+
834
+ [INFO|2025-02-11 01:20:26] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 3.5660e-06, 'epoch': 2.64, 'throughput': 7892.72}
835
+
836
+ [INFO|2025-02-11 01:20:52] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 3.3961e-06, 'epoch': 2.64, 'throughput': 7893.04}
837
+
838
+ [INFO|2025-02-11 01:21:17] logging.py:157 >> {'loss': 0.0000, 'learning_rate': 3.2303e-06, 'epoch': 2.65, 'throughput': 7892.55}
839
+
840
+ [INFO|2025-02-11 01:21:17] trainer.py:3910 >> Saving model checkpoint to saves/Qwen2.5-7B-Instruct/lora/sft-qwen2.5-7b-instruct-graph-planning-bs128/checkpoint-300
841
+
842
+ [INFO|2025-02-11 01:21:17] configuration_utils.py:694 >> loading configuration file /nas/shared/ma4agi/model/Qwen2.5-7B-Instruct/config.json
843
+
844
+ [INFO|2025-02-11 01:21:17] configuration_utils.py:768 >> Model config Qwen2Config {
845
+ "architectures": [
846
+ "Qwen2ForCausalLM"
847
+ ],
848
+ "attention_dropout": 0.0,
849
+ "bos_token_id": 151643,
850
+ "eos_token_id": 151645,
851
+ "hidden_act": "silu",
852
+ "hidden_size": 3584,
853
+ "initializer_range": 0.02,
854
+ "intermediate_size": 18944,
855
+ "max_position_embeddings": 32768,
856
+ "max_window_layers": 28,
857
+ "model_type": "qwen2",
858
+ "num_attention_heads": 28,
859
+ "num_hidden_layers": 28,
860
+ "num_key_value_heads": 4,
861
+ "rms_norm_eps": 1e-06,
862
+ "rope_scaling": null,
863
+ "rope_theta": 1000000.0,
864
+ "sliding_window": null,
865
+ "tie_word_embeddings": false,
866
+ "torch_dtype": "bfloat16",
867
+ "transformers_version": "4.48.2",
868
+ "use_cache": true,
869
+ "use_sliding_window": false,
870
+ "vocab_size": 152064
871
+ }
872
+
873
+
874
+ [INFO|2025-02-11 01:21:17] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Qwen2.5-7B-Instruct/lora/sft-qwen2.5-7b-instruct-graph-planning-bs128/checkpoint-300/tokenizer_config.json
875
+
876
+ [INFO|2025-02-11 01:21:17] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Qwen2.5-7B-Instruct/lora/sft-qwen2.5-7b-instruct-graph-planning-bs128/checkpoint-300/special_tokens_map.json
877
+
878
+ [INFO|2025-02-11 01:21:42] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 3.0684e-06, 'epoch': 2.66, 'throughput': 7893.70}
879
+
880
+ [INFO|2025-02-11 01:22:07] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 2.9106e-06, 'epoch': 2.67, 'throughput': 7893.69}
881
+
882
+ [INFO|2025-02-11 01:22:31] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 2.7569e-06, 'epoch': 2.68, 'throughput': 7893.79}
883
+
884
+ [INFO|2025-02-11 01:22:58] logging.py:157 >> {'loss': 0.0004, 'learning_rate': 2.6071e-06, 'epoch': 2.69, 'throughput': 7893.85}
885
+
886
+ [INFO|2025-02-11 01:23:27] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 2.4615e-06, 'epoch': 2.70, 'throughput': 7891.77}
887
+
888
+ [INFO|2025-02-11 01:23:51] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 2.3200e-06, 'epoch': 2.71, 'throughput': 7892.22}
889
+
890
+ [INFO|2025-02-11 01:24:18] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 2.1825e-06, 'epoch': 2.71, 'throughput': 7891.29}
891
+
892
+ [INFO|2025-02-11 01:24:43] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 2.0492e-06, 'epoch': 2.72, 'throughput': 7892.66}
893
+
894
+ [INFO|2025-02-11 01:25:08] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.9199e-06, 'epoch': 2.73, 'throughput': 7892.67}
895
+
896
+ [INFO|2025-02-11 01:25:34] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.7948e-06, 'epoch': 2.74, 'throughput': 7892.91}
897
+
898
+ [INFO|2025-02-11 01:26:01] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.6739e-06, 'epoch': 2.75, 'throughput': 7892.87}
899
+
900
+ [INFO|2025-02-11 01:26:28] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.5570e-06, 'epoch': 2.76, 'throughput': 7892.66}
901
+
902
+ [INFO|2025-02-11 01:26:55] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.4444e-06, 'epoch': 2.77, 'throughput': 7894.09}
903
+
904
+ [INFO|2025-02-11 01:27:20] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.3359e-06, 'epoch': 2.78, 'throughput': 7894.56}
905
+
906
+ [INFO|2025-02-11 01:27:46] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 1.2316e-06, 'epoch': 2.79, 'throughput': 7893.92}
907
+
908
+ [INFO|2025-02-11 01:28:12] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.1315e-06, 'epoch': 2.79, 'throughput': 7894.65}
909
+
910
+ [INFO|2025-02-11 01:28:38] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 1.0356e-06, 'epoch': 2.80, 'throughput': 7893.46}
911
+
912
+ [INFO|2025-02-11 01:29:02] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 9.4386e-07, 'epoch': 2.81, 'throughput': 7894.99}
913
+
914
+ [INFO|2025-02-11 01:29:31] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 8.5636e-07, 'epoch': 2.82, 'throughput': 7894.00}
915
+
916
+ [INFO|2025-02-11 01:29:57] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 7.7308e-07, 'epoch': 2.83, 'throughput': 7895.06}
917
+
918
+ [INFO|2025-02-11 01:30:24] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 6.9403e-07, 'epoch': 2.84, 'throughput': 7894.58}
919
+
920
+ [INFO|2025-02-11 01:30:53] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 6.1921e-07, 'epoch': 2.85, 'throughput': 7893.93}
921
+
922
+ [INFO|2025-02-11 01:31:19] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 5.4864e-07, 'epoch': 2.86, 'throughput': 7893.63}
923
+
924
+ [INFO|2025-02-11 01:31:45] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 4.8231e-07, 'epoch': 2.86, 'throughput': 7893.28}
925
+
926
+ [INFO|2025-02-11 01:32:10] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 4.2023e-07, 'epoch': 2.87, 'throughput': 7894.28}
927
+
928
+ [INFO|2025-02-11 01:32:33] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 3.6241e-07, 'epoch': 2.88, 'throughput': 7895.04}
929
+
930
+ [INFO|2025-02-11 01:32:57] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 3.0886e-07, 'epoch': 2.89, 'throughput': 7896.03}
931
+
932
+ [INFO|2025-02-11 01:33:21] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 2.5957e-07, 'epoch': 2.90, 'throughput': 7896.57}
933
+
934
+ [INFO|2025-02-11 01:33:45] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 2.1455e-07, 'epoch': 2.91, 'throughput': 7897.70}
935
+
936
+ [INFO|2025-02-11 01:34:10] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.7381e-07, 'epoch': 2.92, 'throughput': 7898.05}
937
+
938
+ [INFO|2025-02-11 01:34:36] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 1.3735e-07, 'epoch': 2.93, 'throughput': 7898.34}
939
+
940
+ [INFO|2025-02-11 01:35:01] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 1.0517e-07, 'epoch': 2.94, 'throughput': 7898.84}
941
+
942
+ [INFO|2025-02-11 01:35:26] logging.py:157 >> {'loss': 0.0002, 'learning_rate': 7.7274e-08, 'epoch': 2.94, 'throughput': 7900.67}
943
+
944
+ [INFO|2025-02-11 01:35:51] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 5.3666e-08, 'epoch': 2.95, 'throughput': 7901.22}
945
+
946
+ [INFO|2025-02-11 01:36:19] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 3.4349e-08, 'epoch': 2.96, 'throughput': 7899.73}
947
+
948
+ [INFO|2025-02-11 01:36:44] logging.py:157 >> {'loss': 0.0003, 'learning_rate': 1.9322e-08, 'epoch': 2.97, 'throughput': 7900.03}
949
+
950
+ [INFO|2025-02-11 01:37:08] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 8.5879e-09, 'epoch': 2.98, 'throughput': 7902.03}
951
+
952
+ [INFO|2025-02-11 01:37:35] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 2.1470e-09, 'epoch': 2.99, 'throughput': 7903.27}
953
+
954
+ [INFO|2025-02-11 01:38:00] logging.py:157 >> {'loss': 0.0001, 'learning_rate': 0.0000e+00, 'epoch': 3.00, 'throughput': 7902.86}
955
+
956
+ [INFO|2025-02-11 01:38:00] trainer.py:3910 >> Saving model checkpoint to saves/Qwen2.5-7B-Instruct/lora/sft-qwen2.5-7b-instruct-graph-planning-bs128/checkpoint-339
957
+
958
+ [INFO|2025-02-11 01:38:00] configuration_utils.py:694 >> loading configuration file /nas/shared/ma4agi/model/Qwen2.5-7B-Instruct/config.json
959
+
960
+ [INFO|2025-02-11 01:38:00] configuration_utils.py:768 >> Model config Qwen2Config {
961
+ "architectures": [
962
+ "Qwen2ForCausalLM"
963
+ ],
964
+ "attention_dropout": 0.0,
965
+ "bos_token_id": 151643,
966
+ "eos_token_id": 151645,
967
+ "hidden_act": "silu",
968
+ "hidden_size": 3584,
969
+ "initializer_range": 0.02,
970
+ "intermediate_size": 18944,
971
+ "max_position_embeddings": 32768,
972
+ "max_window_layers": 28,
973
+ "model_type": "qwen2",
974
+ "num_attention_heads": 28,
975
+ "num_hidden_layers": 28,
976
+ "num_key_value_heads": 4,
977
+ "rms_norm_eps": 1e-06,
978
+ "rope_scaling": null,
979
+ "rope_theta": 1000000.0,
980
+ "sliding_window": null,
981
+ "tie_word_embeddings": false,
982
+ "torch_dtype": "bfloat16",
983
+ "transformers_version": "4.48.2",
984
+ "use_cache": true,
985
+ "use_sliding_window": false,
986
+ "vocab_size": 152064
987
+ }
988
+
989
+
990
+ [INFO|2025-02-11 01:38:00] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Qwen2.5-7B-Instruct/lora/sft-qwen2.5-7b-instruct-graph-planning-bs128/checkpoint-339/tokenizer_config.json
991
+
992
+ [INFO|2025-02-11 01:38:00] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Qwen2.5-7B-Instruct/lora/sft-qwen2.5-7b-instruct-graph-planning-bs128/checkpoint-339/special_tokens_map.json
993
+
994
+ [INFO|2025-02-11 01:38:01] trainer.py:2643 >>
995
+
996
+ Training completed. Do not forget to share your model on huggingface.co/models =)
997
+
998
+
999
+
1000
+ [INFO|2025-02-11 01:38:01] trainer.py:3910 >> Saving model checkpoint to saves/Qwen2.5-7B-Instruct/lora/sft-qwen2.5-7b-instruct-graph-planning-bs128
1001
+
1002
+ [INFO|2025-02-11 01:38:01] configuration_utils.py:694 >> loading configuration file /nas/shared/ma4agi/model/Qwen2.5-7B-Instruct/config.json
1003
+
1004
+ [INFO|2025-02-11 01:38:01] configuration_utils.py:768 >> Model config Qwen2Config {
1005
+ "architectures": [
1006
+ "Qwen2ForCausalLM"
1007
+ ],
1008
+ "attention_dropout": 0.0,
1009
+ "bos_token_id": 151643,
1010
+ "eos_token_id": 151645,
1011
+ "hidden_act": "silu",
1012
+ "hidden_size": 3584,
1013
+ "initializer_range": 0.02,
1014
+ "intermediate_size": 18944,
1015
+ "max_position_embeddings": 32768,
1016
+ "max_window_layers": 28,
1017
+ "model_type": "qwen2",
1018
+ "num_attention_heads": 28,
1019
+ "num_hidden_layers": 28,
1020
+ "num_key_value_heads": 4,
1021
+ "rms_norm_eps": 1e-06,
1022
+ "rope_scaling": null,
1023
+ "rope_theta": 1000000.0,
1024
+ "sliding_window": null,
1025
+ "tie_word_embeddings": false,
1026
+ "torch_dtype": "bfloat16",
1027
+ "transformers_version": "4.48.2",
1028
+ "use_cache": true,
1029
+ "use_sliding_window": false,
1030
+ "vocab_size": 152064
1031
+ }
1032
+
1033
+
1034
+ [INFO|2025-02-11 01:38:01] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Qwen2.5-7B-Instruct/lora/sft-qwen2.5-7b-instruct-graph-planning-bs128/tokenizer_config.json
1035
+
1036
+ [INFO|2025-02-11 01:38:01] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Qwen2.5-7B-Instruct/lora/sft-qwen2.5-7b-instruct-graph-planning-bs128/special_tokens_map.json
1037
+
1038
+ [WARNING|2025-02-11 01:38:02] logging.py:162 >> No metric eval_loss to plot.
1039
+
1040
+ [WARNING|2025-02-11 01:38:02] logging.py:162 >> No metric eval_accuracy to plot.
1041
+
1042
+ [INFO|2025-02-11 01:38:02] modelcard.py:449 >> Dropping the following result as it does not have all the necessary fields:
1043
+ {'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}}
1044
+
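The log above ends with the final LoRA checkpoint written to saves/Qwen2.5-7B-Instruct/lora/sft-qwen2.5-7b-instruct-graph-planning-bs128. A minimal sketch of loading that adapter for inference, assuming the PEFT 0.12 / Transformers 4.48 stack recorded in this run and that the base-model and adapter paths from the log are reachable locally:

```python
# Sketch: attach the LoRA adapter produced by this run to the base model.
# Both paths are taken from the log; adjust them for your environment.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_path = "/nas/shared/ma4agi/model/Qwen2.5-7B-Instruct"
adapter_path = "saves/Qwen2.5-7B-Instruct/lora/sft-qwen2.5-7b-instruct-graph-planning-bs128"

model = AutoModelForCausalLM.from_pretrained(base_path, torch_dtype=torch.bfloat16)
model = PeftModel.from_pretrained(model, adapter_path)
model = model.merge_and_unload()  # optional: fold the LoRA deltas into the base weights
tokenizer = AutoTokenizer.from_pretrained(adapter_path)
```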
plan/sft-qwen2.5-7b-instruct-graph-planning-bs128/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
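This special_tokens_map.json keeps Qwen2.5's ChatML markers as additional special tokens, with <|im_start|>/<|im_end|> framing messages, <|im_end|> as the eos token, and <|endoftext|> as the pad token. A quick check, assuming the files in this directory are loaded as-is (the local path is illustrative):

```python
# Sketch: confirm the special-token mapping above survives a tokenizer round-trip.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("plan/sft-qwen2.5-7b-instruct-graph-planning-bs128")
assert tok.eos_token == "<|im_end|>"
assert tok.pad_token == "<|endoftext|>"
# ids follow the base vocab, per added_tokens_decoder in tokenizer_config.json:
print(tok.convert_tokens_to_ids(["<|im_start|>", "<|im_end|>", "<|endoftext|>"]))
# expected: [151644, 151645, 151643]
```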
plan/sft-qwen2.5-7b-instruct-graph-planning-bs128/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
+ size 11421896
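tokenizer.json is stored as a Git LFS pointer: the three lines above carry the spec version, the SHA-256 of the actual blob, and its size in bytes. A small sketch for verifying a pulled copy against the pointer (assumes `git lfs pull` has already replaced the pointer with the real file):

```python
# Sketch: verify the materialized tokenizer.json against the LFS pointer above.
import hashlib
import os

path = "plan/sft-qwen2.5-7b-instruct-graph-planning-bs128/tokenizer.json"
with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
assert digest == "9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa"
assert os.path.getsize(path) == 11421896
```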
plan/sft-qwen2.5-7b-instruct-graph-planning-bs128/tokenizer_config.json ADDED
@@ -0,0 +1,209 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151646": {
+       "content": "<|object_ref_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151647": {
+       "content": "<|object_ref_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151648": {
+       "content": "<|box_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151649": {
+       "content": "<|box_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151650": {
+       "content": "<|quad_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151651": {
+       "content": "<|quad_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151652": {
+       "content": "<|vision_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151653": {
+       "content": "<|vision_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151654": {
+       "content": "<|vision_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151655": {
+       "content": "<|image_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151656": {
+       "content": "<|video_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151657": {
+       "content": "<tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151658": {
+       "content": "</tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151659": {
+       "content": "<|fim_prefix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151660": {
+       "content": "<|fim_middle|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151661": {
+       "content": "<|fim_suffix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151662": {
+       "content": "<|fim_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151663": {
+       "content": "<|repo_name|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151664": {
+       "content": "<|file_sep|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "bos_token": null,
+   "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n    {%- endif %}\n    {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n    {%- else %}\n        {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {{- '<|im_start|>' + message.role }}\n        {%- if message.content %}\n            {{- '\\n' + message.content }}\n        {%- endif %}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '\\n<tool_call>\\n{\"name\": \"' }}\n            {{- tool_call.name }}\n            {{- '\", \"arguments\": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- '}\\n</tool_call>' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "extra_special_tokens": {},
+   "model_max_length": 4096,
+   "pad_token": "<|endoftext|>",
+   "padding_side": "right",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
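The chat_template above is the standard Qwen2.5 ChatML template: a default system message, <|im_start|>/<|im_end|> framing per message, <tool_call> blocks for function calling, and a trailing assistant header when add_generation_prompt is set. A brief rendering sketch (the directory path is illustrative, as before):

```python
# Sketch: render a prompt with the ChatML chat_template defined above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("plan/sft-qwen2.5-7b-instruct-graph-planning-bs128")
messages = [{"role": "user", "content": "Plan a path through the graph."}]
text = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
# <|im_start|>system
# You are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Plan a path through the graph.<|im_end|>
# <|im_start|>assistant
```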
plan/sft-qwen2.5-7b-instruct-graph-planning-bs128/train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 2.997242140099283,
+   "num_input_tokens_seen": 69953200,
+   "total_flos": 2.976146663409713e+18,
+   "train_loss": 0.004280612113766934,
+   "train_runtime": 8852.4475,
+   "train_samples_per_second": 4.914,
+   "train_steps_per_second": 0.038
+ }
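These totals are internally consistent with the step logs: 69,953,200 input tokens over 8,852.45 s is about 7,902 tokens/s, matching the final 'throughput' values in the log, and 339 steps at the effective batch size of 128 is roughly 43,392 samples, close to the reported 4.914 samples/s (the trainer derives that figure from the dataset size, so the two differ slightly). A quick arithmetic check:

```python
# Sketch: sanity-check the aggregates in train_results.json against the step logs.
tokens, runtime, steps, batch_size = 69_953_200, 8852.4475, 339, 128

print(tokens / runtime)              # ~7902 tokens/s, matches the logged 'throughput'
print(steps * batch_size / runtime)  # ~4.90 samples/s, close to train_samples_per_second
```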
plan/sft-qwen2.5-7b-instruct-graph-planning-bs128/trainer_log.jsonl ADDED
@@ -0,0 +1,340 @@
1
+ {"current_steps": 1, "total_steps": 339, "loss": 0.2055, "lr": 9.999785297426788e-05, "epoch": 0.00882515168229454, "percentage": 0.29, "elapsed_time": "0:00:27", "remaining_time": "2:35:54", "throughput": 7338.97, "total_tokens": 203120}
2
+ {"current_steps": 2, "total_steps": 339, "loss": 0.1902, "lr": 9.999141208146028e-05, "epoch": 0.01765030336458908, "percentage": 0.59, "elapsed_time": "0:00:54", "remaining_time": "2:32:10", "throughput": 7493.38, "total_tokens": 406048}
3
+ {"current_steps": 3, "total_steps": 339, "loss": 0.1421, "lr": 9.998067787472772e-05, "epoch": 0.026475455046883617, "percentage": 0.88, "elapsed_time": "0:01:18", "remaining_time": "2:26:52", "throughput": 7812.96, "total_tokens": 614736}
4
+ {"current_steps": 4, "total_steps": 339, "loss": 0.1102, "lr": 9.996565127593488e-05, "epoch": 0.03530060672917816, "percentage": 1.18, "elapsed_time": "0:01:46", "remaining_time": "2:28:36", "throughput": 7668.61, "total_tokens": 816416}
5
+ {"current_steps": 5, "total_steps": 339, "loss": 0.0801, "lr": 9.994633357558158e-05, "epoch": 0.0441257584114727, "percentage": 1.47, "elapsed_time": "0:02:12", "remaining_time": "2:27:19", "throughput": 7740.76, "total_tokens": 1024272}
6
+ {"current_steps": 6, "total_steps": 339, "loss": 0.0574, "lr": 9.99227264326918e-05, "epoch": 0.052950910093767234, "percentage": 1.77, "elapsed_time": "0:02:38", "remaining_time": "2:26:42", "throughput": 7743.47, "total_tokens": 1228192}
7
+ {"current_steps": 7, "total_steps": 339, "loss": 0.0401, "lr": 9.989483187467127e-05, "epoch": 0.06177606177606178, "percentage": 2.06, "elapsed_time": "0:03:03", "remaining_time": "2:24:52", "throughput": 7830.06, "total_tokens": 1434992}
8
+ {"current_steps": 8, "total_steps": 339, "loss": 0.0295, "lr": 9.986265229713331e-05, "epoch": 0.07060121345835632, "percentage": 2.36, "elapsed_time": "0:03:31", "remaining_time": "2:25:54", "throughput": 7781.82, "total_tokens": 1646560}
9
+ {"current_steps": 9, "total_steps": 339, "loss": 0.0262, "lr": 9.982619046369321e-05, "epoch": 0.07942636514065085, "percentage": 2.65, "elapsed_time": "0:03:55", "remaining_time": "2:23:41", "throughput": 7819.12, "total_tokens": 1838624}
10
+ {"current_steps": 10, "total_steps": 339, "loss": 0.0263, "lr": 9.978544950573074e-05, "epoch": 0.0882515168229454, "percentage": 2.95, "elapsed_time": "0:04:23", "remaining_time": "2:24:13", "throughput": 7807.27, "total_tokens": 2053488}
11
+ {"current_steps": 11, "total_steps": 339, "loss": 0.022, "lr": 9.974043292212128e-05, "epoch": 0.09707666850523994, "percentage": 3.24, "elapsed_time": "0:04:47", "remaining_time": "2:22:39", "throughput": 7850.91, "total_tokens": 2253680}
12
+ {"current_steps": 12, "total_steps": 339, "loss": 0.0202, "lr": 9.96911445789354e-05, "epoch": 0.10590182018753447, "percentage": 3.54, "elapsed_time": "0:05:11", "remaining_time": "2:21:16", "throughput": 7850.63, "total_tokens": 2442000}
13
+ {"current_steps": 13, "total_steps": 339, "loss": 0.0202, "lr": 9.963758870910671e-05, "epoch": 0.11472697186982901, "percentage": 3.83, "elapsed_time": "0:05:39", "remaining_time": "2:21:53", "throughput": 7823.47, "total_tokens": 2655920}
14
+ {"current_steps": 14, "total_steps": 339, "loss": 0.0178, "lr": 9.957976991206846e-05, "epoch": 0.12355212355212356, "percentage": 4.13, "elapsed_time": "0:06:09", "remaining_time": "2:23:04", "throughput": 7772.01, "total_tokens": 2874064}
15
+ {"current_steps": 15, "total_steps": 339, "loss": 0.0158, "lr": 9.951769315335844e-05, "epoch": 0.13237727523441808, "percentage": 4.42, "elapsed_time": "0:06:33", "remaining_time": "2:21:43", "throughput": 7802.71, "total_tokens": 3071744}
16
+ {"current_steps": 16, "total_steps": 339, "loss": 0.0159, "lr": 9.945136376419259e-05, "epoch": 0.14120242691671264, "percentage": 4.72, "elapsed_time": "0:07:00", "remaining_time": "2:21:38", "throughput": 7786.47, "total_tokens": 3277904}
17
+ {"current_steps": 17, "total_steps": 339, "loss": 0.0147, "lr": 9.938078744100712e-05, "epoch": 0.15002757859900717, "percentage": 5.01, "elapsed_time": "0:07:27", "remaining_time": "2:21:14", "throughput": 7807.8, "total_tokens": 3493136}
18
+ {"current_steps": 18, "total_steps": 339, "loss": 0.0138, "lr": 9.930597024496931e-05, "epoch": 0.1588527302813017, "percentage": 5.31, "elapsed_time": "0:07:54", "remaining_time": "2:20:53", "throughput": 7814.87, "total_tokens": 3704288}
19
+ {"current_steps": 19, "total_steps": 339, "loss": 0.0128, "lr": 9.922691860145696e-05, "epoch": 0.16767788196359626, "percentage": 5.6, "elapsed_time": "0:08:18", "remaining_time": "2:19:51", "throughput": 7835.96, "total_tokens": 3904352}
20
+ {"current_steps": 20, "total_steps": 339, "loss": 0.0124, "lr": 9.914363929950659e-05, "epoch": 0.1765030336458908, "percentage": 5.9, "elapsed_time": "0:08:45", "remaining_time": "2:19:35", "throughput": 7833.94, "total_tokens": 4113888}
21
+ {"current_steps": 21, "total_steps": 339, "loss": 0.0116, "lr": 9.905613949123036e-05, "epoch": 0.18532818532818532, "percentage": 6.19, "elapsed_time": "0:09:11", "remaining_time": "2:19:04", "throughput": 7845.67, "total_tokens": 4323504}
22
+ {"current_steps": 22, "total_steps": 339, "loss": 0.0109, "lr": 9.896442669120187e-05, "epoch": 0.19415333701047988, "percentage": 6.49, "elapsed_time": "0:09:36", "remaining_time": "2:18:27", "throughput": 7844.79, "total_tokens": 4523008}
23
+ {"current_steps": 23, "total_steps": 339, "loss": 0.0106, "lr": 9.886850877581079e-05, "epoch": 0.2029784886927744, "percentage": 6.78, "elapsed_time": "0:10:03", "remaining_time": "2:18:10", "throughput": 7843.06, "total_tokens": 4732864}
24
+ {"current_steps": 24, "total_steps": 339, "loss": 0.0099, "lr": 9.876839398258641e-05, "epoch": 0.21180364037506894, "percentage": 7.08, "elapsed_time": "0:10:30", "remaining_time": "2:17:56", "throughput": 7837.07, "total_tokens": 4941936}
25
+ {"current_steps": 25, "total_steps": 339, "loss": 0.0109, "lr": 9.866409090949022e-05, "epoch": 0.2206287920573635, "percentage": 7.37, "elapsed_time": "0:10:55", "remaining_time": "2:17:11", "throughput": 7848.7, "total_tokens": 5143584}
26
+ {"current_steps": 26, "total_steps": 339, "loss": 0.0084, "lr": 9.855560851417752e-05, "epoch": 0.22945394373965802, "percentage": 7.67, "elapsed_time": "0:11:20", "remaining_time": "2:16:29", "throughput": 7866.16, "total_tokens": 5351024}
27
+ {"current_steps": 27, "total_steps": 339, "loss": 0.0081, "lr": 9.844295611322804e-05, "epoch": 0.23827909542195255, "percentage": 7.96, "elapsed_time": "0:11:46", "remaining_time": "2:16:04", "throughput": 7874.35, "total_tokens": 5563760}
28
+ {"current_steps": 28, "total_steps": 339, "loss": 0.0078, "lr": 9.832614338134595e-05, "epoch": 0.2471042471042471, "percentage": 8.26, "elapsed_time": "0:12:12", "remaining_time": "2:15:39", "throughput": 7877.49, "total_tokens": 5772416}
29
+ {"current_steps": 29, "total_steps": 339, "loss": 0.0081, "lr": 9.820518035052889e-05, "epoch": 0.25592939878654164, "percentage": 8.55, "elapsed_time": "0:12:40", "remaining_time": "2:15:30", "throughput": 7855.15, "total_tokens": 5974464}
30
+ {"current_steps": 30, "total_steps": 339, "loss": 0.0087, "lr": 9.808007740920646e-05, "epoch": 0.26475455046883617, "percentage": 8.85, "elapsed_time": "0:13:07", "remaining_time": "2:15:14", "throughput": 7862.08, "total_tokens": 6193520}
31
+ {"current_steps": 31, "total_steps": 339, "loss": 0.0079, "lr": 9.795084530134801e-05, "epoch": 0.2735797021511307, "percentage": 9.14, "elapsed_time": "0:13:34", "remaining_time": "2:14:50", "throughput": 7859.19, "total_tokens": 6399792}
32
+ {"current_steps": 32, "total_steps": 339, "loss": 0.0086, "lr": 9.781749512553999e-05, "epoch": 0.2824048538334253, "percentage": 9.44, "elapsed_time": "0:14:00", "remaining_time": "2:14:21", "throughput": 7858.66, "total_tokens": 6603584}
33
+ {"current_steps": 33, "total_steps": 339, "loss": 0.0079, "lr": 9.768003833403278e-05, "epoch": 0.2912300055157198, "percentage": 9.73, "elapsed_time": "0:14:27", "remaining_time": "2:14:00", "throughput": 7854.06, "total_tokens": 6810656}
34
+ {"current_steps": 34, "total_steps": 339, "loss": 0.0069, "lr": 9.753848673175707e-05, "epoch": 0.30005515719801434, "percentage": 10.03, "elapsed_time": "0:14:52", "remaining_time": "2:13:26", "throughput": 7844.67, "total_tokens": 7001792}
35
+ {"current_steps": 35, "total_steps": 339, "loss": 0.0064, "lr": 9.739285247531018e-05, "epoch": 0.3088803088803089, "percentage": 10.32, "elapsed_time": "0:15:19", "remaining_time": "2:13:10", "throughput": 7832.92, "total_tokens": 7205952}
36
+ {"current_steps": 36, "total_steps": 339, "loss": 0.006, "lr": 9.724314807191195e-05, "epoch": 0.3177054605626034, "percentage": 10.62, "elapsed_time": "0:15:45", "remaining_time": "2:12:36", "throughput": 7834.75, "total_tokens": 7406304}
37
+ {"current_steps": 37, "total_steps": 339, "loss": 0.0067, "lr": 9.708938637833065e-05, "epoch": 0.32653061224489793, "percentage": 10.91, "elapsed_time": "0:16:12", "remaining_time": "2:12:17", "throughput": 7845.98, "total_tokens": 7629568}
38
+ {"current_steps": 38, "total_steps": 339, "loss": 0.0063, "lr": 9.693158059977878e-05, "epoch": 0.3353557639271925, "percentage": 11.21, "elapsed_time": "0:16:39", "remaining_time": "2:11:53", "throughput": 7852.67, "total_tokens": 7845200}
39
+ {"current_steps": 39, "total_steps": 339, "loss": 0.0058, "lr": 9.676974428877901e-05, "epoch": 0.34418091560948705, "percentage": 11.5, "elapsed_time": "0:17:07", "remaining_time": "2:11:43", "throughput": 7846.29, "total_tokens": 8061840}
40
+ {"current_steps": 40, "total_steps": 339, "loss": 0.0061, "lr": 9.660389134400033e-05, "epoch": 0.3530060672917816, "percentage": 11.8, "elapsed_time": "0:17:36", "remaining_time": "2:11:38", "throughput": 7836.02, "total_tokens": 8279664}
41
+ {"current_steps": 41, "total_steps": 339, "loss": 0.0055, "lr": 9.643403600906433e-05, "epoch": 0.3618312189740761, "percentage": 12.09, "elapsed_time": "0:18:00", "remaining_time": "2:10:56", "throughput": 7840.93, "total_tokens": 8475376}
42
+ {"current_steps": 42, "total_steps": 339, "loss": 0.005, "lr": 9.626019287132203e-05, "epoch": 0.37065637065637064, "percentage": 12.39, "elapsed_time": "0:18:28", "remaining_time": "2:10:37", "throughput": 7842.2, "total_tokens": 8691760}
43
+ {"current_steps": 43, "total_steps": 339, "loss": 0.006, "lr": 9.608237686060099e-05, "epoch": 0.3794815223386652, "percentage": 12.68, "elapsed_time": "0:18:54", "remaining_time": "2:10:07", "throughput": 7833.15, "total_tokens": 8884736}
44
+ {"current_steps": 44, "total_steps": 339, "loss": 0.0048, "lr": 9.590060324792327e-05, "epoch": 0.38830667402095975, "percentage": 12.98, "elapsed_time": "0:19:19", "remaining_time": "2:09:33", "throughput": 7835.19, "total_tokens": 9084064}
45
+ {"current_steps": 45, "total_steps": 339, "loss": 0.0047, "lr": 9.571488764419381e-05, "epoch": 0.3971318257032543, "percentage": 13.27, "elapsed_time": "0:19:46", "remaining_time": "2:09:10", "throughput": 7841.66, "total_tokens": 9302144}
46
+ {"current_steps": 46, "total_steps": 339, "loss": 0.0053, "lr": 9.552524599885981e-05, "epoch": 0.4059569773855488, "percentage": 13.57, "elapsed_time": "0:20:12", "remaining_time": "2:08:44", "throughput": 7848.48, "total_tokens": 9517456}
47
+ {"current_steps": 47, "total_steps": 339, "loss": 0.0044, "lr": 9.533169459854098e-05, "epoch": 0.41478212906784334, "percentage": 13.86, "elapsed_time": "0:20:36", "remaining_time": "2:08:03", "throughput": 7851.65, "total_tokens": 9710768}
48
+ {"current_steps": 48, "total_steps": 339, "loss": 0.0043, "lr": 9.513425006563079e-05, "epoch": 0.42360728075013787, "percentage": 14.16, "elapsed_time": "0:21:02", "remaining_time": "2:07:36", "throughput": 7850.33, "total_tokens": 9914064}
49
+ {"current_steps": 49, "total_steps": 339, "loss": 0.0041, "lr": 9.493292935686895e-05, "epoch": 0.43243243243243246, "percentage": 14.45, "elapsed_time": "0:21:28", "remaining_time": "2:07:06", "throughput": 7853.4, "total_tokens": 10120208}
50
+ {"current_steps": 50, "total_steps": 339, "loss": 0.0044, "lr": 9.472774976188515e-05, "epoch": 0.441257584114727, "percentage": 14.75, "elapsed_time": "0:21:57", "remaining_time": "2:06:57", "throughput": 7850.51, "total_tokens": 10346304}
51
+ {"current_steps": 51, "total_steps": 339, "loss": 0.004, "lr": 9.451872890171419e-05, "epoch": 0.4500827357970215, "percentage": 15.04, "elapsed_time": "0:22:24", "remaining_time": "2:06:30", "throughput": 7847.6, "total_tokens": 10547984}
52
+ {"current_steps": 52, "total_steps": 339, "loss": 0.0045, "lr": 9.43058847272827e-05, "epoch": 0.45890788747931605, "percentage": 15.34, "elapsed_time": "0:22:49", "remaining_time": "2:05:57", "throughput": 7853.35, "total_tokens": 10754288}
53
+ {"current_steps": 53, "total_steps": 339, "loss": 0.0028, "lr": 9.408923551786743e-05, "epoch": 0.4677330391616106, "percentage": 15.63, "elapsed_time": "0:23:14", "remaining_time": "2:05:26", "throughput": 7845.79, "total_tokens": 10942704}
54
+ {"current_steps": 54, "total_steps": 339, "loss": 0.0034, "lr": 9.386879987952549e-05, "epoch": 0.4765581908439051, "percentage": 15.93, "elapsed_time": "0:23:39", "remaining_time": "2:04:53", "throughput": 7853.45, "total_tokens": 11150864}
55
+ {"current_steps": 55, "total_steps": 339, "loss": 0.0042, "lr": 9.364459674349641e-05, "epoch": 0.4853833425261997, "percentage": 16.22, "elapsed_time": "0:24:07", "remaining_time": "2:04:35", "throughput": 7852.19, "total_tokens": 11367728}
56
+ {"current_steps": 56, "total_steps": 339, "loss": 0.0028, "lr": 9.341664536457626e-05, "epoch": 0.4942084942084942, "percentage": 16.52, "elapsed_time": "0:24:31", "remaining_time": "2:03:54", "throughput": 7868.08, "total_tokens": 11575536}
57
+ {"current_steps": 57, "total_steps": 339, "loss": 0.0035, "lr": 9.31849653194641e-05, "epoch": 0.5030336458907887, "percentage": 16.81, "elapsed_time": "0:24:55", "remaining_time": "2:03:21", "throughput": 7875.29, "total_tokens": 11781328}
58
+ {"current_steps": 58, "total_steps": 339, "loss": 0.0029, "lr": 9.294957650508065e-05, "epoch": 0.5118587975730833, "percentage": 17.11, "elapsed_time": "0:25:19", "remaining_time": "2:02:42", "throughput": 7884.45, "total_tokens": 11981232}
59
+ {"current_steps": 59, "total_steps": 339, "loss": 0.0028, "lr": 9.27104991368596e-05, "epoch": 0.5206839492553779, "percentage": 17.4, "elapsed_time": "0:25:44", "remaining_time": "2:02:10", "throughput": 7890.1, "total_tokens": 12187296}
60
+ {"current_steps": 60, "total_steps": 339, "loss": 0.0027, "lr": 9.246775374701139e-05, "epoch": 0.5295091009376723, "percentage": 17.7, "elapsed_time": "0:26:08", "remaining_time": "2:01:34", "throughput": 7895.72, "total_tokens": 12385632}
61
+ {"current_steps": 61, "total_steps": 339, "loss": 0.0022, "lr": 9.222136118275995e-05, "epoch": 0.5383342526199669, "percentage": 17.99, "elapsed_time": "0:26:34", "remaining_time": "2:01:05", "throughput": 7896.89, "total_tokens": 12588928}
62
+ {"current_steps": 62, "total_steps": 339, "loss": 0.0027, "lr": 9.197134260455233e-05, "epoch": 0.5471594043022614, "percentage": 18.29, "elapsed_time": "0:27:05", "remaining_time": "2:01:02", "throughput": 7889.78, "total_tokens": 12825616}
63
+ {"current_steps": 63, "total_steps": 339, "loss": 0.0025, "lr": 9.171771948424137e-05, "epoch": 0.555984555984556, "percentage": 18.58, "elapsed_time": "0:27:33", "remaining_time": "2:00:45", "throughput": 7887.26, "total_tokens": 13044976}
64
+ {"current_steps": 64, "total_steps": 339, "loss": 0.0025, "lr": 9.146051360324166e-05, "epoch": 0.5648097076668506, "percentage": 18.88, "elapsed_time": "0:27:59", "remaining_time": "2:00:14", "throughput": 7894.29, "total_tokens": 13255280}
65
+ {"current_steps": 65, "total_steps": 339, "loss": 0.0022, "lr": 9.119974705065901e-05, "epoch": 0.573634859349145, "percentage": 19.17, "elapsed_time": "0:28:28", "remaining_time": "2:00:03", "throughput": 7878.69, "total_tokens": 13463456}
66
+ {"current_steps": 66, "total_steps": 339, "loss": 0.0023, "lr": 9.093544222139337e-05, "epoch": 0.5824600110314396, "percentage": 19.47, "elapsed_time": "0:28:54", "remaining_time": "1:59:35", "throughput": 7879.18, "total_tokens": 13667744}
67
+ {"current_steps": 67, "total_steps": 339, "loss": 0.0024, "lr": 9.066762181421552e-05, "epoch": 0.5912851627137341, "percentage": 19.76, "elapsed_time": "0:29:21", "remaining_time": "1:59:10", "throughput": 7877.08, "total_tokens": 13874240}
68
+ {"current_steps": 68, "total_steps": 339, "loss": 0.0015, "lr": 9.039630882981768e-05, "epoch": 0.6001103143960287, "percentage": 20.06, "elapsed_time": "0:29:49", "remaining_time": "1:58:53", "throughput": 7867.14, "total_tokens": 14081392}
69
+ {"current_steps": 69, "total_steps": 339, "loss": 0.0033, "lr": 9.012152656883823e-05, "epoch": 0.6089354660783233, "percentage": 20.35, "elapsed_time": "0:30:20", "remaining_time": "1:58:42", "throughput": 7856.5, "total_tokens": 14300896}
70
+ {"current_steps": 70, "total_steps": 339, "loss": 0.0021, "lr": 8.984329862986056e-05, "epoch": 0.6177606177606177, "percentage": 20.65, "elapsed_time": "0:30:47", "remaining_time": "1:58:20", "throughput": 7860.92, "total_tokens": 14523968}
71
+ {"current_steps": 71, "total_steps": 339, "loss": 0.0013, "lr": 8.956164890738643e-05, "epoch": 0.6265857694429123, "percentage": 20.94, "elapsed_time": "0:31:13", "remaining_time": "1:57:50", "throughput": 7863.29, "total_tokens": 14728960}
72
+ {"current_steps": 72, "total_steps": 339, "loss": 0.0016, "lr": 8.927660158978392e-05, "epoch": 0.6354109211252068, "percentage": 21.24, "elapsed_time": "0:31:35", "remaining_time": "1:57:09", "throughput": 7866.76, "total_tokens": 14912480}
73
+ {"current_steps": 73, "total_steps": 339, "loss": 0.0019, "lr": 8.898818115721008e-05, "epoch": 0.6442360728075014, "percentage": 21.53, "elapsed_time": "0:32:00", "remaining_time": "1:56:38", "throughput": 7870.01, "total_tokens": 15114608}
74
+ {"current_steps": 74, "total_steps": 339, "loss": 0.0017, "lr": 8.86964123795085e-05, "epoch": 0.6530612244897959, "percentage": 21.83, "elapsed_time": "0:32:27", "remaining_time": "1:56:13", "throughput": 7870.22, "total_tokens": 15326112}
75
+ {"current_steps": 75, "total_steps": 339, "loss": 0.0015, "lr": 8.84013203140821e-05, "epoch": 0.6618863761720905, "percentage": 22.12, "elapsed_time": "0:32:54", "remaining_time": "1:55:50", "throughput": 7872.22, "total_tokens": 15545248}
76
+ {"current_steps": 76, "total_steps": 339, "loss": 0.0017, "lr": 8.810293030374126e-05, "epoch": 0.670711527854385, "percentage": 22.42, "elapsed_time": "0:33:22", "remaining_time": "1:55:29", "throughput": 7866.0, "total_tokens": 15751872}
77
+ {"current_steps": 77, "total_steps": 339, "loss": 0.001, "lr": 8.780126797452713e-05, "epoch": 0.6795366795366795, "percentage": 22.71, "elapsed_time": "0:33:48", "remaining_time": "1:55:02", "throughput": 7866.6, "total_tokens": 15957872}
78
+ {"current_steps": 78, "total_steps": 339, "loss": 0.0018, "lr": 8.749635923351107e-05, "epoch": 0.6883618312189741, "percentage": 23.01, "elapsed_time": "0:34:13", "remaining_time": "1:54:32", "throughput": 7869.35, "total_tokens": 16162640}
79
+ {"current_steps": 79, "total_steps": 339, "loss": 0.001, "lr": 8.71882302665696e-05, "epoch": 0.6971869829012686, "percentage": 23.3, "elapsed_time": "0:34:37", "remaining_time": "1:53:56", "throughput": 7871.98, "total_tokens": 16352368}
80
+ {"current_steps": 80, "total_steps": 339, "loss": 0.0014, "lr": 8.687690753613554e-05, "epoch": 0.7060121345835632, "percentage": 23.6, "elapsed_time": "0:35:02", "remaining_time": "1:53:27", "throughput": 7877.09, "total_tokens": 16563920}
81
+ {"current_steps": 81, "total_steps": 339, "loss": 0.001, "lr": 8.656241777892543e-05, "epoch": 0.7148372862658577, "percentage": 23.89, "elapsed_time": "0:35:28", "remaining_time": "1:52:58", "throughput": 7874.92, "total_tokens": 16759024}
82
+ {"current_steps": 82, "total_steps": 339, "loss": 0.0013, "lr": 8.624478800364332e-05, "epoch": 0.7236624379481522, "percentage": 24.19, "elapsed_time": "0:35:58", "remaining_time": "1:52:44", "throughput": 7863.86, "total_tokens": 16973728}
83
+ {"current_steps": 83, "total_steps": 339, "loss": 0.0012, "lr": 8.592404548866123e-05, "epoch": 0.7324875896304468, "percentage": 24.48, "elapsed_time": "0:36:22", "remaining_time": "1:52:10", "throughput": 7864.79, "total_tokens": 17162752}
84
+ {"current_steps": 84, "total_steps": 339, "loss": 0.0013, "lr": 8.560021777967649e-05, "epoch": 0.7413127413127413, "percentage": 24.78, "elapsed_time": "0:36:46", "remaining_time": "1:51:37", "throughput": 7871.03, "total_tokens": 17364064}
85
+ {"current_steps": 85, "total_steps": 339, "loss": 0.0011, "lr": 8.527333268734606e-05, "epoch": 0.7501378929950359, "percentage": 25.07, "elapsed_time": "0:37:11", "remaining_time": "1:51:09", "throughput": 7869.86, "total_tokens": 17564576}
86
+ {"current_steps": 86, "total_steps": 339, "loss": 0.0037, "lr": 8.494341828489812e-05, "epoch": 0.7589630446773304, "percentage": 25.37, "elapsed_time": "0:37:40", "remaining_time": "1:50:49", "throughput": 7865.4, "total_tokens": 17778752}
87
+ {"current_steps": 87, "total_steps": 339, "loss": 0.0007, "lr": 8.461050290572114e-05, "epoch": 0.7677881963596249, "percentage": 25.66, "elapsed_time": "0:38:07", "remaining_time": "1:50:26", "throughput": 7860.7, "total_tokens": 17982448}
88
+ {"current_steps": 88, "total_steps": 339, "loss": 0.0008, "lr": 8.427461514093056e-05, "epoch": 0.7766133480419195, "percentage": 25.96, "elapsed_time": "0:38:31", "remaining_time": "1:49:52", "throughput": 7865.68, "total_tokens": 18180608}
89
+ {"current_steps": 89, "total_steps": 339, "loss": 0.0006, "lr": 8.393578383691329e-05, "epoch": 0.785438499724214, "percentage": 26.25, "elapsed_time": "0:38:59", "remaining_time": "1:49:32", "throughput": 7857.75, "total_tokens": 18384496}
90
+ {"current_steps": 90, "total_steps": 339, "loss": 0.001, "lr": 8.359403809285053e-05, "epoch": 0.7942636514065086, "percentage": 26.55, "elapsed_time": "0:39:24", "remaining_time": "1:49:01", "throughput": 7861.64, "total_tokens": 18587744}
91
+ {"current_steps": 91, "total_steps": 339, "loss": 0.001, "lr": 8.324940725821852e-05, "epoch": 0.803088803088803, "percentage": 26.84, "elapsed_time": "0:39:49", "remaining_time": "1:48:31", "throughput": 7864.33, "total_tokens": 18791056}
92
+ {"current_steps": 92, "total_steps": 339, "loss": 0.0008, "lr": 8.290192093026805e-05, "epoch": 0.8119139547710976, "percentage": 27.14, "elapsed_time": "0:40:14", "remaining_time": "1:48:03", "throughput": 7862.08, "total_tokens": 18985008}
93
+ {"current_steps": 93, "total_steps": 339, "loss": 0.0014, "lr": 8.255160895148263e-05, "epoch": 0.8207391064533922, "percentage": 27.43, "elapsed_time": "0:40:39", "remaining_time": "1:47:33", "throughput": 7866.62, "total_tokens": 19193888}
94
+ {"current_steps": 94, "total_steps": 339, "loss": 0.001, "lr": 8.219850140701557e-05, "epoch": 0.8295642581356867, "percentage": 27.73, "elapsed_time": "0:41:05", "remaining_time": "1:47:05", "throughput": 7869.38, "total_tokens": 19399552}
95
+ {"current_steps": 95, "total_steps": 339, "loss": 0.0007, "lr": 8.184262862210624e-05, "epoch": 0.8383894098179813, "percentage": 28.02, "elapsed_time": "0:41:30", "remaining_time": "1:46:36", "throughput": 7872.25, "total_tokens": 19605120}
96
+ {"current_steps": 96, "total_steps": 339, "loss": 0.0008, "lr": 8.148402115947571e-05, "epoch": 0.8472145615002757, "percentage": 28.32, "elapsed_time": "0:41:54", "remaining_time": "1:46:04", "throughput": 7875.39, "total_tokens": 19802480}
97
+ {"current_steps": 97, "total_steps": 339, "loss": 0.0011, "lr": 8.112270981670196e-05, "epoch": 0.8560397131825703, "percentage": 28.61, "elapsed_time": "0:42:22", "remaining_time": "1:45:44", "throughput": 7868.64, "total_tokens": 20009520}
98
+ {"current_steps": 98, "total_steps": 339, "loss": 0.0009, "lr": 8.075872562357501e-05, "epoch": 0.8648648648648649, "percentage": 28.91, "elapsed_time": "0:42:52", "remaining_time": "1:45:26", "throughput": 7865.85, "total_tokens": 20235888}
99
+ {"current_steps": 99, "total_steps": 339, "loss": 0.0006, "lr": 8.039209983943201e-05, "epoch": 0.8736900165471594, "percentage": 29.2, "elapsed_time": "0:43:16", "remaining_time": "1:44:53", "throughput": 7870.61, "total_tokens": 20433600}
100
+ {"current_steps": 100, "total_steps": 339, "loss": 0.0009, "lr": 8.002286395047267e-05, "epoch": 0.882515168229454, "percentage": 29.5, "elapsed_time": "0:43:39", "remaining_time": "1:44:20", "throughput": 7876.56, "total_tokens": 20631664}
101
+ {"current_steps": 101, "total_steps": 339, "loss": 0.0006, "lr": 7.965104966705518e-05, "epoch": 0.8913403199117484, "percentage": 29.79, "elapsed_time": "0:44:04", "remaining_time": "1:43:52", "throughput": 7876.49, "total_tokens": 20833056}
102
+ {"current_steps": 102, "total_steps": 339, "loss": 0.0008, "lr": 7.927668892097289e-05, "epoch": 0.900165471594043, "percentage": 30.09, "elapsed_time": "0:44:33", "remaining_time": "1:43:30", "throughput": 7875.34, "total_tokens": 21051104}
103
+ {"current_steps": 103, "total_steps": 339, "loss": 0.0005, "lr": 7.889981386271201e-05, "epoch": 0.9089906232763376, "percentage": 30.38, "elapsed_time": "0:44:56", "remaining_time": "1:42:59", "throughput": 7878.05, "total_tokens": 21246080}
104
+ {"current_steps": 104, "total_steps": 339, "loss": 0.0006, "lr": 7.852045685869045e-05, "epoch": 0.9178157749586321, "percentage": 30.68, "elapsed_time": "0:45:20", "remaining_time": "1:42:26", "throughput": 7881.27, "total_tokens": 21439696}
105
+ {"current_steps": 105, "total_steps": 339, "loss": 0.0008, "lr": 7.813865048847819e-05, "epoch": 0.9266409266409267, "percentage": 30.97, "elapsed_time": "0:45:48", "remaining_time": "1:42:04", "throughput": 7877.02, "total_tokens": 21648432}
106
+ {"current_steps": 106, "total_steps": 339, "loss": 0.0007, "lr": 7.775442754199928e-05, "epoch": 0.9354660783232212, "percentage": 31.27, "elapsed_time": "0:46:15", "remaining_time": "1:41:41", "throughput": 7877.23, "total_tokens": 21864368}
107
+ {"current_steps": 107, "total_steps": 339, "loss": 0.0006, "lr": 7.736782101671587e-05, "epoch": 0.9442912300055157, "percentage": 31.56, "elapsed_time": "0:46:40", "remaining_time": "1:41:11", "throughput": 7878.63, "total_tokens": 22061968}
108
+ {"current_steps": 108, "total_steps": 339, "loss": 0.0012, "lr": 7.697886411479423e-05, "epoch": 0.9531163816878102, "percentage": 31.86, "elapsed_time": "0:47:06", "remaining_time": "1:40:44", "throughput": 7883.06, "total_tokens": 22278128}
109
+ {"current_steps": 109, "total_steps": 339, "loss": 0.0004, "lr": 7.658759024025349e-05, "epoch": 0.9619415333701048, "percentage": 32.15, "elapsed_time": "0:47:30", "remaining_time": "1:40:15", "throughput": 7881.65, "total_tokens": 22469056}
110
+ {"current_steps": 110, "total_steps": 339, "loss": 0.0008, "lr": 7.619403299609668e-05, "epoch": 0.9707666850523994, "percentage": 32.45, "elapsed_time": "0:47:54", "remaining_time": "1:39:44", "throughput": 7883.6, "total_tokens": 22662128}
111
+ {"current_steps": 111, "total_steps": 339, "loss": 0.0007, "lr": 7.579822618142505e-05, "epoch": 0.9795918367346939, "percentage": 32.74, "elapsed_time": "0:48:22", "remaining_time": "1:39:21", "throughput": 7884.35, "total_tokens": 22883216}
112
+ {"current_steps": 112, "total_steps": 339, "loss": 0.0005, "lr": 7.540020378853523e-05, "epoch": 0.9884169884169884, "percentage": 33.04, "elapsed_time": "0:48:46", "remaining_time": "1:38:50", "throughput": 7889.5, "total_tokens": 23085888}
113
+ {"current_steps": 113, "total_steps": 339, "loss": 0.0007, "lr": 7.500000000000001e-05, "epoch": 0.9972421400992829, "percentage": 33.33, "elapsed_time": "0:49:14", "remaining_time": "1:38:28", "throughput": 7889.54, "total_tokens": 23307520}
114
+ {"current_steps": 114, "total_steps": 339, "loss": 0.0014, "lr": 7.459764918573264e-05, "epoch": 1.0088251516822946, "percentage": 33.63, "elapsed_time": "0:49:46", "remaining_time": "1:38:15", "throughput": 7889.22, "total_tokens": 23564192}
115
+ {"current_steps": 115, "total_steps": 339, "loss": 0.0007, "lr": 7.419318590003523e-05, "epoch": 1.0176503033645892, "percentage": 33.92, "elapsed_time": "0:50:13", "remaining_time": "1:37:50", "throughput": 7886.62, "total_tokens": 23768816}
116
+ {"current_steps": 116, "total_steps": 339, "loss": 0.0006, "lr": 7.378664487863103e-05, "epoch": 1.0264754550468835, "percentage": 34.22, "elapsed_time": "0:50:40", "remaining_time": "1:37:24", "throughput": 7886.1, "total_tokens": 23974096}
117
+ {"current_steps": 117, "total_steps": 339, "loss": 0.0003, "lr": 7.33780610356814e-05, "epoch": 1.0353006067291781, "percentage": 34.51, "elapsed_time": "0:51:05", "remaining_time": "1:36:55", "throughput": 7886.46, "total_tokens": 24172256}
118
+ {"current_steps": 118, "total_steps": 339, "loss": 0.0004, "lr": 7.296746946078736e-05, "epoch": 1.0441257584114727, "percentage": 34.81, "elapsed_time": "0:51:27", "remaining_time": "1:36:23", "throughput": 7889.9, "total_tokens": 24362208}
119
+ {"current_steps": 119, "total_steps": 339, "loss": 0.0003, "lr": 7.255490541597594e-05, "epoch": 1.0529509100937673, "percentage": 35.1, "elapsed_time": "0:51:53", "remaining_time": "1:35:55", "throughput": 7889.45, "total_tokens": 24562224}
120
+ {"current_steps": 120, "total_steps": 339, "loss": 0.0005, "lr": 7.214040433267198e-05, "epoch": 1.0617760617760619, "percentage": 35.4, "elapsed_time": "0:52:21", "remaining_time": "1:35:33", "throughput": 7886.78, "total_tokens": 24776528}
121
+ {"current_steps": 121, "total_steps": 339, "loss": 0.0003, "lr": 7.172400180865513e-05, "epoch": 1.0706012134583562, "percentage": 35.69, "elapsed_time": "0:52:45", "remaining_time": "1:35:02", "throughput": 7893.28, "total_tokens": 24985008}
122
+ {"current_steps": 122, "total_steps": 339, "loss": 0.0005, "lr": 7.130573360500276e-05, "epoch": 1.0794263651406508, "percentage": 35.99, "elapsed_time": "0:53:11", "remaining_time": "1:34:36", "throughput": 7896.08, "total_tokens": 25200720}
123
+ {"current_steps": 123, "total_steps": 339, "loss": 0.0004, "lr": 7.088563564301873e-05, "epoch": 1.0882515168229454, "percentage": 36.28, "elapsed_time": "0:53:36", "remaining_time": "1:34:08", "throughput": 7901.06, "total_tokens": 25413568}
124
+ {"current_steps": 124, "total_steps": 339, "loss": 0.0003, "lr": 7.046374400114842e-05, "epoch": 1.09707666850524, "percentage": 36.58, "elapsed_time": "0:53:59", "remaining_time": "1:33:37", "throughput": 7904.61, "total_tokens": 25608576}
+ {"current_steps": 125, "total_steps": 339, "loss": 0.0003, "lr": 7.004009491188022e-05, "epoch": 1.1059018201875346, "percentage": 36.87, "elapsed_time": "0:54:26", "remaining_time": "1:33:11", "throughput": 7904.82, "total_tokens": 25818400}
+ {"current_steps": 126, "total_steps": 339, "loss": 0.0005, "lr": 6.961472475863405e-05, "epoch": 1.114726971869829, "percentage": 37.17, "elapsed_time": "0:54:53", "remaining_time": "1:32:47", "throughput": 7905.63, "total_tokens": 26037424}
+ {"current_steps": 127, "total_steps": 339, "loss": 0.0005, "lr": 6.918767007263646e-05, "epoch": 1.1235521235521235, "percentage": 37.46, "elapsed_time": "0:55:21", "remaining_time": "1:32:23", "throughput": 7904.23, "total_tokens": 26250480}
+ {"current_steps": 128, "total_steps": 339, "loss": 0.0005, "lr": 6.875896752978344e-05, "epoch": 1.1323772752344181, "percentage": 37.76, "elapsed_time": "0:55:47", "remaining_time": "1:31:58", "throughput": 7903.09, "total_tokens": 26458592}
+ {"current_steps": 129, "total_steps": 339, "loss": 0.0004, "lr": 6.832865394749065e-05, "epoch": 1.1412024269167127, "percentage": 38.05, "elapsed_time": "0:56:17", "remaining_time": "1:31:38", "throughput": 7898.37, "total_tokens": 26680256}
+ {"current_steps": 130, "total_steps": 339, "loss": 0.0004, "lr": 6.789676628153143e-05, "epoch": 1.150027578599007, "percentage": 38.35, "elapsed_time": "0:56:44", "remaining_time": "1:31:13", "throughput": 7897.68, "total_tokens": 26887424}
+ {"current_steps": 131, "total_steps": 339, "loss": 0.0003, "lr": 6.746334162286307e-05, "epoch": 1.1588527302813016, "percentage": 38.64, "elapsed_time": "0:57:13", "remaining_time": "1:30:51", "throughput": 7897.31, "total_tokens": 27112736}
+ {"current_steps": 132, "total_steps": 339, "loss": 0.0004, "lr": 6.702841719444141e-05, "epoch": 1.1676778819635962, "percentage": 38.94, "elapsed_time": "0:57:40", "remaining_time": "1:30:26", "throughput": 7895.24, "total_tokens": 27320064}
+ {"current_steps": 133, "total_steps": 339, "loss": 0.0003, "lr": 6.659203034802397e-05, "epoch": 1.1765030336458908, "percentage": 39.23, "elapsed_time": "0:58:05", "remaining_time": "1:29:58", "throughput": 7895.5, "total_tokens": 27520544}
+ {"current_steps": 134, "total_steps": 339, "loss": 0.0009, "lr": 6.615421856096231e-05, "epoch": 1.1853281853281854, "percentage": 39.53, "elapsed_time": "0:58:32", "remaining_time": "1:29:34", "throughput": 7896.09, "total_tokens": 27737920}
+ {"current_steps": 135, "total_steps": 339, "loss": 0.0014, "lr": 6.571501943298334e-05, "epoch": 1.19415333701048, "percentage": 39.82, "elapsed_time": "0:59:01", "remaining_time": "1:29:11", "throughput": 7892.01, "total_tokens": 27947552}
+ {"current_steps": 136, "total_steps": 339, "loss": 0.0003, "lr": 6.527447068296026e-05, "epoch": 1.2029784886927744, "percentage": 40.12, "elapsed_time": "0:59:26", "remaining_time": "1:28:43", "throughput": 7891.89, "total_tokens": 28143808}
+ {"current_steps": 137, "total_steps": 339, "loss": 0.0002, "lr": 6.483261014567311e-05, "epoch": 1.211803640375069, "percentage": 40.41, "elapsed_time": "0:59:53", "remaining_time": "1:28:18", "throughput": 7888.92, "total_tokens": 28349312}
+ {"current_steps": 138, "total_steps": 339, "loss": 0.0002, "lr": 6.438947576855968e-05, "epoch": 1.2206287920573635, "percentage": 40.71, "elapsed_time": "1:00:20", "remaining_time": "1:27:53", "throughput": 7888.52, "total_tokens": 28560096}
+ {"current_steps": 139, "total_steps": 339, "loss": 0.0005, "lr": 6.394510560845637e-05, "epoch": 1.229453943739658, "percentage": 41.0, "elapsed_time": "1:00:45", "remaining_time": "1:27:25", "throughput": 7890.66, "total_tokens": 28764544}
+ {"current_steps": 140, "total_steps": 339, "loss": 0.0004, "lr": 6.349953782832991e-05, "epoch": 1.2382790954219525, "percentage": 41.3, "elapsed_time": "1:01:07", "remaining_time": "1:26:53", "throughput": 7892.64, "total_tokens": 28949360}
+ {"current_steps": 141, "total_steps": 339, "loss": 0.0002, "lr": 6.305281069399989e-05, "epoch": 1.247104247104247, "percentage": 41.59, "elapsed_time": "1:01:32", "remaining_time": "1:26:24", "throughput": 7894.92, "total_tokens": 29148112}
+ {"current_steps": 142, "total_steps": 339, "loss": 0.0004, "lr": 6.26049625708524e-05, "epoch": 1.2559293987865416, "percentage": 41.89, "elapsed_time": "1:02:01", "remaining_time": "1:26:02", "throughput": 7892.49, "total_tokens": 29370624}
+ {"current_steps": 143, "total_steps": 339, "loss": 0.0003, "lr": 6.215603192054522e-05, "epoch": 1.2647545504688362, "percentage": 42.18, "elapsed_time": "1:02:26", "remaining_time": "1:25:34", "throughput": 7894.05, "total_tokens": 29572464}
+ {"current_steps": 144, "total_steps": 339, "loss": 0.0006, "lr": 6.17060572977047e-05, "epoch": 1.2735797021511308, "percentage": 42.48, "elapsed_time": "1:02:51", "remaining_time": "1:25:07", "throughput": 7893.7, "total_tokens": 29771152}
+ {"current_steps": 145, "total_steps": 339, "loss": 0.0003, "lr": 6.125507734661458e-05, "epoch": 1.2824048538334254, "percentage": 42.77, "elapsed_time": "1:03:13", "remaining_time": "1:24:35", "throughput": 7896.62, "total_tokens": 29954960}
+ {"current_steps": 146, "total_steps": 339, "loss": 0.0004, "lr": 6.080313079789723e-05, "epoch": 1.2912300055157198, "percentage": 43.07, "elapsed_time": "1:03:40", "remaining_time": "1:24:10", "throughput": 7895.09, "total_tokens": 30165568}
+ {"current_steps": 147, "total_steps": 339, "loss": 0.0005, "lr": 6.035025646518746e-05, "epoch": 1.3000551571980143, "percentage": 43.36, "elapsed_time": "1:04:05", "remaining_time": "1:23:43", "throughput": 7897.58, "total_tokens": 30372160}
+ {"current_steps": 148, "total_steps": 339, "loss": 0.0003, "lr": 5.989649324179911e-05, "epoch": 1.308880308880309, "percentage": 43.66, "elapsed_time": "1:04:31", "remaining_time": "1:23:16", "throughput": 7896.39, "total_tokens": 30572752}
+ {"current_steps": 149, "total_steps": 339, "loss": 0.0004, "lr": 5.944188009738483e-05, "epoch": 1.3177054605626033, "percentage": 43.95, "elapsed_time": "1:04:57", "remaining_time": "1:22:49", "throughput": 7897.88, "total_tokens": 30780496}
+ {"current_steps": 150, "total_steps": 339, "loss": 0.0004, "lr": 5.8986456074589404e-05, "epoch": 1.3265306122448979, "percentage": 44.25, "elapsed_time": "1:05:19", "remaining_time": "1:22:19", "throughput": 7902.0, "total_tokens": 30975120}
+ {"current_steps": 151, "total_steps": 339, "loss": 0.0002, "lr": 5.853026028569667e-05, "epoch": 1.3353557639271925, "percentage": 44.54, "elapsed_time": "1:05:45", "remaining_time": "1:21:52", "throughput": 7900.57, "total_tokens": 31174000}
+ {"current_steps": 152, "total_steps": 339, "loss": 0.0003, "lr": 5.807333190927053e-05, "epoch": 1.344180915609487, "percentage": 44.84, "elapsed_time": "1:06:12", "remaining_time": "1:21:27", "throughput": 7901.36, "total_tokens": 31387088}
+ {"current_steps": 153, "total_steps": 339, "loss": 0.0003, "lr": 5.761571018679025e-05, "epoch": 1.3530060672917816, "percentage": 45.13, "elapsed_time": "1:06:37", "remaining_time": "1:21:00", "throughput": 7898.31, "total_tokens": 31576400}
+ {"current_steps": 154, "total_steps": 339, "loss": 0.0003, "lr": 5.715743441928041e-05, "epoch": 1.3618312189740762, "percentage": 45.43, "elapsed_time": "1:07:02", "remaining_time": "1:20:32", "throughput": 7901.98, "total_tokens": 31784320}
+ {"current_steps": 155, "total_steps": 339, "loss": 0.0004, "lr": 5.669854396393559e-05, "epoch": 1.3706563706563706, "percentage": 45.72, "elapsed_time": "1:07:29", "remaining_time": "1:20:07", "throughput": 7898.91, "total_tokens": 31987520}
+ {"current_steps": 156, "total_steps": 339, "loss": 0.0004, "lr": 5.6239078230740436e-05, "epoch": 1.3794815223386652, "percentage": 46.02, "elapsed_time": "1:07:53", "remaining_time": "1:19:38", "throughput": 7901.45, "total_tokens": 32187456}
+ {"current_steps": 157, "total_steps": 339, "loss": 0.0002, "lr": 5.5779076679085054e-05, "epoch": 1.3883066740209598, "percentage": 46.31, "elapsed_time": "1:08:18", "remaining_time": "1:19:11", "throughput": 7900.71, "total_tokens": 32384528}
+ {"current_steps": 158, "total_steps": 339, "loss": 0.0004, "lr": 5.531857881437612e-05, "epoch": 1.3971318257032543, "percentage": 46.61, "elapsed_time": "1:08:46", "remaining_time": "1:18:46", "throughput": 7898.98, "total_tokens": 32593040}
+ {"current_steps": 159, "total_steps": 339, "loss": 0.0003, "lr": 5.48576241846443e-05, "epoch": 1.4059569773855487, "percentage": 46.9, "elapsed_time": "1:09:12", "remaining_time": "1:18:21", "throughput": 7897.63, "total_tokens": 32797952}
+ {"current_steps": 160, "total_steps": 339, "loss": 0.0003, "lr": 5.4396252377147615e-05, "epoch": 1.4147821290678433, "percentage": 47.2, "elapsed_time": "1:09:39", "remaining_time": "1:17:55", "throughput": 7897.71, "total_tokens": 33008800}
+ {"current_steps": 161, "total_steps": 339, "loss": 0.0003, "lr": 5.3934503014971793e-05, "epoch": 1.4236072807501379, "percentage": 47.49, "elapsed_time": "1:10:04", "remaining_time": "1:17:28", "throughput": 7897.58, "total_tokens": 33208352}
+ {"current_steps": 162, "total_steps": 339, "loss": 0.0002, "lr": 5.347241575362729e-05, "epoch": 1.4324324324324325, "percentage": 47.79, "elapsed_time": "1:10:28", "remaining_time": "1:17:00", "throughput": 7900.98, "total_tokens": 33410208}
+ {"current_steps": 163, "total_steps": 339, "loss": 0.0003, "lr": 5.30100302776438e-05, "epoch": 1.441257584114727, "percentage": 48.08, "elapsed_time": "1:10:57", "remaining_time": "1:16:37", "throughput": 7899.49, "total_tokens": 33631888}
+ {"current_steps": 164, "total_steps": 339, "loss": 0.0004, "lr": 5.254738629716186e-05, "epoch": 1.4500827357970216, "percentage": 48.38, "elapsed_time": "1:11:21", "remaining_time": "1:16:09", "throughput": 7899.53, "total_tokens": 33825152}
+ {"current_steps": 165, "total_steps": 339, "loss": 0.0003, "lr": 5.208452354452274e-05, "epoch": 1.458907887479316, "percentage": 48.67, "elapsed_time": "1:11:45", "remaining_time": "1:15:40", "throughput": 7901.89, "total_tokens": 34020352}
+ {"current_steps": 166, "total_steps": 339, "loss": 0.0004, "lr": 5.162148177085604e-05, "epoch": 1.4677330391616106, "percentage": 48.97, "elapsed_time": "1:12:11", "remaining_time": "1:15:13", "throughput": 7902.03, "total_tokens": 34226288}
+ {"current_steps": 167, "total_steps": 339, "loss": 0.0016, "lr": 5.115830074266591e-05, "epoch": 1.4765581908439052, "percentage": 49.26, "elapsed_time": "1:12:37", "remaining_time": "1:14:47", "throughput": 7901.05, "total_tokens": 34426672}
+ {"current_steps": 168, "total_steps": 339, "loss": 0.0002, "lr": 5.0695020238415756e-05, "epoch": 1.4853833425261997, "percentage": 49.56, "elapsed_time": "1:13:03", "remaining_time": "1:14:21", "throughput": 7901.53, "total_tokens": 34636944}
+ {"current_steps": 169, "total_steps": 339, "loss": 0.0003, "lr": 5.0231680045112176e-05, "epoch": 1.494208494208494, "percentage": 49.85, "elapsed_time": "1:13:29", "remaining_time": "1:13:55", "throughput": 7901.79, "total_tokens": 34839456}
+ {"current_steps": 170, "total_steps": 339, "loss": 0.0002, "lr": 4.976831995488784e-05, "epoch": 1.5030336458907887, "percentage": 50.15, "elapsed_time": "1:13:53", "remaining_time": "1:13:27", "throughput": 7901.03, "total_tokens": 35031600}
+ {"current_steps": 171, "total_steps": 339, "loss": 0.0004, "lr": 4.9304979761584256e-05, "epoch": 1.5118587975730833, "percentage": 50.44, "elapsed_time": "1:14:16", "remaining_time": "1:12:58", "throughput": 7903.96, "total_tokens": 35227728}
+ {"current_steps": 172, "total_steps": 339, "loss": 0.0002, "lr": 4.884169925733409e-05, "epoch": 1.5206839492553779, "percentage": 50.74, "elapsed_time": "1:14:42", "remaining_time": "1:12:32", "throughput": 7905.12, "total_tokens": 35436528}
+ {"current_steps": 173, "total_steps": 339, "loss": 0.0002, "lr": 4.837851822914397e-05, "epoch": 1.5295091009376725, "percentage": 51.03, "elapsed_time": "1:15:04", "remaining_time": "1:12:02", "throughput": 7909.17, "total_tokens": 35628624}
+ {"current_steps": 174, "total_steps": 339, "loss": 0.0002, "lr": 4.791547645547726e-05, "epoch": 1.538334252619967, "percentage": 51.33, "elapsed_time": "1:15:29", "remaining_time": "1:11:35", "throughput": 7909.02, "total_tokens": 35827376}
+ {"current_steps": 175, "total_steps": 339, "loss": 0.0003, "lr": 4.745261370283817e-05, "epoch": 1.5471594043022614, "percentage": 51.62, "elapsed_time": "1:16:00", "remaining_time": "1:11:14", "throughput": 7905.59, "total_tokens": 36056560}
+ {"current_steps": 176, "total_steps": 339, "loss": 0.0002, "lr": 4.698996972235622e-05, "epoch": 1.555984555984556, "percentage": 51.92, "elapsed_time": "1:16:27", "remaining_time": "1:10:48", "throughput": 7905.47, "total_tokens": 36267568}
+ {"current_steps": 177, "total_steps": 339, "loss": 0.0027, "lr": 4.652758424637271e-05, "epoch": 1.5648097076668506, "percentage": 52.21, "elapsed_time": "1:16:54", "remaining_time": "1:10:23", "throughput": 7904.7, "total_tokens": 36473008}
+ {"current_steps": 178, "total_steps": 339, "loss": 0.0004, "lr": 4.606549698502823e-05, "epoch": 1.573634859349145, "percentage": 52.51, "elapsed_time": "1:17:18", "remaining_time": "1:09:55", "throughput": 7905.8, "total_tokens": 36670944}
+ {"current_steps": 179, "total_steps": 339, "loss": 0.0001, "lr": 4.56037476228524e-05, "epoch": 1.5824600110314395, "percentage": 52.8, "elapsed_time": "1:17:44", "remaining_time": "1:09:29", "throughput": 7907.65, "total_tokens": 36882256}
+ {"current_steps": 180, "total_steps": 339, "loss": 0.0004, "lr": 4.5142375815355706e-05, "epoch": 1.591285162713734, "percentage": 53.1, "elapsed_time": "1:18:10", "remaining_time": "1:09:03", "throughput": 7907.43, "total_tokens": 37091392}
+ {"current_steps": 181, "total_steps": 339, "loss": 0.0002, "lr": 4.468142118562389e-05, "epoch": 1.6001103143960287, "percentage": 53.39, "elapsed_time": "1:18:41", "remaining_time": "1:08:41", "throughput": 7902.86, "total_tokens": 37309680}
+ {"current_steps": 182, "total_steps": 339, "loss": 0.0003, "lr": 4.4220923320914964e-05, "epoch": 1.6089354660783233, "percentage": 53.69, "elapsed_time": "1:19:07", "remaining_time": "1:08:15", "throughput": 7903.1, "total_tokens": 37517952}
+ {"current_steps": 183, "total_steps": 339, "loss": 0.0003, "lr": 4.376092176925958e-05, "epoch": 1.6177606177606179, "percentage": 53.98, "elapsed_time": "1:19:33", "remaining_time": "1:07:49", "throughput": 7904.08, "total_tokens": 37732160}
+ {"current_steps": 184, "total_steps": 339, "loss": 0.0004, "lr": 4.330145603606441e-05, "epoch": 1.6265857694429124, "percentage": 54.28, "elapsed_time": "1:19:59", "remaining_time": "1:07:22", "throughput": 7905.25, "total_tokens": 37940368}
+ {"current_steps": 185, "total_steps": 339, "loss": 0.0004, "lr": 4.2842565580719595e-05, "epoch": 1.6354109211252068, "percentage": 54.57, "elapsed_time": "1:20:21", "remaining_time": "1:06:53", "throughput": 7908.78, "total_tokens": 38135024}
+ {"current_steps": 186, "total_steps": 339, "loss": 0.0002, "lr": 4.238428981320975e-05, "epoch": 1.6442360728075014, "percentage": 54.87, "elapsed_time": "1:20:45", "remaining_time": "1:06:25", "throughput": 7912.45, "total_tokens": 38336176}
+ {"current_steps": 187, "total_steps": 339, "loss": 0.0003, "lr": 4.192666809072948e-05, "epoch": 1.6530612244897958, "percentage": 55.16, "elapsed_time": "1:21:11", "remaining_time": "1:05:59", "throughput": 7912.95, "total_tokens": 38548880}
+ {"current_steps": 188, "total_steps": 339, "loss": 0.0003, "lr": 4.146973971430333e-05, "epoch": 1.6618863761720903, "percentage": 55.46, "elapsed_time": "1:21:37", "remaining_time": "1:05:33", "throughput": 7913.14, "total_tokens": 38755920}
+ {"current_steps": 189, "total_steps": 339, "loss": 0.0002, "lr": 4.101354392541061e-05, "epoch": 1.670711527854385, "percentage": 55.75, "elapsed_time": "1:22:04", "remaining_time": "1:05:08", "throughput": 7914.08, "total_tokens": 38973328}
+ {"current_steps": 190, "total_steps": 339, "loss": 0.0003, "lr": 4.0558119902615174e-05, "epoch": 1.6795366795366795, "percentage": 56.05, "elapsed_time": "1:22:34", "remaining_time": "1:04:45", "throughput": 7911.16, "total_tokens": 39193232}
+ {"current_steps": 191, "total_steps": 339, "loss": 0.0003, "lr": 4.010350675820091e-05, "epoch": 1.688361831218974, "percentage": 56.34, "elapsed_time": "1:23:00", "remaining_time": "1:04:19", "throughput": 7911.74, "total_tokens": 39406608}
+ {"current_steps": 192, "total_steps": 339, "loss": 0.0004, "lr": 3.964974353481254e-05, "epoch": 1.6971869829012687, "percentage": 56.64, "elapsed_time": "1:23:28", "remaining_time": "1:03:54", "throughput": 7911.05, "total_tokens": 39620160}
+ {"current_steps": 193, "total_steps": 339, "loss": 0.0001, "lr": 3.919686920210277e-05, "epoch": 1.7060121345835633, "percentage": 56.93, "elapsed_time": "1:23:53", "remaining_time": "1:03:27", "throughput": 7910.45, "total_tokens": 39815952}
+ {"current_steps": 194, "total_steps": 339, "loss": 0.0003, "lr": 3.874492265338544e-05, "epoch": 1.7148372862658579, "percentage": 57.23, "elapsed_time": "1:24:17", "remaining_time": "1:03:00", "throughput": 7911.47, "total_tokens": 40015408}
+ {"current_steps": 195, "total_steps": 339, "loss": 0.0002, "lr": 3.829394270229531e-05, "epoch": 1.7236624379481522, "percentage": 57.52, "elapsed_time": "1:24:42", "remaining_time": "1:02:32", "throughput": 7913.07, "total_tokens": 40215328}
+ {"current_steps": 196, "total_steps": 339, "loss": 0.0002, "lr": 3.784396807945477e-05, "epoch": 1.7324875896304468, "percentage": 57.82, "elapsed_time": "1:25:07", "remaining_time": "1:02:06", "throughput": 7912.61, "total_tokens": 40414384}
+ {"current_steps": 197, "total_steps": 339, "loss": 0.0002, "lr": 3.7395037429147615e-05, "epoch": 1.7413127413127412, "percentage": 58.11, "elapsed_time": "1:25:32", "remaining_time": "1:01:39", "throughput": 7914.15, "total_tokens": 40620656}
+ {"current_steps": 198, "total_steps": 339, "loss": 0.0003, "lr": 3.694718930600012e-05, "epoch": 1.7501378929950357, "percentage": 58.41, "elapsed_time": "1:26:04", "remaining_time": "1:01:17", "throughput": 7908.9, "total_tokens": 40847008}
+ {"current_steps": 199, "total_steps": 339, "loss": 0.0001, "lr": 3.65004621716701e-05, "epoch": 1.7589630446773303, "percentage": 58.7, "elapsed_time": "1:26:27", "remaining_time": "1:00:49", "throughput": 7909.88, "total_tokens": 41036368}
+ {"current_steps": 200, "total_steps": 339, "loss": 0.0003, "lr": 3.6054894391543646e-05, "epoch": 1.767788196359625, "percentage": 59.0, "elapsed_time": "1:26:59", "remaining_time": "1:00:27", "throughput": 7904.23, "total_tokens": 41252976}
+ {"current_steps": 201, "total_steps": 339, "loss": 0.0002, "lr": 3.561052423144032e-05, "epoch": 1.7766133480419195, "percentage": 59.29, "elapsed_time": "1:27:29", "remaining_time": "1:00:04", "throughput": 7898.91, "total_tokens": 41465104}
+ {"current_steps": 202, "total_steps": 339, "loss": 0.0002, "lr": 3.5167389854326905e-05, "epoch": 1.785438499724214, "percentage": 59.59, "elapsed_time": "1:27:56", "remaining_time": "0:59:38", "throughput": 7897.44, "total_tokens": 41670800}
+ {"current_steps": 203, "total_steps": 339, "loss": 0.0013, "lr": 3.4725529317039754e-05, "epoch": 1.7942636514065087, "percentage": 59.88, "elapsed_time": "1:28:25", "remaining_time": "0:59:14", "throughput": 7894.63, "total_tokens": 41883536}
+ {"current_steps": 204, "total_steps": 339, "loss": 0.0001, "lr": 3.428498056701665e-05, "epoch": 1.803088803088803, "percentage": 60.18, "elapsed_time": "1:28:51", "remaining_time": "0:58:48", "throughput": 7893.32, "total_tokens": 42083360}
+ {"current_steps": 205, "total_steps": 339, "loss": 0.0002, "lr": 3.38457814390377e-05, "epoch": 1.8119139547710976, "percentage": 60.47, "elapsed_time": "1:29:16", "remaining_time": "0:58:21", "throughput": 7893.48, "total_tokens": 42283120}
+ {"current_steps": 206, "total_steps": 339, "loss": 0.0003, "lr": 3.340796965197604e-05, "epoch": 1.8207391064533922, "percentage": 60.77, "elapsed_time": "1:29:41", "remaining_time": "0:57:54", "throughput": 7897.32, "total_tokens": 42499088}
+ {"current_steps": 207, "total_steps": 339, "loss": 0.0001, "lr": 3.297158280555862e-05, "epoch": 1.8295642581356866, "percentage": 61.06, "elapsed_time": "1:30:05", "remaining_time": "0:57:26", "throughput": 7898.07, "total_tokens": 42692976}
+ {"current_steps": 208, "total_steps": 339, "loss": 0.0003, "lr": 3.2536658377136935e-05, "epoch": 1.8383894098179812, "percentage": 61.36, "elapsed_time": "1:30:32", "remaining_time": "0:57:01", "throughput": 7898.41, "total_tokens": 42907216}
+ {"current_steps": 209, "total_steps": 339, "loss": 0.0001, "lr": 3.210323371846857e-05, "epoch": 1.8472145615002757, "percentage": 61.65, "elapsed_time": "1:30:58", "remaining_time": "0:56:35", "throughput": 7898.41, "total_tokens": 43112448}
+ {"current_steps": 210, "total_steps": 339, "loss": 0.0003, "lr": 3.167134605250938e-05, "epoch": 1.8560397131825703, "percentage": 61.95, "elapsed_time": "1:31:27", "remaining_time": "0:56:10", "throughput": 7897.79, "total_tokens": 43340096}
+ {"current_steps": 211, "total_steps": 339, "loss": 0.0001, "lr": 3.124103247021657e-05, "epoch": 1.864864864864865, "percentage": 62.24, "elapsed_time": "1:31:53", "remaining_time": "0:55:44", "throughput": 7896.37, "total_tokens": 43539664}
+ {"current_steps": 212, "total_steps": 339, "loss": 0.0003, "lr": 3.081232992736355e-05, "epoch": 1.8736900165471595, "percentage": 62.54, "elapsed_time": "1:32:16", "remaining_time": "0:55:16", "throughput": 7897.48, "total_tokens": 43727664}
+ {"current_steps": 213, "total_steps": 339, "loss": 0.0002, "lr": 3.0385275241365962e-05, "epoch": 1.882515168229454, "percentage": 62.83, "elapsed_time": "1:32:44", "remaining_time": "0:54:51", "throughput": 7898.39, "total_tokens": 43953584}
+ {"current_steps": 214, "total_steps": 339, "loss": 0.0002, "lr": 2.9959905088119776e-05, "epoch": 1.8913403199117484, "percentage": 63.13, "elapsed_time": "1:33:11", "remaining_time": "0:54:25", "throughput": 7897.6, "total_tokens": 44157504}
+ {"current_steps": 215, "total_steps": 339, "loss": 0.0001, "lr": 2.9536255998851613e-05, "epoch": 1.900165471594043, "percentage": 63.42, "elapsed_time": "1:33:35", "remaining_time": "0:53:58", "throughput": 7898.54, "total_tokens": 44350448}
+ {"current_steps": 216, "total_steps": 339, "loss": 0.0002, "lr": 2.9114364356981272e-05, "epoch": 1.9089906232763376, "percentage": 63.72, "elapsed_time": "1:33:59", "remaining_time": "0:53:31", "throughput": 7900.99, "total_tokens": 44561472}
+ {"current_steps": 217, "total_steps": 339, "loss": 0.0002, "lr": 2.8694266394997238e-05, "epoch": 1.917815774958632, "percentage": 64.01, "elapsed_time": "1:34:27", "remaining_time": "0:53:06", "throughput": 7899.67, "total_tokens": 44769936}
+ {"current_steps": 218, "total_steps": 339, "loss": 0.0002, "lr": 2.8275998191344888e-05, "epoch": 1.9266409266409266, "percentage": 64.31, "elapsed_time": "1:34:54", "remaining_time": "0:52:40", "throughput": 7898.82, "total_tokens": 44979344}
+ {"current_steps": 219, "total_steps": 339, "loss": 0.0002, "lr": 2.7859595667328026e-05, "epoch": 1.9354660783232212, "percentage": 64.6, "elapsed_time": "1:35:22", "remaining_time": "0:52:15", "throughput": 7898.4, "total_tokens": 45196944}
+ {"current_steps": 220, "total_steps": 339, "loss": 0.0001, "lr": 2.7445094584024067e-05, "epoch": 1.9442912300055157, "percentage": 64.9, "elapsed_time": "1:35:51", "remaining_time": "0:51:50", "throughput": 7895.44, "total_tokens": 45406832}
+ {"current_steps": 221, "total_steps": 339, "loss": 0.0003, "lr": 2.7032530539212658e-05, "epoch": 1.9531163816878103, "percentage": 65.19, "elapsed_time": "1:36:18", "remaining_time": "0:51:25", "throughput": 7892.18, "total_tokens": 45603120}
+ {"current_steps": 222, "total_steps": 339, "loss": 0.0002, "lr": 2.6621938964318595e-05, "epoch": 1.961941533370105, "percentage": 65.49, "elapsed_time": "1:36:44", "remaining_time": "0:50:59", "throughput": 7890.65, "total_tokens": 45805184}
+ {"current_steps": 223, "total_steps": 339, "loss": 0.0001, "lr": 2.621335512136899e-05, "epoch": 1.9707666850523995, "percentage": 65.78, "elapsed_time": "1:37:08", "remaining_time": "0:50:31", "throughput": 7892.67, "total_tokens": 46001184}
+ {"current_steps": 224, "total_steps": 339, "loss": 0.0002, "lr": 2.5806814099964772e-05, "epoch": 1.9795918367346939, "percentage": 66.08, "elapsed_time": "1:37:34", "remaining_time": "0:50:05", "throughput": 7892.1, "total_tokens": 46206288}
+ {"current_steps": 225, "total_steps": 339, "loss": 0.0002, "lr": 2.540235081426736e-05, "epoch": 1.9884169884169884, "percentage": 66.37, "elapsed_time": "1:38:03", "remaining_time": "0:49:40", "throughput": 7891.47, "total_tokens": 46427344}
+ {"current_steps": 226, "total_steps": 339, "loss": 0.0003, "lr": 2.500000000000001e-05, "epoch": 1.9972421400992828, "percentage": 66.67, "elapsed_time": "1:38:28", "remaining_time": "0:49:14", "throughput": 7891.65, "total_tokens": 46627344}
+ {"current_steps": 227, "total_steps": 339, "loss": 0.0011, "lr": 2.459979621146477e-05, "epoch": 2.0088251516822946, "percentage": 66.96, "elapsed_time": "1:39:03", "remaining_time": "0:48:52", "throughput": 7890.98, "total_tokens": 46901504}
+ {"current_steps": 228, "total_steps": 339, "loss": 0.0001, "lr": 2.4201773818574956e-05, "epoch": 2.017650303364589, "percentage": 67.26, "elapsed_time": "1:39:31", "remaining_time": "0:48:27", "throughput": 7888.59, "total_tokens": 47104400}
+ {"current_steps": 229, "total_steps": 339, "loss": 0.0001, "lr": 2.3805967003903333e-05, "epoch": 2.0264754550468838, "percentage": 67.55, "elapsed_time": "1:39:56", "remaining_time": "0:48:00", "throughput": 7889.79, "total_tokens": 47314176}
+ {"current_steps": 230, "total_steps": 339, "loss": 0.0003, "lr": 2.3412409759746528e-05, "epoch": 2.0353006067291783, "percentage": 67.85, "elapsed_time": "1:40:25", "remaining_time": "0:47:35", "throughput": 7887.38, "total_tokens": 47525264}
+ {"current_steps": 231, "total_steps": 339, "loss": 0.0001, "lr": 2.302113588520578e-05, "epoch": 2.0441257584114725, "percentage": 68.14, "elapsed_time": "1:40:49", "remaining_time": "0:47:08", "throughput": 7889.45, "total_tokens": 47724528}
+ {"current_steps": 232, "total_steps": 339, "loss": 0.0002, "lr": 2.2632178983284153e-05, "epoch": 2.052950910093767, "percentage": 68.44, "elapsed_time": "1:41:17", "remaining_time": "0:46:42", "throughput": 7887.07, "total_tokens": 47932624}
+ {"current_steps": 233, "total_steps": 339, "loss": 0.0001, "lr": 2.2245572458000712e-05, "epoch": 2.0617760617760617, "percentage": 68.73, "elapsed_time": "1:41:42", "remaining_time": "0:46:16", "throughput": 7889.59, "total_tokens": 48148608}
+ {"current_steps": 234, "total_steps": 339, "loss": 0.0025, "lr": 2.1861349511521815e-05, "epoch": 2.0706012134583562, "percentage": 69.03, "elapsed_time": "1:42:14", "remaining_time": "0:45:52", "throughput": 7885.55, "total_tokens": 48373632}
+ {"current_steps": 235, "total_steps": 339, "loss": 0.0013, "lr": 2.147954314130955e-05, "epoch": 2.079426365140651, "percentage": 69.32, "elapsed_time": "1:42:44", "remaining_time": "0:45:28", "throughput": 7881.47, "total_tokens": 48586512}
+ {"current_steps": 236, "total_steps": 339, "loss": 0.0001, "lr": 2.1100186137288e-05, "epoch": 2.0882515168229454, "percentage": 69.62, "elapsed_time": "1:43:08", "remaining_time": "0:45:01", "throughput": 7884.07, "total_tokens": 48793568}
+ {"current_steps": 237, "total_steps": 339, "loss": 0.0001, "lr": 2.072331107902713e-05, "epoch": 2.09707666850524, "percentage": 69.91, "elapsed_time": "1:43:35", "remaining_time": "0:44:34", "throughput": 7884.69, "total_tokens": 49006224}
+ {"current_steps": 238, "total_steps": 339, "loss": 0.0002, "lr": 2.0348950332944834e-05, "epoch": 2.1059018201875346, "percentage": 70.21, "elapsed_time": "1:44:00", "remaining_time": "0:44:08", "throughput": 7886.69, "total_tokens": 49217632}
+ {"current_steps": 239, "total_steps": 339, "loss": 0.0001, "lr": 1.9977136049527345e-05, "epoch": 2.114726971869829, "percentage": 70.5, "elapsed_time": "1:44:26", "remaining_time": "0:43:42", "throughput": 7886.9, "total_tokens": 49426624}
+ {"current_steps": 240, "total_steps": 339, "loss": 0.0001, "lr": 1.960790016056801e-05, "epoch": 2.1235521235521237, "percentage": 70.8, "elapsed_time": "1:44:50", "remaining_time": "0:43:14", "throughput": 7888.14, "total_tokens": 49623376}
+ {"current_steps": 241, "total_steps": 339, "loss": 0.0002, "lr": 1.9241274376425e-05, "epoch": 2.132377275234418, "percentage": 71.09, "elapsed_time": "1:45:16", "remaining_time": "0:42:48", "throughput": 7889.13, "total_tokens": 49828144}
+ {"current_steps": 242, "total_steps": 339, "loss": 0.0002, "lr": 1.8877290183298057e-05, "epoch": 2.1412024269167125, "percentage": 71.39, "elapsed_time": "1:45:40", "remaining_time": "0:42:21", "throughput": 7888.3, "total_tokens": 50018448}
+ {"current_steps": 243, "total_steps": 339, "loss": 0.0001, "lr": 1.8515978840524302e-05, "epoch": 2.150027578599007, "percentage": 71.68, "elapsed_time": "1:46:06", "remaining_time": "0:41:55", "throughput": 7887.77, "total_tokens": 50218176}
+ {"current_steps": 244, "total_steps": 339, "loss": 0.0002, "lr": 1.815737137789377e-05, "epoch": 2.1588527302813016, "percentage": 71.98, "elapsed_time": "1:46:31", "remaining_time": "0:41:28", "throughput": 7889.8, "total_tokens": 50424896}
+ {"current_steps": 245, "total_steps": 339, "loss": 0.0006, "lr": 1.7801498592984446e-05, "epoch": 2.1676778819635962, "percentage": 72.27, "elapsed_time": "1:46:59", "remaining_time": "0:41:03", "throughput": 7887.66, "total_tokens": 50635088}
+ {"current_steps": 246, "total_steps": 339, "loss": 0.0001, "lr": 1.7448391048517376e-05, "epoch": 2.176503033645891, "percentage": 72.57, "elapsed_time": "1:47:27", "remaining_time": "0:40:37", "throughput": 7886.91, "total_tokens": 50849552}
+ {"current_steps": 247, "total_steps": 339, "loss": 0.0002, "lr": 1.7098079069731958e-05, "epoch": 2.1853281853281854, "percentage": 72.86, "elapsed_time": "1:47:49", "remaining_time": "0:40:09", "throughput": 7889.08, "total_tokens": 51037776}
+ {"current_steps": 248, "total_steps": 339, "loss": 0.0002, "lr": 1.6750592741781497e-05, "epoch": 2.19415333701048, "percentage": 73.16, "elapsed_time": "1:48:14", "remaining_time": "0:39:42", "throughput": 7890.61, "total_tokens": 51242672}
+ {"current_steps": 249, "total_steps": 339, "loss": 0.0001, "lr": 1.640596190714947e-05, "epoch": 2.2029784886927746, "percentage": 73.45, "elapsed_time": "1:48:39", "remaining_time": "0:39:16", "throughput": 7889.22, "total_tokens": 51437008}
+ {"current_steps": 250, "total_steps": 339, "loss": 0.0001, "lr": 1.6064216163086716e-05, "epoch": 2.211803640375069, "percentage": 73.75, "elapsed_time": "1:49:04", "remaining_time": "0:38:49", "throughput": 7890.88, "total_tokens": 51641264}
+ {"current_steps": 251, "total_steps": 339, "loss": 0.0001, "lr": 1.5725384859069455e-05, "epoch": 2.2206287920573633, "percentage": 74.04, "elapsed_time": "1:49:30", "remaining_time": "0:38:23", "throughput": 7890.72, "total_tokens": 51842592}
+ {"current_steps": 252, "total_steps": 339, "loss": 0.0012, "lr": 1.538949709427886e-05, "epoch": 2.229453943739658, "percentage": 74.34, "elapsed_time": "1:49:55", "remaining_time": "0:37:57", "throughput": 7890.97, "total_tokens": 52047456}
+ {"current_steps": 253, "total_steps": 339, "loss": 0.0001, "lr": 1.5056581715101886e-05, "epoch": 2.2382790954219525, "percentage": 74.63, "elapsed_time": "1:50:19", "remaining_time": "0:37:30", "throughput": 7892.37, "total_tokens": 52242208}
+ {"current_steps": 254, "total_steps": 339, "loss": 0.0003, "lr": 1.472666731265394e-05, "epoch": 2.247104247104247, "percentage": 74.93, "elapsed_time": "1:50:42", "remaining_time": "0:37:02", "throughput": 7894.16, "total_tokens": 52436800}
+ {"current_steps": 255, "total_steps": 339, "loss": 0.0001, "lr": 1.4399782220323515e-05, "epoch": 2.2559293987865416, "percentage": 75.22, "elapsed_time": "1:51:06", "remaining_time": "0:36:35", "throughput": 7894.39, "total_tokens": 52624752}
+ {"current_steps": 256, "total_steps": 339, "loss": 0.0001, "lr": 1.4075954511338785e-05, "epoch": 2.2647545504688362, "percentage": 75.52, "elapsed_time": "1:51:32", "remaining_time": "0:36:09", "throughput": 7895.04, "total_tokens": 52836384}
+ {"current_steps": 257, "total_steps": 339, "loss": 0.0001, "lr": 1.3755211996356687e-05, "epoch": 2.273579702151131, "percentage": 75.81, "elapsed_time": "1:52:00", "remaining_time": "0:35:44", "throughput": 7894.62, "total_tokens": 53059296}
+ {"current_steps": 258, "total_steps": 339, "loss": 0.0001, "lr": 1.3437582221074573e-05, "epoch": 2.2824048538334254, "percentage": 76.11, "elapsed_time": "1:52:28", "remaining_time": "0:35:18", "throughput": 7893.02, "total_tokens": 53267440}
+ {"current_steps": 259, "total_steps": 339, "loss": 0.0001, "lr": 1.3123092463864456e-05, "epoch": 2.29123000551572, "percentage": 76.4, "elapsed_time": "1:52:59", "remaining_time": "0:34:54", "throughput": 7891.68, "total_tokens": 53501008}
+ {"current_steps": 260, "total_steps": 339, "loss": 0.0001, "lr": 1.2811769733430406e-05, "epoch": 2.300055157198014, "percentage": 76.7, "elapsed_time": "1:53:24", "remaining_time": "0:34:27", "throughput": 7892.09, "total_tokens": 53700432}
+ {"current_steps": 261, "total_steps": 339, "loss": 0.0002, "lr": 1.250364076648894e-05, "epoch": 2.3088803088803087, "percentage": 76.99, "elapsed_time": "1:53:50", "remaining_time": "0:34:01", "throughput": 7894.29, "total_tokens": 53919616}
+ {"current_steps": 262, "total_steps": 339, "loss": 0.0001, "lr": 1.2198732025472876e-05, "epoch": 2.3177054605626033, "percentage": 77.29, "elapsed_time": "1:54:17", "remaining_time": "0:33:35", "throughput": 7893.79, "total_tokens": 54130528}
+ {"current_steps": 263, "total_steps": 339, "loss": 0.0002, "lr": 1.1897069696258755e-05, "epoch": 2.326530612244898, "percentage": 77.58, "elapsed_time": "1:54:44", "remaining_time": "0:33:09", "throughput": 7894.54, "total_tokens": 54350560}
+ {"current_steps": 264, "total_steps": 339, "loss": 0.0001, "lr": 1.1598679685917901e-05, "epoch": 2.3353557639271925, "percentage": 77.88, "elapsed_time": "1:55:08", "remaining_time": "0:32:42", "throughput": 7894.72, "total_tokens": 54542224}
+ {"current_steps": 265, "total_steps": 339, "loss": 0.0002, "lr": 1.1303587620491513e-05, "epoch": 2.344180915609487, "percentage": 78.17, "elapsed_time": "1:55:35", "remaining_time": "0:32:16", "throughput": 7893.77, "total_tokens": 54745136}
+ {"current_steps": 266, "total_steps": 339, "loss": 0.0001, "lr": 1.1011818842789928e-05, "epoch": 2.3530060672917816, "percentage": 78.47, "elapsed_time": "1:56:01", "remaining_time": "0:31:50", "throughput": 7895.0, "total_tokens": 54957584}
+ {"current_steps": 267, "total_steps": 339, "loss": 0.0001, "lr": 1.0723398410216084e-05, "epoch": 2.361831218974076, "percentage": 78.76, "elapsed_time": "1:56:26", "remaining_time": "0:31:24", "throughput": 7895.05, "total_tokens": 55162496}
+ {"current_steps": 268, "total_steps": 339, "loss": 0.0002, "lr": 1.0438351092613569e-05, "epoch": 2.370656370656371, "percentage": 79.06, "elapsed_time": "1:56:53", "remaining_time": "0:30:58", "throughput": 7895.15, "total_tokens": 55376544}
+ {"current_steps": 269, "total_steps": 339, "loss": 0.0001, "lr": 1.0156701370139454e-05, "epoch": 2.3794815223386654, "percentage": 79.35, "elapsed_time": "1:57:19", "remaining_time": "0:30:31", "throughput": 7895.9, "total_tokens": 55583072}
+ {"current_steps": 270, "total_steps": 339, "loss": 0.0002, "lr": 9.878473431161767e-06, "epoch": 2.38830667402096, "percentage": 79.65, "elapsed_time": "1:57:46", "remaining_time": "0:30:05", "throughput": 7897.0, "total_tokens": 55801200}
+ {"current_steps": 271, "total_steps": 339, "loss": 0.0001, "lr": 9.603691170182317e-06, "epoch": 2.397131825703254, "percentage": 79.94, "elapsed_time": "1:58:14", "remaining_time": "0:29:40", "throughput": 7893.16, "total_tokens": 55998080}
+ {"current_steps": 272, "total_steps": 339, "loss": 0.0002, "lr": 9.33237818578449e-06, "epoch": 2.4059569773855487, "percentage": 80.24, "elapsed_time": "1:58:41", "remaining_time": "0:29:14", "throughput": 7891.24, "total_tokens": 56200448}
+ {"current_steps": 273, "total_steps": 339, "loss": 0.0001, "lr": 9.064557778606631e-06, "epoch": 2.4147821290678433, "percentage": 80.53, "elapsed_time": "1:59:05", "remaining_time": "0:28:47", "throughput": 7893.3, "total_tokens": 56400416}
+ {"current_steps": 274, "total_steps": 339, "loss": 0.0002, "lr": 8.800252949340998e-06, "epoch": 2.423607280750138, "percentage": 80.83, "elapsed_time": "1:59:31", "remaining_time": "0:28:21", "throughput": 7893.02, "total_tokens": 56606128}
+ {"current_steps": 275, "total_steps": 339, "loss": 0.0, "lr": 8.539486396758356e-06, "epoch": 2.4324324324324325, "percentage": 81.12, "elapsed_time": "1:59:56", "remaining_time": "0:27:54", "throughput": 7892.52, "total_tokens": 56797824}
+ {"current_steps": 276, "total_steps": 339, "loss": 0.0001, "lr": 8.28228051575864e-06, "epoch": 2.441257584114727, "percentage": 81.42, "elapsed_time": "2:00:22", "remaining_time": "0:27:28", "throughput": 7892.63, "total_tokens": 57006384}
+ {"current_steps": 277, "total_steps": 339, "loss": 0.0001, "lr": 8.02865739544767e-06, "epoch": 2.4500827357970216, "percentage": 81.71, "elapsed_time": "2:00:48", "remaining_time": "0:27:02", "throughput": 7892.41, "total_tokens": 57207824}
+ {"current_steps": 278, "total_steps": 339, "loss": 0.0001, "lr": 7.778638817240042e-06, "epoch": 2.458907887479316, "percentage": 82.01, "elapsed_time": "2:01:16", "remaining_time": "0:26:36", "throughput": 7889.96, "total_tokens": 57415152}
+ {"current_steps": 279, "total_steps": 339, "loss": 0.0001, "lr": 7.532246252988617e-06, "epoch": 2.467733039161611, "percentage": 82.3, "elapsed_time": "2:01:41", "remaining_time": "0:26:10", "throughput": 7892.18, "total_tokens": 57628096}
+ {"current_steps": 280, "total_steps": 339, "loss": 0.0001, "lr": 7.289500863140414e-06, "epoch": 2.476558190843905, "percentage": 82.6, "elapsed_time": "2:02:05", "remaining_time": "0:25:43", "throughput": 7893.12, "total_tokens": 57824064}
+ {"current_steps": 281, "total_steps": 339, "loss": 0.0002, "lr": 7.05042349491935e-06, "epoch": 2.4853833425261995, "percentage": 82.89, "elapsed_time": "2:02:35", "remaining_time": "0:25:18", "throughput": 7891.37, "total_tokens": 58042720}
+ {"current_steps": 282, "total_steps": 339, "loss": 0.0001, "lr": 6.815034680535915e-06, "epoch": 2.494208494208494, "percentage": 83.19, "elapsed_time": "2:03:02", "remaining_time": "0:24:52", "throughput": 7891.23, "total_tokens": 58255408}
+ {"current_steps": 283, "total_steps": 339, "loss": 0.0001, "lr": 6.5833546354237556e-06, "epoch": 2.5030336458907887, "percentage": 83.48, "elapsed_time": "2:03:29", "remaining_time": "0:24:26", "throughput": 7890.21, "total_tokens": 58464800}
+ {"current_steps": 284, "total_steps": 339, "loss": 0.0001, "lr": 6.355403256503595e-06, "epoch": 2.5118587975730833, "percentage": 83.78, "elapsed_time": "2:03:56", "remaining_time": "0:24:00", "throughput": 7889.96, "total_tokens": 58672496}
+ {"current_steps": 285, "total_steps": 339, "loss": 0.0002, "lr": 6.1312001204745115e-06, "epoch": 2.520683949255378, "percentage": 84.07, "elapsed_time": "2:04:25", "remaining_time": "0:23:34", "throughput": 7889.86, "total_tokens": 58898256}
+ {"current_steps": 286, "total_steps": 339, "loss": 0.0001, "lr": 5.910764482132575e-06, "epoch": 2.5295091009376725, "percentage": 84.37, "elapsed_time": "2:04:51", "remaining_time": "0:23:08", "throughput": 7889.91, "total_tokens": 59107152}
+ {"current_steps": 287, "total_steps": 339, "loss": 0.0002, "lr": 5.6941152727173265e-06, "epoch": 2.538334252619967, "percentage": 84.66, "elapsed_time": "2:05:15", "remaining_time": "0:22:41", "throughput": 7891.05, "total_tokens": 59307664}
+ {"current_steps": 288, "total_steps": 339, "loss": 0.0003, "lr": 5.481271098285817e-06, "epoch": 2.5471594043022616, "percentage": 84.96, "elapsed_time": "2:05:42", "remaining_time": "0:22:15", "throughput": 7890.97, "total_tokens": 59514736}
+ {"current_steps": 289, "total_steps": 339, "loss": 0.0001, "lr": 5.272250238114856e-06, "epoch": 2.5559845559845558, "percentage": 85.25, "elapsed_time": "2:06:06", "remaining_time": "0:21:49", "throughput": 7891.95, "total_tokens": 59712512}
+ {"current_steps": 290, "total_steps": 339, "loss": 0.0001, "lr": 5.067070643131055e-06, "epoch": 2.564809707666851, "percentage": 85.55, "elapsed_time": "2:06:29", "remaining_time": "0:21:22", "throughput": 7893.32, "total_tokens": 59910000}
+ {"current_steps": 291, "total_steps": 339, "loss": 0.0002, "lr": 4.865749934369223e-06, "epoch": 2.573634859349145, "percentage": 85.84, "elapsed_time": "2:06:55", "remaining_time": "0:20:56", "throughput": 7893.98, "total_tokens": 60116400}
+ {"current_steps": 292, "total_steps": 339, "loss": 0.0002, "lr": 4.668305401459022e-06, "epoch": 2.5824600110314395, "percentage": 86.14, "elapsed_time": "2:07:22", "remaining_time": "0:20:30", "throughput": 7892.9, "total_tokens": 60320368}
+ {"current_steps": 293, "total_steps": 339, "loss": 0.0001, "lr": 4.474754001140191e-06, "epoch": 2.591285162713734, "percentage": 86.43, "elapsed_time": "2:07:50", "remaining_time": "0:20:04", "throughput": 7892.36, "total_tokens": 60536528}
+ {"current_steps": 294, "total_steps": 339, "loss": 0.001, "lr": 4.285112355806192e-06, "epoch": 2.6001103143960287, "percentage": 86.73, "elapsed_time": "2:08:17", "remaining_time": "0:19:38", "throughput": 7891.4, "total_tokens": 60743120}
+ {"current_steps": 295, "total_steps": 339, "loss": 0.0001, "lr": 4.099396752076745e-06, "epoch": 2.6089354660783233, "percentage": 87.02, "elapsed_time": "2:08:42", "remaining_time": "0:19:11", "throughput": 7891.16, "total_tokens": 60942704}
+ {"current_steps": 296, "total_steps": 339, "loss": 0.0001, "lr": 3.917623139399018e-06, "epoch": 2.617760617760618, "percentage": 87.32, "elapsed_time": "2:09:07", "remaining_time": "0:18:45", "throughput": 7892.02, "total_tokens": 61140128}
+ {"current_steps": 297, "total_steps": 339, "loss": 0.0001, "lr": 3.7398071286779857e-06, "epoch": 2.6265857694429124, "percentage": 87.61, "elapsed_time": "2:09:30", "remaining_time": "0:18:18", "throughput": 7892.85, "total_tokens": 61334224}
+ {"current_steps": 298, "total_steps": 339, "loss": 0.0001, "lr": 3.5659639909356723e-06, "epoch": 2.6354109211252066, "percentage": 87.91, "elapsed_time": "2:09:57", "remaining_time": "0:17:52", "throughput": 7892.72, "total_tokens": 61543280}
+ {"current_steps": 299, "total_steps": 339, "loss": 0.0002, "lr": 3.3961086559996803e-06, "epoch": 2.6442360728075016, "percentage": 88.2, "elapsed_time": "2:10:23", "remaining_time": "0:17:26", "throughput": 7893.04, "total_tokens": 61750720}
+ {"current_steps": 300, "total_steps": 339, "loss": 0.0, "lr": 3.230255711220992e-06, "epoch": 2.6530612244897958, "percentage": 88.5, "elapsed_time": "2:10:48", "remaining_time": "0:17:00", "throughput": 7892.55, "total_tokens": 61945952}
+ {"current_steps": 301, "total_steps": 339, "loss": 0.0001, "lr": 3.0684194002212287e-06, "epoch": 2.6618863761720903, "percentage": 88.79, "elapsed_time": "2:11:14", "remaining_time": "0:16:34", "throughput": 7893.7, "total_tokens": 62155632}
+ {"current_steps": 302, "total_steps": 339, "loss": 0.0001, "lr": 2.910613621669356e-06, "epoch": 2.670711527854385, "percentage": 89.09, "elapsed_time": "2:11:39", "remaining_time": "0:16:07", "throughput": 7893.69, "total_tokens": 62353216}
+ {"current_steps": 303, "total_steps": 339, "loss": 0.0001, "lr": 2.7568519280880558e-06, "epoch": 2.6795366795366795, "percentage": 89.38, "elapsed_time": "2:12:03", "remaining_time": "0:15:41", "throughput": 7893.79, "total_tokens": 62544128}
+ {"current_steps": 304, "total_steps": 339, "loss": 0.0004, "lr": 2.607147524689829e-06, "epoch": 2.688361831218974, "percentage": 89.68, "elapsed_time": "2:12:29", "remaining_time": "0:15:15", "throughput": 7893.85, "total_tokens": 62752688}
+ {"current_steps": 305, "total_steps": 339, "loss": 0.0001, "lr": 2.4615132682429374e-06, "epoch": 2.6971869829012687, "percentage": 89.97, "elapsed_time": "2:12:58", "remaining_time": "0:14:49", "throughput": 7891.77, "total_tokens": 62963296}
+ {"current_steps": 306, "total_steps": 339, "loss": 0.0002, "lr": 2.3199616659672354e-06, "epoch": 2.7060121345835633, "percentage": 90.27, "elapsed_time": "2:13:23", "remaining_time": "0:14:23", "throughput": 7892.22, "total_tokens": 63161904}
+ {"current_steps": 307, "total_steps": 339, "loss": 0.0001, "lr": 2.182504874460006e-06, "epoch": 2.714837286265858, "percentage": 90.56, "elapsed_time": "2:13:49", "remaining_time": "0:13:56", "throughput": 7891.29, "total_tokens": 63365744}
+ {"current_steps": 308, "total_steps": 339, "loss": 0.0001, "lr": 2.049154698651989e-06, "epoch": 2.7236624379481524, "percentage": 90.86, "elapsed_time": "2:14:14", "remaining_time": "0:13:30", "throughput": 7892.66, "total_tokens": 63571808}
+ {"current_steps": 309, "total_steps": 339, "loss": 0.0001, "lr": 1.919922590793549e-06, "epoch": 2.7324875896304466, "percentage": 91.15, "elapsed_time": "2:14:39", "remaining_time": "0:13:04", "throughput": 7892.67, "total_tokens": 63768960}
+ {"current_steps": 310, "total_steps": 339, "loss": 0.0001, "lr": 1.7948196494711188e-06, "epoch": 2.741312741312741, "percentage": 91.45, "elapsed_time": "2:15:05", "remaining_time": "0:12:38", "throughput": 7892.91, "total_tokens": 63979648}
+ {"current_steps": 311, "total_steps": 339, "loss": 0.0001, "lr": 1.6738566186540627e-06, "epoch": 2.7501378929950357, "percentage": 91.74, "elapsed_time": "2:15:32", "remaining_time": "0:12:12", "throughput": 7892.87, "total_tokens": 64189712}
+ {"current_steps": 312, "total_steps": 339, "loss": 0.0001, "lr": 1.5570438867719694e-06, "epoch": 2.7589630446773303, "percentage": 92.04, "elapsed_time": "2:15:59", "remaining_time": "0:11:46", "throughput": 7892.66, "total_tokens": 64400624}
+ {"current_steps": 313, "total_steps": 339, "loss": 0.0001, "lr": 1.4443914858224938e-06, "epoch": 2.767788196359625, "percentage": 92.33, "elapsed_time": "2:16:26", "remaining_time": "0:11:20", "throughput": 7894.09, "total_tokens": 64626320}
+ {"current_steps": 314, "total_steps": 339, "loss": 0.0001, "lr": 1.3359090905097848e-06, "epoch": 2.7766133480419195, "percentage": 92.63, "elapsed_time": "2:16:51", "remaining_time": "0:10:53", "throughput": 7894.56, "total_tokens": 64826480}
+ {"current_steps": 315, "total_steps": 339, "loss": 0.0003, "lr": 1.2316060174136002e-06, "epoch": 2.785438499724214, "percentage": 92.92, "elapsed_time": "2:17:18", "remaining_time": "0:10:27", "throughput": 7893.92, "total_tokens": 65031984}
+ {"current_steps": 316, "total_steps": 339, "loss": 0.0001, "lr": 1.1314912241892183e-06, "epoch": 2.7942636514065087, "percentage": 93.22, "elapsed_time": "2:17:43", "remaining_time": "0:10:01", "throughput": 7894.65, "total_tokens": 65239456}
+ {"current_steps": 317, "total_steps": 339, "loss": 0.0002, "lr": 1.0355733087981378e-06, "epoch": 2.8030888030888033, "percentage": 93.51, "elapsed_time": "2:18:09", "remaining_time": "0:09:35", "throughput": 7893.46, "total_tokens": 65433888}
+ {"current_steps": 318, "total_steps": 339, "loss": 0.0001, "lr": 9.43860508769645e-07, "epoch": 2.8119139547710974, "percentage": 93.81, "elapsed_time": "2:18:33", "remaining_time": "0:09:09", "throughput": 7894.99, "total_tokens": 65638288}
+ {"current_steps": 319, "total_steps": 339, "loss": 0.0002, "lr": 8.563607004934193e-07, "epoch": 2.8207391064533924, "percentage": 94.1, "elapsed_time": "2:19:02", "remaining_time": "0:08:43", "throughput": 7894.0, "total_tokens": 65855952}
+ {"current_steps": 320, "total_steps": 339, "loss": 0.0002, "lr": 7.730813985430407e-07, "epoch": 2.8295642581356866, "percentage": 94.4, "elapsed_time": "2:19:28", "remaining_time": "0:08:16", "throughput": 7895.06, "total_tokens": 66070192}
+ {"current_steps": 321, "total_steps": 339, "loss": 0.0001, "lr": 6.940297550306896e-07, "epoch": 2.838389409817981, "percentage": 94.69, "elapsed_time": "2:19:56", "remaining_time": "0:07:50", "throughput": 7894.58, "total_tokens": 66283808}
+ {"current_steps": 322, "total_steps": 339, "loss": 0.0002, "lr": 6.192125589928821e-07, "epoch": 2.8472145615002757, "percentage": 94.99, "elapsed_time": "2:20:25", "remaining_time": "0:07:24", "throughput": 7893.93, "total_tokens": 66507776}
+ {"current_steps": 323, "total_steps": 339, "loss": 0.0002, "lr": 5.486362358074094e-07, "epoch": 2.8560397131825703, "percentage": 95.28, "elapsed_time": "2:20:50", "remaining_time": "0:06:58", "throughput": 7893.63, "total_tokens": 66708320}
+ {"current_steps": 324, "total_steps": 339, "loss": 0.0001, "lr": 4.823068466415615e-07, "epoch": 2.864864864864865, "percentage": 95.58, "elapsed_time": "2:21:16", "remaining_time": "0:06:32", "throughput": 7893.28, "total_tokens": 66910032}
+ {"current_steps": 325, "total_steps": 339, "loss": 0.0001, "lr": 4.202300879315446e-07, "epoch": 2.8736900165471595, "percentage": 95.87, "elapsed_time": "2:21:41", "remaining_time": "0:06:06", "throughput": 7894.28, "total_tokens": 67112784}
+ {"current_steps": 326, "total_steps": 339, "loss": 0.0001, "lr": 3.624112908932942e-07, "epoch": 2.882515168229454, "percentage": 96.17, "elapsed_time": "2:22:05", "remaining_time": "0:05:39", "throughput": 7895.04, "total_tokens": 67306464}
+ {"current_steps": 327, "total_steps": 339, "loss": 0.0001, "lr": 3.088554210646133e-07, "epoch": 2.8913403199117482, "percentage": 96.46, "elapsed_time": "2:22:29", "remaining_time": "0:05:13", "throughput": 7896.03, "total_tokens": 67504720}
+ {"current_steps": 328, "total_steps": 339, "loss": 0.0001, "lr": 2.595670778787196e-07, "epoch": 2.9001654715940433, "percentage": 96.76, "elapsed_time": "2:22:52", "remaining_time": "0:04:47", "throughput": 7896.57, "total_tokens": 67694048}
+ {"current_steps": 329, "total_steps": 339, "loss": 0.0002, "lr": 2.1455049426926666e-07, "epoch": 2.9089906232763374, "percentage": 97.05, "elapsed_time": "2:23:16", "remaining_time": "0:04:21", "throughput": 7897.7, "total_tokens": 67895008}
+ {"current_steps": 330, "total_steps": 339, "loss": 0.0001, "lr": 1.7380953630678488e-07, "epoch": 2.917815774958632, "percentage": 97.35, "elapsed_time": "2:23:41", "remaining_time": "0:03:55", "throughput": 7898.05, "total_tokens": 68093168}
+ {"current_steps": 331, "total_steps": 339, "loss": 0.0002, "lr": 1.373477028666803e-07, "epoch": 2.9266409266409266, "percentage": 97.64, "elapsed_time": "2:24:08", "remaining_time": "0:03:29", "throughput": 7898.34, "total_tokens": 68305568}
+ {"current_steps": 332, "total_steps": 339, "loss": 0.0001, "lr": 1.0516812532873621e-07, "epoch": 2.935466078323221, "percentage": 97.94, "elapsed_time": "2:24:32", "remaining_time": "0:03:02", "throughput": 7898.84, "total_tokens": 68506384}
+ {"current_steps": 333, "total_steps": 339, "loss": 0.0002, "lr": 7.727356730820035e-08, "epoch": 2.9442912300055157, "percentage": 98.23, "elapsed_time": "2:24:57", "remaining_time": "0:02:36", "throughput": 7900.67, "total_tokens": 68716160}
+ {"current_steps": 334, "total_steps": 339, "loss": 0.0001, "lr": 5.3666424418413744e-08, "epoch": 2.9531163816878103, "percentage": 98.53, "elapsed_time": "2:25:22", "remaining_time": "0:02:10", "throughput": 7901.22, "total_tokens": 68918048}
+ {"current_steps": 335, "total_steps": 339, "loss": 0.0001, "lr": 3.4348724065119685e-08, "epoch": 2.961941533370105, "percentage": 98.82, "elapsed_time": "2:25:50", "remaining_time": "0:01:44", "throughput": 7899.73, "total_tokens": 69129152}
+ {"current_steps": 336, "total_steps": 339, "loss": 0.0003, "lr": 1.9322125272297488e-08, "epoch": 2.9707666850523995, "percentage": 99.12, "elapsed_time": "2:26:15", "remaining_time": "0:01:18", "throughput": 7900.03, "total_tokens": 69328576}
+ {"current_steps": 337, "total_steps": 339, "loss": 0.0001, "lr": 8.587918539726402e-09, "epoch": 2.979591836734694, "percentage": 99.41, "elapsed_time": "2:26:39", "remaining_time": "0:00:52", "throughput": 7902.03, "total_tokens": 69537232}
+ {"current_steps": 338, "total_steps": 339, "loss": 0.0001, "lr": 2.1470257321298813e-09, "epoch": 2.988416988416988, "percentage": 99.71, "elapsed_time": "2:27:06", "remaining_time": "0:00:26", "throughput": 7903.27, "total_tokens": 69761008}
+ {"current_steps": 339, "total_steps": 339, "loss": 0.0001, "lr": 0.0, "epoch": 2.997242140099283, "percentage": 100.0, "elapsed_time": "2:27:31", "remaining_time": "0:00:00", "throughput": 7902.86, "total_tokens": 69953200}
+ {"current_steps": 339, "total_steps": 339, "epoch": 2.997242140099283, "percentage": 100.0, "elapsed_time": "2:27:32", "remaining_time": "0:00:00", "throughput": 7902.18, "total_tokens": 69953200}
plan/sft-qwen2.5-7b-instruct-graph-planning-bs128/trainer_state.json ADDED
@@ -0,0 +1,2755 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 2.997242140099283,
5
+ "eval_steps": 500,
6
+ "global_step": 339,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.00882515168229454,
13
+ "grad_norm": 0.40674829483032227,
14
+ "learning_rate": 9.999785297426788e-05,
15
+ "loss": 0.2055,
16
+ "num_input_tokens_seen": 203120,
17
+ "step": 1
18
+ },
19
+ {
20
+ "epoch": 0.01765030336458908,
21
+ "grad_norm": 0.4242195785045624,
22
+ "learning_rate": 9.999141208146028e-05,
23
+ "loss": 0.1902,
24
+ "num_input_tokens_seen": 406048,
25
+ "step": 2
26
+ },
27
+ {
28
+ "epoch": 0.026475455046883617,
29
+ "grad_norm": 0.3813261389732361,
30
+ "learning_rate": 9.998067787472772e-05,
31
+ "loss": 0.1421,
32
+ "num_input_tokens_seen": 614736,
33
+ "step": 3
34
+ },
35
+ {
36
+ "epoch": 0.03530060672917816,
37
+ "grad_norm": 0.28003761172294617,
38
+ "learning_rate": 9.996565127593488e-05,
39
+ "loss": 0.1102,
40
+ "num_input_tokens_seen": 816416,
41
+ "step": 4
42
+ },
43
+ {
44
+ "epoch": 0.0441257584114727,
45
+ "grad_norm": 0.25300610065460205,
46
+ "learning_rate": 9.994633357558158e-05,
47
+ "loss": 0.0801,
48
+ "num_input_tokens_seen": 1024272,
49
+ "step": 5
50
+ },
51
+ {
52
+ "epoch": 0.052950910093767234,
53
+ "grad_norm": 0.2328871786594391,
54
+ "learning_rate": 9.99227264326918e-05,
55
+ "loss": 0.0574,
56
+ "num_input_tokens_seen": 1228192,
57
+ "step": 6
58
+ },
59
+ {
60
+ "epoch": 0.06177606177606178,
61
+ "grad_norm": 0.17362241446971893,
62
+ "learning_rate": 9.989483187467127e-05,
63
+ "loss": 0.0401,
64
+ "num_input_tokens_seen": 1434992,
65
+ "step": 7
66
+ },
67
+ {
68
+ "epoch": 0.07060121345835632,
69
+ "grad_norm": 0.09250874817371368,
70
+ "learning_rate": 9.986265229713331e-05,
71
+ "loss": 0.0295,
72
+ "num_input_tokens_seen": 1646560,
73
+ "step": 8
74
+ },
75
+ {
76
+ "epoch": 0.07942636514065085,
77
+ "grad_norm": 0.08936059474945068,
78
+ "learning_rate": 9.982619046369321e-05,
79
+ "loss": 0.0262,
80
+ "num_input_tokens_seen": 1838624,
81
+ "step": 9
82
+ },
83
+ {
84
+ "epoch": 0.0882515168229454,
85
+ "grad_norm": 0.08603595942258835,
86
+ "learning_rate": 9.978544950573074e-05,
87
+ "loss": 0.0263,
88
+ "num_input_tokens_seen": 2053488,
89
+ "step": 10
90
+ },
91
+ {
92
+ "epoch": 0.09707666850523994,
93
+ "grad_norm": 0.07848804444074631,
94
+ "learning_rate": 9.974043292212128e-05,
95
+ "loss": 0.022,
96
+ "num_input_tokens_seen": 2253680,
97
+ "step": 11
98
+ },
99
+ {
100
+ "epoch": 0.10590182018753447,
101
+ "grad_norm": 0.06246768683195114,
102
+ "learning_rate": 9.96911445789354e-05,
103
+ "loss": 0.0202,
104
+ "num_input_tokens_seen": 2442000,
105
+ "step": 12
106
+ },
107
+ {
108
+ "epoch": 0.11472697186982901,
109
+ "grad_norm": 0.048259809613227844,
110
+ "learning_rate": 9.963758870910671e-05,
111
+ "loss": 0.0202,
112
+ "num_input_tokens_seen": 2655920,
113
+ "step": 13
114
+ },
115
+ {
116
+ "epoch": 0.12355212355212356,
117
+ "grad_norm": 0.03917853534221649,
118
+ "learning_rate": 9.957976991206846e-05,
119
+ "loss": 0.0178,
120
+ "num_input_tokens_seen": 2874064,
121
+ "step": 14
122
+ },
123
+ {
124
+ "epoch": 0.13237727523441808,
125
+ "grad_norm": 0.040510393679142,
126
+ "learning_rate": 9.951769315335844e-05,
127
+ "loss": 0.0158,
128
+ "num_input_tokens_seen": 3071744,
129
+ "step": 15
130
+ },
131
+ {
132
+ "epoch": 0.14120242691671264,
133
+ "grad_norm": 0.035558607429265976,
134
+ "learning_rate": 9.945136376419259e-05,
135
+ "loss": 0.0159,
136
+ "num_input_tokens_seen": 3277904,
137
+ "step": 16
138
+ },
139
+ {
140
+ "epoch": 0.15002757859900717,
141
+ "grad_norm": 0.034995947033166885,
142
+ "learning_rate": 9.938078744100712e-05,
143
+ "loss": 0.0147,
144
+ "num_input_tokens_seen": 3493136,
145
+ "step": 17
146
+ },
147
+ {
148
+ "epoch": 0.1588527302813017,
149
+ "grad_norm": 0.03230876475572586,
150
+ "learning_rate": 9.930597024496931e-05,
151
+ "loss": 0.0138,
152
+ "num_input_tokens_seen": 3704288,
153
+ "step": 18
154
+ },
155
+ {
156
+ "epoch": 0.16767788196359626,
157
+ "grad_norm": 0.028281500563025475,
158
+ "learning_rate": 9.922691860145696e-05,
159
+ "loss": 0.0128,
160
+ "num_input_tokens_seen": 3904352,
161
+ "step": 19
162
+ },
163
+ {
164
+ "epoch": 0.1765030336458908,
165
+ "grad_norm": 0.026264235377311707,
166
+ "learning_rate": 9.914363929950659e-05,
167
+ "loss": 0.0124,
168
+ "num_input_tokens_seen": 4113888,
169
+ "step": 20
170
+ },
171
+ {
172
+ "epoch": 0.18532818532818532,
173
+ "grad_norm": 0.023232094943523407,
174
+ "learning_rate": 9.905613949123036e-05,
175
+ "loss": 0.0116,
176
+ "num_input_tokens_seen": 4323504,
177
+ "step": 21
178
+ },
179
+ {
180
+ "epoch": 0.19415333701047988,
181
+ "grad_norm": 0.02393435873091221,
182
+ "learning_rate": 9.896442669120187e-05,
183
+ "loss": 0.0109,
184
+ "num_input_tokens_seen": 4523008,
185
+ "step": 22
186
+ },
187
+ {
188
+ "epoch": 0.2029784886927744,
189
+ "grad_norm": 0.024421676993370056,
190
+ "learning_rate": 9.886850877581079e-05,
191
+ "loss": 0.0106,
192
+ "num_input_tokens_seen": 4732864,
193
+ "step": 23
194
+ },
195
+ {
196
+ "epoch": 0.21180364037506894,
197
+ "grad_norm": 0.022869078442454338,
198
+ "learning_rate": 9.876839398258641e-05,
199
+ "loss": 0.0099,
200
+ "num_input_tokens_seen": 4941936,
201
+ "step": 24
202
+ },
203
+ {
204
+ "epoch": 0.2206287920573635,
205
+ "grad_norm": 0.025933578610420227,
206
+ "learning_rate": 9.866409090949022e-05,
207
+ "loss": 0.0109,
208
+ "num_input_tokens_seen": 5143584,
209
+ "step": 25
210
+ },
211
+ {
212
+ "epoch": 0.22945394373965802,
213
+ "grad_norm": 0.02043001353740692,
214
+ "learning_rate": 9.855560851417752e-05,
215
+ "loss": 0.0084,
216
+ "num_input_tokens_seen": 5351024,
217
+ "step": 26
218
+ },
219
+ {
220
+ "epoch": 0.23827909542195255,
221
+ "grad_norm": 0.02140035293996334,
222
+ "learning_rate": 9.844295611322804e-05,
223
+ "loss": 0.0081,
224
+ "num_input_tokens_seen": 5563760,
225
+ "step": 27
226
+ },
227
+ {
228
+ "epoch": 0.2471042471042471,
229
+ "grad_norm": 0.019948888570070267,
230
+ "learning_rate": 9.832614338134595e-05,
231
+ "loss": 0.0078,
232
+ "num_input_tokens_seen": 5772416,
233
+ "step": 28
234
+ },
235
+ {
236
+ "epoch": 0.25592939878654164,
237
+ "grad_norm": 0.021153336390852928,
238
+ "learning_rate": 9.820518035052889e-05,
239
+ "loss": 0.0081,
240
+ "num_input_tokens_seen": 5974464,
241
+ "step": 29
242
+ },
243
+ {
244
+ "epoch": 0.26475455046883617,
245
+ "grad_norm": 0.02002059668302536,
246
+ "learning_rate": 9.808007740920646e-05,
247
+ "loss": 0.0087,
248
+ "num_input_tokens_seen": 6193520,
249
+ "step": 30
250
+ },
251
+ {
252
+ "epoch": 0.2735797021511307,
253
+ "grad_norm": 0.029256833717226982,
254
+ "learning_rate": 9.795084530134801e-05,
255
+ "loss": 0.0079,
256
+ "num_input_tokens_seen": 6399792,
257
+ "step": 31
258
+ },
259
+ {
260
+ "epoch": 0.2824048538334253,
261
+ "grad_norm": 0.02395695447921753,
262
+ "learning_rate": 9.781749512553999e-05,
263
+ "loss": 0.0086,
264
+ "num_input_tokens_seen": 6603584,
265
+ "step": 32
266
+ },
267
+ {
268
+ "epoch": 0.2912300055157198,
269
+ "grad_norm": 0.02185678854584694,
270
+ "learning_rate": 9.768003833403278e-05,
271
+ "loss": 0.0079,
272
+ "num_input_tokens_seen": 6810656,
273
+ "step": 33
274
+ },
275
+ {
276
+ "epoch": 0.30005515719801434,
277
+ "grad_norm": 0.02072463184595108,
278
+ "learning_rate": 9.753848673175707e-05,
279
+ "loss": 0.0069,
280
+ "num_input_tokens_seen": 7001792,
281
+ "step": 34
282
+ },
283
+ {
284
+ "epoch": 0.3088803088803089,
285
+ "grad_norm": 0.018024709075689316,
286
+ "learning_rate": 9.739285247531018e-05,
287
+ "loss": 0.0064,
288
+ "num_input_tokens_seen": 7205952,
289
+ "step": 35
290
+ },
291
+ {
292
+ "epoch": 0.3177054605626034,
293
+ "grad_norm": 0.019729286432266235,
294
+ "learning_rate": 9.724314807191195e-05,
295
+ "loss": 0.006,
296
+ "num_input_tokens_seen": 7406304,
297
+ "step": 36
298
+ },
299
+ {
300
+ "epoch": 0.32653061224489793,
301
+ "grad_norm": 0.01830880530178547,
302
+ "learning_rate": 9.708938637833065e-05,
303
+ "loss": 0.0067,
304
+ "num_input_tokens_seen": 7629568,
305
+ "step": 37
306
+ },
307
+ {
308
+ "epoch": 0.3353557639271925,
309
+ "grad_norm": 0.021113887429237366,
310
+ "learning_rate": 9.693158059977878e-05,
311
+ "loss": 0.0063,
312
+ "num_input_tokens_seen": 7845200,
313
+ "step": 38
314
+ },
315
+ {
316
+ "epoch": 0.34418091560948705,
317
+ "grad_norm": 0.015138108283281326,
318
+ "learning_rate": 9.676974428877901e-05,
319
+ "loss": 0.0058,
320
+ "num_input_tokens_seen": 8061840,
321
+ "step": 39
322
+ },
323
+ {
324
+ "epoch": 0.3530060672917816,
325
+ "grad_norm": 0.017043087631464005,
326
+ "learning_rate": 9.660389134400033e-05,
327
+ "loss": 0.0061,
328
+ "num_input_tokens_seen": 8279664,
329
+ "step": 40
330
+ },
331
+ {
332
+ "epoch": 0.3618312189740761,
333
+ "grad_norm": 0.01955767348408699,
334
+ "learning_rate": 9.643403600906433e-05,
335
+ "loss": 0.0055,
336
+ "num_input_tokens_seen": 8475376,
337
+ "step": 41
338
+ },
339
+ {
340
+ "epoch": 0.37065637065637064,
341
+ "grad_norm": 0.014688636176288128,
342
+ "learning_rate": 9.626019287132203e-05,
343
+ "loss": 0.005,
344
+ "num_input_tokens_seen": 8691760,
345
+ "step": 42
346
+ },
347
+ {
348
+ "epoch": 0.3794815223386652,
349
+ "grad_norm": 0.01973150670528412,
350
+ "learning_rate": 9.608237686060099e-05,
351
+ "loss": 0.006,
352
+ "num_input_tokens_seen": 8884736,
353
+ "step": 43
354
+ },
355
+ {
356
+ "epoch": 0.38830667402095975,
357
+ "grad_norm": 0.01489401888102293,
358
+ "learning_rate": 9.590060324792327e-05,
359
+ "loss": 0.0048,
360
+ "num_input_tokens_seen": 9084064,
361
+ "step": 44
362
+ },
363
+ {
364
+ "epoch": 0.3971318257032543,
365
+ "grad_norm": 0.015995647758245468,
366
+ "learning_rate": 9.571488764419381e-05,
367
+ "loss": 0.0047,
368
+ "num_input_tokens_seen": 9302144,
369
+ "step": 45
370
+ },
371
+ {
372
+ "epoch": 0.4059569773855488,
373
+ "grad_norm": 0.01859475113451481,
374
+ "learning_rate": 9.552524599885981e-05,
375
+ "loss": 0.0053,
376
+ "num_input_tokens_seen": 9517456,
377
+ "step": 46
378
+ },
379
+ {
380
+ "epoch": 0.41478212906784334,
381
+ "grad_norm": 0.018746482208371162,
382
+ "learning_rate": 9.533169459854098e-05,
383
+ "loss": 0.0044,
384
+ "num_input_tokens_seen": 9710768,
385
+ "step": 47
386
+ },
387
+ {
388
+ "epoch": 0.42360728075013787,
389
+ "grad_norm": 0.017155013978481293,
390
+ "learning_rate": 9.513425006563079e-05,
391
+ "loss": 0.0043,
392
+ "num_input_tokens_seen": 9914064,
393
+ "step": 48
394
+ },
395
+ {
396
+ "epoch": 0.43243243243243246,
397
+ "grad_norm": 0.015938682481646538,
398
+ "learning_rate": 9.493292935686895e-05,
399
+ "loss": 0.0041,
400
+ "num_input_tokens_seen": 10120208,
401
+ "step": 49
402
+ },
403
+ {
404
+ "epoch": 0.441257584114727,
405
+ "grad_norm": 0.017114240676164627,
406
+ "learning_rate": 9.472774976188515e-05,
407
+ "loss": 0.0044,
408
+ "num_input_tokens_seen": 10346304,
409
+ "step": 50
410
+ },
411
+ {
412
+ "epoch": 0.4500827357970215,
413
+ "grad_norm": 0.014332287944853306,
414
+ "learning_rate": 9.451872890171419e-05,
415
+ "loss": 0.004,
416
+ "num_input_tokens_seen": 10547984,
417
+ "step": 51
418
+ },
419
+ {
420
+ "epoch": 0.45890788747931605,
421
+ "grad_norm": 0.017018554732203484,
422
+ "learning_rate": 9.43058847272827e-05,
423
+ "loss": 0.0045,
424
+ "num_input_tokens_seen": 10754288,
425
+ "step": 52
426
+ },
427
+ {
428
+ "epoch": 0.4677330391616106,
429
+ "grad_norm": 0.013670100830495358,
430
+ "learning_rate": 9.408923551786743e-05,
431
+ "loss": 0.0028,
432
+ "num_input_tokens_seen": 10942704,
433
+ "step": 53
434
+ },
435
+ {
436
+ "epoch": 0.4765581908439051,
437
+ "grad_norm": 0.016749229282140732,
438
+ "learning_rate": 9.386879987952549e-05,
439
+ "loss": 0.0034,
440
+ "num_input_tokens_seen": 11150864,
441
+ "step": 54
442
+ },
443
+ {
444
+ "epoch": 0.4853833425261997,
445
+ "grad_norm": 0.01554529182612896,
446
+ "learning_rate": 9.364459674349641e-05,
447
+ "loss": 0.0042,
448
+ "num_input_tokens_seen": 11367728,
449
+ "step": 55
450
+ },
451
+ {
452
+ "epoch": 0.4942084942084942,
453
+ "grad_norm": 0.015070905908942223,
454
+ "learning_rate": 9.341664536457626e-05,
455
+ "loss": 0.0028,
456
+ "num_input_tokens_seen": 11575536,
457
+ "step": 56
458
+ },
459
+ {
460
+ "epoch": 0.5030336458907887,
461
+ "grad_norm": 0.016440849751234055,
462
+ "learning_rate": 9.31849653194641e-05,
463
+ "loss": 0.0035,
464
+ "num_input_tokens_seen": 11781328,
465
+ "step": 57
466
+ },
467
+ {
468
+ "epoch": 0.5118587975730833,
469
+ "grad_norm": 0.014468475244939327,
470
+ "learning_rate": 9.294957650508065e-05,
471
+ "loss": 0.0029,
472
+ "num_input_tokens_seen": 11981232,
473
+ "step": 58
474
+ },
475
+ {
476
+ "epoch": 0.5206839492553779,
477
+ "grad_norm": 0.014588565565645695,
478
+ "learning_rate": 9.27104991368596e-05,
479
+ "loss": 0.0028,
480
+ "num_input_tokens_seen": 12187296,
481
+ "step": 59
482
+ },
483
+ {
484
+ "epoch": 0.5295091009376723,
485
+ "grad_norm": 0.0141281234100461,
486
+ "learning_rate": 9.246775374701139e-05,
487
+ "loss": 0.0027,
488
+ "num_input_tokens_seen": 12385632,
489
+ "step": 60
490
+ },
491
+ {
492
+ "epoch": 0.5383342526199669,
493
+ "grad_norm": 0.013463583774864674,
494
+ "learning_rate": 9.222136118275995e-05,
495
+ "loss": 0.0022,
496
+ "num_input_tokens_seen": 12588928,
497
+ "step": 61
498
+ },
499
+ {
500
+ "epoch": 0.5471594043022614,
501
+ "grad_norm": 0.014033553190529346,
502
+ "learning_rate": 9.197134260455233e-05,
503
+ "loss": 0.0027,
504
+ "num_input_tokens_seen": 12825616,
505
+ "step": 62
506
+ },
507
+ {
508
+ "epoch": 0.555984555984556,
509
+ "grad_norm": 0.013906535692512989,
510
+ "learning_rate": 9.171771948424137e-05,
511
+ "loss": 0.0025,
512
+ "num_input_tokens_seen": 13044976,
513
+ "step": 63
514
+ },
515
+ {
516
+ "epoch": 0.5648097076668506,
517
+ "grad_norm": 0.012418747879564762,
518
+ "learning_rate": 9.146051360324166e-05,
519
+ "loss": 0.0025,
520
+ "num_input_tokens_seen": 13255280,
521
+ "step": 64
522
+ },
523
+ {
524
+ "epoch": 0.573634859349145,
525
+ "grad_norm": 0.015126565471291542,
526
+ "learning_rate": 9.119974705065901e-05,
527
+ "loss": 0.0022,
528
+ "num_input_tokens_seen": 13463456,
529
+ "step": 65
530
+ },
531
+ {
532
+ "epoch": 0.5824600110314396,
533
+ "grad_norm": 0.013123284094035625,
534
+ "learning_rate": 9.093544222139337e-05,
535
+ "loss": 0.0023,
536
+ "num_input_tokens_seen": 13667744,
537
+ "step": 66
538
+ },
539
+ {
540
+ "epoch": 0.5912851627137341,
541
+ "grad_norm": 0.014246366918087006,
542
+ "learning_rate": 9.066762181421552e-05,
543
+ "loss": 0.0024,
544
+ "num_input_tokens_seen": 13874240,
545
+ "step": 67
546
+ },
547
+ {
548
+ "epoch": 0.6001103143960287,
549
+ "grad_norm": 0.011402356438338757,
550
+ "learning_rate": 9.039630882981768e-05,
551
+ "loss": 0.0015,
552
+ "num_input_tokens_seen": 14081392,
553
+ "step": 68
554
+ },
555
+ {
556
+ "epoch": 0.6089354660783233,
557
+ "grad_norm": 0.014725148677825928,
558
+ "learning_rate": 9.012152656883823e-05,
559
+ "loss": 0.0033,
560
+ "num_input_tokens_seen": 14300896,
561
+ "step": 69
562
+ },
563
+ {
564
+ "epoch": 0.6177606177606177,
565
+ "grad_norm": 0.014837515540421009,
566
+ "learning_rate": 8.984329862986056e-05,
567
+ "loss": 0.0021,
568
+ "num_input_tokens_seen": 14523968,
569
+ "step": 70
570
+ },
571
+ {
572
+ "epoch": 0.6265857694429123,
573
+ "grad_norm": 0.014493652619421482,
574
+ "learning_rate": 8.956164890738643e-05,
575
+ "loss": 0.0013,
576
+ "num_input_tokens_seen": 14728960,
577
+ "step": 71
578
+ },
579
+ {
580
+ "epoch": 0.6354109211252068,
581
+ "grad_norm": 0.011806878261268139,
582
+ "learning_rate": 8.927660158978392e-05,
583
+ "loss": 0.0016,
584
+ "num_input_tokens_seen": 14912480,
585
+ "step": 72
586
+ },
587
+ {
588
+ "epoch": 0.6442360728075014,
589
+ "grad_norm": 0.01818985864520073,
590
+ "learning_rate": 8.898818115721008e-05,
591
+ "loss": 0.0019,
592
+ "num_input_tokens_seen": 15114608,
593
+ "step": 73
594
+ },
595
+ {
596
+ "epoch": 0.6530612244897959,
597
+ "grad_norm": 0.015412255190312862,
598
+ "learning_rate": 8.86964123795085e-05,
599
+ "loss": 0.0017,
600
+ "num_input_tokens_seen": 15326112,
601
+ "step": 74
602
+ },
603
+ {
604
+ "epoch": 0.6618863761720905,
605
+ "grad_norm": 0.013063928112387657,
606
+ "learning_rate": 8.84013203140821e-05,
607
+ "loss": 0.0015,
608
+ "num_input_tokens_seen": 15545248,
609
+ "step": 75
610
+ },
611
+ {
612
+ "epoch": 0.670711527854385,
613
+ "grad_norm": 0.016336796805262566,
614
+ "learning_rate": 8.810293030374126e-05,
615
+ "loss": 0.0017,
616
+ "num_input_tokens_seen": 15751872,
617
+ "step": 76
618
+ },
619
+ {
620
+ "epoch": 0.6795366795366795,
621
+ "grad_norm": 0.010313590988516808,
622
+ "learning_rate": 8.780126797452713e-05,
623
+ "loss": 0.001,
624
+ "num_input_tokens_seen": 15957872,
625
+ "step": 77
626
+ },
627
+ {
628
+ "epoch": 0.6883618312189741,
629
+ "grad_norm": 0.015468253754079342,
630
+ "learning_rate": 8.749635923351107e-05,
631
+ "loss": 0.0018,
632
+ "num_input_tokens_seen": 16162640,
633
+ "step": 78
634
+ },
635
+ {
636
+ "epoch": 0.6971869829012686,
637
+ "grad_norm": 0.01543041318655014,
638
+ "learning_rate": 8.71882302665696e-05,
639
+ "loss": 0.001,
640
+ "num_input_tokens_seen": 16352368,
641
+ "step": 79
642
+ },
643
+ {
644
+ "epoch": 0.7060121345835632,
645
+ "grad_norm": 0.01957864873111248,
646
+ "learning_rate": 8.687690753613554e-05,
647
+ "loss": 0.0014,
648
+ "num_input_tokens_seen": 16563920,
649
+ "step": 80
650
+ },
651
+ {
652
+ "epoch": 0.7148372862658577,
653
+ "grad_norm": 0.012508533895015717,
654
+ "learning_rate": 8.656241777892543e-05,
655
+ "loss": 0.001,
656
+ "num_input_tokens_seen": 16759024,
657
+ "step": 81
658
+ },
659
+ {
660
+ "epoch": 0.7236624379481522,
661
+ "grad_norm": 0.012273616157472134,
662
+ "learning_rate": 8.624478800364332e-05,
663
+ "loss": 0.0013,
664
+ "num_input_tokens_seen": 16973728,
665
+ "step": 82
666
+ },
667
+ {
668
+ "epoch": 0.7324875896304468,
669
+ "grad_norm": 0.01503776852041483,
670
+ "learning_rate": 8.592404548866123e-05,
671
+ "loss": 0.0012,
672
+ "num_input_tokens_seen": 17162752,
673
+ "step": 83
674
+ },
675
+ {
676
+ "epoch": 0.7413127413127413,
677
+ "grad_norm": 0.014227951876819134,
678
+ "learning_rate": 8.560021777967649e-05,
679
+ "loss": 0.0013,
680
+ "num_input_tokens_seen": 17364064,
681
+ "step": 84
682
+ },
683
+ {
684
+ "epoch": 0.7501378929950359,
685
+ "grad_norm": 0.01252016518265009,
686
+ "learning_rate": 8.527333268734606e-05,
687
+ "loss": 0.0011,
688
+ "num_input_tokens_seen": 17564576,
689
+ "step": 85
690
+ },
691
+ {
692
+ "epoch": 0.7589630446773304,
693
+ "grad_norm": 0.011520475149154663,
694
+ "learning_rate": 8.494341828489812e-05,
695
+ "loss": 0.0037,
696
+ "num_input_tokens_seen": 17778752,
697
+ "step": 86
698
+ },
699
+ {
700
+ "epoch": 0.7677881963596249,
701
+ "grad_norm": 0.010531144216656685,
702
+ "learning_rate": 8.461050290572114e-05,
703
+ "loss": 0.0007,
704
+ "num_input_tokens_seen": 17982448,
705
+ "step": 87
706
+ },
707
+ {
708
+ "epoch": 0.7766133480419195,
709
+ "grad_norm": 0.010875461623072624,
710
+ "learning_rate": 8.427461514093056e-05,
711
+ "loss": 0.0008,
712
+ "num_input_tokens_seen": 18180608,
713
+ "step": 88
714
+ },
715
+ {
716
+ "epoch": 0.785438499724214,
717
+ "grad_norm": 0.007611530367285013,
718
+ "learning_rate": 8.393578383691329e-05,
719
+ "loss": 0.0006,
720
+ "num_input_tokens_seen": 18384496,
721
+ "step": 89
722
+ },
723
+ {
724
+ "epoch": 0.7942636514065086,
725
+ "grad_norm": 0.010159923695027828,
726
+ "learning_rate": 8.359403809285053e-05,
727
+ "loss": 0.001,
728
+ "num_input_tokens_seen": 18587744,
729
+ "step": 90
730
+ },
731
+ {
732
+ "epoch": 0.803088803088803,
733
+ "grad_norm": 0.011715343222022057,
734
+ "learning_rate": 8.324940725821852e-05,
735
+ "loss": 0.001,
736
+ "num_input_tokens_seen": 18791056,
737
+ "step": 91
738
+ },
739
+ {
740
+ "epoch": 0.8119139547710976,
741
+ "grad_norm": 0.012972251512110233,
742
+ "learning_rate": 8.290192093026805e-05,
743
+ "loss": 0.0008,
744
+ "num_input_tokens_seen": 18985008,
745
+ "step": 92
746
+ },
747
+ {
748
+ "epoch": 0.8207391064533922,
749
+ "grad_norm": 0.0135871022939682,
750
+ "learning_rate": 8.255160895148263e-05,
751
+ "loss": 0.0014,
752
+ "num_input_tokens_seen": 19193888,
753
+ "step": 93
754
+ },
755
+ {
756
+ "epoch": 0.8295642581356867,
757
+ "grad_norm": 0.011914449743926525,
758
+ "learning_rate": 8.219850140701557e-05,
759
+ "loss": 0.001,
760
+ "num_input_tokens_seen": 19399552,
761
+ "step": 94
762
+ },
763
+ {
764
+ "epoch": 0.8383894098179813,
765
+ "grad_norm": 0.009591113775968552,
766
+ "learning_rate": 8.184262862210624e-05,
767
+ "loss": 0.0007,
768
+ "num_input_tokens_seen": 19605120,
769
+ "step": 95
770
+ },
771
+ {
772
+ "epoch": 0.8472145615002757,
773
+ "grad_norm": 0.009942690841853619,
774
+ "learning_rate": 8.148402115947571e-05,
775
+ "loss": 0.0008,
776
+ "num_input_tokens_seen": 19802480,
777
+ "step": 96
778
+ },
779
+ {
780
+ "epoch": 0.8560397131825703,
781
+ "grad_norm": 0.012667879462242126,
782
+ "learning_rate": 8.112270981670196e-05,
783
+ "loss": 0.0011,
784
+ "num_input_tokens_seen": 20009520,
785
+ "step": 97
786
+ },
787
+ {
788
+ "epoch": 0.8648648648648649,
789
+ "grad_norm": 0.010983509942889214,
790
+ "learning_rate": 8.075872562357501e-05,
791
+ "loss": 0.0009,
792
+ "num_input_tokens_seen": 20235888,
793
+ "step": 98
794
+ },
795
+ {
796
+ "epoch": 0.8736900165471594,
797
+ "grad_norm": 0.011479397304356098,
798
+ "learning_rate": 8.039209983943201e-05,
799
+ "loss": 0.0006,
800
+ "num_input_tokens_seen": 20433600,
801
+ "step": 99
802
+ },
803
+ {
804
+ "epoch": 0.882515168229454,
805
+ "grad_norm": 0.012184002436697483,
806
+ "learning_rate": 8.002286395047267e-05,
807
+ "loss": 0.0009,
808
+ "num_input_tokens_seen": 20631664,
809
+ "step": 100
810
+ },
811
+ {
812
+ "epoch": 0.8913403199117484,
813
+ "grad_norm": 0.009395604953169823,
814
+ "learning_rate": 7.965104966705518e-05,
815
+ "loss": 0.0006,
816
+ "num_input_tokens_seen": 20833056,
817
+ "step": 101
818
+ },
819
+ {
820
+ "epoch": 0.900165471594043,
821
+ "grad_norm": 0.013585143722593784,
822
+ "learning_rate": 7.927668892097289e-05,
823
+ "loss": 0.0008,
824
+ "num_input_tokens_seen": 21051104,
825
+ "step": 102
826
+ },
827
+ {
828
+ "epoch": 0.9089906232763376,
829
+ "grad_norm": 0.008882119320333004,
830
+ "learning_rate": 7.889981386271201e-05,
831
+ "loss": 0.0005,
832
+ "num_input_tokens_seen": 21246080,
833
+ "step": 103
834
+ },
835
+ {
836
+ "epoch": 0.9178157749586321,
837
+ "grad_norm": 0.010433576069772243,
838
+ "learning_rate": 7.852045685869045e-05,
839
+ "loss": 0.0006,
840
+ "num_input_tokens_seen": 21439696,
841
+ "step": 104
842
+ },
843
+ {
844
+ "epoch": 0.9266409266409267,
845
+ "grad_norm": 0.01474383007735014,
846
+ "learning_rate": 7.813865048847819e-05,
847
+ "loss": 0.0008,
848
+ "num_input_tokens_seen": 21648432,
849
+ "step": 105
850
+ },
851
+ {
852
+ "epoch": 0.9354660783232212,
853
+ "grad_norm": 0.011113091371953487,
854
+ "learning_rate": 7.775442754199928e-05,
855
+ "loss": 0.0007,
856
+ "num_input_tokens_seen": 21864368,
857
+ "step": 106
858
+ },
859
+ {
860
+ "epoch": 0.9442912300055157,
861
+ "grad_norm": 0.009181715548038483,
862
+ "learning_rate": 7.736782101671587e-05,
863
+ "loss": 0.0006,
864
+ "num_input_tokens_seen": 22061968,
865
+ "step": 107
866
+ },
867
+ {
868
+ "epoch": 0.9531163816878102,
869
+ "grad_norm": 0.0140100521966815,
870
+ "learning_rate": 7.697886411479423e-05,
871
+ "loss": 0.0012,
872
+ "num_input_tokens_seen": 22278128,
873
+ "step": 108
874
+ },
875
+ {
876
+ "epoch": 0.9619415333701048,
877
+ "grad_norm": 0.007349591236561537,
878
+ "learning_rate": 7.658759024025349e-05,
879
+ "loss": 0.0004,
880
+ "num_input_tokens_seen": 22469056,
881
+ "step": 109
882
+ },
883
+ {
884
+ "epoch": 0.9707666850523994,
885
+ "grad_norm": 0.01252900529652834,
886
+ "learning_rate": 7.619403299609668e-05,
887
+ "loss": 0.0008,
888
+ "num_input_tokens_seen": 22662128,
889
+ "step": 110
890
+ },
891
+ {
892
+ "epoch": 0.9795918367346939,
893
+ "grad_norm": 0.012083148583769798,
894
+ "learning_rate": 7.579822618142505e-05,
895
+ "loss": 0.0007,
896
+ "num_input_tokens_seen": 22883216,
897
+ "step": 111
898
+ },
899
+ {
900
+ "epoch": 0.9884169884169884,
901
+ "grad_norm": 0.010517132468521595,
902
+ "learning_rate": 7.540020378853523e-05,
903
+ "loss": 0.0005,
904
+ "num_input_tokens_seen": 23085888,
905
+ "step": 112
906
+ },
907
+ {
908
+ "epoch": 0.9972421400992829,
909
+ "grad_norm": 0.01143716461956501,
910
+ "learning_rate": 7.500000000000001e-05,
911
+ "loss": 0.0007,
912
+ "num_input_tokens_seen": 23307520,
913
+ "step": 113
914
+ },
915
+ {
916
+ "epoch": 1.0088251516822946,
917
+ "grad_norm": 0.0287212785333395,
918
+ "learning_rate": 7.459764918573264e-05,
919
+ "loss": 0.0014,
920
+ "num_input_tokens_seen": 23564192,
921
+ "step": 114
922
+ },
923
+ {
924
+ "epoch": 1.0176503033645892,
925
+ "grad_norm": 0.010353313758969307,
926
+ "learning_rate": 7.419318590003523e-05,
927
+ "loss": 0.0007,
928
+ "num_input_tokens_seen": 23768816,
929
+ "step": 115
930
+ },
931
+ {
932
+ "epoch": 1.0264754550468835,
933
+ "grad_norm": 0.013796573504805565,
934
+ "learning_rate": 7.378664487863103e-05,
935
+ "loss": 0.0006,
936
+ "num_input_tokens_seen": 23974096,
937
+ "step": 116
938
+ },
939
+ {
940
+ "epoch": 1.0353006067291781,
941
+ "grad_norm": 0.006352484691888094,
942
+ "learning_rate": 7.33780610356814e-05,
943
+ "loss": 0.0003,
944
+ "num_input_tokens_seen": 24172256,
945
+ "step": 117
946
+ },
947
+ {
948
+ "epoch": 1.0441257584114727,
949
+ "grad_norm": 0.007957457564771175,
950
+ "learning_rate": 7.296746946078736e-05,
951
+ "loss": 0.0004,
952
+ "num_input_tokens_seen": 24362208,
953
+ "step": 118
954
+ },
955
+ {
956
+ "epoch": 1.0529509100937673,
957
+ "grad_norm": 0.0068214968778193,
958
+ "learning_rate": 7.255490541597594e-05,
959
+ "loss": 0.0003,
960
+ "num_input_tokens_seen": 24562224,
961
+ "step": 119
962
+ },
963
+ {
964
+ "epoch": 1.0617760617760619,
965
+ "grad_norm": 0.00877879373729229,
966
+ "learning_rate": 7.214040433267198e-05,
967
+ "loss": 0.0005,
968
+ "num_input_tokens_seen": 24776528,
969
+ "step": 120
970
+ },
971
+ {
972
+ "epoch": 1.0706012134583562,
973
+ "grad_norm": 0.007200079504400492,
974
+ "learning_rate": 7.172400180865513e-05,
975
+ "loss": 0.0003,
976
+ "num_input_tokens_seen": 24985008,
977
+ "step": 121
978
+ },
979
+ {
980
+ "epoch": 1.0794263651406508,
981
+ "grad_norm": 0.010829208418726921,
982
+ "learning_rate": 7.130573360500276e-05,
983
+ "loss": 0.0005,
984
+ "num_input_tokens_seen": 25200720,
985
+ "step": 122
986
+ },
987
+ {
988
+ "epoch": 1.0882515168229454,
989
+ "grad_norm": 0.010170291177928448,
990
+ "learning_rate": 7.088563564301873e-05,
991
+ "loss": 0.0004,
992
+ "num_input_tokens_seen": 25413568,
993
+ "step": 123
994
+ },
995
+ {
996
+ "epoch": 1.09707666850524,
997
+ "grad_norm": 0.007032219786196947,
998
+ "learning_rate": 7.046374400114842e-05,
999
+ "loss": 0.0003,
1000
+ "num_input_tokens_seen": 25608576,
1001
+ "step": 124
1002
+ },
1003
+ {
1004
+ "epoch": 1.1059018201875346,
1005
+ "grad_norm": 0.00843306165188551,
1006
+ "learning_rate": 7.004009491188022e-05,
1007
+ "loss": 0.0003,
1008
+ "num_input_tokens_seen": 25818400,
1009
+ "step": 125
1010
+ },
1011
+ {
1012
+ "epoch": 1.114726971869829,
1013
+ "grad_norm": 0.00947788916528225,
1014
+ "learning_rate": 6.961472475863405e-05,
1015
+ "loss": 0.0005,
1016
+ "num_input_tokens_seen": 26037424,
1017
+ "step": 126
1018
+ },
1019
+ {
1020
+ "epoch": 1.1235521235521235,
1021
+ "grad_norm": 0.009593469090759754,
1022
+ "learning_rate": 6.918767007263646e-05,
1023
+ "loss": 0.0005,
1024
+ "num_input_tokens_seen": 26250480,
1025
+ "step": 127
1026
+ },
1027
+ {
1028
+ "epoch": 1.1323772752344181,
1029
+ "grad_norm": 0.012611499056220055,
1030
+ "learning_rate": 6.875896752978344e-05,
1031
+ "loss": 0.0005,
1032
+ "num_input_tokens_seen": 26458592,
1033
+ "step": 128
1034
+ },
1035
+ {
1036
+ "epoch": 1.1412024269167127,
1037
+ "grad_norm": 0.005860932637006044,
1038
+ "learning_rate": 6.832865394749065e-05,
1039
+ "loss": 0.0004,
1040
+ "num_input_tokens_seen": 26680256,
1041
+ "step": 129
1042
+ },
1043
+ {
1044
+ "epoch": 1.150027578599007,
1045
+ "grad_norm": 0.008905632421374321,
1046
+ "learning_rate": 6.789676628153143e-05,
1047
+ "loss": 0.0004,
1048
+ "num_input_tokens_seen": 26887424,
1049
+ "step": 130
1050
+ },
1051
+ {
1052
+ "epoch": 1.1588527302813016,
1053
+ "grad_norm": 0.00839240662753582,
1054
+ "learning_rate": 6.746334162286307e-05,
1055
+ "loss": 0.0003,
1056
+ "num_input_tokens_seen": 27112736,
1057
+ "step": 131
1058
+ },
1059
+ {
1060
+ "epoch": 1.1676778819635962,
1061
+ "grad_norm": 0.010829194448888302,
1062
+ "learning_rate": 6.702841719444141e-05,
1063
+ "loss": 0.0004,
1064
+ "num_input_tokens_seen": 27320064,
1065
+ "step": 132
1066
+ },
1067
+ {
1068
+ "epoch": 1.1765030336458908,
1069
+ "grad_norm": 0.005576102528721094,
1070
+ "learning_rate": 6.659203034802397e-05,
1071
+ "loss": 0.0003,
1072
+ "num_input_tokens_seen": 27520544,
1073
+ "step": 133
1074
+ },
1075
+ {
1076
+ "epoch": 1.1853281853281854,
1077
+ "grad_norm": 0.008609413169324398,
1078
+ "learning_rate": 6.615421856096231e-05,
1079
+ "loss": 0.0009,
1080
+ "num_input_tokens_seen": 27737920,
1081
+ "step": 134
1082
+ },
1083
+ {
1084
+ "epoch": 1.19415333701048,
1085
+ "grad_norm": 0.013195198960602283,
1086
+ "learning_rate": 6.571501943298334e-05,
1087
+ "loss": 0.0014,
1088
+ "num_input_tokens_seen": 27947552,
1089
+ "step": 135
1090
+ },
1091
+ {
1092
+ "epoch": 1.2029784886927744,
1093
+ "grad_norm": 0.008647961542010307,
1094
+ "learning_rate": 6.527447068296026e-05,
1095
+ "loss": 0.0003,
1096
+ "num_input_tokens_seen": 28143808,
1097
+ "step": 136
1098
+ },
1099
+ {
1100
+ "epoch": 1.211803640375069,
1101
+ "grad_norm": 0.006975845899432898,
1102
+ "learning_rate": 6.483261014567311e-05,
1103
+ "loss": 0.0002,
1104
+ "num_input_tokens_seen": 28349312,
1105
+ "step": 137
1106
+ },
1107
+ {
1108
+ "epoch": 1.2206287920573635,
1109
+ "grad_norm": 0.013750969432294369,
1110
+ "learning_rate": 6.438947576855968e-05,
1111
+ "loss": 0.0002,
1112
+ "num_input_tokens_seen": 28560096,
1113
+ "step": 138
1114
+ },
1115
+ {
1116
+ "epoch": 1.229453943739658,
1117
+ "grad_norm": 0.009799162857234478,
1118
+ "learning_rate": 6.394510560845637e-05,
1119
+ "loss": 0.0005,
1120
+ "num_input_tokens_seen": 28764544,
1121
+ "step": 139
1122
+ },
1123
+ {
1124
+ "epoch": 1.2382790954219525,
1125
+ "grad_norm": 0.00819414108991623,
1126
+ "learning_rate": 6.349953782832991e-05,
1127
+ "loss": 0.0004,
1128
+ "num_input_tokens_seen": 28949360,
1129
+ "step": 140
1130
+ },
1131
+ {
1132
+ "epoch": 1.247104247104247,
1133
+ "grad_norm": 0.008884673938155174,
1134
+ "learning_rate": 6.305281069399989e-05,
1135
+ "loss": 0.0002,
1136
+ "num_input_tokens_seen": 29148112,
1137
+ "step": 141
1138
+ },
1139
+ {
1140
+ "epoch": 1.2559293987865416,
1141
+ "grad_norm": 0.009248818270862103,
1142
+ "learning_rate": 6.26049625708524e-05,
1143
+ "loss": 0.0004,
1144
+ "num_input_tokens_seen": 29370624,
1145
+ "step": 142
1146
+ },
1147
+ {
1148
+ "epoch": 1.2647545504688362,
1149
+ "grad_norm": 0.008902438916265965,
1150
+ "learning_rate": 6.215603192054522e-05,
1151
+ "loss": 0.0003,
1152
+ "num_input_tokens_seen": 29572464,
1153
+ "step": 143
1154
+ },
1155
+ {
1156
+ "epoch": 1.2735797021511308,
1157
+ "grad_norm": 0.012439709156751633,
1158
+ "learning_rate": 6.17060572977047e-05,
1159
+ "loss": 0.0006,
1160
+ "num_input_tokens_seen": 29771152,
1161
+ "step": 144
1162
+ },
1163
+ {
1164
+ "epoch": 1.2824048538334254,
1165
+ "grad_norm": 0.013059360906481743,
1166
+ "learning_rate": 6.125507734661458e-05,
1167
+ "loss": 0.0003,
1168
+ "num_input_tokens_seen": 29954960,
1169
+ "step": 145
1170
+ },
1171
+ {
1172
+ "epoch": 1.2912300055157198,
1173
+ "grad_norm": 0.011295526288449764,
1174
+ "learning_rate": 6.080313079789723e-05,
1175
+ "loss": 0.0004,
1176
+ "num_input_tokens_seen": 30165568,
1177
+ "step": 146
1178
+ },
1179
+ {
1180
+ "epoch": 1.3000551571980143,
1181
+ "grad_norm": 0.01000818982720375,
1182
+ "learning_rate": 6.035025646518746e-05,
1183
+ "loss": 0.0005,
1184
+ "num_input_tokens_seen": 30372160,
1185
+ "step": 147
1186
+ },
1187
+ {
1188
+ "epoch": 1.308880308880309,
1189
+ "grad_norm": 0.010914387181401253,
1190
+ "learning_rate": 5.989649324179911e-05,
1191
+ "loss": 0.0003,
1192
+ "num_input_tokens_seen": 30572752,
1193
+ "step": 148
1194
+ },
1195
+ {
1196
+ "epoch": 1.3177054605626033,
1197
+ "grad_norm": 0.009289560839533806,
1198
+ "learning_rate": 5.944188009738483e-05,
1199
+ "loss": 0.0004,
1200
+ "num_input_tokens_seen": 30780496,
1201
+ "step": 149
1202
+ },
1203
+ {
1204
+ "epoch": 1.3265306122448979,
1205
+ "grad_norm": 0.015559184364974499,
1206
+ "learning_rate": 5.8986456074589404e-05,
1207
+ "loss": 0.0004,
1208
+ "num_input_tokens_seen": 30975120,
1209
+ "step": 150
1210
+ },
1211
+ {
1212
+ "epoch": 1.3353557639271925,
1213
+ "grad_norm": 0.00643413420766592,
1214
+ "learning_rate": 5.853026028569667e-05,
1215
+ "loss": 0.0002,
1216
+ "num_input_tokens_seen": 31174000,
1217
+ "step": 151
1218
+ },
1219
+ {
1220
+ "epoch": 1.344180915609487,
1221
+ "grad_norm": 0.0077626509591937065,
1222
+ "learning_rate": 5.807333190927053e-05,
1223
+ "loss": 0.0003,
1224
+ "num_input_tokens_seen": 31387088,
1225
+ "step": 152
1226
+ },
1227
+ {
1228
+ "epoch": 1.3530060672917816,
1229
+ "grad_norm": 0.0083751380443573,
1230
+ "learning_rate": 5.761571018679025e-05,
1231
+ "loss": 0.0003,
1232
+ "num_input_tokens_seen": 31576400,
1233
+ "step": 153
1234
+ },
1235
+ {
1236
+ "epoch": 1.3618312189740762,
1237
+ "grad_norm": 0.007961435243487358,
1238
+ "learning_rate": 5.715743441928041e-05,
1239
+ "loss": 0.0003,
1240
+ "num_input_tokens_seen": 31784320,
1241
+ "step": 154
1242
+ },
1243
+ {
1244
+ "epoch": 1.3706563706563706,
1245
+ "grad_norm": 0.006737589370459318,
1246
+ "learning_rate": 5.669854396393559e-05,
1247
+ "loss": 0.0004,
1248
+ "num_input_tokens_seen": 31987520,
1249
+ "step": 155
1250
+ },
1251
+ {
1252
+ "epoch": 1.3794815223386652,
1253
+ "grad_norm": 0.014642222784459591,
1254
+ "learning_rate": 5.6239078230740436e-05,
1255
+ "loss": 0.0004,
1256
+ "num_input_tokens_seen": 32187456,
1257
+ "step": 156
1258
+ },
1259
+ {
1260
+ "epoch": 1.3883066740209598,
1261
+ "grad_norm": 0.006064648274332285,
1262
+ "learning_rate": 5.5779076679085054e-05,
1263
+ "loss": 0.0002,
1264
+ "num_input_tokens_seen": 32384528,
1265
+ "step": 157
1266
+ },
1267
+ {
1268
+ "epoch": 1.3971318257032543,
1269
+ "grad_norm": 0.009461612440645695,
1270
+ "learning_rate": 5.531857881437612e-05,
1271
+ "loss": 0.0004,
1272
+ "num_input_tokens_seen": 32593040,
1273
+ "step": 158
1274
+ },
1275
+ {
1276
+ "epoch": 1.4059569773855487,
1277
+ "grad_norm": 0.007511747535318136,
1278
+ "learning_rate": 5.48576241846443e-05,
1279
+ "loss": 0.0003,
1280
+ "num_input_tokens_seen": 32797952,
1281
+ "step": 159
1282
+ },
1283
+ {
1284
+ "epoch": 1.4147821290678433,
1285
+ "grad_norm": 0.02702983096241951,
1286
+ "learning_rate": 5.4396252377147615e-05,
1287
+ "loss": 0.0003,
1288
+ "num_input_tokens_seen": 33008800,
1289
+ "step": 160
1290
+ },
1291
+ {
1292
+ "epoch": 1.4236072807501379,
1293
+ "grad_norm": 0.008439299650490284,
1294
+ "learning_rate": 5.3934503014971793e-05,
1295
+ "loss": 0.0003,
1296
+ "num_input_tokens_seen": 33208352,
1297
+ "step": 161
1298
+ },
1299
+ {
1300
+ "epoch": 1.4324324324324325,
1301
+ "grad_norm": 0.0037907836958765984,
1302
+ "learning_rate": 5.347241575362729e-05,
1303
+ "loss": 0.0002,
1304
+ "num_input_tokens_seen": 33410208,
1305
+ "step": 162
1306
+ },
1307
+ {
1308
+ "epoch": 1.441257584114727,
1309
+ "grad_norm": 0.008237862028181553,
1310
+ "learning_rate": 5.30100302776438e-05,
1311
+ "loss": 0.0003,
1312
+ "num_input_tokens_seen": 33631888,
1313
+ "step": 163
1314
+ },
1315
+ {
1316
+ "epoch": 1.4500827357970216,
1317
+ "grad_norm": 0.009860441088676453,
1318
+ "learning_rate": 5.254738629716186e-05,
1319
+ "loss": 0.0004,
1320
+ "num_input_tokens_seen": 33825152,
1321
+ "step": 164
1322
+ },
1323
+ {
1324
+ "epoch": 1.458907887479316,
1325
+ "grad_norm": 0.007564296945929527,
1326
+ "learning_rate": 5.208452354452274e-05,
1327
+ "loss": 0.0003,
1328
+ "num_input_tokens_seen": 34020352,
1329
+ "step": 165
1330
+ },
1331
+ {
1332
+ "epoch": 1.4677330391616106,
1333
+ "grad_norm": 0.019607344642281532,
1334
+ "learning_rate": 5.162148177085604e-05,
1335
+ "loss": 0.0004,
1336
+ "num_input_tokens_seen": 34226288,
1337
+ "step": 166
1338
+ },
1339
+ {
1340
+ "epoch": 1.4765581908439052,
1341
+ "grad_norm": 0.007924061268568039,
1342
+ "learning_rate": 5.115830074266591e-05,
1343
+ "loss": 0.0016,
1344
+ "num_input_tokens_seen": 34426672,
1345
+ "step": 167
1346
+ },
1347
+ {
1348
+ "epoch": 1.4853833425261997,
1349
+ "grad_norm": 0.006358864717185497,
1350
+ "learning_rate": 5.0695020238415756e-05,
1351
+ "loss": 0.0002,
1352
+ "num_input_tokens_seen": 34636944,
1353
+ "step": 168
1354
+ },
1355
+ {
1356
+ "epoch": 1.494208494208494,
1357
+ "grad_norm": 0.010681587271392345,
1358
+ "learning_rate": 5.0231680045112176e-05,
1359
+ "loss": 0.0003,
1360
+ "num_input_tokens_seen": 34839456,
1361
+ "step": 169
1362
+ },
1363
+ {
1364
+ "epoch": 1.5030336458907887,
1365
+ "grad_norm": 0.01033815648406744,
1366
+ "learning_rate": 4.976831995488784e-05,
1367
+ "loss": 0.0002,
1368
+ "num_input_tokens_seen": 35031600,
1369
+ "step": 170
1370
+ },
1371
+ {
1372
+ "epoch": 1.5118587975730833,
1373
+ "grad_norm": 0.016812577843666077,
1374
+ "learning_rate": 4.9304979761584256e-05,
1375
+ "loss": 0.0004,
1376
+ "num_input_tokens_seen": 35227728,
1377
+ "step": 171
1378
+ },
1379
+ {
1380
+ "epoch": 1.5206839492553779,
1381
+ "grad_norm": 0.008957776241004467,
1382
+ "learning_rate": 4.884169925733409e-05,
1383
+ "loss": 0.0002,
1384
+ "num_input_tokens_seen": 35436528,
1385
+ "step": 172
1386
+ },
1387
+ {
1388
+ "epoch": 1.5295091009376725,
1389
+ "grad_norm": 0.006675931625068188,
1390
+ "learning_rate": 4.837851822914397e-05,
1391
+ "loss": 0.0002,
1392
+ "num_input_tokens_seen": 35628624,
1393
+ "step": 173
1394
+ },
1395
+ {
1396
+ "epoch": 1.538334252619967,
1397
+ "grad_norm": 0.006146900821477175,
1398
+ "learning_rate": 4.791547645547726e-05,
1399
+ "loss": 0.0002,
1400
+ "num_input_tokens_seen": 35827376,
1401
+ "step": 174
1402
+ },
1403
+ {
1404
+ "epoch": 1.5471594043022614,
1405
+ "grad_norm": 0.012180755846202374,
1406
+ "learning_rate": 4.745261370283817e-05,
1407
+ "loss": 0.0003,
1408
+ "num_input_tokens_seen": 36056560,
1409
+ "step": 175
1410
+ },
1411
+ {
1412
+ "epoch": 1.555984555984556,
1413
+ "grad_norm": 0.00920344889163971,
1414
+ "learning_rate": 4.698996972235622e-05,
1415
+ "loss": 0.0002,
1416
+ "num_input_tokens_seen": 36267568,
1417
+ "step": 176
1418
+ },
1419
+ {
1420
+ "epoch": 1.5648097076668506,
1421
+ "grad_norm": 0.010103096254169941,
1422
+ "learning_rate": 4.652758424637271e-05,
1423
+ "loss": 0.0027,
1424
+ "num_input_tokens_seen": 36473008,
1425
+ "step": 177
1426
+ },
1427
+ {
1428
+ "epoch": 1.573634859349145,
1429
+ "grad_norm": 0.012086655013263226,
1430
+ "learning_rate": 4.606549698502823e-05,
1431
+ "loss": 0.0004,
1432
+ "num_input_tokens_seen": 36670944,
1433
+ "step": 178
1434
+ },
1435
+ {
1436
+ "epoch": 1.5824600110314395,
1437
+ "grad_norm": 0.0054108137264847755,
1438
+ "learning_rate": 4.56037476228524e-05,
1439
+ "loss": 0.0001,
1440
+ "num_input_tokens_seen": 36882256,
1441
+ "step": 179
1442
+ },
1443
+ {
1444
+ "epoch": 1.591285162713734,
1445
+ "grad_norm": 0.014871139079332352,
1446
+ "learning_rate": 4.5142375815355706e-05,
1447
+ "loss": 0.0004,
1448
+ "num_input_tokens_seen": 37091392,
1449
+ "step": 180
1450
+ },
1451
+ {
1452
+ "epoch": 1.6001103143960287,
1453
+ "grad_norm": 0.005915229208767414,
1454
+ "learning_rate": 4.468142118562389e-05,
1455
+ "loss": 0.0002,
1456
+ "num_input_tokens_seen": 37309680,
1457
+ "step": 181
1458
+ },
1459
+ {
1460
+ "epoch": 1.6089354660783233,
1461
+ "grad_norm": 0.006937643978744745,
1462
+ "learning_rate": 4.4220923320914964e-05,
1463
+ "loss": 0.0003,
1464
+ "num_input_tokens_seen": 37517952,
1465
+ "step": 182
1466
+ },
1467
+ {
1468
+ "epoch": 1.6177606177606179,
1469
+ "grad_norm": 0.00866376981139183,
1470
+ "learning_rate": 4.376092176925958e-05,
1471
+ "loss": 0.0003,
1472
+ "num_input_tokens_seen": 37732160,
1473
+ "step": 183
1474
+ },
1475
+ {
1476
+ "epoch": 1.6265857694429124,
1477
+ "grad_norm": 0.007841500453650951,
1478
+ "learning_rate": 4.330145603606441e-05,
1479
+ "loss": 0.0004,
1480
+ "num_input_tokens_seen": 37940368,
1481
+ "step": 184
1482
+ },
1483
+ {
1484
+ "epoch": 1.6354109211252068,
1485
+ "grad_norm": 0.008568421937525272,
1486
+ "learning_rate": 4.2842565580719595e-05,
1487
+ "loss": 0.0004,
1488
+ "num_input_tokens_seen": 38135024,
1489
+ "step": 185
1490
+ },
1491
+ {
1492
+ "epoch": 1.6442360728075014,
1493
+ "grad_norm": 0.011796732433140278,
1494
+ "learning_rate": 4.238428981320975e-05,
1495
+ "loss": 0.0002,
1496
+ "num_input_tokens_seen": 38336176,
1497
+ "step": 186
1498
+ },
1499
+ {
1500
+ "epoch": 1.6530612244897958,
1501
+ "grad_norm": 0.00755694042891264,
1502
+ "learning_rate": 4.192666809072948e-05,
1503
+ "loss": 0.0003,
1504
+ "num_input_tokens_seen": 38548880,
1505
+ "step": 187
1506
+ },
1507
+ {
1508
+ "epoch": 1.6618863761720903,
1509
+ "grad_norm": 0.01243317686021328,
1510
+ "learning_rate": 4.146973971430333e-05,
1511
+ "loss": 0.0003,
1512
+ "num_input_tokens_seen": 38755920,
1513
+ "step": 188
1514
+ },
1515
+ {
1516
+ "epoch": 1.670711527854385,
1517
+ "grad_norm": 0.006207725498825312,
1518
+ "learning_rate": 4.101354392541061e-05,
1519
+ "loss": 0.0002,
1520
+ "num_input_tokens_seen": 38973328,
1521
+ "step": 189
1522
+ },
1523
+ {
1524
+ "epoch": 1.6795366795366795,
1525
+ "grad_norm": 0.008532355539500713,
1526
+ "learning_rate": 4.0558119902615174e-05,
1527
+ "loss": 0.0003,
1528
+ "num_input_tokens_seen": 39193232,
1529
+ "step": 190
1530
+ },
1531
+ {
1532
+ "epoch": 1.688361831218974,
1533
+ "grad_norm": 0.008602111600339413,
1534
+ "learning_rate": 4.010350675820091e-05,
1535
+ "loss": 0.0003,
1536
+ "num_input_tokens_seen": 39406608,
1537
+ "step": 191
1538
+ },
1539
+ {
1540
+ "epoch": 1.6971869829012687,
1541
+ "grad_norm": 0.008903734385967255,
1542
+ "learning_rate": 3.964974353481254e-05,
1543
+ "loss": 0.0004,
1544
+ "num_input_tokens_seen": 39620160,
1545
+ "step": 192
1546
+ },
1547
+ {
1548
+ "epoch": 1.7060121345835633,
1549
+ "grad_norm": 0.005871508736163378,
1550
+ "learning_rate": 3.919686920210277e-05,
1551
+ "loss": 0.0001,
1552
+ "num_input_tokens_seen": 39815952,
1553
+ "step": 193
1554
+ },
1555
+ {
1556
+ "epoch": 1.7148372862658579,
1557
+ "grad_norm": 0.008220325224101543,
1558
+ "learning_rate": 3.874492265338544e-05,
1559
+ "loss": 0.0003,
1560
+ "num_input_tokens_seen": 40015408,
1561
+ "step": 194
1562
+ },
1563
+ {
1564
+ "epoch": 1.7236624379481522,
1565
+ "grad_norm": 0.00940727163106203,
1566
+ "learning_rate": 3.829394270229531e-05,
1567
+ "loss": 0.0002,
1568
+ "num_input_tokens_seen": 40215328,
1569
+ "step": 195
1570
+ },
1571
+ {
1572
+ "epoch": 1.7324875896304468,
1573
+ "grad_norm": 0.005745697300881147,
1574
+ "learning_rate": 3.784396807945477e-05,
1575
+ "loss": 0.0002,
1576
+ "num_input_tokens_seen": 40414384,
1577
+ "step": 196
1578
+ },
1579
+ {
1580
+ "epoch": 1.7413127413127412,
1581
+ "grad_norm": 0.009524352848529816,
1582
+ "learning_rate": 3.7395037429147615e-05,
1583
+ "loss": 0.0002,
1584
+ "num_input_tokens_seen": 40620656,
1585
+ "step": 197
1586
+ },
1587
+ {
1588
+ "epoch": 1.7501378929950357,
1589
+ "grad_norm": 0.00809427909553051,
1590
+ "learning_rate": 3.694718930600012e-05,
1591
+ "loss": 0.0003,
1592
+ "num_input_tokens_seen": 40847008,
1593
+ "step": 198
1594
+ },
1595
+ {
1596
+ "epoch": 1.7589630446773303,
1597
+ "grad_norm": 0.0051635075360536575,
1598
+ "learning_rate": 3.65004621716701e-05,
1599
+ "loss": 0.0001,
1600
+ "num_input_tokens_seen": 41036368,
1601
+ "step": 199
1602
+ },
1603
+ {
1604
+ "epoch": 1.767788196359625,
1605
+ "grad_norm": 0.006504002492874861,
1606
+ "learning_rate": 3.6054894391543646e-05,
1607
+ "loss": 0.0003,
1608
+ "num_input_tokens_seen": 41252976,
1609
+ "step": 200
1610
+ },
1611
+ {
1612
+ "epoch": 1.7766133480419195,
1613
+ "grad_norm": 0.009855791926383972,
1614
+ "learning_rate": 3.561052423144032e-05,
1615
+ "loss": 0.0002,
1616
+ "num_input_tokens_seen": 41465104,
1617
+ "step": 201
1618
+ },
1619
+ {
1620
+ "epoch": 1.785438499724214,
1621
+ "grad_norm": 0.004304118454456329,
1622
+ "learning_rate": 3.5167389854326905e-05,
1623
+ "loss": 0.0002,
1624
+ "num_input_tokens_seen": 41670800,
1625
+ "step": 202
1626
+ },
1627
+ {
1628
+ "epoch": 1.7942636514065087,
1629
+ "grad_norm": 0.014682441018521786,
1630
+ "learning_rate": 3.4725529317039754e-05,
1631
+ "loss": 0.0013,
1632
+ "num_input_tokens_seen": 41883536,
1633
+ "step": 203
1634
+ },
1635
+ {
1636
+ "epoch": 1.803088803088803,
1637
+ "grad_norm": 0.0061918287537992,
1638
+ "learning_rate": 3.428498056701665e-05,
1639
+ "loss": 0.0001,
1640
+ "num_input_tokens_seen": 42083360,
1641
+ "step": 204
1642
+ },
1643
+ {
1644
+ "epoch": 1.8119139547710976,
1645
+ "grad_norm": 0.009490927681326866,
1646
+ "learning_rate": 3.38457814390377e-05,
1647
+ "loss": 0.0002,
1648
+ "num_input_tokens_seen": 42283120,
1649
+ "step": 205
1650
+ },
1651
+ {
1652
+ "epoch": 1.8207391064533922,
1653
+ "grad_norm": 0.008434086106717587,
1654
+ "learning_rate": 3.340796965197604e-05,
1655
+ "loss": 0.0003,
1656
+ "num_input_tokens_seen": 42499088,
1657
+ "step": 206
1658
+ },
1659
+ {
1660
+ "epoch": 1.8295642581356866,
1661
+ "grad_norm": 0.004052174277603626,
1662
+ "learning_rate": 3.297158280555862e-05,
1663
+ "loss": 0.0001,
1664
+ "num_input_tokens_seen": 42692976,
1665
+ "step": 207
1666
+ },
1667
+ {
1668
+ "epoch": 1.8383894098179812,
1669
+ "grad_norm": 0.007411065977066755,
1670
+ "learning_rate": 3.2536658377136935e-05,
1671
+ "loss": 0.0003,
1672
+ "num_input_tokens_seen": 42907216,
1673
+ "step": 208
1674
+ },
1675
+ {
1676
+ "epoch": 1.8472145615002757,
1677
+ "grad_norm": 0.006996455602347851,
1678
+ "learning_rate": 3.210323371846857e-05,
1679
+ "loss": 0.0001,
1680
+ "num_input_tokens_seen": 43112448,
1681
+ "step": 209
1682
+ },
1683
+ {
1684
+ "epoch": 1.8560397131825703,
1685
+ "grad_norm": 0.006998082622885704,
1686
+ "learning_rate": 3.167134605250938e-05,
1687
+ "loss": 0.0003,
1688
+ "num_input_tokens_seen": 43340096,
1689
+ "step": 210
1690
+ },
1691
+ {
1692
+ "epoch": 1.864864864864865,
1693
+ "grad_norm": 0.006418649572879076,
1694
+ "learning_rate": 3.124103247021657e-05,
1695
+ "loss": 0.0001,
1696
+ "num_input_tokens_seen": 43539664,
1697
+ "step": 211
1698
+ },
1699
+ {
1700
+ "epoch": 1.8736900165471595,
1701
+ "grad_norm": 0.009151714853942394,
1702
+ "learning_rate": 3.081232992736355e-05,
1703
+ "loss": 0.0003,
1704
+ "num_input_tokens_seen": 43727664,
1705
+ "step": 212
1706
+ },
1707
+ {
1708
+ "epoch": 1.882515168229454,
1709
+ "grad_norm": 0.004692760296165943,
1710
+ "learning_rate": 3.0385275241365962e-05,
1711
+ "loss": 0.0002,
1712
+ "num_input_tokens_seen": 43953584,
1713
+ "step": 213
1714
+ },
1715
+ {
1716
+ "epoch": 1.8913403199117484,
1717
+ "grad_norm": 0.006455820985138416,
1718
+ "learning_rate": 2.9959905088119776e-05,
1719
+ "loss": 0.0002,
1720
+ "num_input_tokens_seen": 44157504,
1721
+ "step": 214
1722
+ },
1723
+ {
1724
+ "epoch": 1.900165471594043,
1725
+ "grad_norm": 0.006325691007077694,
1726
+ "learning_rate": 2.9536255998851613e-05,
1727
+ "loss": 0.0001,
1728
+ "num_input_tokens_seen": 44350448,
1729
+ "step": 215
1730
+ },
1731
+ {
1732
+ "epoch": 1.9089906232763376,
1733
+ "grad_norm": 0.006784004159271717,
1734
+ "learning_rate": 2.9114364356981272e-05,
1735
+ "loss": 0.0002,
1736
+ "num_input_tokens_seen": 44561472,
1737
+ "step": 216
1738
+ },
1739
+ {
1740
+ "epoch": 1.917815774958632,
1741
+ "grad_norm": 0.008874817751348019,
1742
+ "learning_rate": 2.8694266394997238e-05,
1743
+ "loss": 0.0002,
1744
+ "num_input_tokens_seen": 44769936,
1745
+ "step": 217
1746
+ },
1747
+ {
1748
+ "epoch": 1.9266409266409266,
1749
+ "grad_norm": 0.006964050233364105,
1750
+ "learning_rate": 2.8275998191344888e-05,
1751
+ "loss": 0.0002,
1752
+ "num_input_tokens_seen": 44979344,
1753
+ "step": 218
1754
+ },
1755
+ {
1756
+ "epoch": 1.9354660783232212,
1757
+ "grad_norm": 0.014264012686908245,
1758
+ "learning_rate": 2.7859595667328026e-05,
1759
+ "loss": 0.0002,
1760
+ "num_input_tokens_seen": 45196944,
1761
+ "step": 219
1762
+ },
1763
+ {
1764
+ "epoch": 1.9442912300055157,
1765
+ "grad_norm": 0.005279663018882275,
1766
+ "learning_rate": 2.7445094584024067e-05,
1767
+ "loss": 0.0001,
1768
+ "num_input_tokens_seen": 45406832,
1769
+ "step": 220
1770
+ },
1771
+ {
1772
+ "epoch": 1.9531163816878103,
1773
+ "grad_norm": 0.0171637125313282,
1774
+ "learning_rate": 2.7032530539212658e-05,
1775
+ "loss": 0.0003,
1776
+ "num_input_tokens_seen": 45603120,
1777
+ "step": 221
1778
+ },
1779
+ {
1780
+ "epoch": 1.961941533370105,
1781
+ "grad_norm": 0.007687513716518879,
1782
+ "learning_rate": 2.6621938964318595e-05,
1783
+ "loss": 0.0002,
1784
+ "num_input_tokens_seen": 45805184,
1785
+ "step": 222
1786
+ },
1787
+ {
1788
+ "epoch": 1.9707666850523995,
1789
+ "grad_norm": 0.0034611017908900976,
1790
+ "learning_rate": 2.621335512136899e-05,
1791
+ "loss": 0.0001,
1792
+ "num_input_tokens_seen": 46001184,
1793
+ "step": 223
1794
+ },
1795
+ {
1796
+ "epoch": 1.9795918367346939,
1797
+ "grad_norm": 0.004358428996056318,
1798
+ "learning_rate": 2.5806814099964772e-05,
1799
+ "loss": 0.0002,
1800
+ "num_input_tokens_seen": 46206288,
1801
+ "step": 224
1802
+ },
1803
+ {
1804
+ "epoch": 1.9884169884169884,
1805
+ "grad_norm": 0.008765267208218575,
1806
+ "learning_rate": 2.540235081426736e-05,
1807
+ "loss": 0.0002,
1808
+ "num_input_tokens_seen": 46427344,
1809
+ "step": 225
1810
+ },
1811
+ {
1812
+ "epoch": 1.9972421400992828,
1813
+ "grad_norm": 0.006889387033879757,
1814
+ "learning_rate": 2.500000000000001e-05,
1815
+ "loss": 0.0003,
1816
+ "num_input_tokens_seen": 46627344,
1817
+ "step": 226
1818
+ },
1819
+ {
1820
+ "epoch": 2.0088251516822946,
1821
+ "grad_norm": 0.043494511395692825,
1822
+ "learning_rate": 2.459979621146477e-05,
1823
+ "loss": 0.0011,
1824
+ "num_input_tokens_seen": 46901504,
1825
+ "step": 227
1826
+ },
1827
+ {
1828
+ "epoch": 2.017650303364589,
1829
+ "grad_norm": 0.007718184031546116,
1830
+ "learning_rate": 2.4201773818574956e-05,
1831
+ "loss": 0.0001,
1832
+ "num_input_tokens_seen": 47104400,
1833
+ "step": 228
1834
+ },
1835
+ {
1836
+ "epoch": 2.0264754550468838,
1837
+ "grad_norm": 0.003912526648491621,
1838
+ "learning_rate": 2.3805967003903333e-05,
1839
+ "loss": 0.0001,
1840
+ "num_input_tokens_seen": 47314176,
1841
+ "step": 229
1842
+ },
1843
+ {
1844
+ "epoch": 2.0353006067291783,
1845
+ "grad_norm": 0.010783454403281212,
1846
+ "learning_rate": 2.3412409759746528e-05,
1847
+ "loss": 0.0003,
1848
+ "num_input_tokens_seen": 47525264,
1849
+ "step": 230
1850
+ },
1851
+ {
1852
+ "epoch": 2.0441257584114725,
1853
+ "grad_norm": 0.0026623259764164686,
1854
+ "learning_rate": 2.302113588520578e-05,
1855
+ "loss": 0.0001,
1856
+ "num_input_tokens_seen": 47724528,
1857
+ "step": 231
1858
+ },
1859
+ {
1860
+ "epoch": 2.052950910093767,
1861
+ "grad_norm": 0.00557671207934618,
1862
+ "learning_rate": 2.2632178983284153e-05,
1863
+ "loss": 0.0002,
1864
+ "num_input_tokens_seen": 47932624,
1865
+ "step": 232
1866
+ },
1867
+ {
1868
+ "epoch": 2.0617760617760617,
1869
+ "grad_norm": 0.003710981458425522,
1870
+ "learning_rate": 2.2245572458000712e-05,
1871
+ "loss": 0.0001,
1872
+ "num_input_tokens_seen": 48148608,
1873
+ "step": 233
1874
+ },
1875
+ {
1876
+ "epoch": 2.0706012134583562,
1877
+ "grad_norm": 0.009742701426148415,
1878
+ "learning_rate": 2.1861349511521815e-05,
1879
+ "loss": 0.0025,
1880
+ "num_input_tokens_seen": 48373632,
1881
+ "step": 234
1882
+ },
1883
+ {
1884
+ "epoch": 2.079426365140651,
1885
+ "grad_norm": 0.009755464270710945,
1886
+ "learning_rate": 2.147954314130955e-05,
1887
+ "loss": 0.0013,
1888
+ "num_input_tokens_seen": 48586512,
1889
+ "step": 235
1890
+ },
1891
+ {
1892
+ "epoch": 2.0882515168229454,
1893
+ "grad_norm": 0.002706202445551753,
1894
+ "learning_rate": 2.1100186137288e-05,
1895
+ "loss": 0.0001,
1896
+ "num_input_tokens_seen": 48793568,
1897
+ "step": 236
1898
+ },
1899
+ {
1900
+ "epoch": 2.09707666850524,
1901
+ "grad_norm": 0.005180325359106064,
1902
+ "learning_rate": 2.072331107902713e-05,
1903
+ "loss": 0.0001,
1904
+ "num_input_tokens_seen": 49006224,
1905
+ "step": 237
1906
+ },
1907
+ {
1908
+ "epoch": 2.1059018201875346,
1909
+ "grad_norm": 0.005968959536403418,
1910
+ "learning_rate": 2.0348950332944834e-05,
1911
+ "loss": 0.0002,
1912
+ "num_input_tokens_seen": 49217632,
1913
+ "step": 238
1914
+ },
1915
+ {
1916
+ "epoch": 2.114726971869829,
1917
+ "grad_norm": 0.0063306307420134544,
1918
+ "learning_rate": 1.9977136049527345e-05,
1919
+ "loss": 0.0001,
1920
+ "num_input_tokens_seen": 49426624,
1921
+ "step": 239
1922
+ },
1923
+ {
1924
+ "epoch": 2.1235521235521237,
1925
+ "grad_norm": 0.005157762672752142,
1926
+ "learning_rate": 1.960790016056801e-05,
1927
+ "loss": 0.0001,
1928
+ "num_input_tokens_seen": 49623376,
1929
+ "step": 240
1930
+ },
1931
+ {
1932
+ "epoch": 2.132377275234418,
1933
+ "grad_norm": 0.005218483041971922,
1934
+ "learning_rate": 1.9241274376425e-05,
1935
+ "loss": 0.0002,
1936
+ "num_input_tokens_seen": 49828144,
1937
+ "step": 241
1938
+ },
1939
+ {
1940
+ "epoch": 2.1412024269167125,
1941
+ "grad_norm": 0.00744604179635644,
1942
+ "learning_rate": 1.8877290183298057e-05,
1943
+ "loss": 0.0002,
1944
+ "num_input_tokens_seen": 50018448,
1945
+ "step": 242
1946
+ },
1947
+ {
1948
+ "epoch": 2.150027578599007,
1949
+ "grad_norm": 0.005399591755121946,
1950
+ "learning_rate": 1.8515978840524302e-05,
1951
+ "loss": 0.0001,
1952
+ "num_input_tokens_seen": 50218176,
1953
+ "step": 243
1954
+ },
1955
+ {
1956
+ "epoch": 2.1588527302813016,
1957
+ "grad_norm": 0.005761398002505302,
1958
+ "learning_rate": 1.815737137789377e-05,
1959
+ "loss": 0.0002,
1960
+ "num_input_tokens_seen": 50424896,
1961
+ "step": 244
1962
+ },
1963
+ {
1964
+ "epoch": 2.1676778819635962,
1965
+ "grad_norm": 0.006964447908103466,
1966
+ "learning_rate": 1.7801498592984446e-05,
1967
+ "loss": 0.0006,
1968
+ "num_input_tokens_seen": 50635088,
1969
+ "step": 245
1970
+ },
1971
+ {
1972
+ "epoch": 2.176503033645891,
1973
+ "grad_norm": 0.002962745726108551,
1974
+ "learning_rate": 1.7448391048517376e-05,
1975
+ "loss": 0.0001,
1976
+ "num_input_tokens_seen": 50849552,
1977
+ "step": 246
1978
+ },
1979
+ {
1980
+ "epoch": 2.1853281853281854,
1981
+ "grad_norm": 0.005332667380571365,
1982
+ "learning_rate": 1.7098079069731958e-05,
1983
+ "loss": 0.0002,
1984
+ "num_input_tokens_seen": 51037776,
1985
+ "step": 247
1986
+ },
1987
+ {
1988
+ "epoch": 2.19415333701048,
1989
+ "grad_norm": 0.006928949151188135,
1990
+ "learning_rate": 1.6750592741781497e-05,
1991
+ "loss": 0.0002,
1992
+ "num_input_tokens_seen": 51242672,
1993
+ "step": 248
1994
+ },
1995
+ {
1996
+ "epoch": 2.2029784886927746,
1997
+ "grad_norm": 0.004213888198137283,
1998
+ "learning_rate": 1.640596190714947e-05,
1999
+ "loss": 0.0001,
2000
+ "num_input_tokens_seen": 51437008,
2001
+ "step": 249
2002
+ },
2003
+ {
2004
+ "epoch": 2.211803640375069,
2005
+ "grad_norm": 0.010446918196976185,
2006
+ "learning_rate": 1.6064216163086716e-05,
2007
+ "loss": 0.0001,
2008
+ "num_input_tokens_seen": 51641264,
2009
+ "step": 250
2010
+ },
2011
+ {
2012
+ "epoch": 2.2206287920573633,
2013
+ "grad_norm": 0.004029524512588978,
2014
+ "learning_rate": 1.5725384859069455e-05,
2015
+ "loss": 0.0001,
2016
+ "num_input_tokens_seen": 51842592,
2017
+ "step": 251
2018
+ },
2019
+ {
2020
+ "epoch": 2.229453943739658,
2021
+ "grad_norm": 0.006790219806134701,
2022
+ "learning_rate": 1.538949709427886e-05,
2023
+ "loss": 0.0012,
2024
+ "num_input_tokens_seen": 52047456,
2025
+ "step": 252
2026
+ },
2027
+ {
2028
+ "epoch": 2.2382790954219525,
2029
+ "grad_norm": 0.003987099044024944,
2030
+ "learning_rate": 1.5056581715101886e-05,
2031
+ "loss": 0.0001,
2032
+ "num_input_tokens_seen": 52242208,
2033
+ "step": 253
2034
+ },
2035
+ {
2036
+ "epoch": 2.247104247104247,
2037
+ "grad_norm": 0.008930574171245098,
2038
+ "learning_rate": 1.472666731265394e-05,
2039
+ "loss": 0.0003,
2040
+ "num_input_tokens_seen": 52436800,
2041
+ "step": 254
2042
+ },
2043
+ {
2044
+ "epoch": 2.2559293987865416,
2045
+ "grad_norm": 0.004108684603124857,
2046
+ "learning_rate": 1.4399782220323515e-05,
2047
+ "loss": 0.0001,
2048
+ "num_input_tokens_seen": 52624752,
2049
+ "step": 255
2050
+ },
2051
+ {
2052
+ "epoch": 2.2647545504688362,
2053
+ "grad_norm": 0.00732703972607851,
2054
+ "learning_rate": 1.4075954511338785e-05,
2055
+ "loss": 0.0001,
2056
+ "num_input_tokens_seen": 52836384,
2057
+ "step": 256
2058
+ },
2059
+ {
2060
+ "epoch": 2.273579702151131,
2061
+ "grad_norm": 0.006608397234231234,
2062
+ "learning_rate": 1.3755211996356687e-05,
2063
+ "loss": 0.0001,
2064
+ "num_input_tokens_seen": 53059296,
2065
+ "step": 257
2066
+ },
2067
+ {
2068
+ "epoch": 2.2824048538334254,
2069
+ "grad_norm": 0.002376733347773552,
2070
+ "learning_rate": 1.3437582221074573e-05,
2071
+ "loss": 0.0001,
2072
+ "num_input_tokens_seen": 53267440,
2073
+ "step": 258
2074
+ },
2075
+ {
2076
+ "epoch": 2.29123000551572,
2077
+ "grad_norm": 0.004921163432300091,
2078
+ "learning_rate": 1.3123092463864456e-05,
2079
+ "loss": 0.0001,
2080
+ "num_input_tokens_seen": 53501008,
2081
+ "step": 259
2082
+ },
2083
+ {
2084
+ "epoch": 2.300055157198014,
2085
+ "grad_norm": 0.0034377635456621647,
2086
+ "learning_rate": 1.2811769733430406e-05,
2087
+ "loss": 0.0001,
2088
+ "num_input_tokens_seen": 53700432,
2089
+ "step": 260
2090
+ },
2091
+ {
2092
+ "epoch": 2.3088803088803087,
2093
+ "grad_norm": 0.006821690127253532,
2094
+ "learning_rate": 1.250364076648894e-05,
2095
+ "loss": 0.0002,
2096
+ "num_input_tokens_seen": 53919616,
2097
+ "step": 261
2098
+ },
2099
+ {
2100
+ "epoch": 2.3177054605626033,
2101
+ "grad_norm": 0.004776927176862955,
2102
+ "learning_rate": 1.2198732025472876e-05,
2103
+ "loss": 0.0001,
2104
+ "num_input_tokens_seen": 54130528,
2105
+ "step": 262
2106
+ },
2107
+ {
2108
+ "epoch": 2.326530612244898,
2109
+ "grad_norm": 0.004824692849069834,
2110
+ "learning_rate": 1.1897069696258755e-05,
2111
+ "loss": 0.0002,
2112
+ "num_input_tokens_seen": 54350560,
2113
+ "step": 263
2114
+ },
2115
+ {
2116
+ "epoch": 2.3353557639271925,
2117
+ "grad_norm": 0.005174586083739996,
2118
+ "learning_rate": 1.1598679685917901e-05,
2119
+ "loss": 0.0001,
2120
+ "num_input_tokens_seen": 54542224,
2121
+ "step": 264
2122
+ },
2123
+ {
2124
+ "epoch": 2.344180915609487,
2125
+ "grad_norm": 0.012352543883025646,
2126
+ "learning_rate": 1.1303587620491513e-05,
2127
+ "loss": 0.0002,
2128
+ "num_input_tokens_seen": 54745136,
2129
+ "step": 265
2130
+ },
2131
+ {
2132
+ "epoch": 2.3530060672917816,
2133
+ "grad_norm": 0.005056153051555157,
2134
+ "learning_rate": 1.1011818842789928e-05,
2135
+ "loss": 0.0001,
2136
+ "num_input_tokens_seen": 54957584,
2137
+ "step": 266
2138
+ },
2139
+ {
2140
+ "epoch": 2.361831218974076,
2141
+ "grad_norm": 0.010525842197239399,
2142
+ "learning_rate": 1.0723398410216084e-05,
2143
+ "loss": 0.0001,
2144
+ "num_input_tokens_seen": 55162496,
2145
+ "step": 267
2146
+ },
2147
+ {
2148
+ "epoch": 2.370656370656371,
2149
+ "grad_norm": 0.0092442212626338,
2150
+ "learning_rate": 1.0438351092613569e-05,
2151
+ "loss": 0.0002,
2152
+ "num_input_tokens_seen": 55376544,
2153
+ "step": 268
2154
+ },
2155
+ {
2156
+ "epoch": 2.3794815223386654,
2157
+ "grad_norm": 0.00699999462813139,
2158
+ "learning_rate": 1.0156701370139454e-05,
2159
+ "loss": 0.0001,
2160
+ "num_input_tokens_seen": 55583072,
2161
+ "step": 269
2162
+ },
2163
+ {
2164
+ "epoch": 2.38830667402096,
2165
+ "grad_norm": 0.007677710149437189,
2166
+ "learning_rate": 9.878473431161767e-06,
2167
+ "loss": 0.0002,
2168
+ "num_input_tokens_seen": 55801200,
2169
+ "step": 270
2170
+ },
2171
+ {
2172
+ "epoch": 2.397131825703254,
2173
+ "grad_norm": 0.003174175275489688,
2174
+ "learning_rate": 9.603691170182317e-06,
2175
+ "loss": 0.0001,
2176
+ "num_input_tokens_seen": 55998080,
2177
+ "step": 271
2178
+ },
2179
+ {
2180
+ "epoch": 2.4059569773855487,
2181
+ "grad_norm": 0.005871200002729893,
2182
+ "learning_rate": 9.33237818578449e-06,
2183
+ "loss": 0.0002,
2184
+ "num_input_tokens_seen": 56200448,
2185
+ "step": 272
2186
+ },
2187
+ {
2188
+ "epoch": 2.4147821290678433,
2189
+ "grad_norm": 0.00371691957116127,
2190
+ "learning_rate": 9.064557778606631e-06,
2191
+ "loss": 0.0001,
2192
+ "num_input_tokens_seen": 56400416,
2193
+ "step": 273
2194
+ },
2195
+ {
2196
+ "epoch": 2.423607280750138,
2197
+ "grad_norm": 0.007599337492138147,
2198
+ "learning_rate": 8.800252949340998e-06,
2199
+ "loss": 0.0002,
2200
+ "num_input_tokens_seen": 56606128,
2201
+ "step": 274
2202
+ },
2203
+ {
2204
+ "epoch": 2.4324324324324325,
2205
+ "grad_norm": 0.0015243644593283534,
2206
+ "learning_rate": 8.539486396758356e-06,
2207
+ "loss": 0.0,
2208
+ "num_input_tokens_seen": 56797824,
2209
+ "step": 275
2210
+ },
2211
+ {
2212
+ "epoch": 2.441257584114727,
2213
+ "grad_norm": 0.0030196798034012318,
2214
+ "learning_rate": 8.28228051575864e-06,
2215
+ "loss": 0.0001,
2216
+ "num_input_tokens_seen": 57006384,
2217
+ "step": 276
2218
+ },
2219
+ {
2220
+ "epoch": 2.4500827357970216,
2221
+ "grad_norm": 0.005347589962184429,
2222
+ "learning_rate": 8.02865739544767e-06,
2223
+ "loss": 0.0001,
2224
+ "num_input_tokens_seen": 57207824,
2225
+ "step": 277
2226
+ },
2227
+ {
2228
+ "epoch": 2.458907887479316,
2229
+ "grad_norm": 0.005150883924216032,
2230
+ "learning_rate": 7.778638817240042e-06,
2231
+ "loss": 0.0001,
2232
+ "num_input_tokens_seen": 57415152,
2233
+ "step": 278
2234
+ },
2235
+ {
2236
+ "epoch": 2.467733039161611,
2237
+ "grad_norm": 0.006857512053102255,
2238
+ "learning_rate": 7.532246252988617e-06,
2239
+ "loss": 0.0001,
2240
+ "num_input_tokens_seen": 57628096,
2241
+ "step": 279
2242
+ },
2243
+ {
2244
+ "epoch": 2.476558190843905,
2245
+ "grad_norm": 0.005364645272493362,
2246
+ "learning_rate": 7.289500863140414e-06,
2247
+ "loss": 0.0001,
2248
+ "num_input_tokens_seen": 57824064,
2249
+ "step": 280
2250
+ },
2251
+ {
2252
+ "epoch": 2.4853833425261995,
2253
+ "grad_norm": 0.007198365870863199,
2254
+ "learning_rate": 7.05042349491935e-06,
2255
+ "loss": 0.0002,
2256
+ "num_input_tokens_seen": 58042720,
2257
+ "step": 281
2258
+ },
2259
+ {
2260
+ "epoch": 2.494208494208494,
2261
+ "grad_norm": 0.005014900583773851,
2262
+ "learning_rate": 6.815034680535915e-06,
2263
+ "loss": 0.0001,
2264
+ "num_input_tokens_seen": 58255408,
2265
+ "step": 282
2266
+ },
2267
+ {
2268
+ "epoch": 2.5030336458907887,
2269
+ "grad_norm": 0.008873779326677322,
2270
+ "learning_rate": 6.5833546354237556e-06,
2271
+ "loss": 0.0001,
2272
+ "num_input_tokens_seen": 58464800,
2273
+ "step": 283
2274
+ },
2275
+ {
2276
+ "epoch": 2.5118587975730833,
2277
+ "grad_norm": 0.0044725253246724606,
2278
+ "learning_rate": 6.355403256503595e-06,
2279
+ "loss": 0.0001,
2280
+ "num_input_tokens_seen": 58672496,
2281
+ "step": 284
2282
+ },
2283
+ {
2284
+ "epoch": 2.520683949255378,
2285
+ "grad_norm": 0.0047348616644740105,
2286
+ "learning_rate": 6.1312001204745115e-06,
2287
+ "loss": 0.0002,
2288
+ "num_input_tokens_seen": 58898256,
2289
+ "step": 285
2290
+ },
2291
+ {
2292
+ "epoch": 2.5295091009376725,
2293
+ "grad_norm": 0.00710884016007185,
2294
+ "learning_rate": 5.910764482132575e-06,
2295
+ "loss": 0.0001,
2296
+ "num_input_tokens_seen": 59107152,
2297
+ "step": 286
2298
+ },
2299
+ {
2300
+ "epoch": 2.538334252619967,
2301
+ "grad_norm": 0.007686229422688484,
2302
+ "learning_rate": 5.6941152727173265e-06,
2303
+ "loss": 0.0002,
2304
+ "num_input_tokens_seen": 59307664,
2305
+ "step": 287
2306
+ },
2307
+ {
2308
+ "epoch": 2.5471594043022616,
2309
+ "grad_norm": 0.014555118046700954,
2310
+ "learning_rate": 5.481271098285817e-06,
2311
+ "loss": 0.0003,
2312
+ "num_input_tokens_seen": 59514736,
2313
+ "step": 288
2314
+ },
2315
+ {
2316
+ "epoch": 2.5559845559845558,
2317
+ "grad_norm": 0.0028200196102261543,
2318
+ "learning_rate": 5.272250238114856e-06,
2319
+ "loss": 0.0001,
2320
+ "num_input_tokens_seen": 59712512,
2321
+ "step": 289
2322
+ },
2323
+ {
2324
+ "epoch": 2.564809707666851,
2325
+ "grad_norm": 0.004194322973489761,
2326
+ "learning_rate": 5.067070643131055e-06,
2327
+ "loss": 0.0001,
2328
+ "num_input_tokens_seen": 59910000,
2329
+ "step": 290
2330
+ },
2331
+ {
2332
+ "epoch": 2.573634859349145,
2333
+ "grad_norm": 0.006987538188695908,
2334
+ "learning_rate": 4.865749934369223e-06,
2335
+ "loss": 0.0002,
2336
+ "num_input_tokens_seen": 60116400,
2337
+ "step": 291
2338
+ },
2339
+ {
2340
+ "epoch": 2.5824600110314395,
2341
+ "grad_norm": 0.003778768004849553,
2342
+ "learning_rate": 4.668305401459022e-06,
2343
+ "loss": 0.0002,
2344
+ "num_input_tokens_seen": 60320368,
2345
+ "step": 292
2346
+ },
2347
+ {
2348
+ "epoch": 2.591285162713734,
2349
+ "grad_norm": 0.003472360782325268,
2350
+ "learning_rate": 4.474754001140191e-06,
2351
+ "loss": 0.0001,
2352
+ "num_input_tokens_seen": 60536528,
2353
+ "step": 293
2354
+ },
2355
+ {
2356
+ "epoch": 2.6001103143960287,
2357
+ "grad_norm": 0.009052475914359093,
2358
+ "learning_rate": 4.285112355806192e-06,
2359
+ "loss": 0.001,
2360
+ "num_input_tokens_seen": 60743120,
2361
+ "step": 294
2362
+ },
2363
+ {
2364
+ "epoch": 2.6089354660783233,
2365
+ "grad_norm": 0.0060082292184233665,
2366
+ "learning_rate": 4.099396752076745e-06,
2367
+ "loss": 0.0001,
2368
+ "num_input_tokens_seen": 60942704,
2369
+ "step": 295
2370
+ },
2371
+ {
2372
+ "epoch": 2.617760617760618,
2373
+ "grad_norm": 0.0075798071920871735,
2374
+ "learning_rate": 3.917623139399018e-06,
2375
+ "loss": 0.0001,
2376
+ "num_input_tokens_seen": 61140128,
2377
+ "step": 296
2378
+ },
2379
+ {
2380
+ "epoch": 2.6265857694429124,
2381
+ "grad_norm": 0.0055752964690327644,
2382
+ "learning_rate": 3.7398071286779857e-06,
2383
+ "loss": 0.0001,
2384
+ "num_input_tokens_seen": 61334224,
2385
+ "step": 297
2386
+ },
2387
+ {
2388
+ "epoch": 2.6354109211252066,
2389
+ "grad_norm": 0.007863204926252365,
2390
+ "learning_rate": 3.5659639909356723e-06,
2391
+ "loss": 0.0001,
2392
+ "num_input_tokens_seen": 61543280,
2393
+ "step": 298
2394
+ },
2395
+ {
2396
+ "epoch": 2.6442360728075016,
2397
+ "grad_norm": 0.006538075394928455,
2398
+ "learning_rate": 3.3961086559996803e-06,
2399
+ "loss": 0.0002,
2400
+ "num_input_tokens_seen": 61750720,
2401
+ "step": 299
2402
+ },
2403
+ {
2404
+ "epoch": 2.6530612244897958,
2405
+ "grad_norm": 0.002779777627438307,
2406
+ "learning_rate": 3.230255711220992e-06,
2407
+ "loss": 0.0,
2408
+ "num_input_tokens_seen": 61945952,
2409
+ "step": 300
2410
+ },
2411
+ {
2412
+ "epoch": 2.6618863761720903,
2413
+ "grad_norm": 0.004271807614713907,
2414
+ "learning_rate": 3.0684194002212287e-06,
2415
+ "loss": 0.0001,
2416
+ "num_input_tokens_seen": 62155632,
2417
+ "step": 301
2418
+ },
2419
+ {
2420
+ "epoch": 2.670711527854385,
2421
+ "grad_norm": 0.00638817623257637,
2422
+ "learning_rate": 2.910613621669356e-06,
2423
+ "loss": 0.0001,
2424
+ "num_input_tokens_seen": 62353216,
2425
+ "step": 302
2426
+ },
2427
+ {
2428
+ "epoch": 2.6795366795366795,
2429
+ "grad_norm": 0.00442032516002655,
2430
+ "learning_rate": 2.7568519280880558e-06,
2431
+ "loss": 0.0001,
2432
+ "num_input_tokens_seen": 62544128,
2433
+ "step": 303
2434
+ },
2435
+ {
2436
+ "epoch": 2.688361831218974,
2437
+ "grad_norm": 0.008686737157404423,
2438
+ "learning_rate": 2.607147524689829e-06,
2439
+ "loss": 0.0004,
2440
+ "num_input_tokens_seen": 62752688,
2441
+ "step": 304
2442
+ },
2443
+ {
2444
+ "epoch": 2.6971869829012687,
2445
+ "grad_norm": 0.0059651597402989864,
2446
+ "learning_rate": 2.4615132682429374e-06,
2447
+ "loss": 0.0001,
2448
+ "num_input_tokens_seen": 62963296,
2449
+ "step": 305
2450
+ },
2451
+ {
2452
+ "epoch": 2.7060121345835633,
2453
+ "grad_norm": 0.0056177834048867226,
2454
+ "learning_rate": 2.3199616659672354e-06,
2455
+ "loss": 0.0002,
2456
+ "num_input_tokens_seen": 63161904,
2457
+ "step": 306
2458
+ },
2459
+ {
2460
+ "epoch": 2.714837286265858,
2461
+ "grad_norm": 0.0029979923274368048,
2462
+ "learning_rate": 2.182504874460006e-06,
2463
+ "loss": 0.0001,
2464
+ "num_input_tokens_seen": 63365744,
2465
+ "step": 307
2466
+ },
2467
+ {
2468
+ "epoch": 2.7236624379481524,
2469
+ "grad_norm": 0.004314000252634287,
2470
+ "learning_rate": 2.049154698651989e-06,
2471
+ "loss": 0.0001,
2472
+ "num_input_tokens_seen": 63571808,
2473
+ "step": 308
2474
+ },
2475
+ {
2476
+ "epoch": 2.7324875896304466,
2477
+ "grad_norm": 0.006837273947894573,
2478
+ "learning_rate": 1.919922590793549e-06,
2479
+ "loss": 0.0001,
2480
+ "num_input_tokens_seen": 63768960,
2481
+ "step": 309
2482
+ },
2483
+ {
2484
+ "epoch": 2.741312741312741,
2485
+ "grad_norm": 0.0037646403070539236,
2486
+ "learning_rate": 1.7948196494711188e-06,
2487
+ "loss": 0.0001,
2488
+ "num_input_tokens_seen": 63979648,
2489
+ "step": 310
2490
+ },
2491
+ {
2492
+ "epoch": 2.7501378929950357,
2493
+ "grad_norm": 0.0031723175197839737,
2494
+ "learning_rate": 1.6738566186540627e-06,
2495
+ "loss": 0.0001,
2496
+ "num_input_tokens_seen": 64189712,
2497
+ "step": 311
2498
+ },
2499
+ {
2500
+ "epoch": 2.7589630446773303,
2501
+ "grad_norm": 0.005477920174598694,
2502
+ "learning_rate": 1.5570438867719694e-06,
2503
+ "loss": 0.0001,
2504
+ "num_input_tokens_seen": 64400624,
2505
+ "step": 312
2506
+ },
2507
+ {
2508
+ "epoch": 2.767788196359625,
2509
+ "grad_norm": 0.006315939594060183,
2510
+ "learning_rate": 1.4443914858224938e-06,
2511
+ "loss": 0.0001,
2512
+ "num_input_tokens_seen": 64626320,
2513
+ "step": 313
2514
+ },
2515
+ {
2516
+ "epoch": 2.7766133480419195,
2517
+ "grad_norm": 0.004753002431243658,
2518
+ "learning_rate": 1.3359090905097848e-06,
2519
+ "loss": 0.0001,
2520
+ "num_input_tokens_seen": 64826480,
2521
+ "step": 314
2522
+ },
2523
+ {
2524
+ "epoch": 2.785438499724214,
2525
+ "grad_norm": 0.00812880601733923,
2526
+ "learning_rate": 1.2316060174136002e-06,
2527
+ "loss": 0.0003,
2528
+ "num_input_tokens_seen": 65031984,
2529
+ "step": 315
2530
+ },
2531
+ {
2532
+ "epoch": 2.7942636514065087,
2533
+ "grad_norm": 0.0029212606605142355,
2534
+ "learning_rate": 1.1314912241892183e-06,
2535
+ "loss": 0.0001,
2536
+ "num_input_tokens_seen": 65239456,
2537
+ "step": 316
2538
+ },
2539
+ {
2540
+ "epoch": 2.8030888030888033,
2541
+ "grad_norm": 0.006850802339613438,
2542
+ "learning_rate": 1.0355733087981378e-06,
2543
+ "loss": 0.0002,
2544
+ "num_input_tokens_seen": 65433888,
2545
+ "step": 317
2546
+ },
2547
+ {
2548
+ "epoch": 2.8119139547710974,
2549
+ "grad_norm": 0.0020711093675345182,
2550
+ "learning_rate": 9.43860508769645e-07,
2551
+ "loss": 0.0001,
2552
+ "num_input_tokens_seen": 65638288,
2553
+ "step": 318
2554
+ },
2555
+ {
2556
+ "epoch": 2.8207391064533924,
2557
+ "grad_norm": 0.004868640564382076,
2558
+ "learning_rate": 8.563607004934193e-07,
2559
+ "loss": 0.0002,
2560
+ "num_input_tokens_seen": 65855952,
2561
+ "step": 319
2562
+ },
2563
+ {
2564
+ "epoch": 2.8295642581356866,
2565
+ "grad_norm": 0.006297328509390354,
2566
+ "learning_rate": 7.730813985430407e-07,
2567
+ "loss": 0.0002,
2568
+ "num_input_tokens_seen": 66070192,
2569
+ "step": 320
2570
+ },
2571
+ {
2572
+ "epoch": 2.838389409817981,
2573
+ "grad_norm": 0.0036759376525878906,
2574
+ "learning_rate": 6.940297550306896e-07,
2575
+ "loss": 0.0001,
2576
+ "num_input_tokens_seen": 66283808,
2577
+ "step": 321
2578
+ },
2579
+ {
2580
+ "epoch": 2.8472145615002757,
2581
+ "grad_norm": 0.0120092136785388,
2582
+ "learning_rate": 6.192125589928821e-07,
2583
+ "loss": 0.0002,
2584
+ "num_input_tokens_seen": 66507776,
2585
+ "step": 322
2586
+ },
2587
+ {
2588
+ "epoch": 2.8560397131825703,
2589
+ "grad_norm": 0.005414010491222143,
2590
+ "learning_rate": 5.486362358074094e-07,
2591
+ "loss": 0.0002,
2592
+ "num_input_tokens_seen": 66708320,
2593
+ "step": 323
2594
+ },
2595
+ {
2596
+ "epoch": 2.864864864864865,
2597
+ "grad_norm": 0.007992051541805267,
2598
+ "learning_rate": 4.823068466415615e-07,
2599
+ "loss": 0.0001,
2600
+ "num_input_tokens_seen": 66910032,
2601
+ "step": 324
2602
+ },
2603
+ {
2604
+ "epoch": 2.8736900165471595,
2605
+ "grad_norm": 0.006493248511105776,
2606
+ "learning_rate": 4.202300879315446e-07,
2607
+ "loss": 0.0001,
2608
+ "num_input_tokens_seen": 67112784,
2609
+ "step": 325
2610
+ },
2611
+ {
2612
+ "epoch": 2.882515168229454,
2613
+ "grad_norm": 0.004381334874778986,
2614
+ "learning_rate": 3.624112908932942e-07,
2615
+ "loss": 0.0001,
2616
+ "num_input_tokens_seen": 67306464,
2617
+ "step": 326
2618
+ },
2619
+ {
2620
+ "epoch": 2.8913403199117482,
2621
+ "grad_norm": 0.00577085604891181,
2622
+ "learning_rate": 3.088554210646133e-07,
2623
+ "loss": 0.0001,
2624
+ "num_input_tokens_seen": 67504720,
2625
+ "step": 327
2626
+ },
2627
+ {
2628
+ "epoch": 2.9001654715940433,
2629
+ "grad_norm": 0.003793071024119854,
2630
+ "learning_rate": 2.595670778787196e-07,
2631
+ "loss": 0.0001,
2632
+ "num_input_tokens_seen": 67694048,
2633
+ "step": 328
2634
+ },
2635
+ {
2636
+ "epoch": 2.9089906232763374,
2637
+ "grad_norm": 0.00835067592561245,
2638
+ "learning_rate": 2.1455049426926666e-07,
2639
+ "loss": 0.0002,
2640
+ "num_input_tokens_seen": 67895008,
2641
+ "step": 329
2642
+ },
2643
+ {
2644
+ "epoch": 2.917815774958632,
2645
+ "grad_norm": 0.005372443702071905,
2646
+ "learning_rate": 1.7380953630678488e-07,
2647
+ "loss": 0.0001,
2648
+ "num_input_tokens_seen": 68093168,
2649
+ "step": 330
2650
+ },
2651
+ {
2652
+ "epoch": 2.9266409266409266,
2653
+ "grad_norm": 0.010219305753707886,
2654
+ "learning_rate": 1.373477028666803e-07,
2655
+ "loss": 0.0002,
2656
+ "num_input_tokens_seen": 68305568,
2657
+ "step": 331
2658
+ },
2659
+ {
2660
+ "epoch": 2.935466078323221,
2661
+ "grad_norm": 0.0038206197787076235,
2662
+ "learning_rate": 1.0516812532873621e-07,
2663
+ "loss": 0.0001,
2664
+ "num_input_tokens_seen": 68506384,
2665
+ "step": 332
2666
+ },
2667
+ {
2668
+ "epoch": 2.9442912300055157,
2669
+ "grad_norm": 0.007432411424815655,
2670
+ "learning_rate": 7.727356730820035e-08,
2671
+ "loss": 0.0002,
2672
+ "num_input_tokens_seen": 68716160,
2673
+ "step": 333
2674
+ },
2675
+ {
2676
+ "epoch": 2.9531163816878103,
2677
+ "grad_norm": 0.004036502446979284,
2678
+ "learning_rate": 5.3666424418413744e-08,
2679
+ "loss": 0.0001,
2680
+ "num_input_tokens_seen": 68918048,
2681
+ "step": 334
2682
+ },
2683
+ {
2684
+ "epoch": 2.961941533370105,
2685
+ "grad_norm": 0.0045955548994243145,
2686
+ "learning_rate": 3.4348724065119685e-08,
2687
+ "loss": 0.0001,
2688
+ "num_input_tokens_seen": 69129152,
2689
+ "step": 335
2690
+ },
2691
+ {
2692
+ "epoch": 2.9707666850523995,
2693
+ "grad_norm": 0.012164157815277576,
2694
+ "learning_rate": 1.9322125272297488e-08,
2695
+ "loss": 0.0003,
2696
+ "num_input_tokens_seen": 69328576,
2697
+ "step": 336
2698
+ },
2699
+ {
2700
+ "epoch": 2.979591836734694,
2701
+ "grad_norm": 0.0029640356078743935,
2702
+ "learning_rate": 8.587918539726402e-09,
2703
+ "loss": 0.0001,
2704
+ "num_input_tokens_seen": 69537232,
2705
+ "step": 337
2706
+ },
2707
+ {
2708
+ "epoch": 2.988416988416988,
2709
+ "grad_norm": 0.005239939782768488,
2710
+ "learning_rate": 2.1470257321298813e-09,
2711
+ "loss": 0.0001,
2712
+ "num_input_tokens_seen": 69761008,
2713
+ "step": 338
2714
+ },
2715
+ {
2716
+ "epoch": 2.997242140099283,
2717
+ "grad_norm": 0.0060053626075387,
2718
+ "learning_rate": 0.0,
2719
+ "loss": 0.0001,
2720
+ "num_input_tokens_seen": 69953200,
2721
+ "step": 339
2722
+ },
2723
+ {
2724
+ "epoch": 2.997242140099283,
2725
+ "num_input_tokens_seen": 69953200,
2726
+ "step": 339,
2727
+ "total_flos": 2.976146663409713e+18,
2728
+ "train_loss": 0.004280612113766934,
2729
+ "train_runtime": 8852.4475,
2730
+ "train_samples_per_second": 4.914,
2731
+ "train_steps_per_second": 0.038
2732
+ }
2733
+ ],
2734
+ "logging_steps": 1,
2735
+ "max_steps": 339,
2736
+ "num_input_tokens_seen": 69953200,
2737
+ "num_train_epochs": 3,
2738
+ "save_steps": 100,
2739
+ "stateful_callbacks": {
2740
+ "TrainerControl": {
2741
+ "args": {
2742
+ "should_epoch_stop": false,
2743
+ "should_evaluate": false,
2744
+ "should_log": false,
2745
+ "should_save": true,
2746
+ "should_training_stop": true
2747
+ },
2748
+ "attributes": {}
2749
+ }
2750
+ },
2751
+ "total_flos": 2.976146663409713e+18,
2752
+ "train_batch_size": 2,
2753
+ "trial_name": null,
2754
+ "trial_params": null
2755
+ }
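The log entries above are the tail of trainer_state.json: the HF Trainer keeps each logged step as a dict in a log_history list and appends one final summary entry (total_flos, train_loss, train_runtime, throughput) when training stops. A minimal sketch for reproducing a curve like training_loss.png from this file, assuming matplotlib is available (the output filename is illustrative):

```python
import json
import matplotlib.pyplot as plt

# trainer_state.json as uploaded in this commit.
path = "plan/sft-qwen2.5-7b-instruct-graph-planning-bs128/trainer_state.json"
with open(path) as f:
    state = json.load(f)

# Every logging step contributes one dict with a "loss" key;
# the closing summary entry has "train_loss" instead and is skipped here.
entries = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in entries]
losses = [e["loss"] for e in entries]

plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.savefig("training_loss_reproduced.png")
```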
plan/sft-qwen2.5-7b-instruct-graph-planning-bs128/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:96fd9d2ed84b9224ea0980ce9d54347d209b8b7db550f9e136d47533c595945b
3
+ size 5688
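training_args.bin is tracked with Git LFS, so the repository stores only the three-line pointer above (spec URL, sha256 oid, byte size) while the 5688-byte binary lives in LFS storage. A standard-library sketch for checking a fetched copy against that pointer:

```python
import hashlib

# oid copied verbatim from the LFS pointer above.
EXPECTED_SHA256 = "96fd9d2ed84b9224ea0980ce9d54347d209b8b7db550f9e136d47533c595945b"

with open("plan/sft-qwen2.5-7b-instruct-graph-planning-bs128/training_args.bin", "rb") as f:
    data = f.read()

# If git-lfs was not installed at checkout, this file is still the ~130-byte
# pointer text, so both the size and the digest checks below will fail.
assert len(data) == 5688 and hashlib.sha256(data).hexdigest() == EXPECTED_SHA256
print("training_args.bin matches its LFS pointer")
```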
plan/sft-qwen2.5-7b-instruct-graph-planning-bs128/training_args.yaml ADDED
@@ -0,0 +1,33 @@
1
+ bf16: true
2
+ cutoff_len: 4096
3
+ dataset: graph_planning_train
4
+ dataset_dir: data
5
+ ddp_timeout: 180000000
6
+ do_train: true
7
+ finetuning_type: lora
8
+ flash_attn: auto
9
+ gradient_accumulation_steps: 16
10
+ include_num_input_tokens_seen: true
11
+ learning_rate: 0.0001
12
+ logging_steps: 1
13
+ lora_alpha: 16
14
+ lora_dropout: 0
15
+ lora_rank: 8
16
+ lora_target: all
17
+ lr_scheduler_type: cosine
18
+ max_grad_norm: 1.0
19
+ max_samples: 100000
20
+ model_name_or_path: /nas/shared/ma4agi/model/Qwen2.5-7B-Instruct
21
+ num_train_epochs: 3.0
22
+ optim: adamw_torch
23
+ output_dir: saves/Qwen2.5-7B-Instruct/lora/sft-qwen2.5-7b-instruct-graph-planning-bs128
24
+ packing: false
25
+ per_device_train_batch_size: 2
26
+ plot_loss: true
27
+ preprocessing_num_workers: 16
28
+ report_to: none
29
+ save_steps: 100
30
+ stage: sft
31
+ template: qwen
32
+ trust_remote_code: true
33
+ warmup_steps: 0
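These are the LLaMA-Factory arguments the run was launched with; the same YAML can normally be replayed through the project's CLI (llamafactory-cli train <yaml>). Note that the bs128 suffix in the run name is the effective batch size, not a per-device setting: 2 samples per device × 16 gradient-accumulation steps × the number of data-parallel processes. A small sketch of that arithmetic, where the process count of 4 is inferred from the run name rather than stored in this file (pyyaml is an assumed dependency):

```python
import yaml  # pyyaml, assumed available

with open("plan/sft-qwen2.5-7b-instruct-graph-planning-bs128/training_args.yaml") as f:
    args = yaml.safe_load(f)

WORLD_SIZE = 4  # assumption: inferred from the "-bs128" run name, not from the YAML

effective_bs = (
    args["per_device_train_batch_size"]    # 2
    * args["gradient_accumulation_steps"]  # 16
    * WORLD_SIZE                           # 4
)
print(effective_bs)  # 128
```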
plan/sft-qwen2.5-7b-instruct-graph-planning-bs128/training_loss.png ADDED
plan/sft-qwen2.5-7b-instruct-graph-planning-bs128/vocab.json ADDED
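vocab.json is too large to render as an inline diff, but it rounds out the tokenizer files shipped next to the adapter, so the directory can be loaded as a standalone tokenizer. A minimal sketch, assuming the upload also contains the usual companion files (tokenizer_config.json, tokenizer.json) that transformers expects:

```python
from transformers import AutoTokenizer

# Loads the Qwen tokenizer straight from the adapter directory;
# no base-model checkpoint is needed for tokenization alone.
tok = AutoTokenizer.from_pretrained("plan/sft-qwen2.5-7b-instruct-graph-planning-bs128")
print(tok("graph planning test")["input_ids"])
```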