jccj committed
Commit 450d74c · verified · 1 Parent(s): 9a6a204

Upload folder using huggingface_hub

pretrained_model/config.json ADDED
@@ -0,0 +1,83 @@
{
    "type": "smolvla",
    "n_obs_steps": 1,
    "normalization_mapping": {
        "VISUAL": "IDENTITY",
        "STATE": "MEAN_STD",
        "ACTION": "MEAN_STD"
    },
    "input_features": {
        "observation.state": {"type": "STATE", "shape": [6]},
        "observation.images.top": {"type": "VISUAL", "shape": [3, 480, 640]},
        "observation.images.wrist": {"type": "VISUAL", "shape": [3, 480, 640]}
    },
    "output_features": {
        "action": {"type": "ACTION", "shape": [6]}
    },
    "device": "cuda",
    "use_amp": false,
    "chunk_size": 50,
    "n_action_steps": 50,
    "max_state_dim": 32,
    "max_action_dim": 32,
    "resize_imgs_with_padding": [512, 512],
    "empty_cameras": 0,
    "adapt_to_pi_aloha": false,
    "use_delta_joint_actions_aloha": false,
    "tokenizer_max_length": 48,
    "num_steps": 10,
    "use_cache": true,
    "freeze_vision_encoder": true,
    "train_expert_only": true,
    "train_state_proj": true,
    "optimizer_lr": 0.0001,
    "optimizer_betas": [0.9, 0.95],
    "optimizer_eps": 1e-08,
    "optimizer_weight_decay": 1e-10,
    "optimizer_grad_clip_norm": 10.0,
    "scheduler_warmup_steps": 1000,
    "scheduler_decay_steps": 30000,
    "scheduler_decay_lr": 2.5e-06,
    "vlm_model_name": "HuggingFaceTB/SmolVLM2-500M-Video-Instruct",
    "load_vlm_weights": true,
    "add_image_special_tokens": false,
    "attention_mode": "cross_attn",
    "prefix_length": 0,
    "pad_language_to": "max_length",
    "num_expert_layers": 0,
    "num_vlm_layers": 16,
    "self_attn_every_n_layers": 2,
    "expert_width_multiplier": 0.75,
    "min_period": 0.004,
    "max_period": 4.0
}
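This config fully specifies the checkpoint's I/O contract: two 3x480x640 RGB cameras plus a 6-dimensional joint state as inputs, and 6-dimensional actions predicted in chunks of 50, on top of the frozen SmolVLM2-500M backbone with only the action expert and state projection trained. A minimal sketch for inspecting it (standard library only; the local path is an assumption, e.g. after cloning the repo):

    # Inspect the uploaded policy config; the relative path assumes a local checkout.
    import json
    from pathlib import Path

    cfg = json.loads(Path("pretrained_model/config.json").read_text())

    assert cfg["type"] == "smolvla"
    # Two RGB camera streams and a 6-dim state in, a 6-dim action chunk out.
    for name, feat in cfg["input_features"].items():
        print(name, feat["type"], feat["shape"])
    print("action shape:", cfg["output_features"]["action"]["shape"])
    print("chunk size / action steps:", cfg["chunk_size"], cfg["n_action_steps"])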
pretrained_model/model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:750b4c717cdfe55d440a5cddb55b1f2c495a772320bc33ca37ab3fb1e208975f
size 906713296
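The weights themselves are stored through Git LFS, so the diff shows only a three-line pointer: the LFS spec version, the blob's SHA-256, and its size (906,713,296 bytes, roughly 907 MB). A hedged sketch of fetching and loading them; the repo id is an assumption, since the commit view does not name it:

    # Resolve the LFS pointer via the Hub and load the tensors.
    from huggingface_hub import hf_hub_download
    from safetensors.torch import load_file

    REPO_ID = "jccj/<model-repo>"  # hypothetical: substitute the actual model repo id
    weights_path = hf_hub_download(repo_id=REPO_ID,
                                   filename="pretrained_model/model.safetensors")

    state_dict = load_file(weights_path)  # ~906 MB of tensors, per the pointer's `size`
    print(len(state_dict), "tensors")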
pretrained_model/train_config.json ADDED
@@ -0,0 +1,195 @@
{
    "dataset": {
        "repo_id": "jccj/shape_matching2",
        "root": null,
        "episodes": null,
        "image_transforms": {
            "enable": false,
            "max_num_transforms": 3,
            "random_order": false,
            "tfs": {
                "brightness": {
                    "weight": 1.0,
                    "type": "ColorJitter",
                    "kwargs": {"brightness": [0.8, 1.2]}
                },
                "contrast": {
                    "weight": 1.0,
                    "type": "ColorJitter",
                    "kwargs": {"contrast": [0.8, 1.2]}
                },
                "saturation": {
                    "weight": 1.0,
                    "type": "ColorJitter",
                    "kwargs": {"saturation": [0.5, 1.5]}
                },
                "hue": {
                    "weight": 1.0,
                    "type": "ColorJitter",
                    "kwargs": {"hue": [-0.05, 0.05]}
                },
                "sharpness": {
                    "weight": 1.0,
                    "type": "SharpnessJitter",
                    "kwargs": {"sharpness": [0.5, 1.5]}
                }
            }
        },
        "revision": null,
        "use_imagenet_stats": true,
        "video_backend": "torchcodec"
    },
    "env": null,
    "policy": {
        "type": "smolvla",
        "n_obs_steps": 1,
        "normalization_mapping": {
            "VISUAL": "IDENTITY",
            "STATE": "MEAN_STD",
            "ACTION": "MEAN_STD"
        },
        "input_features": {
            "observation.state": {"type": "STATE", "shape": [6]},
            "observation.images.top": {"type": "VISUAL", "shape": [3, 480, 640]},
            "observation.images.wrist": {"type": "VISUAL", "shape": [3, 480, 640]}
        },
        "output_features": {
            "action": {"type": "ACTION", "shape": [6]}
        },
        "device": "cuda",
        "use_amp": false,
        "chunk_size": 50,
        "n_action_steps": 50,
        "max_state_dim": 32,
        "max_action_dim": 32,
        "resize_imgs_with_padding": [512, 512],
        "empty_cameras": 0,
        "adapt_to_pi_aloha": false,
        "use_delta_joint_actions_aloha": false,
        "tokenizer_max_length": 48,
        "num_steps": 10,
        "use_cache": true,
        "freeze_vision_encoder": true,
        "train_expert_only": true,
        "train_state_proj": true,
        "optimizer_lr": 0.0001,
        "optimizer_betas": [0.9, 0.95],
        "optimizer_eps": 1e-08,
        "optimizer_weight_decay": 1e-10,
        "optimizer_grad_clip_norm": 10.0,
        "scheduler_warmup_steps": 1000,
        "scheduler_decay_steps": 30000,
        "scheduler_decay_lr": 2.5e-06,
        "vlm_model_name": "HuggingFaceTB/SmolVLM2-500M-Video-Instruct",
        "load_vlm_weights": true,
        "add_image_special_tokens": false,
        "attention_mode": "cross_attn",
        "prefix_length": 0,
        "pad_language_to": "max_length",
        "num_expert_layers": 0,
        "num_vlm_layers": 16,
        "self_attn_every_n_layers": 2,
        "expert_width_multiplier": 0.75,
        "min_period": 0.004,
        "max_period": 4.0
    },
    "output_dir": "outputs/train/2025-06-14/21-17-47_smolvla",
    "job_name": "smolvla",
    "resume": false,
    "seed": 1000,
    "num_workers": 4,
    "batch_size": 64,
    "steps": 200000,
    "eval_freq": 20000,
    "log_freq": 200,
    "save_checkpoint": true,
    "save_freq": 5000,
    "use_policy_training_preset": true,
    "optimizer": {
        "type": "adamw",
        "lr": 0.0001,
        "weight_decay": 1e-10,
        "grad_clip_norm": 10.0,
        "betas": [0.9, 0.95],
        "eps": 1e-08
    },
    "scheduler": {
        "type": "cosine_decay_with_warmup",
        "num_warmup_steps": 1000,
        "num_decay_steps": 30000,
        "peak_lr": 0.0001,
        "decay_lr": 2.5e-06
    },
    "eval": {
        "n_episodes": 50,
        "batch_size": 50,
        "use_async_envs": false
    },
    "wandb": {
        "enable": true,
        "disable_artifact": false,
        "project": "lerobot",
        "entity": null,
        "notes": null,
        "run_id": "72z7065z",
        "mode": null
    }
}
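train_config.json repeats the policy block inside the full run description: training on jccj/shape_matching2 with batch size 64 for 200,000 steps, checkpoints every 5,000 steps, AdamW with a cosine-decay-with-warmup schedule, and logging to the lerobot Weights & Biases project. The image_transforms block defines five jitters but is disabled ("enable": false), so none were applied in this run. For reference, a rough torchvision approximation of the four colour jitters; it is illustrative only, since LeRobot samples at most max_num_transforms of them per image and its SharpnessJitter has no direct torchvision counterpart:

    # Approximation of the (disabled) colour jitters from `image_transforms.tfs`.
    # Applies all four in sequence, unlike LeRobot's random selection of up to 3.
    from torchvision import transforms

    color_jitter = transforms.Compose([
        transforms.ColorJitter(brightness=(0.8, 1.2)),
        transforms.ColorJitter(contrast=(0.8, 1.2)),
        transforms.ColorJitter(saturation=(0.5, 1.5)),
        transforms.ColorJitter(hue=(-0.05, 0.05)),
    ])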
training_state/optimizer_param_groups.json ADDED
@@ -0,0 +1,527 @@
[
    {
        "lr": 2.6875000000000013e-05,
        "betas": [0.9, 0.95],
        "eps": 1e-08,
        "weight_decay": 1e-10,
        "amsgrad": false,
        "maximize": false,
        "foreach": null,
        "capturable": false,
        "differentiable": false,
        "fused": null,
        "decoupled_weight_decay": true,
        "initial_lr": 0.0001,
        "params": [
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
            10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
            20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
            30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
            40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
            50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
            60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
            70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
            80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
            90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
            100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
            110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
            120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
            130, 131, 132, 133, 134, 135, 136, 137, 138, 139,
            140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
            150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
            160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
            170, 171, 172, 173, 174, 175, 176, 177, 178, 179,
            180, 181, 182, 183, 184, 185, 186, 187, 188, 189,
            190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
            200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
            210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
            220, 221, 222, 223, 224, 225, 226, 227, 228, 229,
            230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
            240, 241, 242, 243, 244, 245, 246, 247, 248, 249,
            250, 251, 252, 253, 254, 255, 256, 257, 258, 259,
            260, 261, 262, 263, 264, 265, 266, 267, 268, 269,
            270, 271, 272, 273, 274, 275, 276, 277, 278, 279,
            280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
            290, 291, 292, 293, 294, 295, 296, 297, 298, 299,
            300, 301, 302, 303, 304, 305, 306, 307, 308, 309,
            310, 311, 312, 313, 314, 315, 316, 317, 318, 319,
            320, 321, 322, 323, 324, 325, 326, 327, 328, 329,
            330, 331, 332, 333, 334, 335, 336, 337, 338, 339,
            340, 341, 342, 343, 344, 345, 346, 347, 348, 349,
            350, 351, 352, 353, 354, 355, 356, 357, 358, 359,
            360, 361, 362, 363, 364, 365, 366, 367, 368, 369,
            370, 371, 372, 373, 374, 375, 376, 377, 378, 379,
            380, 381, 382, 383, 384, 385, 386, 387, 388, 389,
            390, 391, 392, 393, 394, 395, 396, 397, 398, 399,
            400, 401, 402, 403, 404, 405, 406, 407, 408, 409,
            410, 411, 412, 413, 414, 415, 416, 417, 418, 419,
            420, 421, 422, 423, 424, 425, 426, 427, 428, 429,
            430, 431, 432, 433, 434, 435, 436, 437, 438, 439,
            440, 441, 442, 443, 444, 445, 446, 447, 448, 449,
            450, 451, 452, 453, 454, 455, 456, 457, 458, 459,
            460, 461, 462, 463, 464, 465, 466, 467, 468, 469,
            470, 471, 472, 473, 474, 475, 476, 477, 478, 479,
            480, 481, 482, 483, 484, 485, 486, 487, 488, 489,
            490, 491, 492, 493, 494, 495, 496, 497, 498, 499,
            500, 501, 502, 503, 504, 505
        ]
    }
]
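optimizer_param_groups.json serializes the single AdamW parameter group: the hyperparameters from the training preset, the live learning rate at the checkpointed step, and the indices of the 506 parameter tensors (0 through 505) being updated, consistent with freeze_vision_encoder and train_expert_only leaving only the action expert and state projection trainable. A small sketch of how such a group is produced, using a stand-in module rather than the real policy:

    # How a param group like the one above comes out of torch.optim.AdamW.
    import torch

    model = torch.nn.Linear(4, 4)  # stand-in for the trainable SmolVLA parameters
    opt = torch.optim.AdamW(model.parameters(), lr=1e-4, betas=(0.9, 0.95),
                            eps=1e-8, weight_decay=1e-10)

    group = opt.param_groups[0]
    print(group["lr"], group["betas"], group["eps"], group["weight_decay"])
    # The serialized file additionally records "initial_lr" (added once a LambdaLR-style
    # scheduler wraps the optimizer) and the live "lr" at the checkpointed step.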
training_state/optimizer_state.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:344400c0a4f3bfa26fed54565fefe59acc700af6a44e545b53be1557515a98be
size 412659164
training_state/rng_state.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8d56e0a41de5675f9552ce0896b63722dd75efbccc0f950498994565740bf3c8
size 15708
training_state/scheduler_state.json ADDED
@@ -0,0 +1,14 @@
{
    "base_lrs": [0.0001],
    "last_epoch": 20000,
    "_step_count": 20001,
    "_get_lr_called_within_step": false,
    "_last_lr": [2.6875000000000013e-05],
    "lr_lambdas": [null]
}
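The stored "_last_lr" of about 2.6875e-05 at step 20000 is consistent with a cosine decay from peak_lr toward decay_lr over the 30,000 decay steps configured above. A worked check, under one plausible reading of the schedule (the exact formula in the library may differ in detail, but this reading reproduces the stored value):

    # Reconstruct the learning rate at the checkpointed step, assuming
    # lr(t) = decay_lr + (peak_lr - decay_lr) * 0.5 * (1 + cos(pi * t / num_decay_steps))
    # after the 1000-step warmup.
    import math

    peak_lr, decay_lr, decay_steps, step = 1e-4, 2.5e-6, 30_000, 20_000
    lr = decay_lr + (peak_lr - decay_lr) * 0.5 * (1 + math.cos(math.pi * step / decay_steps))
    print(lr)  # ~2.6875e-05, matching "_last_lr" above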
training_state/training_step.json ADDED
@@ -0,0 +1,3 @@
{
    "step": 20000
}
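Finally, training_step.json pins the checkpoint to step 20,000 of the configured 200,000, matching the scheduler's last_epoch and the save_freq of 5,000. A small sketch that cross-checks the two files before resuming a run; paths assume a local copy of the training_state/ folder:

    # Verify the checkpointed step agrees with the scheduler state.
    import json

    with open("training_state/training_step.json") as f:
        step = json.load(f)["step"]
    with open("training_state/scheduler_state.json") as f:
        sched = json.load(f)

    assert step == sched["last_epoch"] == 20_000
    print("resuming would continue from step", step, "at lr", sched["_last_lr"][0])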