{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9812768697738647,
"min": 0.975520670413971,
"max": 2.8683207035064697,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9358.4375,
"min": 9358.4375,
"max": 29437.57421875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.896549224853516,
"min": 0.4801107943058014,
"max": 12.896549224853516,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2514.8271484375,
"min": 93.14149475097656,
"max": 2616.41455078125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06279528794688054,
"min": 0.06279528794688054,
"max": 0.07386226462104412,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.25118115178752215,
"min": 0.25118115178752215,
"max": 0.3693113231052206,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.22950710999030693,
"min": 0.1321731962482719,
"max": 0.31209872943513534,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.9180284399612277,
"min": 0.5286927849930876,
"max": 1.5604936471756767,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.022727272727273,
"min": 3.727272727272727,
"max": 25.6,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1101.0,
"min": 164.0,
"max": 1408.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.022727272727273,
"min": 3.727272727272727,
"max": 25.6,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1101.0,
"min": 164.0,
"max": 1408.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1737037797",
"python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1737038371"
},
"total": 574.1508459500001,
"count": 1,
"self": 0.5973476280000796,
"children": {
"run_training.setup": {
"total": 0.024668802999997297,
"count": 1,
"self": 0.024668802999997297
},
"TrainerController.start_learning": {
"total": 573.528829519,
"count": 1,
"self": 0.8477829259961709,
"children": {
"TrainerController._reset_env": {
"total": 3.126226673000019,
"count": 1,
"self": 3.126226673000019
},
"TrainerController.advance": {
"total": 569.4664474300039,
"count": 18202,
"self": 0.4165862070182129,
"children": {
"env_step": {
"total": 569.0498612229857,
"count": 18202,
"self": 438.33819404099603,
"children": {
"SubprocessEnvManager._take_step": {
"total": 130.29784191700168,
"count": 18202,
"self": 2.340918872015095,
"children": {
"TorchPolicy.evaluate": {
"total": 127.95692304498658,
"count": 18202,
"self": 127.95692304498658
}
}
},
"workers": {
"total": 0.41382526498796324,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 571.5587180309965,
"count": 18202,
"is_parallel": true,
"self": 265.0254990159862,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007106636000003164,
"count": 1,
"is_parallel": true,
"self": 0.004351427000074182,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002755208999928982,
"count": 10,
"is_parallel": true,
"self": 0.002755208999928982
}
}
},
"UnityEnvironment.step": {
"total": 0.10390899800000852,
"count": 1,
"is_parallel": true,
"self": 0.0007416810000790974,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00044642699998576063,
"count": 1,
"is_parallel": true,
"self": 0.00044642699998576063
},
"communicator.exchange": {
"total": 0.09503079899997147,
"count": 1,
"is_parallel": true,
"self": 0.09503079899997147
},
"steps_from_proto": {
"total": 0.007690090999972199,
"count": 1,
"is_parallel": true,
"self": 0.005712659999971947,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019774310000002515,
"count": 10,
"is_parallel": true,
"self": 0.0019774310000002515
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 306.5332190150103,
"count": 18201,
"is_parallel": true,
"self": 14.135163399022531,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.546580039997821,
"count": 18201,
"is_parallel": true,
"self": 7.546580039997821
},
"communicator.exchange": {
"total": 240.91427740299486,
"count": 18201,
"is_parallel": true,
"self": 240.91427740299486
},
"steps_from_proto": {
"total": 43.93719817299507,
"count": 18201,
"is_parallel": true,
"self": 8.758904791992109,
"children": {
"_process_rank_one_or_two_observation": {
"total": 35.17829338100296,
"count": 182010,
"is_parallel": true,
"self": 35.17829338100296
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00019187199995940318,
"count": 1,
"self": 0.00019187199995940318,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 562.1464733190963,
"count": 789849,
"is_parallel": true,
"self": 18.365904553055202,
"children": {
"process_trajectory": {
"total": 305.4843771960413,
"count": 789849,
"is_parallel": true,
"self": 304.43697047704126,
"children": {
"RLTrainer._checkpoint": {
"total": 1.047406719000037,
"count": 4,
"is_parallel": true,
"self": 1.047406719000037
}
}
},
"_update_policy": {
"total": 238.2961915699998,
"count": 90,
"is_parallel": true,
"self": 65.36884002900115,
"children": {
"TorchPPOOptimizer.update": {
"total": 172.92735154099864,
"count": 4587,
"is_parallel": true,
"self": 172.92735154099864
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.08818061800002397,
"count": 1,
"self": 0.0013238410000440126,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08685677699997996,
"count": 1,
"self": 0.08685677699997996
}
}
}
}
}
}
}