{
"name": "root",
"gauges": {
"HotrodTrack3.Policy.Entropy.mean": {
"value": 1.5406694412231445,
"min": 1.5406694412231445,
"max": 2.1569020748138428,
"count": 265
},
"HotrodTrack3.Policy.Entropy.sum": {
"value": 15308.091796875,
"min": 6912.255859375,
"max": 21810.59375,
"count": 265
},
"HotrodTrack3.Step.mean": {
"value": 2709952.0,
"min": 69941.0,
"max": 2709952.0,
"count": 265
},
"HotrodTrack3.Step.sum": {
"value": 2709952.0,
"min": 69941.0,
"max": 2709952.0,
"count": 265
},
"HotrodTrack3.Policy.ExtrinsicValueEstimate.mean": {
"value": 3.49554705619812,
"min": -0.12287396937608719,
"max": 6.631581783294678,
"count": 265
},
"HotrodTrack3.Policy.ExtrinsicValueEstimate.sum": {
"value": 569.774169921875,
"min": -20.51995277404785,
"max": 1017.1873779296875,
"count": 265
},
"HotrodTrack3.Environment.EpisodeLength.mean": {
"value": 661.625,
"min": 299.0,
"max": 1114.4444444444443,
"count": 265
},
"HotrodTrack3.Environment.EpisodeLength.sum": {
"value": 10586.0,
"min": 2392.0,
"max": 12813.0,
"count": 265
},
"HotrodTrack3.Environment.CumulativeReward.mean": {
"value": 24.86954413865169,
"min": -6.305523566174088,
"max": 26.767239019168,
"count": 265
},
"HotrodTrack3.Environment.CumulativeReward.sum": {
"value": 397.91270621842705,
"min": -201.77675411757082,
"max": 426.14645283529535,
"count": 265
},
"HotrodTrack3.Policy.ExtrinsicReward.mean": {
"value": 24.86954413865169,
"min": -6.305523566174088,
"max": 26.767239019168,
"count": 265
},
"HotrodTrack3.Policy.ExtrinsicReward.sum": {
"value": 397.91270621842705,
"min": -201.77675411757082,
"max": 426.14645283529535,
"count": 265
},
"HotrodTrack3.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 265
},
"HotrodTrack3.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 265
},
"HotrodTrack3.Losses.PolicyLoss.mean": {
"value": 0.10909264485637574,
"min": 0.09851335268712551,
"max": 0.1217805069833684,
"count": 257
},
"HotrodTrack3.Losses.PolicyLoss.sum": {
"value": 0.10909264485637574,
"min": 0.09851335268712551,
"max": 0.1217805069833684,
"count": 257
},
"HotrodTrack3.Losses.ValueLoss.mean": {
"value": 0.4344752586954008,
"min": 0.023163300876816114,
"max": 1.4580619129463561,
"count": 257
},
"HotrodTrack3.Losses.ValueLoss.sum": {
"value": 0.4344752586954008,
"min": 0.023163300876816114,
"max": 1.4580619129463561,
"count": 257
},
"HotrodTrack3.Policy.LearningRate.mean": {
"value": 0.00013741829419392,
"min": 0.00013741829419392,
"max": 0.00029535234154922,
"count": 257
},
"HotrodTrack3.Policy.LearningRate.sum": {
"value": 0.00013741829419392,
"min": 0.00013741829419392,
"max": 0.00029535234154922,
"count": 257
},
"HotrodTrack3.Policy.Epsilon.mean": {
"value": 0.14580608000000003,
"min": 0.14580608000000003,
"max": 0.19845078,
"count": 257
},
"HotrodTrack3.Policy.Epsilon.sum": {
"value": 0.14580608000000003,
"min": 0.14580608000000003,
"max": 0.19845078,
"count": 257
},
"HotrodTrack3.Policy.Beta.mean": {
"value": 0.022908459392000004,
"min": 0.022908459392000004,
"max": 0.04922554492199999,
"count": 257
},
"HotrodTrack3.Policy.Beta.sum": {
"value": 0.022908459392000004,
"min": 0.022908459392000004,
"max": 0.04922554492199999,
"count": 257
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1714048902",
"python_version": "3.9.13 (tags/v3.9.13:6de2ca5, May 17 2022, 16:36:42) [MSC v.1929 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\SukkertoppenDDU\\AppData\\Local\\Programs\\Python\\Python39\\Scripts\\mlagents-learn config/LLL.yaml --run-id=HotrodTrack3 --resume --env=Builds\\HotrodTrack3 part2\\Racesm_L.exe",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1714052572"
},
"total": 3669.941056,
"count": 1,
"self": 0.6098805000001448,
"children": {
"run_training.setup": {
"total": 0.06419149999999996,
"count": 1,
"self": 0.06419149999999996
},
"TrainerController.start_learning": {
"total": 3669.266984,
"count": 1,
"self": 6.0508188000694645,
"children": {
"TrainerController._reset_env": {
"total": 7.476565900000001,
"count": 1,
"self": 7.476565900000001
},
"TrainerController.advance": {
"total": 3655.67328719993,
"count": 333736,
"self": 5.652337700004864,
"children": {
"env_step": {
"total": 2016.253376699993,
"count": 333736,
"self": 1509.5151283000343,
"children": {
"SubprocessEnvManager._take_step": {
"total": 503.213429300019,
"count": 333736,
"self": 19.422355000068023,
"children": {
"TorchPolicy.evaluate": {
"total": 483.791074299951,
"count": 330852,
"self": 483.791074299951
}
}
},
"workers": {
"total": 3.5248190999395455,
"count": 333736,
"self": 0.0,
"children": {
"worker_root": {
"total": 3656.0975542999217,
"count": 333736,
"is_parallel": true,
"self": 2469.8470495000106,
"children": {
"steps_from_proto": {
"total": 0.001862700000000217,
"count": 1,
"is_parallel": true,
"self": 0.0006767000000005297,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011859999999996873,
"count": 6,
"is_parallel": true,
"self": 0.0011859999999996873
}
}
},
"UnityEnvironment.step": {
"total": 1186.2486420999112,
"count": 333736,
"is_parallel": true,
"self": 38.70277539986296,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 68.96651330005905,
"count": 333736,
"is_parallel": true,
"self": 68.96651330005905
},
"communicator.exchange": {
"total": 961.1352359000564,
"count": 333736,
"is_parallel": true,
"self": 961.1352359000564
},
"steps_from_proto": {
"total": 117.44411749993293,
"count": 333736,
"is_parallel": true,
"self": 45.33441769985208,
"children": {
"_process_rank_one_or_two_observation": {
"total": 72.10969980008085,
"count": 2002416,
"is_parallel": true,
"self": 72.10969980008085
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1633.7675727999322,
"count": 333736,
"self": 8.36080129990205,
"children": {
"process_trajectory": {
"total": 253.6855172000302,
"count": 333736,
"self": 253.30153870002985,
"children": {
"RLTrainer._checkpoint": {
"total": 0.38397850000035305,
"count": 5,
"self": 0.38397850000035305
}
}
},
"_update_policy": {
"total": 1371.7212542999998,
"count": 257,
"self": 473.822560900015,
"children": {
"TorchPPOOptimizer.update": {
"total": 897.8986933999848,
"count": 158169,
"self": 897.8986933999848
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.000001798791345e-07,
"count": 1,
"self": 8.000001798791345e-07
},
"TrainerController._save_models": {
"total": 0.06631130000005214,
"count": 1,
"self": 0.007919099999980972,
"children": {
"RLTrainer._checkpoint": {
"total": 0.05839220000007117,
"count": 1,
"self": 0.05839220000007117
}
}
}
}
}
}
}