{
"name": "root",
"gauges": {
"RacecarTrack3.Policy.Entropy.mean": {
"value": 1.4796384572982788,
"min": 1.4796384572982788,
"max": 1.8011406660079956,
"count": 221
},
"RacecarTrack3.Policy.Entropy.sum": {
"value": 14772.7109375,
"min": 14566.03125,
"max": 18166.3515625,
"count": 221
},
"RacecarTrack3.Step.mean": {
"value": 2909976.0,
"min": 709978.0,
"max": 2909976.0,
"count": 221
},
"RacecarTrack3.Step.sum": {
"value": 2909976.0,
"min": 709978.0,
"max": 2909976.0,
"count": 221
},
"RacecarTrack3.Policy.ExtrinsicValueEstimate.mean": {
"value": 5.946944236755371,
"min": 4.228921413421631,
"max": 6.16779088973999,
"count": 221
},
"RacecarTrack3.Policy.ExtrinsicValueEstimate.sum": {
"value": 1005.0335693359375,
"min": 588.3933715820312,
"max": 1042.356689453125,
"count": 221
},
"RacecarTrack3.Environment.EpisodeLength.mean": {
"value": 358.48148148148147,
"min": 358.48148148148147,
"max": 560.6666666666666,
"count": 221
},
"RacecarTrack3.Environment.EpisodeLength.sum": {
"value": 9679.0,
"min": 5922.0,
"max": 12111.0,
"count": 221
},
"RacecarTrack3.Environment.CumulativeReward.mean": {
"value": 26.18548946934087,
"min": 22.35757621848542,
"max": 26.52224977622525,
"count": 221
},
"RacecarTrack3.Environment.CumulativeReward.sum": {
"value": 733.1937051415443,
"min": 260.3252835869789,
"max": 762.9765789071098,
"count": 221
},
"RacecarTrack3.Policy.ExtrinsicReward.mean": {
"value": 26.18548946934087,
"min": 22.35757621848542,
"max": 26.52224977622525,
"count": 221
},
"RacecarTrack3.Policy.ExtrinsicReward.sum": {
"value": 733.1937051415443,
"min": 260.3252835869789,
"max": 762.9765789071098,
"count": 221
},
"RacecarTrack3.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 221
},
"RacecarTrack3.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 221
},
"RacecarTrack3.Losses.PolicyLoss.mean": {
"value": 0.11885081223693461,
"min": 0.10189401735358336,
"max": 0.12181745231500826,
"count": 214
},
"RacecarTrack3.Losses.PolicyLoss.sum": {
"value": 0.11885081223693461,
"min": 0.10189401735358336,
"max": 0.12181745231500826,
"count": 214
},
"RacecarTrack3.Losses.ValueLoss.mean": {
"value": 0.36536040784624535,
"min": 0.33433397351726285,
"max": 0.5712468318338316,
"count": 214
},
"RacecarTrack3.Losses.ValueLoss.sum": {
"value": 0.36536040784624535,
"min": 0.33433397351726285,
"max": 0.5712468318338316,
"count": 214
},
"RacecarTrack3.Policy.LearningRate.mean": {
"value": 0.00012591011802998,
"min": 0.00012591011802998,
"max": 0.00025725997424668,
"count": 214
},
"RacecarTrack3.Policy.LearningRate.sum": {
"value": 0.00012591011802998,
"min": 0.00012591011802998,
"max": 0.00025725997424668,
"count": 214
},
"RacecarTrack3.Policy.Epsilon.mean": {
"value": 0.14197002,
"min": 0.14197002,
"max": 0.18575331999999997,
"count": 214
},
"RacecarTrack3.Policy.Epsilon.sum": {
"value": 0.14197002,
"min": 0.14197002,
"max": 0.18575331999999997,
"count": 214
},
"RacecarTrack3.Policy.Beta.mean": {
"value": 0.020990812997999996,
"min": 0.020990812997999996,
"max": 0.04287808466800001,
"count": 214
},
"RacecarTrack3.Policy.Beta.sum": {
"value": 0.020990812997999996,
"min": 0.020990812997999996,
"max": 0.04287808466800001,
"count": 214
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1714042410",
"python_version": "3.9.13 (tags/v3.9.13:6de2ca5, May 17 2022, 16:36:42) [MSC v.1929 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\SukkertoppenDDU\\AppData\\Local\\Programs\\Python\\Python39\\Scripts\\mlagents-learn config/LLL.yaml --run-id=RacecarTrack3 --resume --env=Builds\\RacecarTrack3 part2\\Racesm_L.exe",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1714045652"
},
"total": 3242.2243571,
"count": 1,
"self": 0.5594621000000188,
"children": {
"run_training.setup": {
"total": 0.06194040000000012,
"count": 1,
"self": 0.06194040000000012
},
"TrainerController.start_learning": {
"total": 3241.6029546,
"count": 1,
"self": 5.0653349000012895,
"children": {
"TrainerController._reset_env": {
"total": 7.000736900000001,
"count": 1,
"self": 7.000736900000001
},
"TrainerController.advance": {
"total": 3229.474093199999,
"count": 280270,
"self": 5.0109806001269135,
"children": {
"env_step": {
"total": 1852.8190169999448,
"count": 280270,
"self": 1423.6688292000272,
"children": {
"SubprocessEnvManager._take_step": {
"total": 426.0273667999402,
"count": 280270,
"self": 16.044380499978388,
"children": {
"TorchPolicy.evaluate": {
"total": 409.9829862999618,
"count": 276237,
"self": 409.9829862999618
}
}
},
"workers": {
"total": 3.122820999977412,
"count": 280270,
"self": 0.0,
"children": {
"worker_root": {
"total": 3228.490231800005,
"count": 280270,
"is_parallel": true,
"self": 2079.59585109995,
"children": {
"steps_from_proto": {
"total": 0.0018254999999998134,
"count": 1,
"is_parallel": true,
"self": 0.0007273999999997116,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010981000000001018,
"count": 6,
"is_parallel": true,
"self": 0.0010981000000001018
}
}
},
"UnityEnvironment.step": {
"total": 1148.892555200055,
"count": 280270,
"is_parallel": true,
"self": 33.10466110006723,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 55.70622139990634,
"count": 280270,
"is_parallel": true,
"self": 55.70622139990634
},
"communicator.exchange": {
"total": 961.1853786000501,
"count": 280270,
"is_parallel": true,
"self": 961.1853786000501
},
"steps_from_proto": {
"total": 98.89629410003138,
"count": 280270,
"is_parallel": true,
"self": 37.84166759999372,
"children": {
"_process_rank_one_or_two_observation": {
"total": 61.054626500037664,
"count": 1681620,
"is_parallel": true,
"self": 61.054626500037664
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1371.6440955999274,
"count": 280270,
"self": 7.236616399939749,
"children": {
"process_trajectory": {
"total": 209.1686692999835,
"count": 280270,
"self": 208.8528619999833,
"children": {
"RLTrainer._checkpoint": {
"total": 0.31580730000018775,
"count": 4,
"self": 0.31580730000018775
}
}
},
"_update_policy": {
"total": 1155.238809900004,
"count": 215,
"self": 411.30441229994346,
"children": {
"TorchPPOOptimizer.update": {
"total": 743.9343976000606,
"count": 131837,
"self": 743.9343976000606
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.999998731771484e-07,
"count": 1,
"self": 6.999998731771484e-07
},
"TrainerController._save_models": {
"total": 0.06278889999975945,
"count": 1,
"self": 0.009349500000098487,
"children": {
"RLTrainer._checkpoint": {
"total": 0.053439399999660964,
"count": 1,
"self": 0.053439399999660964
}
}
}
}
}
}
}