{
"name": "root",
"gauges": {
"BensonV7.Policy.Entropy.mean": {
"value": 1.3791288137435913,
"min": 1.3791288137435913,
"max": 2.187758684158325,
"count": 24
},
"BensonV7.Policy.Entropy.sum": {
"value": 13763.705078125,
"min": 13763.705078125,
"max": 22402.6484375,
"count": 24
},
"BensonV7.Step.mean": {
"value": 239991.0,
"min": 9984.0,
"max": 239991.0,
"count": 24
},
"BensonV7.Step.sum": {
"value": 239991.0,
"min": 9984.0,
"max": 239991.0,
"count": 24
},
"BensonV7.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4123384356498718,
"min": -0.17652811110019684,
"max": 0.4123384356498718,
"count": 24
},
"BensonV7.Policy.ExtrinsicValueEstimate.sum": {
"value": 75.45793151855469,
"min": -43.4259147644043,
"max": 75.45793151855469,
"count": 24
},
"BensonV7.Environment.EpisodeLength.mean": {
"value": 208.6595744680851,
"min": 79.0,
"max": 208.6595744680851,
"count": 24
},
"BensonV7.Environment.EpisodeLength.sum": {
"value": 9807.0,
"min": 8690.0,
"max": 10688.0,
"count": 24
},
"BensonV7.Self-play.ELO.mean": {
"value": 115.6026867741647,
"min": 90.04764891469252,
"max": 1141.217976657289,
"count": 24
},
"BensonV7.Self-play.ELO.sum": {
"value": 5433.326278385741,
"min": 5433.326278385741,
"max": 136946.1571988747,
"count": 24
},
"BensonV7.Environment.CumulativeReward.mean": {
"value": 2.9085576262137773,
"min": -0.11774942415853053,
"max": 2.9085576262137773,
"count": 24
},
"BensonV7.Environment.CumulativeReward.sum": {
"value": 133.79365080583375,
"min": -15.189675716450438,
"max": 133.79365080583375,
"count": 24
},
"BensonV7.Policy.ExtrinsicReward.mean": {
"value": 2.9085576262137773,
"min": -0.11774942415853053,
"max": 2.9085576262137773,
"count": 24
},
"BensonV7.Policy.ExtrinsicReward.sum": {
"value": 133.79365080583375,
"min": -15.189675716450438,
"max": 133.79365080583375,
"count": 24
},
"BensonV7.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 24
},
"BensonV7.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 24
},
"BensonV7.Losses.PolicyLoss.mean": {
"value": 0.11305173472296136,
"min": 0.0902305781915467,
"max": 0.1175793781897086,
"count": 23
},
"BensonV7.Losses.PolicyLoss.sum": {
"value": 0.11305173472296136,
"min": 0.0902305781915467,
"max": 0.1175793781897086,
"count": 23
},
"BensonV7.Losses.ValueLoss.mean": {
"value": 0.02714227997847023,
"min": 2.293270521482928e-05,
"max": 0.05294226645212173,
"count": 23
},
"BensonV7.Losses.ValueLoss.sum": {
"value": 0.02714227997847023,
"min": 2.293270521482928e-05,
"max": 0.05294226645212173,
"count": 23
},
"BensonV7.Policy.LearningRate.mean": {
"value": 0.00029641668119443997,
"min": 0.00029641668119443997,
"max": 0.00029984400005200003,
"count": 23
},
"BensonV7.Policy.LearningRate.sum": {
"value": 0.00029641668119443997,
"min": 0.00029641668119443997,
"max": 0.00029984400005200003,
"count": 23
},
"BensonV7.Policy.Epsilon.mean": {
"value": 0.19880556000000005,
"min": 0.19880556000000005,
"max": 0.19994800000000001,
"count": 23
},
"BensonV7.Policy.Epsilon.sum": {
"value": 0.19880556000000005,
"min": 0.19880556000000005,
"max": 0.19994800000000001,
"count": 23
},
"BensonV7.Policy.Beta.mean": {
"value": 0.049402899444000004,
"min": 0.049402899444000004,
"max": 0.049974005200000005,
"count": 23
},
"BensonV7.Policy.Beta.sum": {
"value": 0.049402899444000004,
"min": 0.049402899444000004,
"max": 0.049974005200000005,
"count": 23
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1713854668",
"python_version": "3.9.13 (tags/v3.9.13:6de2ca5, May 17 2022, 16:36:42) [MSC v.1929 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\noahk\\Documents\\Unity projects\\Racesm\\.venv\\Scripts\\mlagents-learn config/BensonV7.yaml --run-id=BensonV7 --force",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.2+cu118",
"numpy_version": "1.21.2",
"end_time_seconds": "1713855170"
},
"total": 502.6965699,
"count": 1,
"self": 0.004939400000012029,
"children": {
"run_training.setup": {
"total": 0.07646299999999995,
"count": 1,
"self": 0.07646299999999995
},
"TrainerController.start_learning": {
"total": 502.6151675,
"count": 1,
"self": 0.5009666999978322,
"children": {
"TrainerController._reset_env": {
"total": 5.93716190000001,
"count": 3,
"self": 5.93716190000001
},
"TrainerController.advance": {
"total": 496.02484980000213,
"count": 25454,
"self": 0.4162786999982018,
"children": {
"env_step": {
"total": 306.42174130000774,
"count": 25454,
"self": 212.3873840000059,
"children": {
"SubprocessEnvManager._take_step": {
"total": 93.72146940000326,
"count": 25454,
"self": 1.258011400004591,
"children": {
"TorchPolicy.evaluate": {
"total": 92.46345799999867,
"count": 24958,
"self": 92.46345799999867
}
}
},
"workers": {
"total": 0.3128878999985645,
"count": 25454,
"self": 0.0,
"children": {
"worker_root": {
"total": 493.2314424000014,
"count": 25454,
"is_parallel": true,
"self": 306.6950511000011,
"children": {
"steps_from_proto": {
"total": 0.0016319000000137862,
"count": 3,
"is_parallel": true,
"self": 0.0005697000000228769,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010621999999909093,
"count": 18,
"is_parallel": true,
"self": 0.0010621999999909093
}
}
},
"UnityEnvironment.step": {
"total": 186.53475940000033,
"count": 25454,
"is_parallel": true,
"self": 3.401311300002959,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3.6382145000039445,
"count": 25454,
"is_parallel": true,
"self": 3.6382145000039445
},
"communicator.exchange": {
"total": 169.24416609999997,
"count": 25454,
"is_parallel": true,
"self": 169.24416609999997
},
"steps_from_proto": {
"total": 10.25106749999345,
"count": 25454,
"is_parallel": true,
"self": 3.835975299991518,
"children": {
"_process_rank_one_or_two_observation": {
"total": 6.415092200001932,
"count": 152724,
"is_parallel": true,
"self": 6.415092200001932
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 189.18682979999616,
"count": 25454,
"self": 1.4634836999935033,
"children": {
"process_trajectory": {
"total": 26.51501530000261,
"count": 25454,
"self": 26.51501530000261
},
"_update_policy": {
"total": 161.20833080000006,
"count": 24,
"self": 32.78496999999564,
"children": {
"TorchPPOOptimizer.update": {
"total": 128.42336080000442,
"count": 14628,
"self": 128.42336080000442
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.999999749903509e-07,
"count": 1,
"self": 8.999999749903509e-07
},
"TrainerController._save_models": {
"total": 0.15218820000001188,
"count": 1,
"self": 0.007047300000010637,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14514090000000124,
"count": 1,
"self": 0.14514090000000124
}
}
}
}
}
}
}