{
"name": "root",
"gauges": {
"BensonV6.1.Policy.Entropy.mean": {
"value": 0.6789544820785522,
"min": 0.6789544820785522,
"max": 1.7365357875823975,
"count": 252
},
"BensonV6.1.Policy.Entropy.sum": {
"value": 6762.38671875,
"min": 1786.3543701171875,
"max": 17327.384765625,
"count": 252
},
"BensonV6.1.Step.mean": {
"value": 2999952.0,
"min": 489988.0,
"max": 2999952.0,
"count": 252
},
"BensonV6.1.Step.sum": {
"value": 2999952.0,
"min": 489988.0,
"max": 2999952.0,
"count": 252
},
"BensonV6.1.Policy.ExtrinsicValueEstimate.mean": {
"value": 7.416819095611572,
"min": 1.214796781539917,
"max": 7.417298793792725,
"count": 252
},
"BensonV6.1.Policy.ExtrinsicValueEstimate.sum": {
"value": 1453.696533203125,
"min": 27.14661979675293,
"max": 1453.696533203125,
"count": 252
},
"BensonV6.1.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 252
},
"BensonV6.1.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 252
},
"BensonV6.1.Environment.EpisodeLength.mean": {
"value": 158.25806451612902,
"min": 156.3548387096774,
"max": 315.34375,
"count": 251
},
"BensonV6.1.Environment.EpisodeLength.sum": {
"value": 9812.0,
"min": 9135.0,
"max": 10918.0,
"count": 251
},
"BensonV6.1.Environment.CumulativeReward.mean": {
"value": 17.720831836972916,
"min": 6.600487498161294,
"max": 17.764139349177732,
"count": 251
},
"BensonV6.1.Environment.CumulativeReward.sum": {
"value": 1116.4124057292938,
"min": 235.22235931036994,
"max": 1139.6223163156537,
"count": 251
},
"BensonV6.1.Policy.ExtrinsicReward.mean": {
"value": 17.720831836972916,
"min": 6.600487498161294,
"max": 17.764139349177732,
"count": 251
},
"BensonV6.1.Policy.ExtrinsicReward.sum": {
"value": 1116.4124057292938,
"min": 235.22235931036994,
"max": 1139.6223163156537,
"count": 251
},
"BensonV6.1.Losses.PolicyLoss.mean": {
"value": 0.10757822603643585,
"min": 0.09952940423776513,
"max": 0.12089756364854064,
"count": 244
},
"BensonV6.1.Losses.PolicyLoss.sum": {
"value": 0.10757822603643585,
"min": 0.09952940423776513,
"max": 0.12089756364854064,
"count": 244
},
"BensonV6.1.Losses.ValueLoss.mean": {
"value": 0.161995082059845,
"min": 0.12143607831643365,
"max": 0.7208258142950488,
"count": 244
},
"BensonV6.1.Losses.ValueLoss.sum": {
"value": 0.161995082059845,
"min": 0.12143607831643365,
"max": 0.7208258142950488,
"count": 244
},
"BensonV6.1.Policy.LearningRate.mean": {
"value": 4.348998550666575e-07,
"min": 4.348998550666575e-07,
"max": 0.0002500703166432333,
"count": 244
},
"BensonV6.1.Policy.LearningRate.sum": {
"value": 4.348998550666575e-07,
"min": 4.348998550666575e-07,
"max": 0.0002500703166432333,
"count": 244
},
"BensonV6.1.Policy.Epsilon.mean": {
"value": 0.10014493333333332,
"min": 0.10014493333333332,
"max": 0.18335676666666667,
"count": 244
},
"BensonV6.1.Policy.Epsilon.sum": {
"value": 0.10014493333333332,
"min": 0.10014493333333332,
"max": 0.18335676666666667,
"count": 244
},
"BensonV6.1.Policy.Beta.mean": {
"value": 8.245217333333181e-05,
"min": 8.245217333333181e-05,
"max": 0.04168004765666667,
"count": 244
},
"BensonV6.1.Policy.Beta.sum": {
"value": 8.245217333333181e-05,
"min": 8.245217333333181e-05,
"max": 0.04168004765666667,
"count": 244
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1713626939",
"python_version": "3.9.13 (tags/v3.9.13:6de2ca5, May 17 2022, 16:36:42) [MSC v.1929 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\noahk\\Documents\\Unity projects\\Racesm\\.venv\\Scripts\\mlagents-learn config/BensonV6.1.yaml --run-id=BensonV6.1 --resume",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.2+cu118",
"numpy_version": "1.21.2",
"end_time_seconds": "1713631580"
},
"total": 4641.414641300001,
"count": 1,
"self": 0.005876700000953861,
"children": {
"run_training.setup": {
"total": 0.08273789999999992,
"count": 1,
"self": 0.08273789999999992
},
"TrainerController.start_learning": {
"total": 4641.3260267,
"count": 1,
"self": 4.529195499979323,
"children": {
"TrainerController._reset_env": {
"total": 10.7731008,
"count": 1,
"self": 10.7731008
},
"TrainerController.advance": {
"total": 4625.96329380002,
"count": 261576,
"self": 3.9781851998122875,
"children": {
"env_step": {
"total": 2717.0496560999586,
"count": 261576,
"self": 1678.8040917999438,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1035.1611249000011,
"count": 261576,
"self": 11.988765500048657,
"children": {
"TorchPolicy.evaluate": {
"total": 1023.1723593999525,
"count": 251122,
"self": 1023.1723593999525
}
}
},
"workers": {
"total": 3.084439400013709,
"count": 261576,
"self": 0.0,
"children": {
"worker_root": {
"total": 4625.813898699946,
"count": 261576,
"is_parallel": true,
"self": 3193.993477400112,
"children": {
"steps_from_proto": {
"total": 0.0005550000000003052,
"count": 1,
"is_parallel": true,
"self": 0.0002106000000008379,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00034439999999946735,
"count": 6,
"is_parallel": true,
"self": 0.00034439999999946735
}
}
},
"UnityEnvironment.step": {
"total": 1431.8198662998334,
"count": 261576,
"is_parallel": true,
"self": 31.458605499658688,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 34.3378812000054,
"count": 261576,
"is_parallel": true,
"self": 34.3378812000054
},
"communicator.exchange": {
"total": 1271.401098000018,
"count": 261576,
"is_parallel": true,
"self": 1271.401098000018
},
"steps_from_proto": {
"total": 94.6222816001513,
"count": 261576,
"is_parallel": true,
"self": 36.53906309968252,
"children": {
"_process_rank_one_or_two_observation": {
"total": 58.083218500468774,
"count": 1569456,
"is_parallel": true,
"self": 58.083218500468774
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1904.9354525002493,
"count": 261576,
"self": 7.898237100334654,
"children": {
"process_trajectory": {
"total": 286.0989276999167,
"count": 261576,
"self": 285.7158488999173,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3830787999994101,
"count": 6,
"self": 0.3830787999994101
}
}
},
"_update_policy": {
"total": 1610.938287699998,
"count": 244,
"self": 330.12971779998907,
"children": {
"TorchPPOOptimizer.update": {
"total": 1280.808569900009,
"count": 150024,
"self": 1280.808569900009
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.999998731771484e-07,
"count": 1,
"self": 6.999998731771484e-07
},
"TrainerController._save_models": {
"total": 0.060435899999902176,
"count": 1,
"self": 0.0057316999991599005,
"children": {
"RLTrainer._checkpoint": {
"total": 0.054704200000742276,
"count": 1,
"self": 0.054704200000742276
}
}
}
}
}
}
}