{
  "name": "root",
  "gauges": {
    "HotrodTrack2.Policy.Entropy.mean": { "value": 1.3739535808563232, "min": 1.343945860862732, "max": 1.7702805995941162, "count": 207 },
    "HotrodTrack2.Policy.Entropy.sum": { "value": 13662.5947265625, "min": 4367.5244140625, "max": 17683.396484375, "count": 207 },
    "HotrodTrack2.Step.mean": { "value": 3089971.0, "min": 1029958.0, "max": 3089971.0, "count": 207 },
    "HotrodTrack2.Step.sum": { "value": 3089971.0, "min": 1029958.0, "max": 3089971.0, "count": 207 },
    "HotrodTrack2.Policy.ExtrinsicValueEstimate.mean": { "value": 5.3286333084106445, "min": 3.038632869720459, "max": 5.346072673797607, "count": 207 },
    "HotrodTrack2.Policy.ExtrinsicValueEstimate.sum": { "value": 911.1962890625, "min": 121.12471008300781, "max": 924.87060546875, "count": 207 },
    "HotrodTrack2.Environment.EpisodeLength.mean": { "value": 387.61538461538464, "min": 184.0, "max": 490.7, "count": 207 },
    "HotrodTrack2.Environment.EpisodeLength.sum": { "value": 10078.0, "min": 184.0, "max": 11250.0, "count": 207 },
    "HotrodTrack2.Environment.CumulativeReward.mean": { "value": 23.568863910981097, "min": 2.0535954236984253, "max": 24.461618214845657, "count": 207 },
    "HotrodTrack2.Environment.CumulativeReward.sum": { "value": 612.7904616855085, "min": 2.0535954236984253, "max": 642.6902520656586, "count": 207 },
    "HotrodTrack2.Policy.ExtrinsicReward.mean": { "value": 23.568863910981097, "min": 2.0535954236984253, "max": 24.461618214845657, "count": 207 },
    "HotrodTrack2.Policy.ExtrinsicReward.sum": { "value": 612.7904616855085, "min": 2.0535954236984253, "max": 642.6902520656586, "count": 207 },
    "HotrodTrack2.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 207 },
    "HotrodTrack2.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 207 },
    "HotrodTrack2.Losses.PolicyLoss.mean": { "value": 0.10941327199726972, "min": 0.10290333305045565, "max": 0.12389937681484328, "count": 200 },
    "HotrodTrack2.Losses.PolicyLoss.sum": { "value": 0.10941327199726972, "min": 0.10290333305045565, "max": 0.12389937681484328, "count": 200 },
    "HotrodTrack2.Losses.ValueLoss.mean": { "value": 0.2582107081040134, "min": 0.2562730988023447, "max": 0.5628310199583473, "count": 200 },
    "HotrodTrack2.Losses.ValueLoss.sum": { "value": 0.2582107081040134, "min": 0.2562730988023447, "max": 0.5628310199583473, "count": 200 },
    "HotrodTrack2.Policy.LearningRate.mean": { "value": 0.00011505420164861999, "min": 0.00011505420164861999, "max": 0.00023772074075976003, "count": 200 },
    "HotrodTrack2.Policy.LearningRate.sum": { "value": 0.00011505420164861999, "min": 0.00011505420164861999, "max": 0.00023772074075976003, "count": 200 },
    "HotrodTrack2.Policy.Epsilon.mean": { "value": 0.13835138000000002, "min": 0.13835138000000002, "max": 0.17924023999999997, "count": 200 },
    "HotrodTrack2.Policy.Epsilon.sum": { "value": 0.13835138000000002, "min": 0.13835138000000002, "max": 0.17924023999999997, "count": 200 },
    "HotrodTrack2.Policy.Beta.mean": { "value": 0.019181854862, "min": 0.019181854862, "max": 0.039622195976000016, "count": 200 },
    "HotrodTrack2.Policy.Beta.sum": { "value": 0.019181854862, "min": 0.019181854862, "max": 0.039622195976000016, "count": 200 }
  },
  "metadata": {
    "timer_format_version": "0.1.0",
    "start_time_seconds": "1713970583",
    "python_version": "3.9.13 (tags/v3.9.13:6de2ca5, May 17 2022, 16:36:42) [MSC v.1929 64 bit (AMD64)]",
    "command_line_arguments": "C:\\Users\\Noah\\Documents\\Unity\\Racesm\\.venv\\Scripts\\mlagents-learn config/HotrodTrack2.yaml --run-id=HotrodTrack2 --resume --env=c:\\Users\\Noah\\Desktop\\Hotrod Track2 Part2\\Racesm_L.exe --no-graphics --num-envs=1",
    "mlagents_version": "0.30.0",
    "mlagents_envs_version": "0.30.0",
    "communication_protocol_version": "1.5.0",
    "pytorch_version": "2.2.2+cpu",
    "numpy_version": "1.21.2",
    "end_time_seconds": "1713974089"
  },
  "total": 3505.7378077999997,
  "count": 1,
  "self": 0.2688706999997521,
  "children": {
    "run_training.setup": { "total": 0.15523819999999988, "count": 1, "self": 0.15523819999999988 },
    "TrainerController.start_learning": {
      "total": 3505.3136989,
      "count": 1,
      "self": 6.376300800093759,
      "children": {
        "TrainerController._reset_env": { "total": 5.420014, "count": 1, "self": 5.420014 },
        "TrainerController.advance": {
          "total": 3493.4059160999063,
          "count": 262171,
          "self": 6.655915299993012,
          "children": {
            "env_step": {
              "total": 2010.8352156999388,
              "count": 262171,
              "self": 1575.0194805000299,
              "children": {
                "SubprocessEnvManager._take_step": {
                  "total": 431.44365799991607,
                  "count": 262171,
                  "self": 17.06194839988899,
                  "children": {
                    "TorchPolicy.evaluate": { "total": 414.3817096000271, "count": 258147, "self": 414.3817096000271 }
                  }
                },
                "workers": {
                  "total": 4.372077199992827,
                  "count": 262171,
                  "self": 0.0,
                  "children": {
                    "worker_root": {
                      "total": 3491.463849499954,
                      "count": 262171,
                      "is_parallel": true,
                      "self": 2275.6867950999176,
                      "children": {
                        "steps_from_proto": {
                          "total": 0.0010446999999995654,
                          "count": 1,
                          "is_parallel": true,
                          "self": 0.0004099000000010733,
                          "children": {
                            "_process_rank_one_or_two_observation": { "total": 0.0006347999999984921, "count": 6, "is_parallel": true, "self": 0.0006347999999984921 }
                          }
                        },
                        "UnityEnvironment.step": {
                          "total": 1215.7760097000366,
                          "count": 262171,
                          "is_parallel": true,
                          "self": 42.335275000149295,
                          "children": {
                            "UnityEnvironment._generate_step_input": { "total": 46.282555199985666, "count": 262171, "is_parallel": true, "self": 46.282555199985666 },
                            "communicator.exchange": { "total": 992.2986982999263, "count": 262171, "is_parallel": true, "self": 992.2986982999263 },
                            "steps_from_proto": {
                              "total": 134.85948119997522,
                              "count": 262171,
                              "is_parallel": true,
                              "self": 51.5979506000719,
                              "children": {
                                "_process_rank_one_or_two_observation": { "total": 83.26153059990332, "count": 1573026, "is_parallel": true, "self": 83.26153059990332 }
                              }
                            }
                          }
                        }
                      }
                    }
                  }
                }
              }
            },
            "trainer_advance": {
              "total": 1475.9147850999743,
              "count": 262171,
              "self": 9.46174870001596,
              "children": {
                "process_trajectory": {
                  "total": 198.53451909996,
                  "count": 262171,
                  "self": 198.22039599996003,
                  "children": {
                    "RLTrainer._checkpoint": { "total": 0.3141230999999607, "count": 4, "self": 0.3141230999999607 }
                  }
                },
                "_update_policy": {
                  "total": 1267.9185172999985,
                  "count": 201,
                  "self": 389.2174777000539,
                  "children": {
                    "TorchPPOOptimizer.update": { "total": 878.7010395999446, "count": 123037, "self": 878.7010395999446 }
                  }
                }
              }
            }
          }
        },
        "trainer_threads": { "total": 1.8999999156221747e-06, "count": 1, "self": 1.8999999156221747e-06 },
        "TrainerController._save_models": {
          "total": 0.11146610000014334,
          "count": 1,
          "self": 0.01714480000009644,
          "children": {
            "RLTrainer._checkpoint": { "total": 0.0943213000000469, "count": 1, "self": 0.0943213000000469 }
          }
        }
      }
    }
  }
}