{
"name": "root",
"gauges": {
"BensonV7.Policy.Entropy.mean": {
"value": 1.724164605140686,
"min": 1.6979351043701172,
"max": 1.9132299423217773,
"count": 114
},
"BensonV7.Policy.Entropy.sum": {
"value": 51466.3125,
"min": 40307.1640625,
"max": 59228.46484375,
"count": 114
},
"BensonV7.Step.mean": {
"value": 1979956.0,
"min": 849958.0,
"max": 1979956.0,
"count": 114
},
"BensonV7.Step.sum": {
"value": 1979956.0,
"min": 849958.0,
"max": 1979956.0,
"count": 114
},
"BensonV7.Policy.ExtrinsicValueEstimate.mean": {
"value": 4.02828311920166,
"min": 2.0664122104644775,
"max": 4.056896209716797,
"count": 114
},
"BensonV7.Policy.ExtrinsicValueEstimate.sum": {
"value": 660.638427734375,
"min": 230.64227294921875,
"max": 681.55859375,
"count": 114
},
"BensonV7.Environment.EpisodeLength.mean": {
"value": 568.3518518518518,
"min": 388.85714285714283,
"max": 749.3571428571429,
"count": 114
},
"BensonV7.Environment.EpisodeLength.sum": {
"value": 30691.0,
"min": 2722.0,
"max": 34215.0,
"count": 114
},
"BensonV7.Self-play.ELO.mean": {
"value": 1384.3959480170795,
"min": 1380.287578470765,
"max": 1480.8259227184733,
"count": 114
},
"BensonV7.Self-play.ELO.sum": {
"value": 24919.12706430743,
"min": 4442.47776815542,
"max": 30394.634561574698,
"count": 114
},
"BensonV7.Environment.CumulativeReward.mean": {
"value": 23.51508159438769,
"min": 4.317544090928277,
"max": 26.173201080504803,
"count": 114
},
"BensonV7.Environment.CumulativeReward.sum": {
"value": 423.2714686989784,
"min": 20.118093952536583,
"max": 474.76355612277985,
"count": 114
},
"BensonV7.Policy.ExtrinsicReward.mean": {
"value": 23.51508159438769,
"min": 4.317544090928277,
"max": 26.173201080504803,
"count": 114
},
"BensonV7.Policy.ExtrinsicReward.sum": {
"value": 423.2714686989784,
"min": 20.118093952536583,
"max": 474.76355612277985,
"count": 114
},
"BensonV7.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 114
},
"BensonV7.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 114
},
"BensonV7.Losses.PolicyLoss.mean": {
"value": 0.11129256490654903,
"min": 0.10404405176266453,
"max": 0.12017463006231105,
"count": 110
},
"BensonV7.Losses.PolicyLoss.sum": {
"value": 0.11129256490654903,
"min": 0.10404405176266453,
"max": 0.12017463006231105,
"count": 110
},
"BensonV7.Losses.ValueLoss.mean": {
"value": 0.3347890382678044,
"min": 0.10592283378165912,
"max": 0.45261242995417217,
"count": 110
},
"BensonV7.Losses.ValueLoss.sum": {
"value": 0.3347890382678044,
"min": 0.10592283378165912,
"max": 0.45261242995417217,
"count": 110
},
"BensonV7.Policy.LearningRate.mean": {
"value": 0.00027037953987349,
"min": 0.00027037953987349,
"max": 0.00028719582426806,
"count": 110
},
"BensonV7.Policy.LearningRate.sum": {
"value": 0.00027037953987349,
"min": 0.00027037953987349,
"max": 0.00028719582426806,
"count": 110
},
"BensonV7.Policy.Epsilon.mean": {
"value": 0.19012651000000003,
"min": 0.19012651000000003,
"max": 0.19573194,
"count": 110
},
"BensonV7.Policy.Epsilon.sum": {
"value": 0.19012651000000003,
"min": 0.19012651000000003,
"max": 0.19573194,
"count": 110
},
"BensonV7.Policy.Beta.mean": {
"value": 0.04506424234900002,
"min": 0.04506424234900002,
"max": 0.047866396806000015,
"count": 110
},
"BensonV7.Policy.Beta.sum": {
"value": 0.04506424234900002,
"min": 0.04506424234900002,
"max": 0.047866396806000015,
"count": 110
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1713876203",
"python_version": "3.9.13 (tags/v3.9.13:6de2ca5, May 17 2022, 16:36:42) [MSC v.1929 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\noahk\\Documents\\Unity projects\\Racesm\\.venv\\Scripts\\mlagents-learn config/BensonV7.yaml --run-id=BensonV7 --resume",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.2+cu118",
"numpy_version": "1.21.2",
"end_time_seconds": "1713880030"
},
"total": 3827.7623298,
"count": 1,
"self": 0.011808999999630032,
"children": {
"run_training.setup": {
"total": 0.09212290000000012,
"count": 1,
"self": 0.09212290000000012
},
"TrainerController.start_learning": {
"total": 3827.6583979,
"count": 1,
"self": 2.435547799978849,
"children": {
"TrainerController._reset_env": {
"total": 19.67768449999975,
"count": 7,
"self": 19.67768449999975
},
"TrainerController.advance": {
"total": 3805.4429475000215,
"count": 119182,
"self": 2.0891288001512294,
"children": {
"env_step": {
"total": 2832.8957301999403,
"count": 119182,
"self": 1718.9134972999607,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1112.513478499999,
"count": 119183,
"self": 16.29119710006944,
"children": {
"TorchPolicy.evaluate": {
"total": 1096.2222813999297,
"count": 344490,
"self": 1096.2222813999297
}
}
},
"workers": {
"total": 1.4687543999806074,
"count": 119181,
"self": 0.0,
"children": {
"worker_root": {
"total": 3753.157156000006,
"count": 119181,
"is_parallel": true,
"self": 2309.568257799966,
"children": {
"steps_from_proto": {
"total": 0.010590099999753022,
"count": 24,
"is_parallel": true,
"self": 0.0033023999993027076,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0072877000004503145,
"count": 240,
"is_parallel": true,
"self": 0.0072877000004503145
}
}
},
"UnityEnvironment.step": {
"total": 1443.5783081000404,
"count": 119181,
"is_parallel": true,
"self": 49.66146120013195,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 36.08897779992863,
"count": 119181,
"is_parallel": true,
"self": 36.08897779992863
},
"communicator.exchange": {
"total": 1207.7480174999691,
"count": 119181,
"is_parallel": true,
"self": 1207.7480174999691
},
"steps_from_proto": {
"total": 150.0798516000106,
"count": 357543,
"is_parallel": true,
"self": 46.51214239967524,
"children": {
"_process_rank_one_or_two_observation": {
"total": 103.56770920033537,
"count": 3575430,
"is_parallel": true,
"self": 103.56770920033537
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 970.45808849993,
"count": 119181,
"self": 17.939639899914255,
"children": {
"process_trajectory": {
"total": 147.86677610001612,
"count": 119181,
"self": 147.7127672000159,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15400890000023537,
"count": 2,
"self": 0.15400890000023537
}
}
},
"_update_policy": {
"total": 804.6516724999997,
"count": 111,
"self": 185.2384082999847,
"children": {
"TorchPPOOptimizer.update": {
"total": 619.413264200015,
"count": 68319,
"self": 619.413264200015
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.99999883788405e-07,
"count": 1,
"self": 9.99999883788405e-07
},
"TrainerController._save_models": {
"total": 0.10221709999996165,
"count": 1,
"self": 0.01906329999974332,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08315380000021833,
"count": 1,
"self": 0.08315380000021833
}
}
}
}
}
}
}