racesm/MLAgents/results/BensonImitationV3/run_logs/timers.json
2024-04-20 14:45:47 +02:00

{
"name": "root",
"gauges": {
"BensonImitationV3.Policy.Entropy.mean": {
"value": 0.7362620830535889,
"min": 0.4023934304714203,
"max": 1.6750186681747437,
"count": 84
},
"BensonImitationV3.Policy.Entropy.sum": {
"value": 7878.00439453125,
"min": 3972.7509765625,
"max": 16761.912109375,
"count": 84
},
"BensonImitationV3.Step.mean": {
"value": 999953.0,
"min": 169993.0,
"max": 999953.0,
"count": 84
},
"BensonImitationV3.Step.sum": {
"value": 999953.0,
"min": 169993.0,
"max": 999953.0,
"count": 84
},
"BensonImitationV3.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.15762338042259216,
"min": -0.21867768466472626,
"max": 0.3222484588623047,
"count": 84
},
"BensonImitationV3.Policy.ExtrinsicValueEstimate.sum": {
"value": -25.21973991394043,
"min": -34.769752502441406,
"max": 36.416282653808594,
"count": 84
},
"BensonImitationV3.Policy.CuriosityValueEstimate.mean": {
"value": 0.059504203498363495,
"min": -0.1515316367149353,
"max": 3.198132038116455,
"count": 84
},
"BensonImitationV3.Policy.CuriosityValueEstimate.sum": {
"value": 9.520672798156738,
"min": -24.24506187438965,
"max": 508.50299072265625,
"count": 84
},
"BensonImitationV3.Policy.GailValueEstimate.mean": {
"value": 2.308720111846924,
"min": 0.7855445146560669,
"max": 30.982532501220703,
"count": 84
},
"BensonImitationV3.Policy.GailValueEstimate.sum": {
"value": 369.39520263671875,
"min": 125.68712615966797,
"max": 4926.22265625,
"count": 84
},
"BensonImitationV3.Environment.EpisodeLength.mean": {
"value": 749.0,
"min": 749.0,
"max": 1107.7777777777778,
"count": 84
},
"BensonImitationV3.Environment.EpisodeLength.sum": {
"value": 10486.0,
"min": 3998.0,
"max": 12186.0,
"count": 84
},
"BensonImitationV3.Environment.CumulativeReward.mean": {
"value": -1.466538528410288,
"min": -1.466538528410288,
"max": 1.207200152426958,
"count": 84
},
"BensonImitationV3.Environment.CumulativeReward.sum": {
"value": -19.065000869333744,
"min": -20.443001002073288,
"max": 12.36000008136034,
"count": 84
},
"BensonImitationV3.Policy.ExtrinsicReward.mean": {
"value": -1.466538528410288,
"min": -1.466538528410288,
"max": 1.207200152426958,
"count": 84
},
"BensonImitationV3.Policy.ExtrinsicReward.sum": {
"value": -19.065000869333744,
"min": -20.443001002073288,
"max": 12.36000008136034,
"count": 84
},
"BensonImitationV3.Policy.CuriosityReward.mean": {
"value": 0.27002759718407804,
"min": 0.0,
"max": 51.99450251981616,
"count": 84
},
"BensonImitationV3.Policy.CuriosityReward.sum": {
"value": 3.5103587633930147,
"min": 0.0,
"max": 519.9450251981616,
"count": 84
},
"BensonImitationV3.Policy.GailReward.mean": {
"value": 7.096627946083363,
"min": 7.096627946083363,
"max": 312.76846281290057,
"count": 84
},
"BensonImitationV3.Policy.GailReward.sum": {
"value": 92.25616329908371,
"min": 92.25616329908371,
"max": 3127.6846281290054,
"count": 84
},
"BensonImitationV3.Losses.PolicyLoss.mean": {
"value": 0.11416770667925508,
"min": 0.10628626191813033,
"max": 0.12596408734975964,
"count": 84
},
"BensonImitationV3.Losses.PolicyLoss.sum": {
"value": 0.4566708267170203,
"min": 0.1150214869889573,
"max": 0.4799000932967309,
"count": 84
},
"BensonImitationV3.Losses.ValueLoss.mean": {
"value": 0.1504570970164174,
"min": 0.023458264732923364,
"max": 7.6387338708948205,
"count": 84
},
"BensonImitationV3.Losses.ValueLoss.sum": {
"value": 0.6018283880656696,
"min": 0.07037479419877009,
"max": 22.91620161268446,
"count": 84
},
"BensonImitationV3.Policy.LearningRate.mean": {
"value": 1.560399479899995e-06,
"min": 1.560399479899995e-06,
"max": 0.00024907891697369995,
"count": 84
},
"BensonImitationV3.Policy.LearningRate.sum": {
"value": 6.24159791959998e-06,
"min": 6.24159791959998e-06,
"max": 0.0009655095781635001,
"count": 84
},
"BensonImitationV3.Policy.Epsilon.mean": {
"value": 0.09999999999999998,
"min": 0.09999999999999998,
"max": 0.09999999999999998,
"count": 84
},
"BensonImitationV3.Policy.Epsilon.sum": {
"value": 0.3999999999999999,
"min": 0.09999999999999998,
"max": 0.3999999999999999,
"count": 84
},
"BensonImitationV3.Policy.Beta.mean": {
"value": 0.00026999798999999914,
"min": 0.00026999798999999914,
"max": 0.04151484737,
"count": 84
},
"BensonImitationV3.Policy.Beta.sum": {
"value": 0.0010799919599999966,
"min": 0.0010799919599999966,
"max": 0.16092606635,
"count": 84
},
"BensonImitationV3.Losses.CuriosityForwardLoss.mean": {
"value": 0.01479993625149599,
"min": 0.01479993625149599,
"max": 0.11420892757122164,
"count": 84
},
"BensonImitationV3.Losses.CuriosityForwardLoss.sum": {
"value": 0.05919974500598396,
"min": 0.051338312531633123,
"max": 0.3426267827136649,
"count": 84
},
"BensonImitationV3.Losses.CuriosityInverseLoss.mean": {
"value": 0.5536654406924392,
"min": 0.24887636022944215,
"max": 1.2337175917846186,
"count": 84
},
"BensonImitationV3.Losses.CuriosityInverseLoss.sum": {
"value": 2.2146617627697567,
"min": 0.7527279080731458,
"max": 4.676302831187057,
"count": 84
},
"BensonImitationV3.Policy.GAILPolicyEstimate.mean": {
"value": 0.018920360066892343,
"min": 0.018920360066892343,
"max": 0.2624264025301845,
"count": 84
},
"BensonImitationV3.Policy.GAILPolicyEstimate.sum": {
"value": 0.07568144026756937,
"min": 0.060770991945364436,
"max": 0.7980163626311745,
"count": 84
},
"BensonImitationV3.Policy.GAILExpertEstimate.mean": {
"value": 0.9472016285026008,
"min": 0.7497235457102458,
"max": 0.9516197779573812,
"count": 84
},
"BensonImitationV3.Policy.GAILExpertEstimate.sum": {
"value": 3.7888065140104032,
"min": 0.7595125705003738,
"max": 3.7941520373677946,
"count": 84
},
"BensonImitationV3.Losses.GAILLoss.mean": {
"value": 0.08147498036994308,
"min": 0.08009267514927791,
"max": 0.7282829950805064,
"count": 84
},
"BensonImitationV3.Losses.GAILLoss.sum": {
"value": 0.3258999214797723,
"min": 0.24027802544783372,
"max": 2.360661158223513,
"count": 84
},
"BensonImitationV3.Policy.GAILGradMagLoss.mean": {
"value": 0.027322364259826763,
"min": 0.027322364259826763,
"max": 0.0784072881805952,
"count": 84
},
"BensonImitationV3.Policy.GAILGradMagLoss.sum": {
"value": 0.10928945703930705,
"min": 0.0687877264753398,
"max": 0.2958798513992033,
"count": 84
},
"BensonImitationV3.Losses.PretrainingLoss.mean": {
"value": 0.14462123439324642,
"min": 0.14462123439324642,
"max": 0.26747753791519174,
"count": 84
},
"BensonImitationV3.Losses.PretrainingLoss.sum": {
"value": 0.5784849375729857,
"min": 0.26248274427029045,
"max": 1.069910151660767,
"count": 84
},
"BensonImitationV3.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 84
},
"BensonImitationV3.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 84
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1713004745",
"python_version": "3.9.13 (tags/v3.9.13:6de2ca5, May 17 2022, 16:36:42) [MSC v.1929 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\noahk\\Documents\\Unity projects\\ML Tutorual v2\\ml-agents\\.venv\\Scripts\\mlagents-learn config/BensonImitationV3.yaml --run-id=BensonImitationV3 --resume --env=C:\\Users\\noahk\\Desktop\\BensonV3\\Racesm_L.exe --no-graphics --num-envs=1",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.2+cu118",
"numpy_version": "1.21.2",
"end_time_seconds": "1713011201"
},
"total": 6455.327749,
"count": 1,
"self": 0.6092883999999685,
"children": {
"run_training.setup": {
"total": 0.09464389999999989,
"count": 1,
"self": 0.09464389999999989
},
"TrainerController.start_learning": {
"total": 6454.6238167,
"count": 1,
"self": 16.26712229984514,
"children": {
"TrainerController._reset_env": {
"total": 18.2295927,
"count": 1,
"self": 4.401290500000002,
"children": {
"demo_to_buffer": {
"total": 13.8283022,
"count": 2,
"self": 0.0004267999999996164,
"children": {
"load_demonstration": {
"total": 0.18638369999999949,
"count": 2,
"self": 0.18282569999999954,
"children": {
"read_file": {
"total": 0.00355799999999995,
"count": 2,
"self": 0.00355799999999995
}
}
},
"make_demo_buffer": {
"total": 13.6414917,
"count": 2,
"self": 1.856581499999951,
"children": {
"steps_from_proto": {
"total": 11.784910200000049,
"count": 61304,
"self": 6.0699884999999085,
"children": {
"_process_rank_one_or_two_observation": {
"total": 5.71492170000014,
"count": 367824,
"self": 5.71492170000014
}
}
}
}
}
}
}
}
},
"TrainerController.advance": {
"total": 6420.056406700156,
"count": 834019,
"self": 7.670406600033857,
"children": {
"env_step": {
"total": 6412.386000100122,
"count": 834019,
"self": 3058.5475695995337,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3345.4360913005335,
"count": 834019,
"self": 33.51401700062752,
"children": {
"TorchPolicy.evaluate": {
"total": 3311.922074299906,
"count": 834019,
"self": 3311.922074299906
}
}
},
"workers": {
"total": 8.402339200054833,
"count": 834019,
"self": 0.0,
"children": {
"worker_root": {
"total": 6432.202858400451,
"count": 834019,
"is_parallel": true,
"self": 5154.227634000432,
"children": {
"steps_from_proto": {
"total": 0.0017536000000002439,
"count": 1,
"is_parallel": true,
"self": 0.0007776000000006,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009759999999996438,
"count": 6,
"is_parallel": true,
"self": 0.0009759999999996438
}
}
},
"UnityEnvironment.step": {
"total": 1277.973470800019,
"count": 834019,
"is_parallel": true,
"self": 61.485407299839835,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 58.45837599982281,
"count": 834019,
"is_parallel": true,
"self": 58.45837599982281
},
"communicator.exchange": {
"total": 904.4249526003125,
"count": 834019,
"is_parallel": true,
"self": 904.4249526003125
},
"steps_from_proto": {
"total": 253.60473490004375,
"count": 834019,
"is_parallel": true,
"self": 125.21319250040426,
"children": {
"_process_rank_one_or_two_observation": {
"total": 128.3915423996395,
"count": 5004114,
"is_parallel": true,
"self": 128.3915423996395
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.889999970851932e-05,
"count": 1,
"self": 3.889999970851932e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 6434.756297900024,
"count": 172038,
"is_parallel": true,
"self": 5.68249489999198,
"children": {
"process_trajectory": {
"total": 2625.780229700033,
"count": 172038,
"is_parallel": true,
"self": 2625.112627400034,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6676022999989186,
"count": 9,
"is_parallel": true,
"self": 0.6676022999989186
}
}
},
"_update_policy": {
"total": 3803.293573299999,
"count": 273,
"is_parallel": true,
"self": 2901.946123899993,
"children": {
"TorchPPOOptimizer.update": {
"total": 901.3474494000063,
"count": 49650,
"is_parallel": true,
"self": 901.3474494000063
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.07065609999972366,
"count": 1,
"self": 0.00708059999942634,
"children": {
"RLTrainer._checkpoint": {
"total": 0.06357550000029732,
"count": 1,
"self": 0.06357550000029732
}
}
}
}
}
}
}
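
A minimal sketch (not part of the ML-Agents output) of how this timers.json could be inspected: it loads the file, prints each gauge's last value with its running min/max/count, and lists the top-level timer totals. The relative file path is an assumption; point it at wherever the run logs actually live.

# Illustrative sketch only; the path below is an assumption.
import json

with open("results/BensonImitationV3/run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge records the most recent value plus running min/max
# over however many summary writes occurred ("count").
for name, g in sorted(timers["gauges"].items()):
    print(f"{name}: value={g['value']:.4g} min={g['min']:.4g} "
          f"max={g['max']:.4g} count={g['count']}")

# Total wall-clock time of the run and its immediate timer children.
print(f"\ntotal: {timers['total']:.1f} s")
for child, node in timers["children"].items():
    print(f"  {child}: {node['total']:.1f} s ({node['count']} calls)")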