{
"name": "root",
"gauges": {
"BensonV4.Policy.Entropy.mean": {
"value": 1.3508883714675903,
"min": 1.3445194959640503,
"max": 1.3667104244232178,
"count": 5
},
"BensonV4.Policy.Entropy.sum": {
"value": 13589.9375,
"min": 12181.3466796875,
"max": 13776.44140625,
"count": 5
},
"BensonV4.Step.mean": {
"value": 3759941.0,
"min": 3719953.0,
"max": 3759941.0,
"count": 5
},
"BensonV4.Step.sum": {
"value": 3759941.0,
"min": 3719953.0,
"max": 3759941.0,
"count": 5
},
"BensonV4.Policy.ExtrinsicValueEstimate.mean": {
"value": 1.5055700540542603,
"min": 1.4554778337478638,
"max": 1.5631643533706665,
"count": 5
},
"BensonV4.Policy.ExtrinsicValueEstimate.sum": {
"value": 295.09173583984375,
"min": 265.7379455566406,
"max": 299.07958984375,
"count": 5
},
"BensonV4.Environment.EpisodeLength.mean": {
"value": 151.27272727272728,
"min": 150.1076923076923,
"max": 152.33846153846153,
"count": 5
},
"BensonV4.Environment.EpisodeLength.sum": {
"value": 9984.0,
"min": 8164.0,
"max": 10019.0,
"count": 5
},
"BensonV4.Environment.CumulativeReward.mean": {
"value": 5.765378269409253,
"min": 5.6894510709322414,
"max": 5.897744768747577,
"count": 5
},
"BensonV4.Environment.CumulativeReward.sum": {
"value": 380.5149657810107,
"min": 318.47821751236916,
"max": 384.8610228523612,
"count": 5
},
"BensonV4.Policy.ExtrinsicReward.mean": {
"value": 5.765378269409253,
"min": 5.6894510709322414,
"max": 5.897744768747577,
"count": 5
},
"BensonV4.Policy.ExtrinsicReward.sum": {
"value": 380.5149657810107,
"min": 318.47821751236916,
"max": 384.8610228523612,
"count": 5
},
"BensonV4.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5
},
"BensonV4.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5
},
"BensonV4.Losses.PolicyLoss.mean": {
"value": 0.11566021161525575,
"min": 0.1076902610879454,
"max": 0.11566021161525575,
"count": 4
},
"BensonV4.Losses.PolicyLoss.sum": {
"value": 0.11566021161525575,
"min": 0.1076902610879454,
"max": 0.11566021161525575,
"count": 4
},
"BensonV4.Losses.ValueLoss.mean": {
"value": 0.04978977781373851,
"min": 0.04331555476609042,
"max": 0.04978977781373851,
"count": 4
},
"BensonV4.Losses.ValueLoss.sum": {
"value": 0.04978977781373851,
"min": 0.04331555476609042,
"max": 0.04978977781373851,
"count": 4
},
"BensonV4.Policy.LearningRate.mean": {
"value": 7.485721504761999e-05,
"min": 7.485721504761999e-05,
"max": 7.670917443029998e-05,
"count": 4
},
"BensonV4.Policy.LearningRate.sum": {
"value": 7.485721504761999e-05,
"min": 7.485721504761999e-05,
"max": 7.670917443029998e-05,
"count": 4
},
"BensonV4.Policy.Epsilon.mean": {
"value": 0.12495237999999999,
"min": 0.12495237999999999,
"max": 0.1255697,
"count": 4
},
"BensonV4.Policy.Epsilon.sum": {
"value": 0.12495237999999999,
"min": 0.12495237999999999,
"max": 0.1255697,
"count": 4
},
"BensonV4.Policy.Beta.mean": {
"value": 0.012483694761999998,
"min": 0.012483694761999998,
"max": 0.012792293029999995,
"count": 4
},
"BensonV4.Policy.Beta.sum": {
"value": 0.012483694761999998,
"min": 0.012483694761999998,
"max": 0.012792293029999995,
"count": 4
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1713539163",
"python_version": "3.9.13 (tags/v3.9.13:6de2ca5, May 17 2022, 16:36:42) [MSC v.1929 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\noahk\\Documents\\Unity projects\\ML Tutorual v2\\ml-agents\\.venv\\Scripts\\mlagents-learn config/BensonV4.yaml --run-id=BensonV4 --resume",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.2+cu118",
"numpy_version": "1.21.2",
"end_time_seconds": "1713539310"
},
"total": 146.8465324,
"count": 1,
"self": 0.0043030999999871256,
"children": {
"run_training.setup": {
"total": 0.07616970000000012,
"count": 1,
"self": 0.07616970000000012
},
"TrainerController.start_learning": {
"total": 146.7660596,
"count": 1,
"self": 0.1163909999995667,
"children": {
"TrainerController._reset_env": {
"total": 10.3584175,
"count": 1,
"self": 10.3584175
},
"TrainerController.advance": {
"total": 136.13096090000042,
"count": 6858,
"self": 0.10552590000048667,
"children": {
"env_step": {
"total": 102.59305809999964,
"count": 6858,
"self": 77.66245159999988,
"children": {
"SubprocessEnvManager._take_step": {
"total": 24.854691299999992,
"count": 6859,
"self": 0.3053916000001422,
"children": {
"TorchPolicy.evaluate": {
"total": 24.54929969999985,
"count": 6607,
"self": 24.54929969999985
}
}
},
"workers": {
"total": 0.07591519999976626,
"count": 6858,
"self": 0.0,
"children": {
"worker_root": {
"total": 120.62309139999942,
"count": 6858,
"is_parallel": true,
"self": 65.5113897999996,
"children": {
"steps_from_proto": {
"total": 0.0021694000000014313,
"count": 2,
"is_parallel": true,
"self": 0.0008578999999944159,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013115000000070154,
"count": 12,
"is_parallel": true,
"self": 0.0013115000000070154
}
}
},
"UnityEnvironment.step": {
"total": 55.10953219999982,
"count": 6858,
"is_parallel": true,
"self": 0.7230074999996319,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.7702295999999382,
"count": 6858,
"is_parallel": true,
"self": 0.7702295999999382
},
"communicator.exchange": {
"total": 51.2747549,
"count": 6858,
"is_parallel": true,
"self": 51.2747549
},
"steps_from_proto": {
"total": 2.3415402000002548,
"count": 6858,
"is_parallel": true,
"self": 0.9429467000018548,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1.3985934999984,
"count": 41148,
"is_parallel": true,
"self": 1.3985934999984
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 33.43237690000029,
"count": 6858,
"self": 0.18423370000056138,
"children": {
"process_trajectory": {
"total": 5.548114599999716,
"count": 6858,
"self": 5.548114599999716
},
"_update_policy": {
"total": 27.700028600000017,
"count": 5,
"self": 5.631023199999724,
"children": {
"TorchPPOOptimizer.update": {
"total": 22.069005400000293,
"count": 2463,
"self": 22.069005400000293
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.000000152856956e-07,
"count": 1,
"self": 7.000000152856956e-07
},
"TrainerController._save_models": {
"total": 0.16028950000000464,
"count": 1,
"self": 0.00573789999998553,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1545516000000191,
"count": 1,
"self": 0.1545516000000191
}
}
}
}
}
}
}
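
For reference, a timers dump like the one above can be inspected with a few lines of Python. The sketch below is a minimal example, not part of the run itself: it assumes the file sits at the default ML-Agents location for this run id (results/BensonV4/run_logs/timers.json is an assumption; point it at wherever your copy lives). It prints each gauge's mean/min/max and walks the timer tree to show where the roughly 146.8 s of wall-clock time recorded under "total" was spent.

import json

# Assumed path: ML-Agents normally writes this file under
# results/<run-id>/run_logs/timers.json. Adjust if your layout differs.
with open("results/BensonV4/run_logs/timers.json") as f:
    timers = json.load(f)

# Print every gauge recorded during the run
# (entropy, value estimates, episode stats, losses, learning rate, ...).
for name, gauge in timers["gauges"].items():
    print(f"{name}: mean={gauge['value']:.6g} "
          f"(min={gauge['min']:.6g}, max={gauge['max']:.6g}, "
          f"count={gauge['count']})")

# Recursively walk the timer tree and report time spent per node.
def report(node, name="root", depth=0):
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: {total:.3f}s over {count} call(s)")
    for child_name, child in node.get("children", {}).items():
        report(child, child_name, depth + 1)

report(timers)

Run against the data above, the tree report makes it easy to see, for example, that most of the time sits under TrainerController.advance and, within that, communicator.exchange (the Unity environment step) and TorchPPOOptimizer.update.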