2024-04-24 07:11:18 +02:00
parent de0a1c842a
commit 65c678e706
55 changed files with 592 additions and 922 deletions


@@ -1,270 +1,112 @@
{
"name": "root",
"gauges": {
"BensonV7.Policy.Entropy.mean": {
"value": 1.724164605140686,
"min": 1.6979351043701172,
"max": 1.9132299423217773,
"count": 114
},
"BensonV7.Policy.Entropy.sum": {
"value": 51466.3125,
"min": 40307.1640625,
"max": 59228.46484375,
"count": 114
},
"BensonV7.Step.mean": {
"value": 1979956.0,
"min": 849958.0,
"max": 1979956.0,
"count": 114
},
"BensonV7.Step.sum": {
"value": 1979956.0,
"min": 849958.0,
"max": 1979956.0,
"count": 114
},
"BensonV7.Policy.ExtrinsicValueEstimate.mean": {
"value": 4.02828311920166,
"min": 2.0664122104644775,
"max": 4.056896209716797,
"count": 114
},
"BensonV7.Policy.ExtrinsicValueEstimate.sum": {
"value": 660.638427734375,
"min": 230.64227294921875,
"max": 681.55859375,
"count": 114
},
"BensonV7.Environment.EpisodeLength.mean": {
"value": 568.3518518518518,
"min": 388.85714285714283,
"max": 749.3571428571429,
"count": 114
},
"BensonV7.Environment.EpisodeLength.sum": {
"value": 30691.0,
"min": 2722.0,
"max": 34215.0,
"count": 114
},
"BensonV7.Self-play.ELO.mean": {
"value": 1384.3959480170795,
"min": 1380.287578470765,
"max": 1480.8259227184733,
"count": 114
},
"BensonV7.Self-play.ELO.sum": {
"value": 24919.12706430743,
"min": 4442.47776815542,
"max": 30394.634561574698,
"count": 114
},
"BensonV7.Environment.CumulativeReward.mean": {
"value": 23.51508159438769,
"min": 4.317544090928277,
"max": 26.173201080504803,
"count": 114
},
"BensonV7.Environment.CumulativeReward.sum": {
"value": 423.2714686989784,
"min": 20.118093952536583,
"max": 474.76355612277985,
"count": 114
},
"BensonV7.Policy.ExtrinsicReward.mean": {
"value": 23.51508159438769,
"min": 4.317544090928277,
"max": 26.173201080504803,
"count": 114
},
"BensonV7.Policy.ExtrinsicReward.sum": {
"value": 423.2714686989784,
"min": 20.118093952536583,
"max": 474.76355612277985,
"count": 114
},
"BensonV7.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 114
},
"BensonV7.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 114
},
"BensonV7.Losses.PolicyLoss.mean": {
"value": 0.11129256490654903,
"min": 0.10404405176266453,
"max": 0.12017463006231105,
"count": 110
},
"BensonV7.Losses.PolicyLoss.sum": {
"value": 0.11129256490654903,
"min": 0.10404405176266453,
"max": 0.12017463006231105,
"count": 110
},
"BensonV7.Losses.ValueLoss.mean": {
"value": 0.3347890382678044,
"min": 0.10592283378165912,
"max": 0.45261242995417217,
"count": 110
},
"BensonV7.Losses.ValueLoss.sum": {
"value": 0.3347890382678044,
"min": 0.10592283378165912,
"max": 0.45261242995417217,
"count": 110
},
"BensonV7.Policy.LearningRate.mean": {
"value": 0.00027037953987349,
"min": 0.00027037953987349,
"max": 0.00028719582426806,
"count": 110
},
"BensonV7.Policy.LearningRate.sum": {
"value": 0.00027037953987349,
"min": 0.00027037953987349,
"max": 0.00028719582426806,
"count": 110
},
"BensonV7.Policy.Epsilon.mean": {
"value": 0.19012651000000003,
"min": 0.19012651000000003,
"max": 0.19573194,
"count": 110
},
"BensonV7.Policy.Epsilon.sum": {
"value": 0.19012651000000003,
"min": 0.19012651000000003,
"max": 0.19573194,
"count": 110
},
"BensonV7.Policy.Beta.mean": {
"value": 0.04506424234900002,
"min": 0.04506424234900002,
"max": 0.047866396806000015,
"count": 110
},
"BensonV7.Policy.Beta.sum": {
"value": 0.04506424234900002,
"min": 0.04506424234900002,
"max": 0.047866396806000015,
"count": 110
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1713876203",
"start_time_seconds": "1713901110",
"python_version": "3.9.13 (tags/v3.9.13:6de2ca5, May 17 2022, 16:36:42) [MSC v.1929 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\noahk\\Documents\\Unity projects\\Racesm\\.venv\\Scripts\\mlagents-learn config/BensonV7.yaml --run-id=BensonV7 --resume",
"command_line_arguments": "C:\\Users\\noahk\\Documents\\Unity projects\\Racesm\\.venv\\Scripts\\mlagents-learn config/BensonV7.yaml --run-id=BensonV7 --resume --env=C:\\Users\\noahk\\Desktop\\Benson slepe\\Racesm_L.exe --no-graphics --num-envs=1",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.2+cu118",
"numpy_version": "1.21.2",
"end_time_seconds": "1713880030"
"end_time_seconds": "1713901184"
},
"total": 3827.7623298,
"total": 73.1220318,
"count": 1,
"self": 0.011808999999630032,
"self": 0.5384944999999988,
"children": {
"run_training.setup": {
"total": 0.09212290000000012,
"total": 0.08394279999999998,
"count": 1,
"self": 0.09212290000000012
"self": 0.08394279999999998
},
"TrainerController.start_learning": {
"total": 3827.6583979,
"total": 72.4995945,
"count": 1,
"self": 2.435547799978849,
"self": 0.08601039999989268,
"children": {
"TrainerController._reset_env": {
"total": 19.67768449999975,
"count": 7,
"self": 19.67768449999975
"total": 5.160142400000002,
"count": 2,
"self": 5.160142400000002
},
"TrainerController.advance": {
"total": 3805.4429475000215,
"count": 119182,
"self": 2.0891288001512294,
"total": 67.1305968000001,
"count": 4917,
"self": 0.07592770000036353,
"children": {
"env_step": {
"total": 2832.8957301999403,
"count": 119182,
"self": 1718.9134972999607,
"total": 66.1624174999998,
"count": 4917,
"self": 21.281240999999945,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1112.513478499999,
"count": 119183,
"self": 16.29119710006944,
"total": 44.82998009999979,
"count": 4917,
"self": 0.5884662999992116,
"children": {
"TorchPolicy.evaluate": {
"total": 1096.2222813999297,
"count": 344490,
"self": 1096.2222813999297
"total": 44.241513800000575,
"count": 14691,
"self": 44.241513800000575
}
}
},
"workers": {
"total": 1.4687543999806074,
"count": 119181,
"total": 0.05119640000007486,
"count": 4917,
"self": 0.0,
"children": {
"worker_root": {
"total": 3753.157156000006,
"count": 119181,
"total": 67.91859579999999,
"count": 4917,
"is_parallel": true,
"self": 2309.568257799966,
"self": 51.74360209999996,
"children": {
"steps_from_proto": {
"total": 0.010590099999753022,
"count": 24,
"total": 0.0015551000000000315,
"count": 6,
"is_parallel": true,
"self": 0.0033023999993027076,
"self": 0.0006934000000047291,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0072877000004503145,
"count": 240,
"total": 0.0008616999999953023,
"count": 60,
"is_parallel": true,
"self": 0.0072877000004503145
"self": 0.0008616999999953023
}
}
},
"UnityEnvironment.step": {
"total": 1443.5783081000404,
"count": 119181,
"total": 16.173438600000026,
"count": 4917,
"is_parallel": true,
"self": 49.66146120013195,
"self": 0.5166272999996373,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 36.08897779992863,
"count": 119181,
"total": 0.38660149999983506,
"count": 4917,
"is_parallel": true,
"self": 36.08897779992863
"self": 0.38660149999983506
},
"communicator.exchange": {
"total": 1207.7480174999691,
"count": 119181,
"total": 11.692612900000144,
"count": 4917,
"is_parallel": true,
"self": 1207.7480174999691
"self": 11.692612900000144
},
"steps_from_proto": {
"total": 150.0798516000106,
"count": 357543,
"total": 3.577596900000409,
"count": 14751,
"is_parallel": true,
"self": 46.51214239967524,
"self": 1.5999384000006982,
"children": {
"_process_rank_one_or_two_observation": {
"total": 103.56770920033537,
"count": 3575430,
"total": 1.9776584999997109,
"count": 147510,
"is_parallel": true,
"self": 103.56770920033537
"self": 1.9776584999997109
}
}
}
@@ -277,52 +119,33 @@
}
},
"trainer_advance": {
"total": 970.45808849993,
"count": 119181,
"self": 17.939639899914255,
"total": 0.8922515999999412,
"count": 4916,
"self": 0.2938318999998568,
"children": {
"process_trajectory": {
"total": 147.86677610001612,
"count": 119181,
"self": 147.7127672000159,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15400890000023537,
"count": 2,
"self": 0.15400890000023537
}
}
},
"_update_policy": {
"total": 804.6516724999997,
"count": 111,
"self": 185.2384082999847,
"children": {
"TorchPPOOptimizer.update": {
"total": 619.413264200015,
"count": 68319,
"self": 619.413264200015
}
}
"total": 0.5984197000000844,
"count": 4916,
"self": 0.5984197000000844
}
}
}
}
},
"trainer_threads": {
"total": 9.99999883788405e-07,
"total": 7.000000010748408e-07,
"count": 1,
"self": 9.99999883788405e-07
"self": 7.000000010748408e-07
},
"TrainerController._save_models": {
"total": 0.10221709999996165,
"total": 0.12284420000000296,
"count": 1,
"self": 0.01906329999974332,
"self": 0.01001760000001184,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08315380000021833,
"total": 0.11282659999999112,
"count": 1,
"self": 0.08315380000021833
"self": 0.11282659999999112
}
}
}