Benson V6.1 has finished training

2024-04-20 18:48:43 +02:00
parent d5b24b17ea
commit ae447faf5e
24 changed files with 2294 additions and 5 deletions

Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1,76 @@
default_settings: null
behaviors:
  BensonV6.1:
    trainer_type: ppo
    hyperparameters:
      batch_size: 50
      buffer_size: 10240
      learning_rate: 0.0003
      beta: 0.05
      epsilon: 0.2
      lambd: 0.95
      num_epoch: 3
      shared_critic: false
      learning_rate_schedule: linear
      beta_schedule: linear
      epsilon_schedule: linear
    network_settings:
      normalize: false
      hidden_units: 128
      num_layers: 2
      vis_encode_type: simple
      memory: null
      goal_conditioning_type: hyper
      deterministic: false
    reward_signals:
      extrinsic:
        gamma: 0.99
        strength: 1.0
        network_settings:
          normalize: false
          hidden_units: 128
          num_layers: 2
          vis_encode_type: simple
          memory: null
          goal_conditioning_type: hyper
          deterministic: false
    init_path: null
    keep_checkpoints: 5
    checkpoint_interval: 500000
    max_steps: 3000000
    time_horizon: 64
    summary_freq: 10000
    threaded: false
    self_play: null
    behavioral_cloning: null
env_settings:
  env_path: null
  env_args: null
  base_port: 5005
  num_envs: 1
  num_areas: 1
  seed: -1
  max_lifetime_restarts: 10
  restarts_rate_limit_n: 1
  restarts_rate_limit_period_s: 60
engine_settings:
  width: 84
  height: 84
  quality_level: 5
  time_scale: 20
  target_frame_rate: -1
  capture_frame_rate: 60
  no_graphics: false
environment_parameters: null
checkpoint_settings:
  run_id: BensonV6.1
  initialize_from: null
  load_model: false
  resume: true
  force: false
  train_model: false
  inference: false
  results_dir: results
torch_settings:
  device: null
debug: false
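
The config above is an ML-Agents PPO run configuration; per the command line recorded in the timer log below, the run was launched as `mlagents-learn config/BensonV6.1.yaml --run-id=BensonV6.1 --resume`. With `learning_rate_schedule: linear`, the learning rate anneals from 3e-4 toward zero over the 3,000,000-step budget. A minimal sketch of that decay, assuming a linear (power-1 polynomial) schedule with a small floor; `linear_decay` is an illustrative helper, not the library's API:

```python
# Illustrative helper mirroring a linear (power-1 polynomial) decay;
# not ML-Agents' actual API. The 1e-10 floor is an assumption.
def linear_decay(initial: float, step: int, max_steps: int, floor: float = 1e-10) -> float:
    """Anneal `initial` linearly toward 0 over `max_steps`, never below `floor`."""
    return max(floor, initial * (1.0 - min(step, max_steps) / max_steps))

# With learning_rate=0.0003 and max_steps=3000000 from the config above, the
# schedule lands near the 4.35e-07 minimum recorded in the gauges below.
print(linear_decay(3e-4, 2_995_650, 3_000_000))  # ~4.35e-07
```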

View File

@@ -0,0 +1,320 @@
{
    "name": "root",
    "gauges": {
        "BensonV6.1.Policy.Entropy.mean": {
            "value": 0.6789544820785522,
            "min": 0.6789544820785522,
            "max": 1.7365357875823975,
            "count": 252
        },
        "BensonV6.1.Policy.Entropy.sum": {
            "value": 6762.38671875,
            "min": 1786.3543701171875,
            "max": 17327.384765625,
            "count": 252
        },
        "BensonV6.1.Step.mean": {
            "value": 2999952.0,
            "min": 489988.0,
            "max": 2999952.0,
            "count": 252
        },
        "BensonV6.1.Step.sum": {
            "value": 2999952.0,
            "min": 489988.0,
            "max": 2999952.0,
            "count": 252
        },
        "BensonV6.1.Policy.ExtrinsicValueEstimate.mean": {
            "value": 7.416819095611572,
            "min": 1.214796781539917,
            "max": 7.417298793792725,
            "count": 252
        },
        "BensonV6.1.Policy.ExtrinsicValueEstimate.sum": {
            "value": 1453.696533203125,
            "min": 27.14661979675293,
            "max": 1453.696533203125,
            "count": 252
        },
        "BensonV6.1.IsTraining.mean": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 252
        },
        "BensonV6.1.IsTraining.sum": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 252
        },
        "BensonV6.1.Environment.EpisodeLength.mean": {
            "value": 158.25806451612902,
            "min": 156.3548387096774,
            "max": 315.34375,
            "count": 251
        },
        "BensonV6.1.Environment.EpisodeLength.sum": {
            "value": 9812.0,
            "min": 9135.0,
            "max": 10918.0,
            "count": 251
        },
        "BensonV6.1.Environment.CumulativeReward.mean": {
            "value": 17.720831836972916,
            "min": 6.600487498161294,
            "max": 17.764139349177732,
            "count": 251
        },
        "BensonV6.1.Environment.CumulativeReward.sum": {
            "value": 1116.4124057292938,
            "min": 235.22235931036994,
            "max": 1139.6223163156537,
            "count": 251
        },
        "BensonV6.1.Policy.ExtrinsicReward.mean": {
            "value": 17.720831836972916,
            "min": 6.600487498161294,
            "max": 17.764139349177732,
            "count": 251
        },
        "BensonV6.1.Policy.ExtrinsicReward.sum": {
            "value": 1116.4124057292938,
            "min": 235.22235931036994,
            "max": 1139.6223163156537,
            "count": 251
        },
        "BensonV6.1.Losses.PolicyLoss.mean": {
            "value": 0.10757822603643585,
            "min": 0.09952940423776513,
            "max": 0.12089756364854064,
            "count": 244
        },
        "BensonV6.1.Losses.PolicyLoss.sum": {
            "value": 0.10757822603643585,
            "min": 0.09952940423776513,
            "max": 0.12089756364854064,
            "count": 244
        },
        "BensonV6.1.Losses.ValueLoss.mean": {
            "value": 0.161995082059845,
            "min": 0.12143607831643365,
            "max": 0.7208258142950488,
            "count": 244
        },
        "BensonV6.1.Losses.ValueLoss.sum": {
            "value": 0.161995082059845,
            "min": 0.12143607831643365,
            "max": 0.7208258142950488,
            "count": 244
        },
        "BensonV6.1.Policy.LearningRate.mean": {
            "value": 4.348998550666575e-07,
            "min": 4.348998550666575e-07,
            "max": 0.0002500703166432333,
            "count": 244
        },
        "BensonV6.1.Policy.LearningRate.sum": {
            "value": 4.348998550666575e-07,
            "min": 4.348998550666575e-07,
            "max": 0.0002500703166432333,
            "count": 244
        },
        "BensonV6.1.Policy.Epsilon.mean": {
            "value": 0.10014493333333332,
            "min": 0.10014493333333332,
            "max": 0.18335676666666667,
            "count": 244
        },
        "BensonV6.1.Policy.Epsilon.sum": {
            "value": 0.10014493333333332,
            "min": 0.10014493333333332,
            "max": 0.18335676666666667,
            "count": 244
        },
        "BensonV6.1.Policy.Beta.mean": {
            "value": 8.245217333333181e-05,
            "min": 8.245217333333181e-05,
            "max": 0.04168004765666667,
            "count": 244
        },
        "BensonV6.1.Policy.Beta.sum": {
            "value": 8.245217333333181e-05,
            "min": 8.245217333333181e-05,
            "max": 0.04168004765666667,
            "count": 244
        }
    },
    "metadata": {
        "timer_format_version": "0.1.0",
        "start_time_seconds": "1713626939",
        "python_version": "3.9.13 (tags/v3.9.13:6de2ca5, May 17 2022, 16:36:42) [MSC v.1929 64 bit (AMD64)]",
        "command_line_arguments": "C:\\Users\\noahk\\Documents\\Unity projects\\Racesm\\.venv\\Scripts\\mlagents-learn config/BensonV6.1.yaml --run-id=BensonV6.1 --resume",
        "mlagents_version": "0.30.0",
        "mlagents_envs_version": "0.30.0",
        "communication_protocol_version": "1.5.0",
        "pytorch_version": "2.2.2+cu118",
        "numpy_version": "1.21.2",
        "end_time_seconds": "1713631580"
    },
    "total": 4641.414641300001,
    "count": 1,
    "self": 0.005876700000953861,
    "children": {
        "run_training.setup": {
            "total": 0.08273789999999992,
            "count": 1,
            "self": 0.08273789999999992
        },
        "TrainerController.start_learning": {
            "total": 4641.3260267,
            "count": 1,
            "self": 4.529195499979323,
            "children": {
                "TrainerController._reset_env": {
                    "total": 10.7731008,
                    "count": 1,
                    "self": 10.7731008
                },
                "TrainerController.advance": {
                    "total": 4625.96329380002,
                    "count": 261576,
                    "self": 3.9781851998122875,
                    "children": {
                        "env_step": {
                            "total": 2717.0496560999586,
                            "count": 261576,
                            "self": 1678.8040917999438,
                            "children": {
                                "SubprocessEnvManager._take_step": {
                                    "total": 1035.1611249000011,
                                    "count": 261576,
                                    "self": 11.988765500048657,
                                    "children": {
                                        "TorchPolicy.evaluate": {
                                            "total": 1023.1723593999525,
                                            "count": 251122,
                                            "self": 1023.1723593999525
                                        }
                                    }
                                },
                                "workers": {
                                    "total": 3.084439400013709,
                                    "count": 261576,
                                    "self": 0.0,
                                    "children": {
                                        "worker_root": {
                                            "total": 4625.813898699946,
                                            "count": 261576,
                                            "is_parallel": true,
                                            "self": 3193.993477400112,
                                            "children": {
                                                "steps_from_proto": {
                                                    "total": 0.0005550000000003052,
                                                    "count": 1,
                                                    "is_parallel": true,
                                                    "self": 0.0002106000000008379,
                                                    "children": {
                                                        "_process_rank_one_or_two_observation": {
                                                            "total": 0.00034439999999946735,
                                                            "count": 6,
                                                            "is_parallel": true,
                                                            "self": 0.00034439999999946735
                                                        }
                                                    }
                                                },
                                                "UnityEnvironment.step": {
                                                    "total": 1431.8198662998334,
                                                    "count": 261576,
                                                    "is_parallel": true,
                                                    "self": 31.458605499658688,
                                                    "children": {
                                                        "UnityEnvironment._generate_step_input": {
                                                            "total": 34.3378812000054,
                                                            "count": 261576,
                                                            "is_parallel": true,
                                                            "self": 34.3378812000054
                                                        },
                                                        "communicator.exchange": {
                                                            "total": 1271.401098000018,
                                                            "count": 261576,
                                                            "is_parallel": true,
                                                            "self": 1271.401098000018
                                                        },
                                                        "steps_from_proto": {
                                                            "total": 94.6222816001513,
                                                            "count": 261576,
                                                            "is_parallel": true,
                                                            "self": 36.53906309968252,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 58.083218500468774,
                                                                    "count": 1569456,
                                                                    "is_parallel": true,
                                                                    "self": 58.083218500468774
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        },
                        "trainer_advance": {
                            "total": 1904.9354525002493,
                            "count": 261576,
                            "self": 7.898237100334654,
                            "children": {
                                "process_trajectory": {
                                    "total": 286.0989276999167,
                                    "count": 261576,
                                    "self": 285.7158488999173,
                                    "children": {
                                        "RLTrainer._checkpoint": {
                                            "total": 0.3830787999994101,
                                            "count": 6,
                                            "self": 0.3830787999994101
                                        }
                                    }
                                },
                                "_update_policy": {
                                    "total": 1610.938287699998,
                                    "count": 244,
                                    "self": 330.12971779998907,
                                    "children": {
                                        "TorchPPOOptimizer.update": {
                                            "total": 1280.808569900009,
                                            "count": 150024,
                                            "self": 1280.808569900009
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "trainer_threads": {
                    "total": 6.999998731771484e-07,
                    "count": 1,
                    "self": 6.999998731771484e-07
                },
                "TrainerController._save_models": {
                    "total": 0.060435899999902176,
                    "count": 1,
                    "self": 0.0057316999991599005,
                    "children": {
                        "RLTrainer._checkpoint": {
                            "total": 0.054704200000742276,
                            "count": 1,
                            "self": 0.054704200000742276
                        }
                    }
                }
            }
        }
    }
}
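
Reading the timer tree above: of the ~4641 s run, the biggest single costs are stepping the Unity environment (`env_step` self time ~1679 s, `communicator.exchange` ~1271 s) and PPO optimization (`TorchPPOOptimizer.update` ~1281 s). A short sketch that ranks the nodes of such a timers.json by `self` time; the `run_logs` path is an assumption about where ML-Agents 0.30 places this file for the run:

```python
import json

# Sketch: rank timer nodes from a timers.json like the one above by "self"
# time. The path is an assumption about this run's layout; adjust as needed.
def walk(node, name="root", path=""):
    """Yield (path, self_seconds, count) for every node in the timer tree."""
    path = f"{path}/{name}"
    yield path, node.get("self", 0.0), node.get("count", 0)
    for child_name, child in node.get("children", {}).items():
        yield from walk(child, child_name, path)

with open("results/BensonV6.1/run_logs/timers.json") as f:
    root = json.load(f)

for path, self_s, count in sorted(walk(root), key=lambda t: -t[1])[:5]:
    print(f"{self_s:10.1f}s  x{count:<8d} {path}")
```

Note that nodes flagged `"is_parallel": true` (e.g. `worker_root`) are timed in the environment worker process, so their seconds overlap the main thread rather than adding to the 4641 s total.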

View File

@@ -0,0 +1,65 @@
{
    "BensonV6.1": {
        "checkpoints": [
            {
                "steps": 1499970,
                "file_path": "results\\BensonV6.1\\BensonV6.1\\BensonV6.1-1499970.onnx",
                "reward": 16.48727667717529,
                "creation_time": 1713628846.0252626,
                "auxillary_file_paths": [
                    "results\\BensonV6.1\\BensonV6.1\\BensonV6.1-1499970.pt"
                ]
            },
            {
                "steps": 1999994,
                "file_path": "results\\BensonV6.1\\BensonV6.1\\BensonV6.1-1999994.onnx",
                "reward": 18.574123819669087,
                "creation_time": 1713629766.2435691,
                "auxillary_file_paths": [
                    "results\\BensonV6.1\\BensonV6.1\\BensonV6.1-1999994.pt"
                ]
            },
            {
                "steps": 2499995,
                "file_path": "results\\BensonV6.1\\BensonV6.1\\BensonV6.1-2499995.onnx",
                "reward": 16.940118609749256,
                "creation_time": 1713630672.6028414,
                "auxillary_file_paths": [
                    "results\\BensonV6.1\\BensonV6.1\\BensonV6.1-2499995.pt"
                ]
            },
            {
                "steps": 2999952,
                "file_path": "results\\BensonV6.1\\BensonV6.1\\BensonV6.1-2999952.onnx",
                "reward": 17.787549989564077,
                "creation_time": 1713631580.4652746,
                "auxillary_file_paths": [
                    "results\\BensonV6.1\\BensonV6.1\\BensonV6.1-2999952.pt"
                ]
            },
            {
                "steps": 3000016,
                "file_path": "results\\BensonV6.1\\BensonV6.1\\BensonV6.1-3000016.onnx",
                "reward": 17.787549989564077,
                "creation_time": 1713631580.5248048,
                "auxillary_file_paths": [
                    "results\\BensonV6.1\\BensonV6.1\\BensonV6.1-3000016.pt"
                ]
            }
        ],
        "final_checkpoint": {
            "steps": 3000016,
            "file_path": "results\\BensonV6.1\\BensonV6.1.onnx",
            "reward": 17.787549989564077,
            "creation_time": 1713631580.5248048,
            "auxillary_file_paths": [
                "results\\BensonV6.1\\BensonV6.1\\BensonV6.1-3000016.pt"
            ]
        }
    },
    "metadata": {
        "stats_format_version": "0.3.0",
        "mlagents_version": "0.30.0",
        "torch_version": "2.2.2+cu118"
    }
}
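
One practical use of this manifest: with `keep_checkpoints: 5`, the last five checkpoints survive, and the best mean reward in this run (18.57 at step 1,999,994) belongs to an earlier checkpoint than the final one (17.79). A minimal sketch that picks the highest-reward checkpoint; the `run_logs` path is an assumption about this run's layout:

```python
import json

# Sketch: select the highest-reward checkpoint from a training_status.json
# like the one above. The path is an assumption; adjust to your results dir.
with open("results/BensonV6.1/run_logs/training_status.json") as f:
    status = json.load(f)

best = max(status["BensonV6.1"]["checkpoints"], key=lambda c: c["reward"])
print(best["steps"], round(best["reward"], 2), best["file_path"])
# -> 1999994 18.57 results\BensonV6.1\BensonV6.1\BensonV6.1-1999994.onnx
```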