benson V5 har cooket

This commit is contained in:
2024-04-20 14:45:47 +02:00
parent 2d017b7a59
commit 65607aebc4
322 changed files with 127875 additions and 143 deletions

View File

@@ -0,0 +1,65 @@
behaviors:
BensonImitationV3:
trainer_type: ppo
hyperparameters:
# Hyperparameters common to PPO and SAC
batch_size: 50
buffer_size: 3000
learning_rate: 3.0e-4
learning_rate_schedule: linear
# PPO-specific hyperparameters
# Replaces the "PPO-specific hyperparameters" section above
beta: 5.0e-2
epsilon: 0.1
lambd: 0.95
num_epoch: 3
# Configuration of the neural network (common to PPO/SAC)
network_settings:
      vis_encode_type: simple
normalize: false
hidden_units: 128
num_layers: 2
# Trainer configurations common to all trainers
max_steps: 1.8e5
time_horizon: 64
summary_freq: 1800
keep_checkpoints: 5
checkpoint_interval: 100000
threaded: true
init_path: null
# behavior cloning
behavioral_cloning:
demo_path: 'c:\Users\noahk\Documents\Unity projects\Racesm\Assets\Demonstrations\BensonV3M.demo'
strength: 0.5
# steps: 150000
# batch_size: 512
# num_epoch: 3
# samples_per_update: 0
reward_signals:
# environment reward (default)
extrinsic:
strength: 1.0
gamma: 0.99
# curiosity module
curiosity:
strength: 0.02
gamma: 0.99
encoding_size: 256
learning_rate: 3.0e-4
# GAIL
gail:
strength: 0.5
# gamma: 0.99
# encoding_size: 128
demo_path: 'c:\Users\noahk\Documents\Unity projects\Racesm\Assets\Demonstrations\BensonV3M.demo'
# learning_rate: 3.0e-4
# use_actions: false
# use_vail: false

View File

@@ -0,0 +1,65 @@
behaviors:
BensonImitationV3:
trainer_type: ppo
hyperparameters:
# Hyperparameters common to PPO and SAC
batch_size: 50
buffer_size: 15000
learning_rate: 3.0e-4
learning_rate_schedule: linear
# PPO-specific hyperparameters
# Replaces the "PPO-specific hyperparameters" section above
beta: 5.0e-2
epsilon: 0.1
lambd: 0.95
num_epoch: 3
# Configuration of the neural network (common to PPO/SAC)
network_settings:
      vis_encode_type: simple
normalize: false
hidden_units: 128
num_layers: 2
# Trainer configurations common to all trainers
max_steps: 2.4e5
time_horizon: 64
summary_freq: 9000
keep_checkpoints: 5
checkpoint_interval: 100000
threaded: true
init_path: null
# behavior cloning
behavioral_cloning:
demo_path: 'c:\Users\noahk\Documents\Unity projects\Racesm\Assets\Demonstrations\BensonV3M.demo'
strength: 0.5
# steps: 150000
# batch_size: 512
# num_epoch: 3
# samples_per_update: 0
reward_signals:
# environment reward (default)
extrinsic:
strength: 1.0
gamma: 0.99
# curiosity module
curiosity:
strength: 0.02
gamma: 0.99
encoding_size: 256
learning_rate: 3.0e-4
# GAIL
gail:
strength: 0.5
# gamma: 0.99
# encoding_size: 128
demo_path: 'c:\Users\noahk\Documents\Unity projects\Racesm\Assets\Demonstrations\BensonV3M.demo'
# learning_rate: 3.0e-4
# use_actions: false
# use_vail: false

View File

@@ -0,0 +1,65 @@
behaviors:
BensonImitationV3:
trainer_type: ppo
hyperparameters:
# Hyperparameters common to PPO and SAC
batch_size: 50
buffer_size: 3000
learning_rate: 3.0e-4
learning_rate_schedule: linear
# PPO-specific hyperparameters
# Replaces the "PPO-specific hyperparameters" section above
beta: 5.0e-2
epsilon: 0.1
lambd: 0.95
num_epoch: 3
# Configuration of the neural network (common to PPO/SAC)
network_settings:
      vis_encode_type: simple
normalize: false
hidden_units: 128
num_layers: 2
# Trainer configurations common to all trainers
max_steps: 1.0e6
time_horizon: 64
summary_freq: 10000
keep_checkpoints: 5
checkpoint_interval: 100000
threaded: true
init_path: null
# behavior cloning
behavioral_cloning:
demo_path: 'c:\Users\noahk\Documents\Unity projects\Racesm\Assets\Demonstrations\BensonV3M.demo'
strength: 0.5
# steps: 150000
# batch_size: 512
# num_epoch: 3
# samples_per_update: 0
reward_signals:
# environment reward (default)
extrinsic:
strength: 1.0
gamma: 0.99
# curiosity module
curiosity:
strength: 0.02
gamma: 0.99
encoding_size: 256
learning_rate: 3.0e-4
# GAIL
gail:
strength: 0.5
# gamma: 0.99
# encoding_size: 128
demo_path: 'c:\Users\noahk\Documents\Unity projects\Racesm\Assets\Demonstrations\BensonV3M.demo'
# learning_rate: 3.0e-4
# use_actions: false
# use_vail: false

View File

@@ -0,0 +1,65 @@
behaviors:
BensonV4:
trainer_type: ppo
hyperparameters:
# Hyperparameters common to PPO and SAC
batch_size: 50
buffer_size: 10240
learning_rate: 3.0e-4
learning_rate_schedule: linear
# PPO-specific hyperparameters
# Replaces the "PPO-specific hyperparameters" section above
beta: 5.0e-2
epsilon: 0.2
lambd: 0.95
num_epoch: 3
# Configuration of the neural network (common to PPO/SAC)
network_settings:
      vis_encode_type: simple
normalize: false
hidden_units: 128
num_layers: 2
# Trainer configurations common to all trainers
max_steps: 5.0e6
time_horizon: 64
summary_freq: 10000
keep_checkpoints: 5
    checkpoint_interval: 500000
threaded: false
init_path: null
# # behavior cloning
# behavioral_cloning:
# demo_path: 'c:\Users\noahk\Documents\Unity projects\Racesm\Assets\Demonstrations\BensonV3M.demo'
# strength: 0.5
# # steps: 150000
# # batch_size: 512
# # num_epoch: 3
# # samples_per_update: 0
reward_signals:
# environment reward (default)
extrinsic:
strength: 1.0
gamma: 0.99
# # curiosity module
# curiosity:
# strength: 0.02
# gamma: 0.99
# encoding_size: 256
# learning_rate: 3.0e-4
# # GAIL
# gail:
# strength: 0.5
# # gamma: 0.99
# # encoding_size: 128
# demo_path: 'c:\Users\noahk\Documents\Unity projects\Racesm\Assets\Demonstrations\BensonV3M.demo'
# # learning_rate: 3.0e-4
# # use_actions: false
# # use_vail: false

View File

@@ -0,0 +1,65 @@
behaviors:
BensonV5:
trainer_type: ppo
hyperparameters:
# Hyperparameters common to PPO and SAC
batch_size: 50
buffer_size: 10240
learning_rate: 3.0e-4
learning_rate_schedule: linear
# PPO-specific hyperparameters
# Replaces the "PPO-specific hyperparameters" section above
beta: 5.0e-2
epsilon: 0.2
lambd: 0.95
num_epoch: 3
# Configuration of the neural network (common to PPO/SAC)
network_settings:
      vis_encode_type: simple
normalize: false
hidden_units: 128
num_layers: 2
# Trainer configurations common to all trainers
max_steps: 3.0e7
time_horizon: 64
summary_freq: 10000
keep_checkpoints: 5
    checkpoint_interval: 500000
threaded: false
init_path: null
# # behavior cloning
# behavioral_cloning:
# demo_path: 'c:\Users\noahk\Documents\Unity projects\Racesm\Assets\Demonstrations\BensonV3M.demo'
# strength: 0.5
# # steps: 150000
# # batch_size: 512
# # num_epoch: 3
# # samples_per_update: 0
reward_signals:
# environment reward (default)
extrinsic:
strength: 1.0
gamma: 0.99
# # curiosity module
# curiosity:
# strength: 0.02
# gamma: 0.99
# encoding_size: 256
# learning_rate: 3.0e-4
# # GAIL
# gail:
# strength: 0.5
# # gamma: 0.99
# # encoding_size: 128
# demo_path: 'c:\Users\noahk\Documents\Unity projects\Racesm\Assets\Demonstrations\BensonV3M.demo'
# # learning_rate: 3.0e-4
# # use_actions: false
# # use_vail: false

View File

@@ -0,0 +1,65 @@
behaviors:
BensonV5:
trainer_type: ppo
hyperparameters:
# Hyperparameters common to PPO and SAC
batch_size: 50
buffer_size: 10240
learning_rate: 3.0e-4
learning_rate_schedule: linear
# PPO-specific hyperparameters
# Replaces the "PPO-specific hyperparameters" section above
beta: 5.0e-2
epsilon: 0.2
lambd: 0.95
num_epoch: 3
# Configuration of the neural network (common to PPO/SAC)
network_settings:
      vis_encode_type: simple
normalize: false
hidden_units: 128
num_layers: 2
# Trainer configurations common to all trainers
max_steps: 3.0e6
time_horizon: 64
summary_freq: 10000
keep_checkpoints: 5
    checkpoint_interval: 500000
threaded: false
init_path: null
# # behavior cloning
# behavioral_cloning:
# demo_path: 'c:\Users\noahk\Documents\Unity projects\Racesm\Assets\Demonstrations\BensonV3M.demo'
# strength: 0.5
# # steps: 150000
# # batch_size: 512
# # num_epoch: 3
# # samples_per_update: 0
reward_signals:
# environment reward (default)
extrinsic:
strength: 1.0
gamma: 0.99
# # curiosity module
# curiosity:
# strength: 0.02
# gamma: 0.99
# encoding_size: 256
# learning_rate: 3.0e-4
# # GAIL
# gail:
# strength: 0.5
# # gamma: 0.99
# # encoding_size: 128
# demo_path: 'c:\Users\noahk\Documents\Unity projects\Racesm\Assets\Demonstrations\BensonV3M.demo'
# # learning_rate: 3.0e-4
# # use_actions: false
# # use_vail: false

View File

@@ -0,0 +1,39 @@
behaviors:
Crawler:
trainer_type: ppo
hyperparameters:
batch_size: 2024
buffer_size: 20240
learning_rate: 0.0003
beta: 0.005
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: linear
network_settings:
normalize: true
hidden_units: 512
num_layers: 3
vis_encode_type: simple
reward_signals:
gail:
gamma: 0.99
strength: 1.0
network_settings:
normalize: true
hidden_units: 128
num_layers: 2
vis_encode_type: simple
learning_rate: 0.0003
use_actions: false
use_vail: false
demo_path: Project/Assets/ML-Agents/Examples/Crawler/Demos/ExpertCrawler.demo
keep_checkpoints: 5
max_steps: 10000000
time_horizon: 1000
summary_freq: 30000
behavioral_cloning:
demo_path: Project/Assets/ML-Agents/Examples/Crawler/Demos/ExpertCrawler.demo
steps: 50000
strength: 0.5
samples_per_update: 0

View File

@@ -0,0 +1,35 @@
behaviors:
Hallway:
trainer_type: ppo
hyperparameters:
batch_size: 128
buffer_size: 1024
learning_rate: 0.0003
beta: 0.01
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: linear
network_settings:
normalize: false
hidden_units: 128
num_layers: 2
vis_encode_type: simple
memory:
sequence_length: 64
memory_size: 256
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
gail:
gamma: 0.99
strength: 0.01
learning_rate: 0.0003
use_actions: false
use_vail: false
demo_path: Project/Assets/ML-Agents/Examples/Hallway/Demos/ExpertHallway.demo
keep_checkpoints: 5
max_steps: 10000000
time_horizon: 64
summary_freq: 10000

View File

@@ -0,0 +1,42 @@
behaviors:
PushBlock:
trainer_type: ppo
hyperparameters:
batch_size: 128
buffer_size: 2048
learning_rate: 0.0003
beta: 0.01
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: linear
network_settings:
normalize: false
hidden_units: 256
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
gail:
gamma: 0.99
strength: 0.01
network_settings:
normalize: false
hidden_units: 128
num_layers: 2
vis_encode_type: simple
learning_rate: 0.0003
use_actions: false
use_vail: false
demo_path: Project/Assets/ML-Agents/Examples/PushBlock/Demos/ExpertPushBlock.demo
keep_checkpoints: 5
max_steps: 100000
time_horizon: 64
summary_freq: 60000
behavioral_cloning:
demo_path: Project/Assets/ML-Agents/Examples/PushBlock/Demos/ExpertPushBlock.demo
steps: 50000
strength: 1.0
samples_per_update: 0

View File

@@ -0,0 +1,34 @@
behaviors:
Pyramids:
trainer_type: ppo
time_horizon: 128
max_steps: 1.0e7
hyperparameters:
batch_size: 128
beta: 0.01
buffer_size: 2048
epsilon: 0.2
lambd: 0.95
learning_rate: 0.0003
num_epoch: 3
network_settings:
num_layers: 2
normalize: false
hidden_units: 512
reward_signals:
extrinsic:
strength: 1.0
gamma: 0.99
curiosity:
strength: 0.02
gamma: 0.99
network_settings:
hidden_units: 256
gail:
strength: 0.01
gamma: 0.99
demo_path: Project/Assets/ML-Agents/Examples/Pyramids/Demos/ExpertPyramid.demo
behavioral_cloning:
demo_path: Project/Assets/ML-Agents/Examples/Pyramids/Demos/ExpertPyramid.demo
strength: 0.5
steps: 150000

View File

@@ -0,0 +1,25 @@
behaviors:
DungeonEscape:
trainer_type: poca
hyperparameters:
batch_size: 1024
buffer_size: 10240
learning_rate: 0.0003
beta: 0.01
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: constant
network_settings:
normalize: false
hidden_units: 256
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 20000000
time_horizon: 64
summary_freq: 60000

View File

@@ -0,0 +1,25 @@
behaviors:
PushBlockCollab:
trainer_type: poca
hyperparameters:
batch_size: 1024
buffer_size: 10240
learning_rate: 0.0003
beta: 0.01
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: constant
network_settings:
normalize: false
hidden_units: 256
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 15000000
time_horizon: 64
summary_freq: 60000

View File

@@ -0,0 +1,32 @@
behaviors:
SoccerTwos:
trainer_type: poca
hyperparameters:
batch_size: 2048
buffer_size: 20480
learning_rate: 0.0003
beta: 0.005
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: constant
network_settings:
normalize: false
hidden_units: 512
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 50000000
time_horizon: 1000
summary_freq: 10000
self_play:
save_steps: 50000
team_change: 200000
swap_steps: 2000
window: 10
play_against_latest_model_ratio: 0.5
initial_elo: 1200.0

View File

@@ -0,0 +1,63 @@
behaviors:
Goalie:
trainer_type: poca
hyperparameters:
batch_size: 2048
buffer_size: 20480
learning_rate: 0.0003
beta: 0.005
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: constant
network_settings:
normalize: false
hidden_units: 512
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 30000000
time_horizon: 1000
summary_freq: 10000
self_play:
save_steps: 50000
team_change: 200000
swap_steps: 1000
window: 10
play_against_latest_model_ratio: 0.5
initial_elo: 1200.0
Striker:
trainer_type: poca
hyperparameters:
batch_size: 2048
buffer_size: 20480
learning_rate: 0.0003
beta: 0.005
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: constant
network_settings:
normalize: false
hidden_units: 512
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 30000000
time_horizon: 1000
summary_freq: 10000
self_play:
save_steps: 50000
team_change: 200000
swap_steps: 4000
window: 10
play_against_latest_model_ratio: 0.5
initial_elo: 1200.0

View File

@@ -0,0 +1,25 @@
behaviors:
3DBall:
trainer_type: ppo
hyperparameters:
batch_size: 64
buffer_size: 12000
learning_rate: 0.0003
beta: 0.001
epsilon: 0.2
lambd: 0.99
num_epoch: 3
learning_rate_schedule: linear
network_settings:
normalize: true
hidden_units: 128
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 500000
time_horizon: 1000
summary_freq: 12000

View File

@@ -0,0 +1,25 @@
behaviors:
3DBallHard:
trainer_type: ppo
hyperparameters:
batch_size: 120
buffer_size: 12000
learning_rate: 0.0003
beta: 0.001
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: linear
network_settings:
normalize: true
hidden_units: 128
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 500000
time_horizon: 1000
summary_freq: 12000

View File

@@ -0,0 +1,36 @@
behaviors:
3DBall:
trainer_type: ppo
hyperparameters:
batch_size: 64
buffer_size: 12000
learning_rate: 0.0003
beta: 0.001
epsilon: 0.2
lambd: 0.99
num_epoch: 3
learning_rate_schedule: linear
network_settings:
normalize: true
hidden_units: 128
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 500000
time_horizon: 1000
summary_freq: 12000
environment_parameters:
mass:
sampler_type: uniform
sampler_parameters:
min_value: 0.5
max_value: 10
scale:
sampler_type: uniform
sampler_parameters:
min_value: 0.75
max_value: 3

View File

@@ -0,0 +1,25 @@
behaviors:
Basic:
trainer_type: ppo
hyperparameters:
batch_size: 32
buffer_size: 256
learning_rate: 0.0003
beta: 0.005
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: linear
network_settings:
normalize: false
hidden_units: 20
num_layers: 1
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.9
strength: 1.0
keep_checkpoints: 5
max_steps: 500000
time_horizon: 3
summary_freq: 2000

View File

@@ -0,0 +1,25 @@
behaviors:
Crawler:
trainer_type: ppo
hyperparameters:
batch_size: 2048
buffer_size: 20480
learning_rate: 0.0003
beta: 0.005
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: linear
network_settings:
normalize: true
hidden_units: 512
num_layers: 3
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.995
strength: 1.0
keep_checkpoints: 5
max_steps: 10000000
time_horizon: 1000
summary_freq: 30000

View File

@@ -0,0 +1,25 @@
behaviors:
GridFoodCollector:
trainer_type: ppo
hyperparameters:
batch_size: 1024
buffer_size: 10240
learning_rate: 0.0003
beta: 0.005
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: linear
network_settings:
normalize: false
hidden_units: 256
num_layers: 1
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 2000000
time_horizon: 64
summary_freq: 10000

View File

@@ -0,0 +1,25 @@
behaviors:
GridWorld:
trainer_type: ppo
hyperparameters:
batch_size: 32
buffer_size: 256
learning_rate: 0.0003
beta: 0.005
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: linear
network_settings:
normalize: false
hidden_units: 128
num_layers: 1
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.9
strength: 1.0
keep_checkpoints: 5
max_steps: 500000
time_horizon: 5
summary_freq: 20000

View File

@@ -0,0 +1,28 @@
behaviors:
Hallway:
trainer_type: ppo
hyperparameters:
batch_size: 128
buffer_size: 1024
learning_rate: 0.0003
beta: 0.03
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: linear
network_settings:
normalize: false
hidden_units: 128
num_layers: 2
vis_encode_type: simple
memory:
sequence_length: 64
memory_size: 128
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 10000000
time_horizon: 64
summary_freq: 10000

View File

@@ -0,0 +1,48 @@
default_settings:
trainer_type: ppo
hyperparameters:
batch_size: 16
buffer_size: 120
learning_rate: 0.0003
beta: 0.005
epsilon: 0.2
lambd: 0.99
num_epoch: 3
learning_rate_schedule: constant
network_settings:
normalize: true
hidden_units: 256
num_layers: 4
vis_encode_type: match3
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 5000000
time_horizon: 128
summary_freq: 10000
behaviors:
Match3SimpleHeuristic:
# Settings can be very simple since we don't care about actually training the model
trainer_type: ppo
hyperparameters:
batch_size: 16
buffer_size: 120
network_settings:
hidden_units: 4
num_layers: 1
max_steps: 5000000
summary_freq: 10000
Match3SmartHeuristic:
# Settings can be very simple since we don't care about actually training the model
trainer_type: ppo
hyperparameters:
batch_size: 16
buffer_size: 120
network_settings:
hidden_units: 4
num_layers: 1
max_steps: 5000000
summary_freq: 10000

View File

@@ -0,0 +1,25 @@
behaviors:
PushBlock:
trainer_type: ppo
hyperparameters:
batch_size: 128
buffer_size: 2048
learning_rate: 0.0003
beta: 0.01
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: linear
network_settings:
normalize: false
hidden_units: 256
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 2000000
time_horizon: 64
summary_freq: 60000

View File

@@ -0,0 +1,31 @@
behaviors:
Pyramids:
trainer_type: ppo
hyperparameters:
batch_size: 128
buffer_size: 2048
learning_rate: 0.0003
beta: 0.01
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: linear
network_settings:
normalize: false
hidden_units: 512
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
curiosity:
gamma: 0.99
strength: 0.02
network_settings:
hidden_units: 256
learning_rate: 0.0003
keep_checkpoints: 5
max_steps: 10000000
time_horizon: 128
summary_freq: 30000

View File

@@ -0,0 +1,32 @@
behaviors:
Pyramids:
trainer_type: ppo
hyperparameters:
batch_size: 128
buffer_size: 2048
learning_rate: 0.0003
beta: 0.01
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: linear
network_settings:
normalize: false
hidden_units: 512
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
rnd:
gamma: 0.99
strength: 0.01
network_settings:
hidden_units: 64
num_layers: 3
learning_rate: 0.0001
keep_checkpoints: 5
max_steps: 3000000
time_horizon: 128
summary_freq: 30000

View File

@@ -0,0 +1,102 @@
behaviors:
Sorter:
trainer_type: ppo
hyperparameters:
batch_size: 512
buffer_size: 40960
learning_rate: 0.0003
beta: 0.005
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: constant
network_settings:
      normalize: false
hidden_units: 128
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 5000000
time_horizon: 256
summary_freq: 10000
environment_parameters:
num_tiles:
curriculum:
- name: Lesson0 # The '-' is important as this is a list
completion_criteria:
measure: progress
behavior: Sorter
signal_smoothing: true
min_lesson_length: 100
threshold: 0.3
value: 2.0
- name: Lesson1
completion_criteria:
measure: progress
behavior: Sorter
signal_smoothing: true
min_lesson_length: 100
threshold: 0.4
value: 4.0
- name: Lesson2
completion_criteria:
measure: progress
behavior: Sorter
signal_smoothing: true
min_lesson_length: 100
threshold: 0.45
value: 6.0
- name: Lesson3
completion_criteria:
measure: progress
behavior: Sorter
signal_smoothing: true
min_lesson_length: 100
threshold: 0.5
value: 8.0
- name: Lesson4
completion_criteria:
measure: progress
behavior: Sorter
signal_smoothing: true
min_lesson_length: 100
threshold: 0.55
value: 10.0
- name: Lesson5
completion_criteria:
measure: progress
behavior: Sorter
signal_smoothing: true
min_lesson_length: 100
threshold: 0.6
value: 12.0
- name: Lesson6
completion_criteria:
measure: progress
behavior: Sorter
signal_smoothing: true
min_lesson_length: 100
threshold: 0.65
value: 14.0
- name: Lesson7
completion_criteria:
measure: progress
behavior: Sorter
signal_smoothing: true
min_lesson_length: 100
threshold: 0.7
value: 16.0
- name: Lesson8
completion_criteria:
measure: progress
behavior: Sorter
signal_smoothing: true
min_lesson_length: 100
threshold: 0.75
value: 18.0
- name: Lesson9
value: 20.0

View File

@@ -0,0 +1,25 @@
behaviors:
Visual3DBall:
trainer_type: ppo
hyperparameters:
batch_size: 256
buffer_size: 2560
learning_rate: 0.0003
beta: 0.005
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: linear
network_settings:
normalize: false
hidden_units: 128
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 400000
time_horizon: 64
summary_freq: 20000

View File

@@ -0,0 +1,25 @@
behaviors:
VisualFoodCollector:
trainer_type: ppo
hyperparameters:
batch_size: 1024
buffer_size: 10240
learning_rate: 0.0003
beta: 0.005
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: linear
network_settings:
normalize: false
hidden_units: 128
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 3000000
time_horizon: 100
summary_freq: 40000

View File

@@ -0,0 +1,25 @@
behaviors:
Walker:
trainer_type: ppo
hyperparameters:
batch_size: 2048
buffer_size: 20480
learning_rate: 0.0003
beta: 0.005
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: linear
network_settings:
normalize: true
hidden_units: 512
num_layers: 3
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.995
strength: 1.0
keep_checkpoints: 5
max_steps: 30000000
time_horizon: 1000
summary_freq: 30000

View File

@@ -0,0 +1,49 @@
behaviors:
BigWallJump:
trainer_type: ppo
hyperparameters:
batch_size: 128
buffer_size: 2048
learning_rate: 0.0003
beta: 0.005
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: linear
network_settings:
normalize: false
hidden_units: 256
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 20000000
time_horizon: 128
summary_freq: 20000
SmallWallJump:
trainer_type: ppo
hyperparameters:
batch_size: 128
buffer_size: 2048
learning_rate: 0.0003
beta: 0.005
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: linear
network_settings:
normalize: false
hidden_units: 256
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 5000000
time_horizon: 128
summary_freq: 20000

View File

@@ -0,0 +1,118 @@
behaviors:
BigWallJump:
trainer_type: ppo
hyperparameters:
batch_size: 128
buffer_size: 2048
learning_rate: 0.0003
beta: 0.005
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: linear
network_settings:
normalize: false
hidden_units: 256
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 20000000
time_horizon: 128
summary_freq: 20000
SmallWallJump:
trainer_type: ppo
hyperparameters:
batch_size: 128
buffer_size: 2048
learning_rate: 0.0003
beta: 0.005
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: linear
network_settings:
normalize: false
hidden_units: 256
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 5000000
time_horizon: 128
summary_freq: 20000
environment_parameters:
big_wall_height:
curriculum:
- name: Lesson0 # The '-' is important as this is a list
completion_criteria:
measure: progress
behavior: BigWallJump
signal_smoothing: true
min_lesson_length: 100
threshold: 0.1
value:
sampler_type: uniform
sampler_parameters:
min_value: 0.0
max_value: 4.0
- name: Lesson1 # This is the start of the second lesson
completion_criteria:
measure: progress
behavior: BigWallJump
signal_smoothing: true
min_lesson_length: 100
threshold: 0.3
value:
sampler_type: uniform
sampler_parameters:
min_value: 4.0
max_value: 7.0
- name: Lesson2
completion_criteria:
measure: progress
behavior: BigWallJump
signal_smoothing: true
min_lesson_length: 100
threshold: 0.5
value:
sampler_type: uniform
sampler_parameters:
min_value: 6.0
max_value: 8.0
- name: Lesson3
value: 8.0
small_wall_height:
curriculum:
- name: Lesson0
completion_criteria:
measure: progress
behavior: SmallWallJump
signal_smoothing: true
min_lesson_length: 100
threshold: 0.1
value: 1.5
- name: Lesson1
completion_criteria:
measure: progress
behavior: SmallWallJump
signal_smoothing: true
min_lesson_length: 100
threshold: 0.3
value: 2.0
- name: Lesson2
completion_criteria:
measure: progress
behavior: SmallWallJump
signal_smoothing: true
min_lesson_length: 100
threshold: 0.5
value: 2.5
- name: Lesson3
value: 4.0

View File

@@ -0,0 +1,25 @@
behaviors:
Worm:
trainer_type: ppo
hyperparameters:
batch_size: 2024
buffer_size: 20240
learning_rate: 0.0003
beta: 0.005
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: linear
network_settings:
normalize: true
hidden_units: 512
num_layers: 3
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.995
strength: 1.0
keep_checkpoints: 5
max_steps: 7000000
time_horizon: 1000
summary_freq: 30000

View File

@@ -0,0 +1,25 @@
behaviors:
RollerBall:
trainer_type: ppo
hyperparameters:
batch_size: 10
buffer_size: 100
learning_rate: 3.0e-4
beta: 5.0e-4
epsilon: 0.2
lambd: 0.99
num_epoch: 3
learning_rate_schedule: linear
beta_schedule: constant
epsilon_schedule: linear
network_settings:
normalize: false
hidden_units: 128
num_layers: 2
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
max_steps: 500000
time_horizon: 64
summary_freq: 10000

View File

@@ -0,0 +1,27 @@
behaviors:
3DBall:
trainer_type: sac
hyperparameters:
learning_rate: 0.0003
learning_rate_schedule: constant
batch_size: 64
buffer_size: 200000
buffer_init_steps: 0
tau: 0.005
steps_per_update: 10.0
save_replay_buffer: false
init_entcoef: 0.5
reward_signal_steps_per_update: 10.0
network_settings:
normalize: true
hidden_units: 64
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 200000
time_horizon: 1000
summary_freq: 12000

View File

@@ -0,0 +1,27 @@
behaviors:
3DBallHard:
trainer_type: sac
hyperparameters:
learning_rate: 0.0003
learning_rate_schedule: constant
batch_size: 256
buffer_size: 500000
buffer_init_steps: 0
tau: 0.005
steps_per_update: 10.0
save_replay_buffer: false
init_entcoef: 1.0
reward_signal_steps_per_update: 10.0
network_settings:
normalize: true
hidden_units: 128
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 500000
time_horizon: 1000
summary_freq: 12000

View File

@@ -0,0 +1,27 @@
behaviors:
Basic:
trainer_type: sac
hyperparameters:
learning_rate: 0.0003
learning_rate_schedule: constant
batch_size: 64
buffer_size: 50000
buffer_init_steps: 0
tau: 0.005
steps_per_update: 10.0
save_replay_buffer: false
init_entcoef: 0.01
reward_signal_steps_per_update: 10.0
network_settings:
normalize: false
hidden_units: 20
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 500000
time_horizon: 10
summary_freq: 2000

View File

@@ -0,0 +1,27 @@
behaviors:
Crawler:
trainer_type: sac
hyperparameters:
learning_rate: 0.0003
learning_rate_schedule: constant
batch_size: 256
buffer_size: 500000
buffer_init_steps: 0
tau: 0.005
steps_per_update: 20.0
save_replay_buffer: false
init_entcoef: 1.0
reward_signal_steps_per_update: 20.0
network_settings:
normalize: true
hidden_units: 512
num_layers: 3
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.995
strength: 1.0
keep_checkpoints: 5
max_steps: 5000000
time_horizon: 1000
summary_freq: 30000

View File

@@ -0,0 +1,28 @@
behaviors:
FoodCollector:
trainer_type: sac
hyperparameters:
learning_rate: 0.0003
learning_rate_schedule: constant
batch_size: 256
buffer_size: 2048
buffer_init_steps: 0
tau: 0.005
steps_per_update: 10.0
save_replay_buffer: false
init_entcoef: 0.05
reward_signal_steps_per_update: 10.0
network_settings:
normalize: false
hidden_units: 256
num_layers: 1
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 2000000
time_horizon: 64
summary_freq: 60000
threaded: false

View File

@@ -0,0 +1,27 @@
behaviors:
GridWorld:
trainer_type: sac
hyperparameters:
learning_rate: 0.0003
learning_rate_schedule: constant
batch_size: 128
buffer_size: 50000
buffer_init_steps: 1000
tau: 0.005
steps_per_update: 10.0
save_replay_buffer: false
init_entcoef: 0.5
reward_signal_steps_per_update: 10.0
network_settings:
normalize: false
hidden_units: 128
num_layers: 1
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.9
strength: 1.0
keep_checkpoints: 5
max_steps: 500000
time_horizon: 5
summary_freq: 20000

View File

@@ -0,0 +1,30 @@
behaviors:
Hallway:
trainer_type: sac
hyperparameters:
learning_rate: 0.0003
learning_rate_schedule: constant
batch_size: 512
buffer_size: 200000
buffer_init_steps: 0
tau: 0.005
steps_per_update: 10.0
save_replay_buffer: false
init_entcoef: 0.1
reward_signal_steps_per_update: 10.0
network_settings:
normalize: false
hidden_units: 128
num_layers: 2
vis_encode_type: simple
memory:
sequence_length: 64
memory_size: 128
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 4000000
time_horizon: 64
summary_freq: 10000

View File

@@ -0,0 +1,27 @@
behaviors:
PushBlock:
trainer_type: sac
hyperparameters:
learning_rate: 0.0003
learning_rate_schedule: constant
batch_size: 128
buffer_size: 50000
buffer_init_steps: 0
tau: 0.005
steps_per_update: 10.0
save_replay_buffer: false
init_entcoef: 0.05
reward_signal_steps_per_update: 10.0
network_settings:
normalize: false
hidden_units: 256
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 2000000
time_horizon: 64
summary_freq: 100000

View File

@@ -0,0 +1,34 @@
behaviors:
Pyramids:
trainer_type: sac
hyperparameters:
learning_rate: 0.0003
learning_rate_schedule: constant
batch_size: 128
buffer_size: 2000000
buffer_init_steps: 1000
tau: 0.01
steps_per_update: 10.0
save_replay_buffer: false
init_entcoef: 0.01
reward_signal_steps_per_update: 10.0
network_settings:
normalize: false
hidden_units: 512
num_layers: 3
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.995
strength: 2.0
gail:
gamma: 0.99
strength: 0.01
learning_rate: 0.0003
use_actions: true
use_vail: false
demo_path: Project/Assets/ML-Agents/Examples/Pyramids/Demos/ExpertPyramid.demo
keep_checkpoints: 5
max_steps: 3000000
time_horizon: 128
summary_freq: 30000

View File

@@ -0,0 +1,27 @@
behaviors:
Walker:
trainer_type: sac
hyperparameters:
learning_rate: 0.0003
learning_rate_schedule: constant
batch_size: 1024
buffer_size: 2000000
buffer_init_steps: 0
tau: 0.005
steps_per_update: 30.0
save_replay_buffer: false
init_entcoef: 1.0
reward_signal_steps_per_update: 30.0
network_settings:
normalize: true
hidden_units: 256
num_layers: 3
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.995
strength: 1.0
keep_checkpoints: 5
max_steps: 15000000
time_horizon: 1000
summary_freq: 30000

View File

@@ -0,0 +1,53 @@
behaviors:
BigWallJump:
trainer_type: sac
hyperparameters:
learning_rate: 0.0003
learning_rate_schedule: constant
batch_size: 128
buffer_size: 200000
buffer_init_steps: 0
tau: 0.005
steps_per_update: 20.0
save_replay_buffer: false
init_entcoef: 0.1
reward_signal_steps_per_update: 10.0
network_settings:
normalize: false
hidden_units: 256
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 15000000
time_horizon: 128
summary_freq: 20000
SmallWallJump:
trainer_type: sac
hyperparameters:
learning_rate: 0.0003
learning_rate_schedule: constant
batch_size: 128
buffer_size: 50000
buffer_init_steps: 0
tau: 0.005
steps_per_update: 20.0
save_replay_buffer: false
init_entcoef: 0.1
reward_signal_steps_per_update: 10.0
network_settings:
normalize: false
hidden_units: 256
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 5000000
time_horizon: 128
summary_freq: 20000

View File

@@ -0,0 +1,27 @@
behaviors:
Worm:
trainer_type: sac
hyperparameters:
learning_rate: 0.0003
learning_rate_schedule: constant
batch_size: 256
buffer_size: 500000
buffer_init_steps: 0
tau: 0.005
steps_per_update: 20.0
save_replay_buffer: false
init_entcoef: 1.0
reward_signal_steps_per_update: 20.0
network_settings:
normalize: true
hidden_units: 512
num_layers: 3
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.995
strength: 1.0
keep_checkpoints: 5
max_steps: 5000000
time_horizon: 1000
summary_freq: 30000