From a2b4fb7b77d977d16b7f86429b132677c887ce46 Mon Sep 17 00:00:00 2001
From: Ervin Teng
Date: Mon, 13 Apr 2020 18:47:30 -0700
Subject: [PATCH 01/36] Move sampler config into main YAML
---
config/3dball_randomize.yaml | 16 ---------
config/ppo/3DBall.yaml | 25 +++++++++++++
config/ppo/3DBall_randomize.yaml | 40 +++++++++++++++++++++
ml-agents/mlagents/trainers/learn.py | 18 ++++++----
ml-agents/mlagents/trainers/trainer_util.py | 8 ++---
5 files changed, 80 insertions(+), 27 deletions(-)
delete mode 100644 config/3dball_randomize.yaml
create mode 100644 config/ppo/3DBall.yaml
create mode 100644 config/ppo/3DBall_randomize.yaml
diff --git a/config/3dball_randomize.yaml b/config/3dball_randomize.yaml
deleted file mode 100644
index b57e2686a0..0000000000
--- a/config/3dball_randomize.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-resampling-interval: 5000
-
-mass:
- sampler-type: "uniform"
- min_value: 0.5
- max_value: 10
-
-gravity:
- sampler-type: "uniform"
- min_value: 7
- max_value: 12
-
-scale:
- sampler-type: "uniform"
- min_value: 0.75
- max_value: 3
diff --git a/config/ppo/3DBall.yaml b/config/ppo/3DBall.yaml
new file mode 100644
index 0000000000..df7d502106
--- /dev/null
+++ b/config/ppo/3DBall.yaml
@@ -0,0 +1,25 @@
+behaviors:
+ 3DBall:
+ trainer: ppo
+ batch_size: 64
+ beta: 0.001
+ buffer_size: 12000
+ epsilon: 0.2
+ hidden_units: 128
+ lambd: 0.99
+ learning_rate: 3.0e-4
+ learning_rate_schedule: linear
+ max_steps: 5.0e5
+ memory_size: 128
+ normalize: true
+ num_epoch: 3
+ num_layers: 2
+ time_horizon: 1000
+ sequence_length: 64
+ summary_freq: 12000
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
diff --git a/config/ppo/3DBall_randomize.yaml b/config/ppo/3DBall_randomize.yaml
new file mode 100644
index 0000000000..35b87d19b4
--- /dev/null
+++ b/config/ppo/3DBall_randomize.yaml
@@ -0,0 +1,40 @@
+behaviors:
+ 3DBall:
+ trainer: ppo
+ batch_size: 64
+ beta: 0.001
+ buffer_size: 12000
+ epsilon: 0.2
+ hidden_units: 128
+ lambd: 0.99
+ learning_rate: 3.0e-4
+ learning_rate_schedule: linear
+ max_steps: 5.0e5
+ memory_size: 128
+ normalize: true
+ num_epoch: 3
+ num_layers: 2
+ time_horizon: 1000
+ sequence_length: 64
+ summary_freq: 12000
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
+
+parameter_randomization:
+ resampling-interval: 500
+ mass:
+ sampler-type: "uniform"
+ min_value: 0.5
+ max_value: 10
+ gravity:
+ sampler-type: "uniform"
+ min_value: 7
+ max_value: 12
+ scale:
+ sampler-type: "uniform"
+ min_value: 0.75
+ max_value: 3
\ No newline at end of file
diff --git a/ml-agents/mlagents/trainers/learn.py b/ml-agents/mlagents/trainers/learn.py
index 30db36b0ca..aec2c709b9 100644
--- a/ml-agents/mlagents/trainers/learn.py
+++ b/ml-agents/mlagents/trainers/learn.py
@@ -26,7 +26,7 @@
)
from mlagents_envs.environment import UnityEnvironment
from mlagents.trainers.sampler_class import SamplerManager
-from mlagents.trainers.exception import SamplerException
+from mlagents.trainers.exception import SamplerException, TrainerConfigError
from mlagents_envs.base_env import BaseEnv
from mlagents.trainers.subprocess_env_manager import SubprocessEnvManager
from mlagents_envs.side_channel.side_channel import SideChannel
@@ -238,15 +238,19 @@ def from_argparse(args: argparse.Namespace) -> "RunOptions":
configs loaded from files.
"""
argparse_args = vars(args)
- trainer_config_path = argparse_args["trainer_config_path"]
+ config_path = argparse_args["trainer_config_path"]
curriculum_config_path = argparse_args["curriculum_config_path"]
- argparse_args["trainer_config"] = load_config(trainer_config_path)
+ full_config = load_config(config_path)
+ try:
+ argparse_args["trainer_config"] = full_config["behaviors"]
+ except KeyError:
+ raise TrainerConfigError(
+ "Trainer configurations not found. Make sure your YAML file has a section for behaviors."
+ )
if curriculum_config_path is not None:
argparse_args["curriculum_config"] = load_config(curriculum_config_path)
- if argparse_args["sampler_file_path"] is not None:
- argparse_args["sampler_config"] = load_config(
- argparse_args["sampler_file_path"]
- )
+ if "parameter_randomization" in full_config:
+ argparse_args["sampler_config"] = full_config["parameter_randomization"]
# Keep deprecated --load working, TODO: remove
argparse_args["resume"] = argparse_args["resume"] or argparse_args["load_model"]
# Since argparse accepts file paths in the config options which don't exist in CommandLineOptions,
diff --git a/ml-agents/mlagents/trainers/trainer_util.py b/ml-agents/mlagents/trainers/trainer_util.py
index 3fad49a368..06bf4475dd 100644
--- a/ml-agents/mlagents/trainers/trainer_util.py
+++ b/ml-agents/mlagents/trainers/trainer_util.py
@@ -95,13 +95,13 @@ def initialize_trainer(
:param meta_curriculum: Optional meta_curriculum, used to determine a reward buffer length for PPOTrainer
:return:
"""
- if "default" not in trainer_config and brain_name not in trainer_config:
+ if brain_name not in trainer_config:
raise TrainerConfigError(
- f'Trainer config must have either a "default" section, or a section for the brain name ({brain_name}). '
- "See config/trainer_config.yaml for an example."
+ f"Trainer config must have a section for the brain name {brain_name}. "
+ "See the config/ directory for examples."
)
- trainer_parameters = trainer_config.get("default", {}).copy()
+ trainer_parameters: Dict[str, Any] = {}
trainer_parameters["summary_path"] = str(run_id) + "_" + brain_name
trainer_parameters["model_path"] = "{basedir}/{name}".format(
basedir=model_path, name=brain_name
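After this patch a single run configuration file carries both the per-behavior hyperparameters (under behaviors) and the optional parameter_randomization section. A minimal sketch of how such a file splits apart, assuming PyYAML and a hypothetical split_run_config helper; the real code path is load_config plus the from_argparse changes above:

    # Sketch only: mirrors the split performed in from_argparse, not the actual API.
    import yaml

    def split_run_config(path: str):
        with open(path) as f:
            full_config = yaml.safe_load(f)
        try:
            trainer_config = full_config["behaviors"]  # per-behavior hyperparameters
        except KeyError:
            raise KeyError("config must contain a top-level 'behaviors' section")
        sampler_config = full_config.get("parameter_randomization")  # optional, may be None
        return trainer_config, sampler_config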
From 94c684c2fbea51f0bfc92d2f5bc2a5b56d0a6d0d Mon Sep 17 00:00:00 2001
From: Ervin Teng
Date: Mon, 13 Apr 2020 18:47:58 -0700
Subject: [PATCH 02/36] Make CLI override YAML
---
ml-agents/mlagents/trainers/trainer_util.py | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/ml-agents/mlagents/trainers/trainer_util.py b/ml-agents/mlagents/trainers/trainer_util.py
index 06bf4475dd..f8ab565ab4 100644
--- a/ml-agents/mlagents/trainers/trainer_util.py
+++ b/ml-agents/mlagents/trainers/trainer_util.py
@@ -106,10 +106,7 @@ def initialize_trainer(
trainer_parameters["model_path"] = "{basedir}/{name}".format(
basedir=model_path, name=brain_name
)
- if init_path is not None:
- trainer_parameters["init_path"] = "{basedir}/{name}".format(
- basedir=init_path, name=brain_name
- )
+
trainer_parameters["keep_checkpoints"] = keep_checkpoints
if brain_name in trainer_config:
_brain_key: Any = brain_name
@@ -117,6 +114,11 @@ def initialize_trainer(
_brain_key = trainer_config[_brain_key]
trainer_parameters.update(trainer_config[_brain_key])
+ if init_path is not None:
+ trainer_parameters["init_path"] = "{basedir}/{name}".format(
+ basedir=init_path, name=brain_name
+ )
+
min_lesson_length = 1
if meta_curriculum:
if brain_name in meta_curriculum.brains_to_curricula:
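The reordering above makes the CLI-derived init_path land after the YAML section has been merged, so the command-line value takes precedence. A minimal sketch of that ordering with a hypothetical merge_trainer_parameters helper, not the real initialize_trainer signature:

    def merge_trainer_parameters(yaml_section: dict, cli_overrides: dict) -> dict:
        params: dict = {}
        params.update(yaml_section)   # values read from the behaviors YAML
        params.update(cli_overrides)  # CLI-derived values (e.g. init_path) applied last, so they win
        return params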
From 54e99143552a70dc83c091cc8ef2c07acb18cec0 Mon Sep 17 00:00:00 2001
From: Ervin Teng
Date: Tue, 14 Apr 2020 17:44:22 -0700
Subject: [PATCH 03/36] Bring back default functionality, curriculum loader
---
ml-agents/mlagents/trainers/trainer_util.py | 21 ++++++++++++++++++---
1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/ml-agents/mlagents/trainers/trainer_util.py b/ml-agents/mlagents/trainers/trainer_util.py
index f8ab565ab4..1844c59ac5 100644
--- a/ml-agents/mlagents/trainers/trainer_util.py
+++ b/ml-agents/mlagents/trainers/trainer_util.py
@@ -95,13 +95,13 @@ def initialize_trainer(
:param meta_curriculum: Optional meta_curriculum, used to determine a reward buffer length for PPOTrainer
:return:
"""
- if brain_name not in trainer_config:
+ if "default" not in trainer_config and brain_name not in trainer_config:
raise TrainerConfigError(
- f"Trainer config must have a section for the brain name {brain_name}. "
+ f'Trainer config must have either a "default" section, or a section for the brain name {brain_name}. '
"See the config/ directory for examples."
)
- trainer_parameters: Dict[str, Any] = {}
+ trainer_parameters = trainer_config.get("default", {}).copy()
trainer_parameters["summary_path"] = str(run_id) + "_" + brain_name
trainer_parameters["model_path"] = "{basedir}/{name}".format(
basedir=model_path, name=brain_name
@@ -210,6 +210,21 @@ def _load_config(fp: TextIO) -> Dict[str, Any]:
) from e
+def assemble_curriculum_config(trainer_config: Dict[str, Any]) -> Dict[str, Any]:
+ """
+ Assembles a curriculum config Dict from a trainer config. The resulting
+ dictionary should have a mapping of {brain_name: config}, where config is the
+ curriculum configuration defined under that brain's "curriculum" key.
+ :param trainer_config: Dict of trainer configurations (keys are brain_names).
+ :return: Dict of curriculum configurations. Returns empty dict if none are found.
+ """
+ curriculum_config: Dict[str, Any] = {}
+ for brain_name, brain_config in trainer_config.items():
+ if "curriculum" in brain_config:
+ curriculum_config[brain_name] = brain_config["curriculum"]
+ return curriculum_config
+
+
def handle_existing_directories(
model_path: str, summary_path: str, resume: bool, force: bool, init_path: str = None
) -> None:
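Illustrative use of the new assemble_curriculum_config helper on a made-up trainer config; brains without a curriculum key are skipped, and an empty dict comes back when no brain defines one:

    trainer_config = {
        "BigWallJump": {"max_steps": 2e7, "curriculum": {"measure": "progress"}},
        "SmallWallJump": {"max_steps": 5e6},
    }
    curricula = assemble_curriculum_config(trainer_config)
    # curricula == {"BigWallJump": {"measure": "progress"}}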
From df4a358dad8045c95f16af37ffb73c32ac0b7dbf Mon Sep 17 00:00:00 2001
From: Ervin Teng
Date: Tue, 14 Apr 2020 17:48:09 -0700
Subject: [PATCH 04/36] Load curriculum from same YAML
---
ml-agents/mlagents/trainers/learn.py | 21 ++++-----------------
1 file changed, 4 insertions(+), 17 deletions(-)
diff --git a/ml-agents/mlagents/trainers/learn.py b/ml-agents/mlagents/trainers/learn.py
index aec2c709b9..3a6198d170 100644
--- a/ml-agents/mlagents/trainers/learn.py
+++ b/ml-agents/mlagents/trainers/learn.py
@@ -16,6 +16,7 @@
load_config,
TrainerFactory,
handle_existing_directories,
+ assemble_curriculum_config,
)
from mlagents.trainers.stats import (
TensorboardWriter,
@@ -50,18 +51,6 @@ def _create_parser():
argparser.add_argument(
"--env", default=None, dest="env_path", help="Name of the Unity executable "
)
- argparser.add_argument(
- "--curriculum",
- default=None,
- dest="curriculum_config_path",
- help="Curriculum config yaml file for environment",
- )
- argparser.add_argument(
- "--sampler",
- default=None,
- dest="sampler_file_path",
- help="Reset parameter yaml file for environment",
- )
argparser.add_argument(
"--keep-checkpoints",
default=5,
@@ -239,7 +228,6 @@ def from_argparse(args: argparse.Namespace) -> "RunOptions":
"""
argparse_args = vars(args)
config_path = argparse_args["trainer_config_path"]
- curriculum_config_path = argparse_args["curriculum_config_path"]
full_config = load_config(config_path)
try:
argparse_args["trainer_config"] = full_config["behaviors"]
@@ -247,16 +235,15 @@ def from_argparse(args: argparse.Namespace) -> "RunOptions":
raise TrainerConfigError(
"Trainer configurations not found. Make sure your YAML file has a section for behaviors."
)
- if curriculum_config_path is not None:
- argparse_args["curriculum_config"] = load_config(curriculum_config_path)
+ curriculum_config = assemble_curriculum_config(argparse_args["trainer_config"])
+ if len(curriculum_config) > 0:
+ argparse_args["curriculum_config"] = curriculum_config
if "parameter_randomization" in full_config:
argparse_args["sampler_config"] = full_config["parameter_randomization"]
# Keep deprecated --load working, TODO: remove
argparse_args["resume"] = argparse_args["resume"] or argparse_args["load_model"]
# Since argparse accepts file paths in the config options which don't exist in CommandLineOptions,
# these keys will need to be deleted to use the **/splat operator below.
- argparse_args.pop("sampler_file_path")
- argparse_args.pop("curriculum_config_path")
argparse_args.pop("trainer_config_path")
return RunOptions(**vars(args))
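With --curriculum and --sampler gone, everything a run needs comes from the one file passed as trainer_config_path. Shown below as the parsed Python dict (illustrative values drawn from the configs in this series): curricula nest under their behavior, and randomization sits at the top level next to behaviors:

    full_config = {
        "behaviors": {
            "SmallWallJump": {
                "trainer": "ppo",
                "max_steps": 5.0e6,
                "curriculum": {
                    "measure": "progress",
                    "thresholds": [0.1, 0.3, 0.5],
                    "parameters": {"small_wall_height": [1.5, 2.0, 2.5, 4.0]},
                },
            },
        },
        "parameter_randomization": {
            "resampling-interval": 5000,
            "gravity": {"sampler-type": "uniform", "min_value": 7, "max_value": 12},
        },
    }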
From 3a84c13ad74f8cf804e91a2e9125c20db9345191 Mon Sep 17 00:00:00 2001
From: Ervin Teng
Date: Tue, 14 Apr 2020 17:56:28 -0700
Subject: [PATCH 05/36] Example WallJump curriculum
---
config/ppo/WallJump_curriculum.yaml | 62 +++++++++++++++++++++++++++++
1 file changed, 62 insertions(+)
create mode 100644 config/ppo/WallJump_curriculum.yaml
diff --git a/config/ppo/WallJump_curriculum.yaml b/config/ppo/WallJump_curriculum.yaml
new file mode 100644
index 0000000000..364ebe687b
--- /dev/null
+++ b/config/ppo/WallJump_curriculum.yaml
@@ -0,0 +1,62 @@
+behaviors:
+ default:
+ trainer: ppo
+ batch_size: 1024
+ beta: 5.0e-3
+ buffer_size: 10240
+ epsilon: 0.2
+ hidden_units: 128
+ lambd: 0.95
+ learning_rate: 3.0e-4
+ learning_rate_schedule: linear
+ max_steps: 5.0e5
+ memory_size: 128
+ normalize: false
+ num_epoch: 3
+ num_layers: 2
+ time_horizon: 64
+ sequence_length: 64
+ summary_freq: 10000
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
+
+ SmallWallJump:
+ max_steps: 5e6
+ batch_size: 128
+ buffer_size: 2048
+ beta: 5.0e-3
+ hidden_units: 256
+ summary_freq: 20000
+ time_horizon: 128
+ num_layers: 2
+ normalize: false
+ curriculum:
+ measure: progress
+ thresholds: [0.1, 0.3, 0.5]
+ min_lesson_length: 100
+ signal_smoothing: true
+ parameters:
+ small_wall_height: [1.5, 2.0, 2.5, 4.0]
+
+ BigWallJump:
+ max_steps: 2e7
+ batch_size: 128
+ buffer_size: 2048
+ beta: 5.0e-3
+ hidden_units: 256
+ summary_freq: 20000
+ time_horizon: 128
+ num_layers: 2
+ normalize: false
+ curriculum:
+ measure: progress
+ thresholds: [0.1, 0.3, 0.5]
+ min_lesson_length: 100
+ signal_smoothing: true
+ parameters:
+ big_wall_min_height: [0.0, 4.0, 6.0, 8.0]
+ big_wall_max_height: [4.0, 7.0, 8.0, 8.0]
\ No newline at end of file
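In the curricula above each parameter list carries one value per lesson, which is one more entry than the thresholds list (three thresholds, four lesson values). A quick sanity check written against the shape shown in this file rather than any trainer API:

    def check_curriculum(curriculum: dict) -> None:
        n_lessons = len(curriculum["thresholds"]) + 1
        for name, values in curriculum["parameters"].items():
            assert len(values) == n_lessons, f"{name}: expected {n_lessons} values, got {len(values)}"

    check_curriculum({
        "thresholds": [0.1, 0.3, 0.5],
        "parameters": {"small_wall_height": [1.5, 2.0, 2.5, 4.0]},
    })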
From 92c9682b1ca1ece2305dd6157a7c5ca5a6beab21 Mon Sep 17 00:00:00 2001
From: Ervin Teng
Date: Wed, 15 Apr 2020 11:05:15 -0700
Subject: [PATCH 06/36] New-format YAML files
---
config/ppo/3DBall.yaml | 48 +++++++++++++++----------------
config/ppo/3DBallHard.yaml | 25 ++++++++++++++++
config/ppo/Basic.yaml | 25 ++++++++++++++++
config/ppo/Bouncer.yaml | 25 ++++++++++++++++
config/ppo/CrawlerDynamic.yaml | 25 ++++++++++++++++
config/ppo/CrawlerStatic.yaml | 25 ++++++++++++++++
config/ppo/FoodCollector.yaml | 25 ++++++++++++++++
config/ppo/GridWorld.yaml | 25 ++++++++++++++++
config/ppo/Hallway.yaml | 25 ++++++++++++++++
config/ppo/PushBlock.yaml | 25 ++++++++++++++++
config/ppo/Pyramids.yaml | 29 +++++++++++++++++++
config/ppo/Reacher.yaml | 25 ++++++++++++++++
config/ppo/Soccer.yaml | 31 ++++++++++++++++++++
config/ppo/Tennis.yaml | 31 ++++++++++++++++++++
config/ppo/VisualHallway.yaml | 25 ++++++++++++++++
config/ppo/VisualPushBlock.yaml | 25 ++++++++++++++++
config/ppo/VisualPyramids.yaml | 29 +++++++++++++++++++
config/ppo/Walker.yaml | 25 ++++++++++++++++
config/ppo/WallJump.yaml | 51 +++++++++++++++++++++++++++++++++
19 files changed, 520 insertions(+), 24 deletions(-)
create mode 100644 config/ppo/3DBallHard.yaml
create mode 100644 config/ppo/Basic.yaml
create mode 100644 config/ppo/Bouncer.yaml
create mode 100644 config/ppo/CrawlerDynamic.yaml
create mode 100644 config/ppo/CrawlerStatic.yaml
create mode 100644 config/ppo/FoodCollector.yaml
create mode 100644 config/ppo/GridWorld.yaml
create mode 100644 config/ppo/Hallway.yaml
create mode 100644 config/ppo/PushBlock.yaml
create mode 100644 config/ppo/Pyramids.yaml
create mode 100644 config/ppo/Reacher.yaml
create mode 100644 config/ppo/Soccer.yaml
create mode 100644 config/ppo/Tennis.yaml
create mode 100644 config/ppo/VisualHallway.yaml
create mode 100644 config/ppo/VisualPushBlock.yaml
create mode 100644 config/ppo/VisualPyramids.yaml
create mode 100644 config/ppo/Walker.yaml
create mode 100644 config/ppo/WallJump.yaml
diff --git a/config/ppo/3DBall.yaml b/config/ppo/3DBall.yaml
index df7d502106..9f2767f73d 100644
--- a/config/ppo/3DBall.yaml
+++ b/config/ppo/3DBall.yaml
@@ -1,25 +1,25 @@
behaviors:
- 3DBall:
- trainer: ppo
- batch_size: 64
- beta: 0.001
- buffer_size: 12000
- epsilon: 0.2
- hidden_units: 128
- lambd: 0.99
- learning_rate: 3.0e-4
- learning_rate_schedule: linear
- max_steps: 5.0e5
- memory_size: 128
- normalize: true
- num_epoch: 3
- num_layers: 2
- time_horizon: 1000
- sequence_length: 64
- summary_freq: 12000
- use_recurrent: false
- vis_encode_type: simple
- reward_signals:
- extrinsic:
- strength: 1.0
- gamma: 0.99
+ 3DBall:
+ trainer: ppo
+ batch_size: 64
+ beta: 0.001
+ buffer_size: 12000
+ epsilon: 0.2
+ hidden_units: 128
+ lambd: 0.99
+ learning_rate: 0.0003
+ learning_rate_schedule: linear
+ max_steps: 5.0e5
+ memory_size: 128
+ normalize: true
+ num_epoch: 3
+ num_layers: 2
+ time_horizon: 1000
+ sequence_length: 64
+ summary_freq: 12000
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
diff --git a/config/ppo/3DBallHard.yaml b/config/ppo/3DBallHard.yaml
new file mode 100644
index 0000000000..9054b21de5
--- /dev/null
+++ b/config/ppo/3DBallHard.yaml
@@ -0,0 +1,25 @@
+behaviors:
+ 3DBallHard:
+ trainer: ppo
+ batch_size: 1200
+ beta: 0.001
+ buffer_size: 12000
+ epsilon: 0.2
+ hidden_units: 128
+ lambd: 0.95
+ learning_rate: 0.0003
+ learning_rate_schedule: linear
+ max_steps: 5.0e6
+ memory_size: 128
+ normalize: true
+ num_epoch: 3
+ num_layers: 2
+ time_horizon: 1000
+ sequence_length: 64
+ summary_freq: 12000
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.995
diff --git a/config/ppo/Basic.yaml b/config/ppo/Basic.yaml
new file mode 100644
index 0000000000..07bd93c12c
--- /dev/null
+++ b/config/ppo/Basic.yaml
@@ -0,0 +1,25 @@
+behaviors:
+ Basic:
+ trainer: ppo
+ batch_size: 32
+ beta: 0.005
+ buffer_size: 256
+ epsilon: 0.2
+ hidden_units: 20
+ lambd: 0.95
+ learning_rate: 0.0003
+ learning_rate_schedule: linear
+ max_steps: 5.0e5
+ memory_size: 128
+ normalize: false
+ num_epoch: 3
+ num_layers: 1
+ time_horizon: 3
+ sequence_length: 64
+ summary_freq: 2000
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.9
diff --git a/config/ppo/Bouncer.yaml b/config/ppo/Bouncer.yaml
new file mode 100644
index 0000000000..7deee97ac3
--- /dev/null
+++ b/config/ppo/Bouncer.yaml
@@ -0,0 +1,25 @@
+behaviors:
+ Bouncer:
+ trainer: ppo
+ batch_size: 1024
+ beta: 0.005
+ buffer_size: 10240
+ epsilon: 0.2
+ hidden_units: 64
+ lambd: 0.95
+ learning_rate: 0.0003
+ learning_rate_schedule: linear
+ max_steps: 4.0e6
+ memory_size: 128
+ normalize: true
+ num_epoch: 3
+ num_layers: 2
+ time_horizon: 64
+ sequence_length: 64
+ summary_freq: 10000
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
diff --git a/config/ppo/CrawlerDynamic.yaml b/config/ppo/CrawlerDynamic.yaml
new file mode 100644
index 0000000000..0922eb7e01
--- /dev/null
+++ b/config/ppo/CrawlerDynamic.yaml
@@ -0,0 +1,25 @@
+behaviors:
+ CrawlerDynamic:
+ trainer: ppo
+ batch_size: 2024
+ beta: 0.005
+ buffer_size: 20240
+ epsilon: 0.2
+ hidden_units: 512
+ lambd: 0.95
+ learning_rate: 0.0003
+ learning_rate_schedule: linear
+ max_steps: 1e7
+ memory_size: 128
+ normalize: true
+ num_epoch: 3
+ num_layers: 3
+ time_horizon: 1000
+ sequence_length: 64
+ summary_freq: 30000
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.995
diff --git a/config/ppo/CrawlerStatic.yaml b/config/ppo/CrawlerStatic.yaml
new file mode 100644
index 0000000000..e532c1c198
--- /dev/null
+++ b/config/ppo/CrawlerStatic.yaml
@@ -0,0 +1,25 @@
+behaviors:
+ CrawlerStatic:
+ trainer: ppo
+ batch_size: 2024
+ beta: 0.005
+ buffer_size: 20240
+ epsilon: 0.2
+ hidden_units: 512
+ lambd: 0.95
+ learning_rate: 0.0003
+ learning_rate_schedule: linear
+ max_steps: 1e7
+ memory_size: 128
+ normalize: true
+ num_epoch: 3
+ num_layers: 3
+ time_horizon: 1000
+ sequence_length: 64
+ summary_freq: 30000
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.995
diff --git a/config/ppo/FoodCollector.yaml b/config/ppo/FoodCollector.yaml
new file mode 100644
index 0000000000..53abc801db
--- /dev/null
+++ b/config/ppo/FoodCollector.yaml
@@ -0,0 +1,25 @@
+behaviors:
+ FoodCollector:
+ trainer: ppo
+ batch_size: 1024
+ beta: 0.005
+ buffer_size: 10240
+ epsilon: 0.2
+ hidden_units: 128
+ lambd: 0.95
+ learning_rate: 0.0003
+ learning_rate_schedule: linear
+ max_steps: 2.0e6
+ memory_size: 128
+ normalize: false
+ num_epoch: 3
+ num_layers: 2
+ time_horizon: 64
+ sequence_length: 64
+ summary_freq: 10000
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
diff --git a/config/ppo/GridWorld.yaml b/config/ppo/GridWorld.yaml
new file mode 100644
index 0000000000..e7ccd13434
--- /dev/null
+++ b/config/ppo/GridWorld.yaml
@@ -0,0 +1,25 @@
+behaviors:
+ GridWorld:
+ trainer: ppo
+ batch_size: 32
+ beta: 0.005
+ buffer_size: 256
+ epsilon: 0.2
+ hidden_units: 256
+ lambd: 0.95
+ learning_rate: 0.0003
+ learning_rate_schedule: linear
+ max_steps: 500000
+ memory_size: 128
+ normalize: false
+ num_epoch: 3
+ num_layers: 1
+ time_horizon: 5
+ sequence_length: 64
+ summary_freq: 20000
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.9
diff --git a/config/ppo/Hallway.yaml b/config/ppo/Hallway.yaml
new file mode 100644
index 0000000000..29247505ba
--- /dev/null
+++ b/config/ppo/Hallway.yaml
@@ -0,0 +1,25 @@
+behaviors:
+ Hallway:
+ trainer: ppo
+ batch_size: 128
+ beta: 0.01
+ buffer_size: 1024
+ epsilon: 0.2
+ hidden_units: 128
+ lambd: 0.95
+ learning_rate: 0.0003
+ learning_rate_schedule: linear
+ max_steps: 1.0e7
+ memory_size: 128
+ normalize: false
+ num_epoch: 3
+ num_layers: 2
+ time_horizon: 64
+ sequence_length: 64
+ summary_freq: 10000
+ use_recurrent: true
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
diff --git a/config/ppo/PushBlock.yaml b/config/ppo/PushBlock.yaml
new file mode 100644
index 0000000000..246d07aa11
--- /dev/null
+++ b/config/ppo/PushBlock.yaml
@@ -0,0 +1,25 @@
+behaviors:
+ PushBlock:
+ trainer: ppo
+ batch_size: 128
+ beta: 0.01
+ buffer_size: 2048
+ epsilon: 0.2
+ hidden_units: 256
+ lambd: 0.95
+ learning_rate: 0.0003
+ learning_rate_schedule: linear
+ max_steps: 2.0e6
+ memory_size: 128
+ normalize: false
+ num_epoch: 3
+ num_layers: 2
+ time_horizon: 64
+ sequence_length: 64
+ summary_freq: 60000
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
diff --git a/config/ppo/Pyramids.yaml b/config/ppo/Pyramids.yaml
new file mode 100644
index 0000000000..6f385ae16e
--- /dev/null
+++ b/config/ppo/Pyramids.yaml
@@ -0,0 +1,29 @@
+behaviors:
+ Pyramids:
+ trainer: ppo
+ batch_size: 128
+ beta: 0.01
+ buffer_size: 2048
+ epsilon: 0.2
+ hidden_units: 512
+ lambd: 0.95
+ learning_rate: 0.0003
+ learning_rate_schedule: linear
+ max_steps: 1.0e7
+ memory_size: 128
+ normalize: false
+ num_epoch: 3
+ num_layers: 2
+ time_horizon: 128
+ sequence_length: 64
+ summary_freq: 30000
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
+ curiosity:
+ strength: 0.02
+ gamma: 0.99
+ encoding_size: 256
diff --git a/config/ppo/Reacher.yaml b/config/ppo/Reacher.yaml
new file mode 100644
index 0000000000..18ebb35580
--- /dev/null
+++ b/config/ppo/Reacher.yaml
@@ -0,0 +1,25 @@
+behaviors:
+ Reacher:
+ trainer: ppo
+ batch_size: 2024
+ beta: 0.005
+ buffer_size: 20240
+ epsilon: 0.2
+ hidden_units: 128
+ lambd: 0.95
+ learning_rate: 0.0003
+ learning_rate_schedule: linear
+ max_steps: 2e7
+ memory_size: 128
+ normalize: true
+ num_epoch: 3
+ num_layers: 2
+ time_horizon: 1000
+ sequence_length: 64
+ summary_freq: 60000
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.995
diff --git a/config/ppo/Soccer.yaml b/config/ppo/Soccer.yaml
new file mode 100644
index 0000000000..4b5caf4fba
--- /dev/null
+++ b/config/ppo/Soccer.yaml
@@ -0,0 +1,31 @@
+behaviors:
+ Soccer:
+ trainer: ppo
+ batch_size: 2048
+ beta: 0.005
+ buffer_size: 20480
+ epsilon: 0.2
+ hidden_units: 512
+ lambd: 0.95
+ learning_rate: 0.0003
+ learning_rate_schedule: constant
+ max_steps: 5.0e7
+ memory_size: 128
+ normalize: false
+ num_epoch: 3
+ num_layers: 2
+ time_horizon: 1000
+ sequence_length: 64
+ summary_freq: 10000
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
+ self_play:
+ window: 10
+ play_against_latest_model_ratio: 0.5
+ save_steps: 50000
+ swap_steps: 50000
+ team_change: 100000
diff --git a/config/ppo/Tennis.yaml b/config/ppo/Tennis.yaml
new file mode 100644
index 0000000000..3ad006fa18
--- /dev/null
+++ b/config/ppo/Tennis.yaml
@@ -0,0 +1,31 @@
+behaviors:
+ Tennis:
+ trainer: ppo
+ batch_size: 1024
+ beta: 0.005
+ buffer_size: 10240
+ epsilon: 0.2
+ hidden_units: 256
+ lambd: 0.95
+ learning_rate: 0.0003
+ learning_rate_schedule: constant
+ max_steps: 5.0e7
+ memory_size: 128
+ normalize: true
+ num_epoch: 3
+ num_layers: 2
+ time_horizon: 1000
+ sequence_length: 64
+ summary_freq: 10000
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
+ self_play:
+ window: 10
+ play_against_latest_model_ratio: 0.5
+ save_steps: 50000
+ swap_steps: 50000
+ team_change: 100000
diff --git a/config/ppo/VisualHallway.yaml b/config/ppo/VisualHallway.yaml
new file mode 100644
index 0000000000..abcbfc3ee3
--- /dev/null
+++ b/config/ppo/VisualHallway.yaml
@@ -0,0 +1,25 @@
+behaviors:
+ VisualHallway:
+ trainer: ppo
+ batch_size: 64
+ beta: 0.01
+ buffer_size: 1024
+ epsilon: 0.2
+ hidden_units: 128
+ lambd: 0.95
+ learning_rate: 0.0003
+ learning_rate_schedule: linear
+ max_steps: 1.0e7
+ memory_size: 128
+ normalize: false
+ num_epoch: 3
+ num_layers: 1
+ time_horizon: 64
+ sequence_length: 64
+ summary_freq: 10000
+ use_recurrent: true
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
diff --git a/config/ppo/VisualPushBlock.yaml b/config/ppo/VisualPushBlock.yaml
new file mode 100644
index 0000000000..1ea0f1fa6f
--- /dev/null
+++ b/config/ppo/VisualPushBlock.yaml
@@ -0,0 +1,25 @@
+behaviors:
+ VisualPushBlock:
+ trainer: ppo
+ batch_size: 64
+ beta: 0.01
+ buffer_size: 1024
+ epsilon: 0.2
+ hidden_units: 128
+ lambd: 0.95
+ learning_rate: 0.0003
+ learning_rate_schedule: linear
+ max_steps: 3.0e6
+ memory_size: 128
+ normalize: false
+ num_epoch: 3
+ num_layers: 1
+ time_horizon: 64
+ sequence_length: 32
+ summary_freq: 60000
+ use_recurrent: true
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
diff --git a/config/ppo/VisualPyramids.yaml b/config/ppo/VisualPyramids.yaml
new file mode 100644
index 0000000000..2447c44c48
--- /dev/null
+++ b/config/ppo/VisualPyramids.yaml
@@ -0,0 +1,29 @@
+behaviors:
+ VisualPyramids:
+ trainer: ppo
+ batch_size: 64
+ beta: 0.01
+ buffer_size: 2024
+ epsilon: 0.2
+ hidden_units: 256
+ lambd: 0.95
+ learning_rate: 0.0003
+ learning_rate_schedule: linear
+ max_steps: 1.0e7
+ memory_size: 128
+ normalize: false
+ num_epoch: 3
+ num_layers: 1
+ time_horizon: 128
+ sequence_length: 64
+ summary_freq: 10000
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
+ curiosity:
+ strength: 0.01
+ gamma: 0.99
+ encoding_size: 256
diff --git a/config/ppo/Walker.yaml b/config/ppo/Walker.yaml
new file mode 100644
index 0000000000..52648fedff
--- /dev/null
+++ b/config/ppo/Walker.yaml
@@ -0,0 +1,25 @@
+behaviors:
+ Walker:
+ trainer: ppo
+ batch_size: 2048
+ beta: 0.005
+ buffer_size: 20480
+ epsilon: 0.2
+ hidden_units: 512
+ lambd: 0.95
+ learning_rate: 0.0003
+ learning_rate_schedule: linear
+ max_steps: 2e7
+ memory_size: 128
+ normalize: true
+ num_epoch: 3
+ num_layers: 3
+ time_horizon: 1000
+ sequence_length: 64
+ summary_freq: 30000
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.995
diff --git a/config/ppo/WallJump.yaml b/config/ppo/WallJump.yaml
new file mode 100644
index 0000000000..2501e129f2
--- /dev/null
+++ b/config/ppo/WallJump.yaml
@@ -0,0 +1,51 @@
+behaviors:
+ BigWallJump:
+ trainer: ppo
+ batch_size: 128
+ beta: 0.005
+ buffer_size: 2048
+ epsilon: 0.2
+ hidden_units: 256
+ lambd: 0.95
+ learning_rate: 0.0003
+ learning_rate_schedule: linear
+ max_steps: 2e7
+ memory_size: 128
+ normalize: false
+ num_epoch: 3
+ num_layers: 2
+ time_horizon: 128
+ sequence_length: 64
+ summary_freq: 20000
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
+
+ SmallWallJump:
+ trainer: ppo
+ batch_size: 128
+ beta: 0.005
+ buffer_size: 2048
+ epsilon: 0.2
+ hidden_units: 256
+ lambd: 0.95
+ learning_rate: 0.0003
+ learning_rate_schedule: linear
+ max_steps: 5e6
+ memory_size: 128
+ normalize: false
+ num_epoch: 3
+ num_layers: 2
+ time_horizon: 128
+ sequence_length: 64
+ summary_freq: 20000
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
+
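All of the files added here share the same two-level layout: a behaviors map keyed by behavior name, each entry holding the full hyperparameter set plus optional reward_signals and self_play sections. A throwaway check, assuming PyYAML and that the configs live under config/ppo/, for confirming every file carries the required top-level key:

    import glob
    import yaml

    for path in sorted(glob.glob("config/ppo/*.yaml")):
        with open(path) as f:
            cfg = yaml.safe_load(f)
        assert "behaviors" in cfg, f"{path} is missing a 'behaviors' section"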
From 9dcf38da3ae7ba75d6d3325b1c92ba039ab6f5a7 Mon Sep 17 00:00:00 2001
From: Ervin Teng
Date: Thu, 16 Apr 2020 11:49:03 -0700
Subject: [PATCH 07/36] Fix walljump curriculum
---
config/ppo/WallJump_curriculum.yaml | 123 ++++++++++++++--------------
1 file changed, 63 insertions(+), 60 deletions(-)
diff --git a/config/ppo/WallJump_curriculum.yaml b/config/ppo/WallJump_curriculum.yaml
index 364ebe687b..48640a767d 100644
--- a/config/ppo/WallJump_curriculum.yaml
+++ b/config/ppo/WallJump_curriculum.yaml
@@ -1,62 +1,65 @@
behaviors:
- default:
- trainer: ppo
- batch_size: 1024
- beta: 5.0e-3
- buffer_size: 10240
- epsilon: 0.2
- hidden_units: 128
- lambd: 0.95
- learning_rate: 3.0e-4
- learning_rate_schedule: linear
- max_steps: 5.0e5
- memory_size: 128
- normalize: false
- num_epoch: 3
- num_layers: 2
- time_horizon: 64
- sequence_length: 64
- summary_freq: 10000
- use_recurrent: false
- vis_encode_type: simple
- reward_signals:
- extrinsic:
- strength: 1.0
- gamma: 0.99
+ BigWallJump:
+ trainer: ppo
+ batch_size: 128
+ beta: 0.005
+ buffer_size: 2048
+ epsilon: 0.2
+ hidden_units: 256
+ lambd: 0.95
+ learning_rate: 0.0003
+ learning_rate_schedule: linear
+ max_steps: 2e7
+ memory_size: 128
+ normalize: false
+ num_epoch: 3
+ num_layers: 2
+ time_horizon: 128
+ sequence_length: 64
+ summary_freq: 20000
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
+ curriculum:
+ measure: progress
+ thresholds: [0.1, 0.3, 0.5]
+ min_lesson_length: 100
+ signal_smoothing: true
+ parameters:
+ big_wall_min_height: [0.0, 4.0, 6.0, 8.0]
+ big_wall_max_height: [4.0, 7.0, 8.0, 8.0]
- SmallWallJump:
- max_steps: 5e6
- batch_size: 128
- buffer_size: 2048
- beta: 5.0e-3
- hidden_units: 256
- summary_freq: 20000
- time_horizon: 128
- num_layers: 2
- normalize: false
- curriculum:
- measure: progress
- thresholds: [0.1, 0.3, 0.5]
- min_lesson_length: 100
- signal_smoothing: true
- parameters:
- small_wall_height: [1.5, 2.0, 2.5, 4.0]
-
- BigWallJump:
- max_steps: 2e7
- batch_size: 128
- buffer_size: 2048
- beta: 5.0e-3
- hidden_units: 256
- summary_freq: 20000
- time_horizon: 128
- num_layers: 2
- normalize: false
- curriculum:
- measure: progress
- thresholds: [0.1, 0.3, 0.5]
- min_lesson_length: 100
- signal_smoothing: true
- parameters:
- big_wall_min_height: [0.0, 4.0, 6.0, 8.0]
- big_wall_max_height: [4.0, 7.0, 8.0, 8.0]
\ No newline at end of file
+ SmallWallJump:
+ trainer: ppo
+ batch_size: 128
+ beta: 0.005
+ buffer_size: 2048
+ epsilon: 0.2
+ hidden_units: 256
+ lambd: 0.95
+ learning_rate: 0.0003
+ learning_rate_schedule: linear
+ max_steps: 5e6
+ memory_size: 128
+ normalize: false
+ num_epoch: 3
+ num_layers: 2
+ time_horizon: 128
+ sequence_length: 64
+ summary_freq: 20000
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
+ curriculum:
+ measure: progress
+ thresholds: [0.1, 0.3, 0.5]
+ min_lesson_length: 100
+ signal_smoothing: true
+ parameters:
+ small_wall_height: [1.5, 2.0, 2.5, 4.0]
From a926d4c6bd9b92d7b3d7c769c2661aebd6e20764 Mon Sep 17 00:00:00 2001
From: Ervin Teng
Date: Thu, 16 Apr 2020 12:34:55 -0700
Subject: [PATCH 08/36] Commit SAC parameters
---
config/sac/3DBall.yaml | 26 ++++++++++++++++
config/sac/3DBallHard.yaml | 26 ++++++++++++++++
config/sac/Basic.yaml | 26 ++++++++++++++++
config/sac/BigWallJump.yaml | 53 +++++++++++++++++++++++++++++++++
config/sac/Bouncer.yaml | 26 ++++++++++++++++
config/sac/CrawlerDynamic.yaml | 26 ++++++++++++++++
config/sac/CrawlerStatic.yaml | 26 ++++++++++++++++
config/sac/FoodCollector.yaml | 26 ++++++++++++++++
config/sac/GridWorld.yaml | 26 ++++++++++++++++
config/sac/Hallway.yaml | 26 ++++++++++++++++
config/sac/PushBlock.yaml | 26 ++++++++++++++++
config/sac/Pyramids.yaml | 32 ++++++++++++++++++++
config/sac/Reacher.yaml | 26 ++++++++++++++++
config/sac/Tennis.yaml | 31 +++++++++++++++++++
config/sac/VisualHallway.yaml | 27 +++++++++++++++++
config/sac/VisualPushBlock.yaml | 27 +++++++++++++++++
config/sac/VisualPyramids.yaml | 32 ++++++++++++++++++++
config/sac/Walker.yaml | 26 ++++++++++++++++
18 files changed, 514 insertions(+)
create mode 100644 config/sac/3DBall.yaml
create mode 100644 config/sac/3DBallHard.yaml
create mode 100644 config/sac/Basic.yaml
create mode 100644 config/sac/BigWallJump.yaml
create mode 100644 config/sac/Bouncer.yaml
create mode 100644 config/sac/CrawlerDynamic.yaml
create mode 100644 config/sac/CrawlerStatic.yaml
create mode 100644 config/sac/FoodCollector.yaml
create mode 100644 config/sac/GridWorld.yaml
create mode 100644 config/sac/Hallway.yaml
create mode 100644 config/sac/PushBlock.yaml
create mode 100644 config/sac/Pyramids.yaml
create mode 100644 config/sac/Reacher.yaml
create mode 100644 config/sac/Tennis.yaml
create mode 100644 config/sac/VisualHallway.yaml
create mode 100644 config/sac/VisualPushBlock.yaml
create mode 100644 config/sac/VisualPyramids.yaml
create mode 100644 config/sac/Walker.yaml
diff --git a/config/sac/3DBall.yaml b/config/sac/3DBall.yaml
new file mode 100644
index 0000000000..c3ea98b39e
--- /dev/null
+++ b/config/sac/3DBall.yaml
@@ -0,0 +1,26 @@
+behaviors:
+ 3DBall:
+ trainer: sac
+ batch_size: 64
+ buffer_size: 12000
+ buffer_init_steps: 0
+ hidden_units: 64
+ init_entcoef: 0.5
+ learning_rate: 0.0003
+ learning_rate_schedule: constant
+ max_steps: 5.0e5
+ memory_size: 128
+ normalize: true
+ num_update: 1
+ train_interval: 1
+ num_layers: 2
+ time_horizon: 1000
+ sequence_length: 64
+ summary_freq: 12000
+ tau: 0.005
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
diff --git a/config/sac/3DBallHard.yaml b/config/sac/3DBallHard.yaml
new file mode 100644
index 0000000000..b6d5eaa3ab
--- /dev/null
+++ b/config/sac/3DBallHard.yaml
@@ -0,0 +1,26 @@
+behaviors:
+ 3DBallHard:
+ trainer: sac
+ batch_size: 256
+ buffer_size: 50000
+ buffer_init_steps: 0
+ hidden_units: 128
+ init_entcoef: 1.0
+ learning_rate: 0.0003
+ learning_rate_schedule: constant
+ max_steps: 5.0e5
+ memory_size: 128
+ normalize: true
+ num_update: 1
+ train_interval: 1
+ num_layers: 2
+ time_horizon: 1000
+ sequence_length: 64
+ summary_freq: 12000
+ tau: 0.005
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
diff --git a/config/sac/Basic.yaml b/config/sac/Basic.yaml
new file mode 100644
index 0000000000..af2142e5a0
--- /dev/null
+++ b/config/sac/Basic.yaml
@@ -0,0 +1,26 @@
+behaviors:
+ Basic:
+ trainer: sac
+ batch_size: 64
+ buffer_size: 50000
+ buffer_init_steps: 0
+ hidden_units: 20
+ init_entcoef: 0.01
+ learning_rate: 0.0003
+ learning_rate_schedule: constant
+ max_steps: 5.0e5
+ memory_size: 128
+ normalize: false
+ num_update: 1
+ train_interval: 1
+ num_layers: 2
+ time_horizon: 10
+ sequence_length: 64
+ summary_freq: 2000
+ tau: 0.005
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
diff --git a/config/sac/BigWallJump.yaml b/config/sac/BigWallJump.yaml
new file mode 100644
index 0000000000..a3dbc27032
--- /dev/null
+++ b/config/sac/BigWallJump.yaml
@@ -0,0 +1,53 @@
+behaviors:
+ BigWallJump:
+ trainer: sac
+ batch_size: 128
+ buffer_size: 50000
+ buffer_init_steps: 0
+ hidden_units: 256
+ init_entcoef: 0.1
+ learning_rate: 0.0003
+ learning_rate_schedule: constant
+ max_steps: 2e7
+ memory_size: 128
+ normalize: false
+ num_update: 1
+ train_interval: 1
+ num_layers: 2
+ time_horizon: 128
+ sequence_length: 64
+ summary_freq: 20000
+ tau: 0.005
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
+
+ SmallWallJump:
+ trainer: sac
+ batch_size: 128
+ buffer_size: 50000
+ buffer_init_steps: 0
+ hidden_units: 256
+ init_entcoef: 0.1
+ learning_rate: 0.0003
+ learning_rate_schedule: constant
+ max_steps: 5e6
+ memory_size: 128
+ normalize: false
+ num_update: 1
+ train_interval: 1
+ num_layers: 2
+ time_horizon: 128
+ sequence_length: 64
+ summary_freq: 20000
+ tau: 0.005
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
+
diff --git a/config/sac/Bouncer.yaml b/config/sac/Bouncer.yaml
new file mode 100644
index 0000000000..75a88ed0d5
--- /dev/null
+++ b/config/sac/Bouncer.yaml
@@ -0,0 +1,26 @@
+behaviors:
+ Bouncer:
+ trainer: sac
+ batch_size: 128
+ buffer_size: 50000
+ buffer_init_steps: 0
+ hidden_units: 64
+ init_entcoef: 1.0
+ learning_rate: 0.0003
+ learning_rate_schedule: constant
+ max_steps: 2.0e6
+ memory_size: 128
+ normalize: true
+ num_update: 1
+ train_interval: 1
+ num_layers: 2
+ time_horizon: 64
+ sequence_length: 64
+ summary_freq: 20000
+ tau: 0.005
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
diff --git a/config/sac/CrawlerDynamic.yaml b/config/sac/CrawlerDynamic.yaml
new file mode 100644
index 0000000000..8aa09bb88b
--- /dev/null
+++ b/config/sac/CrawlerDynamic.yaml
@@ -0,0 +1,26 @@
+behaviors:
+ CrawlerDynamic:
+ trainer: sac
+ batch_size: 256
+ buffer_size: 500000
+ buffer_init_steps: 0
+ hidden_units: 512
+ init_entcoef: 1.0
+ learning_rate: 0.0003
+ learning_rate_schedule: constant
+ max_steps: 1e7
+ memory_size: 128
+ normalize: true
+ num_update: 1
+ train_interval: 2
+ num_layers: 3
+ time_horizon: 1000
+ sequence_length: 64
+ summary_freq: 30000
+ tau: 0.005
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.995
diff --git a/config/sac/CrawlerStatic.yaml b/config/sac/CrawlerStatic.yaml
new file mode 100644
index 0000000000..4d90f16d51
--- /dev/null
+++ b/config/sac/CrawlerStatic.yaml
@@ -0,0 +1,26 @@
+behaviors:
+ CrawlerStatic:
+ trainer: sac
+ batch_size: 256
+ buffer_size: 500000
+ buffer_init_steps: 2000
+ hidden_units: 512
+ init_entcoef: 1.0
+ learning_rate: 0.0003
+ learning_rate_schedule: constant
+ max_steps: 5e6
+ memory_size: 128
+ normalize: true
+ num_update: 1
+ train_interval: 2
+ num_layers: 3
+ time_horizon: 1000
+ sequence_length: 64
+ summary_freq: 30000
+ tau: 0.005
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.995
diff --git a/config/sac/FoodCollector.yaml b/config/sac/FoodCollector.yaml
new file mode 100644
index 0000000000..de308bf64c
--- /dev/null
+++ b/config/sac/FoodCollector.yaml
@@ -0,0 +1,26 @@
+behaviors:
+ FoodCollector:
+ trainer: sac
+ batch_size: 256
+ buffer_size: 500000
+ buffer_init_steps: 0
+ hidden_units: 128
+ init_entcoef: 0.05
+ learning_rate: 0.0003
+ learning_rate_schedule: constant
+ max_steps: 2.0e6
+ memory_size: 128
+ normalize: false
+ num_update: 1
+ train_interval: 1
+ num_layers: 2
+ time_horizon: 64
+ sequence_length: 64
+ summary_freq: 10000
+ tau: 0.005
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
diff --git a/config/sac/GridWorld.yaml b/config/sac/GridWorld.yaml
new file mode 100644
index 0000000000..23c94ee4a7
--- /dev/null
+++ b/config/sac/GridWorld.yaml
@@ -0,0 +1,26 @@
+behaviors:
+ GridWorld:
+ trainer: sac
+ batch_size: 128
+ buffer_size: 50000
+ buffer_init_steps: 1000
+ hidden_units: 128
+ init_entcoef: 0.5
+ learning_rate: 0.0003
+ learning_rate_schedule: constant
+ max_steps: 500000
+ memory_size: 128
+ normalize: false
+ num_update: 1
+ train_interval: 1
+ num_layers: 1
+ time_horizon: 5
+ sequence_length: 64
+ summary_freq: 20000
+ tau: 0.005
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.9
diff --git a/config/sac/Hallway.yaml b/config/sac/Hallway.yaml
new file mode 100644
index 0000000000..84908dfe8b
--- /dev/null
+++ b/config/sac/Hallway.yaml
@@ -0,0 +1,26 @@
+behaviors:
+ Hallway:
+ trainer: sac
+ batch_size: 128
+ buffer_size: 50000
+ buffer_init_steps: 0
+ hidden_units: 128
+ init_entcoef: 0.1
+ learning_rate: 0.0003
+ learning_rate_schedule: constant
+ max_steps: 1.0e7
+ memory_size: 128
+ normalize: false
+ num_update: 1
+ train_interval: 1
+ num_layers: 2
+ time_horizon: 64
+ sequence_length: 32
+ summary_freq: 10000
+ tau: 0.005
+ use_recurrent: true
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
diff --git a/config/sac/PushBlock.yaml b/config/sac/PushBlock.yaml
new file mode 100644
index 0000000000..384a148fbf
--- /dev/null
+++ b/config/sac/PushBlock.yaml
@@ -0,0 +1,26 @@
+behaviors:
+ PushBlock:
+ trainer: sac
+ batch_size: 128
+ buffer_size: 50000
+ buffer_init_steps: 0
+ hidden_units: 256
+ init_entcoef: 0.05
+ learning_rate: 0.0003
+ learning_rate_schedule: constant
+ max_steps: 2e6
+ memory_size: 128
+ normalize: false
+ num_update: 1
+ train_interval: 1
+ num_layers: 2
+ time_horizon: 64
+ sequence_length: 64
+ summary_freq: 60000
+ tau: 0.005
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
diff --git a/config/sac/Pyramids.yaml b/config/sac/Pyramids.yaml
new file mode 100644
index 0000000000..fff9198e76
--- /dev/null
+++ b/config/sac/Pyramids.yaml
@@ -0,0 +1,32 @@
+behaviors:
+ Pyramids:
+ trainer: sac
+ batch_size: 128
+ buffer_size: 500000
+ buffer_init_steps: 10000
+ hidden_units: 256
+ init_entcoef: 0.01
+ learning_rate: 0.0003
+ learning_rate_schedule: constant
+ max_steps: 1.0e7
+ memory_size: 128
+ normalize: false
+ num_update: 1
+ train_interval: 1
+ num_layers: 2
+ time_horizon: 128
+ sequence_length: 16
+ summary_freq: 30000
+ tau: 0.01
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 2.0
+ gamma: 0.99
+ gail:
+ strength: 0.02
+ gamma: 0.99
+ encoding_size: 128
+ use_actions: true
+ demo_path: Project/Assets/ML-Agents/Examples/Pyramids/Demos/ExpertPyramid.demo
diff --git a/config/sac/Reacher.yaml b/config/sac/Reacher.yaml
new file mode 100644
index 0000000000..2aa4c69590
--- /dev/null
+++ b/config/sac/Reacher.yaml
@@ -0,0 +1,26 @@
+behaviors:
+ Reacher:
+ trainer: sac
+ batch_size: 128
+ buffer_size: 500000
+ buffer_init_steps: 0
+ hidden_units: 128
+ init_entcoef: 1.0
+ learning_rate: 0.0003
+ learning_rate_schedule: constant
+ max_steps: 2e7
+ memory_size: 128
+ normalize: true
+ num_update: 1
+ train_interval: 1
+ num_layers: 2
+ time_horizon: 1000
+ sequence_length: 64
+ summary_freq: 60000
+ tau: 0.005
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
diff --git a/config/sac/Tennis.yaml b/config/sac/Tennis.yaml
new file mode 100644
index 0000000000..76c678d16b
--- /dev/null
+++ b/config/sac/Tennis.yaml
@@ -0,0 +1,31 @@
+behaviors:
+ Tennis:
+ trainer: sac
+ batch_size: 128
+ buffer_size: 50000
+ buffer_init_steps: 0
+ hidden_units: 256
+ init_entcoef: 1.0
+ learning_rate: 0.0003
+ learning_rate_schedule: constant
+ max_steps: 2e7
+ memory_size: 128
+ normalize: true
+ num_update: 1
+ train_interval: 1
+ num_layers: 2
+ time_horizon: 64
+ sequence_length: 64
+ summary_freq: 10000
+ tau: 0.005
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
+ self_play:
+ window: 10
+ play_against_current_self_ratio: 0.5
+ save_steps: 50000
+ swap_steps: 50000
diff --git a/config/sac/VisualHallway.yaml b/config/sac/VisualHallway.yaml
new file mode 100644
index 0000000000..e2244ddf21
--- /dev/null
+++ b/config/sac/VisualHallway.yaml
@@ -0,0 +1,27 @@
+behaviors:
+ VisualHallway:
+ trainer: sac
+ batch_size: 64
+ buffer_size: 50000
+ buffer_init_steps: 0
+ hidden_units: 128
+ init_entcoef: 1.0
+ learning_rate: 0.0003
+ learning_rate_schedule: constant
+ max_steps: 1.0e7
+ memory_size: 128
+ normalize: false
+ num_update: 1
+ train_interval: 1
+ num_layers: 1
+ time_horizon: 64
+ sequence_length: 32
+ summary_freq: 10000
+ tau: 0.005
+ use_recurrent: true
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
diff --git a/config/sac/VisualPushBlock.yaml b/config/sac/VisualPushBlock.yaml
new file mode 100644
index 0000000000..50db5780ac
--- /dev/null
+++ b/config/sac/VisualPushBlock.yaml
@@ -0,0 +1,27 @@
+behaviors:
+ VisualPushBlock:
+ trainer: sac
+ batch_size: 64
+ buffer_size: 1024
+ buffer_init_steps: 0
+ hidden_units: 128
+ init_entcoef: 1.0
+ learning_rate: 0.0003
+ learning_rate_schedule: constant
+ max_steps: 3.0e6
+ memory_size: 128
+ normalize: false
+ num_update: 1
+ train_interval: 1
+ num_layers: 1
+ time_horizon: 64
+ sequence_length: 32
+ summary_freq: 60000
+ tau: 0.005
+ use_recurrent: true
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
diff --git a/config/sac/VisualPyramids.yaml b/config/sac/VisualPyramids.yaml
new file mode 100644
index 0000000000..846b4fff50
--- /dev/null
+++ b/config/sac/VisualPyramids.yaml
@@ -0,0 +1,32 @@
+behaviors:
+ VisualPyramids:
+ trainer: sac
+ batch_size: 64
+ buffer_size: 500000
+ buffer_init_steps: 1000
+ hidden_units: 256
+ init_entcoef: 0.01
+ learning_rate: 0.0003
+ learning_rate_schedule: constant
+ max_steps: 1.0e7
+ memory_size: 128
+ normalize: false
+ num_update: 1
+ train_interval: 1
+ num_layers: 1
+ time_horizon: 128
+ sequence_length: 64
+ summary_freq: 10000
+ tau: 0.01
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 2.0
+ gamma: 0.99
+ gail:
+ strength: 0.02
+ gamma: 0.99
+ encoding_size: 128
+ use_actions: true
+ demo_path: Project/Assets/ML-Agents/Examples/Pyramids/Demos/ExpertPyramid.demo
diff --git a/config/sac/Walker.yaml b/config/sac/Walker.yaml
new file mode 100644
index 0000000000..b33022a720
--- /dev/null
+++ b/config/sac/Walker.yaml
@@ -0,0 +1,26 @@
+behaviors:
+ Walker:
+ trainer: sac
+ batch_size: 256
+ buffer_size: 500000
+ buffer_init_steps: 0
+ hidden_units: 512
+ init_entcoef: 1.0
+ learning_rate: 0.0003
+ learning_rate_schedule: constant
+ max_steps: 2e7
+ memory_size: 128
+ normalize: true
+ num_update: 1
+ train_interval: 2
+ num_layers: 4
+ time_horizon: 1000
+ sequence_length: 64
+ summary_freq: 30000
+ tau: 0.005
+ use_recurrent: false
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.995
From 419a1566bf4ed15d263c3d22809d4c670bb6e72e Mon Sep 17 00:00:00 2001
From: Ervin Teng
Date: Thu, 16 Apr 2020 12:37:30 -0700
Subject: [PATCH 09/36] Delete old configs and add gail
---
config/curricula/test.yaml | 9 -
config/curricula/wall_jump.yaml | 16 --
config/gail_config.yaml | 129 -------------
config/imitation/CrawlerStatic.yaml | 29 +++
config/imitation/FoodCollector.yaml | 29 +++
config/imitation/Hallway.yaml | 28 +++
config/imitation/PushBlock.yaml | 25 +++
config/imitation/Pyramids.yaml | 36 ++++
config/sac_trainer_config.yaml | 273 --------------------------
config/trainer_config.yaml | 289 ----------------------------
10 files changed, 147 insertions(+), 716 deletions(-)
delete mode 100644 config/curricula/test.yaml
delete mode 100644 config/curricula/wall_jump.yaml
delete mode 100644 config/gail_config.yaml
create mode 100644 config/imitation/CrawlerStatic.yaml
create mode 100644 config/imitation/FoodCollector.yaml
create mode 100644 config/imitation/Hallway.yaml
create mode 100644 config/imitation/PushBlock.yaml
create mode 100644 config/imitation/Pyramids.yaml
delete mode 100644 config/sac_trainer_config.yaml
delete mode 100644 config/trainer_config.yaml
diff --git a/config/curricula/test.yaml b/config/curricula/test.yaml
deleted file mode 100644
index 1eb379046e..0000000000
--- a/config/curricula/test.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-TestBrain:
- measure: reward
- thresholds: [10, 20, 50]
- min_lesson_length: 100
- signal_smoothing: true
- parameters:
- param1: [0.7, 0.5, 0.3, 0.1]
- param2: [100, 50, 20, 15]
- param3: [0.2, 0.3, 0.7, 0.9]
diff --git a/config/curricula/wall_jump.yaml b/config/curricula/wall_jump.yaml
deleted file mode 100644
index c38c281b51..0000000000
--- a/config/curricula/wall_jump.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-BigWallJump:
- measure: progress
- thresholds: [0.1, 0.3, 0.5]
- min_lesson_length: 100
- signal_smoothing: true
- parameters:
- big_wall_min_height: [0.0, 4.0, 6.0, 8.0]
- big_wall_max_height: [4.0, 7.0, 8.0, 8.0]
-
-SmallWallJump:
- measure: progress
- thresholds: [0.1, 0.3, 0.5]
- min_lesson_length: 100
- signal_smoothing: true
- parameters:
- small_wall_height: [1.5, 2.0, 2.5, 4.0]
diff --git a/config/gail_config.yaml b/config/gail_config.yaml
deleted file mode 100644
index fae274597b..0000000000
--- a/config/gail_config.yaml
+++ /dev/null
@@ -1,129 +0,0 @@
-default:
- trainer: ppo
- batch_size: 1024
- beta: 5.0e-3
- buffer_size: 10240
- epsilon: 0.2
- hidden_units: 128
- lambd: 0.95
- learning_rate: 3.0e-4
- max_steps: 5.0e5
- memory_size: 256
- normalize: false
- num_epoch: 3
- num_layers: 2
- time_horizon: 64
- sequence_length: 64
- summary_freq: 10000
- use_recurrent: false
- reward_signals:
- extrinsic:
- strength: 1.0
- gamma: 0.99
-
-Pyramids:
- summary_freq: 30000
- time_horizon: 128
- batch_size: 128
- buffer_size: 2048
- hidden_units: 512
- num_layers: 2
- beta: 1.0e-2
- max_steps: 1.0e7
- num_epoch: 3
- behavioral_cloning:
- demo_path: Project/Assets/ML-Agents/Examples/Pyramids/Demos/ExpertPyramid.demo
- strength: 0.5
- steps: 150000
- reward_signals:
- extrinsic:
- strength: 1.0
- gamma: 0.99
- curiosity:
- strength: 0.02
- gamma: 0.99
- encoding_size: 256
- gail:
- strength: 0.01
- gamma: 0.99
- encoding_size: 128
- demo_path: Project/Assets/ML-Agents/Examples/Pyramids/Demos/ExpertPyramid.demo
-
-CrawlerStatic:
- normalize: true
- num_epoch: 3
- time_horizon: 1000
- batch_size: 2024
- buffer_size: 20240
- max_steps: 1e7
- summary_freq: 30000
- num_layers: 3
- hidden_units: 512
- behavioral_cloning:
- demo_path: Project/Assets/ML-Agents/Examples/Crawler/Demos/ExpertCrawlerSta.demo
- strength: 0.5
- steps: 50000
- reward_signals:
- gail:
- strength: 1.0
- gamma: 0.99
- encoding_size: 128
- demo_path: Project/Assets/ML-Agents/Examples/Crawler/Demos/ExpertCrawlerSta.demo
-
-PushBlock:
- max_steps: 1.5e7
- batch_size: 128
- buffer_size: 2048
- beta: 1.0e-2
- hidden_units: 256
- summary_freq: 60000
- time_horizon: 64
- num_layers: 2
- reward_signals:
- gail:
- strength: 1.0
- gamma: 0.99
- encoding_size: 128
- demo_path: Project/Assets/ML-Agents/Examples/PushBlock/Demos/ExpertPush.demo
-
-Hallway:
- use_recurrent: true
- sequence_length: 64
- num_layers: 2
- hidden_units: 128
- memory_size: 256
- beta: 1.0e-2
- num_epoch: 3
- buffer_size: 1024
- batch_size: 128
- max_steps: 1.0e7
- summary_freq: 10000
- time_horizon: 64
- reward_signals:
- extrinsic:
- strength: 1.0
- gamma: 0.99
- gail:
- strength: 0.1
- gamma: 0.99
- encoding_size: 128
- demo_path: Project/Assets/ML-Agents/Examples/Hallway/Demos/ExpertHallway.demo
-
-FoodCollector:
- batch_size: 64
- max_steps: 2.0e6
- use_recurrent: false
- hidden_units: 128
- learning_rate: 3.0e-4
- num_layers: 2
- sequence_length: 32
- reward_signals:
- gail:
- strength: 0.1
- gamma: 0.99
- encoding_size: 128
- demo_path: Project/Assets/ML-Agents/Examples/FoodCollector/Demos/ExpertFood.demo
- behavioral_cloning:
- demo_path: Project/Assets/ML-Agents/Examples/FoodCollector/Demos/ExpertFood.demo
- strength: 1.0
- steps: 0
diff --git a/config/imitation/CrawlerStatic.yaml b/config/imitation/CrawlerStatic.yaml
new file mode 100644
index 0000000000..57705f7815
--- /dev/null
+++ b/config/imitation/CrawlerStatic.yaml
@@ -0,0 +1,29 @@
+behaviors:
+ CrawlerStatic:
+ trainer: ppo
+ batch_size: 2024
+ beta: 0.005
+ buffer_size: 20240
+ epsilon: 0.2
+ hidden_units: 512
+ lambd: 0.95
+ learning_rate: 0.0003
+ max_steps: 1e7
+ memory_size: 256
+ normalize: true
+ num_epoch: 3
+ num_layers: 3
+ time_horizon: 1000
+ sequence_length: 64
+ summary_freq: 30000
+ use_recurrent: false
+ reward_signals:
+ gail:
+ strength: 1.0
+ gamma: 0.99
+ encoding_size: 128
+ demo_path: Project/Assets/ML-Agents/Examples/Crawler/Demos/ExpertCrawlerSta.demo
+ behavioral_cloning:
+ demo_path: Project/Assets/ML-Agents/Examples/Crawler/Demos/ExpertCrawlerSta.demo
+ strength: 0.5
+ steps: 50000
diff --git a/config/imitation/FoodCollector.yaml b/config/imitation/FoodCollector.yaml
new file mode 100644
index 0000000000..f5682763be
--- /dev/null
+++ b/config/imitation/FoodCollector.yaml
@@ -0,0 +1,29 @@
+behaviors:
+ FoodCollector:
+ trainer: ppo
+ batch_size: 64
+ beta: 0.005
+ buffer_size: 10240
+ epsilon: 0.2
+ hidden_units: 128
+ lambd: 0.95
+ learning_rate: 0.0003
+ max_steps: 2.0e6
+ memory_size: 256
+ normalize: false
+ num_epoch: 3
+ num_layers: 2
+ time_horizon: 64
+ sequence_length: 32
+ summary_freq: 10000
+ use_recurrent: false
+ reward_signals:
+ gail:
+ strength: 0.1
+ gamma: 0.99
+ encoding_size: 128
+ demo_path: Project/Assets/ML-Agents/Examples/FoodCollector/Demos/ExpertFood.demo
+ behavioral_cloning:
+ demo_path: Project/Assets/ML-Agents/Examples/FoodCollector/Demos/ExpertFood.demo
+ strength: 1.0
+ steps: 0
diff --git a/config/imitation/Hallway.yaml b/config/imitation/Hallway.yaml
new file mode 100644
index 0000000000..235b310877
--- /dev/null
+++ b/config/imitation/Hallway.yaml
@@ -0,0 +1,28 @@
+behaviors:
+ Hallway:
+ trainer: ppo
+ batch_size: 128
+ beta: 0.01
+ buffer_size: 1024
+ epsilon: 0.2
+ hidden_units: 128
+ lambd: 0.95
+ learning_rate: 0.0003
+ max_steps: 1.0e7
+ memory_size: 256
+ normalize: false
+ num_epoch: 3
+ num_layers: 2
+ time_horizon: 64
+ sequence_length: 64
+ summary_freq: 10000
+ use_recurrent: true
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
+ gail:
+ strength: 0.1
+ gamma: 0.99
+ encoding_size: 128
+ demo_path: Project/Assets/ML-Agents/Examples/Hallway/Demos/ExpertHallway.demo
diff --git a/config/imitation/PushBlock.yaml b/config/imitation/PushBlock.yaml
new file mode 100644
index 0000000000..2d1e996733
--- /dev/null
+++ b/config/imitation/PushBlock.yaml
@@ -0,0 +1,25 @@
+behaviors:
+ PushBlock:
+ trainer: ppo
+ batch_size: 128
+ beta: 0.01
+ buffer_size: 2048
+ epsilon: 0.2
+ hidden_units: 256
+ lambd: 0.95
+ learning_rate: 0.0003
+ max_steps: 1.5e7
+ memory_size: 256
+ normalize: false
+ num_epoch: 3
+ num_layers: 2
+ time_horizon: 64
+ sequence_length: 64
+ summary_freq: 60000
+ use_recurrent: false
+ reward_signals:
+ gail:
+ strength: 1.0
+ gamma: 0.99
+ encoding_size: 128
+ demo_path: Project/Assets/ML-Agents/Examples/PushBlock/Demos/ExpertPush.demo
diff --git a/config/imitation/Pyramids.yaml b/config/imitation/Pyramids.yaml
new file mode 100644
index 0000000000..699e571513
--- /dev/null
+++ b/config/imitation/Pyramids.yaml
@@ -0,0 +1,36 @@
+behaviors:
+ Pyramids:
+ trainer: ppo
+ batch_size: 128
+ beta: 0.01
+ buffer_size: 2048
+ epsilon: 0.2
+ hidden_units: 512
+ lambd: 0.95
+ learning_rate: 0.0003
+ max_steps: 1.0e7
+ memory_size: 256
+ normalize: false
+ num_epoch: 3
+ num_layers: 2
+ time_horizon: 128
+ sequence_length: 64
+ summary_freq: 30000
+ use_recurrent: false
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
+ curiosity:
+ strength: 0.02
+ gamma: 0.99
+ encoding_size: 256
+ gail:
+ strength: 0.01
+ gamma: 0.99
+ encoding_size: 128
+ demo_path: Project/Assets/ML-Agents/Examples/Pyramids/Demos/ExpertPyramid.demo
+ behavioral_cloning:
+ demo_path: Project/Assets/ML-Agents/Examples/Pyramids/Demos/ExpertPyramid.demo
+ strength: 0.5
+ steps: 150000
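The imitation configs above pair a gail reward signal with an optional behavioral_cloning section, and both point at a recorded .demo file. A hypothetical pre-flight check, assuming the demo paths are relative to the repository root:

    import os

    def check_demo_paths(behavior_cfg: dict) -> None:
        gail = behavior_cfg.get("reward_signals", {}).get("gail", {})
        bc = behavior_cfg.get("behavioral_cloning", {})
        for section in (gail, bc):
            demo = section.get("demo_path")
            if demo is not None:
                assert os.path.exists(demo), f"demo file not found: {demo}"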
diff --git a/config/sac_trainer_config.yaml b/config/sac_trainer_config.yaml
deleted file mode 100644
index 64c02acc02..0000000000
--- a/config/sac_trainer_config.yaml
+++ /dev/null
@@ -1,273 +0,0 @@
-default:
- trainer: sac
- batch_size: 128
- buffer_size: 50000
- buffer_init_steps: 0
- hidden_units: 128
- init_entcoef: 1.0
- learning_rate: 3.0e-4
- learning_rate_schedule: constant
- max_steps: 5.0e5
- memory_size: 128
- normalize: false
- num_update: 1
- train_interval: 1
- num_layers: 2
- time_horizon: 64
- sequence_length: 64
- summary_freq: 10000
- tau: 0.005
- use_recurrent: false
- vis_encode_type: simple
- reward_signals:
- extrinsic:
- strength: 1.0
- gamma: 0.99
-
-FoodCollector:
- normalize: false
- batch_size: 256
- buffer_size: 500000
- max_steps: 2.0e6
- init_entcoef: 0.05
- train_interval: 1
-
-Bouncer:
- normalize: true
- max_steps: 2.0e6
- num_layers: 2
- hidden_units: 64
- summary_freq: 20000
-
-PushBlock:
- max_steps: 2e6
- init_entcoef: 0.05
- hidden_units: 256
- summary_freq: 60000
- time_horizon: 64
- num_layers: 2
-
-SmallWallJump:
- max_steps: 5e6
- hidden_units: 256
- summary_freq: 20000
- time_horizon: 128
- init_entcoef: 0.1
- num_layers: 2
- normalize: false
-
-BigWallJump:
- max_steps: 2e7
- hidden_units: 256
- summary_freq: 20000
- time_horizon: 128
- num_layers: 2
- init_entcoef: 0.1
- normalize: false
-
-Striker:
- max_steps: 5.0e6
- learning_rate: 1e-3
- hidden_units: 256
- summary_freq: 20000
- time_horizon: 128
- init_entcoef: 0.1
- num_layers: 2
- normalize: false
-
-Goalie:
- max_steps: 5.0e6
- learning_rate: 1e-3
- hidden_units: 256
- summary_freq: 20000
- time_horizon: 128
- init_entcoef: 0.1
- num_layers: 2
- normalize: false
-
-Pyramids:
- summary_freq: 30000
- time_horizon: 128
- batch_size: 128
- buffer_init_steps: 10000
- buffer_size: 500000
- hidden_units: 256
- num_layers: 2
- init_entcoef: 0.01
- max_steps: 1.0e7
- sequence_length: 16
- tau: 0.01
- use_recurrent: false
- reward_signals:
- extrinsic:
- strength: 2.0
- gamma: 0.99
- gail:
- strength: 0.02
- gamma: 0.99
- encoding_size: 128
- use_actions: true
- demo_path: Project/Assets/ML-Agents/Examples/Pyramids/Demos/ExpertPyramid.demo
-
-VisualPyramids:
- time_horizon: 128
- batch_size: 64
- hidden_units: 256
- buffer_init_steps: 1000
- num_layers: 1
- max_steps: 1.0e7
- buffer_size: 500000
- init_entcoef: 0.01
- tau: 0.01
- reward_signals:
- extrinsic:
- strength: 2.0
- gamma: 0.99
- gail:
- strength: 0.02
- gamma: 0.99
- encoding_size: 128
- use_actions: true
- demo_path: Project/Assets/ML-Agents/Examples/Pyramids/Demos/ExpertPyramid.demo
-
-3DBall:
- normalize: true
- batch_size: 64
- buffer_size: 12000
- summary_freq: 12000
- time_horizon: 1000
- hidden_units: 64
- init_entcoef: 0.5
-
-3DBallHard:
- normalize: true
- batch_size: 256
- summary_freq: 12000
- time_horizon: 1000
-
-Tennis:
- normalize: true
- max_steps: 2e7
- hidden_units: 256
- self_play:
- window: 10
- play_against_current_self_ratio: 0.5
- save_steps: 50000
- swap_steps: 50000
-
-CrawlerStatic:
- normalize: true
- time_horizon: 1000
- batch_size: 256
- train_interval: 2
- buffer_size: 500000
- buffer_init_steps: 2000
- max_steps: 5e6
- summary_freq: 30000
- init_entcoef: 1.0
- num_layers: 3
- hidden_units: 512
- reward_signals:
- extrinsic:
- strength: 1.0
- gamma: 0.995
-
-CrawlerDynamic:
- normalize: true
- time_horizon: 1000
- batch_size: 256
- buffer_size: 500000
- summary_freq: 30000
- train_interval: 2
- num_layers: 3
- max_steps: 1e7
- hidden_units: 512
- reward_signals:
- extrinsic:
- strength: 1.0
- gamma: 0.995
-
-Walker:
- normalize: true
- time_horizon: 1000
- batch_size: 256
- buffer_size: 500000
- max_steps: 2e7
- summary_freq: 30000
- num_layers: 4
- train_interval: 2
- hidden_units: 512
- reward_signals:
- extrinsic:
- strength: 1.0
- gamma: 0.995
-
-Reacher:
- normalize: true
- time_horizon: 1000
- batch_size: 128
- buffer_size: 500000
- max_steps: 2e7
- summary_freq: 60000
-
-Hallway:
- sequence_length: 32
- num_layers: 2
- hidden_units: 128
- memory_size: 128
- init_entcoef: 0.1
- max_steps: 1.0e7
- summary_freq: 10000
- time_horizon: 64
- use_recurrent: true
-
-VisualHallway:
- sequence_length: 32
- num_layers: 1
- hidden_units: 128
- memory_size: 128
- gamma: 0.99
- batch_size: 64
- max_steps: 1.0e7
- summary_freq: 10000
- time_horizon: 64
- use_recurrent: true
-
-VisualPushBlock:
- use_recurrent: true
- sequence_length: 32
- num_layers: 1
- hidden_units: 128
- memory_size: 128
- gamma: 0.99
- buffer_size: 1024
- batch_size: 64
- max_steps: 3.0e6
- summary_freq: 60000
- time_horizon: 64
-
-GridWorld:
- batch_size: 128
- normalize: false
- num_layers: 1
- hidden_units: 128
- init_entcoef: 0.5
- buffer_init_steps: 1000
- buffer_size: 50000
- max_steps: 500000
- summary_freq: 20000
- time_horizon: 5
- reward_signals:
- extrinsic:
- strength: 1.0
- gamma: 0.9
-
-Basic:
- batch_size: 64
- normalize: false
- num_layers: 2
- init_entcoef: 0.01
- hidden_units: 20
- max_steps: 5.0e5
- summary_freq: 2000
- time_horizon: 10
diff --git a/config/trainer_config.yaml b/config/trainer_config.yaml
deleted file mode 100644
index b5fecb4ca1..0000000000
--- a/config/trainer_config.yaml
+++ /dev/null
@@ -1,289 +0,0 @@
-default:
- trainer: ppo
- batch_size: 1024
- beta: 5.0e-3
- buffer_size: 10240
- epsilon: 0.2
- hidden_units: 128
- lambd: 0.95
- learning_rate: 3.0e-4
- learning_rate_schedule: linear
- max_steps: 5.0e5
- memory_size: 128
- normalize: false
- num_epoch: 3
- num_layers: 2
- time_horizon: 64
- sequence_length: 64
- summary_freq: 10000
- use_recurrent: false
- vis_encode_type: simple
- reward_signals:
- extrinsic:
- strength: 1.0
- gamma: 0.99
-
-FoodCollector:
- normalize: false
- beta: 5.0e-3
- batch_size: 1024
- buffer_size: 10240
- max_steps: 2.0e6
-
-Bouncer:
- normalize: true
- max_steps: 4.0e6
- num_layers: 2
- hidden_units: 64
-
-PushBlock:
- max_steps: 2.0e6
- batch_size: 128
- buffer_size: 2048
- beta: 1.0e-2
- hidden_units: 256
- summary_freq: 60000
- time_horizon: 64
- num_layers: 2
-
-SmallWallJump:
- max_steps: 5e6
- batch_size: 128
- buffer_size: 2048
- beta: 5.0e-3
- hidden_units: 256
- summary_freq: 20000
- time_horizon: 128
- num_layers: 2
- normalize: false
-
-BigWallJump:
- max_steps: 2e7
- batch_size: 128
- buffer_size: 2048
- beta: 5.0e-3
- hidden_units: 256
- summary_freq: 20000
- time_horizon: 128
- num_layers: 2
- normalize: false
-
-Pyramids:
- summary_freq: 30000
- time_horizon: 128
- batch_size: 128
- buffer_size: 2048
- hidden_units: 512
- num_layers: 2
- beta: 1.0e-2
- max_steps: 1.0e7
- num_epoch: 3
- reward_signals:
- extrinsic:
- strength: 1.0
- gamma: 0.99
- curiosity:
- strength: 0.02
- gamma: 0.99
- encoding_size: 256
-
-VisualPyramids:
- time_horizon: 128
- batch_size: 64
- buffer_size: 2024
- hidden_units: 256
- num_layers: 1
- beta: 1.0e-2
- max_steps: 1.0e7
- num_epoch: 3
- reward_signals:
- extrinsic:
- strength: 1.0
- gamma: 0.99
- curiosity:
- strength: 0.01
- gamma: 0.99
- encoding_size: 256
-
-3DBall:
- normalize: true
- batch_size: 64
- buffer_size: 12000
- summary_freq: 12000
- time_horizon: 1000
- lambd: 0.99
- beta: 0.001
-
-3DBallHard:
- normalize: true
- batch_size: 1200
- buffer_size: 12000
- summary_freq: 12000
- time_horizon: 1000
- max_steps: 5.0e6
- beta: 0.001
- reward_signals:
- extrinsic:
- strength: 1.0
- gamma: 0.995
-
-Tennis:
- normalize: true
- max_steps: 5.0e7
- learning_rate_schedule: constant
- batch_size: 1024
- buffer_size: 10240
- hidden_units: 256
- time_horizon: 1000
- self_play:
- window: 10
- play_against_latest_model_ratio: 0.5
- save_steps: 50000
- swap_steps: 50000
- team_change: 100000
-
-Soccer:
- normalize: false
- max_steps: 5.0e7
- learning_rate_schedule: constant
- batch_size: 2048
- buffer_size: 20480
- hidden_units: 512
- time_horizon: 1000
- num_layers: 2
- self_play:
- window: 10
- play_against_latest_model_ratio: 0.5
- save_steps: 50000
- swap_steps: 50000
- team_change: 100000
-
-CrawlerStatic:
- normalize: true
- num_epoch: 3
- time_horizon: 1000
- batch_size: 2024
- buffer_size: 20240
- max_steps: 1e7
- summary_freq: 30000
- num_layers: 3
- hidden_units: 512
- reward_signals:
- extrinsic:
- strength: 1.0
- gamma: 0.995
-
-CrawlerDynamic:
- normalize: true
- num_epoch: 3
- time_horizon: 1000
- batch_size: 2024
- buffer_size: 20240
- max_steps: 1e7
- summary_freq: 30000
- num_layers: 3
- hidden_units: 512
- reward_signals:
- extrinsic:
- strength: 1.0
- gamma: 0.995
-
-Walker:
- normalize: true
- num_epoch: 3
- time_horizon: 1000
- batch_size: 2048
- buffer_size: 20480
- max_steps: 2e7
- summary_freq: 30000
- num_layers: 3
- hidden_units: 512
- reward_signals:
- extrinsic:
- strength: 1.0
- gamma: 0.995
-
-Reacher:
- normalize: true
- num_epoch: 3
- time_horizon: 1000
- batch_size: 2024
- buffer_size: 20240
- max_steps: 2e7
- summary_freq: 60000
- reward_signals:
- extrinsic:
- strength: 1.0
- gamma: 0.995
-
-Hallway:
- use_recurrent: true
- sequence_length: 64
- num_layers: 2
- hidden_units: 128
- memory_size: 128
- beta: 1.0e-2
- num_epoch: 3
- buffer_size: 1024
- batch_size: 128
- max_steps: 1.0e7
- summary_freq: 10000
- time_horizon: 64
-
-VisualHallway:
- use_recurrent: true
- sequence_length: 64
- num_layers: 1
- hidden_units: 128
- memory_size: 128
- beta: 1.0e-2
- num_epoch: 3
- buffer_size: 1024
- batch_size: 64
- max_steps: 1.0e7
- summary_freq: 10000
- time_horizon: 64
-
-VisualPushBlock:
- use_recurrent: true
- sequence_length: 32
- num_layers: 1
- hidden_units: 128
- memory_size: 128
- beta: 1.0e-2
- num_epoch: 3
- buffer_size: 1024
- batch_size: 64
- max_steps: 3.0e6
- summary_freq: 60000
- time_horizon: 64
-
-GridWorld:
- batch_size: 32
- normalize: false
- num_layers: 1
- hidden_units: 256
- beta: 5.0e-3
- buffer_size: 256
- max_steps: 500000
- summary_freq: 20000
- time_horizon: 5
- reward_signals:
- extrinsic:
- strength: 1.0
- gamma: 0.9
-
-Basic:
- batch_size: 32
- normalize: false
- num_layers: 1
- hidden_units: 20
- beta: 5.0e-3
- buffer_size: 256
- max_steps: 5.0e5
- summary_freq: 2000
- time_horizon: 3
- reward_signals:
- extrinsic:
- strength: 1.0
- gamma: 0.9
From c80c359a7cfbafbd754eb667d728d855223fd56e Mon Sep 17 00:00:00 2001
From: Ervin Teng
Date: Thu, 16 Apr 2020 17:00:41 -0700
Subject: [PATCH 10/36] Change some of the documentation
---
docs/Feature-Memory.md | 2 +-
docs/Getting-Started.md | 12 ++++++------
docs/Learning-Environment-Create-New.md | 15 ++++++++-------
docs/Learning-Environment-Examples.md | 2 +-
4 files changed, 16 insertions(+), 15 deletions(-)
diff --git a/docs/Feature-Memory.md b/docs/Feature-Memory.md
index d35d49af40..f67cec88b1 100644
--- a/docs/Feature-Memory.md
+++ b/docs/Feature-Memory.md
@@ -18,7 +18,7 @@ important to remember with
## How to use
-When configuring the trainer parameters in the `config/trainer_config.yaml`
+When configuring the trainer parameters in the config YAML
file, add the following parameters to the Behavior you want to use.
```json
diff --git a/docs/Getting-Started.md b/docs/Getting-Started.md
index 751b7769e4..fad06a9551 100644
--- a/docs/Getting-Started.md
+++ b/docs/Getting-Started.md
@@ -184,8 +184,8 @@ has been shown to be both stable and sample-efficient.
For more information on SAC, see UC Berkeley's
[blog post](https://bair.berkeley.edu/blog/2018/12/14/sac/) and
[our page](Training-SAC.md) for more guidance on when to use SAC vs. PPO. To
-use SAC to train Balance Ball, replace all references to `config/trainer_config.yaml`
-with `config/sac_trainer_config.yaml` below.
+use SAC to train Balance Ball, replace all references to `config/ppo/3DBall.yaml`
+with `config/sac/3DBall.yaml` below.
To train the agents within the Balance Ball environment, we will be using the
ML-Agents Python package. We have provided a convenient command called `mlagents-learn`
@@ -201,13 +201,13 @@ which accepts arguments used to configure both training and inference phases.
where:
- `<trainer-config-file>` is the relative or absolute filepath of the
trainer configuration. The defaults used by example environments included
- in `MLAgentsSDK` can be found in `config/trainer_config.yaml`.
+ in `MLAgentsSDK` can be found in the `config/ppo/` and `config/sac/` folders.
- `<run-identifier>` is a string used to separate the results of different
training runs. Make sure to use one that hasn't been used already!
4. If you cloned the ML-Agents repo, then you can simply run
```sh
- mlagents-learn config/trainer_config.yaml --run-id=firstRun
+ mlagents-learn config/ppo/3DBall.yaml --run-id=firstRun
```
5. When the message _"Start training by pressing the Play button in the Unity
@@ -331,14 +331,14 @@ If you've quit the training early using Ctrl+C and want to resume training, run
same command again, appending the `--resume` flag:
```sh
-mlagents-learn config/trainer_config.yaml --run-id=firstRun --resume
+mlagents-learn config/ppo/3DBall.yaml --run-id=firstRun --resume
```
Your trained model will be at `models/<run-identifier>/<behavior_name>.nn` where
`<behavior_name>` is the name of the `Behavior Name` of the agents corresponding to the model.
(**Note:** There is a known bug on Windows that causes the saving of the model to
fail when you early terminate the training, it's recommended to wait until Step
-has reached the max_steps parameter you set in trainer_config.yaml.) This file
+has reached the max_steps parameter you set in your config YAML.) This file
corresponds to your model's latest checkpoint. You can now embed this trained
model into your Agents by following the steps below, which is similar to
the steps described
diff --git a/docs/Learning-Environment-Create-New.md b/docs/Learning-Environment-Create-New.md
index 069507d974..5eff20844b 100644
--- a/docs/Learning-Environment-Create-New.md
+++ b/docs/Learning-Environment-Create-New.md
@@ -393,13 +393,14 @@ status information in the Game window.
The process is
the same as described in [Training ML-Agents](Training-ML-Agents.md). Note that the
-models will be created in the original ml-agents project folder, `ml-agents/models`.
+models will be created in the original ml-agents project folder, under `models`.
The hyperparameters for training are specified in the configuration file that you
-pass to the `mlagents-learn` program. Using the default settings specified
-in the original `ml-agents/config/trainer_config.yaml` file, the
-RollerAgent takes about 300,000 steps to train. However, you can change the
-following hyperparameters to speed up training considerably (to under 20,000 steps):
+pass to the `mlagents-learn` program. You can create a config file just for your environment
+by copying an existing file. For this example, copy `config/ppo/3DBall.yaml` into a new file,
+named, for instance, `config/ppo/RollerBall.yaml` (the exact name isn't important). Using the settings specified
+in the original file, the RollerAgent takes about 300,000 steps to train. However, you can change the
+following hyperparameters to speed up training considerably (to under 20,000 steps):
batch_size: 10
buffer_size: 100
@@ -418,9 +419,9 @@ in this simple environment, speeds up training.
To train in the editor, run the following Python command from a Terminal or Console
window before pressing play:
- mlagents-learn config/config.yaml --run-id=RollerBall-1
+ mlagents-learn config/ppo/RollerBall.yaml --run-id=RollerBall-1
-(where `config.yaml` is a copy of `trainer_config.yaml` that you have edited
+(where `RollerBall.yaml` is the copy of `3DBall.yaml` that you have edited
to change the `batch_size` and `buffer_size` hyperparameters for your trainer.)
**Note:** If you get a `command not found` error when running this command, make sure
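For readers following this tutorial, a minimal sketch of what the edited copy might contain is shown below. The Behavior Name `RollerAgent` is an assumption for illustration; it must match whatever name you set in the Agent's Behavior Parameters, and every hyperparameter other than `batch_size` and `buffer_size` is carried over unchanged from the copied `config/ppo/3DBall.yaml`.

```yaml
behaviors:
  RollerAgent:        # assumed Behavior Name; use the one set in your Behavior Parameters
    trainer: ppo
    batch_size: 10    # reduced for this small environment
    buffer_size: 100  # reduced for this small environment
    # ... all other hyperparameters copied unchanged from config/ppo/3DBall.yaml
```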
diff --git a/docs/Learning-Environment-Examples.md b/docs/Learning-Environment-Examples.md
index a2f090e58c..19192f411a 100644
--- a/docs/Learning-Environment-Examples.md
+++ b/docs/Learning-Environment-Examples.md
@@ -314,7 +314,7 @@ return.
training parameters.__
* Float Properties: None
* Benchmark Mean Reward: 0.7
- * To speed up training, you can enable curiosity by adding the `curiosity` reward signal in `config/trainer_config.yaml`
+ * To train this environment, you can enable curiosity by adding the `curiosity` reward signal in `config/ppo/Hallway.yaml`
## Bouncer
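As a concrete illustration of the Hallway change above, enabling curiosity amounts to adding a `curiosity` entry under `reward_signals` in `config/ppo/Hallway.yaml`. The values below are a hedged sketch borrowed from the Pyramids configuration elsewhere in this patch series, not values tuned for Hallway:

```yaml
behaviors:
  Hallway:
    # ... existing PPO hyperparameters unchanged
    reward_signals:
      extrinsic:
        strength: 1.0
        gamma: 0.99
      curiosity:             # added reward signal
        strength: 0.02       # illustrative value taken from the Pyramids example
        gamma: 0.99
        encoding_size: 256
```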
From 0fa8f8ba264e313b496dda356b0a48a2a67bbada Mon Sep 17 00:00:00 2001
From: Ervin Teng
Date: Thu, 16 Apr 2020 17:35:08 -0700
Subject: [PATCH 11/36] More doc updates
---
docs/Learning-Environment-Executable.md | 6 +-
docs/Reward-Signals.md | 4 +-
docs/Training-Curriculum-Learning.md | 61 ++++++++++++-------
...ing-Environment-Parameter-Randomization.md | 52 +++++++++-------
docs/Training-ML-Agents.md | 31 ++++++----
5 files changed, 92 insertions(+), 62 deletions(-)
diff --git a/docs/Learning-Environment-Executable.md b/docs/Learning-Environment-Executable.md
index b78110bcfe..5737dca2b0 100644
--- a/docs/Learning-Environment-Executable.md
+++ b/docs/Learning-Environment-Executable.md
@@ -89,13 +89,13 @@ For example, if you are training with a 3DBall executable you exported to the
the directory where you installed the ML-Agents Toolkit, run:
```sh
-mlagents-learn ../config/trainer_config.yaml --env=3DBall --run-id=firstRun
+mlagents-learn ../config/ppo/3DBall.yaml --env=3DBall --run-id=firstRun
```
And you should see something like
```console
-ml-agents$ mlagents-learn config/trainer_config.yaml --env=3DBall --run-id=first-run
+ml-agents$ mlagents-learn config/ppo/3DBall.yaml --env=3DBall --run-id=first-run
â–„â–„â–„â–“â–“â–“â–“
@@ -175,7 +175,7 @@ You can press Ctrl+C to stop the training, and your trained model will be at
latest checkpoint. (**Note:** There is a known bug on Windows that causes the
saving of the model to fail when you early terminate the training, it's
recommended to wait until Step has reached the max_steps parameter you set in
-trainer_config.yaml.) You can now embed this trained model into your Agent by
+your config YAML.) You can now embed this trained model into your Agent by
following the steps below:
1. Move your model file into
diff --git a/docs/Reward-Signals.md b/docs/Reward-Signals.md
index db392fc549..10f6e38e1a 100644
--- a/docs/Reward-Signals.md
+++ b/docs/Reward-Signals.md
@@ -17,8 +17,8 @@ The `curiosity` reward signal helps your agent explore when extrinsic rewards ar
## Enabling Reward Signals
-Reward signals, like other hyperparameters, are defined in the trainer config `.yaml` file. An
-example is provided in `config/trainer_config.yaml` and `config/gail_config.yaml`. To enable a reward signal, add it to the
+Reward signals, like other hyperparameters, are defined in the trainer config `.yaml` file. Examples of config files
+are provided in `config/ppo/` and `config/imitation/`. To enable a reward signal, add it to the
`reward_signals:` section under the behavior name. For instance, to enable the extrinsic signal
in addition to a small curiosity reward and a GAIL reward signal, you would define your `reward_signals` as follows:
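The `reward_signals` example that the sentence above refers to sits outside this hunk. Purely for orientation, a combined section with all three signals could look like the sketch below; the structure mirrors the Pyramids imitation config added earlier in this series, and the demo path and strengths are illustrative rather than recommended values:

```yaml
reward_signals:
  extrinsic:
    strength: 1.0
    gamma: 0.99
  curiosity:
    strength: 0.02
    gamma: 0.99
    encoding_size: 256
  gail:
    strength: 0.01
    gamma: 0.99
    encoding_size: 128
    demo_path: Project/Assets/ML-Agents/Examples/Pyramids/Demos/ExpertPyramid.demo
```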
diff --git a/docs/Training-Curriculum-Learning.md b/docs/Training-Curriculum-Learning.md
index 38885287ea..a0ea98ec0e 100644
--- a/docs/Training-Curriculum-Learning.md
+++ b/docs/Training-Curriculum-Learning.md
@@ -43,34 +43,53 @@ the environment will vary. In the case of the Wall Jump environment,
the height of the wall is what varies. We define this as a `Shared Float Property`
that can be accessed in `SideChannelUtils.GetSideChannel()`, and by doing
so it becomes adjustable via the Python API.
-Rather than adjusting it by hand, we will create a YAML file which
+Rather than adjusting it by hand, we will add a section to our YAML configuration file that
describes the structure of the curricula. Within it, we can specify which
points in the training process our wall height will change, either based on the
percentage of training steps which have taken place, or what the average reward
the agent has received in the recent past is. Below is an example config for the
-curricula for the Wall Jump environment.
+curricula for the Wall Jump environment. You can find the full file in `config/ppo/WallJump_curriculum.yaml`.
```yaml
-BigWallJump:
- measure: progress
- thresholds: [0.1, 0.3, 0.5]
- min_lesson_length: 100
- signal_smoothing: true
- parameters:
- big_wall_min_height: [0.0, 4.0, 6.0, 8.0]
- big_wall_max_height: [4.0, 7.0, 8.0, 8.0]
-SmallWallJump:
- measure: progress
- thresholds: [0.1, 0.3, 0.5]
- min_lesson_length: 100
- signal_smoothing: true
- parameters:
- small_wall_height: [1.5, 2.0, 2.5, 4.0]
+behaviors:
+ BigWallJump:
+ trainer: ppo
+ ... # The rest of the hyperparameters
+ vis_encode_type: simple
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
+ curriculum: # Add this section for curriculum
+ measure: progress
+ thresholds: [0.1, 0.3, 0.5]
+ min_lesson_length: 100
+ signal_smoothing: true
+ parameters:
+ big_wall_min_height: [0.0, 4.0, 6.0, 8.0]
+ big_wall_max_height: [4.0, 7.0, 8.0, 8.0]
+
+ SmallWallJump:
+ trainer: ppo
+ ... # The rest of the hyperparameters
+ reward_signals:
+ extrinsic:
+ strength: 1.0
+ gamma: 0.99
+ curriculum: # Add this section for curriculum
+ measure: progress
+ thresholds: [0.1, 0.3, 0.5]
+ min_lesson_length: 100
+ signal_smoothing: true
+ parameters:
+ small_wall_height: [1.5, 2.0, 2.5, 4.0]
```
-At the top level of the config is the behavior name. Note that this must be the
+For each Behavior Name described in your training configuration file, we can specify a curriculum
+by adding a `curriculum:` section under that particular Behavior Name.
+Note that these must be the
same as the Behavior Name in the [Agent's Behavior Parameters](Learning-Environment-Design-Agents.md#agent-properties).
- The curriculum for each
+The curriculum for each
behavior has the following parameters:
* `measure` - What to measure learning progress, and advancement in lessons by.
* `reward` - Uses a measure received reward.
@@ -88,7 +107,7 @@ behavior has the following parameters:
cumulative reward of the last `100` episodes exceeds the current threshold.
The mean reward logged to the console is dictated by the `summary_freq`
parameter in the
- [trainer configuration file](Training-ML-Agents.md#training-config-file).
+ [training configuration file](Training-ML-Agents.md#training-config-file).
* `signal_smoothing` (true/false) - Whether to weight the current progress
measure by previous values.
* If `true`, weighting will be 0.75 (new) 0.25 (old).
@@ -110,7 +129,7 @@ for our curricula and PPO will train using Curriculum Learning. For example,
to train agents in the Wall Jump environment with curriculum learning, we can run:
```sh
-mlagents-learn config/trainer_config.yaml --curriculum=config/curricula/wall_jump.yaml --run-id=wall-jump-curriculum
+mlagents-learn config/ppo/WallJump_curriculum.yaml --run-id=wall-jump-curriculum
```
We can then keep track of the current lessons and progresses via TensorBoard.
diff --git a/docs/Training-Environment-Parameter-Randomization.md b/docs/Training-Environment-Parameter-Randomization.md
index e812557b1d..fa6596765a 100644
--- a/docs/Training-Environment-Parameter-Randomization.md
+++ b/docs/Training-Environment-Parameter-Randomization.md
@@ -40,27 +40,34 @@ training procedure, remaining unchanged. The samplers for all the `Environment P
are handled by a **Sampler Manager**, which also handles the generation of new
values for the environment parameters when needed.
-To setup the Sampler Manager, we create a YAML file that specifies how we wish to
-generate new samples for each `Environment Parameters`. In this file, we specify the samplers and the
+To set up the Sampler Manager, we edit our [training configuration file](Training-ML-Agents.md#training-config-file).
+Add a `parameter_randomization` section that specifies how we wish to generate new samples for each of the
+`Environment Parameters`. In this section, we specify the samplers and the
`resampling-interval` (the number of simulation steps after which environment parameters are
-resampled). Below is an example of a sampler file for the 3D ball environment.
+resampled). Below is an example of such a configuration for the 3D Ball environment. The full file is provided in
+`config/ppo/3DBall_randomize.yaml`.
```yaml
-resampling-interval: 5000
+behaviors:
+ # Trainer hyperparameters
-mass:
- sampler-type: "uniform"
- min_value: 0.5
- max_value: 10
+# New section
+parameter_randomization:
+ resampling-interval: 5000
+
+ mass:
+ sampler-type: "uniform"
+ min_value: 0.5
+ max_value: 10
-gravity:
- sampler-type: "multirange_uniform"
- intervals: [[7, 10], [15, 20]]
+ gravity:
+ sampler-type: "multirange_uniform"
+ intervals: [[7, 10], [15, 20]]
-scale:
- sampler-type: "uniform"
- min_value: 0.75
- max_value: 3
+ scale:
+ sampler-type: "uniform"
+ min_value: 0.75
+ max_value: 3
```
@@ -145,7 +152,7 @@ class CustomSampler(Sampler):
return np.random.choice(self.possible_vals)
```
-Now we need to specify the new sampler type in the sampler YAML file. For example, we use this new
+Now we need to specify the new sampler type in the trainer configuration file. For example, we use this new
sampler type for the `Environment Parameter` *mass*.
```yaml
@@ -158,14 +165,13 @@ mass:
### Training with Environment Parameter Randomization
-After the sampler YAML file is defined, we proceed by launching `mlagents-learn` and specify
-our configured sampler file with the `--sampler` flag. For example, if we wanted to train the
-3D ball agent with parameter randomization using `Environment Parameters` with `config/3dball_randomize.yaml`
-sampling setup, we would run
+After the parameter variations are defined in the training config file, we proceed by launching the file with
+`mlagents-learn` as usual. For example, if we wanted to train the
+3D ball agent with parameter randomization using the `Environment Parameters` sampling setup specified in
+`config/ppo/3DBall_randomize.yaml`, we would run
```sh
-mlagents-learn config/trainer_config.yaml --sampler=config/3dball_randomize.yaml
---run-id=3D-Ball-randomize
+mlagents-learn config/ppo/3DBall_randomize.yaml --run-id=3D-Ball-randomize
```
-We can observe progress and metrics via Tensorboard.
+We can observe progress and metrics via TensorBoard as usual.
diff --git a/docs/Training-ML-Agents.md b/docs/Training-ML-Agents.md
index 6fa92aba25..f53998b30a 100644
--- a/docs/Training-ML-Agents.md
+++ b/docs/Training-ML-Agents.md
@@ -115,19 +115,24 @@ configurations and may generate different artifacts and TensorBoard statistics.
This section offers a detailed guide into how to manage the different training
set-ups within the toolkit.
-The training config files `config/trainer_config.yaml`,
-`config/sac_trainer_config.yaml`, `config/gail_config.yaml` and
-`config/offline_bc_config.yaml` specifies the training method, the
-hyperparameters, and a few additional values to use when training with Proximal
-Policy Optimization(PPO), Soft Actor-Critic(SAC), GAIL (Generative Adversarial
-Imitation Learning) with PPO/SAC, and Behavioral Cloning(BC)/Imitation with
-PPO/SAC. These files are divided into sections. The **default** section defines
-the default values for all the available training with PPO, SAC, GAIL (with
-PPO), and BC. These files are divided into sections. The **default** section
-defines the default values for all the available settings. You can also add new
-sections to override these defaults to train specific Behaviors. Name each of
-these override sections after the appropriate `Behavior Name`. Sections for the
-example environments are included in the provided config file.
+For each training run, create a YAML file that contains the training method and the
+hyperparameters for each of the Behaviors found in your environment. Example files for
+Proximal Policy Optimization (PPO) and Soft Actor-Critic (SAC) are provided in `config/ppo/` and
+`config/sac/`, respectively. Examples for imitation learning through GAIL (Generative Adversarial
+Imitation Learning) and Behavioral Cloning (BC) can be found in `config/imitation/`.
+
+Each file is divided into sections. The `behaviors` section defines the hyperparameters
+for each Behavior found in your environment. A section should be created for each `Behavior Name`.
+The available parameters for PPO and SAC are listed below. Alternatively, if there are many
+different Behaviors that all use similar hyperparameters, you can create a `default` behavior name
+that supplies any hyperparameters not specified in the Behavior-specific sections.
+To use [Curriculum Learning](Training-Curriculum-Learning.md) for a particular Behavior, add a
+section under that `Behavior Name` called `curriculum`.
+See the [Curriculum Learning](Training-Curriculum-Learning.md) page for more information.
+
+To use Parameter Randomization, add a `parameter_randomization` section in the configuration
+file. See the [Parameter Randomization](Training-Environment-Parameter-Randomization.md) docs
+for more information.
\*PPO = Proximal Policy Optimization, SAC = Soft Actor-Critic, BC = Behavioral
Cloning (Imitation), GAIL = Generative Adversarial Imitation Learning
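To make the layout described in the new Training-ML-Agents text concrete, here is a hedged sketch of how the pieces fit together in a single file. `MyBehavior` is a placeholder name, only a few representative keys are shown, and the placement of `default` under `behaviors` follows the wording of the paragraph above rather than a verified schema:

```yaml
behaviors:
  default:                    # optional: hyperparameters shared by all Behaviors
    trainer: ppo
    # ... common hyperparameters
  MyBehavior:                 # one section per Behavior Name in your environment
    trainer: ppo
    # ... Behavior-specific hyperparameters and reward_signals
    curriculum:               # optional: curriculum for this Behavior
      measure: progress
      thresholds: [0.1, 0.3, 0.5]
      # ... remaining curriculum parameters

parameter_randomization:      # optional: top-level sampler configuration
  resampling-interval: 5000
  # ... one sampler entry per Environment Parameter
```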
From 72b39f0460268a784f99107f943118df0c2488ac Mon Sep 17 00:00:00 2001
From: Ervin Teng
Date: Thu, 16 Apr 2020 17:42:12 -0700
Subject: [PATCH 12/36] Fix Yamato test
---
ml-agents/tests/yamato/training_int_tests.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ml-agents/tests/yamato/training_int_tests.py b/ml-agents/tests/yamato/training_int_tests.py
index 5900e83920..224892bcd9 100644
--- a/ml-agents/tests/yamato/training_int_tests.py
+++ b/ml-agents/tests/yamato/training_int_tests.py
@@ -63,7 +63,7 @@ def run_training(python_version, csharp_version):
# Copy the default training config but override the max_steps parameter,
# and reduce the batch_size and buffer_size enough to ensure an update step happens.
override_config_file(
- "config/trainer_config.yaml",
+ "config/ppo/3DBall.yaml",
"override.yaml",
max_steps=100,
batch_size=10,
From 0c89258aa1d0ab1f8586298dff7ca141f4411764 Mon Sep 17 00:00:00 2001
From: Ervin Teng
Date: Thu, 16 Apr 2020 18:00:58 -0700
Subject: [PATCH 13/36] Fix learn.py test
---
.../mlagents/trainers/tests/test_learn.py | 39 ++++++++++++++++---
1 file changed, 33 insertions(+), 6 deletions(-)
diff --git a/ml-agents/mlagents/trainers/tests/test_learn.py b/ml-agents/mlagents/trainers/tests/test_learn.py
index bf8bcbcf60..52eafbb4fc 100644
--- a/ml-agents/mlagents/trainers/tests/test_learn.py
+++ b/ml-agents/mlagents/trainers/tests/test_learn.py
@@ -15,6 +15,25 @@ def basic_options(extra_args=None):
return parse_command_line(args)
+MOCK_YAML = """
+ behaviors:
+ {}
+ """
+
+MOCK_SAMPLER_CURRICULUM_YAML = """
+ behaviors:
+ behavior1:
+ curriculum:
+ curriculum1
+ behavior2:
+ curriculum:
+ curriculum2
+
+ parameter_randomization:
+ sampler1
+ """
+
+
@patch("mlagents.trainers.learn.handle_existing_directories")
@patch("mlagents.trainers.learn.TrainerFactory")
@patch("mlagents.trainers.learn.SamplerManager")
@@ -69,7 +88,7 @@ def test_bad_env_path():
)
-@patch("builtins.open", new_callable=mock_open, read_data="{}")
+@patch("builtins.open", new_callable=mock_open, read_data=MOCK_YAML)
def test_commandline_args(mock_file):
# No args raises
@@ -98,8 +117,6 @@ def test_commandline_args(mock_file):
full_args = [
"mytrainerpath",
"--env=./myenvfile",
- "--curriculum=./mycurriculum",
- "--sampler=./mysample",
"--keep-checkpoints=42",
"--lesson=3",
"--resume",
@@ -117,8 +134,8 @@ def test_commandline_args(mock_file):
opt = parse_command_line(full_args)
assert opt.trainer_config == {}
assert opt.env_path == "./myenvfile"
- assert opt.curriculum_config == {}
- assert opt.sampler_config == {}
+ assert opt.curriculum_config is None
+ assert opt.sampler_config is None
assert opt.keep_checkpoints == 42
assert opt.lesson == 3
assert opt.run_id == "myawesomerun"
@@ -132,7 +149,17 @@ def test_commandline_args(mock_file):
assert opt.resume is True
-@patch("builtins.open", new_callable=mock_open, read_data="{}")
+@patch("builtins.open", new_callable=mock_open, read_data=MOCK_SAMPLER_CURRICULUM_YAML)
+def test_sampler_curriculum_configs(mock_file):
+ opt = parse_command_line(["mytrainerpath"])
+ assert opt.curriculum_config == {
+ "behavior1": "curriculum1",
+ "behavior2": "curriculum2",
+ }
+ assert opt.sampler_config == "sampler1"
+
+
+@patch("builtins.open", new_callable=mock_open, read_data=MOCK_YAML)
def test_env_args(mock_file):
full_args = [
"mytrainerpath",
From b84396f8bbdb685d82185ba9edaa8732abea7566 Mon Sep 17 00:00:00 2001
From: Ervin Teng
Date: Thu, 16 Apr 2020 18:01:16 -0700
Subject: [PATCH 14/36] More docs updates
---
docs/Training-Imitation-Learning.md | 3 ++-
docs/Training-ML-Agents.md | 5 ++---
docs/Training-Using-Concurrent-Unity-Instances.md | 2 +-
gym-unity/README.md | 2 +-
4 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/docs/Training-Imitation-Learning.md b/docs/Training-Imitation-Learning.md
index ba3cba51e2..930509701a 100644
--- a/docs/Training-Imitation-Learning.md
+++ b/docs/Training-Imitation-Learning.md
@@ -87,7 +87,8 @@ inspector.
width="375" border="10" />
-You can then specify the path to this file as the `demo_path` in your `trainer_config.yaml` file
+You can then specify the path to this file as the `demo_path` in your
+[training configuration file](Training-ML-Agents.md#training-config-file)
when using BC or GAIL. For instance, for BC:
```
diff --git a/docs/Training-ML-Agents.md b/docs/Training-ML-Agents.md
index f53998b30a..c55289027e 100644
--- a/docs/Training-ML-Agents.md
+++ b/docs/Training-ML-Agents.md
@@ -181,6 +181,5 @@ are conducting, see:
You can also compare the
[example environments](Learning-Environment-Examples.md) to the corresponding
-sections of the `config/trainer_config.yaml` file for each example to see how
-the hyperparameters and other configuration variables have been changed from the
-defaults.
+files in the `config/ppo/` folder for each example to see how
+the hyperparameters and other configuration variables have been changed from environment to environment.
diff --git a/docs/Training-Using-Concurrent-Unity-Instances.md b/docs/Training-Using-Concurrent-Unity-Instances.md
index e4d7acdabf..77f0b9d284 100644
--- a/docs/Training-Using-Concurrent-Unity-Instances.md
+++ b/docs/Training-Using-Concurrent-Unity-Instances.md
@@ -10,7 +10,7 @@ Please refer to the general instructions on [Training ML-Agents](Training-ML-Age
### Buffer Size
-If you are having trouble getting an agent to train, even with multiple concurrent Unity instances, you could increase `buffer_size` in the `config/trainer_config.yaml` file. A common practice is to multiply `buffer_size` by `num-envs`.
+If you are having trouble getting an agent to train, even with multiple concurrent Unity instances, you could increase `buffer_size` in the [training configuration file](Training-ML-Agents.md#training-config-file). A common practice is to multiply `buffer_size` by `num-envs`.
### Resource Constraints
diff --git a/gym-unity/README.md b/gym-unity/README.md
index 13b1bb4c08..7277bd286b 100755
--- a/gym-unity/README.md
+++ b/gym-unity/README.md
@@ -337,7 +337,7 @@ the training buffer, and no learning happens.
We provide results from our PPO implementation and the DQN from Baselines as reference.
Note that all runs used the same greyscale GridWorld as Dopamine. For PPO, `num_layers`
-was set to 2, and all other hyperparameters are the default for GridWorld in `trainer_config.yaml`.
+was set to 2, and all other hyperparameters are the default for GridWorld in `config/ppo/GridWorld.yaml`.
For Baselines DQN, the provided hyperparameters in the previous section are used. Note
that Baselines implements certain features (e.g. dueling-Q) that are not enabled
in Dopamine DQN.
From 756a75f8a7acfb847ed7f98c2b9e5fbf626bb655 Mon Sep 17 00:00:00 2001
From: Ervin Teng
Date: Thu, 16 Apr 2020 18:13:50 -0700
Subject: [PATCH 15/36] Update migrating.md file
---
docs/Migrating.md | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/docs/Migrating.md b/docs/Migrating.md
index ef081023df..4fd95e8226 100644
--- a/docs/Migrating.md
+++ b/docs/Migrating.md
@@ -17,6 +17,8 @@ The versions can be found in
* The low level Python API has changed. You can look at the document [Low Level Python API documentation](Python-API.md) for more information. If you use `mlagents-learn` for training, this should be a transparent change.
* The obsolete `Agent` methods `GiveModel`, `Done`, `InitializeAgent`, `AgentAction` and `AgentReset` have been removed.
* The signature of `Agent.Heuristic()` was changed to take a `float[]` as a parameter, instead of returning the array. This was done to prevent a common source of error where users would return arrays of the wrong size.
+* Trainer configuration, curriculum configuration, and parameter randomization configuration have all been moved to
+a single YAML file.
### Steps to Migrate
* Replace the `--load` flag with `--resume` when calling `mlagents-learn`, and don't use the `--train` flag as training
@@ -29,6 +31,13 @@ The versions can be found in
* Replace `Academy.RegisterSideChannel` with `SideChannelUtils.RegisterSideChannel()`.
* Replace `Academy.UnregisterSideChannel` with `SideChannelUtils.UnregisterSideChannel`.
* If your Agent class overrides `Heuristic()`, change the signature to `public override void Heuristic(float[] actionsOut)` and assign values to `actionsOut` instead of returning an array.
+* Before upgrading, move your environment-specific `Behavior Name` sections from `trainer_config.yaml` into
+a separate trainer configuration file, under the `behaviors:` section. You can move the `default` section too
+if it's being used.
+ * If your training uses [curriculum](Training-Curriculum-Learning.md), move those configurations under
+ the `Behavior Name` section.
+ * If your training uses [parameter randomization](Training-Environment-Parameter-Randomization.md), move
+ the contents of the sampler config to `parameter_randomization` in the main trainer configuration.
## Migrating from 0.14 to 0.15
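Putting the migration steps above together, the target layout of the single configuration file is sketched below; `MyBehavior` is a placeholder for one of your `Behavior Name` sections, and the curriculum and randomization entries only apply if you previously used those features:

```yaml
behaviors:
  MyBehavior:               # moved from the top level of trainer_config.yaml
    trainer: ppo
    # ... existing hyperparameters, unchanged
    curriculum:             # contents of the old curriculum config for this Behavior, if any
      measure: progress
      # ...

parameter_randomization:    # contents of the old sampler config, if any
  resampling-interval: 5000
  # ...
```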
From cb97315021a7b4120a18524f4c7bbab23afd99e5 Mon Sep 17 00:00:00 2001
From: Ervin Teng
Date: Thu, 16 Apr 2020 18:16:45 -0700
Subject: [PATCH 16/36] Update changelog and improve migrating
---
Project/Recordings/movie.mp4 | Bin 0 -> 8469465 bytes
com.unity.ml-agents/CHANGELOG.md | 3 +++
docs/Migrating.md | 5 +++--
3 files changed, 6 insertions(+), 2 deletions(-)
create mode 100644 Project/Recordings/movie.mp4
diff --git a/Project/Recordings/movie.mp4 b/Project/Recordings/movie.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..569b90faabc0cb970e62f1d32362d3f8a9a512c3
GIT binary patch
literal 8469465
ziFDss9PyuH+K5DsNKGVDES@|(Yy}8GPfq$zrr1n8LLA{s;y~gici*hh+qdDUtusvP
zG6t2AWmDtN{F?ECI^`LCRz@@2o@h@PY#&=0hH|yoZIlmUu;sX$n2GCu)6hPBxs=)7
zinLMbQ==#@*;|Mp`JYm{H{m^j8iZR2xR*S`#iR}q+f~ki;+>n8H+cm^r=nFl$!8S!@vp#f
zFDby1ekvfQvNxNOCHp%)dZucGw}l#aUvO+k;_gUk!N4^Pe0O2PLR
zN?v5EJWfTAAK-GoSmzm-I{*hP1`@=_qQPUwd?#dOljb5OzLg1gOjmo
z95dCRxQ@6HX%(3r*qX1O+*uY0-~P8{-5}t{|Dd)kl(YWYu>WvzXziDs8aU}5uD52X
zC$eZ!`=$l>YwZW#<4((8;<6u&0A&|hN@I5;fzCgTPdR=B&|`h9z`~gHm@vYCJSk_n
zF+spzJtEmUfhdU95iIC47%2eHA@*Hz3tDB_K&Ny3H1zcV`B0viAF{|GDVofZ8zFKZ
ztO$tXb@B5fBT#zAjnNz>!<`4ta2`4D0q+%luy3_MAhR6S9e|ot8Q+I8;w1K7tXZ5}
zREJw~Eyef3J|R7#!{R>97wRG>PmT?A1;aerxx7I6c
zE|J*N_lOZ|{$ZgsV;fcJ{nmHk
zFyrD)#pd-lJSGR
zA@}d*DlxRXnthjtvuzBMjDhZ>ac0Z_SKRb8@rAFK%NHx$k^%k;FQ4B4!K-&0zk$qrl^e$F5!Uus`Mj^jP2X8LtT8j)vn;Du0l(=3
zd>YNpvR4}4&6e9U$5Rp*#_~(?0cU4MCC(OFZrza9$t+er>&Ivv0(h%^JeXB87wTM$
zZ+;`V@G`848vUZb-YH^K!$0iN;6vbU)6?~v!JKD2GYf&Qo=2s%F<-te^a*-Sdh+`e
zzCC(e=&gVC8J3r?y}kWlMmDdU{v|fJw&zNLY?juCMv~vbTMOKdUj;~oazkb*319Pd
zc*eNi?%YAFDSLGS1n~raD5v0@^IB@xW@-a3Qb=C04xTqYBf(9G0nXzOABDb<;)`#E
z09;p@N8)~r
zmtSk^bXdxCj`a1a;jVD){e9S=X7OU0Voat+tm7Tpk*{kU5S-VSJBS7L)8)MyFTn1EA#;DtyXD
zKalw>5>lL2nY=B11p7p>D1URB(Eg>k$jveK_&fFj*1186W>9*+@!O3OLX!|DHk>!|
zs#M`lrs{T|*TLW`Jhg}5AuX*n=FML;ka(nQ0SU$c+vemY9d3m-9C)pjc3te=)G_3P
zfa5v-AS1*+d;R?8Onn^zH#}0&hJ7?VBviO8U{WD%58A0#!V7;^Kz6mGJD)
zKSJ~v0m-#BRqMt(AuZDkC}?8O*{n55c=WH+VLt#Os%yI^Ako{8jN{q?9>Vg=&22FC6{C$LK-XpMFZmFqGI6
z>i4dFR0o39dX~7-#Jq9!3KNArtK3c%;|68Jox8AE_dN+Fi9cI}1R|AxjN!%=M0G1I
z{?P0nhTiB1oU!??XcDbE6HIRi_4vpe#c+~S+!$t`XTeK1pG}&+Gq6$I`zMYeq?8R(gK?umB@;j>SZ&Xim%<0x35t7G>yk=RHlz0(SOMHa5Hex9ao$eXsxJvT>E2?t8C2AFszL;(gHHOJX&`;e>7$yX%+P
zEDMph=dOoeKC2pASV%f?8O>wpM({TfjnrTl2>rEu?*)gR;d=*Fk%}FYYENp>A|56z
zUecO=tAf}O$N1J+-q#nTA>%b~m5+hMSo=?)ea)oy<>MWG&GJ%`?Eha5(!{`O;KKIv{iM?EiV0@;jrV9$FAjHY#
zh9x$h;E~DaB}`wP2d2DKiXK?M$E@03DpJ?3N5a9sKXgNlw}DQs^N)|iXw1PJ-M{A=
z$k6=&3umB@xo^sXCMcMIyQv!b6k&SzNeuR6Wx0Yz&y*IUe5T533ea+<(Xl)Tw;P{R
z%t(s)s4(>mstqZ5t$yiq&!*}CqE=_0%_A#j)TqZH1WpKe_Ec
zTOEFNGxX3IcgS)6!}s7-!~gq#hqv@Xbq(_>S_~efc*A;BmF-%~G8OOnKf3bJ?)caJ
z_RpU;~Bk5F23z4p6c!Gl}Nb7Ei
zb2r4&T3SNXsLd#KI?kE1$4$RzzxFBMehD1}gMS8b4oJIf)|C#2spP-`%h+m`ul99uZG;NNpFxobVkp#0C#jcqccIUCVQC!&c>7%F>AbARH
zpFb=!Zt)mTjD;k=fnhu)?}K1!UC3VWUE@j6t+ob9w9Hv>yy(?`6IIs+!PhQ*9a|#}
zxXXp^qf9@^+VHfFXZ}fj@B!UVyRGmPd43?E@dJT#%@olLH4^;+fQNGOZz^p7`9%yg
z^zcL?DZrJU1iSFW9`CBA!`laSY}yU=nVgZ#)=rJX!{nY6=j=6dQC&1pV5i)ac2K7-
zp+-{A%;Pk$xCJ84L5e)EHMn?4?@ynAu-M$)QV;oM9jd(1q^LiG+_o{OU!g}rG4HrT)uO?8|lr=yI{ef)a#)(PDeJBs$V
zjbw4LA#;3OIb2ZEf7(d8=#vi>9qiBCni6b$#31A>Gt+pbw5BJ$VPxP;B86Q*a1cMec&i
zBCru31RCP(VvOI@-AHA)@3_GPV-dK<6JC(7skYB}#;Gh9wAIbyW!rFIqZK%iZ}Ea$
zm2otDrPWS5j1@s3EUUwiEsW@1e|Cilpu}>P)?~f03+pVgFei$_r#UB=tLAVe)<^~N
zot3?&ZE^Cm+7vzGTd7ynJqWARr-I^^{+%;YqFstL9;8Nypd=q^{90Nf+%_VqD9a
zwY1=;ragEL4(%N$-CCpJSz;w97d1AIYuRc-i)1uM#?q(oK>@g;=RjW;KrZz55=&
zF2A4cM0snJr?)(7#x`WITpOMKOL
zhVQNwPw8B}j{oWxnrT@x0&o_F2r+`WfM+vs`-ZH#huBh(g1hQ#{*3r+ngVJ^(bA5pg5_
z!5Oh}+0iVQLpT#IqNk*dduYeXKm5RbAzvQ|4TPJp7Zn`b;y9@>^RuN!HDe0-G8}vX
zct1DT4!ZXU7>J`8rH4^Lo857C!rNBpOW#~6Y+wUsui*wo=E&)j*9ZMLAJ6D}nqrZG
zKkutgHX^tUE}kT-r2=Bco_JRcf!83BiiLuz19YDCNTgEH%W0BAw9YxsVwiTG%Up#Rb$w#8AYN!t-BJqpZpsah?b
z4WP#P`Jo9{40;9MU}#w_zP9(?2ip_$Lo9plIW;X|98H*buhg}m>Dl7al>n)_QIomN
zvh;WmAhJN3PB_gJJ>&!4v0X1Yz9uB|wJhRH7_r^}0<*Pvn{@MA#2185`@~Fbnf(Cy
zfV5lCl(^(ZegN7J5#tDt#ndb4oae_lKIR4r&FO~mPUN=(yWeXWx1=8wqc(w$kmIFy
zhA#qzSkkqgRS|1JGhlFHivoH1xVrCp3{RiUi4zrM37nMK?#Kqzn3n4HJmFajtwZ>i
zvhU1~HlE)Tu(_3%X1`fVW#{+<$Eh~g7cBVCIlOq$6u0({57PfF94G_+EPX4c$;R}4
z$;#H1l$~XXH~46w`zKn-90kBB2p4{(w5Pee6ZM66(TzA2%rQCqVYRuvG+|`f0%FE9
zE0dUEr~1cnu=)%qA2tn1BwdDQB0tE4#KqYFDc237%PvyMcD1=xcY0H5fhm7fVT%~&
zZ1##F{b-N_|>Q;1~1hLy92eFM-+>iypB@6)$%H9IafzuyaK;4It%oLElr
zqyajqVgiGxJMXR4BIQqP27rlF{cqBfX*y#PyECI-6L+Ol#N;D2nW6iTed#9&VmdVU
z&K3fGZa2nPaYU*Zr$Eh3NOtCVgEGM6ZE=ri3;oTkPWw_t)-z{ytku>#DkNS97GsGO
zcTv$)RJ|{7Ux%|2RROkArTX^V06Ma;kmw^?cgsP_l{&+2{az*lKvQLcMQo}JWPjxn
zo}Iyg`EqrS-<#0jlDNhmwBNvtquv~CxyL!jO(26Q+X-eYz0mL1t|Vn&z)+6J#U@bX
zpTxGQy|R)L0%nf0^R&?MzaioXLT*5@M!(6RK8I-&>Z8aMgn|uJ^_pfs654odH(url
zP`3gOqSLT9W;`nY!)7*XZ*+2~>KVIOR&f2W$PDJS-<`IKdI;8?fZM8|U=H3iK|q1G
z%W!Js%g$6~;&opS-4sJuZ&ju!wr
zb!5!Iae=oi`FJpLi>~nXX#j8zv$88H390%LN9L9S>r}M!2J+_9FLEnZ>F%z@2^#sQ
zR!WT0oGY??H@RjEGd@Bi6PP0Lc
znK`4$gv|?5_#l`h{L1f|XEXAcw>(&U@=luSj}@x4!)wNlW+3b-R?gojrs-*VN3iOn
zn=TD6aNikWChL`iqACZH7)!kDHLwzfD&XW>*11(ZnOctaT7`7Pkleh@~#Wy^4{g9Owiap{;xLz
zRHL0N)l-$_R!*W=K8m=?0ySZWMkMb2Vwc)gm)#swuS{lT0&H^v2FG^}lH5Q(u1
z%(b;2ZFo`}*6d}aIoCXI5sf2ljiV7r+X3g=5U?Dll#}(Uw|UAj#foNPgp$2Ea9IuA
zXffYN^g9tm+&ZRUYz?K(HvIfvm-nUIBD))y%@WV8a9(~KNx8xSwjWSsdO9cK+nk8GMA7ASha$DV
z2tpPmo4e6hAndZIS&-yQS^wYgx(aG54rwtEgHQfT3H>+L{&)2ORDJ&UOQX$i7&P({
z;mz7~>PdyfE~BVVMlHumVhr4J|2fk@;_GlU4*Cb+4h?GmeyKXXY;7Q*#bkU1MNoCH
zR{J#*2Kn}dXW;D7A}&^roGf65S@!VH4#8AY2pIl&_{jbv@FqNShMTn~)6!4D*2)dc
z_tyN33?~aUu19{EhRfI}%DL-O@fojrp9>TawW0t}tvdF9wS!eUjU1X_`0HW(>pw7+
zw_nc8J<0$BO@-**Evk{zPBn&Bn&}jh^Y~I|)XBK;^BO^tAr4;*lm3{_=~emlU@N-#*U7)!KrKd7MY^M^?utqC2s_!1ScC1iNGm=&HPa=*
zl6Z$Q&biEt2$fZ4Mo|qtj0*s;djX7o28?`^M;v6sC)^g~FgtAUAn|`@R_O`moDNXAh
z5TmTO$uN*#wCwsbNbop?*VNsGE75i3?ziZP9i_v&{O`T+fxsn=aPd|7cKs&=3gw6z
z>D**$JxX|-U9ZE+B-RO>JV1gHz#NGwE=anHcJ_Noj^)!L>n=z5#jFrV593LUQ6E68
z1rmB-w*$3+ilO9N{6z9~3(%=l5JKOwr9}%5RI@_R(w1XY)(fs$Ig*=^!SlE)b1?SN
zjGvL!pPH|u7!e7QJ`fMSB$;E@t?O+O?`EjKDo}v7B{Lg)5cGH12<$J)alG3_B~ib!i3q?e>Q8z9|5x(jSMR?&}{~{eU%LTnj*DhcmI6eb0NHwq*Z{E-*{TMaihP
zn72BZ{hP&oZXjxk+8NQuc(5s!SyG}|m*8mHAm4W7CS^_Yi}?MaK0g4c+amM4wIKdJ
zTS`JcD|+~ylyH>rM^3uei?|p?)kDFjxL=FCZaCk+6>h6x(aYuRq@xDZS2`oBzdS#4
z{}Bo|aIa<>%(4$cHf5s1BN+R^kBLB8`6QW$#Di}gYdl;aCY-%e#b
zvuE}kt@kUA_J@Kb4wNzVXP$vV=Rjb~I#)PnYrO#@Hr`9^`4Y$ERlTG6PBkk~GMXFZ
zviiBz)US(Vfg%z+Vw!tTE1dQ_i1_B9&sf5D%*DJh5TYbK`bcVC@|pn2vA3>>zf4E^
zk1zgZWDFerh9)XNXRD$_sD8<0@^e#
zO+L{xwP%#=({NwgqXpx;LpLq~J8=bOg=NINV>sgOBgNDMUeto6A9+rAUan3tg$kK?
zX%7`UWUcWg2iRfNXG7FFtkKoLXM@`;#}Q^3~ay9
zoeQ~Dg+pG{mQ&5p3uk}s*?C%=%#a+85nr+A<>Ujlr|76HIKB@zxXFXNKqqCn($A9V
zskQC=+SL~p<~?dS`~XMMr>inEHl_zLXEnbnQ#*F?7|U`-FgL3mrp;}Yf3lL^n|dJTNsLlquM4Kb`I>6MRc{uZ~iODv~K!VBCWyVQQTTj_9X8gV8l0
z=n_X72K9WrvCXAeWLGO=f6^UuAI8bc;QnsG)A;hs!FW}_JxtrE6&SmZ1S)Zf43Yv?#qn)11Xrc`|+dIAQ1*;&27=2Og;PP+*&tl{7)}#
z1#nh7WXvkyLD5n%Qzvt=$88_w3Y2Iq&_y_NMIPLEL2SDiO+_3xmLwlxdI(M;gqV}y
z*4w)9x)3q^Df>~k$q<@qLl?{`FQ{|E7*BEAoB$OC@ig2yanN7G27s{aZyn$_Pd4I=
zTLguQ;+`+!4kc!O7Bcnx!KoYDW0La4dca?-(_-Buh03X|{`iZtIY_aRGOhoYHu@((
zbY1)G3C0DLgZbS-ZdB>745X{%x+qxETcn4Oej54*RuRFOH;6lVUqb6eI${|KhsMD&
z)Q250;7)wC7cMEMS7v|H4Az}a`5;)3kN!P&0-BUCag~{Ahc=RZt{RE3Zotp@ewK-k
z@N6Ptxr6WydA(pFV`$faN~^!Q0m546a7H#Ig3*5naQ<1v|ESW%CP3nnIy~_k|B~Ei
z0tO!6aDRrOyK?6`Q}gYHri>IW0Vg!`uEm`(U<#@*FZD7ue*cX)%bleF+iz1N=yoer
z6BMOMnipK=IRhO&dA)h0_8H;@Azn`b0?s9|L=si_6&qj*2S2>S?!bu(;yZzJkJ)uQ(5Uz0ci*o_XA0Io{&2YY1f7SB8a3=7`5bwI|hKd+Pv;~UKf&tWf
zk_$qNBHZr+5b9#pqRRd%Bj8sx5>L$?V@29
z99$5OZ{WU$`PT*b+X3Cp?{4w;+1UPT6Id0XW5QIjx{Fg&aooR!U~wMO^scK5c2H&^
z+bsR5o;Sn-07y&f@G{*#jJ5CZ0Z+dV7%dpx7ukhKI0+T@ONN3(tl5hnWZf;&dF+<-
z8hO9vJTTcr7(iW3r@s}UH%b=3{aEEk&A^2V4q$-l&puR_091~4;?%4qETLDW#w4)NZ(7=gB?#r)9eXBWO
z+3k!4{lJFQK__d|@MO?Xfd?;dH;Wm@s~AE}<#Ov23jQCQ02kuD?eC|{n%$rg`IqF7
zW@vvpQ3i%AB@C#RxQhxPY;r$RO&rM3g~4s8OOTvAkEU?ZdTGIgTZArM!qe36rq}gb
z%^lkWz;44s#p?>@x#;2CQTfoGQ=rg{W5DYgnKi~b`VHHlt1)J<8dnFokV62D4pMqZb>ev5sqg=)++;lsD
zqthxu!>mWpc7h89ByE{wjv(bu@35%T&
znQH^s2kvpg?-tm8lJsoulRCwJGq4{wbaE62)`7w)8!eg#;gah6pPv9ntLBvRkPuP;|nR1`srQJClo1pV)0l>$rwv6{f~LqM+DY5Q7`&r?iTt4
z$-T203ohaSfPfBGn!Wz8-}U(UlX)tU{@7yXGC)?&cl%9U0w!efF|?Y{SGFCZliv3^d*tG%2e=i1Xc}*a(S?r
zw>T*m4CQ@!mzNNkui4I^sqIko%8{+vrP-ILQ2V1G8pBJ)S;Ub)pg!^
zmI4$(ekSb2o;aE*=Q;+9=Px7-qmJ1NpSk6a-A+VM<|d*Nx4?
zA!p6}KU8UDG!zo#g0
z$wU{|ZeM^*rdXQoLZGQ8HWSxX-WrKvFZ#rR96;pZ9?=_D+bk%ZpepwHK|DvLf!!?u
z%W0;&fA-Y~vQZcddeX3ejxs>UPTNrJAnezfz397%GQL5)2uHN6lVEp-P((pX8QOJA
zp3ZkF)e|*SrTYYMLL@y@FG)WqEshK#0`XsU*ho-|fECxK}
zohO|Ls2;2!_B{+=z{Z8Pxb0PnR&mKXp9OYF?n7lLfXK1Mp0|4sXk#>N(}J+l85U7>
zTbQl^r9qJ}53ULF`7G3a_u0T>Cs&C>;u0UjW{)I(r$DOA3Qmgyo3cagakr%BOH`S-
z#c^gorGQ-s_I4m)_h77{%u=kX;w;BAtsYvMUyr3HmZ>2}AVhmZT?QF;vH3pI4YL`Z
zV~;k`#z&l(=;C5&R6HQWGhK$q3g)<*+tz~VW6!hRE<$r;p>MsstlHYDyQFHOgWD~j
zHq6G1Kv050J-_^L14e8GMAGN8Utfu@lVfplk}kgtf%6Nf&9c2-9d6%5cO~BMN2a1G
zi7{Q?c>|dpC;$ohkx@F~oBHBscus^g!FbGe)pWrMSaYFctpxq_=G$Y
z`ladRMgc8E5cmhbYX7v(BXq@~;sAK;9|$v~3N!8&(Qub$KWGIxqk%0FCL?r9Y>+{3
zEVYjLipwi-w?^%m5ZJQv2q2wV1f%oUGxS2=;QAjKA8sZ`4k|ZVH$h(qznPzatUXse
zT(>~~uViI$Atci>BQr-ZgQVsRn-duYNJ-pp2TIZqmG!qD0*ByXgQntgiyMe|k{%9!
zFh1+XR8Ppq{Mg)sNyJ2Ty6qB@z-02!u_~*=i~=KNl{@bC9%$Fgx8&%P9(o4Yfn{hH
z;J8oQ5*e1X7b^7?-O3NH+|Z{YnidZP%reuDr6Xgjn$PoB>ArEMz3_&8x2EVf9f~y&
zt`bdN!^H8YTU`R}MV0${-?-Y6NI6q4f(A7u7gp~A9k
z)Wp`kj;hurU|Ru;bq3XI6Tz|f=<+@eN2YEasZwR(hp6j5
z9$aeLV5&2a#(2#@sW4CIkM%U=Nz|cllhGQQzr`v0
zkksV0?E5~)K3T9*wzJR=eEPq|p7p5xu3^E}WH)!u;4Wo7M~g|sdiy9%(idT!KnmT_
zs}{8rhf))8CzNzGL|xBjhSQjqW<=&3!tTZ9h5KJ8capW^ZoT^kyi#kP1XX92m4qJ|
zfNsM*%9nRb=q=x@vKR{Rbm6JKYL=C|h&^Va9|uI~dLysEMx@*Tg1yyZ)dRXIf&eb+
zAd)W1zlV>S)@*4o$*Ufq5U~^3g(1K?C)ve}&%#S@Tyy6op7Mwmpw0Oi&%*9e+Efpx
z1^XWq@wZtTX^lF&LUUh$lF!2KT2-Id7L^Qc#b+5mOFP4GSED4b>0yZFKXDBY1$hxI
z?g(Q;0hvSW@B%&b)R!y+$enk?xkMXq+E(#lAN9DYY-zS%;(o8%&s%i}HU2U&$3?F7
zcpgTvgD#+fLvz&Os9!go>CZ9^=5ktp__OPMUDoGyKsAtAH!xg%K;A+M;hYlyj2r
z!T`*|fl)@Rw-fygf3z{KnOism#
z$WRq4Z>kt5{&CKMPkMM7a$VXcjdAW)AuIKj_!^0-i=VlimH=J*i2rS*sW{Dyp-D;`
za2;-?Bl*0FhSMh1(+DnA=&r8MpwgQjahiF)}SU
z(4nTJw`q3Ufx8T}Mb-J`K6MTIm_!w~S^C116Sz0cQu=s@c8Aop%0mm3Vk`~AJ9po|
zz!5yD8*P`S83_#yIR2z6^C@gXN}?)?(cMl
zh5_3vfE@@WiY2D_AeAb-ut>*?$d^;6xb*x*!sVrABDe?s(~~VI<`)3nBonXrWFADX
zE2L$2Aob2cf)Ol+=G_$H(>>kh=Fc5TGr(Ogo!}PNExP$CVn2*WsxyiQvd+wW_f-kf_hQlXG`)l**AosW;Igr!^e#qB%^_-jv
z%X%4Es%15vnI^e8iLgMH#}+r4$7CXM18?50UO+V8_6)dl&Ka4H@F~)R2z6&(_KA7+
zP8IGgUZ7&@0t3vc56?t@ZcCXABwez)ru0rtRVtUFg
zvsG2a*zT8fc$dh9?2JIVc}iJNeP1(`O}#boTLj?5U|6>hkrKLjpB92DoCR>O9qnpO
zXJ~<)I4WmwGM{DW`MaJ?2*<`J^s@@`7E?IneOp#&mK;R?<-*tBDC#z5TVAR9wP%if
zzQOb1dbWoYrm&dlItm1T;l3|w_p{(&og}g+Hl2OX5h;krz7bcKBZfhL&
zh8u3_@X;dqnsSy-8;yiypkk0Kj0W-sIUiHb-FXb-&f*1wG%?SDWofT|0h5&lky;o}%lwta?+
z8Bu6@hM!#?hx|UEXXW>AX(48QY||U${zhy5mIw$sNKa{>D+`
zmsZ~ITsVSDw6-(XjR2ft%*ur3RuFc?XcQdXmCxqCtsuS<)>}NlkGsl*Hjq_5^KLoE
z9c=%;FUrdAvV6oCyDj;di{%zTdR0x$0|)~vB?wDOY!&zi1DCR~Oa3xrRb%YAf|zZ_b~M`yX~
z0!N8Ex)&*LeAg?GKuB1srl`hNGBRF3^NU8T+SW9z1lJ*W$|1ZDypu-(fNBfxI(|9P
z;00@IXd>f<1hi<*2lFvW3%V7CNr5*&cBr8qn-I6M`E7xcPG2juc58$rYQ_{(J^_?=
zX%@UW(b7a^{N(nBL7d~JH1J_I=3khqa|iER1@B9LzE{~U>tQ-mjsD7mK%RXNE%tCW
znC4fA_%!J@L_t{M%#LGEXz=sa9sa|b3Xmf>YBKCxcDJ&ya40b*2RPtRv(WH(+mf1t
zWE-QT>{!?>~obEzea9~ygLnV-q>RC{jZSA^}+p76#(9z_ZY>7^08rm3t=lL-pJz*5j*w3
zEbS{-!~<2nw_tobID~a<|j