
Prevent init normalize on --resume #4463

Merged — 3 commits merged on Sep 9, 2020
2 changes: 2 additions & 0 deletions ml-agents/mlagents/trainers/model_saver/tf_model_saver.py
@@ -99,6 +99,8 @@ def initialize_or_load(self, policy: Optional[TFPolicy] = None) -> None:
def _load_graph(
self, policy: TFPolicy, model_path: str, reset_global_steps: bool = False
) -> None:
# This prevents the normalizer init from executing on load
policy.first_normalization_update = False
Contributor Author


This is the fix.

with policy.graph.as_default():
logger.info(f"Loading model from {model_path}.")
ckpt = tf.train.get_checkpoint_state(model_path)
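For context, the failure mode this line addresses: a freshly constructed policy treats its first call to update_normalization as a one-time initialization that seeds the running statistics from that batch, which would overwrite the values just restored from a checkpoint. A minimal sketch of that pattern, with hypothetical internals that are not the actual ml-agents implementation, is:

# Hypothetical sketch of the pattern the fix relies on; not the actual ml-agents code.
import numpy as np

class RunningNormalizer:
    def __init__(self):
        # Freshly built policies start with the flag set, so the first call to
        # update_normalization seeds the statistics from that batch.
        self.first_normalization_update = True
        self.steps = 0
        self.mean = 0.0
        self.var_sum = 0.0  # running sum of squared deviations (variance * steps)

    def update_normalization(self, batch):
        batch = np.asarray(batch, dtype=np.float32)
        if self.first_normalization_update:
            # One-time init: overwrite whatever statistics are currently stored.
            self.steps = batch.size
            self.mean = float(batch.mean())
            self.var_sum = float(((batch - self.mean) ** 2).sum())
            self.first_normalization_update = False
        else:
            # Incremental (Welford-style) update that extends the existing stats.
            for x in batch:
                self.steps += 1
                delta = x - self.mean
                self.mean += delta / self.steps
                self.var_sum += delta * (x - self.mean)

    def load_from_checkpoint(self, steps, mean, var_sum):
        # The fix: clear the flag when restoring, so the next update extends the
        # restored statistics instead of re-seeding them from the next batch.
        self.first_normalization_update = False
        self.steps, self.mean, self.var_sum = steps, mean, var_sum

Clearing first_normalization_update before loading means the next update extends the restored statistics rather than re-initializing them, which is what the new test below verifies.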
66 changes: 65 additions & 1 deletion ml-agents/mlagents/trainers/tests/test_saver.py
@@ -8,10 +8,11 @@
from mlagents.tf_utils import tf
from mlagents.trainers.model_saver.tf_model_saver import TFModelSaver
from mlagents.trainers import __version__
from mlagents.trainers.settings import TrainerSettings
from mlagents.trainers.settings import TrainerSettings, NetworkSettings
from mlagents.trainers.policy.tf_policy import TFPolicy
from mlagents.trainers.tests import mock_brain as mb
from mlagents.trainers.tests.test_nn_policy import create_policy_mock
from mlagents.trainers.tests.test_trajectory import make_fake_trajectory
from mlagents.trainers.ppo.optimizer_tf import PPOOptimizer


@@ -113,3 +114,66 @@ def test_checkpoint_conversion(tmpdir, rnn, visual, discrete):
model_saver.register(policy)
model_saver.save_checkpoint("Mock_Brain", 100)
assert os.path.isfile(model_path + "/Mock_Brain-100.nn")


# This is the normalizer test from test_nn_policy.py but with a load
def test_normalizer_after_load(tmp_path):
behavior_spec = mb.setup_test_behavior_specs(
use_discrete=True, use_visual=False, vector_action_space=[2], vector_obs_space=1
)
time_horizon = 6
trajectory = make_fake_trajectory(
length=time_horizon,
max_step_complete=True,
observation_shapes=[(1,)],
action_space=[2],
)
# Change half of the obs to 0
for i in range(3):
trajectory.steps[i].obs[0] = np.zeros(1, dtype=np.float32)

trainer_params = TrainerSettings(network_settings=NetworkSettings(normalize=True))
policy = TFPolicy(0, behavior_spec, trainer_params)

trajectory_buffer = trajectory.to_agentbuffer()
policy.update_normalization(trajectory_buffer["vector_obs"])

    # Check that the running mean and variance are correct
steps, mean, variance = policy.sess.run(
[policy.normalization_steps, policy.running_mean, policy.running_variance]
)

assert steps == 6
assert mean[0] == 0.5
assert variance[0] / steps == pytest.approx(0.25, abs=0.01)
# Save ckpt and load into another policy
path1 = os.path.join(tmp_path, "runid1")
model_saver = TFModelSaver(trainer_params, path1)
model_saver.register(policy)
mock_brain_name = "MockBrain"
model_saver.save_checkpoint(mock_brain_name, 6)
assert len(os.listdir(tmp_path)) > 0
policy1 = TFPolicy(0, behavior_spec, trainer_params)
model_saver = TFModelSaver(trainer_params, path1, load=True)
model_saver.register(policy1)
model_saver.initialize_or_load(policy1)

    # Make another update to the new policy, this time with all 1's
time_horizon = 10
trajectory = make_fake_trajectory(
length=time_horizon,
max_step_complete=True,
observation_shapes=[(1,)],
action_space=[2],
)
trajectory_buffer = trajectory.to_agentbuffer()
policy1.update_normalization(trajectory_buffer["vector_obs"])

    # Check that the running mean and variance are correct
steps, mean, variance = policy1.sess.run(
[policy1.normalization_steps, policy1.running_mean, policy1.running_variance]
)

assert steps == 16
assert mean[0] == 0.8125
assert variance[0] / steps == pytest.approx(0.152, abs=0.01)
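
A quick check of the asserted values, assuming make_fake_trajectory fills vector observations with ones (which the expected numbers imply): the first update sees three zeros and three ones, and the second update after the load appends ten more ones, so the restored normalizer must accumulate across the checkpoint boundary rather than restart.

import numpy as np

first_batch = np.array([0.0] * 3 + [1.0] * 3)         # 3 zeroed obs + 3 ones
print(first_batch.mean(), first_batch.var())           # 0.5, 0.25

all_obs = np.concatenate([first_batch, np.ones(10)])   # 10 more ones after loading
print(all_obs.mean(), all_obs.var())                    # 0.8125, ~0.152

Those are exactly the mean and variance/steps values the test asserts before and after the save/load round trip; if the normalizer were re-initialized on load, steps would be 10 rather than 16 and the mean would be 1.0.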