diff --git a/ml-agents/mlagents/trainers/tests/torch/test_decoders.py b/ml-agents/mlagents/trainers/tests/torch/test_decoders.py
index aa417edd05..afb86e7876 100644
--- a/ml-agents/mlagents/trainers/tests/torch/test_decoders.py
+++ b/ml-agents/mlagents/trainers/tests/torch/test_decoders.py
@@ -12,7 +12,7 @@ def test_valueheads():
     # Test default 1 value per head
     value_heads = ValueHeads(stream_names, input_size)
     input_data = torch.ones((batch_size, input_size))
-    value_out, _ = value_heads(input_data)  # Note: mean value will be removed shortly
+    value_out = value_heads(input_data)  # Note: mean value will be removed shortly
     for stream_name in stream_names:
         assert value_out[stream_name].shape == (batch_size,)
 
@@ -25,7 +25,7 @@ def test_valueheads():
     output_size = 4
     value_heads = ValueHeads(stream_names, input_size, output_size)
     input_data = torch.ones((batch_size, input_size))
-    value_out, _ = value_heads(input_data)
+    value_out = value_heads(input_data)
     for stream_name in stream_names:
         assert value_out[stream_name].shape == (batch_size, output_size)
 
diff --git a/ml-agents/mlagents/trainers/tests/torch/test_distributions.py b/ml-agents/mlagents/trainers/tests/torch/test_distributions.py
index 6637eb159b..e9ef77529c 100644
--- a/ml-agents/mlagents/trainers/tests/torch/test_distributions.py
+++ b/ml-agents/mlagents/trainers/tests/torch/test_distributions.py
@@ -112,7 +112,7 @@ def test_gaussian_dist_instance():
 def test_tanh_gaussian_dist_instance():
     torch.manual_seed(0)
     act_size = 4
-    dist_instance = GaussianDistInstance(
+    dist_instance = TanhGaussianDistInstance(
         torch.zeros(1, act_size), torch.ones(1, act_size)
     )
     for _ in range(10):
diff --git a/ml-agents/mlagents/trainers/tests/torch/test_utils.py b/ml-agents/mlagents/trainers/tests/torch/test_utils.py
index 25c7a6c05e..b9a58c4617 100644
--- a/ml-agents/mlagents/trainers/tests/torch/test_utils.py
+++ b/ml-agents/mlagents/trainers/tests/torch/test_utils.py
@@ -22,7 +22,7 @@ def test_min_visual_size():
     for encoder_type in EncoderType:
         good_size = ModelUtils.MIN_RESOLUTION_FOR_ENCODER[encoder_type]
         vis_input = torch.ones((1, 3, good_size, good_size))
-        ModelUtils._check_resolution_for_encoder(vis_input, encoder_type)
+        ModelUtils._check_resolution_for_encoder(good_size, good_size, encoder_type)
         enc_func = ModelUtils.get_encoder_for_type(encoder_type)
         enc = enc_func(good_size, good_size, 3, 1)
         enc.forward(vis_input)
@@ -34,7 +34,9 @@ def test_min_visual_size():
 
             with pytest.raises(UnityTrainerException):
                 # Make sure we'd hit a friendly error during model setup time.
-                ModelUtils._check_resolution_for_encoder(vis_input, encoder_type)
+                ModelUtils._check_resolution_for_encoder(
+                    bad_size, bad_size, encoder_type
+                )
 
             enc = enc_func(bad_size, bad_size, 3, 1)
             enc.forward(vis_input)