diff --git a/torch/testing/_internal/distributed/distributed_test.py b/torch/testing/_internal/distributed/distributed_test.py
index ad89c7f318263e..94778200c80815 100644
--- a/torch/testing/_internal/distributed/distributed_test.py
+++ b/torch/testing/_internal/distributed/distributed_test.py
@@ -4480,6 +4480,10 @@ def _test_ddp_hook_with_optimizer_parity(
         BACKEND == "nccl" or BACKEND == "ucc",
         "Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259"
     )
+    @sandcastle_skip_if(
+        BACKEND == "gloo" and HAS_TORCHVISION,
+        "Failing with gloo backend + torchvision due to ongoing issue https://github.com/pytorch/pytorch/issues/111834",
+    )
     @skip_if_lt_x_gpu(2)
     @parametrize("grad_as_bucket_view", [True, False])
     @parametrize("static_graph", [True, False])
@@ -4507,6 +4511,10 @@ def test_ddp_hook_with_optimizer_parity_adamw(
         BACKEND == "nccl" or BACKEND == "ucc",
         "Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259"
     )
+    @sandcastle_skip_if(
+        BACKEND == "gloo" and HAS_TORCHVISION,
+        "Failing with gloo backend + torchvision due to ongoing issue https://github.com/pytorch/pytorch/issues/111834",
+    )
     @skip_if_lt_x_gpu(2)
     @parametrize("optimize_subset", [True, False])
     def test_ddp_hook_with_optimizer_parity_adam(self, optimize_subset):
@@ -4527,6 +4535,10 @@ def test_ddp_hook_with_optimizer_parity_adam(self, optimize_subset):
         BACKEND == "nccl" or BACKEND == "ucc",
         "Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259"
     )
+    @sandcastle_skip_if(
+        BACKEND == "gloo" and HAS_TORCHVISION,
+        "Failing with gloo backend + torchvision due to ongoing issue https://github.com/pytorch/pytorch/issues/111834",
+    )
     @skip_if_lt_x_gpu(2)
     @parametrize("optimize_subset", [True, False])
     def test_ddp_hook_with_optimizer_parity_sgd(self, optimize_subset):