
Commit e86291a

Skip certain unit tests on NAVI (#1950)
This PR skips certain unit tests on NAVI only.

Fixes SWDEV-509011 - test_sac_ilp.py::TestSACILP::test_sac_ilp_case1
Fixes SWDEV-509311 - test_max_autotune.py::TestMaxAutotune::test_non_contiguous_input_addmm
Fixes SWDEV-510738 - test_fsdp_sharded_grad_scaler.py::TestShardedGradScalerParityWithDDP::test_sharded_grad_scaler_found_inf
1 parent cfb673e commit e86291a
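
For context, skipIfRocmArch is a test decorator in torch/testing/_internal/common_utils.py that skips a test when the current ROCm device's gfx architecture appears in the given tuple. Below is a minimal sketch of the pattern, not the actual upstream implementation: it assumes ROCm builds expose the gfx target through torch.cuda.get_device_properties().gcnArchName, and it derives the ROCm flag from torch.version.hip for brevity (the real TEST_WITH_ROCM flag is environment-driven).

import unittest
from functools import wraps

import torch

# Simplification: upstream reads TEST_WITH_ROCM from an environment variable.
TEST_WITH_ROCM = torch.version.hip is not None


def skipIfRocmArch(arch: tuple):
    """Skip the decorated test when the ROCm device's gfx arch is in `arch`."""
    def decorator(fn):
        @wraps(fn)
        def wrapper(self, *args, **kwargs):
            if TEST_WITH_ROCM:
                # gcnArchName looks like "gfx1100:sramecc+:xnack-"; keep the gfx prefix.
                gfx = torch.cuda.get_device_properties(0).gcnArchName.split(":")[0]
                if gfx in arch:
                    raise unittest.SkipTest(f"test skipped on ROCm arch {gfx}")
            return fn(self, *args, **kwargs)
        return wrapper
    return decorator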

File tree

4 files changed (+9, -1 lines):

test/distributed/_tools/test_sac_ilp.py
test/distributed/fsdp/test_fsdp_sharded_grad_scaler.py
test/inductor/test_max_autotune.py
torch/testing/_internal/common_utils.py

test/distributed/_tools/test_sac_ilp.py

Lines changed: 2 additions & 1 deletion

@@ -19,7 +19,7 @@
     sac_milp,
 )
 from torch.testing._internal.common_cuda import TEST_CUDA
-from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo, TestCase
+from torch.testing._internal.common_utils import NAVI_ARCH, run_tests, skipIfTorchDynamo, TestCase, skipIfRocmArch
 from torch.testing._internal.distributed._tensor.common_dtensor import (
     ModelArgs,
     Transformer,
@@ -131,6 +131,7 @@ def _collect_module_info_with_fake_tensor_mode(self) -> ModuleInfo:

     @skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/115653")
     @unittest.skipIf(not TEST_CUDA, "CUDA not available")
+    @skipIfRocmArch(NAVI_ARCH)
     def test_sac_ilp_case1(self):
         """
         This is a case where the memory budget is either binding or too tight,

test/distributed/fsdp/test_fsdp_sharded_grad_scaler.py

Lines changed: 3 additions & 0 deletions

@@ -36,6 +36,8 @@
     run_tests,
     TEST_WITH_DEV_DBG_ASAN,
     TestCase,
+    NAVI_ARCH,
+    skipIfRocmArch,
 )


@@ -236,6 +238,7 @@ def _build_model_and_optim(
         return model, optim, ref_model, ref_optim

     @skip_if_lt_x_gpu(2)
+    @skipIfRocmArch(NAVI_ARCH)
     def test_sharded_grad_scaler_found_inf(self):
         self.run_subtests(
             {

test/inductor/test_max_autotune.py

Lines changed: 3 additions & 0 deletions

@@ -27,6 +27,8 @@
     instantiate_parametrized_tests,
     parametrize,
     TEST_WITH_ROCM,
+    NAVI_ARCH,
+    skipIfRocmArch,
 )
 from torch.utils._triton import has_triton_tma_device

@@ -881,6 +883,7 @@ def f(x, y):
         act = f(x, y)
         torch.testing.assert_close(act, ref, atol=2e-2, rtol=1e-2)

+    @skipIfRocmArch(NAVI_ARCH)
     def test_non_contiguous_input_addmm(self):
         b = torch.randn((768), dtype=torch.bfloat16, device="cuda")
         x = rand_strided(

torch/testing/_internal/common_utils.py

Lines changed: 1 addition & 0 deletions

@@ -108,6 +108,7 @@


 MI300_ARCH = ("gfx940", "gfx941", "gfx942")
+NAVI_ARCH = ("gfx1030", "gfx1100", "gfx1101", "gfx1200", "gfx1201")


 def freeze_rng_state(*args, **kwargs):
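
With NAVI_ARCH defined next to MI300_ARCH, any test can opt out on NAVI parts via @skipIfRocmArch(NAVI_ARCH), as the three test diffs above do. As a quick sanity check, the snippet below prints which bucket the local GPU falls into; it assumes a ROCm build of PyTorch where device properties report the gfx target as gcnArchName.

import torch

NAVI_ARCH = ("gfx1030", "gfx1100", "gfx1101", "gfx1200", "gfx1201")

if torch.cuda.is_available() and torch.version.hip is not None:
    # e.g. "gfx1100:sramecc+:xnack-" -> "gfx1100"
    gfx = torch.cuda.get_device_properties(0).gcnArchName.split(":")[0]
    print(f"{gfx}: {'NAVI' if gfx in NAVI_ARCH else 'not NAVI'}")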
