
Commit ec77129

BLOrange-AMD authored and dnikolaev-amd committed
Skip certain unit tests on NAVI (#1950)
This PR skips certain unit tests on NAVI only.

Fixes SWDEV-509011 - test_sac_ilp.py::TestSACILP::test_sac_ilp_case1
Fixes SWDEV-509311 - test_max_autotune.py::TestMaxAutotune::test_non_contiguous_input_addmm
Fixes SWDEV-510738 - test_fsdp_sharded_grad_scaler.py::TestShardedGradScalerParityWithDDP::test_sharded_grad_scaler_found_inf

(cherry picked from commit e86291a)
1 parent 6a1e26a commit ec77129
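For context on how a machine is identified as NAVI: on ROCm builds of PyTorch, the device properties expose the AMD GPU's gcnArchName string, and the prefix before the first ":" is what arch tuples like NAVI_ARCH are matched against. A minimal sketch for checking the local GPU, assuming a ROCm build recent enough to expose gcnArchName:

import torch

# On ROCm builds, gcnArchName reports strings such as
# "gfx1100:sramecc+:xnack-"; the part before the first ":"
# is the architecture name matched against tuples like NAVI_ARCH.
if torch.cuda.is_available():
    arch = torch.cuda.get_device_properties(0).gcnArchName.split(":")[0]
    print(f"Detected GPU architecture: {arch}")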

File tree

4 files changed: +10 −1 lines changed

test/distributed/_tools/test_sac_ilp.py

Lines changed: 2 additions & 0 deletions
@@ -20,6 +20,7 @@
 from torch.testing._internal.common_cuda import TEST_CUDA
 from torch.testing._internal.common_utils import (
     MI300_ARCH,
+    NAVI_ARCH,
     run_tests,
     skipIfRocmArch,
     skipIfTorchDynamo,
@@ -137,6 +138,7 @@ def _collect_module_info_with_fake_tensor_mode(self) -> ModuleInfo:
     @skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/115653")
     @unittest.skipIf(not TEST_CUDA, "CUDA not available")
     @skipIfRocmArch(MI300_ARCH)
+    @skipIfRocmArch(NAVI_ARCH)
     def test_sac_ilp_case1(self):
         """
         This is a case where the memory budget is either binding or too tight,

test/distributed/fsdp/test_fsdp_sharded_grad_scaler.py

Lines changed: 3 additions & 0 deletions
@@ -36,6 +36,8 @@
     run_tests,
     TEST_WITH_DEV_DBG_ASAN,
     TestCase,
+    NAVI_ARCH,
+    skipIfRocmArch,
 )


@@ -236,6 +238,7 @@ def _build_model_and_optim(
         return model, optim, ref_model, ref_optim

     @skip_if_lt_x_gpu(2)
+    @skipIfRocmArch(NAVI_ARCH)
     def test_sharded_grad_scaler_found_inf(self):
         self.run_subtests(
             {

test/inductor/test_max_autotune.py

Lines changed: 3 additions & 0 deletions
@@ -34,6 +34,8 @@
     IS_WINDOWS,
     parametrize,
     TEST_WITH_ROCM,
+    NAVI_ARCH,
+    skipIfRocmArch,
 )
 from torch.utils._triton import has_triton_tma_device

@@ -1000,6 +1002,7 @@ def f(x, y):
         act = f(x, y)
         torch.testing.assert_close(act, ref, atol=2e-2, rtol=1e-2)

+    @skipIfRocmArch(NAVI_ARCH)
     def test_non_contiguous_input_addmm(self):
         b = torch.randn((768), dtype=torch.bfloat16, device=GPU_TYPE)
         x = rand_strided(

torch/testing/_internal/common_utils.py

Lines changed: 2 additions & 1 deletion
@@ -102,7 +102,8 @@
 has_pytest = False


-MI300_ARCH = ("gfx942",)
+MI300_ARCH = ("gfx940", "gfx941", "gfx942")
+NAVI_ARCH = ("gfx1030", "gfx1100", "gfx1101", "gfx1200", "gfx1201")


 def freeze_rng_state(*args, **kwargs):
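The NAVI_ARCH tuple added here is consumed by the skipIfRocmArch decorator applied in the test diffs above. As a rough sketch of that decorator pattern (a hypothetical stand-in, not the verbatim PyTorch implementation), assuming the gcnArchName-based arch check described earlier:

import unittest
from functools import wraps

import torch
from torch.testing._internal.common_utils import TEST_WITH_ROCM

def skip_if_rocm_arch(arch):
    """Hypothetical stand-in for skipIfRocmArch: skip a test when
    running on ROCm and the GPU's architecture is listed in `arch`."""
    def decorator(fn):
        @wraps(fn)
        def wrapper(self, *args, **kwargs):
            if TEST_WITH_ROCM:
                prop = torch.cuda.get_device_properties(0)
                # Compare only the arch prefix, e.g. "gfx1100"
                if prop.gcnArchName.split(":")[0] in arch:
                    raise unittest.SkipTest(f"test skipped on ROCm arch {arch}")
            return fn(self, *args, **kwargs)
        return wrapper
    return decorator

Stacking two such decorators, as test_sac_ilp.py does with MI300_ARCH and NAVI_ARCH, skips the test when the GPU matches either tuple.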
