Commit 49459b0

[Navi] [Inductor] Unskip Navi inductor UTs (#1514)
Relates to https://ontrack-internal.amd.com/browse/SWDEV-461590
1 parent: bddc29d
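
In short, this commit removes the Navi-only exclusions: the @skipIfRocmArch(NAVI_ARCH) decorators in test_cuda_repro.py and test_torchinductor.py, and the is_navi_arch() skip table in test_torchinductor_opinfo.py, so the affected tests run again on Navi GPUs. For readers unfamiliar with the decorator, the snippet below is a minimal illustrative sketch of how an architecture-conditional skip is commonly written; the NAVI_ARCH contents and the arch-detection call are assumptions here, not the helper actually used by the test suite.

# Illustrative sketch only -- not the helper removed in this commit.
import functools
import unittest

import torch

# Hypothetical arch list; the real NAVI_ARCH may name different gfx targets.
NAVI_ARCH = ("gfx1100", "gfx1101", "gfx1102")


def skipIfRocmArch(arch_list):
    """Skip the decorated test when the active ROCm GPU matches one of arch_list."""

    def decorator(test_fn):
        @functools.wraps(test_fn)
        def wrapper(self, *args, **kwargs):
            if torch.version.hip is not None and torch.cuda.is_available():
                # ROCm device properties expose a gcnArchName such as
                # "gfx1100:sramecc+:xnack-"; keep only the arch prefix.
                arch = torch.cuda.get_device_properties(0).gcnArchName.split(":")[0]
                if arch in arch_list:
                    raise unittest.SkipTest(f"test skipped on ROCm arch {arch}")
            return test_fn(self, *args, **kwargs)

        return wrapper

    return decorator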

File tree

3 files changed: +0 lines, -36 lines

test/inductor/test_cuda_repro.py

Lines changed: 0 additions & 3 deletions
@@ -333,7 +333,6 @@ def foo(x):
         out_ref.add_(2)
         # self.assertEqual(out_ref, out)

-    @skipIfRocmArch(NAVI_ARCH)
     def test_accuracy_issue1(self):
         class Repro(torch.nn.Module):
             def __init__(self) -> None:
@@ -370,7 +369,6 @@ def forward(self, start_positions: torch.Tensor, x: torch.Tensor):
         assert same_two_models(mod, opt_mod, args), "Dynamo failed"

     @config.patch(allow_buffer_reuse=False)
-    @skipIfRocmArch(NAVI_ARCH)
     def test_issue103461(self):
         def forward(add_1):
             var_mean = torch.ops.aten.var_mean.correction(
@@ -869,7 +867,6 @@ def forward(self, x):
         res2 = jit_func(x)
         self.assertEqual(res1, res2)

-    @skipIfRocmArch(NAVI_ARCH)
     def test_issue103481(self):
         def fn(x, y):
             # NOTE: 6 dimensions is important! does not fail for 5 dimensions
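
The tests unskipped in this file follow the pattern visible in the context lines above: build an eager module, compile it with Dynamo/Inductor, and assert both agree via same_two_models(mod, opt_mod, args). Below is a simplified sketch of that comparison under the assumption of a single tensor output and fixed tolerances; it is not the actual torch._dynamo testing helper.

# Simplified stand-in for a same_two_models-style check (assumptions noted above).
import torch


def models_agree(eager_mod, compiled_mod, args, atol=1e-4, rtol=1e-4):
    """Run both modules on the same inputs and report whether the outputs match."""
    expected = eager_mod(*args)
    actual = compiled_mod(*args)
    try:
        torch.testing.assert_close(actual, expected, atol=atol, rtol=rtol)
        return True
    except AssertionError:
        return False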

test/inductor/test_torchinductor.py

Lines changed: 0 additions & 19 deletions
@@ -1804,7 +1804,6 @@ def fn(x):
         # make sure things also work if they aren't unrolled
         self.common(fn, (torch.randn(8, 3),))

-    @skipIfRocmArch(NAVI_ARCH)
     def test_multilayer_sum_low_prec(self):
         # fp16 nyi for cpu
         if self.device == "cpu":
@@ -1815,7 +1814,6 @@ def fn(a):

         self.common(fn, ((torch.rand((10, 3, 352, 352), dtype=torch.float16),)))

-    @skipIfRocmArch(NAVI_ARCH)
     def test_multilayer_prime_size(self):
         def fn(a):
             return torch.max(a), torch.sum(a)
@@ -1827,7 +1825,6 @@ def fn(a):

     @skip_if_gpu_halide
     @skipCPUIf(IS_MACOS, "fails on macos")
-    @skipIfRocmArch(NAVI_ARCH)
     def test_multilayer_var(self):
         def fn(a):
             return torch.var(a)
@@ -2940,7 +2937,6 @@ def fn(a, b):
         self.common(fn, (torch.randn(8, 8), torch.randn(8, 8)))

     @skip_if_halide  # only 32-bit indexing
-    @skipIfRocmArch(NAVI_ARCH)
     def test_large_tensor_reduction(self):
         if not _has_sufficient_memory(self.device, 4.5 * 1024**3):  # 4.5 GiB
             raise unittest.SkipTest("insufficient memory")
@@ -2962,7 +2958,6 @@ def fn(a):
         self.assertEqual(actual, expect)

     @skip_if_gpu_halide  # only 32-bit indexing
-    @skipIfRocmArch(NAVI_ARCH)
     def test_large_broadcast_reduction(self):
         if self.device == "cpu":
             raise unittest.SkipTest("Fails on CPU")
@@ -4113,7 +4108,6 @@ def test_conv2d_channels_last(self):
             check_lowp=False,
         )

-    @skipIfRocmArch(NAVI_ARCH)
     def test_conv2d_backward_channels_last(self):
         def fn(grad_output, inp, weight):
             convolution_backward_8 = torch.ops.aten.convolution_backward.default(
@@ -4899,7 +4893,6 @@ def fn(x, y):
         self.assertEqual(c.stride()[2], 1)

     @skip_if_gpu_halide
-    @skipIfRocmArch(NAVI_ARCH)
     def test_std(self):
         def fn(x):
             return (
@@ -4942,7 +4935,6 @@ def test_batch_norm_2d(self):

     # From yolov3
     @with_tf32_off
-    @skipIfRocmArch(NAVI_ARCH)
     def test_batch_norm_2d_2(self):
         if self.device == "cpu":
             raise unittest.SkipTest(f"requires {GPU_TYPE}")
@@ -5090,7 +5082,6 @@ def fn(dist, angle):
         self.common(fn, (*inp,))

     @skip_if_gpu_halide  # incorrect result on CUDA
-    @skipIfRocmArch(NAVI_ARCH)
     def test_cauchy(self):
         def fn(x, y):
             return torch.sum(1 / (torch.unsqueeze(x, -1) - y))
@@ -6491,7 +6482,6 @@ def fn(a):
         y = fn_compiled(x)
         self.assertTrue(y is not x)

-    @skipIfRocmArch(NAVI_ARCH)
     def test_l1_loss(self):
         def fn(a, b):
             return torch.nn.functional.l1_loss(a, b), torch.nn.functional.mse_loss(a, b)
@@ -6899,7 +6889,6 @@ def fn(x):
             fn, (torch.tensor([1, float("inf"), 2, float("-inf"), float("nan")]),)
         )

-    @skipIfRocmArch(NAVI_ARCH)
     def test_any(self):
         def fn(x):
             return (
@@ -7652,7 +7641,6 @@ def fn(a, dim, index, b, reduce):

     @skip_if_gpu_halide
     # issue #1150
-    @skipIfRocmArch(NAVI_ARCH)
     def test_dense_mask_index(self):
         r"""
         There will be a little difference for reduce order between aten and inductor
@@ -8662,7 +8650,6 @@ def fn(a, b):
         b = torch.rand(2, 2, 1, 4, 1).int()
         self.common(fn, (a, b))

-    @skipIfRocmArch(NAVI_ARCH)
     def test_argmax_argmin1(self):
         def fn(x):
             return (aten.argmax(x), aten.argmin(x))
@@ -8674,7 +8661,6 @@ def fn(x):
             ],
         )

-    @skipIfRocmArch(NAVI_ARCH)
     def test_argmax_argmin2(self):
         def fn(x):
             return (
@@ -8686,7 +8672,6 @@ def fn(x):

         self.common(fn, (torch.randn([144, 144]),))

-    @skipIfRocmArch(NAVI_ARCH)
     def test_argmax_argmin_with_duplicates(self):
         def fn(x):
             return (
@@ -8709,7 +8694,6 @@ def fn(x):
         self.common(fn, (t1,))

     @skip_if_halide  # nan behavior
-    @skipIfRocmArch(NAVI_ARCH)
     def test_argmax_argmin_with_nan(self):
         def fn(x):
             return (
@@ -8833,7 +8817,6 @@ def fn(x):
             ],
         )

-    @skipIfRocmArch(NAVI_ARCH)
     def test_tmp_not_defined_issue1(self):
         def forward(
             primals_3,
@@ -9234,7 +9217,6 @@ def __torch_dispatch__(self, func, types, args=(), kwargs=None):
         else:
             self.assertEqual(len(inps), 0)

-    @skipIfRocmArch(NAVI_ARCH)
     def test_dtype_mismatch_issue(self):
         def fn(x):
             attn = torch.nn.functional.pad(x, [0, 1])
@@ -12235,7 +12217,6 @@ def test_rnn_compile_safe(self):

 class NanCheckerTest(TestCase):
     @config.patch("nan_asserts", True)
-    @skipIfRocmArch(NAVI_ARCH)
     def test_nan_checker_pass(self):
         def f(x):
             return torch.softmax(x, dim=-1)
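
Most of the tests unskipped above go through the suite's self.common(fn, args) helper. As a rough mental model only (the real helper also exercises low-precision variants, checks strides, and inspects generated code), common runs the function eagerly and through the Inductor backend and compares the results; the sketch below is a simplified stand-in, not the actual implementation.

# Minimal sketch of the eager-vs-Inductor comparison these tests rely on.
import torch


def check_eager_vs_inductor(fn, args, atol=1e-4, rtol=1e-4):
    """Compile fn with the Inductor backend and compare it against eager execution."""
    expected = fn(*args)
    compiled_fn = torch.compile(fn, backend="inductor")
    actual = compiled_fn(*args)
    torch.testing.assert_close(actual, expected, atol=atol, rtol=rtol)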

test/inductor/test_torchinductor_opinfo.py

Lines changed: 0 additions & 14 deletions
@@ -32,7 +32,6 @@
     dtype_abbrs,
     IS_MACOS,
     IS_X86,
-    is_navi_arch,
     skipCUDAMemoryLeakCheckIf,
     skipIfCrossRef,
     skipIfTorchDynamo,
@@ -204,19 +203,6 @@ def format_op(op):
 # Tensors are not alike
 inductor_skips["cuda"]["logcumsumexp"] = {f32}
 inductor_skips["cuda"]["special.modified_bessel_i1"] = {f64}
-if is_navi_arch():
-    inductor_skips["cuda"]["aminmax"] = {b8, f16, f32, f64, i32, i64}
-    inductor_skips["cuda"]["dist"] = {b8, f16, f32, f64, i32, i64}
-    inductor_skips["cuda"]["kron"] = {b8, f16, f32, f64, i32, i64}
-    inductor_skips["cuda"]["masked.std"] = {b8, f16, f32, f64, i32, i64}
-    inductor_skips["cuda"]["masked.var"] = {b8, f16, f32, f64, i32, i64}
-    inductor_skips["cuda"][("max", "reduction_no_dim")] = {b8, f16, f32, f64, i32, i64}
-    inductor_skips["cuda"][("min", "reduction_no_dim")] = {b8, f16, f32, f64, i32, i64}
-    inductor_skips["cuda"]["nn.functional.conv_transpose3d"] = {b8, f16, f32, f64, i32, i64}
-    inductor_skips["cuda"]["std"] = {b8, f16, f32, f64, i32, i64}
-    inductor_skips["cuda"]["std_mean"] = {b8, f16, f32, f64, i32, i64}
-    inductor_skips["cuda"]["var"] = {b8, f16, f32, f64, i32, i64}
-    inductor_skips["cuda"]["var_mean"] = {b8, f16, f32, f64, i32, i64}

 inductor_expected_failures_single_sample = defaultdict(dict)

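For context, inductor_skips maps a device string to op names (or (op, variant) tuples) and the dtype sets to skip for that op, so deleting the is_navi_arch() block re-enables those op/dtype combinations on Navi. The sketch below shows how such a skip table is typically consulted before running a sample; maybe_skip is a hypothetical helper, and the real harness in test_torchinductor_opinfo.py carries more bookkeeping.

# Hypothetical consultation of an inductor_skips-style table.
import unittest
from collections import defaultdict

import torch

f32 = torch.float32  # the test file abbreviates dtypes this way (b8, f16, f32, ...)

inductor_skips = defaultdict(dict)
inductor_skips["cuda"]["logcumsumexp"] = {f32}


def maybe_skip(device, op_name, dtype, variant=None):
    """Raise SkipTest when the (op, dtype) pair is listed for this device."""
    key = (op_name, variant) if variant is not None else op_name
    if dtype in inductor_skips[device].get(key, set()):
        raise unittest.SkipTest(f"{op_name} skipped for {dtype} on {device}")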