[Navi] [Inductor] Unskip Navi inductor UTs #1514

Merged
merged 4 commits into from Aug 19, 2024
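
This change removes the @skipIfRocmArch(NAVI_ARCH) decorator from a set of inductor unit tests and drops the Navi-specific opinfo skip list, so these tests run again on Navi GPUs. For context, below is a minimal sketch of what such an architecture-gated skip decorator typically looks like in the ROCm test helpers; the exact implementation and the contents of NAVI_ARCH are assumptions for illustration, not part of the code modified here:

import unittest

import torch

NAVI_ARCH = ("gfx1030", "gfx1100")  # assumed gfx identifiers, for illustration only

def skipIfRocmArch(arch):
    """Skip a test method when running on a matching ROCm GPU architecture."""
    def decorator(fn):
        def wrapper(self, *args, **kwargs):
            if torch.version.hip is not None:  # only applies to ROCm builds
                gcn_arch = torch.cuda.get_device_properties(0).gcnArchName
                if gcn_arch.split(":")[0] in arch:
                    raise unittest.SkipTest(f"skipped on ROCm arch {arch}")
            return fn(self, *args, **kwargs)
        return wrapper
    return decorator
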
3 changes: 0 additions & 3 deletions test/inductor/test_cuda_repro.py
@@ -333,7 +333,6 @@ def foo(x):
         out_ref.add_(2)
         # self.assertEqual(out_ref, out)
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_accuracy_issue1(self):
         class Repro(torch.nn.Module):
             def __init__(self) -> None:
@@ -370,7 +369,6 @@ def forward(self, start_positions: torch.Tensor, x: torch.Tensor):
         assert same_two_models(mod, opt_mod, args), "Dynamo failed"
 
     @config.patch(allow_buffer_reuse=False)
-    @skipIfRocmArch(NAVI_ARCH)
     def test_issue103461(self):
         def forward(add_1):
             var_mean = torch.ops.aten.var_mean.correction(
@@ -869,7 +867,6 @@ def forward(self, x):
         res2 = jit_func(x)
         self.assertEqual(res1, res2)
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_issue103481(self):
         def fn(x, y):
             # NOTE: 6 dimensions is important! does not fail for 5 dimensions
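
With the three decorators above removed, these repro tests now run unconditionally. Assuming a ROCm build on a Navi card, they can be exercised individually with standard unittest filtering, e.g.:

python test/inductor/test_cuda_repro.py -k test_accuracy_issue1
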
19 changes: 0 additions & 19 deletions test/inductor/test_torchinductor.py
@@ -1804,7 +1804,6 @@ def fn(x):
         # make sure things also work if they aren't unrolled
         self.common(fn, (torch.randn(8, 3),))
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_multilayer_sum_low_prec(self):
         # fp16 nyi for cpu
         if self.device == "cpu":
@@ -1815,7 +1814,6 @@ def fn(a):
 
         self.common(fn, ((torch.rand((10, 3, 352, 352), dtype=torch.float16),)))
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_multilayer_prime_size(self):
         def fn(a):
             return torch.max(a), torch.sum(a)
@@ -1827,7 +1825,6 @@ def fn(a):
 
     @skip_if_gpu_halide
     @skipCPUIf(IS_MACOS, "fails on macos")
-    @skipIfRocmArch(NAVI_ARCH)
     def test_multilayer_var(self):
         def fn(a):
             return torch.var(a)
@@ -2940,7 +2937,6 @@ def fn(a, b):
         self.common(fn, (torch.randn(8, 8), torch.randn(8, 8)))
 
     @skip_if_halide  # only 32-bit indexing
-    @skipIfRocmArch(NAVI_ARCH)
     def test_large_tensor_reduction(self):
         if not _has_sufficient_memory(self.device, 4.5 * 1024**3):  # 4.5 GiB
             raise unittest.SkipTest("insufficient memory")
@@ -2962,7 +2958,6 @@ def fn(a):
         self.assertEqual(actual, expect)
 
     @skip_if_gpu_halide  # only 32-bit indexing
-    @skipIfRocmArch(NAVI_ARCH)
     def test_large_broadcast_reduction(self):
         if self.device == "cpu":
             raise unittest.SkipTest("Fails on CPU")
@@ -4113,7 +4108,6 @@ def test_conv2d_channels_last(self):
             check_lowp=False,
         )
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_conv2d_backward_channels_last(self):
         def fn(grad_output, inp, weight):
             convolution_backward_8 = torch.ops.aten.convolution_backward.default(
@@ -4899,7 +4893,6 @@ def fn(x, y):
         self.assertEqual(c.stride()[2], 1)
 
     @skip_if_gpu_halide
-    @skipIfRocmArch(NAVI_ARCH)
     def test_std(self):
         def fn(x):
             return (
@@ -4942,7 +4935,6 @@ def test_batch_norm_2d(self):
 
     # From yolov3
     @with_tf32_off
-    @skipIfRocmArch(NAVI_ARCH)
     def test_batch_norm_2d_2(self):
         if self.device == "cpu":
             raise unittest.SkipTest(f"requires {GPU_TYPE}")
@@ -5090,7 +5082,6 @@ def fn(dist, angle):
         self.common(fn, (*inp,))
 
     @skip_if_gpu_halide  # incorrect result on CUDA
-    @skipIfRocmArch(NAVI_ARCH)
     def test_cauchy(self):
         def fn(x, y):
             return torch.sum(1 / (torch.unsqueeze(x, -1) - y))
@@ -6491,7 +6482,6 @@ def fn(a):
         y = fn_compiled(x)
         self.assertTrue(y is not x)
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_l1_loss(self):
         def fn(a, b):
             return torch.nn.functional.l1_loss(a, b), torch.nn.functional.mse_loss(a, b)
@@ -6899,7 +6889,6 @@ def fn(x):
             fn, (torch.tensor([1, float("inf"), 2, float("-inf"), float("nan")]),)
         )
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_any(self):
         def fn(x):
             return (
@@ -7652,7 +7641,6 @@ def fn(a, dim, index, b, reduce):
 
     @skip_if_gpu_halide
     # issue #1150
-    @skipIfRocmArch(NAVI_ARCH)
     def test_dense_mask_index(self):
         r"""
         There will be a little difference for reduce order between aten and inductor
@@ -8662,7 +8650,6 @@ def fn(a, b):
         b = torch.rand(2, 2, 1, 4, 1).int()
         self.common(fn, (a, b))
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_argmax_argmin1(self):
         def fn(x):
             return (aten.argmax(x), aten.argmin(x))
@@ -8674,7 +8661,6 @@ def fn(x):
             ],
         )
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_argmax_argmin2(self):
         def fn(x):
             return (
@@ -8686,7 +8672,6 @@ def fn(x):
 
         self.common(fn, (torch.randn([144, 144]),))
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_argmax_argmin_with_duplicates(self):
         def fn(x):
             return (
@@ -8709,7 +8694,6 @@ def fn(x):
         self.common(fn, (t1,))
 
     @skip_if_halide  # nan behavior
-    @skipIfRocmArch(NAVI_ARCH)
     def test_argmax_argmin_with_nan(self):
         def fn(x):
             return (
@@ -8833,7 +8817,6 @@ def fn(x):
             ],
         )
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_tmp_not_defined_issue1(self):
         def forward(
             primals_3,
@@ -9234,7 +9217,6 @@ def __torch_dispatch__(self, func, types, args=(), kwargs=None):
         else:
             self.assertEqual(len(inps), 0)
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_dtype_mismatch_issue(self):
         def fn(x):
             attn = torch.nn.functional.pad(x, [0, 1])
@@ -12235,7 +12217,6 @@ def test_rnn_compile_safe(self):
 
 class NanCheckerTest(TestCase):
     @config.patch("nan_asserts", True)
-    @skipIfRocmArch(NAVI_ARCH)
     def test_nan_checker_pass(self):
         def f(x):
             return torch.softmax(x, dim=-1)
14 changes: 0 additions & 14 deletions test/inductor/test_torchinductor_opinfo.py
@@ -32,7 +32,6 @@
     dtype_abbrs,
     IS_MACOS,
     IS_X86,
-    is_navi_arch,
     skipCUDAMemoryLeakCheckIf,
     skipIfCrossRef,
     skipIfTorchDynamo,
@@ -204,19 +203,6 @@ def format_op(op):
 # Tensors are not alike
 inductor_skips["cuda"]["logcumsumexp"] = {f32}
 inductor_skips["cuda"]["special.modified_bessel_i1"] = {f64}
-if is_navi_arch():
-    inductor_skips["cuda"]["aminmax"] = {b8, f16, f32, f64, i32, i64}
-    inductor_skips["cuda"]["dist"] = {b8, f16, f32, f64, i32, i64}
-    inductor_skips["cuda"]["kron"] = {b8, f16, f32, f64, i32, i64}
-    inductor_skips["cuda"]["masked.std"] = {b8, f16, f32, f64, i32, i64}
-    inductor_skips["cuda"]["masked.var"] = {b8, f16, f32, f64, i32, i64}
-    inductor_skips["cuda"][("max", "reduction_no_dim")] = {b8, f16, f32, f64, i32, i64}
-    inductor_skips["cuda"][("min", "reduction_no_dim")] = {b8, f16, f32, f64, i32, i64}
-    inductor_skips["cuda"]["nn.functional.conv_transpose3d"] = {b8, f16, f32, f64, i32, i64}
-    inductor_skips["cuda"]["std"] = {b8, f16, f32, f64, i32, i64}
-    inductor_skips["cuda"]["std_mean"] = {b8, f16, f32, f64, i32, i64}
-    inductor_skips["cuda"]["var"] = {b8, f16, f32, f64, i32, i64}
-    inductor_skips["cuda"]["var_mean"] = {b8, f16, f32, f64, i32, i64}
 
 inductor_expected_failures_single_sample = defaultdict(dict)
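
For reference, the inductor_skips table edited above gates (op, dtype) combinations in the opinfo harness. A simplified sketch of how such a table is consumed follows; maybe_skip is an illustrative name, not the harness's actual helper, and the real lookup lives in test_torchinductor_opinfo.py:

import unittest
from collections import defaultdict

import torch

b8, f16, f32 = torch.bool, torch.float16, torch.float32  # dtype shorthands

inductor_skips = defaultdict(dict)
inductor_skips["cuda"]["logcumsumexp"] = {f32}

def maybe_skip(device_type, op_name, dtype, variant_name=""):
    # Entries are keyed by op name alone, or by an (op, variant) pair
    # such as ("max", "reduction_no_dim").
    key = (op_name, variant_name) if variant_name else op_name
    if dtype in inductor_skips[device_type].get(key, set()):
        raise unittest.SkipTest(f"{key} is skipped for {dtype} on {device_type}")

Dropping the is_navi_arch() block means no Navi-only entries remain, so ops like std, var, aminmax, and conv_transpose3d are now exercised on Navi across all previously skipped dtypes.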