@@ -1804,7 +1804,6 @@ def fn(x):
         # make sure things also work if they aren't unrolled
         self.common(fn, (torch.randn(8, 3),))
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_multilayer_sum_low_prec(self):
         # fp16 nyi for cpu
         if self.device == "cpu":
@@ -1815,7 +1814,6 @@ def fn(a):
 
         self.common(fn, ((torch.rand((10, 3, 352, 352), dtype=torch.float16),)))
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_multilayer_prime_size(self):
         def fn(a):
             return torch.max(a), torch.sum(a)
@@ -1827,7 +1825,6 @@ def fn(a):
 
     @skip_if_gpu_halide
     @skipCPUIf(IS_MACOS, "fails on macos")
-    @skipIfRocmArch(NAVI_ARCH)
     def test_multilayer_var(self):
         def fn(a):
             return torch.var(a)
@@ -2940,7 +2937,6 @@ def fn(a, b):
         self.common(fn, (torch.randn(8, 8), torch.randn(8, 8)))
 
     @skip_if_halide  # only 32-bit indexing
-    @skipIfRocmArch(NAVI_ARCH)
     def test_large_tensor_reduction(self):
         if not _has_sufficient_memory(self.device, 4.5 * 1024**3):  # 4.5 GiB
             raise unittest.SkipTest("insufficient memory")
@@ -2962,7 +2958,6 @@ def fn(a):
         self.assertEqual(actual, expect)
 
     @skip_if_gpu_halide  # only 32-bit indexing
-    @skipIfRocmArch(NAVI_ARCH)
     def test_large_broadcast_reduction(self):
         if self.device == "cpu":
             raise unittest.SkipTest("Fails on CPU")
@@ -4113,7 +4108,6 @@ def test_conv2d_channels_last(self):
             check_lowp=False,
         )
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_conv2d_backward_channels_last(self):
         def fn(grad_output, inp, weight):
             convolution_backward_8 = torch.ops.aten.convolution_backward.default(
@@ -4899,7 +4893,6 @@ def fn(x, y):
         self.assertEqual(c.stride()[2], 1)
 
     @skip_if_gpu_halide
-    @skipIfRocmArch(NAVI_ARCH)
     def test_std(self):
         def fn(x):
             return (
@@ -4942,7 +4935,6 @@ def test_batch_norm_2d(self):
 
     # From yolov3
     @with_tf32_off
-    @skipIfRocmArch(NAVI_ARCH)
     def test_batch_norm_2d_2(self):
         if self.device == "cpu":
             raise unittest.SkipTest(f"requires {GPU_TYPE}")
@@ -5090,7 +5082,6 @@ def fn(dist, angle):
         self.common(fn, (*inp,))
 
     @skip_if_gpu_halide  # incorrect result on CUDA
-    @skipIfRocmArch(NAVI_ARCH)
     def test_cauchy(self):
         def fn(x, y):
             return torch.sum(1 / (torch.unsqueeze(x, -1) - y))
@@ -6491,7 +6482,6 @@ def fn(a):
         y = fn_compiled(x)
         self.assertTrue(y is not x)
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_l1_loss(self):
         def fn(a, b):
             return torch.nn.functional.l1_loss(a, b), torch.nn.functional.mse_loss(a, b)
@@ -6899,7 +6889,6 @@ def fn(x):
             fn, (torch.tensor([1, float("inf"), 2, float("-inf"), float("nan")]),)
         )
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_any(self):
         def fn(x):
             return (
@@ -7652,7 +7641,6 @@ def fn(a, dim, index, b, reduce):
 
     @skip_if_gpu_halide
     # issue #1150
-    @skipIfRocmArch(NAVI_ARCH)
     def test_dense_mask_index(self):
         r"""
         There will be a little difference for reduce order between aten and inductor
@@ -8662,7 +8650,6 @@ def fn(a, b):
         b = torch.rand(2, 2, 1, 4, 1).int()
         self.common(fn, (a, b))
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_argmax_argmin1(self):
         def fn(x):
             return (aten.argmax(x), aten.argmin(x))
@@ -8674,7 +8661,6 @@ def fn(x):
             ],
         )
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_argmax_argmin2(self):
         def fn(x):
             return (
@@ -8686,7 +8672,6 @@ def fn(x):
 
         self.common(fn, (torch.randn([144, 144]),))
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_argmax_argmin_with_duplicates(self):
         def fn(x):
             return (
@@ -8709,7 +8694,6 @@ def fn(x):
         self.common(fn, (t1,))
 
     @skip_if_halide  # nan behavior
-    @skipIfRocmArch(NAVI_ARCH)
     def test_argmax_argmin_with_nan(self):
         def fn(x):
             return (
@@ -8833,7 +8817,6 @@ def fn(x):
             ],
         )
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_tmp_not_defined_issue1(self):
         def forward(
             primals_3,
@@ -9234,7 +9217,6 @@ def __torch_dispatch__(self, func, types, args=(), kwargs=None):
         else:
             self.assertEqual(len(inps), 0)
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_dtype_mismatch_issue(self):
         def fn(x):
             attn = torch.nn.functional.pad(x, [0, 1])
@@ -12235,7 +12217,6 @@ def test_rnn_compile_safe(self):
 
 class NanCheckerTest(TestCase):
     @config.patch("nan_asserts", True)
-    @skipIfRocmArch(NAVI_ARCH)
     def test_nan_checker_pass(self):
         def f(x):
             return torch.softmax(x, dim=-1)