@@ -1801,7 +1801,6 @@ def fn(x):
         # make sure things also work if they aren't unrolled
         self.common(fn, (torch.randn(8, 3),))
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_multilayer_sum_low_prec(self):
         # fp16 nyi for cpu
         if self.device == "cpu":
@@ -1812,7 +1811,6 @@ def fn(a):
 
         self.common(fn, ((torch.rand((10, 3, 352, 352), dtype=torch.float16),)))
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_multilayer_prime_size(self):
         def fn(a):
             return torch.max(a), torch.sum(a)
@@ -1824,7 +1822,6 @@ def fn(a):
 
     @skip_if_gpu_halide
     @skipCPUIf(IS_MACOS, "fails on macos")
-    @skipIfRocmArch(NAVI_ARCH)
     def test_multilayer_var(self):
         def fn(a):
             return torch.var(a)
@@ -2976,7 +2973,6 @@ def fn(a, b):
         self.common(fn, (torch.randn(8, 8), torch.randn(8, 8)))
 
     @skip_if_halide  # only 32-bit indexing
-    @skipIfRocmArch(NAVI_ARCH)
     def test_large_tensor_reduction(self):
         if not _has_sufficient_memory(self.device, 4.5 * 1024**3):  # 4.5 GiB
             raise unittest.SkipTest("insufficient memory")
@@ -2998,7 +2994,6 @@ def fn(a):
         self.assertEqual(actual, expect)
 
     @skip_if_gpu_halide  # only 32-bit indexing
-    @skipIfRocmArch(NAVI_ARCH)
     def test_large_broadcast_reduction(self):
         if self.device == "cpu":
             raise unittest.SkipTest("Fails on CPU")
@@ -4160,7 +4155,6 @@ def test_conv2d_channels_last(self):
             check_lowp=False,
         )
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_conv2d_backward_channels_last(self):
         def fn(grad_output, inp, weight):
             convolution_backward_8 = torch.ops.aten.convolution_backward.default(
@@ -4945,7 +4939,6 @@ def fn(x, y):
         self.assertEqual(c.stride()[2], 1)
 
     @skip_if_gpu_halide
-    @skipIfRocmArch(NAVI_ARCH)
     def test_std(self):
         def fn(x):
             return (
@@ -4988,7 +4981,6 @@ def test_batch_norm_2d(self):
 
     # From yolov3
     @with_tf32_off
-    @skipIfRocmArch(NAVI_ARCH)
     def test_batch_norm_2d_2(self):
         if self.device == "cpu":
             raise unittest.SkipTest(f"requires {GPU_TYPE}")
@@ -5135,7 +5127,6 @@ def fn(dist, angle):
         self.common(fn, (*inp,))
 
     @skip_if_gpu_halide  # incorrect result on CUDA
-    @skipIfRocmArch(NAVI_ARCH)
     def test_cauchy(self):
         def fn(x, y):
             return torch.sum(1 / (torch.unsqueeze(x, -1) - y))
@@ -6536,7 +6527,6 @@ def fn(a):
         y = fn_compiled(x)
         self.assertTrue(y is not x)
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_l1_loss(self):
         def fn(a, b):
             return torch.nn.functional.l1_loss(a, b), torch.nn.functional.mse_loss(a, b)
@@ -6939,7 +6929,6 @@ def fn(x):
             fn, (torch.tensor([1, float("inf"), 2, float("-inf"), float("nan")]),)
         )
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_any(self):
         def fn(x):
             return (
@@ -7707,7 +7696,6 @@ def fn(a, dim, index, b, reduce):
 
     @skip_if_gpu_halide
     # issue #1150
-    @skipIfRocmArch(NAVI_ARCH)
     def test_dense_mask_index(self):
         r"""
         There will be a little difference for reduce order between aten and inductor
@@ -8715,7 +8703,6 @@ def fn(a, b):
         b = torch.rand(2, 2, 1, 4, 1).int()
         self.common(fn, (a, b))
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_argmax_argmin1(self):
         def fn(x):
             return (aten.argmax(x), aten.argmin(x))
@@ -8727,7 +8714,6 @@ def fn(x):
             ],
         )
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_argmax_argmin2(self):
         def fn(x):
             return (
@@ -8739,7 +8725,6 @@ def fn(x):
 
         self.common(fn, (torch.randn([144, 144]),))
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_argmax_argmin_with_duplicates(self):
         def fn(x):
             return (
@@ -8762,7 +8747,6 @@ def fn(x):
         self.common(fn, (t1,))
 
     @skip_if_halide  # nan behavior
-    @skipIfRocmArch(NAVI_ARCH)
     def test_argmax_argmin_with_nan(self):
         def fn(x):
             return (
@@ -8886,7 +8870,6 @@ def fn(x):
             ],
         )
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_tmp_not_defined_issue1(self):
         def forward(
             primals_3,
@@ -9286,7 +9269,6 @@ def __torch_dispatch__(self, func, types, args=(), kwargs=None):
         else:
             self.assertEqual(len(inps), 0)
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_dtype_mismatch_issue(self):
         def fn(x):
             attn = torch.nn.functional.pad(x, [0, 1])
@@ -12377,7 +12359,6 @@ def test_rnn_compile_safe(self):
 
 class NanCheckerTest(TestCase):
     @config.patch("nan_asserts", True)
-    @skipIfRocmArch(NAVI_ARCH)
     def test_nan_checker_pass(self):
         def f(x):
             return torch.softmax(x, dim=-1)
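
For readers unfamiliar with the decorator this diff deletes: @skipIfRocmArch conditionally skips a test when it runs on a matching ROCm GPU architecture. Below is a minimal sketch of how such a conditional-skip decorator can be written; it is an illustration under assumptions, not PyTorch's actual helper, and the NAVI_ARCH values and the gcnArchName lookup are assumed for the example.

import unittest

import torch

# Assumed gfx arch names for Navi parts; illustrative only.
NAVI_ARCH = ("gfx1030", "gfx1100")


def skipIfRocmArch(archs):
    """Skip the wrapped test when running on one of the given ROCm archs (sketch)."""

    def decorator(fn):
        def wrapper(self, *args, **kwargs):
            # torch.version.hip is non-None only on ROCm builds of PyTorch.
            if torch.version.hip is not None:
                # gcnArchName can look like "gfx90a:sramecc+:xnack-"; keep the base name.
                arch = torch.cuda.get_device_properties(0).gcnArchName.split(":")[0]
                if arch in archs:
                    raise unittest.SkipTest(f"test skipped on ROCm arch {arch}")
            return fn(self, *args, **kwargs)

        return wrapper

    return decorator

Removing the decorator, as this diff does for each test above, simply re-enables those tests on the previously excluded architectures.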