@@ -872,6 +872,7 @@ def test_multidim(x, dim):
             expected = fn(y, 1, keepdim=False)
             self.assertEqual(x[:, 1], expected, '{} with out= kwarg'.format(fn_name))
 
+    @skipIfRocm
     def test_dim_reduction(self):
         self._test_dim_reduction(self, lambda t: t)
 
@@ -938,6 +939,7 @@ def test_reduction_empty(self):
             self.assertEqual(torch.ones((2, 1, 4), device=device), xb.all(1, keepdim=True))
             self.assertEqual(torch.ones((), device=device), xb.all())
 
+    @skipIfRocm
     def test_pairwise_distance_empty(self):
         devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
         for device in devices:
@@ -2110,6 +2112,7 @@ def get_int64_dtype(dtype):
                                     dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
                         int64_dtype, layout, device, fv + 5, False)
 
+    @skipIfRocm
     def test_empty_full(self):
         self._test_empty_full(self, torch.testing.get_all_dtypes(), torch.strided, torch.device('cpu'))
         if torch.cuda.device_count() > 0:
@@ -2248,6 +2251,7 @@ def test_tensor_factory_cuda_type(self):
         self.assertTrue(x.is_cuda)
         torch.set_default_tensor_type(saved_type)
 
+    @skipIfRocm
     def test_tensor_factories_empty(self):
         # ensure we can create empty tensors from each factory function
         shapes = [(5, 0, 1), (0,), (0, 0, 1, 0, 2, 0, 0)]
@@ -3184,6 +3188,7 @@ def check_order(a, b):
                 seen.add(ixx[k][j])
             self.assertEqual(len(seen), size)
 
+    @skipIfRocm
     def test_sort(self):
         SIZE = 4
         x = torch.rand(SIZE, SIZE)
@@ -3297,6 +3302,7 @@ def test_topk_noncontiguous_gpu(self):
         self.assertEqual(top1, top2)
         self.assertEqual(idx1, idx2)
 
+    @skipIfRocm
     def test_kthvalue(self):
         SIZE = 50
         x = torch.rand(SIZE, SIZE, SIZE)
@@ -3341,6 +3347,7 @@ def test_kthvalue(self):
         self.assertEqual(torch.kthvalue(y, 3)[0], 3, 0)
         self.assertEqual(torch.kthvalue(y, 2)[0], 1, 0)
 
+    @skipIfRocm
     def test_median(self):
         for size in (155, 156):
             x = torch.rand(size, size)
@@ -3376,6 +3383,7 @@ def test_median(self):
             # input unchanged
             self.assertEqual(x, x0, 0)
 
+    @skipIfRocm
     def test_mode(self):
         x = torch.arange(1., SIZE * SIZE + 1).clone().resize_(SIZE, SIZE)
         x[:2] = 1
@@ -3539,6 +3547,7 @@ def test_narrow(self):
         self.assertEqual(x.narrow(-1, -1, 1), torch.Tensor([[2], [5], [8]]))
         self.assertEqual(x.narrow(-2, -1, 1), torch.Tensor([[6, 7, 8]]))
 
+    @skipIfRocm
     def test_narrow_empty(self):
         devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
         for device in devices:
@@ -3829,6 +3838,7 @@ def _test_gesv_batched_dims(self, cast):
         self.assertEqual(x.data, cast(x_exp))
 
     @skipIfNoLapack
+    @skipIfRocm
     def test_gesv_batched_dims(self):
         self._test_gesv_batched_dims(self, lambda t: t)
 
@@ -4177,6 +4187,7 @@ def test_eig(self):
         self.assertEqual(X, Xhat, 1e-8, 'VeV\' wrong')
 
     @skipIfNoLapack
+    @skipIfRocm
     def test_symeig(self):
         xval = torch.rand(100, 3)
         cov = torch.mm(xval.t(), xval)
@@ -4908,6 +4919,7 @@ def tset_potri(self):
         self.assertLessEqual(inv0.dist(inv1), 1e-12)
 
     @skipIfNoLapack
+    @skipIfRocm
     def test_pstrf(self):
         def checkPsdCholesky(a, uplo, inplace):
             if inplace:
@@ -6116,6 +6128,7 @@ def test_empty_reshape(self):
         # match NumPy semantics -- don't infer the size of dimension with a degree of freedom
         self.assertRaises(RuntimeError, lambda: x.reshape(0, -1))
 
+    @skipIfRocm
     def test_tensor_shape_empty(self):
         devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
         for device in devices:
@@ -6304,6 +6317,7 @@ def test_dim_function_empty(self):
             c = torch.randn((0, 1, 2), device=device)
             self.assertEqual(c, c.index_select(0, ind_empty))
 
+    @skipIfRocm
     def test_blas_empty(self):
         devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
         for device in devices:
@@ -6373,6 +6387,7 @@ def fn(torchfn, *args):
             A_LU, pivots = fn(torch.btrifact, (2, 0, 0))
             self.assertEqual([(2, 0, 0), (2, 0)], [A_LU.shape, pivots.shape])
 
+    @skipIfRocm
     def test_blas_alpha_beta_empty(self):
         devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
         for device in devices:
@@ -7719,7 +7734,6 @@ def test_empty_like(self):
         self.assertEqual(torch.empty_like(a).type(), a.type())
 
     @unittest.skipIf(not torch.cuda.is_available(), 'no CUDA')
-    @skipIfRocm
     def test_pin_memory(self):
         x = torch.randn(3, 5)
         self.assertFalse(x.is_pinned())
@@ -7887,7 +7901,6 @@ def test_from_numpy(self):
         self.assertRaises(ValueError, lambda: torch.from_numpy(x))
 
     @unittest.skipIf(not TEST_NUMPY, "Numpy not found")
-    @skipIfRocm
     def test_ctor_with_numpy_array(self):
         dtypes = [
             np.double,
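For reference, the @skipIfRocm decorator added and removed throughout this diff comes from the test suite's shared helpers. Below is a minimal sketch of how such a skip decorator can be implemented; the PYTORCH_TEST_WITH_ROCM flag name and the skip message are assumptions for illustration, not the exact upstream definition.

    import functools
    import os
    import unittest

    # Sketch: skip a test when the suite runs on the ROCm stack.
    # Assumes the ROCm CI job exports PYTORCH_TEST_WITH_ROCM=1 (illustrative flag name).
    TEST_WITH_ROCM = os.environ.get('PYTORCH_TEST_WITH_ROCM', '0') == '1'

    def skipIfRocm(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            if TEST_WITH_ROCM:
                raise unittest.SkipTest("test doesn't currently work on the ROCm stack")
            return fn(*args, **kwargs)
        return wrapper

Because the check happens at call time rather than import time, the decorated tests still show up in the run as skipped instead of silently disappearing, which keeps the ROCm exclusions visible and easy to revert once the underlying kernels are fixed.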