@@ -5,7 +5,7 @@
 import functools
 import random
 import unittest
-from common import TestCase, run_tests
+from common import TestCase, run_tests, skipIfRocm
 from common_cuda import TEST_CUDA
 from test_torch import TestTorch
 from numbers import Number
@@ -107,6 +107,7 @@ def randn(self, *args, **kwargs):
         # TODO: Put this in torch.cuda.randn
         return self.ValueTensor(*args, **kwargs).normal_()

+    @skipIfRocm
     def test_basic(self):
         x, i, v = self._gen_sparse(3, 10, 100)

@@ -155,6 +156,7 @@ def test_ctor_size_checks(self):
             RuntimeError,
             lambda: self.SparseTensor(indices, values, torch.Size([2, 4, 2, 1])))

+    @skipIfRocm
     def test_to_dense(self):
         i = self.IndexTensor([
             [0, 1, 2, 2],
@@ -184,6 +186,7 @@ def test_to_dense(self):
         self.assertEqual(res, x.to_dense())
         self.assertEqual(res, self.safeToDense(x))

+    @skipIfRocm
     def test_shared(self):
         i = self.IndexTensor([[2]])
         v = self.ValueTensor([5])
@@ -193,6 +196,7 @@ def test_shared(self):
         i[0][0] = 0
         self.assertEqual(self.ValueTensor([6, 0, 0]), self.safeToDense(x))

+    @skipIfRocm
     def test_to_dense_hybrid(self):
         i = self.IndexTensor([
             [0, 1, 2, 2],
@@ -221,6 +225,7 @@ def test_to_dense_hybrid(self):
         self.assertEqual(res, x.to_dense())
         self.assertEqual(res, self.safeToDense(x))

+    @skipIfRocm
     def test_contig(self):
         i = self.IndexTensor([
             [1, 0, 35, 14, 39, 6, 71, 66, 40, 27],
@@ -274,6 +279,7 @@ def test_contig(self):
         self.assertEqual(exp_i, x._indices())
         self.assertEqual(exp_v, x._values())

+    @skipIfRocm
     def test_contig_hybrid(self):
         i = self.IndexTensor([
             [1, 0, 35, 14, 39, 6, 71, 66, 40, 27],
@@ -333,6 +339,7 @@ def test_contig_hybrid(self):
         self.assertEqual(exp_i, x._indices())
         self.assertEqual(exp_v, x._values())

+    @skipIfRocm
     def test_clone(self):
         x, _, _ = self._gen_sparse(4, 20, 5)
         if self.is_uncoalesced:
@@ -354,6 +361,7 @@ def test_cuda_empty(self):
         self.assertEqual(y._sparseDims(), x._sparseDims())
         self.assertEqual(y._denseDims(), x._denseDims())

+    @skipIfRocm
     def test_transpose(self):
         x = self._gen_sparse(4, 20, 5)[0]
         y = self.safeToDense(x)
@@ -367,6 +375,7 @@ def test_transpose(self):
             y = y.transpose(i, j)
             self.assertEqual(self.safeToDense(x), y)

+    @skipIfRocm
     def test_transpose_coalesce_invariant(self):
         # If a sparse tensor is coalesced, its transpose should be the same
         # If a sparse tensor is uncoalesed, its transpose should be the same
@@ -407,6 +416,7 @@ def test_t_empty(self):
         self.assertEqual(x._sparseDims(), 2)
         self.assertEqual(x._denseDims(), 0)

+    @skipIfRocm
     def test_add_zeros(self):
         def test_shape(sparse_dims, sizes):
             x, _, _ = self._gen_sparse(sparse_dims, 20, sizes)
@@ -470,6 +480,7 @@ def test_shape(di, dj, dk):
         test_shape(1000, 100, 100)
         test_shape(3000, 64, 300)

+    @skipIfRocm
     def test_dsmm(self):
         def test_shape(di, dj, dk):
             x = self._gen_sparse(2, 20, [di, dj])[0]
@@ -483,6 +494,7 @@ def test_shape(di, dj, dk):
         test_shape(1000, 100, 100)
         test_shape(3000, 64, 300)

+    @skipIfRocm
     def test_hsmm(self):
         def test_shape(di, dj, dk):
             x = self._gen_sparse(2, 20, [di, dj])[0]
@@ -543,18 +555,21 @@ def _test_spadd_shape(self, shape_i, shape_v=None):
         expected = y + r * self.safeToDense(x_)
         self.assertEqual(res, expected)

+    @skipIfRocm
     def test_spadd(self):
         self._test_spadd_shape([5, 6])
         self._test_spadd_shape([10, 10, 10])
         self._test_spadd_shape([50, 30, 20])
         self._test_spadd_shape([5, 5, 5, 5, 5, 5])

+    @skipIfRocm
     def test_spadd_hybrid(self):
         self._test_spadd_shape([5, 6], [2, 3])
         self._test_spadd_shape([10, 10, 10], [3])
         self._test_spadd_shape([50, 30, 20], [2])
         self._test_spadd_shape([5, 5, 5, 5, 5, 5], [2])

+    @skipIfRocm
     def test_norm(self):
         x, _, _ = self._gen_sparse(3, 10, 100)
         y = x.coalesce()
@@ -623,18 +638,21 @@ def _test_basic_ops_shape(self, shape_i, shape_v=None):
         y._values().add_(1)
         self.assertEqual(z._values() + 1, y._values())

+    @skipIfRocm
     def test_basic_ops(self):
         self._test_basic_ops_shape([5, 6])
         self._test_basic_ops_shape([10, 10, 10])
         self._test_basic_ops_shape([50, 30, 20])
         self._test_basic_ops_shape([5, 5, 5, 5, 5, 5])

+    @skipIfRocm
     def test_basic_ops_hybrid(self):
         self._test_basic_ops_shape([5, 6], [2, 3])
         self._test_basic_ops_shape([10, 10, 10], [3])
         self._test_basic_ops_shape([50, 30, 20], [2])
         self._test_basic_ops_shape([5, 5, 5, 5, 5, 5], [2])

+    @skipIfRocm
     def test_add_dense_sparse_mismatch(self):
         x = torch.zeros([3, 4], dtype=self.value_dtype, device=self.device)
         sparse_y = self.SparseTensor(torch.zeros(1, 4, dtype=torch.int64, device=self.device),
@@ -673,6 +691,7 @@ def _test_sparse_mask_fixed(self):
         expected = self.SparseTensor(i, exp_v, torch.Size([5, 4]))
         self.assertEqual(res, expected)

+    @skipIfRocm
     def test_sparse_mask(self):
         self._test_sparse_mask_fixed()

@@ -692,6 +711,7 @@ def _test_zeros(self, shape, out_shape_i, out_shape_v=None):
         self.assertEqual(out._sparseDims(), len(shape))
         self.assertEqual(out._denseDims(), 0)

+    @skipIfRocm
     def test_log1p(self):
         if self.is_cuda:
             input = torch.cuda.sparse.DoubleTensor(
@@ -775,6 +795,7 @@ def _test_sparse_mask_hybrid_fixed(self):
         expected = self.SparseTensor(i, exp_v, torch.Size([5, 4, 2]))
         self.assertEqual(res, expected)

+    @skipIfRocm
     def test_sparse_variable_methods(self):
         # TODO: delete when tensor/variable are merged
         from torch.autograd import Variable
@@ -870,6 +891,7 @@ def test_sparse_variable_methods(self):
             self.assertEqual(test_fn(sp_var, de_var).data,
                              test_fn(sp_mat, de_mat), test_name)

+    @skipIfRocm
     def test_sparse_mask_hybrid(self):
         self._test_sparse_mask_hybrid_fixed()

@@ -878,6 +900,7 @@ def test_sparse_mask_hybrid(self):
         self._test_sparse_mask_shape([50, 30, 20], [2])
         self._test_sparse_mask_shape([5, 5, 5, 5, 5, 5], [2])

+    @skipIfRocm
     def test_sparse_add_coalesce(self):
         i = self.IndexTensor([[1, 2, 1]])
         v = self.ValueTensor([3, 4, 5])
@@ -932,6 +955,7 @@ def test_new_device_multi_gpu(self):
         self._test_new_device((30, 20), 1)
         self._test_new_device((30, 20, 10), 1)

+    @skipIfRocm
     def test_new(self):
         x, indices, values = self._gen_sparse(3, 10, 100)
         if not x.is_cuda:
@@ -1062,6 +1086,7 @@ def test_is_sparse(self):
         x = self.SparseTensor()
         self.assertTrue(x.is_sparse)

+    @skipIfRocm
     def test_resize_as(self):
         def do_test(t):
             y = t.new().resize_as_(t).zero_()
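For reference, `skipIfRocm` (imported from `common` at the top of this diff) is a conventional unittest-style skip decorator. Its implementation lives in common.py, which is not part of this diff, so the following is only a minimal sketch of how such a decorator can be written; the `TEST_WITH_ROCM` flag used below is a placeholder for whatever switch the test harness actually sets on ROCm builds, not a confirmed name.

# Minimal sketch, not the actual common.py implementation.
import unittest
from functools import wraps

# Placeholder flag: assume the test harness sets this to True on the ROCm stack.
TEST_WITH_ROCM = False


def skipIfRocm(fn):
    """Skip the decorated test when the suite is running on ROCm."""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        if TEST_WITH_ROCM:
            # Raising SkipTest reports the test as skipped rather than failed.
            raise unittest.SkipTest("test doesn't currently work on the ROCm stack")
        return fn(*args, **kwargs)
    return wrapper

Applied as `@skipIfRocm` above each affected test method, a wrapper of this shape raises `unittest.SkipTest` at call time, so the decorated sparse-tensor tests show up as skipped on ROCm runs while continuing to execute everywhere else.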