
Commit a341871

Enable more unit tests (ROCm 255)

* Enable more tests that relied on CPU LAPACK at compile time.
* Enable min/max tests in test_cuda (ROCm 236).
1 parent 1a0d82e commit a341871

2 files changed: +16 -25 lines
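Every test re-enabled below was previously gated by the @skipIfRocm decorator from PyTorch's shared test utilities; removing the decorator (or the corresponding skip marker in a test-spec tuple) is all it takes to run the test on ROCm again. As a minimal sketch of how such a conditional skip works (the environment-variable name is an assumption for illustration, not something this diff shows):

import os
import unittest

# Assumed gating flag: the suite decides it is running on ROCm from the
# environment (the exact variable name here is illustrative).
TEST_WITH_ROCM = os.environ.get('PYTORCH_TEST_WITH_ROCM', '0') == '1'

def skipIfRocm(fn):
    # Skip the decorated test on ROCm builds; run it everywhere else.
    return unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on ROCm")(fn)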

test/test_autograd.py

Lines changed: 4 additions & 7 deletions
@@ -1990,7 +1990,6 @@ def test_cat_empty(self):
                              lambda a, b: torch.cat((a, b)),
                              True, f_args_variable, f_args_tensor)

-    @skipIfRocm
     def test_potrf(self):
         root = Variable(torch.tril(torch.rand(S, S)), requires_grad=True)

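For context, torch.potrf computes the Cholesky factorization, which is why the test builds its input from a random lower-triangular root. A hedged sketch of the operation under test, not lines from the file:

import torch

root = torch.tril(torch.rand(5, 5)) + torch.eye(5)  # well-conditioned lower-triangular factor
a = root.mm(root.t())                               # symmetric positive-definite by construction
u = torch.potrf(a)                                  # Cholesky factor (upper-triangular by default)
print(torch.dist(u.t().mm(u), a))                   # ~0, since u.t() @ u reconstructs a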
@@ -2150,7 +2149,6 @@ def run_test(input_size, exponent):
         run_test((10, 10), torch.zeros(10, 10))
         run_test((10,), 0)

-    @skipIfRocm
     def test_pinverse(self):
         # Why is pinverse tested this way, and not ordinarily as other linear algebra methods?
         # 1. Pseudo-inverses are not generally continuous, which means that they are not differentiable
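The comment's claim about continuity is easy to see with a concrete matrix: when a perturbation changes the rank, the pseudo-inverse jumps. A short illustration (hedged; not part of the diff):

import torch

eps = 1e-3
a0 = torch.tensor([[1.0, 0.0], [0.0, 0.0]])  # rank 1
a1 = torch.tensor([[1.0, 0.0], [0.0, eps]])  # rank 2, yet only eps away from a0
print(torch.pinverse(a0))  # [[1, 0], [0, 0]]:     the zero singular value is dropped
print(torch.pinverse(a1))  # [[1, 0], [0, 1/eps]]: blows up as eps -> 0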
@@ -2546,7 +2544,6 @@ def backward(ctx, gO):
         out.backward()
         self.assertIn('MyFunc.apply', str(w[0].message))

-    @skipIfRocm
     def test_symeig_no_eigenvectors(self):
         A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
         w, v = torch.symeig(A, eigenvectors=False)
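With eigenvectors=False, torch.symeig returns only the eigenvalues (v is a placeholder). A hedged sketch of the call pattern the test exercises; the backward behavior noted in the comments is an assumption about this API generation, not something the diff states:

import torch

A = torch.tensor([[1., 2.], [2., 4.]], requires_grad=True)
w, v = torch.symeig(A, eigenvectors=False)  # eigenvalues only; v is not computed
try:
    w.sum().backward()  # symeig backward has generally required eigenvectors=True
except RuntimeError as e:
    print(e)            # expected to point the user at eigenvectors=True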
@@ -3185,13 +3182,13 @@ class dont_convert(tuple):
         'large', NO_ARGS, [skipIfNoLapack]),
     ('gesv', (S, S), (random_fullrank_matrix_distinct_singular_value(S),), '', NO_ARGS, [skipIfNoLapack]),
     ('gesv', (S, S, S), (random_fullrank_matrix_distinct_singular_value(S, S),),
-        'batched', NO_ARGS, [skipIfNoLapack, skipIfRocm]),
+        'batched', NO_ARGS, [skipIfNoLapack]),
     ('gesv', (2, 3, S, S), (random_fullrank_matrix_distinct_singular_value(S, 2, 3),),
-        'batched_dims', NO_ARGS, [skipIfNoLapack, skipIfRocm]),
+        'batched_dims', NO_ARGS, [skipIfNoLapack]),
     ('gesv', (2, 2, S, S), (random_fullrank_matrix_distinct_singular_value(S, 1),),
-        'batched_broadcast_A', NO_ARGS, [skipIfNoLapack, skipIfRocm]),
+        'batched_broadcast_A', NO_ARGS, [skipIfNoLapack]),
     ('gesv', (1, S, S), (random_fullrank_matrix_distinct_singular_value(S, 2, 2),),
-        'batched_broadcast_b', NO_ARGS, [skipIfNoLapack, skipIfRocm]),
+        'batched_broadcast_b', NO_ARGS, [skipIfNoLapack]),
     ('fill_', (S, S, S), (1,), 'number'),
     ('fill_', (), (1,), 'number_scalar'),
     # FIXME: we should compute the derivative w.r.t torch.tensor(1)
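Each entry names the op, its input shape, extra arguments, a subtest label, and a decorator list; deleting skipIfRocm from that list is what re-enables the batched variants. For reference, gesv solves the linear system AX = B. A minimal hedged sketch using the old torch.gesv API this table tests:

import torch

A = torch.randn(5, 5) + 5 * torch.eye(5)  # comfortably non-singular
B = torch.randn(5, 2)
X, LU = torch.gesv(B, A)                  # solves A @ X = B and also returns the LU factorization
print(torch.dist(A.mm(X), B))             # ~0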

test/test_cuda.py

Lines changed: 12 additions & 18 deletions
@@ -353,25 +353,23 @@ def tmp(t):
     ('kthvalue', small_3d_unique, lambda t: [3],),
     ('kthvalue', small_3d_unique, lambda t: [3, 1], 'dim'),
     ('kthvalue', small_3d_unique, lambda t: [3, -1], 'neg_dim'),
-    ('lerp', small_3d, lambda t: [small_3d(t), 0.3], '', types, False, "skipIfRocm:HalfTensor"),
-    ('max', small_3d_unique, lambda t: [], '', types, False, "skipIfRocm:HalfTensor"),
-    ('max', small_3d_unique, lambda t: [1], 'dim', types, False, skipIfRocm),
-    ('max', small_3d_unique, lambda t: [-1], 'neg_dim', types, False, skipIfRocm),
+    ('lerp', small_3d, lambda t: [small_3d(t), 0.3]),
+    ('max', small_3d_unique, lambda t: []),
+    ('max', small_3d_unique, lambda t: [1], 'dim'),
+    ('max', small_3d_unique, lambda t: [-1], 'neg_dim'),
     ('max', medium_2d, lambda t: [medium_2d(t)], 'elementwise'),
     ('min', small_3d_unique, lambda t: [], '', types, False, "skipIfRocm:HalfTensor"),
     ('min', small_3d_unique, lambda t: [1], 'dim', types, False, skipIfRocm),
     ('min', small_3d_unique, lambda t: [-1], 'neg_dim', types, False, skipIfRocm),
     ('min', medium_2d, lambda t: [medium_2d(t)], 'elementwise'),
-    ('mean', small_3d, lambda t: [], '', types, False, "skipIfRocm:HalfTensor"),
-    ('mean', small_3d, lambda t: [-1], 'neg_dim', types, False, "skipIfRocm:DoubleTensor,FloatTensor,HalfTensor"),
-    ('mean', small_3d, lambda t: [1], 'dim', types, False, "skipIfRocm:DoubleTensor,FloatTensor,HalfTensor"),
-    ('mode', small_3d, lambda t: [], '', types, False, skipIfRocm),
-    ('mode', small_3d, lambda t: [1], 'dim', types, False, skipIfRocm),
-    ('mode', small_3d, lambda t: [-1], 'neg_dim', types, False, skipIfRocm),
-    ('mvlgamma', lambda t: tensor_clamp(small_2d(t), 0.1, 10), lambda t: [1], '2d_p=1', float_types_no_half,
-     False, "skipIfRocm:DoubleTensor,FloatTensor"),
-    ('mvlgamma', lambda t: tensor_clamp(small_2d(t), 0.6, 10), lambda t: [2], '2d_p=2', float_types_no_half,
-     False, "skipIfRocm:DoubleTensor,FloatTensor"),
+    ('mean', small_3d, lambda t: []),
+    ('mean', small_3d, lambda t: [-1], 'neg_dim'),
+    ('mean', small_3d, lambda t: [1], 'dim'),
+    ('mode', small_3d, lambda t: []),
+    ('mode', small_3d, lambda t: [1], 'dim'),
+    ('mode', small_3d, lambda t: [-1], 'neg_dim'),
+    ('mvlgamma', lambda t: tensor_clamp(small_2d(t), 0.1, 10), lambda t: [1], '2d_p=1', float_types_no_half),
+    ('mvlgamma', lambda t: tensor_clamp(small_2d(t), 0.6, 10), lambda t: [2], '2d_p=2', float_types_no_half),
     ('remainder', small_3d, lambda t: [3], 'value', types, False, "skipIfRocm:HalfTensor"),
     ('remainder', small_3d, lambda t: [-3], 'negative_value', signed_types),
     ('remainder', small_3d, lambda t: [small_3d_positive(t)], 'tensor'),
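The shortened entries lean on the table's defaults: each tuple is (op name, input constructor, argument constructor, then optional subtest label, type list, and skip markers), so once an op passes on every backend the trailing skipIfRocm fields can simply be dropped. In spirit, an entry like ('max', small_3d_unique, lambda t: [1], 'dim') drives a CPU-versus-GPU comparison; a hedged sketch (the helper is re-created here, the real ones live in test_cuda.py):

import torch

x = torch.randperm(27).float().view(3, 3, 3)  # small 3-D tensor with unique values
cpu_values, cpu_indices = x.max(1)            # reference result on CPU
gpu_values, gpu_indices = x.cuda().max(1)     # same reduction on the GPU
assert torch.equal(cpu_values, gpu_values.cpu())
assert torch.equal(cpu_indices, gpu_indices.cpu())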
@@ -977,7 +975,6 @@ def test_broadcast_cpu(self):
     def test_broadcast_gpu(self):
         self._test_broadcast(torch.randn(5, 5).cuda())

-    @skipIfRocm
     def test_min_max_nan(self):
         tests = [(lambda x: x.min(), 'min'),
                  (lambda x: x.max(), 'max'),
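test_min_max_nan feeds NaN-containing inputs through the reductions listed in tests, a classic source of CPU/GPU divergence because comparison-based kernels can either drop or propagate NaN. A hedged illustration of the input pattern (the exact assertions are the test's, not shown here):

import math
import torch

x = torch.randn(10)
x[3] = math.nan          # poison one element
y = x.cuda()
print(x.min(), x.max())  # CPU reference
print(y.min(), y.max())  # the test checks the GPU agrees on NaN handling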
@@ -1743,7 +1740,6 @@ def test_tensor_scatterAdd(self):
     def test_tensor_scatterFill(self):
         TestTorch._test_scatter_base(self, lambda t: t.cuda(), 'scatter_', True, test_bounds=False)

-    @skipIfRocm
     def test_min_max_inits(self):
         # Testing if THC_reduceAll received the correct index initialization.
         # This affects the result of THC_reduceAll operations at extreme values
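The initialization bug this guards against: a reduction whose accumulator is seeded with the wrong sentinel (say, 0 instead of the type's extreme) produces wrong values or indices exactly when every element sits at that extreme. A hedged sketch of inputs that would expose it:

import torch

x = torch.full((8,), float('inf'), device='cuda')  # every element at the extreme
v, i = x.max(dim=0)
print(v.item(), i.item())  # value must be inf and the index a valid 0..7, not garbage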
@@ -1757,11 +1753,9 @@ def test_min_max_inits(self):
         _, v = y.min(dim=0)
         self.assertEqual(v, expected)

-    @skipIfRocm
     def test_max_with_inf(self):
         TestTorch._test_max_with_inf(self, (torch.half, torch.float, torch.double), 'cuda')

-    @skipIfRocm
     def test_min_with_inf(self):
         TestTorch._test_min_with_inf(self, (torch.half, torch.float, torch.double), 'cuda')

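test_max_with_inf and test_min_with_inf cover the same sentinel concern at the floating-point extremes, across half, float, and double. A minimal hedged illustration of the property under test:

import torch

for dtype in (torch.half, torch.float, torch.double):
    x = torch.tensor([1.0, float('inf'), 3.0, float('-inf')]).to(dtype).cuda()
    assert x.max().item() == float('inf')   # max must latch onto +inf
    assert x.min().item() == float('-inf')  # and min onto -inf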