diff --git a/test/test_autograd.py b/test/test_autograd.py
index ac5c74f550e786..663cc575390c5f 100644
--- a/test/test_autograd.py
+++ b/test/test_autograd.py
@@ -15,7 +15,7 @@
 from torch.autograd.function import once_differentiable
 from torch.autograd.profiler import profile
 from common import TEST_MKL, TestCase, run_tests, skipIfNoLapack, \
-    suppress_warnings, TEST_WITH_ROCM, skipIfRocm
+    suppress_warnings, skipIfRocm
 from torch.autograd import Variable, Function, detect_anomaly
 from torch.autograd.function import InplaceFunction
 from torch.testing import make_non_contiguous, randn_like
@@ -975,7 +975,7 @@ def test_no_requires_grad_inplace(self):
         with self.assertRaises(RuntimeError):
             b.add_(5)
 
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_requires_grad_factory(self):
         x = torch.randn(2, 3)
         fns = [torch.ones_like, torch.testing.randn_like]
@@ -1375,7 +1375,7 @@ def __del__(self):
             Variable(torch.randn(10, 10), _grad_fn=CollectOnDelete())
 
     @unittest.skipIf(torch.cuda.device_count() < 2, "no multi-GPU")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_unused_output_gpu(self):
         from torch.nn.parallel._functions import Broadcast
         x = Variable(torch.randn(5, 5).float().cuda(), requires_grad=True)
@@ -1404,7 +1404,7 @@ def backward(ctx, grad_output):
         self.assertEqual(device[0], 1)
 
     @unittest.skipIf(torch.cuda.device_count() < 2, "no multi-GPU")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_inputbuffer_add_multigpu(self):
         input = torch.randn(1).cuda(0).requires_grad_()
         output = input.cuda(1) + input.cuda(1)
@@ -1454,7 +1454,7 @@ def test_detach_base(self):
         self.assertIsNotNone(view.grad_fn)
         self.assertIs(view._base, x)
 
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def _test_type_conversion_backward(self, t, ):
         fvar = Variable(t(torch.randn(5, 5).float()), requires_grad=True)
         fvar.double().sum().backward()
@@ -1578,7 +1578,7 @@ def test_pyscalar_conversions(self):
             self._test_pyscalar_conversions(lambda x: x.cuda(), lambda x: long(x))
 
     @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_pin_memory(self):
         x = torch.randn(2, 2, requires_grad=True)
         self.assertEqual(x, x.pin_memory())
@@ -1914,7 +1914,7 @@ def test_cat_empty(self):
                               lambda a, b: torch.cat((a, b)),
                               True, f_args_variable, f_args_tensor)
 
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_potrf(self):
         root = Variable(torch.tril(torch.rand(S, S)), requires_grad=True)
 
@@ -2074,7 +2074,7 @@ def run_test(input_size, exponent):
         run_test((10, 10), torch.zeros(10, 10))
         run_test((10,), 0)
 
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_pinverse(self):
         # Why is pinverse tested this way, and not ordinarily as other linear algebra methods?
         # 1. Pseudo-inverses are not generally continuous, which means that they are not differentiable
@@ -2187,7 +2187,7 @@ def test_where_functional(self):
         self._test_where_functional(lambda t: t)
 
     @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_where_functional_cuda(self):
         self._test_where_functional(lambda t: t.cuda())
 
@@ -2397,7 +2397,7 @@ def f3(dt):
                     f(dt)
 
     @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_set_requires_grad_only_for_floats_cuda(self):
         self._test_set_requires_grad_only_for_floats(self, True)
 
@@ -2405,7 +2405,7 @@ def test_set_requires_grad_only_for_floats(self):
         self._test_set_requires_grad_only_for_floats(self, False)
 
     @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_rnn_backward_to_input_but_not_parameters_cuda(self):
         # this checks whether it is possible to not require
         # weight parameters, but require inputs, see #7722
@@ -2457,7 +2457,7 @@ def backward(ctx, gO):
             out.backward()
             self.assertIn('MyFunc.apply', str(w[0].message))
 
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_symeig_no_eigenvectors(self):
         A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
         w, v = torch.symeig(A, eigenvectors=False)
diff --git a/test/test_dataloader.py b/test/test_dataloader.py
index bb61cced71753a..6692f6df39e316 100644
--- a/test/test_dataloader.py
+++ b/test/test_dataloader.py
@@ -13,7 +13,7 @@
 from torch.utils.data import Dataset, TensorDataset, DataLoader, ConcatDataset
 from torch.utils.data.dataset import random_split
 from torch.utils.data.dataloader import default_collate, ExceptionWrapper, MANAGER_STATUS_CHECK_INTERVAL
-from common import TestCase, run_tests, TEST_NUMPY, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, TEST_WITH_ROCM
+from common import TestCase, run_tests, TEST_NUMPY, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, skipIfRocm
 
 # We cannot import TEST_CUDA from common_nn here, because if we do that,
 # the TEST_CUDNN line from common_nn will be executed multiple times
@@ -335,14 +335,14 @@ def test_growing_dataset(self):
         self.assertEqual(len(dataloader_shuffle), 5)
 
     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_sequential_pin_memory(self):
         loader = DataLoader(self.dataset, batch_size=2, pin_memory=True)
         for input, target in loader:
             self.assertTrue(input.is_pinned())
             self.assertTrue(target.is_pinned())
 
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_multiple_dataloaders(self):
         loader1_it = iter(DataLoader(self.dataset, num_workers=1))
         loader2_it = iter(DataLoader(self.dataset, num_workers=2))
@@ -443,7 +443,7 @@ def test_batch_sampler(self):
         self._test_batch_sampler(num_workers=4)
 
     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_shuffle_pin_memory(self):
         loader = DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True)
         for input, target in loader:
@@ -476,7 +476,7 @@ def test_error_workers(self):
 
     @unittest.skipIf(IS_WINDOWS, "FIXME: stuck test")
     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_partial_workers(self):
         "check that workers exit even if the iterator is not exhausted"
         loader = iter(DataLoader(self.dataset, batch_size=2, num_workers=4, pin_memory=True))
@@ -530,7 +530,7 @@ def _is_process_alive(pid, pname):
                      "spawn start method is not supported in Python 2, \
                      but we need it for creating another process with CUDA")
     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_main_process_unclean_exit(self):
         '''There might be ConnectionResetError or leaked semaphore warning (due to dirty process exit), \
             but they are all safe to ignore'''
@@ -634,7 +634,7 @@ def setUp(self):
         self.dataset = StringDataset()
 
     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_shuffle_pin_memory(self):
         loader = DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True)
         for batch_ndx, (s, n) in enumerate(loader):
@@ -678,7 +678,7 @@ def test_sequential_batch(self):
             self.assertEqual(n[1], idx + 1)
 
     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_pin_memory(self):
         loader = DataLoader(self.dataset, batch_size=2, pin_memory=True)
         for batch_ndx, sample in enumerate(loader):
@@ -718,7 +718,7 @@ def _run_ind_worker_queue_test(self, batch_size, num_workers):
             if current_worker_idx == num_workers:
                 current_worker_idx = 0
 
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_ind_worker_queue(self):
         for batch_size in (8, 16, 32, 64):
             for num_workers in range(1, 6):
diff --git a/test/test_jit.py b/test/test_jit.py
index f0c42483b5ebe4..a882c5651affc6 100644
--- a/test/test_jit.py
+++ b/test/test_jit.py
@@ -9,7 +9,7 @@
 from torch.autograd.function import traceable
 from torch.testing import assert_allclose
 from torch.onnx import OperatorExportTypes
-from common import TestCase, run_tests, IS_WINDOWS, TEST_WITH_UBSAN, TEST_WITH_ROCM
+from common import TestCase, run_tests, IS_WINDOWS, TEST_WITH_UBSAN, skipIfRocm
 from textwrap import dedent
 import os
 import io
@@ -387,7 +387,7 @@ def forward(self, x):
     # TODO: Fuser doesn't work at all when inputs require grad. Fix that
     @unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
     @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_lstm_fusion_cuda(self):
         inputs = get_lstm_inputs('cuda')
         ge = self.checkTrace(LSTMCellF, inputs)
@@ -411,7 +411,7 @@ def test_lstm_fusion_cpu(self):
 
     @unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
     @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_lstm_fusion_concat(self):
         inputs = get_lstm_inputs('cuda')
         ge = self.checkTrace(LSTMCellC, inputs)
@@ -419,7 +419,7 @@ def test_lstm_fusion_concat(self):
 
     @unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
     @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_concat_fusion(self):
         hx = torch.randn(3, 20, dtype=torch.float, device='cuda')
         cx = torch.randn(3, 20, dtype=torch.float, device='cuda')
@@ -432,7 +432,7 @@ def foo(hx, cx):
 
     @unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
     @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_fusion_distribute(self):
         def f(x, y):
             z1, z2 = (x + y).chunk(2, dim=1)
@@ -477,7 +477,7 @@ def fn_test_comparison_gt_lt(x, y):
 
     @unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
     @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_comparison_gt_lt(self):
         x = torch.randn(4, 4, dtype=torch.float, device='cuda')
         y = torch.randn(4, 4, dtype=torch.float, device='cuda')
@@ -486,7 +486,7 @@ def test_comparison_gt_lt(self):
 
     @unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
     @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_comparison_ge_le(self):
         def f(x, y):
             mask = (x >= 0).type_as(x)
@@ -506,7 +506,7 @@ def fn_test_relu(x, y):
 
     @unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
     @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_relu(self):
         x = torch.randn(4, 4, dtype=torch.float, device='cuda')
         y = torch.randn(4, 4, dtype=torch.float, device='cuda')
@@ -529,7 +529,7 @@ def fn_test_exp(x, y):
 
     @unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
     @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_exp(self):
         x = torch.randn(4, 4, dtype=torch.float, device='cuda')
         y = torch.randn(4, 4, dtype=torch.float, device='cuda')
@@ -874,7 +874,7 @@ def doit(x, y):
 
     @unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
     @unittest.skipIf(not RUN_CUDA, "cpp tests require CUDA")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_cpp(self):
         # rather than rebuild assertExpected in cpp,
         # just glob all the cpp outputs into one file for now
@@ -994,7 +994,7 @@ def test_ge_optimized(self):
 
     @unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
     @unittest.skipIf(not RUN_CUDA, "requires CUDA")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_ge_cuda(self):
         self.run_ge_tests(True, True)
 
@@ -1031,7 +1031,7 @@ def foo(a):
 
     @unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
     @unittest.skipIf(not RUN_CUDA, "calls .cuda()")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_traced_module(self):
         class Model(nn.Module):
             def __init__(self, num_features, num_layers):
@@ -2682,7 +2682,7 @@ def test_tensor_number_math(self):
         self._test_tensor_number_math()
 
     @unittest.skipIf(not RUN_CUDA, "No CUDA")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_tensor_number_math_cuda(self):
         self._test_tensor_number_math(device='cuda')
 
diff --git a/test/test_optim.py b/test/test_optim.py
index 97a39d95b3a3d0..bc6e0ff8fc828f 100644
--- a/test/test_optim.py
+++ b/test/test_optim.py
@@ -11,7 +11,7 @@
 from torch.autograd import Variable
 from torch import sparse
 from torch.optim.lr_scheduler import LambdaLR, StepLR, MultiStepLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau
-from common import TestCase, run_tests, TEST_WITH_UBSAN, TEST_WITH_ROCM
+from common import TestCase, run_tests, TEST_WITH_UBSAN, skipIfRocm
 
 
 def rosenbrock(tensor):
@@ -236,7 +236,7 @@ def _build_params_dict(self, weight, bias, **kwargs):
     def _build_params_dict_single(self, weight, bias, **kwargs):
         return [dict(params=bias, **kwargs)]
 
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_sgd(self):
         self._test_rosenbrock(
             lambda params: optim.SGD(params, lr=1e-3),
@@ -273,7 +273,7 @@ def test_sgd_sparse(self):
             lambda params: optim.SGD(params, lr=5e-3)
         )
 
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_adam(self):
         self._test_rosenbrock(
             lambda params: optim.Adam(params, lr=1e-2),
@@ -311,7 +311,7 @@ def test_sparse_adam(self):
         with self.assertRaisesRegex(ValueError, "Invalid beta parameter at index 0: 1.0"):
             optim.SparseAdam(None, lr=1e-2, betas=(1.0, 0.0))
 
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_adadelta(self):
         self._test_rosenbrock(
             lambda params: optim.Adadelta(params),
@@ -335,7 +335,7 @@ def test_adadelta(self):
         with self.assertRaisesRegex(ValueError, "Invalid rho value: 1.1"):
             optim.Adadelta(None, lr=1e-2, rho=1.1)
 
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_adagrad(self):
         self._test_rosenbrock(
             lambda params: optim.Adagrad(params, lr=1e-1),
@@ -369,7 +369,7 @@ def test_adagrad_sparse(self):
             lambda params: optim.Adagrad(params, lr=1e-1)
         )
 
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_adamax(self):
         self._test_rosenbrock(
             lambda params: optim.Adamax(params, lr=1e-1),
@@ -394,7 +394,7 @@ def test_adamax(self):
         with self.assertRaisesRegex(ValueError, "Invalid beta parameter at index 1: 1.0"):
             optim.Adamax(None, lr=1e-2, betas=(0.0, 1.0))
 
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_rmsprop(self):
         self._test_rosenbrock(
             lambda params: optim.RMSprop(params, lr=1e-2),
@@ -419,7 +419,7 @@ def test_rmsprop(self):
         with self.assertRaisesRegex(ValueError, "Invalid momentum value: -1.0"):
             optim.RMSprop(None, lr=1e-2, momentum=-1.0)
 
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_asgd(self):
         self._test_rosenbrock(
             lambda params: optim.ASGD(params, lr=1e-3),
@@ -444,7 +444,7 @@ def test_asgd(self):
         with self.assertRaisesRegex(ValueError, "Invalid weight_decay value: -0.5"):
             optim.ASGD(None, lr=1e-2, weight_decay=-0.5)
 
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_rprop(self):
         self._test_rosenbrock(
             lambda params: optim.Rprop(params, lr=1e-3),
@@ -469,7 +469,7 @@ def test_rprop(self):
         with self.assertRaisesRegex(ValueError, "Invalid eta values: 1.0, 0.5"):
             optim.Rprop(None, lr=1e-2, etas=(1.0, 0.5))
 
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_lbfgs(self):
         self._test_rosenbrock(
             lambda params: optim.LBFGS(params),
diff --git a/test/test_torch.py b/test/test_torch.py
index 8bcd30a7e36a9f..e6657fba04d83b 100644
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -22,7 +22,7 @@
 from torch import multiprocessing as mp
 from common import TestCase, iter_indices, TEST_NUMPY, TEST_SCIPY, TEST_MKL, \
     TEST_LIBROSA, run_tests, download_file, skipIfNoLapack, suppress_warnings, \
-    IS_WINDOWS, PY3, NO_MULTIPROCESSING_SPAWN, TEST_WITH_ROCM
+    IS_WINDOWS, PY3, NO_MULTIPROCESSING_SPAWN, skipIfRocm
 from multiprocessing.reduction import ForkingPickler
 
 if TEST_NUMPY:
@@ -724,7 +724,7 @@ def test_norm(self):
 
     @unittest.skipIf(not TEST_NUMPY, "Numpy not found")
     @unittest.skipIf(not torch.cuda.is_available(), 'no CUDA')
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_norm_cuda(self):
         self._test_norm(self, device='cuda')
 
@@ -3280,7 +3280,7 @@ def test_topk_arguments(self):
         self.assertRaises(TypeError, lambda: q.topk(4, True))
 
     @unittest.skipIf(not torch.cuda.is_available(), 'no CUDA')
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_topk_noncontiguous_gpu(self):
         t = torch.randn(20, device="cuda")[::2]
         top1, idx1 = t.topk(5)
@@ -7233,7 +7233,7 @@ def test_serialize_device(self):
             self.assertEqual(device, device_copied)
 
     @unittest.skipIf(not torch.cuda.is_available(), 'no CUDA')
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_half_tensor_cuda(self):
         x = torch.randn(5, 5).half()
         self.assertEqual(x.cuda(), x)
@@ -7545,7 +7545,7 @@ def test_from_file(self):
             t2.fill_(rnum)
             self.assertEqual(t1, t2, 0)
 
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_print(self):
         default_type = torch.Tensor().type()
         for t in torch._tensor_classes:
@@ -7710,7 +7710,7 @@ def test_empty_like(self):
             self.assertEqual(torch.empty_like(a).type(), a.type())
 
     @unittest.skipIf(not torch.cuda.is_available(), 'no CUDA')
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_pin_memory(self):
         x = torch.randn(3, 5)
         self.assertFalse(x.is_pinned())
@@ -7878,7 +7878,7 @@ def test_from_numpy(self):
         self.assertRaises(ValueError, lambda: torch.from_numpy(x))
 
     @unittest.skipIf(not TEST_NUMPY, "Numpy not found")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_ctor_with_numpy_array(self):
         dtypes = [
             np.double,
diff --git a/test/test_utils.py b/test/test_utils.py
index c6559fe68fae89..b28b4f83171aaf 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -19,7 +19,7 @@
 from torch.utils.trainer.plugins.plugin import Plugin
 from torch.autograd._functions.utils import prepare_onnx_paddings
 from torch.autograd._functions.utils import check_onnx_broadcast
-from common import IS_WINDOWS, IS_PPC, TEST_WITH_ROCM
+from common import IS_WINDOWS, IS_PPC, skipIfRocm
 
 HAS_CUDA = torch.cuda.is_available()
 
@@ -412,7 +412,7 @@ def test_cpu(self):
 
     @unittest.skipIf(not HAS_CFFI or not HAS_CUDA, "ffi tests require cffi package")
     @unittest.skipIf(IS_WINDOWS, "ffi doesn't currently work on Windows")
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_gpu(self):
         create_extension(
             name='gpulib',
@@ -616,7 +616,7 @@ def test_bottleneck_cpu_only(self):
         self._check_cuda(out)
 
     @unittest.skipIf(not HAS_CUDA, 'No CUDA')
-    @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+    @skipIfRocm
     def test_bottleneck_cuda(self):
         rc, out, err = self._run_bottleneck('bottleneck/test_cuda.py')
         self.assertEqual(rc, 0, 'Run failed with\n{}'.format(err))
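
Note: this diff assumes that test/common.py already exports a skipIfRocm decorator (the test_autograd.py import hunk shows it being imported alongside TEST_WITH_ROCM); its definition is not part of this change. A minimal sketch of what such a decorator could look like, reusing the existing TEST_WITH_ROCM flag, is given below; the real implementation in common.py may differ.

# Hypothetical sketch only: the actual decorator lives in test/common.py and may differ.
import os
import unittest
from functools import wraps

# Assumption: the ROCm test mode is signalled via an environment variable.
TEST_WITH_ROCM = os.getenv('PYTORCH_TEST_WITH_ROCM', '0') == '1'


def skipIfRocm(fn):
    """Skip the decorated test when running on the ROCm stack."""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        if TEST_WITH_ROCM:
            raise unittest.SkipTest("test doesn't currently work on the ROCm stack")
        return fn(*args, **kwargs)
    return wrapper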