
Refactor unit test skip statements to use @skipIfRocm annotation #103


Merged: 2 commits, Aug 7, 2018
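This PR replaces every inline ROCm skip, @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack"), with a shared @skipIfRocm decorator imported from common. The decorator itself lives in test/common.py and is not part of this diff; the sketch below is only an assumption of the behavior it is expected to provide (the helper name comes from the diff, but the environment variable and implementation details are illustrative):

    import os
    import unittest
    from functools import wraps

    # Assumed flag: test/common.py is expected to expose an equivalent TEST_WITH_ROCM.
    TEST_WITH_ROCM = os.environ.get('PYTORCH_TEST_WITH_ROCM', '0') == '1'

    def skipIfRocm(fn):
        # Skip the wrapped test when running on the ROCm stack.
        @wraps(fn)
        def wrapper(*args, **kwargs):
            if TEST_WITH_ROCM:
                raise unittest.SkipTest("test doesn't currently work on the ROCm stack")
            return fn(*args, **kwargs)
        return wrapper

With such a helper in place, stacking @skipIfRocm on a test method behaves like the removed @unittest.skipIf(TEST_WITH_ROCM, ...) lines, but the skip condition and reason are defined in one place instead of being repeated in every test file.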
24 changes: 12 additions & 12 deletions test/test_autograd.py
@@ -15,7 +15,7 @@
from torch.autograd.function import once_differentiable
from torch.autograd.profiler import profile
from common import TEST_MKL, TestCase, run_tests, skipIfNoLapack, \
- suppress_warnings, TEST_WITH_ROCM, skipIfRocm
+ suppress_warnings, skipIfRocm
from torch.autograd import Variable, Function, detect_anomaly
from torch.autograd.function import InplaceFunction
from torch.testing import make_non_contiguous, randn_like
@@ -975,7 +975,7 @@ def test_no_requires_grad_inplace(self):
with self.assertRaises(RuntimeError):
b.add_(5)

- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_requires_grad_factory(self):
x = torch.randn(2, 3)
fns = [torch.ones_like, torch.testing.randn_like]
@@ -1375,7 +1375,7 @@ def __del__(self):
Variable(torch.randn(10, 10), _grad_fn=CollectOnDelete())

@unittest.skipIf(torch.cuda.device_count() < 2, "no multi-GPU")
- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_unused_output_gpu(self):
from torch.nn.parallel._functions import Broadcast
x = Variable(torch.randn(5, 5).float().cuda(), requires_grad=True)
@@ -1404,7 +1404,7 @@ def backward(ctx, grad_output):
self.assertEqual(device[0], 1)

@unittest.skipIf(torch.cuda.device_count() < 2, "no multi-GPU")
- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_inputbuffer_add_multigpu(self):
input = torch.randn(1).cuda(0).requires_grad_()
output = input.cuda(1) + input.cuda(1)
@@ -1454,7 +1454,7 @@ def test_detach_base(self):
self.assertIsNotNone(view.grad_fn)
self.assertIs(view._base, x)

- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def _test_type_conversion_backward(self, t, ):
fvar = Variable(t(torch.randn(5, 5).float()), requires_grad=True)
fvar.double().sum().backward()
@@ -1578,7 +1578,7 @@ def test_pyscalar_conversions(self):
self._test_pyscalar_conversions(lambda x: x.cuda(), lambda x: long(x))

@unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_pin_memory(self):
x = torch.randn(2, 2, requires_grad=True)
self.assertEqual(x, x.pin_memory())
@@ -1914,7 +1914,7 @@ def test_cat_empty(self):
lambda a, b: torch.cat((a, b)),
True, f_args_variable, f_args_tensor)

- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_potrf(self):
root = Variable(torch.tril(torch.rand(S, S)), requires_grad=True)

@@ -2074,7 +2074,7 @@ def run_test(input_size, exponent):
run_test((10, 10), torch.zeros(10, 10))
run_test((10,), 0)

- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_pinverse(self):
# Why is pinverse tested this way, and not ordinarily as other linear algebra methods?
# 1. Pseudo-inverses are not generally continuous, which means that they are not differentiable
@@ -2187,7 +2187,7 @@ def test_where_functional(self):
self._test_where_functional(lambda t: t)

@unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_where_functional_cuda(self):
self._test_where_functional(lambda t: t.cuda())

@@ -2397,15 +2397,15 @@ def f3(dt):
f(dt)

@unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_set_requires_grad_only_for_floats_cuda(self):
self._test_set_requires_grad_only_for_floats(self, True)

def test_set_requires_grad_only_for_floats(self):
self._test_set_requires_grad_only_for_floats(self, False)

@unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_rnn_backward_to_input_but_not_parameters_cuda(self):
# this checks whether it is possible to not require
# weight parameters, but require inputs, see #7722
@@ -2457,7 +2457,7 @@ def backward(ctx, gO):
out.backward()
self.assertIn('MyFunc.apply', str(w[0].message))

- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_symeig_no_eigenvectors(self):
A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
w, v = torch.symeig(A, eigenvectors=False)
18 changes: 9 additions & 9 deletions test/test_dataloader.py
@@ -13,7 +13,7 @@
from torch.utils.data import Dataset, TensorDataset, DataLoader, ConcatDataset
from torch.utils.data.dataset import random_split
from torch.utils.data.dataloader import default_collate, ExceptionWrapper, MANAGER_STATUS_CHECK_INTERVAL
- from common import TestCase, run_tests, TEST_NUMPY, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, TEST_WITH_ROCM
+ from common import TestCase, run_tests, TEST_NUMPY, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, skipIfRocm

# We cannot import TEST_CUDA from common_nn here, because if we do that,
# the TEST_CUDNN line from common_nn will be executed multiple times
@@ -335,14 +335,14 @@ def test_growing_dataset(self):
self.assertEqual(len(dataloader_shuffle), 5)

@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_sequential_pin_memory(self):
loader = DataLoader(self.dataset, batch_size=2, pin_memory=True)
for input, target in loader:
self.assertTrue(input.is_pinned())
self.assertTrue(target.is_pinned())

- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_multiple_dataloaders(self):
loader1_it = iter(DataLoader(self.dataset, num_workers=1))
loader2_it = iter(DataLoader(self.dataset, num_workers=2))
@@ -443,7 +443,7 @@ def test_batch_sampler(self):
self._test_batch_sampler(num_workers=4)

@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_shuffle_pin_memory(self):
loader = DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True)
for input, target in loader:
@@ -476,7 +476,7 @@ def test_error_workers(self):

@unittest.skipIf(IS_WINDOWS, "FIXME: stuck test")
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_partial_workers(self):
"check that workers exit even if the iterator is not exhausted"
loader = iter(DataLoader(self.dataset, batch_size=2, num_workers=4, pin_memory=True))
@@ -530,7 +530,7 @@ def _is_process_alive(pid, pname):
"spawn start method is not supported in Python 2, \
but we need it for creating another process with CUDA")
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_main_process_unclean_exit(self):
'''There might be ConnectionResetError or leaked semaphore warning (due to dirty process exit), \
but they are all safe to ignore'''
@@ -634,7 +634,7 @@ def setUp(self):
self.dataset = StringDataset()

@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_shuffle_pin_memory(self):
loader = DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True)
for batch_ndx, (s, n) in enumerate(loader):
@@ -678,7 +678,7 @@ def test_sequential_batch(self):
self.assertEqual(n[1], idx + 1)

@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_pin_memory(self):
loader = DataLoader(self.dataset, batch_size=2, pin_memory=True)
for batch_ndx, sample in enumerate(loader):
@@ -718,7 +718,7 @@ def _run_ind_worker_queue_test(self, batch_size, num_workers):
if current_worker_idx == num_workers:
current_worker_idx = 0

- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_ind_worker_queue(self):
for batch_size in (8, 16, 32, 64):
for num_workers in range(1, 6):
26 changes: 13 additions & 13 deletions test/test_jit.py
@@ -9,7 +9,7 @@
from torch.autograd.function import traceable
from torch.testing import assert_allclose
from torch.onnx import OperatorExportTypes
- from common import TestCase, run_tests, IS_WINDOWS, TEST_WITH_UBSAN, TEST_WITH_ROCM
+ from common import TestCase, run_tests, IS_WINDOWS, TEST_WITH_UBSAN, skipIfRocm
from textwrap import dedent
import os
import io
@@ -387,7 +387,7 @@ def forward(self, x):
# TODO: Fuser doesn't work at all when inputs require grad. Fix that
@unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_lstm_fusion_cuda(self):
inputs = get_lstm_inputs('cuda')
ge = self.checkTrace(LSTMCellF, inputs)
@@ -411,15 +411,15 @@ def test_lstm_fusion_cpu(self):

@unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_lstm_fusion_concat(self):
inputs = get_lstm_inputs('cuda')
ge = self.checkTrace(LSTMCellC, inputs)
self.assertExpectedGraph(ge.graph_for(*inputs))

@unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_concat_fusion(self):
hx = torch.randn(3, 20, dtype=torch.float, device='cuda')
cx = torch.randn(3, 20, dtype=torch.float, device='cuda')
@@ -432,7 +432,7 @@ def foo(hx, cx):

@unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_fusion_distribute(self):
def f(x, y):
z1, z2 = (x + y).chunk(2, dim=1)
@@ -477,7 +477,7 @@ def fn_test_comparison_gt_lt(x, y):

@unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_comparison_gt_lt(self):
x = torch.randn(4, 4, dtype=torch.float, device='cuda')
y = torch.randn(4, 4, dtype=torch.float, device='cuda')
@@ -486,7 +486,7 @@ def test_comparison_gt_lt(self):

@unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_comparison_ge_le(self):
def f(x, y):
mask = (x >= 0).type_as(x)
@@ -506,7 +506,7 @@ def fn_test_relu(x, y):

@unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_relu(self):
x = torch.randn(4, 4, dtype=torch.float, device='cuda')
y = torch.randn(4, 4, dtype=torch.float, device='cuda')
@@ -529,7 +529,7 @@ def fn_test_exp(x, y):

@unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_exp(self):
x = torch.randn(4, 4, dtype=torch.float, device='cuda')
y = torch.randn(4, 4, dtype=torch.float, device='cuda')
@@ -874,7 +874,7 @@ def doit(x, y):

@unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
@unittest.skipIf(not RUN_CUDA, "cpp tests require CUDA")
- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_cpp(self):
# rather than rebuild assertExpected in cpp,
# just glob all the cpp outputs into one file for now
@@ -994,7 +994,7 @@ def test_ge_optimized(self):

@unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_ge_cuda(self):
self.run_ge_tests(True, True)

@@ -1031,7 +1031,7 @@ def foo(a):

@unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
@unittest.skipIf(not RUN_CUDA, "calls .cuda()")
- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_traced_module(self):
class Model(nn.Module):
def __init__(self, num_features, num_layers):
@@ -2682,7 +2682,7 @@ def test_tensor_number_math(self):
self._test_tensor_number_math()

@unittest.skipIf(not RUN_CUDA, "No CUDA")
- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_tensor_number_math_cuda(self):
self._test_tensor_number_math(device='cuda')

20 changes: 10 additions & 10 deletions test/test_optim.py
@@ -11,7 +11,7 @@
from torch.autograd import Variable
from torch import sparse
from torch.optim.lr_scheduler import LambdaLR, StepLR, MultiStepLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau
- from common import TestCase, run_tests, TEST_WITH_UBSAN, TEST_WITH_ROCM
+ from common import TestCase, run_tests, TEST_WITH_UBSAN, skipIfRocm


def rosenbrock(tensor):
@@ -236,7 +236,7 @@ def _build_params_dict(self, weight, bias, **kwargs):
def _build_params_dict_single(self, weight, bias, **kwargs):
return [dict(params=bias, **kwargs)]

- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_sgd(self):
self._test_rosenbrock(
lambda params: optim.SGD(params, lr=1e-3),
@@ -273,7 +273,7 @@ def test_sgd_sparse(self):
lambda params: optim.SGD(params, lr=5e-3)
)

- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_adam(self):
self._test_rosenbrock(
lambda params: optim.Adam(params, lr=1e-2),
@@ -311,7 +311,7 @@ def test_sparse_adam(self):
with self.assertRaisesRegex(ValueError, "Invalid beta parameter at index 0: 1.0"):
optim.SparseAdam(None, lr=1e-2, betas=(1.0, 0.0))

- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_adadelta(self):
self._test_rosenbrock(
lambda params: optim.Adadelta(params),
@@ -335,7 +335,7 @@ def test_adadelta(self):
with self.assertRaisesRegex(ValueError, "Invalid rho value: 1.1"):
optim.Adadelta(None, lr=1e-2, rho=1.1)

- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_adagrad(self):
self._test_rosenbrock(
lambda params: optim.Adagrad(params, lr=1e-1),
@@ -369,7 +369,7 @@ def test_adagrad_sparse(self):
lambda params: optim.Adagrad(params, lr=1e-1)
)

- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_adamax(self):
self._test_rosenbrock(
lambda params: optim.Adamax(params, lr=1e-1),
@@ -394,7 +394,7 @@ def test_adamax(self):
with self.assertRaisesRegex(ValueError, "Invalid beta parameter at index 1: 1.0"):
optim.Adamax(None, lr=1e-2, betas=(0.0, 1.0))

- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_rmsprop(self):
self._test_rosenbrock(
lambda params: optim.RMSprop(params, lr=1e-2),
@@ -419,7 +419,7 @@ def test_rmsprop(self):
with self.assertRaisesRegex(ValueError, "Invalid momentum value: -1.0"):
optim.RMSprop(None, lr=1e-2, momentum=-1.0)

- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_asgd(self):
self._test_rosenbrock(
lambda params: optim.ASGD(params, lr=1e-3),
@@ -444,7 +444,7 @@ def test_asgd(self):
with self.assertRaisesRegex(ValueError, "Invalid weight_decay value: -0.5"):
optim.ASGD(None, lr=1e-2, weight_decay=-0.5)

- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_rprop(self):
self._test_rosenbrock(
lambda params: optim.Rprop(params, lr=1e-3),
@@ -469,7 +469,7 @@ def test_rprop(self):
with self.assertRaisesRegex(ValueError, "Invalid eta values: 1.0, 0.5"):
optim.Rprop(None, lr=1e-2, etas=(1.0, 0.5))

- @unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
+ @skipIfRocm
def test_lbfgs(self):
self._test_rosenbrock(
lambda params: optim.LBFGS(params),