From 627b80854cf902f44a0e83572d601a7bade60849 Mon Sep 17 00:00:00 2001
From: Johannes M Dieterich
Date: Sun, 26 Aug 2018 21:50:20 -0500
Subject: [PATCH 1/6] Make sure we don't push whitespace.

---
 aten/src/ATen/CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/aten/src/ATen/CMakeLists.txt b/aten/src/ATen/CMakeLists.txt
index 40b526ebacc162..f6d296dfe79e45 100644
--- a/aten/src/ATen/CMakeLists.txt
+++ b/aten/src/ATen/CMakeLists.txt
@@ -247,7 +247,7 @@ IF(USE_CUDA AND NOT USE_ROCM)
 ENDIF()
 IF(USE_ROCM)
- ### Link in the ROCm libraries BLAS / RNG .
+ ### Link in the ROCm libraries BLAS / RNG.
 FIND_LIBRARY(ROCBLAS_LIBRARY rocblas HINTS ${ROCBLAS_PATH}/lib)
 FIND_LIBRARY(HIPRAND_LIBRARY hiprand HINTS ${HIPRAND_PATH}/lib)

From a75f8e4f18d3d5925d86ab61cdfb5d6cf052db4c Mon Sep 17 00:00:00 2001
From: Johannes M Dieterich
Date: Sun, 26 Aug 2018 21:51:55 -0500
Subject: [PATCH 2/6] Remove macro duplication

---
 test/common.py | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/test/common.py b/test/common.py
index f2df87969efb67..13a41822ac5254 100644
--- a/test/common.py
+++ b/test/common.py
@@ -98,15 +98,6 @@ def _check_module_exists(name):
 import numpy

-def skipIfRocm(fn):
- @wraps(fn)
- def wrapper(*args, **kwargs):
- if TEST_WITH_ROCM:
- raise unittest.SkipTest("test doesn't currently work on the ROCm stack")
- else:
- fn(*args, **kwargs)
- return wrapper
-
 def skipIfRocm(fn):
 @wraps(fn)
 def wrapper(*args, **kwargs):

From e68953fe4b4f9acb3f702d1d3cdb675a5c090264 Mon Sep 17 00:00:00 2001
From: lcskrishna
Date: Mon, 27 Aug 2018 13:36:47 -0700
Subject: [PATCH 3/6] fixed flake8 issues

---
 test/common.py | 1 +
 test/common_nn.py | 44 ++++-----
 test/test_cuda.py | 218 +++++++++++++++++++++++++----------------
 test/test_legacy_nn.py | 91 ++++++++---------
 test/test_nn.py | 144 ++++++++++++++-------------
 5 files changed, 274 insertions(+), 224 deletions(-)

diff --git a/test/common.py b/test/common.py
index 13a41822ac5254..9cd0a3c8a773e5 100644
--- a/test/common.py
+++ b/test/common.py
@@ -107,6 +107,7 @@ def wrapper(*args, **kwargs):
 fn(*args, **kwargs)
 return wrapper

+
 def skipIfNoLapack(fn):
 @wraps(fn)
 def wrapper(*args, **kwargs):
diff --git a/test/common_nn.py b/test/common_nn.py
index c8f34af22a6413..b58d35a6d6350f 100644
--- a/test/common_nn.py
+++ b/test/common_nn.py
@@ -41,7 +41,7 @@ def get_weight(m):
 constructor_args=(10, 8),
 input_size=(4, 10),
 reference_fn=lambda i, p: torch.mm(i, p[0].t()) + p[1].view(1, -1).expand(4, 8),
- test_cuda = (not TEST_WITH_ROCM)
+ test_cuda=(not TEST_WITH_ROCM)
 ),
 dict(
 module_name='Linear',
@@ -103,20 +103,20 @@ def get_weight(m):
 constructor_args=(1,),
 input_size=(10, 20),
 reference_fn=lambda i, _: torch.exp(i).div(torch.exp(i).sum(1, True).expand(10, 20)),
- test_cuda = (not TEST_WITH_ROCM)
+ test_cuda=(not TEST_WITH_ROCM)
 ),
 dict(
 module_name='Softmax2d',
 input_size=(1, 3, 10, 20),
 reference_fn=lambda i, _: torch.exp(i).div(torch.exp(i).sum(1, False)),
- test_cuda = (not TEST_WITH_ROCM)
+ test_cuda=(not TEST_WITH_ROCM)
 ),
 dict(
 module_name='LogSoftmax',
 constructor_args=(1,),
 input_size=(10, 20),
 reference_fn=lambda i, _: torch.exp(i).div_(torch.exp(i).sum(1, True).expand(10, 20)).log_(),
- test_cuda = (not TEST_WITH_ROCM)
+ test_cuda=(not TEST_WITH_ROCM)
 ),
 dict(
 module_name='LogSoftmax',
 constructor_args=(1,),
 input_size=(1, 3, 10, 20),
 reference_fn=lambda i, _: torch.exp(i).div_(torch.exp(i).sum(1, False)).log_(),
 desc='multiparam',
- test_cuda = (not TEST_WITH_ROCM)
+ test_cuda=(not TEST_WITH_ROCM)
 ),
dict( module_name='ELU', @@ -204,14 +204,13 @@ def get_weight(m): input_size=(2, 3, 4), desc='1d_multiparam', reference_fn=lambda i, p: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='PReLU', input_size=(2, 3, 4, 5), desc='2d', reference_fn=lambda i, p: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], - #test_cuda = (not TEST_WITH_ROCM) ), dict( module_name='PReLU', @@ -219,14 +218,13 @@ def get_weight(m): input_size=(2, 3, 4, 5), desc='2d_multiparam', reference_fn=lambda i, p: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='PReLU', input_size=(2, 3, 4, 5, 6), reference_fn=lambda i, p: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], desc='3d', - #test_cuda = (not TEST_WITH_ROCM) ), dict( module_name='PReLU', @@ -234,31 +232,31 @@ def get_weight(m): input_size=(2, 3, 4, 5, 6), desc='3d_multiparam', reference_fn=lambda i, p: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='Softsign', input_size=(3, 2, 5), reference_fn=lambda i, _: i.div(1 + torch.abs(i)), - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='Softmin', constructor_args=(1,), input_size=(10, 20), - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='Softmin', constructor_args=(1,), input_size=(2, 3, 5, 10), desc='multidim', - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='Tanhshrink', input_size=(2, 3, 4, 5), - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), ] @@ -591,7 +589,7 @@ def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0 reference_fn=lambda i, t, m: -(t * i.log() + (1 - t) * (1 - i).log()).sum() / (i.numel() if get_reduction(m) else 1), check_gradgrad=False, - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='BCELoss', @@ -602,7 +600,7 @@ def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0 (i.numel() if get_reduction(m) else 1), desc='weights', check_gradgrad=False, - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='CrossEntropyLoss', @@ -623,7 +621,7 @@ def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0 reference_fn=lambda i, t, m: hingeembeddingloss_reference(i, t, reduction=get_reduction(m)), check_sum_reduction=True, - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='HingeEmbeddingLoss', @@ -634,7 +632,7 @@ def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0 hingeembeddingloss_reference(i, t, margin=0.5, reduction=get_reduction(m)), desc='margin', check_sum_reduction=True, - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='MultiLabelMarginLoss', @@ -661,7 +659,7 @@ def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0 target_fn=lambda: torch.rand(5, 10).mul(2).floor(), reference_fn=lambda i, t, m: -(t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log()).sum() / i.numel(), check_gradgrad=False, - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='MultiMarginLoss', @@ -740,7 +738,7 @@ def ctcloss_reference(log_probs, targets, input_lengths, 
target_lengths, blank=0 reference_fn=lambda i, t, m: cosineembeddingloss_reference(i[0], i[1], t, reduction=get_reduction(m)), check_sum_reduction=True, - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='CosineEmbeddingLoss', @@ -751,7 +749,7 @@ def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0 cosineembeddingloss_reference(i[0], i[1], t, margin=0.7, reduction=get_reduction(m)), desc='margin', check_sum_reduction=True, - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='MarginRankingLoss', @@ -760,7 +758,7 @@ def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0 reference_fn=lambda i, t, m: marginrankingloss_reference(i[0], i[1], t, reduction=get_reduction(m)), check_sum_reduction=True, - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='MarginRankingLoss', @@ -771,7 +769,7 @@ def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0 marginrankingloss_reference(i[0], i[1], t, margin=0.5, reduction=get_reduction(m)), desc='margin', check_sum_reduction=True, - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), ] diff --git a/test/test_cuda.py b/test/test_cuda.py index 05c180a66a48e0..678e036c9c25dd 100644 --- a/test/test_cuda.py +++ b/test/test_cuda.py @@ -248,14 +248,18 @@ def tmp(t): # - disable inplace test, if set to True, no inplace test will be done (default=False) # - decorator, e.g., unittest.skipIf (default is no decorator) tests = [ - ('add', small_3d, lambda t: [number(3.14, 3, t)], '', types, False, "skipIfByteTensor;skipIfCharTensor;skipIfHalfTensor;skipIfShortTensor"), + ('add', small_3d, lambda t: [number(3.14, 3, t)], '', types, False, + "skipIfByteTensor;skipIfCharTensor;skipIfHalfTensor;skipIfShortTensor"), ('add', small_3d, lambda t: [small_3d_positive(t)], 'tensor'), ('add', small_3d, lambda t: [number(0.2, 2, t), small_3d_positive(t)], 'scalar_tensor'), - ('sub', small_3d, lambda t: [number(3.14, 3, t)], '', types, False, "skipIfByteTensor;skipIfCharTensor;skipIfHalfTensor;skipIfShortTensor"), + ('sub', small_3d, lambda t: [number(3.14, 3, t)], '', types, False, + "skipIfByteTensor;skipIfCharTensor;skipIfHalfTensor;skipIfShortTensor"), ('sub', small_3d, lambda t: [small_3d_positive(t)], 'tensor'), - ('mul', small_3d, lambda t: [number(3.14, 3, t)],'', types, False, "skipIfByteTensor;skipIfCharTensor;skipIfHalfTensor;skipIfShortTensor"), + ('mul', small_3d, lambda t: [number(3.14, 3, t)], '', types, False, + "skipIfByteTensor;skipIfCharTensor;skipIfHalfTensor;skipIfShortTensor"), ('mul', small_3d, lambda t: [small_3d_positive(t)], 'tensor'), - ('div', small_3d, lambda t: [number(3.14, 3, t)],'', types, False, "skipIfByteTensor;skipIfCharTensor;skipIfFloatTensor;skipIfHalfTensor;skipIfShortTensor"), + ('div', small_3d, lambda t: [number(3.14, 3, t)], '', types, False, + "skipIfByteTensor;skipIfCharTensor;skipIfFloatTensor;skipIfHalfTensor;skipIfShortTensor"), ('div', small_3d, lambda t: [small_3d_positive(t)], 'tensor'), ('pow', small_3d, lambda t: [number(3.14, 3, t)], None, float_types, False, "skipIfHalfTensor"), ('pow', small_3d, lambda t: [number(1., 1, t)], 'pow1', types, False, "skipIfHalfTensor"), @@ -263,28 +267,41 @@ def tmp(t): ('pow', small_3d, lambda t: [number(3., 3, t)], 'pow3', types, False, "skipIfHalfTensor"), ('pow', small_3d, lambda t: [number(-1., -1, t)], 'pow-1', float_types, False, "skipIfHalfTensor"), # HalfTensor gives bad result at 
pow-2 with data sampled from torch.randn - ('pow', small_3d, lambda t: [number(-2., -2, t)], 'pow-2', float_types_no_half, False, "skipIfHalfTensor;skipIfFloatTensor"), + ('pow', small_3d, lambda t: [number(-2., -2, t)], 'pow-2', float_types_no_half, False, + "skipIfHalfTensor;skipIfFloatTensor"), ('pow', small_3d, lambda t: [tensor_abs_(small_3d(t))], 'tensor', float_types, False, "skipIfHalfTensor"), ('addbmm', small_2d, lambda t: [small_3d(t), small_3d(t)], None, float_types, False, "skipIfHalfTensor"), - ('addbmm', small_2d, lambda t: [number(0.4, 2, t), small_3d(t), small_3d(t)], 'scalar', types, False, "skipIfHalfTensor"), - ('addbmm', small_2d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), small_3d(t), small_3d(t)], 'two_scalars', types, False, "skipIfHalfTensor"), - ('baddbmm', small_3d, lambda t: [small_3d(t), small_3d(t)],'', types, False, "skipIfHalfTensor"), - ('baddbmm', small_3d, lambda t: [number(0.4, 2, t), small_3d(t), small_3d(t)], 'scalar', types, False, "skipIfHalfTensor"), - ('baddbmm', small_3d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), small_3d(t), small_3d(t)], 'two_scalars', types, False, "skipIfHalfTensor"), - ('addcdiv', small_2d_lapack, lambda t: [tensor_mul(small_2d_lapack(t), 2), small_2d_lapack(t)], '', types, False, "skipIfHalfTensor"), - ('addcdiv', small_2d_lapack, lambda t: [number(2.8, 1, t), - tensor_mul(small_2d_lapack(t), 2), small_2d_lapack(t)], 'scalar', types, False, "skipIfHalfTensor"), + ('addbmm', small_2d, lambda t: [number(0.4, 2, t), small_3d(t), small_3d(t)], 'scalar', types, + False, "skipIfHalfTensor"), + ('addbmm', small_2d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), small_3d(t), small_3d(t)], 'two_scalars', + types, False, "skipIfHalfTensor"), + ('baddbmm', small_3d, lambda t: [small_3d(t), small_3d(t)], '', types, False, "skipIfHalfTensor"), + ('baddbmm', small_3d, lambda t: [number(0.4, 2, t), small_3d(t), small_3d(t)], 'scalar', + types, False, "skipIfHalfTensor"), + ('baddbmm', small_3d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), small_3d(t), small_3d(t)], + 'two_scalars', types, False, "skipIfHalfTensor"), + ('addcdiv', small_2d_lapack, lambda t: [tensor_mul(small_2d_lapack(t), 2), small_2d_lapack(t)], '', + types, False, "skipIfHalfTensor"), + ('addcdiv', small_2d_lapack, lambda t: [number(2.8, 1, t), tensor_mul(small_2d_lapack(t), 2), small_2d_lapack(t)], + 'scalar', types, False, "skipIfHalfTensor"), ('addcmul', small_3d, lambda t: [small_3d(t), small_3d(t)], '', types, False, "skipIfHalfTensor"), - ('addcmul', small_3d, lambda t: [number(0.4, 2, t), small_3d(t), small_3d(t)], 'scalar', types, False, "skipIfHalfTensor"), - ('addmm', medium_2d, lambda t: [medium_2d(t), medium_2d(t)],'', types, False, "skipIfHalfTensor"), - ('addmm', medium_2d, lambda t: [number(0.4, 2, t), medium_2d(t), medium_2d(t)], 'scalar', types, False, "skipIfHalfTensor"), - ('addmm', medium_2d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), medium_2d(t), medium_2d(t)], 'two_scalars', types, False, "skipIfHalfTensor"), - ('addmv', medium_1d, lambda t: [medium_2d(t), medium_1d(t)],'', types, False, "skipIfHalfTensor"), - ('addmv', medium_1d, lambda t: [number(0.4, 2, t), medium_2d(t), medium_1d(t)], 'scalar', types, False, "skipIfHalfTensor"), - ('addmv', medium_1d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), medium_2d(t), medium_1d(t)], 'two_scalars', types, False, "skipIfHalfTensor"), - ('addr', medium_2d, lambda t: [medium_1d(t), medium_1d(t)],'', types, False, "skipIfHalfTensor"), - ('addr', medium_2d, lambda t: [number(0.4, 
2, t), medium_1d(t), medium_1d(t)], 'scalar', types, False, "skipIfHalfTensor"), - ('addr', medium_2d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), medium_1d(t), medium_1d(t)], 'two_scalars', types, False, "skipIfHalfTensor"), + ('addcmul', small_3d, lambda t: [number(0.4, 2, t), small_3d(t), small_3d(t)], + 'scalar', types, False, "skipIfHalfTensor"), + ('addmm', medium_2d, lambda t: [medium_2d(t), medium_2d(t)], '', types, False, "skipIfHalfTensor"), + ('addmm', medium_2d, lambda t: [number(0.4, 2, t), medium_2d(t), medium_2d(t)], + 'scalar', types, False, "skipIfHalfTensor"), + ('addmm', medium_2d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), medium_2d(t), medium_2d(t)], + 'two_scalars', types, False, "skipIfHalfTensor"), + ('addmv', medium_1d, lambda t: [medium_2d(t), medium_1d(t)], '', types, False, "skipIfHalfTensor"), + ('addmv', medium_1d, lambda t: [number(0.4, 2, t), medium_2d(t), medium_1d(t)], + 'scalar', types, False, "skipIfHalfTensor"), + ('addmv', medium_1d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), medium_2d(t), medium_1d(t)], + 'two_scalars', types, False, "skipIfHalfTensor"), + ('addr', medium_2d, lambda t: [medium_1d(t), medium_1d(t)], '', types, False, "skipIfHalfTensor"), + ('addr', medium_2d, lambda t: [number(0.4, 2, t), medium_1d(t), medium_1d(t)], + 'scalar', types, False, "skipIfHalfTensor"), + ('addr', medium_2d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), medium_1d(t), medium_1d(t)], + 'two_scalars', types, False, "skipIfHalfTensor"), ('atan2', medium_2d, lambda t: [medium_2d(t)], None, float_types + [torch.HalfTensor]), ('fmod', small_3d, lambda t: [3], 'value', types, False, "skipIfHalfTensor"), ('fmod', small_3d, lambda t: [small_3d_positive(t)], 'tensor'), @@ -296,15 +313,15 @@ def tmp(t): ('clone', medium_2d, lambda t: [],), ('contiguous', medium_2d, lambda t: [],), ('cross', new_t(M, 3, M), lambda t: [new_t(M, 3, M)(t)],), - ('cumprod', small_3d, lambda t: [1],'', types, False, "skipIfHalfTensor"), - ('cumprod', small_3d, lambda t: [-1], 'neg_dim',types, False, "skipIfHalfTensor"), - ('cumsum', small_3d, lambda t: [1],'', types, False, "skipIfHalfTensor"), + ('cumprod', small_3d, lambda t: [1], '', types, False, "skipIfHalfTensor"), + ('cumprod', small_3d, lambda t: [-1], 'neg_dim', types, False, "skipIfHalfTensor"), + ('cumsum', small_3d, lambda t: [1], '', types, False, "skipIfHalfTensor"), ('cumsum', small_3d, lambda t: [-1], 'neg_dim', types, False, "skipIfHalfTensor"), ('dim', small_3d, lambda t: [],), - ('dist', small_2d, lambda t: [small_2d(t)],'', types, False, "skipIfHalfTensor"), + ('dist', small_2d, lambda t: [small_2d(t)], '', types, False, "skipIfHalfTensor"), ('dist', small_2d, lambda t: [small_2d(t), 3], '3_norm', types, False, "skipIfHalfTensor"), ('dist', small_2d, lambda t: [small_2d(t), 2.5], '2_5_norm', types, False, "skipIfHalfTensor"), - ('dot', medium_1d, lambda t: [medium_1d(t)],'', types, False, "skipIfHalfTensor"), + ('dot', medium_1d, lambda t: [medium_1d(t)], '', types, False, "skipIfHalfTensor"), ('element_size', medium_1d, lambda t: [],), ('eq', small_3d_ones, lambda t: [small_3d(t)],), ('eq', small_3d_ones, lambda t: [small_3d_ones(t)], 'equal'), @@ -314,7 +331,7 @@ def tmp(t): ('equal', small_3d_ones, lambda t: [small_3d(t)],), ('expand', new_t(M, 1, M), lambda t: [M, 4, M],), ('expand_as', new_t(M, 1, M), lambda t: [new_t(M, 4, M)(t)],), - ('fill', medium_2d, lambda t: [number(3.14, 3, t)],'', types, False, "skipIfHalfTensor"), + ('fill', medium_2d, lambda t: [number(3.14, 3, t)], '', types, False, 
"skipIfHalfTensor"), ('ge', medium_2d, lambda t: [medium_2d(t)],), ('le', medium_2d, lambda t: [medium_2d(t)],), ('gt', medium_2d, lambda t: [medium_2d(t)],), @@ -328,30 +345,40 @@ def tmp(t): ('kthvalue', small_3d_unique, lambda t: [3],), ('kthvalue', small_3d_unique, lambda t: [3, 1], 'dim'), ('kthvalue', small_3d_unique, lambda t: [3, -1], 'neg_dim'), - ('lerp', small_3d, lambda t: [small_3d(t), 0.3],'', types, False, "skipIfHalfTensor"), - ('max', small_3d_unique, lambda t: [],'', types, False, "skipIfHalfTensor"), + ('lerp', small_3d, lambda t: [small_3d(t), 0.3], '', types, False, "skipIfHalfTensor"), + ('max', small_3d_unique, lambda t: [], '', types, False, "skipIfHalfTensor"), ('max', small_3d_unique, lambda t: [1], 'dim', types, False, - "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), - ('max', small_3d_unique, lambda t: [-1], 'neg_dim', types, False, - "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), + "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;\ + skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), + ('max', small_3d_unique, lambda t: [-1], 'neg_dim', types, False, + "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;\ + skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), ('max', medium_2d, lambda t: [medium_2d(t)], 'elementwise'), - ('min', small_3d_unique, lambda t: [],'', types, False, "skipIfHalfTensor"), - ('min', small_3d_unique, lambda t: [1], 'dim', types, False, - "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), - ('min', small_3d_unique, lambda t: [-1], 'neg_dim', types, False, - "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), + ('min', small_3d_unique, lambda t: [], '', types, False, "skipIfHalfTensor"), + ('min', small_3d_unique, lambda t: [1], 'dim', types, False, + "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;\ + skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), + ('min', small_3d_unique, lambda t: [-1], 'neg_dim', types, False, + "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;\ + skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), ('min', medium_2d, lambda t: [medium_2d(t)], 'elementwise'), ('mean', small_3d, lambda t: [], '', types, False, "skipIfHalfTensor"), - ('mean', small_3d, lambda t: [-1], 'neg_dim', types, False, "skipIfDoubleTensor;skipIfFloatTensor;skipIfHalfTensor"), + ('mean', small_3d, lambda t: [-1], 'neg_dim', types, False, + "skipIfDoubleTensor;skipIfFloatTensor;skipIfHalfTensor"), ('mean', small_3d, lambda t: [1], 'dim', types, False, "skipIfDoubleTensor;skipIfFloatTensor;skipIfHalfTensor"), - ('mode', small_3d, lambda t: [],'', types, False, - "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), - ('mode', small_3d, lambda t: [1], 'dim', types, False, - "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), - ('mode', small_3d, lambda t: [-1], 'neg_dim', types, False, - 
"skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), - ('mvlgamma', lambda t: tensor_clamp(small_2d(t), 0.1, 10), lambda t: [1], '2d_p=1', float_types_no_half, False, "skipIfDoubleTensor;skipIfFloatTensor"), - ('mvlgamma', lambda t: tensor_clamp(small_2d(t), 0.6, 10), lambda t: [2], '2d_p=2', float_types_no_half, False, "skipIfDoubleTensor;skipIfFloatTensor"), + ('mode', small_3d, lambda t: [], '', types, False, + "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;\ + skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), + ('mode', small_3d, lambda t: [1], 'dim', types, False, + "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;\ + skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), + ('mode', small_3d, lambda t: [-1], 'neg_dim', types, False, + "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;\ + skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), + ('mvlgamma', lambda t: tensor_clamp(small_2d(t), 0.1, 10), lambda t: [1], '2d_p=1', + float_types_no_half, False, "skipIfDoubleTensor;skipIfFloatTensor"), + ('mvlgamma', lambda t: tensor_clamp(small_2d(t), 0.6, 10), lambda t: [2], '2d_p=2', + float_types_no_half, False, "skipIfDoubleTensor;skipIfFloatTensor"), ('remainder', small_3d, lambda t: [3], 'value', types, False, "skipIfHalfTensor"), ('remainder', small_3d, lambda t: [-3], 'negative_value', signed_types), ('remainder', small_3d, lambda t: [small_3d_positive(t)], 'tensor'), @@ -367,45 +394,61 @@ def tmp(t): ('numel', small_3d, lambda t: [],), ('narrow', small_3d, lambda t: [1, 3, 2],), ('narrow', small_3d, lambda t: [-1, 3, 2], 'neg_dim'), - ('nonzero', small_3d, lambda t: [], '', types, False, - "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), - ('norm', small_3d, lambda t: [],'', types, False, "skipIfHalfTensor"), + ('nonzero', small_3d, lambda t: [], '', types, False, + "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;\ + skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), + ('norm', small_3d, lambda t: [], '', types, False, "skipIfHalfTensor"), ('norm', small_3d, lambda t: [3], '3_norm', types, False, "skipIfHalfTensor"), - ('norm', small_3d, lambda t: [3, 0], '3_norm_dim', types, False, "skipIfHalfTensor;skipIfDoubleTensor;skipIfFloatTensor"), - ('norm', small_3d, lambda t: [3, -2], '3_norm_neg_dim', types, False, "skipIfHalfTensor;skipIfDoubleTensor;skipIfFloatTensor"), + ('norm', small_3d, lambda t: [3, 0], '3_norm_dim', types, False, + "skipIfHalfTensor;skipIfDoubleTensor;skipIfFloatTensor"), + ('norm', small_3d, lambda t: [3, -2], '3_norm_neg_dim', types, False, + "skipIfHalfTensor;skipIfDoubleTensor;skipIfFloatTensor"), ('ones', small_3d, lambda t: [1, 2, 3, 4, 5],), ('permute', new_t(1, 2, 3, 4), lambda t: [2, 1, 3, 0],), - ('put_', new_t(2, 5, 3), lambda t: [long_type(t)([[0], [-2]]), t([[3], [4]])], '', types, False, - "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), + ('put_', new_t(2, 5, 3), lambda t: [long_type(t)([[0], [-2]]), t([[3], [4]])], '', types, False, + "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;\ + skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), ('put_', new_t(2, 3), lambda t: 
[long_type(t)([]), t([])], 'empty'), ('put_', new_t(2, 2), lambda t: [long_type(t)([[1], [-3]]), t([[1], [2]]), True], 'accumulate'), ('prod', small_2d_oneish, lambda t: [], '', types, False, "skipIfHalfTensor"), - ('prod', small_3d, lambda t: [1], 'dim', types, False, - "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), - ('prod', small_3d, lambda t: [-1], 'neg_dim', types, False, - "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), + ('prod', small_3d, lambda t: [1], 'dim', types, False, + "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;\ + skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), + ('prod', small_3d, lambda t: [-1], 'neg_dim', types, False, + "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;\ + skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), ('sum', small_2d, lambda t: [], '', types, False, "skipIfHalfTensor"), - ('sum', small_3d, lambda t: [1], 'dim', types, False, - "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), + ('sum', small_3d, lambda t: [1], 'dim', types, False, + "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;\ + skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), ('sum', small_3d, lambda t: [-1], 'neg_dim', types, False, - "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), - ('renorm', small_3d, lambda t: [2, 1, 1], '2_norm', types, False, "skipIfHalfTensor;skipIfDoubleTensor;skipIfFloatTensor"), - ('renorm', small_3d, lambda t: [2, -1, 1], '2_norm_neg_dim', types, False, "skipIfHalfTensor;skipIfDoubleTensor;skipIfFloatTensor"), - ('renorm', small_3d, lambda t: [1.5, 1, 1], '1_5_norm', types, False, "skipIfHalfTensor;skipIfDoubleTensor;skipIfFloatTensor"), + "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;\ + skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), + ('renorm', small_3d, lambda t: [2, 1, 1], '2_norm', types, False, + "skipIfHalfTensor;skipIfDoubleTensor;skipIfFloatTensor"), + ('renorm', small_3d, lambda t: [2, -1, 1], '2_norm_neg_dim', types, False, + "skipIfHalfTensor;skipIfDoubleTensor;skipIfFloatTensor"), + ('renorm', small_3d, lambda t: [1.5, 1, 1], '1_5_norm', types, False, + "skipIfHalfTensor;skipIfDoubleTensor;skipIfFloatTensor"), ('repeat', small_2d, lambda t: [2, 2, 2],), ('size', new_t(1, 2, 3, 4), lambda t: [],), ('size', new_t(1, 2, 3, 4), lambda t: [1], 'dim'), ('size', new_t(1, 2, 3, 4), lambda t: [-2], 'neg_dim'), - ('sort', small_3d_unique, lambda t: [],'', types, False, - "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), - ('sort', small_3d_unique, lambda t: [1], 'dim', types, False, - "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), - ('sort', small_3d_unique, lambda t: [-1], 'neg_dim', types, False, - "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), - ('sort', small_3d_unique, lambda t: [1, True], 'dim_descending', types, 
False, - "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), - ('sort', small_3d_unique, lambda t: [-1, True], 'neg_dim_descending', types, False, - "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), + ('sort', small_3d_unique, lambda t: [], '', types, False, + "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;\ + skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), + ('sort', small_3d_unique, lambda t: [1], 'dim', types, False, + "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;\ + skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), + ('sort', small_3d_unique, lambda t: [-1], 'neg_dim', types, False, + "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;\ + skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), + ('sort', small_3d_unique, lambda t: [1, True], 'dim_descending', types, False, + "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;\ + skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), + ('sort', small_3d_unique, lambda t: [-1, True], 'neg_dim_descending', types, False, + "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;\ + skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), ('split', small_3d, lambda t: [2],), ('split', small_3d, lambda t: [2, 1], 'dim'), ('split', small_3d, lambda t: [2, -3], 'neg_dim'), @@ -413,17 +456,21 @@ def tmp(t): ('squeeze', new_t(1, 2, 1, 4), lambda t: [2], 'dim'), ('squeeze', new_t(1, 2, 1, 4), lambda t: [-2], 'neg_dim'), ('t', new_t(1, 2), lambda t: [],), - ('take', new_t(3, 4), lambda t: [long_type(t)([[0], [-2]])],'', types, False, - "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), + ('take', new_t(3, 4), lambda t: [long_type(t)([[0], [-2]])], '', types, False, + "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;\ + skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), ('transpose', new_t(1, 2, 3, 4), lambda t: [1, 2],), ('transpose', new_t(1, 2, 3, 4), lambda t: [-1, -2], 'neg_dim'), ('to_list', small_3d, lambda t: [],), - ('topk', small_3d_unique, lambda t: [2, 1, False, True], 'dim_sort', types, False, - "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor" ), - ('topk', small_3d_unique, lambda t: [2, -1, False, True], 'neg_dim_sort', types, False, - "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), - ('topk', small_3d_unique, lambda t: [2, 1, True, True], 'dim_desc_sort', types, False, - "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfHalfTensor;skipIfLongTensor;skipIfFloatTensor;skipIfIntTensor;skipIfShortTensor"), + ('topk', small_3d_unique, lambda t: [2, 1, False, True], 'dim_sort', types, False, + "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;\ + skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), + ('topk', small_3d_unique, lambda t: [2, -1, False, True], 'neg_dim_sort', types, False, + "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;skipIfFloatTensor;\ + 
skipIfHalfTensor;skipIfIntTensor;skipIfLongTensor;skipIfShortTensor"), + ('topk', small_3d_unique, lambda t: [2, 1, True, True], 'dim_desc_sort', types, False, + "skipIfByteTensor;skipIfCharTensor;skipIfDoubleTensor;\ + skipIfHalfTensor;skipIfLongTensor;skipIfFloatTensor;skipIfIntTensor;skipIfShortTensor"), ('trace', medium_2d, lambda t: [], '', types, False, "skipIfHalfTensor"), ('tril', medium_2d, lambda t: [],), ('tril', medium_2d_expanded, lambda t: [], 'zero_stride', types, True), @@ -1602,7 +1649,7 @@ def test_index(self): @skipIfRocm def test_advancedindex(self): TestTorch._test_advancedindex(self, lambda t: t.cuda()) - + @skipIfRocm def test_advancedindex_mixed_cpu_cuda(self): def test(x, ia, ib): @@ -1859,7 +1906,7 @@ def test_nvtx(self): torch.cuda.nvtx.range_push("foo") torch.cuda.nvtx.mark("bar") torch.cuda.nvtx.range_pop() - + @skipIfRocm def test_randperm_cuda(self): cuda = torch.device('cuda:0') @@ -1964,7 +2011,6 @@ def load_ignore_file(): def generate_tests(): for decl in tests: - #print (decl) for t in types: tensor = t() diff --git a/test/test_legacy_nn.py b/test/test_legacy_nn.py index f37b34cd43603f..b446920c4fec65 100644 --- a/test/test_legacy_nn.py +++ b/test/test_legacy_nn.py @@ -67,26 +67,26 @@ def _do_test(self, test_case, module, input): input_size=(3, 5, 4), reference_fn=lambda i, _: i + 3.5, check_inplace=True, - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.BatchNormalization, constructor_args=(10,), input_size=(4, 10), desc='affine', - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.BatchNormalization, constructor_args=(10, 1e-3, 0.3, False), input_size=(4, 10), desc='not_affine', - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.SpatialBatchNormalization, constructor_args=(3,), input_size=(2, 3, 6, 6), - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.SpatialBatchNormalization, constructor_args=(3, 1e-3, 0.8), input_size=(2, 3, 6, 6), desc='momentum', - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.SpatialBatchNormalization, constructor_args=(3, 1e-3, 0.8, False), input_size=(2, 3, 6, 6), @@ -94,12 +94,12 @@ def _do_test(self, test_case, module, input): OldModuleTest(nn.VolumetricBatchNormalization, constructor_args=(3,), input_size=(2, 3, 4, 4, 4), - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.VolumetricBatchNormalization, constructor_args=(3, 1e-3, 0.7), input_size=(2, 3, 4, 4, 4), desc='momentum', - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.VolumetricBatchNormalization, constructor_args=(3, 1e-3, 0.7, False), input_size=(2, 3, 4, 4, 4), @@ -108,25 +108,25 @@ def _do_test(self, test_case, module, input): constructor_args=(5, 6), input_size=(10, 5, 6), desc='3D', - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.CMul, constructor_args=(50, 4), input_size=(1, 50, 4), desc='3D_single_example', - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.CMul, constructor_args=(1, 5), input_fn=lambda: torch.randn(10, 3, 5)[:, 1], desc='3D_noncontiguous', - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.Exp, input_size=(2, 3, 4), reference_fn=lambda i, _: i.exp(), - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.Log, input_fn=lambda: 
torch.rand(2, 3, 2) + 0.1, reference_fn=lambda i, _: i.log(), - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.Clamp, constructor_args=(-2., 5.), input_fn=lambda: torch.randn(3, 2, 50) * 6, @@ -134,40 +134,40 @@ def _do_test(self, test_case, module, input): OldModuleTest(nn.Abs, input_size=(3, 20, 5), reference_fn=lambda i, _: i.abs(), - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.Bilinear, constructor_args=(2, 3, 10), input_size=[(4, 2), (4, 3)], - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.Bilinear, constructor_args=(5, 4, 2), input_size=[(2, 5), (2, 4)], desc='small_output', - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.Euclidean, constructor_args=(5, 7), input_size=(10, 5), - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.WeightedEuclidean, constructor_args=(5, 7), input_size=(10, 5), - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.Cosine, constructor_args=(5, 7), input_size=(10, 5), - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.CAddTable, input_size=[(5, 7), (5, 7)], - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.CSubTable, input_size=[(5, 7), (5, 7)], - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.CDivTable, input_fn=lambda: [torch.randn(1, 7), torch.rand(1, 7) + 0.1], - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.CMulTable, input_size=[(5, 7), (5, 7)], - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.Square, input_size=(10, 2, 4), reference_fn=lambda i, _: i.mul(i)), @@ -215,22 +215,22 @@ def _do_test(self, test_case, module, input): constructor_args=(1,), input_size=(2, 4, 5), reference_fn=lambda i, _: i.sum(1, keepdim=False), - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.Sum, constructor_args=(1, True), input_size=(2, 4, 5), reference_fn=lambda i, _: i.sum(1, keepdim=False).div(i.size(1)), desc='sizeAverage', - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.Mean, constructor_args=(1,), input_size=(2, 4, 5), reference_fn=lambda i, _: torch.mean(i, 1, keepdim=False), - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(lambda: nn.Sequential().add(nn.GradientReversal()).add(nn.GradientReversal()), input_size=(4, 3, 2, 2), fullname='GradientReversal', - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.Identity, input_size=(4, 3, 2, 4), reference_fn=lambda i, _: i), @@ -238,13 +238,13 @@ def _do_test(self, test_case, module, input): input_size=[(10, 4), (10, 4)], reference_fn=lambda i, _: torch.Tensor(list( a.dot(b) for a, b in zip(i[0], i[1]))), - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), OldModuleTest(nn.CosineDistance, input_size=[(10, 4), (10, 4)], reference_fn=lambda i, _: torch.Tensor(list( a.dot(b) / (a.norm(2) * b.norm(2)) for a, b in zip(i[0], i[1]))), - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), OldModuleTest(nn.JoinTable, constructor_args=(0,), @@ -285,22 +285,22 @@ def _do_test(self, test_case, module, input): desc='with_dimension'), OldModuleTest(nn.MixtureTable, input_size=[(5, 3), (5, 3, 6)], - test_cuda = (not 
TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.LookupTable, constructor_args=(4, 3), input_fn=lambda: torch.randperm(2).repeat(1, 2), jacobian_input=False, - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.Mul, input_size=(2, 3, 4, 2), reference_fn=lambda i, p: i * p[0][0], - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.MulConstant, constructor_args=(4,), input_size=(2, 3, 4, 2), reference_fn=lambda i, _: i * 4, check_inplace=True, - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.Narrow, constructor_args=(0, 0), input_size=(2, 3, 4, 2), @@ -324,7 +324,7 @@ def _do_test(self, test_case, module, input): constructor_args=(2, 1), input_size=(10, 3, 4, 5), reference_fn=lambda i, _: i.view(10, 1, 3, 4, 5).expand(10, 2, 3, 4, 5), - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.Padding, constructor_args=(0, 2, -10), input_size=(2, 3, 4, 5)), @@ -339,20 +339,20 @@ def _do_test(self, test_case, module, input): OldModuleTest(nn.PartialLinear, constructor_args=(5, 6), input_size=(4, 5), - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(lambda: nn.PartialLinear(5, 6).setPartition(torch.Tensor((2, 4))), input_size=(4, 5), fullname='PartialLinear_setPartition', - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.Power, constructor_args=(2,), input_size=(2, 3, 4, 5), - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.Power, constructor_args=(1.5,), input_fn=lambda: torch.rand(3, 4, 5), desc='fractional', - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.Reshape, constructor_args=(4, 5), input_size=(3, 4 * 5), @@ -413,11 +413,11 @@ def _do_test(self, test_case, module, input): OldModuleTest(nn.SpatialDivisiveNormalization, constructor_args=(3,), input_size=(2, 3, 8, 8), - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.SpatialContrastiveNormalization, constructor_args=(3,), input_size=(2, 3, 8, 8), - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.SpatialDilatedConvolution, constructor_args=(3, 2, 3, 3, 2, 2, 1, 1, 2, 2), input_size=(2, 3, 8, 8)), @@ -476,14 +476,14 @@ def _do_test(self, test_case, module, input): OldModuleTest(nn.SpatialLPPooling, constructor_args=(3, 2, 2, 2, 2, 2), input_size=(1, 3, 7, 7), - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.SpatialSubSampling, constructor_args=(3, 3, 3, 2, 2), input_size=(1, 3, 7, 7)), OldModuleTest(nn.SpatialSubtractiveNormalization, constructor_args=(3,), input_size=(1, 3, 7, 7), - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), OldModuleTest(nn.SpatialSubtractiveNormalization, constructor_args=(3, torch.rand(3)), input_size=(1, 3, 7, 7), @@ -563,7 +563,7 @@ def _do_test(self, test_case, module, input): constructor_args_fn=lambda: (torch.rand(3, 4, 5),), input_size=(2, 3, 4, 5), target_size=(2, 3, 4, 5), - test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), CriterionTest(nn.MarginCriterion, input_size=(5, 10), target_fn=lambda: torch.randn(5, 10).sign()), @@ -587,7 +587,7 @@ def _do_test(self, test_case, module, input): # Eh, we need to use p as a default, so it's passed by value reference_fn=lambda i, _, p=p: i.div(i.norm(p, 1, True).expand_as(i)), desc=str(p), - 
test_cuda = (not TEST_WITH_ROCM)), + test_cuda=(not TEST_WITH_ROCM)), ) for p in range(1, 4 + 1): tests.append( @@ -595,7 +595,7 @@ def _do_test(self, test_case, module, input): constructor_args=(p,), input_size=[(4, 10), (4, 10)], desc=str(p), - test_cuda = (not TEST_WITH_ROCM)) + test_cuda=(not TEST_WITH_ROCM)) ) @@ -658,7 +658,8 @@ def add_test(test): } for test in tests: name = test.get_name() - if ((name == "test_Max" or name == "test_Min" or name == "test_Max_with_dimension" or name == "test_Min_with_dimension") and TEST_WITH_ROCM): + if ((name == "test_Max" or name == "test_Min" or name == "test_Max_with_dimension" or + name == "test_Min_with_dimension") and TEST_WITH_ROCM): continue add_test(test) for test_params in module_tests: diff --git a/test/test_nn.py b/test/test_nn.py index e5ab6303cdb3c8..fbdf83fba96003 100644 --- a/test/test_nn.py +++ b/test/test_nn.py @@ -50,7 +50,7 @@ torch.double] if not TEST_WITH_ROCM: - ALL_TENSORTYPES.append(torch.half) + ALL_TENSORTYPES.append(torch.half) NO_HALF_TENSORTYPES = [torch.float, torch.double] @@ -361,22 +361,22 @@ def _do_test(self, test_case, module, input): test_case.assertEqual(p.get_device(), 1) if not self.skip_double: - # test double() - input = input.double().cuda() - module.double().cuda() - module(input) - for p in module.parameters(): - test_case.assertIsInstance(p, torch.cuda.DoubleTensor) - test_case.assertEqual(p.get_device(), 0) + # test double() + input = input.double().cuda() + module.double().cuda() + module(input) + for p in module.parameters(): + test_case.assertIsInstance(p, torch.cuda.DoubleTensor) + test_case.assertEqual(p.get_device(), 0) if not TEST_WITH_ROCM: - # test half() - input = input.half().cuda() - module.half().cuda() - module(input) - for p in module.parameters(): - test_case.assertIsInstance(p, torch.cuda.HalfTensor) - test_case.assertEqual(p.get_device(), 0) + # test half() + input = input.half().cuda() + module.half().cuda() + module(input) + for p in module.parameters(): + test_case.assertIsInstance(p, torch.cuda.HalfTensor) + test_case.assertEqual(p.get_device(), 0) def _get_target(self): return self._get_arg('target', False) @@ -5030,6 +5030,7 @@ def test_grid_sample_error_checking(self): if TEST_CUDA: with self.assertRaisesRegex(RuntimeError, "expected input and grid to be on same device"): F.grid_sample(input.cuda(), grid) + @skipIfRocm def test_grid_sample(self): def test(N, C, H, W, mode, padding_mode): @@ -5177,6 +5178,7 @@ def test_shape(N, C, IH, IW, H, W, mode, padding_mode): if TEST_CUDNN: with cudnn.flags(enabled=False): test(N, C, H, W, mode, padding_mode) + @skipIfRocm def test_grid_sample_3d(self): def test(N, C, D, H, W, mode, padding_mode): @@ -5290,6 +5292,7 @@ def test_shape(N, C, ID, IH, IW, D, H, W, mode, padding_mode): (input, grid))) test(N, C, D, H, W, mode, padding_mode) + @skipIfRocm def test_affine_grid(self): # test known input on CPU @@ -6525,9 +6528,10 @@ def add(test_name, fn): test=test, kwargs=kwargs: test.test_cuda(self, dtype=torch.float, **kwargs)) add(cuda_test_name + '_double', lambda self, test=test, kwargs=kwargs: test.test_cuda(self, dtype=torch.double, **kwargs)) + @skipIfRocm def test_half(self, test=test, kwargs=kwargs): - test.test_cuda(self, dtype=torch.half, **kwargs) + test.test_cuda(self, dtype=torch.half, **kwargs) if getattr(test, 'check_half', True): add(cuda_test_name + '_half', test_half) else: @@ -6571,7 +6575,7 @@ def forward(self, *args): loss_reference_fns['NLLLossNd'](i, t, reduction=get_reduction(m)), check_sum_reduction=True, desc='2d', - 
test_cuda = (not TEST_WITH_ROCM), + test_cuda=(not TEST_WITH_ROCM), ), dict( module_name='NLLLoss', @@ -6581,7 +6585,7 @@ def forward(self, *args): reference_fn=lambda i, t, m: loss_reference_fns['NLLLossNd'](i, t, weight=get_weight(m)), desc='2d_weights', - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='NLLLoss', @@ -6591,7 +6595,7 @@ def forward(self, *args): reference_fn=lambda i, t, m: loss_reference_fns['NLLLossNd'](i, t, ignore_index=1), desc='2d_ignore_index', - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='NLLLoss', @@ -6601,7 +6605,7 @@ def forward(self, *args): loss_reference_fns['NLLLossNd'](i, t, reduction=get_reduction(m)), check_sum_reduction=True, desc='higher_dim', - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='NLLLoss', @@ -6611,14 +6615,14 @@ def forward(self, *args): loss_reference_fns['NLLLossNd'](i, t, reduction=get_reduction(m)), check_sum_reduction=True, desc='dim_is_3', - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='PoissonNLLLoss', input_size=(2, 3, 4, 5), target_fn=lambda: torch.randn(2, 3, 4, 5).floor_().abs_(), desc='no_full_loss', # without sterling approx - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='PoissonNLLLoss', @@ -6626,7 +6630,7 @@ def forward(self, *args): input_fn=lambda: torch.randn(2, 3, 4, 5).abs_().add_(0.001), target_fn=lambda: torch.randn(2, 3, 4, 5).floor_().abs_(), desc='full_loss', # with sterling approx - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='L1Loss', @@ -6699,7 +6703,7 @@ def forward(self, *args): desc='weights', check_sum_reduction=True, check_gradgrad=False, - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='CTCLoss', @@ -6768,7 +6772,7 @@ def poissonnllloss_no_reduce_test(): constructor=wrap_functional( lambda i: F.poisson_nll_loss(i, t.type_as(i), reduction='none')), input_fn=lambda: torch.rand(10, 10), - pickle=False, test_cuda = (not TEST_WITH_ROCM)) + pickle=False, test_cuda=(not TEST_WITH_ROCM)) def bceloss_no_reduce_test(): @@ -6780,7 +6784,7 @@ def bceloss_no_reduce_test(): input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2), reference_fn=lambda i, m: -(t * i.log() + (1 - t) * (1 - i).log()), check_gradgrad=False, - pickle=False, test_cuda = (not TEST_WITH_ROCM)) + pickle=False, test_cuda=(not TEST_WITH_ROCM)) def bceloss_no_reduce_scalar_test(): @@ -6806,7 +6810,7 @@ def bceloss_weights_no_reduce_test(): input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2), reference_fn=lambda i, m: -(t * i.log() + (1 - t) * (1 - i).log()) * weights, check_gradgrad=False, - pickle=False, test_cuda = (not TEST_WITH_ROCM)) + pickle=False, test_cuda=(not TEST_WITH_ROCM)) def bceloss_weights_no_reduce_scalar_test(): @@ -6860,7 +6864,7 @@ def kldivloss_with_target_no_reduce_test(): input_fn=lambda: torch.rand(10, 10), reference_fn=lambda t, _: loss_reference_fns['KLDivLoss'](i.type_as(t), t, reduction='none'), - pickle=False, test_cuda = (not TEST_WITH_ROCM)) + pickle=False, test_cuda=(not TEST_WITH_ROCM)) def kldivloss_no_reduce_test(): @@ -6895,7 +6899,7 @@ def l1loss_no_reduce_test(): lambda i: F.l1_loss(i, t.type_as(i), reduction='none')), input_fn=lambda: torch.randn(2, 3, 4), reference_fn=lambda i, m: (i - t.type_as(i)).abs(), - pickle=False, test_cuda = (not TEST_WITH_ROCM)) + pickle=False, test_cuda=(not TEST_WITH_ROCM)) def 
l1loss_no_reduce_scalar_test(): @@ -7107,7 +7111,7 @@ def smoothl1loss_no_reduce_test(): input_fn=lambda: torch.randn(2, 3, 4), reference_fn=lambda i, _: loss_reference_fns['SmoothL1Loss'](i, t.type_as(i), reduction='none'), - pickle=False, test_cuda = (not TEST_WITH_ROCM)) + pickle=False, test_cuda=(not TEST_WITH_ROCM)) def smoothl1loss_no_reduce_scalar_test(): @@ -7199,7 +7203,7 @@ def softmarginloss_no_reduce_test(): input_fn=lambda: torch.randn(5, 5), reference_fn=lambda i, _: loss_reference_fns['SoftMarginLoss'](i, t.type_as(i), reduction='none'), - pickle=False, test_cuda = (not TEST_WITH_ROCM)) + pickle=False, test_cuda=(not TEST_WITH_ROCM)) def multilabelsoftmarginloss_no_reduce_test(): @@ -7212,7 +7216,7 @@ def multilabelsoftmarginloss_no_reduce_test(): reference_fn=lambda i, m: (-(t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log())).sum(dim=1) / i.size(1), check_gradgrad=False, - pickle=False, decorator = skipIfRocm) + pickle=False, decorator=skipIfRocm) def multilabelsoftmarginloss_weights_no_reduce_test(): @@ -7228,7 +7232,7 @@ def multilabelsoftmarginloss_weights_no_reduce_test(): (-(t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log()) * weights).sum(dim=1) / i.size(1), check_sum_reduction=True, check_gradgrad=False, - pickle=False, decorator = skipIfRocm) + pickle=False, decorator=skipIfRocm) def multimarginloss_no_reduce_test(): @@ -7505,7 +7509,7 @@ def multimarginloss_weights_no_reduce_test(): input_size=(4, 3, 15), cudnn=True, check_eval=True, - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='InstanceNorm1d', @@ -7514,7 +7518,7 @@ def multimarginloss_weights_no_reduce_test(): cudnn=True, check_eval=True, desc='tracking_stats', - decorator = skipIfRocm + decorator=skipIfRocm ), dict( module_name='InstanceNorm2d', @@ -7522,7 +7526,7 @@ def multimarginloss_weights_no_reduce_test(): input_size=(2, 3, 6, 6), cudnn=True, check_eval=True, - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='InstanceNorm2d', @@ -7531,7 +7535,7 @@ def multimarginloss_weights_no_reduce_test(): cudnn=True, check_eval=True, desc='tracking_stats', - decorator = skipIfRocm + decorator=skipIfRocm ), dict( module_name='InstanceNorm3d', @@ -7539,7 +7543,7 @@ def multimarginloss_weights_no_reduce_test(): input_size=(2, 3, 4, 4, 4), cudnn=True, check_eval=True, - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='InstanceNorm3d', @@ -7548,7 +7552,7 @@ def multimarginloss_weights_no_reduce_test(): cudnn=True, check_eval=True, desc='tracking_stats', - decorator = skipIfRocm + decorator=skipIfRocm ), dict( module_name='LayerNorm', @@ -7557,7 +7561,7 @@ def multimarginloss_weights_no_reduce_test(): cudnn=True, check_eval=True, desc='1d_elementwise_affine', - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='LayerNorm', @@ -7566,7 +7570,7 @@ def multimarginloss_weights_no_reduce_test(): cudnn=True, check_eval=True, desc='1d_no_elementwise_affine', - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='LayerNorm', @@ -7575,7 +7579,7 @@ def multimarginloss_weights_no_reduce_test(): cudnn=True, check_eval=True, desc='3d_elementwise_affine', - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='LayerNorm', @@ -7584,7 +7588,7 @@ def multimarginloss_weights_no_reduce_test(): cudnn=True, check_eval=True, desc='3d_no_elementwise_affine', - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not 
TEST_WITH_ROCM) ), dict( module_name='GroupNorm', @@ -7593,7 +7597,7 @@ def multimarginloss_weights_no_reduce_test(): cudnn=True, check_eval=True, desc='1d_affine', - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='GroupNorm', @@ -7602,7 +7606,7 @@ def multimarginloss_weights_no_reduce_test(): cudnn=True, check_eval=True, desc='1d_no_affine_IN', # this setting is equivalent with InstanceNormi - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='GroupNorm', @@ -7611,7 +7615,7 @@ def multimarginloss_weights_no_reduce_test(): cudnn=True, check_eval=True, desc='1d_no_affine_LN', # this setting is equivalent with LayerNorm - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='GroupNorm', @@ -7620,7 +7624,7 @@ def multimarginloss_weights_no_reduce_test(): cudnn=True, check_eval=True, desc='2d_affine', - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='GroupNorm', @@ -7629,7 +7633,7 @@ def multimarginloss_weights_no_reduce_test(): cudnn=True, check_eval=True, desc='2d_no_affine_IN', # this setting is equivalent with InstanceNorm - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='GroupNorm', @@ -7638,7 +7642,7 @@ def multimarginloss_weights_no_reduce_test(): cudnn=True, check_eval=True, desc='2d_no_affine_LN', # this setting is equivalent with LayerNorm - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='Conv1d', @@ -7927,21 +7931,21 @@ def multimarginloss_weights_no_reduce_test(): constructor_args=(3, ), input_size=(1, 5, 7), desc='1d', - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='LocalResponseNorm', constructor_args=(2, ), input_size=(1, 5, 7, 7), desc='2d_uneven_pad', - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='LocalResponseNorm', constructor_args=(1, 1, 0.5, 2), input_size=(1, 5, 7, 7, 7), desc='3d_custom_params', - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='ReflectionPad1d', @@ -8131,8 +8135,8 @@ def multimarginloss_weights_no_reduce_test(): jacobian_input=False, check_gradgrad=False, desc='mean', - test_cuda = (not TEST_WITH_ROCM), - decorator = skipIfRocm + test_cuda=(not TEST_WITH_ROCM), + decorator=skipIfRocm ), dict( module_name='EmbeddingBag', @@ -8141,8 +8145,8 @@ def multimarginloss_weights_no_reduce_test(): jacobian_input=False, check_gradgrad=False, desc='sum', - test_cuda = (not TEST_WITH_ROCM), - decorator = skipIfRocm + test_cuda=(not TEST_WITH_ROCM), + decorator=skipIfRocm ), dict( module_name='EmbeddingBag', @@ -8158,8 +8162,8 @@ def multimarginloss_weights_no_reduce_test(): input_fn=lambda: torch.randperm(2).repeat(1, 2), jacobian_input=False, check_gradgrad=False, - decorator = skipIfRocm, - test_cuda = (not TEST_WITH_ROCM) + decorator=skipIfRocm, + test_cuda=(not TEST_WITH_ROCM) ), dict( constructor=lambda: nn.Embedding(4, 3, sparse=True), @@ -8167,7 +8171,7 @@ def multimarginloss_weights_no_reduce_test(): jacobian_input=False, fullname='Embedding_sparse', check_gradgrad=False, - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( constructor=lambda: nn.FractionalMaxPool2d( @@ -8471,35 +8475,35 @@ def multimarginloss_weights_no_reduce_test(): dict( module_name='GLU', input_size=(5, 6), - test_cuda = (not TEST_WITH_ROCM) + test_cuda=(not TEST_WITH_ROCM) ), dict( module_name='GLU', 
 constructor_args=(1,),
 input_size=(5, 6, 7),
 desc='dim',
- test_cuda = (not TEST_WITH_ROCM)
+ test_cuda=(not TEST_WITH_ROCM)
 ),
 dict(
 constructor=wrap_functional(F.softmax, dim=-1),
 input_size=(2, 128), # trigger the last-dim algo in CUDA
 fullname='softmax_lastdim',
 pickle=False,
- test_cuda = (not TEST_WITH_ROCM)
+ test_cuda=(not TEST_WITH_ROCM)
 ),
 dict(
 constructor=wrap_functional(F.softmax, dim=1),
 input_size=(2, 128, 2, 2), # trigger special case of spatial CUDA algo
 fullname='softmax_spatial_special',
 pickle=False,
- test_cuda = (not TEST_WITH_ROCM)
+ test_cuda=(not TEST_WITH_ROCM)
 ),
 dict(
 constructor=wrap_functional(F.softmax, dim=1),
 input_size=(2, 2, 4, 4), # regular spatial algorithm
 fullname='softmax_spatial',
 pickle=False,
- test_cuda = (not TEST_WITH_ROCM)
+ test_cuda=(not TEST_WITH_ROCM)
 ),
 dict(
 constructor=wrap_functional(F.softmax, dim=0),
@@ -8527,35 +8531,35 @@ def multimarginloss_weights_no_reduce_test():
 input_size=(2, 128), # trigger the last-dim algo in CUDA
 fullname='log_softmax_lastdim',
 pickle=False,
- test_cuda = (not TEST_WITH_ROCM)
+ test_cuda=(not TEST_WITH_ROCM)
 ),
 dict(
 constructor=wrap_functional(F.log_softmax, dim=1),
 input_size=(2, 128, 2, 2), # trigger special case of spatial CUDA algo
 fullname='log_softmax_spatial_special',
 pickle=False,
- test_cuda = (not TEST_WITH_ROCM)
+ test_cuda=(not TEST_WITH_ROCM)
 ),
 dict(
 constructor=wrap_functional(F.log_softmax, dim=1),
 input_size=(2, 2, 4, 4), # regular spatial algorithm
 fullname='log_softmax_spatial',
 pickle=False,
- test_cuda = (not TEST_WITH_ROCM)
+ test_cuda=(not TEST_WITH_ROCM)
 ),
 dict(
 constructor=wrap_functional(F.log_softmax, dim=0),
 input_size=(2, 3, 4, 5),
 fullname='log_softmax_dim0',
 pickle=False,
- test_cuda = (not TEST_WITH_ROCM)
+ test_cuda=(not TEST_WITH_ROCM)
 ),
 dict(
 constructor=wrap_functional(F.log_softmax, dim=3),
 input_size=(2, 3, 4, 5),
 fullname='log_softmax_dim3',
 pickle=False,
- test_cuda = (not TEST_WITH_ROCM)
+ test_cuda=(not TEST_WITH_ROCM)
 ),
 dict(
 constructor=wrap_functional(F.log_softmax, dim=0),
@@ -8647,7 +8651,7 @@ def multimarginloss_weights_no_reduce_test():
 input_size=(),
 reference_fn=lambda i, _: torch.exp(i).div_(torch.exp(i).sum(0, False)).log_(),
 desc='multiparam_scalar',
- test_cuda = (not TEST_WITH_ROCM)
+ test_cuda=(not TEST_WITH_ROCM)
 ),
 dict(
 module_name='ELU',

From c69dc3d2c19ace41299b608131ae773d4aa807b5 Mon Sep 17 00:00:00 2001
From: jithunnair-amd
Date: Tue, 28 Aug 2018 18:39:10 -0500
Subject: [PATCH 4/6] Skip KLDivLoss_cuda tests because of recently observed hangs

---
 test/common_nn.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/test/common_nn.py b/test/common_nn.py
index c8f34af22a6413..76128c85419859 100644
--- a/test/common_nn.py
+++ b/test/common_nn.py
@@ -575,6 +575,7 @@ def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0
 reference_fn=lambda i, t, m: kldivloss_reference(i, t, get_reduction(m)),
 check_sum_reduction=True,
+ test_cuda=(not TEST_WITH_ROCM)
 ),
 dict(
 module_name='MSELoss',

From a37d62e39c3d719699055e3e3192a158a3354cf7 Mon Sep 17 00:00:00 2001
From: jithunnair-amd
Date: Wed, 29 Aug 2018 10:54:41 -0500
Subject: [PATCH 5/6] Skip KLDivLoss_no_reduce_cuda test for ROCm due to recently observed assertion error

---
 test/test_nn.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/test/test_nn.py b/test/test_nn.py
index e5ab6303cdb3c8..aa799516a5bb26 100644
--- a/test/test_nn.py
+++ b/test/test_nn.py
@@ -6872,7 +6872,8 @@ def kldivloss_no_reduce_test():
 input_fn=lambda: torch.rand(10, 10).log(),
 reference_fn=lambda i, _: loss_reference_fns['KLDivLoss'](i, t.type_as(i), reduction='none'),
- pickle=False)
+ pickle=False,
+ decorator=skipIfRocm)

 def kldivloss_no_reduce_scalar_test():

From f53f3b92feca56da32f5ac5556b124283eb13c5b Mon Sep 17 00:00:00 2001
From: jithunnair-amd
Date: Wed, 29 Aug 2018 14:52:42 -0500
Subject: [PATCH 6/6] Skip test_jit.py TestEndToEndHybridFrontendModels.test_super_resolution due to recently observed hang

---
 test/test_jit.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/test/test_jit.py b/test/test_jit.py
index 8ff0a1ddee5518..66724e0bccbfd6 100644
--- a/test/test_jit.py
+++ b/test/test_jit.py
@@ -6324,6 +6324,7 @@ class Config:
 self.checkTrace(SNLIClassifier(Config()), (premise, hypothesis), inputs_require_grads=False)

+ @skipIfRocm
 def test_super_resolution(self):
 import torch.nn.init as init