Commit 1245c83

Merge pull request #161 from jithunnair-amd/skip_tests
Skip KLDivLoss_cuda tests due to hang
2 parents d54243e + f53f3b9 commit 1245c83

File tree

6 files changed (+147, -140 lines)


aten/src/ATen/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
@@ -247,7 +247,7 @@ IF(USE_CUDA AND NOT USE_ROCM)
   ENDIF()

 IF(USE_ROCM)
-  ### Link in the ROCm libraries BLAS / RNG .
+  ### Link in the ROCm libraries BLAS / RNG.
   FIND_LIBRARY(ROCBLAS_LIBRARY rocblas HINTS ${ROCBLAS_PATH}/lib)
   FIND_LIBRARY(HIPRAND_LIBRARY hiprand HINTS ${HIPRAND_PATH}/lib)

test/common.py

Lines changed: 1 addition & 0 deletions
@@ -107,6 +107,7 @@ def wrapper(*args, **kwargs):
         fn(*args, **kwargs)
     return wrapper

+
 def skipIfNoLapack(fn):
     @wraps(fn)
     def wrapper(*args, **kwargs):
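For context, the skip decorators in test/common.py follow the wrapper pattern visible in the hunk above. Below is a minimal sketch of a ROCm skip decorator in that same style; the environment-variable name and skip message are assumptions for illustration, not taken from this commit.

    # Hypothetical sketch, not the exact helper from test/common.py: a skip
    # decorator in the same wrapper style as skipIfNoLapack above. The env var
    # name PYTORCH_TEST_WITH_ROCM is an assumption used here for illustration.
    import os
    import unittest
    from functools import wraps

    TEST_WITH_ROCM = os.environ.get('PYTORCH_TEST_WITH_ROCM', '0') == '1'

    def skipIfRocm(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            if TEST_WITH_ROCM:
                # Skip tests known to hang or fail on the ROCm stack.
                raise unittest.SkipTest("test doesn't currently work on ROCm")
            fn(*args, **kwargs)
        return wrapper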

test/common_nn.py

Lines changed: 22 additions & 23 deletions
@@ -41,7 +41,7 @@ def get_weight(m):
         constructor_args=(10, 8),
         input_size=(4, 10),
         reference_fn=lambda i, p: torch.mm(i, p[0].t()) + p[1].view(1, -1).expand(4, 8),
-        test_cuda = (not TEST_WITH_ROCM)
+        test_cuda=(not TEST_WITH_ROCM)
     ),
     dict(
         module_name='Linear',
@@ -103,28 +103,28 @@ def get_weight(m):
         constructor_args=(1,),
         input_size=(10, 20),
         reference_fn=lambda i, _: torch.exp(i).div(torch.exp(i).sum(1, True).expand(10, 20)),
-        test_cuda = (not TEST_WITH_ROCM)
+        test_cuda=(not TEST_WITH_ROCM)
     ),
     dict(
         module_name='Softmax2d',
         input_size=(1, 3, 10, 20),
         reference_fn=lambda i, _: torch.exp(i).div(torch.exp(i).sum(1, False)),
-        test_cuda = (not TEST_WITH_ROCM)
+        test_cuda=(not TEST_WITH_ROCM)
     ),
     dict(
         module_name='LogSoftmax',
         constructor_args=(1,),
         input_size=(10, 20),
         reference_fn=lambda i, _: torch.exp(i).div_(torch.exp(i).sum(1, True).expand(10, 20)).log_(),
-        test_cuda = (not TEST_WITH_ROCM)
+        test_cuda=(not TEST_WITH_ROCM)
     ),
     dict(
         module_name='LogSoftmax',
         constructor_args=(1,),
         input_size=(1, 3, 10, 20),
         reference_fn=lambda i, _: torch.exp(i).div_(torch.exp(i).sum(1, False)).log_(),
         desc='multiparam',
-        test_cuda = (not TEST_WITH_ROCM)
+        test_cuda=(not TEST_WITH_ROCM)
     ),
     dict(
         module_name='ELU',
@@ -204,61 +204,59 @@ def get_weight(m):
         input_size=(2, 3, 4),
         desc='1d_multiparam',
         reference_fn=lambda i, p: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],
-        test_cuda = (not TEST_WITH_ROCM)
+        test_cuda=(not TEST_WITH_ROCM)
     ),
     dict(
         module_name='PReLU',
         input_size=(2, 3, 4, 5),
         desc='2d',
         reference_fn=lambda i, p: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],
-        #test_cuda = (not TEST_WITH_ROCM)
     ),
     dict(
         module_name='PReLU',
         constructor_args=(3,),
         input_size=(2, 3, 4, 5),
         desc='2d_multiparam',
         reference_fn=lambda i, p: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],
-        test_cuda = (not TEST_WITH_ROCM)
+        test_cuda=(not TEST_WITH_ROCM)
     ),
     dict(
         module_name='PReLU',
         input_size=(2, 3, 4, 5, 6),
         reference_fn=lambda i, p: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],
         desc='3d',
-        #test_cuda = (not TEST_WITH_ROCM)
     ),
     dict(
         module_name='PReLU',
         constructor_args=(3,),
         input_size=(2, 3, 4, 5, 6),
         desc='3d_multiparam',
         reference_fn=lambda i, p: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],
-        test_cuda = (not TEST_WITH_ROCM)
+        test_cuda=(not TEST_WITH_ROCM)
     ),
     dict(
         module_name='Softsign',
         input_size=(3, 2, 5),
         reference_fn=lambda i, _: i.div(1 + torch.abs(i)),
-        test_cuda = (not TEST_WITH_ROCM)
+        test_cuda=(not TEST_WITH_ROCM)
     ),
     dict(
         module_name='Softmin',
         constructor_args=(1,),
         input_size=(10, 20),
-        test_cuda = (not TEST_WITH_ROCM)
+        test_cuda=(not TEST_WITH_ROCM)
     ),
     dict(
         module_name='Softmin',
         constructor_args=(1,),
         input_size=(2, 3, 5, 10),
         desc='multidim',
-        test_cuda = (not TEST_WITH_ROCM)
+        test_cuda=(not TEST_WITH_ROCM)
     ),
     dict(
         module_name='Tanhshrink',
         input_size=(2, 3, 4, 5),
-        test_cuda = (not TEST_WITH_ROCM)
+        test_cuda=(not TEST_WITH_ROCM)
     ),
 ]
@@ -575,6 +573,7 @@ def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0
         reference_fn=lambda i, t, m:
             kldivloss_reference(i, t, get_reduction(m)),
         check_sum_reduction=True,
+        test_cuda=(not TEST_WITH_ROCM)
     ),
     dict(
         module_name='MSELoss',
@@ -591,7 +590,7 @@ def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0
         reference_fn=lambda i, t, m: -(t * i.log() + (1 - t) * (1 - i).log()).sum() /
             (i.numel() if get_reduction(m) else 1),
         check_gradgrad=False,
-        test_cuda = (not TEST_WITH_ROCM)
+        test_cuda=(not TEST_WITH_ROCM)
     ),
     dict(
         module_name='BCELoss',
@@ -602,7 +601,7 @@ def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0
             (i.numel() if get_reduction(m) else 1),
         desc='weights',
         check_gradgrad=False,
-        test_cuda = (not TEST_WITH_ROCM)
+        test_cuda=(not TEST_WITH_ROCM)
     ),
     dict(
         module_name='CrossEntropyLoss',
@@ -623,7 +622,7 @@ def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0
         reference_fn=lambda i, t, m:
             hingeembeddingloss_reference(i, t, reduction=get_reduction(m)),
         check_sum_reduction=True,
-        test_cuda = (not TEST_WITH_ROCM)
+        test_cuda=(not TEST_WITH_ROCM)
     ),
     dict(
         module_name='HingeEmbeddingLoss',
@@ -634,7 +633,7 @@ def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0
             hingeembeddingloss_reference(i, t, margin=0.5, reduction=get_reduction(m)),
         desc='margin',
         check_sum_reduction=True,
-        test_cuda = (not TEST_WITH_ROCM)
+        test_cuda=(not TEST_WITH_ROCM)
     ),
     dict(
         module_name='MultiLabelMarginLoss',
@@ -661,7 +660,7 @@ def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0
         target_fn=lambda: torch.rand(5, 10).mul(2).floor(),
         reference_fn=lambda i, t, m: -(t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log()).sum() / i.numel(),
         check_gradgrad=False,
-        test_cuda = (not TEST_WITH_ROCM)
+        test_cuda=(not TEST_WITH_ROCM)
     ),
     dict(
         module_name='MultiMarginLoss',
@@ -740,7 +739,7 @@ def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0
         reference_fn=lambda i, t, m:
             cosineembeddingloss_reference(i[0], i[1], t, reduction=get_reduction(m)),
         check_sum_reduction=True,
-        test_cuda = (not TEST_WITH_ROCM)
+        test_cuda=(not TEST_WITH_ROCM)
     ),
     dict(
         module_name='CosineEmbeddingLoss',
@@ -751,7 +750,7 @@ def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0
             cosineembeddingloss_reference(i[0], i[1], t, margin=0.7, reduction=get_reduction(m)),
         desc='margin',
         check_sum_reduction=True,
-        test_cuda = (not TEST_WITH_ROCM)
+        test_cuda=(not TEST_WITH_ROCM)
     ),
     dict(
         module_name='MarginRankingLoss',
@@ -760,7 +759,7 @@ def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0
         reference_fn=lambda i, t, m:
             marginrankingloss_reference(i[0], i[1], t, reduction=get_reduction(m)),
         check_sum_reduction=True,
-        test_cuda = (not TEST_WITH_ROCM)
+        test_cuda=(not TEST_WITH_ROCM)
     ),
     dict(
         module_name='MarginRankingLoss',
@@ -771,7 +770,7 @@ def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0
             marginrankingloss_reference(i[0], i[1], t, margin=0.5, reduction=get_reduction(m)),
         desc='margin',
         check_sum_reduction=True,
-        test_cuda = (not TEST_WITH_ROCM)
+        test_cuda=(not TEST_WITH_ROCM)
     ),
 ]
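The test_cuda field added or normalized throughout this file controls whether a *_cuda variant of each module test is generated. The real generator lives elsewhere in the test suite; the following is a simplified, hypothetical sketch of that gating, showing why test_cuda=(not TEST_WITH_ROCM) suppresses the hanging KLDivLoss_cuda test on ROCm builds.

    # Simplified, hypothetical illustration of how a test generator can honor
    # the test_cuda field; the actual PyTorch generator is more involved.
    def generated_test_names(params):
        base = 'test_' + params['module_name']
        if 'desc' in params:
            base += '_' + params['desc']
        names = [base]                       # the CPU test is always emitted
        if params.get('test_cuda', True):    # defaults to True when the key is absent
            names.append(base + '_cuda')     # CUDA variant only when allowed
        return names

    # On a ROCm build TEST_WITH_ROCM is True, so test_cuda=(not TEST_WITH_ROCM)
    # evaluates to False and no KLDivLoss_cuda test is produced:
    print(generated_test_names(dict(module_name='KLDivLoss', test_cuda=False)))
    # ['test_KLDivLoss']
    print(generated_test_names(dict(module_name='KLDivLoss', test_cuda=True)))
    # ['test_KLDivLoss', 'test_KLDivLoss_cuda']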

test/test_jit.py

Lines changed: 1 addition & 0 deletions
@@ -6324,6 +6324,7 @@ class Config:

         self.checkTrace(SNLIClassifier(Config()), (premise, hypothesis), inputs_require_grads=False)

+    @skipIfRocm
     def test_super_resolution(self):
         import torch.nn.init as init
