
Commit 833f0d9

lcskrishna authored and iotamudelta committed
re-enabled tests in test_nn and fixed flake8 issues
1 parent ffa7190 commit 833f0d9
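
For context: the tests touched by this commit had been gated off on ROCm builds with a skipIfRocm decorator from the test suite's common utilities. Below is a minimal sketch of how such a decorator typically works; the environment-variable name and skip message are assumptions for illustration, not the exact upstream implementation.

# Sketch of a skipIfRocm-style decorator, assuming the skip is driven by a
# PYTORCH_TEST_WITH_ROCM environment flag; not the exact upstream code.
import os
import unittest
from functools import wraps

TEST_WITH_ROCM = os.environ.get('PYTORCH_TEST_WITH_ROCM', '0') == '1'


def skipIfRocm(fn):
    @wraps(fn)
    def wrapper(*args, **kwargs):
        if TEST_WITH_ROCM:
            # Skip instead of failing when running under ROCm.
            raise unittest.SkipTest("test doesn't currently work on ROCm")
        return fn(*args, **kwargs)
    return wrapper

Removing the decorator from a test (as this commit does) simply lets the test run under ROCm again, subject to any remaining skip conditions such as TEST_CUDA.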

2 files changed: 2 additions & 9 deletions

test/common_nn.py

Lines changed: 0 additions & 4 deletions
@@ -622,7 +622,6 @@ def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0
        reference_fn=lambda i, t, m:
            hingeembeddingloss_reference(i, t, reduction=get_reduction(m)),
        check_sum_reduction=True,
-        test_cuda=(not TEST_WITH_ROCM)
    ),
    dict(
        module_name='HingeEmbeddingLoss',
@@ -633,7 +632,6 @@ def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0
            hingeembeddingloss_reference(i, t, margin=0.5, reduction=get_reduction(m)),
        desc='margin',
        check_sum_reduction=True,
-        test_cuda=(not TEST_WITH_ROCM)
    ),
    dict(
        module_name='MultiLabelMarginLoss',
@@ -739,7 +737,6 @@ def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0
        reference_fn=lambda i, t, m:
            cosineembeddingloss_reference(i[0], i[1], t, reduction=get_reduction(m)),
        check_sum_reduction=True,
-        test_cuda=(not TEST_WITH_ROCM)
    ),
    dict(
        module_name='CosineEmbeddingLoss',
@@ -750,7 +747,6 @@ def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0
            cosineembeddingloss_reference(i[0], i[1], t, margin=0.7, reduction=get_reduction(m)),
        desc='margin',
        check_sum_reduction=True,
-        test_cuda=(not TEST_WITH_ROCM)
    ),
    dict(
        module_name='MarginRankingLoss',
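
The test_cuda=(not TEST_WITH_ROCM) entries removed above are per-test switches in these criterion-test description dicts; dropping them restores the default, so the CUDA variants are generated on ROCm builds as well. As a rough illustration of how such a flag might be consumed (a simplified sketch, not the actual harness code, with hypothetical helper and parameter names):

# Simplified sketch: each test dict may carry a test_cuda flag; when it is
# False, the CUDA variant of the generated test is not created.
def add_criterion_test(test_case_cls, test_dict):
    name = test_dict.get('desc', test_dict['module_name'])

    def run_cpu(self):
        ...  # build the module from test_dict and compare against reference_fn

    setattr(test_case_cls, 'test_' + name, run_cpu)

    if test_dict.get('test_cuda', True):
        def run_cuda(self):
            ...  # same check, with inputs and module moved to the GPU

        setattr(test_case_cls, 'test_' + name + '_cuda', run_cuda)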

test/test_nn.py

Lines changed: 2 additions & 5 deletions
@@ -2609,7 +2609,6 @@ def test_batchnorm_simple_average(self):
        self._test_batchnorm_simple_average()

    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
-    @skipIfRocm
    def test_batchnorm_simple_average_cuda(self):
        self._test_batchnorm_simple_average(torch.cuda.FloatTensor)

@@ -3961,7 +3960,6 @@ def test_variable_sequence(self):

    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    @repeat_test_for_types(ALL_TENSORTYPES)
-    @skipIfRocm
    def test_variable_sequence_cuda(self, dtype=torch.float):
        self._test_variable_sequence("cuda", dtype)

@@ -4274,7 +4272,6 @@ def test_rnn_retain_variables(self):

    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    @repeat_test_for_types(ALL_TENSORTYPES)
-    @skipIfRocm
    def test_rnn_retain_variables_cuda(self, dtype=torch.float):
        with torch.backends.cudnn.flags(enabled=False):
            self._test_rnn_retain_variables("cuda", dtype)
@@ -4833,7 +4830,6 @@ def test_batchnorm_update_stats(self):
        self._test_batchnorm_update_stats()

    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
-    @skipIfRocm
    def test_batchnorm_update_stats_cuda(self):
        self._test_batchnorm_update_stats("cuda", torch.float)

@@ -6686,7 +6682,7 @@ def forward(self, *args):
            loss_reference_fns['NLLLossNd'](i, t, reduction=get_reduction(m)),
        check_sum_reduction=True,
        desc='2d',
-        test_cuda=(not TEST_WITH_ROCM),
+        test_cuda=(not TEST_WITH_ROCM)
    ),
    dict(
        module_name='NLLLoss',
@@ -6950,6 +6946,7 @@ def bce_with_logistic_no_reduce_test():
        check_gradgrad=False,
        pickle=False,
        decorator=skipIfRocm)
+        pickle=False,)


def bce_with_logistic_no_reduce_scalar_test():
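
With the @skipIfRocm decorators removed, the tests above are collected again on ROCm builds, still subject to the remaining TEST_CUDA skip conditions. One way to exercise a single re-enabled test directly (assuming the working directory is the test/ folder of a PyTorch checkout with torch importable; the TestNN class name follows the upstream layout at the time):

# Run one of the re-enabled tests on its own via unittest.
import unittest

from test_nn import TestNN  # run from within the test/ directory

suite = unittest.TestSuite()
suite.addTest(TestNN('test_batchnorm_update_stats_cuda'))
unittest.TextTestRunner(verbosity=2).run(suite)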
