diff --git a/test/run_test.py b/test/run_test.py
index 0c80581c7e51cb..a55b7bafaabcfa 100644
--- a/test/run_test.py
+++ b/test/run_test.py
@@ -45,7 +45,6 @@
     'cuda',
     'distributed',
     'distributions',
-    'jit',
     'legacy_nn',
     'multiprocessing',
     'nccl',
diff --git a/test/test_jit.py b/test/test_jit.py
index c3221922bc3c99..31714fcd5330c1 100644
--- a/test/test_jit.py
+++ b/test/test_jit.py
@@ -555,6 +555,7 @@ def f(x, y):
 
     @unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
     @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
+    @skipIfRocm
     def test_fusion_rand(self):
         class M(torch.jit.ScriptModule):
             __constants__ = ['d']
@@ -777,6 +778,7 @@ def broadcast(a, b):
 
     @unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
     @unittest.skipIf(not RUN_CUDA_MULTI_GPU, "needs non-zero device")
+    @skipIfRocm
     def test_fuse_last_device(self):
         device = 'cuda:' + str(1)
         x = torch.tensor([0.4], dtype=torch.float, device=device)
@@ -5831,6 +5833,7 @@ def forward(self, x):
 
         self.checkTrace(Policy(), (torch.rand(1, 4),))
 
+    @skipIfRocm
     def test_snli(self):
         # TODO:
         # 1) nn.LSTM is called as a Python function https://github.com/pytorch/pytorch/issues/8449
@@ -6077,6 +6080,7 @@ def test_directory(self):
                 export_type=torch.onnx.ExportTypes.DIRECTORY)
             shutil.rmtree(d)
 
+    @skipIfRocm
     def test_aten_fallback(self):
         class ModelWithAtenNotONNXOp(nn.Module):
             def forward(self, x, y):
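
For context, `skipIfRocm` is a decorator from the test suite's common utilities that skips the decorated test when the suite runs on the ROCm stack. A minimal sketch of such a decorator, assuming ROCm runs are signalled through a PYTORCH_TEST_WITH_ROCM environment variable (the flag name and skip message here are assumptions, not taken from this diff):

import os
import unittest
from functools import wraps

# Assumption: ROCm CI runs export PYTORCH_TEST_WITH_ROCM=1.
TEST_WITH_ROCM = os.getenv('PYTORCH_TEST_WITH_ROCM', '0') == '1'

def skipIfRocm(fn):
    """Skip the decorated test when running on the ROCm stack."""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        if TEST_WITH_ROCM:
            raise unittest.SkipTest("test doesn't currently work on ROCm")
        return fn(*args, **kwargs)
    return wrapper

Because the decorator raises unittest.SkipTest rather than silently returning, the skipped tests still show up in the runner's output, which keeps the ROCm exclusions visible and easy to re-enable later.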