@@ -16,7 +16,7 @@
 import uuid
 
 import backoff
-from google.api_core.exceptions import (InternalServerError, InvalidArgument, NotFound,
+from google.api_core.exceptions import (InternalServerError, NotFound,
                                         ServiceUnavailable)
 from google.cloud import dataproc_v1 as dataproc
 import pytest
@@ -25,57 +25,66 @@
 
 PROJECT_ID = os.environ["GOOGLE_CLOUD_PROJECT"]
 REGION = "us-central1"
-CLUSTER_NAME = "py-sj-test-{}".format(str(uuid.uuid4()))
-CLUSTER = {
-    "project_id": PROJECT_ID,
-    "cluster_name": CLUSTER_NAME,
-    "config": {
-        "master_config": {"num_instances": 1, "machine_type_uri": "n1-standard-2"},
-        "worker_config": {"num_instances": 2, "machine_type_uri": "n1-standard-2"},
-    },
-}
-
-
-@pytest.fixture(autouse=True)
-def setup_teardown():
-    cluster_client = dataproc.ClusterControllerClient(
+
+
+@pytest.fixture(scope='module')
+def cluster_client():
+    return dataproc.ClusterControllerClient(
         client_options={
             "api_endpoint": "{}-dataproc.googleapis.com:443".format(REGION)
         }
     )
 
-    # Retry on InvalidArgument subnetwork not ready error
-    @backoff.on_exception(backoff.expo, (InvalidArgument), max_tries=3)
-    def setup():
-        # Create the cluster.
-        operation = cluster_client.create_cluster(
-            request={"project_id": PROJECT_ID, "region": REGION, "cluster": CLUSTER}
+
+@backoff.on_exception(backoff.expo, ServiceUnavailable, max_tries=5)
+def setup_cluster(cluster_client, curr_cluster_name):
+
+    CLUSTER = {
+        "project_id": PROJECT_ID,
+        "cluster_name": curr_cluster_name,
+        "config": {
+            "master_config": {"num_instances": 1, "machine_type_uri": "n1-standard-2", "disk_config": {"boot_disk_size_gb": 100}},
+            "worker_config": {"num_instances": 2, "machine_type_uri": "n1-standard-2", "disk_config": {"boot_disk_size_gb": 100}},
+        },
+    }
+
+    # Create the cluster.
+    operation = cluster_client.create_cluster(
+        request={"project_id": PROJECT_ID, "region": REGION, "cluster": CLUSTER}
+    )
+    operation.result()
+
+
+@backoff.on_exception(backoff.expo, ServiceUnavailable, max_tries=5)
+def teardown_cluster(cluster_client, curr_cluster_name):
+    try:
+        operation = cluster_client.delete_cluster(
+            request={
+                "project_id": PROJECT_ID,
+                "region": REGION,
+                "cluster_name": curr_cluster_name,
+            }
         )
         operation.result()
 
-    def teardown():
-        try:
-            operation = cluster_client.delete_cluster(
-                request={
-                    "project_id": PROJECT_ID,
-                    "region": REGION,
-                    "cluster_name": CLUSTER_NAME,
-                }
-            )
-            operation.result()
-
-        except NotFound:
-            print("Cluster already deleted")
+    except NotFound:
+        print("Cluster already deleted")
+
+
+@pytest.fixture(scope='module')
+def cluster_name(cluster_client):
+    curr_cluster_name = "py-sj-test-{}".format(str(uuid.uuid4()))
+
     try:
-        setup()
-        yield
+        setup_cluster(cluster_client, curr_cluster_name)
+        yield curr_cluster_name
     finally:
-        teardown()
+        teardown_cluster(cluster_client, curr_cluster_name)
 
 
 @backoff.on_exception(backoff.expo, (InternalServerError, ServiceUnavailable), max_tries=5)
-def test_submit_job(capsys):
-    submit_job.submit_job(PROJECT_ID, REGION, CLUSTER_NAME)
+def test_submit_job(capsys, cluster_name):
+    submit_job.submit_job(PROJECT_ID, REGION, cluster_name)
     out, _ = capsys.readouterr()
 
     assert "Job finished successfully" in out
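
For context, the diff replaces a single autouse setup/teardown fixture with chained module-scoped fixtures: the test requests cluster_name, which in turn requests cluster_client, and everything after the fixture's yield runs as teardown once per module. Below is a minimal, self-contained sketch of that pattern; FakeClusterClient is a hypothetical stand-in for dataproc.ClusterControllerClient so the sketch runs without GCP access.

import uuid

import pytest


class FakeClusterClient:
    """Hypothetical stand-in for the real Dataproc client."""

    def __init__(self):
        self.clusters = set()

    def create_cluster(self, name):
        self.clusters.add(name)

    def delete_cluster(self, name):
        self.clusters.discard(name)


@pytest.fixture(scope="module")
def cluster_client():
    # Built once per test module and shared by every test in it.
    return FakeClusterClient()


@pytest.fixture(scope="module")
def cluster_name(cluster_client):
    # Fixtures can depend on other fixtures; pytest resolves the chain.
    name = "py-sj-test-{}".format(uuid.uuid4())
    cluster_client.create_cluster(name)
    try:
        yield name  # everything after the yield runs at teardown
    finally:
        cluster_client.delete_cluster(name)


def test_uses_cluster(cluster_client, cluster_name):
    # The resource exists for the duration of the module's tests.
    assert cluster_name in cluster_client.clusters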
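The setup and teardown helpers also lean on backoff's exponential retry: backoff.on_exception re-invokes the decorated function with exponentially growing waits whenever the listed exception is raised, giving up after max_tries attempts. A small sketch of that behavior, with a hypothetical FlakyService error standing in for the transient ServiceUnavailable above:

import backoff


class FlakyService(Exception):
    """Hypothetical transient error, analogous to ServiceUnavailable."""


attempts = {"count": 0}


@backoff.on_exception(backoff.expo, FlakyService, max_tries=5)
def create_resource():
    # Fails twice, then succeeds; backoff sleeps between the attempts.
    attempts["count"] += 1
    if attempts["count"] < 3:
        raise FlakyService("transient failure")
    return "created on attempt {}".format(attempts["count"])


print(create_resource())  # succeeds on the third attempt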