Skip to content

Commit 3cf15f9

Browse files
committed
fix: Refactor dataproc update_cluster_test fixture system to be compatible with backoff
1 parent a90b48d commit 3cf15f9

File tree

1 file changed

+32
-33
lines changed

1 file changed

+32
-33
lines changed

dataproc/snippets/update_cluster_test.py

Lines changed: 32 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@
3636
PROJECT_ID = os.environ["GOOGLE_CLOUD_PROJECT"]
3737
REGION = "us-central1"
3838
CLUSTER_NAME = f"py-cc-test-{str(uuid.uuid4())}"
39-
NEW_NUM_INSTANCES = 5
39+
NEW_NUM_INSTANCES = 3
4040
CLUSTER = {
4141
"project_id": PROJECT_ID,
4242
"cluster_name": CLUSTER_NAME,
@@ -47,54 +47,53 @@
4747
}
4848

4949

50-
@pytest.fixture(scope='module')
def cluster_client():
    """Module-scoped Dataproc client bound to REGION's regional endpoint.

    Scoped to the module so every test (and the backoff-wrapped setup/
    teardown helpers) shares a single client instance.
    """
    endpoint = "{}-dataproc.googleapis.com:443".format(REGION)
    return ClusterControllerClient(client_options={"api_endpoint": endpoint})
5656

5757

@backoff.on_exception(backoff.expo, (ServiceUnavailable, InvalidArgument), max_tries=5)
def setup_cluster(cluster_client):
    """Create the test cluster and block until creation finishes.

    Retries with exponential backoff: InvalidArgument is raised while the
    subnetwork is not yet ready, and ServiceUnavailable covers transient
    API outages.
    """
    create_op = cluster_client.create_cluster(
        request={"project_id": PROJECT_ID, "region": REGION, "cluster": CLUSTER}
    )
    # Wait on the long-running operation so the cluster exists before use.
    create_op.result()
6865

@backoff.on_exception(backoff.expo, ServiceUnavailable, max_tries=5)
def teardown_cluster(cluster_client):
    """Delete the test cluster, tolerating one that is already gone.

    Retries on ServiceUnavailable; a NotFound means a previous run (or the
    service) already removed the cluster, which is fine for cleanup.
    """
    try:
        delete_op = cluster_client.delete_cluster(
            request={
                "project_id": PROJECT_ID,
                "region": REGION,
                "cluster_name": CLUSTER_NAME,
            }
        )
        # Block until the delete operation completes.
        delete_op.result()
    except NotFound:
        print("Cluster already deleted")
8780

8881

8982
@backoff.on_exception(
    backoff.expo, (InternalServerError, ServiceUnavailable, Cancelled), max_tries=5
)
def test_update_cluster(capsys, cluster_client: ClusterControllerClient):
    """End-to-end check that the update_cluster sample resizes the cluster.

    Creates the cluster, runs the sample, re-fetches the cluster, and always
    tears the cluster down — even when the sample or the fetch fails.
    """
    try:
        setup_cluster(cluster_client)
        # Wrapper function for client library function
        update_cluster.update_cluster(PROJECT_ID, REGION, CLUSTER_NAME, NEW_NUM_INSTANCES)
        # Re-fetch the cluster; this raises if the cluster no longer exists.
        # NOTE(review): new_num_cluster is never compared against
        # NEW_NUM_INSTANCES — consider also asserting the worker count.
        new_num_cluster = cluster_client.get_cluster(
            project_id=PROJECT_ID, region=REGION, cluster_name=CLUSTER_NAME
        )
    finally:
        teardown_cluster(cluster_client)

    out, _ = capsys.readouterr()
    assert CLUSTER_NAME in out

0 commit comments

Comments
 (0)