 # See the License for the specific language governing permissions and
 # limitations under the License.

+import datetime
 import os
 import uuid

 import backoff
-from google.api_core.exceptions import (InternalServerError, InvalidArgument, NotFound,
+from google.api_core.exceptions import (AlreadyExists, InternalServerError, InvalidArgument, NotFound,
                                         ServiceUnavailable)
 from google.cloud import dataproc_v1 as dataproc

@@ -26,12 +27,13 @@
 REGION = "us-west1"
 CLUSTER_NAME = "py-cc-test-{}".format(str(uuid.uuid4()))

+cluster_client = dataproc.ClusterControllerClient(
+    client_options={"api_endpoint": f"{REGION}-dataproc.googleapis.com:443"}
+)
+

 @backoff.on_exception(backoff.expo, (Exception), max_tries=5)
 def teardown():
-    cluster_client = dataproc.ClusterControllerClient(
-        client_options={"api_endpoint": f"{REGION}-dataproc.googleapis.com:443"}
-    )
     # Client library function
     try:
         operation = cluster_client.delete_cluster(
@@ -53,8 +55,21 @@ def test_cluster_create(capsys):
     # Wrapper function for client library function
     try:
         create_cluster.create_cluster(PROJECT_ID, REGION, CLUSTER_NAME)
-    finally:
-        teardown()
+        out, _ = capsys.readouterr()
+        assert CLUSTER_NAME in out
+    except AlreadyExists:
+        request = dataproc.GetClusterRequest(project_id=PROJECT_ID, region=REGION, cluster_name=CLUSTER_NAME)
+        response = cluster_client.get_cluster(request=request)
+        assert response.status.state == dataproc.ClusterStatus.State.RUNNING

-    out, _ = capsys.readouterr()
-    assert CLUSTER_NAME in out
+        status_start = response.status.state_start_time  # when the cluster entered the RUNNING state
+        now = datetime.datetime.now(datetime.timezone.utc)
+        diff = now - status_start
+        # Check that it has been running for less than 20 minutes: the AlreadyExists
+        # then came from a backoff retry of this test's own creation, so the cluster
+        # belongs to this run and the test can continue.
+        assert diff.total_seconds() / 60 < 20
+        out, _ = capsys.readouterr()
+        assert CLUSTER_NAME in out
+    finally:
+        teardown()
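
A note on the pattern this change leans on: `backoff.on_exception` simply re-invokes the decorated callable whenever one of the listed exceptions escapes it, so a cluster creation that succeeded server-side but failed client-side (a timeout, a transient 500) collides with its own cluster on the retry and surfaces as `AlreadyExists`. Below is a minimal, self-contained sketch of that interaction; `flaky_create` and `create_with_retry` are hypothetical stand-ins, not part of the sample under test:

```python
import backoff
from google.api_core.exceptions import AlreadyExists

_created = set()

def flaky_create(name: str) -> None:
    # Hypothetical stand-in: the first call "succeeds" server-side but the
    # client still sees a timeout; the retried call then finds the resource.
    if name in _created:
        raise AlreadyExists(f"resource {name} already exists")
    _created.add(name)
    raise TimeoutError("simulated client-side timeout after creation")

@backoff.on_exception(backoff.expo, TimeoutError, max_tries=5)
def create_with_retry(name: str) -> None:
    try:
        flaky_create(name)
    except AlreadyExists:
        # Same recovery as the test: the resource exists, so treat the
        # earlier attempt as the success and carry on.
        print(f"{name} already created by an earlier attempt; continuing")

create_with_retry("py-cc-test-demo")
```

This is why the test cannot treat `AlreadyExists` as an outright failure: it first has to decide whether the existing cluster is its own (fresh and RUNNING) or a leftover from an earlier run.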
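One more detail worth flagging in the freshness check: `timedelta.seconds` carries only the seconds component of the delta (0 to 86399) and silently drops whole days, so a stale cluster left over from a run days earlier would still pass a `.seconds / 60 < 20` test; `total_seconds()` measures the full elapsed time, which is why the assert above uses it. A quick illustration:

```python
import datetime

now = datetime.datetime.now(datetime.timezone.utc)
started = now - datetime.timedelta(days=2, minutes=5)  # cluster RUNNING for 2 days, 5 min
diff = now - started

print(diff.seconds / 60)          # 5.0    -- whole days silently dropped
print(diff.total_seconds() / 60)  # 2885.0 -- the real age in minutes
```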