Skip to content

Commit c132870

Browse files
committed
Add e2e test that installs hive and creates a cluster
1 parent 005a0ba commit c132870

File tree

3 files changed

+212
-0
lines changed

3 files changed

+212
-0
lines changed

Makefile

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,9 @@ test: generate fmt vet manifests
3333
test-integration: generate
	go test ./test/integration/... -coverprofile cover.out

# Run the end-to-end test: installs hive on the current cluster and creates
# a cluster deployment. Declared .PHONY (consistent with build below) so a
# stray file named "test-e2e" can never mask the target.
.PHONY: test-e2e
test-e2e:
	hack/e2e-test.sh

# Builds all of hive's binaries (including utils).
.PHONY: build
build: manager hiveutil hiveadmission

contrib/cmd/waitforjob/main.go

Lines changed: 162 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,162 @@
1+
package main
2+
3+
import (
4+
"fmt"
5+
"os"
6+
"time"
7+
8+
log "github.com/sirupsen/logrus"
9+
"github.com/spf13/cobra"
10+
11+
batchv1 "k8s.io/api/batch/v1"
12+
corev1 "k8s.io/api/core/v1"
13+
"k8s.io/apimachinery/pkg/api/errors"
14+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
15+
"k8s.io/apimachinery/pkg/util/wait"
16+
clientset "k8s.io/client-go/kubernetes"
17+
"k8s.io/client-go/tools/clientcmd"
18+
)
19+
20+
var (
	// defaultJobExistenceTimeout bounds how long we wait for the named job
	// object to appear in the API server before giving up.
	defaultJobExistenceTimeout = 3 * time.Minute
	// defaultJobExecutionTimeout bounds how long we wait for the job to
	// reach a Complete or Failed condition once it exists.
	defaultJobExecutionTimeout = 45 * time.Minute
)

const (
	// defaultLogLevel is the verbosity used when --log-level is not given.
	defaultLogLevel = "info"
)
28+
29+
func main() {
30+
opts := &waitForJobOpts{}
31+
32+
cmd := &cobra.Command{
33+
Use: "waitforjob JOBNAME [OPTIONS]",
34+
Short: "wait for job",
35+
Long: "Contains various utilities for running and testing hive",
36+
Run: func(cmd *cobra.Command, args []string) {
37+
if len(args) != 1 {
38+
cmd.Usage()
39+
os.Exit(1)
40+
}
41+
opts.jobName = args[0]
42+
opts.Run()
43+
},
44+
}
45+
cmd.PersistentFlags().StringVar(&opts.logLevel, "log-level", defaultLogLevel, "Log level (debug,info,warn,error,fatal)")
46+
cmd.PersistentFlags().DurationVar(&opts.existenceTimeout, "job-existence-timeout", defaultJobExistenceTimeout, "Maximum time to wait for the named job to be created")
47+
cmd.PersistentFlags().DurationVar(&opts.executionTimeout, "job-execution-timeout", defaultJobExecutionTimeout, "Maximum time to wait for the job to execute")
48+
cmd.Execute()
49+
}
50+
51+
// waitForJobOpts holds the command-line options for the waitforjob command.
type waitForJobOpts struct {
	logLevel         string        // logrus level name (debug,info,warn,error,fatal)
	jobName          string        // name of the job to wait for (positional argument)
	existenceTimeout time.Duration // max time to wait for the job to be created
	executionTimeout time.Duration // max time to wait for the job to finish running
}
57+
58+
func (w *waitForJobOpts) Run() {
59+
// Set log level
60+
level, err := log.ParseLevel(w.logLevel)
61+
if err != nil {
62+
log.WithError(err).Fatal("Cannot parse log level")
63+
}
64+
log.SetLevel(level)
65+
log.Debug("debug logging enabled")
66+
67+
client, namespace, err := w.localClient()
68+
if err != nil {
69+
log.WithError(err).Fatal("failed to obtain client")
70+
}
71+
72+
if err := w.waitForJobExistence(client, namespace); err != nil {
73+
log.WithError(err).Fatal("job existence failed")
74+
}
75+
76+
if err := w.waitForJobExecution(client, namespace); err != nil {
77+
log.WithError(err).Fatal("job execution failed")
78+
}
79+
}
80+
81+
func (w *waitForJobOpts) waitForJobExistence(client clientset.Interface, namespace string) error {
82+
logger := log.WithField("waiting-for-existence", fmt.Sprintf("job (%s/%s)", namespace, w.jobName))
83+
err := wait.PollImmediate(10*time.Second, w.existenceTimeout, func() (bool, error) {
84+
logger.Debug("Retrieving job")
85+
_, err := client.BatchV1().Jobs(namespace).Get(w.jobName, metav1.GetOptions{})
86+
if err != nil {
87+
if errors.IsNotFound(err) {
88+
logger.Debug("job does not exist yet")
89+
} else {
90+
logger.WithError(err).Warning("unexpected error retrieving job")
91+
}
92+
return false, nil
93+
}
94+
logger.Info("Job found")
95+
return true, nil
96+
})
97+
return err
98+
}
99+
100+
func (w *waitForJobOpts) waitForJobExecution(client clientset.Interface, namespace string) error {
101+
logger := log.WithField("waiting-for-run", fmt.Sprintf("job (%s/%s)", namespace, w.jobName))
102+
err := wait.PollImmediate(30*time.Second, w.executionTimeout, func() (bool, error) {
103+
logger.Debug("Retrieving job")
104+
job, err := client.BatchV1().Jobs(namespace).Get(w.jobName, metav1.GetOptions{})
105+
if err != nil {
106+
logger.WithError(err).Error("Could not fetch job")
107+
return false, err
108+
}
109+
if isFailed(job) {
110+
logger.Error("Job has failed")
111+
return false, fmt.Errorf("Job %s/%s has failed.", namespace, w.jobName)
112+
}
113+
if isSuccessful(job) {
114+
logger.Info("Job has finished successfully")
115+
return true, nil
116+
}
117+
logger.Debug("Job has not completed yet")
118+
return false, nil
119+
})
120+
return err
121+
}
122+
123+
func (w *waitForJobOpts) localClient() (clientset.Interface, string, error) {
124+
log.Debug("Creating cluster client")
125+
rules := clientcmd.NewDefaultClientConfigLoadingRules()
126+
kubeconfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, &clientcmd.ConfigOverrides{})
127+
cfg, err := kubeconfig.ClientConfig()
128+
if err != nil {
129+
log.WithError(err).Error("Cannot obtain client config")
130+
return nil, "", err
131+
}
132+
namespace, _, err := kubeconfig.Namespace()
133+
if err != nil {
134+
log.WithError(err).Error("Cannot obtain default namespace from client config")
135+
return nil, "", err
136+
}
137+
138+
kubeClient, err := clientset.NewForConfig(cfg)
139+
if err != nil {
140+
log.WithError(err).Error("Cannot create kubernetes client from client config")
141+
return nil, "", err
142+
}
143+
144+
return kubeClient, namespace, nil
145+
}
146+
147+
func getJobConditionStatus(job *batchv1.Job, conditionType batchv1.JobConditionType) corev1.ConditionStatus {
148+
for _, condition := range job.Status.Conditions {
149+
if condition.Type == conditionType {
150+
return condition.Status
151+
}
152+
}
153+
return corev1.ConditionFalse
154+
}
155+
156+
func isSuccessful(job *batchv1.Job) bool {
157+
return getJobConditionStatus(job, batchv1.JobComplete) == corev1.ConditionTrue
158+
}
159+
160+
func isFailed(job *batchv1.Job) bool {
161+
return getJobConditionStatus(job, batchv1.JobFailed) == corev1.ConditionTrue
162+
}

hack/e2e-test.sh

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
#!/bin/bash
# End-to-end test: deploy hive to the current cluster, create a
# ClusterDeployment from the template, and wait for its install job.

set -e

# IMAGE_FORMAT is a CI-provided template containing a ${component}
# placeholder; eval it twice to resolve the hive and installer images.
component=hive
TEST_IMAGE=$(eval "echo $IMAGE_FORMAT")
component=installer
INSTALLER_IMAGE=$(eval "echo $IMAGE_FORMAT")


# Install Hive
make deploy DEPLOY_IMAGE="${TEST_IMAGE}"

CLUSTER_PROFILE_DIR=/tmp/cluster

# Create a new cluster deployment. The name is derived from the CI cluster's
# own cluster-api object, suffixed with -1 to stay unique.
export BASE_DOMAIN="origin-ci-int-aws.dev.rhcloud.com"
export CLUSTER_NAME="$(oc get cluster.cluster.k8s.io -n openshift-cluster-api -o jsonpath='{ .items[].metadata.name }')-1"
export SSH_PUB_KEY="$(cat ${CLUSTER_PROFILE_DIR}/ssh-publickey)"
export PULL_SECRET="$(cat ${CLUSTER_PROFILE_DIR}/pull-secret)"
export AWS_ACCESS_KEY_ID=$(cat ${AWS_SHARED_CREDENTIALS_FILE} | awk '/aws_access_key_id/ { print $3; exit; }')
export AWS_SECRET_ACCESS_KEY=$(cat ${AWS_SHARED_CREDENTIALS_FILE} | awk '/aws_secret_access_key/ { print $3; exit; }')

# Always delete the ClusterDeployment on exit (success or failure) so the
# e2e run does not leak AWS resources.
function teardown() {
	echo "Deleting ClusterDeployment ${CLUSTER_NAME}"
	oc delete clusterdeployment ${CLUSTER_NAME}
}
trap 'teardown' EXIT

oc new-project cluster-test

oc process -f config/templates/cluster-deployment.yaml \
	CLUSTER_NAME="${CLUSTER_NAME}" \
	SSH_KEY="${SSH_PUB_KEY}" \
	PULL_SECRET="${PULL_SECRET}" \
	AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY_ID}" \
	AWS_SECRET_ACCESS_KEY="${AWS_SECRET_ACCESS_KEY}" \
	BASE_DOMAIN="${BASE_DOMAIN}" \
	INSTALLER_IMAGE="${INSTALLER_IMAGE}" \
	OPENSHIFT_RELEASE_IMAGE="" \
	TRY_INSTALL_ONCE="true" \
	| oc apply -f -

# Wait for the cluster deployment to be installed.
SRC_ROOT=$(git rev-parse --show-toplevel)
# Fix: was "${CLUSTER-NAME}-install". Bash expands ${CLUSTER-NAME} as
# "value of $CLUSTER, or the literal string NAME if CLUSTER is unset", so
# the waiter polled for a job named "NAME-install" that never exists and
# timed out. The hyphen was a typo for an underscore.
go run "${SRC_ROOT}/contrib/cmd/waitforjob/main.go" "${CLUSTER_NAME}-install"
echo "ClusterDeployment ${CLUSTER_NAME} was installed successfully"

0 commit comments

Comments
 (0)