Add cronjob to cleanup preview environments #11473

Merged · 1 commit · Aug 1, 2022

2 changes: 1 addition & 1 deletion .werft/aks-installer-tests.yaml
@@ -70,7 +70,7 @@ pod:

curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash

-export TF_VAR_TEST_ID=$(echo $RANDOM | md5sum | head -c 5; echo)
+export TF_VAR_TEST_ID=$(echo $RANDOM | md5sum | head -c 5; echo)-azure

(cd .werft && yarn install && mv node_modules ..) | werft log slice prep
printf '{{ toJson . }}' > context.json
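
The only functional change in each provider job is a cloud-specific suffix on the random test ID (the eks, gke, and k3s jobs below get `-aws`, `-gcp`, and `-k3s` respectively), presumably so the cleanup job can tell from a workspace name which cloud a setup belongs to. A minimal sketch of what this pipeline produces:

    # Generate a 5-character hex ID and tag it with the provider, e.g. "1c9e4-azure".
    # $RANDOM is a bash builtin; md5sum emits a hex digest; head -c 5 keeps 5 chars.
    TEST_ID="$(echo $RANDOM | md5sum | head -c 5)-azure"
    echo "$TEST_ID"
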
109 changes: 109 additions & 0 deletions .werft/cleanup-installer-setups.yaml
@@ -0,0 +1,109 @@
# debug using `werft run github -f -s .werft/installer-tests.ts -j .werft/cleanup-installer-setups.yaml -a debug=true`
pod:
serviceAccount: werft
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: dev/workload
operator: In
values:
- "builds"
securityContext:
runAsUser: 0
volumes:
- name: sh-playground-sa-perm
secret:
secretName: sh-playground-sa-perm
- name: sh-playground-dns-perm
secret:
secretName: sh-playground-dns-perm
- name: sh-aks-perm
secret:
secretName: aks-credentials
containers:
- name: nightly-test
image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:ljb-werft-cli-grpc-changes.2
workingDir: /workspace
imagePullPolicy: Always
volumeMounts:
- name: sh-playground-sa-perm
mountPath: /mnt/secrets/sh-playground-sa-perm
- name: sh-playground-dns-perm # this sa is used for the DNS management
mountPath: /mnt/secrets/sh-playground-dns-perm
env:
- name: GOOGLE_APPLICATION_CREDENTIALS
value: "/mnt/secrets/sh-playground-sa-perm/sh-sa.json"
- name: TF_VAR_sa_creds
value: "/mnt/secrets/sh-playground-sa-perm/sh-sa.json"
- name: TF_VAR_dns_sa_creds
value: "/mnt/secrets/sh-playground-dns-perm/sh-dns-sa.json"
- name: ARM_SUBSCRIPTION_ID
valueFrom:
secretKeyRef:
name: aks-credentials
key: subscriptionid
- name: ARM_TENANT_ID
valueFrom:
secretKeyRef:
name: aks-credentials
key: tenantid
- name: ARM_CLIENT_ID
valueFrom:
secretKeyRef:
name: aks-credentials
key: clientid
- name: ARM_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: aks-credentials
key: clientsecret
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: USER_TOKEN # this is for the integration tests
valueFrom:
secretKeyRef:
name: integration-test-user
key: token
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: aws-credentials
key: aws-access-key
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: aws-credentials
key: aws-secret-key
- name: AWS_REGION
valueFrom:
secretKeyRef:
name: aws-credentials
key: aws-region
command:
- bash
- -c
- |
sleep 1
set -Eeuo pipefail

sudo chown -R gitpod:gitpod /workspace
sudo apt update && apt install gettext-base

curl -sLS https://get.k3sup.dev | sh
curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
sudo ./aws/install

(cd .werft && yarn install && mv node_modules ..) | werft log slice prep
printf '{{ toJson . }}' > context.json

TESTCONFIG="CLEANUP_OLD_TESTS"

npx ts-node .werft/installer-tests.ts ${TESTCONFIG}
plugins:
cron: "15 3 * * *"
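
The `cron` plugin schedules this pod every day at 03:15 (standard five-field crontab syntax). For a one-off run, the debug invocation from the header comment applies once pointed at this job file; a sketch, assuming a configured werft CLI:

    # Trigger the cleanup job manually instead of waiting for the nightly cron.
    werft run github -f -s .werft/installer-tests.ts -j .werft/cleanup-installer-setups.yaml -a debug=true
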
2 changes: 1 addition & 1 deletion .werft/eks-installer-tests.yaml
@@ -63,7 +63,7 @@ pod:
sudo chown -R gitpod:gitpod /workspace
sudo apt update && apt install gettext-base

-export TF_VAR_TEST_ID="$(echo $RANDOM | md5sum | head -c 5; echo)"
+export TF_VAR_TEST_ID="$(echo $RANDOM | md5sum | head -c 5; echo)-aws"

(cd .werft && yarn install && mv node_modules ..) | werft log slice prep
printf '{{ toJson . }}' > context.json
2 changes: 1 addition & 1 deletion .werft/gke-installer-tests.yaml
@@ -50,7 +50,7 @@ pod:
sudo chown -R gitpod:gitpod /workspace
sudo apt update && apt install gettext-base

-export TF_VAR_TEST_ID=$(echo $RANDOM | md5sum | head -c 5; echo)
+export TF_VAR_TEST_ID=$(echo $RANDOM | md5sum | head -c 5; echo)-gcp

(cd .werft && yarn install && mv node_modules ..) | werft log slice prep
printf '{{ toJson . }}' > context.json
41 changes: 29 additions & 12 deletions .werft/installer-tests.ts
@@ -91,6 +91,13 @@ const TEST_CONFIGURATIONS: { [name: string]: TestConfig } = {
"CHECK_INSTALLATION",
],
},
+    CLEANUP_OLD_TESTS: {
+        CLOUD: "",
+        DESCRIPTION: "Deletes old test setups",
+        PHASES: [
+            "CLEANUP_OLD_TESTS"
+        ]
+    }
};

const config: TestConfig = TEST_CONFIGURATIONS[testConfig];
@@ -177,10 +184,10 @@ const INFRA_PHASES: { [name: string]: InfraConfig } = {
makeTarget: "cleanup",
description: "Destroy the created infrastructure",
},
-    RESULTS: {
-        phase: "get-results",
-        makeTarget: "get-results",
-        description: "Get the result of the setup",
+    CLEANUP_OLD_TESTS: {
+        phase: "cleanup-old-tests",
+        makeTarget: "cleanup-old-tests",
+        description: "",
},
};
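
The new phase delegates to a `cleanup-old-tests` make target whose definition is not part of this diff. Since subdomains starting with `gitpod-` are meant to survive cleanup (see the `subdomain` argument below), a plausible shape for that target is a loop over Terraform workspaces that destroys everything unprotected — purely a hypothetical sketch:

    # Hypothetical cleanup loop; the real make target lives outside this diff.
    # Workspace names are the TEST_IDs generated by the nightly jobs.
    for ws in $(terraform workspace list | sed 's/^[* ]*//'); do
        case "$ws" in
            ""|default|gitpod-*) continue ;;   # keep the default and protected workspaces
        esac
        terraform workspace select "$ws"
        terraform destroy -auto-approve        # tear down that test setup
        terraform workspace select default
        terraform workspace delete "$ws"
    done
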

Expand Down Expand Up @@ -246,8 +253,11 @@ installerTests(TEST_CONFIGURATIONS[testConfig]).catch((err) => {

export async function installerTests(config: TestConfig) {
console.log(config.DESCRIPTION);
-    // these phases set up the infrastructure
-    werft.phase(`create-${cloud}-infra`, `Create the infrastructure in ${cloud}`);
+    // these phases set up or clean up the infrastructure
+    // if the cloud variable is not set, we have a cleanup job in hand
+    const majorPhase: string = cloud == "" ? "cleanup-infra" : `create-${cloud}-infra`;
+
+    werft.phase(majorPhase, `Manage the infrastructure`);
for (let phase of config.PHASES) {
const phaseSteps = INFRA_PHASES[phase];
const ret = callMakeTargets(phaseSteps.phase, phaseSteps.description, phaseSteps.makeTarget);
@@ -257,9 +267,14 @@ export async function installerTests(config: TestConfig) {
break;
}
}
-    werft.done(`create-${cloud}-infra`);
+    werft.done(majorPhase);

+    if (cloud == "") {
+        // this means that it was a cleanup job, nothing more to do here
+        return
+    }
+
-if (upgrade === "true") {
+    if (upgrade === "true") {
// we could run integration tests in the current setup
// but since we run nightly tests on unstable setups, feels unnecessary
// runIntegrationTests()
@@ -279,16 +294,18 @@ if (upgrade === "true") {

// if the preview flag is set to true, the script will print the result and exits
if (preview === "true") {
-    const resultPhase = INFRA_PHASES["RESULTS"];
    werft.phase("print-output", "Get connection details to self-hosted setup");

-    // TODO(nvn): send the kubeconfig to cloud storage
-    callMakeTargets(resultPhase.phase, resultPhase.description, resultPhase.makeTarget);

exec(
`werft log result -d "self-hosted preview url" url "https://${process.env["TF_VAR_TEST_ID"]}.tests.gitpod-self-hosted.com"`,
);

+    if (testConfig == "STANDARD_K3S_TEST") {
+        exec(`werft log result -d "KUBECONFIG file stored under GCP project 'sh-automated-tests'" url "gs://nightly-tests/tf-state/${process.env["TF_VAR_TEST_ID"]}-kubeconfig"`);
+    } else {
+        exec(`werft log result -d "KUBECONFIG Connection details" url "Follow cloud specific instructions to connect to the cluster"`);
+    }

exec(`werft log result -d "Terraform state" url "Terraform state file name is ${process.env["TF_VAR_TEST_ID"]}"`);

werft.done("print-output");
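
For the k3s configuration the kubeconfig ends up in the `gs://nightly-tests/tf-state` bucket, so connecting to a preview cluster amounts to copying it down; a sketch, assuming `gsutil` access to the `sh-automated-tests` project:

    # Fetch the kubeconfig that a k3s test run uploaded and talk to the cluster.
    # TF_VAR_TEST_ID is the ID reported in the werft results above; it is also
    # the name of the Terraform state/workspace for that run.
    gsutil cp "gs://nightly-tests/tf-state/${TF_VAR_TEST_ID}-kubeconfig" ./kubeconfig
    KUBECONFIG=./kubeconfig kubectl get nodes
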
2 changes: 1 addition & 1 deletion .werft/k3s-installer-tests.yaml
@@ -57,7 +57,7 @@ pod:

curl -sLS https://get.k3sup.dev | sh

-export TF_VAR_TEST_ID=$(echo $RANDOM | md5sum | head -c 5; echo)
+export TF_VAR_TEST_ID=$(echo $RANDOM | md5sum | head -c 5; echo)-k3s

(cd .werft && yarn install && mv node_modules ..) | werft log slice prep
printf '{{ toJson . }}' > context.json
21 changes: 19 additions & 2 deletions .werft/self-hosted-installer-tests.yaml
@@ -4,6 +4,10 @@ args:
desc: "Name of the supported managed cluster solution to test with, options: [`k3s`, `gke`, `aks`, `eks`], if not specified, it will run for all cloud providers"
required: false
default: ""
+- name: subdomain
+  desc: "Subdomain to use; subdomains starting with `gitpod-` are omitted from cleanup. Make sure it is not already in use. A terraform workspace of the same name will be used"
+  required: false
+  default: ""
- name: channel
desc: "Replicated channel to use"
required: false
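
With the new `subdomain` argument a named, cleanup-exempt setup can be requested per run. A usage sketch; the name of the cluster-selection annotation is truncated above, so `cluster` is an assumption here:

    # Run the self-hosted installer tests on GKE under a protected subdomain.
    # "cluster" is an assumed annotation name; "gitpod-" prefixed subdomains
    # are skipped by the nightly cleanup.
    werft run github -f -s .werft/installer-tests.ts -j .werft/self-hosted-installer-tests.yaml \
        -a cluster=gke -a subdomain=gitpod-nightly-demo
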
@@ -136,8 +140,21 @@ pod:
CLUSTER="k3s"
fi

-TESTCONFIG="STANDARD_${CLUSTER^^}_TEST"
+export domain="{{ .Annotations.subdomain }}"
+
+export eks=aws
+export gke=gcp
+export k3s=k3s
+export aks=azure
+
-export TF_VAR_TEST_ID=$(echo $RANDOM | md5sum | head -c 5; echo)
+export provider=${!CLUSTER}
+
+if [[ "$domain" == "<no value>" ]]; then
+  export TF_VAR_TEST_ID="$(echo $RANDOM | md5sum | head -c 5; echo)-$provider"
+else
+  export TF_VAR_TEST_ID="$domain"
+fi

+TESTCONFIG="STANDARD_${CLUSTER^^}_TEST"

npx ts-node .werft/installer-tests.ts ${TESTCONFIG}
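
Two bash details above are easy to miss: `${!CLUSTER}` is indirect expansion, expanding the variable whose name is stored in `CLUSTER` (so `eks` resolves to `aws`), and `<no value>` is the literal string the Go templating renders for an unset annotation, which is why the script compares against it. A standalone demonstration:

    # Indirect expansion: resolve the provider name via the value of CLUSTER.
    export eks=aws gke=gcp k3s=k3s aks=azure
    CLUSTER=eks
    provider=${!CLUSTER}        # yields "aws"
    echo "$provider"

    # Unset werft annotations arrive as the literal string "<no value>".
    domain="<no value>"
    if [[ "$domain" == "<no value>" ]]; then
        echo "no subdomain given; a random TEST_ID will be generated"
    fi
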
4 changes: 2 additions & 2 deletions install/infra/terraform/tools/issuer/main.tf
@@ -3,7 +3,7 @@ provider "kubernetes" {
}

resource "kubernetes_secret" "dns_solver" {
-  count = var.secretAccessKey == null ? 0 : 1
+  count     = var.secretAccessKey == null ? 0 : 1
metadata {
name = "route53-credentials"
namespace = "cert-manager"
@@ -17,7 +17,7 @@ resource "kubernetes_manifest" "clusterissuer_gitpod" {
resource "kubernetes_manifest" "clusterissuer_gitpod" {
manifest = {
"apiVersion" = "cert-manager.io/v1"
"kind" = "ClusterIssuer"
"kind" = "ClusterIssuer"
"metadata" = {
"name" = "gitpod-issuer"
}
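
The `count` meta-argument makes the Route 53 credentials Secret conditional: zero instances when `secretAccessKey` is null, one otherwise, so runs without AWS DNS skip the solver secret entirely. A usage sketch with an illustrative placeholder value:

    # Without the variable, the dns_solver secret is skipped (count = 0);
    # supplying a key plans exactly one kubernetes_secret (count = 1).
    # EXAMPLE_PLACEHOLDER_KEY is not a real credential.
    terraform plan
    terraform plan -var 'secretAccessKey=EXAMPLE_PLACEHOLDER_KEY'
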