From 2e4eabc758b706c9278c6713d8ee4b50df8d70b0 Mon Sep 17 00:00:00 2001 From: Nandaja Varma Date: Wed, 13 Jul 2022 13:49:59 +0000 Subject: [PATCH] Adding the cron for self-hosted preview cleanup --- .werft/aks-installer-tests.yaml | 2 +- .werft/cleanup-installer-setups.yaml | 109 ++++++++++++++++++ .werft/eks-installer-tests.yaml | 2 +- .werft/gke-installer-tests.yaml | 2 +- .werft/installer-tests.ts | 41 +++++-- .werft/k3s-installer-tests.yaml | 2 +- .werft/self-hosted-installer-tests.yaml | 21 +++- install/infra/terraform/tools/issuer/main.tf | 4 +- install/tests/Makefile | 88 ++++++++++---- install/tests/cleanup.sh | 38 ++++++ .../install/infra/terraform/eks/providers.tf | 0 11 files changed, 267 insertions(+), 42 deletions(-) create mode 100644 .werft/cleanup-installer-setups.yaml create mode 100755 install/tests/cleanup.sh delete mode 100644 install/tests/install/infra/terraform/eks/providers.tf diff --git a/.werft/aks-installer-tests.yaml b/.werft/aks-installer-tests.yaml index 77a05a5fec70d7..ed53ec7fca7106 100644 --- a/.werft/aks-installer-tests.yaml +++ b/.werft/aks-installer-tests.yaml @@ -70,7 +70,7 @@ pod: curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash - export TF_VAR_TEST_ID=$(echo $RANDOM | md5sum | head -c 5; echo) + export TF_VAR_TEST_ID=$(echo $RANDOM | md5sum | head -c 5; echo)-azure (cd .werft && yarn install && mv node_modules ..) | werft log slice prep printf '{{ toJson . 
}}' > context.json diff --git a/.werft/cleanup-installer-setups.yaml b/.werft/cleanup-installer-setups.yaml new file mode 100644 index 00000000000000..a8b7a2520af53f --- /dev/null +++ b/.werft/cleanup-installer-setups.yaml @@ -0,0 +1,109 @@ +# debug using `werft run github -f -s .werft/installer-tests.ts -j .werft/self-hosted-installer-tests.yaml -a debug=true` +pod: + serviceAccount: werft + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: dev/workload + operator: In + values: + - "builds" + securityContext: + runAsUser: 0 + volumes: + - name: sh-playground-sa-perm + secret: + secretName: sh-playground-sa-perm + - name: sh-playground-dns-perm + secret: + secretName: sh-playground-dns-perm + - name: sh-aks-perm + secret: + secretName: aks-credentials + containers: + - name: nightly-test + image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:ljb-werft-cli-grpc-changes.2 + workingDir: /workspace + imagePullPolicy: Always + volumeMounts: + - name: sh-playground-sa-perm + mountPath: /mnt/secrets/sh-playground-sa-perm + - name: sh-playground-dns-perm # this sa is used for the DNS management + mountPath: /mnt/secrets/sh-playground-dns-perm + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: "/mnt/secrets/sh-playground-sa-perm/sh-sa.json" + - name: TF_VAR_sa_creds + value: "/mnt/secrets/sh-playground-sa-perm/sh-sa.json" + - name: TF_VAR_dns_sa_creds + value: "/mnt/secrets/sh-playground-dns-perm/sh-dns-sa.json" + - name: ARM_SUBSCRIPTION_ID + valueFrom: + secretKeyRef: + name: aks-credentials + key: subscriptionid + - name: ARM_TENANT_ID + valueFrom: + secretKeyRef: + name: aks-credentials + key: tenantid + - name: ARM_CLIENT_ID + valueFrom: + secretKeyRef: + name: aks-credentials + key: clientid + - name: ARM_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: aks-credentials + key: clientsecret + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: USER_TOKEN # 
this is for the integration tests + valueFrom: + secretKeyRef: + name: integration-test-user + key: token + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: aws-credentials + key: aws-access-key + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: aws-credentials + key: aws-secret-key + - name: AWS_REGION + valueFrom: + secretKeyRef: + name: aws-credentials + key: aws-region + command: + - bash + - -c + - | + sleep 1 + set -Eeuo pipefail + + sudo chown -R gitpod:gitpod /workspace + sudo apt update && apt install gettext-base + + curl -sLS https://get.k3sup.dev | sh + curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash + curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" + unzip awscliv2.zip + sudo ./aws/install + + (cd .werft && yarn install && mv node_modules ..) | werft log slice prep + printf '{{ toJson . }}' > context.json + + TESTCONFIG="CLEANUP_OLD_TESTS" + + npx ts-node .werft/installer-tests.ts ${TESTCONFIG} +plugins: + cron: "15 3 * * *" diff --git a/.werft/eks-installer-tests.yaml b/.werft/eks-installer-tests.yaml index 8f8814c5364126..58fece57ee4f1b 100644 --- a/.werft/eks-installer-tests.yaml +++ b/.werft/eks-installer-tests.yaml @@ -63,7 +63,7 @@ pod: sudo chown -R gitpod:gitpod /workspace sudo apt update && apt install gettext-base - export TF_VAR_TEST_ID="$(echo $RANDOM | md5sum | head -c 5; echo)" + export TF_VAR_TEST_ID="$(echo $RANDOM | md5sum | head -c 5; echo)-aws" (cd .werft && yarn install && mv node_modules ..) | werft log slice prep printf '{{ toJson . 
}}' > context.json diff --git a/.werft/gke-installer-tests.yaml b/.werft/gke-installer-tests.yaml index a7499b8c82dc3e..8a20e8a978aee5 100644 --- a/.werft/gke-installer-tests.yaml +++ b/.werft/gke-installer-tests.yaml @@ -50,7 +50,7 @@ pod: sudo chown -R gitpod:gitpod /workspace sudo apt update && apt install gettext-base - export TF_VAR_TEST_ID=$(echo $RANDOM | md5sum | head -c 5; echo) + export TF_VAR_TEST_ID=$(echo $RANDOM | md5sum | head -c 5; echo)-gcp (cd .werft && yarn install && mv node_modules ..) | werft log slice prep printf '{{ toJson . }}' > context.json diff --git a/.werft/installer-tests.ts b/.werft/installer-tests.ts index 412a9d52a609c9..7534054ab83bfd 100644 --- a/.werft/installer-tests.ts +++ b/.werft/installer-tests.ts @@ -91,6 +91,13 @@ const TEST_CONFIGURATIONS: { [name: string]: TestConfig } = { "CHECK_INSTALLATION", ], }, + CLEANUP_OLD_TESTS: { + CLOUD: "", + DESCRIPTION: "Deletes old test setups", + PHASES: [ + "CLEANUP_OLD_TESTS" + ] + } }; const config: TestConfig = TEST_CONFIGURATIONS[testConfig]; @@ -177,10 +184,10 @@ const INFRA_PHASES: { [name: string]: InfraConfig } = { makeTarget: "cleanup", description: "Destroy the created infrastucture", }, - RESULTS: { - phase: "get-results", - makeTarget: "get-results", - description: "Get the result of the setup", + CLEANUP_OLD_TESTS: { + phase: "cleanup-old-tests", + makeTarget: "cleanup-old-tests", + description: "", }, }; @@ -246,8 +253,11 @@ installerTests(TEST_CONFIGURATIONS[testConfig]).catch((err) => { export async function installerTests(config: TestConfig) { console.log(config.DESCRIPTION); - // these phases set up the infrastructure - werft.phase(`create-${cloud}-infra`, `Create the infrastructure in ${cloud}`); + // these phases sets up or clean up the infrastructure + // If the cloud variable is not set, we have a cleanup job in hand + const majorPhase: string = cloud == "" ? 
"cleanup-infra" : `create-${cloud}-infra` + + werft.phase(majorPhase, `Manage the infrastructure`); for (let phase of config.PHASES) { const phaseSteps = INFRA_PHASES[phase]; const ret = callMakeTargets(phaseSteps.phase, phaseSteps.description, phaseSteps.makeTarget); @@ -257,9 +267,14 @@ export async function installerTests(config: TestConfig) { break; } } - werft.done(`create-${cloud}-infra`); + werft.done(majorPhase); + + if (cloud == "") { + // this means that it was a cleanup job, nothing more to do here + return + } -if (upgrade === "true") { + if (upgrade === "true") { // we could run integration tests in the current setup // but since we run nightly tests on unstable setups, feels unnecessary // runIntegrationTests() @@ -279,16 +294,18 @@ if (upgrade === "true") { // if the preview flag is set to true, the script will print the result and exits if (preview === "true") { - const resultPhase = INFRA_PHASES["RESULTS"]; werft.phase("print-output", "Get connection details to self-hosted setup"); - // TODO(nvn): send the kubeconfig to cloud storage - callMakeTargets(resultPhase.phase, resultPhase.description, resultPhase.makeTarget); - exec( `werft log result -d "self-hosted preview url" url "https://${process.env["TF_VAR_TEST_ID"]}.tests.gitpod-self-hosted.com"`, ); + if (testConfig == "STANDARD_K3S_TEST") { + exec(`werft log result -d "KUBECONFIG file store under GCP project 'sh-automated-tests'" url "gs://nightly-tests/tf-state/${process.env["TF_VAR_TEST_ID"]}-kubeconfig"`); + } else { + exec(`werft log result -d "KUBECONFIG Connection details" url "Follow cloud specific instructions to connect to the cluster"`); + } + exec(`werft log result -d "Terraform state" url "Terraform state file name is ${process.env["TF_VAR_TEST_ID"]}"`); werft.done("print-output"); diff --git a/.werft/k3s-installer-tests.yaml b/.werft/k3s-installer-tests.yaml index a30a8cddd98962..6e415745d94a3a 100644 --- a/.werft/k3s-installer-tests.yaml +++ b/.werft/k3s-installer-tests.yaml @@ 
-57,7 +57,7 @@ pod: curl -sLS https://get.k3sup.dev | sh - export TF_VAR_TEST_ID=$(echo $RANDOM | md5sum | head -c 5; echo) + export TF_VAR_TEST_ID=$(echo $RANDOM | md5sum | head -c 5; echo)-k3s (cd .werft && yarn install && mv node_modules ..) | werft log slice prep printf '{{ toJson . }}' > context.json diff --git a/.werft/self-hosted-installer-tests.yaml b/.werft/self-hosted-installer-tests.yaml index f8060d4297484d..5a4bf2160bfada 100644 --- a/.werft/self-hosted-installer-tests.yaml +++ b/.werft/self-hosted-installer-tests.yaml @@ -4,6 +4,10 @@ args: desc: "Name of the supported managed cluster solution to test with, options: [`k3s`, `gke`, `aks`, `eks`], if not specified, it will run for all cloud providers" required: false default: "" +- name: subdomain + desc: "Subdomain to use, starting with `gitpod-*` will omit from cleanup, make sure it is not in use already. A terraform workspace of same name will be used" + required: false + default: "" - name: channel desc: "Replicated channel to use" required: false @@ -136,8 +140,21 @@ pod: CLUSTER="k3s" fi - TESTCONFIG="STANDARD_${CLUSTER^^}_TEST" + export domain="{{ .Annotations.subdomain }}" + + export eks=aws + export gke=gcp + export k3s=k3s + export aks=azure - export TF_VAR_TEST_ID=$(echo $RANDOM | md5sum | head -c 5; echo) + export provider=${!CLUSTER} + + if [[ "$domain" == "" ]]; then + export TF_VAR_TEST_ID="$(echo $RANDOM | md5sum | head -c 5; echo)-$provider" + else + export TF_VAR_TEST_ID="$domain" + fi + + TESTCONFIG="STANDARD_${CLUSTER^^}_TEST" npx ts-node .werft/installer-tests.ts ${TESTCONFIG} diff --git a/install/infra/terraform/tools/issuer/main.tf b/install/infra/terraform/tools/issuer/main.tf index 079b6cb12ecd35..a00036c6019f9f 100644 --- a/install/infra/terraform/tools/issuer/main.tf +++ b/install/infra/terraform/tools/issuer/main.tf @@ -3,7 +3,7 @@ provider "kubernetes" { } resource "kubernetes_secret" "dns_solver" { - count = var.secretAccessKey == null ? 
0 : 1 + count = var.secretAccessKey == null ? 0 : 1 metadata { name = "route53-credentials" namespace = "cert-manager" @@ -17,7 +17,7 @@ resource "kubernetes_secret" "dns_solver" { resource "kubernetes_manifest" "clusterissuer_gitpod" { manifest = { "apiVersion" = "cert-manager.io/v1" - "kind" = "ClusterIssuer" + "kind" = "ClusterIssuer" "metadata" = { "name" = "gitpod-issuer" } diff --git a/install/tests/Makefile b/install/tests/Makefile index 7a967bd6130993..8876f4e734ea6a 100644 --- a/install/tests/Makefile +++ b/install/tests/Makefile @@ -26,18 +26,47 @@ help: Makefile @sed -n 's/^##//p' $< | column -t -s ':' | sed -e 's/^/ /' @echo +upload-kubeconfig-to-gcp: + gcloud auth activate-service-account --key-file=${GOOGLE_APPLICATION_CREDENTIALS} --project=sh-automated-tests + gsutil cp ${KUBECONFIG} gs://nightly-tests/tf-state/${TF_VAR_TEST_ID}-kubeconfig + +sync-kubeconfig: + gcloud auth activate-service-account --key-file=${GOOGLE_APPLICATION_CREDENTIALS} --project=sh-automated-tests + gsutil cp gs://nightly-tests/tf-state/${TF_VAR_TEST_ID}-kubeconfig ${KUBECONFIG} || echo "No kubeconfig" + +## k3s-kubeconfig: Get the kubeconfig configuration for GCP K3s +k3s-kubeconfig: sync-kubeconfig + +## gcp-kubeconfig: Get the kubeconfig configuration for GCP GKE +gcp-kubeconfig: + gcloud auth activate-service-account --key-file=${GOOGLE_APPLICATION_CREDENTIALS} --project=sh-automated-tests + export KUBECONFIG=${KUBECONFIG} && \ + gcloud container clusters get-credentials c${TF_VAR_TEST_ID} --zone europe-west1-d --project sh-automated-tests || $(MAKE) sync-kubeconfig || echo "No cluster present" + +## azure-kubeconfig: Get the kubeconfig configuration for Azure AKS +azure-kubeconfig: + export KUBECONFIG=${KUBECONFIG} && \ + az aks get-credentials --name gitpod-test-nor-primary-${TF_VAR_TEST_ID} --resource-group gitpod-test-nor-${TF_VAR_TEST_ID} --file ${KUBECONFIG} || echo "No cluster present" + +## aws-kubeconfig: Get the kubeconfig configuration for AWS EKS 
+aws-kubeconfig: + export KUBECONFIG=${KUBECONFIG} && \ + aws eks update-kubeconfig --name ${TF_VAR_TEST_ID} --region eu-west-1 --kubeconfig ${KUBECONFIG} || echo "No cluster present" + + .PHONY: ## gke-standard-cluster: Creates a zonal GKE cluster gke-standard-cluster: terraform init --upgrade && \ - terraform workspace new $(TF_VAR_TEST_ID) || terraform workspace select $(TF_VAR_TEST_ID) && \ + terraform workspace new $(TF_VAR_TEST_ID) || $(MAKE) select-workspace && \ terraform apply -target=module.gke -var kubeconfig=${KUBECONFIG} --auto-approve + @echo "Done creating GKE cluster" .PHONY: ## eks-standard-cluster: Creates an EKS cluster eks-standard-cluster: terraform init --upgrade && \ - terraform workspace new $(TF_VAR_TEST_ID) || terraform workspace select $(TF_VAR_TEST_ID) && \ + terraform workspace new $(TF_VAR_TEST_ID) || $(MAKE) select-workspace && \ terraform apply -target=module.eks -var kubeconfig=${KUBECONFIG} --auto-approve @echo "Done creating EKS cluster" @@ -45,7 +74,7 @@ eks-standard-cluster: ## aks-standard-cluster: Creates an AKS cluster aks-standard-cluster: terraform init --upgrade && \ - terraform workspace new $(TF_VAR_TEST_ID) || terraform workspace select $(TF_VAR_TEST_ID) && \ + terraform workspace new $(TF_VAR_TEST_ID) || $(MAKE) select-workspace && \ terraform apply -target=module.aks -var kubeconfig=${KUBECONFIG} --auto-approve @echo "Done creating AKS cluster" @@ -69,35 +98,33 @@ cluster-issuer: check-env-cloud ## k3s-standard-cluster: Creates a K3S cluster on GCP with one master and 1 worker node k3s-standard-cluster: terraform init --upgrade && \ - terraform workspace new $(TF_VAR_TEST_ID) || terraform workspace select $(TF_VAR_TEST_ID) && \ - terraform apply -target=module.k3s -var kubeconfig=${KUBECONFIG} --auto-approve + terraform workspace new $(TF_VAR_TEST_ID) || $(MAKE) select-workspace && \ + terraform apply -target=module.k3s -var kubeconfig=${KUBECONFIG} --auto-approve && \ + $(MAKE) upload-kubeconfig-to-gcp # we upload 
the file to GCP since we cannot retrieve the file against without SSHing to the master @echo "Done creating k3s cluster" .PHONY: ## cert-manager: Installs cert-manager, optionally create secret for cloud-dns access cert-manager: - terraform workspace select $(TF_VAR_TEST_ID) && \ + $(MAKE) select-workspace && \ terraform apply -target=module.certmanager -var kubeconfig=${KUBECONFIG} --auto-approve @echo "Done installing cert-manager" .PHONY: ## managed-dns: Installs external-dns, and setup up CloudDNS access -managed-dns: check-env-sub-domain - terraform workspace select $(TF_VAR_TEST_ID) && \ +managed-dns: check-env-sub-domain select-workspace terraform apply -target=module.clouddns-externaldns -var kubeconfig=${KUBECONFIG} --auto-approve @echo "Done created GCP managed DNS" .PHONY: ## external-dns: Installs external-dns -external-dns: check-env-cloud - terraform workspace select $(TF_VAR_TEST_ID) && \ +external-dns: check-env-cloud select-workspace terraform apply -target=module.$(cloud)-externaldns -var kubeconfig=${KUBECONFIG} --auto-approve @echo "Done creating externaldns for $(cloud)" .PHONY: ## get-kubeconfig: Returns KUBECONFIG of a just created cluster -get-kubeconfig: - ${KUBECONFIG} +get-kubeconfig: ${cloud}-kubeconfig KOTS_KONFIG := "./manifests/kots-config.yaml" @@ -256,7 +283,17 @@ kots-upgrade: @echo "Upgrade gitpod KOTS app to latest" kubectl kots upstream upgrade --kubeconfig=${KUBECONFIG} gitpod -n gitpod --deploy -cleanup: check-env-cloud destroy-gitpod destroy-$(cloud) destroy-workspace +cloud ?= cluster +cleanup: $(cloud)-kubeconfig destroy-gitpod destroy-$(cloud) destroy-workspace destroy-kubeconfig + +cluster-kubeconfig: azure-kubeconfig aws-kubeconfig k3s-kubeconfig gcp-kubeconfig + +destroy-cluster: destroy-gcp destroy-aws destroy-azure + +destroy-kubeconfig: + gcloud auth activate-service-account --key-file=${GOOGLE_APPLICATION_CREDENTIALS} --project=sh-automated-tests + gsutil rm gs://nightly-tests/tf-state/${TF_VAR_TEST_ID}-kubeconfig 
|| echo "No kubeconfig" + rm ${KUBECONFIG} select-workspace: terraform workspace select $(TF_VAR_TEST_ID) @@ -265,11 +302,18 @@ destroy-workspace: terraform workspace select default terraform workspace delete $(TF_VAR_TEST_ID) || echo "Couldn't delete workspace, please cleanup manually" -destroy-gcp: select-workspace +destroy-gcp: destroy-k3s destroy-gke + +destroy-k3s: select-workspace [[ -f ${KUBECONFIG} ]] && terraform destroy -target=module.gcp-issuer -var kubeconfig=${KUBECONFIG} --auto-approve || echo "No kubeconfig file" [[ -f ${KUBECONFIG} ]] && terraform destroy -target=module.clouddns-externaldns -var kubeconfig=${KUBECONFIG} --auto-approve || echo "No kubeconfig file" [[ -f ${KUBECONFIG} ]] && terraform destroy -target=module.certmanager -var kubeconfig=${KUBECONFIG} --auto-approve || echo "No kubeconfig file" terraform destroy -target=module.k3s -var kubeconfig=${KUBECONFIG} --auto-approve + +destroy-gke: select-workspace + [[ -f ${KUBECONFIG} ]] && terraform destroy -target=module.gcp-issuer -var kubeconfig=${KUBECONFIG} --auto-approve || echo "No kubeconfig file" + [[ -f ${KUBECONFIG} ]] && terraform destroy -target=module.clouddns-externaldns -var kubeconfig=${KUBECONFIG} --auto-approve || echo "No kubeconfig file" + [[ -f ${KUBECONFIG} ]] && terraform destroy -target=module.certmanager -var kubeconfig=${KUBECONFIG} --auto-approve || echo "No kubeconfig file" terraform destroy -target=module.gke -var kubeconfig=${KUBECONFIG} --auto-approve # Delete the Gitpod namespace and all associated resources. 
@@ -281,26 +325,26 @@ destroy-gitpod: && kubectl --kubeconfig=${KUBECONFIG} delete namespace/gitpod --now --timeout 180s \ || true -destroy-aws: select-workspace +destroy-aws: + $(MAKE) select-workspace terraform destroy -target=module.aws-add-dns-record -var kubeconfig=${KUBECONFIG} --auto-approve ls ${KUBECONFIG} && terraform destroy -target=module.aws-issuer -var kubeconfig=${KUBECONFIG} --auto-approve || echo "No kubeconfig file" ls ${KUBECONFIG} && terraform destroy -target=module.aws-externaldns -var kubeconfig=${KUBECONFIG} --auto-approve || echo "No kubeconfig file" ls ${KUBECONFIG} && terraform destroy -target=module.certmanager -var kubeconfig=${KUBECONFIG} --auto-approve || echo "No kubeconfig file" terraform destroy -target=module.eks -var kubeconfig=${KUBECONFIG} --auto-approve -destroy-azure: select-workspace +destroy-azure: + $(MAKE) select-workspace ls ${KUBECONFIG} && terraform destroy -target=module.azure-issuer -var kubeconfig=${KUBECONFIG} --auto-approve || echo "No kubeconfig file" terraform destroy -target=module.azure-add-dns-record -var kubeconfig=${KUBECONFIG} --auto-approve ls ${KUBECONFIG} && terraform destroy -target=module.azure-externaldns -var kubeconfig=${KUBECONFIG} --auto-approve || echo "No kubeconfig file" ls ${KUBECONFIG} && terraform destroy -target=module.certmanager -var kubeconfig=${KUBECONFIG} --auto-approve || echo "No kubeconfig file" terraform destroy -target=module.aks -var kubeconfig=${KUBECONFIG} --auto-approve -get-results: - @echo "If you have gotten this far, it means your setup succeeded" - @echo "The IP address of you setup is "https://$(TF_VAR_TEST_ID).tests.gitpod-self-hosted.com"" - @echo "Following is the KUBECONFIG you can use to connect to the cluster:" - @cat ${KUBECONFIG} - list-state: terraform state list + +cleanup-old-tests: + ./cleanup.sh + # end diff --git a/install/tests/cleanup.sh b/install/tests/cleanup.sh new file mode 100755 index 00000000000000..47fffce0dd8a0a --- /dev/null +++ 
b/install/tests/cleanup.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash +# +# +if [ -z "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then echo "Env var GOOGLE_APPLICATION_CREDENTIALS not set"; exit 1; fi + + +limit='10 hours ago' + +gcloud auth activate-service-account --key-file="${GOOGLE_APPLICATION_CREDENTIALS}" --project=sh-automated-tests +for i in $(gsutil ls gs://nightly-tests/tf-state); do + # we have to check if the file was created atleast 1 day ago + datetime=$(gsutil ls -la "$i" | xargs | awk '{print $2}') + dtSec=$(date --date "$datetime" +'%s') + dtOld=$(date --date "$limit" +'%s') + if [ "$dtSec" -gt "$dtOld" ]; then echo "$i was not created atleast '$limit', skipping"; continue; fi + + filename=$(echo "$i" | rev | cut -d '/' -f 1 | rev) + + [ -z "$filename" ] && continue + + if [[ "$filename" == *-kubeconfig ]]; then continue; fi + + TF_VAR_TEST_ID=$(basename "$filename" .tfstate) + + cloud=$(echo "$TF_VAR_TEST_ID" | sed 's/\(.*\)-/\1 /' | xargs | awk '{print $2}') + + if [[ "$TF_VAR_TEST_ID" == gitpod-* ]] ; then echo "$TF_VAR_TEST_ID has the pattern gitpod-*, skipping"; continue; fi + + if [ "$TF_VAR_TEST_ID" = "default" ] || [ "$TF_VAR_TEST_ID" = "" ]; then continue; fi + + if [ -z "$cloud" ]; then cloud=cluster; fi + + echo "Cleaning up $TF_VAR_TEST_ID" + + export TF_VAR_TEST_ID=$TF_VAR_TEST_ID + + make cleanup cloud=$cloud +done diff --git a/install/tests/install/infra/terraform/eks/providers.tf b/install/tests/install/infra/terraform/eks/providers.tf deleted file mode 100644 index e69de29bb2d1d6..00000000000000