diff --git a/.github/workflows/catalogd-crd-diff.yaml b/.github/workflows/catalogd-crd-diff.yaml
new file mode 100644
index 000000000..d3c6ca099
--- /dev/null
+++ b/.github/workflows/catalogd-crd-diff.yaml
@@ -0,0 +1,19 @@
+name: catalogd-crd-diff
+on:
+ pull_request:
+jobs:
+ crd-diff:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - uses: actions/setup-go@v5
+ with:
+ go-version-file: go.mod
+
+ - name: Run make verify-crd-compatibility
+ working-directory: catalogd
+ run: make verify-crd-compatibility CRD_DIFF_ORIGINAL_REF=${{ github.event.pull_request.base.sha }} CRD_DIFF_UPDATED_SOURCE="git://${{ github.event.pull_request.head.sha }}?path=config/base/crd/bases/olm.operatorframework.io_clustercatalogs.yaml"
+
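The check above can be reproduced locally before pushing. A minimal sketch, relying on the catalogd Makefile shown later in this diff (where CRD_DIFF_ORIGINAL_REF defaults to main):

```sh
cd catalogd
# compare the ClusterCatalog CRD on this branch against main
make verify-crd-compatibility
```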
diff --git a/.github/workflows/catalogd-demo.yaml b/.github/workflows/catalogd-demo.yaml
new file mode 100644
index 000000000..68733fc13
--- /dev/null
+++ b/.github/workflows/catalogd-demo.yaml
@@ -0,0 +1,25 @@
+name: catalogd-demo
+
+on:
+ workflow_dispatch:
+ merge_group:
+ pull_request:
+ push:
+ branches:
+ - main
+
+jobs:
+ demo:
+ runs-on: ubuntu-latest
+ env:
+ TERM: linux
+ steps:
+ - run: sudo apt update && sudo apt install -y asciinema curl
+ - uses: actions/checkout@v4
+ - uses: actions/setup-go@v5
+ with:
+ go-version-file: "go.mod"
+ - name: Run Demo Update
+ working-directory: catalogd
+ run: make demo-update
+
diff --git a/.github/workflows/catalogd-e2e.yaml b/.github/workflows/catalogd-e2e.yaml
new file mode 100644
index 000000000..06f592788
--- /dev/null
+++ b/.github/workflows/catalogd-e2e.yaml
@@ -0,0 +1,31 @@
+name: catalogd-e2e
+
+on:
+ workflow_dispatch:
+ merge_group:
+ pull_request:
+ push:
+ branches:
+ - main
+
+jobs:
+ e2e:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-go@v5
+ with:
+ go-version-file: "go.mod"
+ - name: Run E2e
+ working-directory: catalogd
+ run: make e2e
+ upgrade-e2e:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-go@v5
+ with:
+ go-version-file: "go.mod"
+ - name: Run the upgrade e2e test
+ working-directory: catalogd
+ run: make test-upgrade-e2e
diff --git a/.github/workflows/tilt.yaml b/.github/workflows/tilt.yaml
index 3a25fdb73..63ddc2a13 100644
--- a/.github/workflows/tilt.yaml
+++ b/.github/workflows/tilt.yaml
@@ -6,32 +6,20 @@ on:
- 'api/**'
- 'cmd/**'
- 'config/**'
+ - 'catalogd/**'
- 'internal/**'
- 'pkg/**'
- 'Tiltfile'
+ - '.tilt-support'
merge_group:
jobs:
tilt:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v4
- with:
- repository: operator-framework/tilt-support
- path: tilt-support
- uses: actions/checkout@v4
with:
path: operator-controller
- - name: Get catalogd version
- id: get-catalogd-version
- run: |
- cd operator-controller
- echo "CATALOGD_VERSION=$(go list -mod=mod -m -f "{{.Version}}" github.com/operator-framework/catalogd)" >> "$GITHUB_OUTPUT"
- - uses: actions/checkout@v4
- with:
- repository: operator-framework/catalogd
- path: catalogd
- ref: "${{ steps.get-catalogd-version.outputs.CATALOGD_VERSION }}"
- name: Install Go
uses: actions/setup-go@v5
with:
diff --git a/.goreleaser.yml b/.goreleaser.yml
index 57e9dd8b6..d8c5d1a64 100644
--- a/.goreleaser.yml
+++ b/.goreleaser.yml
@@ -4,13 +4,30 @@ before:
- go mod download
builds:
- id: operator-controller
- main: ./cmd/manager/
- binary: manager
+ main: ./cmd/operator-controller/
+ binary: operator-controller
asmflags: "{{ .Env.GO_BUILD_ASMFLAGS }}"
gcflags: "{{ .Env.GO_BUILD_GCFLAGS }}"
ldflags: "{{ .Env.GO_BUILD_LDFLAGS }}"
tags:
- - "{{ .Env.GO_BUILD_TAGS }}"
+ - "{{ .Env.GO_BUILD_TAGS }}"
+ mod_timestamp: "{{ .CommitTimestamp }}"
+ goos:
+ - linux
+ goarch:
+ - amd64
+ - arm64
+ - ppc64le
+ - s390x
+ - id: catalogd
+ main: ./catalogd/cmd/catalogd/
+ binary: catalogd
+ asmflags: "{{ .Env.GO_BUILD_ASMFLAGS }}"
+ gcflags: "{{ .Env.GO_BUILD_GCFLAGS }}"
+ ldflags: "{{ .Env.GO_BUILD_LDFLAGS }}"
+ tags:
+ - "{{ .Env.GO_BUILD_TAGS }}"
+ mod_timestamp: "{{ .CommitTimestamp }}"
goos:
- linux
goarch:
@@ -20,7 +37,7 @@ builds:
- s390x
dockers:
- image_templates:
- - "{{ .Env.IMAGE_REPO }}:{{ .Env.IMAGE_TAG }}-amd64"
+ - "{{ .Env.OPERATOR_CONTROLLER_IMAGE_REPO }}:{{ .Env.IMAGE_TAG }}-amd64"
dockerfile: Dockerfile
goos: linux
goarch: amd64
@@ -28,7 +45,7 @@ dockers:
build_flag_templates:
- "--platform=linux/amd64"
- image_templates:
- - "{{ .Env.IMAGE_REPO }}:{{ .Env.IMAGE_TAG }}-arm64"
+ - "{{ .Env.OPERATOR_CONTROLLER_IMAGE_REPO }}:{{ .Env.IMAGE_TAG }}-arm64"
dockerfile: Dockerfile
goos: linux
goarch: arm64
@@ -36,7 +53,7 @@ dockers:
build_flag_templates:
- "--platform=linux/arm64"
- image_templates:
- - "{{ .Env.IMAGE_REPO }}:{{ .Env.IMAGE_TAG }}-ppc64le"
+ - "{{ .Env.OPERATOR_CONTROLLER_IMAGE_REPO }}:{{ .Env.IMAGE_TAG }}-ppc64le"
dockerfile: Dockerfile
goos: linux
goarch: ppc64le
@@ -44,20 +61,58 @@ dockers:
build_flag_templates:
- "--platform=linux/ppc64le"
- image_templates:
- - "{{ .Env.IMAGE_REPO }}:{{ .Env.IMAGE_TAG }}-s390x"
+ - "{{ .Env.OPERATOR_CONTROLLER_IMAGE_REPO }}:{{ .Env.IMAGE_TAG }}-s390x"
dockerfile: Dockerfile
goos: linux
goarch: s390x
use: buildx
build_flag_templates:
- "--platform=linux/s390x"
+ - image_templates:
+ - "{{ .Env.CATALOGD_IMAGE_REPO }}:{{ .Env.IMAGE_TAG }}-amd64"
+ dockerfile: catalogd/Dockerfile
+ goos: linux
+ goarch: amd64
+ use: buildx
+ build_flag_templates:
+ - "--platform=linux/amd64"
+ - image_templates:
+ - "{{ .Env.CATALOGD_IMAGE_REPO }}:{{ .Env.IMAGE_TAG }}-arm64"
+ dockerfile: catalogd/Dockerfile
+ goos: linux
+ goarch: arm64
+ use: buildx
+ build_flag_templates:
+ - "--platform=linux/arm64"
+ - image_templates:
+ - "{{ .Env.CATALOGD_IMAGE_REPO }}:{{ .Env.IMAGE_TAG }}-ppc64le"
+ dockerfile: catalogd/Dockerfile
+ goos: linux
+ goarch: ppc64le
+ use: buildx
+ build_flag_templates:
+ - "--platform=linux/ppc64le"
+ - image_templates:
+ - "{{ .Env.CATALOGD_IMAGE_REPO }}:{{ .Env.IMAGE_TAG }}-s390x"
+ dockerfile: catalogd/Dockerfile
+ goos: linux
+ goarch: s390x
+ use: buildx
+ build_flag_templates:
+ - "--platform=linux/s390x"
docker_manifests:
- - name_template: "{{ .Env.IMAGE_REPO }}:{{ .Env.IMAGE_TAG }}"
+ - name_template: "{{ .Env.OPERATOR_CONTROLLER_IMAGE_REPO }}:{{ .Env.IMAGE_TAG }}"
+ image_templates:
+ - "{{ .Env.OPERATOR_CONTROLLER_IMAGE_REPO }}:{{ .Env.IMAGE_TAG }}-amd64"
+ - "{{ .Env.OPERATOR_CONTROLLER_IMAGE_REPO }}:{{ .Env.IMAGE_TAG }}-arm64"
+ - "{{ .Env.OPERATOR_CONTROLLER_IMAGE_REPO }}:{{ .Env.IMAGE_TAG }}-ppc64le"
+ - "{{ .Env.OPERATOR_CONTROLLER_IMAGE_REPO }}:{{ .Env.IMAGE_TAG }}-s390x"
+ - name_template: "{{ .Env.CATALOGD_IMAGE_REPO }}:{{ .Env.IMAGE_TAG }}"
image_templates:
- - "{{ .Env.IMAGE_REPO }}:{{ .Env.IMAGE_TAG }}-amd64"
- - "{{ .Env.IMAGE_REPO }}:{{ .Env.IMAGE_TAG }}-arm64"
- - "{{ .Env.IMAGE_REPO }}:{{ .Env.IMAGE_TAG }}-ppc64le"
- - "{{ .Env.IMAGE_REPO }}:{{ .Env.IMAGE_TAG }}-s390x"
+ - "{{ .Env.CATALOGD_IMAGE_REPO }}:{{ .Env.IMAGE_TAG }}-amd64"
+ - "{{ .Env.CATALOGD_IMAGE_REPO }}:{{ .Env.IMAGE_TAG }}-arm64"
+ - "{{ .Env.CATALOGD_IMAGE_REPO }}:{{ .Env.IMAGE_TAG }}-ppc64le"
+ - "{{ .Env.CATALOGD_IMAGE_REPO }}:{{ .Env.IMAGE_TAG }}-s390x"
checksum:
name_template: 'checksums.txt'
snapshot:
diff --git a/.idea/.gitignore b/.idea/.gitignore
new file mode 100644
index 000000000..13566b81b
--- /dev/null
+++ b/.idea/.gitignore
@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP Client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml
diff --git a/.idea/catalogd.iml b/.idea/catalogd.iml
new file mode 100644
index 000000000..5e764c4f0
--- /dev/null
+++ b/.idea/catalogd.iml
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="WEB_MODULE" version="4">
+  <component name="Go" enabled="true" />
+  <component name="NewModuleRootManager">
+    <content url="file://$MODULE_DIR$" />
+    <orderEntry type="inheritedJdk" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+</module>
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
new file mode 100644
index 000000000..805dd8395
--- /dev/null
+++ b/.idea/modules.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/catalogd.iml" filepath="$PROJECT_DIR$/.idea/catalogd.iml" />
+    </modules>
+  </component>
+</project>
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 000000000..35eb1ddfb
--- /dev/null
+++ b/.idea/vcs.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings">
+    <mapping directory="" vcs="Git" />
+  </component>
+</project>
\ No newline at end of file
diff --git a/.tilt-support b/.tilt-support
new file mode 100644
index 000000000..c55d2851d
--- /dev/null
+++ b/.tilt-support
@@ -0,0 +1,151 @@
+load('ext://restart_process', 'docker_build_with_restart')
+load('ext://cert_manager', 'deploy_cert_manager')
+
+
+def deploy_cert_manager_if_needed():
+ cert_manager_var = '__CERT_MANAGER__'
+ if os.getenv(cert_manager_var) != '1':
+ deploy_cert_manager(version="v1.15.3")
+ os.putenv(cert_manager_var, '1')
+
+
+# Set up our build helper image that has delve in it. We use a helper so parallel image builds don't all simultaneously
+# install delve. Instead, they all wait for this build to complete, and then proceed in parallel.
+docker_build(
+ ref='helper',
+ context='.',
+ build_args={'GO_VERSION': '1.23'},
+ dockerfile_contents='''
+ARG GO_VERSION
+FROM golang:${GO_VERSION}
+ARG GO_VERSION
+RUN CGO_ENABLED=0 go install github.com/go-delve/delve/cmd/dlv@v${GO_VERSION}
+'''
+)
+
+
+def build_binary(repo, binary, deps, image, tags="", debug=True):
+ gcflags = ''
+ if debug:
+ gcflags = "-gcflags 'all=-N -l'"
+
+ # Treat the main binary as a local resource, so we can automatically rebuild it when any of the deps change. This
+ # builds it locally, targeting linux, so it can run in a linux container.
+ binary_name = binary.split("/")[-1]
+ local_resource(
+ '{}_{}_binary'.format(repo, binary_name),
+ cmd='''
+mkdir -p .tiltbuild/bin
+CGO_ENABLED=0 GOOS=linux go build {tags} {gcflags} -o .tiltbuild/bin/{binary_name} {binary}
+'''.format(repo=repo, binary_name=binary_name, binary=binary, gcflags=gcflags, tags=tags),
+ deps=deps
+ )
+
+ entrypoint = ['/{}'.format(binary_name)]
+ if debug:
+ entrypoint = ['/dlv', '--accept-multiclient', '--api-version=2', '--headless=true', '--listen', ':30000', 'exec', '--continue', '--'] + entrypoint
+
+ # Configure our image build. If the file in live_update.sync (.tiltbuild/bin/$binary) changes, Tilt
+ # copies it to the running container and restarts it.
+ docker_build_with_restart(
+ # This has to match an image in the k8s_yaml we call below, so Tilt knows to use this image for our Deployment,
+ # instead of the actual image specified in the yaml.
+ ref='{image}:{binary_name}'.format(image=image, binary_name=binary_name),
+ # This is the `docker build` context, and because we're only copying in the binary we've already had Tilt build
+ # locally, we set the context to the directory containing the binary.
+ context='.tiltbuild/bin',
+ # We use a slimmed-down Dockerfile that only has $binary in it.
+ dockerfile_contents='''
+FROM gcr.io/distroless/static:debug
+WORKDIR /
+COPY --from=helper /go/bin/dlv /
+COPY {} /
+ '''.format(binary_name),
+ # The set of files Tilt should include in the build. In this case, it's just the binary we built above.
+ only=binary_name,
+ # If .tiltbuild/bin/$binary changes, Tilt will copy it into the running container and restart the process.
+ live_update=[
+ sync('.tiltbuild/bin/{}'.format(binary_name), '/{}'.format(binary_name)),
+ ],
+ # The command to run in the container.
+ entrypoint=entrypoint,
+ )
+
+
+def process_yaml(yaml):
+ if type(yaml) == 'string':
+ objects = read_yaml_stream(yaml)
+ elif type(yaml) == 'blob':
+ objects = decode_yaml_stream(yaml)
+ else:
+ fail('expected a string or blob, got: {}'.format(type(yaml)))
+
+ for o in objects:
+ # For Tilt's live_update functionality to work, we have to run the container as root. Remove any PSA labels
+ # to allow this.
+ if o['kind'] == 'Namespace' and 'labels' in o['metadata']:
+ labels_to_delete = [label for label in o['metadata']['labels'] if label.startswith('pod-security.kubernetes.io')]
+ for label in labels_to_delete:
+ o['metadata']['labels'].pop(label)
+
+ if o['kind'] != 'Deployment':
+ # We only need to modify Deployments, so we can skip this
+ continue
+
+ # For Tilt's live_update functionality to work, we have to run the container as root. Otherwise, Tilt won't
+ # be able to untar the updated binary in the container's file system (this is how live update
+ # works). If there are any securityContexts, remove them.
+ if "securityContext" in o['spec']['template']['spec']:
+ o['spec']['template']['spec'].pop('securityContext')
+ for c in o['spec']['template']['spec']['containers']:
+ if "securityContext" in c:
+ c.pop('securityContext')
+
+ # If multiple Deployment manifests all use the same image but use different entrypoints to change the binary,
+ # we have to adjust each Deployment to use a different image. Tilt needs each Deployment's image to be
+ # unique. We replace the tag with what is effectively :$binary, e.g. :helm.
+ for c in o['spec']['template']['spec']['containers']:
+ if c['name'] == 'kube-rbac-proxy':
+ continue
+
+ command = c['command'][0]
+ if command.startswith('./'):
+ command = command.removeprefix('./')
+ elif command.startswith('/'):
+ command = command.removeprefix('/')
+
+ image_without_tag = c['image'].rsplit(':', 1)[0]
+
+ # Update the image so instead of :$tag it's :$binary
+ c['image'] = '{}:{}'.format(image_without_tag, command)
+
+ # Now apply all the yaml
+ # We are using allow_duplicates=True here as both
+ # operator-controller and catalogd will be installed in the same
+ # namespace "olmv1-system" as of https://github.com/operator-framework/operator-controller/pull/888
+ # and https://github.com/operator-framework/catalogd/pull/283
+ k8s_yaml(encode_yaml_stream(objects), allow_duplicates=True)
+
+
+# data format:
+# {
+# 'image': 'quay.io/operator-framework/rukpak',
+# 'yaml': 'manifests/overlays/cert-manager',
+# 'binaries': {
+# 'core': 'core',
+# 'crdvalidator': 'crd-validation-webhook',
+# 'helm': 'helm-provisioner',
+# 'webhooks': 'rukpak-webhooks',
+# },
+# 'deps': ['api', 'cmd/binary_name', 'internal', 'pkg'],
+# },
+def deploy_repo(repo, data, tags="", debug=True):
+ print('Deploying repo {}'.format(repo))
+ deploy_cert_manager_if_needed()
+
+ local_port = data['starting_debug_port']
+ for binary, deployment in data['binaries'].items():
+ build_binary(repo, binary, data['deps'], data['image'], tags, debug)
+ k8s_resource(deployment, port_forwards=['{}:30000'.format(local_port)])
+ local_port += 1
+ process_yaml(kustomize(data['yaml']))
diff --git a/Dockerfile b/Dockerfile
index 03c737e3f..0fe53b71e 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -2,11 +2,9 @@
# required and is intended to be built only with the
# 'make build' or 'make release' targets.
FROM gcr.io/distroless/static:nonroot
-
WORKDIR /
-
-COPY manager manager
-
+COPY operator-controller operator-controller
EXPOSE 8080
-
USER 65532:65532
+
+ENTRYPOINT ["/operator-controller"]
\ No newline at end of file
diff --git a/Makefile b/Makefile
index b18aca141..4d2209a72 100644
--- a/Makefile
+++ b/Makefile
@@ -14,6 +14,11 @@ IMAGE_REPO := quay.io/operator-framework/operator-controller
endif
export IMAGE_REPO
+ifeq ($(origin CATALOG_IMAGE_REPO), undefined)
+CATALOG_IMAGE_REPO := quay.io/operator-framework/catalogd
+endif
+export CATALOG_IMAGE_REPO
+
ifeq ($(origin IMAGE_TAG), undefined)
IMAGE_TAG := devel
endif
@@ -23,7 +28,6 @@ IMG := $(IMAGE_REPO):$(IMAGE_TAG)
# Define dependency versions (use go.mod if we also use Go code from dependency)
export CERT_MGR_VERSION := v1.15.3
-export CATALOGD_VERSION := $(shell go list -mod=mod -m -f "{{.Version}}" github.com/operator-framework/catalogd)
export WAIT_TIMEOUT := 60s
# Install default ClusterCatalogs
@@ -99,9 +103,14 @@ tidy: #HELP Update dependencies.
# Force tidy to use the version already in go.mod
$(Q)go mod tidy -go=$(GOLANG_VERSION)
+
.PHONY: manifests
-manifests: $(CONTROLLER_GEN) #EXHELP Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
- $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/base/crd/bases output:rbac:artifacts:config=config/base/rbac
+manifests: $(CONTROLLER_GEN) #EXHELP Generate WebhookConfiguration, ClusterRole, and CustomResourceDefinition objects.
+	# Generate the operator-controller manifests only; the catalogd manifests are generated separately below
+ $(CONTROLLER_GEN) rbac:roleName=manager-role paths=./internal/... output:rbac:artifacts:config=config/base/rbac
+ $(CONTROLLER_GEN) crd paths=./api/... output:crd:artifacts:config=config/base/crd/bases
+ # To generate the manifests for catalogd
+ $(MAKE) -C catalogd generate
.PHONY: generate
generate: $(CONTROLLER_GEN) #EXHELP Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
@@ -138,7 +147,7 @@ verify-crd-compatibility: $(CRD_DIFF) manifests
$(CRD_DIFF) --config="${CRD_DIFF_CONFIG}" "git://${CRD_DIFF_ORIGINAL_REF}?path=config/base/crd/bases/olm.operatorframework.io_clusterextensions.yaml" ${CRD_DIFF_UPDATED_SOURCE}
.PHONY: test
-test: manifests generate fmt vet test-unit test-e2e #HELP Run all tests.
+test: manifests generate generate-catalogd fmt vet test-unit test-e2e #HELP Run all tests.
.PHONY: e2e
e2e: #EXHELP Run the e2e tests.
@@ -159,7 +168,7 @@ test-ext-dev-e2e: $(OPERATOR_SDK) $(KUSTOMIZE) $(KIND) #HELP Run extension creat
go test -count=1 -v ./test/extension-developer-e2e/...
ENVTEST_VERSION := $(shell go list -m k8s.io/client-go | cut -d" " -f2 | sed 's/^v0\.\([[:digit:]]\{1,\}\)\.[[:digit:]]\{1,\}$$/1.\1.x/')
-UNIT_TEST_DIRS := $(shell go list ./... | grep -v /test/)
+UNIT_TEST_DIRS := $(shell go list ./... | grep -v /test/ | grep -v /catalogd/test/)
COVERAGE_UNIT_DIR := $(ROOT_DIR)/coverage/unit
.PHONY: envtest-k8s-bins #HELP Uses setup-envtest to download and install the binaries required to run ENVTEST-test based locally at the project/bin directory.
@@ -229,14 +238,16 @@ e2e-coverage:
COVERAGE_OUTPUT=./coverage/e2e.out ./hack/test/e2e-coverage.sh
.PHONY: kind-load
-kind-load: $(KIND) #EXHELP Loads the currently constructed image onto the cluster.
+kind-load: $(KIND) #EXHELP Loads the currently constructed images into the KIND cluster.
$(CONTAINER_RUNTIME) save $(IMG) | $(KIND) load image-archive /dev/stdin --name $(KIND_CLUSTER_NAME)
+ IMAGE_REPO=$(CATALOG_IMAGE_REPO) KIND_CLUSTER_NAME=$(KIND_CLUSTER_NAME) $(MAKE) -C catalogd kind-load
.PHONY: kind-deploy
-kind-deploy: export MANIFEST="./operator-controller.yaml"
-kind-deploy: manifests $(KUSTOMIZE) #EXHELP Install controller and dependencies onto the kind cluster.
- $(KUSTOMIZE) build $(KUSTOMIZE_BUILD_DIR) > operator-controller.yaml
- envsubst '$$CATALOGD_VERSION,$$CERT_MGR_VERSION,$$INSTALL_DEFAULT_CATALOGS,$$MANIFEST' < scripts/install.tpl.sh | bash -s
+kind-deploy: export MANIFEST := ./operator-controller.yaml
+kind-deploy: manifests $(KUSTOMIZE) #EXHELP Install controller and dependencies onto the kind cluster.
+ ($(KUSTOMIZE) build $(KUSTOMIZE_BUILD_DIR) && echo "---" && $(KUSTOMIZE) build catalogd/config/overlays/cert-manager | sed "s/cert-git-version/cert-$(VERSION)/g") > $(MANIFEST)
+ envsubst '$$CERT_MGR_VERSION,$$INSTALL_DEFAULT_CATALOGS,$$MANIFEST' < scripts/install.tpl.sh | bash -s
+
.PHONY: kind-cluster
kind-cluster: $(KIND) #EXHELP Standup a kind cluster.
@@ -269,7 +280,7 @@ export GO_BUILD_FLAGS :=
export GO_BUILD_LDFLAGS := -s -w \
-X '$(VERSION_PATH).version=$(VERSION)' \
-BINARIES=manager
+BINARIES=operator-controller
$(BINARIES):
go build $(GO_BUILD_FLAGS) -tags '$(GO_BUILD_TAGS)' -ldflags '$(GO_BUILD_LDFLAGS)' -gcflags '$(GO_BUILD_GCFLAGS)' -asmflags '$(GO_BUILD_ASMFLAGS)' -o $(BUILDBIN)/$@ ./cmd/$@
@@ -293,8 +304,9 @@ go-build-linux: $(BINARIES)
run: docker-build kind-cluster kind-load kind-deploy #HELP Build the operator-controller then deploy it into a new kind cluster.
.PHONY: docker-build
-docker-build: build-linux #EXHELP Build docker image for operator-controller with GOOS=linux and local GOARCH.
+docker-build: build-linux #EXHELP Build docker images for operator-controller and catalogd with GOOS=linux and local GOARCH.
$(CONTAINER_RUNTIME) build -t $(IMG) -f Dockerfile ./bin/linux
+ IMAGE_REPO=$(CATALOG_IMAGE_REPO) $(MAKE) -C catalogd build-container
#SECTION Release
ifeq ($(origin ENABLE_RELEASE_PIPELINE), undefined)
@@ -309,34 +321,32 @@ export GORELEASER_ARGS
.PHONY: release
release: $(GORELEASER) #EXHELP Runs goreleaser for the operator-controller. By default, this will run only as a snapshot and will not publish any artifacts unless it is run with different arguments. To override the arguments, run with "GORELEASER_ARGS=...". When run as a github action from a tag, this target will publish a full release.
- $(GORELEASER) $(GORELEASER_ARGS)
+ OPERATOR_CONTROLLER_IMAGE_REPO=$(IMAGE_REPO) CATALOGD_IMAGE_REPO=$(CATALOG_IMAGE_REPO) $(GORELEASER) $(GORELEASER_ARGS)
.PHONY: quickstart
-quickstart: export MANIFEST := https://github.com/operator-framework/operator-controller/releases/download/$(VERSION)/operator-controller.yaml
-quickstart: $(KUSTOMIZE) manifests #EXHELP Generate the installation release manifests and scripts.
- $(KUSTOMIZE) build $(KUSTOMIZE_BUILD_DIR) | sed "s/:devel/:$(VERSION)/g" > operator-controller.yaml
- envsubst '$$CATALOGD_VERSION,$$CERT_MGR_VERSION,$$INSTALL_DEFAULT_CATALOGS,$$MANIFEST' < scripts/install.tpl.sh > install.sh
+quickstart: export MANIFEST := https://github.com/operator-framework/operator-controller/releases/download/$(VERSION)/operator-controller.yaml
+quickstart: $(KUSTOMIZE) manifests #EXHELP Generate the unified installation release manifests and scripts.
+	($(KUSTOMIZE) build $(KUSTOMIZE_BUILD_DIR) && echo "---" && $(KUSTOMIZE) build catalogd/config/overlays/cert-manager | sed "s/cert-git-version/cert-$(VERSION)/g") | sed "s/:devel/:$(VERSION)/g" > operator-controller.yaml
+ envsubst '$$CERT_MGR_VERSION,$$INSTALL_DEFAULT_CATALOGS,$$MANIFEST' < scripts/install.tpl.sh > install.sh
##@ Docs
.PHONY: crd-ref-docs
OPERATOR_CONTROLLER_API_REFERENCE_FILENAME := operator-controller-api-reference.md
CATALOGD_API_REFERENCE_FILENAME := catalogd-api-reference.md
-CATALOGD_TMP_DIR := $(ROOT_DIR)/.catalogd-tmp/
API_REFERENCE_DIR := $(ROOT_DIR)/docs/api-reference
+
crd-ref-docs: $(CRD_REF_DOCS) #EXHELP Generate the API Reference Documents.
rm -f $(API_REFERENCE_DIR)/$(OPERATOR_CONTROLLER_API_REFERENCE_FILENAME)
$(CRD_REF_DOCS) --source-path=$(ROOT_DIR)/api \
--config=$(API_REFERENCE_DIR)/crd-ref-docs-gen-config.yaml \
--renderer=markdown --output-path=$(API_REFERENCE_DIR)/$(OPERATOR_CONTROLLER_API_REFERENCE_FILENAME);
- rm -rf $(CATALOGD_TMP_DIR)
- git clone --depth 1 --branch $(CATALOGD_VERSION) https://github.com/operator-framework/catalogd $(CATALOGD_TMP_DIR)
+
rm -f $(API_REFERENCE_DIR)/$(CATALOGD_API_REFERENCE_FILENAME)
- $(CRD_REF_DOCS) --source-path=$(CATALOGD_TMP_DIR)/api \
+ $(CRD_REF_DOCS) --source-path=$(ROOT_DIR)/catalogd/api \
--config=$(API_REFERENCE_DIR)/crd-ref-docs-gen-config.yaml \
- --renderer=markdown --output-path=$(API_REFERENCE_DIR)/$(CATALOGD_API_REFERENCE_FILENAME)
- rm -rf $(CATALOGD_TMP_DIR)/
-
+ --renderer=markdown --output-path=$(API_REFERENCE_DIR)/$(CATALOGD_API_REFERENCE_FILENAME);
+
VENVDIR := $(abspath docs/.venv)
.PHONY: build-docs
diff --git a/Tiltfile b/Tiltfile
index 10c4362e1..7aa07e811 100644
--- a/Tiltfile
+++ b/Tiltfile
@@ -1,23 +1,24 @@
-if not os.path.exists('../tilt-support'):
- fail('Please clone https://github.com/operator-framework/tilt-support to ../tilt-support')
+load('.tilt-support', 'deploy_repo')
-load('../tilt-support/Tiltfile', 'deploy_repo')
-
-config.define_string_list('repos', args=True)
-cfg = config.parse()
-repos = cfg.get('repos', ['operator-controller', 'catalogd'])
-
-repo = {
+operator_controller = {
'image': 'quay.io/operator-framework/operator-controller',
'yaml': 'config/overlays/cert-manager',
'binaries': {
- 'manager': 'operator-controller-controller-manager',
+ './cmd/operator-controller': 'operator-controller-controller-manager',
},
+ 'deps': ['api', 'cmd/operator-controller', 'internal', 'pkg', 'go.mod', 'go.sum'],
'starting_debug_port': 30000,
}
+deploy_repo('operator-controller', operator_controller, '-tags containers_image_openpgp')
+
+catalogd = {
+ 'image': 'quay.io/operator-framework/catalogd',
+ 'yaml': 'catalogd/config/overlays/cert-manager',
+ 'binaries': {
+ './catalogd/cmd/catalogd': 'catalogd-controller-manager',
+ },
+ 'deps': ['catalogd/api', 'catalogd/cmd/catalogd', 'catalogd/internal', 'catalogd/pkg', 'go.mod', 'go.sum'],
+ 'starting_debug_port': 20000,
+}
-for r in repos:
- if r == 'operator-controller':
- deploy_repo('operator-controller', repo, '-tags containers_image_openpgp')
- else:
- include('../{}/Tiltfile'.format(r))
+deploy_repo('catalogd', catalogd, '-tags containers_image_openpgp')
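With both components now driven by this single in-repo Tiltfile, the external tilt-support clone is no longer needed. A sketch of the local loop, assuming Tilt is installed and a kind cluster is the current kube context:

```sh
# builds both binaries locally, deploys cert-manager, and live-reloads on changes
tilt up
```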
diff --git a/catalogd/.dockerignore b/catalogd/.dockerignore
new file mode 100644
index 000000000..83f42b89a
--- /dev/null
+++ b/catalogd/.dockerignore
@@ -0,0 +1,8 @@
+# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file
+# Ignore test and tool binaries.
+bin/controller-gen
+bin/goreleaser
+bin/kustomize
+bin/envtest
+testbin/
+.tiltbuild/
diff --git a/catalogd/.gitignore b/catalogd/.gitignore
new file mode 100644
index 000000000..8ee34b8f6
--- /dev/null
+++ b/catalogd/.gitignore
@@ -0,0 +1,21 @@
+
+# Binaries for programs and plugins
+bin
+
+# Kubernetes Generated files - skip generated files, except for vendored files
+
+!vendor/**/zz_generated.*
+
+# Dependency directories
+vendor/
+bin/
+dist/
+cover.out
+
+# Release output
+catalogd.yaml
+install.sh
+
+.tiltbuild/
+.vscode
+.idea
diff --git a/catalogd/Dockerfile b/catalogd/Dockerfile
new file mode 100644
index 000000000..d0833c1fe
--- /dev/null
+++ b/catalogd/Dockerfile
@@ -0,0 +1,8 @@
+# Use distroless as minimal base image to package the manager binary
+# Refer to https://github.com/GoogleContainerTools/distroless for more details
+FROM gcr.io/distroless/static:nonroot
+WORKDIR /
+COPY catalogd catalogd
+USER 65532:65532
+
+ENTRYPOINT ["/catalogd"]
\ No newline at end of file
diff --git a/catalogd/Makefile b/catalogd/Makefile
new file mode 100644
index 000000000..1059554bb
--- /dev/null
+++ b/catalogd/Makefile
@@ -0,0 +1,229 @@
+# Setting SHELL to bash allows bash commands to be executed by recipes.
+# Options are set to exit when a recipe line exits non-zero or a piped command fails.
+SHELL := /usr/bin/env bash -o pipefail
+.SHELLFLAGS := -ec
+
+ifeq ($(origin IMAGE_REPO), undefined)
+IMAGE_REPO := quay.io/operator-framework/catalogd
+endif
+export IMAGE_REPO
+
+ifeq ($(origin IMAGE_TAG), undefined)
+IMAGE_TAG := devel
+endif
+export IMAGE_TAG
+
+IMAGE := $(IMAGE_REPO):$(IMAGE_TAG)
+
+ifneq (, $(shell command -v docker 2>/dev/null))
+CONTAINER_RUNTIME := docker
+else ifneq (, $(shell command -v podman 2>/dev/null))
+CONTAINER_RUNTIME := podman
+else
+$(warning Could not find docker or podman in path! This may result in targets requiring a container runtime failing!)
+endif
+
+# For standard development and release flows, we use the config/overlays/cert-manager overlay.
+KUSTOMIZE_OVERLAY := config/overlays/cert-manager
+
+# bingo manages consistent tooling versions for things like kind, kustomize, etc.
+include ./../.bingo/Variables.mk
+
+# Dependencies
+export CERT_MGR_VERSION := v1.15.3
+ENVTEST_SERVER_VERSION := $(shell go list -m k8s.io/client-go | cut -d" " -f2 | sed 's/^v0\.\([[:digit:]]\{1,\}\)\.[[:digit:]]\{1,\}$$/1.\1.x/')
+
+# Cluster configuration
+ifeq ($(origin KIND_CLUSTER_NAME), undefined)
+KIND_CLUSTER_NAME := catalogd
+endif
+
+# E2E configuration
+TESTDATA_DIR := testdata
+
+CATALOGD_NAMESPACE := olmv1-system
+KIND_CLUSTER_IMAGE := kindest/node:v1.30.0@sha256:047357ac0cfea04663786a612ba1eaba9702bef25227a794b52890dd8bcd692e
+
+##@ General
+
+# The help target prints out all targets with their descriptions organized
+# beneath their categories. The categories are represented by '##@' and the
+# target descriptions by '##'. The awk command is responsible for reading the
+# entire set of makefiles included in this invocation, looking for lines of the
+# form 'xyz: ## something', and then pretty-formatting the target and help. Then,
+# if there's a line with ##@ something, that gets pretty-printed as a category.
+# More info on the usage of ANSI control characters for terminal formatting:
+# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
+# More info on the awk command:
+# http://linuxcommand.org/lc3_adv_awk.php
+
+.PHONY: help
+help: ## Display this help.
+	awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n  make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf "  \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
+.DEFAULT_GOAL := help
+
+##@ Development
+
+clean: ## Remove binaries and test artifacts
+ rm -rf bin
+
+.PHONY: generate
+generate: $(CONTROLLER_GEN) ## Generate code and manifests.
+ $(CONTROLLER_GEN) object:headerFile="../hack/boilerplate.go.txt" paths="./..."
+ $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/base/crd/bases output:rbac:artifacts:config=config/base/rbac output:webhook:artifacts:config=config/base/manager/webhook/
+
+FOCUS := $(if $(TEST),-v -focus "$(TEST)")
+ifeq ($(origin E2E_FLAGS), undefined)
+E2E_FLAGS :=
+endif
+test-e2e: $(GINKGO) ## Run the e2e tests on an existing cluster
+ $(GINKGO) $(E2E_FLAGS) -trace -vv $(FOCUS) test/e2e
+
+e2e: KIND_CLUSTER_NAME := catalogd-e2e
+e2e: ISSUER_KIND := Issuer
+e2e: ISSUER_NAME := selfsigned-issuer
+e2e: KUSTOMIZE_OVERLAY := config/overlays/e2e
+e2e: run image-registry test-e2e kind-cluster-cleanup ## Run the e2e test suite on a local kind cluster
+
+image-registry: ## Set up an in-cluster image registry
+ ./test/tools/imageregistry/registry.sh $(ISSUER_KIND) $(ISSUER_NAME)
+
+.PHONY: verify-crd-compatibility
+CRD_DIFF_ORIGINAL_REF := main
+CRD_DIFF_UPDATED_SOURCE := file://config/base/crd/bases/olm.operatorframework.io_clustercatalogs.yaml
+CRD_DIFF_CONFIG := crd-diff-config.yaml
+verify-crd-compatibility: $(CRD_DIFF)
+ @if git show ${CRD_DIFF_ORIGINAL_REF}:config/base/crd/bases/olm.operatorframework.io_clustercatalogs.yaml > /dev/null 2>&1; then \
+ echo "Running CRD diff..."; \
+ $(CRD_DIFF) --config="${CRD_DIFF_CONFIG}" "git://${CRD_DIFF_ORIGINAL_REF}?path=config/base/crd/bases/olm.operatorframework.io_clustercatalogs.yaml" ${CRD_DIFF_UPDATED_SOURCE}; \
+ else \
+ echo "Skipping CRD diff: CRD does not exist in ${CRD_DIFF_ORIGINAL_REF}"; \
+ fi
+
+
+## image-registry target has to come after run-latest-release,
+## because the image-registry depends on the olm-ca issuer.
+.PHONY: test-upgrade-e2e
+test-upgrade-e2e: export TEST_CLUSTER_CATALOG_NAME := test-catalog
+test-upgrade-e2e: export TEST_CLUSTER_CATALOG_IMAGE := docker-registry.catalogd-e2e.svc:5000/test-catalog:e2e
+test-upgrade-e2e: ISSUER_KIND=ClusterIssuer
+test-upgrade-e2e: ISSUER_NAME=olmv1-ca
+test-upgrade-e2e: kind-cluster cert-manager build-container kind-load run-latest-release image-registry pre-upgrade-setup only-deploy-manifest wait post-upgrade-checks kind-cluster-cleanup ## Run upgrade e2e tests on a local kind cluster
+
+pre-upgrade-setup:
+ ./test/tools/imageregistry/pre-upgrade-setup.sh ${TEST_CLUSTER_CATALOG_IMAGE} ${TEST_CLUSTER_CATALOG_NAME}
+
+.PHONY: run-latest-release
+run-latest-release:
+ curl -L -s https://github.com/operator-framework/catalogd/releases/latest/download/install.sh | bash -s
+
+.PHONY: post-upgrade-checks
+post-upgrade-checks: $(GINKGO)
+ $(GINKGO) $(E2E_FLAGS) -trace -vv $(FOCUS) test/upgrade
+
+##@ Build
+
+BINARIES=catalogd
+LINUX_BINARIES=$(join $(addprefix linux/,$(BINARIES)), )
+
+# Build info
+ifeq ($(origin VERSION), undefined)
+VERSION := $(shell git describe --tags --always --dirty)
+endif
+export VERSION
+
+export VERSION_PKG := $(shell go list -m)/internal/version
+
+export GIT_COMMIT := $(shell git rev-parse HEAD)
+export GIT_VERSION := $(shell git describe --tags --always --dirty)
+export GIT_TREE_STATE := $(shell [ -z "$(shell git status --porcelain)" ] && echo "clean" || echo "dirty")
+export GIT_COMMIT_DATE := $(shell TZ=UTC0 git show --quiet --date=format:'%Y-%m-%dT%H:%M:%SZ' --format="%cd")
+
+export CGO_ENABLED := 0
+export GO_BUILD_ASMFLAGS := all=-trimpath=${PWD}
+export GO_BUILD_LDFLAGS := -s -w \
+ -X "$(VERSION_PKG).gitVersion=$(GIT_VERSION)" \
+ -X "$(VERSION_PKG).gitCommit=$(GIT_COMMIT)" \
+ -X "$(VERSION_PKG).gitTreeState=$(GIT_TREE_STATE)" \
+ -X "$(VERSION_PKG).commitDate=$(GIT_COMMIT_DATE)"
+export GO_BUILD_GCFLAGS := all=-trimpath=${PWD}
+export GO_BUILD_TAGS := containers_image_openpgp
+
+BUILDCMD = go build -tags '$(GO_BUILD_TAGS)' -ldflags '$(GO_BUILD_LDFLAGS)' -gcflags '$(GO_BUILD_GCFLAGS)' -asmflags '$(GO_BUILD_ASMFLAGS)' -o $(BUILDBIN)/$(notdir $@) ./cmd/$(notdir $@)
+
+.PHONY: build-deps
+build-deps: generate
+
+.PHONY: build go-build-local $(BINARIES)
+build: build-deps go-build-local ## Build binaries for current GOOS and GOARCH.
+go-build-local: $(BINARIES)
+$(BINARIES): BUILDBIN = bin
+$(BINARIES):
+ $(BUILDCMD)
+
+.PHONY: build-linux go-build-linux $(LINUX_BINARIES)
+build-linux: build-deps go-build-linux ## Build binaries for GOOS=linux and local GOARCH.
+go-build-linux: $(LINUX_BINARIES)
+$(LINUX_BINARIES): BUILDBIN = bin/linux
+$(LINUX_BINARIES):
+ GOOS=linux $(BUILDCMD)
+
+
+.PHONY: run
+run: generate kind-cluster install ## Create a kind cluster and install a local build of catalogd
+
+.PHONY: build-container
+build-container: build-linux ## Build docker image for catalogd.
+ $(CONTAINER_RUNTIME) build -f Dockerfile -t $(IMAGE) ./bin/linux
+
+##@ Deploy
+
+.PHONY: kind-cluster
+kind-cluster: $(KIND) kind-cluster-cleanup ## Standup a kind cluster
+ $(KIND) create cluster --name $(KIND_CLUSTER_NAME) --image $(KIND_CLUSTER_IMAGE)
+ $(KIND) export kubeconfig --name $(KIND_CLUSTER_NAME)
+
+.PHONY: kind-cluster-cleanup
+kind-cluster-cleanup: $(KIND) ## Delete the kind cluster
+ $(KIND) delete cluster --name $(KIND_CLUSTER_NAME)
+
+.PHONY: kind-load
+kind-load: check-cluster $(KIND) ## Load the built images onto the local cluster
+ $(CONTAINER_RUNTIME) save $(IMAGE) | $(KIND) load image-archive /dev/stdin --name $(KIND_CLUSTER_NAME)
+
+.PHONY: install
+install: check-cluster build-container kind-load deploy wait ## Install local catalogd to an existing cluster
+
+.PHONY: deploy
+deploy: export MANIFEST="./catalogd.yaml"
+deploy: export DEFAULT_CATALOGS="./config/base/default/clustercatalogs/default-catalogs.yaml"
+deploy: $(KUSTOMIZE) ## Deploy Catalogd to the K8s cluster specified in ~/.kube/config with cert-manager and default clustercatalogs
+ cd config/base/manager && $(KUSTOMIZE) edit set image controller=$(IMAGE) && cd ../../..
+ $(KUSTOMIZE) build $(KUSTOMIZE_OVERLAY) | sed "s/cert-git-version/cert-$(GIT_VERSION)/g" > catalogd.yaml
+ envsubst '$$CERT_MGR_VERSION,$$MANIFEST,$$DEFAULT_CATALOGS' < scripts/install.tpl.sh | bash -s
+
+.PHONY: only-deploy-manifest
+only-deploy-manifest: $(KUSTOMIZE) ## Deploy just the Catalogd manifest; used in e2e testing, where cert-manager is installed in a separate step
+ cd config/base/manager && $(KUSTOMIZE) edit set image controller=$(IMAGE)
+ $(KUSTOMIZE) build $(KUSTOMIZE_OVERLAY) | kubectl apply -f -
+
+wait:
+ kubectl wait --for=condition=Available --namespace=$(CATALOGD_NAMESPACE) deployment/catalogd-controller-manager --timeout=60s
+ kubectl wait --for=condition=Ready --namespace=$(CATALOGD_NAMESPACE) certificate/catalogd-service-cert # Avoid upgrade test flakes when reissuing cert
+
+
+.PHONY: cert-manager
+cert-manager:
+ kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/${CERT_MGR_VERSION}/cert-manager.yaml
+ kubectl wait --for=condition=Available --namespace=cert-manager deployment/cert-manager-webhook --timeout=60s
+
+.PHONY: demo-update
+demo-update:
+ hack/scripts/generate-asciidemo.sh
+
+.PHONY: check-cluster
+check-cluster:
+ @kubectl config current-context >/dev/null 2>&1 || ( \
+ echo "Error: Could not get current Kubernetes context. Maybe use 'run' or 'e2e' targets first?"; \
+ exit 1; \
+ )
diff --git a/catalogd/README.md b/catalogd/README.md
new file mode 100644
index 000000000..04a462f92
--- /dev/null
+++ b/catalogd/README.md
@@ -0,0 +1,169 @@
+# catalogd
+
+Catalogd is a Kubernetes extension that unpacks [file-based catalog (FBC)](https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs) content for on-cluster clients. Currently, catalogd unpacks FBC content that is packaged and distributed as container images. The catalogd roadmap includes plans for unpacking other content sources, such as Git repositories and OCI artifacts. For more information, see the catalogd [issues](https://github.com/operator-framework/catalogd/issues/) page.
+
+Catalogd helps customers discover installable content by hosting catalog metadata for Kubernetes extensions, such as Operators and controllers. For more information on the Operator Lifecycle Manager (OLM) v1 suite of microservices, see the [documentation](https://github.com/operator-framework/operator-controller/tree/main/docs) for the Operator Controller.
+
+## Quickstart Demo
+[![asciicast](https://asciinema.org/a/682344.svg)](https://asciinema.org/a/682344)
+
+## Quickstart Steps
+Procedure steps marked with an asterisk (`*`) are likely to change with future API updates.
+
+**NOTE:** The examples below use the `-k` flag in curl to skip validating the TLS certificates. This is for demonstration purposes only.
+
+1. To install catalogd, navigate to the [releases](https://github.com/operator-framework/catalogd/releases/) page, and follow the install instructions included in the release you want to install.
+
+1. Create a `ClusterCatalog` object that points to the OperatorHub Community catalog by running the following command:
+
+ ```sh
+ $ kubectl apply -f - << EOF
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterCatalog
+ metadata:
+ name: operatorhubio
+ spec:
+ source:
+ type: Image
+ image:
+ ref: quay.io/operatorhubio/catalog:latest
+ EOF
+ ```
+
+1. Verify the `ClusterCatalog` object was created successfully by running the following command:
+
+ ```sh
+ $ kubectl describe clustercatalog/operatorhubio
+ ```
+
+ *Example output*
+ ```sh
+ Name: operatorhubio
+ Namespace:
+ Labels: olm.operatorframework.io/metadata.name=operatorhubio
+ Annotations:
+ API Version: olm.operatorframework.io/v1
+ Kind: ClusterCatalog
+ Metadata:
+ Creation Timestamp: 2024-10-17T13:48:46Z
+ Finalizers:
+ olm.operatorframework.io/delete-server-cache
+ Generation: 1
+ Resource Version: 7908
+ UID: 34eeaa91-9f8e-4254-9937-0ae9d25e92df
+ Spec:
+ Availability Mode: Available
+ Priority: 0
+ Source:
+ Image:
+ Ref: quay.io/operatorhubio/catalog:latest
+ Type: Image
+ Status:
+ Conditions:
+ Last Transition Time: 2024-10-17T13:48:59Z
+ Message: Successfully unpacked and stored content from resolved source
+ Observed Generation: 1
+ Reason: Succeeded
+ Status: False
+ Type: Progressing
+ Last Transition Time: 2024-10-17T13:48:59Z
+ Message: Serving desired content from resolved source
+ Observed Generation: 1
+ Reason: Available
+ Status: True
+ Type: Serving
+ Last Unpacked: 2024-10-17T13:48:58Z
+ Resolved Source:
+ Image:
+ Last Successful Poll Attempt: 2024-10-17T14:49:59Z
+ Ref: quay.io/operatorhubio/catalog@sha256:82be554b15ff246d8cc428f8d2f4cf5857c02ce3225d95d92a769ea3095e1fc7
+ Type: Image
+ Urls:
+ Base: https://catalogd-service.olmv1-system.svc/catalogs/operatorhubio
+ Events:
+ ```
+
+1. Port forward the `catalogd-service` service in the `olmv1-system` namespace:
+ ```sh
+ $ kubectl -n olmv1-system port-forward svc/catalogd-service 8080:443
+ ```
+
+1. Access the `v1/all` service endpoint and filter the results to a list of packages by running the following command:
+
+ ```sh
+   $ curl -k https://localhost:8080/catalogs/operatorhubio/api/v1/all | jq -s '.[] | select(.schema == "olm.package") | .name'
+ ```
+
+ *Example output*
+ ```sh
+ % Total % Received % Xferd Average Speed Time Time Time Current
+ Dload Upload Total Spent Left Speed
+ 100 110M 100 110M 0 0 112M 0 --:--:-- --:--:-- --:--:-- 112M
+ "ack-acm-controller"
+ "ack-apigatewayv2-controller"
+ "ack-applicationautoscaling-controller"
+ "ack-cloudtrail-controller"
+ "ack-cloudwatch-controller"
+ "ack-dynamodb-controller"
+ "ack-ec2-controller"
+ "ack-ecr-controller"
+ "ack-eks-controller"
+ "ack-elasticache-controller"
+ "ack-emrcontainers-controller"
+ "ack-eventbridge-controller"
+ "ack-iam-controller"
+ "ack-kinesis-controller"
+ ...
+ ```
+1. Run the following command to get a list of channels for the `ack-acm-controller` package:
+
+ ```sh
+   $ curl -k https://localhost:8080/catalogs/operatorhubio/api/v1/all | jq -s '.[] | select(.schema == "olm.channel") | select(.package == "ack-acm-controller") | .name'
+ ```
+
+ *Example output*
+ ```sh
+ % Total % Received % Xferd Average Speed Time Time Time Current
+ Dload Upload Total Spent Left Speed
+ 100 110M 100 110M 0 0 115M 0 --:--:-- --:--:-- --:--:-- 116M
+ "alpha"
+ ```
+
+1. Run the following command to get a list of bundles belonging to the `ack-acm-controller` package:
+
+ ```sh
+   $ curl -k https://localhost:8080/catalogs/operatorhubio/api/v1/all | jq -s '.[] | select(.schema == "olm.bundle") | select(.package == "ack-acm-controller") | .name'
+ ```
+
+ *Example output*
+ ```sh
+ % Total % Received % Xferd Average Speed Time Time Time Current
+ Dload Upload Total Spent Left Speed
+ 100 110M 100 110M 0 0 122M 0 --:--:-- --:--:-- --:--:-- 122M
+ "ack-acm-controller.v0.0.1"
+ "ack-acm-controller.v0.0.2"
+ "ack-acm-controller.v0.0.4"
+ "ack-acm-controller.v0.0.5"
+ "ack-acm-controller.v0.0.6"
+ "ack-acm-controller.v0.0.7"
+ ```
+
+## Contributing
+Thanks for your interest in contributing to `catalogd`!
+
+`catalogd` is in the very early stages of development, and a more in-depth contributing guide will come in the near future.
+
+In the meantime, it is assumed that you know how to contribute to open source projects in general; this guide focuses only on how to manually test your changes (there is no automated testing yet).
+
+If you have any questions, feel free to reach out to us on the Kubernetes Slack channel [#olm-dev](https://kubernetes.slack.com/archives/C0181L6JYQ2) or [create an issue](https://github.com/operator-framework/catalogd/issues/new).
+### Testing Local Changes
+**Prerequisites**
+- [Install kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation)
+
+**Test it out**
+
+```sh
+make run
+```
+
+This will build a local container image of the catalogd controller, create a new kind cluster, and then deploy catalogd onto that cluster.
diff --git a/catalogd/api/doc.go b/catalogd/api/doc.go
new file mode 100644
index 000000000..2e2c18a58
--- /dev/null
+++ b/catalogd/api/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2022.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+//go:generate apiregister-gen --input-dirs ./... -h ../../boilerplate.go.txt
+
+//
+// +domain=operatorframework.io
+
+package api
diff --git a/catalogd/api/v1/clustercatalog_types.go b/catalogd/api/v1/clustercatalog_types.go
new file mode 100644
index 000000000..102c389cb
--- /dev/null
+++ b/catalogd/api/v1/clustercatalog_types.go
@@ -0,0 +1,357 @@
+/*
+Copyright 2024.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// SourceType defines the type of source used for catalogs.
+// +enum
+type SourceType string
+
+// AvailabilityMode defines the availability of the catalog
+type AvailabilityMode string
+
+const (
+ SourceTypeImage SourceType = "Image"
+
+ TypeProgressing = "Progressing"
+ TypeServing = "Serving"
+
+ // Serving reasons
+ ReasonAvailable = "Available"
+ ReasonUnavailable = "Unavailable"
+ ReasonUserSpecifiedUnavailable = "UserSpecifiedUnavailable"
+
+ // Progressing reasons
+ ReasonSucceeded = "Succeeded"
+ ReasonRetrying = "Retrying"
+ ReasonBlocked = "Blocked"
+
+ MetadataNameLabel = "olm.operatorframework.io/metadata.name"
+
+ AvailabilityModeAvailable AvailabilityMode = "Available"
+ AvailabilityModeUnavailable AvailabilityMode = "Unavailable"
+)
+
+//+kubebuilder:object:root=true
+//+kubebuilder:resource:scope=Cluster
+//+kubebuilder:subresource:status
+//+kubebuilder:printcolumn:name=LastUnpacked,type=date,JSONPath=`.status.lastUnpacked`
+//+kubebuilder:printcolumn:name="Serving",type=string,JSONPath=`.status.conditions[?(@.type=="Serving")].status`
+//+kubebuilder:printcolumn:name=Age,type=date,JSONPath=`.metadata.creationTimestamp`
+
+// ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster.
+// For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs
+type ClusterCatalog struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ // spec is the desired state of the ClusterCatalog.
+ // spec is required.
+ // The controller will work to ensure that the desired
+ // catalog is unpacked and served over the catalog content HTTP server.
+ // +kubebuilder:validation:Required
+ Spec ClusterCatalogSpec `json:"spec"`
+
+ // status contains information about the state of the ClusterCatalog such as:
+ // - Whether or not the catalog contents are being served via the catalog content HTTP server
+ // - Whether or not the ClusterCatalog is progressing to a new state
+ // - A reference to the source from which the catalog contents were retrieved
+ // +optional
+ Status ClusterCatalogStatus `json:"status,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+
+// ClusterCatalogList contains a list of ClusterCatalog
+type ClusterCatalogList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ // items is a list of ClusterCatalogs.
+ // items is required.
+ // +kubebuilder:validation:Required
+ Items []ClusterCatalog `json:"items"`
+}
+
+// ClusterCatalogSpec defines the desired state of ClusterCatalog
+type ClusterCatalogSpec struct {
+ // source allows a user to define the source of a catalog.
+ // A "catalog" contains information on content that can be installed on a cluster.
+ // Providing a catalog source makes the contents of the catalog discoverable and usable by
+ // other on-cluster components.
+ // These on-cluster components may do a variety of things with this information, such as
+ // presenting the content in a GUI dashboard or installing content from the catalog on the cluster.
+ // The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format.
+ // For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs.
+ // source is a required field.
+ //
+ // Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image:
+ //
+ // source:
+ // type: Image
+ // image:
+ // ref: quay.io/operatorhubio/catalog:latest
+ //
+ // +kubebuilder:validation:Required
+ Source CatalogSource `json:"source"`
+
+ // priority allows the user to define a priority for a ClusterCatalog.
+ // priority is optional.
+ //
+ // A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements.
+ // A higher number means higher priority.
+ //
+ // It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements.
+ // When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input.
+ //
+ // When omitted, the default priority is 0 because that is the zero value of integers.
+ //
+ // Negative numbers can be used to specify a priority lower than the default.
+ // Positive numbers can be used to specify a priority higher than the default.
+ //
+ // The lowest possible value is -2147483648.
+ // The highest possible value is 2147483647.
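+	//
+	// As an illustrative example, a ClusterCatalog preferred over default-priority catalogs could set:
+	//
+	//  spec:
+	//    priority: 100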
+ //
+ // +kubebuilder:default:=0
+	// +kubebuilder:validation:Minimum:=-2147483648
+	// +kubebuilder:validation:Maximum:=2147483647
+ // +optional
+ Priority int32 `json:"priority"`
+
+ // availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster.
+ // availabilityMode is optional.
+ //
+	// Allowed values are "Available", "Unavailable", or omitted.
+ //
+ // When omitted, the default value is "Available".
+ //
+ // When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server.
+ // Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog
+ // and its contents as usable.
+ //
+	// When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server.
+	// When set to this value, the ClusterCatalog should be interpreted the same as if it did not exist.
+	// Setting the availabilityMode to "Unavailable" can be useful when a user does not want
+	// to delete the ClusterCatalog altogether, but would still like it to be treated as if it doesn't exist.
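+	//
+	// As an illustrative example, a catalog can be retained but hidden from clients with:
+	//
+	//  spec:
+	//    availabilityMode: Unavailable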
+ //
+ // +kubebuilder:validation:Enum:="Unavailable";"Available"
+ // +kubebuilder:default:="Available"
+ // +optional
+ AvailabilityMode AvailabilityMode `json:"availabilityMode,omitempty"`
+}
+
+// ClusterCatalogStatus defines the observed state of ClusterCatalog
+type ClusterCatalogStatus struct {
+ // conditions is a representation of the current state for this ClusterCatalog.
+ //
+ // The current condition types are Serving and Progressing.
+ //
+	// The Serving condition is used to represent whether or not the contents of the catalog are being served via the HTTP(S) web server.
+ // When it has a status of True and a reason of Available, the contents of the catalog are being served.
+ // When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available.
+ // When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable.
+ //
+ // The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state.
+ // When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts.
+ // When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing.
+ // When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery.
+ //
+	// In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched
+	// catalog contents are still being served via the HTTP(S) web server while we progress towards serving a new version of the catalog
+	// contents. This can occur when we have already fetched the latest contents from the source for this catalog and, while polling for
+	// changes, identify that the contents have been updated.
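+	//
+	// As an illustrative example, both conditions might read as follows while an updated image is
+	// being pulled and the previously unpacked contents are still being served:
+	//
+	//  conditions:
+	//  - type: Serving
+	//    status: "True"
+	//    reason: Available
+	//  - type: Progressing
+	//    status: "True"
+	//    reason: Retrying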
+ //
+ // +listType=map
+ // +listMapKey=type
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+ // resolvedSource contains information about the resolved source based on the source type.
+ // +optional
+ ResolvedSource *ResolvedCatalogSource `json:"resolvedSource,omitempty"`
+ // urls contains the URLs that can be used to access the catalog.
+ // +optional
+ URLs *ClusterCatalogURLs `json:"urls,omitempty"`
+ // lastUnpacked represents the last time the contents of the
+ // catalog were extracted from their source format. As an example,
+ // when using an Image source, the OCI image will be pulled and the
+ // image layers written to a file-system backed cache. We refer to the
+ // act of this extraction from the source format as "unpacking".
+ // +optional
+ LastUnpacked *metav1.Time `json:"lastUnpacked,omitempty"`
+}
+
+// ClusterCatalogURLs contains the URLs that can be used to access the catalog.
+type ClusterCatalogURLs struct {
+ // base is a cluster-internal URL that provides endpoints for
+ // accessing the content of the catalog.
+ //
+ // It is expected that clients append the path for the endpoint they wish
+ // to access.
+ //
+ // Currently, only a single endpoint is served and is accessible at the path
+ // /api/v1.
+ //
+ // The endpoints served for the v1 API are:
+ // - /all - this endpoint returns the entirety of the catalog contents in the FBC format
+ //
+	// As the needs of users and clients evolve, new endpoints may be added.
+ //
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MaxLength:=525
+ // +kubebuilder:validation:XValidation:rule="isURL(self)",message="must be a valid URL"
+ // +kubebuilder:validation:XValidation:rule="isURL(self) ? (url(self).getScheme() == \"http\" || url(self).getScheme() == \"https\") : true",message="scheme must be either http or https"
+ Base string `json:"base"`
+}
+
+// CatalogSource is a discriminated union of possible sources for a Catalog.
+// CatalogSource contains the sourcing information for a Catalog
+// +union
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Image' ? has(self.image) : !has(self.image)",message="image is required when source type is Image, and forbidden otherwise"
+type CatalogSource struct {
+ // type is a reference to the type of source the catalog is sourced from.
+ // type is required.
+ //
+ // The only allowed value is "Image".
+ //
+ // When set to "Image", the ClusterCatalog content will be sourced from an OCI image.
+ // When using an image source, the image field must be set and must be the only field defined for this type.
+ //
+ // +unionDiscriminator
+ // +kubebuilder:validation:Enum:="Image"
+ // +kubebuilder:validation:Required
+ Type SourceType `json:"type"`
+ // image is used to configure how catalog contents are sourced from an OCI image.
+ // This field is required when type is Image, and forbidden otherwise.
+ // +optional
+ Image *ImageSource `json:"image,omitempty"`
+}
+
+// ResolvedCatalogSource is a discriminated union of resolution information for a Catalog.
+// ResolvedCatalogSource contains the information about a sourced Catalog
+// +union
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Image' ? has(self.image) : !has(self.image)",message="image is required when source type is Image, and forbidden otherwise"
+type ResolvedCatalogSource struct {
+ // type is a reference to the type of source the catalog is sourced from.
+ // type is required.
+ //
+ // The only allowed value is "Image".
+ //
+ // When set to "Image", information about the resolved image source will be set in the 'image' field.
+ //
+ // +unionDiscriminator
+ // +kubebuilder:validation:Enum:="Image"
+ // +kubebuilder:validation:Required
+ Type SourceType `json:"type"`
+ // image is a field containing resolution information for a catalog sourced from an image.
+ // This field must be set when type is Image, and forbidden otherwise.
+ Image *ResolvedImageSource `json:"image"`
+}
+
+// ResolvedImageSource provides information about the resolved source of a Catalog sourced from an image.
+type ResolvedImageSource struct {
+ // ref contains the resolved image digest-based reference.
+ // The digest format is used so users can use other tooling to fetch the exact
+ // OCI manifests that were used to extract the catalog contents.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MaxLength:=1000
+	// +kubebuilder:validation:XValidation:rule="self.matches('^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])((\\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(:[0-9]+)?\\b')",message="must start with a valid domain. valid domains must be alphanumeric characters (lowercase and uppercase) separated by the \".\" character."
+	// +kubebuilder:validation:XValidation:rule="self.find('(\\/[a-z0-9]+((([._]|__|[-]*)[a-z0-9]+)+)?((\\/[a-z0-9]+((([._]|__|[-]*)[a-z0-9]+)+)?)+)?)') != \"\"",message="a valid name is required. valid names must contain lowercase alphanumeric characters separated only by the \".\", \"_\", \"__\", \"-\" characters."
+ // +kubebuilder:validation:XValidation:rule="self.find('(@.*:)') != \"\"",message="must end with a digest"
+ // +kubebuilder:validation:XValidation:rule="self.find('(@.*:)') != \"\" ? self.find('(@.*:)').matches('(@[A-Za-z][A-Za-z0-9]*([-_+.][A-Za-z][A-Za-z0-9]*)*[:])') : true",message="digest algorithm is not valid. valid algorithms must start with an uppercase or lowercase alpha character followed by alphanumeric characters and may contain the \"-\", \"_\", \"+\", and \".\" characters."
+ // +kubebuilder:validation:XValidation:rule="self.find('(@.*:)') != \"\" ? self.find(':.*$').substring(1).size() >= 32 : true",message="digest is not valid. the encoded string must be at least 32 characters"
+ // +kubebuilder:validation:XValidation:rule="self.find('(@.*:)') != \"\" ? self.find(':.*$').matches(':[0-9A-Fa-f]*$') : true",message="digest is not valid. the encoded string must only contain hex characters (A-F, a-f, 0-9)"
+ Ref string `json:"ref"`
+}
+
+// ImageSource enables users to define the information required for sourcing a Catalog from an OCI image
+//
+// If the image reference appears to be digest-based AND pollIntervalMinutes is specified,
+// reject the resource, since there is no use in polling a digest-based image reference.
+// +kubebuilder:validation:XValidation:rule="self.ref.find('(@.*:)') != \"\" ? !has(self.pollIntervalMinutes) : true",message="cannot specify pollIntervalMinutes while using digest-based image"
+type ImageSource struct {
+ // ref allows users to define the reference to a container image containing Catalog contents.
+ // ref is required.
+ // ref can not be more than 1000 characters.
+ //
+ // A reference can be broken down into 3 parts - the domain, name, and identifier.
+ //
+ // The domain is typically the registry where an image is located.
+ // It must be alphanumeric characters (lowercase and uppercase) separated by the "." character.
+ // Hyphenation is allowed, but the domain must start and end with alphanumeric characters.
+ // Specifying a port to use is also allowed by adding the ":" character followed by numeric values.
+ // The port must be the last value in the domain.
+ // Some examples of valid domain values are "registry.mydomain.io", "quay.io", "my-registry.io:8080".
+ //
+ // The name is typically the repository in the registry where an image is located.
+ // It must contain lowercase alphanumeric characters separated only by the ".", "_", "__", "-" characters.
+ // Multiple names can be concatenated with the "/" character.
+ // The domain and name are combined using the "/" character.
+ // Some examples of valid name values are "operatorhubio/catalog", "catalog", "my-catalog.prod".
+ // An example of the domain and name parts of a reference being combined is "quay.io/operatorhubio/catalog".
+ //
+ // The identifier is typically the tag or digest for an image reference and is present at the end of the reference.
+ // It starts with a separator character used to distinguish the end of the name and beginning of the identifier.
+ // For a digest-based reference, the "@" character is the separator.
+ // For a tag-based reference, the ":" character is the separator.
+ // An identifier is required in the reference.
+ //
+ // Digest-based references must contain an algorithm reference immediately after the "@" separator.
+ // The algorithm reference must be followed by the ":" character and an encoded string.
+ // The algorithm must start with an uppercase or lowercase alpha character followed by alphanumeric characters and may contain the "-", "_", "+", and "." characters.
+ // Some examples of valid algorithm values are "sha256", "sha256+b64u", "multihash+base58".
+ // The encoded string following the algorithm must be hex digits (a-f, A-F, 0-9) and must be a minimum of 32 characters.
+ //
+ // Tag-based references must begin with a word character (alphanumeric + "_") followed by word characters or ".", and "-" characters.
+ // The tag must not be longer than 127 characters.
+ //
+ // An example of a valid digest-based image reference is "quay.io/operatorhubio/catalog@sha256:200d4ddb2a73594b91358fe6397424e975205bfbe44614f5846033cad64b3f05"
+ // An example of a valid tag-based image reference is "quay.io/operatorhubio/catalog:latest"
+ //
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MaxLength:=1000
+ // +kubebuilder:validation:XValidation:rule="self.matches('^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])((\\\\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(:[0-9]+)?\\\\b')",message="must start with a valid domain. valid domains must be alphanumeric characters (lowercase and uppercase) separated by the \".\" character."
+ // +kubebuilder:validation:XValidation:rule="self.find('(\\\\/[a-z0-9]+((([._]|__|[-]*)[a-z0-9]+)+)?((\\\\/[a-z0-9]+((([._]|__|[-]*)[a-z0-9]+)+)?)+)?)') != \"\"",message="a valid name is required. valid names must contain lowercase alphanumeric characters separated only by the \".\", \"_\", \"__\", \"-\" characters."
+ // +kubebuilder:validation:XValidation:rule="self.find('(@.*:)') != \"\" || self.find(':.*$') != \"\"",message="must end with a digest or a tag"
+ // +kubebuilder:validation:XValidation:rule="self.find('(@.*:)') == \"\" ? (self.find(':.*$') != \"\" ? self.find(':.*$').substring(1).size() <= 127 : true) : true",message="tag is invalid. the tag must not be more than 127 characters"
+ // +kubebuilder:validation:XValidation:rule="self.find('(@.*:)') == \"\" ? (self.find(':.*$') != \"\" ? self.find(':.*$').matches(':[\\\\w][\\\\w.-]*$') : true) : true",message="tag is invalid. valid tags must begin with a word character (alphanumeric + \"_\") followed by word characters or \".\", and \"-\" characters"
+ // +kubebuilder:validation:XValidation:rule="self.find('(@.*:)') != \"\" ? self.find('(@.*:)').matches('(@[A-Za-z][A-Za-z0-9]*([-_+.][A-Za-z][A-Za-z0-9]*)*[:])') : true",message="digest algorithm is not valid. valid algorithms must start with an uppercase or lowercase alpha character followed by alphanumeric characters and may contain the \"-\", \"_\", \"+\", and \".\" characters."
+ // +kubebuilder:validation:XValidation:rule="self.find('(@.*:)') != \"\" ? self.find(':.*$').substring(1).size() >= 32 : true",message="digest is not valid. the encoded string must be at least 32 characters"
+ // +kubebuilder:validation:XValidation:rule="self.find('(@.*:)') != \"\" ? self.find(':.*$').matches(':[0-9A-Fa-f]*$') : true",message="digest is not valid. the encoded string must only contain hex characters (A-F, a-f, 0-9)"
+ Ref string `json:"ref"`
+
+ // pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content.
+ // pollIntervalMinutes is optional.
+ // pollIntervalMinutes can not be specified when ref is a digest-based reference.
+ //
+ // When omitted, the image will not be polled for new content.
+ // +kubebuilder:validation:Minimum:=1
+ // +optional
+ PollIntervalMinutes *int `json:"pollIntervalMinutes,omitempty"`
+}
+
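+// An illustrative sketch (hypothetical values) of an ImageSource that polls a
+// tag-based reference every 10 minutes; a digest-based ref must omit
+// PollIntervalMinutes to satisfy the validation above:
+//
+//  src := ImageSource{
+//      Ref:                 "quay.io/operatorhubio/catalog:latest",
+//      PollIntervalMinutes: ptr.To(10), // ptr is k8s.io/utils/ptr
+//  }
+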
+func init() {
+ SchemeBuilder.Register(&ClusterCatalog{}, &ClusterCatalogList{})
+}
diff --git a/catalogd/api/v1/clustercatalog_types_test.go b/catalogd/api/v1/clustercatalog_types_test.go
new file mode 100644
index 000000000..074acc524
--- /dev/null
+++ b/catalogd/api/v1/clustercatalog_types_test.go
@@ -0,0 +1,452 @@
+package v1
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
+ apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+ "k8s.io/apiextensions-apiserver/pkg/apiserver/schema"
+ "k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ celconfig "k8s.io/apiserver/pkg/apis/cel"
+ "k8s.io/utils/ptr"
+ "sigs.k8s.io/yaml"
+)
+
+const crdFilePath = "../../config/base/crd/bases/olm.operatorframework.io_clustercatalogs.yaml"
+
+func TestImageSourceCELValidationRules(t *testing.T) {
+ validators := fieldValidatorsFromFile(t, crdFilePath)
+ pth := "openAPIV3Schema.properties.spec.properties.source.properties.image"
+ validator, found := validators[GroupVersion.Version][pth]
+ require.True(t, found)
+
+ for name, tc := range map[string]struct {
+ spec ImageSource
+ wantErrs []string
+ }{
+ "valid digest based image ref, poll interval not allowed, poll interval specified": {
+ spec: ImageSource{
+ Ref: "docker.io/test-image@sha256:abcdef123456789abcdef123456789abc",
+ PollIntervalMinutes: ptr.To(1),
+ },
+ wantErrs: []string{
+ "openAPIV3Schema.properties.spec.properties.source.properties.image: Invalid value: \"object\": cannot specify pollIntervalMinutes while using digest-based image",
+ },
+ },
+ "valid digest based image ref, poll interval not allowed, poll interval not specified": {
+ spec: ImageSource{
+ Ref: "docker.io/test-image@sha256:abcdef123456789abcdef123456789abc",
+ },
+ wantErrs: []string{},
+ },
+ "invalid digest based image ref, invalid domain": {
+ spec: ImageSource{
+ Ref: "-quay+docker/foo/bar@sha256:abcdef123456789abcdef123456789abc",
+ },
+ wantErrs: []string{
+ "openAPIV3Schema.properties.spec.properties.source.properties.image.ref: Invalid value: \"string\": must start with a valid domain. valid domains must be alphanumeric characters (lowercase and uppercase) separated by the \".\" character.",
+ },
+ },
+ "invalid digest based image ref, invalid name": {
+ spec: ImageSource{
+ Ref: "docker.io/FOO/BAR@sha256:abcdef123456789abcdef123456789abc",
+ },
+ wantErrs: []string{
+ "openAPIV3Schema.properties.spec.properties.source.properties.image.ref: Invalid value: \"string\": a valid name is required. valid names must contain lowercase alphanumeric characters separated only by the \".\", \"_\", \"__\", \"-\" characters.",
+ },
+ },
+ "invalid digest based image ref, invalid digest algorithm": {
+ spec: ImageSource{
+ Ref: "docker.io/foo/bar@99-problems:abcdef123456789abcdef123456789abc",
+ },
+ wantErrs: []string{
+ "openAPIV3Schema.properties.spec.properties.source.properties.image.ref: Invalid value: \"string\": digest algorithm is not valid. valid algorithms must start with an uppercase or lowercase alpha character followed by alphanumeric characters and may contain the \"-\", \"_\", \"+\", and \".\" characters.",
+ },
+ },
+ "invalid digest based image ref, too short digest encoding": {
+ spec: ImageSource{
+ Ref: "docker.io/foo/bar@sha256:abcdef123456789",
+ },
+ wantErrs: []string{
+ "openAPIV3Schema.properties.spec.properties.source.properties.image.ref: Invalid value: \"string\": digest is not valid. the encoded string must be at least 32 characters",
+ },
+ },
+ "invalid digest based image ref, invalid characters in digest encoding": {
+ spec: ImageSource{
+ Ref: "docker.io/foo/bar@sha256:XYZxy123456789abcdef123456789abc",
+ },
+ wantErrs: []string{
+ "openAPIV3Schema.properties.spec.properties.source.properties.image.ref: Invalid value: \"string\": digest is not valid. the encoded string must only contain hex characters (A-F, a-f, 0-9)",
+ },
+ },
+ "invalid image ref, no tag or digest": {
+ spec: ImageSource{
+ Ref: "docker.io/foo/bar",
+ },
+ wantErrs: []string{
+ "openAPIV3Schema.properties.spec.properties.source.properties.image.ref: Invalid value: \"string\": must end with a digest or a tag",
+ },
+ },
+ "invalid tag based image ref, tag too long": {
+ spec: ImageSource{
+ Ref: fmt.Sprintf("docker.io/foo/bar:%s", strings.Repeat("x", 128)),
+ },
+ wantErrs: []string{
+ "openAPIV3Schema.properties.spec.properties.source.properties.image.ref: Invalid value: \"string\": tag is invalid. the tag must not be more than 127 characters",
+ },
+ },
+ "invalid tag based image ref, tag contains invalid characters": {
+ spec: ImageSource{
+ Ref: "docker.io/foo/bar:-foo_bar-",
+ },
+ wantErrs: []string{
+ "openAPIV3Schema.properties.spec.properties.source.properties.image.ref: Invalid value: \"string\": tag is invalid. valid tags must begin with a word character (alphanumeric + \"_\") followed by word characters or \".\", and \"-\" characters",
+ },
+ },
+ "valid tag based image ref": {
+ spec: ImageSource{
+ Ref: "docker.io/foo/bar:v1.0.0",
+ },
+ wantErrs: []string{},
+ },
+ "valid tag based image ref, pollIntervalMinutes specified": {
+ spec: ImageSource{
+ Ref: "docker.io/foo/bar:v1.0.0",
+ PollIntervalMinutes: ptr.To(5),
+ },
+ wantErrs: []string{},
+ },
+ "invalid image ref, only domain with port": {
+ spec: ImageSource{
+ Ref: "docker.io:8080",
+ },
+ wantErrs: []string{
+ "openAPIV3Schema.properties.spec.properties.source.properties.image.ref: Invalid value: \"string\": a valid name is required. valid names must contain lowercase alphanumeric characters separated only by the \".\", \"_\", \"__\", \"-\" characters.",
+ },
+ },
+ "valid image ref, domain with port": {
+ spec: ImageSource{
+ Ref: "my-subdomain.docker.io:8080/foo/bar:latest",
+ },
+ wantErrs: []string{},
+ },
+ "valid image ref, tag ends with hyphen": {
+ spec: ImageSource{
+ Ref: "my-subdomain.docker.io:8080/foo/bar:latest-",
+ },
+ wantErrs: []string{},
+ },
+ } {
+ t.Run(name, func(t *testing.T) {
+ obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&tc.spec) //nolint:gosec
+ require.NoError(t, err)
+ errs := validator(obj, nil)
+ require.Equal(t, len(tc.wantErrs), len(errs), "want", tc.wantErrs, "got", errs)
+ for i := range tc.wantErrs {
+ got := errs[i].Error()
+ assert.Equal(t, tc.wantErrs[i], got)
+ }
+ })
+ }
+}
+
+func TestResolvedImageSourceCELValidation(t *testing.T) {
+ validators := fieldValidatorsFromFile(t, crdFilePath)
+ pth := "openAPIV3Schema.properties.status.properties.resolvedSource.properties.image.properties.ref"
+ validator, found := validators[GroupVersion.Version][pth]
+ require.True(t, found)
+
+ for name, tc := range map[string]struct {
+ spec ImageSource
+ wantErrs []string
+ }{
+ "valid digest based image ref": {
+ spec: ImageSource{
+ Ref: "docker.io/test-image@sha256:abcdef123456789abcdef123456789abc",
+ },
+ wantErrs: []string{},
+ },
+ "invalid digest based image ref, invalid domain": {
+ spec: ImageSource{
+ Ref: "-quay+docker/foo/bar@sha256:abcdef123456789abcdef123456789abc",
+ },
+ wantErrs: []string{
+ "openAPIV3Schema.properties.status.properties.resolvedSource.properties.image.properties.ref: Invalid value: \"string\": must start with a valid domain. valid domains must be alphanumeric characters (lowercase and uppercase) separated by the \".\" character.",
+ },
+ },
+ "invalid digest based image ref, invalid name": {
+ spec: ImageSource{
+ Ref: "docker.io/FOO/BAR@sha256:abcdef123456789abcdef123456789abc",
+ },
+ wantErrs: []string{
+ "openAPIV3Schema.properties.status.properties.resolvedSource.properties.image.properties.ref: Invalid value: \"string\": a valid name is required. valid names must contain lowercase alphanumeric characters separated only by the \".\", \"_\", \"__\", \"-\" characters.",
+ },
+ },
+ "invalid digest based image ref, invalid digest algorithm": {
+ spec: ImageSource{
+ Ref: "docker.io/foo/bar@99-problems:abcdef123456789abcdef123456789abc",
+ },
+ wantErrs: []string{
+ "openAPIV3Schema.properties.status.properties.resolvedSource.properties.image.properties.ref: Invalid value: \"string\": digest algorithm is not valid. valid algorithms must start with an uppercase or lowercase alpha character followed by alphanumeric characters and may contain the \"-\", \"_\", \"+\", and \".\" characters.",
+ },
+ },
+ "invalid digest based image ref, too short digest encoding": {
+ spec: ImageSource{
+ Ref: "docker.io/foo/bar@sha256:abcdef123456789",
+ },
+ wantErrs: []string{
+ "openAPIV3Schema.properties.status.properties.resolvedSource.properties.image.properties.ref: Invalid value: \"string\": digest is not valid. the encoded string must be at least 32 characters",
+ },
+ },
+ "invalid digest based image ref, invalid characters in digest encoding": {
+ spec: ImageSource{
+ Ref: "docker.io/foo/bar@sha256:XYZxy123456789abcdef123456789abc",
+ },
+ wantErrs: []string{
+ "openAPIV3Schema.properties.status.properties.resolvedSource.properties.image.properties.ref: Invalid value: \"string\": digest is not valid. the encoded string must only contain hex characters (A-F, a-f, 0-9)",
+ },
+ },
+ "invalid image ref, no digest": {
+ spec: ImageSource{
+ Ref: "docker.io/foo/bar",
+ },
+ wantErrs: []string{
+ "openAPIV3Schema.properties.status.properties.resolvedSource.properties.image.properties.ref: Invalid value: \"string\": must end with a digest",
+ },
+ },
+ "invalid image ref, only domain with port": {
+ spec: ImageSource{
+ Ref: "docker.io:8080",
+ },
+ wantErrs: []string{
+ "openAPIV3Schema.properties.status.properties.resolvedSource.properties.image.properties.ref: Invalid value: \"string\": a valid name is required. valid names must contain lowercase alphanumeric characters separated only by the \".\", \"_\", \"__\", \"-\" characters.",
+ "openAPIV3Schema.properties.status.properties.resolvedSource.properties.image.properties.ref: Invalid value: \"string\": must end with a digest",
+ },
+ },
+ "invalid image ref, tag-based ref": {
+ spec: ImageSource{
+ Ref: "docker.io/foo/bar:latest",
+ },
+ wantErrs: []string{
+ "openAPIV3Schema.properties.status.properties.resolvedSource.properties.image.properties.ref: Invalid value: \"string\": must end with a digest",
+ },
+ },
+ } {
+ t.Run(name, func(t *testing.T) {
+ errs := validator(tc.spec.Ref, nil)
+ require.Equal(t, len(tc.wantErrs), len(errs), "want", tc.wantErrs, "got", errs)
+ for i := range tc.wantErrs {
+ got := errs[i].Error()
+ assert.Equal(t, tc.wantErrs[i], got)
+ }
+ })
+ }
+}
+
+func TestClusterCatalogURLsCELValidation(t *testing.T) {
+ validators := fieldValidatorsFromFile(t, crdFilePath)
+ pth := "openAPIV3Schema.properties.status.properties.urls.properties.base"
+ validator, found := validators[GroupVersion.Version][pth]
+ require.True(t, found)
+ for name, tc := range map[string]struct {
+ urls ClusterCatalogURLs
+ wantErrs []string
+ }{
+ "base is valid": {
+ urls: ClusterCatalogURLs{
+ Base: "https://catalogd-service.olmv1-system.svc/catalogs/operatorhubio",
+ },
+ wantErrs: []string{},
+ },
+ "base is invalid, scheme is not one of http or https": {
+ urls: ClusterCatalogURLs{
+ Base: "file://somefilepath",
+ },
+ wantErrs: []string{
+ fmt.Sprintf("%s: Invalid value: \"string\": scheme must be either http or https", pth),
+ },
+ },
+ "base is invalid": {
+ urls: ClusterCatalogURLs{
+ Base: "notevenarealURL",
+ },
+ wantErrs: []string{
+ fmt.Sprintf("%s: Invalid value: \"string\": must be a valid URL", pth),
+ },
+ },
+ } {
+ t.Run(name, func(t *testing.T) {
+ errs := validator(tc.urls.Base, nil)
+ require.Equal(t, len(tc.wantErrs), len(errs))
+ for i := range tc.wantErrs {
+ got := errs[i].Error()
+ assert.Equal(t, tc.wantErrs[i], got)
+ }
+ })
+ }
+}
+
+func TestSourceCELValidation(t *testing.T) {
+ validators := fieldValidatorsFromFile(t, crdFilePath)
+ pth := "openAPIV3Schema.properties.spec.properties.source"
+ validator, found := validators[GroupVersion.Version][pth]
+ require.True(t, found)
+ for name, tc := range map[string]struct {
+ source CatalogSource
+ wantErrs []string
+ }{
+ "image source missing required image field": {
+ source: CatalogSource{
+ Type: SourceTypeImage,
+ },
+ wantErrs: []string{
+ fmt.Sprintf("%s: Invalid value: \"object\": image is required when source type is %s, and forbidden otherwise", pth, SourceTypeImage),
+ },
+ },
+ "image source with required image field": {
+ source: CatalogSource{
+ Type: SourceTypeImage,
+ Image: &ImageSource{
+ Ref: "docker.io/foo/bar:latest",
+ },
+ },
+ wantErrs: []string{},
+ },
+ } {
+ t.Run(name, func(t *testing.T) {
+ obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&tc.source) //nolint:gosec
+ require.NoError(t, err)
+ errs := validator(obj, nil)
+ require.Equal(t, len(tc.wantErrs), len(errs))
+ for i := range tc.wantErrs {
+ got := errs[i].Error()
+ assert.Equal(t, tc.wantErrs[i], got)
+ }
+ })
+ }
+}
+
+func TestResolvedSourceCELValidation(t *testing.T) {
+ validators := fieldValidatorsFromFile(t, crdFilePath)
+ pth := "openAPIV3Schema.properties.status.properties.resolvedSource"
+ validator, found := validators[GroupVersion.Version][pth]
+
+ require.True(t, found)
+ for name, tc := range map[string]struct {
+ source ResolvedCatalogSource
+ wantErrs []string
+ }{
+ "image source missing required image field": {
+ source: ResolvedCatalogSource{
+ Type: SourceTypeImage,
+ },
+ wantErrs: []string{
+ fmt.Sprintf("%s: Invalid value: \"object\": image is required when source type is %s, and forbidden otherwise", pth, SourceTypeImage),
+ },
+ },
+ "image source with required image field": {
+ source: ResolvedCatalogSource{
+ Type: SourceTypeImage,
+ Image: &ResolvedImageSource{
+ Ref: "docker.io/foo/bar@sha256:abcdef123456789abcdef123456789abc",
+ },
+ },
+ wantErrs: []string{},
+ },
+ } {
+ t.Run(name, func(t *testing.T) {
+ obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&tc.source) //nolint:gosec
+ require.NoError(t, err)
+ errs := validator(obj, nil)
+ require.Equal(t, len(tc.wantErrs), len(errs))
+ for i := range tc.wantErrs {
+ got := errs[i].Error()
+ assert.Equal(t, tc.wantErrs[i], got)
+ }
+ })
+ }
+}
+
+// fieldValidatorsFromFile extracts the CEL validators by version and JSONPath from a CRD file and returns
+// a validator func for testing against samples.
+// nolint:unparam
+func fieldValidatorsFromFile(t *testing.T, crdFilePath string) map[string]map[string]CELValidateFunc {
+ data, err := os.ReadFile(crdFilePath)
+ require.NoError(t, err)
+
+ var crd apiextensionsv1.CustomResourceDefinition
+ err = yaml.Unmarshal(data, &crd)
+ require.NoError(t, err)
+
+ ret := map[string]map[string]CELValidateFunc{}
+ for _, v := range crd.Spec.Versions {
+ var internalSchema apiextensions.JSONSchemaProps
+ err := apiextensionsv1.Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(v.Schema.OpenAPIV3Schema, &internalSchema, nil)
+ require.NoError(t, err, "failed to convert JSONSchemaProps for version %s: %v", v.Name, err)
+ structuralSchema, err := schema.NewStructural(&internalSchema)
+ require.NoError(t, err, "failed to create StructuralSchema for version %s: %v", v.Name, err)
+
+ versionVals, err := findCEL(structuralSchema, true, field.NewPath("openAPIV3Schema"))
+ require.NoError(t, err, "failed to find CEL for version %s: %v", v.Name, err)
+ ret[v.Name] = versionVals
+ }
+ return ret
+}
+
+// CELValidateFunc tests a sample object against a CEL validator.
+type CELValidateFunc func(obj, old interface{}) field.ErrorList
+
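+// findCEL recursively walks a structural schema and returns a CELValidateFunc
+// for every schema node that declares x-kubernetes-validations rules, keyed by
+// the node's JSONPath (e.g. "openAPIV3Schema.properties.spec.properties.source").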
+func findCEL(s *schema.Structural, root bool, pth *field.Path) (map[string]CELValidateFunc, error) {
+ ret := map[string]CELValidateFunc{}
+
+ if len(s.XValidations) > 0 {
+ s := *s
+ pth := *pth
+ ret[pth.String()] = func(obj, old interface{}) field.ErrorList {
+ errs, _ := cel.NewValidator(&s, root, celconfig.PerCallLimit).Validate(context.TODO(), &pth, &s, obj, old, celconfig.RuntimeCELCostBudget)
+ return errs
+ }
+ }
+
+ for k, v := range s.Properties {
+ v := v
+ sub, err := findCEL(&v, false, pth.Child("properties").Child(k))
+ if err != nil {
+ return nil, err
+ }
+
+ for pth, val := range sub {
+ ret[pth] = val
+ }
+ }
+ if s.Items != nil {
+ sub, err := findCEL(s.Items, false, pth.Child("items"))
+ if err != nil {
+ return nil, err
+ }
+ for pth, val := range sub {
+ ret[pth] = val
+ }
+ }
+ if s.AdditionalProperties != nil && s.AdditionalProperties.Structural != nil {
+ sub, err := findCEL(s.AdditionalProperties.Structural, false, pth.Child("additionalProperties"))
+ if err != nil {
+ return nil, err
+ }
+ for pth, val := range sub {
+ ret[pth] = val
+ }
+ }
+
+ return ret, nil
+}
diff --git a/catalogd/api/v1/groupversion_info.go b/catalogd/api/v1/groupversion_info.go
new file mode 100644
index 000000000..adb650eb2
--- /dev/null
+++ b/catalogd/api/v1/groupversion_info.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2022.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1 contains API Schema definitions for the core v1 API group
+// +kubebuilder:object:generate=true
+// +groupName=olm.operatorframework.io
+package v1
+
+import (
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+ // GroupVersion is group version used to register these objects
+ GroupVersion = schema.GroupVersion{Group: "olm.operatorframework.io", Version: "v1"}
+
+ // SchemeBuilder is used to add go types to the GroupVersionKind scheme
+ SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
+
+ // AddToScheme adds the types in this group-version to the given scheme.
+ AddToScheme = SchemeBuilder.AddToScheme
+)
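+
+// An illustrative sketch (hypothetical variable name) of registering this
+// group-version's types into a scheme, mirroring the catalogd manager setup:
+//
+//  scheme := runtime.NewScheme()
+//  utilruntime.Must(AddToScheme(scheme))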
diff --git a/catalogd/api/v1/zz_generated.deepcopy.go b/catalogd/api/v1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..ce4237514
--- /dev/null
+++ b/catalogd/api/v1/zz_generated.deepcopy.go
@@ -0,0 +1,227 @@
+//go:build !ignore_autogenerated
+
+/*
+Copyright 2022.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CatalogSource) DeepCopyInto(out *CatalogSource) {
+ *out = *in
+ if in.Image != nil {
+ in, out := &in.Image, &out.Image
+ *out = new(ImageSource)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSource.
+func (in *CatalogSource) DeepCopy() *CatalogSource {
+ if in == nil {
+ return nil
+ }
+ out := new(CatalogSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterCatalog) DeepCopyInto(out *ClusterCatalog) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCatalog.
+func (in *ClusterCatalog) DeepCopy() *ClusterCatalog {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterCatalog)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterCatalog) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterCatalogList) DeepCopyInto(out *ClusterCatalogList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ClusterCatalog, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCatalogList.
+func (in *ClusterCatalogList) DeepCopy() *ClusterCatalogList {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterCatalogList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterCatalogList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterCatalogSpec) DeepCopyInto(out *ClusterCatalogSpec) {
+ *out = *in
+ in.Source.DeepCopyInto(&out.Source)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCatalogSpec.
+func (in *ClusterCatalogSpec) DeepCopy() *ClusterCatalogSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterCatalogSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterCatalogStatus) DeepCopyInto(out *ClusterCatalogStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ResolvedSource != nil {
+ in, out := &in.ResolvedSource, &out.ResolvedSource
+ *out = new(ResolvedCatalogSource)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.URLs != nil {
+ in, out := &in.URLs, &out.URLs
+ *out = new(ClusterCatalogURLs)
+ **out = **in
+ }
+ if in.LastUnpacked != nil {
+ in, out := &in.LastUnpacked, &out.LastUnpacked
+ *out = (*in).DeepCopy()
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCatalogStatus.
+func (in *ClusterCatalogStatus) DeepCopy() *ClusterCatalogStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterCatalogStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterCatalogURLs) DeepCopyInto(out *ClusterCatalogURLs) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCatalogURLs.
+func (in *ClusterCatalogURLs) DeepCopy() *ClusterCatalogURLs {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterCatalogURLs)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageSource) DeepCopyInto(out *ImageSource) {
+ *out = *in
+ if in.PollIntervalMinutes != nil {
+ in, out := &in.PollIntervalMinutes, &out.PollIntervalMinutes
+ *out = new(int)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSource.
+func (in *ImageSource) DeepCopy() *ImageSource {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResolvedCatalogSource) DeepCopyInto(out *ResolvedCatalogSource) {
+ *out = *in
+ if in.Image != nil {
+ in, out := &in.Image, &out.Image
+ *out = new(ResolvedImageSource)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolvedCatalogSource.
+func (in *ResolvedCatalogSource) DeepCopy() *ResolvedCatalogSource {
+ if in == nil {
+ return nil
+ }
+ out := new(ResolvedCatalogSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResolvedImageSource) DeepCopyInto(out *ResolvedImageSource) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolvedImageSource.
+func (in *ResolvedImageSource) DeepCopy() *ResolvedImageSource {
+ if in == nil {
+ return nil
+ }
+ out := new(ResolvedImageSource)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/catalogd/cmd/catalogd/main.go b/catalogd/cmd/catalogd/main.go
new file mode 100644
index 000000000..77698444c
--- /dev/null
+++ b/catalogd/cmd/catalogd/main.go
@@ -0,0 +1,387 @@
+/*
+Copyright 2022.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "crypto/tls"
+ "flag"
+ "fmt"
+ "log"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/containers/image/v5/types"
+ "github.com/go-logr/logr"
+ "github.com/spf13/pflag"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/fields"
+ k8slabels "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/runtime"
+ k8stypes "k8s.io/apimachinery/pkg/types"
+ apimachineryrand "k8s.io/apimachinery/pkg/util/rand"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/client-go/metadata"
+ _ "k8s.io/client-go/plugin/pkg/client/auth"
+ "k8s.io/klog/v2"
+ "k8s.io/klog/v2/textlogger"
+ ctrl "sigs.k8s.io/controller-runtime"
+ crcache "sigs.k8s.io/controller-runtime/pkg/cache"
+ "sigs.k8s.io/controller-runtime/pkg/certwatcher"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/healthz"
+ "sigs.k8s.io/controller-runtime/pkg/metrics"
+ "sigs.k8s.io/controller-runtime/pkg/metrics/filters"
+ metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
+ crwebhook "sigs.k8s.io/controller-runtime/pkg/webhook"
+
+ catalogdv1 "github.com/operator-framework/operator-controller/catalogd/api/v1"
+ corecontrollers "github.com/operator-framework/operator-controller/catalogd/internal/controllers/core"
+ "github.com/operator-framework/operator-controller/catalogd/internal/features"
+ "github.com/operator-framework/operator-controller/catalogd/internal/garbagecollection"
+ catalogdmetrics "github.com/operator-framework/operator-controller/catalogd/internal/metrics"
+ "github.com/operator-framework/operator-controller/catalogd/internal/serverutil"
+ "github.com/operator-framework/operator-controller/catalogd/internal/source"
+ "github.com/operator-framework/operator-controller/catalogd/internal/storage"
+ "github.com/operator-framework/operator-controller/catalogd/internal/version"
+ "github.com/operator-framework/operator-controller/catalogd/internal/webhook"
+)
+
+var (
+ scheme = runtime.NewScheme()
+ setupLog = ctrl.Log.WithName("setup")
+)
+
+const (
+ storageDir = "catalogs"
+ authFilePrefix = "catalogd-global-pull-secret"
+)
+
+func init() {
+ utilruntime.Must(clientgoscheme.AddToScheme(scheme))
+
+ utilruntime.Must(catalogdv1.AddToScheme(scheme))
+ //+kubebuilder:scaffold:scheme
+}
+
+func main() {
+ var (
+ metricsAddr string
+ enableLeaderElection bool
+ probeAddr string
+ pprofAddr string
+ catalogdVersion bool
+ systemNamespace string
+ catalogServerAddr string
+ externalAddr string
+ cacheDir string
+ gcInterval time.Duration
+ certFile string
+ keyFile string
+ webhookPort int
+ caCertDir string
+ globalPullSecret string
+ )
+ flag.StringVar(&metricsAddr, "metrics-bind-address", "", "The address for the metrics endpoint. Requires tls-cert and tls-key. (Default: ':7443')")
+ flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
+	flag.StringVar(&pprofAddr, "pprof-bind-address", "0", "The address the pprof endpoint binds to. An empty string or 0 disables pprof.")
+ flag.BoolVar(&enableLeaderElection, "leader-elect", false,
+ "Enable leader election for controller manager. "+
+ "Enabling this will ensure there is only one active controller manager.")
+ flag.StringVar(&systemNamespace, "system-namespace", "", "The namespace catalogd uses for internal state, configuration, and workloads")
+ flag.StringVar(&catalogServerAddr, "catalogs-server-addr", ":8443", "The address where the unpacked catalogs' content will be accessible")
+ flag.StringVar(&externalAddr, "external-address", "catalogd-service.olmv1-system.svc", "The external address at which the http(s) server is reachable.")
+ flag.StringVar(&cacheDir, "cache-dir", "/var/cache/", "The directory in the filesystem that catalogd will use for file based caching")
+ flag.BoolVar(&catalogdVersion, "version", false, "print the catalogd version and exit")
+	flag.DurationVar(&gcInterval, "gc-interval", 12*time.Hour, "The interval at which garbage collection should be run against the catalog content cache")
+ flag.StringVar(&certFile, "tls-cert", "", "The certificate file used for serving catalog and metrics. Required to enable the metrics server. Requires tls-key.")
+ flag.StringVar(&keyFile, "tls-key", "", "The key file used for serving catalog contents and metrics. Required to enable the metrics server. Requires tls-cert.")
+ flag.IntVar(&webhookPort, "webhook-server-port", 9443, "The port that the mutating webhook server serves at.")
+ flag.StringVar(&caCertDir, "ca-certs-dir", "", "The directory of CA certificate to use for verifying HTTPS connections to image registries.")
+	flag.StringVar(&globalPullSecret, "global-pull-secret", "", "The <namespace>/<name> of the global pull secret that is going to be used to pull catalog images.")
+
+ klog.InitFlags(flag.CommandLine)
+
+ // Combine both flagsets and parse them
+ pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
+ features.CatalogdFeatureGate.AddFlag(pflag.CommandLine)
+ pflag.Parse()
+
+ if catalogdVersion {
+ fmt.Printf("%#v\n", version.Version())
+ os.Exit(0)
+ }
+
+ ctrl.SetLogger(textlogger.NewLogger(textlogger.NewConfig()))
+
+ authFilePath := filepath.Join(os.TempDir(), fmt.Sprintf("%s-%s.json", authFilePrefix, apimachineryrand.String(8)))
+ var globalPullSecretKey *k8stypes.NamespacedName
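+	// e.g. --global-pull-secret=olmv1-system/my-pull-secret (hypothetical secret name)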
+ if globalPullSecret != "" {
+ secretParts := strings.Split(globalPullSecret, "/")
+ if len(secretParts) != 2 {
+			setupLog.Error(fmt.Errorf("incorrect number of components"), "value of global-pull-secret should be of the format <namespace>/<name>")
+ os.Exit(1)
+ }
+ globalPullSecretKey = &k8stypes.NamespacedName{Name: secretParts[1], Namespace: secretParts[0]}
+ }
+
+ if (certFile != "" && keyFile == "") || (certFile == "" && keyFile != "") {
+ setupLog.Error(nil, "unable to configure TLS certificates: tls-cert and tls-key flags must be used together")
+ os.Exit(1)
+ }
+
+ if metricsAddr != "" && certFile == "" && keyFile == "" {
+ setupLog.Error(nil, "metrics-bind-address requires tls-cert and tls-key flags to be set")
+ os.Exit(1)
+ }
+
+ if certFile != "" && keyFile != "" && metricsAddr == "" {
+ metricsAddr = ":7443"
+ }
+
+ protocol := "http://"
+ if certFile != "" && keyFile != "" {
+ protocol = "https://"
+ }
+ externalAddr = protocol + externalAddr
+
+ cfg := ctrl.GetConfigOrDie()
+
+ cw, err := certwatcher.New(certFile, keyFile)
+ if err != nil {
+ log.Fatalf("Failed to initialize certificate watcher: %v", err)
+ }
+
+ tlsOpts := func(config *tls.Config) {
+ config.GetCertificate = cw.GetCertificate
+ // Ensure HTTP/2 is disabled by default for webhooks and metrics.
+ // Disabling HTTP/2 mitigates vulnerabilities associated with:
+ // - HTTP/2 Stream Cancellation (GHSA-qppj-fm5r-hxr3)
+ // - HTTP/2 Rapid Reset (GHSA-4374-p667-p6c8)
+ // While CVE fixes exist, they remain insufficient; disabling HTTP/2 helps reduce risks.
+ // For details, see: https://github.com/kubernetes/kubernetes/issues/121197
+ config.NextProtos = []string{"http/1.1"}
+ }
+
+ // Create webhook server and configure TLS
+ webhookServer := crwebhook.NewServer(crwebhook.Options{
+ Port: webhookPort,
+ TLSOpts: []func(*tls.Config){
+ tlsOpts,
+ },
+ })
+
+ metricsServerOptions := metricsserver.Options{}
+ if len(certFile) > 0 && len(keyFile) > 0 {
+ setupLog.Info("Starting metrics server with TLS enabled", "addr", metricsAddr, "tls-cert", certFile, "tls-key", keyFile)
+
+ metricsServerOptions.BindAddress = metricsAddr
+ metricsServerOptions.SecureServing = true
+ metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization
+
+ metricsServerOptions.TLSOpts = append(metricsServerOptions.TLSOpts, tlsOpts)
+ } else {
+		// Note that the metrics server does not serve when the BindAddress is set to "0".
+		// Therefore, the metrics server is disabled by default and is only enabled
+		// when certFile and keyFile are provided. The intention is to avoid serving
+		// metrics with the default self-signed certificate generated by controller-runtime.
+ metricsServerOptions.BindAddress = "0"
+ setupLog.Info("WARNING: Metrics Server is disabled. " +
+ "Metrics will not be served since the TLS certificate and key file are not provided.")
+ }
+
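+	// Scope the informer cache for Secrets to the single configured global pull
+	// secret (when one is set) so the manager does not cache every Secret on the cluster.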
+ cacheOptions := crcache.Options{
+ ByObject: map[client.Object]crcache.ByObject{},
+ }
+ if globalPullSecretKey != nil {
+ cacheOptions.ByObject[&corev1.Secret{}] = crcache.ByObject{
+ Namespaces: map[string]crcache.Config{
+ globalPullSecretKey.Namespace: {
+ LabelSelector: k8slabels.Everything(),
+ FieldSelector: fields.SelectorFromSet(map[string]string{
+ "metadata.name": globalPullSecretKey.Name,
+ }),
+ },
+ },
+ }
+ }
+
+ // Create manager
+ mgr, err := ctrl.NewManager(cfg, ctrl.Options{
+ Scheme: scheme,
+ Metrics: metricsServerOptions,
+ PprofBindAddress: pprofAddr,
+ HealthProbeBindAddress: probeAddr,
+ LeaderElection: enableLeaderElection,
+ LeaderElectionID: "catalogd-operator-lock",
+ WebhookServer: webhookServer,
+ Cache: cacheOptions,
+ })
+ if err != nil {
+ setupLog.Error(err, "unable to create manager")
+ os.Exit(1)
+ }
+
+ // Add the certificate watcher to the manager
+ err = mgr.Add(cw)
+ if err != nil {
+ setupLog.Error(err, "unable to add certificate watcher to manager")
+ os.Exit(1)
+ }
+
+ if systemNamespace == "" {
+ systemNamespace = podNamespace()
+ }
+
+ if err := os.MkdirAll(cacheDir, 0700); err != nil {
+ setupLog.Error(err, "unable to create cache directory")
+ os.Exit(1)
+ }
+
+ unpackCacheBasePath := filepath.Join(cacheDir, source.UnpackCacheDir)
+ if err := os.MkdirAll(unpackCacheBasePath, 0770); err != nil {
+ setupLog.Error(err, "unable to create cache directory for unpacking")
+ os.Exit(1)
+ }
+ unpacker := &source.ContainersImageRegistry{
+ BaseCachePath: unpackCacheBasePath,
+ SourceContextFunc: func(logger logr.Logger) (*types.SystemContext, error) {
+ srcContext := &types.SystemContext{
+ DockerCertPath: caCertDir,
+ OCICertPath: caCertDir,
+ }
+ if _, err := os.Stat(authFilePath); err == nil && globalPullSecretKey != nil {
+ logger.Info("using available authentication information for pulling image")
+ srcContext.AuthFilePath = authFilePath
+ } else if os.IsNotExist(err) {
+ logger.Info("no authentication information found for pulling image, proceeding without auth")
+ } else {
+ return nil, fmt.Errorf("could not stat auth file, error: %w", err)
+ }
+ return srcContext, nil
+ },
+ }
+
+ var localStorage storage.Instance
+ metrics.Registry.MustRegister(catalogdmetrics.RequestDurationMetric)
+
+ storeDir := filepath.Join(cacheDir, storageDir)
+ if err := os.MkdirAll(storeDir, 0700); err != nil {
+ setupLog.Error(err, "unable to create storage directory for catalogs")
+ os.Exit(1)
+ }
+
+ baseStorageURL, err := url.Parse(fmt.Sprintf("%s/catalogs/", externalAddr))
+ if err != nil {
+ setupLog.Error(err, "unable to create base storage URL")
+ os.Exit(1)
+ }
+
+ localStorage = storage.LocalDirV1{RootDir: storeDir, RootURL: baseStorageURL}
+
+	// Config for the catalogd web server
+ catalogServerConfig := serverutil.CatalogServerConfig{
+ ExternalAddr: externalAddr,
+ CatalogAddr: catalogServerAddr,
+ CertFile: certFile,
+ KeyFile: keyFile,
+ LocalStorage: localStorage,
+ }
+
+ err = serverutil.AddCatalogServerToManager(mgr, catalogServerConfig, cw)
+ if err != nil {
+ setupLog.Error(err, "unable to configure catalog server")
+ os.Exit(1)
+ }
+
+ if err = (&corecontrollers.ClusterCatalogReconciler{
+ Client: mgr.GetClient(),
+ Unpacker: unpacker,
+ Storage: localStorage,
+ }).SetupWithManager(mgr); err != nil {
+ setupLog.Error(err, "unable to create controller", "controller", "ClusterCatalog")
+ os.Exit(1)
+ }
+
+ if globalPullSecretKey != nil {
+ setupLog.Info("creating SecretSyncer controller for watching secret", "Secret", globalPullSecret)
+ err := (&corecontrollers.PullSecretReconciler{
+ Client: mgr.GetClient(),
+ AuthFilePath: authFilePath,
+ SecretKey: *globalPullSecretKey,
+ }).SetupWithManager(mgr)
+ if err != nil {
+ setupLog.Error(err, "unable to create controller", "controller", "SecretSyncer")
+ os.Exit(1)
+ }
+ }
+ //+kubebuilder:scaffold:builder
+
+ if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
+ setupLog.Error(err, "unable to set up health check")
+ os.Exit(1)
+ }
+ if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
+ setupLog.Error(err, "unable to set up ready check")
+ os.Exit(1)
+ }
+
+ metaClient, err := metadata.NewForConfig(cfg)
+ if err != nil {
+ setupLog.Error(err, "unable to setup client for garbage collection")
+ os.Exit(1)
+ }
+
+ ctx := ctrl.SetupSignalHandler()
+ gc := &garbagecollection.GarbageCollector{
+ CachePath: unpackCacheBasePath,
+ Logger: ctrl.Log.WithName("garbage-collector"),
+ MetadataClient: metaClient,
+ Interval: gcInterval,
+ }
+ if err := mgr.Add(gc); err != nil {
+ setupLog.Error(err, "unable to add garbage collector to manager")
+ os.Exit(1)
+ }
+
+ // mutating webhook that labels ClusterCatalogs with name label
+ if err = (&webhook.ClusterCatalog{}).SetupWebhookWithManager(mgr); err != nil {
+ setupLog.Error(err, "unable to create webhook", "webhook", "ClusterCatalog")
+ os.Exit(1)
+ }
+
+ setupLog.Info("starting mutating webhook manager")
+ if err := mgr.Start(ctx); err != nil {
+ setupLog.Error(err, "problem running manager")
+ os.Exit(1)
+ }
+ if err := os.Remove(authFilePath); err != nil {
+ setupLog.Error(err, "failed to cleanup temporary auth file")
+ os.Exit(1)
+ }
+}
+
+func podNamespace() string {
+ namespace, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
+ if err != nil {
+ return "olmv1-system"
+ }
+ return string(namespace)
+}
diff --git a/catalogd/config/base/crd/bases/olm.operatorframework.io_clustercatalogs.yaml b/catalogd/config/base/crd/bases/olm.operatorframework.io_clustercatalogs.yaml
new file mode 100644
index 000000000..46750f058
--- /dev/null
+++ b/catalogd/config/base/crd/bases/olm.operatorframework.io_clustercatalogs.yaml
@@ -0,0 +1,441 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.1
+ name: clustercatalogs.olm.operatorframework.io
+spec:
+ group: olm.operatorframework.io
+ names:
+ kind: ClusterCatalog
+ listKind: ClusterCatalogList
+ plural: clustercatalogs
+ singular: clustercatalog
+ scope: Cluster
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .status.lastUnpacked
+ name: LastUnpacked
+ type: date
+ - jsonPath: .status.conditions[?(@.type=="Serving")].status
+ name: Serving
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: |-
+ ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster.
+ For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ spec is the desired state of the ClusterCatalog.
+ spec is required.
+ The controller will work to ensure that the desired
+ catalog is unpacked and served over the catalog content HTTP server.
+ properties:
+ availabilityMode:
+ default: Available
+ description: |-
+ availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster.
+ availabilityMode is optional.
+
+ Allowed values are "Available" and "Unavailable".
+
+ When omitted, the default value is "Available".
+
+ When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server.
+ Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog
+ and its contents as usable.
+
+ When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server.
+ When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing.
+ Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want
+ to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist.
+ enum:
+ - Unavailable
+ - Available
+ type: string
+ priority:
+ default: 0
+ description: |-
+ priority allows the user to define a priority for a ClusterCatalog.
+ priority is optional.
+
+ A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements.
+ A higher number means higher priority.
+
+ It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements.
+ When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input.
+
+ When omitted, the default priority is 0 because that is the zero value of integers.
+
+ Negative numbers can be used to specify a priority lower than the default.
+ Positive numbers can be used to specify a priority higher than the default.
+
+ The lowest possible value is -2147483648.
+ The highest possible value is 2147483647.
+ format: int32
+ type: integer
+ source:
+ description: |-
+ source allows a user to define the source of a catalog.
+ A "catalog" contains information on content that can be installed on a cluster.
+ Providing a catalog source makes the contents of the catalog discoverable and usable by
+ other on-cluster components.
+ These on-cluster components may do a variety of things with this information, such as
+ presenting the content in a GUI dashboard or installing content from the catalog on the cluster.
+ The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format.
+ For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs.
+ source is a required field.
+
+ Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image:
+
+ source:
+ type: Image
+ image:
+ ref: quay.io/operatorhubio/catalog:latest
+ properties:
+ image:
+ description: |-
+ image is used to configure how catalog contents are sourced from an OCI image.
+ This field is required when type is Image, and forbidden otherwise.
+ properties:
+ pollIntervalMinutes:
+ description: |-
+ pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content.
+ pollIntervalMinutes is optional.
+ pollIntervalMinutes can not be specified when ref is a digest-based reference.
+
+ When omitted, the image will not be polled for new content.
+ minimum: 1
+ type: integer
+ ref:
+ description: |-
+ ref allows users to define the reference to a container image containing Catalog contents.
+ ref is required.
+ ref can not be more than 1000 characters.
+
+ A reference can be broken down into 3 parts - the domain, name, and identifier.
+
+ The domain is typically the registry where an image is located.
+ It must be alphanumeric characters (lowercase and uppercase) separated by the "." character.
+ Hyphenation is allowed, but the domain must start and end with alphanumeric characters.
+ Specifying a port to use is also allowed by adding the ":" character followed by numeric values.
+ The port must be the last value in the domain.
+ Some examples of valid domain values are "registry.mydomain.io", "quay.io", "my-registry.io:8080".
+
+ The name is typically the repository in the registry where an image is located.
+ It must contain lowercase alphanumeric characters separated only by the ".", "_", "__", "-" characters.
+ Multiple names can be concatenated with the "/" character.
+ The domain and name are combined using the "/" character.
+ Some examples of valid name values are "operatorhubio/catalog", "catalog", "my-catalog.prod".
+ An example of the domain and name parts of a reference being combined is "quay.io/operatorhubio/catalog".
+
+ The identifier is typically the tag or digest for an image reference and is present at the end of the reference.
+ It starts with a separator character used to distinguish the end of the name and beginning of the identifier.
+ For a digest-based reference, the "@" character is the separator.
+ For a tag-based reference, the ":" character is the separator.
+ An identifier is required in the reference.
+
+ Digest-based references must contain an algorithm reference immediately after the "@" separator.
+ The algorithm reference must be followed by the ":" character and an encoded string.
+ The algorithm must start with an uppercase or lowercase alpha character followed by alphanumeric characters and may contain the "-", "_", "+", and "." characters.
+ Some examples of valid algorithm values are "sha256", "sha256+b64u", "multihash+base58".
+ The encoded string following the algorithm must be hex digits (a-f, A-F, 0-9) and must be a minimum of 32 characters.
+
+ Tag-based references must begin with a word character (alphanumeric + "_") followed by word characters or ".", and "-" characters.
+ The tag must not be longer than 127 characters.
+
+ An example of a valid digest-based image reference is "quay.io/operatorhubio/catalog@sha256:200d4ddb2a73594b91358fe6397424e975205bfbe44614f5846033cad64b3f05"
+ An example of a valid tag-based image reference is "quay.io/operatorhubio/catalog:latest"
+ maxLength: 1000
+ type: string
+ x-kubernetes-validations:
+ - message: must start with a valid domain. valid domains must
+ be alphanumeric characters (lowercase and uppercase) separated
+ by the "." character.
+ rule: self.matches('^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])((\\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(:[0-9]+)?\\b')
+ - message: a valid name is required. valid names must contain
+ lowercase alphanumeric characters separated only by the
+ ".", "_", "__", "-" characters.
+ rule: self.find('(\\/[a-z0-9]+((([._]|__|[-]*)[a-z0-9]+)+)?((\\/[a-z0-9]+((([._]|__|[-]*)[a-z0-9]+)+)?)+)?)')
+ != ""
+ - message: must end with a digest or a tag
+ rule: self.find('(@.*:)') != "" || self.find(':.*$') !=
+ ""
+ - message: tag is invalid. the tag must not be more than 127
+ characters
+ rule: 'self.find(''(@.*:)'') == "" ? (self.find('':.*$'')
+ != "" ? self.find('':.*$'').substring(1).size() <= 127
+ : true) : true'
+ - message: tag is invalid. valid tags must begin with a word
+ character (alphanumeric + "_") followed by word characters
+ or ".", and "-" characters
+ rule: 'self.find(''(@.*:)'') == "" ? (self.find('':.*$'')
+ != "" ? self.find('':.*$'').matches('':[\\w][\\w.-]*$'')
+ : true) : true'
+ - message: digest algorithm is not valid. valid algorithms
+ must start with an uppercase or lowercase alpha character
+ followed by alphanumeric characters and may contain the
+ "-", "_", "+", and "." characters.
+ rule: 'self.find(''(@.*:)'') != "" ? self.find(''(@.*:)'').matches(''(@[A-Za-z][A-Za-z0-9]*([-_+.][A-Za-z][A-Za-z0-9]*)*[:])'')
+ : true'
+ - message: digest is not valid. the encoded string must be
+ at least 32 characters
+ rule: 'self.find(''(@.*:)'') != "" ? self.find('':.*$'').substring(1).size()
+ >= 32 : true'
+ - message: digest is not valid. the encoded string must only
+ contain hex characters (A-F, a-f, 0-9)
+ rule: 'self.find(''(@.*:)'') != "" ? self.find('':.*$'').matches('':[0-9A-Fa-f]*$'')
+ : true'
+ required:
+ - ref
+ type: object
+ x-kubernetes-validations:
+ - message: cannot specify pollIntervalMinutes while using digest-based
+ image
+ rule: 'self.ref.find(''(@.*:)'') != "" ? !has(self.pollIntervalMinutes)
+ : true'
+ type:
+ description: |-
+ type is a reference to the type of source the catalog is sourced from.
+ type is required.
+
+ The only allowed value is "Image".
+
+ When set to "Image", the ClusterCatalog content will be sourced from an OCI image.
+ When using an image source, the image field must be set and must be the only field defined for this type.
+ enum:
+ - Image
+ type: string
+ required:
+ - type
+ type: object
+ x-kubernetes-validations:
+ - message: image is required when source type is Image, and forbidden
+ otherwise
+ rule: 'has(self.type) && self.type == ''Image'' ? has(self.image)
+ : !has(self.image)'
+ required:
+ - source
+ type: object
+ status:
+ description: |-
+ status contains information about the state of the ClusterCatalog such as:
+ - Whether or not the catalog contents are being served via the catalog content HTTP server
+ - Whether or not the ClusterCatalog is progressing to a new state
+ - A reference to the source from which the catalog contents were retrieved
+ properties:
+ conditions:
+ description: |-
+ conditions is a representation of the current state for this ClusterCatalog.
+
+ The current condition types are Serving and Progressing.
+
+ The Serving condition is used to represent whether or not the contents of the catalog are being served via the HTTP(S) web server.
+ When it has a status of True and a reason of Available, the contents of the catalog are being served.
+ When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available.
+ When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable.
+
+ The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state.
+ When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts.
+ When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing.
+ When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery.
+
+ In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched
+ catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog
+ contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes
+ to the contents we identify that there are updates to the contents.
+ items:
+ description: Condition contains details for one aspect of the current
+ state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - type
+ x-kubernetes-list-type: map
+ lastUnpacked:
+ description: |-
+ lastUnpacked represents the last time the contents of the
+ catalog were extracted from their source format. As an example,
+ when using an Image source, the OCI image will be pulled and the
+ image layers written to a file-system backed cache. We refer to the
+ act of this extraction from the source format as "unpacking".
+ format: date-time
+ type: string
+ resolvedSource:
+ description: resolvedSource contains information about the resolved
+ source based on the source type.
+ properties:
+ image:
+ description: |-
+ image is a field containing resolution information for a catalog sourced from an image.
+ This field must be set when type is Image, and forbidden otherwise.
+ properties:
+ ref:
+ description: |-
+ ref contains the resolved image digest-based reference.
+ The digest format is used so users can use other tooling to fetch the exact
+ OCI manifests that were used to extract the catalog contents.
+ maxLength: 1000
+ type: string
+ x-kubernetes-validations:
+ - message: must start with a valid domain. valid domains must
+ be alphanumeric characters (lowercase and uppercase) separated
+ by the "." character.
+ rule: self.matches('^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])((\\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(:[0-9]+)?\\b')
+ - message: a valid name is required. valid names must contain
+ lowercase alphanumeric characters separated only by the
+ ".", "_", "__", "-" characters.
+ rule: self.find('(\\/[a-z0-9]+((([._]|__|[-]*)[a-z0-9]+)+)?((\\/[a-z0-9]+((([._]|__|[-]*)[a-z0-9]+)+)?)+)?)')
+ != ""
+ - message: must end with a digest
+ rule: self.find('(@.*:)') != ""
+ - message: digest algorithm is not valid. valid algorithms
+ must start with an uppercase or lowercase alpha character
+ followed by alphanumeric characters and may contain the
+ "-", "_", "+", and "." characters.
+ rule: 'self.find(''(@.*:)'') != "" ? self.find(''(@.*:)'').matches(''(@[A-Za-z][A-Za-z0-9]*([-_+.][A-Za-z][A-Za-z0-9]*)*[:])'')
+ : true'
+ - message: digest is not valid. the encoded string must be
+ at least 32 characters
+ rule: 'self.find(''(@.*:)'') != "" ? self.find('':.*$'').substring(1).size()
+ >= 32 : true'
+ - message: digest is not valid. the encoded string must only
+ contain hex characters (A-F, a-f, 0-9)
+ rule: 'self.find(''(@.*:)'') != "" ? self.find('':.*$'').matches('':[0-9A-Fa-f]*$'')
+ : true'
+ required:
+ - ref
+ type: object
+ type:
+ description: |-
+ type is a reference to the type of source the catalog is sourced from.
+ type is required.
+
+ The only allowed value is "Image".
+
+ When set to "Image", information about the resolved image source will be set in the 'image' field.
+ enum:
+ - Image
+ type: string
+ required:
+ - image
+ - type
+ type: object
+ x-kubernetes-validations:
+ - message: image is required when source type is Image, and forbidden
+ otherwise
+ rule: 'has(self.type) && self.type == ''Image'' ? has(self.image)
+ : !has(self.image)'
+ urls:
+ description: urls contains the URLs that can be used to access the
+ catalog.
+ properties:
+ base:
+ description: |-
+ base is a cluster-internal URL that provides endpoints for
+ accessing the content of the catalog.
+
+ It is expected that clients append the path for the endpoint they wish
+ to access.
+
+ Currently, only a single endpoint is served and is accessible at the path
+ /api/v1.
+
+ The endpoints served for the v1 API are:
+ - /all - this endpoint returns the entirety of the catalog contents in the FBC format
+
+ As the needs of users and clients evolve, new endpoints may be added.
+ maxLength: 525
+ type: string
+ x-kubernetes-validations:
+ - message: must be a valid URL
+ rule: isURL(self)
+ - message: scheme must be either http or https
+ rule: 'isURL(self) ? (url(self).getScheme() == "http" || url(self).getScheme()
+ == "https") : true'
+ required:
+ - base
+ type: object
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/catalogd/config/base/crd/kustomization.yaml b/catalogd/config/base/crd/kustomization.yaml
new file mode 100644
index 000000000..36c151281
--- /dev/null
+++ b/catalogd/config/base/crd/kustomization.yaml
@@ -0,0 +1,6 @@
+# This kustomization.yaml is not intended to be run by itself,
+# since it depends on service name and namespace that are out of this kustomize package.
+# It should be run by config/default
+resources:
+- bases/olm.operatorframework.io_clustercatalogs.yaml
+#+kubebuilder:scaffold:crdkustomizeresource
diff --git a/catalogd/config/base/default/clustercatalogs/default-catalogs.yaml b/catalogd/config/base/default/clustercatalogs/default-catalogs.yaml
new file mode 100644
index 000000000..a656b3509
--- /dev/null
+++ b/catalogd/config/base/default/clustercatalogs/default-catalogs.yaml
@@ -0,0 +1,11 @@
+apiVersion: olm.operatorframework.io/v1
+kind: ClusterCatalog
+metadata:
+ name: operatorhubio
+ namespace: olmv1-system
+spec:
+ source:
+ type: Image
+ image:
+ ref: quay.io/operatorhubio/catalog:latest
+ pollIntervalMinutes: 10
diff --git a/catalogd/config/base/default/kustomization.yaml b/catalogd/config/base/default/kustomization.yaml
new file mode 100644
index 000000000..93dce3bac
--- /dev/null
+++ b/catalogd/config/base/default/kustomization.yaml
@@ -0,0 +1,17 @@
+# Adds namespace to all resources.
+namespace: olmv1-system
+
+# Value of this field is prepended to the
+# names of all resources, e.g. a deployment named
+# "wordpress" becomes "alices-wordpress".
+# Note that it should also match the prefix (text before '-') of the namespace
+# field above.
+namePrefix: catalogd-
+
+# the following config is for teaching kustomize how to do var substitution
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+- ../crd
+- ../rbac
+- ../manager
diff --git a/catalogd/config/base/manager/catalogd_service.yaml b/catalogd/config/base/manager/catalogd_service.yaml
new file mode 100644
index 000000000..693b687f3
--- /dev/null
+++ b/catalogd/config/base/manager/catalogd_service.yaml
@@ -0,0 +1,24 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app.kubernetes.io/part-of: olm
+ app.kubernetes.io/name: catalogd
+ name: service
+ namespace: system
+spec:
+ selector:
+ control-plane: catalogd-controller-manager
+ ports:
+ - name: http
+ protocol: TCP
+ port: 80
+ targetPort: 8443
+ - name: webhook
+ protocol: TCP
+ port: 9443
+ targetPort: 9443
+ - name: metrics
+ protocol: TCP
+ port: 7443
+ targetPort: 7443
diff --git a/catalogd/config/base/manager/kustomization.yaml b/catalogd/config/base/manager/kustomization.yaml
new file mode 100644
index 000000000..4ca2781d9
--- /dev/null
+++ b/catalogd/config/base/manager/kustomization.yaml
@@ -0,0 +1,17 @@
+resources:
+- manager.yaml
+- catalogd_service.yaml
+- webhook/manifests.yaml
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+images:
+- name: controller
+ newName: quay.io/operator-framework/catalogd
+ newTag: devel
+patches:
+- path: webhook/patch.yaml
+ target:
+ group: admissionregistration.k8s.io
+ kind: MutatingWebhookConfiguration
+ name: mutating-webhook-configuration
+ version: v1
diff --git a/catalogd/config/base/manager/manager.yaml b/catalogd/config/base/manager/manager.yaml
new file mode 100644
index 000000000..b394b2800
--- /dev/null
+++ b/catalogd/config/base/manager/manager.yaml
@@ -0,0 +1,91 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ labels:
+ app.kubernetes.io/part-of: olm
+ pod-security.kubernetes.io/enforce: baseline
+ pod-security.kubernetes.io/enforce-version: latest
+ name: system
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: controller-manager
+ namespace: system
+ annotations:
+ kubectl.kubernetes.io/default-logs-container: manager
+ labels:
+ control-plane: catalogd-controller-manager
+spec:
+ selector:
+ matchLabels:
+ control-plane: catalogd-controller-manager
+ replicas: 1
+ minReadySeconds: 5
+ template:
+ metadata:
+ annotations:
+ kubectl.kubernetes.io/default-container: manager
+ labels:
+ control-plane: catalogd-controller-manager
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/arch
+ operator: In
+ values:
+ - amd64
+ - arm64
+ - ppc64le
+ - s390x
+ - key: kubernetes.io/os
+ operator: In
+ values:
+ - linux
+ securityContext:
+ runAsNonRoot: true
+ seccompProfile:
+ type: RuntimeDefault
+ containers:
+ - command:
+ - ./catalogd
+ args:
+ - --leader-elect
+ - --metrics-bind-address=:7443
+ - --external-address=catalogd-service.olmv1-system.svc
+ image: controller:latest
+ name: manager
+ volumeMounts:
+ - name: cache
+ mountPath: /var/cache/
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: 8081
+ initialDelaySeconds: 15
+ periodSeconds: 20
+ readinessProbe:
+ httpGet:
+ path: /readyz
+ port: 8081
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ resources:
+ requests:
+ cpu: 100m
+ memory: 200Mi
+ imagePullPolicy: IfNotPresent
+ terminationMessagePolicy: FallbackToLogsOnError
+ serviceAccountName: controller-manager
+ terminationGracePeriodSeconds: 10
+ volumes:
+ - name: cache
+ emptyDir: {}
diff --git a/catalogd/config/base/manager/webhook/manifests.yaml b/catalogd/config/base/manager/webhook/manifests.yaml
new file mode 100644
index 000000000..a5842de42
--- /dev/null
+++ b/catalogd/config/base/manager/webhook/manifests.yaml
@@ -0,0 +1,27 @@
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: MutatingWebhookConfiguration
+metadata:
+ name: mutating-webhook-configuration
+webhooks:
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: webhook-service
+ namespace: system
+ path: /mutate-olm-operatorframework-io-v1-clustercatalog
+ failurePolicy: Fail
+ name: inject-metadata-name.olm.operatorframework.io
+ rules:
+ - apiGroups:
+ - olm.operatorframework.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - clustercatalogs
+ sideEffects: None
+ timeoutSeconds: 10
diff --git a/catalogd/config/base/manager/webhook/patch.yaml b/catalogd/config/base/manager/webhook/patch.yaml
new file mode 100644
index 000000000..ab8528c76
--- /dev/null
+++ b/catalogd/config/base/manager/webhook/patch.yaml
@@ -0,0 +1,20 @@
+# None of these values can be set via the kubebuilder directive, hence this patch
+- op: replace
+ path: /webhooks/0/clientConfig/service/namespace
+ value: olmv1-system
+- op: replace
+ path: /webhooks/0/clientConfig/service/name
+ value: catalogd-service
+- op: add
+ path: /webhooks/0/clientConfig/service/port
+ value: 9443
+# Make sure there's a name defined, otherwise, we can't create a label. This could happen when generateName is set
+# Then, if any of the conditions are true, create the label:
+# 1. No labels exist
+# 2. The olm.operatorframework.io/metadata.name label doesn't exist
+# 3. The olm.operatorframework.io/metadata.name label doesn't match the name
+- op: add
+ path: /webhooks/0/matchConditions
+ value:
+ - name: MissingOrIncorrectMetadataNameLabel
+ expression: "'name' in object.metadata && (!has(object.metadata.labels) || !('olm.operatorframework.io/metadata.name' in object.metadata.labels) || object.metadata.labels['olm.operatorframework.io/metadata.name'] != object.metadata.name)"
diff --git a/catalogd/config/base/nginx-ingress/kustomization.yaml b/catalogd/config/base/nginx-ingress/kustomization.yaml
new file mode 100644
index 000000000..7bdced5d6
--- /dev/null
+++ b/catalogd/config/base/nginx-ingress/kustomization.yaml
@@ -0,0 +1,7 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+- ../default
+- resources/nginx_ingress.yaml
+- https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml
diff --git a/catalogd/config/base/nginx-ingress/resources/nginx_ingress.yaml b/catalogd/config/base/nginx-ingress/resources/nginx_ingress.yaml
new file mode 100644
index 000000000..81f775fba
--- /dev/null
+++ b/catalogd/config/base/nginx-ingress/resources/nginx_ingress.yaml
@@ -0,0 +1,17 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: catalogd-ingress
+ namespace: olmv1-system
+spec:
+ ingressClassName: nginx
+ rules:
+ - http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: catalogd-service
+ port:
+ number: 80
diff --git a/catalogd/config/base/rbac/auth_proxy_client_clusterrole.yaml b/catalogd/config/base/rbac/auth_proxy_client_clusterrole.yaml
new file mode 100644
index 000000000..ab8871b2e
--- /dev/null
+++ b/catalogd/config/base/rbac/auth_proxy_client_clusterrole.yaml
@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/part-of: olm
+ app.kubernetes.io/name: catalogd
+ name: metrics-reader
+rules:
+- nonResourceURLs:
+ - "/metrics"
+ verbs:
+ - get
diff --git a/catalogd/config/base/rbac/auth_proxy_role.yaml b/catalogd/config/base/rbac/auth_proxy_role.yaml
new file mode 100644
index 000000000..3edf78f58
--- /dev/null
+++ b/catalogd/config/base/rbac/auth_proxy_role.yaml
@@ -0,0 +1,20 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/part-of: olm
+ app.kubernetes.io/name: catalogd
+ name: proxy-role
+rules:
+- apiGroups:
+ - authentication.k8s.io
+ resources:
+ - tokenreviews
+ verbs:
+ - create
+- apiGroups:
+ - authorization.k8s.io
+ resources:
+ - subjectaccessreviews
+ verbs:
+ - create
diff --git a/catalogd/config/base/rbac/auth_proxy_role_binding.yaml b/catalogd/config/base/rbac/auth_proxy_role_binding.yaml
new file mode 100644
index 000000000..2efcf8dd8
--- /dev/null
+++ b/catalogd/config/base/rbac/auth_proxy_role_binding.yaml
@@ -0,0 +1,15 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/part-of: olm
+ app.kubernetes.io/name: catalogd
+ name: proxy-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: proxy-role
+subjects:
+- kind: ServiceAccount
+ name: controller-manager
+ namespace: system
diff --git a/catalogd/config/base/rbac/kustomization.yaml b/catalogd/config/base/rbac/kustomization.yaml
new file mode 100644
index 000000000..8ed66bdd1
--- /dev/null
+++ b/catalogd/config/base/rbac/kustomization.yaml
@@ -0,0 +1,20 @@
+resources:
+# All RBAC will be applied under this service account in
+# the deployment namespace. You may comment out this resource
+# if your manager will use a service account that exists at
+# runtime. Be sure to update RoleBinding and ClusterRoleBinding
+# subjects if changing service account names.
+- service_account.yaml
+- role.yaml
+- role_binding.yaml
+- leader_election_role.yaml
+- leader_election_role_binding.yaml
+# The following RBAC configurations are used to protect
+# the metrics endpoint with authn/authz. These configurations
+# ensure that only authorized users and service accounts
+# can access the metrics endpoint. Comment the following
+# permissions if you want to disable this protection.
+# More info: https://book.kubebuilder.io/reference/metrics.html
+- auth_proxy_role.yaml
+- auth_proxy_role_binding.yaml
+- auth_proxy_client_clusterrole.yaml
diff --git a/catalogd/config/base/rbac/leader_election_role.yaml b/catalogd/config/base/rbac/leader_election_role.yaml
new file mode 100644
index 000000000..37564d084
--- /dev/null
+++ b/catalogd/config/base/rbac/leader_election_role.yaml
@@ -0,0 +1,40 @@
+# permissions to do leader election.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ labels:
+ app.kubernetes.io/part-of: olm
+ app.kubernetes.io/name: catalogd
+ name: leader-election-role
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+- apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
diff --git a/catalogd/config/base/rbac/leader_election_role_binding.yaml b/catalogd/config/base/rbac/leader_election_role_binding.yaml
new file mode 100644
index 000000000..6ad0ccf99
--- /dev/null
+++ b/catalogd/config/base/rbac/leader_election_role_binding.yaml
@@ -0,0 +1,15 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/part-of: olm
+ app.kubernetes.io/name: catalogd
+ name: leader-election-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: leader-election-role
+subjects:
+- kind: ServiceAccount
+ name: controller-manager
+ namespace: system
diff --git a/catalogd/config/base/rbac/role.yaml b/catalogd/config/base/rbac/role.yaml
new file mode 100644
index 000000000..40f4095c6
--- /dev/null
+++ b/catalogd/config/base/rbac/role.yaml
@@ -0,0 +1,32 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: manager-role
+rules:
+- apiGroups:
+ - olm.operatorframework.io
+ resources:
+ - clustercatalogs
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - olm.operatorframework.io
+ resources:
+ - clustercatalogs/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - olm.operatorframework.io
+ resources:
+ - clustercatalogs/status
+ verbs:
+ - get
+ - patch
+ - update
diff --git a/catalogd/config/base/rbac/role_binding.yaml b/catalogd/config/base/rbac/role_binding.yaml
new file mode 100644
index 000000000..a618c0e47
--- /dev/null
+++ b/catalogd/config/base/rbac/role_binding.yaml
@@ -0,0 +1,15 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/part-of: olm
+ app.kubernetes.io/name: catalogd
+ name: manager-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: manager-role
+subjects:
+- kind: ServiceAccount
+ name: controller-manager
+ namespace: system
diff --git a/catalogd/config/base/rbac/service_account.yaml b/catalogd/config/base/rbac/service_account.yaml
new file mode 100644
index 000000000..3f0e7af74
--- /dev/null
+++ b/catalogd/config/base/rbac/service_account.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ app.kubernetes.io/part-of: olm
+ app.kubernetes.io/name: catalogd
+ name: controller-manager
+ namespace: system
diff --git a/catalogd/config/components/ca/kustomization.yaml b/catalogd/config/components/ca/kustomization.yaml
new file mode 100644
index 000000000..113d2a957
--- /dev/null
+++ b/catalogd/config/components/ca/kustomization.yaml
@@ -0,0 +1,10 @@
+apiVersion: kustomize.config.k8s.io/v1alpha1
+kind: Component
+# No namespace is specified here, otherwise, it will overwrite _all_ the other namespaces!
+resources:
+- resources/issuers.yaml
+patches:
+- target:
+ kind: Deployment
+ name: controller-manager
+ path: patches/manager_deployment_cacerts.yaml
diff --git a/catalogd/config/components/ca/patches/manager_deployment_cacerts.yaml b/catalogd/config/components/ca/patches/manager_deployment_cacerts.yaml
new file mode 100644
index 000000000..b5b03633e
--- /dev/null
+++ b/catalogd/config/components/ca/patches/manager_deployment_cacerts.yaml
@@ -0,0 +1,9 @@
+- op: add
+ path: /spec/template/spec/volumes/-
+ value: {"name":"olmv1-certificate", "secret":{"secretName":"catalogd-service-cert-git-version", "optional": false, "items": [{"key": "ca.crt", "path": "olm-ca.crt"}]}}
+- op: add
+ path: /spec/template/spec/containers/0/volumeMounts/-
+ value: {"name":"olmv1-certificate", "readOnly": true, "mountPath":"/var/ca-certs/"}
+- op: add
+ path: /spec/template/spec/containers/0/args/-
+ value: "--ca-certs-dir=/var/ca-certs"
diff --git a/catalogd/config/components/ca/resources/issuers.yaml b/catalogd/config/components/ca/resources/issuers.yaml
new file mode 100644
index 000000000..00e149d56
--- /dev/null
+++ b/catalogd/config/components/ca/resources/issuers.yaml
@@ -0,0 +1,35 @@
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+ name: self-sign-issuer
+ namespace: cert-manager
+spec:
+ selfSigned: {}
+---
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ name: olmv1-ca
+ namespace: cert-manager
+spec:
+ isCA: true
+ commonName: olmv1-ca
+ secretName: olmv1-ca
+ secretTemplate:
+ annotations:
+ cert-manager.io/allow-direct-injection: "true"
+ privateKey:
+ algorithm: ECDSA
+ size: 256
+ issuerRef:
+ name: self-sign-issuer
+ kind: Issuer
+ group: cert-manager.io
+---
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+ name: olmv1-ca
+spec:
+ ca:
+ secretName: olmv1-ca
diff --git a/catalogd/config/components/registries-conf/kustomization.yaml b/catalogd/config/components/registries-conf/kustomization.yaml
new file mode 100644
index 000000000..e48262429
--- /dev/null
+++ b/catalogd/config/components/registries-conf/kustomization.yaml
@@ -0,0 +1,7 @@
+apiVersion: kustomize.config.k8s.io/v1alpha1
+kind: Component
+namespace: olmv1-system
+resources:
+- registries_conf_configmap.yaml
+patches:
+- path: manager_e2e_registries_conf_patch.yaml
diff --git a/catalogd/config/components/registries-conf/manager_e2e_registries_conf_patch.yaml b/catalogd/config/components/registries-conf/manager_e2e_registries_conf_patch.yaml
new file mode 100644
index 000000000..42012d697
--- /dev/null
+++ b/catalogd/config/components/registries-conf/manager_e2e_registries_conf_patch.yaml
@@ -0,0 +1,17 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: controller-manager
+ namespace: system
+spec:
+ template:
+ spec:
+ containers:
+ - name: manager
+ volumeMounts:
+ - name: e2e-registries-conf
+ mountPath: /etc/containers
+ volumes:
+ - name: e2e-registries-conf
+ configMap:
+ name: e2e-registries-conf
diff --git a/catalogd/config/components/registries-conf/registries_conf_configmap.yaml b/catalogd/config/components/registries-conf/registries_conf_configmap.yaml
new file mode 100644
index 000000000..3561bbe59
--- /dev/null
+++ b/catalogd/config/components/registries-conf/registries_conf_configmap.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: e2e-registries-conf
+ namespace: system
+data:
+ registries.conf: |
+ [[registry]]
+ prefix = "docker-registry.catalogd-e2e.svc:5000"
+ insecure = true
+ location = "docker-registry.catalogd-e2e.svc:5000"
diff --git a/catalogd/config/components/tls/kustomization.yaml b/catalogd/config/components/tls/kustomization.yaml
new file mode 100644
index 000000000..f537d5d14
--- /dev/null
+++ b/catalogd/config/components/tls/kustomization.yaml
@@ -0,0 +1,21 @@
+apiVersion: kustomize.config.k8s.io/v1alpha1
+kind: Component
+namespace: olmv1-system
+namePrefix: catalogd-
+resources:
+- resources/certificate.yaml
+patches:
+- target:
+ kind: Service
+ name: service
+ path: patches/catalogd_service_port.yaml
+- target:
+ kind: Deployment
+ name: controller-manager
+ path: patches/manager_deployment_certs.yaml
+- target:
+ group: admissionregistration.k8s.io
+ kind: MutatingWebhookConfiguration
+ name: mutating-webhook-configuration
+ version: v1
+ path: patches/catalogd_webhook.yaml
diff --git a/catalogd/config/components/tls/patches/catalogd_service_port.yaml b/catalogd/config/components/tls/patches/catalogd_service_port.yaml
new file mode 100644
index 000000000..b5b88bb47
--- /dev/null
+++ b/catalogd/config/components/tls/patches/catalogd_service_port.yaml
@@ -0,0 +1,6 @@
+- op: replace
+ path: /spec/ports/0/port
+ value: 443
+- op: replace
+ path: /spec/ports/0/name
+ value: https
\ No newline at end of file
diff --git a/catalogd/config/components/tls/patches/catalogd_webhook.yaml b/catalogd/config/components/tls/patches/catalogd_webhook.yaml
new file mode 100644
index 000000000..cf1a39ec3
--- /dev/null
+++ b/catalogd/config/components/tls/patches/catalogd_webhook.yaml
@@ -0,0 +1,3 @@
+- op: add
+ path: /metadata/annotations/cert-manager.io~1inject-ca-from-secret
+ value: cert-manager/olmv1-ca
diff --git a/catalogd/config/components/tls/patches/manager_deployment_certs.yaml b/catalogd/config/components/tls/patches/manager_deployment_certs.yaml
new file mode 100644
index 000000000..3d8b33ac3
--- /dev/null
+++ b/catalogd/config/components/tls/patches/manager_deployment_certs.yaml
@@ -0,0 +1,12 @@
+- op: add
+ path: /spec/template/spec/volumes/-
+ value: {"name":"catalogserver-certs", "secret":{"secretName":"catalogd-service-cert-git-version"}}
+- op: add
+ path: /spec/template/spec/containers/0/volumeMounts/-
+ value: {"name":"catalogserver-certs", "mountPath":"/var/certs"}
+- op: add
+ path: /spec/template/spec/containers/0/args/-
+ value: "--tls-cert=/var/certs/tls.crt"
+- op: add
+ path: /spec/template/spec/containers/0/args/-
+ value: "--tls-key=/var/certs/tls.key"
diff --git a/catalogd/config/components/tls/resources/certificate.yaml b/catalogd/config/components/tls/resources/certificate.yaml
new file mode 100644
index 000000000..be14f8301
--- /dev/null
+++ b/catalogd/config/components/tls/resources/certificate.yaml
@@ -0,0 +1,19 @@
+---
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ name: service-cert
+ namespace: system
+spec:
+ secretName: catalogd-service-cert-git-version
+ dnsNames:
+ - localhost
+ - catalogd-service.olmv1-system.svc
+ - catalogd-service.olmv1-system.svc.cluster.local
+ privateKey:
+ algorithm: ECDSA
+ size: 256
+ issuerRef:
+ kind: ClusterIssuer
+ group: cert-manager.io
+ name: olmv1-ca
diff --git a/catalogd/config/overlays/cert-manager/kustomization.yaml b/catalogd/config/overlays/cert-manager/kustomization.yaml
new file mode 100644
index 000000000..fb27be4f4
--- /dev/null
+++ b/catalogd/config/overlays/cert-manager/kustomization.yaml
@@ -0,0 +1,9 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+- ../../base/crd
+- ../../base/rbac
+- ../../base/manager
+components:
+- ../../components/tls
+- ../../components/ca
diff --git a/catalogd/config/overlays/e2e/kustomization.yaml b/catalogd/config/overlays/e2e/kustomization.yaml
new file mode 100644
index 000000000..dbfd7d737
--- /dev/null
+++ b/catalogd/config/overlays/e2e/kustomization.yaml
@@ -0,0 +1,12 @@
+# kustomization file for all the e2e's
+# DO NOT ADD A NAMESPACE HERE
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+ - ../../base/crd
+ - ../../base/rbac
+ - ../../base/manager
+components:
+ - ../../components/tls
+ - ../../components/registries-conf
+ - ../../components/ca
diff --git a/catalogd/config/rbac/role.yaml b/catalogd/config/rbac/role.yaml
new file mode 100644
index 000000000..b0cf5a213
--- /dev/null
+++ b/catalogd/config/rbac/role.yaml
@@ -0,0 +1,65 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: manager-role
+rules:
+- apiGroups:
+ - olm.operatorframework.io
+ resources:
+ - clustercatalogs
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - olm.operatorframework.io
+ resources:
+ - clustercatalogs/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - olm.operatorframework.io
+ resources:
+ - clustercatalogs/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - pods/log
+ verbs:
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: manager-role
+ namespace: system
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
diff --git a/catalogd/config/samples/core_v1_clustercatalog.yaml b/catalogd/config/samples/core_v1_clustercatalog.yaml
new file mode 100644
index 000000000..661bf2a6c
--- /dev/null
+++ b/catalogd/config/samples/core_v1_clustercatalog.yaml
@@ -0,0 +1,11 @@
+apiVersion: olm.operatorframework.io/v1
+kind: ClusterCatalog
+metadata:
+ name: operatorhubio
+spec:
+ priority: 0
+ source:
+ type: Image
+ image:
+ pollIntervalMinutes: 1440
+ ref: quay.io/operatorhubio/catalog:latest
diff --git a/catalogd/crd-diff-config.yaml b/catalogd/crd-diff-config.yaml
new file mode 100644
index 000000000..8cce39378
--- /dev/null
+++ b/catalogd/crd-diff-config.yaml
@@ -0,0 +1,109 @@
+checks:
+ crd:
+ scope:
+ enabled: true
+ existingFieldRemoval:
+ enabled: true
+ storedVersionRemoval:
+ enabled: true
+ version:
+ sameVersion:
+ enabled: true
+ unhandledFailureMode: "Closed"
+ enum:
+ enabled: true
+ removalEnforcement: "Strict"
+ additionEnforcement: "Strict"
+ default:
+ enabled: true
+ changeEnforcement: "Strict"
+ removalEnforcement: "Strict"
+ additionEnforcement: "Strict"
+ required:
+ enabled: true
+ newEnforcement: "Strict"
+ type:
+ enabled: true
+ changeEnforcement: "Strict"
+ maximum:
+ enabled: true
+ additionEnforcement: "Strict"
+ decreaseEnforcement: "Strict"
+ maxItems:
+ enabled: true
+ additionEnforcement: "Strict"
+ decreaseEnforcement: "Strict"
+ maxProperties:
+ enabled: true
+ additionEnforcement: "Strict"
+ decreaseEnforcement: "Strict"
+ maxLength:
+ enabled: true
+ additionEnforcement: "Strict"
+ decreaseEnforcement: "Strict"
+ minimum:
+ enabled: true
+ additionEnforcement: "Strict"
+ increaseEnforcement: "Strict"
+ minItems:
+ enabled: true
+ additionEnforcement: "Strict"
+ increaseEnforcement: "Strict"
+ minProperties:
+ enabled: true
+ additionEnforcement: "Strict"
+ increaseEnforcement: "Strict"
+ minLength:
+ enabled: true
+ additionEnforcement: "Strict"
+ increaseEnforcement: "Strict"
+ servedVersion:
+ enabled: true
+ unhandledFailureMode: "Closed"
+ enum:
+ enabled: true
+ removalEnforcement: "Strict"
+ additionEnforcement: "Strict"
+ default:
+ enabled: true
+ changeEnforcement: "Strict"
+ removalEnforcement: "Strict"
+ additionEnforcement: "Strict"
+ required:
+ enabled: true
+ newEnforcement: "Strict"
+ type:
+ enabled: true
+ changeEnforcement: "Strict"
+ maximum:
+ enabled: true
+ additionEnforcement: "Strict"
+ decreaseEnforcement: "Strict"
+ maxItems:
+ enabled: true
+ additionEnforcement: "Strict"
+ decreaseEnforcement: "Strict"
+ maxProperties:
+ enabled: true
+ additionEnforcement: "Strict"
+ decreaseEnforcement: "Strict"
+ maxLength:
+ enabled: true
+ additionEnforcement: "Strict"
+ decreaseEnforcement: "Strict"
+ minimum:
+ enabled: true
+ additionEnforcement: "Strict"
+ increaseEnforcement: "Strict"
+ minItems:
+ enabled: true
+ additionEnforcement: "Strict"
+ increaseEnforcement: "Strict"
+ minProperties:
+ enabled: true
+ additionEnforcement: "Strict"
+ increaseEnforcement: "Strict"
+ minLength:
+ enabled: true
+ additionEnforcement: "Strict"
+ increaseEnforcement: "Strict"
diff --git a/catalogd/docs/fetching-catalog-contents.md b/catalogd/docs/fetching-catalog-contents.md
new file mode 100644
index 000000000..ccc0ff231
--- /dev/null
+++ b/catalogd/docs/fetching-catalog-contents.md
@@ -0,0 +1,204 @@
+# `ClusterCatalog` Interface
+`catalogd` serves catalog content via a catalog-specific, versioned HTTP(S) endpoint. Clients access catalog information via this API endpoint together with a versioned path for the desired format. Currently, only a complete catalog download is supported, indicated by the path "api/v1/all". For example, if `status.urls.base` is `https://catalogd-service.olmv1-system.svc/catalogs/operatorhubio`, then `https://catalogd-service.olmv1-system.svc/catalogs/operatorhubio/api/v1/all` returns the complete FBC for the catalog `operatorhubio`.
+
+
+## Response Format
+`catalogd` responses retrieved via the catalog-specific v1 API are encoded as a [JSON Lines](https://jsonlines.org/) stream of File-Based Catalog (FBC) [Meta](https://olm.operatorframework.io/docs/reference/file-based-catalogs/#schema) objects delimited by newlines.
+
+### Example
+For example, given the JSON-encoded FBC snippet
+```json
+{
+ "schema": "olm.package",
+ "name": "cockroachdb",
+ "defaultChannel": "stable-v6.x",
+}
+{
+ "schema": "olm.channel",
+ "name": "stable-v6.x",
+ "package": "cockroachdb",
+ "entries": [
+ {
+ "name": "cockroachdb.v6.0.0",
+ "skipRange": "<6.0.0"
+ }
+ ]
+}
+{
+ "schema": "olm.bundle",
+ "name": "cockroachdb.v6.0.0",
+ "package": "cockroachdb",
+ "image": "quay.io/openshift-community-operators/cockroachdb@sha256:d3016b1507515fc7712f9c47fd9082baf9ccb070aaab58ed0ef6e5abdedde8ba",
+ "properties": [
+ {
+ "type": "olm.package",
+ "value": {
+ "packageName": "cockroachdb",
+ "version": "6.0.0"
+ }
+ }
+ ]
+}
+```
+the corresponding JSON Lines formatted response would be
+```json
+{"schema":"olm.package","name":"cockroachdb","defaultChannel":"stable-v6.x"}
+{"schema":"olm.channel","name":"stable-v6.x","package":"cockroachdb","entries":[{"name":"cockroachdb.v6.0.0","skipRange":"<6.0.0"}]}
+{"schema":"olm.bundle","name":"cockroachdb.v6.0.0","package":"cockroachdb","image":"quay.io/openshift-community-operators/cockroachdb@sha256:d3016b1507515fc7712f9c47fd9082baf9ccb070aaab58ed0ef6e5abdedde8ba","properties":[{"type":"olm.package","value":{"packageName":"cockroachdb","version":"6.0.0"}}]}
+```
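+
+Because each line is a complete JSON object, clients can decode the stream incrementally instead of buffering the whole catalog. Below is a minimal Go sketch of such a decoder; the `Meta` struct is illustrative (it keeps only a few common fields) rather than the canonical operator-framework type:
+
+```go
+package main
+
+import (
+	"bufio"
+	"encoding/json"
+	"fmt"
+	"os"
+)
+
+// Meta holds the fields shared by every FBC object; schema-specific
+// fields are ignored during decoding.
+type Meta struct {
+	Schema  string `json:"schema"`
+	Name    string `json:"name"`
+	Package string `json:"package,omitempty"`
+}
+
+func main() {
+	scanner := bufio.NewScanner(os.Stdin)
+	// FBC bundle objects can exceed bufio's default 64KiB token limit.
+	scanner.Buffer(make([]byte, 0, 1024*1024), 16*1024*1024)
+	for scanner.Scan() {
+		var m Meta
+		if err := json.Unmarshal(scanner.Bytes(), &m); err != nil {
+			fmt.Fprintf(os.Stderr, "skipping malformed line: %v\n", err)
+			continue
+		}
+		fmt.Printf("%s: %s\n", m.Schema, m.Name)
+	}
+	if err := scanner.Err(); err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+}
+```
+
+Piping the endpoint output into this program, e.g. `curl -k https://.../api/v1/all | go run .`, prints one `schema: name` pair per catalog object.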
+
+## Compression Support
+
+`catalogd` supports gzip compression of responses, which can significantly reduce associated network traffic. In order to signal to `catalogd` that the client handles compressed responses, the client must include `Accept-Encoding: gzip` as a header in the HTTP request.
+
+`catalogd` will include a `Content-Encoding: gzip` header in compressed responses.
+
+Note that `catalogd` will only compress catalogs larger than 1400 bytes.
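+
+As a sketch of the negotiation from Go (the URL assumes the port-forward described later in this document, and TLS verification is skipped only because the default development certificate is self-signed):
+
+```go
+package main
+
+import (
+	"compress/gzip"
+	"crypto/tls"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+)
+
+func main() {
+	// Local development only: skip verification of the self-signed
+	// certificate, mirroring `curl -k`.
+	client := &http.Client{Transport: &http.Transport{
+		TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // #nosec G402
+	}}
+
+	req, err := http.NewRequest(http.MethodGet,
+		"https://localhost:8080/catalogs/operatorhubio/api/v1/all", nil)
+	if err != nil {
+		panic(err)
+	}
+	// Signal that we can handle compressed responses. Because the header is
+	// set explicitly, Go's transport will not decompress on our behalf.
+	req.Header.Set("Accept-Encoding", "gzip")
+
+	resp, err := client.Do(req)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	var body io.Reader = resp.Body
+	// Small catalogs are served uncompressed, so check the response header.
+	if resp.Header.Get("Content-Encoding") == "gzip" {
+		gz, err := gzip.NewReader(resp.Body)
+		if err != nil {
+			panic(err)
+		}
+		defer gz.Close()
+		body = gz
+	}
+
+	n, err := io.Copy(os.Stdout, body)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Fprintf(os.Stderr, "wrote %d decompressed bytes\n", n)
+}
+```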
+
+### Example
+
+The demo below
+1. retrieves plaintext catalog content (and saves to file 1)
+2. adds the `Accept-Encoding` header and retrieves compressed content
+3. adds the `Accept-Encoding` header and uses curl to decompress the response (and saves to file 2)
+4. uses diff to demonstrate that there is no difference between the contents of files 1 and 2
+
+
+[Watch the compression demo (asciinema)](https://asciinema.org/a/668823)
+
+
+
+# Fetching `ClusterCatalog` contents from the Catalogd HTTP Server
+This section covers how to fetch the contents for a `ClusterCatalog` from the
+Catalogd HTTP(S) Server.
+
+For example purposes we make the following assumption:
+- A `ClusterCatalog` named `operatorhubio` has been created and successfully unpacked
+(denoted in the `ClusterCatalog.Status`)
+
+**NOTE:** By default, Catalogd is configured to use TLS with self-signed certificates.
+For local development, consider skipping TLS verification (for example, `curl -k`), or refer to
+external material on verifying self-signed certificates.
+
+`ClusterCatalog` CRs have a `status.urls.base` field which identifies the catalog-specific API to access the catalog content:
+
+```yaml
+ status:
+ # ...
+ urls:
+ base: https://catalogd-service.olmv1-system.svc/catalogs/operatorhubio
+ resolvedSource:
+ image:
+ ref: quay.io/operatorhubio/catalog@sha256:e53267559addc85227c2a7901ca54b980bc900276fc24d3f4db0549cb38ecf76
+ type: Image
+```
+
+## On cluster
+
+When making a request for the complete contents of the `operatorhubio` `ClusterCatalog` from within
+the cluster, clients combine `status.urls.base` with the path of the desired API endpoint and issue an HTTP GET request against the resulting URL.
+
+For example, to receive the complete catalog data for the `operatorhubio` catalog indicated above, the client appends the API path `api/v1/all`, yielding:
+
+`https://catalogd-service.olmv1-system.svc/catalogs/operatorhubio/api/v1/all`.
+
+An example command to run a `Pod` to `curl` the catalog contents:
+```sh
+kubectl run fetcher --image=curlimages/curl:latest -- curl -k https://catalogd-service.olmv1-system.svc/catalogs/operatorhubio/api/v1/all
+```
+
+## Off cluster
+
+When making a request for the contents of the `operatorhubio` `ClusterCatalog` from outside
+the cluster, we have to perform an extra step:
+1. Port forward the `catalogd-service` service in the `olmv1-system` namespace:
+```sh
+kubectl -n olmv1-system port-forward svc/catalogd-service 8080:443
+```
+
+Once the service has been successfully forwarded to a localhost port, issue an HTTP `GET`
+request to `https://localhost:8080/catalogs/operatorhubio/api/v1/all`
+
+An example `curl` request that assumes the port-forwarding is mapped to port 8080 on the local machine (`-k` skips verification of the self-signed certificate):
+```sh
+curl -k https://localhost:8080/catalogs/operatorhubio/api/v1/all
+```
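+
+The same fetch can be done programmatically. The Go sketch below makes the request through the port-forward and lists the package names in the catalog; the URL and port mapping are the same assumptions as the `curl` example above, and TLS verification is skipped only because the development certificate is self-signed:
+
+```go
+package main
+
+import (
+	"bufio"
+	"crypto/tls"
+	"encoding/json"
+	"fmt"
+	"net/http"
+)
+
+func main() {
+	// Local development only: the equivalent of `curl -k`.
+	client := &http.Client{Transport: &http.Transport{
+		TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // #nosec G402
+	}}
+	resp, err := client.Get("https://localhost:8080/catalogs/operatorhubio/api/v1/all")
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	// Each line of the response is one FBC object; print the package names.
+	scanner := bufio.NewScanner(resp.Body)
+	scanner.Buffer(make([]byte, 0, 1024*1024), 16*1024*1024) // bundle lines can be large
+	for scanner.Scan() {
+		var m struct {
+			Schema string `json:"schema"`
+			Name   string `json:"name"`
+		}
+		if err := json.Unmarshal(scanner.Bytes(), &m); err != nil {
+			continue // skip malformed lines
+		}
+		if m.Schema == "olm.package" {
+			fmt.Println(m.Name)
+		}
+	}
+	if err := scanner.Err(); err != nil {
+		panic(err)
+	}
+}
+```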
+
+# Fetching `ClusterCatalog` contents from the `Catalogd` Service outside of the cluster
+
+This section outlines a way of exposing the `Catalogd` Service's endpoints outside the cluster and then accessing the catalog contents using `Ingress`. This example uses the `Ingress NGINX` Controller, but you are welcome to use the `Ingress` Controller of your choice.
+
+**Prerequisites**
+
+- [Install kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation)
+- Assuming `kind` is installed, create a `kind` cluster with `extraPortMappings` and `node-labels` as shown in the [kind documentation](https://kind.sigs.k8s.io/docs/user/ingress/)
+- Install the latest version of `Catalogd` by navigating to the [releases page](https://github.com/operator-framework/catalogd/releases) and following the install instructions for the release you want to install.
+- Install the `Ingress NGINX` Controller by running the following command:
+
+ ```sh
+ $ kubectl apply -k https://github.com/operator-framework/catalogd/tree/main/config/nginx-ingress
+ ```
+ By running the above command, the `Ingress` Controller is installed; the `Ingress` resource is applied automatically along with it, creating an `Ingress` object on the cluster.
+
+1. Once the prerequisites are satisfied, create a `ClusterCatalog` object that points to the OperatorHub Community catalog by running the following command:
+
+ ```sh
+ $ kubectl apply -f - << EOF
+ apiVersion: olm.operatorframework.io/v1
+ kind: ClusterCatalog
+ metadata:
+ name: operatorhubio
+ spec:
+ source:
+ type: Image
+ image:
+ ref: quay.io/operatorhubio/catalog:latest
+ EOF
+ ```
+
+1. Before proceeding further, let's verify that the `ClusterCatalog` object was created successfully by running the following command:
+
+ ```sh
+ $ kubectl describe clustercatalog/operatorhubio
+ ```
+
+1. At this point the `ClusterCatalog` object exists and the `Ingress` controller is ready to process requests. The sample `Ingress` resource created during the final prerequisite step is shown below:
+
+ ```yaml
+ apiVersion: networking.k8s.io/v1
+ kind: Ingress
+ metadata:
+ name: catalogd-ingress
+ namespace: olmv1-system
+ spec:
+ ingressClassName: nginx
+ rules:
+ - http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: catalogd-service
+ port:
+ number: 80
+ ```
+ Let's verify that the `Ingress` object was created successfully from the sample by running the following command:
+
+ ```sh
+ $ kubectl describe ingress/catalogd-ingress -n olmv1-system
+ ```
+
+1. Run the following example `curl` request to retrieve all of the catalog contents:
+
+ ```sh
+ $ curl https://<address>/catalogs/operatorhubio/api/v1/all
+ ```
+
+ To obtain the `<address>` of the `Ingress` object, run the following command and look for the value in the `ADDRESS` column of the output:
+ ```sh
+ $ kubectl -n olmv1-system get ingress
+ ```
+
+ You can further use the `curl` commands outlined in the [Catalogd README](https://github.com/operator-framework/catalogd/blob/main/README.md) to filter the JSON content by bundles, channels, and packages.
diff --git a/catalogd/hack/scripts/demo-script.sh b/catalogd/hack/scripts/demo-script.sh
new file mode 100755
index 000000000..b0f1feaa7
--- /dev/null
+++ b/catalogd/hack/scripts/demo-script.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+
+#
+# Welcome to the catalogd demo
+#
+trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT
+
+
+kind delete cluster
+kind create cluster
+kubectl cluster-info --context kind-kind
+sleep 10
+
+# use the install script from the latest github release
+curl -L -s https://github.com/operator-framework/catalogd/releases/latest/download/install.sh | bash
+
+# inspect crds (clustercatalog)
+kubectl get crds -A
+kubectl get clustercatalog -A
+
+echo "... checking catalogd controller is available"
+kubectl wait --for=condition=Available -n olmv1-system deploy/catalogd-controller-manager --timeout=1m
+echo "... checking clustercatalog is serving"
+kubectl wait --for=condition=Serving clustercatalog/operatorhubio --timeout=60s
+echo "... checking clustercatalog is finished unpacking"
+kubectl wait --for=condition=Progressing=False clustercatalog/operatorhubio --timeout=60s
+
+# port forward the catalogd-service service to interact with the HTTP server serving catalog contents
+(kubectl -n olmv1-system port-forward svc/catalogd-service 8081:443)&
+
+sleep 3
+
+# check what 'packages' are available in this catalog
+curl -k https://localhost:8081/catalogs/operatorhubio/api/v1/all | jq -s '.[] | select(.schema == "olm.package") | .name'
+# check what channels are included in the wavefront package
+curl -k https://localhost:8081/catalogs/operatorhubio/api/v1/all | jq -s '.[] | select(.schema == "olm.channel") | select(.package == "wavefront") | .name'
+# check what bundles are included in the wavefront package
+curl -k https://localhost:8081/catalogs/operatorhubio/api/v1/all | jq -s '.[] | select(.schema == "olm.bundle") | select(.package == "wavefront") | .name'
+
diff --git a/catalogd/hack/scripts/generate-asciidemo.sh b/catalogd/hack/scripts/generate-asciidemo.sh
new file mode 100755
index 000000000..aa7262182
--- /dev/null
+++ b/catalogd/hack/scripts/generate-asciidemo.sh
@@ -0,0 +1,57 @@
+#!/usr/bin/env bash
+
+trap cleanup SIGINT SIGTERM EXIT
+
+SCRIPTPATH="$( cd -- "$(dirname "$0")" > /dev/null 2>&1 ; pwd -P )"
+
+function check_prereq() {
+ prog=$1
+ if ! command -v ${prog} &> /dev/null
+ then
+ echo "unable to find prerequisite: $1"
+ exit 1
+ fi
+}
+
+function cleanup() {
+ if [ -d $WKDIR ]
+ then
+ rm -rf $WKDIR
+ fi
+}
+
+function usage() {
+ echo "$0 [options]"
+ echo "where options is"
+ echo " h help (this message)"
+ exit 1
+}
+
+set +u
+while getopts 'h' flag; do
+ case "${flag}" in
+ h) usage ;;
+ esac
+ shift
+done
+set -u
+
+WKDIR=$(mktemp -td generate-asciidemo.XXXXX)
+if [ ! -d ${WKDIR} ]
+then
+ echo "unable to create temporary workspace"
+ exit 2
+fi
+
+for prereq in "asciinema curl"
+do
+ check_prereq ${prereq}
+done
+
+
+curl https://raw.githubusercontent.com/zechris/asciinema-rec_script/main/bin/asciinema-rec_script -o ${WKDIR}/asciinema-rec_script
+chmod +x ${WKDIR}/asciinema-rec_script
+screencast=${WKDIR}/catalogd-demo.cast ${WKDIR}/asciinema-rec_script ${SCRIPTPATH}/demo-script.sh
+
+asciinema upload ${WKDIR}/catalogd-demo.cast
+
diff --git a/catalogd/hack/scripts/generate-gzip-asciidemo.sh b/catalogd/hack/scripts/generate-gzip-asciidemo.sh
new file mode 100755
index 000000000..c02c54d7b
--- /dev/null
+++ b/catalogd/hack/scripts/generate-gzip-asciidemo.sh
@@ -0,0 +1,57 @@
+#!/usr/bin/env bash
+
+trap cleanup SIGINT SIGTERM EXIT
+
+SCRIPTPATH="$( cd -- "$(dirname "$0")" > /dev/null 2>&1 ; pwd -P )"
+
+function check_prereq() {
+ prog=$1
+ if ! command -v ${prog} &> /dev/null
+ then
+ echo "unable to find prerequisite: $1"
+ exit 1
+ fi
+}
+
+function cleanup() {
+ if [ -d $WKDIR ]
+ then
+ rm -rf $WKDIR
+ fi
+}
+
+function usage() {
+ echo "$0 [options]"
+ echo "where options is"
+ echo " h help (this message)"
+ exit 1
+}
+
+set +u
+while getopts 'h' flag; do
+ case "${flag}" in
+ h) usage ;;
+ esac
+ shift
+done
+set -u
+
+WKDIR=$(mktemp -td generate-asciidemo.XXXXX)
+if [ ! -d ${WKDIR} ]
+then
+ echo "unable to create temporary workspace"
+ exit 2
+fi
+
+for prereq in "asciinema curl"
+do
+ check_prereq ${prereq}
+done
+
+
+curl https://raw.githubusercontent.com/zechris/asciinema-rec_script/main/bin/asciinema-rec_script -o ${WKDIR}/asciinema-rec_script
+chmod +x ${WKDIR}/asciinema-rec_script
+screencast=${WKDIR}/catalogd-demo.cast ${WKDIR}/asciinema-rec_script ${SCRIPTPATH}/gzip-demo-script.sh
+
+asciinema upload ${WKDIR}/catalogd-demo.cast
+
diff --git a/catalogd/hack/scripts/gzip-demo-script.sh b/catalogd/hack/scripts/gzip-demo-script.sh
new file mode 100755
index 000000000..2cd1bb794
--- /dev/null
+++ b/catalogd/hack/scripts/gzip-demo-script.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+
+trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT
+# Welcome to the catalogd demo
+make run
+
+# create a clustercatalog
+kubectl apply -f $HOME/devel/tmp/operatorhubio-clustercatalog.yaml
+# shows catalog
+kubectl get clustercatalog -A
+# wait for the clustercatalog to report that its contents are being served
+time kubectl wait --for=condition=Serving clustercatalog/operatorhubio --timeout=1m
+
+# port forward the catalogd-service service to interact with the HTTP server serving catalog contents
+(kubectl -n olmv1-system port-forward svc/catalogd-service 8080:443)&
+sleep 5
+
+# retrieve catalog as plaintext JSONlines
+curl -k -vvv https://localhost:8080/catalogs/operatorhubio/api/v1/all --output /tmp/cat-content.json
+
+# advertise handling of compressed content
+curl -vvv -k https://localhost:8080/catalogs/operatorhubio/api/v1/all -H 'Accept-Encoding: gzip' --output /tmp/cat-content.gz
+
+# let curl handle the compress/decompress for us
+curl -vvv --compressed -k https://localhost:8080/catalogs/operatorhubio/api/v1/all --output /tmp/cat-content-decompressed.txt
+
+# show that there's no content change with changed format
+diff /tmp/cat-content.json /tmp/cat-content-decompressed.txt
+
diff --git a/catalogd/internal/controllers/core/clustercatalog_controller.go b/catalogd/internal/controllers/core/clustercatalog_controller.go
new file mode 100644
index 000000000..4eedd52df
--- /dev/null
+++ b/catalogd/internal/controllers/core/clustercatalog_controller.go
@@ -0,0 +1,443 @@
+/*
+Copyright 2022.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package core
+
+import (
+ "context" // #nosec
+ "errors"
+ "fmt"
+ "slices"
+ "sync"
+ "time"
+
+ "k8s.io/apimachinery/pkg/api/equality"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/utils/ptr"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+ crfinalizer "sigs.k8s.io/controller-runtime/pkg/finalizer"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+ catalogdv1 "github.com/operator-framework/operator-controller/catalogd/api/v1"
+ "github.com/operator-framework/operator-controller/catalogd/internal/source"
+ "github.com/operator-framework/operator-controller/catalogd/internal/storage"
+)
+
+const (
+ fbcDeletionFinalizer = "olm.operatorframework.io/delete-server-cache"
+ // ClusterCatalogs are polled if a poll interval is set, in intervals of wait.Jitter(pollDuration, maxFactor)
+ // wait.Jitter returns a time.Duration between pollDuration and pollDuration + maxFactor * pollDuration.
+ requeueJitterMaxFactor = 0.01
+)
+
+// ClusterCatalogReconciler reconciles a Catalog object
+type ClusterCatalogReconciler struct {
+ client.Client
+ Unpacker source.Unpacker
+ Storage storage.Instance
+
+ finalizers crfinalizer.Finalizers
+
+ // TODO: The below storedCatalogs fields are used for a quick hack that helps
+ // us correctly populate a ClusterCatalog's status. The fact that we need
+ // these is indicative of a larger problem with the design of one or both
+ // of the Unpacker and Storage interfaces. We should fix this.
+ storedCatalogsMu sync.RWMutex
+ storedCatalogs map[string]storedCatalogData
+}
+
+type storedCatalogData struct {
+ observedGeneration int64
+ unpackResult source.Result
+}
+
+//+kubebuilder:rbac:groups=olm.operatorframework.io,resources=clustercatalogs,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups=olm.operatorframework.io,resources=clustercatalogs/status,verbs=get;update;patch
+//+kubebuilder:rbac:groups=olm.operatorframework.io,resources=clustercatalogs/finalizers,verbs=update
+
+// Reconcile is part of the main kubernetes reconciliation loop which aims to
+// move the current state of the cluster closer to the desired state.
+//
+// For more details, check Reconcile and its Result here:
+// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.11.0/pkg/reconcile
+func (r *ClusterCatalogReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+ l := log.FromContext(ctx).WithName("catalogd-controller")
+ ctx = log.IntoContext(ctx, l)
+
+ l.Info("reconcile starting")
+ defer l.Info("reconcile ending")
+
+ existingCatsrc := catalogdv1.ClusterCatalog{}
+ if err := r.Client.Get(ctx, req.NamespacedName, &existingCatsrc); err != nil {
+ return ctrl.Result{}, client.IgnoreNotFound(err)
+ }
+
+ reconciledCatsrc := existingCatsrc.DeepCopy()
+ res, reconcileErr := r.reconcile(ctx, reconciledCatsrc)
+
+ // If we encounter an error, we should delete the stored catalog metadata
+ // which represents the state of a successfully unpacked catalog. Deleting
+ // this state ensures that we will continue retrying the unpacking process
+ // until it succeeds.
+ if reconcileErr != nil {
+ r.deleteStoredCatalog(reconciledCatsrc.Name)
+ }
+
+ // Do checks before any Update()s, as Update() may modify the resource structure!
+ updateStatus := !equality.Semantic.DeepEqual(existingCatsrc.Status, reconciledCatsrc.Status)
+ updateFinalizers := !equality.Semantic.DeepEqual(existingCatsrc.Finalizers, reconciledCatsrc.Finalizers)
+ unexpectedFieldsChanged := checkForUnexpectedFieldChange(existingCatsrc, *reconciledCatsrc)
+
+ if unexpectedFieldsChanged {
+ panic("spec or metadata changed by reconciler")
+ }
+
+ // Save the finalizers off to the side. If we update the status, the reconciledCatsrc will be updated
+ // to contain the new state of the ClusterCatalog, which contains the status update, but (critically)
+ // does not contain the finalizers. After the status update, we need to re-add the finalizers to the
+ // reconciledCatsrc before updating the object.
+ finalizers := reconciledCatsrc.Finalizers
+
+ if updateStatus {
+ if err := r.Client.Status().Update(ctx, reconciledCatsrc); err != nil {
+ reconcileErr = errors.Join(reconcileErr, fmt.Errorf("error updating status: %v", err))
+ }
+ }
+
+ reconciledCatsrc.Finalizers = finalizers
+
+ if updateFinalizers {
+ if err := r.Client.Update(ctx, reconciledCatsrc); err != nil {
+ reconcileErr = errors.Join(reconcileErr, fmt.Errorf("error updating finalizers: %v", err))
+ }
+ }
+
+ return res, reconcileErr
+}
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *ClusterCatalogReconciler) SetupWithManager(mgr ctrl.Manager) error {
+ r.storedCatalogsMu.Lock()
+ defer r.storedCatalogsMu.Unlock()
+ r.storedCatalogs = make(map[string]storedCatalogData)
+
+ if err := r.setupFinalizers(); err != nil {
+ return fmt.Errorf("failed to setup finalizers: %v", err)
+ }
+
+ return ctrl.NewControllerManagedBy(mgr).
+ For(&catalogdv1.ClusterCatalog{}).
+ Complete(r)
+}
+
+// Note: This function always returns ctrl.Result{}. The linter complains
+// that we could return just an error instead. As discussed in
+// https://github.com/operator-framework/rukpak/pull/635#discussion_r1229859464,
+// the consensus was to keep the ctrl.Result return type so that, if we ever
+// need to return something other than the zero value, we don't forget to add
+// it back. Hence the nolint directive below.
+// nolint:unparam
+func (r *ClusterCatalogReconciler) reconcile(ctx context.Context, catalog *catalogdv1.ClusterCatalog) (ctrl.Result, error) {
+ l := log.FromContext(ctx)
+	// If the catalog's availability mode is set to Unavailable, unset the base
+	// URL, delete the catalog from the local cache, and set the appropriate status.
+ if catalog.Spec.AvailabilityMode == catalogdv1.AvailabilityModeUnavailable {
+ // Delete the catalog from local cache
+ err := r.deleteCatalogCache(ctx, catalog)
+ if err != nil {
+ return ctrl.Result{}, err
+ }
+
+ // Set status.conditions[type=Progressing] to False as we are done with
+ // all that needs to be done with the catalog
+ updateStatusProgressingUserSpecifiedUnavailable(&catalog.Status, catalog.GetGeneration())
+
+		// Remove the fbcDeletionFinalizer: we do not want a finalizer attached to
+		// the catalog while it is disabled, since it serves no purpose.
+ controllerutil.RemoveFinalizer(catalog, fbcDeletionFinalizer)
+
+ return ctrl.Result{}, nil
+ }
+
+ finalizeResult, err := r.finalizers.Finalize(ctx, catalog)
+ if err != nil {
+ return ctrl.Result{}, err
+ }
+ if finalizeResult.Updated || finalizeResult.StatusUpdated {
+ // On create: make sure the finalizer is applied before we do anything
+ // On delete: make sure we do nothing after the finalizer is removed
+ return ctrl.Result{}, nil
+ }
+
+ // TODO: The below algorithm to get the current state based on an in-memory
+ // storedCatalogs map is a hack that helps us keep the ClusterCatalog's
+ // status up-to-date. The fact that we need this setup is indicative of
+ // a larger problem with the design of one or both of the Unpacker and
+ // Storage interfaces and/or their interactions. We should fix this.
+ expectedStatus, storedCatalog, hasStoredCatalog := r.getCurrentState(catalog)
+
+ // If any of the following are true, we need to unpack the catalog:
+ // - we don't have a stored catalog in the map
+ // - we have a stored catalog, but the content doesn't exist on disk
+ // - we have a stored catalog, the content exists, but the expected status differs from the actual status
+ // - we have a stored catalog, the content exists, the status looks correct, but the catalog generation is different from the observed generation in the stored catalog
+ // - we have a stored catalog, the content exists, the status looks correct and reflects the catalog generation, but it is time to poll again
+ needsUnpack := false
+ switch {
+ case !hasStoredCatalog:
+ l.Info("unpack required: no cached catalog metadata found for this catalog")
+ needsUnpack = true
+ case !r.Storage.ContentExists(catalog.Name):
+ l.Info("unpack required: no stored content found for this catalog")
+ needsUnpack = true
+ case !equality.Semantic.DeepEqual(catalog.Status, *expectedStatus):
+ l.Info("unpack required: current ClusterCatalog status differs from expected status")
+ needsUnpack = true
+ case catalog.Generation != storedCatalog.observedGeneration:
+ l.Info("unpack required: catalog generation differs from observed generation")
+ needsUnpack = true
+ case r.needsPoll(storedCatalog.unpackResult.LastSuccessfulPollAttempt.Time, catalog):
+ l.Info("unpack required: poll duration has elapsed")
+ needsUnpack = true
+ }
+
+ if !needsUnpack {
+ // No need to update the status because we've already checked
+ // that it is set correctly. Otherwise, we'd be unpacking again.
+ return nextPollResult(storedCatalog.unpackResult.LastSuccessfulPollAttempt.Time, catalog), nil
+ }
+
+ unpackResult, err := r.Unpacker.Unpack(ctx, catalog)
+ if err != nil {
+ unpackErr := fmt.Errorf("source catalog content: %w", err)
+ updateStatusProgressing(&catalog.Status, catalog.GetGeneration(), unpackErr)
+ return ctrl.Result{}, unpackErr
+ }
+
+ switch unpackResult.State {
+ case source.StateUnpacked:
+		// TODO: We should check whether the unpacked result has the same content
+		// as the already unpacked content. If it does, we should skip the rest
+		// of the unpacking steps.
+ err := r.Storage.Store(ctx, catalog.Name, unpackResult.FS)
+ if err != nil {
+ storageErr := fmt.Errorf("error storing fbc: %v", err)
+ updateStatusProgressing(&catalog.Status, catalog.GetGeneration(), storageErr)
+ return ctrl.Result{}, storageErr
+ }
+ baseURL := r.Storage.BaseURL(catalog.Name)
+
+ updateStatusProgressing(&catalog.Status, catalog.GetGeneration(), nil)
+ updateStatusServing(&catalog.Status, *unpackResult, baseURL, catalog.GetGeneration())
+ default:
+ panic(fmt.Sprintf("unknown unpack state %q", unpackResult.State))
+ }
+
+ r.storedCatalogsMu.Lock()
+ r.storedCatalogs[catalog.Name] = storedCatalogData{
+ unpackResult: *unpackResult,
+ observedGeneration: catalog.GetGeneration(),
+ }
+ r.storedCatalogsMu.Unlock()
+ return nextPollResult(unpackResult.LastSuccessfulPollAttempt.Time, catalog), nil
+}
+
+func (r *ClusterCatalogReconciler) getCurrentState(catalog *catalogdv1.ClusterCatalog) (*catalogdv1.ClusterCatalogStatus, storedCatalogData, bool) {
+ r.storedCatalogsMu.RLock()
+ storedCatalog, hasStoredCatalog := r.storedCatalogs[catalog.Name]
+ r.storedCatalogsMu.RUnlock()
+
+ expectedStatus := catalog.Status.DeepCopy()
+
+ // Set expected status based on what we see in the stored catalog
+ clearUnknownConditions(expectedStatus)
+ if hasStoredCatalog && r.Storage.ContentExists(catalog.Name) {
+ updateStatusServing(expectedStatus, storedCatalog.unpackResult, r.Storage.BaseURL(catalog.Name), storedCatalog.observedGeneration)
+ updateStatusProgressing(expectedStatus, storedCatalog.observedGeneration, nil)
+ }
+
+ return expectedStatus, storedCatalog, hasStoredCatalog
+}
+
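+// nextPollResult computes the requeue delay for image-based catalogs that
+// have polling enabled. wait.Jitter adds up to requeueJitterMaxFactor of
+// extra random delay on top of the poll interval, so that many catalogs do
+// not all re-poll at the same instant.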
+func nextPollResult(lastSuccessfulPoll time.Time, catalog *catalogdv1.ClusterCatalog) ctrl.Result {
+ var requeueAfter time.Duration
+ switch catalog.Spec.Source.Type {
+ case catalogdv1.SourceTypeImage:
+ if catalog.Spec.Source.Image != nil && catalog.Spec.Source.Image.PollIntervalMinutes != nil {
+ pollDuration := time.Duration(*catalog.Spec.Source.Image.PollIntervalMinutes) * time.Minute
+ jitteredDuration := wait.Jitter(pollDuration, requeueJitterMaxFactor)
+ requeueAfter = time.Until(lastSuccessfulPoll.Add(jitteredDuration))
+ }
+ }
+ return ctrl.Result{RequeueAfter: requeueAfter}
+}
+
+func clearUnknownConditions(status *catalogdv1.ClusterCatalogStatus) {
+ knownTypes := sets.New[string](
+ catalogdv1.TypeServing,
+ catalogdv1.TypeProgressing,
+ )
+ status.Conditions = slices.DeleteFunc(status.Conditions, func(cond metav1.Condition) bool {
+ return !knownTypes.Has(cond.Type)
+ })
+}
+
+func updateStatusProgressing(status *catalogdv1.ClusterCatalogStatus, generation int64, err error) {
+ progressingCond := metav1.Condition{
+ Type: catalogdv1.TypeProgressing,
+ Status: metav1.ConditionTrue,
+ Reason: catalogdv1.ReasonSucceeded,
+ Message: "Successfully unpacked and stored content from resolved source",
+ ObservedGeneration: generation,
+ }
+
+	if err != nil {
+		// Status stays True (we are still progressing, i.e. retrying); terminal
+		// errors below flip it to False with reason Blocked.
+		progressingCond.Status = metav1.ConditionTrue
+ progressingCond.Reason = catalogdv1.ReasonRetrying
+ progressingCond.Message = err.Error()
+ }
+
+ if errors.Is(err, reconcile.TerminalError(nil)) {
+ progressingCond.Status = metav1.ConditionFalse
+ progressingCond.Reason = catalogdv1.ReasonBlocked
+ }
+
+ meta.SetStatusCondition(&status.Conditions, progressingCond)
+}
+
+func updateStatusServing(status *catalogdv1.ClusterCatalogStatus, result source.Result, baseURL string, generation int64) {
+ status.ResolvedSource = result.ResolvedSource
+ if status.URLs == nil {
+ status.URLs = &catalogdv1.ClusterCatalogURLs{}
+ }
+ status.URLs.Base = baseURL
+ status.LastUnpacked = ptr.To(metav1.NewTime(result.UnpackTime))
+ meta.SetStatusCondition(&status.Conditions, metav1.Condition{
+ Type: catalogdv1.TypeServing,
+ Status: metav1.ConditionTrue,
+ Reason: catalogdv1.ReasonAvailable,
+ Message: "Serving desired content from resolved source",
+ ObservedGeneration: generation,
+ })
+}
+
+func updateStatusProgressingUserSpecifiedUnavailable(status *catalogdv1.ClusterCatalogStatus, generation int64) {
+ // Set Progressing condition to True with reason Succeeded
+ // since we have successfully progressed to the unavailable
+ // availability mode and are ready to progress to any future
+ // desired state.
+ progressingCond := metav1.Condition{
+ Type: catalogdv1.TypeProgressing,
+ Status: metav1.ConditionTrue,
+ Reason: catalogdv1.ReasonSucceeded,
+ Message: "Catalog availability mode is set to Unavailable",
+ ObservedGeneration: generation,
+ }
+
+ // Set Serving condition to False with reason UserSpecifiedUnavailable
+ // so that users of this condition are aware that this catalog is
+ // intentionally not being served
+ servingCond := metav1.Condition{
+ Type: catalogdv1.TypeServing,
+ Status: metav1.ConditionFalse,
+ Reason: catalogdv1.ReasonUserSpecifiedUnavailable,
+ Message: "Catalog availability mode is set to Unavailable",
+ ObservedGeneration: generation,
+ }
+
+ meta.SetStatusCondition(&status.Conditions, progressingCond)
+ meta.SetStatusCondition(&status.Conditions, servingCond)
+}
+
+func updateStatusNotServing(status *catalogdv1.ClusterCatalogStatus, generation int64) {
+ status.ResolvedSource = nil
+ status.URLs = nil
+ status.LastUnpacked = nil
+ meta.SetStatusCondition(&status.Conditions, metav1.Condition{
+ Type: catalogdv1.TypeServing,
+ Status: metav1.ConditionFalse,
+ Reason: catalogdv1.ReasonUnavailable,
+ ObservedGeneration: generation,
+ })
+}
+
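+// needsPoll reports whether the poll interval has elapsed since the last
+// successful poll. For example, with PollIntervalMinutes=5 and a last
+// successful poll at 10:00, it returns false until 10:05 and true afterwards.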
+func (r *ClusterCatalogReconciler) needsPoll(lastSuccessfulPoll time.Time, catalog *catalogdv1.ClusterCatalog) bool {
+	// If there is no image source or polling is disabled, we don't need to poll.
+	if catalog.Spec.Source.Image == nil || catalog.Spec.Source.Image.PollIntervalMinutes == nil {
+		return false
+	}
+
+ // Only poll if the next poll time is in the past.
+ nextPoll := lastSuccessfulPoll.Add(time.Duration(*catalog.Spec.Source.Image.PollIntervalMinutes) * time.Minute)
+ return nextPoll.Before(time.Now())
+}
+
+// Compare resources - ignoring status & metadata.finalizers
+func checkForUnexpectedFieldChange(a, b catalogdv1.ClusterCatalog) bool {
+ a.Status, b.Status = catalogdv1.ClusterCatalogStatus{}, catalogdv1.ClusterCatalogStatus{}
+ a.Finalizers, b.Finalizers = []string{}, []string{}
+ return !equality.Semantic.DeepEqual(a, b)
+}
+
+type finalizerFunc func(ctx context.Context, obj client.Object) (crfinalizer.Result, error)
+
+func (f finalizerFunc) Finalize(ctx context.Context, obj client.Object) (crfinalizer.Result, error) {
+ return f(ctx, obj)
+}
+
+func (r *ClusterCatalogReconciler) setupFinalizers() error {
+ f := crfinalizer.NewFinalizers()
+ err := f.Register(fbcDeletionFinalizer, finalizerFunc(func(ctx context.Context, obj client.Object) (crfinalizer.Result, error) {
+ catalog, ok := obj.(*catalogdv1.ClusterCatalog)
+ if !ok {
+ panic("could not convert object to clusterCatalog")
+ }
+ err := r.deleteCatalogCache(ctx, catalog)
+ return crfinalizer.Result{StatusUpdated: true}, err
+ }))
+ if err != nil {
+ return err
+ }
+ r.finalizers = f
+ return nil
+}
+
+func (r *ClusterCatalogReconciler) deleteStoredCatalog(catalogName string) {
+ r.storedCatalogsMu.Lock()
+ defer r.storedCatalogsMu.Unlock()
+ delete(r.storedCatalogs, catalogName)
+}
+
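+// deleteCatalogCache removes all cached and served content for the catalog:
+// first the stored FBC content (so stale data is no longer served), then the
+// unpacker's working data, and finally the in-memory stored-catalog entry.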
+func (r *ClusterCatalogReconciler) deleteCatalogCache(ctx context.Context, catalog *catalogdv1.ClusterCatalog) error {
+ if err := r.Storage.Delete(catalog.Name); err != nil {
+ updateStatusProgressing(&catalog.Status, catalog.GetGeneration(), err)
+ return err
+ }
+ updateStatusNotServing(&catalog.Status, catalog.GetGeneration())
+ if err := r.Unpacker.Cleanup(ctx, catalog); err != nil {
+ updateStatusProgressing(&catalog.Status, catalog.GetGeneration(), err)
+ return err
+ }
+ r.deleteStoredCatalog(catalog.Name)
+ return nil
+}
diff --git a/catalogd/internal/controllers/core/clustercatalog_controller_test.go b/catalogd/internal/controllers/core/clustercatalog_controller_test.go
new file mode 100644
index 000000000..7b6463e36
--- /dev/null
+++ b/catalogd/internal/controllers/core/clustercatalog_controller_test.go
@@ -0,0 +1,1060 @@
+package core
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io/fs"
+ "net/http"
+ "testing"
+ "testing/fstest"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/utils/ptr"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+ catalogdv1 "github.com/operator-framework/operator-controller/catalogd/api/v1"
+ "github.com/operator-framework/operator-controller/catalogd/internal/source"
+ "github.com/operator-framework/operator-controller/catalogd/internal/storage"
+)
+
+var _ source.Unpacker = &MockSource{}
+
+// MockSource is a utility for mocking out an Unpacker source
+type MockSource struct {
+ // result is the result that should be returned when MockSource.Unpack is called
+ result *source.Result
+
+	// unpackError is the error to be returned when MockSource.Unpack is called
+	unpackError error
+
+ // cleanupError is the error to be returned when MockSource.Cleanup is called
+ cleanupError error
+}
+
+func (ms *MockSource) Unpack(_ context.Context, _ *catalogdv1.ClusterCatalog) (*source.Result, error) {
+ if ms.unpackError != nil {
+ return nil, ms.unpackError
+ }
+
+ return ms.result, nil
+}
+
+func (ms *MockSource) Cleanup(_ context.Context, _ *catalogdv1.ClusterCatalog) error {
+ return ms.cleanupError
+}
+
+var _ storage.Instance = &MockStore{}
+
+type MockStore struct {
+ shouldError bool
+}
+
+func (m MockStore) Store(_ context.Context, _ string, _ fs.FS) error {
+ if m.shouldError {
+ return errors.New("mockstore store error")
+ }
+ return nil
+}
+
+func (m MockStore) Delete(_ string) error {
+ if m.shouldError {
+ return errors.New("mockstore delete error")
+ }
+ return nil
+}
+
+func (m MockStore) BaseURL(_ string) string {
+ return "URL"
+}
+
+func (m MockStore) StorageServerHandler() http.Handler {
+ panic("not needed")
+}
+
+func (m MockStore) ContentExists(_ string) bool {
+ return true
+}
+
+func TestCatalogdControllerReconcile(t *testing.T) {
+ for _, tt := range []struct {
+ name string
+ catalog *catalogdv1.ClusterCatalog
+ expectedError error
+ shouldPanic bool
+ expectedCatalog *catalogdv1.ClusterCatalog
+ source source.Unpacker
+ store storage.Instance
+ }{
+ {
+ name: "invalid source type, panics",
+ source: &MockSource{},
+ store: &MockStore{},
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "catalog",
+ Finalizers: []string{fbcDeletionFinalizer},
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: "invalid",
+ },
+ },
+ },
+ shouldPanic: true,
+ expectedCatalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "catalog",
+ Finalizers: []string{fbcDeletionFinalizer},
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: "invalid",
+ },
+ },
+ Status: catalogdv1.ClusterCatalogStatus{
+ Conditions: []metav1.Condition{
+ {
+ Type: catalogdv1.TypeProgressing,
+ Status: metav1.ConditionFalse,
+ Reason: catalogdv1.ReasonBlocked,
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "valid source type, unpack returns error, status updated to reflect error state and error is returned",
+ expectedError: fmt.Errorf("source catalog content: %w", fmt.Errorf("mocksource error")),
+ source: &MockSource{
+ unpackError: errors.New("mocksource error"),
+ },
+ store: &MockStore{},
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "catalog",
+ Finalizers: []string{fbcDeletionFinalizer},
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someimage:latest",
+ },
+ },
+ },
+ },
+ expectedCatalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "catalog",
+ Finalizers: []string{fbcDeletionFinalizer},
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someimage:latest",
+ },
+ },
+ },
+ Status: catalogdv1.ClusterCatalogStatus{
+ Conditions: []metav1.Condition{
+ {
+ Type: catalogdv1.TypeProgressing,
+ Status: metav1.ConditionTrue,
+ Reason: catalogdv1.ReasonRetrying,
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "valid source type, unpack returns terminal error, status updated to reflect terminal error state(Blocked) and error is returned",
+ expectedError: fmt.Errorf("source catalog content: %w", reconcile.TerminalError(fmt.Errorf("mocksource terminal error"))),
+ source: &MockSource{
+ unpackError: reconcile.TerminalError(errors.New("mocksource terminal error")),
+ },
+ store: &MockStore{},
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "catalog",
+ Finalizers: []string{fbcDeletionFinalizer},
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someimage:latest",
+ },
+ },
+ },
+ },
+ expectedCatalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "catalog",
+ Finalizers: []string{fbcDeletionFinalizer},
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someimage:latest",
+ },
+ },
+ },
+ Status: catalogdv1.ClusterCatalogStatus{
+ Conditions: []metav1.Condition{
+ {
+ Type: catalogdv1.TypeProgressing,
+ Status: metav1.ConditionFalse,
+ Reason: catalogdv1.ReasonBlocked,
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "valid source type, unpack state == Unpacked, should reflect in status that it's progressing, and is serving",
+ source: &MockSource{
+ result: &source.Result{
+ State: source.StateUnpacked,
+ FS: &fstest.MapFS{},
+ ResolvedSource: &catalogdv1.ResolvedCatalogSource{
+ Image: &catalogdv1.ResolvedImageSource{
+ Ref: "my.org/someimage@someSHA256Digest",
+ },
+ },
+ },
+ },
+ store: &MockStore{},
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "catalog",
+ Finalizers: []string{fbcDeletionFinalizer},
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someimage:latest",
+ },
+ },
+ },
+ },
+ expectedCatalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "catalog",
+ Finalizers: []string{fbcDeletionFinalizer},
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someimage:latest",
+ },
+ },
+ },
+ Status: catalogdv1.ClusterCatalogStatus{
+ URLs: &catalogdv1.ClusterCatalogURLs{Base: "URL"},
+ Conditions: []metav1.Condition{
+ {
+ Type: catalogdv1.TypeServing,
+ Status: metav1.ConditionTrue,
+ Reason: catalogdv1.ReasonAvailable,
+ },
+ {
+ Type: catalogdv1.TypeProgressing,
+ Status: metav1.ConditionTrue,
+ Reason: catalogdv1.ReasonSucceeded,
+ },
+ },
+ ResolvedSource: &catalogdv1.ResolvedCatalogSource{
+ Image: &catalogdv1.ResolvedImageSource{
+ Ref: "my.org/someimage@someSHA256Digest",
+ },
+ },
+ LastUnpacked: &metav1.Time{},
+ },
+ },
+ },
+ {
+ name: "valid source type, unpack state == Unpacked, storage fails, failure reflected in status and error returned",
+ expectedError: fmt.Errorf("error storing fbc: mockstore store error"),
+ source: &MockSource{
+ result: &source.Result{
+ State: source.StateUnpacked,
+ FS: &fstest.MapFS{},
+ },
+ },
+ store: &MockStore{
+ shouldError: true,
+ },
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "catalog",
+ Finalizers: []string{fbcDeletionFinalizer},
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someimage:latest",
+ },
+ },
+ },
+ },
+ expectedCatalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "catalog",
+ Finalizers: []string{fbcDeletionFinalizer},
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someimage:latest",
+ },
+ },
+ },
+ Status: catalogdv1.ClusterCatalogStatus{
+ Conditions: []metav1.Condition{
+ {
+ Type: catalogdv1.TypeProgressing,
+ Status: metav1.ConditionTrue,
+ Reason: catalogdv1.ReasonRetrying,
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "storage finalizer not set, storage finalizer gets set",
+ source: &MockSource{
+ result: &source.Result{
+ State: source.StateUnpacked,
+ FS: &fstest.MapFS{},
+ },
+ },
+ store: &MockStore{},
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "catalog",
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someimage:latest",
+ },
+ },
+ },
+ },
+ expectedCatalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "catalog",
+ Finalizers: []string{fbcDeletionFinalizer},
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someimage:latest",
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "storage finalizer set, catalog deletion timestamp is not zero (or nil), finalizer removed",
+ source: &MockSource{
+ result: &source.Result{
+ State: source.StateUnpacked,
+ FS: &fstest.MapFS{},
+ },
+ },
+ store: &MockStore{},
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "catalog",
+ Finalizers: []string{fbcDeletionFinalizer},
+ DeletionTimestamp: &metav1.Time{Time: time.Date(2023, time.October, 10, 4, 19, 0, 0, time.UTC)},
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someimage:latest",
+ },
+ },
+ },
+ Status: catalogdv1.ClusterCatalogStatus{
+ LastUnpacked: &metav1.Time{},
+ ResolvedSource: &catalogdv1.ResolvedCatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ResolvedImageSource{
+ Ref: "",
+ },
+ },
+ Conditions: []metav1.Condition{
+ {
+ Type: catalogdv1.TypeServing,
+ Status: metav1.ConditionTrue,
+ Reason: catalogdv1.ReasonAvailable,
+ },
+ {
+ Type: catalogdv1.TypeProgressing,
+ Status: metav1.ConditionFalse,
+ Reason: catalogdv1.ReasonSucceeded,
+ },
+ },
+ },
+ },
+ expectedCatalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "catalog",
+ Finalizers: []string{},
+ DeletionTimestamp: &metav1.Time{Time: time.Date(2023, time.October, 10, 4, 19, 0, 0, time.UTC)},
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someimage:latest",
+ },
+ },
+ },
+ Status: catalogdv1.ClusterCatalogStatus{
+ Conditions: []metav1.Condition{
+ {
+ Type: catalogdv1.TypeServing,
+ Status: metav1.ConditionFalse,
+ Reason: catalogdv1.ReasonUnavailable,
+ },
+ {
+ Type: catalogdv1.TypeProgressing,
+ Status: metav1.ConditionFalse,
+ Reason: catalogdv1.ReasonSucceeded,
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "storage finalizer set, catalog deletion timestamp is not zero (or nil), storage delete failed, error returned, finalizer not removed and catalog continues serving",
+ expectedError: fmt.Errorf("finalizer %q failed: %w", fbcDeletionFinalizer, fmt.Errorf("mockstore delete error")),
+ source: &MockSource{
+ result: &source.Result{
+ State: source.StateUnpacked,
+ FS: &fstest.MapFS{},
+ },
+ },
+ store: &MockStore{
+ shouldError: true,
+ },
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "catalog",
+ Finalizers: []string{fbcDeletionFinalizer},
+ DeletionTimestamp: &metav1.Time{Time: time.Date(2023, time.October, 10, 4, 19, 0, 0, time.UTC)},
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someimage:latest",
+ },
+ },
+ },
+ Status: catalogdv1.ClusterCatalogStatus{
+ URLs: &catalogdv1.ClusterCatalogURLs{Base: "URL"},
+ Conditions: []metav1.Condition{
+ {
+ Type: catalogdv1.TypeProgressing,
+ Status: metav1.ConditionFalse,
+ Reason: catalogdv1.ReasonSucceeded,
+ },
+ {
+ Type: catalogdv1.TypeServing,
+ Status: metav1.ConditionTrue,
+ Reason: catalogdv1.ReasonAvailable,
+ },
+ },
+ },
+ },
+ expectedCatalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "catalog",
+ Finalizers: []string{fbcDeletionFinalizer},
+ DeletionTimestamp: &metav1.Time{Time: time.Date(2023, time.October, 10, 4, 19, 0, 0, time.UTC)},
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someimage:latest",
+ },
+ },
+ },
+ Status: catalogdv1.ClusterCatalogStatus{
+ URLs: &catalogdv1.ClusterCatalogURLs{Base: "URL"},
+ Conditions: []metav1.Condition{
+ {
+ Type: catalogdv1.TypeProgressing,
+ Status: metav1.ConditionTrue,
+ Reason: catalogdv1.ReasonRetrying,
+ },
+ {
+ Type: catalogdv1.TypeServing,
+ Status: metav1.ConditionTrue,
+ Reason: catalogdv1.ReasonAvailable,
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "storage finalizer set, catalog deletion timestamp is not zero (or nil), unpack cleanup failed, error returned, finalizer not removed but catalog stops serving",
+ expectedError: fmt.Errorf("finalizer %q failed: %w", fbcDeletionFinalizer, fmt.Errorf("mocksource cleanup error")),
+ source: &MockSource{
+ unpackError: nil,
+ cleanupError: fmt.Errorf("mocksource cleanup error"),
+ },
+ store: &MockStore{
+ shouldError: false,
+ },
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "catalog",
+ Finalizers: []string{fbcDeletionFinalizer},
+ DeletionTimestamp: &metav1.Time{Time: time.Date(2023, time.October, 10, 4, 19, 0, 0, time.UTC)},
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someimage:latest",
+ },
+ },
+ },
+ Status: catalogdv1.ClusterCatalogStatus{
+ Conditions: []metav1.Condition{
+ {
+ Type: catalogdv1.TypeProgressing,
+ Status: metav1.ConditionFalse,
+ Reason: catalogdv1.ReasonSucceeded,
+ },
+ {
+ Type: catalogdv1.TypeServing,
+ Status: metav1.ConditionTrue,
+ Reason: catalogdv1.ReasonAvailable,
+ },
+ },
+ },
+ },
+ expectedCatalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "catalog",
+ Finalizers: []string{fbcDeletionFinalizer},
+ DeletionTimestamp: &metav1.Time{Time: time.Date(2023, time.October, 10, 4, 19, 0, 0, time.UTC)},
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someimage:latest",
+ },
+ },
+ },
+ Status: catalogdv1.ClusterCatalogStatus{
+ Conditions: []metav1.Condition{
+ {
+ Type: catalogdv1.TypeProgressing,
+ Status: metav1.ConditionTrue,
+ Reason: catalogdv1.ReasonRetrying,
+ },
+ {
+ Type: catalogdv1.TypeServing,
+ Status: metav1.ConditionFalse,
+ Reason: catalogdv1.ReasonUnavailable,
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "catalog availability set to disabled, status.urls should get unset",
+ source: &MockSource{
+ result: &source.Result{
+ State: source.StateUnpacked,
+ FS: &fstest.MapFS{},
+ },
+ },
+ store: &MockStore{},
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "catalog",
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someimage:latest",
+ },
+ },
+ AvailabilityMode: catalogdv1.AvailabilityModeUnavailable,
+ },
+ Status: catalogdv1.ClusterCatalogStatus{
+ URLs: &catalogdv1.ClusterCatalogURLs{Base: "URL"},
+ LastUnpacked: &metav1.Time{},
+ ResolvedSource: &catalogdv1.ResolvedCatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ResolvedImageSource{
+ Ref: "",
+ },
+ },
+ Conditions: []metav1.Condition{
+ {
+ Type: catalogdv1.TypeServing,
+ Status: metav1.ConditionTrue,
+ Reason: catalogdv1.ReasonAvailable,
+ },
+ {
+ Type: catalogdv1.TypeProgressing,
+ Status: metav1.ConditionFalse,
+ Reason: catalogdv1.ReasonSucceeded,
+ },
+ },
+ },
+ },
+ expectedCatalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "catalog",
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someimage:latest",
+ },
+ },
+ AvailabilityMode: catalogdv1.AvailabilityModeUnavailable,
+ },
+ Status: catalogdv1.ClusterCatalogStatus{
+ Conditions: []metav1.Condition{
+ {
+ Type: catalogdv1.TypeServing,
+ Status: metav1.ConditionFalse,
+ Reason: catalogdv1.ReasonUserSpecifiedUnavailable,
+ },
+ {
+ Type: catalogdv1.TypeProgressing,
+ Status: metav1.ConditionTrue,
+ Reason: catalogdv1.ReasonSucceeded,
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "catalog availability set to disabled, finalizer should get removed",
+ source: &MockSource{
+ result: &source.Result{
+ State: source.StateUnpacked,
+ FS: &fstest.MapFS{},
+ },
+ },
+ store: &MockStore{},
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "catalog",
+ Finalizers: []string{fbcDeletionFinalizer},
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someimage:latest",
+ },
+ },
+ AvailabilityMode: catalogdv1.AvailabilityModeUnavailable,
+ },
+ Status: catalogdv1.ClusterCatalogStatus{
+ URLs: &catalogdv1.ClusterCatalogURLs{Base: "URL"},
+ LastUnpacked: &metav1.Time{},
+ ResolvedSource: &catalogdv1.ResolvedCatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ResolvedImageSource{
+ Ref: "",
+ },
+ },
+ Conditions: []metav1.Condition{
+ {
+ Type: catalogdv1.TypeServing,
+ Status: metav1.ConditionTrue,
+ Reason: catalogdv1.ReasonAvailable,
+ },
+ {
+ Type: catalogdv1.TypeProgressing,
+ Status: metav1.ConditionTrue,
+ Reason: catalogdv1.ReasonSucceeded,
+ },
+ },
+ },
+ },
+ expectedCatalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "catalog",
+ Finalizers: []string{},
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someimage:latest",
+ },
+ },
+ AvailabilityMode: catalogdv1.AvailabilityModeUnavailable,
+ },
+ Status: catalogdv1.ClusterCatalogStatus{
+ Conditions: []metav1.Condition{
+ {
+ Type: catalogdv1.TypeServing,
+ Status: metav1.ConditionFalse,
+ Reason: catalogdv1.ReasonUserSpecifiedUnavailable,
+ },
+ {
+ Type: catalogdv1.TypeProgressing,
+ Status: metav1.ConditionTrue,
+ Reason: catalogdv1.ReasonSucceeded,
+ },
+ },
+ },
+ },
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ reconciler := &ClusterCatalogReconciler{
+ Client: nil,
+ Unpacker: tt.source,
+ Storage: tt.store,
+ storedCatalogs: map[string]storedCatalogData{},
+ }
+ require.NoError(t, reconciler.setupFinalizers())
+ ctx := context.Background()
+
+ if tt.shouldPanic {
+ assert.Panics(t, func() { _, _ = reconciler.reconcile(ctx, tt.catalog) })
+ return
+ }
+
+ res, err := reconciler.reconcile(ctx, tt.catalog)
+ assert.Equal(t, ctrl.Result{}, res)
+ // errors are aggregated/wrapped
+ if tt.expectedError == nil {
+ require.NoError(t, err)
+ } else {
+ require.Error(t, err)
+ assert.Equal(t, tt.expectedError.Error(), err.Error())
+ }
+ diff := cmp.Diff(tt.expectedCatalog, tt.catalog,
+ cmpopts.IgnoreFields(metav1.Condition{}, "Message", "LastTransitionTime"),
+ cmpopts.SortSlices(func(a, b metav1.Condition) bool { return a.Type < b.Type }))
+ assert.Empty(t, diff, "comparing the expected Catalog")
+ })
+ }
+}
+
+func TestPollingRequeue(t *testing.T) {
+ for name, tc := range map[string]struct {
+ catalog *catalogdv1.ClusterCatalog
+ expectedRequeueAfter time.Duration
+ lastPollTime metav1.Time
+ }{
+ "ClusterCatalog with tag based image ref without any poll interval specified, requeueAfter set to 0, ie polling disabled": {
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-catalog",
+ Finalizers: []string{fbcDeletionFinalizer},
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someimage:latest",
+ },
+ },
+ },
+ },
+ expectedRequeueAfter: time.Second * 0,
+ lastPollTime: metav1.Now(),
+ },
+ "ClusterCatalog with tag based image ref with poll interval specified, requeueAfter set to wait.jitter(pollInterval)": {
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-catalog",
+ Finalizers: []string{fbcDeletionFinalizer},
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someimage:latest",
+ PollIntervalMinutes: ptr.To(5),
+ },
+ },
+ },
+ },
+ expectedRequeueAfter: time.Minute * 5,
+ lastPollTime: metav1.Now(),
+ },
+ } {
+ t.Run(name, func(t *testing.T) {
+ reconciler := &ClusterCatalogReconciler{
+ Client: nil,
+ Unpacker: &MockSource{result: &source.Result{
+ State: source.StateUnpacked,
+ FS: &fstest.MapFS{},
+ ResolvedSource: &catalogdv1.ResolvedCatalogSource{
+ Image: &catalogdv1.ResolvedImageSource{
+ Ref: "my.org/someImage@someSHA256Digest",
+ },
+ },
+ LastSuccessfulPollAttempt: tc.lastPollTime,
+ }},
+ Storage: &MockStore{},
+ storedCatalogs: map[string]storedCatalogData{},
+ }
+ require.NoError(t, reconciler.setupFinalizers())
+ res, _ := reconciler.reconcile(context.Background(), tc.catalog)
+ assert.InDelta(t, tc.expectedRequeueAfter, res.RequeueAfter, requeueJitterMaxFactor*float64(tc.expectedRequeueAfter))
+ })
+ }
+}
+
+func TestPollingReconcilerUnpack(t *testing.T) {
+ oldDigest := "a5d4f4467250074216eb1ba1c36e06a3ab797d81c431427fc2aca97ecaf4e9d8"
+ newDigest := "f42337e7b85a46d83c94694638e2312e10ca16a03542399a65ba783c94a32b63"
+
+ successfulObservedGeneration := int64(2)
+ successfulUnpackStatus := func(mods ...func(status *catalogdv1.ClusterCatalogStatus)) catalogdv1.ClusterCatalogStatus {
+ s := catalogdv1.ClusterCatalogStatus{
+ URLs: &catalogdv1.ClusterCatalogURLs{Base: "URL"},
+ Conditions: []metav1.Condition{
+ {
+ Type: catalogdv1.TypeProgressing,
+ Status: metav1.ConditionTrue,
+ Reason: catalogdv1.ReasonSucceeded,
+ Message: "Successfully unpacked and stored content from resolved source",
+ ObservedGeneration: successfulObservedGeneration,
+ },
+ {
+ Type: catalogdv1.TypeServing,
+ Status: metav1.ConditionTrue,
+ Reason: catalogdv1.ReasonAvailable,
+ Message: "Serving desired content from resolved source",
+ ObservedGeneration: successfulObservedGeneration,
+ },
+ },
+ ResolvedSource: &catalogdv1.ResolvedCatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ResolvedImageSource{
+ Ref: "my.org/someimage@sha256:" + oldDigest,
+ },
+ },
+ LastUnpacked: &metav1.Time{},
+ }
+ for _, mod := range mods {
+ mod(&s)
+ }
+ return s
+ }
+ successfulStoredCatalogData := func(lastPoll metav1.Time) map[string]storedCatalogData {
+ return map[string]storedCatalogData{
+ "test-catalog": {
+ observedGeneration: successfulObservedGeneration,
+ unpackResult: source.Result{
+ ResolvedSource: successfulUnpackStatus().ResolvedSource,
+ LastSuccessfulPollAttempt: lastPoll,
+ },
+ },
+ }
+ }
+
+ for name, tc := range map[string]struct {
+ catalog *catalogdv1.ClusterCatalog
+ storedCatalogData map[string]storedCatalogData
+ expectedUnpackRun bool
+ }{
+ "ClusterCatalog being resolved the first time, unpack should run": {
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-catalog",
+ Finalizers: []string{fbcDeletionFinalizer},
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someimage:latest",
+ PollIntervalMinutes: ptr.To(5),
+ },
+ },
+ },
+ },
+ expectedUnpackRun: true,
+ },
+ "ClusterCatalog not being resolved the first time, no pollInterval mentioned, unpack should not run": {
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-catalog",
+ Finalizers: []string{fbcDeletionFinalizer},
+ Generation: 2,
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someimage:latest",
+ },
+ },
+ },
+ Status: successfulUnpackStatus(),
+ },
+ storedCatalogData: successfulStoredCatalogData(metav1.Now()),
+ expectedUnpackRun: false,
+ },
+ "ClusterCatalog not being resolved the first time, pollInterval mentioned, \"now\" is before next expected poll time, unpack should not run": {
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-catalog",
+ Finalizers: []string{fbcDeletionFinalizer},
+ Generation: 2,
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someimage:latest",
+ PollIntervalMinutes: ptr.To(7),
+ },
+ },
+ },
+ Status: successfulUnpackStatus(),
+ },
+ storedCatalogData: successfulStoredCatalogData(metav1.Now()),
+ expectedUnpackRun: false,
+ },
+ "ClusterCatalog not being resolved the first time, pollInterval mentioned, \"now\" is after next expected poll time, unpack should run": {
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-catalog",
+ Finalizers: []string{fbcDeletionFinalizer},
+ Generation: 2,
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someimage:latest",
+ PollIntervalMinutes: ptr.To(3),
+ },
+ },
+ },
+ Status: successfulUnpackStatus(),
+ },
+ storedCatalogData: successfulStoredCatalogData(metav1.NewTime(time.Now().Add(-5 * time.Minute))),
+ expectedUnpackRun: true,
+ },
+ "ClusterCatalog not being resolved the first time, pollInterval mentioned, \"now\" is before next expected poll time, generation changed, unpack should run": {
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-catalog",
+ Finalizers: []string{fbcDeletionFinalizer},
+ Generation: 3,
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someotherimage@sha256:" + newDigest,
+ PollIntervalMinutes: ptr.To(7),
+ },
+ },
+ },
+ Status: successfulUnpackStatus(),
+ },
+ storedCatalogData: successfulStoredCatalogData(metav1.Now()),
+ expectedUnpackRun: true,
+ },
+ "ClusterCatalog not being resolved the first time, no stored catalog in cache, unpack should run": {
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-catalog",
+ Finalizers: []string{fbcDeletionFinalizer},
+ Generation: 3,
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someotherimage@sha256:" + newDigest,
+ PollIntervalMinutes: ptr.To(7),
+ },
+ },
+ },
+ Status: successfulUnpackStatus(),
+ },
+ expectedUnpackRun: true,
+ },
+ "ClusterCatalog not being resolved the first time, unexpected status, unpack should run": {
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-catalog",
+ Finalizers: []string{fbcDeletionFinalizer},
+ Generation: 3,
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "my.org/someotherimage@sha256:" + newDigest,
+ PollIntervalMinutes: ptr.To(7),
+ },
+ },
+ },
+ Status: successfulUnpackStatus(func(status *catalogdv1.ClusterCatalogStatus) {
+ meta.FindStatusCondition(status.Conditions, catalogdv1.TypeProgressing).Status = metav1.ConditionTrue
+ }),
+ },
+ storedCatalogData: successfulStoredCatalogData(metav1.Now()),
+ expectedUnpackRun: true,
+ },
+ } {
+ t.Run(name, func(t *testing.T) {
+ scd := tc.storedCatalogData
+ if scd == nil {
+ scd = map[string]storedCatalogData{}
+ }
+ reconciler := &ClusterCatalogReconciler{
+ Client: nil,
+ Unpacker: &MockSource{unpackError: errors.New("mocksource error")},
+ Storage: &MockStore{},
+ storedCatalogs: scd,
+ }
+ require.NoError(t, reconciler.setupFinalizers())
+ _, err := reconciler.reconcile(context.Background(), tc.catalog)
+ if tc.expectedUnpackRun {
+ assert.Error(t, err)
+ } else {
+ assert.NoError(t, err)
+ }
+ })
+ }
+}
diff --git a/catalogd/internal/controllers/core/pull_secret_controller.go b/catalogd/internal/controllers/core/pull_secret_controller.go
new file mode 100644
index 000000000..0255309ca
--- /dev/null
+++ b/catalogd/internal/controllers/core/pull_secret_controller.go
@@ -0,0 +1,110 @@
+/*
+Copyright 2024.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package core
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "github.com/go-logr/logr"
+ corev1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/types"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/predicate"
+)
+
+// PullSecretReconciler reconciles a specific Secret object
+// that contains global pull secrets for pulling Catalog images
+type PullSecretReconciler struct {
+ client.Client
+ SecretKey types.NamespacedName
+ AuthFilePath string
+}
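+
+// A wiring sketch (hypothetical names and paths): the reconciler would be
+// constructed in main with the manager's client, then registered:
+//
+//	r := &PullSecretReconciler{
+//		Client:       mgr.GetClient(),
+//		SecretKey:    types.NamespacedName{Namespace: "olmv1-system", Name: "global-pull-secret"},
+//		AuthFilePath: "/tmp/catalogd/auth.json",
+//	}
+//	if err := r.SetupWithManager(mgr); err != nil {
+//		// handle error
+//	}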
+
+func (r *PullSecretReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+ logger := log.FromContext(ctx)
+ if req.Name != r.SecretKey.Name || req.Namespace != r.SecretKey.Namespace {
+ logger.Error(fmt.Errorf("received unexpected request for Secret %v/%v", req.Namespace, req.Name), "reconciliation error")
+ return ctrl.Result{}, nil
+ }
+
+ secret := &corev1.Secret{}
+ err := r.Get(ctx, req.NamespacedName, secret)
+ if err != nil {
+ if apierrors.IsNotFound(err) {
+ logger.Info("secret not found")
+ return r.deleteSecretFile(logger)
+ }
+ logger.Error(err, "failed to get Secret")
+ return ctrl.Result{}, err
+ }
+
+ return r.writeSecretToFile(logger, secret)
+}
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *PullSecretReconciler) SetupWithManager(mgr ctrl.Manager) error {
+ _, err := ctrl.NewControllerManagedBy(mgr).
+ For(&corev1.Secret{}).
+ WithEventFilter(newSecretPredicate(r.SecretKey)).
+ Build(r)
+
+ return err
+}
+
+func newSecretPredicate(key types.NamespacedName) predicate.Predicate {
+ return predicate.NewPredicateFuncs(func(obj client.Object) bool {
+ return obj.GetName() == key.Name && obj.GetNamespace() == key.Namespace
+ })
+}
+
+// writeSecretToFile writes the secret data to the specified file
+func (r *PullSecretReconciler) writeSecretToFile(logger logr.Logger, secret *corev1.Secret) (ctrl.Result, error) {
+ // image registry secrets are always stored with the key .dockerconfigjson
+ // ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#registry-secret-existing-credentials
+ dockerConfigJSON, ok := secret.Data[".dockerconfigjson"]
+ if !ok {
+ logger.Error(fmt.Errorf("expected secret.Data key not found"), "expected secret Data to contain key .dockerconfigjson")
+ return ctrl.Result{}, nil
+ }
+ // expected format for auth.json
+ // https://github.com/containers/image/blob/main/docs/containers-auth.json.5.md
+ err := os.WriteFile(r.AuthFilePath, dockerConfigJSON, 0600)
+ if err != nil {
+ return ctrl.Result{}, fmt.Errorf("failed to write secret data to file: %w", err)
+ }
+ logger.Info("saved global pull secret data locally")
+ return ctrl.Result{}, nil
+}
+
+// deleteSecretFile deletes the auth file if the secret is deleted
+func (r *PullSecretReconciler) deleteSecretFile(logger logr.Logger) (ctrl.Result, error) {
+ logger.Info("deleting local auth file", "file", r.AuthFilePath)
+ if err := os.Remove(r.AuthFilePath); err != nil {
+ if os.IsNotExist(err) {
+ logger.Info("auth file does not exist, nothing to delete")
+ return ctrl.Result{}, nil
+ }
+ return ctrl.Result{}, fmt.Errorf("failed to delete secret file: %w", err)
+ }
+ logger.Info("auth file deleted successfully")
+ return ctrl.Result{}, nil
+}
diff --git a/catalogd/internal/controllers/core/pull_secret_controller_test.go b/catalogd/internal/controllers/core/pull_secret_controller_test.go
new file mode 100644
index 000000000..8b91da340
--- /dev/null
+++ b/catalogd/internal/controllers/core/pull_secret_controller_test.go
@@ -0,0 +1,95 @@
+package core
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+)
+
+func TestSecretSyncerReconciler(t *testing.T) {
+ secretData := []byte(`{"auths":{"exampleRegistry": "exampledata"}}`)
+ authFileName := "test-auth.json"
+ for _, tt := range []struct {
+ name string
+ secret *corev1.Secret
+ addSecret bool
+ wantErr string
+ fileShouldExistBefore bool
+ fileShouldExistAfter bool
+ }{
+ {
+ name: "secret exists, content gets saved to authFile",
+ secret: &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-secret",
+ Namespace: "test-secret-namespace",
+ },
+ Data: map[string][]byte{
+ ".dockerconfigjson": secretData,
+ },
+ },
+ addSecret: true,
+ fileShouldExistBefore: false,
+ fileShouldExistAfter: true,
+ },
+ {
+ name: "secret does not exist, file exists previously, file should get deleted",
+ secret: &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-secret",
+ Namespace: "test-secret-namespace",
+ },
+ Data: map[string][]byte{
+ ".dockerconfigjson": secretData,
+ },
+ },
+ addSecret: false,
+ fileShouldExistBefore: true,
+ fileShouldExistAfter: false,
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ ctx := context.Background()
+ tempAuthFile := filepath.Join(t.TempDir(), authFileName)
+ clientBuilder := fake.NewClientBuilder()
+ if tt.addSecret {
+ clientBuilder = clientBuilder.WithObjects(tt.secret)
+ }
+ cl := clientBuilder.Build()
+
+ secretKey := types.NamespacedName{Namespace: tt.secret.Namespace, Name: tt.secret.Name}
+ r := &PullSecretReconciler{
+ Client: cl,
+ SecretKey: secretKey,
+ AuthFilePath: tempAuthFile,
+ }
+ if tt.fileShouldExistBefore {
+ err := os.WriteFile(tempAuthFile, secretData, 0600)
+ require.NoError(t, err)
+ }
+ res, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: secretKey})
+ if tt.wantErr == "" {
+ require.NoError(t, err)
+ } else {
+ require.ErrorContains(t, err, tt.wantErr)
+ }
+ require.Equal(t, ctrl.Result{}, res)
+
+ if tt.fileShouldExistAfter {
+ _, err := os.Stat(tempAuthFile)
+ require.NoError(t, err)
+ } else {
+ _, err := os.Stat(tempAuthFile)
+ require.True(t, os.IsNotExist(err))
+ }
+ })
+ }
+}
diff --git a/catalogd/internal/features/features.go b/catalogd/internal/features/features.go
new file mode 100644
index 000000000..8f67b1689
--- /dev/null
+++ b/catalogd/internal/features/features.go
@@ -0,0 +1,14 @@
+package features
+
+import (
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/component-base/featuregate"
+)
+
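+// catalogdFeatureGates is currently empty; a new gate would be registered by
+// adding an entry here, e.g. (hypothetical gate name):
+//
+//	"MyCatalogdFeature": {Default: false, PreRelease: featuregate.Alpha},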
+var catalogdFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{}
+
+var CatalogdFeatureGate featuregate.MutableFeatureGate = featuregate.NewFeatureGate()
+
+func init() {
+ utilruntime.Must(CatalogdFeatureGate.Add(catalogdFeatureGates))
+}
diff --git a/catalogd/internal/garbagecollection/garbage_collector.go b/catalogd/internal/garbagecollection/garbage_collector.go
new file mode 100644
index 000000000..9a021dc9d
--- /dev/null
+++ b/catalogd/internal/garbagecollection/garbage_collector.go
@@ -0,0 +1,94 @@
+package garbagecollection
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "time"
+
+ "github.com/go-logr/logr"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/client-go/metadata"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+
+ catalogdv1 "github.com/operator-framework/operator-controller/catalogd/api/v1"
+)
+
+var _ manager.Runnable = (*GarbageCollector)(nil)
+
+// GarbageCollector is an implementation of the manager.Runnable
+// interface for running garbage collection on the Catalog content
+// cache that is served by the catalogd HTTP server. It runs in a loop
+// and will ensure that no cache entries exist for Catalog resources
+// that no longer exist. This should only clean up cache entries that
+// were missed by the handling of a DELETE event on a Catalog resource.
+type GarbageCollector struct {
+ CachePath string
+ Logger logr.Logger
+ MetadataClient metadata.Interface
+ Interval time.Duration
+}
+
+// Start starts the garbage collector. It always runs once on startup and
+// then loops until the context is canceled, running garbage collection again
+// each time the configured interval elapses.
+func (gc *GarbageCollector) Start(ctx context.Context) error {
+ // Run once on startup
+ removed, err := runGarbageCollection(ctx, gc.CachePath, gc.MetadataClient)
+ if err != nil {
+ gc.Logger.Error(err, "running garbage collection")
+ }
+ if len(removed) > 0 {
+ gc.Logger.Info("removed stale cache entries", "removed entries", removed)
+ }
+
+ // Loop until context is canceled, running garbage collection
+ // at the configured interval
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-time.After(gc.Interval):
+ removed, err := runGarbageCollection(ctx, gc.CachePath, gc.MetadataClient)
+ if err != nil {
+ gc.Logger.Error(err, "running garbage collection")
+ }
+ if len(removed) > 0 {
+ gc.Logger.Info("removed stale cache entries", "removed entries", removed)
+ }
+ }
+ }
+}
+
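+// runGarbageCollection removes every top-level cache entry whose name does
+// not match a live ClusterCatalog and returns the removed entry names. A
+// directory named after an existing catalog is kept wholesale, including any
+// stale digest subdirectories beneath it.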
+func runGarbageCollection(ctx context.Context, cachePath string, metaClient metadata.Interface) ([]string, error) {
+ getter := metaClient.Resource(catalogdv1.GroupVersion.WithResource("clustercatalogs"))
+ metaList, err := getter.List(ctx, metav1.ListOptions{})
+ if err != nil {
+ return nil, fmt.Errorf("error listing clustercatalogs: %w", err)
+ }
+
+ expectedCatalogs := sets.New[string]()
+ for _, meta := range metaList.Items {
+ expectedCatalogs.Insert(meta.GetName())
+ }
+
+ cacheDirEntries, err := os.ReadDir(cachePath)
+ if err != nil {
+ return nil, fmt.Errorf("error reading cache directory: %w", err)
+ }
+ removed := []string{}
+ for _, cacheDirEntry := range cacheDirEntries {
+ if cacheDirEntry.IsDir() && expectedCatalogs.Has(cacheDirEntry.Name()) {
+ continue
+ }
+ if err := os.RemoveAll(filepath.Join(cachePath, cacheDirEntry.Name())); err != nil {
+ return nil, fmt.Errorf("error removing cache directory entry %q: %w ", cacheDirEntry.Name(), err)
+ }
+
+ removed = append(removed, cacheDirEntry.Name())
+ }
+ return removed, nil
+}
diff --git a/catalogd/internal/garbagecollection/garbage_collector_test.go b/catalogd/internal/garbagecollection/garbage_collector_test.go
new file mode 100644
index 000000000..9210278d0
--- /dev/null
+++ b/catalogd/internal/garbagecollection/garbage_collector_test.go
@@ -0,0 +1,96 @@
+package garbagecollection
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/client-go/metadata/fake"
+
+ catalogdv1 "github.com/operator-framework/operator-controller/catalogd/api/v1"
+)
+
+func TestRunGarbageCollection(t *testing.T) {
+ for _, tt := range []struct {
+ name string
+ existCatalogs []*metav1.PartialObjectMetadata
+ notExistCatalogs []*metav1.PartialObjectMetadata
+ wantErr bool
+ }{
+ {
+ name: "successful garbage collection",
+ existCatalogs: []*metav1.PartialObjectMetadata{
+ {
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ClusterCatalog",
+ APIVersion: catalogdv1.GroupVersion.String(),
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "one",
+ },
+ },
+ {
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ClusterCatalog",
+ APIVersion: catalogdv1.GroupVersion.String(),
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "two",
+ },
+ },
+ },
+ notExistCatalogs: []*metav1.PartialObjectMetadata{
+ {
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ClusterCatalog",
+ APIVersion: catalogdv1.GroupVersion.String(),
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "three",
+ },
+ },
+ },
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ ctx := context.Background()
+ cachePath := t.TempDir()
+ scheme := runtime.NewScheme()
+ require.NoError(t, metav1.AddMetaToScheme(scheme))
+
+ allCatalogs := append(tt.existCatalogs, tt.notExistCatalogs...)
+ for _, catalog := range allCatalogs {
+ require.NoError(t, os.MkdirAll(filepath.Join(cachePath, catalog.Name, "fakedigest"), os.ModePerm))
+ }
+
+ runtimeObjs := []runtime.Object{}
+ for _, catalog := range tt.existCatalogs {
+ runtimeObjs = append(runtimeObjs, catalog)
+ }
+
+ metaClient := fake.NewSimpleMetadataClient(scheme, runtimeObjs...)
+
+ _, err := runGarbageCollection(ctx, cachePath, metaClient)
+ if !tt.wantErr {
+ require.NoError(t, err)
+ entries, err := os.ReadDir(cachePath)
+ require.NoError(t, err)
+ assert.Len(t, entries, len(tt.existCatalogs))
+ for _, catalog := range tt.existCatalogs {
+ assert.DirExists(t, filepath.Join(cachePath, catalog.Name))
+ }
+
+ for _, catalog := range tt.notExistCatalogs {
+ assert.NoDirExists(t, filepath.Join(cachePath, catalog.Name))
+ }
+ } else {
+ assert.Error(t, err)
+ }
+ })
+ }
+}
diff --git a/catalogd/internal/k8sutil/k8sutil.go b/catalogd/internal/k8sutil/k8sutil.go
new file mode 100644
index 000000000..dfea1d0d6
--- /dev/null
+++ b/catalogd/internal/k8sutil/k8sutil.go
@@ -0,0 +1,17 @@
+package k8sutil
+
+import (
+ "regexp"
+
+ "k8s.io/apimachinery/pkg/util/validation"
+)
+
+var invalidNameChars = regexp.MustCompile(`[^\.\-a-zA-Z0-9]`)
+
+// MetadataName replaces all invalid DNS characters in name with a dash. If
+// the result is not a valid DNS-1123 subdomain, it returns `result, false`.
+// Otherwise, it returns `result, true`.
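+//
+// For example, MetadataName("foo-bar:123") yields ("foo-bar-123", true),
+// while MetadataName("foo-bar.123!") yields ("foo-bar.123-", false) because
+// the result ends in a non-alphanumeric character.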
+func MetadataName(name string) (string, bool) {
+ result := invalidNameChars.ReplaceAllString(name, "-")
+ return result, validation.IsDNS1123Subdomain(result) == nil
+}
diff --git a/catalogd/internal/k8sutil/k8sutil_test.go b/catalogd/internal/k8sutil/k8sutil_test.go
new file mode 100644
index 000000000..d1b142680
--- /dev/null
+++ b/catalogd/internal/k8sutil/k8sutil_test.go
@@ -0,0 +1,62 @@
+package k8sutil
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestMetadataName(t *testing.T) {
+ type testCase struct {
+ name string
+ in string
+ expectedResult string
+ expectedValid bool
+ }
+ for _, tc := range []testCase{
+ {
+ name: "empty",
+ in: "",
+ expectedResult: "",
+ expectedValid: false,
+ },
+ {
+ name: "invalid",
+ in: "foo-bar.123!",
+ expectedResult: "foo-bar.123-",
+ expectedValid: false,
+ },
+ {
+ name: "too long",
+ in: fmt.Sprintf("foo-bar_%s", strings.Repeat("1234567890", 50)),
+ expectedResult: fmt.Sprintf("foo-bar-%s", strings.Repeat("1234567890", 50)),
+ expectedValid: false,
+ },
+ {
+ name: "valid",
+ in: "foo-bar.123",
+ expectedResult: "foo-bar.123",
+ expectedValid: true,
+ },
+ {
+ name: "valid with underscore",
+ in: "foo-bar_123",
+ expectedResult: "foo-bar-123",
+ expectedValid: true,
+ },
+ {
+ name: "valid with colon",
+ in: "foo-bar:123",
+ expectedResult: "foo-bar-123",
+ expectedValid: true,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ actualResult, actualValid := MetadataName(tc.in)
+ assert.Equal(t, tc.expectedResult, actualResult)
+ assert.Equal(t, tc.expectedValid, actualValid)
+ })
+ }
+}
diff --git a/catalogd/internal/metrics/metrics.go b/catalogd/internal/metrics/metrics.go
new file mode 100644
index 000000000..c30aed584
--- /dev/null
+++ b/catalogd/internal/metrics/metrics.go
@@ -0,0 +1,40 @@
+package metrics
+
+import (
+ "net/http"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
+const (
+ RequestDurationMetricName = "catalogd_http_request_duration_seconds"
+)
+
+// Sets up the necessary metrics for calculating the Apdex Score
+// If using Grafana for visualization connected to a Prometheus data
+// source that is scraping these metrics, you can create a panel that
+// uses the following queries + expressions for calculating the Apdex Score where T = 0.5:
+// Query A: sum(catalogd_http_request_duration_seconds_bucket{code!~"5..",le="0.5"})
+// Query B: sum(catalogd_http_request_duration_seconds_bucket{code!~"5..",le="2"})
+// Query C: sum(catalogd_http_request_duration_seconds_count)
+// Expression for Apdex Score: ($A + (($B - $A) / 2)) / $C
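+//
+// For example, if query A returns 90 (requests under 0.5s), query B returns
+// 98 (requests under 2s), and query C returns 100 (total requests), the
+// Apdex Score is (90 + ((98 - 90) / 2)) / 100 = 0.94.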
+var (
+ RequestDurationMetric = prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Name: RequestDurationMetricName,
+ Help: "Histogram of request duration in seconds",
+ // Create a bucket for every 100ms up to 1s, and ensure each of those values
+ // multiplied by 4 also has a bucket. Include a 10s bucket to capture very
+ // long-running requests. This allows us to easily calculate Apdex Scores up
+ // to a T of 1 second, and using various mathematical formulas we should be
+ // able to estimate Apdex Scores up to a T of 2.5. Having a larger range of
+ // buckets will allow us to more easily calculate health indicators other
+ // than the Apdex Score.
+ Buckets: []float64{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.2, 1.6, 2, 2.4, 2.8, 3.2, 3.6, 4, 10},
+ },
+ []string{"code"},
+ )
+)
+
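+// AddMetricsToHandler wraps handler so that each request's duration is
+// observed in RequestDurationMetric, labeled by HTTP status code.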
+func AddMetricsToHandler(handler http.Handler) http.Handler {
+ return promhttp.InstrumentHandlerDuration(RequestDurationMetric, handler)
+}
diff --git a/catalogd/internal/serverutil/serverutil.go b/catalogd/internal/serverutil/serverutil.go
new file mode 100644
index 000000000..b91225335
--- /dev/null
+++ b/catalogd/internal/serverutil/serverutil.go
@@ -0,0 +1,63 @@
+package serverutil
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net"
+ "net/http"
+ "time"
+
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/certwatcher"
+
+ catalogdmetrics "github.com/operator-framework/operator-controller/catalogd/internal/metrics"
+ "github.com/operator-framework/operator-controller/catalogd/internal/storage"
+ "github.com/operator-framework/operator-controller/catalogd/internal/third_party/server"
+)
+
+type CatalogServerConfig struct {
+ ExternalAddr string
+ CatalogAddr string
+ CertFile string
+ KeyFile string
+ LocalStorage storage.Instance
+}
+
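+// AddCatalogServerToManager registers the catalog content server with the
+// manager. When CertFile and KeyFile are both set, the listener is wrapped
+// with TLS, serving certificates provided by the shared certificate watcher.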
+func AddCatalogServerToManager(mgr ctrl.Manager, cfg CatalogServerConfig, tlsFileWatcher *certwatcher.CertWatcher) error {
+ listener, err := net.Listen("tcp", cfg.CatalogAddr)
+ if err != nil {
+ return fmt.Errorf("error creating catalog server listener: %w", err)
+ }
+
+ if cfg.CertFile != "" && cfg.KeyFile != "" {
+ // Use the passed certificate watcher instead of creating a new one
+ config := &tls.Config{
+ GetCertificate: tlsFileWatcher.GetCertificate,
+ MinVersion: tls.VersionTLS12,
+ }
+ listener = tls.NewListener(listener, config)
+ }
+
+ shutdownTimeout := 30 * time.Second
+
+ catalogServer := server.Server{
+ Kind: "catalogs",
+ Server: &http.Server{
+ Addr: cfg.CatalogAddr,
+ Handler: catalogdmetrics.AddMetricsToHandler(cfg.LocalStorage.StorageServerHandler()),
+ ReadTimeout: 5 * time.Second,
+ // TODO: Revert this to 10 seconds if/when the API
+ // evolves to have significantly smaller responses
+ WriteTimeout: 5 * time.Minute,
+ },
+ ShutdownTimeout: &shutdownTimeout,
+ Listener: listener,
+ }
+
+ err = mgr.Add(&catalogServer)
+ if err != nil {
+ return fmt.Errorf("error adding catalog server to manager: %w", err)
+ }
+
+ return nil
+}
diff --git a/catalogd/internal/source/containers_image.go b/catalogd/internal/source/containers_image.go
new file mode 100644
index 000000000..c00db5c0f
--- /dev/null
+++ b/catalogd/internal/source/containers_image.go
@@ -0,0 +1,425 @@
+package source
+
+import (
+ "archive/tar"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/containerd/containerd/archive"
+ "github.com/containers/image/v5/copy"
+ "github.com/containers/image/v5/docker"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/oci/layout"
+ "github.com/containers/image/v5/pkg/blobinfocache/none"
+ "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/image/v5/signature"
+ "github.com/containers/image/v5/types"
+ "github.com/go-logr/logr"
+ "github.com/opencontainers/go-digest"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+ catalogdv1 "github.com/operator-framework/operator-controller/catalogd/api/v1"
+)
+
+const ConfigDirLabel = "operators.operatorframework.io.index.configs.v1"
+
+type ContainersImageRegistry struct {
+ BaseCachePath string
+ SourceContextFunc func(logger logr.Logger) (*types.SystemContext, error)
+}
+
+func (i *ContainersImageRegistry) Unpack(ctx context.Context, catalog *catalogdv1.ClusterCatalog) (*Result, error) {
+ l := log.FromContext(ctx)
+
+ if catalog.Spec.Source.Type != catalogdv1.SourceTypeImage {
+ panic(fmt.Sprintf("programmer error: source type %q is unable to handle specified catalog source type %q", catalogdv1.SourceTypeImage, catalog.Spec.Source.Type))
+ }
+
+ if catalog.Spec.Source.Image == nil {
+ return nil, reconcile.TerminalError(fmt.Errorf("error parsing catalog, catalog %s has a nil image source", catalog.Name))
+ }
+
+ srcCtx, err := i.SourceContextFunc(l)
+ if err != nil {
+ return nil, err
+ }
+ //////////////////////////////////////////////////////
+ //
+ // Resolve a canonical reference for the image.
+ //
+ //////////////////////////////////////////////////////
+ imgRef, canonicalRef, specIsCanonical, err := resolveReferences(ctx, catalog.Spec.Source.Image.Ref, srcCtx)
+ if err != nil {
+ return nil, err
+ }
+
+ //////////////////////////////////////////////////////
+ //
+ // Check if the image is already unpacked. If it is,
+ // return the unpacked directory.
+ //
+ //////////////////////////////////////////////////////
+ unpackPath := i.unpackPath(catalog.Name, canonicalRef.Digest())
+ if unpackStat, err := os.Stat(unpackPath); err == nil {
+ if !unpackStat.IsDir() {
+ panic(fmt.Sprintf("unexpected file at unpack path %q: expected a directory", unpackPath))
+ }
+ l.Info("image already unpacked", "ref", imgRef.String(), "digest", canonicalRef.Digest().String())
+ return successResult(unpackPath, canonicalRef, unpackStat.ModTime()), nil
+ }
+
+ //////////////////////////////////////////////////////
+ //
+ // Create a docker reference for the source and an OCI
+ // layout reference for the destination, where we will
+ // temporarily store the image in order to unpack it.
+ //
+ // We use the OCI layout as temporary storage because
+ // copy.Image can concurrently pull all the layers.
+ //
+ //////////////////////////////////////////////////////
+ dockerRef, err := docker.NewReference(imgRef)
+ if err != nil {
+ return nil, fmt.Errorf("error creating source reference: %w", err)
+ }
+
+ layoutDir, err := os.MkdirTemp("", fmt.Sprintf("oci-layout-%s", catalog.Name))
+ if err != nil {
+ return nil, fmt.Errorf("error creating temporary directory: %w", err)
+ }
+ defer func() {
+ if err := os.RemoveAll(layoutDir); err != nil {
+ l.Error(err, "error removing temporary OCI layout directory")
+ }
+ }()
+
+ layoutRef, err := layout.NewReference(layoutDir, canonicalRef.String())
+ if err != nil {
+ return nil, fmt.Errorf("error creating reference: %w", err)
+ }
+
+ //////////////////////////////////////////////////////
+ //
+ // Load an image signature policy and build
+ // a policy context for the image pull.
+ //
+ //////////////////////////////////////////////////////
+ policyContext, err := loadPolicyContext(srcCtx, l)
+ if err != nil {
+ return nil, fmt.Errorf("error loading policy context: %w", err)
+ }
+ defer func() {
+ if err := policyContext.Destroy(); err != nil {
+ l.Error(err, "error destroying policy context")
+ }
+ }()
+
+ //////////////////////////////////////////////////////
+ //
+ // Pull the image from the source to the destination
+ //
+ //////////////////////////////////////////////////////
+ if _, err := copy.Image(ctx, policyContext, layoutRef, dockerRef, &copy.Options{
+ SourceCtx: srcCtx,
+ // We use the OCI layout as temporary storage, and since
+ // pushing signatures for OCI images is not supported,
+ // we remove the source signatures when copying.
+ // Signature validation will still be performed according
+ // to the provided policy context.
+ RemoveSignatures: true,
+ }); err != nil {
+ return nil, fmt.Errorf("error copying image: %w", err)
+ }
+ l.Info("pulled image", "ref", imgRef.String(), "digest", canonicalRef.Digest().String())
+
+ //////////////////////////////////////////////////////
+ //
+ // Unpack the image we just pulled
+ //
+ //////////////////////////////////////////////////////
+ if err := i.unpackImage(ctx, unpackPath, layoutRef, specIsCanonical, srcCtx); err != nil {
+ if cleanupErr := deleteRecursive(unpackPath); cleanupErr != nil {
+ err = errors.Join(err, cleanupErr)
+ }
+ return nil, fmt.Errorf("error unpacking image: %w", err)
+ }
+
+ //////////////////////////////////////////////////////
+ //
+ // Delete other images. They are no longer needed.
+ //
+ //////////////////////////////////////////////////////
+ if err := i.deleteOtherImages(catalog.Name, canonicalRef.Digest()); err != nil {
+ return nil, fmt.Errorf("error deleting old images: %w", err)
+ }
+
+ return successResult(unpackPath, canonicalRef, time.Now()), nil
+}
+
+func successResult(unpackPath string, canonicalRef reference.Canonical, lastUnpacked time.Time) *Result {
+ return &Result{
+ FS: os.DirFS(unpackPath),
+ ResolvedSource: &catalogdv1.ResolvedCatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ResolvedImageSource{
+ Ref: canonicalRef.String(),
+ },
+ },
+ State: StateUnpacked,
+ Message: fmt.Sprintf("unpacked %q successfully", canonicalRef),
+
+ // We truncate both the unpack time and last successful poll attempt
+ // to the second because metav1.Time is serialized
+ // as RFC 3339 which only has second-level precision. When we
+ // use this result in a comparison with what we deserialized
+ // from the Kubernetes API server, we need it to match.
+ UnpackTime: lastUnpacked.Truncate(time.Second),
+ LastSuccessfulPollAttempt: metav1.NewTime(time.Now().Truncate(time.Second)),
+ }
+}
+
+func (i *ContainersImageRegistry) Cleanup(_ context.Context, catalog *catalogdv1.ClusterCatalog) error {
+ if err := deleteRecursive(i.catalogPath(catalog.Name)); err != nil {
+ return fmt.Errorf("error deleting catalog cache: %w", err)
+ }
+ return nil
+}
+
+func (i *ContainersImageRegistry) catalogPath(catalogName string) string {
+ return filepath.Join(i.BaseCachePath, catalogName)
+}
+
+func (i *ContainersImageRegistry) unpackPath(catalogName string, digest digest.Digest) string {
+ return filepath.Join(i.catalogPath(catalogName), digest.String())
+}
+
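+// resolveReferences parses ref into a named reference and resolves it to a
+// canonical (digest-pinned) reference. The returned bool reports whether the
+// reference from the spec was already canonical, in which case no registry
+// round-trip is needed to resolve it.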
+func resolveReferences(ctx context.Context, ref string, sourceContext *types.SystemContext) (reference.Named, reference.Canonical, bool, error) {
+ imgRef, err := reference.ParseNamed(ref)
+ if err != nil {
+ return nil, nil, false, reconcile.TerminalError(fmt.Errorf("error parsing image reference %q: %w", ref, err))
+ }
+
+ canonicalRef, isCanonical, err := resolveCanonicalRef(ctx, imgRef, sourceContext)
+ if err != nil {
+ return nil, nil, false, fmt.Errorf("error resolving canonical reference: %w", err)
+ }
+ return imgRef, canonicalRef, isCanonical, nil
+}
+
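+// resolveCanonicalRef returns imgRef as-is if it already carries a digest.
+// Otherwise, it fetches the image manifest from the registry and computes
+// the digest from it.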
+func resolveCanonicalRef(ctx context.Context, imgRef reference.Named, imageCtx *types.SystemContext) (reference.Canonical, bool, error) {
+ if canonicalRef, ok := imgRef.(reference.Canonical); ok {
+ return canonicalRef, true, nil
+ }
+
+ srcRef, err := docker.NewReference(imgRef)
+ if err != nil {
+ return nil, false, reconcile.TerminalError(fmt.Errorf("error creating reference: %w", err))
+ }
+
+ imgSrc, err := srcRef.NewImageSource(ctx, imageCtx)
+ if err != nil {
+ return nil, false, fmt.Errorf("error creating image source: %w", err)
+ }
+ defer imgSrc.Close()
+
+ imgManifestData, _, err := imgSrc.GetManifest(ctx, nil)
+ if err != nil {
+ return nil, false, fmt.Errorf("error getting manifest: %w", err)
+ }
+ imgDigest, err := manifest.Digest(imgManifestData)
+ if err != nil {
+ return nil, false, fmt.Errorf("error getting digest of manifest: %w", err)
+ }
+ canonicalRef, err := reference.WithDigest(reference.TrimNamed(imgRef), imgDigest)
+ if err != nil {
+ return nil, false, fmt.Errorf("error creating canonical reference: %w", err)
+ }
+ return canonicalRef, false, nil
+}
+
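+// loadPolicyContext loads the host's default signature verification policy.
+// If no default policy file exists, it falls back to an insecure policy that
+// accepts anything.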
+func loadPolicyContext(sourceContext *types.SystemContext, l logr.Logger) (*signature.PolicyContext, error) {
+ policy, err := signature.DefaultPolicy(sourceContext)
+ if os.IsNotExist(err) {
+ l.Info("no default policy found, using insecure policy")
+ policy, err = signature.NewPolicyFromBytes([]byte(`{"default":[{"type":"insecureAcceptAnything"}]}`))
+ }
+ if err != nil {
+ return nil, fmt.Errorf("error loading default policy: %w", err)
+ }
+ return signature.NewPolicyContext(policy)
+}
+
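+// unpackImage extracts the catalog content from the image referenced by
+// imageReference into unpackPath. Only the contents of the directory named
+// by the ConfigDirLabel image label are kept, and the resulting tree is made
+// read-only.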
+func (i *ContainersImageRegistry) unpackImage(ctx context.Context, unpackPath string, imageReference types.ImageReference, specIsCanonical bool, sourceContext *types.SystemContext) error {
+ img, err := imageReference.NewImage(ctx, sourceContext)
+ if err != nil {
+ return fmt.Errorf("error reading image: %w", err)
+ }
+ defer func() {
+ if err := img.Close(); err != nil {
+ panic(err)
+ }
+ }()
+
+ layoutSrc, err := imageReference.NewImageSource(ctx, sourceContext)
+ if err != nil {
+ return fmt.Errorf("error creating image source: %w", err)
+ }
+
+ cfg, err := img.OCIConfig(ctx)
+ if err != nil {
+ return fmt.Errorf("error parsing image config: %w", err)
+ }
+
+ dirToUnpack, ok := cfg.Config.Labels[ConfigDirLabel]
+ if !ok {
+ // If the spec is a tagged ref, retries could end up resolving a new digest, where the label
+ // might show up. If the spec is canonical, no amount of retries will make the label appear.
+ // Therefore, we treat the error as terminal if the reference from the spec is canonical.
+ return wrapTerminal(fmt.Errorf("catalog image is missing the required label %q", ConfigDirLabel), specIsCanonical)
+ }
+
+ if err := os.MkdirAll(unpackPath, 0700); err != nil {
+ return fmt.Errorf("error creating unpack directory: %w", err)
+ }
+ l := log.FromContext(ctx)
+ l.Info("unpacking image", "path", unpackPath)
+ for i, layerInfo := range img.LayerInfos() {
+ if err := func() error {
+ layerReader, _, err := layoutSrc.GetBlob(ctx, layerInfo, none.NoCache)
+ if err != nil {
+ return fmt.Errorf("error getting blob for layer[%d]: %w", i, err)
+ }
+ defer layerReader.Close()
+
+ if err := applyLayer(ctx, unpackPath, dirToUnpack, layerReader); err != nil {
+ return fmt.Errorf("error applying layer[%d]: %w", i, err)
+ }
+ l.Info("applied layer", "layer", i)
+ return nil
+ }(); err != nil {
+ return errors.Join(err, deleteRecursive(unpackPath))
+ }
+ }
+ if err := setReadOnlyRecursive(unpackPath); err != nil {
+ return fmt.Errorf("error making unpack directory read-only: %w", err)
+ }
+ return nil
+}
+
+func applyLayer(ctx context.Context, destPath string, srcPath string, layer io.ReadCloser) error {
+ decompressed, _, err := compression.AutoDecompress(layer)
+ if err != nil {
+ return fmt.Errorf("auto-decompress failed: %w", err)
+ }
+ defer decompressed.Close()
+
+ _, err = archive.Apply(ctx, destPath, decompressed, archive.WithFilter(applyLayerFilter(srcPath)))
+ return err
+}
+
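+// applyLayerFilter returns an archive.Filter that keeps only entries located
+// under srcPath, reassigns ownership of extracted entries to the current
+// process's UID/GID, and sets the owner rwx bits so the tree can be
+// traversed and later deleted.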
+func applyLayerFilter(srcPath string) archive.Filter {
+ cleanSrcPath := path.Clean(strings.TrimPrefix(srcPath, "/"))
+ return func(h *tar.Header) (bool, error) {
+ h.Uid = os.Getuid()
+ h.Gid = os.Getgid()
+ h.Mode |= 0700
+
+ cleanName := path.Clean(strings.TrimPrefix(h.Name, "/"))
+ relPath, err := filepath.Rel(cleanSrcPath, cleanName)
+ if err != nil {
+ return false, fmt.Errorf("error getting relative path: %w", err)
+ }
+ return relPath != ".." && !strings.HasPrefix(relPath, "../"), nil
+ }
+}
+
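+// deleteOtherImages removes all cached unpack directories for catalogName
+// except the one matching digestToKeep.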
+func (i *ContainersImageRegistry) deleteOtherImages(catalogName string, digestToKeep digest.Digest) error {
+ catalogPath := i.catalogPath(catalogName)
+ imgDirs, err := os.ReadDir(catalogPath)
+ if err != nil {
+ return fmt.Errorf("error reading image directories: %w", err)
+ }
+ for _, imgDir := range imgDirs {
+ if imgDir.Name() == digestToKeep.String() {
+ continue
+ }
+ imgDirPath := filepath.Join(catalogPath, imgDir.Name())
+ if err := deleteRecursive(imgDirPath); err != nil {
+ return fmt.Errorf("error removing image directory: %w", err)
+ }
+ }
+ return nil
+}
+
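+// setReadOnlyRecursive walks the tree rooted at root, setting regular files
+// to mode 0400 and directories to 0500. Symlinks are left untouched.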
+func setReadOnlyRecursive(root string) error {
+ if err := filepath.WalkDir(root, func(path string, d os.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+
+ fi, err := d.Info()
+ if err != nil {
+ return err
+ }
+
+ if err := func() error {
+ switch typ := fi.Mode().Type(); typ {
+ case os.ModeSymlink:
+ // do not follow symlinks
+ // 1. if they resolve to other locations in the root, we'll find them anyway
+ // 2. if they resolve to other locations outside the root, we don't want to change their permissions
+ return nil
+ case os.ModeDir:
+ return os.Chmod(path, 0500)
+ case 0: // regular file
+ return os.Chmod(path, 0400)
+ default:
+ return fmt.Errorf("refusing to change ownership of file %q with type %v", path, typ.String())
+ }
+ }(); err != nil {
+ return err
+ }
+ return nil
+ }); err != nil {
+ return fmt.Errorf("error making catalog cache read-only: %w", err)
+ }
+ return nil
+}
+
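+// deleteRecursive restores owner write permission on every directory under
+// root (the unpack tree is stored read-only) and then removes root entirely.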
+func deleteRecursive(root string) error {
+ if err := filepath.WalkDir(root, func(path string, d os.DirEntry, err error) error {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+ if !d.IsDir() {
+ return nil
+ }
+ if err := os.Chmod(path, 0700); err != nil {
+ return err
+ }
+ return nil
+ }); err != nil {
+ return fmt.Errorf("error making catalog cache writable for deletion: %w", err)
+ }
+ return os.RemoveAll(root)
+}
+
+func wrapTerminal(err error, isTerminal bool) error {
+ if !isTerminal {
+ return err
+ }
+ return reconcile.TerminalError(err)
+}
diff --git a/catalogd/internal/source/containers_image_internal_test.go b/catalogd/internal/source/containers_image_internal_test.go
new file mode 100644
index 000000000..0c3ba1286
--- /dev/null
+++ b/catalogd/internal/source/containers_image_internal_test.go
@@ -0,0 +1,130 @@
+package source
+
+import (
+ "archive/tar"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestContainersImage_applyLayerFilter(t *testing.T) {
+ type testCase struct {
+ name string
+ srcPaths []string
+ tarHeaders []tar.Header
+ assertion func(*tar.Header, bool, error)
+ }
+ for _, tc := range []testCase{
+ {
+ name: "everything found when srcPaths represent root",
+ srcPaths: []string{"", "/"},
+ tarHeaders: []tar.Header{
+ {
+ Name: "file",
+ },
+ {
+ Name: "/file",
+ },
+ {
+ Name: "/nested/file",
+ },
+ {
+ Name: "/deeply/nested/file",
+ },
+ },
+ assertion: func(tarHeader *tar.Header, keep bool, err error) {
+ assert.True(t, keep)
+ assert.NoError(t, err)
+ },
+ },
+ {
+ name: "nothing found outside of srcPath",
+ srcPaths: []string{"source"},
+ tarHeaders: []tar.Header{
+ {
+ Name: "elsewhere",
+ },
+ {
+ Name: "/elsewhere",
+ },
+ {
+ Name: "/nested/elsewhere",
+ },
+ {
+ Name: "/deeply/nested/elsewhere",
+ },
+ },
+ assertion: func(tarHeader *tar.Header, keep bool, err error) {
+ assert.False(t, keep)
+ assert.NoError(t, err)
+ },
+ },
+ {
+ name: "absolute paths are trimmed",
+ srcPaths: []string{"source", "/source"},
+ tarHeaders: []tar.Header{
+ {
+ Name: "source",
+ },
+ {
+ Name: "/source",
+ },
+ {
+ Name: "source/nested/elsewhere",
+ },
+ {
+ Name: "/source/nested/elsewhere",
+ },
+ {
+ Name: "source/deeply/nested/elsewhere",
+ },
+ {
+ Name: "/source/deeply/nested/elsewhere",
+ },
+ },
+ assertion: func(tarHeader *tar.Header, keep bool, err error) {
+ assert.True(t, keep)
+ assert.NoError(t, err)
+ },
+ },
+ {
+ name: "up level source paths are not supported",
+ srcPaths: []string{"../not-supported"},
+ tarHeaders: []tar.Header{
+ {
+ Name: "anything",
+ },
+ },
+ assertion: func(tarHeader *tar.Header, keep bool, err error) {
+ assert.False(t, keep)
+ assert.ErrorContains(t, err, "error getting relative path")
+ },
+ },
+ {
+ name: "up level tar headers are not supported",
+ srcPaths: []string{"fine"},
+ tarHeaders: []tar.Header{
+ {
+ Name: "../not-supported",
+ },
+ {
+ Name: "../fine",
+ },
+ },
+ assertion: func(tarHeader *tar.Header, keep bool, err error) {
+ assert.False(t, keep)
+ assert.NoError(t, err)
+ },
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ for _, srcPath := range tc.srcPaths {
+ f := applyLayerFilter(srcPath)
+ for _, tarHeader := range tc.tarHeaders {
+ keep, err := f(&tarHeader)
+ tc.assertion(&tarHeader, keep, err)
+ }
+ }
+ })
+ }
+}
diff --git a/catalogd/internal/source/containers_image_test.go b/catalogd/internal/source/containers_image_test.go
new file mode 100644
index 000000000..138464cbe
--- /dev/null
+++ b/catalogd/internal/source/containers_image_test.go
@@ -0,0 +1,477 @@
+package source_test
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/containers/image/v5/types"
+ "github.com/go-logr/logr"
+ "github.com/go-logr/logr/funcr"
+ "github.com/google/go-containerregistry/pkg/name"
+ "github.com/google/go-containerregistry/pkg/registry"
+ "github.com/google/go-containerregistry/pkg/v1"
+ "github.com/google/go-containerregistry/pkg/v1/mutate"
+ "github.com/google/go-containerregistry/pkg/v1/random"
+ "github.com/google/go-containerregistry/pkg/v1/remote"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+ catalogdv1 "github.com/operator-framework/operator-controller/catalogd/api/v1"
+ "github.com/operator-framework/operator-controller/catalogd/internal/source"
+)
+
+func TestImageRegistry(t *testing.T) {
+ for _, tt := range []struct {
+ name string
+ // catalog is the Catalog passed to the Unpack function.
+ // If the Catalog.Spec.Source.Image.Ref field is empty,
+ // one is injected during test runtime to ensure it
+ // points to the registry created for the test
+ catalog *catalogdv1.ClusterCatalog
+ wantErr bool
+ terminal bool
+ image v1.Image
+ digestAlreadyExists bool
+ oldDigestExists bool
+ // refType is the type of image ref this test
+ // is using. Should be one of "tag","digest"
+ refType string
+ }{
+ {
+ name: ".spec.source.image is nil",
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: nil,
+ },
+ },
+ },
+ wantErr: true,
+ terminal: true,
+ refType: "tag",
+ },
+ {
+ name: ".spec.source.image.ref is unparsable",
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "::)12-as^&8asd789A(::",
+ },
+ },
+ },
+ },
+ wantErr: true,
+ terminal: true,
+ refType: "tag",
+ },
+ {
+ name: "tag based, image is missing required label",
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "",
+ },
+ },
+ },
+ },
+ wantErr: true,
+ image: func() v1.Image {
+ img, err := random.Image(20, 3)
+ if err != nil {
+ panic(err)
+ }
+ return img
+ }(),
+ refType: "tag",
+ },
+ {
+ name: "digest based, image is missing required label",
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "",
+ },
+ },
+ },
+ },
+ wantErr: true,
+ terminal: true,
+ image: func() v1.Image {
+ img, err := random.Image(20, 3)
+ if err != nil {
+ panic(err)
+ }
+ return img
+ }(),
+ refType: "digest",
+ },
+ {
+ name: "image doesn't exist",
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "",
+ },
+ },
+ },
+ },
+ wantErr: true,
+ refType: "tag",
+ },
+ {
+ name: "tag based image, digest already exists in cache",
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "",
+ },
+ },
+ },
+ },
+ wantErr: false,
+ image: func() v1.Image {
+ img, err := random.Image(20, 3)
+ if err != nil {
+ panic(err)
+ }
+ return img
+ }(),
+ digestAlreadyExists: true,
+ refType: "tag",
+ },
+ {
+ name: "digest based image, digest already exists in cache",
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "",
+ },
+ },
+ },
+ },
+ wantErr: false,
+ digestAlreadyExists: true,
+ refType: "digest",
+ image: func() v1.Image {
+ img, err := random.Image(20, 3)
+ if err != nil {
+ panic(err)
+ }
+ return img
+ }(),
+ },
+ {
+ name: "old ref is cached",
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "",
+ },
+ },
+ },
+ },
+ wantErr: false,
+ oldDigestExists: true,
+ refType: "tag",
+ image: func() v1.Image {
+ img, err := random.Image(20, 3)
+ if err != nil {
+ panic(err)
+ }
+ img, err = mutate.Config(img, v1.Config{
+ Labels: map[string]string{
+ source.ConfigDirLabel: "/configs",
+ },
+ })
+ if err != nil {
+ panic(err)
+ }
+ return img
+ }(),
+ },
+ {
+ name: "tag ref, happy path",
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "",
+ },
+ },
+ },
+ },
+ wantErr: false,
+ refType: "tag",
+ image: func() v1.Image {
+ img, err := random.Image(20, 3)
+ if err != nil {
+ panic(err)
+ }
+ img, err = mutate.Config(img, v1.Config{
+ Labels: map[string]string{
+ source.ConfigDirLabel: "/configs",
+ },
+ })
+ if err != nil {
+ panic(err)
+ }
+ return img
+ }(),
+ },
+ {
+ name: "digest ref, happy path",
+ catalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: "",
+ },
+ },
+ },
+ },
+ wantErr: false,
+ refType: "digest",
+ image: func() v1.Image {
+ img, err := random.Image(20, 3)
+ if err != nil {
+ panic(err)
+ }
+ img, err = mutate.Config(img, v1.Config{
+ Labels: map[string]string{
+ source.ConfigDirLabel: "/configs",
+ },
+ })
+ if err != nil {
+ panic(err)
+ }
+ return img
+ }(),
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ // Create context, temporary cache directory,
+ // and image registry source
+ ctx, cancel := context.WithCancel(context.Background())
+ t.Cleanup(cancel)
+ testCache := t.TempDir()
+ imgReg := &source.ContainersImageRegistry{
+ BaseCachePath: testCache,
+ SourceContextFunc: func(logger logr.Logger) (*types.SystemContext, error) {
+ return &types.SystemContext{
+ OCIInsecureSkipTLSVerify: true,
+ DockerInsecureSkipTLSVerify: types.OptionalBoolTrue,
+ }, nil
+ },
+ }
+
+ // Create a logger with a simple function-based LogSink that writes to the buffer
+ var buf bytes.Buffer
+ logger := funcr.New(func(prefix, args string) {
+ buf.WriteString(fmt.Sprintf("%s %s\n", prefix, args))
+ }, funcr.Options{Verbosity: 1})
+
+ // Add the logger into the context which will later be used
+ // in the Unpack function to get the logger
+ ctx = log.IntoContext(ctx, logger)
+
+ // Start a new server running an image registry
+ srv := httptest.NewServer(registry.New())
+ defer srv.Close()
+
+ // parse the server url so we can grab just the host
+ url, err := url.Parse(srv.URL)
+ require.NoError(t, err)
+
+ // Build the proper image name as {registry-host}/test-image:test
+ imgName, err := name.ParseReference(fmt.Sprintf("%s/%s", url.Host, "test-image:test"))
+ require.NoError(t, err)
+
+ // If an old digest should exist in the cache, create one
+ oldDigestDir := filepath.Join(testCache, tt.catalog.Name, "olddigest")
+ var oldDigestModTime time.Time
+ if tt.oldDigestExists {
+ require.NoError(t, os.MkdirAll(oldDigestDir, os.ModePerm))
+ oldDigestDirStat, err := os.Stat(oldDigestDir)
+ require.NoError(t, err)
+ oldDigestModTime = oldDigestDirStat.ModTime()
+ }
+
+ var digest v1.Hash
+ // if the test specifies a method that returns a v1.Image,
+ // call it and push the image to the registry
+ if tt.image != nil {
+ digest, err = tt.image.Digest()
+ require.NoError(t, err)
+
+ // if the digest should already exist in the cache, create it
+ if tt.digestAlreadyExists {
+ err = os.MkdirAll(filepath.Join(testCache, tt.catalog.Name, digest.String()), os.ModePerm)
+ require.NoError(t, err)
+ }
+
+ err = remote.Write(imgName, tt.image)
+ require.NoError(t, err)
+
+ // if the image ref should be a digest ref, make it so
+ if tt.refType == "digest" {
+ imgName, err = name.ParseReference(fmt.Sprintf("%s/%s", url.Host, "test-image@sha256:"+digest.Hex))
+ require.NoError(t, err)
+ }
+ }
+
+ // Inject the image reference if needed
+ if tt.catalog.Spec.Source.Image != nil && tt.catalog.Spec.Source.Image.Ref == "" {
+ tt.catalog.Spec.Source.Image.Ref = imgName.Name()
+ }
+
+ rs, err := imgReg.Unpack(ctx, tt.catalog)
+ if !tt.wantErr {
+ require.NoError(t, err)
+ assert.Equal(t, fmt.Sprintf("%s@sha256:%s", imgName.Context().Name(), digest.Hex), rs.ResolvedSource.Image.Ref)
+ assert.Equal(t, source.StateUnpacked, rs.State)
+
+ unpackDir := filepath.Join(testCache, tt.catalog.Name, digest.String())
+ assert.DirExists(t, unpackDir)
+ unpackDirStat, err := os.Stat(unpackDir)
+ require.NoError(t, err)
+
+ entries, err := os.ReadDir(filepath.Join(testCache, tt.catalog.Name))
+ require.NoError(t, err)
+ assert.Len(t, entries, 1)
+ // If the digest should already exist check that we actually hit it
+ if tt.digestAlreadyExists {
+ assert.Contains(t, buf.String(), "image already unpacked")
+ assert.Equal(t, rs.UnpackTime, unpackDirStat.ModTime().Truncate(time.Second))
+ } else if tt.oldDigestExists {
+ assert.NotContains(t, buf.String(), "image already unpacked")
+ assert.NotEqual(t, rs.UnpackTime, oldDigestModTime)
+ assert.NoDirExists(t, oldDigestDir)
+ } else {
+ require.NotNil(t, rs.UnpackTime)
+ require.NotNil(t, rs.ResolvedSource.Image)
+ assert.False(t, rs.UnpackTime.IsZero())
+ }
+ } else {
+ require.Error(t, err)
+ isTerminal := errors.Is(err, reconcile.TerminalError(nil))
+ assert.Equal(t, tt.terminal, isTerminal, "expected terminal %v, got %v", tt.terminal, isTerminal)
+ }
+
+ assert.NoError(t, imgReg.Cleanup(ctx, tt.catalog))
+ assert.NoError(t, imgReg.Cleanup(ctx, tt.catalog), "cleanup should ignore missing files")
+ })
+ }
+}
+
+// TestImageRegistryMissingLabelConsistentFailure is a test
+// case that specifically tests that multiple calls to the
+// ImageRegistry.Unpack() method return an error and is meant
+// to ensure coverage of the bug reported in
+// https://github.com/operator-framework/catalogd/issues/206
+func TestImageRegistryMissingLabelConsistentFailure(t *testing.T) {
+ // Create context, temporary cache directory,
+ // and image registry source
+ ctx, cancel := context.WithCancel(context.Background())
+ t.Cleanup(cancel)
+ testCache := t.TempDir()
+ imgReg := &source.ContainersImageRegistry{
+ BaseCachePath: testCache,
+ SourceContextFunc: func(logger logr.Logger) (*types.SystemContext, error) {
+ return &types.SystemContext{}, nil
+ },
+ }
+
+ // Start a new server running an image registry
+ srv := httptest.NewServer(registry.New())
+ defer srv.Close()
+
+ // parse the server url so we can grab just the host
+ url, err := url.Parse(srv.URL)
+ require.NoError(t, err)
+
+ imgName, err := name.ParseReference(fmt.Sprintf("%s/%s", url.Host, "test-image:test"))
+ require.NoError(t, err)
+
+ image, err := random.Image(20, 20)
+ require.NoError(t, err)
+
+ err = remote.Write(imgName, image)
+ require.NoError(t, err)
+
+ catalog := &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: imgName.Name(),
+ },
+ },
+ },
+ }
+
+ for i := 0; i < 3; i++ {
+ _, err = imgReg.Unpack(ctx, catalog)
+ require.Error(t, err, "unpack run ", i)
+ }
+}
diff --git a/catalogd/internal/source/unpacker.go b/catalogd/internal/source/unpacker.go
new file mode 100644
index 000000000..f0bb2449c
--- /dev/null
+++ b/catalogd/internal/source/unpacker.go
@@ -0,0 +1,72 @@
+package source
+
+import (
+ "context"
+ "io/fs"
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ catalogdv1 "github.com/operator-framework/operator-controller/catalogd/api/v1"
+)
+
+// TODO: This package is almost entirely copy/pasted from rukpak. We should look
+// into whether it is possible to share this code.
+//
+// TODO: None of the rukpak CRD validations (both static and from the rukpak
+// webhooks) related to the source are present here. Which of them do we need?
+
+// Unpacker unpacks catalog content, either synchronously or asynchronously and
+// returns a Result, which conveys information about the progress of unpacking
+// the catalog content.
+//
+// If a Source unpacks content asynchronously, it should register one or more
+// watches with a controller to ensure that ClusterCatalogs referencing this
+// source can be reconciled as progress updates are available.
+//
+// For asynchronous Sources, multiple calls to Unpack should be made until the
+// returned result includes state StateUnpacked.
+//
+// NOTE: A source is meant to be agnostic to specific catalog formats and
+// specifications. A source should treat a catalog root directory as an opaque
+// file tree and delegate catalog format concerns to catalog parsers.
+type Unpacker interface {
+ Unpack(context.Context, *catalogdv1.ClusterCatalog) (*Result, error)
+ Cleanup(context.Context, *catalogdv1.ClusterCatalog) error
+}
+
+// Result conveys progress information about unpacking catalog content.
+type Result struct {
+ // FS contains the full filesystem of a catalog's root directory.
+ FS fs.FS
+
+ // ResolvedSource is a reproducible view of a catalog's source.
+ // When possible, source implementations should return a ResolvedSource
+ // that pins the Source such that future fetches of the catalog content can
+ // be guaranteed to fetch the exact same catalog content as the original
+ // unpack.
+ //
+ // For example, resolved image sources should reference a container image
+ // digest rather than an image tag, and git sources should reference a
+ // commit hash rather than a branch or tag.
+ ResolvedSource *catalogdv1.ResolvedCatalogSource
+
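+ // LastSuccessfulPollAttempt is the timestamp of the last successful poll
+ // of the catalog source, truncated to second-level precision to match the
+ // API server's RFC 3339 serialization.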
+ LastSuccessfulPollAttempt metav1.Time
+
+ // State is the current state of unpacking the catalog content.
+ State State
+
+ // Message is contextual information about the progress of unpacking the
+ // catalog content.
+ Message string
+
+ // UnpackTime is the timestamp when the transition to the current State happened
+ UnpackTime time.Time
+}
+
+type State string
+
+// StateUnpacked conveys that the catalog has been successfully unpacked.
+const StateUnpacked State = "Unpacked"
+
+const UnpackCacheDir = "unpack"
diff --git a/catalogd/internal/storage/localdir.go b/catalogd/internal/storage/localdir.go
new file mode 100644
index 000000000..dd06729ea
--- /dev/null
+++ b/catalogd/internal/storage/localdir.go
@@ -0,0 +1,114 @@
+package storage
+
+import (
+ "context"
+ "fmt"
+ "io/fs"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+
+ "github.com/klauspost/compress/gzhttp"
+
+ "github.com/operator-framework/operator-registry/alpha/declcfg"
+)
+
+// LocalDirV1 is a storage Instance. When storing a new FBC contained in an
+// fs.FS, the content is first written to a temporary file and then renamed
+// into its final destination in RootDir/catalogName/. This is done so that
+// clients accessing the content stored in RootDir/catalogName always have an
+// atomic view of the content for a catalog.
+type LocalDirV1 struct {
+ RootDir string
+ RootURL *url.URL
+}
+
+const (
+ v1ApiPath = "api/v1"
+ v1ApiData = "all"
+)
+
+func (s LocalDirV1) Store(ctx context.Context, catalog string, fsys fs.FS) error {
+ fbcDir := filepath.Join(s.RootDir, catalog, v1ApiPath)
+ if err := os.MkdirAll(fbcDir, 0700); err != nil {
+ return err
+ }
+ tempFile, err := os.CreateTemp(s.RootDir, catalog)
+ if err != nil {
+ return err
+ }
+ defer os.Remove(tempFile.Name())
+ defer tempFile.Close()
+ if err := declcfg.WalkMetasFS(ctx, fsys, func(path string, meta *declcfg.Meta, err error) error {
+ if err != nil {
+ return err
+ }
+ _, err = tempFile.Write(meta.Blob)
+ return err
+ }); err != nil {
+ return fmt.Errorf("error walking FBC root: %w", err)
+ }
+ fbcFile := filepath.Join(fbcDir, v1ApiData)
+ return os.Rename(tempFile.Name(), fbcFile)
+}
+
+func (s LocalDirV1) Delete(catalog string) error {
+ return os.RemoveAll(filepath.Join(s.RootDir, catalog))
+}
+
+func (s LocalDirV1) BaseURL(catalog string) string {
+ return s.RootURL.JoinPath(catalog).String()
+}
+
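+// StorageServerHandler returns an http.Handler that serves the stored catalog
+// files under RootURL.Path. Responses are gzip-compressed when the client
+// accepts it and the payload is large enough, the Content-Type is set to
+// application/jsonl, and only regular files are served, so directory listings
+// are never exposed.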
+func (s LocalDirV1) StorageServerHandler() http.Handler {
+ mux := http.NewServeMux()
+ fsHandler := http.FileServer(http.FS(&filesOnlyFilesystem{os.DirFS(s.RootDir)}))
+ spHandler := http.StripPrefix(s.RootURL.Path, fsHandler)
+ gzHandler := gzhttp.GzipHandler(spHandler)
+
+ typeHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Add("Content-Type", "application/jsonl")
+ gzHandler.ServeHTTP(w, r)
+ })
+ mux.Handle(s.RootURL.Path, typeHandler)
+ return mux
+}
+
+func (s LocalDirV1) ContentExists(catalog string) bool {
+ file, err := os.Stat(filepath.Join(s.RootDir, catalog, v1ApiPath, v1ApiData))
+ if err != nil {
+ return false
+ }
+ if !file.Mode().IsRegular() {
+ // path is not valid content
+ return false
+ }
+ return true
+}
+
+// filesOnlyFilesystem is a file system that can open only regular
+// files from the underlying filesystem. All other file types result
+// in os.ErrNotExist.
+type filesOnlyFilesystem struct {
+ FS fs.FS
+}
+
+// Open opens a named file from the underlying filesystem. If the file
+// is not a regular file, it returns os.ErrNotExist. Callers are responsible
+// for closing the file returned.
+func (f *filesOnlyFilesystem) Open(name string) (fs.File, error) {
+ file, err := f.FS.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ stat, err := file.Stat()
+ if err != nil {
+ _ = file.Close()
+ return nil, err
+ }
+ if !stat.Mode().IsRegular() {
+ _ = file.Close()
+ return nil, os.ErrNotExist
+ }
+ return file, nil
+}
diff --git a/catalogd/internal/storage/localdir_test.go b/catalogd/internal/storage/localdir_test.go
new file mode 100644
index 000000000..c975c8fc9
--- /dev/null
+++ b/catalogd/internal/storage/localdir_test.go
@@ -0,0 +1,438 @@
+package storage
+
+import (
+ "bytes"
+ "compress/gzip"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing/fstest"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ "github.com/google/go-cmp/cmp"
+ "sigs.k8s.io/yaml"
+
+ "github.com/operator-framework/operator-registry/alpha/declcfg"
+)
+
+const urlPrefix = "/catalogs/"
+
+var ctx = context.Background()
+
+var _ = Describe("LocalDir Storage Test", func() {
+ var (
+ catalog = "test-catalog"
+ store Instance
+ rootDir string
+ baseURL *url.URL
+ testBundleName = "bundle.v0.0.1"
+ testBundleImage = "quaydock.io/namespace/bundle:0.0.3"
+ testBundleRelatedImageName = "test"
+ testBundleRelatedImageImage = "testimage:latest"
+ testBundleObjectData = "dW5pbXBvcnRhbnQK"
+ testPackageDefaultChannel = "preview_test"
+ testPackageName = "webhook_operator_test"
+ testChannelName = "preview_test"
+ testPackage = fmt.Sprintf(testPackageTemplate, testPackageDefaultChannel, testPackageName)
+ testBundle = fmt.Sprintf(testBundleTemplate, testBundleImage, testBundleName, testPackageName, testBundleRelatedImageName, testBundleRelatedImageImage, testBundleObjectData)
+ testChannel = fmt.Sprintf(testChannelTemplate, testPackageName, testChannelName, testBundleName)
+
+ unpackResultFS fs.FS
+ )
+ BeforeEach(func() {
+ d, err := os.MkdirTemp(GinkgoT().TempDir(), "cache")
+ Expect(err).ToNot(HaveOccurred())
+ rootDir = d
+
+ baseURL = &url.URL{Scheme: "http", Host: "test-addr", Path: urlPrefix}
+ store = LocalDirV1{RootDir: rootDir, RootURL: baseURL}
+ unpackResultFS = &fstest.MapFS{
+ "bundle.yaml": &fstest.MapFile{Data: []byte(testBundle), Mode: os.ModePerm},
+ "package.yaml": &fstest.MapFile{Data: []byte(testPackage), Mode: os.ModePerm},
+ "channel.yaml": &fstest.MapFile{Data: []byte(testChannel), Mode: os.ModePerm},
+ }
+ })
+ When("An unpacked FBC is stored using LocalDir", func() {
+ BeforeEach(func() {
+ err := store.Store(context.Background(), catalog, unpackResultFS)
+ Expect(err).To(Not(HaveOccurred()))
+ })
+ It("should store the content in the RootDir correctly", func() {
+ fbcDir := filepath.Join(rootDir, catalog, v1ApiPath)
+ fbcFile := filepath.Join(fbcDir, v1ApiData)
+ _, err := os.Stat(fbcFile)
+ Expect(err).To(Not(HaveOccurred()))
+
+ gotConfig, err := declcfg.LoadFS(ctx, unpackResultFS)
+ Expect(err).To(Not(HaveOccurred()))
+ storedConfig, err := declcfg.LoadFile(os.DirFS(fbcDir), v1ApiData)
+ Expect(err).To(Not(HaveOccurred()))
+ diff := cmp.Diff(gotConfig, storedConfig)
+ Expect(diff).To(Equal(""))
+ })
+ It("should form the content URL correctly", func() {
+ Expect(store.BaseURL(catalog)).To(Equal(baseURL.JoinPath(catalog).String()))
+ })
+ It("should report content exists", func() {
+ Expect(store.ContentExists(catalog)).To(BeTrue())
+ })
+ When("The stored content is deleted", func() {
+ BeforeEach(func() {
+ err := store.Delete(catalog)
+ Expect(err).To(Not(HaveOccurred()))
+ })
+ It("should delete the FBC from the cache directory", func() {
+ fbcFile := filepath.Join(rootDir, catalog)
+ _, err := os.Stat(fbcFile)
+ Expect(err).To(HaveOccurred())
+ Expect(os.IsNotExist(err)).To(BeTrue())
+ })
+ It("should report content does not exist", func() {
+ Expect(store.ContentExists(catalog)).To(BeFalse())
+ })
+ })
+ })
+})
+
+var _ = Describe("LocalDir Server Handler tests", func() {
+ var (
+ testServer *httptest.Server
+ store LocalDirV1
+ )
+ BeforeEach(func() {
+ d, err := os.MkdirTemp(GinkgoT().TempDir(), "cache")
+ Expect(err).ToNot(HaveOccurred())
+ Expect(os.MkdirAll(filepath.Join(d, "test-catalog", v1ApiPath), 0700)).To(Succeed())
+ store = LocalDirV1{RootDir: d, RootURL: &url.URL{Path: urlPrefix}}
+ testServer = httptest.NewServer(store.StorageServerHandler())
+
+ })
+ It("gets 404 for the path /", func() {
+ expectNotFound(testServer.URL)
+ })
+ It("gets 404 for the path /catalogs/", func() {
+ expectNotFound(fmt.Sprintf("%s/%s", testServer.URL, "/catalogs/"))
+ })
+ It("gets 404 for the path /catalogs/test-catalog/", func() {
+ expectNotFound(fmt.Sprintf("%s/%s", testServer.URL, "/catalogs/test-catalog/"))
+ })
+ It("gets 404 for the path /test-catalog/foo.txt", func() {
+ // This ensures that even if the file exists, the URL must contain the /catalogs/ prefix
+ Expect(os.WriteFile(filepath.Join(store.RootDir, "test-catalog", "foo.txt"), []byte("bar"), 0600)).To(Succeed())
+ expectNotFound(fmt.Sprintf("%s/%s", testServer.URL, "/test-catalog/foo.txt"))
+ })
+ It("gets 404 for the path /catalogs/test-catalog/non-existent.txt", func() {
+ expectNotFound(fmt.Sprintf("%s/%s", testServer.URL, "/catalogs/test-catalog/non-existent.txt"))
+ })
+ It("gets 200 for the path /catalogs/foo.txt", func() {
+ expectedContent := []byte("bar")
+ Expect(os.WriteFile(filepath.Join(store.RootDir, "foo.txt"), expectedContent, 0600)).To(Succeed())
+ expectFound(fmt.Sprintf("%s/%s", testServer.URL, "/catalogs/foo.txt"), expectedContent)
+ })
+ It("gets 200 for the path /catalogs/test-catalog/foo.txt", func() {
+ expectedContent := []byte("bar")
+ Expect(os.WriteFile(filepath.Join(store.RootDir, "test-catalog", "foo.txt"), expectedContent, 0600)).To(Succeed())
+ expectFound(fmt.Sprintf("%s/%s", testServer.URL, "/catalogs/test-catalog/foo.txt"), expectedContent)
+ })
+ It("ignores accept-encoding for the path /catalogs/test-catalog/api/v1/all with size < 1400 bytes", func() {
+ expectedContent := []byte("bar")
+ Expect(os.WriteFile(filepath.Join(store.RootDir, "test-catalog", v1ApiPath, v1ApiData), expectedContent, 0600)).To(Succeed())
+ expectFound(fmt.Sprintf("%s/%s", testServer.URL, "/catalogs/test-catalog/api/v1/all"), expectedContent)
+ })
+ It("provides gzipped content for the path /catalogs/test-catalog/api/v1/all with size > 1400 bytes", func() {
+ expectedContent := []byte(testCompressableJSON)
+ Expect(os.WriteFile(filepath.Join(store.RootDir, "test-catalog", v1ApiPath, v1ApiData), expectedContent, 0600)).To(Succeed())
+ expectFound(fmt.Sprintf("%s/%s", testServer.URL, "/catalogs/test-catalog/api/v1/all"), expectedContent)
+ })
+ It("provides json-lines format for the served JSON catalog", func() {
+ catalog := "test-catalog"
+ unpackResultFS := &fstest.MapFS{
+ "catalog.json": &fstest.MapFile{Data: []byte(testCompressableJSON), Mode: os.ModePerm},
+ }
+ err := store.Store(context.Background(), catalog, unpackResultFS)
+ Expect(err).To(Not(HaveOccurred()))
+
+ expectedContent, err := generateJSONLines([]byte(testCompressableJSON))
+ Expect(err).To(Not(HaveOccurred()))
+ path, err := url.JoinPath(testServer.URL, urlPrefix, catalog, v1ApiPath, v1ApiData)
+ Expect(err).To(Not(HaveOccurred()))
+ expectFound(path, []byte(expectedContent))
+ })
+ It("provides json-lines format for the served YAML catalog", func() {
+ catalog := "test-catalog"
+ yamlData, err := makeYAMLFromConcatenatedJSON([]byte(testCompressableJSON))
+ Expect(err).To(Not(HaveOccurred()))
+ unpackResultFS := &fstest.MapFS{
+ "catalog.yaml": &fstest.MapFile{Data: yamlData, Mode: os.ModePerm},
+ }
+ err = store.Store(context.Background(), catalog, unpackResultFS)
+ Expect(err).To(Not(HaveOccurred()))
+
+ expectedContent, err := generateJSONLines(yamlData)
+ Expect(err).To(Not(HaveOccurred()))
+ path, err := url.JoinPath(testServer.URL, urlPrefix, catalog, v1ApiPath, v1ApiData)
+ Expect(err).To(Not(HaveOccurred()))
+ expectFound(path, []byte(expectedContent))
+ })
+ AfterEach(func() {
+ testServer.Close()
+ })
+})
+
+func expectNotFound(url string) {
+ resp, err := http.Get(url) //nolint:gosec
+ Expect(err).To(Not(HaveOccurred()))
+ Expect(resp.StatusCode).To(Equal(http.StatusNotFound))
+ Expect(resp.Body.Close()).To(Succeed())
+}
+
+func expectFound(url string, expectedContent []byte) {
+ req, err := http.NewRequest(http.MethodGet, url, nil)
+ Expect(err).To(Not(HaveOccurred()))
+ req.Header.Set("Accept-Encoding", "gzip")
+ resp, err := http.DefaultClient.Do(req)
+ Expect(err).To(Not(HaveOccurred()))
+ Expect(resp.StatusCode).To(Equal(http.StatusOK))
+
+ var actualContent []byte
+ switch resp.Header.Get("Content-Encoding") {
+ case "gzip":
+ Expect(len(expectedContent)).To(BeNumerically(">", 1400),
+ fmt.Sprintf("gzipped content should only be provided for content larger than 1400 bytes, but our expected content is only %d bytes", len(expectedContent)))
+ gz, err := gzip.NewReader(resp.Body)
+ Expect(err).To(Not(HaveOccurred()))
+ actualContent, err = io.ReadAll(gz)
+ Expect(err).To(Not(HaveOccurred()))
+ default:
+ actualContent, err = io.ReadAll(resp.Body)
+ Expect(len(expectedContent)).To(BeNumerically("<", 1400),
+ fmt.Sprintf("plaintext content should only be provided for content smaller than 1400 bytes, but we received plaintext for %d bytes\n expectedContent:\n%s\n", len(expectedContent), expectedContent))
+ Expect(err).To(Not(HaveOccurred()))
+ }
+
+ Expect(actualContent).To(Equal(expectedContent))
+ Expect(resp.Body.Close()).To(Succeed())
+}
+
+const testBundleTemplate = `---
+image: %s
+name: %s
+schema: olm.bundle
+package: %s
+relatedImages:
+ - name: %s
+ image: %s
+properties:
+ - type: olm.bundle.object
+ value:
+ data: %s
+ - type: some.other
+ value:
+ data: arbitrary-info
+`
+
+const testPackageTemplate = `---
+defaultChannel: %s
+name: %s
+schema: olm.package
+`
+
+const testChannelTemplate = `---
+schema: olm.channel
+package: %s
+name: %s
+entries:
+ - name: %s
+`
+
+// by default the compressor will only trigger for files larger than 1400 bytes
+const testCompressableJSON = `{
+ "defaultChannel": "stable-v6.x",
+ "name": "cockroachdb",
+ "schema": "olm.package"
+}
+{
+ "entries": [
+ {
+ "name": "cockroachdb.v5.0.3"
+ },
+ {
+ "name": "cockroachdb.v5.0.4",
+ "replaces": "cockroachdb.v5.0.3"
+ }
+ ],
+ "name": "stable-5.x",
+ "package": "cockroachdb",
+ "schema": "olm.channel"
+}
+{
+ "entries": [
+ {
+ "name": "cockroachdb.v6.0.0",
+ "skipRange": "<6.0.0"
+ }
+ ],
+ "name": "stable-v6.x",
+ "package": "cockroachdb",
+ "schema": "olm.channel"
+}
+{
+ "image": "quay.io/openshift-community-operators/cockroachdb@sha256:a5d4f4467250074216eb1ba1c36e06a3ab797d81c431427fc2aca97ecaf4e9d8",
+ "name": "cockroachdb.v5.0.3",
+ "package": "cockroachdb",
+ "properties": [
+ {
+ "type": "olm.gvk",
+ "value": {
+ "group": "charts.operatorhub.io",
+ "kind": "Cockroachdb",
+ "version": "v1alpha1"
+ }
+ },
+ {
+ "type": "olm.package",
+ "value": {
+ "packageName": "cockroachdb",
+ "version": "5.0.3"
+ }
+ }
+ ],
+ "relatedImages": [
+ {
+ "name": "",
+ "image": "quay.io/helmoperators/cockroachdb:v5.0.3"
+ },
+ {
+ "name": "",
+ "image": "quay.io/openshift-community-operators/cockroachdb@sha256:a5d4f4467250074216eb1ba1c36e06a3ab797d81c431427fc2aca97ecaf4e9d8"
+ }
+ ],
+ "schema": "olm.bundle"
+}
+{
+ "image": "quay.io/openshift-community-operators/cockroachdb@sha256:f42337e7b85a46d83c94694638e2312e10ca16a03542399a65ba783c94a32b63",
+ "name": "cockroachdb.v5.0.4",
+ "package": "cockroachdb",
+ "properties": [
+ {
+ "type": "olm.gvk",
+ "value": {
+ "group": "charts.operatorhub.io",
+ "kind": "Cockroachdb",
+ "version": "v1alpha1"
+ }
+ },
+ {
+ "type": "olm.package",
+ "value": {
+ "packageName": "cockroachdb",
+ "version": "5.0.4"
+ }
+ }
+ ],
+ "relatedImages": [
+ {
+ "name": "",
+ "image": "quay.io/helmoperators/cockroachdb:v5.0.4"
+ },
+ {
+ "name": "",
+ "image": "quay.io/openshift-community-operators/cockroachdb@sha256:f42337e7b85a46d83c94694638e2312e10ca16a03542399a65ba783c94a32b63"
+ }
+ ],
+ "schema": "olm.bundle"
+}
+{
+ "image": "quay.io/openshift-community-operators/cockroachdb@sha256:d3016b1507515fc7712f9c47fd9082baf9ccb070aaab58ed0ef6e5abdedde8ba",
+ "name": "cockroachdb.v6.0.0",
+ "package": "cockroachdb",
+ "properties": [
+ {
+ "type": "olm.gvk",
+ "value": {
+ "group": "charts.operatorhub.io",
+ "kind": "Cockroachdb",
+ "version": "v1alpha1"
+ }
+ },
+ {
+ "type": "olm.package",
+ "value": {
+ "packageName": "cockroachdb",
+ "version": "6.0.0"
+ }
+ }
+ ],
+ "relatedImages": [
+ {
+ "name": "",
+ "image": "quay.io/cockroachdb/cockroach-helm-operator:6.0.0"
+ },
+ {
+ "name": "",
+ "image": "quay.io/openshift-community-operators/cockroachdb@sha256:d3016b1507515fc7712f9c47fd9082baf9ccb070aaab58ed0ef6e5abdedde8ba"
+ }
+ ],
+ "schema": "olm.bundle"
+}
+`
+
+// makeYAMLFromConcatenatedJSON takes a byte slice of concatenated JSON objects and returns a byte slice of concatenated YAML objects.
+func makeYAMLFromConcatenatedJSON(data []byte) ([]byte, error) {
+ var msg json.RawMessage
+ var delimiter = []byte("---\n")
+ var yamlData []byte
+
+ yamlData = append(yamlData, delimiter...)
+
+ dec := json.NewDecoder(bytes.NewReader(data))
+ for {
+ err := dec.Decode(&msg)
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ y, err := yaml.JSONToYAML(msg)
+ if err != nil {
+ return []byte{}, err
+ }
+ yamlData = append(yamlData, delimiter...)
+ yamlData = append(yamlData, y...)
+ }
+ return yamlData, nil
+}
+
+// generateJSONLines takes a byte slice of concatenated JSON objects and returns a JSONlines-formatted string.
+func generateJSONLines(in []byte) (string, error) {
+ var out strings.Builder
+ reader := bytes.NewReader(in)
+
+ err := declcfg.WalkMetasReader(reader, func(meta *declcfg.Meta, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if meta != nil && meta.Blob != nil {
+ if meta.Blob[len(meta.Blob)-1] != '\n' {
+ return fmt.Errorf("blob does not end with newline")
+ }
+ }
+
+ _, err = out.Write(meta.Blob)
+ if err != nil {
+ return err
+ }
+ return nil
+ })
+ return out.String(), err
+}
diff --git a/catalogd/internal/storage/storage.go b/catalogd/internal/storage/storage.go
new file mode 100644
index 000000000..458ff040b
--- /dev/null
+++ b/catalogd/internal/storage/storage.go
@@ -0,0 +1,19 @@
+package storage
+
+import (
+ "context"
+ "io/fs"
+ "net/http"
+)
+
+// Instance is a storage instance that stores FBC content of catalogs
+// added to a cluster. It can be used to Store or Delete FBC in the
+// host's filesystem, and it provides an http.Handler through which a
+// server can serve the stored content.
+type Instance interface {
+ Store(ctx context.Context, catalog string, fsys fs.FS) error
+ Delete(catalog string) error
+ BaseURL(catalog string) string
+ StorageServerHandler() http.Handler
+ ContentExists(catalog string) bool
+}
diff --git a/catalogd/internal/storage/suite_test.go b/catalogd/internal/storage/suite_test.go
new file mode 100644
index 000000000..b0c512de7
--- /dev/null
+++ b/catalogd/internal/storage/suite_test.go
@@ -0,0 +1,29 @@
+/*
+Copyright 2023.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package storage
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+func TestAPIs(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Storage Suite")
+}
diff --git a/catalogd/internal/third_party/server/server.go b/catalogd/internal/third_party/server/server.go
new file mode 100644
index 000000000..cfdec7b3b
--- /dev/null
+++ b/catalogd/internal/third_party/server/server.go
@@ -0,0 +1,123 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// this is copied from https://raw.githubusercontent.com/kubernetes-sigs/controller-runtime/77b08a845e451b695cfa25b79ebe277d85064345/pkg/manager/server.go
+// we will remove this once we update to a version of controller-runtime that has this included
+// https://github.com/kubernetes-sigs/controller-runtime/pull/2473
+
+package server
+
+import (
+ "context"
+ "errors"
+ "net"
+ "net/http"
+ "time"
+
+ "github.com/go-logr/logr"
+
+ crlog "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+)
+
+var (
+ _ manager.Runnable = (*Server)(nil)
+ _ manager.LeaderElectionRunnable = (*Server)(nil)
+)
+
+// Server is a general purpose HTTP(S) server Runnable for a manager.
+// It is used to serve some internal handlers for health probes and profiling,
+// but it can also be used to run custom servers.
+type Server struct {
+ // Kind is an optional string that describes the purpose of the server. It is used in logs to distinguish
+ // among multiple servers.
+ Kind string
+
+ // Log is the logger used by the server. If not set, a logger will be derived from the context passed to Start.
+ Log logr.Logger
+
+ // Server is the HTTP server to run. It is required.
+ Server *http.Server
+
+	// Listener is an optional listener to use. If not set, the server starts a listener using Server.Addr.
+ // Using a listener is useful when the port reservation needs to happen in advance of this runnable starting.
+ Listener net.Listener
+
+ // OnlyServeWhenLeader is an optional bool that indicates that the server should only be started when the manager is the leader.
+ OnlyServeWhenLeader bool
+
+	// ShutdownTimeout is an optional duration that indicates how long to wait for the server to shut down gracefully. If not set,
+	// the server will wait indefinitely for all connections to close.
+ ShutdownTimeout *time.Duration
+}
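+
+// Illustrative usage (not part of the upstream copy): because Server implements
+// manager.Runnable, it can be registered with a manager, e.g.
+//
+//	_ = mgr.Add(&Server{
+//		Kind:   "storage",
+//		Server: &http.Server{Addr: ":8080", Handler: mux},
+//	})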
+
+// Start starts the server. It will block until the server is stopped or an error occurs.
+func (s *Server) Start(ctx context.Context) error {
+ log := s.Log
+ if log.GetSink() == nil {
+ log = crlog.FromContext(ctx)
+ }
+ if s.Kind != "" {
+ log = log.WithValues("kind", s.Kind)
+ }
+ log = log.WithValues("addr", s.addr())
+
+ serverShutdown := make(chan struct{})
+ go func() {
+ <-ctx.Done()
+ log.Info("shutting down server")
+
+ shutdownCtx := context.Background()
+ if s.ShutdownTimeout != nil {
+ var shutdownCancel context.CancelFunc
+ shutdownCtx, shutdownCancel = context.WithTimeout(context.Background(), *s.ShutdownTimeout)
+ defer shutdownCancel()
+ }
+
+ if err := s.Server.Shutdown(shutdownCtx); err != nil {
+ log.Error(err, "error shutting down server")
+ }
+ close(serverShutdown)
+ }()
+
+ log.Info("starting server")
+ if err := s.serve(); err != nil && !errors.Is(err, http.ErrServerClosed) {
+ return err
+ }
+
+ <-serverShutdown
+ return nil
+}
+
+// NeedLeaderElection returns true if the server should only be started when the manager is the leader.
+func (s *Server) NeedLeaderElection() bool {
+ return s.OnlyServeWhenLeader
+}
+
+func (s *Server) addr() string {
+ if s.Listener != nil {
+ return s.Listener.Addr().String()
+ }
+ return s.Server.Addr
+}
+
+func (s *Server) serve() error {
+ if s.Listener != nil {
+ return s.Server.Serve(s.Listener)
+ }
+
+ return s.Server.ListenAndServe()
+}
diff --git a/catalogd/internal/version/version.go b/catalogd/internal/version/version.go
new file mode 100644
index 000000000..73ba429a9
--- /dev/null
+++ b/catalogd/internal/version/version.go
@@ -0,0 +1,36 @@
+package version
+
+import (
+ "fmt"
+ "runtime"
+ "strings"
+
+ "github.com/blang/semver/v4"
+ genericversion "k8s.io/apimachinery/pkg/version"
+)
+
+var (
+ gitVersion = "unknown"
+ gitCommit = "unknown" // sha1 from git, output of $(git rev-parse HEAD)
+ gitTreeState = "unknown" // state of git tree, either "clean" or "dirty"
+ commitDate = "unknown" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
+)
+
+// Version returns a version struct for the build
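+// For example (illustrative): when built with gitVersion set to "v1.2.3" via
+// ldflags, Major is "1" and Minor is "2"; if gitVersion is not valid semver,
+// Major and Minor are left empty.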
+func Version() genericversion.Info {
+ info := genericversion.Info{
+ GitVersion: gitVersion,
+ GitCommit: gitCommit,
+ GitTreeState: gitTreeState,
+ BuildDate: commitDate,
+ GoVersion: runtime.Version(),
+ Compiler: runtime.Compiler,
+ Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
+ }
+ v, err := semver.Parse(strings.TrimPrefix(gitVersion, "v"))
+ if err == nil {
+ info.Major = fmt.Sprintf("%d", v.Major)
+ info.Minor = fmt.Sprintf("%d", v.Minor)
+ }
+ return info
+}
diff --git a/catalogd/internal/webhook/cluster_catalog_webhook.go b/catalogd/internal/webhook/cluster_catalog_webhook.go
new file mode 100644
index 000000000..3938939a7
--- /dev/null
+++ b/catalogd/internal/webhook/cluster_catalog_webhook.go
@@ -0,0 +1,46 @@
+package webhook
+
+import (
+ "context"
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+
+ catalogdv1 "github.com/operator-framework/operator-controller/catalogd/api/v1"
+)
+
+// +kubebuilder:webhook:admissionReviewVersions={v1},failurePolicy=Fail,groups=olm.operatorframework.io,mutating=true,name=inject-metadata-name.olm.operatorframework.io,path=/mutate-olm-operatorframework-io-v1-clustercatalog,resources=clustercatalogs,verbs=create;update,versions=v1,sideEffects=None,timeoutSeconds=10
+
+// +kubebuilder:rbac:groups=olm.operatorframework.io,resources=clustercatalogs,verbs=get;list;watch;patch;update
+
+// ClusterCatalog wraps the external v1.ClusterCatalog type and implements admission.Defaulter
+type ClusterCatalog struct{}
+
+// Default is the method that will be called by the webhook to apply defaults.
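+// For example (illustrative): a ClusterCatalog named "operatorhubio" ends up
+// with the label "olm.operatorframework.io/metadata.name": "operatorhubio",
+// overriding any value a client may have set for that label.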
+func (r *ClusterCatalog) Default(ctx context.Context, obj runtime.Object) error {
+ log := log.FromContext(ctx)
+ log.Info("Invoking Default method for ClusterCatalog", "object", obj)
+ catalog, ok := obj.(*catalogdv1.ClusterCatalog)
+ if !ok {
+ return fmt.Errorf("expected a ClusterCatalog but got a %T", obj)
+ }
+
+ // Defaulting logic: add the "olm.operatorframework.io/metadata.name" label
+ if catalog.Labels == nil {
+ catalog.Labels = map[string]string{}
+ }
+ catalog.Labels[catalogdv1.MetadataNameLabel] = catalog.GetName()
+ log.Info("default", catalogdv1.MetadataNameLabel, catalog.Name, "labels", catalog.Labels)
+
+ return nil
+}
+
+// SetupWebhookWithManager sets up the webhook with the manager
+func (r *ClusterCatalog) SetupWebhookWithManager(mgr ctrl.Manager) error {
+ return ctrl.NewWebhookManagedBy(mgr).
+ For(&catalogdv1.ClusterCatalog{}).
+ WithDefaulter(r).
+ Complete()
+}
diff --git a/catalogd/internal/webhook/cluster_catalog_webhook_test.go b/catalogd/internal/webhook/cluster_catalog_webhook_test.go
new file mode 100644
index 000000000..33d07a833
--- /dev/null
+++ b/catalogd/internal/webhook/cluster_catalog_webhook_test.go
@@ -0,0 +1,106 @@
+package webhook
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+
+ catalogdv1 "github.com/operator-framework/operator-controller/catalogd/api/v1"
+)
+
+// Define a dummy struct that implements runtime.Object but isn't a ClusterCatalog
+type NotClusterCatalog struct {
+ metav1.TypeMeta
+ metav1.ObjectMeta
+}
+
+func (n *NotClusterCatalog) DeepCopyObject() runtime.Object {
+ return &NotClusterCatalog{}
+}
+
+func TestClusterCatalogDefaulting(t *testing.T) {
+ tests := map[string]struct {
+ clusterCatalog runtime.Object
+ expectedLabels map[string]string
+ expectError bool
+ errorMessage string
+ }{
+ "no labels provided, name label added": {
+ clusterCatalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-catalog",
+ },
+ },
+ expectedLabels: map[string]string{
+ "olm.operatorframework.io/metadata.name": "test-catalog",
+ },
+ expectError: false,
+ },
+ "labels already present, name label added": {
+ clusterCatalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-catalog",
+ Labels: map[string]string{
+ "existing": "label",
+ },
+ },
+ },
+ expectedLabels: map[string]string{
+ "olm.operatorframework.io/metadata.name": "test-catalog",
+ "existing": "label",
+ },
+ expectError: false,
+ },
+ "name label already present, no changes": {
+ clusterCatalog: &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-catalog",
+ Labels: map[string]string{
+ "olm.operatorframework.io/metadata.name": "existing-name",
+ },
+ },
+ },
+ expectedLabels: map[string]string{
+ "olm.operatorframework.io/metadata.name": "test-catalog", // Defaulting should still override this to match the object name
+ },
+ expectError: false,
+ },
+ "invalid object type, expect error": {
+ clusterCatalog: &NotClusterCatalog{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "NotClusterCatalog",
+ APIVersion: "v1",
+ },
+ },
+ expectedLabels: nil,
+ expectError: true,
+ errorMessage: "expected a ClusterCatalog but got a *webhook.NotClusterCatalog",
+ },
+ }
+
+ for name, tc := range tests {
+ t.Run(name, func(t *testing.T) {
+ // Arrange
+ clusterCatalogWrapper := &ClusterCatalog{}
+
+ // Act
+ err := clusterCatalogWrapper.Default(context.TODO(), tc.clusterCatalog)
+
+ // Assert
+ if tc.expectError {
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), tc.errorMessage)
+ } else {
+ assert.NoError(t, err)
+ if tc.expectedLabels != nil {
+ labels := tc.clusterCatalog.(*catalogdv1.ClusterCatalog).Labels
+ assert.Equal(t, tc.expectedLabels, labels)
+ }
+ }
+ })
+ }
+}
diff --git a/catalogd/pprof/README.md b/catalogd/pprof/README.md
new file mode 100644
index 000000000..e782ac60f
--- /dev/null
+++ b/catalogd/pprof/README.md
@@ -0,0 +1,195 @@
+## pprof
+
+> **Warning**
+> This pprof data is based on early versions of catalogd and has not been updated since. The data is likely not accurate anymore.
+> A decision about removing or updating this data will be made in the future.
+
+This folder contains some profiles that can be read using [pprof](https://github.com/google/pprof) to show how the CPU and memory utilization of the core Kubernetes apiserver and the custom catalogd apiserver is affected by the creation and reconciliation of the sample `Catalog` CR found at `../config/samples/core_v1_clustercatalog.yaml`.
+
+Instead of providing static screenshots (and losing the interactivity of `pprof`), each file with the `.pb` extension can be used to view the profiles that resulted from running `pprof` against the live processes.
+
+To view the `pprof` profiles in the most interactive way (or if you have no prior `pprof` experience), it is recommended to run:
+```
+go tool pprof -http=localhost: somefile.pb
+```
+
+This will start an interactive web UI for viewing the profile data of a specific file on `localhost:` (a random available port is chosen when none is specified). There are quite a few different ways this data can be viewed, so feel free to play around and find the view that gives you the most meaningful information.
+
+If you know your way around `pprof` you *should* be able to run any other variations of `pprof` with these files as well.
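+
+For example, two common non-interactive variations (`-top` and `-svg` are standard `pprof` output flags; `profile.svg` is just an example output name):
+```
+# print the functions that consumed the most resources
+go tool pprof -top somefile.pb
+
+# render the call graph to an SVG file (requires graphviz)
+go tool pprof -svg somefile.pb > profile.svg
+```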
+
+Here is a brief breakdown of what information is provided in each profile file in this directory:
+- `kubeapiserver_cpu_profile.pb` - This is the CPU utilization of the core kube-apiserver
+- `kubeapiserver_heap_profile.pb` - This is the Memory utilization of the core kube-apiserver
+- `catalogd_apiserver_cpu_profile.pb` - This is the CPU utilization of the custom catalogd apiserver
+- `catalogd_apiserver_heap_profile.pb` - This is the Memory utilization of the custom catalogd apiserver
+- `manager_cpu_profile.pb` - This is the CPU utilization of the Catalog controller (and other controllers associated with this manager).
+- `manager_heap_profile.pb` - This is the Memory utilization of the Catalog controller (and other controllers associated with this manager).
+- `kubeapiserver_alone_cpu_profile.pb` - This is the CPU utilization for the core kube-apiserver without running our custom apiserver
+- `kubeapiserver_alone_heap_profile.pb` - This is the Memory utilization for the core kube-apiserver without running our custom apiserver
+
+> **NOTE**: All profiles were collected as soon as possible after all child resources were created from the reconciliation of the sample `Catalog` CR.
+
+
+## Pprof Breakdown
+In this section, we break down how the core kube-apiserver's resource utilization is affected when running with and without the custom catalogd apiserver, in an effort to determine how beneficial it is to use a custom apiserver.
+
+> **NOTE**: All this information was compared by someone who is not very experienced with using `pprof`. If you find that any of these values are incorrect or the calculations don't seem to make sense, feel free to create an issue or open a PR to update these findings.
+
+### CPU Utilization
+
+| Metric | kube-apiserver alone | kube-apiserver w/ custom | Normalized Difference |
+| ------- | -------------------- | ------------------------ | ----------------------- |
+| cpu | 1.72s / 30s (5.73%) | 1.99s / 30.06s (6.62%) | 1720ms / 60.06s (2.86%) |
+
+The `Normalized Difference` Metric was evaluated by running:
+```
+go tool pprof -http=localhost:6060 -diff_base=pprof/kubeapiserver_alone_cpu_profile.pb -normalize pprof/kubeapiserver_cpu_profile.pb
+```
+This command will normalize the profiles to better compare the differences. In its simplest form, this difference was calculated as `pprof/kubeapiserver_cpu_profile.pb - pprof/kubeapiserver_alone_cpu_profile.pb` (the diffed profile minus the `-diff_base` profile).
+
+According to the `Normalized Difference`, there appears to be little to no difference in the amount of time the CPU is utilized (almost 0).
+
+### Memory Utilization
+
+| Metric | kube-apiserver alone | kube-apiserver w/ custom | Normalized Difference |
+| ------------- | -------------------- | ------------------------ | --------------------- |
+| inuse_space | 126.85MB | 139.67MB | -0.02kB, 1.7e-05% of 129891.07kB total |
+| inuse_objects | 721278 | 640819 | -9, 0.0012% of 721278 total |
+| alloc_space | 1924.76MB | 3455.75MB | 0, 2e-07% of 1970951.57kB total |
+| alloc_objects | 19717785 | 33134306 | 102, 0.00052% of 19717785 total |
+
+The `Normalized Difference` Metric was evaluated by running:
+```
+go tool pprof -http=localhost:6060 -diff_base=pprof/kubeapiserver_alone_heap_profile.pb -normalize pprof/kubeapiserver_heap_profile.pb
+```
+This command will normalize the profiles to better compare the differences. In its simplest form, this difference was calculated as `pprof/kubeapiserver_heap_profile.pb - pprof/kubeapiserver_alone_heap_profile.pb` (the diffed profile minus the `-diff_base` profile).
+
+According to the `Normalized Difference`, there appears to be:
+- An additional 0.02kB space used when in combination with the custom catalogd apiserver
+- An additional 9 objects used when in combination with the custom catalogd apiserver
+- No additional space allocated when in combination with the custom catalogd apiserver
+- 102 fewer objects allocated when in combination with the custom catalogd apiserver
+
+
+## Metric Server Analysis
+This section analyzes the on-cluster CPU/memory consumption of the pods corresponding to the core kube-apiserver, the catalogd apiserver, and the controller-manager.
+
+It is included because the pprof metrics don't necessarily show the whole picture. Two scenarios are covered for the core kube-apiserver:
+1. The CPU/memory consumption of the kube-apiserver pod without the catalogd apiserver running
+2. The CPU/memory consumption of the kube-apiserver pod with the catalogd apiserver running
+
+### Core kube-apiserver without catalogd apiserver
+
+![kube-apiserver (alone) metrics](images/kubeapiserver_alone_metrics.png)
+**TLDR**: CPU utilization spike of 0.156 cores and settles ~0.011 cores above prior utilization. Memory consumption increase of 22Mi.
+
+This image shows the spike in CPU utilization and the increase in Memory consumption. In this scenario, the command:
+```
+kubectl apply -f config/samples/core_v1_clustercatalog.yaml
+```
+was run right at 1:44 PM.
+
+The CPU spike lasted ~3 minutes and the values were:
+- 1:44PM - 0.067 cores
+- 1:45PM (PEAK) - 0.223 cores
+- 1:47PM - 0.078 cores
+
+With this, we can see that without the catalogd apiserver the core kube-apiserver had a CPU utilization spike of 0.156 cores and then settled at ~0.011 cores above what the utilization was prior to the reconciliation of the sample `Catalog` CR.
+
+The memory consumption increased over the span of ~3 minutes and then stabilized. The values were:
+- 1:44PM - 289Mi
+- 1:45PM - 305Mi
+- 1:47PM - 311Mi
+
+With this, we can see that without the catalogd apiserver the core kube-apiserver had a memory consumption increase of 22Mi.
+
+### Core kube-apiserver with catalogd apiserver
+
+#### kube-apiserver:
+
+![kube-apiserver metrics](images/kubeapiserver_metrics.png)
+**TLDR**: CPU utilization spike of 0.125 cores and settles ~0.001 cores above prior utilization. Memory consumption increase of ~26Mi.
+
+This image shows the spike in CPU utilization and the increase in Memory consumption. In this scenario, the command:
+```
+kubectl apply -f config/samples/core_v1_clustercatalog.yaml
+```
+was run right at 3:06 PM.
+
+The CPU spike lasted ~3 minutes and the values were:
+- 3:06PM - 0.09 cores
+- 3:07PM - 0.109 cores
+- 3:08 PM (PEAK) - 0.215 cores
+- 3:09 PM - 0.091 cores
+
+With this, we can see that with the catalogd apiserver the core kube-apiserver had a CPU utilization spike of 0.125 cores and then settled at ~0.001 cores above what the utilization was prior to the reconciliation of the sample `Catalog` CR.
+
+The memory consumption increased over the span of ~3 minutes and then stabilized. The values were:
+- 3:06PM - 337Mi
+- 3:07PM - 361Mi
+- 3:08 PM - 363Mi
+- 3:09 PM - 363Mi
+
+With this, we can see that with the catalogd apiserver the core kube-apiserver had a memory consumption increase of ~26Mi.
+
+#### catalogd apiserver
+
+![catalogd apiserver metrics](images/customapiserver_metrics.png)
+**TLDR**: potential increase of ~0.012 cores, but more likely ~0.002 cores. Memory consumption increase of ~0.1Mi
+
+This image shows the spike in CPU utilization and the increase in Memory consumption. In this scenario, the command:
+```
+kubectl apply -f config/samples/core_v1_clustercatalog.yaml
+```
+was run right at 3:06 PM.
+
+The CPU consumption increase lasted ~3 minutes and the values were:
+- 3:06PM - 0.002 cores (there was a weird dip right here from ~0.012 cores at 3:05PM)
+- 3:07PM - 0.01 cores
+- 3:08 PM - 0.012 cores
+- 3:09 PM - 0.014 cores
+
+We can see that our custom apiserver had a CPU utilization increase of ~0.012 cores. If we take into consideration the weird dip and place the starting value at ~0.012 cores, the CPU utilization increase is only ~0.002 cores.
+
+The memory consumption increased over the span of ~3 minutes. The values were:
+- 3:06PM - 77.9Mi
+- 3:07PM - 77.9Mi
+- 3:08 PM - 77.9Mi
+- 3:09 PM - 78Mi (stable around this after)
+
+We can see that our custom apiserver had a memory consumption increase of ~0.1Mi.
+
+#### Summary
+Comparing the results of the kube-apiserver running both with and without the catalogd apiserver, we can see that:
+- The kube-apiserver CPU utilization spikes 0.031 cores less and settles ~0.01 cores less when running in combination with the catalogd apiserver
+- The kube-apiserver consumes ~4Mi more memory when running in combination with the catalogd apiserver
+
+
+Overall, when running both the kube-apiserver and the catalogd apiserver the total CPU utilization remains roughly the same while the overall memory consumption increases ~73Mi.
+
+### controller-manager metrics
+
+![controller-manager metrics](images/controller_metrics.png)
+**TLDR**: CPU spike of 0.288 cores, settling ~0.003 cores above the previous consumption. Memory consumption increase of ~232.2Mi.
+
+This image shows the spike in CPU utilization and the increase in Memory consumption. In this scenario, the command:
+```
+kubectl apply -f config/samples/core_v1_clustercatalog.yaml
+```
+was run right at 3:06 PM.
+
+The CPU spike lasted ~3 minutes and the values were:
+- 3:06PM - 0.001 cores
+- 3:07PM (PEAK) - 0.289 cores
+- 3:08PM - 0.177 cores
+- 3:09PM - 0.004 cores
+
+We can see that the controller-manager had a CPU utilization spike of 0.288 cores and then settled ~0.003 cores above the previous consumption. This is likely due to the large number of creation requests that needed to be made (~170 `Package` and ~1301 `BundleMetadata` CR creation requests).
+
+The memory consumption increased over the span of ~3 minutes. The values were:
+- 3:06PM - 49.8Mi
+- 3:07PM - 248Mi
+- 3:08PM - 282Mi
+- 3:09PM - 282Mi
+
+We can see that our controller-manager had a memory consumption increase of ~232.2Mi. This is likely due to the fact that the cache was populated with ~170 `Package` CRs and ~1301 `BundleMetadata` CRs.
diff --git a/catalogd/pprof/catalogd_apiserver_cpu_profile.pb b/catalogd/pprof/catalogd_apiserver_cpu_profile.pb
new file mode 100644
index 000000000..8ffee8b51
Binary files /dev/null and b/catalogd/pprof/catalogd_apiserver_cpu_profile.pb differ
diff --git a/catalogd/pprof/catalogd_apiserver_heap_profile.pb b/catalogd/pprof/catalogd_apiserver_heap_profile.pb
new file mode 100644
index 000000000..3b422419d
Binary files /dev/null and b/catalogd/pprof/catalogd_apiserver_heap_profile.pb differ
diff --git a/catalogd/pprof/images/controller_metrics.png b/catalogd/pprof/images/controller_metrics.png
new file mode 100644
index 000000000..4d842fdbc
Binary files /dev/null and b/catalogd/pprof/images/controller_metrics.png differ
diff --git a/catalogd/pprof/images/customapiserver_metrics.png b/catalogd/pprof/images/customapiserver_metrics.png
new file mode 100644
index 000000000..970e85142
Binary files /dev/null and b/catalogd/pprof/images/customapiserver_metrics.png differ
diff --git a/catalogd/pprof/images/kubeapiserver_alone_metrics.png b/catalogd/pprof/images/kubeapiserver_alone_metrics.png
new file mode 100644
index 000000000..ff3cd35c6
Binary files /dev/null and b/catalogd/pprof/images/kubeapiserver_alone_metrics.png differ
diff --git a/catalogd/pprof/images/kubeapiserver_metrics.png b/catalogd/pprof/images/kubeapiserver_metrics.png
new file mode 100644
index 000000000..6c12fe926
Binary files /dev/null and b/catalogd/pprof/images/kubeapiserver_metrics.png differ
diff --git a/catalogd/pprof/kind.yaml b/catalogd/pprof/kind.yaml
new file mode 100644
index 000000000..02fcde4a4
--- /dev/null
+++ b/catalogd/pprof/kind.yaml
@@ -0,0 +1,11 @@
+# A KinD configuration to enable profiling on the core apiserver
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+ kubeadmConfigPatches:
+ - |
+ kind: ClusterConfiguration
+ apiServer:
+ extraArgs:
+ profiling: "true"
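+# To create a cluster with this config (illustrative, run from the catalogd directory):
+#   kind create cluster --config pprof/kind.yaml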
diff --git a/catalogd/pprof/kubeapiserver_alone_cpu_profile.pb b/catalogd/pprof/kubeapiserver_alone_cpu_profile.pb
new file mode 100644
index 000000000..e4c88c752
Binary files /dev/null and b/catalogd/pprof/kubeapiserver_alone_cpu_profile.pb differ
diff --git a/catalogd/pprof/kubeapiserver_alone_heap_profile.pb b/catalogd/pprof/kubeapiserver_alone_heap_profile.pb
new file mode 100644
index 000000000..1bc87a150
Binary files /dev/null and b/catalogd/pprof/kubeapiserver_alone_heap_profile.pb differ
diff --git a/catalogd/pprof/kubeapiserver_cpu_profile.pb b/catalogd/pprof/kubeapiserver_cpu_profile.pb
new file mode 100644
index 000000000..b5cf7fa78
Binary files /dev/null and b/catalogd/pprof/kubeapiserver_cpu_profile.pb differ
/grpc/internal/transport/controlbuf.go2google.golang.org/grpc/internal/transport.newHTTP2Client.func32math/big.mulAddVWW2time.Now2/usr/local/go/src/time/time.go2reflect.unsafe_New2reflect.copyVal2"/usr/local/go/src/reflect/value.go2reflect.(*MapIter).Key2encoding/json.mapEncoder.encode2)/usr/local/go/src/encoding/json/encode.go2"encoding/json.structEncoder.encode2encoding/json.ptrEncoder.encode2)encoding/json.(*encodeState).reflectValue2$encoding/json.(*encodeState).marshal2encoding/json.Marshal2@k8s.io/kube-openapi/pkg/handler3.(*OpenAPIService).getGroupBytes22vendor/k8s.io/kube-openapi/pkg/handler3/handler.go2Bk8s.io/kube-openapi/pkg/handler3.(*OpenAPIService).HandleDiscovery2_k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator.(*Downloader).handlerWithUser.func12Pvendor/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/downloader.go2*google.golang.org/grpc/internal/status.New27vendor/google.golang.org/grpc/internal/status/status.go2!google.golang.org/grpc/status.New2.vendor/google.golang.org/grpc/status/status.go2Ggoogle.golang.org/grpc/internal/transport.(*http2Client).operateHeaders2runtime.newproc.func12runtime.systemstack2runtime.newproc24golang.org/x/net/http2.(*serverConn).startFrameWrite27golang.org/x/net/http2.(*serverConn).scheduleFrameWrite2/golang.org/x/net/http2.(*serverConn).writeFrame2*golang.org/x/net/http2.(*serverConn).serve2*golang.org/x/net/http2.(*Server).ServeConn2,golang.org/x/net/http2.ConfigureServer.func12runtime.goexit02runtime.pcvalue2#/usr/local/go/src/runtime/symtab.go2runtime.funcspdelta2time.Time.AppendFormat2 /usr/local/go/src/time/format.go2time.Time.Format2net/http.setLastModified2 /usr/local/go/src/net/http/fs.go2net/http.serveContent2net/http.ServeContent2Wk8s.io/kube-openapi/pkg/handler.(*OpenAPIService).RegisterOpenAPIVersionedService.func121vendor/k8s.io/kube-openapi/pkg/handler/handler.go2runtime.eqslice2"/usr/local/go/src/runtime/mprof.go2runtime.stkbucket2runtime.mProf_Malloc2runtime.profilealloc2runtime.makeslice2"/usr/local/go/src/runtime/slice.go2 
path.Join2/usr/local/go/src/path/path.go2;k8s.io/kube-openapi/pkg/handler3.constructServerRelativeURL2lk8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset.(*queueSet).removeTimedOutRequestsFromQueueLocked2qk8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset.(*queueSet).timeoutOldRequestsAndRejectOrEnqueueLocked2Sk8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset.(*queueSet).StartRequest2Fk8s.io/apiserver/pkg/util/flowcontrol.(*configController).startRequest2>vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go2Dk8s.io/apimachinery/pkg/apis/meta/v1.(*ManagedFieldsEntry).Unmarshal2;vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go2k8s.io/apiserver/pkg/endpoints/handlers.UpdateResource.func1.128vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go2Lk8s.io/apiserver/pkg/registry/rest.(*defaultUpdatedObjectInfo).UpdatedObject23vendor/k8s.io/apiserver/pkg/registry/rest/update.go2Dk8s.io/apiserver/pkg/registry/generic/registry.(*Store).Update.func127k8s.io/apiserver/pkg/storage/etcd3.(*store).updateState2k8s.io/apiserver/pkg/storage/cacher.(*Cacher).GuaranteedUpdate2Uk8s.io/apiserver/pkg/registry/generic/registry.(*DryRunnableStorage).GuaranteedUpdate2>k8s.io/apiserver/pkg/registry/generic/registry.(*Store).Update2>k8s.io/apiserver/pkg/endpoints/handlers.UpdateResource.func1.42>k8s.io/apiserver/pkg/endpoints/handlers.UpdateResource.func1.52Dk8s.io/apiserver/pkg/endpoints/handlers/finisher.finishRequest.func12Cvendor/k8s.io/apiserver/pkg/endpoints/handlers/finisher/finisher.go2+google.golang.org/grpc.(*csAttempt).recvMsg24google.golang.org/grpc.(*clientStream).RecvMsg.func12runtime.epollwait2)k8s.io/client-go/tools/cache.watchHandler20vendor/k8s.io/client-go/tools/cache/reflector.go26k8s.io/client-go/tools/cache.(*Reflector).ListAndWatch2:k8s.io/apiserver/pkg/storage/cacher.(*Cacher).startCaching2?k8s.io/apiserver/pkg/storage/cacher.NewCacherFromConfig.func1.12=k8s.io/apiserver/pkg/storage/cacher.NewCacherFromConfig.func12sort.Search2;k8s.io/apiserver/pkg/admission/metrics.(*metricSet).observe28vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go2Uk8s.io/apiserver/pkg/admission/metrics.(*AdmissionMetrics).ObserveAdmissionController2Ek8s.io/apiserver/pkg/admission/metrics.pluginHandlerWithMetrics.Admit2:k8s.io/apiserver/pkg/admission.chainAdmissionHandler.Admit2.vendor/k8s.io/apiserver/pkg/admission/chain.go21k8s.io/apiserver/pkg/admission.(*reinvoker).Admit25vendor/k8s.io/apiserver/pkg/admission/reinvocation.go24k8s.io/apiserver/pkg/admission.(*auditHandler).Admit2.vendor/k8s.io/apiserver/pkg/admission/audit.go2hk8s.io/apiserver/pkg/endpoints/handlers/fieldmanager.(*managedFieldsValidatingAdmissionController).Admit2Hvendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/admission.go2>k8s.io/apiserver/pkg/endpoints/handlers.UpdateResource.func1.22golang.org/x/net/http2.(*serverConn).newWriterAndRequestNoBody28golang.org/x/net/http2.(*serverConn).newWriterAndRequest25gopkg.in/square/go-jose.v2/json.(*decodeState).object20vendor/gopkg.in/square/go-jose.v2/json/decode.go24gopkg.in/square/go-jose.v2/json.(*decodeState).value28gopkg.in/square/go-jose.v2/json.(*decodeState).unmarshal2)gopkg.in/square/go-jose.v2/json.Unmarshal25gopkg.in/square/go-jose.v2/jwt.(*JSONWebToken).Claims2,vendor/gopkg.in/square/go-jose.v2/jwt/jwt.go2Ok8s.io/kubernetes/pkg/serviceaccount.(*jwtTokenAuthenticator).AuthenticateToken2pkg/serviceaccount/jwt.go2Zk8s.io/apiserver/pkg/authentication/token/union.(*unionAuthTokenHandler).AuthenticateToken2?vendor/k8s.io/apiserver/pkg/aut
hentication/token/union/union.go2ek8s.io/apiserver/pkg/authentication/token/cache.(*cachedTokenAuthenticator).doAuthenticateToken.func124golang.org/x/sync/singleflight.(*Group).doCall.func225vendor/golang.org/x/sync/singleflight/singleflight.go2.golang.org/x/sync/singleflight.(*Group).doCall2Wk8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator.(*Downloader).OpenAPIV3Root2ek8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator.(*specProxier).updateAPIServiceSpecLocked2Pvendor/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/aggregator.go2_k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator.(*specProxier).UpdateAPIServiceSpec2Nk8s.io/kube-aggregator/pkg/controllers/openapiv3.(*AggregationController).sync2runtime.usleep2runtime.runqgrab2runtime.runqsteal2;google.golang.org/grpc/balancer/roundrobin.(*rrPicker).Pick2?vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go2?sigs.k8s.io/structured-merge-diff/v4/value.(*freelist).allocate2>vendor/sigs.k8s.io/structured-merge-diff/v4/value/allocator.go2Qsigs.k8s.io/structured-merge-diff/v4/value.(*freelistAllocator).allocValueReflect2Esigs.k8s.io/structured-merge-diff/v4/value.structReflect.IterateUsing2Rsigs.k8s.io/structured-merge-diff/v4/typed.(*validatingObjectWalker).visitMapItems2=vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go2Jsigs.k8s.io/structured-merge-diff/v4/typed.(*validatingObjectWalker).doMap28sigs.k8s.io/structured-merge-diff/v4/typed.resolveSchema2Msigs.k8s.io/structured-merge-diff/v4/typed.(*validatingObjectWalker).validate2>sigs.k8s.io/structured-merge-diff/v4/typed.TypedValue.Validate22sigs.k8s.io/structured-merge-diff/v4/typed.AsTyped2Gsigs.k8s.io/structured-merge-diff/v4/typed.ParseableType.FromStructured2;vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go2Sk8s.io/apiserver/pkg/endpoints/handlers/fieldmanager.(*typeConverter).ObjectToTyped2Lvendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/typeconverter.go2runtime.findfunc2
runtime.gfget2runtime.newproc12>k8s.io/apiserver/pkg/endpoints/handlers/finisher.finishRequest2>k8s.io/apiserver/pkg/endpoints/handlers/finisher.FinishRequest227k8s.io/apimachinery/pkg/conversion.(*Converter).Convert26vendor/k8s.io/apimachinery/pkg/conversion/converter.go21k8s.io/apimachinery/pkg/runtime.(*Scheme).Convert20vendor/k8s.io/apimachinery/pkg/runtime/scheme.go20go.etcd.io/etcd/client/v3.(*watchGrpcStream).run2runtime.nanotime12runtime.nanotime2:vendor/golang.org/x/crypto/cryptobyte.(*String).ReadUint162A/usr/local/go/src/vendor/golang.org/x/crypto/cryptobyte/string.go2
runtime.lock22runtime.lockWithRank2runtime.lock2runtime.sellock2!runtime.(*mcache).prepareForSweep2runtime.acquirep2;go.etcd.io/etcd/client/v3.(*watchGrpcStream).serveSubstream2runtime.(*randomEnum).next2context.WithValue2Agoogle.golang.org/grpc/internal/credentials.NewRequestInfoContext2Avendor/google.golang.org/grpc/internal/credentials/credentials.go2Kgoogle.golang.org/grpc/internal/transport.(*http2Client).createHeaderFields21k8s.io/apiserver/pkg/storage/etcd3.(*store).Count23k8s.io/apiserver/pkg/storage/cacher.(*Cacher).Count2Jk8s.io/apiserver/pkg/registry/generic/registry.(*DryRunnableStorage).Count2Qk8s.io/apiserver/pkg/registry/generic/registry.(*Store).startObservingCount.func12runtime.convTstring28go.etcd.io/etcd/api/v3/etcdserverpb.(*RangeRequest).Size2;go.etcd.io/etcd/api/v3/etcdserverpb.(*RangeRequest).Marshal26google.golang.org/protobuf/internal/impl.legacyMarshal2Avendor/google.golang.org/protobuf/internal/impl/legacy_message.go27google.golang.org/protobuf/proto.MarshalOptions.marshal21vendor/google.golang.org/protobuf/proto/encode.go2=google.golang.org/protobuf/proto.MarshalOptions.MarshalAppend2.github.com/golang/protobuf/proto.marshalAppend2/vendor/github.com/golang/protobuf/proto/wire.go2(github.com/golang/protobuf/proto.Marshal23google.golang.org/grpc/encoding/proto.codec.Marshal25vendor/google.golang.org/grpc/encoding/proto/proto.go2google.golang.org/grpc.encode2)vendor/google.golang.org/grpc/rpc_util.go2!google.golang.org/grpc.prepareMsg2.google.golang.org/grpc.(*clientStream).SendMsg2sync.(*Pool).Get2/usr/local/go/src/sync/pool.go2Ek8s.io/apimachinery/pkg/apis/meta/v1.(*ObjectMeta).SetResourceVersion23vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go2k8s.io/apiserver/pkg/authentication/authenticator.authenticate2hk8s.io/apiserver/pkg/authentication/authenticator.(*audAgnosticRequestAuthenticator).AuthenticateRequest2-github.com/NYTimes/gziphandler.parseEncodings2runtime.slicebytetostring2#/usr/local/go/src/runtime/string.go2strconv.formatBits2!/usr/local/go/src/strconv/itoa.go2strconv.FormatUint2runtime.add12runtime.newarray2runtime.makeBucketArray2runtime.hashGrow2runtime.mapassign2Ok8s.io/apimachinery/third_party/forked/golang/reflect.Equalities.deepValueEqual2Jvendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go2Jk8s.io/apimachinery/third_party/forked/golang/reflect.Equalities.deepEqual2ck8s.io/apimachinery/third_party/forked/golang/reflect.Equalities.DeepEqualWithNilDifferentFromEmpty2]k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager.IgnoreManagedFieldsTimestampsTransformer2Gvendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/equality.go2runtime.findmoduledatap2Dk8s.io/component-base/metrics.(*GaugeVecWithContext).WithLabelValues2Gk8s.io/apiserver/pkg/authentication/token/cache.statsCollector.blocking2runtime.releaseSudog2Hgoogle.golang.org/grpc/internal/transport.(*recvBufferReader).readClient2Bgoogle.golang.org/grpc/internal/transport.(*recvBufferReader).Read2Agoogle.golang.org/grpc/internal/transport.(*transportReader).Read28google.golang.org/grpc/internal/transport.(*Stream).Read2(google.golang.org/grpc.(*parser).recvMsg2(google.golang.org/grpc.recvAndDecompress2google.golang.org/grpc.recv2runtime.read2crypto/tls.(*Conn).writeRecord2math/big.basicSqr2math/big.nat.sqrHŻPoZ`p
\ No newline at end of file
diff --git a/catalogd/pprof/kubeapiserver_heap_profile.pb b/catalogd/pprof/kubeapiserver_heap_profile.pb
new file mode 100644
index 000000000..94753f97f
Binary files /dev/null and b/catalogd/pprof/kubeapiserver_heap_profile.pb differ
diff --git a/catalogd/pprof/manager_cpu_profile.pb b/catalogd/pprof/manager_cpu_profile.pb
new file mode 100644
index 000000000..ae48d4a4b
Binary files /dev/null and b/catalogd/pprof/manager_cpu_profile.pb differ
diff --git a/catalogd/pprof/manager_heap_profile.pb b/catalogd/pprof/manager_heap_profile.pb
new file mode 100644
index 000000000..b98dac169
Binary files /dev/null and b/catalogd/pprof/manager_heap_profile.pb differ
diff --git a/catalogd/scripts/install.tpl.sh b/catalogd/scripts/install.tpl.sh
new file mode 100644
index 000000000..b71892439
--- /dev/null
+++ b/catalogd/scripts/install.tpl.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+set -euo pipefail
+IFS=$'\n\t'
+
+catalogd_manifest=$MANIFEST
+
+if [[ -z "$catalogd_manifest" ]]; then
+ echo "Error: Missing required MANIFEST variable"
+ exit 1
+fi
+
+cert_mgr_version=$CERT_MGR_VERSION
+default_catalogs=$DEFAULT_CATALOGS
+
+if [[ -z "$default_catalogs" || -z "$cert_mgr_version" ]]; then
+ err="Error: Missing component value(s) for: "
+ if [[ -z "$default_catalogs" ]]; then
+ err+="default cluster catalogs "
+ fi
+ if [[ -z "$cert_mgr_version" ]]; then
+ err+="cert-manager version "
+ fi
+ echo "$err"
+ exit 1
+fi
+
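+# kubectl_wait waits for the given object (e.g. deployment/foo) in the given namespace
+# to report condition Available, failing if the timeout elapses first.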
+function kubectl_wait() {
+ namespace=$1
+ runtime=$2
+ timeout=$3
+
+ kubectl wait --for=condition=Available --namespace="${namespace}" "${runtime}" --timeout="${timeout}"
+}
+
+kubectl apply -f "https://github.com/cert-manager/cert-manager/releases/download/${cert_mgr_version}/cert-manager.yaml"
+kubectl_wait "cert-manager" "deployment/cert-manager-cainjector" "60s"
+kubectl_wait "cert-manager" "deployment/cert-manager-webhook" "60s"
+kubectl_wait "cert-manager" "deployment/cert-manager" "60s"
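+# wait for cert-manager to inject the CA bundle into its webhook configurations so the
+# webhooks are actually usable before applying manifests that depend on them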
+kubectl wait mutatingwebhookconfigurations/cert-manager-webhook --for=jsonpath='{.webhooks[0].clientConfig.caBundle}' --timeout=60s
+kubectl wait validatingwebhookconfigurations/cert-manager-webhook --for=jsonpath='{.webhooks[0].clientConfig.caBundle}' --timeout=60s
+kubectl apply -f "${catalogd_manifest}"
+kubectl_wait "olmv1-system" "deployment/catalogd-controller-manager" "60s"
+
+kubectl apply -f "${default_catalogs}"
+kubectl wait --for=condition=Serving "clustercatalog/operatorhubio" --timeout="60s"
\ No newline at end of file
diff --git a/catalogd/test/e2e/e2e_suite_test.go b/catalogd/test/e2e/e2e_suite_test.go
new file mode 100644
index 000000000..0a8970a1f
--- /dev/null
+++ b/catalogd/test/e2e/e2e_suite_test.go
@@ -0,0 +1,49 @@
+package e2e
+
+import (
+ "fmt"
+ "os"
+ "testing"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/client-go/rest"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ catalogdv1 "github.com/operator-framework/operator-controller/catalogd/api/v1"
+)
+
+var (
+ cfg *rest.Config
+ c client.Client
+ err error
+ kubeClient kubernetes.Interface
+)
+
+func TestE2E(t *testing.T) {
+ _, err := ctrl.GetConfig()
+ if err != nil {
+ fmt.Println("Error: Could not get current Kubernetes context. Verify the cluster configuration")
+		os.Exit(1)
+ }
+ RegisterFailHandler(Fail)
+ SetDefaultEventuallyTimeout(1 * time.Minute)
+ SetDefaultEventuallyPollingInterval(1 * time.Second)
+ RunSpecs(t, "E2E Suite")
+}
+
+var _ = BeforeSuite(func() {
+ cfg = ctrl.GetConfigOrDie()
+
+ sch := scheme.Scheme
+ Expect(catalogdv1.AddToScheme(sch)).To(Succeed())
+ c, err = client.New(cfg, client.Options{Scheme: sch})
+ Expect(err).To(Not(HaveOccurred()))
+ kubeClient, err = kubernetes.NewForConfig(cfg)
+ Expect(err).ToNot(HaveOccurred())
+})
diff --git a/catalogd/test/e2e/metrics_endpoint_test.go b/catalogd/test/e2e/metrics_endpoint_test.go
new file mode 100644
index 000000000..803ffaf28
--- /dev/null
+++ b/catalogd/test/e2e/metrics_endpoint_test.go
@@ -0,0 +1,127 @@
+package e2e
+
+import (
+ "bytes"
+ "io"
+ "os/exec"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+//nolint:gosec
+// TestCatalogdMetricsExportedEndpoint verifies that the metrics endpoint for the catalogd
+// is exported correctly and accessible by authorized users through RBAC and a ServiceAccount token.
+// The test performs the following steps:
+// 1. Creates a ClusterRoleBinding to grant necessary permissions for accessing metrics.
+// 2. Generates a ServiceAccount token for authentication.
+// 3. Deploys a curl pod to interact with the metrics endpoint.
+// 4. Waits for the curl pod to become ready.
+// 5. Executes a curl command from the pod to validate the metrics endpoint.
+// 6. Cleans up all resources created during the test, such as the ClusterRoleBinding and curl pod.
+func TestCatalogdMetricsExportedEndpoint(t *testing.T) {
+ var (
+ token string
+ curlPod = "curl-metrics"
+ client = ""
+ clients = []string{"kubectl", "oc"}
+ )
+
+ t.Log("Looking for k8s client")
+ for _, c := range clients {
+ // Would prefer to use `command -v`, but even that may not be installed!
+ err := exec.Command(c, "version", "--client").Run()
+ if err == nil {
+ client = c
+ break
+ }
+ }
+ if client == "" {
+ t.Fatal("k8s client not found")
+ }
+ t.Logf("Using %q as k8s client", client)
+
+ t.Log("Determining catalogd namespace")
+ cmd := exec.Command(client, "get", "pods", "--all-namespaces", "--selector=control-plane=catalogd-controller-manager", "--output=jsonpath={.items[0].metadata.namespace}")
+ output, err := cmd.CombinedOutput()
+	require.NoError(t, err, "Error determining catalogd namespace: %s", string(output))
+ namespace := string(output)
+ if namespace == "" {
+ t.Fatal("No catalogd namespace found")
+ }
+ t.Logf("Using %q as catalogd namespace", namespace)
+
+ t.Log("Creating ClusterRoleBinding for metrics access")
+ cmd = exec.Command(client, "create", "clusterrolebinding", "catalogd-metrics-binding",
+ "--clusterrole=catalogd-metrics-reader",
+ "--serviceaccount="+namespace+":catalogd-controller-manager")
+ output, err = cmd.CombinedOutput()
+ require.NoError(t, err, "Error creating ClusterRoleBinding: %s", string(output))
+
+ defer func() {
+ t.Log("Cleaning up ClusterRoleBinding")
+ _ = exec.Command(client, "delete", "clusterrolebinding", "catalogd-metrics-binding", "--ignore-not-found=true").Run()
+ }()
+
+ t.Log("Creating service account token for authentication")
+ tokenCmd := exec.Command(client, "create", "token", "catalogd-controller-manager", "-n", namespace)
+ tokenOutput, tokenCombinedOutput, err := stdoutAndCombined(tokenCmd)
+ require.NoError(t, err, "Error creating token: %s", string(tokenCombinedOutput))
+ token = string(bytes.TrimSpace(tokenOutput))
+
+ t.Log("Creating a pod to run curl commands")
+ cmd = exec.Command(client, "run", curlPod,
+ "--image=curlimages/curl:7.87.0", "-n", namespace,
+ "--restart=Never",
+ "--overrides", `{
+ "spec": {
+ "containers": [{
+ "name": "curl",
+ "image": "curlimages/curl:7.87.0",
+ "command": ["sh", "-c", "sleep 3600"],
+ "securityContext": {
+ "allowPrivilegeEscalation": false,
+ "capabilities": {
+ "drop": ["ALL"]
+ },
+ "runAsNonRoot": true,
+ "runAsUser": 1000,
+ "seccompProfile": {
+ "type": "RuntimeDefault"
+ }
+ }
+ }],
+ "serviceAccountName": "catalogd-controller-manager"
+ }
+ }`)
+ output, err = cmd.CombinedOutput()
+ require.NoError(t, err, "Error creating curl pod: %s", string(output))
+
+ defer func() {
+ t.Log("Cleaning up curl pod")
+ _ = exec.Command(client, "delete", "pod", curlPod, "-n", namespace, "--ignore-not-found=true").Run()
+ }()
+
+ t.Log("Waiting for the curl pod to become ready")
+ waitCmd := exec.Command(client, "wait", "--for=condition=Ready", "pod", curlPod, "-n", namespace, "--timeout=60s")
+ waitOutput, waitErr := waitCmd.CombinedOutput()
+ require.NoError(t, waitErr, "Error waiting for curl pod to be ready: %s", string(waitOutput))
+
+ t.Log("Validating the metrics endpoint")
+	metricsURL := "https://catalogd-service." + namespace + ".svc.cluster.local:7443/metrics"
+ curlCmd := exec.Command(client, "exec", curlPod, "-n", namespace, "--",
+ "curl", "-v", "-k", "-H", "Authorization: Bearer "+token, metricsURL)
+ output, err = curlCmd.CombinedOutput()
+ require.NoError(t, err, "Error calling metrics endpoint: %s", string(output))
+ require.Contains(t, string(output), "200 OK", "Metrics endpoint did not return 200 OK")
+}
+
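+// stdoutAndCombined runs cmd and returns its stdout by itself alongside stdout and stderr
+// interleaved, so callers can parse clean output while still logging everything on failure.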
+func stdoutAndCombined(cmd *exec.Cmd) ([]byte, []byte, error) {
+ var outOnly bytes.Buffer
+ var outAndErr bytes.Buffer
+ allWriter := io.MultiWriter(&outOnly, &outAndErr)
+ cmd.Stderr = &outAndErr
+ cmd.Stdout = allWriter
+ err := cmd.Run()
+ return outOnly.Bytes(), outAndErr.Bytes(), err
+}
diff --git a/catalogd/test/e2e/unpack_test.go b/catalogd/test/e2e/unpack_test.go
new file mode 100644
index 000000000..a00200703
--- /dev/null
+++ b/catalogd/test/e2e/unpack_test.go
@@ -0,0 +1,109 @@
+package e2e
+
+import (
+ "context"
+ "os"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ "github.com/google/go-cmp/cmp"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+
+ catalogdv1 "github.com/operator-framework/operator-controller/catalogd/api/v1"
+)
+
+const (
+ catalogRefEnvVar = "TEST_CATALOG_IMAGE"
+ catalogName = "test-catalog"
+ pkg = "prometheus"
+ version = "0.47.0"
+ channel = "beta"
+ bundle = "prometheus-operator.0.47.0"
+ bundleImage = "localhost/testdata/bundles/registry-v1/prometheus-operator:v0.47.0"
+)
+
+// catalogImageRef returns the image reference for the test catalog image, defaulting to the value of the environment
+// variable TEST_CATALOG_IMAGE if set, falling back to docker-registry.catalogd-e2e.svc:5000/test-catalog:e2e otherwise.
+func catalogImageRef() string {
+ if s := os.Getenv(catalogRefEnvVar); s != "" {
+ return s
+ }
+
+ return "docker-registry.catalogd-e2e.svc:5000/test-catalog:e2e"
+}
+
+var _ = Describe("ClusterCatalog Unpacking", func() {
+ var (
+ ctx context.Context
+ catalog *catalogdv1.ClusterCatalog
+ )
+ When("A ClusterCatalog is created", func() {
+ BeforeEach(func() {
+ ctx = context.Background()
+ var err error
+
+ catalog = &catalogdv1.ClusterCatalog{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: catalogName,
+ },
+ Spec: catalogdv1.ClusterCatalogSpec{
+ Source: catalogdv1.CatalogSource{
+ Type: catalogdv1.SourceTypeImage,
+ Image: &catalogdv1.ImageSource{
+ Ref: catalogImageRef(),
+ },
+ },
+ },
+ }
+
+ err = c.Create(ctx, catalog)
+ Expect(err).ToNot(HaveOccurred())
+ })
+
+ It("Successfully unpacks catalog contents", func() {
+			By("Ensuring ClusterCatalog has Status.Condition of Progressing with a status == True and reason == Succeeded")
+ Eventually(func(g Gomega) {
+ err := c.Get(ctx, types.NamespacedName{Name: catalog.Name}, catalog)
+ g.Expect(err).ToNot(HaveOccurred())
+ cond := meta.FindStatusCondition(catalog.Status.Conditions, catalogdv1.TypeProgressing)
+ g.Expect(cond).ToNot(BeNil())
+ g.Expect(cond.Status).To(Equal(metav1.ConditionTrue))
+ g.Expect(cond.Reason).To(Equal(catalogdv1.ReasonSucceeded))
+ }).Should(Succeed())
+
+ By("Checking that it has an appropriate name label")
+ Expect(catalog.ObjectMeta.Labels).To(Not(BeNil()))
+ Expect(catalog.ObjectMeta.Labels).To(Not(BeEmpty()))
+ Expect(catalog.ObjectMeta.Labels).To(HaveKeyWithValue("olm.operatorframework.io/metadata.name", catalogName))
+
+ By("Making sure the catalog content is available via the http server")
+ actualFBC, err := ReadTestCatalogServerContents(ctx, catalog, c, kubeClient)
+ Expect(err).To(Not(HaveOccurred()))
+
+ expectedFBC, err := os.ReadFile("../../testdata/catalogs/test-catalog/expected_all.json")
+ Expect(err).To(Not(HaveOccurred()))
+ Expect(cmp.Diff(expectedFBC, actualFBC)).To(BeEmpty())
+
+ By("Ensuring ClusterCatalog has Status.Condition of Type = Serving with a status == True")
+ Eventually(func(g Gomega) {
+ err := c.Get(ctx, types.NamespacedName{Name: catalog.Name}, catalog)
+ g.Expect(err).ToNot(HaveOccurred())
+ cond := meta.FindStatusCondition(catalog.Status.Conditions, catalogdv1.TypeServing)
+ g.Expect(cond).ToNot(BeNil())
+ g.Expect(cond.Status).To(Equal(metav1.ConditionTrue))
+ g.Expect(cond.Reason).To(Equal(catalogdv1.ReasonAvailable))
+ }).Should(Succeed())
+ })
+ AfterEach(func() {
+ Expect(c.Delete(ctx, catalog)).To(Succeed())
+ Eventually(func(g Gomega) {
+ err = c.Get(ctx, types.NamespacedName{Name: catalog.Name}, &catalogdv1.ClusterCatalog{})
+ g.Expect(errors.IsNotFound(err)).To(BeTrue())
+ }).Should(Succeed())
+ })
+ })
+})
diff --git a/catalogd/test/e2e/util.go b/catalogd/test/e2e/util.go
new file mode 100644
index 000000000..dab5edaeb
--- /dev/null
+++ b/catalogd/test/e2e/util.go
@@ -0,0 +1,51 @@
+package e2e
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net/url"
+ "strings"
+
+ "k8s.io/client-go/kubernetes"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ catalogdv1 "github.com/operator-framework/operator-controller/catalogd/api/v1"
+)
+
+func ReadTestCatalogServerContents(ctx context.Context, catalog *catalogdv1.ClusterCatalog, c client.Client, kubeClient kubernetes.Interface) ([]byte, error) {
+ if catalog == nil {
+ return nil, fmt.Errorf("cannot read nil catalog")
+ }
+ if catalog.Status.URLs == nil {
+ return nil, fmt.Errorf("catalog %q has no catalog urls", catalog.Name)
+ }
+ url, err := url.Parse(catalog.Status.URLs.Base)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing clustercatalog url %q: %v", catalog.Status.URLs.Base, err)
+ }
+ // url is expected to be in the format of
+ // http://{service_name}.{namespace}.svc/catalogs/{catalog_name}/
+ // so to get the namespace and name of the service we grab only
+ // the hostname and split it on the '.' character
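+	// e.g. for "https://catalogd-service.olmv1-system.svc/catalogs/test-catalog",
+	// name is "catalogd-service" and ns is "olmv1-system"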
+ ns := strings.Split(url.Hostname(), ".")[1]
+ name := strings.Split(url.Hostname(), ".")[0]
+ port := url.Port()
+	// the ProxyGet() call below needs an explicit port value, so if the
+	// value from url.Port() is empty, we fall back to the scheme's default:
+	// 443 for https, 80 otherwise.
+ if port == "" {
+ if url.Scheme == "https" {
+ port = "443"
+ } else {
+ port = "80"
+ }
+ }
+ resp := kubeClient.CoreV1().Services(ns).ProxyGet(url.Scheme, name, port, url.JoinPath("api", "v1", "all").Path, map[string]string{})
+ rc, err := resp.Stream(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer rc.Close()
+
+ return io.ReadAll(rc)
+}
diff --git a/catalogd/test/tools/imageregistry/imagebuilder.yaml b/catalogd/test/tools/imageregistry/imagebuilder.yaml
new file mode 100644
index 000000000..a9035ccdd
--- /dev/null
+++ b/catalogd/test/tools/imageregistry/imagebuilder.yaml
@@ -0,0 +1,32 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: kaniko
+ namespace: catalogd-e2e
+spec:
+ template:
+ spec:
+ containers:
+ - name: kaniko
+ image: gcr.io/kaniko-project/executor:latest
+ args: ["--dockerfile=/workspace/test-catalog.Dockerfile",
+ "--context=/workspace/",
+ "--destination=docker-registry.catalogd-e2e.svc:5000/test-catalog:e2e",
+ "--skip-tls-verify"]
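+        # --skip-tls-verify is required because the in-cluster test registry serves
+        # a certificate that kaniko does not trust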
+ terminationMessagePolicy: FallbackToLogsOnError
+ volumeMounts:
+ - name: dockerfile
+ mountPath: /workspace/
+ - name: build-contents
+ mountPath: /workspace/test-catalog/
+ restartPolicy: Never
+ volumes:
+ - name: dockerfile
+ configMap:
+ name: catalogd-e2e.dockerfile
+ items:
+ - key: test-catalog.Dockerfile
+ path: test-catalog.Dockerfile
+ - name: build-contents
+ configMap:
+ name: catalogd-e2e.build-contents
diff --git a/catalogd/test/tools/imageregistry/imgreg.yaml b/catalogd/test/tools/imageregistry/imgreg.yaml
new file mode 100644
index 000000000..c8a104351
--- /dev/null
+++ b/catalogd/test/tools/imageregistry/imgreg.yaml
@@ -0,0 +1,75 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: catalogd-e2e
+---
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+ name: selfsigned-issuer
+ namespace: catalogd-e2e
+spec:
+ selfSigned: {}
+---
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ name: catalogd-e2e-registry
+ namespace: catalogd-e2e
+spec:
+ secretName: catalogd-e2e-registry
+ isCA: true
+ dnsNames:
+ - docker-registry.catalogd-e2e.svc
+ privateKey:
+ algorithm: ECDSA
+ size: 256
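+  # ${ISSUER_NAME} and ${ISSUER_KIND} are substituted by envsubst in
+  # test/tools/imageregistry/registry.sh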
+ issuerRef:
+ name: ${ISSUER_NAME}
+ kind: ${ISSUER_KIND}
+ group: cert-manager.io
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: docker-registry
+ namespace: catalogd-e2e
+ labels:
+ app: registry
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: registry
+ template:
+ metadata:
+ labels:
+ app: registry
+ spec:
+ containers:
+ - name: registry
+ image: registry:2
+ volumeMounts:
+ - name: certs-vol
+ mountPath: "/certs"
+ env:
+ - name: REGISTRY_HTTP_TLS_CERTIFICATE
+ value: "/certs/tls.crt"
+ - name: REGISTRY_HTTP_TLS_KEY
+ value: "/certs/tls.key"
+ volumes:
+ - name: certs-vol
+ secret:
+ secretName: catalogd-e2e-registry
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: docker-registry
+ namespace: catalogd-e2e
+spec:
+ selector:
+ app: registry
+ ports:
+ - port: 5000
+ targetPort: 5000
diff --git a/catalogd/test/tools/imageregistry/pre-upgrade-setup.sh b/catalogd/test/tools/imageregistry/pre-upgrade-setup.sh
new file mode 100755
index 000000000..707e2c9e6
--- /dev/null
+++ b/catalogd/test/tools/imageregistry/pre-upgrade-setup.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+set -euo pipefail
+
+
+help="pre-upgrade-setup.sh is used to create some basic resources
+which will later be used in upgrade testing.
+
+Usage:
+ pre-upgrade-setup.sh [TEST_CLUSTER_CATALOG_IMAGE] [TEST_CLUSTER_CATALOG_NAME]
+"
+
+if [[ "$#" -ne 2 ]]; then
+ echo "Illegal number of arguments passed"
+ echo "${help}"
+ exit 1
+fi
+
+export TEST_CLUSTER_CATALOG_IMAGE=$1
+export TEST_CLUSTER_CATALOG_NAME=$2
+
+kubectl apply -f - << EOF
+apiVersion: olm.operatorframework.io/v1
+kind: ClusterCatalog
+metadata:
+ name: ${TEST_CLUSTER_CATALOG_NAME}
+spec:
+ source:
+ type: Image
+ image:
+ ref: ${TEST_CLUSTER_CATALOG_IMAGE}
+EOF
+
+kubectl wait --for=condition=Serving --timeout=60s ClusterCatalog "$TEST_CLUSTER_CATALOG_NAME"
diff --git a/catalogd/test/tools/imageregistry/registry.sh b/catalogd/test/tools/imageregistry/registry.sh
new file mode 100755
index 000000000..3995c9b3f
--- /dev/null
+++ b/catalogd/test/tools/imageregistry/registry.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+
+set -e
+
+# registry.sh will create an in-cluster image registry useful for end-to-end testing
+# of catalogd's unpacking process. It does a few things:
+# 1. Installs cert-manager for creating a self-signed certificate for the image registry
+# 2. Creates all the resources necessary for deploying the image registry in the catalogd-e2e namespace
+# 3. Creates ConfigMaps containing the test catalog + Dockerfile to be mounted to the kaniko pod
+# 4. Waits for the kaniko Job to have condition Complete == true, indicating the test catalog image has been built + pushed
+# to the test image registry
+# Usage:
+# registry.sh [ISSUER_KIND] [ISSUER_NAME]
+
+if [[ "$#" -ne 2 ]]; then
+ echo "Incorrect number of arguments passed"
+  echo "Usage: registry.sh [ISSUER_KIND] [ISSUER_NAME]"
+ exit 1
+fi
+
+export ISSUER_KIND=$1
+export ISSUER_NAME=$2
+
+# create the image registry with all the certs
+envsubst '${ISSUER_KIND},${ISSUER_NAME}' < test/tools/imageregistry/imgreg.yaml | kubectl apply -f -
+kubectl wait -n catalogd-e2e --for=condition=Available deployment/docker-registry --timeout=60s
+
+# Load the testdata onto the cluster as a configmap so it can be used with kaniko
+kubectl create configmap -n catalogd-e2e --from-file=testdata/catalogs/test-catalog.Dockerfile catalogd-e2e.dockerfile
+kubectl create configmap -n catalogd-e2e --from-file=testdata/catalogs/test-catalog catalogd-e2e.build-contents
+
+# Create the kaniko pod to build the test image and push it to the test registry.
+kubectl apply -f test/tools/imageregistry/imagebuilder.yaml
+kubectl wait --for=condition=Complete -n catalogd-e2e jobs/kaniko --timeout=60s
diff --git a/catalogd/test/upgrade/unpack_test.go b/catalogd/test/upgrade/unpack_test.go
new file mode 100644
index 000000000..e13354454
--- /dev/null
+++ b/catalogd/test/upgrade/unpack_test.go
@@ -0,0 +1,131 @@
+package upgradee2e
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ "github.com/google/go-cmp/cmp"
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ catalogdv1 "github.com/operator-framework/operator-controller/catalogd/api/v1"
+ "github.com/operator-framework/operator-controller/catalogd/test/e2e"
+)
+
+var _ = Describe("ClusterCatalog Unpacking", func() {
+ When("A ClusterCatalog is created", func() {
+ It("Successfully unpacks catalog contents", func() {
+ ctx := context.Background()
+
+ var managerDeployment appsv1.Deployment
+ managerLabelSelector := labels.Set{"control-plane": "catalogd-controller-manager"}
+ By("Checking that the controller-manager deployment is updated")
+ Eventually(func(g Gomega) {
+ var managerDeployments appsv1.DeploymentList
+ err := c.List(ctx, &managerDeployments, client.MatchingLabels(managerLabelSelector), client.InNamespace("olmv1-system"))
+ g.Expect(err).ToNot(HaveOccurred())
+ g.Expect(managerDeployments.Items).To(HaveLen(1))
+ managerDeployment = managerDeployments.Items[0]
+ g.Expect(managerDeployment.Status.UpdatedReplicas).To(Equal(*managerDeployment.Spec.Replicas))
+ g.Expect(managerDeployment.Status.Replicas).To(Equal(*managerDeployment.Spec.Replicas))
+ g.Expect(managerDeployment.Status.AvailableReplicas).To(Equal(*managerDeployment.Spec.Replicas))
+ g.Expect(managerDeployment.Status.ReadyReplicas).To(Equal(*managerDeployment.Spec.Replicas))
+ }).Should(Succeed())
+
+ var managerPod corev1.Pod
+ By("Waiting for only one controller-manager pod to remain")
+ Eventually(func(g Gomega) {
+ var managerPods corev1.PodList
+ err := c.List(ctx, &managerPods, client.MatchingLabels(managerLabelSelector))
+ g.Expect(err).ToNot(HaveOccurred())
+ g.Expect(managerPods.Items).To(HaveLen(1))
+ managerPod = managerPods.Items[0]
+ }).Should(Succeed())
+
+			By("Reading logs to make sure that ClusterCatalog was reconciled by the upgraded controller")
+ logCtx, cancel := context.WithTimeout(ctx, time.Minute)
+ defer cancel()
+ substrings := []string{
+ "reconcile ending",
+ fmt.Sprintf(`ClusterCatalog=%q`, testClusterCatalogName),
+ }
+ found, err := watchPodLogsForSubstring(logCtx, &managerPod, "manager", substrings...)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(found).To(BeTrue())
+
+ catalog := &catalogdv1.ClusterCatalog{}
+ By("Ensuring ClusterCatalog has Status.Condition of Progressing with a status == True, reason == Succeeded")
+ Eventually(func(g Gomega) {
+ err := c.Get(ctx, types.NamespacedName{Name: testClusterCatalogName}, catalog)
+ g.Expect(err).ToNot(HaveOccurred())
+ cond := meta.FindStatusCondition(catalog.Status.Conditions, catalogdv1.TypeProgressing)
+ g.Expect(cond).ToNot(BeNil())
+ g.Expect(cond.Status).To(Equal(metav1.ConditionTrue))
+ g.Expect(cond.Reason).To(Equal(catalogdv1.ReasonSucceeded))
+ }).Should(Succeed())
+
+ expectedFBC, err := os.ReadFile("../../testdata/catalogs/test-catalog/expected_all.json")
+ Expect(err).To(Not(HaveOccurred()))
+
+ By("Making sure the catalog content is available via the http server")
+ Eventually(func(g Gomega) {
+ actualFBC, err := e2e.ReadTestCatalogServerContents(ctx, catalog, c, kubeClient)
+ g.Expect(err).To(Not(HaveOccurred()))
+ g.Expect(cmp.Diff(expectedFBC, actualFBC)).To(BeEmpty())
+ }).Should(Succeed())
+
+ By("Ensuring ClusterCatalog has Status.Condition of Serving with a status == True, reason == Available")
+ Eventually(func(g Gomega) {
+ err := c.Get(ctx, types.NamespacedName{Name: testClusterCatalogName}, catalog)
+ g.Expect(err).ToNot(HaveOccurred())
+ cond := meta.FindStatusCondition(catalog.Status.Conditions, catalogdv1.TypeServing)
+ g.Expect(cond).ToNot(BeNil())
+ g.Expect(cond.Status).To(Equal(metav1.ConditionTrue))
+ g.Expect(cond.Reason).To(Equal(catalogdv1.ReasonAvailable))
+ }).Should(Succeed())
+ })
+ })
+})
+
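+// watchPodLogsForSubstring follows the logs of the named container and returns true as soon
+// as a single log line contains every one of the given substrings; it returns false if the
+// stream ends (or the context expires) without such a line.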
+func watchPodLogsForSubstring(ctx context.Context, pod *corev1.Pod, container string, substrings ...string) (bool, error) {
+ podLogOpts := corev1.PodLogOptions{
+ Follow: true,
+ Container: container,
+ }
+
+ req := kubeClient.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &podLogOpts)
+ podLogs, err := req.Stream(ctx)
+ if err != nil {
+ return false, err
+ }
+ defer podLogs.Close()
+
+ scanner := bufio.NewScanner(podLogs)
+ for scanner.Scan() {
+ line := scanner.Text()
+
+ foundCount := 0
+ for _, substring := range substrings {
+ if strings.Contains(line, substring) {
+ foundCount++
+ }
+ }
+ if foundCount == len(substrings) {
+ return true, nil
+ }
+ }
+
+ return false, scanner.Err()
+}
diff --git a/catalogd/test/upgrade/upgrade_suite_test.go b/catalogd/test/upgrade/upgrade_suite_test.go
new file mode 100644
index 000000000..33b7c731b
--- /dev/null
+++ b/catalogd/test/upgrade/upgrade_suite_test.go
@@ -0,0 +1,53 @@
+package upgradee2e
+
+import (
+ "os"
+ "testing"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/client-go/rest"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ catalogdv1 "github.com/operator-framework/operator-controller/catalogd/api/v1"
+)
+
+const (
+ testClusterCatalogNameEnv = "TEST_CLUSTER_CATALOG_NAME"
+)
+
+var (
+ cfg *rest.Config
+ c client.Client
+ err error
+ kubeClient kubernetes.Interface
+
+ testClusterCatalogName string
+)
+
+func TestUpgradeE2E(t *testing.T) {
+ RegisterFailHandler(Fail)
+ SetDefaultEventuallyTimeout(1 * time.Minute)
+ SetDefaultEventuallyPollingInterval(1 * time.Second)
+ RunSpecs(t, "Upgrade E2E Suite")
+}
+
+var _ = BeforeSuite(func() {
+ cfg = ctrl.GetConfigOrDie()
+
+ sch := scheme.Scheme
+ Expect(catalogdv1.AddToScheme(sch)).To(Succeed())
+ c, err = client.New(cfg, client.Options{Scheme: sch})
+ Expect(err).To(Not(HaveOccurred()))
+ kubeClient, err = kubernetes.NewForConfig(cfg)
+ Expect(err).ToNot(HaveOccurred())
+
+ var ok bool
+ testClusterCatalogName, ok = os.LookupEnv(testClusterCatalogNameEnv)
+ Expect(ok).To(BeTrue())
+})
diff --git a/catalogd/testdata/catalogs/test-catalog.Dockerfile b/catalogd/testdata/catalogs/test-catalog.Dockerfile
new file mode 100644
index 000000000..849d331bd
--- /dev/null
+++ b/catalogd/testdata/catalogs/test-catalog.Dockerfile
@@ -0,0 +1,6 @@
+FROM scratch
+COPY test-catalog /configs
+
+# Set the declarative config (DC) label pointing at the location of the
+# DC root directory in the image
+LABEL operators.operatorframework.io.index.configs.v1=/configs
\ No newline at end of file
diff --git a/catalogd/testdata/catalogs/test-catalog/.indexignore b/catalogd/testdata/catalogs/test-catalog/.indexignore
new file mode 100644
index 000000000..699fa6d33
--- /dev/null
+++ b/catalogd/testdata/catalogs/test-catalog/.indexignore
@@ -0,0 +1,2 @@
+/expected_all.json
+..*
diff --git a/catalogd/testdata/catalogs/test-catalog/catalog.yaml b/catalogd/testdata/catalogs/test-catalog/catalog.yaml
new file mode 100644
index 000000000..14d33b9d9
--- /dev/null
+++ b/catalogd/testdata/catalogs/test-catalog/catalog.yaml
@@ -0,0 +1,20 @@
+---
+schema: olm.package
+name: prometheus
+defaultChannel: beta
+---
+schema: olm.channel
+name: beta
+package: prometheus
+entries:
+ - name: prometheus-operator.0.47.0
+---
+schema: olm.bundle
+name: prometheus-operator.0.47.0
+package: prometheus
+image: localhost/testdata/bundles/registry-v1/prometheus-operator:v0.47.0
+properties:
+ - type: olm.package
+ value:
+ packageName: prometheus
+ version: 0.47.0
diff --git a/catalogd/testdata/catalogs/test-catalog/expected_all.json b/catalogd/testdata/catalogs/test-catalog/expected_all.json
new file mode 100644
index 000000000..554488982
--- /dev/null
+++ b/catalogd/testdata/catalogs/test-catalog/expected_all.json
@@ -0,0 +1,3 @@
+{"defaultChannel":"beta","name":"prometheus","schema":"olm.package"}
+{"entries":[{"name":"prometheus-operator.0.47.0"}],"name":"beta","package":"prometheus","schema":"olm.channel"}
+{"image":"localhost/testdata/bundles/registry-v1/prometheus-operator:v0.47.0","name":"prometheus-operator.0.47.0","package":"prometheus","properties":[{"type":"olm.package","value":{"packageName":"prometheus","version":"0.47.0"}}],"schema":"olm.bundle"}
diff --git a/cmd/manager/main.go b/cmd/operator-controller/main.go
similarity index 99%
rename from cmd/manager/main.go
rename to cmd/operator-controller/main.go
index 8bb230895..345560bcd 100644
--- a/cmd/manager/main.go
+++ b/cmd/operator-controller/main.go
@@ -49,10 +49,10 @@ import (
"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
"sigs.k8s.io/controller-runtime/pkg/metrics/server"
- catalogd "github.com/operator-framework/catalogd/api/v1"
helmclient "github.com/operator-framework/helm-operator-plugins/pkg/client"
ocv1 "github.com/operator-framework/operator-controller/api/v1"
+ catalogd "github.com/operator-framework/operator-controller/catalogd/api/v1"
"github.com/operator-framework/operator-controller/internal/action"
"github.com/operator-framework/operator-controller/internal/applier"
"github.com/operator-framework/operator-controller/internal/authentication"
diff --git a/config/base/manager/manager.yaml b/config/base/manager/manager.yaml
index 12bd673a0..25ba5598a 100644
--- a/config/base/manager/manager.yaml
+++ b/config/base/manager/manager.yaml
@@ -49,7 +49,7 @@ spec:
type: RuntimeDefault
containers:
- command:
- - /manager
+ - /operator-controller
args:
- "--health-probe-bind-address=:8081"
- "--metrics-bind-address=:8443"
diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml
new file mode 100644
index 000000000..a5842de42
--- /dev/null
+++ b/config/webhook/manifests.yaml
@@ -0,0 +1,27 @@
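+# The inject-metadata-name webhook mutates ClusterCatalogs on CREATE/UPDATE; the e2e tests
+# expect it to populate the olm.operatorframework.io/metadata.name label with the object's name.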
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: MutatingWebhookConfiguration
+metadata:
+ name: mutating-webhook-configuration
+webhooks:
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: webhook-service
+ namespace: system
+ path: /mutate-olm-operatorframework-io-v1-clustercatalog
+ failurePolicy: Fail
+ name: inject-metadata-name.olm.operatorframework.io
+ rules:
+ - apiGroups:
+ - olm.operatorframework.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - clustercatalogs
+ sideEffects: None
+ timeoutSeconds: 10
diff --git a/docs/contribute/developer.md b/docs/contribute/developer.md
index f21a3b28b..a87f3a682 100644
--- a/docs/contribute/developer.md
+++ b/docs/contribute/developer.md
@@ -91,16 +91,6 @@ Follow Tilt's [instructions](https://docs.tilt.dev/install.html) for installatio
operator-controller requires
[catalogd](https://github.com/operator-framework/catalogd). Please make sure it's installed, either normally or via its own Tiltfile, before proceeding. If you want to use Tilt, make sure you specify a unique `--port` flag to each `tilt up` invocation.
-### Install tilt-support Repo
-
-You must install the tilt-support repo at the directory level above this repo:
-
-```bash
-pushd ..
-git clone https://github.com/operator-framework/tilt-support
-popd
-```
-
### Starting Tilt
This is typically as short as:
@@ -136,6 +126,15 @@ v0.33.1, built 2023-06-28
At the end of the installation process, the command output will prompt you to press the space bar to open the web UI, which provides a useful overview of all the installed components.
+Shortly after starting, Tilt processes the `Tiltfile`, resulting in:
+
+- Building the go binaries
+- Building the images
+- Loading the images into kind
+- Running kustomize and applying everything except the Deployments that reference the images above
+- Modifying the Deployments to use the just-built images
+- Creating the Deployments
+
---
## Special Setup for MacOS
@@ -161,6 +160,14 @@ done
---
+## Making code changes
+
+Any time you change any of the files listed in the `deps` section in the `_binary` `local_resource`,
+Tilt automatically rebuilds the go binary. As soon as the binary is rebuilt, Tilt pushes it (and only it) into the
+appropriate running container, and then restarts the process.
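+
+As an illustration only (the resource name and paths below are hypothetical, not necessarily
+this repository's actual targets), a `local_resource` of this shape rebuilds a binary whenever
+one of its `deps` changes:
+
+```starlark
+# Sketch: rebuild the operator-controller binary when any of its sources change.
+local_resource(
+    'operator-controller_binary',
+    cmd='CGO_ENABLED=0 GOOS=linux go build -o .tilt-bin/operator-controller ./cmd/operator-controller',
+    deps=['api', 'cmd/operator-controller', 'internal', 'go.mod', 'go.sum'],
+)
+```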
+
+---
+
## Contributing
Refer to [CONTRIBUTING.md](contributing.md) for more information.
diff --git a/go.mod b/go.mod
index 1c961e3b1..ab72212f1 100644
--- a/go.mod
+++ b/go.mod
@@ -13,11 +13,14 @@ require (
github.com/go-logr/logr v1.4.2
github.com/google/go-cmp v0.6.0
github.com/google/go-containerregistry v0.20.2
+ github.com/klauspost/compress v1.17.11
+ github.com/onsi/ginkgo/v2 v2.22.1
+ github.com/onsi/gomega v1.36.2
github.com/opencontainers/go-digest v1.0.0
github.com/operator-framework/api v0.27.0
- github.com/operator-framework/catalogd v1.1.0
github.com/operator-framework/helm-operator-plugins v0.7.0
github.com/operator-framework/operator-registry v1.48.0
+ github.com/prometheus/client_golang v1.20.5
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.10.0
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
@@ -26,6 +29,7 @@ require (
k8s.io/api v0.31.4
k8s.io/apiextensions-apiserver v0.31.4
k8s.io/apimachinery v0.31.4
+ k8s.io/apiserver v0.31.4
k8s.io/cli-runtime v0.31.4
k8s.io/client-go v0.31.4
k8s.io/component-base v0.31.4
@@ -105,6 +109,7 @@ require (
github.com/go-openapi/strfmt v0.23.0 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-openapi/validate v0.24.0 // indirect
+ github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
@@ -113,6 +118,7 @@ require (
github.com/google/cel-go v0.20.1 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/gofuzz v1.2.0 // indirect
+ github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/mux v1.8.1 // indirect
@@ -135,7 +141,6 @@ require (
github.com/json-iterator/go v1.1.12 // indirect
github.com/k14s/starlark-go v0.0.0-20200720175618-3a5c849cc368 // indirect
github.com/k14s/ytt v0.36.0 // indirect
- github.com/klauspost/compress v1.17.11 // indirect
github.com/klauspost/pgzip v1.2.6 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
@@ -166,7 +171,6 @@ require (
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/oklog/ulid v1.3.1 // indirect
- github.com/onsi/gomega v1.36.2 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/opencontainers/runtime-spec v1.2.0 // indirect
github.com/openshift/crd-schema-checker v0.0.0-20240404194209-35a9033b1d11 // indirect
@@ -176,7 +180,6 @@ require (
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/proglottis/gpgme v0.1.3 // indirect
- github.com/prometheus/client_golang v1.20.5 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
@@ -227,6 +230,7 @@ require (
golang.org/x/term v0.28.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.5.0 // indirect
+ golang.org/x/tools v0.28.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect
@@ -237,7 +241,6 @@ require (
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- k8s.io/apiserver v0.31.4 // indirect
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
k8s.io/kubectl v0.31.3 // indirect
oras.land/oras-go v1.2.5 // indirect
diff --git a/go.sum b/go.sum
index 8fc9c1882..b7283b9fe 100644
--- a/go.sum
+++ b/go.sum
@@ -518,8 +518,8 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108
github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
-github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU=
-github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
+github.com/onsi/ginkgo/v2 v2.22.1 h1:QW7tbJAUDyVDVOM5dFa7qaybo+CRfR7bemlQUN6Z8aM=
+github.com/onsi/ginkgo/v2 v2.22.1/go.mod h1:S6aTpoRsSq2cZOd+pssHAlKW/Q/jZt6cPrPlnj4a1xM=
github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
@@ -535,8 +535,6 @@ github.com/openshift/crd-schema-checker v0.0.0-20240404194209-35a9033b1d11 h1:eT
github.com/openshift/crd-schema-checker v0.0.0-20240404194209-35a9033b1d11/go.mod h1:EmVJt97N+pfWFsli/ipXTBZqSG5F5KGQhm3c3IsGq1o=
github.com/operator-framework/api v0.27.0 h1:OrVaGKZJvbZo58HTv2guz7aURkhVKYhFqZ/6VpifiXI=
github.com/operator-framework/api v0.27.0/go.mod h1:lg2Xx+S8NQWGYlEOvFwQvH46E5EK5IrAIL7HWfAhciM=
-github.com/operator-framework/catalogd v1.1.0 h1:mu2DYL5mpREEAAP+uPG+CMSsfsJkgrIasgLRG8nvwJg=
-github.com/operator-framework/catalogd v1.1.0/go.mod h1:8Je9CqMPwhNgRoqGX5OPsLYHsEoTDvPnELLLKRw1RHE=
github.com/operator-framework/helm-operator-plugins v0.7.0 h1:YmtIWFc9BaNaDc5mk/dkG0P2BqPZOqpDvjWih5Fczuk=
github.com/operator-framework/helm-operator-plugins v0.7.0/go.mod h1:fUUCJR3bWtMBZ1qdDhbwjacsBHi9uT576tF4u/DwOgQ=
github.com/operator-framework/operator-lib v0.15.0 h1:0QeRM4PMtThqINpcFGCEBnIV3Z8u7/8fYLEx6mUtdcM=
@@ -787,8 +785,8 @@ golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKG
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
-golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
+golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180730214132-a0f8a16cb08c/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
diff --git a/internal/catalogmetadata/client/client.go b/internal/catalogmetadata/client/client.go
index 4b75e6291..7daddaaec 100644
--- a/internal/catalogmetadata/client/client.go
+++ b/internal/catalogmetadata/client/client.go
@@ -12,8 +12,9 @@ import (
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- catalogd "github.com/operator-framework/catalogd/api/v1"
"github.com/operator-framework/operator-registry/alpha/declcfg"
+
+ catalogd "github.com/operator-framework/operator-controller/catalogd/api/v1"
)
const (
diff --git a/internal/catalogmetadata/client/client_test.go b/internal/catalogmetadata/client/client_test.go
index 16adb94a0..45228684a 100644
--- a/internal/catalogmetadata/client/client_test.go
+++ b/internal/catalogmetadata/client/client_test.go
@@ -14,9 +14,9 @@ import (
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- catalogd "github.com/operator-framework/catalogd/api/v1"
"github.com/operator-framework/operator-registry/alpha/declcfg"
+ catalogd "github.com/operator-framework/operator-controller/catalogd/api/v1"
catalogClient "github.com/operator-framework/operator-controller/internal/catalogmetadata/client"
)
diff --git a/internal/controllers/clustercatalog_controller.go b/internal/controllers/clustercatalog_controller.go
index f326b4578..2ee78694f 100644
--- a/internal/controllers/clustercatalog_controller.go
+++ b/internal/controllers/clustercatalog_controller.go
@@ -26,7 +26,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
- catalogd "github.com/operator-framework/catalogd/api/v1"
+ catalogd "github.com/operator-framework/operator-controller/catalogd/api/v1"
)
type CatalogCache interface {
diff --git a/internal/controllers/clustercatalog_controller_test.go b/internal/controllers/clustercatalog_controller_test.go
index 2bc1cb2bb..92cbbe269 100644
--- a/internal/controllers/clustercatalog_controller_test.go
+++ b/internal/controllers/clustercatalog_controller_test.go
@@ -14,8 +14,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- catalogd "github.com/operator-framework/catalogd/api/v1"
-
+ catalogd "github.com/operator-framework/operator-controller/catalogd/api/v1"
"github.com/operator-framework/operator-controller/internal/controllers"
"github.com/operator-framework/operator-controller/internal/scheme"
)
diff --git a/internal/controllers/clusterextension_controller.go b/internal/controllers/clusterextension_controller.go
index f77511539..66c61de6f 100644
--- a/internal/controllers/clusterextension_controller.go
+++ b/internal/controllers/clusterextension_controller.go
@@ -45,11 +45,11 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/operator-framework/api/pkg/operators/v1alpha1"
- catalogd "github.com/operator-framework/catalogd/api/v1"
helmclient "github.com/operator-framework/helm-operator-plugins/pkg/client"
"github.com/operator-framework/operator-registry/alpha/declcfg"
ocv1 "github.com/operator-framework/operator-controller/api/v1"
+ catalogd "github.com/operator-framework/operator-controller/catalogd/api/v1"
"github.com/operator-framework/operator-controller/internal/bundleutil"
"github.com/operator-framework/operator-controller/internal/conditionsets"
"github.com/operator-framework/operator-controller/internal/contentmanager"
diff --git a/internal/resolve/catalog.go b/internal/resolve/catalog.go
index 944744c5f..ea7cf6e32 100644
--- a/internal/resolve/catalog.go
+++ b/internal/resolve/catalog.go
@@ -15,10 +15,10 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
- catalogd "github.com/operator-framework/catalogd/api/v1"
"github.com/operator-framework/operator-registry/alpha/declcfg"
ocv1 "github.com/operator-framework/operator-controller/api/v1"
+ catalogd "github.com/operator-framework/operator-controller/catalogd/api/v1"
"github.com/operator-framework/operator-controller/internal/bundleutil"
"github.com/operator-framework/operator-controller/internal/catalogmetadata/compare"
"github.com/operator-framework/operator-controller/internal/catalogmetadata/filter"
diff --git a/internal/resolve/catalog_test.go b/internal/resolve/catalog_test.go
index 83eeba9b0..1054a1fcd 100644
--- a/internal/resolve/catalog_test.go
+++ b/internal/resolve/catalog_test.go
@@ -16,11 +16,11 @@ import (
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
- catalogd "github.com/operator-framework/catalogd/api/v1"
"github.com/operator-framework/operator-registry/alpha/declcfg"
"github.com/operator-framework/operator-registry/alpha/property"
ocv1 "github.com/operator-framework/operator-controller/api/v1"
+ catalogd "github.com/operator-framework/operator-controller/catalogd/api/v1"
"github.com/operator-framework/operator-controller/internal/features"
)
diff --git a/internal/scheme/scheme.go b/internal/scheme/scheme.go
index a5fae6298..fecdacf08 100644
--- a/internal/scheme/scheme.go
+++ b/internal/scheme/scheme.go
@@ -7,9 +7,8 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
- catalogd "github.com/operator-framework/catalogd/api/v1"
-
ocv1 "github.com/operator-framework/operator-controller/api/v1"
+ catalogd "github.com/operator-framework/operator-controller/catalogd/api/v1"
)
var Scheme = runtime.NewScheme()
diff --git a/scripts/install.tpl.sh b/scripts/install.tpl.sh
index c3525dbcb..a728790ee 100644
--- a/scripts/install.tpl.sh
+++ b/scripts/install.tpl.sh
@@ -2,26 +2,19 @@
set -euo pipefail
IFS=$'\n\t'
-operator_controller_manifest=$MANIFEST
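+# MANIFEST now points at the combined OLMv1 manifest, which bundles both catalogd and operator-controller.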
+olmv1_manifest=$MANIFEST
-if [[ -z "$operator_controller_manifest" ]]; then
+if [[ -z "$olmv1_manifest" ]]; then
echo "Error: Missing required MANIFEST variable"
exit 1
fi
-catalogd_version=$CATALOGD_VERSION
+default_catalogs_manifest="./catalogd/config/base/default/clustercatalogs/default-catalogs.yaml"
cert_mgr_version=$CERT_MGR_VERSION
install_default_catalogs=$INSTALL_DEFAULT_CATALOGS
-if [[ -z "$catalogd_version" || -z "$cert_mgr_version" ]]; then
- err="Error: Missing component version(s) for: "
- if [[ -z "$catalogd_version" ]]; then
- err+="catalogd "
- fi
- if [[ -z "$cert_mgr_version" ]]; then
- err+="cert-manager "
- fi
- echo "$err"
+if [[ -z "$cert_mgr_version" ]]; then
+ echo "Error: Missing CERT_MGR_VERSION variable"
exit 1
fi
@@ -76,15 +69,18 @@ kubectl_wait "cert-manager" "deployment/cert-manager" "60s"
kubectl_wait_for_query "mutatingwebhookconfigurations/cert-manager-webhook" '{.webhooks[0].clientConfig.caBundle}' 60 5
kubectl_wait_for_query "validatingwebhookconfigurations/cert-manager-webhook" '{.webhooks[0].clientConfig.caBundle}' 60 5
-kubectl apply -f "https://github.com/operator-framework/catalogd/releases/download/${catalogd_version}/catalogd.yaml"
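+# catalogd is no longer installed from its own release artifact; it ships in the OLMv1 manifest applied below.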
+kubectl apply -f "${olmv1_manifest}"
# Wait for the rollout, and then wait for the deployment to be Available
kubectl_wait_rollout "olmv1-system" "deployment/catalogd-controller-manager" "60s"
kubectl_wait "olmv1-system" "deployment/catalogd-controller-manager" "60s"
+kubectl_wait "olmv1-system" "deployment/operator-controller-controller-manager" "60s"
if [[ "${install_default_catalogs}" != "false" ]]; then
- kubectl apply -f "https://github.com/operator-framework/catalogd/releases/download/${catalogd_version}/default-catalogs.yaml"
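+ # The default catalogs manifest is now sourced from the repository instead of a catalogd release asset.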
+ if [[ ! -f "$default_catalogs_manifest" ]]; then
+ echo "Error: Missing required default catalogs manifest file at $default_catalogs_manifest"
+ exit 1
+ fi
+
+ kubectl apply -f "${default_catalogs_manifest}"
kubectl wait --for=condition=Serving "clustercatalog/operatorhubio" --timeout="60s"
fi
-
-kubectl apply -f "${operator_controller_manifest}"
-kubectl_wait "olmv1-system" "deployment/operator-controller-controller-manager" "60s"
diff --git a/test/e2e/cluster_extension_install_test.go b/test/e2e/cluster_extension_install_test.go
index 6d137fb1a..74cf3b3da 100644
--- a/test/e2e/cluster_extension_install_test.go
+++ b/test/e2e/cluster_extension_install_test.go
@@ -28,9 +28,8 @@ import (
"k8s.io/utils/env"
"sigs.k8s.io/controller-runtime/pkg/client"
- catalogd "github.com/operator-framework/catalogd/api/v1"
-
ocv1 "github.com/operator-framework/operator-controller/api/v1"
+ catalogd "github.com/operator-framework/operator-controller/catalogd/api/v1"
)
const (
diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go
index df6d3fdd9..e65fd5e5d 100644
--- a/test/e2e/e2e_suite_test.go
+++ b/test/e2e/e2e_suite_test.go
@@ -13,8 +13,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
- catalogd "github.com/operator-framework/catalogd/api/v1"
-
+ catalogd "github.com/operator-framework/operator-controller/catalogd/api/v1"
"github.com/operator-framework/operator-controller/internal/scheme"
)
diff --git a/test/extension-developer-e2e/extension_developer_test.go b/test/extension-developer-e2e/extension_developer_test.go
index 5edaa910c..5f160617c 100644
--- a/test/extension-developer-e2e/extension_developer_test.go
+++ b/test/extension-developer-e2e/extension_developer_test.go
@@ -18,9 +18,8 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
- catalogd "github.com/operator-framework/catalogd/api/v1"
-
ocv1 "github.com/operator-framework/operator-controller/api/v1"
+ catalogd "github.com/operator-framework/operator-controller/catalogd/api/v1"
)
func TestExtensionDeveloper(t *testing.T) {
diff --git a/test/upgrade-e2e/post_upgrade_test.go b/test/upgrade-e2e/post_upgrade_test.go
index e361f8814..547a7142a 100644
--- a/test/upgrade-e2e/post_upgrade_test.go
+++ b/test/upgrade-e2e/post_upgrade_test.go
@@ -18,9 +18,8 @@ import (
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
- catalogd "github.com/operator-framework/catalogd/api/v1"
-
ocv1 "github.com/operator-framework/operator-controller/api/v1"
+ catalogd "github.com/operator-framework/operator-controller/catalogd/api/v1"
)
func TestClusterExtensionAfterOLMUpgrade(t *testing.T) {