diff --git a/Makefile b/Makefile index 0be7868f5..6245c6ccb 100644 --- a/Makefile +++ b/Makefile @@ -120,10 +120,13 @@ custom-linter-build: #EXHELP Build custom linter lint-custom: custom-linter-build #EXHELP Call custom linter for the project go vet -tags=$(GO_BUILD_TAGS) -vettool=./bin/custom-linter ./... -.PHONY: tidy -tidy: #HELP Update dependencies. - # Force tidy to use the version already in go.mod - $(Q)go mod tidy -go=$(GOLANG_VERSION) +.PHONY: k8s-pin +k8s-pin: #EXHELP Pin k8s staging modules based on k8s.io/kubernetes version (in go.mod or from K8S_IO_K8S_VERSION env var) and run go mod tidy. + K8S_IO_K8S_VERSION='$(K8S_IO_K8S_VERSION)' go run hack/tools/k8smaintainer/main.go + +.PHONY: tidy #HELP Run go mod tidy. +tidy: + go mod tidy .PHONY: manifests KUSTOMIZE_CATD_CRDS_DIR := config/base/catalogd/crd/bases @@ -151,7 +154,7 @@ generate: $(CONTROLLER_GEN) #EXHELP Generate code containing DeepCopy, DeepCopyI $(CONTROLLER_GEN) --load-build-tags=$(GO_BUILD_TAGS) object:headerFile="hack/boilerplate.go.txt" paths="./..." .PHONY: verify -verify: tidy fmt generate manifests crd-ref-docs generate-test-data #HELP Verify all generated code is up-to-date. +verify: k8s-pin fmt generate manifests crd-ref-docs generate-test-data #HELP Verify all generated code is up-to-date. Runs k8s-pin instead of just tidy. git diff --exit-code # Renders registry+v1 bundles in test/convert diff --git a/cmd/operator-controller/main.go b/cmd/operator-controller/main.go index 57131226f..a9cd69863 100644 --- a/cmd/operator-controller/main.go +++ b/cmd/operator-controller/main.go @@ -31,6 +31,7 @@ import ( "github.com/containers/image/v5/types" "github.com/spf13/cobra" corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" apiextensionsv1client "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" "k8s.io/apimachinery/pkg/fields" k8slabels "k8s.io/apimachinery/pkg/labels" @@ -56,6 +57,7 @@ import ( "github.com/operator-framework/operator-controller/internal/operator-controller/action" "github.com/operator-framework/operator-controller/internal/operator-controller/applier" "github.com/operator-framework/operator-controller/internal/operator-controller/authentication" + "github.com/operator-framework/operator-controller/internal/operator-controller/authorization" "github.com/operator-framework/operator-controller/internal/operator-controller/catalogmetadata/cache" catalogclient "github.com/operator-framework/operator-controller/internal/operator-controller/catalogmetadata/client" "github.com/operator-framework/operator-controller/internal/operator-controller/contentmanager" @@ -178,6 +180,9 @@ func validateMetricsFlags() error { func run() error { setupLog.Info("starting up the controller", "version info", version.String()) + // log feature gate status after parsing flags and setting up logger + features.LogFeatureGateStates(setupLog, features.OperatorControllerFeatureGate) + authFilePath := filepath.Join(os.TempDir(), fmt.Sprintf("%s-%s.json", authFilePrefix, apimachineryrand.String(8))) var globalPullSecretKey *k8stypes.NamespacedName if cfg.globalPullSecret != "" { @@ -197,8 +202,12 @@ func run() error { setupLog.Info("set up manager") cacheOptions := crcache.Options{ ByObject: map[client.Object]crcache.ByObject{ - &ocv1.ClusterExtension{}: {Label: k8slabels.Everything()}, - &ocv1.ClusterCatalog{}: {Label: k8slabels.Everything()}, + &ocv1.ClusterExtension{}: {Label: k8slabels.Everything()}, + &ocv1.ClusterCatalog{}: {Label: k8slabels.Everything()}, + &rbacv1.ClusterRole{}: 
{Label: k8slabels.Everything()}, + &rbacv1.ClusterRoleBinding{}: {Label: k8slabels.Everything()}, + &rbacv1.Role{}: {Namespaces: map[string]crcache.Config{}, Label: k8slabels.Everything()}, + &rbacv1.RoleBinding{}: {Namespaces: map[string]crcache.Config{}, Label: k8slabels.Everything()}, }, DefaultNamespaces: map[string]crcache.Config{ cfg.systemNamespace: {LabelSelector: k8slabels.Everything()}, @@ -403,10 +412,18 @@ func run() error { crdupgradesafety.NewPreflight(aeClient.CustomResourceDefinitions()), } + // determine if PreAuthorizer should be enabled based on feature gate + var preAuth authorization.PreAuthorizer + if features.OperatorControllerFeatureGate.Enabled(features.PreflightPermissions) { + preAuth = authorization.NewRBACPreAuthorizer(mgr.GetClient()) + } + + // now initialize the helmApplier, assigning the potentially nil preAuth helmApplier := &applier.Helm{ ActionClientGetter: acg, Preflights: preflights, BundleToHelmChartFn: convert.RegistryV1ToHelmChart, + PreAuthorizer: preAuth, } cm := contentmanager.NewManager(clientRestConfigMapper, mgr.GetConfig(), mgr.GetRESTMapper()) diff --git a/codecov.yml b/codecov.yml index a3bfabd61..7ea9929a5 100644 --- a/codecov.yml +++ b/codecov.yml @@ -8,4 +8,5 @@ coverage: paths: - "api/" - "cmd/" - - "internal/" \ No newline at end of file + - "internal/" + diff --git a/config/base/operator-controller/rbac/role.yaml b/config/base/operator-controller/rbac/role.yaml index a929e78e9..be89deec1 100644 --- a/config/base/operator-controller/rbac/role.yaml +++ b/config/base/operator-controller/rbac/role.yaml @@ -47,6 +47,16 @@ rules: verbs: - patch - update +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + - rolebindings + - roles + verbs: + - list + - watch --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git a/go.mod b/go.mod index 749294fb4..ce93025c2 100644 --- a/go.mod +++ b/go.mod @@ -24,23 +24,30 @@ require ( github.com/spf13/cobra v1.9.1 github.com/stretchr/testify v1.10.0 golang.org/x/exp v0.0.0-20250228200357-dead58393ab7 + golang.org/x/mod v0.24.0 golang.org/x/sync v0.13.0 golang.org/x/tools v0.32.0 gopkg.in/yaml.v2 v2.4.0 helm.sh/helm/v3 v3.17.3 k8s.io/api v0.32.3 - k8s.io/apiextensions-apiserver v0.32.2 + k8s.io/apiextensions-apiserver v0.32.3 k8s.io/apimachinery v0.32.3 k8s.io/apiserver v0.32.3 k8s.io/cli-runtime v0.32.3 k8s.io/client-go v0.32.3 k8s.io/component-base v0.32.3 k8s.io/klog/v2 v2.130.1 + k8s.io/kubernetes v1.32.3 k8s.io/utils v0.0.0-20241210054802-24370beab758 sigs.k8s.io/controller-runtime v0.20.2 sigs.k8s.io/yaml v1.4.0 ) +require ( + k8s.io/component-helpers v0.32.3 // indirect + k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect +) + require ( cel.dev/expr v0.19.0 // indirect dario.cat/mergo v1.0.1 // indirect @@ -215,7 +222,6 @@ require ( go.opentelemetry.io/otel/trace v1.33.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect golang.org/x/crypto v0.37.0 // indirect - golang.org/x/mod v0.24.0 // indirect golang.org/x/net v0.39.0 // indirect golang.org/x/oauth2 v0.27.0 // indirect golang.org/x/sys v0.32.0 // indirect @@ -232,8 +238,8 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect - k8s.io/kubectl v0.32.2 // indirect + k8s.io/controller-manager v0.32.3 // indirect + k8s.io/kubectl v0.32.3 // indirect oras.land/oras-go v1.2.5 // indirect 
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect @@ -241,3 +247,63 @@ require ( sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.5.0 // indirect ) + +replace k8s.io/api => k8s.io/api v0.32.3 + +replace k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.32.3 + +replace k8s.io/apimachinery => k8s.io/apimachinery v0.32.3 + +replace k8s.io/apiserver => k8s.io/apiserver v0.32.3 + +replace k8s.io/cli-runtime => k8s.io/cli-runtime v0.32.3 + +replace k8s.io/client-go => k8s.io/client-go v0.32.3 + +replace k8s.io/cloud-provider => k8s.io/cloud-provider v0.32.3 + +replace k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.32.3 + +replace k8s.io/code-generator => k8s.io/code-generator v0.32.3 + +replace k8s.io/component-base => k8s.io/component-base v0.32.3 + +replace k8s.io/component-helpers => k8s.io/component-helpers v0.32.3 + +replace k8s.io/controller-manager => k8s.io/controller-manager v0.32.3 + +replace k8s.io/cri-api => k8s.io/cri-api v0.32.3 + +replace k8s.io/cri-client => k8s.io/cri-client v0.32.3 + +replace k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.32.3 + +replace k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.32.3 + +replace k8s.io/endpointslice => k8s.io/endpointslice v0.32.3 + +replace k8s.io/externaljwt => k8s.io/externaljwt v0.32.3 + +replace k8s.io/kms => k8s.io/kms v0.32.3 + +replace k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.32.3 + +replace k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.32.3 + +replace k8s.io/kube-proxy => k8s.io/kube-proxy v0.32.3 + +replace k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.32.3 + +replace k8s.io/kubectl => k8s.io/kubectl v0.32.3 + +replace k8s.io/kubelet => k8s.io/kubelet v0.32.3 + +replace k8s.io/kubernetes => k8s.io/kubernetes v1.32.3 + +replace k8s.io/metrics => k8s.io/metrics v0.32.3 + +replace k8s.io/mount-utils => k8s.io/mount-utils v0.32.3 + +replace k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.32.3 + +replace k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.32.3 diff --git a/go.sum b/go.sum index 41c39ba01..f788a5f2b 100644 --- a/go.sum +++ b/go.sum @@ -771,8 +771,8 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= -k8s.io/apiextensions-apiserver v0.32.2 h1:2YMk285jWMk2188V2AERy5yDwBYrjgWYggscghPCvV4= -k8s.io/apiextensions-apiserver v0.32.2/go.mod h1:GPwf8sph7YlJT3H6aKUWtd0E+oyShk/YHWQHf/OOgCA= +k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY= +k8s.io/apiextensions-apiserver v0.32.3/go.mod h1:8YwcvVRMVzw0r1Stc7XfGAzB/SIVLunqApySV5V7Dss= k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= k8s.io/apiserver v0.32.3 h1:kOw2KBuHOA+wetX1MkmrxgBr648ksz653j26ESuWNY8= @@ -783,12 +783,18 @@ k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= k8s.io/component-base v0.32.3 h1:98WJvvMs3QZ2LYHBzvltFSeJjEx7t5+8s71P7M74u8k= k8s.io/component-base v0.32.3/go.mod 
h1:LWi9cR+yPAv7cu2X9rZanTiFKB2kHA+JjmhkKjCZRpI= +k8s.io/component-helpers v0.32.3 h1:9veHpOGTPLluqU4hAu5IPOwkOIZiGAJUhHndfVc5FT4= +k8s.io/component-helpers v0.32.3/go.mod h1:utTBXk8lhkJewBKNuNf32Xl3KT/0VV19DmiXU/SV4Ao= +k8s.io/controller-manager v0.32.3 h1:jBxZnQ24k6IMeWLyxWZmpa3QVS7ww+osAIzaUY/jqyc= +k8s.io/controller-manager v0.32.3/go.mod h1:out1L3DZjE/p7JG0MoMMIaQGWIkt3c+pKaswqSHgKsI= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8XWMxCxzQx42DY8QKYJrDLg= -k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas= -k8s.io/kubectl v0.32.2 h1:TAkag6+XfSBgkqK9I7ZvwtF0WVtUAvK8ZqTt+5zi1Us= -k8s.io/kubectl v0.32.2/go.mod h1:+h/NQFSPxiDZYX/WZaWw9fwYezGLISP0ud8nQKg+3g8= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= +k8s.io/kubectl v0.32.3 h1:VMi584rbboso+yjfv0d8uBHwwxbC438LKq+dXd5tOAI= +k8s.io/kubectl v0.32.3/go.mod h1:6Euv2aso5GKzo/UVMacV6C7miuyevpfI91SvBvV9Zdg= +k8s.io/kubernetes v1.32.3 h1:2A58BlNME8NwsMawmnM6InYo3Jf35Nw5G79q46kXwoA= +k8s.io/kubernetes v1.32.3/go.mod h1:GvhiBeolvSRzBpFlgM0z/Bbu3Oxs9w3P6XfEgYaMi8k= k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo= diff --git a/hack/tools/k8smaintainer/README.md b/hack/tools/k8smaintainer/README.md new file mode 100644 index 000000000..a8162704d --- /dev/null +++ b/hack/tools/k8smaintainer/README.md @@ -0,0 +1,43 @@ +# Kubernetes Staging Module Version Synchronization Tool + +## Purpose +This tool ensures that if `k8s.io/kubernetes` changes version in your `go.mod`, all related staging modules (e.g., `k8s.io/api`, `k8s.io/apimachinery`) are automatically pinned to the corresponding published version. Recent improvements include an environment variable override and refined logic for version resolution. + +## How It Works + +1. **Parsing and Filtering:** + - Reads and parses your `go.mod` file. + - Removes existing `replace` directives for `k8s.io/` modules to avoid stale mappings. + +2. **Determine Kubernetes Version:** + - **Environment Variable Override:** + If the environment variable `K8S_IO_K8S_VERSION` is set, its value is validated (using semver standards) and used as the target version for `k8s.io/kubernetes`. The tool then runs `go get k8s.io/kubernetes@` to update the dependency. + - **Default Behavior:** + If `K8S_IO_K8S_VERSION` is not set, the tool reads the version of `k8s.io/kubernetes` from the `go.mod` file. + +3. **Compute the Target Staging Version:** + - Converts a Kubernetes version in the form `v1.xx.yy` into the staging version format `v0.xx.yy`. + - If the target staging version is unavailable, the tool attempts to fall back to the previous patch version. + +4. **Updating Module Replace Directives:** + - Retrieves the full dependency graph using `go list -m -json all`. + - Identifies relevant `k8s.io/*` modules (skipping the main module and version-suffixed modules). + - Removes outdated `replace` directives (ignoring local path replacements). 
+ - Adds new `replace` directives to pin modules—including `k8s.io/kubernetes`—to the computed staging version. + +5. **Finalizing Changes:** + - Writes the updated `go.mod` file. + - Runs `go mod tidy` to clean up dependencies. + - Executes `go mod download k8s.io/kubernetes` to update `go.sum`. + - Logs any issues, such as modules remaining at an untagged version (`v0.0.0`), which may indicate upstream tagging problems. + +## Environment Variables + +- **K8S_IO_K8S_VERSION (optional):** + When set, this environment variable overrides the Kubernetes version found in `go.mod`. The tool validates this semver string, updates the dependency using `go get`, and processes modules accordingly. + +## Additional Notes + +- The tool ensures consistency across all `k8s.io/*` modules, even if they are not explicitly listed in `go.mod`. +- If a suitable staging version is not found, a warning is logged and the closest valid version is used. +- All operations are logged, which helps in troubleshooting and verifying the process. \ No newline at end of file diff --git a/hack/tools/k8smaintainer/main.go b/hack/tools/k8smaintainer/main.go new file mode 100644 index 000000000..978c884a6 --- /dev/null +++ b/hack/tools/k8smaintainer/main.go @@ -0,0 +1,410 @@ +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "io/fs" + "log" + "os" + "os/exec" + "path/filepath" + "sort" + "strings" + + "github.com/blang/semver/v4" + "golang.org/x/mod/modfile" + "golang.org/x/mod/module" +) + +const ( + k8sRepo = "k8s.io/kubernetes" + expectedMajorMinorParts = 2 + goModFilename = "go.mod" + goModFilePerms = fs.FileMode(0600) + minGoListVersionFields = 2 + minPatchNumberToDecrementFrom = 1 // We can only decrement patch if it's 1 or greater (to get 0 or greater) + k8sVersionEnvVar = "K8S_IO_K8S_VERSION" +) + +//nolint:gochecknoglobals +var goExe = "go" + +// readAndParseGoMod reads and parses the go.mod file. +func readAndParseGoMod(filename string) (*modfile.File, error) { + modBytes, err := os.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("error reading %s: %w", filename, err) + } + modF, err := modfile.Parse(filename, modBytes, nil) + if err != nil { + return nil, fmt.Errorf("error parsing %s: %w", filename, err) + } + return modF, nil +} + +// getK8sVersionFromEnv processes the version specified via environment variable. +// It validates the version and runs `go get` to update the dependency. +func getK8sVersionFromEnv(targetK8sVer string) (string, error) { + log.Printf("Found target %s version override from env var %s: %s", k8sRepo, k8sVersionEnvVar, targetK8sVer) + if _, err := semver.ParseTolerant(targetK8sVer); err != nil { + return "", fmt.Errorf("invalid semver specified in %s: %s (%w)", k8sVersionEnvVar, targetK8sVer, err) + } + // Update the go.mod file first + log.Printf("Running 'go get %s@%s' to update the main dependency...", k8sRepo, targetK8sVer) + getArgs := fmt.Sprintf("%s@%s", k8sRepo, targetK8sVer) + if _, err := runGoCommand("get", getArgs); err != nil { + return "", fmt.Errorf("error running 'go get %s': %w", getArgs, err) + } + return targetK8sVer, nil // Return the validated version +} + +// getK8sVersionFromMod reads the go.mod file to find the current version of k8s.io/kubernetes. +// It returns the version string if found, or an empty string (and nil error) if not found. 
+func getK8sVersionFromMod() (string, error) { + modF, err := readAndParseGoMod(goModFilename) + if err != nil { + return "", err // Propagate error from reading/parsing + } + + // Find k8s.io/kubernetes version + for _, req := range modF.Require { + if req.Mod.Path == k8sRepo { + log.Printf("Found existing %s version in %s: %s", k8sRepo, goModFilename, req.Mod.Version) + return req.Mod.Version, nil // Return found version + } + } + // Not found case - return empty string, no error (as per original logic) + log.Printf("INFO: %s not found in %s require block. Nothing to do.", k8sRepo, goModFilename) + return "", nil +} + +func main() { + log.SetFlags(0) + if os.Getenv("GOEXE") != "" { + goExe = os.Getenv("GOEXE") + } + + wd, err := os.Getwd() + if err != nil { + log.Fatalf("Error getting working directory: %v", err) + } + modRoot := findModRoot(wd) + if modRoot == "" { + log.Fatalf("Failed to find %s in %s or parent directories", goModFilename, wd) + } + if err := os.Chdir(modRoot); err != nil { + log.Fatalf("Error changing directory to %s: %v", modRoot, err) + } + log.Printf("Running in module root: %s", modRoot) + + var k8sVer string + + // Determine the target k8s version using helper functions + targetK8sVerEnv := os.Getenv(k8sVersionEnvVar) + if targetK8sVerEnv != "" { + // Process version from environment variable + k8sVer, err = getK8sVersionFromEnv(targetK8sVerEnv) + if err != nil { + log.Fatalf("Failed to process k8s version from environment variable %s: %v", k8sVersionEnvVar, err) + } + } else { + // Process version from go.mod file + k8sVer, err = getK8sVersionFromMod() + if err != nil { + log.Fatalf("Failed to get k8s version from %s: %v", goModFilename, err) + } + // Handle the "not found" case where getK8sVersionFromMod returns "", nil + if k8sVer == "" { + os.Exit(0) // Exit gracefully as requested + } + } + + // Calculate target staging version + k8sSemVer, err := semver.ParseTolerant(k8sVer) + if err != nil { + // This should ideally not happen if validation passed earlier, but check anyway. 
+ log.Fatalf("Invalid semver for %s: %s (%v)", k8sRepo, k8sVer, err) // Adjusted log format slightly + } + + if k8sSemVer.Major != 1 { + log.Fatalf("Expected k8s version %s to have major version 1", k8sVer) + } + targetSemVer := semver.Version{Major: 0, Minor: k8sSemVer.Minor, Patch: k8sSemVer.Patch} + // Prepend 'v' as expected by Go modules and the rest of the script logic + targetStagingVer := "v" + targetSemVer.String() + + // Validate the constructed staging version string + if _, err := semver.ParseTolerant(targetStagingVer); err != nil { + log.Fatalf("Calculated invalid staging semver: %s from k8s version %s (%v)", targetStagingVer, k8sVer, err) // Adjusted log format slightly + } + log.Printf("Target staging version calculated: %s", targetStagingVer) + + // Run `go list -m -json all` + type Module struct { + Path string + Version string + Replace *Module + Main bool + } + log.Println("Running 'go list -m -json all'...") + output, err := runGoCommand("list", "-m", "-json", "all") + if err != nil { + // Try downloading first if list fails + log.Println("'go list' failed, trying 'go mod download'...") + if _, downloadErr := runGoCommand("mod", "download"); downloadErr != nil { + log.Fatalf("Error running 'go mod download' after list failed: %v", downloadErr) + } + output, err = runGoCommand("list", "-m", "-json", "all") + if err != nil { + log.Fatalf("Error running 'go list -m -json all' even after download: %v", err) + } + } + + // Iterate, identify k8s.io/* staging modules, and determine version to pin + pins := make(map[string]string) // Module path -> version to pin + decoder := json.NewDecoder(bytes.NewReader(output)) + for decoder.More() { + var mod Module + if err := decoder.Decode(&mod); err != nil { + log.Fatalf("Error decoding go list output: %v", err) + } + + // Skip main module, non-k8s modules, k8s.io/kubernetes itself, and versioned modules like k8s.io/client-go/v2 + _, pathSuffix, _ := module.SplitPathVersion(mod.Path) // Check if path has a version suffix like /v2, /v3 etc. + if mod.Main || !strings.HasPrefix(mod.Path, "k8s.io/") || mod.Path == k8sRepo || pathSuffix != "" { + continue + } + + // Use replacement path if it exists, but skip local file replacements + effectivePath := mod.Path + if mod.Replace != nil { + // Heuristic: Assume module paths have a domain-like structure (e.g., 'xxx.yyy/zzz') in the first segment. + // Local paths usually don't (e.g., '../othermod', './local'). + parts := strings.SplitN(mod.Replace.Path, "/", 2) + if len(parts) > 0 && !strings.Contains(parts[0], ".") { + log.Printf("Skipping local replace: %s => %s", mod.Path, mod.Replace.Path) + continue + } + effectivePath = mod.Replace.Path + } + + // Check existence of target version, fallback to previous patch if needed + determinedVer, err := getLatestExistingVersion(effectivePath, targetStagingVer) + if err != nil { + log.Printf("WARNING: Error checking versions for %s: %v. Skipping pinning.", effectivePath, err) + continue + } + + if determinedVer == "" { + log.Printf("WARNING: Neither target version %s nor its predecessor found for %s. Skipping pinning.", targetStagingVer, effectivePath) + continue + } + + if determinedVer != targetStagingVer { + log.Printf("INFO: Target version %s not found for %s. 
Using existing predecessor version %s.", targetStagingVer, effectivePath, determinedVer) + } + + // map the original module path (as seen in the dependency graph) to the desired version for the 'replace' directive + pins[mod.Path] = determinedVer + } + + // Add k8s.io/kubernetes itself to the pins map (ensures it's covered by the replace logic) + pins[k8sRepo] = k8sVer + log.Printf("Identified %d k8s.io/* modules to manage.", len(pins)) + + // Parse go.mod again (needed in case `go list` or `go get` modified it) + modF, err := readAndParseGoMod(goModFilename) + if err != nil { + log.Fatal(err) // Error already formatted by helper function + } + + // Remove all existing k8s.io/* replaces that target other modules (not local paths) + log.Println("Removing existing k8s.io/* module replace directives...") + var replacesToRemove []string + for _, rep := range modF.Replace { + // Only remove replaces targeting k8s.io/* modules (not local replacements like ../staging) + // Check that the old path starts with k8s.io/ and the new path looks like a module path (contains '.') + if strings.HasPrefix(rep.Old.Path, "k8s.io/") && strings.Contains(rep.New.Path, ".") { + replacesToRemove = append(replacesToRemove, rep.Old.Path) + } else if strings.HasPrefix(rep.Old.Path, "k8s.io/") { + log.Printf("Note: Found existing non-module replace for %s, leaving untouched: %s => %s %s", rep.Old.Path, rep.Old.Path, rep.New.Path, rep.New.Version) + } + } + if len(replacesToRemove) > 0 { + for _, path := range replacesToRemove { + log.Printf("Removing replace for: %s", path) + // Drop replace expects oldPath and oldVersion. Version is empty for path-only replaces. + if err := modF.DropReplace(path, ""); err != nil { + // Tolerate errors if the replace was already somehow removed or structure changed + log.Printf("Note: Error dropping replace for %s (might be benign): %v", path, err) + } + } + } else { + log.Println("No existing k8s.io/* module replaces found to remove.") + } + + // Add new replace directives + log.Println("Adding determined replace directives...") + // Sort for deterministic output + sortedPaths := make([]string, 0, len(pins)) + for path := range pins { + sortedPaths = append(sortedPaths, path) + } + sort.Strings(sortedPaths) + + for _, path := range sortedPaths { + version := pins[path] + // Add replace for the module path itself (e.g., k8s.io/api => k8s.io/api v0.32.3) + if err := modF.AddReplace(path, "", path, version); err != nil { + log.Fatalf("Error adding replace for %s => %s %s: %v", path, path, version, err) + } + log.Printf("Adding replace: %s => %s %s", path, path, version) + } + + // Write go.mod + log.Println("Writing updated go.mod...") + modF.Cleanup() // Sort blocks, remove redundant directives etc. 
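+	// After Cleanup/Format the file carries one replace directive per managed module, e.g.
+	//   replace k8s.io/api => k8s.io/api v0.32.3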
+ newModBytes, err := modF.Format() + if err != nil { + log.Fatalf("Error formatting go.mod: %v", err) + } + if err := os.WriteFile(goModFilename, newModBytes, goModFilePerms); err != nil { + log.Fatalf("Error writing %s: %v", goModFilename, err) + } + + // Run `go mod tidy` + goVer := "" + if modF.Go != nil { // Ensure Go directive exists before accessing Version + goVer = modF.Go.Version + } + tidyArgs := []string{"mod", "tidy"} + if goVer != "" { + tidyArgs = append(tidyArgs, fmt.Sprintf("-go=%s", goVer)) + } + log.Printf("Running '%s %s'...", goExe, strings.Join(tidyArgs, " ")) + if _, err := runGoCommand(tidyArgs...); err != nil { + log.Fatalf("Error running 'go mod tidy': %v", err) + } + + // Run `go mod download k8s.io/kubernetes` + log.Printf("Running '%s mod download %s'...", goExe, k8sRepo) + if _, err := runGoCommand("mod", "download", k8sRepo); err != nil { + // This might not be fatal, could be network issues, but log it prominently + log.Printf("WARNING: Error running 'go mod download %s': %v", k8sRepo, err) + } + + log.Println("Successfully updated k8s dependencies.") +} + +// findModRoot searches for go.mod in dir and parent directories +func findModRoot(dir string) string { + for { + if _, err := os.Stat(filepath.Join(dir, goModFilename)); err == nil { + return dir + } + parent := filepath.Dir(dir) + if parent == dir { + return "" // Reached root + } + dir = parent + } +} + +// runGoCommand executes a go command and returns its stdout or an error +func runGoCommand(args ...string) ([]byte, error) { + cmd := exec.Command(goExe, args...) + cmd.Env = append(os.Environ(), "GO111MODULE=on") // Ensure module mode + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + log.Printf("Executing: %s %s", goExe, strings.Join(args, " ")) + if err := cmd.Run(); err != nil { + if stderr.Len() > 0 { + log.Printf("Stderr:\n%s", stderr.String()) + } + return nil, fmt.Errorf("command '%s %s' failed: %w", goExe, strings.Join(args, " "), err) + } + return stdout.Bytes(), nil +} + +// getModuleVersions retrieves the list of available versions for a module +func getModuleVersions(modulePath string) ([]string, error) { + output, err := runGoCommand("list", "-m", "-versions", modulePath) + // Combine output and error message for checking because 'go list' sometimes writes errors to stdout + combinedOutput := string(output) + if err != nil { + if !strings.Contains(combinedOutput, err.Error()) { + combinedOutput += err.Error() + } + } + + // Check if the error/output indicates "no matching versions" - this is not a fatal error for our logic + if strings.Contains(combinedOutput, "no matching versions") || strings.Contains(combinedOutput, "no required module provides package") { + log.Printf("INFO: No versions found for module %s via 'go list'.", modulePath) + return []string{}, nil // Return empty list, not an error + } + // If there was an actual error beyond "no matching versions" + if err != nil { + return nil, fmt.Errorf("error listing versions for %s: %w", modulePath, err) + } + + fields := strings.Fields(string(output)) + if len(fields) < minGoListVersionFields { + log.Printf("INFO: No versions listed for module %s (output: '%s')", modulePath, string(output)) + return []string{}, nil // No versions listed (e.g., just the module path) + } + return fields[1:], nil // First field is the module path +} + +// getLatestExistingVersion checks for targetVer and its predecessor, returning the latest one that exists +func getLatestExistingVersion(modulePath, targetVer string) 
(string, error) { + availableVersions, err := getModuleVersions(modulePath) + if err != nil { + return "", err + } + + foundTarget := false + for _, v := range availableVersions { + if v == targetVer { + foundTarget = true + break + } + } + + if foundTarget { + return targetVer, nil // Target version exists + } + + // Target not found, try previous patch version + targetSemVer, err := semver.ParseTolerant(targetVer) + if err != nil { + log.Printf("Could not parse target version %s for module %s: %v. Cannot determine predecessor.", targetVer, modulePath, err) + return "", nil // Cannot determine predecessor + } + + // Only try to decrement if the patch number is >= the minimum required to do so + if targetSemVer.Patch < uint64(minPatchNumberToDecrementFrom) { + log.Printf("Patch version %d is less than %d for %s, cannot determine predecessor.", targetSemVer.Patch, minPatchNumberToDecrementFrom, targetVer) + return "", nil // Cannot determine predecessor (e.g., target was v0.32.0) + } + + prevSemVer := targetSemVer + prevSemVer.Patch-- + prevPatchVer := "v" + prevSemVer.String() + + foundPrev := false + for _, v := range availableVersions { + if v == prevPatchVer { + foundPrev = true + break + } + } + + if foundPrev { + return prevPatchVer, nil // Predecessor version exists + } + + // Neither found + return "", nil +} diff --git a/internal/operator-controller/applier/helm.go b/internal/operator-controller/applier/helm.go index 60a03477a..9b47ba37e 100644 --- a/internal/operator-controller/applier/helm.go +++ b/internal/operator-controller/applier/helm.go @@ -7,6 +7,7 @@ import ( "fmt" "io" "io/fs" + "slices" "strings" "helm.sh/helm/v3/pkg/action" @@ -15,6 +16,7 @@ import ( "helm.sh/helm/v3/pkg/postrender" "helm.sh/helm/v3/pkg/release" "helm.sh/helm/v3/pkg/storage/driver" + rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" apimachyaml "k8s.io/apimachinery/pkg/util/yaml" "sigs.k8s.io/controller-runtime/pkg/client" @@ -23,7 +25,7 @@ import ( helmclient "github.com/operator-framework/helm-operator-plugins/pkg/client" ocv1 "github.com/operator-framework/operator-controller/api/v1" - "github.com/operator-framework/operator-controller/internal/operator-controller/features" + "github.com/operator-framework/operator-controller/internal/operator-controller/authorization" "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/preflights/crdupgradesafety" "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/util" ) @@ -56,6 +58,7 @@ type BundleToHelmChartFn func(rv1 fs.FS, installNamespace string, watchNamespace type Helm struct { ActionClientGetter helmclient.ActionClientGetter Preflights []Preflight + PreAuthorizer authorization.PreAuthorizer BundleToHelmChartFn BundleToHelmChartFn } @@ -79,6 +82,38 @@ func shouldSkipPreflight(ctx context.Context, preflight Preflight, ext *ocv1.Clu return false } +// runPreAuthorizationChecks performs pre-authorization checks for a Helm release +// it renders a client-only release, checks permissions using the PreAuthorizer +// and returns an error if authorization fails or required permissions are missing +func (h *Helm) runPreAuthorizationChecks(ctx context.Context, ext *ocv1.ClusterExtension, chart *chart.Chart, values chartutil.Values, post postrender.PostRenderer) error { + tmplRel, err := h.renderClientOnlyRelease(ctx, ext, chart, values, post) + if err != nil { + return fmt.Errorf("failed to get release state using client-only dry-run: %w", err) + } + + missingRules, 
authErr := h.PreAuthorizer.PreAuthorize(ctx, ext, strings.NewReader(tmplRel.Manifest)) + + var preAuthErrors []error + + if len(missingRules) > 0 { + var missingRuleDescriptions []string + for _, policyRules := range missingRules { + for _, rule := range policyRules.MissingRules { + missingRuleDescriptions = append(missingRuleDescriptions, ruleDescription(policyRules.Namespace, rule)) + } + } + slices.Sort(missingRuleDescriptions) + preAuthErrors = append(preAuthErrors, fmt.Errorf("service account requires the following permissions to manage cluster extension:\n %s", strings.Join(missingRuleDescriptions, "\n "))) + } + if authErr != nil { + preAuthErrors = append(preAuthErrors, fmt.Errorf("authorization evaluation error: %w", authErr)) + } + if len(preAuthErrors) > 0 { + return fmt.Errorf("pre-authorization failed: %v", preAuthErrors) + } + return nil +} + func (h *Helm) Apply(ctx context.Context, contentFS fs.FS, ext *ocv1.ClusterExtension, objectLabels map[string]string, storageLabels map[string]string) ([]client.Object, string, error) { chrt, err := h.buildHelmChart(contentFS, ext) if err != nil { @@ -86,18 +121,26 @@ func (h *Helm) Apply(ctx context.Context, contentFS fs.FS, ext *ocv1.ClusterExte } values := chartutil.Values{} + post := &postrenderer{ + labels: objectLabels, + } + + if h.PreAuthorizer != nil { + err := h.runPreAuthorizationChecks(ctx, ext, chrt, values, post) + if err != nil { + // Return the pre-authorization error directly + return nil, "", err + } + } + ac, err := h.ActionClientGetter.ActionClientFor(ctx, ext) if err != nil { return nil, "", err } - post := &postrenderer{ - labels: objectLabels, - } - rel, desiredRel, state, err := h.getReleaseState(ac, ext, chrt, values, post) if err != nil { - return nil, "", err + return nil, "", fmt.Errorf("failed to get release state using server-side dry-run: %w", err) } for _, preflight := range h.Preflights { @@ -164,6 +207,34 @@ func (h *Helm) buildHelmChart(bundleFS fs.FS, ext *ocv1.ClusterExtension) (*char return h.BundleToHelmChartFn(bundleFS, ext.Spec.Namespace, watchNamespace) } +func (h *Helm) renderClientOnlyRelease(ctx context.Context, ext *ocv1.ClusterExtension, chrt *chart.Chart, values chartutil.Values, post postrender.PostRenderer) (*release.Release, error) { + // We need to get a separate action client because our work below + // permanently modifies the underlying action.Configuration for ClientOnly mode. 
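+	// A ClientOnly dry-run install renders the chart templates without contacting the
+	// API server, so nothing is created or mutated while producing the manifest that
+	// the pre-authorizer inspects.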
+ ac, err := h.ActionClientGetter.ActionClientFor(ctx, ext) + if err != nil { + return nil, err + } + + isUpgrade := false + currentRelease, err := ac.Get(ext.GetName()) + if err != nil && !errors.Is(err, driver.ErrReleaseNotFound) { + return nil, err + } + if currentRelease != nil { + isUpgrade = true + } + + return ac.Install(ext.GetName(), ext.Spec.Namespace, chrt, values, func(i *action.Install) error { + i.DryRun = true + i.ReleaseName = ext.GetName() + i.Replace = true + i.ClientOnly = true + i.IncludeCRDs = true + i.IsUpgrade = isUpgrade + return nil + }, helmclient.AppendInstallPostRenderer(post)) +} + func (h *Helm) getReleaseState(cl helmclient.ActionInterface, ext *ocv1.ClusterExtension, chrt *chart.Chart, values chartutil.Values, post postrender.PostRenderer) (*release.Release, *release.Release, string, error) { currentRelease, err := cl.Get(ext.GetName()) if errors.Is(err, driver.ErrReleaseNotFound) { @@ -173,10 +244,6 @@ func (h *Helm) getReleaseState(cl helmclient.ActionInterface, ext *ocv1.ClusterE return nil }, helmclient.AppendInstallPostRenderer(post)) if err != nil { - if features.OperatorControllerFeatureGate.Enabled(features.PreflightPermissions) { - _ = struct{}{} // minimal no-op to satisfy linter - // probably need to break out this error as it's the one for helm dry-run as opposed to any returned later - } return nil, nil, StateError, err } return nil, desiredRelease, StateNeedsInstall, nil @@ -232,3 +299,25 @@ func (p *postrenderer) Run(renderedManifests *bytes.Buffer) (*bytes.Buffer, erro } return &buf, nil } + +func ruleDescription(ns string, rule rbacv1.PolicyRule) string { + var sb strings.Builder + sb.WriteString(fmt.Sprintf("Namespace:%q", ns)) + + if len(rule.APIGroups) > 0 { + sb.WriteString(fmt.Sprintf(" APIGroups:[%s]", strings.Join(slices.Sorted(slices.Values(rule.APIGroups)), ","))) + } + if len(rule.Resources) > 0 { + sb.WriteString(fmt.Sprintf(" Resources:[%s]", strings.Join(slices.Sorted(slices.Values(rule.Resources)), ","))) + } + if len(rule.ResourceNames) > 0 { + sb.WriteString(fmt.Sprintf(" ResourceNames:[%s]", strings.Join(slices.Sorted(slices.Values(rule.ResourceNames)), ","))) + } + if len(rule.Verbs) > 0 { + sb.WriteString(fmt.Sprintf(" Verbs:[%s]", strings.Join(slices.Sorted(slices.Values(rule.Verbs)), ","))) + } + if len(rule.NonResourceURLs) > 0 { + sb.WriteString(fmt.Sprintf(" NonResourceURLs:[%s]", strings.Join(slices.Sorted(slices.Values(rule.NonResourceURLs)), ","))) + } + return sb.String() +} diff --git a/internal/operator-controller/applier/helm_test.go b/internal/operator-controller/applier/helm_test.go index 5b0451789..b46991206 100644 --- a/internal/operator-controller/applier/helm_test.go +++ b/internal/operator-controller/applier/helm_test.go @@ -3,6 +3,7 @@ package applier_test import ( "context" "errors" + "io" "io/fs" "os" "testing" @@ -23,6 +24,7 @@ import ( ocv1 "github.com/operator-framework/operator-controller/api/v1" "github.com/operator-framework/operator-controller/internal/operator-controller/applier" + "github.com/operator-framework/operator-controller/internal/operator-controller/authorization" "github.com/operator-framework/operator-controller/internal/operator-controller/features" "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/convert" ) @@ -32,6 +34,28 @@ type mockPreflight struct { upgradeErr error } +type noOpPreAuthorizer struct{} + +func (p *noOpPreAuthorizer) PreAuthorize( + ctx context.Context, + ext *ocv1.ClusterExtension, + manifestReader io.Reader, +) 
([]authorization.ScopedPolicyRules, error) { + // No-op: always return an empty map and no error + return nil, nil +} + +type errorPreAuthorizer struct{} + +func (p *errorPreAuthorizer) PreAuthorize( + ctx context.Context, + ext *ocv1.ClusterExtension, + manifestReader io.Reader, +) ([]authorization.ScopedPolicyRules, error) { + // Always returns no missing rules and an error + return nil, errors.New("problem running preauthorization") +} + func (mp *mockPreflight) Install(context.Context, *release.Release) error { return mp.installErr } @@ -257,8 +281,6 @@ func TestApply_Installation(t *testing.T) { } func TestApply_InstallationWithPreflightPermissionsEnabled(t *testing.T) { - featuregatetesting.SetFeatureGateDuringTest(t, features.OperatorControllerFeatureGate, features.PreflightPermissions, true) - t.Run("fails during dry-run installation", func(t *testing.T) { mockAcg := &mockActionGetter{ getClientErr: driver.ErrReleaseNotFound, @@ -280,11 +302,16 @@ func TestApply_InstallationWithPreflightPermissionsEnabled(t *testing.T) { mockAcg := &mockActionGetter{ getClientErr: driver.ErrReleaseNotFound, installErr: errors.New("failed installing chart"), + desiredRel: &release.Release{ + Info: &release.Info{Status: release.StatusDeployed}, + Manifest: validManifest, + }, } mockPf := &mockPreflight{installErr: errors.New("failed during install pre-flight check")} helmApplier := applier.Helm{ ActionClientGetter: mockAcg, Preflights: []applier.Preflight{mockPf}, + PreAuthorizer: &noOpPreAuthorizer{}, BundleToHelmChartFn: convert.RegistryV1ToHelmChart, } @@ -295,20 +322,32 @@ func TestApply_InstallationWithPreflightPermissionsEnabled(t *testing.T) { require.Nil(t, objs) }) - t.Run("fails during installation", func(t *testing.T) { + t.Run("fails during installation because of pre-authorization failure", func(t *testing.T) { mockAcg := &mockActionGetter{ getClientErr: driver.ErrReleaseNotFound, - installErr: errors.New("failed installing chart"), + desiredRel: &release.Release{ + Info: &release.Info{Status: release.StatusDeployed}, + Manifest: validManifest, + }, } helmApplier := applier.Helm{ ActionClientGetter: mockAcg, + PreAuthorizer: &errorPreAuthorizer{}, BundleToHelmChartFn: convert.RegistryV1ToHelmChart, } - - objs, state, err := helmApplier.Apply(context.TODO(), validFS, testCE, testObjectLabels, testStorageLabels) + // Use a ClusterExtension with valid Spec fields. + validCE := &ocv1.ClusterExtension{ + Spec: ocv1.ClusterExtensionSpec{ + Namespace: "default", + ServiceAccount: ocv1.ServiceAccountReference{ + Name: "default", + }, + }, + } + objs, state, err := helmApplier.Apply(context.TODO(), validFS, validCE, testObjectLabels, testStorageLabels) require.Error(t, err) - require.ErrorContains(t, err, "installing chart") - require.Equal(t, applier.StateNeedsInstall, state) + require.ErrorContains(t, err, "problem running preauthorization") + require.Equal(t, "", state) require.Nil(t, objs) }) @@ -322,10 +361,21 @@ func TestApply_InstallationWithPreflightPermissionsEnabled(t *testing.T) { } helmApplier := applier.Helm{ ActionClientGetter: mockAcg, + PreAuthorizer: &noOpPreAuthorizer{}, BundleToHelmChartFn: convert.RegistryV1ToHelmChart, } - objs, state, err := helmApplier.Apply(context.TODO(), validFS, testCE, testObjectLabels, testStorageLabels) + // Use a ClusterExtension with valid Spec fields. 
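+		// The pre-authorization path renders a client-only release into ext.Spec.Namespace
+		// (and a real pre-authorizer derives the "system:serviceaccount:<namespace>:<name>"
+		// user from these fields), so they must be populated even with a no-op pre-authorizer.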
+ validCE := &ocv1.ClusterExtension{ + Spec: ocv1.ClusterExtensionSpec{ + Namespace: "default", + ServiceAccount: ocv1.ServiceAccountReference{ + Name: "default", + }, + }, + } + + objs, state, err := helmApplier.Apply(context.TODO(), validFS, validCE, testObjectLabels, testStorageLabels) require.NoError(t, err) require.Equal(t, applier.StateNeedsInstall, state) require.NotNil(t, objs) @@ -447,7 +497,6 @@ func TestApply_Upgrade(t *testing.T) { func TestApply_InstallationWithSingleOwnNamespaceInstallSupportEnabled(t *testing.T) { featuregatetesting.SetFeatureGateDuringTest(t, features.OperatorControllerFeatureGate, features.SingleOwnNamespaceInstallSupport, true) - t.Run("generates bundle resources using the configured watch namespace", func(t *testing.T) { var expectedWatchNamespace = "watch-namespace" diff --git a/internal/operator-controller/authorization/rbac.go b/internal/operator-controller/authorization/rbac.go new file mode 100644 index 000000000..f841c6561 --- /dev/null +++ b/internal/operator-controller/authorization/rbac.go @@ -0,0 +1,671 @@ +package authorization + +import ( + "context" + "errors" + "fmt" + "io" + "maps" + "regexp" + "slices" + "sort" + "strings" + + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + apimachyaml "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/endpoints/request" + rbacinternal "k8s.io/kubernetes/pkg/apis/rbac" + rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1" + rbacregistry "k8s.io/kubernetes/pkg/registry/rbac" + "k8s.io/kubernetes/pkg/registry/rbac/validation" + rbac "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac" + "sigs.k8s.io/controller-runtime/pkg/client" + + ocv1 "github.com/operator-framework/operator-controller/api/v1" +) + +type PreAuthorizer interface { + PreAuthorize(ctx context.Context, ext *ocv1.ClusterExtension, manifestReader io.Reader) ([]ScopedPolicyRules, error) +} + +type ScopedPolicyRules struct { + Namespace string + MissingRules []rbacv1.PolicyRule +} + +var objectVerbs = []string{"get", "patch", "update", "delete"} + +// Here we are splitting collection verbs based on required scope +// NB: this split is tightly coupled to the requirements of the contentmanager, specifically +// its need for cluster-scoped list/watch permissions. +// TODO: We are accepting this coupling for now, but plan to decouple +// TODO: link for above https://github.com/operator-framework/operator-controller/issues/1911 +var namespacedCollectionVerbs = []string{"create"} +var clusterCollectionVerbs = []string{"list", "watch"} + +type rbacPreAuthorizer struct { + authorizer authorizer.Authorizer + ruleResolver validation.AuthorizationRuleResolver + restMapper meta.RESTMapper +} + +func NewRBACPreAuthorizer(cl client.Client) PreAuthorizer { + return &rbacPreAuthorizer{ + authorizer: newRBACAuthorizer(cl), + ruleResolver: newRBACRulesResolver(cl), + restMapper: cl.RESTMapper(), + } +} + +// PreAuthorize validates whether the current user/request satisfies the necessary permissions +// as defined by the RBAC policy. 
It examines the user’s roles, resource identifiers, and +// the intended action to determine if the operation is allowed. +// +// Return Value: +// - nil: indicates that the authorization check passed and the operation is permitted. +// - non-nil error: indicates that an error occurred during the permission evaluation process +// (for example, a failure decoding the manifest or other internal issues). If the evaluation +// completes successfully but identifies missing rules, then a nil error is returned along with +// the list (or slice) of missing rules. Note that in some cases the error may encapsulate multiple +// evaluation failures +func (a *rbacPreAuthorizer) PreAuthorize(ctx context.Context, ext *ocv1.ClusterExtension, manifestReader io.Reader) ([]ScopedPolicyRules, error) { + dm, err := a.decodeManifest(manifestReader) + if err != nil { + return nil, err + } + manifestManager := &user.DefaultInfo{Name: fmt.Sprintf("system:serviceaccount:%s:%s", ext.Spec.Namespace, ext.Spec.ServiceAccount.Name)} + attributesRecords := dm.asAuthorizationAttributesRecordsForUser(manifestManager, ext) + + var preAuthEvaluationErrors []error + missingRules, err := a.authorizeAttributesRecords(ctx, attributesRecords) + if err != nil { + preAuthEvaluationErrors = append(preAuthEvaluationErrors, err) + } + + ec := a.escalationCheckerFor(dm) + + var parseErrors []error + for _, obj := range dm.rbacObjects() { + if err := ec.checkEscalation(ctx, manifestManager, obj); err != nil { + result, err := parseEscalationErrorForMissingRules(err) + missingRules[obj.GetNamespace()] = append(missingRules[obj.GetNamespace()], result.MissingRules...) + preAuthEvaluationErrors = append(preAuthEvaluationErrors, result.ResolutionErrors) + parseErrors = append(parseErrors, err) + } + } + allMissingPolicyRules := make([]ScopedPolicyRules, 0, len(missingRules)) + + for ns, nsMissingRules := range missingRules { + // NOTE: Although CompactRules is defined to return an error, its current implementation + // never produces a non-nil error. This is because all operations within the function are + // designed to succeed under current conditions. In the future, if more complex rule validations + // are introduced, this behavior may change and proper error handling will be required. + if compactMissingRules, err := validation.CompactRules(nsMissingRules); err == nil { + missingRules[ns] = compactMissingRules + } + + missingRulesWithDeduplicatedVerbs := make([]rbacv1.PolicyRule, 0, len(missingRules[ns])) + for _, rule := range missingRules[ns] { + verbSet := sets.New[string](rule.Verbs...) 
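+			// A wildcard verb subsumes all others, so collapse the set to just "*";
+			// otherwise report the de-duplicated verbs in sorted order.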
+ if verbSet.Has("*") { + rule.Verbs = []string{"*"} + } else { + rule.Verbs = sets.List(verbSet) + } + missingRulesWithDeduplicatedVerbs = append(missingRulesWithDeduplicatedVerbs, rule) + } + + sortableRules := rbacv1helpers.SortableRuleSlice(missingRulesWithDeduplicatedVerbs) + + sort.Sort(sortableRules) + allMissingPolicyRules = append(allMissingPolicyRules, ScopedPolicyRules{Namespace: ns, MissingRules: sortableRules}) + } + + // sort allMissingPolicyRules alphabetically by namespace + slices.SortFunc(allMissingPolicyRules, func(a, b ScopedPolicyRules) int { + return strings.Compare(a.Namespace, b.Namespace) + }) + + var errs []error + if parseErr := errors.Join(parseErrors...); parseErr != nil { + errs = append(errs, fmt.Errorf("failed to parse escalation check error strings: %v", parseErr)) + } + if len(preAuthEvaluationErrors) > 0 { + errs = append(errs, fmt.Errorf("failed to resolve or evaluate permissions: %v", errors.Join(preAuthEvaluationErrors...))) + } + if len(errs) > 0 { + return allMissingPolicyRules, fmt.Errorf("missing rules may be incomplete: %w", errors.Join(errs...)) + } + return allMissingPolicyRules, nil +} + +func (a *rbacPreAuthorizer) escalationCheckerFor(dm *decodedManifest) escalationChecker { + ec := escalationChecker{ + authorizer: a.authorizer, + ruleResolver: a.ruleResolver, + extraClusterRoles: dm.clusterRoles, + extraRoles: dm.roles, + } + return ec +} + +func (a *rbacPreAuthorizer) decodeManifest(manifestReader io.Reader) (*decodedManifest, error) { + dm := &decodedManifest{ + gvrs: map[schema.GroupVersionResource][]types.NamespacedName{}, + clusterRoles: map[client.ObjectKey]rbacv1.ClusterRole{}, + roles: map[client.ObjectKey]rbacv1.Role{}, + clusterRoleBindings: map[client.ObjectKey]rbacv1.ClusterRoleBinding{}, + roleBindings: map[client.ObjectKey]rbacv1.RoleBinding{}, + } + var ( + i int + errs []error + decoder = apimachyaml.NewYAMLOrJSONDecoder(manifestReader, 1024) + ) + for { + var uObj unstructured.Unstructured + err := decoder.Decode(&uObj) + if errors.Is(err, io.EOF) { + break + } + if err != nil { + errs = append(errs, fmt.Errorf("could not decode object %d in manifest: %w", i, err)) + continue + } + gvk := uObj.GroupVersionKind() + restMapping, err := a.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + var objName string + if name := uObj.GetName(); name != "" { + objName = fmt.Sprintf(" (name: %s)", name) + } + + errs = append( + errs, + fmt.Errorf("could not get REST mapping for object %d in manifest with GVK %s%s: %w", i, gvk, objName, err), + ) + continue + } + + gvr := restMapping.Resource + dm.gvrs[gvr] = append(dm.gvrs[gvr], client.ObjectKeyFromObject(&uObj)) + + switch restMapping.Resource.GroupResource() { + case schema.GroupResource{Group: rbacv1.GroupName, Resource: "clusterroles"}: + obj := &rbacv1.ClusterRole{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(uObj.UnstructuredContent(), obj); err != nil { + errs = append(errs, fmt.Errorf("could not decode object %d in manifest as ClusterRole: %w", i, err)) + continue + } + dm.clusterRoles[client.ObjectKeyFromObject(obj)] = *obj + case schema.GroupResource{Group: rbacv1.GroupName, Resource: "clusterrolebindings"}: + obj := &rbacv1.ClusterRoleBinding{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(uObj.UnstructuredContent(), obj); err != nil { + errs = append(errs, fmt.Errorf("could not decode object %d in manifest as ClusterRoleBinding: %w", i, err)) + continue + } + dm.clusterRoleBindings[client.ObjectKeyFromObject(obj)] 
= *obj + case schema.GroupResource{Group: rbacv1.GroupName, Resource: "roles"}: + obj := &rbacv1.Role{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(uObj.UnstructuredContent(), obj); err != nil { + errs = append(errs, fmt.Errorf("could not decode object %d in manifest as Role: %w", i, err)) + continue + } + dm.roles[client.ObjectKeyFromObject(obj)] = *obj + case schema.GroupResource{Group: rbacv1.GroupName, Resource: "rolebindings"}: + obj := &rbacv1.RoleBinding{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(uObj.UnstructuredContent(), obj); err != nil { + errs = append(errs, fmt.Errorf("could not decode object %d in manifest as RoleBinding: %w", i, err)) + continue + } + dm.roleBindings[client.ObjectKeyFromObject(obj)] = *obj + } + } + if len(errs) > 0 { + return nil, errors.Join(errs...) + } + return dm, nil +} + +func (a *rbacPreAuthorizer) authorizeAttributesRecords(ctx context.Context, attributesRecords []authorizer.AttributesRecord) (map[string][]rbacv1.PolicyRule, error) { + var ( + missingRules = map[string][]rbacv1.PolicyRule{} + errs []error + ) + for _, ar := range attributesRecords { + allow, err := a.attributesAllowed(ctx, ar) + if err != nil { + errs = append(errs, err) + continue + } + if !allow { + missingRules[ar.Namespace] = append(missingRules[ar.Namespace], policyRuleFromAttributesRecord(ar)) + } + } + return missingRules, errors.Join(errs...) +} + +func (a *rbacPreAuthorizer) attributesAllowed(ctx context.Context, attributesRecord authorizer.AttributesRecord) (bool, error) { + decision, reason, err := a.authorizer.Authorize(ctx, attributesRecord) + if err != nil { + if reason != "" { + return false, fmt.Errorf("%s: %w", reason, err) + } + return false, err + } + return decision == authorizer.DecisionAllow, nil +} + +func policyRuleFromAttributesRecord(attributesRecord authorizer.AttributesRecord) rbacv1.PolicyRule { + pr := rbacv1.PolicyRule{} + if attributesRecord.Verb != "" { + pr.Verbs = []string{attributesRecord.Verb} + } + if !attributesRecord.ResourceRequest { + pr.NonResourceURLs = []string{attributesRecord.Path} + return pr + } + + pr.APIGroups = []string{attributesRecord.APIGroup} + if attributesRecord.Name != "" { + pr.ResourceNames = []string{attributesRecord.Name} + } + + r := attributesRecord.Resource + if attributesRecord.Subresource != "" { + r += "/" + attributesRecord.Subresource + } + pr.Resources = []string{r} + + return pr +} + +type decodedManifest struct { + gvrs map[schema.GroupVersionResource][]types.NamespacedName + clusterRoles map[client.ObjectKey]rbacv1.ClusterRole + roles map[client.ObjectKey]rbacv1.Role + clusterRoleBindings map[client.ObjectKey]rbacv1.ClusterRoleBinding + roleBindings map[client.ObjectKey]rbacv1.RoleBinding +} + +func (dm *decodedManifest) rbacObjects() []client.Object { + objects := make([]client.Object, 0, len(dm.clusterRoles)+len(dm.roles)+len(dm.clusterRoleBindings)+len(dm.roleBindings)) + for obj := range maps.Values(dm.clusterRoles) { + objects = append(objects, &obj) + } + for obj := range maps.Values(dm.roles) { + objects = append(objects, &obj) + } + for obj := range maps.Values(dm.clusterRoleBindings) { + objects = append(objects, &obj) + } + for obj := range maps.Values(dm.roleBindings) { + objects = append(objects, &obj) + } + return objects +} + +func (dm *decodedManifest) asAuthorizationAttributesRecordsForUser(manifestManager user.Info, ext *ocv1.ClusterExtension) []authorizer.AttributesRecord { + var attributeRecords []authorizer.AttributesRecord + + for gvr, keys 
:= range dm.gvrs { + namespaces := sets.New[string]() + for _, k := range keys { + namespaces.Insert(k.Namespace) + // generate records for object-specific verbs (get, update, patch, delete) in their respective namespaces + for _, v := range objectVerbs { + attributeRecords = append(attributeRecords, authorizer.AttributesRecord{ + User: manifestManager, + Namespace: k.Namespace, + Name: k.Name, + APIGroup: gvr.Group, + APIVersion: gvr.Version, + Resource: gvr.Resource, + ResourceRequest: true, + Verb: v, + }) + } + } + // generate records for namespaced collection verbs (create) for each relevant namespace + for _, ns := range sets.List(namespaces) { + for _, v := range namespacedCollectionVerbs { + attributeRecords = append(attributeRecords, authorizer.AttributesRecord{ + User: manifestManager, + Namespace: ns, + APIGroup: gvr.Group, + APIVersion: gvr.Version, + Resource: gvr.Resource, + ResourceRequest: true, + Verb: v, + }) + } + } + // generate records for cluster-scoped collection verbs (list, watch) required by contentmanager + for _, v := range clusterCollectionVerbs { + attributeRecords = append(attributeRecords, authorizer.AttributesRecord{ + User: manifestManager, + Namespace: corev1.NamespaceAll, // check cluster scope + APIGroup: gvr.Group, + APIVersion: gvr.Version, + Resource: gvr.Resource, + ResourceRequest: true, + Verb: v, + }) + } + + for _, verb := range []string{"update"} { + attributeRecords = append(attributeRecords, authorizer.AttributesRecord{ + User: manifestManager, + Name: ext.Name, + APIGroup: ext.GroupVersionKind().Group, + APIVersion: ext.GroupVersionKind().Version, + Resource: "clusterextensions/finalizers", + ResourceRequest: true, + Verb: verb, + }) + } + } + return attributeRecords +} + +func newRBACAuthorizer(cl client.Client) authorizer.Authorizer { + rg := &rbacGetter{cl: cl} + return rbac.New(rg, rg, rg, rg) +} + +type rbacGetter struct { + cl client.Client +} + +func (r rbacGetter) ListClusterRoleBindings(ctx context.Context) ([]*rbacv1.ClusterRoleBinding, error) { + var clusterRoleBindingsList rbacv1.ClusterRoleBindingList + if err := r.cl.List(ctx, &clusterRoleBindingsList); err != nil { + return nil, err + } + return toPtrSlice(clusterRoleBindingsList.Items), nil +} + +func (r rbacGetter) GetClusterRole(ctx context.Context, name string) (*rbacv1.ClusterRole, error) { + var clusterRole rbacv1.ClusterRole + if err := r.cl.Get(ctx, client.ObjectKey{Name: name}, &clusterRole); err != nil { + return nil, err + } + return &clusterRole, nil +} + +func (r rbacGetter) ListRoleBindings(ctx context.Context, namespace string) ([]*rbacv1.RoleBinding, error) { + var roleBindingsList rbacv1.RoleBindingList + if err := r.cl.List(ctx, &roleBindingsList, client.InNamespace(namespace)); err != nil { + return nil, err + } + return toPtrSlice(roleBindingsList.Items), nil +} + +func (r rbacGetter) GetRole(ctx context.Context, namespace, name string) (*rbacv1.Role, error) { + var role rbacv1.Role + if err := r.cl.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, &role); err != nil { + return nil, err + } + return &role, nil +} + +func newRBACRulesResolver(cl client.Client) validation.AuthorizationRuleResolver { + rg := &rbacGetter{cl: cl} + return validation.NewDefaultRuleResolver(rg, rg, rg, rg) +} + +type escalationChecker struct { + authorizer authorizer.Authorizer + ruleResolver validation.AuthorizationRuleResolver + extraRoles map[types.NamespacedName]rbacv1.Role + extraClusterRoles map[types.NamespacedName]rbacv1.ClusterRole +} + +func (ec 
*escalationChecker) checkEscalation(ctx context.Context, manifestManager user.Info, obj client.Object) error { + ctx = request.WithUser(request.WithNamespace(ctx, obj.GetNamespace()), manifestManager) + switch v := obj.(type) { + case *rbacv1.Role: + ctx = request.WithRequestInfo(ctx, &request.RequestInfo{APIGroup: rbacv1.GroupName, Resource: "roles", IsResourceRequest: true}) + return ec.checkRoleEscalation(ctx, v) + case *rbacv1.RoleBinding: + ctx = request.WithRequestInfo(ctx, &request.RequestInfo{APIGroup: rbacv1.GroupName, Resource: "rolebindings", IsResourceRequest: true}) + return ec.checkRoleBindingEscalation(ctx, v) + case *rbacv1.ClusterRole: + ctx = request.WithRequestInfo(ctx, &request.RequestInfo{APIGroup: rbacv1.GroupName, Resource: "clusterroles", IsResourceRequest: true}) + return ec.checkClusterRoleEscalation(ctx, v) + case *rbacv1.ClusterRoleBinding: + ctx = request.WithRequestInfo(ctx, &request.RequestInfo{APIGroup: rbacv1.GroupName, Resource: "clusterrolebindings", IsResourceRequest: true}) + return ec.checkClusterRoleBindingEscalation(ctx, v) + default: + return fmt.Errorf("unknown object type %T", v) + } +} + +func (ec *escalationChecker) checkClusterRoleEscalation(ctx context.Context, clusterRole *rbacv1.ClusterRole) error { + if rbacregistry.EscalationAllowed(ctx) || rbacregistry.RoleEscalationAuthorized(ctx, ec.authorizer) { + return nil + } + + // to set the aggregation rule, since it can gather anything, requires * on *.* + if hasAggregationRule(clusterRole) { + if err := validation.ConfirmNoEscalation(ctx, ec.ruleResolver, fullAuthority); err != nil { + return fmt.Errorf("must have cluster-admin privileges to use an aggregationRule: %w", err) + } + } + + if err := validation.ConfirmNoEscalation(ctx, ec.ruleResolver, clusterRole.Rules); err != nil { + return err + } + return nil +} + +func (ec *escalationChecker) checkClusterRoleBindingEscalation(ctx context.Context, clusterRoleBinding *rbacv1.ClusterRoleBinding) error { + if rbacregistry.EscalationAllowed(ctx) { + return nil + } + + roleRef := rbacinternal.RoleRef{} + err := rbacv1helpers.Convert_v1_RoleRef_To_rbac_RoleRef(&clusterRoleBinding.RoleRef, &roleRef, nil) + if err != nil { + return err + } + + if rbacregistry.BindingAuthorized(ctx, roleRef, metav1.NamespaceNone, ec.authorizer) { + return nil + } + + rules, err := ec.ruleResolver.GetRoleReferenceRules(ctx, clusterRoleBinding.RoleRef, metav1.NamespaceNone) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + + if clusterRoleBinding.RoleRef.Kind == "ClusterRole" { + if manifestClusterRole, ok := ec.extraClusterRoles[types.NamespacedName{Name: clusterRoleBinding.RoleRef.Name}]; ok { + rules = append(rules, manifestClusterRole.Rules...) 
+ } + } + + if err := validation.ConfirmNoEscalation(ctx, ec.ruleResolver, rules); err != nil { + return err + } + return nil +} + +func (ec *escalationChecker) checkRoleEscalation(ctx context.Context, role *rbacv1.Role) error { + if rbacregistry.EscalationAllowed(ctx) || rbacregistry.RoleEscalationAuthorized(ctx, ec.authorizer) { + return nil + } + + rules := role.Rules + if err := validation.ConfirmNoEscalation(ctx, ec.ruleResolver, rules); err != nil { + return err + } + return nil +} + +func (ec *escalationChecker) checkRoleBindingEscalation(ctx context.Context, roleBinding *rbacv1.RoleBinding) error { + if rbacregistry.EscalationAllowed(ctx) { + return nil + } + + roleRef := rbacinternal.RoleRef{} + err := rbacv1helpers.Convert_v1_RoleRef_To_rbac_RoleRef(&roleBinding.RoleRef, &roleRef, nil) + if err != nil { + return err + } + if rbacregistry.BindingAuthorized(ctx, roleRef, roleBinding.Namespace, ec.authorizer) { + return nil + } + + rules, err := ec.ruleResolver.GetRoleReferenceRules(ctx, roleBinding.RoleRef, roleBinding.Namespace) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + + switch roleRef.Kind { + case "ClusterRole": + if manifestClusterRole, ok := ec.extraClusterRoles[types.NamespacedName{Name: roleBinding.RoleRef.Name}]; ok { + rules = append(rules, manifestClusterRole.Rules...) + } + case "Role": + if manifestRole, ok := ec.extraRoles[types.NamespacedName{Namespace: roleBinding.Namespace, Name: roleBinding.RoleRef.Name}]; ok { + rules = append(rules, manifestRole.Rules...) + } + } + + if err := validation.ConfirmNoEscalation(ctx, ec.ruleResolver, rules); err != nil { + return err + } + return nil +} + +var fullAuthority = []rbacv1.PolicyRule{ + {Verbs: []string{"*"}, APIGroups: []string{"*"}, Resources: []string{"*"}}, + {Verbs: []string{"*"}, NonResourceURLs: []string{"*"}}, +} + +var ( + errRegex = regexp.MustCompile(`(?s)^user ".*" \(groups=.*\) is attempting to grant RBAC permissions not currently held:\n([^;]+)(?:; resolution errors: (.*))?$`) + ruleRegex = regexp.MustCompile(`{([^}]*)}`) + itemRegex = regexp.MustCompile(`"[^"]*"`) +) + +type parseResult struct { + MissingRules []rbacv1.PolicyRule + ResolutionErrors error +} + +// TODO: Investigate replacing this regex parsing with structured error handling once there are +// +// structured RBAC errors introduced by https://github.com/kubernetes/kubernetes/pull/130955. +// +// parseEscalationErrorForMissingRules attempts to extract specific RBAC permissions +// that were denied due to escalation prevention from a given error's text. +// It returns the list of extracted PolicyRules and an error detailing the escalation attempt +// and any resolution errors found. +// Note: If parsing is successful, the returned error is derived from the *input* error's +// message, not an error encountered during the parsing process itself. If parsing fails due to an unexpected +// error format, a distinct parsing error is returned. 
+func parseEscalationErrorForMissingRules(ecError error) (*parseResult, error) { + var ( + result = &parseResult{} + parseErrors []error + ) + + // errRegex captures the missing permissions and optionally resolution errors from an escalation error message + // Group 1: The list of missing permissions + // Group 2: Optional resolution errors + errString := ecError.Error() + errMatches := errRegex.FindStringSubmatch(errString) // Use FindStringSubmatch for single match expected + + // Check if the main error message pattern was matched and captured the required groups + // We expect at least 3 elements: full match, missing permissions, resolution errors (can be empty) + if len(errMatches) != 3 { + // The error format doesn't match the expected pattern for escalation errors + return &parseResult{}, fmt.Errorf("unexpected format of escalation check error string: %q", errString) + } + missingPermissionsStr := errMatches[1] + if resolutionErrorsStr := errMatches[2]; resolutionErrorsStr != "" { + result.ResolutionErrors = errors.New(resolutionErrorsStr) + } + + // Extract permissions using permRegex from the captured permissions string (Group 1) + for _, rule := range ruleRegex.FindAllString(missingPermissionsStr, -1) { + pr, err := parseCompactRuleString(rule) + if err != nil { + parseErrors = append(parseErrors, err) + continue + } + result.MissingRules = append(result.MissingRules, *pr) + } + // Return the extracted permissions and the constructed error message + return result, errors.Join(parseErrors...) +} + +func parseCompactRuleString(rule string) (*rbacv1.PolicyRule, error) { + var fields []string + if ruleText := rule[1 : len(rule)-1]; ruleText != "" { + fields = mapSlice(strings.Split(ruleText, ","), func(in string) string { + return strings.TrimSpace(in) + }) + } + var pr rbacv1.PolicyRule + for _, item := range fields { + field, valuesStr, ok := strings.Cut(item, ":") + if !ok { + return nil, fmt.Errorf("unexpected item %q: expected :[...]", item) + } + values := mapSlice(itemRegex.FindAllString(valuesStr, -1), func(in string) string { + return strings.Trim(in, `"`) + }) + switch field { + case "APIGroups": + pr.APIGroups = values + case "Resources": + pr.Resources = values + case "ResourceNames": + pr.ResourceNames = values + case "NonResourceURLs": + pr.NonResourceURLs = values + case "Verbs": + pr.Verbs = values + default: + return nil, fmt.Errorf("unexpected item %q: unknown field: %q", item, field) + } + } + return &pr, nil +} + +func hasAggregationRule(clusterRole *rbacv1.ClusterRole) bool { + // Currently, an aggregation rule is considered present only if it has one or more selectors. + // An empty slice of ClusterRoleSelectors means no selectors were provided, + // which does NOT imply "match all." 
+ return clusterRole.AggregationRule != nil && len(clusterRole.AggregationRule.ClusterRoleSelectors) > 0 +} + +func mapSlice[I, O any](in []I, f func(I) O) []O { + out := make([]O, len(in)) + for i := range in { + out[i] = f(in[i]) + } + return out +} + +func toPtrSlice[V any](in []V) []*V { + out := make([]*V, len(in)) + for i := range in { + out[i] = &in[i] + } + return out +} diff --git a/internal/operator-controller/authorization/rbac_test.go b/internal/operator-controller/authorization/rbac_test.go new file mode 100644 index 000000000..c081377ac --- /dev/null +++ b/internal/operator-controller/authorization/rbac_test.go @@ -0,0 +1,560 @@ +package authorization + +import ( + "context" + "errors" + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/meta/testrestmapper" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/kubernetes/pkg/registry/rbac/validation" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + ocv1 "github.com/operator-framework/operator-controller/api/v1" +) + +var ( + testManifest = `apiVersion: v1 +kind: Service +metadata: + name: test-service + namespace: test-namespace +spec: + clusterIP: None +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: test-extension-role + namespace: test-namespace +rules: +- apiGroups: ["*"] + resources: [serviceaccounts] + verbs: [watch] +- apiGroups: ["*"] + resources: [certificates] + verbs: [create] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: test-extension-binding + namespace: test-namespace +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: test-extension-role +subjects: +- kind: ServiceAccount + name: test-serviceaccount + namespace: test-namespace + ` + + testManifestMultiNamespace = `apiVersion: v1 +kind: Service +metadata: + name: test-service + namespace: test-namespace +spec: + clusterIP: None +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: test-extension-role + namespace: test-namespace +rules: +- apiGroups: ["*"] + resources: [serviceaccounts] + verbs: [watch] +- apiGroups: ["*"] + resources: [certificates] + verbs: [create] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: test-extension-binding + namespace: test-namespace +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: test-extension-role +subjects: +- kind: ServiceAccount + name: test-serviceaccount + namespace: test-namespace +--- +kind: Service +metadata: + name: test-service + namespace: a-test-namespace +spec: + clusterIP: None +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: test-extension-role + namespace: a-test-namespace +rules: +- apiGroups: ["*"] + resources: [serviceaccounts] + verbs: [watch] +- apiGroups: ["*"] + resources: [certificates] + verbs: [create] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: test-extension-binding + namespace: a-test-namespace +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: test-extension-role +subjects: +- kind: ServiceAccount + name: test-serviceaccount + namespace: a-test-namespace + ` + + saName = "test-serviceaccount" + ns = "test-namespace" + exampleClusterExtension = 
ocv1.ClusterExtension{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cluster-extension"}, + Spec: ocv1.ClusterExtensionSpec{ + Namespace: ns, + ServiceAccount: ocv1.ServiceAccountReference{ + Name: saName, + }, + }, + } + + objects = []client.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-namespace", + }, + }, + &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "admin-clusterrole-binding", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: saName, + Namespace: ns, + }, + }, + RoleRef: rbacv1.RoleRef{ + Name: "admin-clusterrole", + Kind: "ClusterRole", + }, + }, + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-serviceaccount", + Namespace: "test-namespace", + }, + }, + } + + privilegedClusterRole = &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "admin-clusterrole", + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"*"}, + Resources: []string{"*"}, + Verbs: []string{"*"}, + }, + }, + } + + limitedClusterRole = &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "admin-clusterrole", + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{""}, + Verbs: []string{""}, + }, + }, + } + + escalatingClusterRole = &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "admin-clusterrole", + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"*"}, + Resources: []string{"serviceaccounts", "services", "clusterextensions/finalizers"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{"rbac.authorization.k8s.io"}, + Resources: []string{"roles", "clusterroles", "rolebindings", "clusterrolebindings"}, + Verbs: []string{"get", "patch", "watch", "list", "create", "update", "delete", "escalate", "bind"}, + }, + }, + } +) + +func setupFakeClient(role client.Object) client.Client { + s := runtime.NewScheme() + _ = corev1.AddToScheme(s) + _ = rbacv1.AddToScheme(s) + restMapper := testrestmapper.TestOnlyStaticRESTMapper(s) + // restMapper := meta.NewDefaultRESTMapper(nil) + fakeClientBuilder := fake.NewClientBuilder().WithObjects(append(objects, role)...).WithRESTMapper(restMapper) + return fakeClientBuilder.Build() +} + +func TestPreAuthorize_Success(t *testing.T) { + t.Run("preauthorize succeeds with no missing rbac rules", func(t *testing.T) { + fakeClient := setupFakeClient(privilegedClusterRole) + preAuth := NewRBACPreAuthorizer(fakeClient) + missingRules, err := preAuth.PreAuthorize(context.TODO(), &exampleClusterExtension, strings.NewReader(testManifest)) + require.NoError(t, err) + require.Equal(t, []ScopedPolicyRules{}, missingRules) + }) +} + +func TestPreAuthorize_Failure(t *testing.T) { + t.Run("preauthorize fails with missing rbac rules", func(t *testing.T) { + fakeClient := setupFakeClient(limitedClusterRole) + preAuth := NewRBACPreAuthorizer(fakeClient) + missingRules, err := preAuth.PreAuthorize(context.TODO(), &exampleClusterExtension, strings.NewReader(testManifest)) + require.Error(t, err) + require.NotEqual(t, []ScopedPolicyRules{}, missingRules) + }) +} + +func TestPreAuthorizeMultiNamespace_Failure(t *testing.T) { + t.Run("preauthorize fails with missing rbac rules in multiple namespaces", func(t *testing.T) { + fakeClient := setupFakeClient(limitedClusterRole) + preAuth := NewRBACPreAuthorizer(fakeClient) + missingRules, err := preAuth.PreAuthorize(context.TODO(), &exampleClusterExtension, strings.NewReader(testManifestMultiNamespace)) + require.Error(t, err) + require.NotEqual(t, []ScopedPolicyRules{}, 
missingRules) + }) +} + +func TestPreAuthorize_CheckEscalation(t *testing.T) { + t.Run("preauthorize succeeds with no missing rbac rules", func(t *testing.T) { + fakeClient := setupFakeClient(escalatingClusterRole) + preAuth := NewRBACPreAuthorizer(fakeClient) + missingRules, err := preAuth.PreAuthorize(context.TODO(), &exampleClusterExtension, strings.NewReader(testManifest)) + require.NoError(t, err) + require.Equal(t, []ScopedPolicyRules{}, missingRules) + }) +} + +// TestParseEscalationErrorForMissingRules Are tests with respect to https://github.com/kubernetes/api/blob/e8d4d542f6a9a16a694bfc8e3b8cd1557eecfc9d/rbac/v1/types.go#L49-L74 +// Goal is: prove the regex works as planned AND that if the error messages ever change we'll learn about it with these tests +func TestParseEscalationErrorForMissingRules_ParsingLogic(t *testing.T) { + testCases := []struct { + name string + inputError error + expectedResult *parseResult + expectError require.ErrorAssertionFunc + }{ + { + name: "One Missing Resource Rule", + inputError: errors.New(`user "test-user" (groups=["test"]) is attempting to grant RBAC permissions not currently held: +{APIGroups:["apps"], Resources:["deployments"], Verbs:["get"]}`), + expectedResult: &parseResult{ + MissingRules: []rbacv1.PolicyRule{ + {APIGroups: []string{"apps"}, Resources: []string{"deployments"}, Verbs: []string{"get"}}, + }, + }, + expectError: require.NoError, + }, + { + name: "Multiple Missing Rules (Resource + NonResource)", + inputError: errors.New(`user "sa" (groups=["system:authenticated"]) is attempting to grant RBAC permissions not currently held: +{APIGroups:[""], Resources:["pods"], Verbs:["list" "watch"]} +{NonResourceURLs:["/healthz"], Verbs:["get"]}`), + expectedResult: &parseResult{ + MissingRules: []rbacv1.PolicyRule{ + {APIGroups: []string{""}, Resources: []string{"pods"}, Verbs: []string{"list", "watch"}}, + {NonResourceURLs: []string{"/healthz"}, Verbs: []string{"get"}}, + }, + }, + expectError: require.NoError, + }, + { + name: "One Missing Rule with Resolution Errors", + inputError: errors.New(`user "test-admin" (groups=["system:masters"]) is attempting to grant RBAC permissions not currently held: +{APIGroups:["batch"], Resources:["jobs"], Verbs:["create"]}; resolution errors: [role "missing-role" not found]`), + expectedResult: &parseResult{ + MissingRules: []rbacv1.PolicyRule{ + {APIGroups: []string{"batch"}, Resources: []string{"jobs"}, Verbs: []string{"create"}}, + }, + ResolutionErrors: errors.New(`[role "missing-role" not found]`), + }, + expectError: require.NoError, + }, + { + name: "Multiple Missing Rules with Resolution Errors", + inputError: errors.New(`user "another-user" (groups=[]) is attempting to grant RBAC permissions not currently held: +{APIGroups:[""], Resources:["secrets"], Verbs:["get"]} +{APIGroups:[""], Resources:["configmaps"], Verbs:["list"]}; resolution errors: [clusterrole "missing-clusterrole" not found, role "other-missing" not found]`), + expectedResult: &parseResult{ + MissingRules: []rbacv1.PolicyRule{ + {APIGroups: []string{""}, Resources: []string{"secrets"}, Verbs: []string{"get"}}, + {APIGroups: []string{""}, Resources: []string{"configmaps"}, Verbs: []string{"list"}}, + }, + ResolutionErrors: errors.New(`[clusterrole "missing-clusterrole" not found, role "other-missing" not found]`), + }, + expectError: require.NoError, + }, + { + name: "Missing Rule (All Resource Fields)", + inputError: errors.New(`user "resource-name-user" (groups=["test"]) is attempting to grant RBAC permissions not currently 
held: +{APIGroups:["extensions"], Resources:["ingresses"], ResourceNames:["my-ingress"], Verbs:["update" "patch"]}`), + expectedResult: &parseResult{ + MissingRules: []rbacv1.PolicyRule{ + {APIGroups: []string{"extensions"}, Resources: []string{"ingresses"}, ResourceNames: []string{"my-ingress"}, Verbs: []string{"update", "patch"}}, + }, + }, + expectError: require.NoError, + }, + { + name: "Missing Rule (No ResourceNames)", + inputError: errors.New(`user "no-res-name-user" (groups=["test"]) is attempting to grant RBAC permissions not currently held: +{APIGroups:["networking.k8s.io"], Resources:["networkpolicies"], Verbs:["watch"]}`), + expectedResult: &parseResult{ + MissingRules: []rbacv1.PolicyRule{ + {APIGroups: []string{"networking.k8s.io"}, Resources: []string{"networkpolicies"}, Verbs: []string{"watch"}}, + }, + }, + expectError: require.NoError, + }, + { + name: "Missing Rule (NonResourceURLs only)", + inputError: errors.New(`user "url-user" (groups=["test"]) is attempting to grant RBAC permissions not currently held: +{NonResourceURLs:["/version" "/apis"], Verbs:["get"]}`), + expectedResult: &parseResult{ + MissingRules: []rbacv1.PolicyRule{ + {NonResourceURLs: []string{"/version", "/apis"}, Verbs: []string{"get"}}, + }, + }, + expectError: require.NoError, + }, + { + name: "Unexpected Format", + inputError: errors.New("some completely different error message that doesn't match"), + expectedResult: &parseResult{}, + expectError: func(t require.TestingT, err error, i ...interface{}) { + require.ErrorContains(t, err, "unexpected format of escalation check error string") + }, + }, + { + name: "Empty Permissions String", + inputError: errors.New(`user "empty-perms" (groups=["test"]) is attempting to grant RBAC permissions not currently held: +`), + expectedResult: &parseResult{}, + expectError: func(t require.TestingT, err error, i ...interface{}) { + require.ErrorContains(t, err, "unexpected format of escalation check error string") + }, + }, + { + name: "Rule with Empty Strings in lists", + inputError: errors.New(`user "empty-strings" (groups=["test"]) is attempting to grant RBAC permissions not currently held: +{APIGroups:["" "apps"], Resources:["" "deployments"], Verbs:["get" ""]}`), + expectedResult: &parseResult{ + MissingRules: []rbacv1.PolicyRule{ + {APIGroups: []string{"", "apps"}, Resources: []string{"", "deployments"}, Verbs: []string{"get", ""}}, + }, + }, + expectError: require.NoError, + }, + { + name: "Rule with Only Empty Verb", + inputError: errors.New(`user "empty-verb" (groups=["test"]) is attempting to grant RBAC permissions not currently held: +{APIGroups:[""], Resources:["pods"], Verbs:[""]}`), + expectedResult: &parseResult{ + MissingRules: []rbacv1.PolicyRule{ + {APIGroups: []string{""}, Resources: []string{"pods"}, Verbs: []string{""}}, + }, + }, + expectError: require.NoError, + }, + { + name: "Rule with no fields", + inputError: errors.New(`user "empty-verb" (groups=["test"]) is attempting to grant RBAC permissions not currently held: +{}`), + expectedResult: &parseResult{ + MissingRules: []rbacv1.PolicyRule{{}}, + }, + expectError: require.NoError, + }, + { + name: "Rule with no colon separator", + inputError: errors.New(`user "empty-verb" (groups=["test"]) is attempting to grant RBAC permissions not currently held: +{APIGroups:[""], Resources, Verbs:["get"]} +`), + expectedResult: &parseResult{}, + expectError: func(t require.TestingT, err error, i ...interface{}) { + require.ErrorContains(t, err, `unexpected item "Resources": expected :[...]`) + }, + }, + 
{ + name: "Rule with unknown field", + inputError: errors.New(`user "empty-verb" (groups=["test"]) is attempting to grant RBAC permissions not currently held: +{FooBar:["baz"]} +{APIGroups:[""], Resources:["secrets"], Verbs:["get"]} +`), + expectedResult: &parseResult{ + MissingRules: []rbacv1.PolicyRule{ + {APIGroups: []string{""}, Resources: []string{"secrets"}, Verbs: []string{"get"}}, + }, + }, + expectError: func(t require.TestingT, err error, i ...interface{}) { + require.ErrorContains(t, err, `unknown field: "FooBar"`) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + rules, err := parseEscalationErrorForMissingRules(tc.inputError) + tc.expectError(t, err) + require.Equal(t, tc.expectedResult, rules) + }) + } +} + +func TestParseEscalationErrorForMissingRules_KubernetesCompatibility(t *testing.T) { + testCases := []struct { + name string + ruleResolver validation.AuthorizationRuleResolver + wantRules []rbacv1.PolicyRule + expectedErrorString string + expectedResult *parseResult + }{ + { + name: "missing rules", + ruleResolver: mockRulesResolver{ + rules: []rbacv1.PolicyRule{}, + err: nil, + }, + wantRules: []rbacv1.PolicyRule{ + {APIGroups: []string{""}, Resources: []string{"secrets"}, Verbs: []string{"get"}, ResourceNames: []string{"test-secret"}}, + {APIGroups: []string{""}, Resources: []string{"configmaps"}, Verbs: []string{"get", "list", "watch"}}, + {APIGroups: []string{"apps"}, Resources: []string{"deployments", "replicasets"}, Verbs: []string{"create", "update", "patch", "delete"}}, + {NonResourceURLs: []string{"/healthz", "/livez"}, Verbs: []string{"get", "post"}}, + }, + expectedErrorString: `user "user" (groups=["a" "b"]) is attempting to grant RBAC permissions not currently held: +{APIGroups:[""], Resources:["configmaps"], Verbs:["get" "list" "watch"]} +{APIGroups:[""], Resources:["secrets"], ResourceNames:["test-secret"], Verbs:["get"]} +{APIGroups:["apps"], Resources:["deployments"], Verbs:["create" "update" "patch" "delete"]} +{APIGroups:["apps"], Resources:["replicasets"], Verbs:["create" "update" "patch" "delete"]} +{NonResourceURLs:["/healthz"], Verbs:["get"]} +{NonResourceURLs:["/healthz"], Verbs:["post"]} +{NonResourceURLs:["/livez"], Verbs:["get"]} +{NonResourceURLs:["/livez"], Verbs:["post"]}`, + expectedResult: &parseResult{ + MissingRules: []rbacv1.PolicyRule{ + {APIGroups: []string{""}, Resources: []string{"configmaps"}, Verbs: []string{"get", "list", "watch"}}, + {APIGroups: []string{""}, Resources: []string{"secrets"}, Verbs: []string{"get"}, ResourceNames: []string{"test-secret"}}, + {APIGroups: []string{"apps"}, Resources: []string{"deployments"}, Verbs: []string{"create", "update", "patch", "delete"}}, + {APIGroups: []string{"apps"}, Resources: []string{"replicasets"}, Verbs: []string{"create", "update", "patch", "delete"}}, + {NonResourceURLs: []string{"/healthz"}, Verbs: []string{"get"}}, + {NonResourceURLs: []string{"/healthz"}, Verbs: []string{"post"}}, + {NonResourceURLs: []string{"/livez"}, Verbs: []string{"get"}}, + {NonResourceURLs: []string{"/livez"}, Verbs: []string{"post"}}, + }, + }, + }, + { + name: "resolution failure", + ruleResolver: mockRulesResolver{ + rules: []rbacv1.PolicyRule{}, + err: errors.New("resolution error"), + }, + wantRules: []rbacv1.PolicyRule{ + {APIGroups: []string{""}, Resources: []string{"secrets"}, Verbs: []string{"get"}, ResourceNames: []string{"test-secret"}}, + {APIGroups: []string{""}, Resources: []string{"configmaps"}, Verbs: []string{"get", "list", "watch"}}, + {APIGroups: 
[]string{"apps"}, Resources: []string{"deployments", "replicasets"}, Verbs: []string{"create", "update", "patch", "delete"}}, + {NonResourceURLs: []string{"/healthz", "/livez"}, Verbs: []string{"get", "post"}}, + }, + expectedErrorString: `user "user" (groups=["a" "b"]) is attempting to grant RBAC permissions not currently held: +{APIGroups:[""], Resources:["configmaps"], Verbs:["get" "list" "watch"]} +{APIGroups:[""], Resources:["secrets"], ResourceNames:["test-secret"], Verbs:["get"]} +{APIGroups:["apps"], Resources:["deployments"], Verbs:["create" "update" "patch" "delete"]} +{APIGroups:["apps"], Resources:["replicasets"], Verbs:["create" "update" "patch" "delete"]} +{NonResourceURLs:["/healthz"], Verbs:["get"]} +{NonResourceURLs:["/healthz"], Verbs:["post"]} +{NonResourceURLs:["/livez"], Verbs:["get"]} +{NonResourceURLs:["/livez"], Verbs:["post"]}; resolution errors: [resolution error]`, + expectedResult: &parseResult{ + MissingRules: []rbacv1.PolicyRule{ + {APIGroups: []string{""}, Resources: []string{"configmaps"}, Verbs: []string{"get", "list", "watch"}}, + {APIGroups: []string{""}, Resources: []string{"secrets"}, Verbs: []string{"get"}, ResourceNames: []string{"test-secret"}}, + {APIGroups: []string{"apps"}, Resources: []string{"deployments"}, Verbs: []string{"create", "update", "patch", "delete"}}, + {APIGroups: []string{"apps"}, Resources: []string{"replicasets"}, Verbs: []string{"create", "update", "patch", "delete"}}, + {NonResourceURLs: []string{"/healthz"}, Verbs: []string{"get"}}, + {NonResourceURLs: []string{"/healthz"}, Verbs: []string{"post"}}, + {NonResourceURLs: []string{"/livez"}, Verbs: []string{"get"}}, + {NonResourceURLs: []string{"/livez"}, Verbs: []string{"post"}}, + }, + ResolutionErrors: errors.New("[resolution error]"), + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx := request.WithUser(request.WithNamespace(context.Background(), "namespace"), &user.DefaultInfo{ + Name: "user", + Groups: []string{"a", "b"}, + }) + + // Let's actually call the upstream function that generates and returns the + // error message that we are attempting to parse correctly. The hope is that + // these tests will start failing if we bump to a new version of kubernetes + // that causes our parsing logic to be incorrect. 
+ err := validation.ConfirmNoEscalation(ctx, tc.ruleResolver, tc.wantRules) + require.Error(t, err) + require.Equal(t, tc.expectedErrorString, err.Error()) + + res, err := parseEscalationErrorForMissingRules(err) + require.NoError(t, err) + require.Equal(t, tc.expectedResult, res) + }) + } +} + +type mockRulesResolver struct { + rules []rbacv1.PolicyRule + err error +} + +func (m mockRulesResolver) GetRoleReferenceRules(ctx context.Context, roleRef rbacv1.RoleRef, namespace string) ([]rbacv1.PolicyRule, error) { + panic("unimplemented") +} + +func (m mockRulesResolver) RulesFor(ctx context.Context, user user.Info, namespace string) ([]rbacv1.PolicyRule, error) { + return m.rules, m.err +} + +func (m mockRulesResolver) VisitRulesFor(ctx context.Context, user user.Info, namespace string, visitor func(source fmt.Stringer, rule *rbacv1.PolicyRule, err error) bool) { + panic("unimplemented") +} diff --git a/internal/operator-controller/controllers/clusterextension_controller.go b/internal/operator-controller/controllers/clusterextension_controller.go index 5cbb670b6..07f54b94f 100644 --- a/internal/operator-controller/controllers/clusterextension_controller.go +++ b/internal/operator-controller/controllers/clusterextension_controller.go @@ -96,6 +96,7 @@ type InstalledBundleGetter interface { //+kubebuilder:rbac:namespace=system,groups=core,resources=secrets,verbs=create;update;patch;delete;deletecollection;get;list;watch //+kubebuilder:rbac:groups=core,resources=serviceaccounts/token,verbs=create //+kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get +//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles;clusterrolebindings;roles;rolebindings,verbs=list;watch //+kubebuilder:rbac:groups=olm.operatorframework.io,resources=clustercatalogs,verbs=list;watch diff --git a/internal/operator-controller/features/features.go b/internal/operator-controller/features/features.go index 885f3b4db..41645f62c 100644 --- a/internal/operator-controller/features/features.go +++ b/internal/operator-controller/features/features.go @@ -1,6 +1,9 @@ package features import ( + "sort" + + "github.com/go-logr/logr" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/component-base/featuregate" ) @@ -36,3 +39,21 @@ var OperatorControllerFeatureGate featuregate.MutableFeatureGate = featuregate.N func init() { utilruntime.Must(OperatorControllerFeatureGate.Add(operatorControllerFeatureGates)) } + +// LogFeatureGateStates logs the state of all known feature gates. +func LogFeatureGateStates(log logr.Logger, fg featuregate.FeatureGate) { + // Sort the keys for consistent logging order + featureKeys := make([]featuregate.Feature, 0, len(operatorControllerFeatureGates)) + for k := range operatorControllerFeatureGates { + featureKeys = append(featureKeys, k) + } + sort.Slice(featureKeys, func(i, j int) bool { + return string(featureKeys[i]) < string(featureKeys[j]) // Sort by string representation + }) + + featurePairs := make([]interface{}, 0, len(featureKeys)) + for _, feature := range featureKeys { + featurePairs = append(featurePairs, feature, fg.Enabled(feature)) + } + log.Info("feature gate status", featurePairs...) +}
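
A minimal usage sketch of the PreAuthorizer introduced by this patch. This is not part of the diff itself: it assumes a kubeconfig-backed controller-runtime client and invented names (example-extension, example-ns, example-installer), and the call shape simply mirrors the TestPreAuthorize_* cases in rbac_test.go above (NewRBACPreAuthorizer, PreAuthorize, and the ClusterExtension spec fields are the only identifiers taken from the patch). In-tree the pre-authorizer is only constructed behind the PreflightPermissions feature gate, so treat this as the gate-enabled path.

package main

import (
	"context"
	"fmt"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"

	ocv1 "github.com/operator-framework/operator-controller/api/v1"
	"github.com/operator-framework/operator-controller/internal/operator-controller/authorization"
)

func main() {
	// Build a client from the local kubeconfig. The controller itself passes its
	// manager-backed client instead; this is just the simplest standalone setup.
	cl, err := client.New(config.GetConfigOrDie(), client.Options{})
	if err != nil {
		panic(err)
	}

	// Hypothetical extension and manifest, shaped like the fixtures in rbac_test.go.
	// None of these names exist in the patch.
	ext := &ocv1.ClusterExtension{
		ObjectMeta: metav1.ObjectMeta{Name: "example-extension"},
		Spec: ocv1.ClusterExtensionSpec{
			Namespace:      "example-ns",
			ServiceAccount: ocv1.ServiceAccountReference{Name: "example-installer"},
		},
	}
	manifest := strings.NewReader(`apiVersion: v1
kind: ConfigMap
metadata:
  name: example
  namespace: example-ns
`)

	// Ask whether the extension's installer service account could apply the
	// manifest, without actually applying anything.
	preAuth := authorization.NewRBACPreAuthorizer(cl)
	missingRules, err := preAuth.PreAuthorize(context.Background(), ext, manifest)
	if err != nil {
		fmt.Println("pre-authorization reported problems:", err)
	}
	for _, scoped := range missingRules {
		// Missing rules are grouped per namespace; print them as-is.
		fmt.Printf("missing rules: %+v\n", scoped)
	}
}

As the failure tests above suggest, PreAuthorize can return both a non-empty missingRules slice and a non-nil error (the error aggregates authorization and escalation-check failures), which is why the sketch prints the error and still ranges over missingRules.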