From a4a403a806e005d3321306eea468a596915d8a0b Mon Sep 17 00:00:00 2001 From: Todd Short Date: Tue, 16 Apr 2024 12:05:56 -0400 Subject: [PATCH 1/3] Fix suite_test.go Signed-off-by: Todd Short --- internal/controllers/suite_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/internal/controllers/suite_test.go b/internal/controllers/suite_test.go index a7707d272..4efd68fb3 100644 --- a/internal/controllers/suite_test.go +++ b/internal/controllers/suite_test.go @@ -47,7 +47,7 @@ func newClient(t *testing.T) client.Client { } func newClientAndReconciler(t *testing.T) (client.Client, *controllers.ClusterExtensionReconciler) { - resolver, err := solver.New() + _, err := solver.New() require.NoError(t, err) cl := newClient(t) @@ -56,7 +56,6 @@ func newClientAndReconciler(t *testing.T) (client.Client, *controllers.ClusterEx Client: cl, BundleProvider: &fakeCatalogClient, Scheme: sch, - Resolver: resolver, } return cl, reconciler } From 110950f62913acc75f07cbfb17c9cbbf495b9b95 Mon Sep 17 00:00:00 2001 From: Todd Short Date: Tue, 16 Apr 2024 09:51:46 -0400 Subject: [PATCH 2/3] Copy over some rukpak code and replace BundleDeployment Replace BundleDeployment in the Unpack APIs with a combination of BundleSource and ClusterExtension. It builds... Signed-off-by: Todd Short --- go.mod | 9 +- go.sum | 2 + .../clusterextension_controller.go | 16 + internal/rukpak/api/bundle_types.go | 51 ++ internal/rukpak/convert/registryv1.go | 447 ++++++++++++++++++ internal/rukpak/operator-registry/registry.go | 23 + internal/rukpak/source/common.go | 9 + internal/rukpak/source/image.go | 272 +++++++++++ internal/rukpak/source/unpacker.go | 110 +++++ internal/rukpak/util/hash.go | 39 ++ internal/rukpak/util/labels.go | 6 + internal/rukpak/util/util.go | 11 + 12 files changed, 991 insertions(+), 4 deletions(-) create mode 100644 internal/rukpak/api/bundle_types.go create mode 100644 internal/rukpak/convert/registryv1.go create mode 100644 internal/rukpak/operator-registry/registry.go create mode 100644 internal/rukpak/source/common.go create mode 100644 internal/rukpak/source/image.go create mode 100644 internal/rukpak/source/unpacker.go create mode 100644 internal/rukpak/util/hash.go create mode 100644 internal/rukpak/util/labels.go create mode 100644 internal/rukpak/util/util.go diff --git a/go.mod b/go.mod index 1594a34bc..b014fbf24 100644 --- a/go.mod +++ b/go.mod @@ -9,6 +9,8 @@ require ( github.com/blang/semver/v4 v4.0.0 github.com/go-logr/logr v1.4.1 github.com/google/go-cmp v0.6.0 + github.com/nlepage/go-tarfs v1.2.1 + github.com/operator-framework/api v0.23.0 github.com/operator-framework/catalogd v0.12.0 github.com/operator-framework/deppy v0.3.0 github.com/operator-framework/helm-operator-plugins v0.1.3 @@ -21,11 +23,14 @@ require ( golang.org/x/exp v0.0.0-20240213143201-ec583247a57a gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.29.3 + k8s.io/apiextensions-apiserver v0.29.3 k8s.io/apimachinery v0.29.3 + k8s.io/cli-runtime v0.29.2 k8s.io/client-go v0.29.3 k8s.io/component-base v0.29.3 k8s.io/utils v0.0.0-20240102154912-e7106e64919e sigs.k8s.io/controller-runtime v0.17.2 + sigs.k8s.io/yaml v1.4.0 ) require ( @@ -140,7 +145,6 @@ require ( github.com/opencontainers/image-spec v1.1.0-rc6 // indirect github.com/opencontainers/runc v1.1.12 // indirect github.com/opencontainers/runtime-spec v1.1.0 // indirect - github.com/operator-framework/api v0.23.0 // indirect github.com/operator-framework/operator-lib v0.12.0 // indirect github.com/otiai10/copy v1.14.0 // indirect github.com/peterbourgon/diskv 
v2.0.1+incompatible // indirect @@ -199,9 +203,7 @@ require ( gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect helm.sh/helm/v3 v3.14.3 // indirect - k8s.io/apiextensions-apiserver v0.29.3 // indirect k8s.io/apiserver v0.29.3 // indirect - k8s.io/cli-runtime v0.29.2 // indirect k8s.io/klog/v2 v2.120.1 // indirect k8s.io/kube-openapi v0.0.0-20240221221325-2ac9dc51f3f1 // indirect k8s.io/kubectl v0.29.2 // indirect @@ -211,5 +213,4 @@ require ( sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect - sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index 380e76639..aa33a96bf 100644 --- a/go.sum +++ b/go.sum @@ -376,6 +376,8 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nlepage/go-tarfs v1.2.1 h1:o37+JPA+ajllGKSPfy5+YpsNHDjZnAoyfvf5GsUa+Ks= +github.com/nlepage/go-tarfs v1.2.1/go.mod h1:rno18mpMy9aEH1IiJVftFsqPyIpwqSUiAOpJYjlV2NA= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= diff --git a/internal/controllers/clusterextension_controller.go b/internal/controllers/clusterextension_controller.go index eaca319d6..cc61206a6 100644 --- a/internal/controllers/clusterextension_controller.go +++ b/internal/controllers/clusterextension_controller.go @@ -50,6 +50,7 @@ import ( "github.com/operator-framework/operator-controller/internal/catalogmetadata" catalogfilter "github.com/operator-framework/operator-controller/internal/catalogmetadata/filter" catalogsort "github.com/operator-framework/operator-controller/internal/catalogmetadata/sort" + rukpakapi "github.com/operator-framework/operator-controller/internal/rukpak/api" ) // ClusterExtensionReconciler reconciles a ClusterExtension object @@ -135,6 +136,12 @@ func (r *ClusterExtensionReconciler) reconcile(ctx context.Context, ext *ocv1alp // Unpack contents into a fs based on the bundle. // Considering only image source. + // Generate a BundleSource, and then pass this and the ClusterExtension to Unpack + // TODO: + // bs := r.GenerateExpectedBundleSource(*ext, bundle.Image) + // unpacker := NewDefaultUnpacker(msg, namespace, unpackImage) + // unpacker..Unpack(bs, ext) + // set the status of the cluster extension based on the respective bundle deployment status conditions. 
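	// Illustrative sketch of how the TODO above could be wired up once an Unpacker is
	// available on the reconciler. This is a sketch, not part of this patch: it assumes a
	// hypothetical r.Unpacker field of type source.Unpacker, constructed at startup with
	// source.NewDefaultUnpacker(mgr, namespace, unpackImage) introduced by this change.
	//
	//   bs := r.GenerateExpectedBundleSource(*ext, bundle.Image)
	//   unpackResult, err := r.Unpacker.Unpack(ctx, bs, ext)
	//   if err != nil {
	//       return ctrl.Result{}, fmt.Errorf("unpack bundle: %w", err)
	//   }
	//   switch unpackResult.State {
	//   case source.StatePending, source.StateUnpacking:
	//       // Unpacking is asynchronous; return and check again on the next reconcile.
	//       return ctrl.Result{}, nil
	//   case source.StateUnpacked:
	//       // unpackResult.Bundle is an fs.FS with the unpacked bundle contents, ready for
	//       // conversion (e.g. convert.RegistryV1ToPlain) and for updating status conditions.
	//   }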
return ctrl.Result{}, nil } @@ -221,6 +228,15 @@ func SetDeprecationStatus(ext *ocv1alpha1.ClusterExtension, bundle *catalogmetad } } +func (r *ClusterExtensionReconciler) GenerateExpectedBundleSource(o ocv1alpha1.ClusterExtension, bundlePath string) *rukpakapi.BundleSource { + return &rukpakapi.BundleSource{ + Type: rukpakapi.SourceTypeImage, + Image: rukpakapi.ImageSource{ + Ref: bundlePath, + }, + } +} + func (r *ClusterExtensionReconciler) GenerateExpectedBundleDeployment(o ocv1alpha1.ClusterExtension, bundlePath string, bundleProvisioner string) *unstructured.Unstructured { // We use unstructured here to avoid problems of serializing default values when sending patches to the apiserver. // If you use a typed object, any default values from that struct get serialized into the JSON patch, which could diff --git a/internal/rukpak/api/bundle_types.go b/internal/rukpak/api/bundle_types.go new file mode 100644 index 000000000..dbd035a7b --- /dev/null +++ b/internal/rukpak/api/bundle_types.go @@ -0,0 +1,51 @@ +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rukpakapi + +type SourceType string + +const ( + SourceTypeImage SourceType = "image" + + TypeUnpacked = "Unpacked" + + ReasonUnpackPending = "UnpackPending" + ReasonUnpacking = "Unpacking" + ReasonUnpackSuccessful = "UnpackSuccessful" + ReasonUnpackFailed = "UnpackFailed" + ReasonProcessingFinalizerFailed = "ProcessingFinalizerFailed" + + PhasePending = "Pending" + PhaseUnpacking = "Unpacking" + PhaseFailing = "Failing" + PhaseUnpacked = "Unpacked" +) + +type BundleSource struct { + // Type defines the kind of Bundle content being sourced. + Type SourceType `json:"type"` + // Image is the bundle image that backs the content of this bundle. + Image ImageSource `json:"image,omitempty"` + // Git is the git repository that backs the content of this Bundle. +} + +type ImageSource struct { + // Ref contains the reference to a container image containing Bundle contents. + Ref string `json:"ref"` + // ImagePullSecretName contains the name of the image pull secret in the namespace that the provisioner is deployed. 
+ ImagePullSecretName string `json:"pullSecret,omitempty"` +} diff --git a/internal/rukpak/convert/registryv1.go b/internal/rukpak/convert/registryv1.go new file mode 100644 index 000000000..e4af9b650 --- /dev/null +++ b/internal/rukpak/convert/registryv1.go @@ -0,0 +1,447 @@ +package convert + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/fs" + "path/filepath" + "strings" + "testing/fstest" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + apimachyaml "k8s.io/apimachinery/pkg/util/yaml" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + + "github.com/operator-framework/api/pkg/operators/v1alpha1" + registrybundle "github.com/operator-framework/operator-registry/pkg/lib/bundle" + + registry "github.com/operator-framework/operator-controller/internal/rukpak/operator-registry" + "github.com/operator-framework/operator-controller/internal/rukpak/util" +) + +type RegistryV1 struct { + PackageName string + CSV v1alpha1.ClusterServiceVersion + CRDs []apiextensionsv1.CustomResourceDefinition + Others []unstructured.Unstructured +} + +type Plain struct { + Objects []client.Object +} + +func RegistryV1ToPlain(rv1 fs.FS, watchNamespaces []string) (fs.FS, error) { + reg := RegistryV1{} + fileData, err := fs.ReadFile(rv1, filepath.Join("metadata", "annotations.yaml")) + if err != nil { + return nil, err + } + annotationsFile := registry.AnnotationsFile{} + if err := yaml.Unmarshal(fileData, &annotationsFile); err != nil { + return nil, err + } + reg.PackageName = annotationsFile.Annotations.PackageName + + var objects []*unstructured.Unstructured + const manifestsDir = "manifests" + + entries, err := fs.ReadDir(rv1, manifestsDir) + if err != nil { + return nil, err + } + for _, e := range entries { + if e.IsDir() { + return nil, fmt.Errorf("subdirectories are not allowed within the %q directory of the bundle image filesystem: found %q", manifestsDir, filepath.Join(manifestsDir, e.Name())) + } + fileData, err := fs.ReadFile(rv1, filepath.Join(manifestsDir, e.Name())) + if err != nil { + return nil, err + } + + dec := apimachyaml.NewYAMLOrJSONDecoder(bytes.NewReader(fileData), 1024) + for { + obj := unstructured.Unstructured{} + err := dec.Decode(&obj) + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return nil, fmt.Errorf("read %q: %v", e.Name(), err) + } + objects = append(objects, &obj) + } + } + + for _, obj := range objects { + obj := obj + switch obj.GetObjectKind().GroupVersionKind().Kind { + case "ClusterServiceVersion": + csv := v1alpha1.ClusterServiceVersion{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &csv); err != nil { + return nil, err + } + reg.CSV = csv + case "CustomResourceDefinition": + crd := apiextensionsv1.CustomResourceDefinition{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &crd); err != nil { + return nil, err + } + reg.CRDs = append(reg.CRDs, crd) + default: + reg.Others = append(reg.Others, *obj) + } + } + + plain, err := Convert(reg, "", watchNamespaces) + if err != nil { + return nil, err + } + + var manifest bytes.Buffer + for _, obj := range plain.Objects { + yamlData, err := yaml.Marshal(obj) + if err != nil { + return nil, err + } + if _, err := 
fmt.Fprintf(&manifest, "---\n%s\n", string(yamlData)); err != nil { + return nil, err + } + } + + now := time.Now() + plainFS := fstest.MapFS{ + ".": &fstest.MapFile{ + Data: nil, + Mode: fs.ModeDir | 0755, + ModTime: now, + }, + "manifests": &fstest.MapFile{ + Data: nil, + Mode: fs.ModeDir | 0755, + ModTime: now, + }, + "manifests/manifest.yaml": &fstest.MapFile{ + Data: manifest.Bytes(), + Mode: 0644, + ModTime: now, + }, + } + + return plainFS, nil +} + +func validateTargetNamespaces(supportedInstallModes sets.Set[string], installNamespace string, targetNamespaces []string) error { + set := sets.New[string](targetNamespaces...) + switch set.Len() { + case 0: + if supportedInstallModes.Has(string(v1alpha1.InstallModeTypeAllNamespaces)) { + return nil + } + case 1: + if set.Has("") && supportedInstallModes.Has(string(v1alpha1.InstallModeTypeAllNamespaces)) { + return nil + } + if supportedInstallModes.Has(string(v1alpha1.InstallModeTypeSingleNamespace)) { + return nil + } + if supportedInstallModes.Has(string(v1alpha1.InstallModeTypeOwnNamespace)) && targetNamespaces[0] == installNamespace { + return nil + } + default: + if supportedInstallModes.Has(string(v1alpha1.InstallModeTypeMultiNamespace)) && !set.Has("") { + return nil + } + } + return fmt.Errorf("supported install modes %v do not support target namespaces %v", sets.List[string](supportedInstallModes), targetNamespaces) +} + +func saNameOrDefault(saName string) string { + if saName == "" { + return "default" + } + return saName +} + +func Convert(in RegistryV1, installNamespace string, targetNamespaces []string) (*Plain, error) { + if installNamespace == "" { + installNamespace = in.CSV.Annotations["operatorframework.io/suggested-namespace"] + } + if installNamespace == "" { + installNamespace = fmt.Sprintf("%s-system", in.PackageName) + } + supportedInstallModes := sets.New[string]() + for _, im := range in.CSV.Spec.InstallModes { + if im.Supported { + supportedInstallModes.Insert(string(im.Type)) + } + } + if len(targetNamespaces) == 0 { + if supportedInstallModes.Has(string(v1alpha1.InstallModeTypeAllNamespaces)) { + targetNamespaces = []string{""} + } else if supportedInstallModes.Has(string(v1alpha1.InstallModeTypeOwnNamespace)) { + targetNamespaces = []string{installNamespace} + } + } + + if err := validateTargetNamespaces(supportedInstallModes, installNamespace, targetNamespaces); err != nil { + return nil, err + } + + if len(in.CSV.Spec.APIServiceDefinitions.Owned) > 0 { + return nil, fmt.Errorf("apiServiceDefintions are not supported") + } + + if len(in.CSV.Spec.WebhookDefinitions) > 0 { + return nil, fmt.Errorf("webhookDefinitions are not supported") + } + + deployments := []appsv1.Deployment{} + serviceAccounts := map[string]corev1.ServiceAccount{} + for _, depSpec := range in.CSV.Spec.InstallStrategy.StrategySpec.DeploymentSpecs { + annotations := util.MergeMaps(in.CSV.Annotations, depSpec.Spec.Template.Annotations) + annotations["olm.targetNamespaces"] = strings.Join(targetNamespaces, ",") + deployments = append(deployments, appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: appsv1.SchemeGroupVersion.String(), + }, + + ObjectMeta: metav1.ObjectMeta{ + Namespace: installNamespace, + Name: depSpec.Name, + Labels: depSpec.Label, + Annotations: annotations, + }, + Spec: depSpec.Spec, + }) + saName := saNameOrDefault(depSpec.Spec.Template.Spec.ServiceAccountName) + serviceAccounts[saName] = newServiceAccount(installNamespace, saName) + } + + // NOTES: + // 1. 
There's an extra Role for OperatorConditions: get/update/patch; resourceName=csv.name + // - This is managed by the OperatorConditions controller here: https://github.com/operator-framework/operator-lifecycle-manager/blob/9ced412f3e263b8827680dc0ad3477327cd9a508/pkg/controller/operators/operatorcondition_controller.go#L106-L109 + // 2. There's an extra RoleBinding for the above mentioned role. + // - Every SA mentioned in the OperatorCondition.spec.serviceAccounts is a subject for this role binding: https://github.com/operator-framework/operator-lifecycle-manager/blob/9ced412f3e263b8827680dc0ad3477327cd9a508/pkg/controller/operators/operatorcondition_controller.go#L171-L177 + // 3. strategySpec.permissions are _also_ given a clusterrole/clusterrole binding. + // - (for AllNamespaces mode only?) + // - (where does the extra namespaces get/list/watch rule come from?) + + roles := []rbacv1.Role{} + roleBindings := []rbacv1.RoleBinding{} + clusterRoles := []rbacv1.ClusterRole{} + clusterRoleBindings := []rbacv1.ClusterRoleBinding{} + + permissions := in.CSV.Spec.InstallStrategy.StrategySpec.Permissions + clusterPermissions := in.CSV.Spec.InstallStrategy.StrategySpec.ClusterPermissions + allPermissions := append(permissions, clusterPermissions...) + + // Create all the service accounts + for _, permission := range allPermissions { + saName := saNameOrDefault(permission.ServiceAccountName) + if _, ok := serviceAccounts[saName]; !ok { + serviceAccounts[saName] = newServiceAccount(installNamespace, saName) + } + } + + // If we're in AllNamespaces mode, promote the permissions to clusterPermissions + if len(targetNamespaces) == 1 && targetNamespaces[0] == "" { + for _, p := range permissions { + p.Rules = append(p.Rules, rbacv1.PolicyRule{ + Verbs: []string{"get", "list", "watch"}, + APIGroups: []string{corev1.GroupName}, + Resources: []string{"namespaces"}, + }) + } + clusterPermissions = append(clusterPermissions, permissions...) 
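		// Clearing permissions below leaves the namespaced Role/RoleBinding loop later in
		// this function with nothing to generate; in AllNamespaces mode only ClusterRoles
		// and ClusterRoleBindings are created from the promoted rules.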
+ permissions = nil + } + + for _, ns := range targetNamespaces { + for _, permission := range permissions { + saName := saNameOrDefault(permission.ServiceAccountName) + name, err := generateName(fmt.Sprintf("%s-%s", in.CSV.Name, saName), permission) + if err != nil { + return nil, err + } + roles = append(roles, newRole(ns, name, permission.Rules)) + roleBindings = append(roleBindings, newRoleBinding(ns, name, name, installNamespace, saName)) + } + } + + for _, permission := range clusterPermissions { + saName := saNameOrDefault(permission.ServiceAccountName) + name, err := generateName(fmt.Sprintf("%s-%s", in.CSV.Name, saName), permission) + if err != nil { + return nil, err + } + clusterRoles = append(clusterRoles, newClusterRole(name, permission.Rules)) + clusterRoleBindings = append(clusterRoleBindings, newClusterRoleBinding(name, name, installNamespace, saName)) + } + + ns := &corev1.Namespace{ + TypeMeta: metav1.TypeMeta{Kind: "Namespace", APIVersion: "v1"}, + ObjectMeta: metav1.ObjectMeta{Name: installNamespace}, + } + objs := []client.Object{ns} + for _, obj := range serviceAccounts { + obj := obj + if obj.GetName() != "default" { + objs = append(objs, &obj) + } + } + for _, obj := range roles { + obj := obj + objs = append(objs, &obj) + } + for _, obj := range roleBindings { + obj := obj + objs = append(objs, &obj) + } + for _, obj := range clusterRoles { + obj := obj + objs = append(objs, &obj) + } + for _, obj := range clusterRoleBindings { + obj := obj + objs = append(objs, &obj) + } + for _, obj := range in.CRDs { + obj := obj + objs = append(objs, &obj) + } + for _, obj := range in.Others { + obj := obj + supported, namespaced := registrybundle.IsSupported(obj.GetKind()) + if !supported { + return nil, fmt.Errorf("bundle contains unsupported resource: Name: %v, Kind: %v", obj.GetName(), obj.GetKind()) + } + if namespaced { + obj.SetNamespace(installNamespace) + } + objs = append(objs, &obj) + } + for _, obj := range deployments { + obj := obj + objs = append(objs, &obj) + } + return &Plain{Objects: objs}, nil +} + +const maxNameLength = 63 + +func generateName(base string, o interface{}) (string, error) { + hashStr, err := util.DeepHashObject(o) + if err != nil { + return "", err + } + if len(base)+len(hashStr) > maxNameLength { + base = base[:maxNameLength-len(hashStr)-1] + } + + return fmt.Sprintf("%s-%s", base, hashStr), nil +} + +func newServiceAccount(namespace, name string) corev1.ServiceAccount { + return corev1.ServiceAccount{ + TypeMeta: metav1.TypeMeta{ + Kind: "ServiceAccount", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + } +} + +func newRole(namespace, name string, rules []rbacv1.PolicyRule) rbacv1.Role { + return rbacv1.Role{ + TypeMeta: metav1.TypeMeta{ + Kind: "Role", + APIVersion: rbacv1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + Rules: rules, + } +} + +func newClusterRole(name string, rules []rbacv1.PolicyRule) rbacv1.ClusterRole { + return rbacv1.ClusterRole{ + TypeMeta: metav1.TypeMeta{ + Kind: "ClusterRole", + APIVersion: rbacv1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Rules: rules, + } +} + +func newRoleBinding(namespace, name, roleName, saNamespace string, saNames ...string) rbacv1.RoleBinding { + subjects := make([]rbacv1.Subject, 0, len(saNames)) + for _, saName := range saNames { + subjects = append(subjects, rbacv1.Subject{ + Kind: "ServiceAccount", 
+ Namespace: saNamespace, + Name: saName, + }) + } + return rbacv1.RoleBinding{ + TypeMeta: metav1.TypeMeta{ + Kind: "RoleBinding", + APIVersion: rbacv1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + Subjects: subjects, + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "Role", + Name: roleName, + }, + } +} + +func newClusterRoleBinding(name, roleName, saNamespace string, saNames ...string) rbacv1.ClusterRoleBinding { + subjects := make([]rbacv1.Subject, 0, len(saNames)) + for _, saName := range saNames { + subjects = append(subjects, rbacv1.Subject{ + Kind: "ServiceAccount", + Namespace: saNamespace, + Name: saName, + }) + } + return rbacv1.ClusterRoleBinding{ + TypeMeta: metav1.TypeMeta{ + Kind: "ClusterRoleBinding", + APIVersion: rbacv1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Subjects: subjects, + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: roleName, + }, + } +} diff --git a/internal/rukpak/operator-registry/registry.go b/internal/rukpak/operator-registry/registry.go new file mode 100644 index 000000000..aaa34f57e --- /dev/null +++ b/internal/rukpak/operator-registry/registry.go @@ -0,0 +1,23 @@ +// Package registry contains the manually vendored type definitions from +// the operator-framework/operator-registry repository. +package registry + +// AnnotationsFile holds annotation information about a bundle +type AnnotationsFile struct { + // annotations is a list of annotations for a given bundle + Annotations Annotations `json:"annotations" yaml:"annotations"` +} + +// Annotations is a list of annotations for a given bundle +type Annotations struct { + // PackageName is the name of the overall package, ala `etcd`. + PackageName string `json:"operators.operatorframework.io.bundle.package.v1" yaml:"operators.operatorframework.io.bundle.package.v1"` + + // Channels are a comma separated list of the declared channels for the bundle, ala `stable` or `alpha`. + Channels string `json:"operators.operatorframework.io.bundle.channels.v1" yaml:"operators.operatorframework.io.bundle.channels.v1"` + + // DefaultChannelName is, if specified, the name of the default channel for the package. The + // default channel will be installed if no other channel is explicitly given. If the package + // has a single channel, then that channel is implicitly the default. 
+ DefaultChannelName string `json:"operators.operatorframework.io.bundle.channel.default.v1" yaml:"operators.operatorframework.io.bundle.channel.default.v1"` +} diff --git a/internal/rukpak/source/common.go b/internal/rukpak/source/common.go new file mode 100644 index 000000000..100bceffd --- /dev/null +++ b/internal/rukpak/source/common.go @@ -0,0 +1,9 @@ +package source + +import ( + "fmt" +) + +func generateMessage(bundleName string) string { + return fmt.Sprintf("Successfully unpacked the %s Bundle", bundleName) +} diff --git a/internal/rukpak/source/image.go b/internal/rukpak/source/image.go new file mode 100644 index 000000000..81cff8f18 --- /dev/null +++ b/internal/rukpak/source/image.go @@ -0,0 +1,272 @@ +package source + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/json" + "fmt" + "io" + "io/fs" + "strings" + + "github.com/nlepage/go-tarfs" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + applyconfigurationcorev1 "k8s.io/client-go/applyconfigurations/core/v1" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + clusterextension "github.com/operator-framework/operator-controller/api/v1alpha1" + rukpakapi "github.com/operator-framework/operator-controller/internal/rukpak/api" + "github.com/operator-framework/operator-controller/internal/rukpak/util" +) + +type Image struct { + Client client.Client + KubeClient kubernetes.Interface + PodNamespace string + UnpackImage string +} + +const imageBundleUnpackContainerName = "bundle" + +func (i *Image) Unpack(ctx context.Context, bs *rukpakapi.BundleSource, ce *clusterextension.ClusterExtension) (*Result, error) { + if bs.Type != rukpakapi.SourceTypeImage { + return nil, fmt.Errorf("bundle source type %q not supported", bs.Type) + } + + pod := &corev1.Pod{} + op, err := i.ensureUnpackPod(ctx, bs, ce, pod) + if err != nil { + return nil, err + } else if op == controllerutil.OperationResultCreated || op == controllerutil.OperationResultUpdated || pod.DeletionTimestamp != nil { + return &Result{State: StatePending}, nil + } + + switch phase := pod.Status.Phase; phase { + case corev1.PodPending: + return pendingImagePodResult(pod), nil + case corev1.PodRunning: + return &Result{State: StateUnpacking}, nil + case corev1.PodFailed: + return nil, i.failedPodResult(ctx, pod) + case corev1.PodSucceeded: + return i.succeededPodResult(ctx, pod) + default: + return nil, i.handleUnexpectedPod(ctx, pod) + } +} + +func (i *Image) ensureUnpackPod(ctx context.Context, bs *rukpakapi.BundleSource, ce *clusterextension.ClusterExtension, pod *corev1.Pod) (controllerutil.OperationResult, error) { + existingPod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: i.PodNamespace, Name: ce.Name}} + if err := i.Client.Get(ctx, client.ObjectKeyFromObject(existingPod), existingPod); client.IgnoreNotFound(err) != nil { + return controllerutil.OperationResultNone, err + } + + podApplyConfig := i.getDesiredPodApplyConfig(bs, ce) + updatedPod, err := i.KubeClient.CoreV1().Pods(i.PodNamespace).Apply(ctx, podApplyConfig, metav1.ApplyOptions{Force: true, FieldManager: "rukpak-core"}) + if err != nil { + if !apierrors.IsInvalid(err) { + return controllerutil.OperationResultNone, err + } + if err := i.Client.Delete(ctx, existingPod); err != nil { + return controllerutil.OperationResultNone, err 
+ } + updatedPod, err = i.KubeClient.CoreV1().Pods(i.PodNamespace).Apply(ctx, podApplyConfig, metav1.ApplyOptions{Force: true, FieldManager: "rukpak-core"}) + if err != nil { + return controllerutil.OperationResultNone, err + } + } + + // make sure the passed in pod value is updated with the latest + // version of the pod + *pod = *updatedPod + + // compare existingPod to newPod and return an appropriate + // OperatorResult value. + newPod := updatedPod.DeepCopy() + unsetNonComparedPodFields(existingPod, newPod) + if equality.Semantic.DeepEqual(existingPod, newPod) { + return controllerutil.OperationResultNone, nil + } + return controllerutil.OperationResultUpdated, nil +} + +func (i *Image) getDesiredPodApplyConfig(bs *rukpakapi.BundleSource, ce *clusterextension.ClusterExtension) *applyconfigurationcorev1.PodApplyConfiguration { + // TODO (tyslaton): Address unpacker pod allowing root users for image sources + // + // In our current implementation, we are creating a pod that uses the image + // provided by an image source. This pod is not always guaranteed to run as a + // non-root user and thus will fail to initialize if running as root in a PSA + // restricted namespace due to violations. As it currently stands, our compliance + // with PSA is baseline which allows for pods to run as root users. However, + // all RukPak processes and resources, except this unpacker pod for image sources, + // are runnable in a PSA restricted environment. We should consider ways to make + // this PSA definition either configurable or workable in a restricted namespace. + // + // See https://github.com/operator-framework/rukpak/pull/539 for more detail. + containerSecurityContext := applyconfigurationcorev1.SecurityContext(). + WithAllowPrivilegeEscalation(false). + WithCapabilities(applyconfigurationcorev1.Capabilities(). + WithDrop("ALL"), + ) + + // These references need to be based on ClusterExtension... + podApply := applyconfigurationcorev1.Pod(ce.Name, i.PodNamespace). + WithLabels(map[string]string{ + util.CoreOwnerKindKey: ce.Kind, + util.CoreOwnerNameKey: ce.Name, + }). + WithOwnerReferences(v1.OwnerReference(). + WithName(ce.Name). + WithKind(ce.Kind). + WithAPIVersion(ce.APIVersion). + WithUID(ce.UID). + WithController(true). + WithBlockOwnerDeletion(true), + ). + WithSpec(applyconfigurationcorev1.PodSpec(). + WithAutomountServiceAccountToken(false). + WithRestartPolicy(corev1.RestartPolicyNever). + WithInitContainers(applyconfigurationcorev1.Container(). + WithName("install-unpacker"). + WithImage(i.UnpackImage). + WithImagePullPolicy(corev1.PullIfNotPresent). + WithCommand("cp", "-Rv", "/unpack", "/util/bin/unpack"). + WithVolumeMounts(applyconfigurationcorev1.VolumeMount(). + WithName("util"). + WithMountPath("/util/bin"), + ). + WithSecurityContext(containerSecurityContext), + ). + WithContainers(applyconfigurationcorev1.Container(). + WithName(imageBundleUnpackContainerName). + WithImage(bs.Image.Ref). + WithCommand("/bin/unpack", "--bundle-dir", "/"). + WithVolumeMounts(applyconfigurationcorev1.VolumeMount(). + WithName("util"). + WithMountPath("/bin"), + ). + WithSecurityContext(containerSecurityContext), + ). + WithVolumes(applyconfigurationcorev1.Volume(). + WithName("util"). + WithEmptyDir(applyconfigurationcorev1.EmptyDirVolumeSource()), + ). + WithSecurityContext(applyconfigurationcorev1.PodSecurityContext(). + WithRunAsNonRoot(false). + WithSeccompProfile(applyconfigurationcorev1.SeccompProfile(). 
+ WithType(corev1.SeccompProfileTypeRuntimeDefault), + ), + ), + ) + + if bs.Image.ImagePullSecretName != "" { + podApply.Spec = podApply.Spec.WithImagePullSecrets( + applyconfigurationcorev1.LocalObjectReference().WithName(bs.Image.ImagePullSecretName), + ) + } + return podApply +} + +func unsetNonComparedPodFields(pods ...*corev1.Pod) { + for _, p := range pods { + p.APIVersion = "" + p.Kind = "" + p.Status = corev1.PodStatus{} + } +} + +func (i *Image) failedPodResult(ctx context.Context, pod *corev1.Pod) error { + logs, err := i.getPodLogs(ctx, pod) + if err != nil { + return fmt.Errorf("unpack failed: failed to retrieve failed pod logs: %v", err) + } + _ = i.Client.Delete(ctx, pod) + return fmt.Errorf("unpack failed: %v", string(logs)) +} + +func (i *Image) succeededPodResult(ctx context.Context, pod *corev1.Pod) (*Result, error) { + bundleFS, err := i.getBundleContents(ctx, pod) + if err != nil { + return nil, fmt.Errorf("get bundle contents: %v", err) + } + + digest, err := i.getBundleImageDigest(pod) + if err != nil { + return nil, fmt.Errorf("get bundle image digest: %v", err) + } + + resolvedSource := &rukpakapi.BundleSource{ + Type: rukpakapi.SourceTypeImage, + Image: rukpakapi.ImageSource{Ref: digest}, + } + + message := generateMessage("image") + + return &Result{Bundle: bundleFS, ResolvedSource: resolvedSource, State: StateUnpacked, Message: message}, nil +} + +func (i *Image) getBundleContents(ctx context.Context, pod *corev1.Pod) (fs.FS, error) { + bundleData, err := i.getPodLogs(ctx, pod) + if err != nil { + return nil, fmt.Errorf("get bundle contents: %v", err) + } + bd := struct { + Content []byte `json:"content"` + }{} + + if err := json.Unmarshal(bundleData, &bd); err != nil { + return nil, fmt.Errorf("parse bundle data: %v", err) + } + + gzr, err := gzip.NewReader(bytes.NewReader(bd.Content)) + if err != nil { + return nil, fmt.Errorf("read bundle content gzip: %v", err) + } + return tarfs.New(gzr) +} + +func (i *Image) getBundleImageDigest(pod *corev1.Pod) (string, error) { + for _, ps := range pod.Status.ContainerStatuses { + if ps.Name == imageBundleUnpackContainerName && ps.ImageID != "" { + return ps.ImageID, nil + } + } + return "", fmt.Errorf("bundle image digest not found") +} + +func (i *Image) getPodLogs(ctx context.Context, pod *corev1.Pod) ([]byte, error) { + logReader, err := i.KubeClient.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &corev1.PodLogOptions{}).Stream(ctx) + if err != nil { + return nil, fmt.Errorf("get pod logs: %v", err) + } + defer logReader.Close() + buf := &bytes.Buffer{} + if _, err := io.Copy(buf, logReader); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (i *Image) handleUnexpectedPod(ctx context.Context, pod *corev1.Pod) error { + _ = i.Client.Delete(ctx, pod) + return fmt.Errorf("unexpected pod phase: %v", pod.Status.Phase) +} + +func pendingImagePodResult(pod *corev1.Pod) *Result { + var messages []string + for _, cStatus := range append(pod.Status.InitContainerStatuses, pod.Status.ContainerStatuses...) 
{ + if waiting := cStatus.State.Waiting; waiting != nil { + if waiting.Reason == "ErrImagePull" || waiting.Reason == "ImagePullBackOff" { + messages = append(messages, waiting.Message) + } + } + } + return &Result{State: StatePending, Message: strings.Join(messages, "; ")} +} diff --git a/internal/rukpak/source/unpacker.go b/internal/rukpak/source/unpacker.go new file mode 100644 index 000000000..03f65d49c --- /dev/null +++ b/internal/rukpak/source/unpacker.go @@ -0,0 +1,110 @@ +package source + +import ( + "context" + "fmt" + "io/fs" + + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/manager" + + clusterextension "github.com/operator-framework/operator-controller/api/v1alpha1" + rukpakapi "github.com/operator-framework/operator-controller/internal/rukpak/api" +) + +// Unpacker unpacks bundle content, either synchronously or asynchronously and +// returns a Result, which conveys information about the progress of unpacking +// the bundle content. +// +// If a Source unpacks content asynchronously, it should register one or more +// watches with a controller to ensure that Bundles referencing this source +// can be reconciled as progress updates are available. +// +// For asynchronous Sources, multiple calls to Unpack should be made until the +// returned result includes state StateUnpacked. +// +// NOTE: A source is meant to be agnostic to specific bundle formats and +// specifications. A source should treat a bundle root directory as an opaque +// file tree and delegate bundle format concerns to bundle parsers. +type Unpacker interface { + Unpack(context.Context, *rukpakapi.BundleSource, *clusterextension.ClusterExtension) (*Result, error) +} + +// Result conveys progress information about unpacking bundle content. +type Result struct { + // Bundle contains the full filesystem of a bundle's root directory. + Bundle fs.FS + + // ResolvedSource is a reproducible view of a Bundle's Source. + // When possible, source implementations should return a ResolvedSource + // that pins the Source such that future fetches of the bundle content can + // be guaranteed to fetch the exact same bundle content as the original + // unpack. + // + // For example, resolved image sources should reference a container image + // digest rather than an image tag, and git sources should reference a + // commit hash rather than a branch or tag. + ResolvedSource *rukpakapi.BundleSource + + // State is the current state of unpacking the bundle content. + State State + + // Message is contextual information about the progress of unpacking the + // bundle content. + Message string +} + +type State string + +const ( + // StatePending conveys that a request for unpacking a bundle has been + // acknowledged, but not yet started. + StatePending State = "Pending" + + // StateUnpacking conveys that the source is currently unpacking a bundle. + // This state should be used when the bundle contents are being downloaded + // and processed. + StateUnpacking State = "Unpacking" + + // StateUnpacked conveys that the bundle has been successfully unpacked. + StateUnpacked State = "Unpacked" +) + +type unpacker struct { + sources map[rukpakapi.SourceType]Unpacker +} + +// NewUnpacker returns a new composite Source that unpacks bundles using the source +// mapping provided by the configured sources. 
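//
// Illustrative usage (a sketch, not taken from this patch), where c is a client.Client,
// kc a kubernetes.Interface, and ns/img are placeholder values for the unpack pod
// namespace and unpack image:
//
//	u := NewUnpacker(map[rukpakapi.SourceType]Unpacker{
//		rukpakapi.SourceTypeImage: &Image{Client: c, KubeClient: kc, PodNamespace: ns, UnpackImage: img},
//	})
//	res, err := u.Unpack(ctx, bundleSource, clusterExtension)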
+func NewUnpacker(sources map[rukpakapi.SourceType]Unpacker) Unpacker { + return &unpacker{sources: sources} +} + +func (s *unpacker) Unpack(ctx context.Context, bs *rukpakapi.BundleSource, ce *clusterextension.ClusterExtension) (*Result, error) { + source, ok := s.sources[bs.Type] + if !ok { + return nil, fmt.Errorf("cluster extension %q, source type %q not supported", ce.Name, bs.Type) + } + return source.Unpack(ctx, bs, ce) +} + +// NewDefaultUnpacker returns a new composite Source that unpacks bundles using +// a default source mapping with built-in implementations of all of the supported +// source types. +// +// TODO: refactor NewDefaultUnpacker due to growing parameter list +func NewDefaultUnpacker(mgr manager.Manager, namespace, unpackImage string) (Unpacker, error) { + cfg := mgr.GetConfig() + kubeClient, err := kubernetes.NewForConfig(cfg) + if err != nil { + return nil, err + } + return NewUnpacker(map[rukpakapi.SourceType]Unpacker{ + rukpakapi.SourceTypeImage: &Image{ + Client: mgr.GetClient(), + KubeClient: kubeClient, + PodNamespace: namespace, + UnpackImage: unpackImage, + }, + }), nil +} diff --git a/internal/rukpak/util/hash.go b/internal/rukpak/util/hash.go new file mode 100644 index 000000000..a46bb3434 --- /dev/null +++ b/internal/rukpak/util/hash.go @@ -0,0 +1,39 @@ +package util + +import ( + "crypto/sha256" + "encoding/json" + "fmt" + "math/big" +) + +// DeepHashObject writes specified object to hash using the spew library +// which follows pointers and prints actual values of the nested objects +// ensuring the hash does not change when a pointer changes. +func DeepHashObject(obj interface{}) (string, error) { + // While the most accurate encoding we could do for Kubernetes objects (runtime.Object) + // would use the API machinery serializers, those operate over entire objects - and + // we often need to operate on snippets. Checking with the experts and the implementation, + // we can see that the serializers are a thin wrapper over json.Marshal for encoding: + // https://github.com/kubernetes/kubernetes/blob/8509ab82b96caa2365552efa08c8ba8baf11c5ec/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go#L216-L247 + // Therefore, we can be confident that using json.Marshal() here will: + // 1. be stable & idempotent - the library sorts keys, etc. + // 2. 
be germane to our needs - only fields that serialize and are sent to the server + // will be encoded + + hasher := sha256.New224() + hasher.Reset() + encoder := json.NewEncoder(hasher) + if err := encoder.Encode(obj); err != nil { + return "", fmt.Errorf("couldn't encode object: %w", err) + } + + // base62(sha224(bytes)) is a useful hash and encoding for adding the contents of this + // to a Kubernetes identifier or other field which has length and character set requirements + var hash []byte + hash = hasher.Sum(hash) + + var i big.Int + i.SetBytes(hash[:]) + return i.Text(36), nil +} diff --git a/internal/rukpak/util/labels.go b/internal/rukpak/util/labels.go new file mode 100644 index 000000000..16240edd2 --- /dev/null +++ b/internal/rukpak/util/labels.go @@ -0,0 +1,6 @@ +package util + +const ( + CoreOwnerKindKey = "core.rukpak.io/owner-kind" + CoreOwnerNameKey = "core.rukpak.io/owner-name" +) diff --git a/internal/rukpak/util/util.go b/internal/rukpak/util/util.go new file mode 100644 index 000000000..edfc06c4f --- /dev/null +++ b/internal/rukpak/util/util.go @@ -0,0 +1,11 @@ +package util + +func MergeMaps(maps ...map[string]string) map[string]string { + out := map[string]string{} + for _, m := range maps { + for k, v := range m { + out[k] = v + } + } + return out +} From 2e7f3701f5bc63f24c9d13516fed1721c99f850a Mon Sep 17 00:00:00 2001 From: Todd Short Date: Tue, 16 Apr 2024 15:02:21 -0400 Subject: [PATCH 3/3] fixup! Copy over some rukpak code and replace BundleDeployment --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index b014fbf24..1ab679785 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,6 @@ require ( k8s.io/api v0.29.3 k8s.io/apiextensions-apiserver v0.29.3 k8s.io/apimachinery v0.29.3 - k8s.io/cli-runtime v0.29.2 k8s.io/client-go v0.29.3 k8s.io/component-base v0.29.3 k8s.io/utils v0.0.0-20240102154912-e7106e64919e @@ -204,6 +203,7 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect helm.sh/helm/v3 v3.14.3 // indirect k8s.io/apiserver v0.29.3 // indirect + k8s.io/cli-runtime v0.29.2 // indirect k8s.io/klog/v2 v2.120.1 // indirect k8s.io/kube-openapi v0.0.0-20240221221325-2ac9dc51f3f1 // indirect k8s.io/kubectl v0.29.2 // indirect