Skip to content

Commit feeeee7

Browse files
Add initContainer to check for availability of required CRDs
1 parent ac0ddb5 commit feeeee7

File tree

4 files changed

+42
-1
lines changed

4 files changed

+42
-1
lines changed

Makefile

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -156,9 +156,20 @@ build: fmt vet ## Build manager binary.
156156
-o bin/manager main.go
157157

158158
.PHONY: run
159-
run: manifests fmt vet ## Run a controller from your host.
159+
run: crds-check manifests fmt vet ## Run a controller from your host.
160160
go run ./main.go
161161

162+
.PHONY: crds-check
163+
crds-check:
164+
@{ \
165+
kubectl get crds | grep 'rayclusters.ray.io' > /dev/null && \
166+
kubectl get crds | grep 'rayjobs.ray.io' > /dev/null && \
167+
kubectl get crds | grep 'rayservices.ray.io' > /dev/null; \
168+
} || { \
169+
echo "One or more required KubeRay CRDs are missing."; \
170+
exit 1; \
171+
}
172+
162173
.PHONY: image-build
163174
image-build: test-unit ## Build container image with the manager.
164175
podman build -t ${IMG} .

config/manager/manager.yaml

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,29 @@ spec:
2424
spec:
2525
securityContext:
2626
runAsNonRoot: true
27+
initContainers:
28+
- name: check-ray-crds
29+
securityContext:
30+
runAsUser: 1000
31+
allowPrivilegeEscalation: false
32+
capabilities:
33+
drop:
34+
- "ALL"
35+
image: alpine/k8s:1.27.11
36+
command:
37+
- sh
38+
- -c
39+
- |
40+
set -e
41+
CRDS="rayclusters.ray.io rayjobs.ray.io rayservices.ray.io"
42+
for crd in $CRDS; do
43+
echo "Checking for $crd"
44+
until kubectl get crd $crd; do
45+
echo "$crd not available yet, retrying in 10 seconds..."
46+
sleep 10
47+
done
48+
done
49+
echo "All required CRDs are available."
2750
# TODO(user): For common cases that do not require escalating privileges
2851
# it is recommended to ensure that all your Pods/Containers are restrictive.
2952
# More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted

config/rbac/role.yaml

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,12 @@ metadata:
55
creationTimestamp: null
66
name: manager-role
77
rules:
8+
- apiGroups:
9+
- apiextensions.k8s.io
10+
resources:
11+
- customresourcedefinitions
12+
verbs:
13+
- get
814
- apiGroups:
915
- ""
1016
resources:

pkg/controllers/raycluster_controller.go

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -78,6 +78,7 @@ var (
7878
// +kubebuilder:rbac:groups=core,resources=services,verbs=patch;delete;get
7979
// +kubebuilder:rbac:groups=core,resources=serviceaccounts,verbs=patch;delete;get
8080
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterrolebindings,verbs=patch;delete;get
81+
// +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get
8182

8283
// Reconcile is part of the main kubernetes reconciliation loop which aims to
8384
// move the current state of the cluster closer to the desired state.

0 commit comments

Comments (0)