feat: k8s helm rework (#396)

- remove helm-generated-yaml, replace with helm execs
- template/parameterise ceph csi
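
The vendored chart output is replaced by guarded helm execs, so Puppet only installs a chart when its release is absent. A minimal sketch of the pattern, using the purelb commands from rke2::helm below:

    export KUBECONFIG=/etc/rancher/rke2/rke2.yaml
    # same guard as the exec's 'unless': skip if the release already exists
    helm list -n purelb | grep -q ^purelb || \
      helm install purelb purelb/purelb \
        --create-namespace \
        --namespace=purelb \
        --repository-config /etc/helm/repositories.yaml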

Reviewed-on: #396
Ben Vincent 2025-09-20 17:40:41 +10:00
parent 4e77fb7ee7
commit 762f415d2d
22 changed files with 646 additions and 13979 deletions


@@ -1 +1 @@
rke2::node_token: ENC[PKCS7,MIIB2gYJKoZIhvcNAQcDoIIByzCCAccCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEACQ4IIE9qT7x+KBTtvksUwvko6U/wnRn4PZEHUc91u9StIcjd0N6uH3hrJ4ddJmwikXf9Sz/5t+kSPvcVewFcjzvMHzJXqaE57KX0hqjFpqSGMlqWy4hdrZTHm/W65PkB73j21HLn9gH7m4rmqLomSbIe21P7sFM5+1pbnQejx9Us43Mu7PNy+8rQCcdEeBnRmzLaVWR8c35aXMHANqefLaLPCWjTbDY4jhcHPkNse5thpyUW89Bnq6caTJjNNQRI9U2FX2qHqwFh/rcjtGwDk9JUJvpF1qb9mdY68txMfaQ2KQh0gH67nxVf0cDppUxJaQ86Uu71BQX54l/om9AFrDCBnAYJKoZIhvcNAQcBMB0GCWCGSAFlAwQBKgQQFfKTlWnJq2zqP6jsLAzw94BwDu32IVvfvYvsuNcOJHc4ipI0PV/6AcatIDaXjK+K8BMLdWxpOVsVf4vL1zPatB5UTwc8rm1UvxWN8SbtqF8accmAA0GbIexQezsJYaAI3NT9gItGj/aJjTitl+ed7QfNCd36HVH9FEmfDaGwN7f/yw==]
rke2::node_token: ENC[PKCS7,MIIB2gYJKoZIhvcNAQcDoIIByzCCAccCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAVPo7WjzlJQQhxefhV1bu1/+Jo/LY3gMmVGicDnDloGQd1jbdDtNz9wi6Hqqht1xQPdn22XmvvrVXtPhsdjDCGqWxmfZ0qWQVl1Ju2WIh5WsyyZgdE96k2+y7Cg5Dl0brX2m9YSZfow5BF8J8EnCDdRZncOCtFl/SU8ipPEq2uJJR+Y9sJv6aJflnFLCEYgJNbZY9ljMcs5ssJ21VpIqWYA0Z6vKVqOUeIWWKbYZUIoEml7sj3ktmw3loMYA6ED0/nzyYvRVizTvtGXl3IWGVDSt8rQ/kNhzKqURVOsgwfbt7un4n2Kxkreiydj3R6PbLdkpHdtu25dbjue8HLQkc4zCBnAYJKoZIhvcNAQcBMB0GCWCGSAFlAwQBKgQQMmIatr2oBei35+evghIkLIBwWT6F1WPDV3PA/QYNusfYcI1KrMRYFlDYyEkg/Tf+0gOpty9rdnYL+MQO2fTU9Br+INTr1oJcxJp/Lqap2+NibmBfZcLUIMn3q1S/jZp9BGQTk6RSwODqQ2x5GxDATinrtiUR4TXIIaiaP3KWlP8A7g==]


@@ -78,15 +78,15 @@ profiles::yum::global::repos:
name: rancher-rke2-common-latest
descr: rancher-rke2-common-latest
target: /etc/yum.repos.d/rke2-common.repo
baseurl: https://rpm.rancher.io/rke2/latest/common/centos/%{facts.os.release.major}/noarch
gpgkey: https://rpm.rancher.io/public.key
baseurl: https://packagerepo.service.consul/rke2/rhel%{facts.os.release.major}/common-daily/x86_64/os/
gpgkey: https://packagerepo.service.consul/rke2/rhel%{facts.os.release.major}/common-daily/x86_64/os/public.key
mirrorlist: absent
rancher-rke2-1-33-latest:
name: rancher-rke2-1-33-latest
descr: rancher-rke2-1-33-latest
target: /etc/yum.repos.d/rke2-1-33.repo
baseurl: https://rpm.rancher.io/rke2/latest/1.33/centos/%{facts.os.release.major}/x86_64
gpgkey: https://rpm.rancher.io/public.key
baseurl: https://packagerepo.service.consul/rke2/rhel%{facts.os.release.major}/1.33-daily/x86_64/os/
gpgkey: https://packagerepo.service.consul/rke2/rhel%{facts.os.release.major}/1.33-daily/x86_64/os/public.key
mirrorlist: absent
# dns


@@ -0,0 +1,2 @@
---
rke2::csi_ceph_key: ENC[PKCS7,MIIBmQYJKoZIhvcNAQcDoIIBijCCAYYCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEApQ371O4nGSrFB5tOZFTSJP+kJj3wJyEcWiNfonYA5LmbaMnQ6pUortec1519WHMICpSWdpq3O8frivm2CK3taYoKczeTzbsFTxvVp7s6gIZJUsCeqGHuq81YyjPtJE+Yy5IOBJjhe/8ECkEFNr0JlhwKBPWfTx5hHOzRdkGlN464weGFQtCI8UgdGe7AWEePG+u3e4RL+xCriw5tfuqMeeo+isDwVf30nK9NxsnmliOd/+jNW+GrtzycHAeokQOKnxfgrKll5Y5+npy5WueuSCEw1E+Io0NI/4Jthi7zu24UQu0KT8iRsqhuD5mr1ymvCNREnvCcVWt8VVRTGXQV+TBcBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBDS8VXZM3wEAFRALB/Fa19dgDCTRYhU8YY4g9zREP7epY9x2MRWaTT84Jx9w5Dc/XWaRrmL4yL2sK+QHSy2057jHzo=]
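
(The ENC[PKCS7,...] values in these hiera files are hiera-eyaml ciphertexts; a blob of this shape is typically produced with the hiera-eyaml CLI, along the lines of the following — the plaintext key is elided here:)

    # encrypt a string for hiera with the PKCS7 backend
    eyaml encrypt --label 'rke2::csi_ceph_key' --string '<ceph client key>'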


@@ -3,19 +3,35 @@
rke2::node_type: server
rke2::helm_install: true
rke2::helm_repos:
metallb: https://metallb.github.io/metallb
rancher-stable: https://releases.rancher.com/server-charts/stable
purelb: https://gitlab.com/api/v4/projects/20400619/packages/helm/stable
jetstack: https://charts.jetstack.io
harbor: https://helm.goharbor.io
traefik: https://traefik.github.io/charts
hashicorp: https://helm.releases.hashicorp.com
rke2::csi_ceph_enable: true
rke2::csi_ceph_clusterid: de96a98f-3d23-465a-a899-86d3d67edab8
rke2::csi_ceph_poolname: kubernetes
rke2::csi_ceph_monitors:
- 198.18.23.9:6789
- 198.18.23.10:6789
- 198.18.23.11:6789
- 198.18.23.12:6789
- 198.18.23.13:6789
rke2::csi_ceph_files:
- ceph-csi-nodeplugin-rbac
- ceph-csi-provisioner-rbac
- ceph-csi-rbdplugin-provisioner
- ceph-csi-rbdplugin
rke2::csi_ceph_templates:
- ceph-csi-config
- ceph-csi-secret
rke2::extra_config_files:
- 000_namespaces
- 010_rke2-canal-config
- 010_cert-manager
- 010_purelb
- 010_rancher
- 100_purelb_config
- 200_ingres_lb_nginx
- 201_ingres_route_rancher
- namespaces
- rke2-canal-config
- purelb_config
- ingres_lb_nginx
- ingres_route_rancher
rke2::config_hash:
advertise-address: "%{hiera('networking_loopback0_ip')}"
cluster-domain: "svc.k8s.unkin.net"

File diff suppressed because it is too large.


@@ -1,325 +0,0 @@
# Source: purelb/templates/serviceaccount-allocator.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: purelb-v0.13.0
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/version: v0.13.0
app.kubernetes.io/managed-by: Helm
name: allocator
namespace: purelb
---
# Source: purelb/templates/serviceaccount-lbnodeagent.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: purelb-v0.13.0
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/version: v0.13.0
app.kubernetes.io/managed-by: Helm
name: lbnodeagent
namespace: purelb
---
# Source: purelb/templates/clusterrole-allocator.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: purelb-v0.13.0
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/version: v0.13.0
app.kubernetes.io/managed-by: Helm
name: purelb:allocator
rules:
- apiGroups: [purelb.io]
resources: [servicegroups, lbnodeagents]
verbs: [get, list, watch, update]
- apiGroups: ['']
resources: [services]
verbs: [get, list, watch, update]
- apiGroups: ['']
resources: [services/status]
verbs: [update]
- apiGroups: ['']
resources: [events]
verbs: [create, patch]
- apiGroups: ['']
resources: [namespaces]
verbs: [get, list]
- apiGroups: [policy]
resourceNames: [allocator]
resources: [podsecuritypolicies]
verbs: [use]
---
# Source: purelb/templates/clusterrole-lbnodeagent.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: purelb-v0.13.0
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/version: v0.13.0
app.kubernetes.io/managed-by: Helm
name: purelb:lbnodeagent
rules:
- apiGroups: [purelb.io]
resources: [servicegroups, lbnodeagents]
verbs: [get, list, watch, update]
- apiGroups: ['']
resources: [endpoints, nodes]
verbs: [get, list, watch]
- apiGroups: ['']
resources: [services]
verbs: [get, list, watch, update]
- apiGroups: ['']
resources: [events]
verbs: [create, patch]
- apiGroups: ['']
resources: [namespaces]
verbs: [get, list]
- apiGroups: [policy]
resourceNames: [lbnodeagent]
resources: [podsecuritypolicies]
verbs: [use]
---
# Source: purelb/templates/clusterrolebinding-allocator.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: purelb-v0.13.0
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/version: v0.13.0
app.kubernetes.io/managed-by: Helm
name: purelb:allocator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: purelb:allocator
subjects:
- kind: ServiceAccount
name: allocator
namespace: purelb
---
# Source: purelb/templates/clusterrolebinding-lbnodeagent.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: purelb-v0.13.0
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/version: v0.13.0
app.kubernetes.io/managed-by: Helm
name: purelb:lbnodeagent
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: purelb:lbnodeagent
subjects:
- kind: ServiceAccount
name: lbnodeagent
namespace: purelb
---
# Source: purelb/templates/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: purelb-v0.13.0
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/version: v0.13.0
app.kubernetes.io/managed-by: Helm
name: pod-lister
namespace: purelb
rules:
- apiGroups: ['']
resources: [pods]
verbs: [list]
---
# Source: purelb/templates/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: purelb-v0.13.0
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/version: v0.13.0
app.kubernetes.io/managed-by: Helm
name: pod-lister
namespace: purelb
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: pod-lister
subjects:
- kind: ServiceAccount
name: lbnodeagent
---
# Source: purelb/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
helm.sh/chart: purelb-v0.13.0
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/version: v0.13.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: lbnodeagent
name: lbnodeagent
namespace: purelb
spec:
selector:
matchLabels:
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/component: lbnodeagent
template:
metadata:
annotations:
prometheus.io/port: '7472'
prometheus.io/scrape: 'true'
labels:
helm.sh/chart: purelb-v0.13.0
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/version: v0.13.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: lbnodeagent
spec:
containers:
- env:
- name: NETBOX_USER_TOKEN
valueFrom:
secretKeyRef:
name: netbox-client
key: user-token
optional: true
- name: DEFAULT_ANNOUNCER
value: PureLB
- name: PURELB_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: PURELB_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: PURELB_ML_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: PURELB_ML_LABELS
value: app.kubernetes.io/name=purelb,app.kubernetes.io/component=lbnodeagent
- name: ML_GROUP
value: 8sb7ikA5qHwQQqxc
image: registry.gitlab.com/purelb/purelb/lbnodeagent:v0.13.0
imagePullPolicy: Always
name: lbnodeagent
ports:
- containerPort: 7472
name: monitoring
resources:
securityContext:
capabilities:
add: [NET_ADMIN, NET_RAW]
drop: [ALL]
readOnlyRootFilesystem: false
runAsGroup: 0
runAsUser: 0
hostNetwork: true
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: lbnodeagent
terminationGracePeriodSeconds: 2
---
# Source: purelb/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
helm.sh/chart: purelb-v0.13.0
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/version: v0.13.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: allocator
name: allocator
namespace: purelb
spec:
revisionHistoryLimit: 3
selector:
matchLabels:
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/component: allocator
template:
metadata:
annotations:
prometheus.io/port: '7472'
prometheus.io/scrape: 'true'
labels:
helm.sh/chart: purelb-v0.13.0
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/version: v0.13.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: allocator
spec:
containers:
- env:
- name: NETBOX_USER_TOKEN
valueFrom:
secretKeyRef:
name: netbox-client
key: user-token
optional: true
- name: DEFAULT_ANNOUNCER
value: PureLB
image: registry.gitlab.com/purelb/purelb/allocator:v0.13.0
imagePullPolicy: Always
name: allocator
ports:
- containerPort: 7472
name: monitoring
resources:
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop: [all]
readOnlyRootFilesystem: true
nodeSelector:
kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
runAsUser: 65534
serviceAccountName: allocator
terminationGracePeriodSeconds: 0
---
# Source: purelb/templates/lbnodeagent.yaml
apiVersion: purelb.io/v1
kind: LBNodeAgent
metadata:
name: default
namespace: purelb
labels:
helm.sh/chart: purelb-v0.13.0
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/version: v0.13.0
app.kubernetes.io/managed-by: Helm
spec:
local:
localint: default
extlbint: kube-lb0
sendgarp: false


@@ -1,695 +0,0 @@
# Source: rancher/templates/priorityClass.yaml
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
name: rancher-critical
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
value: 1000000000
globalDefault: false
description: Priority class used by pods critical to rancher's functionality.
---
# Source: rancher/templates/serviceAccount.yaml
kind: ServiceAccount
apiVersion: v1
metadata:
name: rancher
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
---
# Source: rancher/templates/configMap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: rancher-config
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
app.kubernetes.io/part-of: rancher
data:
priorityClassName: rancher-critical
---
# Source: rancher/templates/clusterRoleBinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rancher
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
subjects:
- kind: ServiceAccount
name: rancher
namespace: cattle-system
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
# Source: rancher/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: rancher
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
spec:
ports:
- port: 80
targetPort: 80
protocol: TCP
name: http
- port: 443
targetPort: 444
protocol: TCP
name: https-internal
selector:
app: rancher
---
# Source: rancher/templates/deployment.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
name: rancher
annotations:
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
spec:
replicas: 3
selector:
matchLabels:
app: rancher
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
app: rancher
release: rancher
spec:
priorityClassName: rancher-critical
serviceAccountName: rancher
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
operator: In
values: [rancher]
topologyKey: kubernetes.io/hostname
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: NotIn
values: [windows]
tolerations:
- key: cattle.io/os
value: linux
effect: NoSchedule
operator: Equal
containers:
- image: docker.io/rancher/rancher:v2.12.1
imagePullPolicy: IfNotPresent
name: rancher
ports:
- containerPort: 80
protocol: TCP
- containerPort: 6666
protocol: TCP
args:
# Public trusted CA - clear ca certs
- --no-cacerts
- --http-listen-port=80
- --https-listen-port=443
- --add-local=true
env:
- name: CATTLE_NAMESPACE
value: cattle-system
- name: CATTLE_PEER_SERVICE
value: rancher
- name: CATTLE_BOOTSTRAP_PASSWORD
valueFrom:
secretKeyRef:
name: bootstrap-secret
key: bootstrapPassword
- name: IMPERATIVE_API_DIRECT
value: 'true'
- name: IMPERATIVE_API_APP_SELECTOR
value: rancher
startupProbe:
httpGet:
path: /healthz
port: 80
timeoutSeconds: 5
failureThreshold: 12
periodSeconds: 10
livenessProbe:
httpGet:
path: /healthz
port: 80
timeoutSeconds: 5
periodSeconds: 30
failureThreshold: 5
readinessProbe:
httpGet:
path: /healthz
port: 80
timeoutSeconds: 5
periodSeconds: 30
failureThreshold: 5
volumeMounts:
volumes:
---
# Source: rancher/templates/ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: rancher
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
annotations:
nginx.ingress.kubernetes.io/proxy-connect-timeout: '30'
nginx.ingress.kubernetes.io/proxy-read-timeout: '1800'
nginx.ingress.kubernetes.io/proxy-send-timeout: '1800'
spec:
rules:
- host: rancher.main.unkin.net # hostname to access rancher server
http:
paths:
- backend:
service:
name: rancher
port:
number: 80
pathType: ImplementationSpecific
path: /
tls:
- hosts: [rancher.main.unkin.net]
secretName: tls-rancher-ingress
---
# Source: rancher/templates/post-delete-hook-service-account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: rancher-post-delete
namespace: cattle-system
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
annotations:
helm.sh/hook: post-delete
helm.sh/hook-weight: '1'
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed
---
# Source: rancher/templates/pre-upgrade-hook-service-account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: rancher-pre-upgrade
namespace: cattle-system
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
annotations:
helm.sh/hook: pre-upgrade
helm.sh/hook-weight: '-1'
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
---
# Source: rancher/templates/secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: bootstrap-secret
namespace: cattle-system
annotations:
helm.sh/hook: pre-install,pre-upgrade
helm.sh/hook-weight: '-5'
helm.sh/resource-policy: keep
type: Opaque
data:
bootstrapPassword: YWRtaW4=
---
# Source: rancher/templates/post-delete-hook-config-map.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: rancher-post-delete
namespace: cattle-system
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
annotations:
helm.sh/hook: post-delete
helm.sh/hook-weight: '1'
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed
data:
post-delete-hook.sh: |-
#!/bin/bash
set -e
namespaces="${NAMESPACES}"
rancher_namespace="${RANCHER_NAMESPACE}"
timeout="${TIMEOUT}"
ignoreTimeoutError="${IGNORETIMEOUTERROR}"
if [[ -z ${namespaces} ]]; then
echo "No namespace is provided."
exit 1
fi
if [[ -z ${rancher_namespace} ]]; then
echo "No rancher namespace is provided."
exit 1
fi
if [[ -z ${timeout} ]]; then
echo "No timeout value is provided."
exit 1
fi
if [[ -z ${ignoreTimeoutError} ]]; then
echo "No ignoreTimeoutError value is provided."
exit 1
fi
succeeded=()
failed=()
get_pod_count() {
kubectl get pods --selector app="${1}" -n "${2}" -o json | jq '.items | length'
}
echo "Uninstalling Rancher resources in the following namespaces: ${namespaces}"
for namespace in ${namespaces}; do
for app in $(helm list -n "${namespace}" -q); do
if [[ ${app} =~ .crd$ ]]; then
echo "--- Skip the app [${app}] in the namespace [${namespace}]"
continue
fi
echo "--- Deleting the app [${app}] in the namespace [${namespace}]"
if [[ ! $(helm uninstall "${app}" -n "${namespace}") ]]; then
failed=("${failed[@]}" "${app}")
continue
fi
t=0
while true; do
if [[ $(get_pod_count "${app}" "${namespace}") -eq 0 ]]; then
echo "successfully uninstalled [${app}] in the namespace [${namespace}]"
succeeded=("${succeeded[@]}" "${app}")
break
fi
if [[ ${t} -ge ${timeout} ]]; then
echo "timeout uninstalling [${app}] in the namespace [${namespace}]"
failed=("${failed[@]}" "${app}")
break
fi
# by default, wait 120 seconds in total for an app to be uninstalled
echo "waiting 5 seconds for pods of [${app}] to be terminated ..."
sleep 5
t=$((t + 5))
done
done
# delete the helm operator pods
for pod in $(kubectl get pods -n "${namespace}" -o name); do
if [[ ${pod} =~ ^pod\/helm-operation-* ]]; then
echo "--- Deleting the pod [${pod}] in the namespace [${namespace}]"
kubectl delete "${pod}" -n "${namespace}"
fi
done
done
echo "Removing Rancher bootstrap secret in the following namespace: ${rancher_namespace}"
kubectl --ignore-not-found=true delete secret bootstrap-secret -n "${rancher_namespace}"
echo "------ Summary ------"
if [[ ${#succeeded[@]} -ne 0 ]]; then
echo "Succeeded to uninstall the following apps:" "${succeeded[@]}"
fi
if [[ ${#failed[@]} -ne 0 ]]; then
echo "Failed to uninstall the following apps:" "${failed[@]}"
if [[ "${ignoreTimeoutError}" == "false" ]]; then
exit 2
fi
else
echo "Cleanup finished successfully."
fi
---
# Source: rancher/templates/pre-upgrade-hook-config-map.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: rancher-pre-upgrade
namespace: cattle-system
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
annotations:
helm.sh/hook: pre-upgrade
helm.sh/hook-weight: '-1'
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
data:
pre-upgrade-hook.sh: |-
#!/bin/bash
set -eo pipefail
# Global counters
declare -A COUNTS
RESOURCES_FOUND=false
check_prerequisites() {
if ! command -v kubectl &>/dev/null; then
echo "Missing required tool: kubectl"
exit 1
fi
}
print_resource_table() {
local kind="$1"
local items="$2"
local -a headers=("${@:3}")
local count
count=$(wc -l <<< "$items")
COUNTS["$kind"]=$count
RESOURCES_FOUND=true
echo "Found $count $kind resource(s):"
echo
IFS=$'\n' read -r -d '' -a lines < <(printf '%s\0' "$items")
# Initialize max_lengths array with header lengths
local -a max_lengths
for i in "${!headers[@]}"; do
max_lengths[i]=${#headers[i]}
done
# Calculate max width for each column
for line in "${lines[@]}"; do
IFS=$'\t' read -r -a cols <<< "$line"
for i in "${!cols[@]}"; do
(( ${#cols[i]} > max_lengths[i] )) && max_lengths[i]=${#cols[i]}
done
done
for i in "${!headers[@]}"; do
printf "%-${max_lengths[i]}s " "${headers[i]}"
done
printf "\n"
for i in "${!headers[@]}"; do
printf "%-${max_lengths[i]}s " "$(printf '%*s' "${max_lengths[i]}" '' | tr ' ' '-')"
done
printf "\n"
for line in "${lines[@]}"; do
IFS=$'\t' read -r -a cols <<< "$line"
for i in "${!cols[@]}"; do
printf "%-${max_lengths[i]}s " "${cols[i]}"
done
printf "\n"
done
echo
}
detect_resource() {
local crd="$1"
local kind="$2"
local jsonpath="$3"
local -a headers=("${@:4}")
echo "Checking for $kind resources..."
local output
if ! output=$(kubectl get "$crd" --all-namespaces -o=jsonpath="$jsonpath" 2>&1); then
if grep -q "the server doesn't have a resource type" <<< "$output"; then
echo "Resource type $crd not found. Skipping."
echo
return 0
else
echo "Error retrieving $kind resources: $output"
exit 1
fi
fi
if [ -z "$output" ]; then
echo "No $kind resources found."
echo
else
print_resource_table "$kind" "$output" "${headers[@]}"
fi
}
print_summary() {
echo "===== SUMMARY ====="
local total=0
for kind in "${!COUNTS[@]}"; do
local count=${COUNTS[$kind]}
echo "$kind: $count"
total=$((total + count))
done
echo "Total resources detected: $total"
if [ "$RESOURCES_FOUND" = true ]; then
echo "Error: Rancher v2.12+ does not support RKE1.
Detected RKE1-related resources (listed above).
Please migrate these clusters to RKE2 or K3s, or delete the related resources.
More info: https://www.suse.com/c/rke-end-of-life-by-july-2025-replatform-to-rke2-or-k3s"
exit 1
else
echo "No RKE related resources found."
fi
}
main() {
check_prerequisites
detect_resource "clusters.management.cattle.io" "RKE Management Cluster" \
'{range .items[?(@.spec.rancherKubernetesEngineConfig)]}{.metadata.name}{"\t"}{.spec.displayName}{"\n"}{end}' \
"NAME" "DISPLAY NAME"
detect_resource "nodetemplates.management.cattle.io" "NodeTemplate" \
'{range .items[*]}{.metadata.namespace}{"\t"}{.metadata.name}{"\t"}{.spec.displayName}{"\n"}{end}' \
"NAMESPACE" "NAME" "DISPLAY NAME"
detect_resource "clustertemplates.management.cattle.io" "ClusterTemplate" \
'{range .items[*]}{.metadata.namespace}{"\t"}{.metadata.name}{"\t"}{.spec.displayName}{"\n"}{end}' \
"NAMESPACE" "NAME" "DISPLAY NAME"
print_summary
}
main
---
# Source: rancher/templates/post-delete-hook-cluster-role.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rancher-post-delete
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
annotations:
helm.sh/hook: post-delete
helm.sh/hook-weight: '1'
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed
rules:
- apiGroups: [extensions, apps]
resources: [deployments]
verbs: [get, list, delete]
- apiGroups: [batch]
resources: [jobs]
verbs: [get, list, watch, delete, create]
- apiGroups: [rbac.authorization.k8s.io]
resources: [clusterroles, clusterrolebindings, roles, rolebindings]
verbs: [get, list, delete, create]
- apiGroups: ['']
resources: [pods, secrets, services, configmaps]
verbs: [get, list, delete]
- apiGroups: ['']
resources: [serviceaccounts]
verbs: [get, list, delete, create]
- apiGroups: [networking.k8s.io]
resources: [networkpolicies]
verbs: [get, list, delete]
- apiGroups: [admissionregistration.k8s.io]
resources:
- validatingwebhookconfigurations
- mutatingwebhookconfigurations
verbs: [get, list, delete]
- apiGroups: [networking.k8s.io]
resources: [ingresses]
verbs: [delete]
- apiGroups: [cert-manager.io]
resources: [issuers]
verbs: [delete]
---
# Source: rancher/templates/pre-upgrade-hook-cluster-role.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rancher-pre-upgrade
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
annotations:
helm.sh/hook: pre-upgrade
helm.sh/hook-weight: '-1'
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
rules:
- apiGroups: [management.cattle.io]
resources: [clusters, nodetemplates, clustertemplates]
verbs: [get, list]
---
# Source: rancher/templates/post-delete-hook-cluster-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: rancher-post-delete
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
annotations:
helm.sh/hook: post-delete
helm.sh/hook-weight: '2'
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rancher-post-delete
subjects:
- kind: ServiceAccount
name: rancher-post-delete
namespace: cattle-system
---
# Source: rancher/templates/pre-upgrade-hook-cluster-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: rancher-pre-upgrade
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
annotations:
helm.sh/hook: pre-upgrade
helm.sh/hook-weight: '-1'
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rancher-pre-upgrade
subjects:
- kind: ServiceAccount
name: rancher-pre-upgrade
namespace: cattle-system
---
# Source: rancher/templates/post-delete-hook-job.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: rancher-post-delete
namespace: cattle-system
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
annotations:
helm.sh/hook: post-delete
helm.sh/hook-weight: '3'
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
spec:
backoffLimit: 3
template:
metadata:
name: rancher-post-delete
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
spec:
serviceAccountName: rancher-post-delete
restartPolicy: OnFailure
containers:
- name: rancher-post-delete
image: rancher/shell:v0.5.0
imagePullPolicy: IfNotPresent
securityContext:
runAsUser: 0
command: [/scripts/post-delete-hook.sh]
volumeMounts:
- mountPath: /scripts
name: config-volume
env:
- name: NAMESPACES
value: cattle-fleet-system cattle-system rancher-operator-system
- name: RANCHER_NAMESPACE
value: cattle-system
- name: TIMEOUT
value: '120'
- name: IGNORETIMEOUTERROR
value: 'false'
volumes:
- name: config-volume
configMap:
name: rancher-post-delete
defaultMode: 0777
---
# Source: rancher/templates/pre-upgrade-hook-job.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: rancher-pre-upgrade
namespace: cattle-system
labels:
app: rancher-pre-upgrade
chart: rancher-2.12.1
heritage: Helm
release: rancher
annotations:
helm.sh/hook: pre-upgrade
helm.sh/hook-weight: '-1'
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
spec:
backoffLimit: 3
template:
metadata:
name: rancher-pre-upgrade
labels:
app: rancher-pre-upgrade
chart: rancher-2.12.1
heritage: Helm
release: rancher
spec:
serviceAccountName: rancher-pre-upgrade
restartPolicy: Never
containers:
- name: rancher-pre-upgrade
image: rancher/shell:v0.5.0
imagePullPolicy: IfNotPresent
securityContext:
runAsUser: 0
command: [/scripts/pre-upgrade-hook.sh]
volumeMounts:
- mountPath: /scripts
name: config-volume
volumes:
- name: config-volume
configMap:
name: rancher-pre-upgrade
defaultMode: 0777


@@ -0,0 +1,48 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: rbd-csi-nodeplugin
namespace: ceph-csi
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-nodeplugin
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get"]
- apiGroups: [""]
resources: ["serviceaccounts"]
verbs: ["get"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["list", "get"]
- apiGroups: [""]
resources: ["serviceaccounts/token"]
verbs: ["create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-nodeplugin
subjects:
- kind: ServiceAccount
name: rbd-csi-nodeplugin
namespace: ceph-csi
roleRef:
kind: ClusterRole
name: rbd-csi-nodeplugin
apiGroup: rbac.authorization.k8s.io


@@ -0,0 +1,125 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: rbd-csi-provisioner
namespace: ceph-csi
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-external-provisioner-runner
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "update", "delete", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "watch", "update", "patch", "create"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots/status"]
verbs: ["get", "list", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update", "patch"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get"]
- apiGroups: [""]
resources: ["serviceaccounts"]
verbs: ["get"]
- apiGroups: [""]
resources: ["serviceaccounts/token"]
verbs: ["create"]
- apiGroups: ["groupsnapshot.storage.k8s.io"]
resources: ["volumegroupsnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["groupsnapshot.storage.k8s.io"]
resources: ["volumegroupsnapshotcontents"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["groupsnapshot.storage.k8s.io"]
resources: ["volumegroupsnapshotcontents/status"]
verbs: ["update", "patch"]
- apiGroups: ["replication.storage.openshift.io"]
resources: ["volumegroupreplicationcontents"]
verbs: ["get", "list", "watch"]
- apiGroups: ["replication.storage.openshift.io"]
resources: ["volumegroupreplicationclasses"]
verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-provisioner-role
subjects:
- kind: ServiceAccount
name: rbd-csi-provisioner
namespace: ceph-csi
roleRef:
kind: ClusterRole
name: rbd-external-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-external-provisioner-cfg
namespace: ceph-csi
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "watch", "create", "update", "delete"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-provisioner-role-cfg
namespace: ceph-csi
subjects:
- kind: ServiceAccount
name: rbd-csi-provisioner
namespace: ceph-csi
roleRef:
kind: Role
name: rbd-external-provisioner-cfg
apiGroup: rbac.authorization.k8s.io


@@ -0,0 +1,124 @@
---
apiVersion: v1
kind: Service
metadata:
name: csi-rbdplugin-provisioner
namespace: ceph-csi
labels:
app: csi-metrics
spec:
selector:
app: csi-rbdplugin-provisioner
ports:
- name: httpmetrics
port: 8080
protocol: TCP
targetPort: 8680
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: csi-rbdplugin-provisioner
namespace: ceph-csi
spec:
replicas: 3
selector:
matchLabels:
app: csi-rbdplugin-provisioner
template:
metadata:
labels:
app: csi-rbdplugin-provisioner
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- csi-rbdplugin-provisioner
topologyKey: "kubernetes.io/hostname"
serviceAccountName: rbd-csi-provisioner
priorityClassName: system-cluster-critical
containers:
- name: csi-rbdplugin
image: quay.io/cephcsi/cephcsi:v3.15
args:
- "--nodeid=$(NODE_ID)"
- "--type=rbd"
- "--controllerserver=true"
- "--endpoint=$(CSI_ENDPOINT)"
- "--csi-addons-endpoint=$(CSI_ADDONS_ENDPOINT)"
- "--v=5"
- "--drivername=rbd.csi.ceph.com"
- "--pidlimit=-1"
- "--rbdhardmaxclonedepth=8"
- "--rbdsoftmaxclonedepth=4"
- "--enableprofiling=false"
- "--setmetadata=true"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CSI_ENDPOINT
value: unix:///csi/csi-provisioner.sock
- name: CSI_ADDONS_ENDPOINT
value: unix:///csi/csi-addons.sock
imagePullPolicy: IfNotPresent
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: host-dev
mountPath: /dev
- name: host-sys
mountPath: /sys
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
- name: ceph-csi-encryption-kms-config
mountPath: /etc/ceph-csi-encryption-kms-config/
- name: ceph-config
mountPath: /etc/ceph/
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
# snapshotter & other sidecars omitted in this snippet for brevity
volumes:
- name: socket-dir
emptyDir:
medium: Memory
- name: host-dev
hostPath:
path: /dev
- name: host-sys
hostPath:
path: /sys
- name: lib-modules
hostPath:
path: /lib/modules
- name: ceph-csi-config
configMap:
name: ceph-csi-config
- name: ceph-csi-encryption-kms-config
configMap:
name: ceph-csi-encryption-kms-config
- name: ceph-config
configMap:
name: ceph-config
- name: keys-tmp-dir
emptyDir:
medium: Memory
# and other volumes as in the original


@@ -0,0 +1,155 @@
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: csi-rbdplugin
namespace: ceph-csi
spec:
selector:
matchLabels:
app: csi-rbdplugin
template:
metadata:
labels:
app: csi-rbdplugin
spec:
serviceAccountName: rbd-csi-nodeplugin
hostNetwork: true
hostPID: true
priorityClassName: system-node-critical
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: csi-rbdplugin
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: quay.io/cephcsi/cephcsi:v3.15
args:
- "--nodeid=$(NODE_ID)"
- "--pluginpath=/var/lib/kubelet/plugins"
- "--stagingpath=/var/lib/kubelet/plugins/kubernetes.io/csi/"
- "--type=rbd"
- "--nodeserver=true"
- "--endpoint=$(CSI_ENDPOINT)"
- "--csi-addons-endpoint=$(CSI_ADDONS_ENDPOINT)"
- "--v=5"
- "--drivername=rbd.csi.ceph.com"
- "--enableprofiling=false"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: CSI_ADDONS_ENDPOINT
value: unix:///csi/csi-addons.sock
imagePullPolicy: IfNotPresent
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: host-dev
mountPath: /dev
- name: host-sys
mountPath: /sys
- name: host-mount
mountPath: /run/mount
- name: etc-selinux
mountPath: /etc/selinux
readOnly: true
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: plugin-dir
mountPath: /var/lib/kubelet/plugins
mountPropagation: "Bidirectional"
- name: mountpoint-dir
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
- name: ceph-logdir
mountPath: /var/log/ceph
- name: ceph-config
mountPath: /etc/ceph/
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
- name: ceph-csi-encryption-kms-config
mountPath: /etc/ceph-csi-encryption-kms-config/
- name: oidc-token
mountPath: /run/secrets/tokens
readOnly: true
# possibly sidecars like driverregistrar, liveness, etc.
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/rbd.csi.ceph.com
type: DirectoryOrCreate
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins
type: Directory
- name: mountpoint-dir
hostPath:
path: /var/lib/kubelet/pods
type: DirectoryOrCreate
- name: ceph-logdir
hostPath:
path: /var/log/ceph
type: DirectoryOrCreate
- name: host-dev
hostPath:
path: /dev
- name: host-sys
hostPath:
path: /sys
- name: etc-selinux
hostPath:
path: /etc/selinux
type: DirectoryOrCreate
- name: host-mount
hostPath:
path: /run/mount
- name: lib-modules
hostPath:
path: /lib/modules
type: DirectoryOrCreate
- name: ceph-config
configMap:
name: ceph-config
- name: ceph-csi-config
configMap:
name: ceph-csi-config
- name: ceph-csi-encryption-kms-config
configMap:
name: ceph-csi-encryption-kms-config
- name: keys-tmp-dir
emptyDir:
medium: Memory
---
apiVersion: v1
kind: Service
metadata:
name: csi-metrics-rbdplugin
namespace: ceph-csi
labels:
app: csi-metrics
spec:
ports:
- name: httpmetrics
port: 8080
protocol: TCP
targetPort: 8680
selector:
app: csi-rbdplugin


@@ -7,6 +7,13 @@ class rke2::config (
Stdlib::Fqdn $bootstrap_node = $rke2::bootstrap_node,
String $node_token = $rke2::node_token,
Array[String[1]] $extra_config_files = $rke2::extra_config_files,
Boolean $csi_ceph_enable = $rke2::csi_ceph_enable,
Array[String] $csi_ceph_files = $rke2::csi_ceph_files,
Array[String] $csi_ceph_templates = $rke2::csi_ceph_templates,
Optional[String[1]] $csi_ceph_key = $rke2::csi_ceph_key,
Optional[String[1]] $csi_ceph_clusterid = $rke2::csi_ceph_clusterid,
Optional[Array[String]] $csi_ceph_monitors = $rke2::csi_ceph_monitors,
Optional[String[1]] $csi_ceph_poolname = $rke2::csi_ceph_poolname,
){
# If agent, add the token. TODO: determine which other config fields are needed.
@@ -83,5 +90,33 @@ class rke2::config (
require => Service['rke2-server'],
}
}
# manage ceph files
if $csi_ceph_enable {
$csi_ceph_files.each |$file| {
file {"/var/lib/rancher/rke2/server/manifests/${file}.yaml":
ensure => file,
owner => 'root',
group => 'root',
mode => '0644',
source => "puppet:///modules/rke2/${file}.yaml",
require => Service['rke2-server'],
}
}
$csi_ceph_templates.each |$file| {
file {"/var/lib/rancher/rke2/server/manifests/${file}.yaml":
ensure => file,
owner => 'root',
group => 'root',
mode => '0644',
content => template("rke2/${file}.yaml.erb"),
require => Service['rke2-server'],
}
}
}
}
}
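
RKE2 auto-applies any manifest dropped into /var/lib/rancher/rke2/server/manifests, so once Puppet has placed the files above, a quick sanity check (namespace as used in the templates) is:

    ls /var/lib/rancher/rke2/server/manifests/ceph-csi-*.yaml
    export KUBECONFIG=/etc/rancher/rke2/rke2.yaml
    kubectl -n ceph-csi get sa,deploy,ds
    kubectl get storageclass csi-rbd-sc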


@@ -1,4 +1,3 @@
# manage helm
class rke2::helm (
Enum['server', 'agent'] $node_type = $rke2::node_type,
@@ -39,6 +38,44 @@ class rke2::helm (
}
}
}
# install specific helm charts to bootstrap the environment
$plb_cmd = 'helm install purelb purelb/purelb \
--create-namespace \
--namespace=purelb \
--repository-config /etc/helm/repositories.yaml'
exec { 'install_purelb':
command => $plb_cmd,
path => ['/usr/bin', '/bin'],
environment => ['KUBECONFIG=/etc/rancher/rke2/rke2.yaml'],
unless => 'helm list -n purelb | grep -q ^purelb',
}
$cm_cmd = 'helm install cert-manager jetstack/cert-manager \
--namespace cert-manager \
--create-namespace \
--set crds.enabled=true \
--repository-config /etc/helm/repositories.yaml'
exec { 'install_cert_manager':
command => $cm_cmd,
path => ['/usr/bin', '/bin'],
environment => ['KUBECONFIG=/etc/rancher/rke2/rke2.yaml'],
unless => 'helm list -n cert-manager | grep -q ^cert-manager',
}
$r_cmd = 'helm install rancher rancher-stable/rancher \
--namespace cattle-system \
--create-namespace \
--set hostname=rancher.main.unkin.net \
--set bootstrapPassword=admin \
--set ingress.tls.source=secret \
--repository-config /etc/helm/repositories.yaml'
exec { 'install_rancher':
command => $r_cmd,
path => ['/usr/bin', '/bin'],
environment => ['KUBECONFIG=/etc/rancher/rke2/rke2.yaml'],
unless => 'helm list -n cattle-system | grep -q ^rancher',
}
}
}
}
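
These execs assume /etc/helm/repositories.yaml already lists the purelb, jetstack and rancher-stable repos (presumably populated from rke2::helm_repos by the part of this class not shown in the diff); the imperative equivalent for one repo would be, e.g.:

    helm repo add purelb https://gitlab.com/api/v4/projects/20400619/packages/helm/stable \
      --repository-config /etc/helm/repositories.yaml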


@@ -12,6 +12,13 @@ class rke2 (
Hash $helm_repos = $rke2::params::helm_repos,
Array[String[1]] $extra_config_files = $rke2::params::extra_config_files,
Stdlib::HTTPUrl $container_archive_source = $rke2::params::container_archive_source,
Boolean $csi_ceph_enable = $rke2::params::csi_ceph_enable,
Array[String] $csi_ceph_files = $rke2::params::csi_ceph_files,
Array[String] $csi_ceph_templates = $rke2::params::csi_ceph_templates,
Optional[String[1]] $csi_ceph_key = $rke2::params::csi_ceph_key,
Optional[String[1]] $csi_ceph_clusterid = $rke2::params::csi_ceph_clusterid,
Optional[Array[String]] $csi_ceph_monitors = $rke2::params::csi_ceph_monitors,
Optional[String[1]] $csi_ceph_poolname = $rke2::params::csi_ceph_poolname,
) inherits rke2::params {
include rke2::install


@@ -12,4 +12,11 @@ class rke2::params (
Hash $helm_repos = {},
Array[String[1]] $extra_config_files = [],
Stdlib::HTTPUrl $container_archive_source = 'https://github.com/rancher/rke2/releases/download',
Boolean $csi_ceph_enable = false,
Array[String] $csi_ceph_files = [],
Array[String] $csi_ceph_templates = [],
Optional[String[1]] $csi_ceph_key = undef,
Optional[String[1]] $csi_ceph_clusterid = undef,
Optional[Array[String]] $csi_ceph_monitors = undef,
Optional[String[1]] $csi_ceph_poolname = undef,
) {}


@@ -0,0 +1,65 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: ceph-csi
---
apiVersion: v1
kind: ConfigMap
metadata:
name: ceph-csi-config
namespace: ceph-csi
data:
config.json: |-
[
{
"clusterID": "<%= @csi_ceph_clusterid %>",
"monitors": [
<% @csi_ceph_monitors.each_with_index do |mon, index| -%>
"<%= mon %>"<% if index < @csi_ceph_monitors.length - 1 %>,<% end %>
<% end -%>
]
}
]
---
apiVersion: v1
kind: ConfigMap
metadata:
name: ceph-csi-encryption-kms-config
namespace: ceph-csi
data:
config.json: |-
{}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: ceph-config
namespace: ceph-csi
data:
ceph.conf: |
[global]
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
keyring: |
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: csi-rbd-sc
provisioner: rbd.csi.ceph.com
parameters:
clusterID: <%= @csi_ceph_clusterid %>
pool: <%= @csi_ceph_poolname %>
imageFeatures: layering
csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
csi.storage.k8s.io/provisioner-secret-namespace: ceph-csi
csi.storage.k8s.io/controller-expand-secret-name: csi-rbd-secret
csi.storage.k8s.io/controller-expand-secret-namespace: ceph-csi
csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
csi.storage.k8s.io/node-stage-secret-namespace: ceph-csi
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
- discard
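
With the hiera data earlier in this commit (rke2::csi_ceph_clusterid and the five monitors), the config.json section of this template renders to:

    [
      {
        "clusterID": "de96a98f-3d23-465a-a899-86d3d67edab8",
        "monitors": [
          "198.18.23.9:6789",
          "198.18.23.10:6789",
          "198.18.23.11:6789",
          "198.18.23.12:6789",
          "198.18.23.13:6789"
        ]
      }
    ]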


@@ -0,0 +1,10 @@
---
apiVersion: v1
kind: Secret
metadata:
name: csi-rbd-secret
namespace: ceph-csi
stringData:
userID: kubernetes
userKey: <%= @csi_ceph_key %>