feat: manage rancher, purelb, cert-manager (#395)

This change installs rancher, purelb and cert-manager, then configures a
dmz and a common IP pool for use by LoadBalancer services. The nginx ingress
controller is given 198.18.200.0 (from the common pool) and announces that IP
from all nodes, so it becomes an anycast address in OSPF (a usage sketch
follows the list below).

- manage the installation of rancher, purelb and cert-manager
- add rancher ingress routes
- add nginx external IP / LoadBalancer service
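
For reference, a minimal sketch of how a workload would consume one of the new
pools (the service name and port below are hypothetical, not part of this
change): a LoadBalancer Service selects a PureLB ServiceGroup via the
purelb.io/service-group annotation, and the allocator assigns it an address
from that pool.

apiVersion: v1
kind: Service
metadata:
  name: example-dmz-app            # hypothetical service, for illustration only
  namespace: default
  annotations:
    purelb.io/service-group: dmz   # allocate from the 198.18.199.0/24 dmz pool
spec:
  type: LoadBalancer
  selector:
    app: example-dmz-app
  ports:
    - name: http
      port: 80
      targetPort: 8080
      protocol: TCP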

Reviewed-on: #395
Ben Vincent 2025-09-14 20:59:39 +10:00
parent 6e4bc9fbc7
commit 4e77fb7ee7
10 changed files with 14089 additions and 3 deletions


@@ -23,7 +23,6 @@ rke2::config_hash:
- "country=%{facts.country}"
- "asset=%{facts.dmi.product.serial_number}"
- "zone=%{zone}"
- "environment=%{environment}"
# FIXME: puppet-python wants to try to manage python-dev, which is required by the ceph package
python::manage_dev_package: false


@@ -5,8 +5,17 @@ rke2::helm_install: true
rke2::helm_repos:
metallb: https://metallb.github.io/metallb
rancher-stable: https://releases.rancher.com/server-charts/stable
purelb: https://gitlab.com/api/v4/projects/20400619/packages/helm/stable
jetstack: https://charts.jetstack.io
rke2::extra_config_files:
- rke2-canal-config
- 000_namespaces
- 010_rke2-canal-config
- 010_cert-manager
- 010_purelb
- 010_rancher
- 100_purelb_config
- 200_ingres_lb_nginx
- 201_ingres_route_rancher
rke2::config_hash:
advertise-address: "%{hiera('networking_loopback0_ip')}"
cluster-domain: "svc.k8s.unkin.net"
@@ -28,6 +37,7 @@ rke2::config_hash:
kube-controller-manager-arg:
- '--node-monitor-period=4s'
protect-kernel-defaults: true
disable-kube-proxy: false
# configure consul service
consul::services:


@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: cattle-system
labels:
kubernetes.io/metadata.name: cattle-system

File diff suppressed because it is too large.


@@ -0,0 +1,325 @@
# Source: purelb/templates/serviceaccount-allocator.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: purelb-v0.13.0
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/version: v0.13.0
app.kubernetes.io/managed-by: Helm
name: allocator
namespace: purelb
---
# Source: purelb/templates/serviceaccount-lbnodeagent.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: purelb-v0.13.0
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/version: v0.13.0
app.kubernetes.io/managed-by: Helm
name: lbnodeagent
namespace: purelb
---
# Source: purelb/templates/clusterrole-allocator.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: purelb-v0.13.0
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/version: v0.13.0
app.kubernetes.io/managed-by: Helm
name: purelb:allocator
rules:
- apiGroups: [purelb.io]
resources: [servicegroups, lbnodeagents]
verbs: [get, list, watch, update]
- apiGroups: ['']
resources: [services]
verbs: [get, list, watch, update]
- apiGroups: ['']
resources: [services/status]
verbs: [update]
- apiGroups: ['']
resources: [events]
verbs: [create, patch]
- apiGroups: ['']
resources: [namespaces]
verbs: [get, list]
- apiGroups: [policy]
resourceNames: [allocator]
resources: [podsecuritypolicies]
verbs: [use]
---
# Source: purelb/templates/clusterrole-lbnodeagent.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: purelb-v0.13.0
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/version: v0.13.0
app.kubernetes.io/managed-by: Helm
name: purelb:lbnodeagent
rules:
- apiGroups: [purelb.io]
resources: [servicegroups, lbnodeagents]
verbs: [get, list, watch, update]
- apiGroups: ['']
resources: [endpoints, nodes]
verbs: [get, list, watch]
- apiGroups: ['']
resources: [services]
verbs: [get, list, watch, update]
- apiGroups: ['']
resources: [events]
verbs: [create, patch]
- apiGroups: ['']
resources: [namespaces]
verbs: [get, list]
- apiGroups: [policy]
resourceNames: [lbnodeagent]
resources: [podsecuritypolicies]
verbs: [use]
---
# Source: purelb/templates/clusterrolebinding-allocator.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: purelb-v0.13.0
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/version: v0.13.0
app.kubernetes.io/managed-by: Helm
name: purelb:allocator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: purelb:allocator
subjects:
- kind: ServiceAccount
name: allocator
namespace: purelb
---
# Source: purelb/templates/clusterrolebinding-lbnodeagent.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: purelb-v0.13.0
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/version: v0.13.0
app.kubernetes.io/managed-by: Helm
name: purelb:lbnodeagent
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: purelb:lbnodeagent
subjects:
- kind: ServiceAccount
name: lbnodeagent
namespace: purelb
---
# Source: purelb/templates/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: purelb-v0.13.0
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/version: v0.13.0
app.kubernetes.io/managed-by: Helm
name: pod-lister
namespace: purelb
rules:
- apiGroups: ['']
resources: [pods]
verbs: [list]
---
# Source: purelb/templates/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: purelb-v0.13.0
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/version: v0.13.0
app.kubernetes.io/managed-by: Helm
name: pod-lister
namespace: purelb
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: pod-lister
subjects:
- kind: ServiceAccount
name: lbnodeagent
---
# Source: purelb/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
helm.sh/chart: purelb-v0.13.0
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/version: v0.13.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: lbnodeagent
name: lbnodeagent
namespace: purelb
spec:
selector:
matchLabels:
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/component: lbnodeagent
template:
metadata:
annotations:
prometheus.io/port: '7472'
prometheus.io/scrape: 'true'
labels:
helm.sh/chart: purelb-v0.13.0
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/version: v0.13.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: lbnodeagent
spec:
containers:
- env:
- name: NETBOX_USER_TOKEN
valueFrom:
secretKeyRef:
name: netbox-client
key: user-token
optional: true
- name: DEFAULT_ANNOUNCER
value: PureLB
- name: PURELB_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: PURELB_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: PURELB_ML_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: PURELB_ML_LABELS
value: app.kubernetes.io/name=purelb,app.kubernetes.io/component=lbnodeagent
- name: ML_GROUP
value: 8sb7ikA5qHwQQqxc
image: registry.gitlab.com/purelb/purelb/lbnodeagent:v0.13.0
imagePullPolicy: Always
name: lbnodeagent
ports:
- containerPort: 7472
name: monitoring
resources:
securityContext:
capabilities:
add: [NET_ADMIN, NET_RAW]
drop: [ALL]
readOnlyRootFilesystem: false
runAsGroup: 0
runAsUser: 0
hostNetwork: true
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: lbnodeagent
terminationGracePeriodSeconds: 2
---
# Source: purelb/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
helm.sh/chart: purelb-v0.13.0
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/version: v0.13.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: allocator
name: allocator
namespace: purelb
spec:
revisionHistoryLimit: 3
selector:
matchLabels:
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/component: allocator
template:
metadata:
annotations:
prometheus.io/port: '7472'
prometheus.io/scrape: 'true'
labels:
helm.sh/chart: purelb-v0.13.0
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/version: v0.13.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: allocator
spec:
containers:
- env:
- name: NETBOX_USER_TOKEN
valueFrom:
secretKeyRef:
name: netbox-client
key: user-token
optional: true
- name: DEFAULT_ANNOUNCER
value: PureLB
image: registry.gitlab.com/purelb/purelb/allocator:v0.13.0
imagePullPolicy: Always
name: allocator
ports:
- containerPort: 7472
name: monitoring
resources:
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop: [all]
readOnlyRootFilesystem: true
nodeSelector:
kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
runAsUser: 65534
serviceAccountName: allocator
terminationGracePeriodSeconds: 0
---
# Source: purelb/templates/lbnodeagent.yaml
apiVersion: purelb.io/v1
kind: LBNodeAgent
metadata:
name: default
namespace: purelb
labels:
helm.sh/chart: purelb-v0.13.0
app.kubernetes.io/name: purelb
app.kubernetes.io/instance: purelb
app.kubernetes.io/version: v0.13.0
app.kubernetes.io/managed-by: Helm
spec:
local:
localint: default
extlbint: kube-lb0
sendgarp: false


@@ -0,0 +1,695 @@
# Source: rancher/templates/priorityClass.yaml
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
name: rancher-critical
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
value: 1000000000
globalDefault: false
description: Priority class used by pods critical to rancher's functionality.
---
# Source: rancher/templates/serviceAccount.yaml
kind: ServiceAccount
apiVersion: v1
metadata:
name: rancher
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
---
# Source: rancher/templates/configMap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: rancher-config
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
app.kubernetes.io/part-of: rancher
data:
priorityClassName: rancher-critical
---
# Source: rancher/templates/clusterRoleBinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rancher
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
subjects:
- kind: ServiceAccount
name: rancher
namespace: cattle-system
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
# Source: rancher/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: rancher
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
spec:
ports:
- port: 80
targetPort: 80
protocol: TCP
name: http
- port: 443
targetPort: 444
protocol: TCP
name: https-internal
selector:
app: rancher
---
# Source: rancher/templates/deployment.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
name: rancher
annotations:
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
spec:
replicas: 3
selector:
matchLabels:
app: rancher
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
app: rancher
release: rancher
spec:
priorityClassName: rancher-critical
serviceAccountName: rancher
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
operator: In
values: [rancher]
topologyKey: kubernetes.io/hostname
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: NotIn
values: [windows]
tolerations:
- key: cattle.io/os
value: linux
effect: NoSchedule
operator: Equal
containers:
- image: docker.io/rancher/rancher:v2.12.1
imagePullPolicy: IfNotPresent
name: rancher
ports:
- containerPort: 80
protocol: TCP
- containerPort: 6666
protocol: TCP
args:
# Public trusted CA - clear ca certs
- --no-cacerts
- --http-listen-port=80
- --https-listen-port=443
- --add-local=true
env:
- name: CATTLE_NAMESPACE
value: cattle-system
- name: CATTLE_PEER_SERVICE
value: rancher
- name: CATTLE_BOOTSTRAP_PASSWORD
valueFrom:
secretKeyRef:
name: bootstrap-secret
key: bootstrapPassword
- name: IMPERATIVE_API_DIRECT
value: 'true'
- name: IMPERATIVE_API_APP_SELECTOR
value: rancher
startupProbe:
httpGet:
path: /healthz
port: 80
timeoutSeconds: 5
failureThreshold: 12
periodSeconds: 10
livenessProbe:
httpGet:
path: /healthz
port: 80
timeoutSeconds: 5
periodSeconds: 30
failureThreshold: 5
readinessProbe:
httpGet:
path: /healthz
port: 80
timeoutSeconds: 5
periodSeconds: 30
failureThreshold: 5
volumeMounts:
volumes:
---
# Source: rancher/templates/ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: rancher
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
annotations:
nginx.ingress.kubernetes.io/proxy-connect-timeout: '30'
nginx.ingress.kubernetes.io/proxy-read-timeout: '1800'
nginx.ingress.kubernetes.io/proxy-send-timeout: '1800'
spec:
rules:
- host: rancher.main.unkin.net # hostname to access rancher server
http:
paths:
- backend:
service:
name: rancher
port:
number: 80
pathType: ImplementationSpecific
path: /
tls:
- hosts: [rancher.main.unkin.net]
secretName: tls-rancher-ingress
---
# Source: rancher/templates/post-delete-hook-service-account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: rancher-post-delete
namespace: cattle-system
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
annotations:
helm.sh/hook: post-delete
helm.sh/hook-weight: '1'
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed
---
# Source: rancher/templates/pre-upgrade-hook-service-account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: rancher-pre-upgrade
namespace: cattle-system
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
annotations:
helm.sh/hook: pre-upgrade
helm.sh/hook-weight: '-1'
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
---
# Source: rancher/templates/secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: bootstrap-secret
namespace: cattle-system
annotations:
helm.sh/hook: pre-install,pre-upgrade
helm.sh/hook-weight: '-5'
helm.sh/resource-policy: keep
type: Opaque
data:
bootstrapPassword: YWRtaW4=
---
# Source: rancher/templates/post-delete-hook-config-map.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: rancher-post-delete
namespace: cattle-system
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
annotations:
helm.sh/hook: post-delete
helm.sh/hook-weight: '1'
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed
data:
post-delete-hook.sh: |-
#!/bin/bash
set -e
namespaces="${NAMESPACES}"
rancher_namespace="${RANCHER_NAMESPACE}"
timeout="${TIMEOUT}"
ignoreTimeoutError="${IGNORETIMEOUTERROR}"
if [[ -z ${namespaces} ]]; then
echo "No namespace is provided."
exit 1
fi
if [[ -z ${rancher_namespace} ]]; then
echo "No rancher namespace is provided."
exit 1
fi
if [[ -z ${timeout} ]]; then
echo "No timeout value is provided."
exit 1
fi
if [[ -z ${ignoreTimeoutError} ]]; then
echo "No ignoreTimeoutError value is provided."
exit 1
fi
succeeded=()
failed=()
get_pod_count() {
kubectl get pods --selector app="${1}" -n "${2}" -o json | jq '.items | length'
}
echo "Uninstalling Rancher resources in the following namespaces: ${namespaces}"
for namespace in ${namespaces}; do
for app in $(helm list -n "${namespace}" -q); do
if [[ ${app} =~ .crd$ ]]; then
echo "--- Skip the app [${app}] in the namespace [${namespace}]"
continue
fi
echo "--- Deleting the app [${app}] in the namespace [${namespace}]"
if [[ ! $(helm uninstall "${app}" -n "${namespace}") ]]; then
failed=("${failed[@]}" "${app}")
continue
fi
t=0
while true; do
if [[ $(get_pod_count "${app}" "${namespace}") -eq 0 ]]; then
echo "successfully uninstalled [${app}] in the namespace [${namespace}]"
succeeded=("${succeeded[@]}" "${app}")
break
fi
if [[ ${t} -ge ${timeout} ]]; then
echo "timeout uninstalling [${app}] in the namespace [${namespace}]"
failed=("${failed[@]}" "${app}")
break
fi
# by default, wait 120 seconds in total for an app to be uninstalled
echo "waiting 5 seconds for pods of [${app}] to be terminated ..."
sleep 5
t=$((t + 5))
done
done
# delete the helm operator pods
for pod in $(kubectl get pods -n "${namespace}" -o name); do
if [[ ${pod} =~ ^pod\/helm-operation-* ]]; then
echo "--- Deleting the pod [${pod}] in the namespace [${namespace}]"
kubectl delete "${pod}" -n "${namespace}"
fi
done
done
echo "Removing Rancher bootstrap secret in the following namespace: ${rancher_namespace}"
kubectl --ignore-not-found=true delete secret bootstrap-secret -n "${rancher_namespace}"
echo "------ Summary ------"
if [[ ${#succeeded[@]} -ne 0 ]]; then
echo "Succeeded to uninstall the following apps:" "${succeeded[@]}"
fi
if [[ ${#failed[@]} -ne 0 ]]; then
echo "Failed to uninstall the following apps:" "${failed[@]}"
if [[ "${ignoreTimeoutError}" == "false" ]]; then
exit 2
fi
else
echo "Cleanup finished successfully."
fi
---
# Source: rancher/templates/pre-upgrade-hook-config-map.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: rancher-pre-upgrade
namespace: cattle-system
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
annotations:
helm.sh/hook: pre-upgrade
helm.sh/hook-weight: '-1'
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
data:
pre-upgrade-hook.sh: |-
#!/bin/bash
set -eo pipefail
# Global counters
declare -A COUNTS
RESOURCES_FOUND=false
check_prerequisites() {
if ! command -v kubectl &>/dev/null; then
echo "Missing required tool: kubectl"
exit 1
fi
}
print_resource_table() {
local kind="$1"
local items="$2"
local -a headers=("${@:3}")
local count
count=$(wc -l <<< "$items")
COUNTS["$kind"]=$count
RESOURCES_FOUND=true
echo "Found $count $kind resource(s):"
echo
IFS=$'\n' read -r -d '' -a lines < <(printf '%s\0' "$items")
# Initialize max_lengths array with header lengths
local -a max_lengths
for i in "${!headers[@]}"; do
max_lengths[i]=${#headers[i]}
done
# Calculate max width for each column
for line in "${lines[@]}"; do
IFS=$'\t' read -r -a cols <<< "$line"
for i in "${!cols[@]}"; do
(( ${#cols[i]} > max_lengths[i] )) && max_lengths[i]=${#cols[i]}
done
done
for i in "${!headers[@]}"; do
printf "%-${max_lengths[i]}s " "${headers[i]}"
done
printf "\n"
for i in "${!headers[@]}"; do
printf "%-${max_lengths[i]}s " "$(printf '%*s' "${max_lengths[i]}" '' | tr ' ' '-')"
done
printf "\n"
for line in "${lines[@]}"; do
IFS=$'\t' read -r -a cols <<< "$line"
for i in "${!cols[@]}"; do
printf "%-${max_lengths[i]}s " "${cols[i]}"
done
printf "\n"
done
echo
}
detect_resource() {
local crd="$1"
local kind="$2"
local jsonpath="$3"
local -a headers=("${@:4}")
echo "Checking for $kind resources..."
local output
if ! output=$(kubectl get "$crd" --all-namespaces -o=jsonpath="$jsonpath" 2>&1); then
if grep -q "the server doesn't have a resource type" <<< "$output"; then
echo "Resource type $crd not found. Skipping."
echo
return 0
else
echo "Error retrieving $kind resources: $output"
exit 1
fi
fi
if [ -z "$output" ]; then
echo "No $kind resources found."
echo
else
print_resource_table "$kind" "$output" "${headers[@]}"
fi
}
print_summary() {
echo "===== SUMMARY ====="
local total=0
for kind in "${!COUNTS[@]}"; do
local count=${COUNTS[$kind]}
echo "$kind: $count"
total=$((total + count))
done
echo "Total resources detected: $total"
if [ "$RESOURCES_FOUND" = true ]; then
echo "Error: Rancher v2.12+ does not support RKE1.
Detected RKE1-related resources (listed above).
Please migrate these clusters to RKE2 or K3s, or delete the related resources.
More info: https://www.suse.com/c/rke-end-of-life-by-july-2025-replatform-to-rke2-or-k3s"
exit 1
else
echo "No RKE related resources found."
fi
}
main() {
check_prerequisites
detect_resource "clusters.management.cattle.io" "RKE Management Cluster" \
'{range .items[?(@.spec.rancherKubernetesEngineConfig)]}{.metadata.name}{"\t"}{.spec.displayName}{"\n"}{end}' \
"NAME" "DISPLAY NAME"
detect_resource "nodetemplates.management.cattle.io" "NodeTemplate" \
'{range .items[*]}{.metadata.namespace}{"\t"}{.metadata.name}{"\t"}{.spec.displayName}{"\n"}{end}' \
"NAMESPACE" "NAME" "DISPLAY NAME"
detect_resource "clustertemplates.management.cattle.io" "ClusterTemplate" \
'{range .items[*]}{.metadata.namespace}{"\t"}{.metadata.name}{"\t"}{.spec.displayName}{"\n"}{end}' \
"NAMESPACE" "NAME" "DISPLAY NAME"
print_summary
}
main
---
# Source: rancher/templates/post-delete-hook-cluster-role.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rancher-post-delete
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
annotations:
helm.sh/hook: post-delete
helm.sh/hook-weight: '1'
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed
rules:
- apiGroups: [extensions, apps]
resources: [deployments]
verbs: [get, list, delete]
- apiGroups: [batch]
resources: [jobs]
verbs: [get, list, watch, delete, create]
- apiGroups: [rbac.authorization.k8s.io]
resources: [clusterroles, clusterrolebindings, roles, rolebindings]
verbs: [get, list, delete, create]
- apiGroups: ['']
resources: [pods, secrets, services, configmaps]
verbs: [get, list, delete]
- apiGroups: ['']
resources: [serviceaccounts]
verbs: [get, list, delete, create]
- apiGroups: [networking.k8s.io]
resources: [networkpolicies]
verbs: [get, list, delete]
- apiGroups: [admissionregistration.k8s.io]
resources:
- validatingwebhookconfigurations
- mutatingwebhookconfigurations
verbs: [get, list, delete]
- apiGroups: [networking.k8s.io]
resources: [ingresses]
verbs: [delete]
- apiGroups: [cert-manager.io]
resources: [issuers]
verbs: [delete]
---
# Source: rancher/templates/pre-upgrade-hook-cluster-role.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rancher-pre-upgrade
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
annotations:
helm.sh/hook: pre-upgrade
helm.sh/hook-weight: '-1'
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
rules:
- apiGroups: [management.cattle.io]
resources: [clusters, nodetemplates, clustertemplates]
verbs: [get, list]
---
# Source: rancher/templates/post-delete-hook-cluster-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: rancher-post-delete
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
annotations:
helm.sh/hook: post-delete
helm.sh/hook-weight: '2'
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rancher-post-delete
subjects:
- kind: ServiceAccount
name: rancher-post-delete
namespace: cattle-system
---
# Source: rancher/templates/pre-upgrade-hook-cluster-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: rancher-pre-upgrade
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
annotations:
helm.sh/hook: pre-upgrade
helm.sh/hook-weight: '-1'
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rancher-pre-upgrade
subjects:
- kind: ServiceAccount
name: rancher-pre-upgrade
namespace: cattle-system
---
# Source: rancher/templates/post-delete-hook-job.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: rancher-post-delete
namespace: cattle-system
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
annotations:
helm.sh/hook: post-delete
helm.sh/hook-weight: '3'
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
spec:
backoffLimit: 3
template:
metadata:
name: rancher-post-delete
labels:
app: rancher
chart: rancher-2.12.1
heritage: Helm
release: rancher
spec:
serviceAccountName: rancher-post-delete
restartPolicy: OnFailure
containers:
- name: rancher-post-delete
image: rancher/shell:v0.5.0
imagePullPolicy: IfNotPresent
securityContext:
runAsUser: 0
command: [/scripts/post-delete-hook.sh]
volumeMounts:
- mountPath: /scripts
name: config-volume
env:
- name: NAMESPACES
value: cattle-fleet-system cattle-system rancher-operator-system
- name: RANCHER_NAMESPACE
value: cattle-system
- name: TIMEOUT
value: '120'
- name: IGNORETIMEOUTERROR
value: 'false'
volumes:
- name: config-volume
configMap:
name: rancher-post-delete
defaultMode: 0777
---
# Source: rancher/templates/pre-upgrade-hook-job.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: rancher-pre-upgrade
namespace: cattle-system
labels:
app: rancher-pre-upgrade
chart: rancher-2.12.1
heritage: Helm
release: rancher
annotations:
helm.sh/hook: pre-upgrade
helm.sh/hook-weight: '-1'
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
spec:
backoffLimit: 3
template:
metadata:
name: rancher-pre-upgrade
labels:
app: rancher-pre-upgrade
chart: rancher-2.12.1
heritage: Helm
release: rancher
spec:
serviceAccountName: rancher-pre-upgrade
restartPolicy: Never
containers:
- name: rancher-pre-upgrade
image: rancher/shell:v0.5.0
imagePullPolicy: IfNotPresent
securityContext:
runAsUser: 0
command: [/scripts/pre-upgrade-hook.sh]
volumeMounts:
- mountPath: /scripts
name: config-volume
volumes:
- name: config-volume
configMap:
name: rancher-pre-upgrade
defaultMode: 0777


@@ -1,4 +1,3 @@
---
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:


@@ -0,0 +1,45 @@
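# PureLB address configuration: LBNodeAgents plus the dmz (198.18.199.0/24) and
# common (198.18.200.0/24) ServiceGroups that back LoadBalancer services.
# Addresses allocated from these pools are attached to kube-lb0 as /32s so they
# can be picked up and announced by the node's routing daemon (anycast via
# OSPF, per the commit message).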
apiVersion: purelb.io/v1
kind: LBNodeAgent
metadata:
name: common
namespace: purelb
spec:
local:
extlbint: kube-lb0
localint: default
sendgarp: false
---
apiVersion: purelb.io/v1
kind: LBNodeAgent
metadata:
name: dmz
namespace: purelb
spec:
local:
extlbint: kube-lb0
localint: default
sendgarp: false
---
apiVersion: purelb.io/v1
kind: ServiceGroup
metadata:
name: dmz
namespace: purelb
spec:
local:
v4pools:
- subnet: 198.18.199.0/24
pool: 198.18.199.0/24
aggregation: /32
---
apiVersion: purelb.io/v1
kind: ServiceGroup
metadata:
name: common
namespace: purelb
spec:
local:
v4pools:
- subnet: 198.18.200.0/24
pool: 198.18.200.0/24
aggregation: /32


@@ -0,0 +1,41 @@
apiVersion: v1
kind: Service
metadata:
name: rke2-ingress-nginx-controller
namespace: kube-system
annotations:
purelb.io/service-group: common
spec:
type: LoadBalancer
externalTrafficPolicy: Cluster
ports:
- name: http
port: 80
targetPort: http
protocol: TCP
- name: https
port: 443
targetPort: https
protocol: TCP
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/component: controller
app.kubernetes.io/instance: rke2-ingress-nginx
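  # 198.18.200.0 is the shared anycast address from the common pool, announced from all nodes (see commit message)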
loadBalancerIP: 198.18.200.0
---
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
name: rke2-ingress-nginx
namespace: kube-system
spec:
valuesContent: |-
controller:
hostPort:
enabled: false
service:
enabled: true
type: LoadBalancer
externalTrafficPolicy: Local
annotations:
purelb.io/service-group: common


@@ -0,0 +1,22 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: rancher
namespace: cattle-system
annotations:
kubernetes.io/ingress.class: nginx
spec:
tls:
- hosts: [rancher.main.unkin.net]
secretName: tls-rancher
rules:
- host: rancher.main.unkin.net
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: rancher
port:
number: 80