- manage rke2 repos
- add rke2 module (init, params, install, config, service)
- split roles::infra::k8s::node -> control/compute roles
- move common k8s config into k8s.yaml
- add bootstrap_node; manage server and token fields in rke2 config
- manage install of helm
- manage node attributes (from puppet facts)
- manage frr exclusions for service/cluster network
commit 139ec9803c (parent 0665873dc8)
@@ -155,6 +155,9 @@ lookup_options:
  zfs::datasets:
    merge:
      strategy: deep
  rke2::config_hash:
    merge:
      strategy: deep

facts_path: '/opt/puppetlabs/facter/facts.d'
hieradata/roles/infra/k8s.eyaml (new file)
@@ -0,0 +1 @@
rke2::node_token: ENC[PKCS7,MIIB2gYJKoZIhvcNAQcDoIIByzCCAccCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEACQ4IIE9qT7x+KBTtvksUwvko6U/wnRn4PZEHUc91u9StIcjd0N6uH3hrJ4ddJmwikXf9Sz/5t+kSPvcVewFcjzvMHzJXqaE57KX0hqjFpqSGMlqWy4hdrZTHm/W65PkB73j21HLn9gH7m4rmqLomSbIe21P7sFM5+1pbnQejx9Us43Mu7PNy+8rQCcdEeBnRmzLaVWR8c35aXMHANqefLaLPCWjTbDY4jhcHPkNse5thpyUW89Bnq6caTJjNNQRI9U2FX2qHqwFh/rcjtGwDk9JUJvpF1qb9mdY68txMfaQ2KQh0gH67nxVf0cDppUxJaQ86Uu71BQX54l/om9AFrDCBnAYJKoZIhvcNAQcBMB0GCWCGSAFlAwQBKgQQFfKTlWnJq2zqP6jsLAzw94BwDu32IVvfvYvsuNcOJHc4ipI0PV/6AcatIDaXjK+K8BMLdWxpOVsVf4vL1zPatB5UTwc8rm1UvxWN8SbtqF8accmAA0GbIexQezsJYaAI3NT9gItGj/aJjTitl+ed7QfNCd36HVH9FEmfDaGwN7f/yw==]
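For reference, a value in this form can be regenerated with the hiera-eyaml CLI; a minimal sketch, assuming the repo's PKCS7 key pair sits at the default ./keys path and using a placeholder token value:

    eyaml encrypt -l 'rke2::node_token' -s 'K10<redacted-token>'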
hieradata/roles/infra/k8s.yaml (new file)
@@ -0,0 +1,169 @@
---
hiera_include:
  - profiles::selinux::setenforce
  - profiles::ceph::node
  - profiles::ceph::client
  - exporters::frr_exporter
  - frrouting
  - rke2

# manage rke2
rke2::bootstrap_node: prodnxsr0001.main.unkin.net
rke2::join_url: https://join-k8s.service.consul:9345
rke2::config_hash:
  bind-address: "%{hiera('networking_loopback0_ip')}"
  node-ip: "%{hiera('networking_loopback0_ip')}"
  node-external-ip: "%{hiera('networking_loopback0_ip')}"
  write-kubeconfig-mode: 644
  kubelet-arg:
    - '--node-status-update-frequency=4s'
    - '--max-pods=100'
  node-label:
    - "region=%{facts.region}"
    - "country=%{facts.country}"
    - "asset=%{facts.dmi.product.serial_number}"
    - "zone=%{zone}"
    - "environment=%{environment}"

# FIXME: puppet-python wants to try to manage python-dev, which is required by the ceph package
python::manage_dev_package: false

profiles::packages::include:
  bridge-utils: {}
  cephadm: {}

profiles::selinux::setenforce::mode: disabled

profiles::ceph::client::manage_ceph_conf: false
profiles::ceph::client::manage_ceph_package: false
profiles::ceph::client::manage_ceph_paths: false
profiles::ceph::client::fsid: 'de96a98f-3d23-465a-a899-86d3d67edab8'
profiles::ceph::client::mons:
  - 198.18.23.9
  - 198.18.23.10
  - 198.18.23.11
  - 198.18.23.12
  - 198.18.23.13

# additional repos
profiles::yum::global::repos:
  ceph:
    name: ceph
    descr: ceph repository
    target: /etc/yum.repos.d/ceph.repo
    baseurl: https://edgecache.query.consul/ceph/yum/el%{facts.os.release.major}/%{facts.os.architecture}
    gpgkey: https://download.ceph.com/keys/release.asc
    mirrorlist: absent
  ceph-noarch:
    name: ceph-noarch
    descr: ceph-noarch repository
    target: /etc/yum.repos.d/ceph-noarch.repo
    baseurl: https://edgecache.query.consul/ceph/yum/el%{facts.os.release.major}/noarch
    gpgkey: https://download.ceph.com/keys/release.asc
    mirrorlist: absent
  frr-extras:
    name: frr-extras
    descr: frr-extras repository
    target: /etc/yum.repos.d/frr-extras.repo
    baseurl: https://packagerepo.service.consul/frr/el9/extras-daily/%{facts.os.architecture}/os
    gpgkey: https://packagerepo.service.consul/frr/el9/extras-daily/%{facts.os.architecture}/os/RPM-GPG-KEY-FRR
    mirrorlist: absent
  frr-stable:
    name: frr-stable
    descr: frr-stable repository
    target: /etc/yum.repos.d/frr-stable.repo
    baseurl: https://packagerepo.service.consul/frr/el9/stable-daily/%{facts.os.architecture}/os
    gpgkey: https://packagerepo.service.consul/frr/el9/stable-daily/%{facts.os.architecture}/os/RPM-GPG-KEY-FRR
    mirrorlist: absent
  rancher-rke2-common-latest:
    name: rancher-rke2-common-latest
    descr: rancher-rke2-common-latest
    target: /etc/yum.repos.d/rke2-common.repo
    baseurl: https://rpm.rancher.io/rke2/latest/common/centos/%{facts.os.release.major}/noarch
    gpgkey: https://rpm.rancher.io/public.key
    mirrorlist: absent
  rancher-rke2-1-33-latest:
    name: rancher-rke2-1-33-latest
    descr: rancher-rke2-1-33-latest
    target: /etc/yum.repos.d/rke2-1-33.repo
    baseurl: https://rpm.rancher.io/rke2/latest/1.33/centos/%{facts.os.release.major}/x86_64
    gpgkey: https://rpm.rancher.io/public.key
    mirrorlist: absent

# dns
profiles::dns::base::primary_interface: loopback0

# networking
systemd::manage_networkd: true
systemd::manage_all_network_files: true
networking::interfaces:
  "%{hiera('networking_1000_iface')}":
    type: physical
    ipaddress: "%{hiera('networking_1000_ip')}"
    gateway: 198.18.15.254
    txqueuelen: 10000
    forwarding: true
  "%{hiera('networking_2500_iface')}":
    type: physical
    ipaddress: "%{hiera('networking_2500_ip')}"
    mtu: 1500
    txqueuelen: 10000
    forwarding: true
  loopback0:
    type: dummy
    ipaddress: "%{hiera('networking_loopback0_ip')}"
    netmask: 255.255.255.255
    mtu: 1500
  loopback1:
    type: dummy
    ipaddress: "%{hiera('networking_loopback1_ip')}"
    netmask: 255.255.255.255
    mtu: 1500
  loopback2:
    type: dummy
    ipaddress: "%{hiera('networking_loopback2_ip')}"
    netmask: 255.255.255.255
    mtu: 1500

# configure consul service
profiles::consul::client::host_addr: "%{hiera('networking_loopback0_ip')}"
profiles::consul::client::node_rules:
  - resource: service
    segment: frr_exporter
    disposition: write

# frrouting
exporters::frr_exporter::enable: true
frrouting::ospfd_router_id: "%{hiera('networking_loopback0_ip')}"
frrouting::ospf_preferred_source_enable: true
frrouting::ospf_preferred_source: "%{hiera('networking_loopback0_ip')}"
frrouting::ospfd_redistribute:
  - connected
frrouting::ospfd_interfaces:
  "%{hiera('networking_1000_iface')}":
    area: 0.0.0.0
  "%{hiera('networking_2500_iface')}":
    area: 0.0.0.0
  loopback0:
    area: 0.0.0.0
  loopback1:
    area: 0.0.0.0
  loopback2:
    area: 0.0.0.0
frrouting::daemons:
  ospfd: true
frrouting::ospf_exclude_k8s_enable: true
frrouting::k8s_cluster_cidr: '10.42.0.0/16'  # RKE2 cluster-cidr (pods)
frrouting::k8s_service_cidr: '10.43.0.0/16'  # RKE2 service-cidr

# add loopback interfaces to ssh list
ssh::server::options:
  ListenAddress:
    - "%{hiera('networking_loopback0_ip')}"
    - "%{hiera('networking_1000_ip')}"
    - "%{hiera('networking_2500_ip')}"

profiles::ssh::sign::principals:
  - "%{hiera('networking_loopback0_ip')}"
  - "%{hiera('networking_1000_ip')}"
  - "%{hiera('networking_2500_ip')}"
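Because rke2::config_hash is deep-merged (see the lookup_options hunk above), these role-common keys combine with the node-type layers below instead of being replaced wholesale. A sketch of the effective hash on a control node, assuming only these two hiera layers contribute and using a hypothetical loopback0 IP:

    # effective rke2::config_hash on a server node (illustrative values)
    bind-address: "198.18.23.9"        # from this file, via networking_loopback0_ip
    node-ip: "198.18.23.9"
    node-external-ip: "198.18.23.9"
    write-kubeconfig-mode: 644
    advertise-address: "198.18.23.9"   # contributed by the control role below
    cluster-domain: "svc.k8s.unkin.net"
    cni: canal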
@@ -1,10 +1,3 @@
 ---
-# networking
-systemd::manage_networkd: true
-systemd::manage_all_network_files: true
-networking::interfaces:
-  eth0:
-    type: physical
-    forwarding: true
-    dhcp: true
-    mtu: 1500
+# manage rke2
+rke2::node_type: agent
@@ -1,42 +1,73 @@
---
profiles::pki::vault::alt_names:
  - k8s-control.service.consul
  - k8s-control.query.consul
  - "k8s-control.service.%{facts.country}-%{facts.region}.consul"

profiles::ssh::sign::principals:
  - k8s-control.service.consul
  - k8s-control.query.consul
  - "k8s-control.service.%{facts.country}-%{facts.region}.consul"
# manage rke2
rke2::node_type: server
rke2::helm_install: true
rke2::helm_repos:
  metallb: https://metallb.github.io/metallb
  rancher-stable: https://releases.rancher.com/server-charts/stable
rke2::extra_config_files:
  - rke2-canal-config
rke2::config_hash:
  advertise-address: "%{hiera('networking_loopback0_ip')}"
  cluster-domain: "svc.k8s.unkin.net"
  tls-san:
    - "join-k8s.service.consul"
    - "api-k8s.service.consul"
    - "api.k8s.unkin.net"
    - "join.k8s.unkin.net"
  cni: canal
  cluster-cidr: 10.42.0.0/16
  service-cidr: 10.43.0.0/16
  cluster-dns: 10.43.0.10
  etcd-arg: "--quota-backend-bytes 2048000000"
  etcd-snapshot-schedule-cron: "0 3 * * *"
  etcd-snapshot-retention: 10
  kube-apiserver-arg:
    - '--default-not-ready-toleration-seconds=30'
    - '--default-unreachable-toleration-seconds=30'
  kube-controller-manager-arg:
    - '--node-monitor-period=4s'
  protect-kernel-defaults: true

# configure consul service
consul::services:
  k8s-control:
    service_name: 'k8s-control'
    tags:
      - 'k8s'
      - 'container'
  api-k8s:
    service_name: 'api-k8s'
    address: "%{facts.networking.fqdn}"
    port: 6443
    checks:
      - id: 'k8s-control_https_check'
        name: 'k8s-control HTTPS Check'
        http: "https://%{facts.networking.fqdn}:6443"
        method: 'GET'
        tls_skip_verify: true
      - id: 'api-k8s_livez_check'
        name: 'api-k8s livez Check'
        args:
          - sudo
          - /usr/local/bin/check_k8s_api.sh
        interval: '10s'
        timeout: '1s'
  join-k8s:
    service_name: 'join-k8s'
    address: "%{facts.networking.fqdn}"
    port: 9345
    checks:
      - id: 'rke2_tcp_check_9345'
        name: 'rke2 TCP Check 9345'
        tcp: "%{hiera('networking_loopback0_ip')}:9345"
        interval: '10s'
        timeout: '1s'
profiles::consul::client::node_rules:
  - resource: service
-   segment: k8s-control
+   segment: api-k8s
    disposition: write
  - resource: service
    segment: join-k8s
    disposition: write

# networking
systemd::manage_networkd: true
systemd::manage_all_network_files: true
networking::interfaces:
  eth0:
    type: physical
    forwarding: true
    dhcp: true
    mtu: 1500
profiles::pki::vault::alt_names:
  - api-k8s.service.consul
  - api-k8s.query.consul
  - "api-k8s.service.%{facts.country}-%{facts.region}.consul"

sudo::configs:
  consul-checks:
    priority: 20
    content: |
      consul ALL=(ALL) NOPASSWD: /usr/local/bin/check_k8s_api.sh
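With the join-k8s service registered above, joining nodes can discover a healthy server through Consul DNS rather than a hard-coded host; a quick sanity check from any node, assuming Consul's standard DNS port of 8600:

    dig @127.0.0.1 -p 8600 join-k8s.service.consul SRV +short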
@@ -5,6 +5,24 @@ hiera_include:
  - profiles::ceph::node
  - profiles::ceph::client
  - exporters::frr_exporter
  - profiles::rke2::node

# manage rke2
profiles::rke2::node::servers:
  - prodnxsr0001.main.unkin.net
  - prodnxsr0002.main.unkin.net
  - prodnxsr0003.main.unkin.net

rke2::config_hash:
  bind-address: "%{hiera('networking_loopback0_ip')}"
  advertise-address: "%{hiera('networking_loopback0_ip')}"
  node-ip: "%{hiera('networking_loopback0_ip')}"
  node-external-ip: "%{hiera('networking_loopback0_ip')}"
  cluster-domain: "svc.k8s.unkin.net"
  tls-san:
    - "api.k8s.unkin.net"
    - "join.k8s.unkin.net"
  cni: cilium

# FIXME: puppet-python wants to try to manage python-dev, which is required by the ceph package
python::manage_dev_package: false
@@ -25,6 +43,7 @@ profiles::ceph::client::mons:
  - 198.18.23.11
  - 198.18.23.12
  - 198.18.23.13

# additional repos
profiles::yum::global::repos:
  ceph:
@@ -55,6 +74,20 @@ profiles::yum::global::repos:
    baseurl: https://packagerepo.service.consul/frr/el9/stable-daily/%{facts.os.architecture}/os
    gpgkey: https://packagerepo.service.consul/frr/el9/stable-daily/%{facts.os.architecture}/os/RPM-GPG-KEY-FRR
    mirrorlist: absent
  rancher-rke2-common-latest:
    name: rancher-rke2-common-latest
    descr: rancher-rke2-common-latest
    target: /etc/yum.repos.d/rke2-common.repo
    baseurl: https://rpm.rancher.io/rke2/latest/common/centos/%{facts.os.release.major}/noarch
    gpgkey: https://rpm.rancher.io/public.key
    mirrorlist: absent
  rancher-rke2-1-33-latest:
    name: rancher-rke2-1-33-latest
    descr: rancher-rke2-1-33-latest
    target: /etc/yum.repos.d/rke2-1-33.repo
    baseurl: https://rpm.rancher.io/rke2/latest/1.33/centos/%{facts.os.release.major}/x86_64
    gpgkey: https://rpm.rancher.io/public.key
    mirrorlist: absent

# dns
profiles::dns::base::primary_interface: loopback0
@@ -91,9 +124,38 @@ networking::interfaces:
    netmask: 255.255.255.255
    mtu: 1500

-# consul
+# configure consul service
profiles::consul::client::host_addr: "%{hiera('networking_loopback0_ip')}"
consul::services:
  api-k8s:
    service_name: 'api-k8s'
    address: "%{facts.networking.fqdn}"
    port: 6443
    checks:
      - id: 'api-k8s_https_check'
        name: 'api-k8s HTTPS Check'
        http: "https://%{facts.networking.fqdn}:6443"
        method: 'GET'
        tls_skip_verify: true
        interval: '10s'
        timeout: '1s'
  join-k8s:
    service_name: 'join-k8s'
    address: "%{facts.networking.fqdn}"
    port: 9345
    checks:
      - id: 'etcd_tcp_check_9345'
        name: 'ETCD TCP Check 9345'
        tcp: "%{facts.networking.fqdn}:9345"
        interval: '10s'
        timeout: '1s'
profiles::consul::client::node_rules:
  - resource: service
    segment: api-k8s
    disposition: write
  - resource: service
    segment: join-k8s
    disposition: write
  - resource: service
    segment: frr_exporter
    disposition: write
@@ -130,3 +192,8 @@ profiles::ssh::sign::principals:
  - "%{hiera('networking_loopback0_ip')}"
  - "%{hiera('networking_1000_ip')}"
  - "%{hiera('networking_2500_ip')}"

profiles::pki::vault::alt_names:
  - api-k8s.service.consul
  - api-k8s.query.consul
  - "api-k8s.service.%{facts.country}-%{facts.region}.consul"
@@ -16,8 +16,16 @@ class frrouting (
  Array[String] $mpls_ldp_interfaces = [],
  Boolean $ospf_preferred_source_enable = false,
  Optional[Stdlib::IP::Address] $ospf_preferred_source = undef,
  Boolean $ospf_exclude_k8s_enable = false,
  Optional[Stdlib::IP::Address::V4::CIDR] $k8s_cluster_cidr = undef, # pod/cluster CIDR (e.g. 10.42.0.0/16)
  Optional[Stdlib::IP::Address::V4::CIDR] $k8s_service_cidr = undef, # service CIDR (e.g. 10.43.0.0/16)
) {

  # sanity check
  if $ospf_exclude_k8s_enable and $k8s_cluster_cidr == undef and $k8s_service_cidr == undef {
    warning('frrouting: ospf_exclude_k8s_enable is true but no k8s_*_cidr provided; nothing will be filtered.')
  }

  $daemons_defaults = {
    'bgpd'  => false,
    'ospfd' => true,
@@ -2,6 +2,7 @@
frr defaults traditional
hostname <%= @hostname %>
no ipv6 forwarding

<% @ospfd_interfaces.each do |iface, params| -%>
interface <%= iface %>
<% if params['area'] -%>
@@ -12,12 +13,31 @@ interface <%= iface %>
<% end -%>
exit
<% end -%>

<%# ---- K8s exclude (prefix-list + route-map) ---- -%>
<% if @ospf_exclude_k8s_enable && (@k8s_cluster_cidr || @k8s_service_cidr) -%>
! Do not redistribute Kubernetes Pod/Service CIDRs
<% if @k8s_cluster_cidr -%>
ip prefix-list K8S-BLOCK seq 5 permit <%= @k8s_cluster_cidr %> le 32
<% end -%>
<% if @k8s_service_cidr -%>
ip prefix-list K8S-BLOCK seq 10 permit <%= @k8s_service_cidr %> le 32
<% end -%>
route-map CONNECTED-NON-K8S deny 5
 match ip address prefix-list K8S-BLOCK
exit
route-map CONNECTED-NON-K8S permit 100
exit
<% end -%>

router ospf
 ospf router-id <%= @ospfd_router_id %>
 log-adjacency-changes detail
<% @ospfd_redistribute.each do |type| -%>
<% if @ospf_exclude_k8s_enable && type == 'connected' && (@k8s_cluster_cidr || @k8s_service_cidr) -%>
 redistribute connected route-map CONNECTED-NON-K8S
<% else -%>
 redistribute <%= type %>
<% end -%>
<% end -%>
<% @ospfd_networks.each do |network| -%>
 network <%= network %>
<% end -%>
@@ -31,6 +51,8 @@ router ospf
 mpls-te inter-as area 0.0.0.0
<% end -%>
exit

<%# ---- MPLS/LDP config ---- -%>
<% if @mpls_ldp_router_id and @mpls_ldp_transport_addr and @mpls_ldp_interfaces.any? -%>
mpls ldp
 router-id <%= @mpls_ldp_router_id %>
@@ -43,6 +65,8 @@ mpls ldp
 exit-address-family
exit
<% end -%>

<%# ---- Preferred OSPF source ---- -%>
<% if @ospf_preferred_source_enable && @ospf_preferred_source -%>
ip prefix-list ANY seq 5 permit 0.0.0.0/0 le 32
route-map OSPF-SRC permit 10
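With the hiera values used by this commit (cluster-cidr 10.42.0.0/16, service-cidr 10.43.0.0/16), the K8s-exclude portion of the template should render roughly as below; a sketch, not output captured from a node:

    ! Do not redistribute Kubernetes Pod/Service CIDRs
    ip prefix-list K8S-BLOCK seq 5 permit 10.42.0.0/16 le 32
    ip prefix-list K8S-BLOCK seq 10 permit 10.43.0.0/16 le 32
    route-map CONNECTED-NON-K8S deny 5
     match ip address prefix-list K8S-BLOCK
    exit
    route-map CONNECTED-NON-K8S permit 100
    exit
    router ospf
     redistribute connected route-map CONNECTED-NON-K8S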
modules/rke2/files/check_k8s_api.sh (new file)
@@ -0,0 +1,2 @@
#!/usr/bin/bash
/var/lib/rancher/rke2/bin/kubectl --kubeconfig=/etc/rancher/rke2/rke2.yaml get --raw /livez
modules/rke2/files/rke2-canal-config.yaml (new file)
@@ -0,0 +1,10 @@
---
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
  name: rke2-canal
  namespace: kube-system
spec:
  valuesContent: |-
    flannel:
      iface: "loopback0"
modules/rke2/lib/facter/helm_repos.rb (new file)
@@ -0,0 +1,15 @@
# frozen_string_literal: true

require 'facter/util/helm'

Facter.add(:helm_repos) do
  confine kernel: 'Linux'
  confine enc_role: [
    'roles::infra::k8s::control',
    'roles::infra::k8s::compute'
  ]

  setcode do
    Facter::Util::Helm.get_helm_repos('/usr/bin/helm')
  end
end
modules/rke2/lib/facter/util/helm.rb (new file)
@@ -0,0 +1,31 @@
# frozen_string_literal: true

require 'facter'
require 'json'

# a simple helm module
module Facter::Util::Helm
  def self.get_helm_repos(helm_cmd)
    return [] unless File.executable?(helm_cmd)

    output = Facter::Core::Execution.execute(
      "#{helm_cmd} repo list --output json --repository-config /etc/helm/repositories.yaml",
      on_fail: nil
    )
    return [] if output.to_s.strip.empty?

    parse_helm_output(output)
  rescue StandardError => e
    Facter.debug("helm_repos fact error: #{e}")
    []
  end

  def self.parse_helm_output(output)
    JSON.parse(output).map do |repo|
      {
        'name' => repo['name'],
        'url' => repo['url']
      }
    end
  end
end
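The fact resolves to an array of name/url hashes. A sketch of the expected value once the control role's two repos have been added (illustrative, not captured output):

    [
      { "name": "metallb", "url": "https://metallb.github.io/metallb" },
      { "name": "rancher-stable", "url": "https://releases.rancher.com/server-charts/stable" }
    ]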
modules/rke2/manifests/config.pp (new file)
@@ -0,0 +1,87 @@
# config rke2
class rke2::config (
  Enum['server', 'agent'] $node_type          = $rke2::node_type,
  Stdlib::Absolutepath    $config_file        = $rke2::config_file,
  Hash                    $config_hash        = $rke2::config_hash,
  Stdlib::HTTPSUrl        $join_url           = $rke2::join_url,
  Stdlib::Fqdn            $bootstrap_node     = $rke2::bootstrap_node,
  String                  $node_token         = $rke2::node_token,
  Array[String[1]]        $extra_config_files = $rke2::extra_config_files,
) {

  # TODO: if agent, decide which other fields to add besides the token
  # TODO: add a tls secret using kubectl for ephemeral certs

  # if it's not the bootstrap node, add the join url and token to the config
  if $node_type == 'server' {
    if $trusted['certname'] != $bootstrap_node {
      $config = merge($config_hash, {
        server => $join_url,
        token  => $node_token,
      })
    } else {
      $config = $config_hash
    }
  } elsif $node_type == 'agent' {
    $config = merge($config_hash, {
      server => $join_url,
      token  => $node_token,
    })
  } else {
    $config = $config_hash
  }

  # create the config file
  file { $config_file:
    ensure  => file,
    content => Sensitive($config.to_yaml),
    owner   => 'root',
    group   => 'root',
    mode    => '0644',
    require => Package["rke2-${node_type}"],
    before  => Service["rke2-${node_type}"],
  }

  # create a script to verify the k8s api is up (used by consul)
  file { '/usr/local/bin/check_k8s_api.sh':
    ensure => file,
    owner  => 'root',
    group  => 'root',
    mode   => '0755',
    source => 'puppet:///modules/rke2/check_k8s_api.sh',
  }

  # symlink kubectl into the path
  file { '/usr/bin/kubectl':
    ensure  => link,
    target  => '/var/lib/rancher/rke2/bin/kubectl',
    require => Package["rke2-${node_type}"],
  }

  # required when ProtectKernelDefaults=true
  sysctl { 'vm.overcommit_memory':
    value  => '1',
    before => Service["rke2-${node_type}"],
  }
  sysctl { 'kernel.panic':
    value  => '10',
    before => Service["rke2-${node_type}"],
  }

  # on the controller nodes only
  if $node_type == 'server' {

    # manage extra config files (deployed as rke2 server manifests)
    $extra_config_files.each |$file| {

      file { "/var/lib/rancher/rke2/server/manifests/${file}.yaml":
        ensure  => file,
        owner   => 'root',
        group   => 'root',
        mode    => '0644',
        source  => "puppet:///modules/rke2/${file}.yaml",
        require => Service['rke2-server'],
      }
    }
  }
}
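The net effect on a non-bootstrap server is a config.yaml with the join fields layered over the hiera-supplied hash; a sketch with placeholder values:

    # /etc/rancher/rke2/config.yaml on a joining server (illustrative)
    bind-address: 198.18.23.10
    advertise-address: 198.18.23.10
    cni: canal
    server: https://join-k8s.service.consul:9345
    token: K10<redacted-token>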
modules/rke2/manifests/helm.pp (new file)
@@ -0,0 +1,44 @@
# manage helm
class rke2::helm (
  Enum['server', 'agent'] $node_type      = $rke2::node_type,
  Stdlib::Fqdn            $bootstrap_node = $rke2::bootstrap_node,
  Boolean                 $helm_install   = $rke2::helm_install,
  Hash                    $helm_repos     = $rke2::helm_repos,
) {

  # when installing helm, manage the repos
  if $helm_install {

    package { 'helm':
      ensure => installed,
    }

    file { '/etc/helm':
      ensure => directory,
      owner  => 'root',
      group  => 'root',
      mode   => '0755',
    }

    # on the controller nodes only
    if $node_type == 'server' {

      # check whether each repo already exists
      $helm_repos.each |String $repo, Stdlib::HTTPSUrl $url| {

        # if the repo isn't in helm's repo list (helm_repos fact), add it
        if ! $facts['helm_repos'].any |$existing| { $existing['name'] == $repo } {

          exec { "helm_add_repo_${repo}":
            command     => "helm repo add ${repo} ${url} --repository-config /etc/helm/repositories.yaml",
            path        => ['/usr/bin'],
            environment => [
              'KUBECONFIG=/etc/rancher/rke2/rke2.yaml',
            ],
            require     => [Package['helm'], File['/etc/helm']],
          }
        }
      }
    }
  }
}
modules/rke2/manifests/init.pp (new file)
@@ -0,0 +1,23 @@
# manage rke2
class rke2 (
  Enum['server', 'agent'] $node_type                = $rke2::params::node_type,
  String                  $rke2_version             = $rke2::params::rke2_version,
  String                  $rke2_release             = $rke2::params::rke2_release,
  Stdlib::Absolutepath    $config_file              = $rke2::params::config_file,
  Hash                    $config_hash              = $rke2::params::config_hash,
  Stdlib::HTTPSUrl        $join_url                 = $rke2::params::join_url,
  Stdlib::Fqdn            $bootstrap_node           = $rke2::params::bootstrap_node,
  String                  $node_token               = $rke2::params::node_token,
  Boolean                 $helm_install             = $rke2::params::helm_install,
  Hash                    $helm_repos               = $rke2::params::helm_repos,
  Array[String[1]]        $extra_config_files       = $rke2::params::extra_config_files,
  Stdlib::HTTPUrl         $container_archive_source = $rke2::params::container_archive_source,
) inherits rke2::params {

  include rke2::install
  include rke2::config
  include rke2::service
  include rke2::helm

  Class['rke2::install'] -> Class['rke2::service'] -> Class['rke2::helm']
}
modules/rke2/manifests/install.pp (new file)
@@ -0,0 +1,53 @@
# install rke2
class rke2::install (
  Enum['server', 'agent'] $node_type                = $rke2::node_type,
  String                  $rke2_version             = $rke2::rke2_version,
  String                  $rke2_release             = $rke2::rke2_release,
  Stdlib::HTTPUrl         $container_archive_source = $rke2::container_archive_source,
) {

  # versionlock rke2
  yum::versionlock { "rke2-${node_type}":
    ensure  => present,
    version => "${rke2_version}~${rke2_release}",
  }

  # install rke2
  package { "rke2-${node_type}":
    ensure => "${rke2_version}~${rke2_release}",
  }

  # ensure images path exists
  file { ['/var/lib/rancher/rke2/agent', '/var/lib/rancher/rke2/agent/images']:
    ensure  => 'directory',
    owner   => 'root',
    group   => 'root',
    mode    => '0750',
    require => Package["rke2-${node_type}"],
    before  => Service["rke2-${node_type}"],
  }

  # download the required archive of container images
  # (%2B is the url-encoded '+' in the release tag)
  archive { '/var/lib/rancher/rke2/agent/images/rke2-images.linux-amd64.tar.zst':
    ensure  => present,
    source  => "${container_archive_source}/v${rke2_version}%2B${rke2_release}/rke2-images.linux-amd64.tar.zst",
    require => [
      Package["rke2-${node_type}"],
      File['/var/lib/rancher/rke2/agent/images'],
    ],
    before  => Service["rke2-${node_type}"],
  }

  # ensure the images cache file exists
  file { '/var/lib/rancher/rke2/agent/images/.cache.json':
    ensure  => file,
    owner   => 'root',
    group   => 'root',
    mode    => '0644',
    require => [
      Package["rke2-${node_type}"],
      File['/var/lib/rancher/rke2/agent/images'],
    ],
    before  => Service["rke2-${node_type}"],
  }
}
modules/rke2/manifests/params.pp (new file)
@@ -0,0 +1,15 @@
# rke2 params
class rke2::params (
  Enum['server', 'agent'] $node_type                = 'agent',
  String                  $rke2_version             = '1.33.4',
  String                  $rke2_release             = 'rke2r1',
  Stdlib::Absolutepath    $config_file              = '/etc/rancher/rke2/config.yaml',
  Hash                    $config_hash              = {},
  Stdlib::HTTPSUrl        $join_url                 = 'https://127.0.0.1:9345',
  Stdlib::Fqdn            $bootstrap_node           = 'localhost.localdomain',
  String                  $node_token               = '',
  Boolean                 $helm_install             = false,
  Hash                    $helm_repos               = {},
  Array[String[1]]        $extra_config_files       = [],
  Stdlib::HTTPUrl         $container_archive_source = 'https://github.com/rancher/rke2/releases/download',
) {}
modules/rke2/manifests/service.pp (new file)
@@ -0,0 +1,13 @@
# manage rke2 service
class rke2::service (
  Enum['server', 'agent'] $node_type   = $rke2::node_type,
  Stdlib::Absolutepath    $config_file = $rke2::config_file,
) {

  service { "rke2-${node_type}":
    ensure    => running,
    enable    => true,
    subscribe => File[$config_file],
  }

}
@@ -47,7 +47,7 @@ class profiles::dns::base (
   $facts['networking']['interfaces'].each |$interface, $data| {

     # exclude those without ipv4 address, lo, docker0 and anycast addresses
-    if $data['ip'] and $interface != 'lo' and $interface != 'docker0' and $interface !~ /^anycast[0-9]$/ {
+    if $data['ip'] and $interface != 'lo' and $interface != 'docker0' and $interface !~ /^anycast[0-9]$/ and $interface !~ /^cilium_/ {

       # use defaults for the primary_interface
       if $interface == $primary_interface {