diff --git a/hieradata/roles/infra/dns/master.yaml b/hieradata/roles/infra/dns/master.yaml
index c83c101..f644cc9 100644
--- a/hieradata/roles/infra/dns/master.yaml
+++ b/hieradata/roles/infra/dns/master.yaml
@@ -17,6 +17,7 @@ profiles::dns::master::acls:
   - 198.18.27.0/24
   - 198.18.28.0/24
   - 198.18.29.0/24
+  - 198.18.30.0/24
 
 profiles::dns::master::zones:
   main.unkin.net:
diff --git a/hieradata/roles/infra/dns/resolver.yaml b/hieradata/roles/infra/dns/resolver.yaml
index e9adbdf..2986bbf 100644
--- a/hieradata/roles/infra/dns/resolver.yaml
+++ b/hieradata/roles/infra/dns/resolver.yaml
@@ -26,6 +26,7 @@ profiles::dns::resolver::acls:
       - 198.18.27.0/24
       - 198.18.28.0/24
       - 198.18.29.0/24
+      - 198.18.30.0/24
   acl-nomad-jobs:
     addresses:
       - 198.18.64.0/24
diff --git a/hieradata/roles/infra/k8s.yaml b/hieradata/roles/infra/k8s.yaml
index fa50942..1d28fbe 100644
--- a/hieradata/roles/infra/k8s.yaml
+++ b/hieradata/roles/infra/k8s.yaml
@@ -1,11 +1,30 @@
+---
+hiera_include:
+  - k8s
+
 k8s::node::manage_simple_cni: false
 k8s::server::resources::manage_flannel: false
 k8s::container_manager: containerd
-k8s::version: 1.28.14
+k8s::version: 1.31.9
 k8s::etcd_version: 3.5.16
+k8s::cluster_domain: k8s.au-syd1.unkin.net
+k8s::manage_firewall: false
+k8s::manage_kube_proxy: true
+k8s::puppetdb_discovery: true
+k8s::service_cidr: 198.18.30.0/24
+k8s::pod_cidr: 10.240.0.0/16
 
 ### k8s::install::crictl
 k8s::install::crictl::config:
   'runtime-endpoint': 'unix:///run/containerd/containerd.sock'
   'image-endpoint': 'unix:///run/containerd/containerd.sock'
+
+### networking
+systemd::manage_networkd: true
+systemd::manage_all_network_files: true
+networking::interfaces:
+  eth0:
+    type: physical
+    forwarding: true
+    dhcp: true
 
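The hiera_include key and the flat k8s::* settings in k8s.yaml above take over
the job of the profiles::k8s::* wrapper classes deleted at the end of this
diff. A minimal sketch of the wiring, assuming a conventional site.pp (the
site.pp itself is not part of this diff):

    # Assumed site.pp glue, not code from this diff: realise the classes
    # listed under the hiera_include key for the current node.
    lookup('hiera_include', Array[String], 'unique', []).include

    # Each k8s host then effectively does an `include k8s`, and Puppet's
    # automatic parameter lookup binds k8s::role, k8s::version,
    # k8s::service_cidr, k8s::pod_cidr, etc. to the k8s class parameters,
    # so no wrapper profile class is needed.
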
diff --git a/hieradata/roles/infra/k8s/controller.yaml b/hieradata/roles/infra/k8s/controller.yaml
index 458bd53..ed45214 100644
--- a/hieradata/roles/infra/k8s/controller.yaml
+++ b/hieradata/roles/infra/k8s/controller.yaml
@@ -1,23 +1,9 @@
 ---
-hiera_include:
-  - profiles::k8s::controller
-
-### K8S::Server
+k8s::role: server
 k8s::server::node_on_server: false
 k8s::server::manage_kubeadm: true
-k8s::server::etcd::generate_ca: true
-#k8s::server::etcd::client_ca_cert: '/etc/pki/tls/vault/certificate.crt'
-#k8s::server::etcd::client_ca_key: '/etc/pki/tls/vault/private.key'
-
-### K8S::Server::Apiserver
-# Choose an interface which is for cluster communications.
-# The apiserver will expose a port on the controller
-# and all the workers need to be able to reach it.
 k8s::server::apiserver::advertise_address: "%{facts.networking.ip}"
 
-### K8S::Server::Resources
-k8s::server::resources::manage_flannel: false
-
 consul::services:
   k8s:
     service_name: 'k8s'
diff --git a/hieradata/roles/infra/k8s/etcd.yaml b/hieradata/roles/infra/k8s/etcd.yaml
new file mode 100644
index 0000000..313b164
--- /dev/null
+++ b/hieradata/roles/infra/k8s/etcd.yaml
@@ -0,0 +1,7 @@
+---
+hiera_include:
+  - k8s::server::etcd
+k8s::role: none
+k8s::server::etcd::generate_ca: true
+k8s::server::etcd::manage_members: true
+k8s::server::etcd::self_signed_tls: true
diff --git a/hieradata/roles/infra/k8s/worker.yaml b/hieradata/roles/infra/k8s/worker.yaml
index 315f539..6c10ca4 100644
--- a/hieradata/roles/infra/k8s/worker.yaml
+++ b/hieradata/roles/infra/k8s/worker.yaml
@@ -1,7 +1,5 @@
 ---
-hiera_include:
-  - profiles::k8s::worker
-
 ### K8S::Node
+k8s::role: node
 k8s::node::node_token: "puppet.%{lookup('k8s::server::resources::bootstrap::secret')}"
 k8s::node::manage_crictl: true
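With the wrappers gone, the three role files above differ mainly in the
k8s::role key. A rough sketch of the selection this drives, assuming the
upstream k8s class implements the usual server/node/none role semantics:

    # Rough sketch of the assumed upstream role handling, not repo code:
    case lookup('k8s::role') {
      'server': { contain k8s::server }  # controller.yaml: control plane,
                                         # no kubelet (node_on_server: false)
      'node':   { contain k8s::node }    # worker.yaml: kubelet joins using
                                         # the puppet.* bootstrap node_token
      default:  { }                      # etcd.yaml: role none; those hosts
                                         # hiera_include k8s::server::etcd
    }
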
diff --git a/modules/libs/lib/facter/subnet_facts.rb b/modules/libs/lib/facter/subnet_facts.rb
index 6bc2886..0131a9f 100644
--- a/modules/libs/lib/facter/subnet_facts.rb
+++ b/modules/libs/lib/facter/subnet_facts.rb
@@ -21,7 +21,8 @@ class SubnetAttributes
     '198.18.26.0/24' => { environment: 'prod', region: 'syd1', country: 'au' }, # common node0010
     '198.18.27.0/24' => { environment: 'prod', region: 'syd1', country: 'au' }, # common node0011
     '198.18.28.0/24' => { environment: 'prod', region: 'syd1', country: 'au' }, # common node0012
-    '198.18.29.0/24' => { environment: 'prod', region: 'syd1', country: 'au' }  # common node0013
+    '198.18.29.0/24' => { environment: 'prod', region: 'syd1', country: 'au' }, # common node0013
+    '198.18.30.0/24' => { environment: 'prod', region: 'syd1', country: 'au' }  # k8s service network
   }.freeze
 
   # Default attributes if no subnet matches, also defined as a constant
diff --git a/site/profiles/manifests/k8s/controller.pp b/site/profiles/manifests/k8s/controller.pp
deleted file mode 100644
index 37ee9ca..0000000
--- a/site/profiles/manifests/k8s/controller.pp
+++ /dev/null
@@ -1,37 +0,0 @@
-# Class: profile::k8s::controller
-#
-# @param container_manager set the cri, like cri-o or containerd, if controller should be also a worker
-# @param etcd_version version of etcd
-# @param k8s_version version of kubernetes
-# @param manage_firewall whether to manage firewall or not
-# @param manage_kube_proxy whether to manage manage_kube_proxy or not
-# @param control_plane_url
-#   api server url where the server/nodes connect to.
-#   this is most likely a load balanced dns with all the controllers in the backend.
-#   on single head clusters this may be the dns name:port of the controller node.
-# @param role role in the cluster, server, node, none
-# @param puppetdb_discovery whether to use puppetdb or not
-# @param service_cidr address space for the services
-# @param pod_cidr address space for the pods
-#
-# lint:ignore:autoloader_layout
-class profiles::k8s::controller (
-  # lint:endignore
-  Boolean $manage_firewall = false, # k8s-class default: false
-  Boolean $manage_kube_proxy = true, # k8s-class default: true
-  Boolean $puppetdb_discovery = true, # k8s-class default: false
-  Stdlib::HTTPUrl $control_plane_url = 'https://k8s.service.consul:6443', # k8s-class default: https://kubernetes:6443
-  Enum['server'] $role = 'server', # k8s-class default: none
-  K8s::CIDR $service_cidr = '10.20.0.0/20', # k8s-class default: 10.1.0.0/24
-  K8s::CIDR $pod_cidr = '10.20.16.0/20', # k8s-class default: 10.0.0.0/16
-) {
-  class { 'k8s':
-    manage_firewall      => $manage_firewall,
-    manage_kube_proxy    => $manage_kube_proxy,
-    control_plane_url    => $control_plane_url,
-    role                 => $role,
-    service_cluster_cidr => $service_cidr,
-    cluster_cidr         => $pod_cidr,
-    puppetdb_discovery   => $puppetdb_discovery,
-  }
-}
diff --git a/site/profiles/manifests/k8s/worker.pp b/site/profiles/manifests/k8s/worker.pp
deleted file mode 100644
index 3e6bd93..0000000
--- a/site/profiles/manifests/k8s/worker.pp
+++ /dev/null
@@ -1,30 +0,0 @@
-# Class: profile::k8s::worker
-#
-# @param role role in the cluster, server, node, none
-# @param control_plane_url
-#   cluster url where the server/nodes connect to.
-#   this is most likely a load balanced dns with all the controllers in the backend.
-#   on single head clusters this may be the dns name:port of the controller node.
-# @param k8s_version version of kubernetes
-# @param puppetdb_discovery whether to use puppetdb or not
-# @param manage_firewall whether to manage firewall or not
-# @param manage_kube_proxy whether to manage manage_kube_proxy or not, for cilium this is not needed
-# @param container_manager set the cri, like cri-o or containerd
-#
-# lint:ignore:autoloader_layout
-class profiles::k8s::worker (
-  # lint:endignore
-  Boolean $manage_firewall = true, # k8s-class default: false
-  Boolean $manage_kube_proxy = true, # k8s-class default: true
-  Boolean $puppetdb_discovery = true, # k8s-class default: false
-  Enum['node'] $role = 'node', # k8s-class default: none
-  Stdlib::HTTPUrl $control_plane_url = $profiles::k8s::controller::control_plane_url,
-) {
-  class { 'k8s':
-    manage_firewall    => $manage_firewall,
-    manage_kube_proxy  => $manage_kube_proxy,
-    control_plane_url  => $control_plane_url,
-    puppetdb_discovery => $puppetdb_discovery,
-    role               => $role,
-  }
-}
diff --git a/site/roles/manifests/infra/k8s/etcd.pp b/site/roles/manifests/infra/k8s/etcd.pp
new file mode 100644
index 0000000..84a9938
--- /dev/null
+++ b/site/roles/manifests/infra/k8s/etcd.pp
@@ -0,0 +1,11 @@
+# deploy k8s etcd nodes
+class roles::infra::k8s::etcd {
+  if $facts['firstrun'] {
+    include profiles::defaults
+    include profiles::firstrun::init
+  } else {
+    include profiles::defaults
+    include profiles::base
+    include profiles::base::datavol
+  }
+}
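In the new roles::infra::k8s::etcd class above, profiles::defaults is included
on both branches of the firstrun conditional, so it could be hoisted out; a
hypothetical equivalent:

    # Hypothetical refactor, behaviour unchanged:
    class roles::infra::k8s::etcd {
      include profiles::defaults
      if $facts['firstrun'] {
        include profiles::firstrun::init
      } else {
        include profiles::base
        include profiles::base::datavol
      }
    }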