Compare commits

..

1 Commits

Author SHA1 Message Date
3a798a20d7 feat: implement nested groups
- use includegroups feature to nest groups
- remove the trailing ',' from includegroups
2024-09-26 17:15:51 +10:00
411 changed files with 1691 additions and 11402 deletions

View File

@ -3,8 +3,3 @@
detectors:
FeatureEnvy:
enabled: false
TooManyStatements:
enabled: false
UncommunicativeVariableName:
accept:
- e

View File

@ -8,6 +8,3 @@ Style/Documentation:
Layout/LineLength:
Max: 140
Metrics/BlockNesting:
Max: 4

View File

@ -1,10 +0,0 @@
when:
- event: pull_request
steps:
- name: pre-commit
image: git.unkin.net/unkin/almalinux9-base:latest
commands:
- dnf groupinstall "Development Tools" -y
- dnf install uv rubygems ruby-devel gcc make redhat-rpm-config glibc-headers glibc-devel libffi libffi-devel -y
- uvx pre-commit run --all-files

View File

@ -2,57 +2,52 @@ forge 'forge.puppetlabs.com'
moduledir 'external_modules'
# puppetlabs
mod 'puppetlabs-stdlib', '9.7.0'
mod 'puppetlabs-inifile', '6.2.0'
mod 'puppetlabs-concat', '9.1.0'
mod 'puppetlabs-vcsrepo', '7.0.0'
mod 'puppetlabs-yumrepo_core', '2.1.0'
mod 'puppetlabs-apt', '10.0.1'
mod 'puppetlabs-lvm', '3.0.1'
mod 'puppetlabs-puppetdb', '7.14.0'
mod 'puppetlabs-postgresql', '9.2.0'
mod 'puppetlabs-firewall', '8.1.4'
mod 'puppetlabs-accounts', '8.2.2'
mod 'puppetlabs-mysql', '16.2.0'
mod 'puppetlabs-stdlib', '9.1.0'
mod 'puppetlabs-inifile', '6.0.0'
mod 'puppetlabs-concat', '9.0.0'
mod 'puppetlabs-vcsrepo', '6.1.0'
mod 'puppetlabs-yumrepo_core', '2.0.0'
mod 'puppetlabs-apt', '9.4.0'
mod 'puppetlabs-lvm', '2.1.0'
mod 'puppetlabs-puppetdb', '7.13.0'
mod 'puppetlabs-postgresql', '9.1.0'
mod 'puppetlabs-firewall', '6.0.0'
mod 'puppetlabs-accounts', '8.1.0'
mod 'puppetlabs-mysql', '15.0.0'
mod 'puppetlabs-xinetd', '3.4.1'
mod 'puppetlabs-haproxy', '8.2.0'
mod 'puppetlabs-java', '11.1.0'
mod 'puppetlabs-reboot', '5.1.0'
mod 'puppetlabs-docker', '10.2.0'
mod 'puppetlabs-mailalias_core', '1.2.0'
mod 'puppetlabs-haproxy', '8.0.0'
mod 'puppetlabs-java', '10.1.2'
mod 'puppetlabs-reboot', '5.0.0'
mod 'puppetlabs-docker', '10.0.1'
# puppet
mod 'puppet-python', '7.4.0'
mod 'puppet-systemd', '8.1.0'
mod 'puppet-yum', '7.2.0'
mod 'puppet-archive', '7.1.0'
mod 'puppet-chrony', '3.0.0'
mod 'puppet-puppetboard', '11.0.0'
mod 'puppet-nginx', '6.0.1'
mod 'puppet-selinux', '5.0.0'
mod 'puppet-prometheus', '16.0.0'
mod 'puppet-grafana', '14.1.0'
mod 'puppet-consul', '9.1.0'
mod 'puppet-vault', '4.1.1'
mod 'puppet-python', '7.0.0'
mod 'puppet-systemd', '5.1.0'
mod 'puppet-yum', '7.0.0'
mod 'puppet-archive', '7.0.0'
mod 'puppet-chrony', '2.6.0'
mod 'puppet-puppetboard', '9.0.0'
mod 'puppet-nginx', '5.0.0'
mod 'puppet-selinux', '4.1.0'
mod 'puppet-prometheus', '13.4.0'
mod 'puppet-grafana', '13.1.0'
mod 'puppet-consul', '8.0.0'
mod 'puppet-vault', '4.1.0'
mod 'puppet-dhcp', '6.1.0'
mod 'puppet-keepalived', '5.1.0'
mod 'puppet-extlib', '7.5.1'
mod 'puppet-network', '2.2.1'
mod 'puppet-kmod', '4.1.0'
mod 'puppet-extlib', '7.0.0'
mod 'puppet-network', '2.2.0'
mod 'puppet-kmod', '4.0.1'
mod 'puppet-filemapper', '4.0.0'
mod 'puppet-letsencrypt', '11.1.0'
mod 'puppet-rundeck', '9.2.0'
mod 'puppet-redis', '11.1.0'
mod 'puppet-nodejs', '11.0.0'
mod 'puppet-postfix', '5.1.0'
mod 'puppet-alternatives', '6.0.0'
mod 'puppet-letsencrypt', '11.0.0'
mod 'puppet-rundeck', '9.1.0'
mod 'puppet-redis', '11.0.0'
# other
mod 'saz-sudo', '9.0.2'
mod 'saz-ssh', '13.1.0'
mod 'saz-limits', '5.0.0'
mod 'ghoneycutt-timezone', '4.0.0'
mod 'ghoneycutt-puppet', '3.3.0'
mod 'saz-sudo', '8.0.0'
mod 'saz-ssh', '12.1.0'
mod 'ghoneycutt-timezone', '4.0.0'
mod 'dalen-puppetdbquery', '3.0.1'
mod 'markt-galera', '3.1.0'
mod 'kogitoapp-minio', '1.1.4'
@ -61,9 +56,6 @@ mod 'stm-file_capability', '6.0.0'
mod 'h0tw1r3-gitea', '3.2.0'
mod 'rehan-mkdir', '2.0.0'
mod 'tailoredautomation-patroni', '2.0.0'
mod 'ssm-crypto_policies', '0.3.3'
mod 'thias-sysctl', '1.0.8'
mod 'cirrax-dovecot', '1.3.3'
mod 'bind',
:git => 'https://git.service.au-syd1.consul/unkinben/puppet-bind.git',

View File

@ -28,98 +28,6 @@ Always refer back to the official documentation at https://docs.ceph.com/en/late
sudo ceph fs set mediafs max_mds 2
```
## managing cephfs with subvolumes
Create erasure code profiles. The K and M values are equivalent to the number of data disks (K) and parity disks (M) in RAID5, RAID6, etc.
sudo ceph osd erasure-code-profile set ec_6_2 k=6 m=2
sudo ceph osd erasure-code-profile set ec_4_1 k=4 m=1
Create data pools using the erasure-code-profile, set some required options
sudo ceph osd pool create cephfs_data_ssd_ec_6_2 erasure ec_6_2
sudo ceph osd pool set cephfs_data_ssd_ec_6_2 allow_ec_overwrites true
sudo ceph osd pool set cephfs_data_ssd_ec_6_2 bulk true
sudo ceph osd pool create cephfs_data_ssd_ec_4_1 erasure ec_4_1
sudo ceph osd pool set cephfs_data_ssd_ec_4_1 allow_ec_overwrites true
sudo ceph osd pool set cephfs_data_ssd_ec_4_1 bulk true
Add the pool to the fs `cephfs`
sudo ceph fs add_data_pool cephfs cephfs_data_ssd_ec_6_2
sudo ceph fs add_data_pool cephfs cephfs_data_ssd_ec_4_1
Create a subvolumegroup using the new data pool
sudo ceph fs subvolumegroup create cephfs csi_ssd_ec_6_2 --pool_layout cephfs_data_ssd_ec_6_2
sudo ceph fs subvolumegroup create cephfs csi_ssd_ec_4_1 --pool_layout cephfs_data_ssd_ec_4_1
All together:
sudo ceph osd erasure-code-profile set ec_6_2 k=6 m=2
sudo ceph osd pool create cephfs_data_ssd_ec_6_2 erasure ec_6_2
sudo ceph osd pool set cephfs_data_ssd_ec_6_2 allow_ec_overwrites true
sudo ceph osd pool set cephfs_data_ssd_ec_6_2 bulk true
sudo ceph fs add_data_pool cephfs cephfs_data_ssd_ec_6_2
sudo ceph fs subvolumegroup create cephfs csi_ssd_ec_6_2 --pool_layout cephfs_data_ssd_ec_6_2
sudo ceph osd erasure-code-profile set ec_4_1 k=4 m=1
sudo ceph osd pool create cephfs_data_ssd_ec_4_1 erasure ec_4_1
sudo ceph osd pool set cephfs_data_ssd_ec_4_1 allow_ec_overwrites true
sudo ceph osd pool set cephfs_data_ssd_ec_4_1 bulk true
sudo ceph fs add_data_pool cephfs cephfs_data_ssd_ec_4_1
sudo ceph fs subvolumegroup create cephfs csi_ssd_ec_4_1 --pool_layout cephfs_data_ssd_ec_4_1
Create a key with access to the new subvolume groups. Check if the user already exists first:
sudo ceph auth get client.kubernetes-cephfs
If it doesn't:
sudo ceph auth get-or-create client.kubernetes-cephfs \
mgr 'allow rw' \
osd 'allow rw tag cephfs metadata=cephfs, allow rw tag cephfs data=cephfs' \
mds 'allow r fsname=cephfs path=/volumes, allow rws fsname=cephfs path=/volumes/csi_ssd_ec_6_2, allow rws fsname=cephfs path=/volumes/csi_ssd_ec_4_1' \
mon 'allow r fsname=cephfs'
If it does, use `sudo ceph auth caps client.kubernetes-cephfs ...` instead to update existing capabilities.
## removing a cephfs subvolumegroup from cephfs
This will cleanup the subvolumegroup, and subvolumes if they exist, then remove the pool.
Check for subvolumegroups first, then for subvolumes in it
sudo ceph fs subvolumegroup ls cephfs
sudo ceph fs subvolume ls cephfs --group_name csi_raid6
If subvolumes exist, remove each one-by-one:
sudo ceph fs subvolume rm cephfs <subvol_name> --group_name csi_raid6
If you have snapshots, remove snapshots first:
sudo ceph fs subvolume snapshot ls cephfs <subvol_name> --group_name csi_raid6
sudo ceph fs subvolume snapshot rm cephfs <subvol_name> <snap_name> --group_name csi_raid6
Once the group is empty, remove it:
sudo ceph fs subvolumegroup rm cephfs csi_raid6
If it complains it's not empty, go back as there's still a subvolume or snapshot.
If you added it with `ceph fs add_data_pool`, undo with `rm_data_pool`:
sudo ceph fs rm_data_pool cephfs cephfs_data_csi_raid6
After it's detached from CephFS, you can delete it.
sudo ceph osd pool rm cephfs_data_csi_raid6 cephfs_data_csi_raid6 --yes-i-really-really-mean-it
## creating authentication tokens
- this will create a client keyring named media
@ -150,78 +58,3 @@ this will overwrite the current capabilities of a given client.user
mon 'allow r' \
mds 'allow rw path=/' \
osd 'allow rw pool=media_data'
## adding a new osd on new node
create the ceph conf (automate this?)
cat <<EOF | sudo tee /etc/ceph/ceph.conf
[global]
auth_client_required = cephx
auth_cluster_required = cephx
auth_service_required = cephx
fsid = de96a98f-3d23-465a-a899-86d3d67edab8
mon_allow_pool_delete = true
mon_initial_members = prodnxsr0009,prodnxsr0010,prodnxsr0011,prodnxsr0012,prodnxsr0013
mon_host = 198.18.23.9,198.18.23.10,198.18.23.11,198.18.23.12,198.18.23.13
ms_bind_ipv4 = true
ms_bind_ipv6 = false
osd_crush_chooseleaf_type = 1
osd_pool_default_min_size = 2
osd_pool_default_size = 3
osd_pool_default_pg_num = 128
public_network = 198.18.23.1/32,198.18.23.2/32,198.18.23.3/32,198.18.23.4/32,198.18.23.5/32,198.18.23.6/32,198.18.23.7/32,198.18.23.8/32,198.18.23.9/32,198.18.23.10/32,198.18.23.11/32,198.18.23.12/32,198.18.23.13/32
EOF
ssh to one of the monitor hosts, then transfer the keys required
sudo cat /etc/ceph/ceph.client.admin.keyring | ssh prodnxsr0003 'sudo tee /etc/ceph/ceph.client.admin.keyring'
sudo cat /var/lib/ceph/bootstrap-osd/ceph.keyring | ssh prodnxsr0003 'sudo tee /var/lib/ceph/bootstrap-osd/ceph.keyring'
assuming we are adding /dev/sda to the cluster, first zap the disk to remove partitions/lvm/metadata
sudo ceph-volume lvm zap /dev/sda --destroy
then add it to the cluster
sudo ceph-volume lvm create --data /dev/sda
## removing an osd
check what OSD IDs were on this host (if you know it)
sudo ceph osd tree
or check for any DOWN osds
sudo ceph osd stat
sudo ceph health detail
once you identify the old OSD ID, remove it with these steps, replace X with the actual OSD ID:
sudo ceph osd out osd.X
sudo ceph osd down osd.X
sudo ceph osd crush remove osd.X
sudo ceph auth del osd.X
sudo ceph osd rm osd.X
## maintenance mode for the cluster
from one node in the cluster disable recovery
sudo ceph osd set noout
sudo ceph osd set nobackfill
sudo ceph osd set norecover
sudo ceph osd set norebalance
sudo ceph osd set nodown
sudo ceph osd set pause
to undo the change, use unset
sudo ceph osd unset noout
sudo ceph osd unset nobackfill
sudo ceph osd unset norecover
sudo ceph osd unset norebalance
sudo ceph osd unset nodown
sudo ceph osd unset pause

View File

@ -29,21 +29,3 @@ these steps are required when adding additional puppet masters, as the subject a
sudo systemctl start puppetserver
sudo cp /root/current_crl.pem /etc/puppetlabs/puppet/ssl/crl.pem
## troubleshooting
### Issue 1:
[sysadmin@ausyd1nxvm2056 ~]$ sudo puppet agent -t
Error: The CRL issued by 'CN=Puppet CA: prodinf01n01.main.unkin.net' is missing
Find another puppetserver that IS working, copy the `/etc/puppetlabs/puppet/ssl/crl.pem` to this host, run puppet again.
### Issue 2:
[sysadmin@ausyd1nxvm2097 ~]$ sudo puppet agent -t
Error: Failed to parse CA certificates as PEM
The puppet agent's CA cert `/etc/puppetlabs/puppet/ssl/certs/ca.pem` is empty or missing. Grab it from any other host. Run puppet again.

View File

@ -1,6 +1,6 @@
---
profiles::accounts::sysadmin::password: ENC[PKCS7,MIIBqQYJKoZIhvcNAQcDoIIBmjCCAZYCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAoS7GyofFaXBNTWU+GtSiz4eCX/9j/sh3fDDRgOgNv1qpcQ87ZlTTenbHo9lxeURxKQ2HVVt7IsrBo/SC/WgipAKnliRkkIvo7nfAs+i+kEE8wakjAs0DcB4mhqtIZRuBkLG2Nay//DcG6cltVkbKEEKmKLMkDFZgTWreOZal8nDljpVe1S8QwtwP4/6hKTef5xsOnrisxuffWTXvwYJhj/VXrjdoH7EhtHGLybzEalglkVHEGft/WrrD/0bwJpmR0RegWI4HTsSvGiHgvf5DZJx8fXPZNPnicGtlfA9ccQPuVo17bY4Qf/WIc1A8Ssv4kHSbNIYJKRymI3UFb0Z4wzBsBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBBBxDLb6pCGbittkcX6asd/gEBmMcUNupDjSECq5H09YA70eVwWWe0fBqxTxrr2cXCXtRKFvOk8SJmL0xHAWodaLN9+krTWHJcWbAK8JXEPC7rn]
profiles::accounts::root::password: ENC[PKCS7,MIIB2gYJKoZIhvcNAQcDoIIByzCCAccCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAIgzGQLoHrm7JSnWG4vdAtxSuETmnqbV7kUsQS8WCRUwFGenDFkps+OGMnOGEHLzMJzXihLfgfdWwTAI4fp48M+zhTMo9TQkdzZqtbFk3+RjV2jDF0wfe4kVUIpReOq+EkaDSkoRSG8V6hWvszhDHUrJBC9eDhomL0f3xNAWxmy5EIX/uMEvg9Ux5YX+E6k2pEIKnHNoVIaWDojlofSIzIqTSS7l3jQtJhs3YqBzLL1DsoF1kdn+Rwl5kcsKkhV+vzl76wEbpYVZW8lu4bFfP6QHMLPcep2tuUDMCDvARRXD7YyZcAtS7aMuqll+BLAszpWxAA7EU2hgvdr6t2uyVCTCBnAYJKoZIhvcNAQcBMB0GCWCGSAFlAwQBKgQQ4D5oDoyE6LPdjpVtGPoJD4BwfnQ9ORjYFPvHQmt+lgU4jMqh6BhqP0VN3lqVfUpOmiVMIqkO/cYtlwVLKEg36TPCHBSpqvhuahSF5saCVr8JY3xWOAmTSgnNjQOPlGrPnYWYbuRLxVRsU+KUkpAzR0c6VN0wYi6bI85Pcv8yHF3UYA==]
profiles::accounts::root::password: ENC[PKCS7,MIIBeQYJKoZIhvcNAQcDoIIBajCCAWYCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAM79PRxeAZHrDcSm4eSFqU94/LjuSbdUmJWivX/Pa8GumoW2e/PT9nGHW3p98zHthMgCglk52PECQ+TBKjxr+9dTyNK5ePG6ZJEqSHNRqsPGm+kfQj/hlTmq8vOBaFM5GapD1iTHs5JFbGngI56swKBEVXW9+Z37BjQb2xJuyLsu5Bo/tA0BaOKuCtjq1a6E38bOX+nJ+YF1uZgV9ofAEh1YvkcTmnEWYXFRPWd7AaNcWn03V2pfhGqxc+xydak620I47P+FE+qIY72+aQ6tmLU3X9vyA1HLF2Tv572l4a2i+YIk6nAgQdi+hQKznqNL9M9YV+s1AcmcKLT7cfLrjsjA8BgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBCMWrdCWBQgtW3NOEpERwP+gBA3KDiqe4pQq6DwRfsEXQNZ]
profiles::consul::client::secret_id_salt: ENC[PKCS7,MIIBmQYJKoZIhvcNAQcDoIIBijCCAYYCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAS7pNFRX4onccFaR87zB/eFFORuF22j6xqjyeAqqjgEduYhkt6w5kkz+YfUoHUesU0Q6F2p6HrCSZ8yAsx5M25NCiud9P4hIpjKmOZ4zCNO7uhATh4AQDYw3BrdRwfO+c6jOl5wOiNLCfDBJ0sFT3akCvcuPS1xIoRJq4Gyn+uCbOsMbvSl25ld2xKt1/cqs8gc1d8mkpjwWto7t+qZSUFMCehTbehH3G4a3Q5rvfBoNwv42Wbs676BDcCurDaAzHNqE7pDbOWhGuVOBl+q+BU0Ri/CRkGcTViN9fr8Dc9SveVC6EPsMbw+05/8/NlfzQse3KAwQ34nR9tR2PQw5qEzBcBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBB7LywscQtF7cG2nomfEsu9gDDVqJBFP1jAX2eGZ2crYS5gnBcsRwhc0HNo2/WWdhZprMW+vEJOOGXDelI53NxA3o0=]
profiles::consul::token::node_editor::secret_id: ENC[PKCS7,MIIBmQYJKoZIhvcNAQcDoIIBijCCAYYCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAO8IIF2r18dFf0bVKEwjJUe1TmXHH0AIzsQHxkHwV7d37kvH1cY9rYw0TtdHn7GTxvotJG7GZbWvbunpBs1g2p2RPADiM6TMhbO8mJ0tAWLnMk7bQ221xu8Pc7KceqWmU17dmgNhVCohyfwJNqbA756TlHVgxGA0LtNrKoLOmgKGXAL1VYZoKEQnWq7xOpO+z3e1UfjoO6CvX/Od2hGYfUkHdro8mwRw4GFKzU7XeKFdAMUGpn5rVmY3xe+1ARXwGFaSrTHzk2n85pvwhPRlQ+OwqzyT19Qo2FNeAO6RoCRIFTtqbsjTWPUlseHIhw4Q5bHO1I0Mrlm5IHDESw/22IzBcBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBCEe9wD72qxnpeq5nCi/d7BgDCP29sDFObkFTabt2uZ/nF9MT1g+QOrrdFKgnG6ThnwH1hwpZPsSVgIs+yRQH8laB4=]
profiles::consul::server::acl_tokens_initial_management: ENC[PKCS7,MIIBmQYJKoZIhvcNAQcDoIIBijCCAYYCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAi1UH7AZirJ1PdxWy+KEgS5ufm0wbn2xy9rkg14hKYpcVjBa4pOZpSLMGMiiUpBIqBytDMZM4ezYa/luktpkBImJbM/TE16beGtsacQGA+9eZk2Tihs9GR2qbAQiu5lLITiDlwNnf0GeWdqHM8CTeD68DczQF320d9U14/k6pG/7z+w/MGLcjsQoSuOFTm42JVn1BI46t1CYSCHMXQc/9Tfs+FzI+vumohI8DxAYBIuyzU5HBX/MntAsvD/yixMJS1pZL9WwgqZJC/wK34rVRB39DpxWf/WROrI+WLuSJwr7WBjaeF9Ju+89WKCgsI53EWhFTj8GgDZm/jqPoE478NjBcBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBAoACRzJdQKNYXZv6cghFIIgDAzB81DMcuY815nb8POtZpiA06jT/068AoZmSctHoFK/zW9tY229N5r1Tb+WHElqLk=]

View File

@ -36,12 +36,6 @@ lookup_options:
profiles::haproxy::server::listeners:
merge:
strategy: deep
profiles::accounts::root::sshkeys:
merge:
strategy: deep
profiles::accounts::sysadmin::sshkeys:
merge:
strategy: deep
haproxy::backend:
merge:
strategy: deep
@ -129,9 +123,6 @@ lookup_options:
profiles::ceph::client::keyrings:
merge:
strategy: deep
profiles::ceph::conf::config:
merge:
strategy: deep
profiles::nginx::simpleproxy::locations:
merge:
strategy: deep
@ -144,38 +135,6 @@ lookup_options:
keepalived::vrrp_instance:
merge:
strategy: deep
profiles::etcd::node::initial_cluster_token:
convert_to: Sensitive
sysctl::base::values:
merge:
strategy: deep
limits::entries:
merge:
strategy: deep
zfs::zpools:
merge:
strategy: deep
zfs::datasets:
merge:
strategy: deep
rke2::config_hash:
merge:
strategy: deep
postfix::configs:
merge:
strategy: deep
postfix::maps:
merge:
strategy: deep
postfix::virtuals:
merge:
strategy: deep
stalwart::postgresql_password:
convert_to: Sensitive
stalwart::s3_secret_key:
convert_to: Sensitive
stalwart::fallback_admin_password:
convert_to: Sensitive
facts_path: '/opt/puppetlabs/facter/facts.d'
@ -184,32 +143,21 @@ hiera_include:
- networking
- ssh::server
- profiles::accounts::rundeck
- limits
- sysctl::base
- exporters::node_exporter
profiles::ntp::client::ntp_role: 'roles::infra::ntp::server'
profiles::ntp::client::use_ntp: 'region'
profiles::ntp::client::peers:
- 0.au.pool.ntp.org
- 1.au.pool.ntp.org
- 2.au.pool.ntp.org
- 3.au.pool.ntp.org
- 0.pool.ntp.org
- 1.pool.ntp.org
- 2.pool.ntp.org
- 3.pool.ntp.org
consul::install_method: 'package'
consul::manage_repo: false
consul::bin_dir: /usr/bin
profiles::base::puppet_servers:
- 'prodinf01n01.main.unkin.net'
vault::install_method: 'repo'
vault::manage_repo: false
vault::bin_dir: /usr/bin
vault::manage_service_file: true
vault::manage_config_dir: true
vault::disable_mlock: false
profiles::dns::base::nameservers:
- 198.18.19.16
profiles::dns::master::basedir: '/var/named/sources'
#profiles::dns::base::ns_role: 'roles::infra::dns::resolver'
#profiles::dns::base::use_ns: 'region'
profiles::dns::base::ns_role: 'roles::infra::dns::resolver'
profiles::dns::base::use_ns: 'region'
profiles::consul::server::members_role: roles::infra::storage::consul
profiles::consul::token::node_editor::accessor_id: '024e27bd-c5bb-41e7-a578-b766509e11bc'
profiles::consul::client::members_lookup: true
@ -224,9 +172,6 @@ profiles::consul::client::node_rules:
- resource: node
segment: ''
disposition: read
- resource: service
segment: node_exporter
disposition: write
profiles::packages::include:
bash-completion: {}
@ -310,8 +255,7 @@ profiles::puppet::client::dns_alt_names:
puppetdbapi: puppetdbapi.query.consul
puppetdbsql: puppetdbsql.service.au-syd1.consul
exporters::node_exporter::enable: true
exporters::node_exporter::cleanup_old_node_exporter: true
prometheus::node_exporter::export_scrape_job: true
prometheus::systemd_exporter::export_scrape_job: true
ssh::server::storeconfigs_enabled: false
@ -376,81 +320,11 @@ networking::route_defaults:
netmask: 0.0.0.0
network: default
# logging:
victorialogs::client::journald::enable: true
victorialogs::client::journald::inserturl: https://vlinsert.service.consul:9428/insert/journald
# FIXME these are for the proxmox ceph cluster
profiles::ceph::client::fsid: 7f7f00cb-95de-498c-8dcc-14b54e4e9ca8
profiles::ceph::client::mons:
- 10.18.15.1
- 10.18.15.2
- 10.18.15.3
profiles::ceph::conf::config:
global:
auth_client_required: 'cephx'
auth_cluster_required: 'cephx'
auth_service_required: 'cephx'
fsid: 'de96a98f-3d23-465a-a899-86d3d67edab8'
mon_allow_pool_delete: true
mon_initial_members: 'prodnxsr0009,prodnxsr0010,prodnxsr0011,prodnxsr0012,prodnxsr0013'
mon_host: '198.18.23.9,198.18.23.10,198.18.23.11,198.18.23.12,198.18.23.13'
ms_bind_ipv4: true
ms_bind_ipv6: false
osd_crush_chooseleaf_type: 1
osd_pool_default_min_size: 2
osd_pool_default_size: 3
osd_pool_default_pg_num: 128
public_network: >
198.18.23.1/32,198.18.23.2/32,198.18.23.3/32,198.18.23.4/32,
198.18.23.5/32,198.18.23.6/32,198.18.23.7/32,198.18.23.8/32,
198.18.23.9/32,198.18.23.10/32,198.18.23.11/32,198.18.23.12/32,
198.18.23.13/32
client.rgw.ausyd1nxvm2115:
rgw_realm: unkin
rgw_zonegroup: au
rgw_zone: syd1
client.rgw.ausyd1nxvm2116:
rgw_realm: unkin
rgw_zonegroup: au
rgw_zone: syd1
client.rgw.ausyd1nxvm2117:
rgw_realm: unkin
rgw_zonegroup: au
rgw_zone: syd1
client.rgw.ausyd1nxvm2118:
rgw_realm: unkin
rgw_zonegroup: au
rgw_zone: syd1
client.rgw.ausyd1nxvm2119:
rgw_realm: unkin
rgw_zonegroup: au
rgw_zone: syd1
mds:
keyring: /var/lib/ceph/mds/ceph-$id/keyring
mds_standby_replay: true
mds.prodnxsr0009-1:
host: prodnxsr0009
mds.prodnxsr0009-2:
host: prodnxsr0009
mds.prodnxsr0010-1:
host: prodnxsr0010
mds.prodnxsr0010-2:
host: prodnxsr0010
mds.prodnxsr0011-1:
host: prodnxsr0011
mds.prodnxsr0011-2:
host: prodnxsr0011
mds.prodnxsr0012-1:
host: prodnxsr0012
mds.prodnxsr0012-2:
host: prodnxsr0012
mds.prodnxsr0013-1:
host: prodnxsr0013
mds.prodnxsr0013-2:
host: prodnxsr0013
#profiles::base::hosts::additional_hosts:
# - ip: 198.18.17.9
# hostname: prodinf01n09.main.unkin.net

View File

@ -1,9 +1,2 @@
---
timezone::timezone: 'Australia/Darwin'
profiles_dns_upstream_forwarder_unkin:
- 198.18.17.23
- 198.18.17.24
profiles_dns_upstream_forwarder_consul:
- 198.18.17.34
- 198.18.17.35
- 198.18.17.36

View File

@ -1 +1,52 @@
---
profiles::dns::resolver::zones:
main.unkin.net-forward:
domain: 'main.unkin.net'
zone_type: 'forward'
forwarders:
- 198.18.17.23
- 198.18.17.24
forward: 'only'
13.18.198.in-addr.arpa-forward:
domain: '13.18.198.in-addr.arpa'
zone_type: 'forward'
forwarders:
- 198.18.17.23
- 198.18.17.24
forward: 'only'
14.18.198.in-addr.arpa-forward:
domain: '14.18.198.in-addr.arpa'
zone_type: 'forward'
forwarders:
- 198.18.17.23
- 198.18.17.24
forward: 'only'
15.18.198.in-addr.arpa-forward:
domain: '15.18.198.in-addr.arpa'
zone_type: 'forward'
forwarders:
- 198.18.17.23
- 198.18.17.24
forward: 'only'
16.18.198.in-addr.arpa-forward:
domain: '16.18.198.in-addr.arpa'
zone_type: 'forward'
forwarders:
- 198.18.17.23
- 198.18.17.24
forward: 'only'
17.18.198.in-addr.arpa-forward:
domain: '17.18.198.in-addr.arpa'
zone_type: 'forward'
forwarders:
- 198.18.17.23
- 198.18.17.24
forward: 'only'
consul-forward:
domain: 'consul'
zone_type: 'forward'
forwarders:
- 198.18.17.34
- 198.18.17.35
- 198.18.17.36
forward: 'only'

View File

@ -1,9 +1,3 @@
---
timezone::timezone: 'Australia/Sydney'
certbot::client::webserver: ausyd1nxvm2057.main.unkin.net
profiles_dns_upstream_forwarder_unkin:
- 198.18.19.15
profiles_dns_upstream_forwarder_consul:
- 198.18.19.14
profiles_dns_upstream_forwarder_k8s:
- 198.18.19.20
certbot::client::webserver: ausyd1nxvm1021.main.unkin.net

View File

@ -1 +1,52 @@
---
profiles::dns::resolver::zones:
main.unkin.net-forward:
domain: 'main.unkin.net'
zone_type: 'forward'
forwarders:
- 198.18.13.14
- 198.18.13.15
forward: 'only'
13.18.198.in-addr.arpa-forward:
domain: '13.18.198.in-addr.arpa'
zone_type: 'forward'
forwarders:
- 198.18.13.14
- 198.18.13.15
forward: 'only'
14.18.198.in-addr.arpa-forward:
domain: '14.18.198.in-addr.arpa'
zone_type: 'forward'
forwarders:
- 198.18.13.14
- 198.18.13.15
forward: 'only'
15.18.198.in-addr.arpa-forward:
domain: '15.18.198.in-addr.arpa'
zone_type: 'forward'
forwarders:
- 198.18.13.14
- 198.18.13.15
forward: 'only'
16.18.198.in-addr.arpa-forward:
domain: '16.18.198.in-addr.arpa'
zone_type: 'forward'
forwarders:
- 198.18.13.14
- 198.18.13.15
forward: 'only'
17.18.198.in-addr.arpa-forward:
domain: '17.18.198.in-addr.arpa'
zone_type: 'forward'
forwarders:
- 198.18.13.14
- 198.18.13.15
forward: 'only'
consul-forward:
domain: 'consul'
zone_type: 'forward'
forwarders:
- 198.18.13.19
- 198.18.13.20
- 198.18.13.21
forward: 'only'

View File

@ -3,7 +3,7 @@ hiera_include:
- keepalived
# keepalived
profiles::haproxy::dns::ipaddr: '198.18.13.250'
profiles::haproxy::dns::vrrp_ipaddr: '198.18.13.250'
profiles::haproxy::dns::vrrp_cnames:
- sonarr.main.unkin.net
- radarr.main.unkin.net
@ -260,7 +260,6 @@ profiles::haproxy::dns::cnames:
- au-syd1-pve-api.main.unkin.net
# letsencrypt certificates
certbot::client::service: haproxy
certbot::client::domains:
- au-syd1-pve.main.unkin.net
- au-syd1-pve-api.main.unkin.net

View File

@ -1,424 +0,0 @@
---
profiles::haproxy::dns::ipaddr: "%{hiera('anycast_ip')}"
profiles::haproxy::dns::vrrp_cnames:
- sonarr.main.unkin.net
- radarr.main.unkin.net
- lidarr.main.unkin.net
- readarr.main.unkin.net
- prowlarr.main.unkin.net
- nzbget.main.unkin.net
- git.unkin.net
- fafflix.unkin.net
- grafana.unkin.net
- dashboard.ceph.unkin.net
- mail-webadmin.main.unkin.net
- mail-in.main.unkin.net
- mail.main.unkin.net
- autoconfig.main.unkin.net
- autodiscover.main.unkin.net
profiles::haproxy::mappings:
fe_http:
ensure: present
mappings:
- 'au-syd1-pve.main.unkin.net be_ausyd1pve_web'
- 'au-syd1-pve-api.main.unkin.net be_ausyd1pve_api'
- 'sonarr.main.unkin.net be_sonarr'
- 'radarr.main.unkin.net be_radarr'
- 'lidarr.main.unkin.net be_lidarr'
- 'readarr.main.unkin.net be_readarr'
- 'prowlarr.main.unkin.net be_prowlarr'
- 'nzbget.main.unkin.net be_nzbget'
- 'jellyfin.main.unkin.net be_jellyfin'
- 'fafflix.unkin.net be_jellyfin'
- 'git.unkin.net be_gitea'
- 'grafana.unkin.net be_grafana'
- 'dashboard.ceph.unkin.net be_ceph_dashboard'
- 'mail-webadmin.main.unkin.net be_stalwart_webadmin'
- 'autoconfig.main.unkin.net be_stalwart_webadmin'
- 'autodiscovery.main.unkin.net be_stalwart_webadmin'
fe_https:
ensure: present
mappings:
- 'au-syd1-pve.main.unkin.net be_ausyd1pve_web'
- 'au-syd1-pve-api.main.unkin.net be_ausyd1pve_api'
- 'sonarr.main.unkin.net be_sonarr'
- 'radarr.main.unkin.net be_radarr'
- 'lidarr.main.unkin.net be_lidarr'
- 'readarr.main.unkin.net be_readarr'
- 'prowlarr.main.unkin.net be_prowlarr'
- 'nzbget.main.unkin.net be_nzbget'
- 'jellyfin.main.unkin.net be_jellyfin'
- 'fafflix.unkin.net be_jellyfin'
- 'git.unkin.net be_gitea'
- 'grafana.unkin.net be_grafana'
- 'dashboard.ceph.unkin.net be_ceph_dashboard'
- 'mail-webadmin.main.unkin.net be_stalwart_webadmin'
- 'autoconfig.main.unkin.net be_stalwart_webadmin'
- 'autodiscovery.main.unkin.net be_stalwart_webadmin'
profiles::haproxy::frontends:
fe_http:
options:
use_backend:
- "%[req.hdr(host),lower,map(/etc/haproxy/fe_http.map,be_default)]"
fe_https:
options:
acl:
- 'acl_ausyd1pve req.hdr(host) -i au-syd1-pve.main.unkin.net'
- 'acl_sonarr req.hdr(host) -i sonarr.main.unkin.net'
- 'acl_radarr req.hdr(host) -i radarr.main.unkin.net'
- 'acl_lidarr req.hdr(host) -i lidarr.main.unkin.net'
- 'acl_readarr req.hdr(host) -i readarr.main.unkin.net'
- 'acl_prowlarr req.hdr(host) -i prowlarr.main.unkin.net'
- 'acl_nzbget req.hdr(host) -i nzbget.main.unkin.net'
- 'acl_jellyfin req.hdr(host) -i jellyfin.main.unkin.net'
- 'acl_fafflix req.hdr(host) -i fafflix.unkin.net'
- 'acl_gitea req.hdr(host) -i git.unkin.net'
- 'acl_grafana req.hdr(host) -i grafana.unkin.net'
- 'acl_ceph_dashboard req.hdr(host) -i dashboard.ceph.unkin.net'
- 'acl_stalwart_webadmin req.hdr(host) -i mail-webadmin.main.unkin.net'
- 'acl_stalwart_webadmin req.hdr(host) -i autoconfig.main.unkin.net'
- 'acl_stalwart_webadmin req.hdr(host) -i autodiscovery.main.unkin.net'
- 'acl_internalsubnets src 198.18.0.0/16 10.10.12.0/24'
use_backend:
- "%[req.hdr(host),lower,map(/etc/haproxy/fe_https.map,be_default)]"
http-request:
- 'deny if { hdr_dom(host) -i au-syd1-pve.main.unkin.net } !acl_internalsubnets'
http-response:
- 'set-header X-Frame-Options DENY if acl_ausyd1pve'
- 'set-header X-Frame-Options DENY if acl_sonarr'
- 'set-header X-Frame-Options DENY if acl_radarr'
- 'set-header X-Frame-Options DENY if acl_lidarr'
- 'set-header X-Frame-Options DENY if acl_readarr'
- 'set-header X-Frame-Options DENY if acl_prowlarr'
- 'set-header X-Frame-Options DENY if acl_nzbget'
- 'set-header X-Frame-Options DENY if acl_jellyfin'
- 'set-header X-Frame-Options DENY if acl_fafflix'
- 'set-header X-Frame-Options DENY if acl_gitea'
- 'set-header X-Frame-Options DENY if acl_grafana'
- 'set-header X-Frame-Options DENY if acl_ceph_dashboard'
- 'set-header X-Frame-Options DENY if acl_stalwart_webadmin'
- 'set-header X-Content-Type-Options nosniff'
- 'set-header X-XSS-Protection 1;mode=block'
profiles::haproxy::backends:
be_ausyd1pve_web:
description: Backend for au-syd1 pve cluster (Web)
collect_exported: false # handled in custom function
options:
balance: roundrobin
option:
- httpchk GET /
- forwardfor
- http-keep-alive
- prefer-last-server
cookie: SRVNAME insert indirect nocache
http-reuse: always
http-request:
- set-header X-Forwarded-Port %[dst_port]
- add-header X-Forwarded-Proto https if { dst_port 443 }
redirect: 'scheme https if !{ ssl_fc }'
be_ausyd1pve_api:
description: Backend for au-syd1 pve cluster (API only)
collect_exported: false # handled in custom function
options:
balance: roundrobin
option:
- httpchk GET /
- forwardfor
- http-keep-alive
- prefer-last-server
http-reuse: always
http-request:
- set-header X-Forwarded-Port %[dst_port]
- add-header X-Forwarded-Proto https if { dst_port 443 }
redirect: 'scheme https if !{ ssl_fc }'
be_sonarr:
description: Backend for au-syd1 sonarr
collect_exported: false # handled in custom function
options:
balance: roundrobin
option:
- httpchk GET /consul/health
- forwardfor
- http-keep-alive
- prefer-last-server
cookie: SRVNAME insert indirect nocache
http-reuse: always
http-request:
- set-header X-Forwarded-Port %[dst_port]
- add-header X-Forwarded-Proto https if { dst_port 443 }
redirect: 'scheme https if !{ ssl_fc }'
be_radarr:
description: Backend for au-syd1 radarr
collect_exported: false # handled in custom function
options:
balance: roundrobin
option:
- httpchk GET /consul/health
- forwardfor
- http-keep-alive
- prefer-last-server
cookie: SRVNAME insert indirect nocache
http-reuse: always
http-request:
- set-header X-Forwarded-Port %[dst_port]
- add-header X-Forwarded-Proto https if { dst_port 443 }
redirect: 'scheme https if !{ ssl_fc }'
be_lidarr:
description: Backend for au-syd1 lidarr
collect_exported: false # handled in custom function
options:
balance: roundrobin
option:
- httpchk GET /consul/health
- forwardfor
- http-keep-alive
- prefer-last-server
cookie: SRVNAME insert indirect nocache
http-reuse: always
http-request:
- set-header X-Forwarded-Port %[dst_port]
- add-header X-Forwarded-Proto https if { dst_port 443 }
redirect: 'scheme https if !{ ssl_fc }'
be_readarr:
description: Backend for au-syd1 readarr
collect_exported: false # handled in custom function
options:
balance: roundrobin
option:
- httpchk GET /consul/health
- forwardfor
- http-keep-alive
- prefer-last-server
cookie: SRVNAME insert indirect nocache
http-reuse: always
http-request:
- set-header X-Forwarded-Port %[dst_port]
- add-header X-Forwarded-Proto https if { dst_port 443 }
redirect: 'scheme https if !{ ssl_fc }'
be_prowlarr:
description: Backend for au-syd1 prowlarr
collect_exported: false # handled in custom function
options:
balance: roundrobin
option:
- httpchk GET /consul/health
- forwardfor
- http-keep-alive
- prefer-last-server
cookie: SRVNAME insert indirect nocache
http-reuse: always
http-request:
- set-header X-Forwarded-Port %[dst_port]
- add-header X-Forwarded-Proto https if { dst_port 443 }
redirect: 'scheme https if !{ ssl_fc }'
be_nzbget:
description: Backend for au-syd1 nzbget
collect_exported: false # handled in custom function
options:
balance: roundrobin
option:
- httpchk GET /consul/health
- forwardfor
- http-keep-alive
- prefer-last-server
cookie: SRVNAME insert indirect nocache
http-reuse: always
http-request:
- set-header X-Forwarded-Port %[dst_port]
- add-header X-Forwarded-Proto https if { dst_port 443 }
redirect: 'scheme https if !{ ssl_fc }'
be_jellyfin:
description: Backend for au-syd1 jellyfin
collect_exported: false # handled in custom function
options:
balance: roundrobin
option:
- httpchk GET /
- forwardfor
- http-keep-alive
- prefer-last-server
cookie: SRVNAME insert indirect nocache
http-reuse: always
http-request:
- set-header X-Forwarded-Port %[dst_port]
- add-header X-Forwarded-Proto https if { dst_port 443 }
redirect: 'scheme https if !{ ssl_fc }'
be_gitea:
description: Backend for gitea cluster
collect_exported: false # handled in custom function
options:
balance: roundrobin
option:
- httpchk GET /
- forwardfor
- http-keep-alive
- prefer-last-server
cookie: SRVNAME insert indirect nocache
http-reuse: always
http-request:
- set-header X-Forwarded-Port %[dst_port]
- add-header X-Forwarded-Proto https if { dst_port 443 }
redirect: 'scheme https if !{ ssl_fc }'
stick-table: 'type ip size 200k expire 30m'
stick: 'on src'
be_grafana:
description: Backend for grafana nodes
collect_exported: false # handled in custom function
options:
balance: roundrobin
option:
- httpchk GET /
- forwardfor
- http-keep-alive
- prefer-last-server
cookie: SRVNAME insert indirect nocache
http-reuse: always
http-request:
- set-header X-Forwarded-Port %[dst_port]
- add-header X-Forwarded-Proto https if { dst_port 443 }
redirect: 'scheme https if !{ ssl_fc }'
stick-table: 'type ip size 200k expire 30m'
stick: 'on src'
be_ceph_dashboard:
description: Backend for Ceph Dashboard from Mgr instances
collect_exported: false # handled in custom function
options:
balance: roundrobin
option:
- httpchk GET /
- forwardfor
- http-keep-alive
- prefer-last-server
cookie: SRVNAME insert indirect nocache
http-reuse: always
http-check:
- expect status 200
http-request:
- set-header X-Forwarded-Port %[dst_port]
- add-header X-Forwarded-Proto https if { dst_port 9443 }
redirect: 'scheme https if !{ ssl_fc }'
stick-table: 'type ip size 200k expire 30m'
be_stalwart_webadmin:
description: Backend for Stalwart Webadmin
collect_exported: false # handled in custom function
options:
balance: roundrobin
option:
- httpchk GET /
- forwardfor
- http-keep-alive
- prefer-last-server
cookie: SRVNAME insert indirect nocache
http-reuse: always
http-check:
- expect status 200
http-request:
- set-header X-Forwarded-Port %[dst_port]
- add-header X-Forwarded-Proto https if { dst_port 9443 }
redirect: 'scheme https if !{ ssl_fc }'
stick-table: 'type ip size 200k expire 30m'
be_stalwart_imap:
description: Backend for Stalwart IMAP (STARTTLS)
collect_exported: false
options:
mode: tcp
balance: roundrobin
option:
- tcp-check
- prefer-last-server
stick-table: 'type ip size 200k expire 30m'
stick: 'on src'
tcp-check:
- connect port 143 send-proxy
- expect string "* OK"
- send "A001 STARTTLS\r\n"
- expect rstring "A001 (OK|2.0.0)"
be_stalwart_imaps:
description: Backend for Stalwart IMAPS (implicit TLS)
collect_exported: false
options:
mode: tcp
balance: roundrobin
option:
- tcp-check
- prefer-last-server
stick-table: 'type ip size 200k expire 30m'
stick: 'on src'
tcp-check:
- connect ssl send-proxy
- expect string "* OK"
be_stalwart_smtp:
description: Backend for Stalwart SMTP
collect_exported: false
options:
mode: tcp
balance: roundrobin
option:
- tcp-check
- prefer-last-server
stick-table: 'type ip size 200k expire 30m'
stick: 'on src'
tcp-check:
- connect port 25 send-proxy
- expect string "220 "
be_stalwart_submission:
description: Backend for Stalwart SMTP Submission
collect_exported: false
options:
mode: tcp
balance: roundrobin
option:
- tcp-check
- prefer-last-server
stick-table: 'type ip size 200k expire 30m'
stick: 'on src'
tcp-check:
- connect port 587 send-proxy
- expect string "220 "
profiles::haproxy::certlist::enabled: true
profiles::haproxy::certlist::certificates:
- /etc/pki/tls/letsencrypt/au-syd1-pve.main.unkin.net/fullchain_combined.pem
- /etc/pki/tls/letsencrypt/au-syd1-pve-api.main.unkin.net/fullchain_combined.pem
- /etc/pki/tls/letsencrypt/sonarr.main.unkin.net/fullchain_combined.pem
- /etc/pki/tls/letsencrypt/radarr.main.unkin.net/fullchain_combined.pem
- /etc/pki/tls/letsencrypt/lidarr.main.unkin.net/fullchain_combined.pem
- /etc/pki/tls/letsencrypt/readarr.main.unkin.net/fullchain_combined.pem
- /etc/pki/tls/letsencrypt/prowlarr.main.unkin.net/fullchain_combined.pem
- /etc/pki/tls/letsencrypt/nzbget.main.unkin.net/fullchain_combined.pem
- /etc/pki/tls/letsencrypt/fafflix.unkin.net/fullchain_combined.pem
- /etc/pki/tls/letsencrypt/git.unkin.net/fullchain_combined.pem
- /etc/pki/tls/letsencrypt/grafana.unkin.net/fullchain_combined.pem
- /etc/pki/tls/letsencrypt/dashboard.ceph.unkin.net/fullchain_combined.pem
- /etc/pki/tls/vault/certificate.pem
# additional altnames
profiles::pki::vault::alt_names:
- au-syd1-pve.main.unkin.net
- au-syd1-pve-api.main.unkin.net
- jellyfin.main.unkin.net
- mail-webadmin.main.unkin.net
# additional cnames
profiles::haproxy::dns::cnames:
- au-syd1-pve.main.unkin.net
- au-syd1-pve-api.main.unkin.net
# letsencrypt certificates
certbot::client::service: haproxy
certbot::client::domains:
- au-syd1-pve.main.unkin.net
- au-syd1-pve-api.main.unkin.net
- sonarr.main.unkin.net
- radarr.main.unkin.net
- lidarr.main.unkin.net
- readarr.main.unkin.net
- prowlarr.main.unkin.net
- nzbget.main.unkin.net
- fafflix.unkin.net
- git.unkin.net
- grafana.unkin.net
- dashboard.ceph.unkin.net

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.10
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.11
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.12
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.13
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.14
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.15
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.16
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.17
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.18
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.19
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.20
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.21
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.22
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.23
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.24
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,13 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.25
networking::routes:
default:
gateway: 198.18.13.254
profiles::haproxy::dns::vrrp_master: true
keepalived::vrrp_instance:
VI_250:
state: 'MASTER'
priority: 101

View File

@ -0,0 +1,12 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.26
networking::routes:
default:
gateway: 198.18.13.254
keepalived::vrrp_instance:
VI_250:
state: 'BACKUP'
priority: 100

View File

@ -0,0 +1,11 @@
---
profiles::cobbler::params::is_cobbler_master: true
networking::interfaces:
ens18:
ipaddress: 198.18.13.27
networking::routes:
default:
gateway: 198.18.13.254
interface: ens18
profiles::almalinux::base::remove_ens18: false

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.28
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.29
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.30
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,10 @@
---
networking::interfaces:
ens18:
ipaddress: 198.18.13.31
networking::routes:
default:
gateway: 198.18.13.254
interface: ens18
profiles::almalinux::base::remove_ens18: false

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.32
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.33
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.34
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.35
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.36
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.37
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.38
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.39
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.40
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.41
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.42
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.43
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.44
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.45
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -13,3 +13,9 @@ profiles::ssh::sign::principals:
profiles::puppet::puppetca::is_puppetca: true
profiles::puppet::puppetca::allow_subject_alt_names: true
networking::interfaces:
eth0:
ipaddress: 198.18.13.46
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,14 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.47
ens19:
ensure: present
family: inet
method: static
ipaddress: 10.18.15.47
netmask: 255.255.255.0
onboot: true
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.48
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.49
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,14 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.50
ens19:
ensure: present
family: inet
method: static
ipaddress: 10.18.15.50
netmask: 255.255.255.0
onboot: true
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,14 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.51
ens19:
ensure: present
family: inet
method: static
ipaddress: 10.18.15.51
netmask: 255.255.255.0
onboot: true
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,14 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.52
ens19:
ensure: present
family: inet
method: static
ipaddress: 10.18.15.52
netmask: 255.255.255.0
onboot: true
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,14 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.53
ens19:
ensure: present
family: inet
method: static
ipaddress: 10.18.15.53
netmask: 255.255.255.0
onboot: true
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.54
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.55
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.56
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,14 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.57
ens19:
ensure: present
family: inet
method: static
ipaddress: 10.18.15.57
netmask: 255.255.255.0
onboot: true
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,14 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.58
ens19:
ensure: present
family: inet
method: static
ipaddress: 10.18.15.58
netmask: 255.255.255.0
onboot: true
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.59
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.60
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.61
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.62
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.63
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.64
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.65
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.66
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.67
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.68
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -0,0 +1,7 @@
---
networking::interfaces:
eth0:
ipaddress: 198.18.13.69
networking::routes:
default:
gateway: 198.18.13.254

View File

@ -1,2 +0,0 @@
---
profiles::cobbler::params::is_cobbler_master: true

View File

@ -0,0 +1,12 @@
---
profiles::puppet::server::dns_alt_names:
- puppetca.main.unkin.net
- puppetca.service.consul
- puppetca.query.consul
- puppetca
profiles::puppet::puppetca::is_puppetca: false
profiles::puppet::puppetca::allow_subject_alt_names: true
hiera_exclude:
- networking

View File

@ -1,13 +1,5 @@
---
networking_loopback0_ip: 198.18.19.1 # management loopback
networking_loopback1_ip: 198.18.22.1 # ceph-cluster loopback
networking_loopback2_ip: 198.18.23.1 # ceph-public loopback
networking_1000_ip: 198.18.15.1 # 1gbe network
networking_2500_ip: 198.18.21.1 # 2.5gbe network
networking_1000_iface: enp2s0
networking_2500_iface: enp3s0
networking::interfaces:
"%{hiera('networking_1000_iface')}":
mac: d8:9e:f3:75:c3:60
"%{hiera('networking_2500_iface')}":
mac: 00:ac:d0:00:00:50
profiles::proxmox::params::pve_clusterinit_master: true
profiles::proxmox::params::pve_ceph_mon: true
profiles::proxmox::params::pve_ceph_mgr: true
profiles::proxmox::params::pve_ceph_osd: true

View File

@ -1,13 +1,4 @@
---
networking_loopback0_ip: 198.18.19.2 # management loopback
networking_loopback1_ip: 198.18.22.2 # ceph-cluster loopback
networking_loopback2_ip: 198.18.23.2 # ceph-public loopback
networking_1000_ip: 198.18.15.2 # 1gbe network
networking_2500_ip: 198.18.21.2 # 2.5gbe network
networking_1000_iface: enp2s0
networking_2500_iface: enp3s0
networking::interfaces:
"%{hiera('networking_1000_iface')}":
mac: d8:9e:f3:74:b6:08
"%{hiera('networking_2500_iface')}":
mac: 00:e0:4c:68:08:43
profiles::proxmox::params::pve_ceph_mon: true
profiles::proxmox::params::pve_ceph_mgr: true
profiles::proxmox::params::pve_ceph_osd: true

View File

@ -1,13 +1,4 @@
---
networking_loopback0_ip: 198.18.19.3 # management loopback
networking_loopback1_ip: 198.18.22.3 # ceph-cluster loopback
networking_loopback2_ip: 198.18.23.3 # ceph-public loopback
networking_1000_ip: 198.18.15.3 # 1gbe network
networking_2500_ip: 198.18.21.3 # 2.5gbe network
networking_1000_iface: enp2s0
networking_2500_iface: enp3s0
networking::interfaces:
"%{hiera('networking_1000_iface')}":
mac: b8:85:84:a3:25:c5
"%{hiera('networking_2500_iface')}":
mac: 00:e0:4c:68:07:82
profiles::proxmox::params::pve_ceph_mon: true
profiles::proxmox::params::pve_ceph_mgr: true
profiles::proxmox::params::pve_ceph_osd: true

View File

@ -1,13 +1,2 @@
---
networking_loopback0_ip: 198.18.19.4 # management loopback
networking_loopback1_ip: 198.18.22.4 # ceph-cluster loopback
networking_loopback2_ip: 198.18.23.4 # ceph-public loopback
networking_1000_ip: 198.18.15.4 # 1gbe network
networking_2500_ip: 198.18.21.4 # 2.5gbe network
networking_1000_iface: enp2s0
networking_2500_iface: enp3s0
networking::interfaces:
"%{hiera('networking_1000_iface')}":
mac: d8:9e:f3:75:d5:00
"%{hiera('networking_2500_iface')}":
mac: 00:ac:d0:00:00:43
profiles::proxmox::params::pve_ceph_osd: true

View File

@ -1,13 +1,2 @@
---
networking_loopback0_ip: 198.18.19.5 # management loopback
networking_loopback1_ip: 198.18.22.5 # ceph-cluster loopback
networking_loopback2_ip: 198.18.23.5 # ceph-public loopback
networking_1000_ip: 198.18.15.5 # 1gbe network
networking_2500_ip: 198.18.21.5 # 2.5gbe network
networking_1000_iface: enp1s0
networking_2500_iface: enp3s0
networking::interfaces:
"%{hiera('networking_1000_iface')}":
mac: 54:bf:64:a0:08:64
"%{hiera('networking_2500_iface')}":
mac: 00:e0:4c:68:07:79
profiles::proxmox::params::pve_ceph_osd: true

View File

@ -1,13 +1,2 @@
---
networking_loopback0_ip: 198.18.19.6 # management loopback
networking_loopback1_ip: 198.18.22.6 # ceph-cluster loopback
networking_loopback2_ip: 198.18.23.6 # ceph-public loopback
networking_1000_ip: 198.18.15.6 # 1gbe network
networking_2500_ip: 198.18.21.6 # 2.5gbe network
networking_1000_iface: enp2s0
networking_2500_iface: enp3s0
networking::interfaces:
"%{hiera('networking_1000_iface')}":
mac: d8:9e:f3:75:10:8d
"%{hiera('networking_2500_iface')}":
mac: 00:ac:d0:00:00:53
profiles::proxmox::params::pve_ceph_osd: true

View File

@ -1,13 +1,2 @@
---
networking_loopback0_ip: 198.18.19.7 # management loopback
networking_loopback1_ip: 198.18.22.7 # ceph-cluster loopback
networking_loopback2_ip: 198.18.23.7 # ceph-public loopback
networking_1000_ip: 198.18.15.7 # 1gbe network
networking_2500_ip: 198.18.21.7 # 2.5gbe network
networking_1000_iface: enp2s0
networking_2500_iface: enp3s0
networking::interfaces:
"%{hiera('networking_1000_iface')}":
mac: d8:9e:f3:74:b4:27
"%{hiera('networking_2500_iface')}":
mac: 00:ac:d0:00:00:5b
profiles::proxmox::params::pve_ceph_osd: true

View File

@ -1,13 +1,2 @@
---
networking_loopback0_ip: 198.18.19.8 # management loopback
networking_loopback1_ip: 198.18.22.8 # ceph-cluster loopback
networking_loopback2_ip: 198.18.23.8 # ceph-public loopback
networking_1000_ip: 198.18.15.8 # 1gbe network
networking_2500_ip: 198.18.21.8 # 2.5gbe network
networking_1000_iface: enp2s0
networking_2500_iface: enp3s0
networking::interfaces:
"%{hiera('networking_1000_iface')}":
mac: d8:9e:f3:75:06:18
"%{hiera('networking_2500_iface')}":
mac: 00:e0:4c:68:08:4b
profiles::proxmox::params::pve_ceph_osd: true

View File

@ -1,18 +0,0 @@
---
networking_loopback0_ip: 198.18.19.9 # management loopback
networking_loopback1_ip: 198.18.22.9 # ceph-cluster loopback
networking_loopback2_ip: 198.18.23.9 # ceph-public loopback
networking_br10_ip: 198.18.25.254
networking::interfaces:
enp2s0:
mac: 70:b5:e8:38:e9:8d
ipaddress: 198.18.15.9
gateway: 198.18.15.254
enp3s0:
mac: 00:e0:4c:68:0f:5d
ipaddress: 198.18.21.9
#zfs::zpools:
# fastpool:
# ensure: present
# disk: /dev/nvme0n1

View File

@ -1,13 +0,0 @@
---
networking_loopback0_ip: 198.18.19.10 # management loopback
networking_loopback1_ip: 198.18.22.10 # ceph-cluster loopback
networking_loopback2_ip: 198.18.23.10 # ceph-public loopback
networking_br10_ip: 198.18.26.254
networking::interfaces:
enp2s0:
mac: 70:b5:e8:38:e9:37
ipaddress: 198.18.15.10
gateway: 198.18.15.254
enp3s0:
mac: 00:e0:4c:68:0f:de
ipaddress: 198.18.21.10

View File

@ -1,13 +0,0 @@
---
networking_loopback0_ip: 198.18.19.11 # management loopback
networking_loopback1_ip: 198.18.22.11 # ceph-cluster loopback
networking_loopback2_ip: 198.18.23.11 # ceph-public loopback
networking_br10_ip: 198.18.27.254
networking::interfaces:
enp2s0:
mac: 70:b5:e8:38:e9:0f
ipaddress: 198.18.15.11
gateway: 198.18.15.254
enp3s0:
mac: 00:e0:4c:68:0f:55
ipaddress: 198.18.21.11

View File

@ -1,13 +0,0 @@
---
networking_loopback0_ip: 198.18.19.12 # management loopback
networking_loopback1_ip: 198.18.22.12 # ceph-cluster loopback
networking_loopback2_ip: 198.18.23.12 # ceph-public loopback
networking_br10_ip: 198.18.28.254
networking::interfaces:
enp2s0:
mac: 70:b5:e8:4f:05:1e
ipaddress: 198.18.15.12
gateway: 198.18.15.254
enp3s0:
mac: 00:e0:4c:68:0f:e5
ipaddress: 198.18.21.12

View File

@ -1,13 +0,0 @@
---
networking_loopback0_ip: 198.18.19.13 # management loopback
networking_loopback1_ip: 198.18.22.13 # ceph-cluster loopback
networking_loopback2_ip: 198.18.23.13 # ceph-public loopback
networking_br10_ip: 198.18.29.254
networking::interfaces:
enp2s0:
mac: 70:b5:e8:4f:04:b0
ipaddress: 198.18.15.13
gateway: 198.18.15.254
enp3s0:
mac: 00:e0:4c:68:0f:36
ipaddress: 198.18.21.13

View File

@ -1,10 +1,2 @@
# hieradata/os/AlmaLinux/AlmaLinux8.yaml
---
crypto_policies::policy: 'DEFAULT'
profiles::packages::include:
network-scripts: {}
profiles::yum::global::repos:
powertools:
ensure: present

View File

@ -1,7 +1,2 @@
# hieradata/os/AlmaLinux/AlmaLinux9.yaml
---
crypto_policies::policy: 'DEFAULT:SHA1'
profiles::yum::global::repos:
crb:
ensure: present

View File

@ -3,15 +3,14 @@
profiles::firewall::firewalld::ensure_package: 'absent'
profiles::firewall::firewalld::ensure_service: 'stopped'
profiles::firewall::firewalld::enable_service: false
profiles::puppet::agent::version: '7.37.2'
profiles::puppet::agent::openvox_enable: true
profiles::puppet::agent::puppet_version: '7.26.0'
hiera_include:
- profiles::almalinux::base
profiles::packages::include:
crypto-policies-scripts: {}
lzo: {}
network-scripts: {}
policycoreutils: {}
unar: {}
xz: {}
@ -23,248 +22,56 @@ profiles::yum::global::repos:
name: baseos
descr: baseos repository
target: /etc/yum.repos.d/baseos.repo
baseurl: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/almalinux/%{facts.os.release.full}/BaseOS/%{facts.os.architecture}/os/
gpgkey: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/almalinux/%{facts.os.release.full}/BaseOS/%{facts.os.architecture}/os/RPM-GPG-KEY-AlmaLinux-%{facts.os.release.major}
baseurl: https://edgecache.query.consul/almalinux/%{facts.os.release.full}/BaseOS/%{facts.os.architecture}/os
gpgkey: https://edgecache.query.consul/almalinux/RPM-GPG-KEY-AlmaLinux-%{facts.os.release.major}
mirrorlist: absent
extras:
name: extras
descr: extras repository
target: /etc/yum.repos.d/extras.repo
baseurl: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/almalinux/%{facts.os.release.full}/extras/%{facts.os.architecture}/os/
gpgkey: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/almalinux/%{facts.os.release.full}/extras/%{facts.os.architecture}/os/RPM-GPG-KEY-AlmaLinux-%{facts.os.release.major}
baseurl: https://edgecache.query.consul/almalinux/%{facts.os.release.full}/extras/%{facts.os.architecture}/os
gpgkey: https://edgecache.query.consul/almalinux/RPM-GPG-KEY-AlmaLinux-%{facts.os.release.major}
mirrorlist: absent
appstream:
name: appstream
descr: appstream repository
target: /etc/yum.repos.d/appstream.repo
baseurl: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/almalinux/%{facts.os.release.full}/AppStream/%{facts.os.architecture}/os/
gpgkey: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/almalinux/%{facts.os.release.full}/AppStream/%{facts.os.architecture}/os/RPM-GPG-KEY-AlmaLinux-%{facts.os.release.major}
baseurl: https://edgecache.query.consul/almalinux/%{facts.os.release.full}/AppStream/%{facts.os.architecture}/os
gpgkey: https://edgecache.query.consul/almalinux/RPM-GPG-KEY-AlmaLinux-%{facts.os.release.major}
mirrorlist: absent
powertools:
name: powertools
descr: powertools repository
target: /etc/yum.repos.d/powertools.repo
baseurl: https://edgecache.query.consul/almalinux/%{facts.os.release.full}/PowerTools/%{facts.os.architecture}/os
gpgkey: https://edgecache.query.consul/almalinux/RPM-GPG-KEY-AlmaLinux-%{facts.os.release.major}
mirrorlist: absent
highavailability:
name: highavailability
descr: highavailability repository
target: /etc/yum.repos.d/highavailability.repo
baseurl: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/almalinux/%{facts.os.release.full}/HighAvailability/%{facts.os.architecture}/os/
gpgkey: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/almalinux/%{facts.os.release.full}/HighAvailability/%{facts.os.architecture}/os/RPM-GPG-KEY-AlmaLinux-%{facts.os.release.major}
mirrorlist: absent
crb:
ensure: absent
name: crb
descr: crb repository
target: /etc/yum.repos.d/crb.repo
baseurl: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/almalinux/%{facts.os.release.full}/CRB/%{facts.os.architecture}/os/
gpgkey: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/almalinux/%{facts.os.release.full}/CRB/%{facts.os.architecture}/os/RPM-GPG-KEY-AlmaLinux-%{facts.os.release.major}
mirrorlist: absent
powertools:
ensure: absent
name: powertools
descr: powertools repository
target: /etc/yum.repos.d/powertools.repo
baseurl: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/almalinux/%{facts.os.release.full}/PowerTools/%{facts.os.architecture}/os/
gpgkey: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/almalinux/%{facts.os.release.full}/PowerTools/%{facts.os.architecture}/os/RPM-GPG-KEY-AlmaLinux-%{facts.os.release.major}
baseurl: https://edgecache.query.consul/almalinux/%{facts.os.release.full}/HighAvailability/%{facts.os.architecture}/os
gpgkey: https://edgecache.query.consul/almalinux/RPM-GPG-KEY-AlmaLinux-%{facts.os.release.major}
mirrorlist: absent
epel:
name: epel
descr: epel repository
target: /etc/yum.repos.d/epel.repo
baseurl: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/epel/%{facts.os.release.major}/Everything/%{facts.os.architecture}
gpgkey: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/epel/RPM-GPG-KEY-EPEL-%{facts.os.release.major}
baseurl: https://edgecache.query.consul/epel/%{facts.os.release.major}/Everything/%{facts.os.architecture}
gpgkey: https://edgecache.query.consul/epel/RPM-GPG-KEY-EPEL-%{facts.os.release.major}
mirrorlist: absent
unkinben:
name: unkinben
descr: unkinben repository
target: /etc/yum.repos.d/unkin.repo
baseurl: https://git.query.consul/api/packages/unkinben/rpm/el%{facts.os.release.major}
gpgkey: https://git.query.consul/api/packages/unkinben/rpm/repository.key
gpgcheck: false
mirrorlist: absent
# Additional repositories - default to absent, roles can override with ensure: present
# FRRouting repositories
frr-extras:
ensure: absent
name: frr-extras
descr: frr-extras repository
target: /etc/yum.repos.d/frr-extras.repo
baseurl: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/frr/el%{facts.os.release.major}/extras
gpgcheck: false
mirrorlist: absent
frr-stable:
ensure: absent
name: frr-stable
descr: frr-stable repository
target: /etc/yum.repos.d/frr-stable.repo
baseurl: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/frr/el%{facts.os.release.major}/frr
gpgcheck: false
mirrorlist: absent
# PostgreSQL repositories
postgresql-15:
ensure: absent
name: postgresql-15
descr: postgresql-15 repository
target: /etc/yum.repos.d/postgresql.repo
baseurl: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/postgresql/15/redhat/rhel-%{facts.os.release.major}-%{facts.os.architecture}
gpgkey: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/postgresql/keys/PGDG-RPM-GPG-KEY-RHEL
postgresql-17:
ensure: absent
name: postgresql-17
descr: postgresql-17 repository
target: /etc/yum.repos.d/postgresql.repo
baseurl: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/postgresql/17/redhat/rhel-%{facts.os.release.major}-%{facts.os.architecture}
gpgkey: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/postgresql/keys/PGDG-RPM-GPG-KEY-RHEL
postgresql-common:
ensure: absent
name: postgresql-common
descr: postgresql-common repository
target: /etc/yum.repos.d/postgresql.repo
baseurl: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/postgresql/common/redhat/rhel-%{facts.os.release.major}-%{facts.os.architecture}
gpgkey: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/postgresql/keys/PGDG-RPM-GPG-KEY-RHEL
# Ceph repositories
ceph:
ensure: absent
name: ceph
descr: ceph repository
target: /etc/yum.repos.d/ceph.repo
baseurl: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/ceph-reef/el%{facts.os.release.major}/%{facts.os.architecture}
gpgcheck: false
mirrorlist: absent
ceph-noarch:
ensure: absent
name: ceph-noarch
descr: ceph noarch repository
target: /etc/yum.repos.d/ceph.repo
baseurl: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/ceph-reef/el%{facts.os.release.major}/noarch
gpgcheck: false
mirrorlist: absent
# Rancher RKE2 repositories
rancher-rke2-common-latest:
ensure: absent
name: rancher-rke2-common
descr: rancher-rke2-common repository
target: /etc/yum.repos.d/rancher-rke2-common.repo
baseurl: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/rke2/rke2/latest/common/centos/%{facts.os.release.major}/noarch
gpgkey: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/rke2/public.key
gpgcheck: 1
mirrorlist: absent
rancher-rke2-1-33-latest:
ensure: absent
name: rancher-rke2-1.33-latest
descr: rancher-rke2-1.33-latest repository
target: /etc/yum.repos.d/rancher-rke2.repo
baseurl: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/rke2/rke2/latest/1.33/centos/%{facts.os.release.major}/%{facts.os.architecture}
gpgkey: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/rke2/public.key
gpgcheck: 1
mirrorlist: absent
# CentOS repositories for legacy systems
centos_8_advanced_virtualization:
ensure: absent
name: centos_8_advanced_virtualization
descr: centos_8_advanced_virtualization repository
target: /etc/yum.repos.d/centos.repo
baseurl: https://edgecache.query.consul/centos/8/virt/x86_64/advanced-virtualization
gpgkey: http://edgecache.query.consul/centos/RPM-GPG-KEY-CentOS-SIG-Virtualization
gpgcheck: 1
mirrorlist: absent
centos_8_ceph_pacific:
ensure: absent
name: centos_8_ceph_pacific
descr: centos_8_ceph_pacific repository
target: /etc/yum.repos.d/centos.repo
baseurl: https://edgecache.query.consul/centos/8/storage/x86_64/ceph-pacific
gpgkey: http://edgecache.query.consul/centos/RPM-GPG-KEY-CentOS-SIG-Storage
gpgcheck: 1
mirrorlist: absent
centos_8_rabbitmq_38:
ensure: absent
name: centos_8_rabbitmq_38
descr: centos_8_rabbitmq_38 repository
target: /etc/yum.repos.d/centos.repo
baseurl: https://edgecache.query.consul/centos/8/messaging/x86_64/rabbitmq-38
gpgkey: http://edgecache.query.consul/centos/RPM-GPG-KEY-CentOS-SIG-Messaging
gpgcheck: 1
mirrorlist: absent
centos_8_nfv_openvswitch:
ensure: absent
name: centos_8_nfv_openvswitch
descr: centos_8_nfv_openvswitch repository
target: /etc/yum.repos.d/centos.repo
baseurl: https://edgecache.query.consul/centos/8/nfv/x86_64/openvswitch-2
gpgkey: http://edgecache.query.consul/centos/RPM-GPG-KEY-CentOS-SIG-NFV
gpgcheck: 1
mirrorlist: absent
centos_8_openstack_xena:
ensure: absent
name: centos_8_openstack_xena
descr: centos_8_openstack_xena repository
target: /etc/yum.repos.d/centos.repo
baseurl: https://edgecache.query.consul/centos/8/cloud/x86_64/openstack-xena
gpgkey: http://edgecache.query.consul/centos/RPM-GPG-KEY-CentOS-SIG-Cloud
gpgcheck: 1
mirrorlist: absent
centos_8_opstools:
ensure: absent
name: centos_8_opstools
descr: centos_8_opstools repository
target: /etc/yum.repos.d/centos.repo
baseurl: https://edgecache.query.consul/centos/8/opstools/x86_64/collectd-5
gpgkey: http://edgecache.query.consul/centos/RPM-GPG-KEY-CentOS-SIG-OpsTools
gpgcheck: 1
mirrorlist: absent
centos_8_ovirt45:
ensure: absent
name: centos_8_ovirt45
descr: centos_8_ovirt45 repository
target: /etc/yum.repos.d/centos.repo
baseurl: https://edgecache.query.consul/centos/8/virt/x86_64/ovirt-45
gpgkey: http://edgecache.query.consul/centos/RPM-GPG-KEY-CentOS-SIG-Virtualization
gpgcheck: 1
mirrorlist: absent
centos_8_stream_gluster10:
ensure: absent
name: centos_8_stream_gluster10
descr: centos_8_stream_gluster10 repository
target: /etc/yum.repos.d/centos.repo
baseurl: https://edgecache.query.consul/centos/8-stream/storage/x86_64/gluster-10
gpgkey: http://edgecache.query.consul/centos/RPM-GPG-KEY-CentOS-SIG-Storage
gpgcheck: 1
mirrorlist: absent
# Additional repositories
zfs-kmod:
ensure: absent
name: zfs-kmod
descr: zfs-kmod repository
target: /etc/yum.repos.d/zfs.repo
baseurl: https://artifactapi.k8s.syd1.au.unkin.net/api/v1/remote/zfs/epel/%{facts.os.release.major}/kmod/%{facts.os.architecture}/
gpgcheck: false
mirrorlist: absent
rpmfusion-free:
ensure: absent
name: rpmfusion-free
descr: rpmfusion-free repository
target: /etc/yum.repos.d/rpmfusion-free.repo
baseurl: https://packagerepo.service.consul/rpmfusion-free-el%{facts.os.release.major}-%{facts.os.architecture}/
gpgkey: https://packagerepo.service.consul/rpmfusion-free-el%{facts.os.release.major}-%{facts.os.architecture}/repodata/repomd.xml.key
gpgcheck: 1
mirrorlist: absent
rpmfusion-nonfree:
ensure: absent
name: rpmfusion-nonfree
descr: rpmfusion-nonfree repository
target: /etc/yum.repos.d/rpmfusion-nonfree.repo
baseurl: https://packagerepo.service.consul/rpmfusion-nonfree-el%{facts.os.release.major}-%{facts.os.architecture}/
gpgkey: https://packagerepo.service.consul/rpmfusion-nonfree-el%{facts.os.release.major}-%{facts.os.architecture}/repodata/repomd.xml.key
gpgcheck: 1
puppet:
name: puppet
descr: puppet repository
target: /etc/yum.repos.d/puppet.repo
baseurl: https://yum.puppet.com/puppet7/el/%{facts.os.release.major}/%{facts.os.architecture}
gpgkey: https://yum.puppet.com/RPM-GPG-KEY-puppet-20250406
mirrorlist: absent
unkin:
name: unkin
descr: unkin repository
target: /etc/yum.repos.d/unkin.repo
baseurl: https://git.query.consul/api/packages/unkin/rpm/almalinux/el%{facts.os.release.major}
gpgkey: https://git.query.consul/api/packages/unkin/rpm/repository.key
baseurl: https://git.query.consul/api/packages/unkinben/rpm/el%{facts.os.release.major}
gpgkey: https://git.query.consul/api/packages/unkinben/rpm/repository.key
gpgcheck: false
mirrorlist: absent

View File

@ -11,4 +11,4 @@ profiles::apt::components:
- main
- non-free
profiles::puppet::agent::version: '7.25.0-1bullseye'
profiles::puppet::agent::puppet_version: '7.25.0-1bullseye'

View File

@ -12,4 +12,4 @@ profiles::apt::components:
- non-free
- non-free-firmware
profiles::puppet::agent::version: 'latest'
profiles::puppet::agent::puppet_version: 'latest'

View File

@ -13,7 +13,3 @@ profiles::packages::include:
lm-sensors::package: lm-sensors
networking::nwmgr_dns_none: false
consul::install_method: 'url'
consul::manage_repo: false
consul::bin_dir: /usr/local/bin

View File

@ -1 +0,0 @@
profiles::jupyter::jupyterhub::ldap_bind_pass: ENC[PKCS7,MIIBmQYJKoZIhvcNAQcDoIIBijCCAYYCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAJspN3e2WzA0uZaLgFZ0Ewqii9dY0tTgbirsW70M2VZtLY+s+C6HE8ZZUtpfnRsFwUUhOj7s25X9xVOZNTpZIGPyfx9MWlSyFw2RFuXSEwaydf1DcBbg8261YrTTysA4Jsa1L4DLsX55q+XJUyeUbimVQkIacVIvzTdnZCBKnVNUh3U2PNAmV7SOL2KH8Jpbfs/EQfBt8XuGMCg3I/4RDyoNERqthW6W2KiMX2Gmd8iQ5+W9udH0lEAMx415oyImmN+dEuThcx9FGMi8BWYtnxH96yWafpT5qltwW6EVzIGWuLhiD1LcWYc5RB8jc3DhbeouChpKsN6c4EHoKt3aWsTBcBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBC8jcnqilJgY1/AnHWHfX4bgDCi2a3Rj43Z0dgfB5HaHdpfked3Cx+u94r2S5+Cg3QogU1AIF04rjzOL+bD2HdaMfo=]

View File

@ -1,74 +0,0 @@
---
profiles::packages::include:
python3.12: {}
python3.12-pip: {}
hiera_include:
- docker
- profiles::nginx::simpleproxy
# manage docker
docker::version: latest
docker::curl_ensure: false
docker::root_dir: /data/docker
# manage a simple nginx reverse proxy
profiles::nginx::simpleproxy::nginx_vhost: 'jupyterhub.query.consul'
profiles::nginx::simpleproxy::nginx_aliases:
- jupyterhub.service.consul
- jupyterhub.query.consul
- "jupyterhub.service.%{facts.country}-%{facts.region}.consul"
profiles::nginx::simpleproxy::proxy_host: 127.0.0.1
profiles::nginx::simpleproxy::proxy_port: 8000
profiles::nginx::simpleproxy::proxy_path: '/'
profiles::nginx::simpleproxy::use_default_location: false
nginx::client_max_body_size: 20M
profiles::nginx::simpleproxy::locations:
# authorised access from external
default:
ensure: 'present'
server: "%{lookup('profiles::nginx::simpleproxy::nginx_vhost')}"
ssl_only: true
location: '/'
proxy: "http://%{lookup('profiles::nginx::simpleproxy::proxy_host')}:%{lookup('profiles::nginx::simpleproxy::proxy_port')}"
proxy_set_header:
- 'Host $host'
- 'X-Real-IP $remote_addr'
- 'X-Forwarded-For $proxy_add_x_forwarded_for'
- 'X-Forwarded-Host $host'
- 'X-Forwarded-Proto $scheme'
- 'Upgrade $http_upgrade'
- 'Connection $http_connection'
- 'X-Scheme $scheme'
proxy_redirect: 'off'
proxy_http_version: '1.1'
proxy_buffering: 'off'
# additional altnames
profiles::pki::vault::alt_names:
- jupyterhub.service.consul
- jupyterhub.query.consul
- "jupyterhub.service.%{facts.country}-%{facts.region}.consul"
# configure consul service
consul::services:
jupyterhub:
service_name: 'jupyterhub'
tags:
- 'jupyterhub'
address: "%{facts.networking.ip}"
port: 443
checks:
- id: 'jupyterhub_http_check'
name: 'jupyterhub HTTP Check'
http: "https://%{facts.networking.fqdn}"
method: 'GET'
tls_skip_verify: true
interval: '10s'
timeout: '1s'
profiles::consul::client::node_rules:
- resource: service
segment: jupyterhub
disposition: write

View File

@ -3,8 +3,13 @@ hiera_include:
- profiles::nginx::simpleproxy
profiles::yum::global::repos:
ceph:
ensure: present
ceph-reef:
name: ceph-reef
descr: ceph reef repository
target: /etc/yum.repos.d/ceph-reef.repo
baseurl: https://edgecache.query.consul/ceph/yum/el%{facts.os.release.major}/%{facts.os.architecture}
gpgcheck: 0
mirrorlist: absent
profiles::ceph::client::keyrings:
media:
@ -64,7 +69,8 @@ profiles::nginx::simpleproxy::locations:
location_allow:
- 127.0.0.1
- "%{facts.networking.ip}"
- 198.18.24.0/24
- 198.18.13.25
- 198.18.13.26
location_deny:
- all
# authorised access from external

View File

@ -2,12 +2,6 @@
hiera_include:
- jellyfin
profiles::packages::include:
intel-media-driver: {}
libva-intel-driver: {}
libva-intel-hybrid-driver: {}
intel-mediasdk: {}
# manage jellyfin
jellyfin::params::service_enable: true
@ -54,8 +48,16 @@ profiles::consul::client::node_rules:
profiles::yum::global::repos:
rpmfusion-free:
ensure: present
name: rpmfusion-free
descr: rpmfusion-free repository
target: /etc/yum.repos.d/rpmfusion.repo
baseurl: https://download1.rpmfusion.org/free/el/updates/%{facts.os.release.major}/%{facts.os.architecture}
gpgkey: https://download1.rpmfusion.org/free/el/RPM-GPG-KEY-rpmfusion-free-el-%{facts.os.release.major}
mirrorlist: absent
rpmfusion-nonfree:
ensure: present
unkinben:
ensure: present
name: rpmfusion-nonfree
descr: rpmfusion-nonfree repository
target: /etc/yum.repos.d/rpmfusion.repo
baseurl: https://download1.rpmfusion.org/nonfree/el/updates/%{facts.os.release.major}/%{facts.os.architecture}
gpgkey: https://download1.rpmfusion.org/nonfree/el/RPM-GPG-KEY-rpmfusion-nonfree-el-%{facts.os.release.major}
mirrorlist: absent

View File

@ -2,7 +2,6 @@
hiera_include:
- lidarr
- profiles::nginx::ldapauth
- profiles::media::lidarr
# manage lidarr
lidarr::params::user: lidarr

Some files were not shown because too many files have changed in this diff Show More