Compare commits
54 Commits
benvin/exp...develop
| SHA1 |
|---|
| 383bbb0507 |
| 6f51bffeaa |
| 57870658b5 |
| f8caa71f34 |
| a2c56c9e46 |
| 40d8e924ee |
| 0aec795aec |
| 9854403b02 |
| 6400c89853 |
| 9eff241003 |
| 35614060bd |
| 1b0fd10fd7 |
| 2c9fb3d86a |
| 559c453906 |
| 5b0365c096 |
| 1e7dfb9d9d |
| 9dd74013ea |
| 92a48b4113 |
| 78adef0eee |
| 81f289a185 |
| a2a8edb731 |
| e129d1cf7a |
| e95a59b88a |
| 8bed80eac8 |
| 5ba483c68a |
| 766233c3e5 |
| 98b866fce7 |
| e724326d43 |
| d8b354558d |
| fac90c66db |
| efbbb6bcb1 |
| 16e654fdd7 |
| 66d8815e16 |
| a9c959d924 |
| b224cfb516 |
| 4c9204858e |
| 571a9b25a7 |
| 762f415d2d |
| 4e77fb7ee7 |
| 6e4bc9fbc7 |
| 012e842d7d |
| 98a433d366 |
| fcd1b049d6 |
| 938a6ac990 |
| 0665873dc8 |
| ae4eb3a5eb |
| 65fb52da55 |
| d97cbfd570 |
| 8f5d102945 |
| 62aade77ff |
| 83bb3e1085 |
| 92728047e7 |
| f4af5e7b64 |
| 308d97d783 |
@@ -19,6 +19,7 @@ mod 'puppetlabs-haproxy', '8.2.0'
 mod 'puppetlabs-java', '11.1.0'
 mod 'puppetlabs-reboot', '5.1.0'
 mod 'puppetlabs-docker', '10.2.0'
+mod 'puppetlabs-mailalias_core', '1.2.0'
 
 # puppet
 mod 'puppet-python', '7.4.0'

@@ -43,6 +44,8 @@ mod 'puppet-letsencrypt', '11.1.0'
 mod 'puppet-rundeck', '9.2.0'
 mod 'puppet-redis', '11.1.0'
 mod 'puppet-nodejs', '11.0.0'
+mod 'puppet-postfix', '5.1.0'
+mod 'puppet-alternatives', '6.0.0'
 
 # other
 mod 'saz-sudo', '9.0.2'

@@ -60,6 +63,7 @@ mod 'rehan-mkdir', '2.0.0'
 mod 'tailoredautomation-patroni', '2.0.0'
 mod 'ssm-crypto_policies', '0.3.3'
 mod 'thias-sysctl', '1.0.8'
+mod 'cirrax-dovecot', '1.3.3'
 
 mod 'bind',
   :git => 'https://git.service.au-syd1.consul/unkinben/puppet-bind.git',
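The Puppetfile changes above can be sanity-checked before merging; a minimal sketch, assuming the control repo is deployed with r10k:

```sh
# Confirm the Puppetfile parses cleanly.
r10k puppetfile check
# Resolve and install the declared module versions into ./modules.
r10k puppetfile install --verbose
```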
@@ -29,3 +29,21 @@ these steps are required when adding additional puppet masters, as the subject a
 sudo systemctl start puppetserver
 sudo cp /root/current_crl.pem /etc/puppetlabs/puppet/ssl/crl.pem
 
+
+## troubleshooting
+
+### Issue 1:
+
+[sysadmin@ausyd1nxvm2056 ~]$ sudo puppet agent -t
+Error: The CRL issued by 'CN=Puppet CA: prodinf01n01.main.unkin.net' is missing
+
+Find another puppetserver that IS working, copy the `/etc/puppetlabs/puppet/ssl/crl.pem` to this host, run puppet again.
+
+
+### Issue 2:
+
+[sysadmin@ausyd1nxvm2097 ~]$ sudo puppet agent -t
+Error: Failed to parse CA certificates as PEM
+
+The puppet-agents CA cert `/etc/puppetlabs/puppet/ssl/certs/ca.pem` is empty or missing. Grab it from any other host. Run puppet again.
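Both fixes boil down to copying known-good PKI material from a healthy puppetserver; a minimal sketch, where `<working-host>` is a placeholder for any server whose agents still run cleanly:

```sh
# Issue 1: replace the missing or stale CRL.
scp <working-host>:/etc/puppetlabs/puppet/ssl/crl.pem /tmp/crl.pem
sudo cp /tmp/crl.pem /etc/puppetlabs/puppet/ssl/crl.pem

# Issue 2: replace the empty or missing CA certificate.
scp <working-host>:/etc/puppetlabs/puppet/ssl/certs/ca.pem /tmp/ca.pem
sudo cp /tmp/ca.pem /etc/puppetlabs/puppet/ssl/certs/ca.pem

# Re-run the agent to confirm.
sudo puppet agent -t
```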
@@ -1,6 +1,6 @@
 ---
 profiles::accounts::sysadmin::password: ENC[PKCS7,MIIBqQYJKoZIhvcNAQcDoIIBmjCCAZYCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAoS7GyofFaXBNTWU+GtSiz4eCX/9j/sh3fDDRgOgNv1qpcQ87ZlTTenbHo9lxeURxKQ2HVVt7IsrBo/SC/WgipAKnliRkkIvo7nfAs+i+kEE8wakjAs0DcB4mhqtIZRuBkLG2Nay//DcG6cltVkbKEEKmKLMkDFZgTWreOZal8nDljpVe1S8QwtwP4/6hKTef5xsOnrisxuffWTXvwYJhj/VXrjdoH7EhtHGLybzEalglkVHEGft/WrrD/0bwJpmR0RegWI4HTsSvGiHgvf5DZJx8fXPZNPnicGtlfA9ccQPuVo17bY4Qf/WIc1A8Ssv4kHSbNIYJKRymI3UFb0Z4wzBsBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBBBxDLb6pCGbittkcX6asd/gEBmMcUNupDjSECq5H09YA70eVwWWe0fBqxTxrr2cXCXtRKFvOk8SJmL0xHAWodaLN9+krTWHJcWbAK8JXEPC7rn]
-profiles::accounts::root::password: ENC[PKCS7,MIIBeQYJKoZIhvcNAQcDoIIBajCCAWYCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAM79PRxeAZHrDcSm4eSFqU94/LjuSbdUmJWivX/Pa8GumoW2e/PT9nGHW3p98zHthMgCglk52PECQ+TBKjxr+9dTyNK5ePG6ZJEqSHNRqsPGm+kfQj/hlTmq8vOBaFM5GapD1iTHs5JFbGngI56swKBEVXW9+Z37BjQb2xJuyLsu5Bo/tA0BaOKuCtjq1a6E38bOX+nJ+YF1uZgV9ofAEh1YvkcTmnEWYXFRPWd7AaNcWn03V2pfhGqxc+xydak620I47P+FE+qIY72+aQ6tmLU3X9vyA1HLF2Tv572l4a2i+YIk6nAgQdi+hQKznqNL9M9YV+s1AcmcKLT7cfLrjsjA8BgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBCMWrdCWBQgtW3NOEpERwP+gBA3KDiqe4pQq6DwRfsEXQNZ]
+profiles::accounts::root::password: ENC[PKCS7,MIIB2gYJKoZIhvcNAQcDoIIByzCCAccCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAIgzGQLoHrm7JSnWG4vdAtxSuETmnqbV7kUsQS8WCRUwFGenDFkps+OGMnOGEHLzMJzXihLfgfdWwTAI4fp48M+zhTMo9TQkdzZqtbFk3+RjV2jDF0wfe4kVUIpReOq+EkaDSkoRSG8V6hWvszhDHUrJBC9eDhomL0f3xNAWxmy5EIX/uMEvg9Ux5YX+E6k2pEIKnHNoVIaWDojlofSIzIqTSS7l3jQtJhs3YqBzLL1DsoF1kdn+Rwl5kcsKkhV+vzl76wEbpYVZW8lu4bFfP6QHMLPcep2tuUDMCDvARRXD7YyZcAtS7aMuqll+BLAszpWxAA7EU2hgvdr6t2uyVCTCBnAYJKoZIhvcNAQcBMB0GCWCGSAFlAwQBKgQQ4D5oDoyE6LPdjpVtGPoJD4BwfnQ9ORjYFPvHQmt+lgU4jMqh6BhqP0VN3lqVfUpOmiVMIqkO/cYtlwVLKEg36TPCHBSpqvhuahSF5saCVr8JY3xWOAmTSgnNjQOPlGrPnYWYbuRLxVRsU+KUkpAzR0c6VN0wYi6bI85Pcv8yHF3UYA==]
 profiles::consul::client::secret_id_salt: ENC[PKCS7,MIIBmQYJKoZIhvcNAQcDoIIBijCCAYYCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAS7pNFRX4onccFaR87zB/eFFORuF22j6xqjyeAqqjgEduYhkt6w5kkz+YfUoHUesU0Q6F2p6HrCSZ8yAsx5M25NCiud9P4hIpjKmOZ4zCNO7uhATh4AQDYw3BrdRwfO+c6jOl5wOiNLCfDBJ0sFT3akCvcuPS1xIoRJq4Gyn+uCbOsMbvSl25ld2xKt1/cqs8gc1d8mkpjwWto7t+qZSUFMCehTbehH3G4a3Q5rvfBoNwv42Wbs676BDcCurDaAzHNqE7pDbOWhGuVOBl+q+BU0Ri/CRkGcTViN9fr8Dc9SveVC6EPsMbw+05/8/NlfzQse3KAwQ34nR9tR2PQw5qEzBcBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBB7LywscQtF7cG2nomfEsu9gDDVqJBFP1jAX2eGZ2crYS5gnBcsRwhc0HNo2/WWdhZprMW+vEJOOGXDelI53NxA3o0=]
 profiles::consul::token::node_editor::secret_id: ENC[PKCS7,MIIBmQYJKoZIhvcNAQcDoIIBijCCAYYCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAO8IIF2r18dFf0bVKEwjJUe1TmXHH0AIzsQHxkHwV7d37kvH1cY9rYw0TtdHn7GTxvotJG7GZbWvbunpBs1g2p2RPADiM6TMhbO8mJ0tAWLnMk7bQ221xu8Pc7KceqWmU17dmgNhVCohyfwJNqbA756TlHVgxGA0LtNrKoLOmgKGXAL1VYZoKEQnWq7xOpO+z3e1UfjoO6CvX/Od2hGYfUkHdro8mwRw4GFKzU7XeKFdAMUGpn5rVmY3xe+1ARXwGFaSrTHzk2n85pvwhPRlQ+OwqzyT19Qo2FNeAO6RoCRIFTtqbsjTWPUlseHIhw4Q5bHO1I0Mrlm5IHDESw/22IzBcBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBCEe9wD72qxnpeq5nCi/d7BgDCP29sDFObkFTabt2uZ/nF9MT1g+QOrrdFKgnG6ThnwH1hwpZPsSVgIs+yRQH8laB4=]
 profiles::consul::server::acl_tokens_initial_management: ENC[PKCS7,MIIBmQYJKoZIhvcNAQcDoIIBijCCAYYCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAi1UH7AZirJ1PdxWy+KEgS5ufm0wbn2xy9rkg14hKYpcVjBa4pOZpSLMGMiiUpBIqBytDMZM4ezYa/luktpkBImJbM/TE16beGtsacQGA+9eZk2Tihs9GR2qbAQiu5lLITiDlwNnf0GeWdqHM8CTeD68DczQF320d9U14/k6pG/7z+w/MGLcjsQoSuOFTm42JVn1BI46t1CYSCHMXQc/9Tfs+FzI+vumohI8DxAYBIuyzU5HBX/MntAsvD/yixMJS1pZL9WwgqZJC/wK34rVRB39DpxWf/WROrI+WLuSJwr7WBjaeF9Ju+89WKCgsI53EWhFTj8GgDZm/jqPoE478NjBcBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBAoACRzJdQKNYXZv6cghFIIgDAzB81DMcuY815nb8POtZpiA06jT/068AoZmSctHoFK/zW9tY229N5r1Tb+WHElqLk=]
@@ -129,6 +129,9 @@ lookup_options:
   profiles::ceph::client::keyrings:
     merge:
       strategy: deep
+  profiles::ceph::conf::config:
+    merge:
+      strategy: deep
   profiles::nginx::simpleproxy::locations:
     merge:
       strategy: deep

@@ -155,6 +158,24 @@ lookup_options:
   zfs::datasets:
     merge:
       strategy: deep
+  rke2::config_hash:
+    merge:
+      strategy: deep
+  postfix::configs:
+    merge:
+      strategy: deep
+  postfix::maps:
+    merge:
+      strategy: deep
+  postfix::virtuals:
+    merge:
+      strategy: deep
+  stalwart::postgresql_password:
+    convert_to: Sensitive
+  stalwart::s3_secret_key:
+    convert_to: Sensitive
+  stalwart::fallback_admin_password:
+    convert_to: Sensitive
 
 facts_path: '/opt/puppetlabs/facter/facts.d'
 

@@ -173,9 +194,6 @@ profiles::ntp::client::peers:
   - 2.au.pool.ntp.org
   - 3.au.pool.ntp.org
 
-profiles::base::puppet_servers:
-  - 'prodinf01n01.main.unkin.net'
-
 consul::install_method: 'package'
 consul::manage_repo: false
 consul::bin_dir: /usr/bin

@@ -368,6 +386,71 @@ profiles::ceph::client::mons:
   - 10.18.15.1
   - 10.18.15.2
   - 10.18.15.3
 
+profiles::ceph::conf::config:
+  global:
+    auth_client_required: 'cephx'
+    auth_cluster_required: 'cephx'
+    auth_service_required: 'cephx'
+    fsid: 'de96a98f-3d23-465a-a899-86d3d67edab8'
+    mon_allow_pool_delete: true
+    mon_initial_members: 'prodnxsr0009,prodnxsr0010,prodnxsr0011,prodnxsr0012,prodnxsr0013'
+    mon_host: '198.18.23.9,198.18.23.10,198.18.23.11,198.18.23.12,198.18.23.13'
+    ms_bind_ipv4: true
+    ms_bind_ipv6: false
+    osd_crush_chooseleaf_type: 1
+    osd_pool_default_min_size: 2
+    osd_pool_default_size: 3
+    osd_pool_default_pg_num: 128
+    public_network: >
+      198.18.23.1/32,198.18.23.2/32,198.18.23.3/32,198.18.23.4/32,
+      198.18.23.5/32,198.18.23.6/32,198.18.23.7/32,198.18.23.8/32,
+      198.18.23.9/32,198.18.23.10/32,198.18.23.11/32,198.18.23.12/32,
+      198.18.23.13/32
+  client.rgw.ausyd1nxvm2115:
+    rgw_realm: unkin
+    rgw_zonegroup: au
+    rgw_zone: syd1
+  client.rgw.ausyd1nxvm2116:
+    rgw_realm: unkin
+    rgw_zonegroup: au
+    rgw_zone: syd1
+  client.rgw.ausyd1nxvm2117:
+    rgw_realm: unkin
+    rgw_zonegroup: au
+    rgw_zone: syd1
+  client.rgw.ausyd1nxvm2118:
+    rgw_realm: unkin
+    rgw_zonegroup: au
+    rgw_zone: syd1
+  client.rgw.ausyd1nxvm2119:
+    rgw_realm: unkin
+    rgw_zonegroup: au
+    rgw_zone: syd1
+  mds:
+    keyring: /var/lib/ceph/mds/ceph-$id/keyring
+    mds_standby_replay: true
+  mds.prodnxsr0009-1:
+    host: prodnxsr0009
+  mds.prodnxsr0009-2:
+    host: prodnxsr0009
+  mds.prodnxsr0010-1:
+    host: prodnxsr0010
+  mds.prodnxsr0010-2:
+    host: prodnxsr0010
+  mds.prodnxsr0011-1:
+    host: prodnxsr0011
+  mds.prodnxsr0011-2:
+    host: prodnxsr0011
+  mds.prodnxsr0012-1:
+    host: prodnxsr0012
+  mds.prodnxsr0012-2:
+    host: prodnxsr0012
+  mds.prodnxsr0013-1:
+    host: prodnxsr0013
+  mds.prodnxsr0013-2:
+    host: prodnxsr0013
+
 #profiles::base::hosts::additional_hosts:
 #  - ip: 198.18.17.9
 #    hostname: prodinf01n09.main.unkin.net
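The new `lookup_options` entries switch those keys from first-found lookup to a deep hash merge across hierarchy levels, and wrap the stalwart secrets in `Sensitive`. The merge behaviour can be inspected from a compile server; a sketch, with the node name as a placeholder:

```sh
# Value a node would receive with the configured deep merge applied.
puppet lookup --node <node-fqdn> postfix::configs
# Compare against an explicit first-found lookup to see what deep merging adds.
puppet lookup --node <node-fqdn> --merge first postfix::configs
```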
@@ -5,3 +5,5 @@ profiles_dns_upstream_forwarder_unkin:
   - 198.18.19.15
 profiles_dns_upstream_forwarder_consul:
   - 198.18.19.14
+profiles_dns_upstream_forwarder_k8s:
+  - 198.18.19.20
@@ -10,6 +10,12 @@ profiles::haproxy::dns::vrrp_cnames:
   - git.unkin.net
   - fafflix.unkin.net
   - grafana.unkin.net
+  - dashboard.ceph.unkin.net
+  - mail-webadmin.main.unkin.net
+  - mail-in.main.unkin.net
+  - mail.main.unkin.net
+  - autoconfig.main.unkin.net
+  - autodiscover.main.unkin.net
 
 profiles::haproxy::mappings:
   fe_http:

@@ -27,6 +33,10 @@ profiles::haproxy::mappings:
       - 'fafflix.unkin.net be_jellyfin'
       - 'git.unkin.net be_gitea'
       - 'grafana.unkin.net be_grafana'
+      - 'dashboard.ceph.unkin.net be_ceph_dashboard'
+      - 'mail-webadmin.main.unkin.net be_stalwart_webadmin'
+      - 'autoconfig.main.unkin.net be_stalwart_webadmin'
+      - 'autodiscovery.main.unkin.net be_stalwart_webadmin'
   fe_https:
     ensure: present
     mappings:

@@ -42,6 +52,10 @@ profiles::haproxy::mappings:
       - 'fafflix.unkin.net be_jellyfin'
       - 'git.unkin.net be_gitea'
       - 'grafana.unkin.net be_grafana'
+      - 'dashboard.ceph.unkin.net be_ceph_dashboard'
+      - 'mail-webadmin.main.unkin.net be_stalwart_webadmin'
+      - 'autoconfig.main.unkin.net be_stalwart_webadmin'
+      - 'autodiscovery.main.unkin.net be_stalwart_webadmin'
 
 profiles::haproxy::frontends:
   fe_http:

@@ -62,6 +76,10 @@ profiles::haproxy::frontends:
       - 'acl_fafflix req.hdr(host) -i fafflix.unkin.net'
       - 'acl_gitea req.hdr(host) -i git.unkin.net'
       - 'acl_grafana req.hdr(host) -i grafana.unkin.net'
+      - 'acl_ceph_dashboard req.hdr(host) -i dashboard.ceph.unkin.net'
+      - 'acl_stalwart_webadmin req.hdr(host) -i mail-webadmin.main.unkin.net'
+      - 'acl_stalwart_webadmin req.hdr(host) -i autoconfig.main.unkin.net'
+      - 'acl_stalwart_webadmin req.hdr(host) -i autodiscovery.main.unkin.net'
       - 'acl_internalsubnets src 198.18.0.0/16 10.10.12.0/24'
     use_backend:
       - "%[req.hdr(host),lower,map(/etc/haproxy/fe_https.map,be_default)]"

@@ -79,6 +97,8 @@ profiles::haproxy::frontends:
       - 'set-header X-Frame-Options DENY if acl_fafflix'
       - 'set-header X-Frame-Options DENY if acl_gitea'
       - 'set-header X-Frame-Options DENY if acl_grafana'
+      - 'set-header X-Frame-Options DENY if acl_ceph_dashboard'
+      - 'set-header X-Frame-Options DENY if acl_stalwart_webadmin'
       - 'set-header X-Content-Type-Options nosniff'
       - 'set-header X-XSS-Protection 1;mode=block'
 

@@ -262,6 +282,102 @@ profiles::haproxy::backends:
       redirect: 'scheme https if !{ ssl_fc }'
       stick-table: 'type ip size 200k expire 30m'
       stick: 'on src'
+  be_ceph_dashboard:
+    description: Backend for Ceph Dashboard from Mgr instances
+    collect_exported: false # handled in custom function
+    options:
+      balance: roundrobin
+      option:
+        - httpchk GET /
+        - forwardfor
+        - http-keep-alive
+        - prefer-last-server
+      cookie: SRVNAME insert indirect nocache
+      http-reuse: always
+      http-check:
+        - expect status 200
+      http-request:
+        - set-header X-Forwarded-Port %[dst_port]
+        - add-header X-Forwarded-Proto https if { dst_port 9443 }
+      redirect: 'scheme https if !{ ssl_fc }'
+      stick-table: 'type ip size 200k expire 30m'
+  be_stalwart_webadmin:
+    description: Backend for Stalwart Webadmin
+    collect_exported: false # handled in custom function
+    options:
+      balance: roundrobin
+      option:
+        - httpchk GET /
+        - forwardfor
+        - http-keep-alive
+        - prefer-last-server
+      cookie: SRVNAME insert indirect nocache
+      http-reuse: always
+      http-check:
+        - expect status 200
+      http-request:
+        - set-header X-Forwarded-Port %[dst_port]
+        - add-header X-Forwarded-Proto https if { dst_port 9443 }
+      redirect: 'scheme https if !{ ssl_fc }'
+      stick-table: 'type ip size 200k expire 30m'
+  be_stalwart_imap:
+    description: Backend for Stalwart IMAP (STARTTLS)
+    collect_exported: false
+    options:
+      mode: tcp
+      balance: roundrobin
+      option:
+        - tcp-check
+        - prefer-last-server
+      stick-table: 'type ip size 200k expire 30m'
+      stick: 'on src'
+      tcp-check:
+        - connect port 143 send-proxy
+        - expect string "* OK"
+        - send "A001 STARTTLS\r\n"
+        - expect rstring "A001 (OK|2.0.0)"
+  be_stalwart_imaps:
+    description: Backend for Stalwart IMAPS (implicit TLS)
+    collect_exported: false
+    options:
+      mode: tcp
+      balance: roundrobin
+      option:
+        - tcp-check
+        - prefer-last-server
+      stick-table: 'type ip size 200k expire 30m'
+      stick: 'on src'
+      tcp-check:
+        - connect ssl send-proxy
+        - expect string "* OK"
+  be_stalwart_smtp:
+    description: Backend for Stalwart SMTP
+    collect_exported: false
+    options:
+      mode: tcp
+      balance: roundrobin
+      option:
+        - tcp-check
+        - prefer-last-server
+      stick-table: 'type ip size 200k expire 30m'
+      stick: 'on src'
+      tcp-check:
+        - connect port 25 send-proxy
+        - expect string "220 "
+  be_stalwart_submission:
+    description: Backend for Stalwart SMTP Submission
+    collect_exported: false
+    options:
+      mode: tcp
+      balance: roundrobin
+      option:
+        - tcp-check
+        - prefer-last-server
+      stick-table: 'type ip size 200k expire 30m'
+      stick: 'on src'
+      tcp-check:
+        - connect port 587 send-proxy
+        - expect string "220 "
 
 profiles::haproxy::certlist::enabled: true
 profiles::haproxy::certlist::certificates:

@@ -276,6 +392,7 @@ profiles::haproxy::certlist::certificates:
   - /etc/pki/tls/letsencrypt/fafflix.unkin.net/fullchain_combined.pem
   - /etc/pki/tls/letsencrypt/git.unkin.net/fullchain_combined.pem
   - /etc/pki/tls/letsencrypt/grafana.unkin.net/fullchain_combined.pem
+  - /etc/pki/tls/letsencrypt/dashboard.ceph.unkin.net/fullchain_combined.pem
   - /etc/pki/tls/vault/certificate.pem
 
 # additional altnames

@@ -283,6 +400,7 @@ profiles::pki::vault::alt_names:
   - au-syd1-pve.main.unkin.net
   - au-syd1-pve-api.main.unkin.net
   - jellyfin.main.unkin.net
+  - mail-webadmin.main.unkin.net
 
 # additional cnames
 profiles::haproxy::dns::cnames:

@@ -303,3 +421,4 @@ certbot::client::domains:
   - fafflix.unkin.net
   - git.unkin.net
   - grafana.unkin.net
+  - dashboard.ceph.unkin.net
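The new `be_stalwart_imap*` health checks drive the IMAP protocol the way a client would; the same handshakes can be reproduced by hand when a backend is flapping. A sketch, with the backend host as a placeholder (note the haproxy checks also send a PROXY protocol header via `send-proxy`, which a plain openssl connection does not):

```sh
# STARTTLS path exercised by be_stalwart_imap (expects "* OK", then upgrades).
openssl s_client -connect <stalwart-host>:143 -starttls imap

# Implicit-TLS path exercised by be_stalwart_imaps.
openssl s_client -connect <stalwart-host>:993
```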
@@ -3,7 +3,8 @@
 profiles::firewall::firewalld::ensure_package: 'absent'
 profiles::firewall::firewalld::ensure_service: 'stopped'
 profiles::firewall::firewalld::enable_service: false
-profiles::puppet::agent::puppet_version: '7.34.0'
+profiles::puppet::agent::version: '7.37.2'
+profiles::puppet::agent::openvox_enable: true
 
 hiera_include:
   - profiles::almalinux::base

@@ -53,13 +54,6 @@ profiles::yum::global::repos:
     baseurl: https://packagerepo.service.consul/epel/%{facts.os.release.major}/everything-daily/%{facts.os.architecture}/os/
     gpgkey: https://packagerepo.service.consul/epel/%{facts.os.release.major}/everything-daily/%{facts.os.architecture}/os/RPM-GPG-KEY-EPEL-%{facts.os.release.major}
     mirrorlist: absent
-  puppet:
-    name: puppet
-    descr: puppet repository
-    target: /etc/yum.repos.d/puppet.repo
-    baseurl: https://packagerepo.service.consul/puppet7/el/%{facts.os.release.major}-daily/%{facts.os.architecture}/os/
-    gpgkey: https://packagerepo.service.consul/puppet7/el/%{facts.os.release.major}-daily/%{facts.os.architecture}/os/RPM-GPG-KEY-puppet-20250406
-    mirrorlist: absent
   unkinben:
     name: unkinben
     descr: unkinben repository
@@ -11,4 +11,4 @@ profiles::apt::components:
   - main
   - non-free
 
-profiles::puppet::agent::puppet_version: '7.25.0-1bullseye'
+profiles::puppet::agent::version: '7.25.0-1bullseye'
@@ -12,4 +12,4 @@ profiles::apt::components:
   - non-free
   - non-free-firmware
 
-profiles::puppet::agent::puppet_version: 'latest'
+profiles::puppet::agent::version: 'latest'
@@ -70,7 +70,7 @@ profiles::nginx::simpleproxy::locations:
   arrstack_web_external:
     location_satisfy: any
     location_allow:
-      - 198.18.13.47
-      - 198.18.13.50
-      - 198.18.13.51
-      - 198.18.13.52
+      - 198.18.26.161
+      - 198.18.27.131
+      - 198.18.28.165
+      - 198.18.29.32
@@ -211,6 +211,18 @@ glauth::users:
     loginshell: '/bin/bash'
     homedir: '/home/waewak'
     passsha256: 'd9bb99634215fe031c3bdca94149a165192fe8384ecaa238a19354c2f760a811'
+  debvin:
+    user_name: 'debvin'
+    givenname: 'Debbie'
+    sn: 'Vincent'
+    mail: 'debvin@users.main.unkin.net'
+    uidnumber: 20009
+    primarygroup: 20000
+    othergroups:
+      - 20010 # jelly
+    loginshell: '/bin/bash'
+    homedir: '/home/debvin'
+    passsha256: 'cdac05ddb02e665d4ea65a974995f38a10236bc158731d92d78f6cde89b294a1'
 
 glauth::services:
   svc_jellyfin:
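glauth's `passsha256` values are unsalted SHA-256 hex digests of the password, so a hash for a new user like `debvin` can be produced with coreutils:

```sh
# Generate a passsha256 value (the password shown is an example only).
printf '%s' 'example-password' | sha256sum | awk '{print $1}'
```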
hieradata/roles/infra/ceph/rgw.eyaml (new file, 8 lines)

@@ -0,0 +1,8 @@
+---
+
+profiles::ceph::rgw::ceph_client_keys:
+  ausyd1nxvm2115: ENC[PKCS7,MIIBmQYJKoZIhvcNAQcDoIIBijCCAYYCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAovOUUtTm/9nXWj6D+kVhmbZedVVkA5N80ULNYqISrv0A+KlXCx/V2sB56SpJ9eL6aPHdC+CKkrtrrdo1h2j2em7wTA5ghhbFVF0NIS7bbn0jzpl5YqbUrNxOtDSpjzX1aCGnyMvw69Lp+NcHwxIj+1XFgK6335138s9wbX3HmYF3jcsQkMqTzynELe1OQPWFXVKTjUfFFLdCQOFryp8UY8L9j/PpV6wd4w6p7R6eXhX21rjSaN4aqN1zjsnF2OVhL8Ge0QxMhePWKGOqsUfi72kh3II028DjqU0DcZQvoxnoqRPyUUjysH0nTKoLeXOGNgJdphY1dHBJ+SJnw2gqQDBcBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBB9wA3ZJjmU95W+9r8va5uagDAMAv2APfdzrPnAU7NJPL+IW08osGuQWWamqF+XGVeHRoBmoFKwZ7grYRV2e3aabyc=]
+  ausyd1nxvm2116: ENC[PKCS7,MIIBmQYJKoZIhvcNAQcDoIIBijCCAYYCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAf5ksy/pyUZSwTh+HiKw+1Uhj16A0DVZEKAbkKUQzXVmc+QpL4Dn7YBoXlEwrY8CcsTrxTQjADvtu9FC3o34QIdh06noSgYYA+7fna2A+9+oYoNtgwC3b8LeglxO/SQ9dKoJ90jRtmlw5P/CtrxA2RelMK6FNRekp1CaWMM4q20fJGgr/E33vgx38UJyp4/q0bTu2lLehCuDUP80j3XGbSNZ2snfYdIo91Cl+nSxLSU2TdnFpWaabsH19HwDnkWGiILlLBVvvhY7copCxs5DS1ueoOTCsqnWSrTrBMJjnu7WZd/s4NLw/0q/UP5xcFA51caY3Kv+sI6bfIYkNoLazwDBcBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBDrCF16nrtukRPamx1VbGY+gDAK5dw0kV8MpATpwxTkJG6JtlFlwdpU9THs1bNwqSRD1ZhEWxQeWwsyyTtjUXi4bP8=]
+  ausyd1nxvm2117: ENC[PKCS7,MIIBmQYJKoZIhvcNAQcDoIIBijCCAYYCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAKtvsgDk2QTmL8flBBE4nA43sRSnroq4I6T2CUAYv/lRdzCCWvE961o/51zyQEz8L5QyDo7L3gcGYIBqthYjRe9Gp5a5d4qds0qskupgQKnb0KR2RLFVUtH5vxHqyJZHjXaP+PQreyRoSIfRWXAdoZu544FeJ9DKKbmEzZaH5B2OdDMrf4Ufuud0maEAw0PJthS//ghCfGi74F1xlJnIWVvMhp66b0iMxC+ACClEHunG3oKx7M/w05HllG0wcxPTg4PFrbnFXjRuIxsykF9aVHJkRnCdgbMXRM4o6FrYyZRR74F1HKRujFCUA7kYWDKLxHxJpYCvCHp4HMhfzjs824zBcBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBBbaSig6kgVVNfmSI53sNimgDDQ5O10Dzfa7S7RdJVLLUFBaZ5TG2g3Bwmy0k3wKZvABYMuYyOxQdfk6eMsKC+sC5w=]
+  ausyd1nxvm2118: ENC[PKCS7,MIIBmQYJKoZIhvcNAQcDoIIBijCCAYYCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAdKl0ude3ZxW0ihyA758mpQp43qZvzTI2Bp83WzCp2ifJCgAIjRWdd01P64rSHaa4lus/wqB9jxg38g6DrN4ejX56Y/CJ6GQKxz6b1BO5nDfsLx6QEzCt+cfg5d/PPoTtEpz2VSvDfxFUrHiABA6++Sqzb9Og+nQCFMYJD3NHCk67QpkjPGQ/ejZk4MNXZQVCfKOlFqay/fF0jEmQixFOlX/Fdm9UoKttbrKluUmzpaVUzfGRaTTFVgzc3x2t/z1q1k0P7ClI9Uu02kUXpFVs9LPX99Zc2GtrnP06mYqqARhWF1NMK0zlmxtKpfObahRP/HmtI3fgnQsU1Cpwah0emTBcBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBAILqpYx3FKY3xXLJRu2oDlgDCIOXeX6hxpu0qpj5c/9jMUSeV2DIydnxO+MiT3mceS50ip8B+zGQy5UedPmLt36Zs=]
+  ausyd1nxvm2119: ENC[PKCS7,MIIBmQYJKoZIhvcNAQcDoIIBijCCAYYCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEASlZglUxazp+9azfV3QkgRv+ACo+MH0RO5b18blbelgdmr38iwK7MwwEFpEfVJEyc/ph9RunWwrMmofDQHj5bBribfzZ2pH2CGiOrR0i5lZMtN0yQXPBA/+jm1Pi1AWGJLtoquuhMbibuHOTiXwBCBVrHHHaFTR5Xt34ABN/p/mCaG+N9nWux93msHCCextCalKBMmPhmI2q6HodfjanEVgYAe3/5hRPnpsi6IGSDNGygsTC3MG+hjGMpNF8izbwk9Lpzn6kY51aeNxI2ed9Jm8UZ/k+8b+o7ZQyWIBbf7DTFpEzk4G46puaDbXIorBWQ4azCjN3gt8VB91hwihtzcDBcBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBB6U+9z4cSzMTA1z9bmoX82gDBfy5zbRPK8GxImJo6evecMOTtaY2c4aEnESXtGBCS02enmxljv9dv1UYQD0/a6S3A=]
hieradata/roles/infra/ceph/rgw.yaml (new file, 60 lines)

@@ -0,0 +1,60 @@
+---
+hiera_include:
+  - profiles::ceph::rgw
+  - profiles::nginx::simpleproxy
+
+profiles::ceph::rgw::enable: true
+
+# FIXME: puppet-python wants to try manage python-dev, which is required by the ceph package
+python::manage_dev_package: false
+
+# additional altnames
+profiles::pki::vault::alt_names:
+  - radosgw.main.unkin.net
+  - radosgw.service.consul
+  - radosgw.query.consul
+  - "radosgw.service.%{facts.country}-%{facts.region}.consul"
+
+# additional repos
+profiles::yum::global::repos:
+  ceph:
+    name: ceph
+    descr: ceph repository
+    target: /etc/yum.repos.d/ceph.repo
+    baseurl: https://edgecache.query.consul/ceph/yum/el%{facts.os.release.major}/%{facts.os.architecture}
+    gpgkey: https://download.ceph.com/keys/release.asc
+    mirrorlist: absent
+  ceph-noarch:
+    name: ceph-noarch
+    descr: ceph-noarch repository
+    target: /etc/yum.repos.d/ceph-noarch.repo
+    baseurl: https://edgecache.query.consul/ceph/yum/el%{facts.os.release.major}/noarch
+    gpgkey: https://download.ceph.com/keys/release.asc
+    mirrorlist: absent
+
+# manage a simple nginx reverse proxy
+profiles::nginx::simpleproxy::nginx_vhost: 'radosgw.service.consul'
+profiles::nginx::simpleproxy::nginx_aliases:
+  - radosgw.service.au-syd1.consul
+profiles::nginx::simpleproxy::proxy_port: 7480
+profiles::nginx::simpleproxy::proxy_path: '/'
+nginx::client_max_body_size: 5000M
+
+# manage consul service
+consul::services:
+  radosgw:
+    service_name: 'radosgw'
+    address: "%{facts.networking.ip}"
+    port: 443
+    checks:
+      - id: 'radosgw_https_check'
+        name: 'RADOSGW HTTPS Check'
+        http: "https://%{facts.networking.fqdn}:443"
+        method: 'GET'
+        tls_skip_verify: true
+        interval: '10s'
+        timeout: '1s'
+profiles::consul::client::node_rules:
+  - resource: service
+    segment: radosgw
+    disposition: write
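Once the `radosgw` service registers and its HTTPS check passes, it is resolvable through consul DNS and reachable via the nginx proxy in front of port 7480; a minimal verification sketch against a local consul agent:

```sh
# Resolve the service via the consul agent's DNS interface (default port 8600).
dig @127.0.0.1 -p 8600 radosgw.service.consul +short
# Hit the same endpoint the consul health check uses.
curl -sk -o /dev/null -w '%{http_code}\n' "https://$(hostname -f):443/"
```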
hieradata/roles/infra/dns/externaldns.eyaml (new file, 2 lines)

@@ -0,0 +1,2 @@
+---
+externaldns::externaldns_key_secret: ENC[PKCS7,MIIBmQYJKoZIhvcNAQcDoIIBijCCAYYCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEABqbZiK1NDTU+w2k7orz2HrB0EXwun7hn4pR6TeCHMp2IfrkPxlQT+f1J9c0PqJaAKvnyz+Cx0xNCrlnONqk+J57f48kYKYV+Vw+L0AYHYFj8/TizY5CwLpJS2XKyfRd4iEsWMonvfIYn71t3+YuXm4dkoEqGekW93qCr/KFtjAu0K3e+ypyl4EJqWokiUs7IbcSBNvrjUkP4yR8F/wHVKM1E5yfr+D1+nmMmt7Ob/J+am14492TppE2C7Xadg4us+kdYtuBsv9kTSi1GwwqUDjbeJVmfK3pKHjXdF+PI07AFLzo5bBZTJOzQfQ4SywpH8R5BDQoUCyHiaskB5wrmSDBcBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBB2LU9ZhefSg9PqqkwnfV65gDBvXuXco0moKCGjHqm5KcojWCK1BoS/+mltlr8kw9grZjN9jxHRLn1FjgBlq418c8w=]
hieradata/roles/infra/dns/externaldns.yaml (new file, 65 lines)

@@ -0,0 +1,65 @@
+---
+hiera_include:
+  - externaldns
+  - frrouting
+  - exporters::frr_exporter
+
+externaldns::bind_master_hostname: 'ausyd1nxvm2127.main.unkin.net'
+externaldns::k8s_zones:
+  - 'k8s.syd1.au.unkin.net'
+  - '200.18.198.in-addr.arpa'
+externaldns::slave_servers:
+  - 'ausyd1nxvm2128.main.unkin.net'
+  - 'ausyd1nxvm2129.main.unkin.net'
+externaldns::externaldns_key_algorithm: 'hmac-sha256'
+
+# networking
+anycast_ip: 198.18.19.20
+systemd::manage_networkd: true
+systemd::manage_all_network_files: true
+networking::interfaces:
+  eth0:
+    type: physical
+    forwarding: true
+    dhcp: true
+  anycast0:
+    type: dummy
+    ipaddress: "%{hiera('anycast_ip')}"
+    netmask: 255.255.255.255
+    mtu: 1500
+
+# frrouting
+exporters::frr_exporter::enable: true
+frrouting::ospfd_router_id: "%{facts.networking.ip}"
+frrouting::ospfd_redistribute:
+  - connected
+frrouting::ospfd_interfaces:
+  eth0:
+    area: 0.0.0.0
+  anycast0:
+    area: 0.0.0.0
+frrouting::daemons:
+  ospfd: true
+
+# consul
+profiles::consul::client::node_rules:
+  - resource: service
+    segment: frr_exporter
+    disposition: write
+
+# additional repos
+profiles::yum::global::repos:
+  frr-extras:
+    name: frr-extras
+    descr: frr-extras repository
+    target: /etc/yum.repos.d/frr-extras.repo
+    baseurl: https://packagerepo.service.consul/frr/el9/extras-daily/%{facts.os.architecture}/os
+    gpgkey: https://packagerepo.service.consul/frr/el9/extras-daily/%{facts.os.architecture}/os/RPM-GPG-KEY-FRR
+    mirrorlist: absent
+  frr-stable:
+    name: frr-stable
+    descr: frr-stable repository
+    target: /etc/yum.repos.d/frr-stable.repo
+    baseurl: https://packagerepo.service.consul/frr/el9/stable-daily/%{facts.os.architecture}/os
+    gpgkey: https://packagerepo.service.consul/frr/el9/stable-daily/%{facts.os.architecture}/os/RPM-GPG-KEY-FRR
+    mirrorlist: absent
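Here the anycast address 198.18.19.20 is bound to the `anycast0` dummy interface and redistributed into OSPF, so the k8s zones answer wherever FRR has converged; a sketch of checking both halves on one of the externaldns hosts:

```sh
# Confirm the anycast prefix is in the routing table (vtysh ships with FRR).
sudo vtysh -c 'show ip route 198.18.19.20/32'

# Query the new forward zone against the anycast listener directly
# (the record name below is only an example).
dig @198.18.19.20 example.k8s.syd1.au.unkin.net +short
```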
@@ -82,6 +82,11 @@ profiles::dns::resolver::zones:
       - 10.10.16.32
       - 10.10.16.33
     forward: 'only'
+  k8s.syd1.au.unkin.net-forward:
+    domain: 'k8s.syd1.au.unkin.net'
+    zone_type: 'forward'
+    forwarders: "%{alias('profiles_dns_upstream_forwarder_k8s')}"
+    forward: 'only'
   unkin.net-forward:
     domain: 'unkin.net'
     zone_type: 'forward'

@@ -172,6 +177,11 @@ profiles::dns::resolver::zones:
     zone_type: 'forward'
     forwarders: "%{alias('profiles_dns_upstream_forwarder_unkin')}"
     forward: 'only'
+  200.18.198.in-addr.arpa-forward:
+    domain: '200.18.198.in-addr.arpa'
+    zone_type: 'forward'
+    forwarders: "%{alias('profiles_dns_upstream_forwarder_k8s')}"
+    forward: 'only'
   consul-forward:
     domain: 'consul'
     zone_type: 'forward'

@@ -188,6 +198,7 @@ profiles::dns::resolver::views:
   - network.unkin.net-forward
   - prod.unkin.net-forward
   - consul-forward
+  - k8s.syd1.au.unkin.net-forward
   - 13.18.198.in-addr.arpa-forward
   - 14.18.198.in-addr.arpa-forward
   - 15.18.198.in-addr.arpa-forward
@@ -2,14 +2,15 @@
 hiera_include:
   - docker
   - profiles::gitea::runner
+  - incus::client
 
 docker::version: latest
 docker::curl_ensure: false
 docker::root_dir: /data/docker
 
+profiles::gitea::runner::instance: https://git.unkin.net
 profiles::gitea::runner::home: /data/runner
-profiles::gitea::runner::version: '0.2.10'
-profiles::gitea::runner::source: "https://gitea.com/gitea/act_runner/releases/download/v%{hiera('profiles::gitea::runner::version')}/act_runner-%{hiera('profiles::gitea::runner::version')}-linux-amd64"
+profiles::gitea::runner::version: '0.2.12'
 profiles::gitea::runner::config:
   log:
     level: info

@@ -39,7 +40,8 @@ profiles::gitea::runner::config:
     privileged: false
     options:
     workdir_parent: /workspace
-    valid_volumes: []
+    valid_volumes:
+      - /etc/pki/tls/vault
    docker_host: ""
    force_pull: true
    force_rebuild: false
@@ -71,7 +71,7 @@ profiles::nginx::simpleproxy::nginx_aliases:
 
 profiles::nginx::simpleproxy::proxy_port: 3000
 profiles::nginx::simpleproxy::proxy_path: '/'
-nginx::client_max_body_size: 1024M
+nginx::client_max_body_size: 5144M
 
 # enable external access via haproxy
 profiles::gitea::haproxy::enable: true
@@ -163,6 +163,50 @@ profiles::haproxy::frontends:
       - 'set-header X-Forwarded-Proto https'
       - 'set-header X-Real-IP %[src]'
       - 'use-service prometheus-exporter if { path /metrics }'
+  fe_imap:
+    description: 'Frontend for Stalwart IMAP (STARTTLS)'
+    bind:
+      0.0.0.0:143: []
+    mode: 'tcp'
+    options:
+      log: global
+      default_backend: be_stalwart_imap
+      tcp-request:
+        - inspect-delay 5s
+        - content accept if { req_len 0 }
+  fe_imaps:
+    description: 'Frontend for Stalwart IMAPS (implicit TLS)'
+    bind:
+      0.0.0.0:993: []
+    mode: 'tcp'
+    options:
+      log: global
+      default_backend: be_stalwart_imaps
+      tcp-request:
+        - inspect-delay 5s
+        - content accept if { req_len 0 }
+  fe_smtp:
+    description: 'Frontend for Stalwart SMTP'
+    bind:
+      0.0.0.0:25: []
+    mode: 'tcp'
+    options:
+      log: global
+      default_backend: be_stalwart_smtp
+      tcp-request:
+        - inspect-delay 5s
+        - content accept if { req_len 0 }
+  fe_submission:
+    description: 'Frontend for Stalwart SMTP Submission'
+    bind:
+      0.0.0.0:587: []
+    mode: 'tcp'
+    options:
+      log: global
+      default_backend: be_stalwart_submission
+      tcp-request:
+        - inspect-delay 5s
+        - content accept if { req_len 0 }
 
 profiles::haproxy::backends:
   be_letsencrypt:
@@ -6,6 +6,7 @@ hiera_include:
   - zfs
   - profiles::ceph::node
   - profiles::ceph::client
+  - profiles::ceph::dashboard
   - profiles::storage::cephfsvols
   - exporters::frr_exporter
 

@@ -36,6 +37,7 @@ profiles::ssh::sign::principals:
   - "%{facts.networking.interfaces.enp3s0.ip}"
 
 # configure consul service
+profiles::consul::client::host_addr: "%{hiera('networking_loopback0_ip')}"
 consul::services:
   incus:
     service_name: 'incus'

@@ -121,6 +123,9 @@ profiles::yum::global::repos:
 # dns
 profiles::dns::base::primary_interface: loopback0
 
+# dashboard/haproxy
+profiles::ceph::dashboard::ipaddress: "%{hiera('networking_loopback0_ip')}"
+
 # networking
 systemd::manage_networkd: true
 systemd::manage_all_network_files: true

@@ -153,6 +158,8 @@ networking::interfaces:
 # frrouting
 exporters::frr_exporter::enable: true
 frrouting::ospfd_router_id: "%{hiera('networking_loopback0_ip')}"
+frrouting::ospf_preferred_source_enable: true
+frrouting::ospf_preferred_source: "%{hiera('networking_loopback0_ip')}"
 frrouting::ospfd_redistribute:
   - connected
 frrouting::ospfd_interfaces:
hieradata/roles/infra/k8s.eyaml (new file, 1 line)

@@ -0,0 +1 @@
+rke2::node_token: ENC[PKCS7,MIIB2gYJKoZIhvcNAQcDoIIByzCCAccCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAOD+w5nJFqEYWFj+tZQ65Oi19eDhaWtpLQ0gwEdBtMmY9sPJ63l1q2qH933NH6TOd1UlMDvGfoDLCae+yt/MAW//cJ15X3QbiVQ23DdfCOlUEZN6fjVrveEt/yIeFQmWvnkMS4pRfPbgQu2OHm37PpuPE7s6dUyGItAYjchrRhtQ7ibhXDnN7miG+oVRXP2T8b/V5WPdmA222DSV6r/AnqaWkna9W/oh/I2sNKeEm5q3f8bh8Gxt1dDy3VwaZ3lAh3uR+SUm7P/6PTYw8opxiumFBvos0mRiXIdOwUuqrAS8hafWBhxnDLlTBfz62Nc4wQmQ8gz0bHJZSipH9G6mIEDCBnAYJKoZIhvcNAQcBMB0GCWCGSAFlAwQBKgQQ2WG6ROWFlQdXx0TuO5oABoBwTDtAXIj7y6I1B3zCnFoMHETf5d7ulPGdgwZsENf0UIHpg2l0w503MUHHbu6YDFiDiTE0oDNJPVHid7TO+XwWFgh5v1MWi/XeEBgCs6nMCW8qkX0Z3UXaZdSBUll1M4sRtuqscBnoD/LLs2kKfxrqQg==]
hieradata/roles/infra/k8s.yaml (new file, 168 lines)

@@ -0,0 +1,168 @@
+---
+hiera_include:
+  - profiles::selinux::setenforce
+  - profiles::ceph::node
+  - profiles::ceph::client
+  - exporters::frr_exporter
+  - frrouting
+  - rke2
+
+# manage rke2
+rke2::bootstrap_node: prodnxsr0001.main.unkin.net
+rke2::join_url: https://join-k8s.service.consul:9345
+rke2::config_hash:
+  bind-address: "%{hiera('networking_loopback0_ip')}"
+  node-ip: "%{hiera('networking_loopback0_ip')}"
+  node-external-ip: "%{hiera('networking_loopback0_ip')}"
+  write-kubeconfig-mode: 644
+  kubelet-arg:
+    - '--node-status-update-frequency=4s'
+    - '--max-pods=100'
+  node-label:
+    - "region=%{facts.region}"
+    - "country=%{facts.country}"
+    - "asset=%{facts.dmi.product.serial_number}"
+    - "zone=%{zone}"
+
+# FIXME: puppet-python wants to try manage python-dev, which is required by the ceph package
+python::manage_dev_package: false
+
+profiles::packages::include:
+  bridge-utils: {}
+  cephadm: {}
+
+profiles::selinux::setenforce::mode: disabled
+
+profiles::ceph::client::manage_ceph_conf: false
+profiles::ceph::client::manage_ceph_package: false
+profiles::ceph::client::manage_ceph_paths: false
+profiles::ceph::client::fsid: 'de96a98f-3d23-465a-a899-86d3d67edab8'
+profiles::ceph::client::mons:
+  - 198.18.23.9
+  - 198.18.23.10
+  - 198.18.23.11
+  - 198.18.23.12
+  - 198.18.23.13
+
+# additional repos
+profiles::yum::global::repos:
+  ceph:
+    name: ceph
+    descr: ceph repository
+    target: /etc/yum.repos.d/ceph.repo
+    baseurl: https://edgecache.query.consul/ceph/yum/el%{facts.os.release.major}/%{facts.os.architecture}
+    gpgkey: https://download.ceph.com/keys/release.asc
+    mirrorlist: absent
+  ceph-noarch:
+    name: ceph-noarch
+    descr: ceph-noarch repository
+    target: /etc/yum.repos.d/ceph-noarch.repo
+    baseurl: https://edgecache.query.consul/ceph/yum/el%{facts.os.release.major}/noarch
+    gpgkey: https://download.ceph.com/keys/release.asc
+    mirrorlist: absent
+  frr-extras:
+    name: frr-extras
+    descr: frr-extras repository
+    target: /etc/yum.repos.d/frr-extras.repo
+    baseurl: https://packagerepo.service.consul/frr/el9/extras-daily/%{facts.os.architecture}/os
+    gpgkey: https://packagerepo.service.consul/frr/el9/extras-daily/%{facts.os.architecture}/os/RPM-GPG-KEY-FRR
+    mirrorlist: absent
+  frr-stable:
+    name: frr-stable
+    descr: frr-stable repository
+    target: /etc/yum.repos.d/frr-stable.repo
+    baseurl: https://packagerepo.service.consul/frr/el9/stable-daily/%{facts.os.architecture}/os
+    gpgkey: https://packagerepo.service.consul/frr/el9/stable-daily/%{facts.os.architecture}/os/RPM-GPG-KEY-FRR
+    mirrorlist: absent
+  rancher-rke2-common-latest:
+    name: rancher-rke2-common-latest
+    descr: rancher-rke2-common-latest
+    target: /etc/yum.repos.d/rke2-common.repo
+    baseurl: https://packagerepo.service.consul/rke2/rhel%{facts.os.release.major}/common-daily/x86_64/os/
+    gpgkey: https://packagerepo.service.consul/rke2/rhel%{facts.os.release.major}/common-daily/x86_64/os/public.key
+    mirrorlist: absent
+  rancher-rke2-1-33-latest:
+    name: rancher-rke2-1-33-latest
+    descr: rancher-rke2-1-33-latest
+    target: /etc/yum.repos.d/rke2-1-33.repo
+    baseurl: https://packagerepo.service.consul/rke2/rhel%{facts.os.release.major}/1.33-daily/x86_64/os/
+    gpgkey: https://packagerepo.service.consul/rke2/rhel%{facts.os.release.major}/1.33-daily/x86_64/os/public.key
+    mirrorlist: absent
+
+# dns
+profiles::dns::base::primary_interface: loopback0
+
+# networking
+systemd::manage_networkd: true
+systemd::manage_all_network_files: true
+networking::interfaces:
+  "%{hiera('networking_1000_iface')}":
+    type: physical
+    ipaddress: "%{hiera('networking_1000_ip')}"
+    gateway: 198.18.15.254
+    txqueuelen: 10000
+    forwarding: true
+  "%{hiera('networking_2500_iface')}":
+    type: physical
+    ipaddress: "%{hiera('networking_2500_ip')}"
+    mtu: 1500
+    txqueuelen: 10000
+    forwarding: true
+  loopback0:
+    type: dummy
+    ipaddress: "%{hiera('networking_loopback0_ip')}"
+    netmask: 255.255.255.255
+    mtu: 1500
+  loopback1:
+    type: dummy
+    ipaddress: "%{hiera('networking_loopback1_ip')}"
+    netmask: 255.255.255.255
+    mtu: 1500
+  loopback2:
+    type: dummy
+    ipaddress: "%{hiera('networking_loopback2_ip')}"
+    netmask: 255.255.255.255
+    mtu: 1500
+
+# configure consul service
+profiles::consul::client::host_addr: "%{hiera('networking_loopback0_ip')}"
+profiles::consul::client::node_rules:
+  - resource: service
+    segment: frr_exporter
+    disposition: write
+
+# frrouting
+exporters::frr_exporter::enable: true
+frrouting::ospfd_router_id: "%{hiera('networking_loopback0_ip')}"
+frrouting::ospf_preferred_source_enable: true
+frrouting::ospf_preferred_source: "%{hiera('networking_loopback0_ip')}"
+frrouting::ospfd_redistribute:
+  - connected
+frrouting::ospfd_interfaces:
+  "%{hiera('networking_1000_iface')}":
+    area: 0.0.0.0
+  "%{hiera('networking_2500_iface')}":
+    area: 0.0.0.0
+  loopback0:
+    area: 0.0.0.0
+  loopback1:
+    area: 0.0.0.0
+  loopback2:
+    area: 0.0.0.0
+frrouting::daemons:
+  ospfd: true
+frrouting::ospf_exclude_k8s_enable: true
+frrouting::k8s_cluster_cidr: '10.42.0.0/16' # RKE2 cluster-cidr (pods)
+frrouting::k8s_service_cidr: '10.43.0.0/16' # RKE2 service-cidr
+
+# add loopback interfaces to ssh list
+ssh::server::options:
+  ListenAddress:
+    - "%{hiera('networking_loopback0_ip')}"
+    - "%{hiera('networking_1000_ip')}"
+    - "%{hiera('networking_2500_ip')}"
+
+profiles::ssh::sign::principals:
+  - "%{hiera('networking_loopback0_ip')}"
+  - "%{hiera('networking_1000_ip')}"
+  - "%{hiera('networking_2500_ip')}"
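`rke2::config_hash` ends up in rke2's configuration file (conventionally `/etc/rancher/rke2/config.yaml`); after puppet converges a node, a sketch of confirming it joined with the expected labels, using rke2's bundled kubectl:

```sh
# Inspect the rendered rke2 config.
sudo cat /etc/rancher/rke2/config.yaml

# Verify the node registered and carries the region/country/zone labels.
sudo /var/lib/rancher/rke2/bin/kubectl \
  --kubeconfig /etc/rancher/rke2/rke2.yaml get nodes --show-labels
```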
@@ -1,10 +1,3 @@
 ---
-# networking
-systemd::manage_networkd: true
-systemd::manage_all_network_files: true
-networking::interfaces:
-  eth0:
-    type: physical
-    forwarding: true
-    dhcp: true
-    mtu: 1500
+# manage rke2
+rke2::node_type: agent
hieradata/roles/infra/k8s/control.eyaml (new file, 1 line)

@@ -0,0 +1 @@
+---
@@ -1,42 +1,84 @@
 ---
-profiles::pki::vault::alt_names:
-  - k8s-control.service.consul
-  - k8s-control.query.consul
-  - "k8s-control.service.%{facts.country}-%{facts.region}.consul"
-
-profiles::ssh::sign::principals:
-  - k8s-control.service.consul
-  - k8s-control.query.consul
-  - "k8s-control.service.%{facts.country}-%{facts.region}.consul"
+# manage rke2
+rke2::node_type: server
+rke2::helm_install: true
+rke2::helm_repos:
+  rancher-stable: https://releases.rancher.com/server-charts/stable
+  purelb: https://gitlab.com/api/v4/projects/20400619/packages/helm/stable
+  jetstack: https://charts.jetstack.io
+  harbor: https://helm.goharbor.io
+  traefik: https://traefik.github.io/charts
+  hashicorp: https://helm.releases.hashicorp.com
+rke2::extra_config_files:
+  - rke2-canal-config
+  - rke2-nginx-ingress-config
+rke2::config_hash:
+  advertise-address: "%{hiera('networking_loopback0_ip')}"
+  tls-san:
+    - "join-k8s.service.consul"
+    - "api-k8s.service.consul"
+    - "api.k8s.unkin.net"
+    - "join.k8s.unkin.net"
+  cni: canal
+  cluster-cidr: 10.42.0.0/16
+  service-cidr: 10.43.0.0/16
+  cluster-dns: 10.43.0.10
+  etcd-arg: "--quota-backend-bytes 2048000000"
+  etcd-snapshot-schedule-cron: "0 3 * * *"
+  etcd-snapshot-retention: 10
+  kube-apiserver-arg:
+    - '--default-not-ready-toleration-seconds=30'
+    - '--default-unreachable-toleration-seconds=30'
+  kube-controller-manager-arg:
+    - '--node-monitor-period=4s'
+  protect-kernel-defaults: true
+  disable-kube-proxy: false
 
 # configure consul service
 consul::services:
-  k8s-control:
-    service_name: 'k8s-control'
-    tags:
-      - 'k8s'
-      - 'container'
+  api-k8s:
+    service_name: 'api-k8s'
     address: "%{facts.networking.fqdn}"
     port: 6443
     checks:
-      - id: 'k8s-control_https_check'
-        name: 'k8s-control HTTPS Check'
-        http: "https://%{facts.networking.fqdn}:6443"
-        method: 'GET'
-        tls_skip_verify: true
+      - id: 'api-k8s_livez_check'
+        name: 'api-k8s livez Check'
+        args:
+          - sudo
+          - /usr/local/bin/check_k8s_api.sh
         interval: '10s'
         timeout: '1s'
+  join-k8s:
+    service_name: 'join-k8s'
+    address: "%{facts.networking.fqdn}"
+    port: 9345
+    checks:
+      - id: 'rke2_tcp_check_9345'
+        name: 'rke2 TCP Check 9345'
+        tcp: "%{hiera('networking_loopback0_ip')}:9345"
+        interval: '10s'
+        timeout: '1s'
+      - id: 'rke2_server_ping_check'
+        name: 'rke2 Server Ping Check'
+        http: "https://%{hiera('networking_loopback0_ip')}:9345/ping"
+        interval: '10s'
+        timeout: '3s'
+        tls_skip_verify: true
 profiles::consul::client::node_rules:
   - resource: service
-    segment: k8s-control
+    segment: api-k8s
+    disposition: write
+  - resource: service
+    segment: join-k8s
     disposition: write
 
-# networking
-systemd::manage_networkd: true
-systemd::manage_all_network_files: true
-networking::interfaces:
-  eth0:
-    type: physical
-    forwarding: true
-    dhcp: true
-    mtu: 1500
+profiles::pki::vault::alt_names:
+  - api-k8s.service.consul
+  - api-k8s.query.consul
+  - "api-k8s.service.%{facts.country}-%{facts.region}.consul"
+
+sudo::configs:
+  consul-checks:
+    priority: 20
+    content: |
+      consul ALL=(ALL) NOPASSWD: /usr/local/bin/check_k8s_api.sh
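The referenced `/usr/local/bin/check_k8s_api.sh` is not part of this diff; a hypothetical minimal version consistent with the `api-k8s_livez_check` above would probe the apiserver's `/livez` endpoint, which kube-apiserver exposes to unauthenticated callers by default:

```sh
#!/usr/bin/env bash
# Hypothetical sketch only: the real check_k8s_api.sh is not in this diff.
# Consul script checks treat exit 0 as passing and non-zero as critical.
set -euo pipefail
resp="$(curl -fsSk --max-time 1 https://127.0.0.1:6443/livez)" || exit 2
[ "$resp" = "ok" ] || exit 2
echo "kube-apiserver livez: ${resp}"
```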
@ -5,6 +5,24 @@ hiera_include:
|
|||||||
- profiles::ceph::node
|
- profiles::ceph::node
|
||||||
- profiles::ceph::client
|
- profiles::ceph::client
|
||||||
- exporters::frr_exporter
|
- exporters::frr_exporter
|
||||||
|
- profiles::rke2::node
|
||||||
|
|
||||||
|
# manage rke2
|
||||||
|
profiles::rke2::node::servers:
|
||||||
|
- prodnxsr0001.main.unkin.net
|
||||||
|
- prodnxsr0002.main.unkin.net
|
||||||
|
- prodnxsr0003.main.unkin.net
|
||||||
|
|
||||||
|
rke2::config_hash:
|
||||||
|
bind-address: "%{hiera('networking_loopback0_ip')}"
|
||||||
|
advertise-address: "%{hiera('networking_loopback0_ip')}"
|
||||||
|
node-ip: "%{hiera('networking_loopback0_ip')}"
|
||||||
|
node-external-ip: "%{hiera('networking_loopback0_ip')}"
|
||||||
|
cluster-domain: "svc.k8s.unkin.net"
|
||||||
|
tls-san:
|
||||||
|
- "api.k8s.unkin.net"
|
||||||
|
- "join.k8s.unkin.net"
|
||||||
|
cni: cilium
|
||||||
|
|
||||||
# FIXME: puppet-python wants to try manage python-dev, which is required by the ceph package
|
# FIXME: puppet-python wants to try manage python-dev, which is required by the ceph package
|
||||||
python::manage_dev_package: false
|
python::manage_dev_package: false
|
||||||

@@ -25,6 +43,7 @@ profiles::ceph::client::mons:
   - 198.18.23.11
   - 198.18.23.12
   - 198.18.23.13
+
 # additional repos
 profiles::yum::global::repos:
   ceph:

@@ -55,6 +74,20 @@ profiles::yum::global::repos:
     baseurl: https://packagerepo.service.consul/frr/el9/stable-daily/%{facts.os.architecture}/os
     gpgkey: https://packagerepo.service.consul/frr/el9/stable-daily/%{facts.os.architecture}/os/RPM-GPG-KEY-FRR
     mirrorlist: absent
+  rancher-rke2-common-latest:
+    name: rancher-rke2-common-latest
+    descr: rancher-rke2-common-latest
+    target: /etc/yum.repos.d/rke2-common.repo
+    baseurl: https://rpm.rancher.io/rke2/latest/common/centos/%{facts.os.release.major}/noarch
+    gpgkey: https://rpm.rancher.io/public.key
+    mirrorlist: absent
+  rancher-rke2-1-33-latest:
+    name: rancher-rke2-1-33-latest
+    descr: rancher-rke2-1-33-latest
+    target: /etc/yum.repos.d/rke2-1-33.repo
+    baseurl: https://rpm.rancher.io/rke2/latest/1.33/centos/%{facts.os.release.major}/x86_64
+    gpgkey: https://rpm.rancher.io/public.key
+    mirrorlist: absent

 # dns
 profiles::dns::base::primary_interface: loopback0

@@ -91,8 +124,38 @@ networking::interfaces:
       netmask: 255.255.255.255
       mtu: 1500

-# consul
+# configure consul service
+profiles::consul::client::host_addr: "%{hiera('networking_loopback0_ip')}"
+consul::services:
+  api-k8s:
+    service_name: 'api-k8s'
+    address: "%{facts.networking.fqdn}"
+    port: 6443
+    checks:
+      - id: 'api-k8s_https_check'
+        name: 'api-k8s HTTPS Check'
+        http: "https://%{facts.networking.fqdn}:6443"
+        method: 'GET'
+        tls_skip_verify: true
+        interval: '10s'
+        timeout: '1s'
+  join-k8s:
+    service_name: 'join-k8s'
+    address: "%{facts.networking.fqdn}"
+    port: 9345
+    checks:
+      - id: 'etcd_tcp_check_9345'
+        name: 'ETCD TCP Check 9345'
+        tcp: "%{facts.networking.fqdn}:9345"
+        interval: '10s'
+        timeout: '1s'
 profiles::consul::client::node_rules:
+  - resource: service
+    segment: api-k8s
+    disposition: write
+  - resource: service
+    segment: join-k8s
+    disposition: write
   - resource: service
     segment: frr_exporter
     disposition: write

@@ -100,6 +163,8 @@ profiles::consul::client::node_rules:
 # frrouting
 exporters::frr_exporter::enable: true
 frrouting::ospfd_router_id: "%{hiera('networking_loopback0_ip')}"
+frrouting::ospf_preferred_source_enable: true
+frrouting::ospf_preferred_source: "%{hiera('networking_loopback0_ip')}"
 frrouting::ospfd_redistribute:
   - connected
 frrouting::ospfd_interfaces:

@@ -127,3 +192,8 @@ profiles::ssh::sign::principals:
   - "%{hiera('networking_loopback0_ip')}"
   - "%{hiera('networking_1000_ip')}"
   - "%{hiera('networking_2500_ip')}"
+
+profiles::pki::vault::alt_names:
+  - api-k8s.service.consul
+  - api-k8s.query.consul
+  - "api-k8s.service.%{facts.country}-%{facts.region}.consul"

@@ -14,6 +14,8 @@ victorialogs::node::options:
   envflag.enable: 'true'
   select.disable: 'undef'
   storageNode.tls: 'undef'
+  syslog.listenAddr.tcp: ':21514'
+  syslog.timezone: 'Australia/Sydney'
   storageNode:
     - ausyd1nxvm2108.main.unkin.net:9428
     - ausyd1nxvm2109.main.unkin.net:9428

@@ -45,7 +47,20 @@ consul::services:
         tls_skip_verify: true
         interval: '10s'
         timeout: '1s'
+  syslog:
+    service_name: 'syslog'
+    address: "%{facts.networking.ip}"
+    port: 21514
+    checks:
+      - id: 'vlinsert_syslog_tcp_check'
+        name: 'VictoriaLogs Syslog TCP Check'
+        tcp: "%{facts.networking.fqdn}:21514"
+        interval: '30s'
+        timeout: '5s'
 profiles::consul::client::node_rules:
   - resource: service
     segment: vlinsert
     disposition: write
+  - resource: service
+    segment: syslog
+    disposition: write

hieradata/roles/infra/mail/backend.eyaml (new file, 5 lines)
@@ -0,0 +1,5 @@
+---
+profiles::sql::postgresdb::dbpass: ENC[PKCS7,MIIBmQYJKoZIhvcNAQcDoIIBijCCAYYCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAEZkKX2ThGom2PffofEuRBHbyiq68PCsq0+19eSa02fpVPKgZ/5BEjzBhwvrt0BWZWsjYGhccFQ69DR+lTuqS50GcRSAiNQ2LDX2a3J1pu39oIKsNVmcJTza0f5T0VeI3A7sZkn7jL+NVz5ANp8V0EMfAjaduGQ7Jac+8dBsvTrLbJ+1AZVrjaKPxOI1+5tpE7qx35mM0oDVy0NwmlaVf8vbK6jyzyUJRs4Sb+mpioPi5sDxHgClzsQnu93HqqAIqR5UzsUv7MDMljOGYUF5ITyPU836I1LEZ9UfiVO7AikQ3A31LaSUWvwsxRHKxiQXJ7v6W/O+Nt3jdIR0eoqC5xTBcBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBC11+SUDLmz6bBrGyfYT9DPgDB3UhuhJ3kUruMTdRCW0Y6hoSBBQYCO+ZRFJToGTkz/BcxVw2Xtwjc7UmKmLodsDAo=]
+stalwart::s3_access_key: ENC[PKCS7,MIIBiQYJKoZIhvcNAQcDoIIBejCCAXYCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAm1fkuVcAt4UKFnpljH0o3eb3x1r2ieEU4NUzuSx8DDdOv4rh8rkqghPGn1xs6LGd9zLprc9ZWFowXCP4I2L0gZ4PekI04rV4lL02xYechIah1JAntdwECEks7rmI4BbPabIUIkaHW4i/WtntRNvv38g9JjiWrvOoJSABEsiIVZL0ct6NykLgQk27r4rcP8j7ukNQqPCAA02d4y9CB/5g6RKtYkv6FwZfLA/rFTUIXhKJsNtTa9Gm1yhvb/Y859X4qvsSFymCm/B8+2Bz5H57lE0r6xOBBBS5PfpeQ2YRxmedlkj0HxRFpl2e1W94OazMbbI6dlXa5ceHqHXDZL78EzBMBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBDhiBSQPgE+fwD1Y9Gc1wQ3gCCGeGdA4BKCvIhYaQmzv9wUFuNaO75qHrub0vY8Od1ilQ==]
+stalwart::s3_secret_key: ENC[PKCS7,MIIBmQYJKoZIhvcNAQcDoIIBijCCAYYCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAdFOGR2axfPIksXExnW8vAZBSBm8V/QqPtIMtuzleDLOGsOuyLgJAbHPZJoj/w5bBV9bBKMGfWs94GGbKZ2PafiJeCtqfFJnUAIpVBTZ5cfUhQXERAHcketMvKUi1J8jKLSFc9I3uTjIVcIasjk0oe7WmCPnoikjl1qJZ/lVDH0cXHevjMuohxEyyka5jzC0ixCOkxyqOV2LOqSc6J5d0WSsSWqw0lDmY7vJAtqNtH6y6ghKZo5zdLQOsF2Bseg3oVejRNqYwfUsaDnfiRwJS/Rm/TmtXb4i6Jn8coYplDaJHtxQSXJ+KvOy6LS7M8X5sQ/UZSRqcwM647Yg4pwVWqDBcBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBBKBF7hTs0e0FKdRpFYbx4kgDB0xkNe78n6VLbUTHX7vTw6J+K/mxf+XCV95/EIkvBbWBYuU8ZMHNQKMEExmci4C4o=]
+stalwart::fallback_admin_password: ENC[PKCS7,MIIBmQYJKoZIhvcNAQcDoIIBijCCAYYCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAMp9wmIhRwj5kxfUcvc+/q/oUs/vBhSqP19ZfErM4vLDK20VOBTnPhSP2lfVh9pqO0c2hpWFeuqBWMynghO+HUBJfAn29Vrc8a9iSBxQ3XuF/uiRq1inOKCQpdsU18TyCrYV9AJFNf9U20JuUoav79m7EKLHS07PHAZ0osqIYy93eXdCFhwXAGHijp4wMMQz/5z1F1mZoSrc1cXe3y8iBeAvvjnRfpw14gOKZBjmEGUbo7AIyc3wax5hbOQYf/v+Hd90JarvAufxGytg9WKO20cChWYbmYDnIkytVt3vHdHf4RT8M635l6qwLr/70O1MdE7bkrVRKP8M3KLyH072pJTBcBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBDSJwptBDvPd0WpxiIovZsjgDBBwesNW+UNo4b0idhyqsyWL2rtO7wLStWHgUIvRFJACCrTKKqlu7sta6mhu/ZsnF0=]

hieradata/roles/infra/mail/backend.yaml (new file, 46 lines)
@@ -0,0 +1,46 @@
+---
+hiera_include:
+  - stalwart
+  - profiles::sql::postgresdb
+  - profiles::stalwart::haproxy
+
+# additional altnames
+profiles::pki::vault::alt_names:
+  - mail.main.unkin.net
+  - mail-webadmin.main.unkin.net
+  - main-in.main.unkin.net
+  - autoconfig.main.unkin.net
+  - autodiscovery.main.unkin.net
+
+# manage a pgsql database + user
+profiles::sql::postgresdb::cluster_name: "patroni-shared-%{facts.environment}"
+profiles::sql::postgresdb::dbname: stalwart
+profiles::sql::postgresdb::dbuser: stalwart
+
+# export backends to haproxy
+profiles::stalwart::haproxy::enable: true
+
+# Cluster role for node discovery
+stalwart::cluster_role: "%{facts.enc_role}"
+
+# PostgreSQL connection
+stalwart::postgresql_host: "master.%{hiera('profiles::sql::postgresdb::cluster_name')}.service.%{facts.country}-%{facts.region}.consul"
+stalwart::postgresql_database: "%{hiera('profiles::sql::postgresdb::dbname')}"
+stalwart::postgresql_user: "%{hiera('profiles::sql::postgresdb::dbuser')}"
+stalwart::postgresql_password: "%{hiera('profiles::sql::postgresdb::dbpass')}"
+
+# S3/Ceph-RGW connection
+stalwart::s3_endpoint: 'https://radosgw.service.consul'
+stalwart::s3_bucket: 'stalwart-maildata'
+stalwart::s3_region: "%{facts.region}"
+
+# Domains and relay
+stalwart::domains:
+  - 'mail.unkin.net'
+stalwart::postfix_relay_host: 'out-mta.main.unkin.net'
+stalwart::service_hostname: 'mail.main.unkin.net'
+stalwart::manage_dns_records: false
+
+## With load balancer:
+#stalwart::manage_dns_records: true
+#stalwart::loadbalancer_host: 'mail-lb.example.com'

hieradata/roles/infra/mail/gateway.yaml (new file, 52 lines)
@@ -0,0 +1,52 @@
+---
+
+# additional altnames
+profiles::pki::vault::alt_names:
+  - in-mta.main.unkin.net
+
+# base postfix configuration (passed to postfix class)
+postfix::relayhost: 'direct'
+postfix::myorigin: 'main.unkin.net'
+postfix::mydestination: 'blank'
+postfix::mynetworks: '127.0.0.0/8 [::1]/128'
+postfix::mta: true
+postfix::manage_aliases: true
+
+# profile parameters for customization
+profiles::postfix::gateway::myhostname: 'in-mta.main.unkin.net'
+
+# postfix map content (templates)
+profiles::postfix::gateway::relay_recipients_maps:
+  '@main.unkin.net': 'OK'
+
+profiles::postfix::gateway::relay_domains_maps:
+  'main.unkin.net': 'OK'
+
+profiles::postfix::gateway::postscreen_access_maps:
+  '127.0.0.1/32': 'permit'
+  '10.10.12.200/32': 'permit'
+
+profiles::postfix::gateway::helo_access_maps:
+  '.dynamic.': 'REJECT'
+  '.dialup.': 'REJECT'
+  'unknown': 'REJECT'
+  'localhost': 'REJECT You are not localhost'
+
+# postfix transports
+postfix::transports:
+  'main.unkin.net':
+    ensure: present
+    destination: 'relay'
+    nexthop: 'mail-in.main.unkin.net:25'
+
+# postfix virtuals
+postfix::virtuals:
+  'root':
+    ensure: present
+    destination: 'ben@main.unkin.net'
+  'postmaster':
+    ensure: present
+    destination: 'ben@main.unkin.net'
+  'abuse':
+    ensure: present
+    destination: 'ben@main.unkin.net'

@@ -11,6 +11,9 @@ profiles::metrics::grafana::db_name: "%{hiera('profiles::sql::postgresdb::dbname')}"
 profiles::metrics::grafana::db_user: "%{hiera('profiles::sql::postgresdb::dbuser')}"
 profiles::metrics::grafana::db_pass: "%{hiera('profiles::sql::postgresdb::dbpass')}"
 profiles::metrics::grafana::pgsql_backend: true
+profiles::metrics::grafana::plugins:
+  victoriametrics-logs-datasource:
+    ensure: present

 # additional altnames
 profiles::pki::vault::alt_names:

@@ -3,6 +3,16 @@ hiera_include:
   - vmcluster::vmagent

 vmcluster::vmagent::enable: true
+vmcluster::vmagent::static_targets:
+  vyos_node:
+    targets:
+      - '198.18.21.160:9100'
+    scrape_interval: '15s'
+    metrics_path: '/metrics'
+    scheme: 'http'
+    labels:
+      instance: 'syrtvm0001.main.unkin.net'
+      job: 'vyos_node'
 vmcluster::vmagent::options:
   tls: 'true'
   tlsCertFile: '/etc/pki/tls/vault/certificate.crt'

@@ -15,3 +15,4 @@ certbot::domains:
   - fafflix.unkin.net
   - git.unkin.net
   - grafana.unkin.net
+  - dashboard.ceph.unkin.net

@@ -1,3 +1,3 @@
 ---
 profiles::packages::include:
-  puppetserver: {}
+  openvox-server: {}

@@ -3,6 +3,41 @@ profiles::packages::include:
   createrepo: {}

 profiles::reposync::repos_list:
+  almalinux_9.7_baseos:
+    repository: 'baseos'
+    description: 'AlmaLinux 9.7 BaseOS'
+    osname: 'almalinux'
+    release: '9.7'
+    mirrorlist: 'https://mirrors.almalinux.org/mirrorlist/9.7/baseos'
+    gpgkey: 'http://mirror.aarnet.edu.au/pub/almalinux/RPM-GPG-KEY-AlmaLinux-9'
+  almalinux_9.7_appstream:
+    repository: 'appstream'
+    description: 'AlmaLinux 9.7 AppStream'
+    osname: 'almalinux'
+    release: '9.7'
+    mirrorlist: 'https://mirrors.almalinux.org/mirrorlist/9.7/appstream'
+    gpgkey: 'http://mirror.aarnet.edu.au/pub/almalinux/RPM-GPG-KEY-AlmaLinux-9'
+  almalinux_9.7_crb:
+    repository: 'crb'
+    description: 'AlmaLinux 9.7 CRB'
+    osname: 'almalinux'
+    release: '9.7'
+    mirrorlist: 'https://mirrors.almalinux.org/mirrorlist/9.7/crb'
+    gpgkey: 'http://mirror.aarnet.edu.au/pub/almalinux/RPM-GPG-KEY-AlmaLinux-9'
+  almalinux_9.7_ha:
+    repository: 'ha'
+    description: 'AlmaLinux 9.7 HighAvailability'
+    osname: 'almalinux'
+    release: '9.7'
+    mirrorlist: 'https://mirrors.almalinux.org/mirrorlist/9.7/highavailability'
+    gpgkey: 'http://mirror.aarnet.edu.au/pub/almalinux/RPM-GPG-KEY-AlmaLinux-9'
+  almalinux_9.7_extras:
+    repository: 'extras'
+    description: 'AlmaLinux 9.7 extras'
+    osname: 'almalinux'
+    release: '9.7'
+    mirrorlist: 'https://mirrors.almalinux.org/mirrorlist/9.7/extras'
+    gpgkey: 'http://mirror.aarnet.edu.au/pub/almalinux/RPM-GPG-KEY-AlmaLinux-9'
   almalinux_9.6_baseos:
     repository: 'baseos'
     description: 'AlmaLinux 9.6 BaseOS'

@@ -283,6 +318,20 @@ profiles::reposync::repos_list:
     release: 'rhel9'
     baseurl: 'https://download.postgresql.org/pub/repos/yum/17/redhat/rhel-9-x86_64/'
     gpgkey: 'https://download.postgresql.org/pub/repos/yum/keys/PGDG-RPM-GPG-KEY-RHEL'
+  rke2_common_el9:
+    repository: 'common'
+    description: 'RKE2 common RHEL 9'
+    osname: 'rke2'
+    release: "rhel9"
+    baseurl: "https://rpm.rancher.io/rke2/latest/common/centos/9/noarch"
+    gpgkey: "https://rpm.rancher.io/public.key"
+  rke2_1_33_el9:
+    repository: '1.33'
+    description: 'RKE2 1.33 RHEL 9'
+    osname: 'rke2'
+    release: "rhel9"
+    baseurl: "https://rpm.rancher.io/rke2/latest/1.33/centos/9/x86_64"
+    gpgkey: "https://rpm.rancher.io/public.key"
   zfs_dkms_rhel8:
     repository: 'dkms'
     description: 'ZFS DKMS RHEL 8'

@@ -4,7 +4,8 @@ profiles::vault::server::members_lookup: true
 profiles::vault::server::data_dir: /data/vault
 profiles::vault::server::manage_storage_dir: true
 profiles::vault::server::tls_disable: false
-vault::download_url: http://repos.main.unkin.net/unkin/8/x86_64/os/Archives/vault_1.15.5_linux_amd64.zip
+vault::package_name: openbao
+vault::package_ensure: latest

 # additional altnames
 profiles::pki::vault::alt_names:

@@ -16,6 +16,14 @@ class exporters::frr_exporter (
     ensure => installed,
   }
+
+  # ensure the frr_exporter user can read the directory
+  file { $socket_dir:
+    ensure => directory,
+    owner  => 'frr',
+    group  => 'frr',
+    mode   => '0751',
+  }

   # manage the user/group
   if $manage_user {
     group { $group:

modules/externaldns/manifests/init.pp (new file, 15 lines)
@@ -0,0 +1,15 @@
+# ExternalDNS BIND module - automatically configures master or slave
+class externaldns (
+  Stdlib::Fqdn $bind_master_hostname,
+  Array[Stdlib::Fqdn] $k8s_zones = [],
+  Array[Stdlib::Fqdn] $slave_servers = [],
+  String $externaldns_key_secret = '',
+  String $externaldns_key_algorithm = 'hmac-sha256',
+) {
+
+  if $trusted['certname'] == $bind_master_hostname {
+    include externaldns::master
+  } else {
+    include externaldns::slave
+  }
+}
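
A minimal usage sketch of the class above, assuming illustrative hostnames, zone, and lookup key (placeholders, not values from this changeset):

```puppet
# illustrative only: hostnames, zone, and the lookup key are placeholders
class { 'externaldns':
  bind_master_hostname      => 'ns1.example.net',
  slave_servers             => ['ns2.example.net', 'ns3.example.net'],
  k8s_zones                 => ['k8s.example.net'],
  externaldns_key_secret    => lookup('externaldns_key_secret'),
  externaldns_key_algorithm => 'hmac-sha256',
}
```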

modules/externaldns/manifests/master.pp (new file, 45 lines)
@@ -0,0 +1,45 @@
+# ExternalDNS BIND master server class
+class externaldns::master inherits externaldns {
+
+  include bind
+
+  # Query PuppetDB for slave server IP addresses
+  $slave_ips = $externaldns::slave_servers.map |$fqdn| {
+    puppetdb_query("inventory[facts.networking.ip] { certname = '${fqdn}' }")[0]['facts.networking.ip']
+  }.filter |$ip| { $ip != undef }
+
+  # Create TSIG key for ExternalDNS authentication
+  bind::key { 'externaldns-key':
+    algorithm => $externaldns::externaldns_key_algorithm,
+    secret    => $externaldns::externaldns_key_secret,
+  }
+
+  # Create ACL for slave servers
+  if !empty($slave_ips) {
+    bind::acl { 'dns-slaves':
+      addresses => $slave_ips,
+    }
+  }
+
+  # Create master zones for each Kubernetes domain
+  $externaldns::k8s_zones.each |$zone| {
+    bind::zone { $zone:
+      zone_type       => 'master',
+      dynamic         => true,
+      allow_updates   => ['key externaldns-key'],
+      allow_transfers => empty($slave_ips) ? {
+        true  => [],
+        false => ['dns-slaves'],
+      },
+      ns_notify       => !empty($slave_ips),
+      also_notify     => $slave_ips,
+      dnssec          => false,
+    }
+  }
+
+  # Create default view to include the zones
+  bind::view { 'externaldns':
+    recursion => false,
+    zones     => $externaldns::k8s_zones,
+  }
+}

modules/externaldns/manifests/slave.pp (new file, 36 lines)
@@ -0,0 +1,36 @@
+# ExternalDNS BIND slave server class
+class externaldns::slave inherits externaldns {
+
+  include bind
+
+  # Query PuppetDB for master server IP address
+  $query = "inventory[facts.networking.ip] { certname = '${externaldns::bind_master_hostname}' }"
+  $master_ip = puppetdb_query($query)[0]['facts.networking.ip']
+
+  # Create TSIG key for zone transfers (same as master)
+  bind::key { 'externaldns-key':
+    algorithm => $externaldns::externaldns_key_algorithm,
+    secret    => $externaldns::externaldns_key_secret,
+  }
+
+  # Create ACL for master server
+  bind::acl { 'dns-master':
+    addresses => [$master_ip],
+  }
+
+  # Create slave zones for each Kubernetes domain
+  $externaldns::k8s_zones.each |$zone| {
+    bind::zone { $zone:
+      zone_type    => 'slave',
+      masters      => [$master_ip],
+      allow_notify => ['dns-master'],
+      ns_notify    => false,
+    }
+  }
+
+  # Create default view to include the zones
+  bind::view { 'externaldns':
+    recursion => false,
+    zones     => $externaldns::k8s_zones,
+  }
+}

@@ -14,8 +14,18 @@ class frrouting (
   Optional[String] $mpls_ldp_router_id = undef,
   Optional[String] $mpls_ldp_transport_addr = undef,
   Array[String] $mpls_ldp_interfaces = [],
+  Boolean $ospf_preferred_source_enable = false,
+  Optional[Stdlib::IP::Address] $ospf_preferred_source = undef,
+  Boolean $ospf_exclude_k8s_enable = false,
+  Optional[Stdlib::IP::Address::V4::CIDR] $k8s_cluster_cidr = undef, # pod/cluster CIDR (e.g. 10.42.0.0/16)
+  Optional[Stdlib::IP::Address::V4::CIDR] $k8s_service_cidr = undef, # service CIDR (e.g. 10.43.0.0/16)
 ) {
+
+  # sanity check
+  if $ospf_exclude_k8s_enable and $k8s_cluster_cidr == undef and $k8s_service_cidr == undef {
+    warning('frrouting: ospf_exclude_k8s_enable is true but no k8s_*_cidr provided; nothing will be filtered.')
+  }

   $daemons_defaults = {
     'bgpd'  => false,
     'ospfd' => true,
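
A sketch of enabling the new parameters directly; the router ID here is a documentation-range placeholder, while the CIDRs match the rke2 defaults used elsewhere in this comparison:

```puppet
# sketch only: 192.0.2.1 is a placeholder address
class { 'frrouting':
  ospfd_router_id              => '192.0.2.1',
  ospf_preferred_source_enable => true,
  ospf_preferred_source        => '192.0.2.1',
  ospf_exclude_k8s_enable      => true,
  k8s_cluster_cidr             => '10.42.0.0/16',
  k8s_service_cidr             => '10.43.0.0/16',
}
```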

@@ -2,6 +2,7 @@
 frr defaults traditional
 hostname <%= @hostname %>
 no ipv6 forwarding
+
 <% @ospfd_interfaces.each do |iface, params| -%>
 interface <%= iface %>
 <% if params['area'] -%>

@@ -12,12 +13,31 @@ interface <%= iface %>
 <% end -%>
 exit
 <% end -%>
+
+<%# ---- K8s exclude (prefix-list + route-map) ---- -%>
+<% if @ospf_exclude_k8s_enable && (@k8s_cluster_cidr || @k8s_service_cidr) -%>
+! Do not redistribute Kubernetes Pod/Service CIDRs
+ip prefix-list K8S-BLOCK seq 5 permit <%= @k8s_cluster_cidr %> le 32<% if !@k8s_cluster_cidr %> ! (unset)<% end %>
+<% if @k8s_service_cidr -%>
+ip prefix-list K8S-BLOCK seq 10 permit <%= @k8s_service_cidr %> le 32
+<% end -%>
+route-map CONNECTED-NON-K8S deny 5
+ match ip address prefix-list K8S-BLOCK
+exit
+route-map CONNECTED-NON-K8S permit 100
+exit
+<% end -%>
+
 router ospf
  ospf router-id <%= @ospfd_router_id %>
  log-adjacency-changes detail
 <% @ospfd_redistribute.each do |type| -%>
+<% if @ospf_exclude_k8s_enable && type == 'connected' && (@k8s_cluster_cidr || @k8s_service_cidr) -%>
+ redistribute connected route-map CONNECTED-NON-K8S
+<% else -%>
  redistribute <%= type %>
+<% end -%>
 <% end -%>
 <% @ospfd_networks.each do |network| -%>
  network <%= network %>
 <% end -%>

@@ -31,6 +51,8 @@ router ospf
 mpls-te inter-as area 0.0.0.0
 <% end -%>
 exit
+
+<%# ---- MPLS/LDP config ---- -%>
 <% if @mpls_ldp_router_id and @mpls_ldp_transport_addr and @mpls_ldp_interfaces.any? -%>
 mpls ldp
  router-id <%= @mpls_ldp_router_id %>

@@ -43,3 +65,13 @@ mpls ldp
  exit-address-family
 exit
 <% end -%>
+
+<%# ---- Preferred OSPF source ---- -%>
+<% if @ospf_preferred_source_enable && @ospf_preferred_source -%>
+ip prefix-list ANY seq 5 permit 0.0.0.0/0 le 32
+route-map OSPF-SRC permit 10
+ match ip address prefix-list ANY
+ set src <%= @ospf_preferred_source %>
+exit
+ip protocol ospf route-map OSPF-SRC
+<% end -%>

modules/incus/lib/facter/incus_trust_list.rb (new file, 28 lines)
@@ -0,0 +1,28 @@
+# frozen_string_literal: true
+
+# lib/facter/incus_trust_list.rb
+require 'json'
+
+Facter.add(:incus_trust_list) do
+  confine do
+    # Only run on systems that have incus installed and running
+    incus_path = Facter::Util::Resolution.which('incus')
+    incus_path && File.exist?('/var/lib/incus/server.key')
+  end
+
+  setcode do
+    incus_path = Facter::Util::Resolution.which('incus')
+    next {} unless incus_path
+
+    begin
+      # Run incus config trust list --format=json
+      trust_output = Facter::Core::Execution.execute("#{incus_path} config trust list --format=json")
+      next {} if trust_output.empty?
+
+      # Parse the JSON output
+      JSON.parse(trust_output)
+    rescue StandardError
+      {}
+    end
+  end
+end

modules/incus/manifests/client.pp (new file, 16 lines)
@@ -0,0 +1,16 @@
+# incus::client
+#
+# This class configures a host as an incus client and exports its certificate
+# for automatic trust management on incus servers.
+#
+class incus::client {
+
+  # Export this client's certificate for collection by incus servers
+  @@incus::client_cert { $facts['networking']['fqdn']:
+    hostname    => $facts['networking']['fqdn'],
+    certificate => $facts['vault_cert_content'],
+    fingerprint => $facts['vault_cert_fingerprint'],
+    tag         => 'incus_client',
+  }
+
+}

modules/incus/manifests/client_cert.pp (new file, 41 lines)
@@ -0,0 +1,41 @@
+# Define the exported resource type for incus client certificates
+define incus::client_cert (
+  String $hostname,
+  Optional[String] $certificate = undef,
+  Optional[String] $fingerprint = undef,
+) {
+
+  # Only proceed if we have both certificate and fingerprint
+  if $certificate and $fingerprint {
+
+    $trust_list = $facts['incus_trust_list']
+    $existing_client = $trust_list.filter |$client| { $client['name'] == $hostname }
+
+    if $existing_client.empty {
+      # Add new certificate
+      exec { "incus_trust_add_${hostname}":
+        path    => ['/bin', '/usr/bin'],
+        command => "echo '${certificate}' > /tmp/${hostname}.crt && \
+                    incus config trust add-certificate /tmp/${hostname}.crt --name ${hostname} && \
+                    rm -f /tmp/${hostname}.crt",
+        unless  => "incus config trust list --format=json | grep '\"name\":\"${hostname}\"'",
+      }
+    } else {
+      # Check if fingerprints are different
+      $existing_fingerprint = $existing_client[0]['fingerprint']
+
+      if $existing_fingerprint != $fingerprint {
+        # Remove existing and add new certificate only if fingerprints differ
+        exec { "incus_trust_update_${hostname}":
+          path    => ['/bin', '/usr/bin'],
+          command => "incus config trust remove ${existing_fingerprint} && \
+                      echo '${certificate}' > /tmp/${hostname}.crt && \
+                      incus config trust add-certificate /tmp/${hostname}.crt --name ${hostname} && \
+                      rm -f /tmp/${hostname}.crt",
+          onlyif  => "incus config trust list --format=json | grep '${existing_fingerprint}'",
+        }
+      }
+      # If fingerprints match, do nothing (certificate is already correct)
+    }
+  }
+}
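
The define above is normally exported by incus::client (earlier in this comparison) and collected on servers; a hedged sketch of declaring it directly, with a placeholder certname:

```puppet
# hypothetical direct declaration; 'client01.example.net' is a placeholder
incus::client_cert { 'client01.example.net':
  hostname    => 'client01.example.net',
  certificate => $facts['vault_cert_content'],
  fingerprint => $facts['vault_cert_fingerprint'],
}
```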

@@ -21,6 +21,10 @@ class incus (
     enable     => true,
     hasstatus  => true,
     hasrestart => true,
+    subscribe  => [
+      File['/var/lib/incus/server.crt'],
+      File['/var/lib/incus/server.key'],
+    ],
   }

   file_line { 'subuid_root':

@@ -55,6 +59,22 @@ class incus (
     }
   }
+
+  file { '/var/lib/incus/server.crt':
+    ensure => file,
+    source => '/etc/pki/tls/vault/certificate.crt',
+    owner  => 'root',
+    group  => 'root',
+    mode   => '0644',
+  }
+
+  file { '/var/lib/incus/server.key':
+    ensure => file,
+    source => '/etc/pki/tls/vault/private.key',
+    owner  => 'root',
+    group  => 'root',
+    mode   => '0600',
+  }

   if $facts['incus'] and $facts['incus']['config'] {
     # set core.https_address
     if $facts['incus']['config']['core.https_address'] != "${server_addr}:${server_port}" {

@@ -72,5 +92,10 @@ class incus (
       }
     }
   }
+
+  # Collect exported client certificates and manage trust
+  Incus::Client_cert <<| tag == 'incus_client' |>> {
+    require => Service['incus'],
+  }
 }

modules/libs/lib/facter/vault_cert_content.rb (new file, 11 lines)
@@ -0,0 +1,11 @@
+# frozen_string_literal: true
+
+# lib/facter/vault_cert_content.rb
+
+Facter.add(:vault_cert_content) do
+  confine kernel: 'Linux'
+  setcode do
+    cert_path = '/etc/pki/tls/vault/certificate.crt'
+    File.read(cert_path) if File.exist?(cert_path) && File.readable?(cert_path)
+  end
+end

modules/libs/lib/facter/vault_cert_fingerprint.rb (new file, 23 lines)
@@ -0,0 +1,23 @@
+# frozen_string_literal: true
+
+# lib/facter/vault_cert_fingerprint.rb
+
+Facter.add(:vault_cert_fingerprint) do
+  confine kernel: 'Linux'
+  setcode do
+    require 'openssl'
+    require 'digest'
+
+    cert_path = '/etc/pki/tls/vault/certificate.crt'
+    if File.exist?(cert_path) && File.readable?(cert_path)
+      begin
+        cert_content = File.read(cert_path)
+        cert = OpenSSL::X509::Certificate.new(cert_content)
+        # Calculate SHA256 fingerprint like incus does
+        Digest::SHA256.hexdigest(cert.to_der)
+      rescue StandardError
+        nil
+      end
+    end
+  end
+end

modules/rke2/files/check_k8s_api.sh (new file, 2 lines)
@@ -0,0 +1,2 @@
+#!/usr/bin/bash
+/var/lib/rancher/rke2/bin/kubectl --kubeconfig=/etc/rancher/rke2/rke2.yaml get --raw /livez

modules/rke2/files/ingress-route-rancher.yaml (new file, 23 lines)
@@ -0,0 +1,23 @@
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: rancher
+  namespace: cattle-system
+  annotations:
+    kubernetes.io/ingress.class: nginx
+spec:
+  tls:
+    - hosts: [rancher.main.unkin.net]
+      secretName: tls-rancher
+  rules:
+    - host: rancher.main.unkin.net
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: rancher
+                port:
+                  number: 80

modules/rke2/files/purelb-config.yaml (new file, 45 lines)
@@ -0,0 +1,45 @@
+apiVersion: purelb.io/v1
+kind: LBNodeAgent
+metadata:
+  name: common
+  namespace: purelb
+spec:
+  local:
+    extlbint: kube-lb0
+    localint: default
+    sendgarp: false
+---
+apiVersion: purelb.io/v1
+kind: LBNodeAgent
+metadata:
+  name: dmz
+  namespace: purelb
+spec:
+  local:
+    extlbint: kube-lb0
+    localint: default
+    sendgarp: false
+---
+apiVersion: purelb.io/v1
+kind: ServiceGroup
+metadata:
+  name: dmz
+  namespace: purelb
+spec:
+  local:
+    v4pools:
+      - subnet: 198.18.199.0/24
+        pool: 198.18.199.0/24
+        aggregation: /32
+---
+apiVersion: purelb.io/v1
+kind: ServiceGroup
+metadata:
+  name: common
+  namespace: purelb
+spec:
+  local:
+    v4pools:
+      - subnet: 198.18.200.0/24
+        pool: 198.18.200.0/24
+        aggregation: /32

modules/rke2/files/rke2-canal-config.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
+apiVersion: helm.cattle.io/v1
+kind: HelmChartConfig
+metadata:
+  name: rke2-canal
+  namespace: kube-system
+spec:
+  valuesContent: |-
+    flannel:
+      iface: "loopback0"

modules/rke2/files/rke2-nginx-ingress-config.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
+---
+apiVersion: helm.cattle.io/v1
+kind: HelmChartConfig
+metadata:
+  name: rke2-ingress-nginx
+  namespace: kube-system
+spec:
+  valuesContent: |-
+    controller:
+      hostPort:
+        enabled: false
+      service:
+        enabled: true
+        type: LoadBalancer
+        externalTrafficPolicy: Local
+        loadBalancerClass: purelb.io/purelb
+        allocateLoadBalancerNodePorts: false
+        annotations:
+          purelb.io/service-group: common
+          purelb.io/addresses: "198.18.200.0"

modules/rke2/lib/facter/helm_repos.rb (new file, 15 lines)
@@ -0,0 +1,15 @@
+# frozen_string_literal: true
+
+require 'facter/util/helm'
+
+Facter.add(:helm_repos) do
+  confine kernel: 'Linux'
+  confine enc_role: [
+    'roles::infra::k8s::control',
+    'roles::infra::k8s::compute'
+  ]
+
+  setcode do
+    Facter::Util::Helm.get_helm_repos('/usr/bin/helm')
+  end
+end

modules/rke2/lib/facter/k8s_masters.rb (new file, 39 lines)
@@ -0,0 +1,39 @@
+# frozen_string_literal: true
+
+require 'json'
+require 'open3'
+
+Facter.add(:k8s_masters) do
+  confine do
+    File.exist?('/etc/rancher/rke2/rke2.yaml') &&
+      File.executable?('/usr/bin/kubectl')
+  end
+
+  setcode do
+    env = { 'KUBECONFIG' => '/etc/rancher/rke2/rke2.yaml' }
+    cmd = ['/usr/bin/kubectl', 'get', 'nodes', '-o', 'json']
+
+    stdout, stderr, status = Open3.capture3(env, *cmd)
+
+    if status.success?
+      json = JSON.parse(stdout)
+
+      master_count = json['items'].count do |item|
+        roles = item.dig('metadata', 'labels') || {}
+
+        # Look for well-known labels assigned to control-plane nodes
+        roles.any? do |key, _|
+          key =~ %r{node-role\.kubernetes\.io/(control-plane|master|etcd)}
+        end
+      end
+
+      master_count
+    else
+      Facter.debug("kubectl error: #{stderr}")
+      0
+    end
+  rescue StandardError => e
+    Facter.debug("Exception in k8s_masters fact: #{e.message}")
+    0
+  end
+end
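
Elsewhere in this changeset (rke2::config and rke2::helm) the fact gates bootstrap-only resources; simplified:

```puppet
# simplified from rke2::config / rke2::helm in this changeset
if $node_type == 'server' and $facts['k8s_masters'] and $facts['k8s_masters'] > 2 {
  # at least three control-plane nodes are registered, so it is safe to
  # lay down the purelb and rancher bootstrap manifests
}
```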

modules/rke2/lib/facter/k8s_namespaces.rb (new file, 29 lines)
@@ -0,0 +1,29 @@
+# frozen_string_literal: true
+
+require 'json'
+require 'open3'
+
+Facter.add(:k8s_namespaces) do
+  confine do
+    File.exist?('/etc/rancher/rke2/rke2.yaml') &&
+      File.executable?('/usr/bin/kubectl') # Adjust this path if needed
+  end
+
+  setcode do
+    env = { 'KUBECONFIG' => '/etc/rancher/rke2/rke2.yaml' }
+    cmd = ['/usr/bin/kubectl', 'get', 'namespaces', '-o', 'json']
+
+    stdout, stderr, status = Open3.capture3(env, *cmd)
+
+    if status.success?
+      json = JSON.parse(stdout)
+      json['items'].map { |item| item['metadata']['name'] }
+    else
+      Facter.debug("kubectl error: #{stderr}")
+      []
+    end
+  rescue StandardError => e
+    Facter.debug("Exception in k8s_namespaces fact: #{e.message}")
+    []
+  end
+end

modules/rke2/lib/facter/util/helm.rb (new file, 31 lines)
@@ -0,0 +1,31 @@
+# frozen_string_literal: true
+
+require 'facter'
+require 'json'
+
+# a simple helm module
+module Facter::Util::Helm
+  def self.get_helm_repos(helm_cmd)
+    return [] unless File.executable?(helm_cmd)
+
+    output = Facter::Core::Execution.execute(
+      "#{helm_cmd} repo list --output json --repository-config /etc/helm/repositories.yaml",
+      on_fail: nil
+    )
+    return [] if output.to_s.strip.empty?
+
+    parse_helm_output(output)
+  rescue StandardError => e
+    Facter.debug("helm_repos fact error: #{e}")
+    []
+  end
+
+  def self.parse_helm_output(output)
+    JSON.parse(output).map do |repo|
+      {
+        'name' => repo['name'],
+        'url' => repo['url']
+      }
+    end
+  end
+end

modules/rke2/manifests/config.pp (new file, 109 lines)
@@ -0,0 +1,109 @@
+# config rke2
+class rke2::config (
+  Enum['server', 'agent'] $node_type = $rke2::node_type,
+  Stdlib::Absolutepath $config_file = $rke2::config_file,
+  Hash $config_hash = $rke2::config_hash,
+  Stdlib::HTTPSUrl $join_url = $rke2::join_url,
+  Stdlib::Fqdn $bootstrap_node = $rke2::bootstrap_node,
+  String $node_token = $rke2::node_token,
+  Array[String[1]] $extra_config_files = $rke2::extra_config_files,
+) {
+
+  # if it's not the bootstrap node, add the join details to the config
+  if $node_type == 'server' {
+    if $trusted['certname'] != $bootstrap_node {
+      $config = merge($config_hash, {
+        server => $join_url,
+        token  => $node_token,
+      })
+    } else {
+      $config = merge($config_hash, {})
+    }
+  } elsif $node_type == 'agent' {
+    $config = merge($config_hash, {
+      server => $join_url,
+      token  => $node_token,
+    })
+  } else {
+    $config = $config_hash
+  }
+
+  # create the config file
+  file { $config_file:
+    ensure  => file,
+    content => Sensitive($config.to_yaml),
+    owner   => 'root',
+    group   => 'root',
+    mode    => '0644',
+    require => Package["rke2-${node_type}"],
+    before  => Service["rke2-${node_type}"],
+  }
+
+  # create a script to verify the k8s api is up (used by consul)
+  file { '/usr/local/bin/check_k8s_api.sh':
+    ensure => file,
+    owner  => 'root',
+    group  => 'root',
+    mode   => '0755',
+    source => 'puppet:///modules/rke2/check_k8s_api.sh',
+  }
+
+  # symlink kubectl to path
+  file { '/usr/bin/kubectl':
+    ensure  => link,
+    target  => '/var/lib/rancher/rke2/bin/kubectl',
+    require => Package["rke2-${node_type}"],
+  }
+
+  # when ProtectKernelDefaults=true
+  sysctl { 'vm.overcommit_memory':
+    value  => '1',
+    before => Service["rke2-${node_type}"],
+  }
+  sysctl { 'kernel.panic':
+    value  => '10',
+    before => Service["rke2-${node_type}"],
+  }
+
+  # on the controller nodes only
+  if $node_type == 'server' and $facts['k8s_masters'] and $facts['k8s_masters'] > 2 {
+
+    # wait for the purelb helm chart to set up its namespace
+    if 'purelb' in $facts['k8s_namespaces'] {
+      file { '/var/lib/rancher/rke2/server/manifests/purelb-config.yaml':
+        ensure  => file,
+        owner   => 'root',
+        group   => 'root',
+        mode    => '0644',
+        source  => 'puppet:///modules/rke2/purelb-config.yaml',
+        require => Service['rke2-server'],
+      }
+    }
+
+    # wait for the rancher helm chart to set up its namespace
+    if 'cattle-system' in $facts['k8s_namespaces'] {
+      file { '/var/lib/rancher/rke2/server/manifests/ingress-route-rancher.yaml':
+        ensure  => file,
+        owner   => 'root',
+        group   => 'root',
+        mode    => '0644',
+        source  => 'puppet:///modules/rke2/ingress-route-rancher.yaml',
+        require => Service['rke2-server'],
+      }
+    }
+
+    # manage extra config files (these are not dependent on helm)
+    $extra_config_files.each |$file| {
+      file { "/var/lib/rancher/rke2/server/manifests/${file}.yaml":
+        ensure  => file,
+        owner   => 'root',
+        group   => 'root',
+        mode    => '0644',
+        source  => "puppet:///modules/rke2/${file}.yaml",
+        require => Service['rke2-server'],
+      }
+    }
+
+  }
+}

modules/rke2/manifests/helm.pp (new file, 81 lines)
@@ -0,0 +1,81 @@
+# manage helm
+class rke2::helm (
+  Enum['server', 'agent'] $node_type = $rke2::node_type,
+  Stdlib::Fqdn $bootstrap_node = $rke2::bootstrap_node,
+  Boolean $helm_install = $rke2::helm_install,
+  Hash $helm_repos = $rke2::helm_repos
+) {
+
+  # when installing helm, manage the repos
+  if $helm_install {
+
+    package { 'helm':
+      ensure => installed,
+    }
+
+    file { '/etc/helm':
+      ensure => directory,
+      owner  => 'root',
+      group  => 'root',
+      mode   => '0755',
+    }
+
+    # on the controller nodes only, and after 3 master nodes exist
+    if $node_type == 'server' and $facts['k8s_masters'] and $facts['k8s_masters'] > 2 {
+
+      # check if the repo already exists
+      $helm_repos.each |String $repo, Stdlib::HTTPSUrl $url| {
+
+        # if the repo isn't in helm's repo list, add it
+        if ! $facts['helm_repos'].any |$existing| { $existing['name'] == $repo } {
+
+          exec { "helm_add_repo_${repo}":
+            command     => "helm repo add ${repo} ${url} --repository-config /etc/helm/repositories.yaml",
+            path        => ['/usr/bin'],
+            environment => [
+              'KUBECONFIG=/etc/rancher/rke2/rke2.yaml',
+            ],
+          }
+        }
+      }
+
+      # install specific helm charts to bootstrap the environment
+      $plb_cmd = 'helm install purelb purelb/purelb \
+        --create-namespace \
+        --namespace=purelb \
+        --repository-config /etc/helm/repositories.yaml'
+      exec { 'install_purelb':
+        command     => $plb_cmd,
+        path        => ['/usr/bin', '/bin'],
+        environment => ['KUBECONFIG=/etc/rancher/rke2/rke2.yaml'],
+        unless      => 'helm list -n purelb | grep -q ^purelb',
+      }
+
+      $cm_cmd = 'helm install cert-manager jetstack/cert-manager \
+        --namespace cert-manager \
+        --create-namespace \
+        --set crds.enabled=true \
+        --repository-config /etc/helm/repositories.yaml'
+      exec { 'install_cert_manager':
+        command     => $cm_cmd,
+        path        => ['/usr/bin', '/bin'],
+        environment => ['KUBECONFIG=/etc/rancher/rke2/rke2.yaml'],
+        unless      => 'helm list -n cert-manager | grep -q ^cert-manager',
+      }
+
+      $r_cmd = 'helm install rancher rancher-stable/rancher \
+        --namespace cattle-system \
+        --create-namespace \
+        --set hostname=rancher.main.unkin.net \
+        --set bootstrapPassword=admin \
+        --set ingress.tls.source=secret \
+        --repository-config /etc/helm/repositories.yaml'
+      exec { 'install_rancher':
+        command     => $r_cmd,
+        path        => ['/usr/bin', '/bin'],
+        environment => ['KUBECONFIG=/etc/rancher/rke2/rke2.yaml'],
+        unless      => 'helm list -n cattle-system | grep -q ^rancher',
+      }
+    }
+  }
+}
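
The bootstrap installs above reference the purelb, jetstack, and rancher-stable repos, which are expected to arrive via $helm_repos. A hedged sketch of feeding them in; the jetstack and rancher URLs are the well-known upstream chart repos, and the purelb URL is a placeholder (neither is confirmed by this diff):

```puppet
class { 'rke2::helm':
  helm_install => true,
  helm_repos   => {
    'purelb'         => 'https://charts.example.com/purelb',  # placeholder
    'jetstack'       => 'https://charts.jetstack.io',
    'rancher-stable' => 'https://releases.rancher.com/server-charts/stable',
  },
}
```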

modules/rke2/manifests/init.pp (new file, 23 lines)
@@ -0,0 +1,23 @@
+# manage rke2
+class rke2 (
+  Enum['server', 'agent'] $node_type = $rke2::params::node_type,
+  String $rke2_version = $rke2::params::rke2_version,
+  String $rke2_release = $rke2::params::rke2_release,
+  Stdlib::Absolutepath $config_file = $rke2::params::config_file,
+  Hash $config_hash = $rke2::params::config_hash,
+  Stdlib::HTTPSUrl $join_url = $rke2::params::join_url,
+  Stdlib::Fqdn $bootstrap_node = $rke2::params::bootstrap_node,
+  String $node_token = $rke2::params::node_token,
+  Boolean $helm_install = $rke2::params::helm_install,
+  Hash $helm_repos = $rke2::params::helm_repos,
+  Array[String[1]] $extra_config_files = $rke2::params::extra_config_files,
+  Stdlib::HTTPUrl $container_archive_source = $rke2::params::container_archive_source,
+) inherits rke2::params {
+
+  include rke2::install
+  include rke2::config
+  include rke2::service
+  include rke2::helm
+
+  Class['rke2::install'] -> Class['rke2::service'] -> Class['rke2::helm']
+}
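
A minimal server-side declaration as a sketch; the bootstrap node mirrors the hieradata added earlier in this comparison, the join URL assumes the join.k8s.unkin.net tls-san, and the token is a placeholder:

```puppet
class { 'rke2':
  node_type      => 'server',
  bootstrap_node => 'prodnxsr0001.main.unkin.net',
  join_url       => 'https://join.k8s.unkin.net:9345',  # assumed from the tls-san
  node_token     => 'PLACEHOLDER-TOKEN',
  helm_install   => true,
}
```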

modules/rke2/manifests/install.pp (new file, 53 lines)
@@ -0,0 +1,53 @@
+# install rke2
+class rke2::install (
+  Enum['server', 'agent'] $node_type = $rke2::node_type,
+  String $rke2_version = $rke2::rke2_version,
+  String $rke2_release = $rke2::rke2_release,
+  Stdlib::HTTPUrl $container_archive_source = $rke2::container_archive_source,
+) {
+
+  # versionlock rke2
+  yum::versionlock { "rke2-${node_type}":
+    ensure  => present,
+    version => "${rke2_version}~${rke2_release}",
+  }
+
+  # install rke2
+  package { "rke2-${node_type}":
+    ensure => "${rke2_version}~${rke2_release}",
+  }
+
+  # ensure the images path exists
+  file { ['/var/lib/rancher/rke2/agent', '/var/lib/rancher/rke2/agent/images']:
+    ensure  => 'directory',
+    owner   => 'root',
+    group   => 'root',
+    mode    => '0750',
+    require => Package["rke2-${node_type}"],
+    before  => Service["rke2-${node_type}"],
+  }
+
+  # download the required archive of containers
+  archive { '/var/lib/rancher/rke2/agent/images/rke2-images.linux-amd64.tar.zst':
+    ensure  => present,
+    source  => "https://github.com/rancher/rke2/releases/download/v${rke2_version}%2B${rke2_release}/rke2-images.linux-amd64.tar.zst",
+    require => [
+      Package["rke2-${node_type}"],
+      File['/var/lib/rancher/rke2/agent/images'],
+    ],
+    before  => Service["rke2-${node_type}"],
+  }
+
+  # ensure the images cache file exists
+  file { '/var/lib/rancher/rke2/agent/images/.cache.json':
+    ensure  => file,
+    owner   => 'root',
+    group   => 'root',
+    mode    => '0644',
+    require => [
+      Package["rke2-${node_type}"],
+      File['/var/lib/rancher/rke2/agent/images'],
+    ],
+    before  => Service["rke2-${node_type}"],
+  }
+}

modules/rke2/manifests/params.pp (new file, 15 lines)
@@ -0,0 +1,15 @@
+# rke2 params
+class rke2::params (
+  Enum['server', 'agent'] $node_type = 'agent',
+  String $rke2_version = '1.33.4',
+  String $rke2_release = 'rke2r1',
+  Stdlib::Absolutepath $config_file = '/etc/rancher/rke2/config.yaml',
+  Hash $config_hash = {},
+  Stdlib::HTTPSUrl $join_url = 'https://127.0.0.1:9345',
+  Stdlib::Fqdn $bootstrap_node = 'localhost.localdomain',
+  String $node_token = '',
+  Boolean $helm_install = false,
+  Hash $helm_repos = {},
+  Array[String[1]] $extra_config_files = [],
+  Stdlib::HTTPUrl $container_archive_source = 'https://github.com/rancher/rke2/releases/download',
+) {}

modules/rke2/manifests/service.pp (new file, 13 lines)
@@ -0,0 +1,13 @@
+# manage rke2 service
+class rke2::service (
+  Enum['server', 'agent'] $node_type = $rke2::node_type,
+  Stdlib::Absolutepath $config_file = $rke2::config_file,
+) {
+
+  service { "rke2-${node_type}":
+    ensure    => true,
+    enable    => true,
+    subscribe => File[$config_file],
+  }
+
+}

modules/stalwart/README.md (new file, 230 lines)
@@ -0,0 +1,230 @@
+# Stalwart Mail Server Module
+
+This Puppet module manages Stalwart Mail Server, a modern, secure, and scalable mail server implementation that supports IMAP, JMAP, WebDAV, and SMTP protocols.
+
+## Overview
+
+The `stalwart` module provides a comprehensive solution for deploying Stalwart Mail Server in a clustered environment with:
+
+- **PostgreSQL backend** for data, full-text search, and in-memory storage
+- **S3/Ceph-RGW backend** for blob storage (emails, attachments, sieve scripts)
+- **Automatic cluster discovery** using `query_nodes()`
+- **DNS autodiscovery records** for email client configuration
+- **TLS certificate management** integration
+- **Postfix relay integration** for SMTP routing
+
+## Features
+
+- ✅ **Multi-node clustering** with peer-to-peer coordination
+- ✅ **PostgreSQL authentication** with SQL directory backend
+- ✅ **S3 blob storage** with compression support
+- ✅ **IMAP/IMAPS protocols** for email access
+- ✅ **HTTP/HTTPS protocols** for JMAP, WebDAV, and autodiscovery
+- ✅ **SMTP relay** for postfix integration
+- ✅ **DNS autodiscovery** record management
+- ✅ **Automatic role distribution** across cluster nodes
+- ✅ **TLS security** with Vault PKI integration
+
+## Requirements
+
+- **Puppet 6+** with `query_nodes()` function support
+- **Stalwart RPM package** (creates user, directories, systemd service)
+- **PostgreSQL cluster** for data storage
+- **S3-compatible storage** (Ceph-RGW, MinIO, AWS S3)
+- **DNS management** via `profiles::dns::record`
+- **PKI management** via `profiles::pki::vault::alt_names`
+
+## Usage
+
+### Recommended Usage with Role
+
+The recommended way to use this module is via the `roles::infra::mail::backend` role with hieradata configuration:
+
+```puppet
+include roles::infra::mail::backend
+```
+
+Configure all parameters in `hieradata/roles/infra/mail/backend.yaml` - see `examples/role-hieradata.yaml` for a complete example.
+
+### Direct Class Usage
+
+```puppet
+class { 'stalwart':
+  node_id             => 1,
+  cluster_role        => 'mail-backend',
+  postgresql_host     => 'pgsql.example.com',
+  postgresql_database => 'stalwart',
+  postgresql_user     => 'stalwart',
+  postgresql_password => Sensitive('secretpassword'),
+  s3_endpoint         => 'https://ceph-rgw.example.com',
+  s3_bucket           => 'stalwart-blobs',
+  s3_access_key       => 'accesskey',
+  s3_secret_key       => Sensitive('secretkey'),
+  domains             => ['example.com'],
+  postfix_relay_host  => 'postfix.example.com',
+}
+```
+
+## Hieradata Configuration
+
+See `examples/role-hieradata.yaml` for a complete example of role-based hieradata configuration.
+
+### Required Parameters
+
+```yaml
+# Cluster role for node discovery
+stalwart::cluster_role: 'mail-backend'
+
+# Optional: Unique node identifier (auto-calculated if not specified)
+# stalwart::node_id: 1
+
+# PostgreSQL connection
+stalwart::postgresql_host: 'pgsql.example.com'
+stalwart::postgresql_database: 'stalwart'
+stalwart::postgresql_user: 'stalwart'
+stalwart::postgresql_password: >
+  ENC[PKCS7,encrypted_password...]
+
+# S3/Ceph-RGW connection
+stalwart::s3_endpoint: 'https://ceph-rgw.example.com'
+stalwart::s3_bucket: 'stalwart-blobs'
+stalwart::s3_access_key: 'access_key'
+stalwart::s3_secret_key: >
+  ENC[PKCS7,encrypted_secret...]
+
+# Domains and relay
+stalwart::domains:
+  - 'example.com'
+stalwart::postfix_relay_host: 'postfix.example.com'
+```
+
+## Architecture
+
+### Cluster Setup
+
+The module automatically discovers cluster members using `query_nodes()` based on:
+- `enc_role` matching `cluster_role` parameter
+- `country` fact matching the node's country fact
+- `region` fact matching the node's region fact
+
+**Node ID Assignment:**
+- Node IDs are **automatically extracted** from the last 4 digits of the hostname
+- Example: `ausyd1nxvm1234` → node ID `1234`
+- Manual override available via `stalwart::node_id` parameter if needed
+- Hostname must end with 4 digits for automatic extraction to work
+- Ensures unique IDs when following consistent hostname patterns
+
|
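The extraction itself happens in the main `stalwart` class (shown later in this diff); a minimal standalone sketch of the same idea:

```puppet
# Minimal sketch of the node-ID extraction performed by the stalwart class.
# Assumes the site convention that hostnames end in 4 digits.
$hostname = $facts['networking']['hostname']   # e.g. 'ausyd1nxvm1234'

if $hostname =~ /(\d{4})$/ {
  $node_id = Integer($1)                       # => 1234
} else {
  fail("Hostname '${hostname}' does not end in 4 digits; set stalwart::node_id manually.")
}
```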
### Storage Layout

- **Data Store**: PostgreSQL (metadata, folders, settings)
- **Full-Text Search**: PostgreSQL (search indexes)
- **In-Memory Store**: PostgreSQL (caching, sessions)
- **Blob Store**: S3/Ceph-RGW (emails, attachments, files)

### Directory Structure (Created by RPM)

- **Config**: `/opt/stalwart/etc/config.toml`
- **Data**: `/var/lib/stalwart/` (queue, reports)
- **Logs**: `/var/log/stalwart/stalwart.log`
- **Binary**: `/opt/stalwart/bin/stalwart`
- **User**: `stalwart:stalwart` (system user)

### Network Ports

- **143**: IMAP (STARTTLS)
- **993**: IMAPS (implicit TLS)
- **443**: HTTPS (JMAP, WebDAV, autodiscovery)
- **2525**: SMTP relay (postfix communication)
- **11200**: Cluster coordination (peer-to-peer)
- **9090**: Prometheus metrics

### DNS Records

When `manage_dns_records: true`, the module creates:

- `autoconfig.domain.com` → server FQDN (Thunderbird)
- `autodiscover.domain.com` → server FQDN (Outlook)
- `_imap._tcp.domain.com` SRV record
- `_imaps._tcp.domain.com` SRV record
- `_caldav._tcp.domain.com` SRV record
- `_carddav._tcp.domain.com` SRV record

## PostgreSQL Schema

The module expects these tables in the PostgreSQL database:

```sql
CREATE TABLE accounts (
    name TEXT PRIMARY KEY,
    secret TEXT,
    description TEXT,
    type TEXT NOT NULL,
    quota INTEGER DEFAULT 0,
    active BOOLEAN DEFAULT true
);

CREATE TABLE group_members (
    name TEXT NOT NULL,
    member_of TEXT NOT NULL,
    PRIMARY KEY (name, member_of)
);

CREATE TABLE emails (
    name TEXT NOT NULL,
    address TEXT NOT NULL,
    type TEXT,
    PRIMARY KEY (name, address)
);
```
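Accounts can be seeded straight into these tables from Puppet as well; a hedged sketch using `postgresql_psql` from puppetlabs-postgresql (the account name and crypt hash below are placeholders, not real values):

```puppet
# Illustrative only: seed one mailbox account into the schema above.
# Assumes puppetlabs-postgresql is available and 'secret' holds a crypt hash.
postgresql_psql { 'seed stalwart account jdoe':
  db      => 'stalwart',
  command => "INSERT INTO accounts (name, secret, type, active)
              VALUES ('jdoe', '\$6\$placeholderhash', 'individual', true)",
  unless  => "SELECT 1 FROM accounts WHERE name = 'jdoe'",
}
```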
## Security

- **TLS required** for all connections
- **PostgreSQL SSL** enabled by default
- **S3 HTTPS** endpoints required
- **Password hashing** supported (SHA512, BCRYPT, etc.)
- **Certificate management** via Vault PKI

### Fallback Administrator

Stalwart includes a fallback administrator account for initial setup and emergency access:

- **Default username**: `admin` (configurable via `stalwart::fallback_admin_user`)
- **Default password**: `admin` (configurable via `stalwart::fallback_admin_password`)
- **Purpose**: initial server configuration and emergency access when directory services are unavailable
- **Security**: the password is automatically hashed using the SHA-512 crypt format

**Important**: Change the default password in production by setting different hieradata values:

```yaml
stalwart::fallback_admin_password: "your-secure-password"
```

The fallback admin should only be used for initial setup and emergencies. Create regular admin accounts in PostgreSQL for day-to-day management.

## Monitoring

- **Prometheus metrics** on port 9090
- **Log files** in `/var/log/stalwart/`
- **Queue monitoring** in `/var/lib/stalwart/queue/`
- **Service status** via systemd (`stalwart.service`)

## Troubleshooting

### Cluster Formation Issues

- Verify `query_nodes()` returns the expected nodes (see the sketch after this list)
- Check that the `country` and `region` facts are consistent across nodes
- Ensure `cluster_role` matches across all nodes
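A quick way to eyeball the discovery query is to run the same `query_nodes()` call from a scratch manifest; a minimal sketch, assuming the PuppetDB query functions are installed (role, country, and region values are examples):

```puppet
# Throwaway manifest: print what the stalwart class would discover.
$members = query_nodes(
  "enc_role='mail-backend' and country='au' and region='syd1'",
  'networking.fqdn',
)
notify { "discovered cluster members: ${members.join(', ')}": }
```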
### Storage Connection Issues

- Test PostgreSQL connectivity and credentials
- Verify S3 endpoint accessibility and credentials
- Check network connectivity between nodes

### TLS Certificate Issues

- Ensure the PKI alt_names include all required domains
- Verify the certificate paths exist and are readable
- Check certificate expiration dates

## License

This module is part of the internal infrastructure management system.
57  modules/stalwart/examples/hieradata.yaml  Normal file
@ -0,0 +1,57 @@
# Example hieradata for profiles::mail::stalwart
# This shows the required and optional parameters for Stalwart configuration

# Required: Unique node ID for each server in the cluster (1, 2, 3, etc.)
profiles::mail::stalwart::node_id: 1

# Required: Cluster role name for query_nodes() discovery
profiles::mail::stalwart::cluster_role: 'mail-backend'

# Required: PostgreSQL connection settings
profiles::mail::stalwart::postgresql_host: 'pgsql.example.com'
profiles::mail::stalwart::postgresql_port: 5432
profiles::mail::stalwart::postgresql_database: 'stalwart'
profiles::mail::stalwart::postgresql_user: 'stalwart'
profiles::mail::stalwart::postgresql_password: >
  ENC[PKCS7,MIIBiQYJKoZIhvcNAQcDoIIBejCCAXYCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAxample...]
profiles::mail::stalwart::postgresql_ssl: true

# Required: S3/Ceph-RGW connection settings
profiles::mail::stalwart::s3_endpoint: 'https://ceph-rgw.example.com'
profiles::mail::stalwart::s3_bucket: 'stalwart-blobs'
profiles::mail::stalwart::s3_region: 'default'
profiles::mail::stalwart::s3_access_key: 'stalwart_access_key'
profiles::mail::stalwart::s3_secret_key: >
  ENC[PKCS7,MIIBiQYJKoZIhvcNAQcDoIIBejCCAXYCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAxample...]
profiles::mail::stalwart::s3_key_prefix: 'stalwart/'

# Required: Domains this mail backend serves
profiles::mail::stalwart::domains:
  - 'example.com'
  - 'mail.example.com'

# Required: Postfix relay host for SMTP delivery
profiles::mail::stalwart::postfix_relay_host: 'postfix.example.com'

# Optional: Protocol configuration (defaults shown)
profiles::mail::stalwart::enable_imap: true
profiles::mail::stalwart::enable_imap_tls: true
profiles::mail::stalwart::enable_http: true
profiles::mail::stalwart::enable_smtp_relay: true

# Optional: Management settings
profiles::mail::stalwart::manage_dns_records: true
profiles::mail::stalwart::log_level: 'info'

# Optional: TLS certificate paths (defaults shown)
profiles::mail::stalwart::tls_cert: '/etc/pki/tls/vault/certificate.crt'
profiles::mail::stalwart::tls_key: '/etc/pki/tls/vault/private.key'

# Example PKI alt_names configuration for TLS certificates
# This should include all domains and hostnames that need certificates
profiles::pki::vault::alt_names:
  mail-backend:
    - 'imap.example.com'
    - 'mail.example.com'
    - 'autoconfig.example.com'
    - 'autodiscover.example.com'
58  modules/stalwart/examples/role-hieradata.yaml  Normal file
@ -0,0 +1,58 @@
# Example hieradata for roles::infra::mail::backend
# Place this in: hieradata/roles/infra/mail/backend.yaml

# Stalwart module configuration - all parameters passed directly to the module
# stalwart::node_id: 1234  # Optional - automatically extracted from last 4 digits of hostname
stalwart::cluster_role: 'mail-backend'

# PostgreSQL connection settings
stalwart::postgresql_host: 'pgsql.example.com'
stalwart::postgresql_port: 5432
stalwart::postgresql_database: 'stalwart'
stalwart::postgresql_user: 'stalwart'
stalwart::postgresql_password: >
  ENC[PKCS7,MIIBiQYJKoZIhvcNAQcDoIIBejCCAXYCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAxample...]
stalwart::postgresql_ssl: true

# S3/Ceph-RGW connection settings
stalwart::s3_endpoint: 'https://ceph-rgw.example.com'
stalwart::s3_bucket: 'stalwart-blobs'
stalwart::s3_region: 'default'
stalwart::s3_access_key: 'stalwart_access_key'
stalwart::s3_secret_key: >
  ENC[PKCS7,MIIBiQYJKoZIhvcNAQcDoIIBejCCAXYCAQAxggEhMIIBHQIBADAFMAACAQEwDQYJKoZIhvcNAQEBBQAEggEAxample...]
stalwart::s3_key_prefix: 'stalwart/'

# Domains this mail backend serves
stalwart::domains:
  - 'example.com'
  - 'mail.example.com'

# Postfix relay host for SMTP delivery
stalwart::postfix_relay_host: 'postfix.example.com'

# Optional protocol configuration (defaults shown)
stalwart::enable_imap: true
stalwart::enable_imap_tls: true
stalwart::enable_http: true
stalwart::enable_smtp_relay: true

# Optional management settings
stalwart::manage_dns_records: true
stalwart::log_level: 'info'

# Optional TLS certificate paths (defaults work with profiles::pki::vault)
# stalwart::tls_cert: '/etc/pki/tls/vault/certificate.crt'
# stalwart::tls_key: '/etc/pki/tls/vault/private.key'

# Optional path overrides (the RPM package sets up these defaults)
# stalwart::config_dir: '/opt/stalwart/etc'
# stalwart::data_dir: '/var/lib/stalwart'

# PKI alt_names configuration for TLS certificates
# This should include all domains and hostnames that need certificates
profiles::pki::vault::alt_names:
  - 'imap.example.com'
  - 'mail.example.com'
  - 'autoconfig.example.com'
  - 'autodiscover.example.com'
84  modules/stalwart/manifests/config.pp  Normal file
@ -0,0 +1,84 @@
# @summary Manages Stalwart Mail Server configuration
#
# @api private
class stalwart::config {
  assert_private()

  # Create base directories (package creates user/group and base dirs)
  file { [$stalwart::config_dir, $stalwart::data_dir, $stalwart::webadmin_unpack_path]:
    ensure => directory,
    owner  => 'stalwart',
    group  => 'stalwart',
    mode   => '0750',
  }

  # Ensure log directory exists
  file { '/var/log/stalwart':
    ensure => directory,
    owner  => 'stalwart',
    group  => 'stalwart',
    mode   => '0755',
  }

  # Main configuration file
  file { "${stalwart::config_dir}/config.toml":
    ensure  => file,
    owner   => 'stalwart',
    group   => 'stalwart',
    mode    => '0640',
    content => epp('stalwart/config.toml.epp', {
      'cluster_size'            => $stalwart::cluster_size,
      'other_cluster_members'   => $stalwart::other_cluster_members,
      'haproxy_ips'             => $stalwart::haproxy_ips,
      'effective_node_id'       => $stalwart::effective_node_id,
      'bind_address'            => $stalwart::bind_address,
      'advertise_address'       => $stalwart::advertise_address,
      'postgresql_host'         => $stalwart::postgresql_host,
      'postgresql_port'         => $stalwart::postgresql_port,
      'postgresql_database'     => $stalwart::postgresql_database,
      'postgresql_user'         => $stalwart::postgresql_user,
      'postgresql_password'     => $stalwart::postgresql_password.unwrap,
      'postgresql_ssl'          => $stalwart::postgresql_ssl,
      's3_endpoint'             => $stalwart::s3_endpoint,
      's3_bucket'               => $stalwart::s3_bucket,
      's3_region'               => $stalwart::s3_region,
      's3_access_key'           => $stalwart::s3_access_key,
      's3_secret_key'           => $stalwart::s3_secret_key.unwrap,
      's3_key_prefix'           => $stalwart::s3_key_prefix,
      'domains'                 => $stalwart::domains,
      'postfix_relay_host'      => $stalwart::postfix_relay_host,
      'enable_imap'             => $stalwart::enable_imap,
      'enable_imap_tls'         => $stalwart::enable_imap_tls,
      'enable_http'             => $stalwart::enable_http,
      'enable_smtp_submission'  => $stalwart::enable_smtp_submission,
      'data_dir'                => $stalwart::data_dir,
      'tls_cert'                => $stalwart::tls_cert,
      'tls_key'                 => $stalwart::tls_key,
      'log_level'               => $stalwart::log_level,
      'service_hostname'        => $stalwart::service_hostname,
      'fallback_admin_user'     => $stalwart::fallback_admin_user,
      'fallback_admin_password' => $stalwart::fallback_admin_password,
      'webadmin_unpack_path'    => $stalwart::webadmin_unpack_path,
      'webadmin_resource_url'   => $stalwart::webadmin_resource_url,
      'webadmin_auto_update'    => $stalwart::webadmin_auto_update,
      'node_facts'              => $facts,
    }),
    notify  => Service['stalwart'],
  }

  # Create directories for storage
  file { "${stalwart::data_dir}/queue":
    ensure => directory,
    owner  => 'stalwart',
    group  => 'stalwart',
    mode   => '0750',
  }

  file { "${stalwart::data_dir}/reports":
    ensure => directory,
    owner  => 'stalwart',
    group  => 'stalwart',
    mode   => '0750',
  }

}
67  modules/stalwart/manifests/dns.pp  Normal file
@ -0,0 +1,67 @@
# @summary Manages DNS autodiscovery records for Stalwart
#
# @param target_host
#   FQDN to point DNS records to (defaults to the current server)
#
# @api private
class stalwart::dns (
  Stdlib::Fqdn $target_host = $facts['networking']['fqdn'],
) {
  assert_private()

  # Create autodiscovery DNS records for each domain
  $stalwart::domains.each |$domain| {

    # Autoconfig record for Thunderbird/Mozilla clients
    profiles::dns::record { "autoconfig_${domain}":
      record => "autoconfig.${domain}",
      type   => 'CNAME',
      value  => "${target_host}.",
      zone   => $domain,
      order  => 100,
    }

    # Autodiscover record for Outlook/Microsoft clients
    profiles::dns::record { "autodiscover_${domain}":
      record => "autodiscover.${domain}",
      type   => 'CNAME',
      value  => "${target_host}.",
      zone   => $domain,
      order  => 101,
    }

    # IMAP SRV records
    profiles::dns::record { "imap_srv_${domain}":
      record => "_imap._tcp.${domain}",
      type   => 'SRV',
      value  => "10 1 143 ${target_host}.",
      zone   => $domain,
      order  => 102,
    }

    profiles::dns::record { "imaps_srv_${domain}":
      record => "_imaps._tcp.${domain}",
      type   => 'SRV',
      value  => "10 1 993 ${target_host}.",
      zone   => $domain,
      order  => 103,
    }

    # CalDAV and CardDAV SRV records
    profiles::dns::record { "caldav_srv_${domain}":
      record => "_caldav._tcp.${domain}",
      type   => 'SRV',
      value  => "10 1 443 ${target_host}.",
      zone   => $domain,
      order  => 104,
    }

    profiles::dns::record { "carddav_srv_${domain}":
      record => "_carddav._tcp.${domain}",
      type   => 'SRV',
      value  => "10 1 443 ${target_host}.",
      zone   => $domain,
      order  => 105,
    }
  }
}
245  modules/stalwart/manifests/init.pp  Normal file
@ -0,0 +1,245 @@
# @summary Main class for managing Stalwart Mail Server
#
# This class provides a comprehensive setup of Stalwart Mail Server with
# clustering, authentication, storage, and protocol support.
#
# @example Basic Stalwart setup
#   class { 'stalwart':
#     node_id             => 1,
#     postgresql_host     => 'pgsql.example.com',
#     postgresql_database => 'stalwart',
#     postgresql_user     => 'stalwart',
#     postgresql_password => Sensitive('secretpassword'),
#     s3_endpoint         => 'https://ceph-rgw.example.com',
#     s3_bucket           => 'stalwart-blobs',
#     s3_access_key       => 'accesskey',
#     s3_secret_key       => Sensitive('secretkey'),
#     domains             => ['example.com'],
#     postfix_relay_host  => 'postfix.example.com',
#   }
#
# @param node_id
#   Unique identifier for this node in the cluster (1-N). If not specified,
#   it is calculated automatically from the last 4 digits of the hostname.
#
# @param cluster_role
#   Role name for cluster member discovery via query_nodes()
#
# @param postgresql_host
#   PostgreSQL server hostname/IP
#
# @param postgresql_port
#   PostgreSQL server port
#
# @param postgresql_database
#   PostgreSQL database name
#
# @param postgresql_user
#   PostgreSQL username
#
# @param postgresql_password
#   PostgreSQL password (Sensitive)
#
# @param postgresql_ssl
#   Enable SSL/TLS for PostgreSQL connections
#
# @param s3_endpoint
#   S3/Ceph-RGW endpoint URL
#
# @param s3_bucket
#   S3 bucket name for blob storage
#
# @param s3_region
#   S3 region
#
# @param s3_access_key
#   S3 access key
#
# @param s3_secret_key
#   S3 secret key (Sensitive)
#
# @param s3_key_prefix
#   S3 key prefix for stalwart objects
#
# @param domains
#   Array of domains this server handles
#
# @param postfix_relay_host
#   Postfix relay host for SMTP delivery
#
# @param bind_address
#   IP address to bind services to
#
# @param advertise_address
#   IP address to advertise to cluster members
#
# @param enable_imap
#   Enable IMAP protocol listener
#
# @param enable_imap_tls
#   Enable IMAP over TLS listener
#
# @param enable_http
#   Enable HTTP listener for JMAP/WebDAV/Autodiscovery
#
# @param enable_smtp_relay
#   Enable SMTP for postfix relay communication
#
# @param enable_smtp_submission
#   Enable SMTP submission listener on port 587
#
# @param haproxy_role
#   Role name for HAProxy nodes to include in proxy trusted networks
#
# @param service_hostname
#   Service hostname used for autoconfig/autodiscover and the SMTP greeting
#
# @param package_ensure
#   Package version to install
#
# @param config_dir
#   Stalwart configuration directory
#
# @param data_dir
#   Stalwart data directory
#
# @param log_level
#   Logging verbosity level
#
# @param manage_firewall
#   Whether to manage firewall rules
#
# @param tls_cert
#   Path to TLS certificate file
#
# @param tls_key
#   Path to TLS private key file
#
# @param manage_dns_records
#   Whether to create DNS autodiscovery records
#
# @param loadbalancer_host
#   Optional load balancer FQDN; when set, only the first cluster node
#   creates the DNS records, pointed at this host
#
# @param fallback_admin_user
#   Username of the fallback administrator account
#
# @param fallback_admin_password
#   Password of the fallback administrator account (Sensitive)
#
# @param webadmin_unpack_path
#   Directory the webadmin bundle is unpacked into
#
# @param webadmin_resource_url
#   URL the webadmin bundle is downloaded from
#
# @param webadmin_auto_update
#   Whether Stalwart keeps the webadmin bundle up to date
#
class stalwart (
  String $cluster_role,
  Stdlib::Host $postgresql_host,
  String $postgresql_database,
  String $postgresql_user,
  Sensitive[String] $postgresql_password,
  Stdlib::HTTPUrl $s3_endpoint,
  String $s3_bucket,
  String $s3_access_key,
  Sensitive[String] $s3_secret_key,
  Array[Stdlib::Fqdn] $domains,
  Stdlib::Host $postfix_relay_host,
  Optional[Integer] $node_id = undef,
  Stdlib::Port $postgresql_port = 5432,
  Boolean $postgresql_ssl = true,
  String $s3_region = 'us-east-1',
  String $s3_key_prefix = 'stalwart/',
  Stdlib::IP::Address $bind_address = $facts['networking']['ip'],
  Stdlib::IP::Address $advertise_address = $facts['networking']['ip'],
  Boolean $enable_imap = true,
  Boolean $enable_imap_tls = true,
  Boolean $enable_http = true,
  Boolean $enable_smtp_relay = true,
  Boolean $enable_smtp_submission = true,
  String $haproxy_role = 'roles::infra::halb::haproxy2',
  Stdlib::Fqdn $service_hostname = $facts['networking']['fqdn'],
  String $package_ensure = 'present',
  Stdlib::Absolutepath $config_dir = '/opt/stalwart/etc',
  Stdlib::Absolutepath $data_dir = '/var/lib/stalwart',
  Enum['error','warn','info','debug','trace'] $log_level = 'info',
  Boolean $manage_firewall = false,
  Stdlib::Absolutepath $tls_cert = '/etc/pki/tls/vault/certificate.crt',
  Stdlib::Absolutepath $tls_key = '/etc/pki/tls/vault/private.key',
  Boolean $manage_dns_records = true,
  Optional[Stdlib::Fqdn] $loadbalancer_host = undef,
  String $fallback_admin_user = 'admin',
  Sensitive[String] $fallback_admin_password = Sensitive('admin'),
  Stdlib::Absolutepath $webadmin_unpack_path = "${data_dir}/webadmin",
  Stdlib::HTTPUrl $webadmin_resource_url = 'https://github.com/stalwartlabs/webadmin/releases/latest/download/webadmin.zip',
  Boolean $webadmin_auto_update = true,
) {

  # Calculate node_id from the last 4 digits of the hostname if not provided
  $my_fqdn = $facts['networking']['fqdn']
  $hostname = $facts['networking']['hostname']

  # Query cluster members for validation
  $cluster_query = "enc_role='${cluster_role}' and country='${facts['country']}' and region='${facts['region']}'"
  $cluster_members_raw = query_nodes($cluster_query, 'networking.fqdn')
  $cluster_members = $cluster_members_raw ? {
    undef   => [],
    default => $cluster_members_raw,
  }
  $sorted_cluster_members = sort($cluster_members)

  # Calculate cluster information for templates
  $other_cluster_members = $sorted_cluster_members.filter |$member| { $member != $my_fqdn }
  $cluster_size = length($sorted_cluster_members)

  # Query HAProxy nodes for proxy trusted networks
  $haproxy_query = "enc_role='${haproxy_role}' and country='${facts['country']}' and region='${facts['region']}'"
  $haproxy_members_raw = query_nodes($haproxy_query, 'networking.ip')
  $haproxy_ips = $haproxy_members_raw ? {
    undef   => [],
    default => sort($haproxy_members_raw),
  }

  # Extract last 4 digits from hostname (e.g., ausyd1nxvm1234 -> 1234)
  if $hostname =~ /^.*(\d{4})$/ {
    $hostname_digits = $1
    $calculated_node_id = Integer($hostname_digits)
  } else {
    fail("Unable to extract 4-digit node ID from hostname '${hostname}'. Hostname must end with 4 digits or specify node_id manually.")
  }

  # Use the provided node_id or the calculated one
  $effective_node_id = $node_id ? {
    undef   => $calculated_node_id,
    default => $node_id,
  }

  # Validate parameters
  if $effective_node_id < 1 {
    fail('node_id must be a positive integer')
  }

  if empty($domains) {
    fail('At least one domain must be specified')
  }

  if !($my_fqdn in $sorted_cluster_members) {
    fail("This node (${my_fqdn}) is not found in cluster members for role '${cluster_role}' in ${facts['country']}-${facts['region']}")
  }

  # Include sub-classes in dependency order
  include stalwart::install
  include stalwart::config
  include stalwart::service

  # Handle DNS records if requested
  if $manage_dns_records {
    if $loadbalancer_host {
      # Only the first node in the cluster creates DNS records pointing to the load balancer
      if $my_fqdn == $sorted_cluster_members[0] {
        class { 'stalwart::dns':
          target_host => $loadbalancer_host,
        }
      }
    } else {
      # Current behavior: each server creates its own DNS records
      include stalwart::dns
    }
  }

  # Class ordering
  Class['stalwart::install']
  -> Class['stalwart::config']
  -> Class['stalwart::service']

  # Only order against stalwart::dns when it was actually declared above
  # (with a loadbalancer_host, non-first nodes never declare it)
  if $manage_dns_records and defined(Class['stalwart::dns']) {
    Class['stalwart::service'] -> Class['stalwart::dns']
  }
}
11  modules/stalwart/manifests/install.pp  Normal file
@ -0,0 +1,11 @@
# @summary Manages Stalwart Mail Server package installation
#
# @api private
class stalwart::install {
  assert_private()

  # Install the stalwart package (user/group created by the package preinstall script)
  package { 'stalwart':
    ensure => $stalwart::package_ensure,
  }
}
26  modules/stalwart/manifests/service.pp  Normal file
@ -0,0 +1,26 @@
# @summary Manages Stalwart Mail Server service
#
# @api private
class stalwart::service {
  assert_private()

  # The service unit is installed by the RPM package
  service { 'stalwart':
    ensure    => running,
    enable    => true,
    subscribe => [
      File[$stalwart::tls_cert],
      File[$stalwart::tls_key],
    ],
  }

  # Add the capability to bind to privileged ports (143, 443, 993)
  systemd::manage_dropin { 'bind-capabilities.conf':
    ensure        => present,
    unit          => 'stalwart.service',
    service_entry => {
      'AmbientCapabilities' => 'CAP_NET_BIND_SERVICE',
    },
    notify        => Service['stalwart'],
  }
}
296  modules/stalwart/templates/config.toml.epp  Normal file
@ -0,0 +1,296 @@
# Stalwart Mail Server Configuration
# Generated by Puppet - DO NOT EDIT MANUALLY

[server]
hostname = "<%= $service_hostname %>"
greeting = "Stalwart ESMTP"

[server.listener."smtp-relay"]
bind = ["<%= $bind_address %>:25"]
protocol = "smtp"
greeting = "Stalwart SMTP Relay"

<% if !$haproxy_ips.empty { -%>
[server.listener."smtp-relay".proxy]
trusted-networks = ["127.0.0.0/8", "::1"<% $haproxy_ips.each |$ip| { %>, "<%= $ip %>"<% } %>]
<% } -%>

<% if $enable_smtp_submission { -%>
[server.listener."submission"]
bind = ["<%= $bind_address %>:587"]
protocol = "smtp"
greeting = "Stalwart SMTP Submission"
tls.require = true

<% if !$haproxy_ips.empty { -%>
[server.listener."submission".proxy]
trusted-networks = ["127.0.0.0/8", "::1"<% $haproxy_ips.each |$ip| { %>, "<%= $ip %>"<% } %>]
<% } -%>
<% } -%>

<% if $enable_imap { -%>
[server.listener."imap"]
bind = ["<%= $bind_address %>:143"]
protocol = "imap"

<% if !$haproxy_ips.empty { -%>
[server.listener."imap".proxy]
trusted-networks = ["127.0.0.0/8", "::1"<% $haproxy_ips.each |$ip| { %>, "<%= $ip %>"<% } %>]
<% } -%>
<% } -%>

<% if $enable_imap_tls { -%>
[server.listener."imaps"]
bind = ["<%= $bind_address %>:993"]
protocol = "imap"
tls.implicit = true

<% if !$haproxy_ips.empty { -%>
[server.listener."imaps".proxy]
trusted-networks = ["127.0.0.0/8", "::1"<% $haproxy_ips.each |$ip| { %>, "<%= $ip %>"<% } %>]
<% } -%>
<% } -%>

<% if $enable_http { -%>
[server.listener."https"]
bind = ["<%= $bind_address %>:443"]
protocol = "http"
tls.implicit = true

<% if !$haproxy_ips.empty { -%>
[server.listener."https".proxy]
trusted-networks = ["127.0.0.0/8", "::1"<% $haproxy_ips.each |$ip| { %>, "<%= $ip %>"<% } %>]
<% } -%>
<% } -%>

[server.tls]
enable = true
implicit = false
certificate = "default"

[webadmin]
path = "<%= $webadmin_unpack_path %>"
auto-update = <%= $webadmin_auto_update %>
resource = "<%= $webadmin_resource_url %>"

# Cluster Configuration
[cluster]
node-id = <%= $effective_node_id %>

<% if $cluster_size > 1 { -%>
# Peer-to-peer coordination
[cluster.coordinator]
type = "peer-to-peer"
addr = "<%= $bind_address %>:11200"
advertise-addr = "<%= $advertise_address %>:11200"

<% $other_cluster_members.each |$node| { -%>
[[cluster.coordinator.peers]]
addr = "<%= $node %>:11200"
<% } -%>

# Cluster roles for a 3-node setup
[cluster.roles.purge]
stores = ["1", "2", "3"]
accounts = ["1", "2"]

[cluster.roles.acme]
renew = ["1"]

[cluster.roles.metrics]
calculate = ["1", "2"]
push = ["1"]

[cluster.roles.push-notifications]
push-notifications = ["1", "3"]

[cluster.roles.fts-indexing]
fts-indexing = ["2", "3"]

[cluster.roles.bayes-training]
bayes-training = ["1"]

[cluster.roles.imip-processing]
imip-processing = ["2"]

[cluster.roles.calendar-alerts]
calendar-alerts = ["3"]
<% } -%>

# Storage Configuration

# PostgreSQL store for data, FTS, and in-memory
[store."postgresql"]
type = "postgresql"
host = "<%= $postgresql_host %>"
port = <%= $postgresql_port %>
database = "<%= $postgresql_database %>"
user = "<%= $postgresql_user %>"
password = "<%= $postgresql_password %>"
timeout = "15s"

[store."postgresql".tls]
enable = <%= $postgresql_ssl %>
allow-invalid-certs = false

[store."postgresql".pool]
max-connections = 10

[store."postgresql".purge]
frequency = "0 3 *"

# S3/Ceph-RGW store for blobs
[store."s3"]
type = "s3"
bucket = "<%= $s3_bucket %>"
region = "<%= $s3_region %>"
access-key = "<%= $s3_access_key %>"
secret-key = "<%= $s3_secret_key %>"
endpoint = "<%= $s3_endpoint %>"
timeout = "30s"
key-prefix = "<%= $s3_key_prefix %>"
compression = "lz4"

[store."s3".purge]
frequency = "30 5 *"

# Storage assignment
[storage]
data = "postgresql"
fts = "postgresql"
blob = "s3"
lookup = "postgresql"
directory = "internal"
in-memory = "postgresql"

# Directory configuration
[directory.internal]
type = "internal"
store = "postgresql"

# Authentication configuration
[authentication.fallback-admin]
user = "<%= $fallback_admin_user %>"
secret = "<%= pw_hash($fallback_admin_password.unwrap, 'SHA-512', 'stalwart') %>"

[authentication.directory]
directories = ["internal"]

# Authorization configuration
[authorization]
directory = "internal"

# JMAP configuration
[jmap]
directory = "internal"

[jmap.protocol]
request-max-size = 10485760
get.max-objects = 500
query.max-results = 5000
changes.max-results = 5000
upload.max-size = 50000000
upload.ttl = "1h"

# IMAP configuration
[imap]
directory = "internal"

[imap.protocol]
max-requests = 64

# Inbound rate limiting
[[queue.limiter.inbound]]
key = ["remote_ip"]
rate = "500/1s"
enable = true

# SMTP data limits and sendmail pipe for the postfix relay
# (kept in a single table: TOML forbids defining [session.data] twice)
[session.data]
pipe.command = "sendmail"
pipe.arguments = ["-i", "-f", "{sender}", "{recipient}"]
max-messages = 10
max-message-size = 52428800

# Outbound SMTP configuration
[queue]
path = "<%= $data_dir %>/queue"

[queue.schedule]
retry = ["2s", "5s", "1m", "5m", "15m", "30m", "1h", "2h"]
notify = ["1d", "3d"]
expire = "5d"

[session.extensions]
future-release = "7d"

# Relay configuration for postfix
[remote."postfix"]
address = "<%= $postfix_relay_host %>"
port = 25
protocol = "smtp"

# HTTP configuration
[server.http]
use-x-forwarded = false
permissive-cors = false

# Disable spam filtering (handled by postfix)
[session.ehlo]
reject-non-fqdn = false

[session.rcpt]
type = "internal"
store = "postgresql"
max-recipients = 25

# TLS configuration
[certificate."default"]
cert = "%{file:<%= $tls_cert %>}%"
private-key = "%{file:<%= $tls_key %>}%"
default = true

# Logging configuration
[tracer]
type = "log"
level = "<%= $log_level %>"
ansi = false
multiline = true

[tracer.file]
path = "/var/log/stalwart/stalwart.log"
rotate = "daily"
keep = 30

# Report storage
[report]
path = "<%= $data_dir %>/reports"
hash = "sha256"
encrypt = false

# Metrics configuration
[metrics]
prometheus.enable = true
prometheus.port = 9090

# Queue routing configuration
[queue.strategy]
route = [ { if = "is_local_domain('', rcpt_domain)", then = "'local'" },
          { else = "'relay'" } ]

[queue.route."local"]
type = "local"

[queue.route."relay"]
type = "relay"
address = "<%= $postfix_relay_host %>"
port = 25
protocol = "smtp"

[queue.route."relay".tls]
implicit = false
allow-invalid-certs = false
@ -10,6 +10,7 @@ class vmcluster::vmagent (
   Stdlib::Absolutepath $vars_file = '/etc/default/vmagent',
   String $consul_node_token = $facts['consul_node_token'],
   Hash[String, Variant[String, Array[String]]] $options = {},
+  Hash[String, Hash] $static_targets = {},
 ) {

   # if enabled, manage this service

@ -35,3 +35,28 @@ scrape_configs:
   - source_labels: [__meta_consul_tag_metrics_job]
     target_label: job
     action: replace
+
+<% if @static_targets -%>
+<% @static_targets.each do |job_name, config| -%>
+- job_name: '<%= job_name %>'
+  static_configs:
+<% config['targets'].each do |target| -%>
+  - targets: ['<%= target %>']
+<% if config['labels'] -%>
+    labels:
+<% config['labels'].each do |label_name, label_value| -%>
+      <%= label_name %>: '<%= label_value %>'
+<% end -%>
+<% end -%>
+<% end -%>
+<% if config['scrape_interval'] -%>
+  scrape_interval: <%= config['scrape_interval'] %>
+<% end -%>
+<% if config['metrics_path'] -%>
+  metrics_path: <%= config['metrics_path'] %>
+<% end -%>
+<% if config['scheme'] -%>
+  scheme: <%= config['scheme'] %>
+<% end -%>
+<% end -%>
+<% end -%>
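The new `static_targets` hash feeds the template additions above; a hedged example of what a caller might pass (job name, addresses, and label values are illustrative):

```puppet
# Illustrative declaration of the new parameter; keys mirror the template:
# 'targets' (required), plus optional 'labels', 'scrape_interval',
# 'metrics_path' and 'scheme'.
class { 'vmcluster::vmagent':
  static_targets => {
    'node_exporter_static' => {
      'targets'         => ['10.0.0.5:9100', '10.0.0.6:9100'],
      'labels'          => { 'env' => 'prod' },
      'scrape_interval' => '30s',
      'metrics_path'    => '/metrics',
      'scheme'          => 'http',
    },
  },
}
```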
19  modules/zfs/lib/facter/zfs_datasets.rb  Normal file
@ -0,0 +1,19 @@
# frozen_string_literal: true

Facter.add(:zfs_datasets) do
  confine kernel: 'Linux'
  setcode do
    datasets = []

    if Facter::Core::Execution.which('zfs')
      begin
        output = Facter::Core::Execution.execute('zfs list -H -o name 2>/dev/null', on_fail: nil)
        datasets = output.strip.split("\n") if output && !output.empty?
      rescue StandardError => e
        Facter.debug("Error getting zfs dataset information: #{e.message}")
      end
    end

    datasets.empty? ? nil : datasets
  end
end
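Manifests can then guard on the fact before creating datasets; a minimal sketch (the dataset name is an example):

```puppet
# The fact resolves to undef when the zfs CLI is absent or lists nothing,
# so wrap it with a default before membership tests.
$datasets = pick($facts['zfs_datasets'], [])

if 'tank/backups' in $datasets {
  notice('dataset tank/backups already exists; skipping creation')
}
```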
@ -3,7 +3,8 @@
 Facter.add('zfs_zpool_cache_present') do
   confine kernel: 'Linux'
   setcode do
-    File.exist?('/etc/zfs/zpool.cache')
+    cache_file = '/etc/zfs/zpool.cache'
+    File.exist?(cache_file) && File.size(cache_file).positive?
   end
 end
19  modules/zfs/lib/facter/zfs_zpools.rb  Normal file
@ -0,0 +1,19 @@
# frozen_string_literal: true

Facter.add(:zfs_zpools) do
  confine kernel: 'Linux'
  setcode do
    zpools = []

    if Facter::Core::Execution.which('zpool')
      begin
        output = Facter::Core::Execution.execute('zpool list -H -o name 2>/dev/null', on_fail: nil)
        zpools = output.strip.split("\n") if output && !output.empty?
      rescue StandardError => e
        Facter.debug("Error getting zpool information: #{e.message}")
      end
    end

    zpools.empty? ? nil : zpools
  end
end
@ -38,8 +38,11 @@ class zfs (

   # create zpools
   $zpools.each | $zpool, $data | {
-    zpool { $zpool:
-      * => $data
+    # Only create zpool if it doesn't already exist
+    if $facts['zfs_zpools'] == undef or !($zpool in $facts['zfs_zpools']) {
+      zpool { $zpool:
+        * => $data
+      }
     }
   }
54  site/profiles/files/postfix/gateway/aliases  Normal file
@ -0,0 +1,54 @@
# FILE MANAGED BY PUPPET, CHANGES WILL BE REPLACED

postmaster: root

# Many mailers use this address to represent the empty SMTP return path
MAILER-DAEMON: postmaster

# Common aliases for system accounts.
bin: root
daemon: root
games: root
ingres: root
nobody: root
system: root
toor: root
foo: root
falken: root

# Well-known aliases.
admin: root
manager: root
dumper: root
operator: root

# traps to catch security attacks
decode: root
moof: root
moog: root

# Standard aliases also defined by RFC 2142
abuse: postmaster

# reports of network infrastructure difficulties
noc: root

# address to report security problems
security: root

# DNS administrator (DNS soa records should use this)
hostmaster: root

# Usenet news service administrator
news: usenet
usenet: root

# http/web service administrator
www: webmaster
webmaster: root

# UUCP service administrator
uucp: root

# FTP administrator (especially anon FTP)
ftp: root
@ -1,12 +1,12 @@
 # manage the root user
 class profiles::accounts::root (
+  String $password,
   Optional[Array[String]] $sshkeys = undef,
 ) {

-  if $sshkeys {
-    accounts::user { 'root':
-      sshkeys => $sshkeys,
-    }
+  accounts::user { 'root':
+    sshkeys  => $sshkeys,
+    password => $password,
   }

   file {'/root/.config':
@ -1,7 +1,5 @@
 # this is the base class, which will be used by all servers
-class profiles::base (
-  Array $puppet_servers,
-) {
+class profiles::base () {

   # run a limited set of classes on the first run aimed at bootstrapping the new node
   if $facts['firstrun'] {

@ -13,11 +11,7 @@ class profiles::base (

   # manage the puppet agent
   include profiles::puppet::agent
-  # manage puppet clients
-  if ! member($puppet_servers, $trusted['certname']) {
-    include profiles::puppet::client
-  }
+  include profiles::puppet::client

   # include the base profiles
   include profiles::base::repos
36  site/profiles/manifests/ceph/conf.pp  Normal file
@ -0,0 +1,36 @@
class profiles::ceph::conf (
  Hash $config = {}
) {

  package {[
    'ceph',
    'ceph-common'
  ]:
    ensure => installed,
  }

  file {'/etc/ceph':
    ensure  => directory,
    owner   => 'ceph',
    group   => 'ceph',
    mode    => '0755',
    require => Package['ceph'],
  }

  file {'/var/log/ceph':
    ensure  => directory,
    owner   => 'ceph',
    group   => 'ceph',
    mode    => '0755',
    require => Package['ceph'],
  }

  file { '/etc/ceph/ceph.conf':
    ensure  => file,
    owner   => 'ceph',
    group   => 'ceph',
    mode    => '0644',
    content => template('profiles/ceph/conf.erb'),
    require => Package['ceph-common'],
  }
}
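A hedged example of declaring the class; the `$config` hash is presumably what `profiles/ceph/conf.erb` renders into `ceph.conf`, so the section/key layout below is an assumption, not the template's confirmed contract:

```puppet
# Assumed shape: top-level keys become ceph.conf sections.
class { 'profiles::ceph::conf':
  config => {
    'global' => {
      'fsid'     => '00000000-0000-0000-0000-000000000000',  # placeholder
      'mon_host' => 'ceph-mon01.example.com',                # placeholder
    },
  },
}
```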
20  site/profiles/manifests/ceph/dashboard.pp  Normal file
@ -0,0 +1,20 @@
class profiles::ceph::dashboard (
  Stdlib::IP::Address $ipaddress = $facts['networking']['ip'],
){

  # export haproxy balancemember
  profiles::haproxy::balancemember { "${facts['networking']['fqdn']}_9443":
    service   => 'be_ceph_dashboard',
    ipaddress => $ipaddress,
    ports     => [9443],
    options   => [
      "cookie ${facts['networking']['hostname']}",
      'ssl',
      'verify none',
      'check',
      'inter 2s',
      'rise 3',
      'fall 2',
    ]
  }
}
41  site/profiles/manifests/ceph/rgw.pp  Normal file
@ -0,0 +1,41 @@
class profiles::ceph::rgw (
  Boolean $enable = true,
  Hash[String, String] $ceph_client_keys = {},
  Stdlib::Absolutepath $base_path = '/var/lib/ceph'
){

  $key = $ceph_client_keys[$facts['networking']['hostname']]

  if $enable {

    include profiles::ceph::conf

    package {'ceph-radosgw':
      ensure => installed,
    }

    file { [
      "${base_path}/radosgw",
      "${base_path}/radosgw/ceph-${facts['networking']['hostname']}"
    ]:
      ensure => directory,
      owner  => 'ceph',
      group  => 'ceph',
      mode   => '0750',
    }

    file { "${base_path}/radosgw/ceph-${facts['networking']['hostname']}/keyring":
      ensure  => file,
      owner   => 'ceph',
      group   => 'ceph',
      mode    => '0750',
      content => Sensitive("[client.${facts['networking']['hostname']}]\n key = ${key}\n")
    }

    service {"ceph-radosgw@${facts['networking']['hostname']}":
      ensure    => true,
      enable    => true,
      subscribe => File["${base_path}/radosgw/ceph-${facts['networking']['hostname']}/keyring"]
    }
  }
}
@ -11,6 +11,7 @@ class profiles::consul::client (
   Stdlib::Absolutepath $data_dir = '/opt/consul',
   Array[Hash] $node_rules = [],
   Hash $ports = {},
+  Stdlib::IP::Address $host_addr = $facts['networking']['ip'],
 ) {

   if $facts['enc_role'] != $members_role {

@ -42,8 +43,8 @@ class profiles::consul::client (
     'log_level' => 'INFO',
     'node_name' => $facts['networking']['fqdn'],
     'retry_join' => $servers_array,
-    'bind_addr' => $::facts['networking']['ip'],
-    'advertise_addr' => $::facts['networking']['ip'],
+    'bind_addr' => $host_addr,
+    'advertise_addr' => $host_addr,
     'enable_script_checks' => true,
     'ports' => $ports,
     'acl' => {
@ -11,6 +11,7 @@ class profiles::defaults {
     ensure => present,
     require => [
       Class['profiles::base::repos'],
+      Exec['dnf_makecache'],
     ]
   }
@ -47,7 +47,7 @@ class profiles::dns::base (
   $facts['networking']['interfaces'].each | $interface, $data | {

     # exclude those without ipv4 address, lo, docker0 and anycast addresses
-    if $data['ip'] and $interface != 'lo' and $interface != 'docker0' and $interface !~ /^anycast[0-9]$/ {
+    if $data['ip'] and $interface != 'lo' and $interface != 'docker0' and $interface !~ /^anycast[0-9]$/ and $interface !~ /^cilium_/ {

       # use defaults for the primary_interface
       if $interface == $primary_interface {
@ -1,13 +1,12 @@
 # profiles::gitea::init
 class profiles::gitea::runner (
   String $registration_token,
-  Stdlib::HTTPSUrl $source,
   String $user = 'runner',
   String $group = 'runner',
   Stdlib::Absolutepath $home = '/data/runner',
   Hash $config = {},
   Stdlib::HTTPSUrl $instance = 'https://git.query.consul',
-  String $version = '0.2.10',
+  String $version = 'latest',
 ) {

   group { $group:

@ -32,24 +31,27 @@ class profiles::gitea::runner (
     require => User[$user],
   }

-  archive { '/usr/local/bin/act_runner':
-    ensure => present,
-    extract => false,
-    source => $source,
-    creates => '/usr/local/bin/act_runner',
-    cleanup => true,
+  unless $version in ['latest', 'present'] {
+    # versionlock act
+    yum::versionlock { 'act_runner':
+      ensure  => present,
+      version => $version,
+      before  => Package['act_runner'],
+    }
   }
+
+  # install act
+  package { 'act_runner':
+    ensure => $version,
+  }
+
+  # remove manually installed act_runner
   file { '/usr/local/bin/act_runner':
-    ensure => 'file',
-    mode => '0755',
-    owner => 'root',
-    group => 'root',
-    require => Archive['/usr/local/bin/act_runner'],
+    ensure => absent,
   }

-  exec {'register_act_runner':
-    command => "/usr/local/bin/act_runner register \
+  exec { 'register_act_runner':
+    command => "/usr/bin/act_runner register \
       --no-interactive \
       --instance ${instance} \
       --token ${registration_token} \

@ -60,12 +62,12 @@ class profiles::gitea::runner (
     user => $user,
     group => $group,
     require => [
-      File['/usr/local/bin/act_runner'],
+      Package['act_runner'],
       File["${home}/config.yaml"],
     ],
   }

-  systemd::unit_file {'act_runner.service':
+  systemd::unit_file { 'act_runner.service':
     enable => true,
     active => true,
     content => template('profiles/gitea/act_runner.service.erb'),
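With the switch from a downloaded binary to a package, version pinning becomes a class/hiera concern; a hedged usage sketch (the token and the version-string format are placeholders and depend on how the act_runner RPM is versioned):

```puppet
# Illustrative: pin act_runner via the new package-based install path.
class { 'profiles::gitea::runner':
  registration_token => 'REDACTED',  # fetch from eyaml in practice
  version            => '0.2.11',   # placeholder; a pin also creates a yum::versionlock
}
```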
@ -3,6 +3,7 @@ define profiles::haproxy::balancemember (
   String $service,
   Array[Stdlib::Port] $ports,
   Array $options = ['check'],
+  Stdlib::IP::Address $ipaddress = $facts['networking']['ip'],
 ) {

   $location_environment = "${facts['country']}-${facts['region']}-${facts['environment']}"

@ -12,7 +13,7 @@ define profiles::haproxy::balancemember (
     listening_service => $service,
     ports => $ports,
     server_names => $facts['networking']['hostname'],
-    ipaddresses => $facts['networking']['ip'],
+    ipaddresses => $ipaddress,
     options => $options,
     tag => $balancemember_tag,
   }
|
|||||||
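The new $ipaddress parameter keeps the old behaviour as its default (the primary IP fact) but lets a caller register a different address for the backend member. A sketch, with an assumed backend name and address:

    profiles::haproxy::balancemember { "${facts['networking']['fqdn']}_8080":
      service   => 'be_example',  # hypothetical backend
      ports     => [8080],
      ipaddress => '10.0.0.50',   # e.g. a VIP or secondary interface instead of the primary IP
    }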
@@ -11,6 +11,7 @@ class profiles::metrics::grafana (
   String $db_pass = fqdn_rand_string(16),
   Stdlib::Host $db_host = '127.0.0.1',
   Stdlib::Port $db_port = 5432,
+  Hash $plugins = {}
 ) {

   # set the fqdn
@@ -108,6 +109,7 @@ class profiles::metrics::grafana (
   class { 'grafana':
     cfg      => $cfg,
     ldap_cfg => $ldap_cfg,
+    plugins  => $plugins,
   }

   # fix the package provided systemd service
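The $plugins hash is handed straight through to the upstream grafana class. Assuming the puppet/grafana module's usual format (plugin name mapped to resource attributes), a declaration might look like:

    class { 'profiles::metrics::grafana':
      plugins => {
        'grafana-clock-panel' => { 'ensure' => 'present' },  # illustrative plugin name
      },
    }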
@@ -136,9 +136,10 @@ class profiles::nginx::simpleproxy (
   }

   service { 'nginx':
     ensure     => true,
     enable     => true,
-    subscribe  => [
+    hasrestart => true,
+    subscribe  => [
       File[$selected_ssl_cert],
       File[$selected_ssl_key],
       Nginx::Resource::Server[$nginx_vhost]
site/profiles/manifests/postfix/gateway.pp (new file)
@@ -0,0 +1,349 @@
class profiles::postfix::gateway (
  Stdlib::Absolutepath $tls_cert_file = '/etc/pki/tls/vault/certificate.pem',
  Stdlib::Absolutepath $tls_key_file = '/etc/pki/tls/vault/certificate.pem',
  Stdlib::Absolutepath $tls_ca_file = '/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem',
  String $myhostname = $trusted['certname'],
  String $message_size_limit = '133169152',
  String $mailbox_size_limit = '133169152',
  String $local_transport = 'error:No local mail delivery',
  Boolean $enable_postscreen = true,
  Array[String] $alias_maps = [
    'hash:/etc/aliases',
    'hash:/etc/postfix/aliases',
  ],
  Array[String] $postscreen_dnsbl_sites = [
    'zen.spamhaus.org*3',
    'b.barracudacentral.org=127.0.0.[2..11]*2',
    'bl.spameatingmonkey.net*2',
    'bl.spamcop.net',
    'dnsbl.sorbs.net',
    'swl.spamhaus.org*-4',
    'list.dnswl.org=127.[0..255].[0..255].0*-2',
    'list.dnswl.org=127.[0..255].[0..255].1*-4',
    'list.dnswl.org=127.[0..255].[0..255].[2..3]*-6',
  ],
  Array[String] $smtpd_client_restrictions = [
    'permit_sasl_authenticated',
    'permit_mynetworks',
    'reject_rbl_client zen.spamhaus.org',
  ],
  Array[String] $smtpd_sender_restrictions = [
    'permit_sasl_authenticated',
    'check_sender_access hash:/etc/postfix/sender_access',
    'reject_non_fqdn_sender',
    'reject_unknown_sender_domain',
  ],
  Array[String] $smtpd_recipient_restrictions = [
    'permit_sasl_authenticated',
    'permit_mynetworks',
    'reject_unauth_destination',
    'reject_non_fqdn_recipient',
    'reject_unknown_recipient_domain',
    'check_recipient_access hash:/etc/postfix/recipient_access',
    'reject_unverified_recipient',
  ],
  Array[String] $smtpd_relay_restrictions = [
    'permit_sasl_authenticated',
    'permit_mynetworks',
    'reject_unauth_destination',
  ],
  Hash[String, String] $smtp_tls_policy_maps = {},
  Hash[String, String] $sender_canonical_maps = {},
  Hash[String, String] $sender_access_maps = {},
  Hash[String, String] $relay_recipients_maps = {},
  Hash[String, String] $relay_domains_maps = {},
  Hash[String, String] $recipient_canonical_maps = {},
  Hash[String, String] $recipient_access_maps = {},
  Hash[String, String] $postscreen_access_maps = {},
  Hash[String, String] $helo_access_maps = {},
) {

  $alias_maps_string = join($alias_maps, ', ')

  # Set master.cf configuration based on postscreen setting
  if $enable_postscreen {
    $master_smtp = 'smtp inet n - n - 1 postscreen'
    $master_entries = [
      'smtpd pass - - n - - smtpd',
      'dnsblog unix - - n - 0 dnsblog',
      'tlsproxy unix - - n - 0 tlsproxy',
    ]
    $postscreen_configs = {
      'postscreen_access_list' => {
        'value' => 'permit_mynetworks, cidr:/etc/postfix/postscreen_access'
      },
      'postscreen_blacklist_action' => {
        'value' => 'enforce'
      },
      'postscreen_cache_map' => {
        'value' => 'btree:$data_directory/postscreen_cache'
      },
      'postscreen_dnsbl_action' => {
        'value' => 'enforce'
      },
      'postscreen_dnsbl_sites' => {
        'value' => join($postscreen_dnsbl_sites, ', ')
      },
      'postscreen_dnsbl_threshold' => {
        'value' => '2'
      },
      'postscreen_greet_action' => {
        'value' => 'enforce'
      },
      'postscreen_greet_banner' => {
        'value' => '$smtpd_banner'
      },
      'postscreen_greet_wait' => {
        'value' => "\${stress?2}\${stress:6}s"
      },
    }
  } else {
    $master_smtp = undef
    $master_entries = []
    $postscreen_configs = {}
  }

  # Base postfix configuration
  $base_configs = {
    'alias_database' => {
      'value' => $alias_maps_string
    },
    'default_destination_recipient_limit' => {
      'value' => '1'
    },
    'disable_vrfy_command' => {
      'value' => 'yes'
    },
    'enable_long_queue_ids' => {
      'value' => 'yes'
    },
    'error_notice_recipient' => {
      'value' => 'root'
    },
    'header_checks' => {
      'value' => 'regexp:/etc/postfix/header_checks'
    },
    'local_recipient_maps' => {
      'ensure' => 'blank'
    },
    'local_transport' => {
      'value' => $local_transport
    },
    'mailbox_size_limit' => {
      'value' => $mailbox_size_limit
    },
    'message_size_limit' => {
      'value' => $message_size_limit
    },
    'myhostname' => {
      'value' => $myhostname
    },
    'non_smtpd_milters' => {
      'ensure' => 'blank'
    },
    'qmqpd_authorized_clients' => {
      'value' => '127.0.0.1 [::1]'
    },
    'recipient_canonical_maps' => {
      'value' => 'hash:/etc/postfix/recipient_canonical'
    },
    'recipient_delimiter' => {
      'value' => '+'
    },
    'relay_domains' => {
      'value' => 'hash:/etc/postfix/relay_domains'
    },
    'relay_recipient_maps' => {
      'value' => 'hash:/etc/postfix/relay_recipients'
    },
    'sender_canonical_maps' => {
      'value' => 'hash:/etc/postfix/sender_canonical'
    },
    'smtp_tls_CAfile' => {
      'value' => $tls_ca_file
    },
    'smtp_tls_mandatory_protocols' => {
      'value' => '!SSLv2,!SSLv3'
    },
    'smtp_tls_note_starttls_offer' => {
      'value' => 'yes'
    },
    'smtp_tls_protocols' => {
      'value' => '!SSLv2,!SSLv3'
    },
    'smtp_tls_security_level' => {
      'value' => 'may'
    },
    'smtp_tls_session_cache_database' => {
      'value' => 'btree:/var/lib/postfix/smtp_tls_session_cache'
    },
    'smtp_use_tls' => {
      'value' => 'yes'
    },
    'smtpd_banner' => {
      'value' => '$myhostname ESMTP $mail_name'
    },
    'smtpd_client_restrictions' => {
      'value' => join($smtpd_client_restrictions, ', ')
    },
    'smtpd_data_restrictions' => {
      'value' => 'reject_unauth_pipelining'
    },
    'smtpd_delay_reject' => {
      'value' => 'yes'
    },
    'smtpd_discard_ehlo_keywords' => {
      'value' => 'chunking, silent-discard'
    },
    'smtpd_forbid_bare_newline' => {
      'value' => 'yes'
    },
    'smtpd_forbid_bare_newline_exclusions' => {
      'value' => '$mynetworks'
    },
    'smtpd_forbid_unauth_pipelining' => {
      'value' => 'yes'
    },
    'smtpd_helo_required' => {
      'value' => 'yes'
    },
    'smtpd_helo_restrictions' => {
      'value' => 'check_helo_access hash:/etc/postfix/helo_access, reject_invalid_hostname'
    },
    'smtpd_milters' => {
      'value' => 'inet:127.0.0.1:33333'
    },
    'smtpd_recipient_restrictions' => {
      'value' => join($smtpd_recipient_restrictions, ', ')
    },
    'smtpd_relay_restrictions' => {
      'value' => join($smtpd_relay_restrictions, ', ')
    },
    'smtpd_sender_restrictions' => {
      'value' => join($smtpd_sender_restrictions, ', ')
    },
    'smtpd_tls_CAfile' => {
      'value' => $tls_ca_file
    },
    'smtpd_tls_cert_file' => {
      'value' => $tls_cert_file
    },
    'smtpd_tls_ciphers' => {
      'value' => 'medium'
    },
    'smtpd_tls_key_file' => {
      'value' => $tls_key_file
    },
    'smtpd_tls_loglevel' => {
      'value' => '1'
    },
    'smtpd_tls_mandatory_protocols' => {
      'value' => '!SSLv2,!SSLv3'
    },
    'smtpd_tls_protocols' => {
      'value' => '!SSLv2,!SSLv3'
    },
    'smtpd_tls_received_header' => {
      'value' => 'yes'
    },
    'smtpd_tls_security_level' => {
      'value' => 'may'
    },
    'smtpd_tls_session_cache_database' => {
      'value' => 'btree:/var/lib/postfix/smtpd_tls_session_cache'
    },
    'smtpd_tls_session_cache_timeout' => {
      'value' => '3600s'
    },
    'smtpd_use_tls' => {
      'value' => 'yes'
    },
    'tls_medium_cipherlist' => {
      'value' => join([
        'ECDSA+AESGCM:ECDH+AESGCM:DH+AESGCM:ECDSA+AES:ECDH+AES:DH+AES',
        'ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS'
      ], ':')
    },
    'tls_preempt_cipherlist' => {
      'value' => 'yes'
    },
    'tls_random_source' => {
      'value' => 'dev:/dev/urandom'
    },
    'unverified_recipient_reject_code' => {
      'value' => '550'
    },
    'unverified_recipient_reject_reason' => {
      'value' => 'No user at this address'
    },
    'smtp_tls_policy_maps' => {
      'value' => 'hash:/etc/postfix/smtp_tls_policy_maps'
    },
  }

  # Postfix maps (all using templates now)
  $postfix_maps = {
    'postscreen_access' => {
      'ensure'  => 'present',
      'type'    => 'cidr',
      'content' => template('profiles/postfix/gateway/postscreen_access.erb')
    },
    'relay_recipients' => {
      'ensure'  => 'present',
      'type'    => 'hash',
      'content' => template('profiles/postfix/gateway/relay_recipients.erb')
    },
    'relay_domains' => {
      'ensure'  => 'present',
      'type'    => 'hash',
      'content' => template('profiles/postfix/gateway/relay_domains.erb')
    },
    'aliases' => {
      'ensure' => 'present',
      'type'   => 'hash',
      'source' => 'puppet:///modules/profiles/postfix/gateway/aliases'
    },
    'helo_access' => {
      'ensure'  => 'present',
      'type'    => 'hash',
      'content' => template('profiles/postfix/gateway/helo_access.erb')
    },
    'sender_access' => {
      'ensure'  => 'present',
      'type'    => 'hash',
      'content' => template('profiles/postfix/gateway/sender_access.erb')
    },
    'recipient_access' => {
      'ensure'  => 'present',
      'type'    => 'hash',
      'content' => template('profiles/postfix/gateway/recipient_access.erb')
    },
    'recipient_canonical' => {
      'ensure'  => 'present',
      'type'    => 'hash',
      'content' => template('profiles/postfix/gateway/recipient_canonical.erb')
    },
    'sender_canonical' => {
      'ensure'  => 'present',
      'type'    => 'hash',
      'content' => template('profiles/postfix/gateway/sender_canonical.erb')
    },
    'smtp_tls_policy_maps' => {
      'ensure'  => 'present',
      'type'    => 'hash',
      'content' => template('profiles/postfix/gateway/smtp_tls_policy_maps.erb')
    },
  }

  # Merge base configs with postscreen configs
  $all_configs = $base_configs + $postscreen_configs

  class { 'postfix':
    master_smtp    => $master_smtp,
    master_entries => $master_entries,
    alias_maps     => $alias_maps_string,
    configs        => $all_configs,
    maps           => $postfix_maps,
  }

}
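For context, a minimal sketch of declaring the new gateway profile; the domain, recipient, and HELO pattern are illustrative, and the right-hand values follow the usual postfix lookup-table convention:

    class { 'profiles::postfix::gateway':
      enable_postscreen     => true,
      relay_domains_maps    => { 'mail.example.net' => 'OK' },          # illustrative domain
      relay_recipients_maps => { 'user@mail.example.net' => 'OK' },     # illustrative recipient
      helo_access_maps      => { '.dynamic.example.net' => 'REJECT' },  # illustrative pattern
    }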
@@ -1,37 +1,68 @@
 # profiles::puppet::agent
 # This class manages Puppet agent package and service.
 class profiles::puppet::agent (
-  String $puppet_version = 'latest',
+  String $version = 'latest',
+  Boolean $openvox_enable = false,
 ) {

-  # if puppet-version is anything other than latest, set a versionlock
-  $puppet_versionlock_ensure = $puppet_version ? {
+  # set openvox package, yumrepo, service
+  if $openvox_enable {
+    $use_package = 'openvox-agent'
+    $use_yumrepo = 'openvox'
+    $use_service = 'puppet'
+  } else {
+    $use_package = 'puppet-agent'
+    $use_yumrepo = 'puppet'
+    $use_service = 'puppet'
+  }
+
+  # manage the yumrepo for the given package
+  if $openvox_enable and $facts['os']['family'] == 'RedHat' {
+    yumrepo { 'openvox':
+      ensure  => 'present',
+      baseurl => "https://packagerepo.service.consul/openvox7/el/${facts['os']['release']['major']}-daily/${facts['os']['architecture']}/os/",
+      descr   => 'openvox repository',
+      gpgkey  => "https://packagerepo.service.consul/openvox7/el/${facts['os']['release']['major']}-daily/${facts['os']['architecture']}/os/GPG-KEY-openvox.pub",
+      notify  => Exec['dnf_makecache'],
+    }
+  } else {
+    yumrepo { 'puppet':
+      ensure  => 'present',
+      baseurl => "https://packagerepo.service.consul/puppet7/el/${facts['os']['release']['major']}-daily/${facts['os']['architecture']}/os/",
+      descr   => 'puppet repository',
+      gpgkey  => "https://packagerepo.service.consul/puppet7/el/${facts['os']['release']['major']}-daily/${facts['os']['architecture']}/os/RPM-GPG-KEY-puppet-20250406",
+      notify  => Exec['dnf_makecache'],
+    }
+  }
+
+  # if agent-version is anything other than latest, set a versionlock
+  $agent_versionlock_ensure = $version ? {
     'latest' => 'absent',
     default  => 'present',
   }
-  $puppet_versionlock_version = $puppet_version ? {
+  $agent_versionlock_version = $version ? {
     'latest' => undef,
-    default  => $puppet_version,
+    default  => $version,
   }

   case $facts['os']['family'] {
     'RedHat': {
-      # Ensure the puppet-agent package is installed and locked to a specific version
-      package { 'puppet-agent':
-        ensure  => $puppet_version,
-        require => Yumrepo['puppet'],
+      # Ensure the agent package is installed and locked to a specific version
+      package { $use_package:
+        ensure  => $version,
+        require => Yumrepo[$use_yumrepo],
       }

       # versionlock puppet-agent
-      yum::versionlock { 'puppet-agent':
-        ensure  => $puppet_versionlock_ensure,
-        version => $puppet_versionlock_version,
+      yum::versionlock { $use_package:
+        ensure  => $agent_versionlock_ensure,
+        version => $agent_versionlock_version,
       }
     }
     'Debian': {
       # Ensure the puppet-agent package is installed and locked to a specific version
-      package { 'puppet-agent':
-        ensure  => $puppet_version,
+      package { $use_package:
+        ensure  => $version,
         require => Class['profiles::apt::puppet7'],
       }
     }
@@ -39,12 +70,11 @@ class profiles::puppet::agent (
   }

   # Ensure the puppet service is running
-  service { 'puppet':
+  service { $use_service:
     ensure     => 'running',
     enable     => true,
     hasrestart => true,
-    require    => Package['puppet-agent'],
+    require    => Package[$use_package],
   }

 }
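Switching a host to the OpenVox agent is then a matter of flipping one parameter; a sketch with an illustrative version pin:

    class { 'profiles::puppet::agent':
      openvox_enable => true,      # installs openvox-agent from the openvox yumrepo
      version        => '7.34.0',  # illustrative pin; 'latest' removes the versionlock
    }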
@@ -26,6 +26,12 @@ class profiles::puppet::puppetdb_api (
     before => Class['puppetdb::server'],
   }

+  # cleanup puppetdb first, it isn't replaced by openvoxdb (the packages conflict)
+  package { 'puppetdb':
+    ensure => 'purged',
+    before => Class['puppetdb::server'],
+  }
+
   class { 'puppetdb::server':
     manage_firewall    => false,
     ssl_listen_address => $listen_address,
@@ -44,6 +50,7 @@ class profiles::puppet::puppetdb_api (
     database_name     => $database_name,
     database_password => Sensitive($database_password),
     database_validate => $database_validate,
+    puppetdb_package  => 'openvoxdb',
   }

   contain ::puppetdb::server
@@ -20,12 +20,23 @@ class profiles::puppet::puppetmaster (
   include profiles::puppet::puppetca
   include profiles::puppet::eyaml

+  # migration to openvox, cleanup puppetserver/puppetdb-termini
+  package { 'puppetdb-termini':
+    ensure => purged,
+    before => Package['openvoxdb-termini'],
+  }
+  package { 'puppetserver':
+    ensure => purged,
+    before => Package['openvox-server'],
+  }
+
   class { 'puppetdb::master::config':
     puppetdb_server     => $puppetdb_host,
     manage_storeconfigs => false,
+    terminus_package    => 'openvoxdb-termini',
   }

-  Package['puppetserver']
+  Package['openvox-server']
   -> Class['profiles::puppet::gems']
   -> Class['profiles::puppet::r10k']
   -> Class['profiles::puppet::g10k']
site/profiles/manifests/stalwart/haproxy.pp (new file)
@@ -0,0 +1,76 @@
# enable external access via haproxy
class profiles::stalwart::haproxy (
  Boolean $enable = false,
) {

  # webadmin
  profiles::haproxy::balancemember { "${facts['networking']['fqdn']}_443":
    service => 'be_stalwart_webadmin',
    ports   => [443],
    options => [
      "cookie ${facts['networking']['hostname']}",
      'ssl',
      'verify none',
      'check',
      'inter 2s',
      'rise 3',
      'fall 2',
      'send-proxy-v2',
    ]
  }

  # imap
  profiles::haproxy::balancemember { "${facts['networking']['fqdn']}_143":
    service => 'be_stalwart_imap',
    ports   => [143],
    options => [
      'check',
      'inter 3s',
      'rise 2',
      'fall 3',
      'send-proxy-v2',
    ]
  }

  # imaps
  profiles::haproxy::balancemember { "${facts['networking']['fqdn']}_993":
    service => 'be_stalwart_imaps',
    ports   => [993],
    options => [
      'check',
      'ssl',
      'verify none',
      'inter 3s',
      'rise 2',
      'fall 3',
      'send-proxy-v2',
    ]
  }

  # smtp
  profiles::haproxy::balancemember { "${facts['networking']['fqdn']}_25":
    service => 'be_stalwart_smtp',
    ports   => [25],
    options => [
      'check',
      'inter 3s',
      'rise 2',
      'fall 3',
      'send-proxy-v2',
    ]
  }

  # smtp submission
  profiles::haproxy::balancemember { "${facts['networking']['fqdn']}_587":
    service => 'be_stalwart_submission',
    ports   => [587],
    options => [
      'check',
      'inter 3s',
      'rise 2',
      'fall 3',
      'send-proxy-v2',
    ]
  }

}
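Every member is exported with send-proxy-v2, so the generated server lines speak PROXY protocol v2 and the Stalwart listeners must be configured to expect it. Roughly the haproxy server line each export should render to (hostname and address assumed for illustration):

    server mail01 10.0.0.21:143 check inter 3s rise 2 fall 3 send-proxy-v2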
@@ -55,4 +55,7 @@ class profiles::yum::global (
   # setup dnf-autoupdate
   include profiles::yum::autoupdater

+  # ensure dnf makecache runs before packages
+  Yumrepo <| |> -> Exec['dnf_makecache'] -> Package <| |>
+
 }
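The collector chain orders every yumrepo in the catalog before the cache refresh, and the refresh before any package install. It presumes a dnf_makecache exec is declared somewhere (the repos above notify it); a sketch of what that exec likely looks like, since its declaration is not shown in this diff:

    exec { 'dnf_makecache':
      command     => '/usr/bin/dnf makecache',
      refreshonly => true,  # only runs when notified, e.g. by the yumrepo resources
    }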
site/profiles/templates/ceph/conf.erb (new file)
@@ -0,0 +1,9 @@
# Managed by Puppet in profiles::ceph::conf
<% @config.each do |section, settings| -%>
[<%= section %>]
<% settings.each do |key, value| -%>
<%# Convert booleans and numbers to strings, leave strings untouched -%>
<%= key %> = <%= value.is_a?(TrueClass) ? 'true' : value.is_a?(FalseClass) ? 'false' : value %>
<% end -%>

<% end -%>
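As an example of what the template produces, a hypothetical $config hash like

    $config = {
      'global' => {
        'mon_initial_members'   => 'ceph01',  # illustrative values throughout
        'osd_pool_default_size' => 3,
        'cephx_require_signatures' => true,
      },
    }

would render to:

    # Managed by Puppet in profiles::ceph::conf
    [global]
    mon_initial_members = ceph01
    osd_pool_default_size = 3
    cephx_require_signatures = true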
@@ -4,7 +4,7 @@ Documentation=https://gitea.com/gitea/act_runner
 After=docker.service

 [Service]
-ExecStart=/usr/local/bin/act_runner daemon --config <%= @home %>/config.yaml
+ExecStart=/usr/bin/act_runner daemon --config <%= @home %>/config.yaml
 ExecReload=/bin/kill -s HUP $MAINPID
 WorkingDirectory=<%= @home %>
 TimeoutSec=0
site/profiles/templates/postfix/gateway/helo_access.erb (new file)
@@ -0,0 +1,11 @@
# FILE MANAGED BY PUPPET, CHANGES WILL BE REPLACED
#
# Controls access based on HELO/EHLO hostnames to block spam patterns
# HELO/EHLO access controls
# Format: pattern action
# Example: .dynamic.example.com REJECT
# Example: localhost REJECT You are not localhost

<% @helo_access_maps.each do |pattern, action| -%>
<%= pattern %> <%= action %>
<% end -%>
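This template is fed by the $helo_access_maps parameter of profiles::postfix::gateway; e.g. a hash entry of '.dynamic.example.com' => 'REJECT' (mirroring the file's own example) renders the line:

    .dynamic.example.com REJECT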
Some files were not shown because too many files have changed in this diff.