feat: initial setup

- manage nomad jobs
- create makefile
- create gitignore
- manage terragrunt environments
- add build/deploy jobs
Ben Vincent 2024-12-28 14:17:21 +11:00
parent f7fad0b74f
commit 3fe7ea4c36
11 changed files with 378 additions and 0 deletions

@@ -0,0 +1,36 @@
name: Build
on:
  pull_request:
jobs:
  build:
    runs-on: almalinux-8
    container:
      image: git.query.consul/unkin/almalinux8-runnerdnd:latest
      options: --privileged
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
      - name: Install Packages
        run: |
          dnf install terraform terragrunt vault jq -y
      - name: Run Terraform Plan
        env:
          VAULT_ROLEID: ${{ secrets.TERRAFORM_NOMAD_VAULT_ROLEID }}
        run: |
          make plan
      - name: Show Plans
        run: |
          find /workspace -type f -name "*.plan"
      - name: Upload Artifacts
        uses: actions/upload-artifact@v3
        with:
          name: plans
          path: /workspace/unkin/terraform-nomad/plans/*.plan

@@ -0,0 +1,36 @@
name: Deploy
on:
  workflow_run:
    workflows:
      - Build
    types:
      - completed
jobs:
  deploy:
    if: ${{ github.event.workflow_run.conclusion == 'success' }}
    runs-on: almalinux-8
    container:
      image: git.query.consul/unkin/almalinux8-runnerdnd:latest
      options: --privileged
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
      - name: Download Build Artifacts
        uses: actions/download-artifact@v3
        with:
          name: plans
          path: /workspace/unkin/terraform-nomad/plans
      - name: Install Packages
        run: |
          dnf install terraform terragrunt vault jq -y
      - name: Run Terraform Apply
        env:
          VAULT_ROLEID: ${{ secrets.TERRAFORM_NOMAD_VAULT_ROLEID }}
        run: |
          make apply

.gitignore

@@ -0,0 +1,6 @@
.terraform
.terraform.lock.hcl
environments/*/*.tf
plans
.venv
env

Makefile

@@ -0,0 +1,33 @@
SHELL := /bin/bash

ENVIRONMENT ?= au-syd1
ENV_DIR = environments/$(ENVIRONMENT)
PLAN_DIR = plans
PLAN_FILE = ../../$(PLAN_DIR)/$(ENVIRONMENT).plan

.PHONY: clean init plan apply

define vault_env
@export VAULT_ADDR="https://vault.service.consul:8200" && \
export VAULT_TOKEN=$$(vault write -field=token auth/approle/login role_id=$$VAULT_ROLEID) && \
export $$(vault read -format=json kv/data/service/terraform/nomad | jq -r '.data.data | to_entries[] | "\(.key)=\(.value)"')
endef

clean:
	@echo "Cleaning Terraform files..."
	find environments -type f -name '*.tf' -exec rm -f "{}" \; && \
	find environments -type f -name '.terraform.lock.hcl' -exec rm -f "{}" \; && \
	find environments -type d -name '.terraform' -exec rm -rf "{}" \; && \
	rm -rf plans

init:
	$(call vault_env) && \
	terragrunt --terragrunt-working-dir $(ENV_DIR) init

plan: init
	@mkdir -p $(PLAN_DIR)
	$(call vault_env) && \
	terragrunt --terragrunt-working-dir $(ENV_DIR) plan -out=$(PLAN_FILE)

apply:
	$(call vault_env) && \
	terragrunt --terragrunt-working-dir $(ENV_DIR) apply $(PLAN_FILE)
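
The targets are parameterised on ENVIRONMENT (defaulting to au-syd1), so the same Makefile drives CI and local runs. A minimal local invocation, as a sketch; <role-id> stands in for the AppRole role ID that CI injects via the TERRAFORM_NOMAD_VAULT_ROLEID secret:

VAULT_ROLEID=<role-id> make plan ENVIRONMENT=au-syd1
VAULT_ROLEID=<role-id> make apply ENVIRONMENT=au-syd1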

environments/au-syd1/terragrunt.hcl

@@ -0,0 +1,27 @@
include "root" {
  path = find_in_parent_folders("root.hcl")
}

inputs = {
  job_files = [
    "testapp1",
    "testapp2",
  ]
  policy_files = []
}

generate "shared_modules" {
  if_exists = "overwrite"
  path      = "modules.tf"
  # Dynamically include the shared/modules.tf content
  contents = file("../../shared/modules.tf")
}

generate "shared_variables" {
  if_exists = "overwrite"
  path      = "variables.tf"
  # Dynamically include the shared/variables.tf content
  contents = file("../../shared/variables.tf")
}
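
Each environment is just a directory with its own terragrunt.hcl; the shared .tf files are generated into it at plan time, which is why environments/*/*.tf is gitignored. A second environment would be a sketch like this (au-mel1 is hypothetical):

# environments/au-mel1/terragrunt.hcl (hypothetical)
include "root" {
  path = find_in_parent_folders("root.hcl")
}

inputs = {
  job_files    = ["testapp1"]
  policy_files = []
}

# ...plus the same generate "shared_modules" / "shared_variables" blocks as above,
# since those live in each environment file rather than in root.hcl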

environments/root.hcl

@@ -0,0 +1,47 @@
locals {
  vault_addr = "https://vault.service.consul:8200"
  nomad_addr = "https://nomad.service.consul:4646"
}

generate "backend" {
  path      = "backend.tf"
  if_exists = "overwrite_terragrunt"
  contents  = <<EOF
terraform {
  backend "consul" {
    address = "https://consul.service.consul"
    path    = "infra/terraform/nomad/${path_relative_to_include()}/state"
    scheme  = "https"
    lock    = true
    ca_file = "/etc/pki/tls/certs/ca-bundle.crt"
  }
}
EOF
}

generate "provider" {
  path      = "provider.tf"
  if_exists = "overwrite_terragrunt"
  contents  = <<EOF
terraform {
  required_providers {
    vault = {
      source  = "hashicorp/vault"
      version = "~> 4.5.0"
    }
    nomad = {
      source  = "hashicorp/nomad"
      version = "~> 2.4.0"
    }
  }
}

provider "vault" {
  address = "${local.vault_addr}"
}

provider "nomad" {
  address = "${local.nomad_addr}"
  #token   = var.nomad_token
}
EOF
}
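
For environments/au-syd1, path_relative_to_include() resolves to au-syd1, so each environment gets its own state path in Consul's KV store. The rendered backend.tf (illustrative):

terraform {
  backend "consul" {
    address = "https://consul.service.consul"
    path    = "infra/terraform/nomad/au-syd1/state"
    scheme  = "https"
    lock    = true
    ca_file = "/etc/pki/tls/certs/ca-bundle.crt"
  }
}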

jobs/testapp1.hcl

@@ -0,0 +1,62 @@
job "testapp1" {
datacenters = ["au-syd1"]
type = "service"
group "http" {
count = 3
network {
port "http" {
to = 8080
}
}
update {
max_parallel = 2
health_check = "checks"
min_healthy_time = "30s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
auto_promote = true
canary = 1
stagger = "15s"
}
task "http-server" {
driver = "docker"
config {
image = "git.query.consul/unkin/almalinux9:latest"
network_mode = "bridge"
ports = ["http"]
command = "sh"
args = ["-c", "cd /data/; python3 -m http.server 8080"]
volumes = ["/shared/nomad/testing:/data"]
}
resources {
cpu = 250
memory = 128
}
service {
name = "testapp1"
port = "http"
address_mode = "driver"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
}
env {
PYTHONUNBUFFERED = "1"
}
}
}
}

jobs/testapp2.hcl

@@ -0,0 +1,62 @@
job "testapp2" {
datacenters = ["au-syd1"]
type = "service"
group "http" {
count = 3
network {
port "http" {
to = 8080
}
}
update {
max_parallel = 2
health_check = "checks"
min_healthy_time = "30s"
healthy_deadline = "5m"
progress_deadline = "10m"
auto_revert = true
auto_promote = true
canary = 1
stagger = "15s"
}
task "http-server" {
driver = "docker"
config {
image = "git.query.consul/unkin/almalinux9:latest"
network_mode = "bridge"
ports = ["http"]
command = "sh"
args = ["-c", "cd /data/; python3 -m http.server 8080"]
volumes = ["/shared/nomad/testing:/data"]
}
resources {
cpu = 250 # Adjust as needed
memory = 128 # Adjust as needed
}
service {
name = "testapp2"
port = "http"
address_mode = "driver"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
}
env {
PYTHONUNBUFFERED = "1"
}
}
}
}

policies/anonymous.hcl

@@ -0,0 +1,24 @@
namespace "default" {
  policy       = "read"
  capabilities = ["list-jobs", "read-job"]
}

agent {
  policy = "read"
}

operator {
  policy = "read"
}

quota {
  policy = "read"
}

node {
  policy = "read"
}

host_volume "*" {
  policy = "read"
}
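
Nothing references this policy yet: policy_files is empty in the environment's terragrunt.hcl. Because shared/modules.tf reads policies/${each.value} verbatim (no .hcl suffix is appended, unlike job_files), enabling it would mean listing the full file name, which then also becomes the ACL policy name:

# in the environment's terragrunt.hcl inputs (sketch)
policy_files = ["anonymous.hcl"]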

shared/modules.tf

@@ -0,0 +1,31 @@
# Dynamically load all job configurations from the jobs/ folder
resource "nomad_job" "app" {
  for_each = toset(var.job_files)
  jobspec  = file("../../${path.module}/jobs/${each.value}.hcl")
}

resource "nomad_acl_policy" "policies" {
  for_each  = toset(var.policy_files)
  name      = each.value
  rules_hcl = file("../../${path.module}/policies/${each.value}")
}

resource "nomad_acl_role" "roles" {
  for_each = toset(var.policy_files)
  name     = "${each.value}_role"

  policy {
    name = each.value
  }
}

resource "nomad_acl_token" "tokens" {
  for_each = toset(var.policy_files)
  name     = "${each.value}_token"
  type     = "client"
  policies = [each.value]
}
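
The client tokens are created but never surfaced. If other tooling needed them, an output could expose the secret IDs; a hypothetical addition, not part of this commit (secret_id is the attribute the nomad_acl_token resource exports):

# hypothetical addition to shared/modules.tf
output "acl_token_secrets" {
  value     = { for name, token in nomad_acl_token.tokens : name => token.secret_id }
  sensitive = true
}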

shared/variables.tf

@@ -0,0 +1,14 @@
#variable "nomad_token" {
#  description = "The Nomad API token"
#  type        = string
#}

variable "job_files" {
  description = "List of Nomad job files to deploy"
  type        = list(string)
}

variable "policy_files" {
  description = "List of policy files to create Nomad ACL policies"
  type        = list(string)
}