chore: Refactor VM deployment

This commit is contained in:
Peter 2025-10-28 22:45:12 +01:00
parent d2e0c26900
commit 9ca0f7c431
Signed by: Peter
SSH key fingerprint: SHA256:B5tYaxBExaDm74r1px9iVeZ6F/ZDiyiy9SbBqfZYrvg
13 changed files with 192 additions and 328 deletions

View file

@ -0,0 +1,5 @@
# Shell environment for the k8s-wheatley Talos cluster: source this file
# before running kubectl/talosctl/tofu against this environment.
export KUBERNETES_API_SERVER_ADDRESS="10.13.38.11"
export KUBECONFIG=$HOME/.kube/k8s-wheatley
export TALOSCONFIG=$HOME/.talos/k8s-wheatley
export TF_VAR_proxmox_endpoint="https://10.167.84.10:8006/api2/json"
# SECURITY(review): a live Proxmox API token is committed in plain text below.
# Rotate this credential and load it from an untracked file or a secret
# manager instead of keeping it in version control.
export TF_VAR_proxmox_api_token="tofu-prov@pve!apitoken123=486691dd-2527-4b25-b5a7-cdfff6ae841d"

View file

@ -41,6 +41,24 @@ provider "registry.opentofu.org/hashicorp/http" {
]
}
provider "registry.opentofu.org/hashicorp/time" {
version = "0.11.1"
constraints = "0.11.1"
hashes = [
"h1:+S9YvR/HeCxFGMS3ITjOFqlWrR6DdarWWowT9Cz18/M=",
"zh:048c56f9f810f67a7460363a26bf3ef939d64f0d7b7342b9e7f24cc85ee1491b",
"zh:49f949cc5cb50fbb65f7b4578b79fbe02b6bafe9e3f5f1c2936114dd445b84b3",
"zh:553174a4fa88f6e186800d7ee155a6b5b4c6c81793643f1a20eab26becc7f823",
"zh:5cae304e21f77091d4b50389c655afd5e4e2e8d4cd9c06de139a31b8e7d343a9",
"zh:7aae20832bd9885f034831aa44db3a6ffcec034a2d5a2815d92c42c40c14ca1d",
"zh:93d715610dce777474b5eff1d7dbe797e72ca0b679cd8636efb3aa45d1cb589e",
"zh:bd29e04645775851eb10e7f3b39104ae57ca3632dec4ae07328d33d4182e7fb5",
"zh:d6ad6a4d52a6989b8452466f2ec3dbcdb00cc44a96bd1ca618d91a5d74895f49",
"zh:e68cfad3ec526631410fa9406938d624fd56b9ab065c76525cb3f731d106fbfe",
"zh:ffee8aa6b7ce56f4b8fdc0c492404be0041137a278388eb1d1180b637fb5b3de",
]
}
provider "registry.opentofu.org/siderolabs/talos" {
version = "0.9.0"
constraints = "0.9.0"

View file

@ -8,9 +8,10 @@ locals {
cluster_name = "k8s-wheatley"
kubernetes_version = "1.34.1"
talos_version = "1.11.3"
cilium_version = "1.14.1"
ipv4_gateway = "10.13.38.1"
ipv4_cidr = "/24"
cluster_endpoint_ip = "10.13.38.10"
proxmox_node = "pve01"
controlplanes = {
@ -34,10 +35,14 @@ locals {
{
name = "cp03"
ip_address = "10.13.38.13"
}
},
]
}
controlplane_addresses = [
for node in local.controlplanes.nodes : node.ip_address
]
workers = {
cpu = 4
memory = 4
@ -50,18 +55,30 @@ locals {
nodes = [
{
name = "worker01"
ip_address = "10.13.38.20"
},
{
name = "worker02"
ip_address = "10.13.38.21"
},
{
name = "worker03"
name = "worker02"
ip_address = "10.13.38.22"
}
},
{
name = "worker03"
ip_address = "10.13.38.23"
},
{
name = "worker04"
ip_address = "10.13.38.24"
},
{
name = "worker05"
ip_address = "10.13.38.25"
},
]
}
worker_addresses = [
for node in local.workers.nodes : node.ip_address
]
}
module "talos-image" {
@ -73,10 +90,12 @@ module "talos-image" {
module "controlplanes" {
depends_on = [module.talos-image]
source = "./modules/controlplane"
source = "./modules/talos-node"
for_each = { for node in local.controlplanes.nodes : node.name => node }
controlplane = {
node = {
name = format("k8s-wheatley-%s", each.value.name)
ipv4_address = each.value.ip_address
cpu = local.controlplanes.cpu
memory = local.controlplanes.memory
disk = local.controlplanes.disk
@ -84,20 +103,29 @@ module "controlplanes" {
talos_version = local.talos_version
cluster_name = local.cluster_name
kubernetes_version = local.kubernetes_version
node_name = format("k8s-wheatley-%s", each.value.name)
cluster_endpoint = format("https://%s:6443", local.cluster_endpoint_ip)
node_ipv4_address = format("%s%s", each.value.ip_address, local.ipv4_cidr)
cluster_endpoint = local.cluster_endpoint_ip
ipv4_gateway = local.ipv4_gateway
proxmox_node = local.proxmox_node
controlplane_addresses = local.controlplane_addresses
worker_addresses = local.worker_addresses
}
}
resource "time_sleep" "delay_before_workers" {
depends_on = [module.talos-image]
create_duration = "1s"
}
module "workers" {
depends_on = [module.controlplanes]
depends_on = [time_sleep.delay_before_workers]
source = "./modules/worker"
source = "./modules/talos-node"
for_each = { for node in local.workers.nodes : node.name => node }
worker = {
node = {
name = format("k8s-wheatley-%s", each.value.name)
ipv4_address = each.value.ip_address
cpu = local.workers.cpu
memory = local.workers.memory
disk = local.workers.disk
@ -105,9 +133,10 @@ module "workers" {
talos_version = local.talos_version
cluster_name = local.cluster_name
kubernetes_version = local.kubernetes_version
node_name = format("k8s-wheatley-%s", each.value.name)
cluster_endpoint = format("https://%s:6443", local.cluster_endpoint_ip)
node_ipv4_address = format("%s%s", each.value.ip_address, local.ipv4_cidr)
cluster_endpoint = local.cluster_endpoint_ip
ipv4_gateway = local.ipv4_gateway
proxmox_node = local.proxmox_node
controlplane_addresses = local.controlplane_addresses
worker_addresses = local.worker_addresses
}
}

View file

@ -1,34 +0,0 @@
debug: false
machine:
install:
disk: ${install_disk}
network:
hostname: ${hostname}
nameservers:
- 10.13.37.2
interfaces:
- interface: eth0
dhcp: false
kubelet:
extraArgs:
pod-max-pids: 1000
extraConfig:
imageGCHighThresholdPercent: 75
imageGCLowThresholdPercent: 70
cluster:
apiServer:
auditPolicy:
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
# Log only metadata in audit logs
- level: Metadata
network:
hostname: ${hostname}
cni:
name: none
nodeLabels:
topology.kubernetes.io/region: ${cluster_name}
topology.kubernetes.io/zone: ${node_name}
proxy:
disabled: true

View file

@ -1,93 +0,0 @@
# modules/controlplane/main.tf — provisions one Talos control-plane VM on
# Proxmox VE (removed in this commit in favor of the generic talos-node module).
terraform {
  required_providers {
    proxmox = {
      source  = "bpg/proxmox"
      version = "0.85.1"
    }
    # Only needed by the commented-out talos_* blocks at the bottom.
    talos = {
      source  = "siderolabs/talos"
      version = "0.9.0"
    }
  }
}

resource "proxmox_virtual_environment_vm" "controlplane" {
  name      = var.controlplane.node_name
  node_name = "pve01" # hard-coded placement target
  tags      = ["tofu"]

  # UEFI (OVMF) firmware on a q35 machine, paired with the secure-boot ISO
  # and TPM state volume below.
  bios            = "ovmf"
  on_boot         = true
  machine         = "q35"
  stop_on_destroy = true

  operating_system {
    type = "l26" # modern Linux guest
  }

  agent {
    enabled = true
  }

  cpu {
    cores   = var.controlplane.cpu
    sockets = 1
    type    = "x86-64-v2-AES"
  }

  memory {
    # Scaled by 1024 — presumably GiB in, MiB expected by the provider.
    dedicated = var.controlplane.memory * 1024
  }

  disk {
    datastore_id = var.controlplane.storagepool
    interface    = "virtio0"
    aio          = "io_uring"
    # NOTE(review): disk is typed string in variables.tf yet multiplied
    # here — relies on implicit string-to-number conversion; verify units.
    size        = var.controlplane.disk * 1024
    file_format = "raw"
  }

  # Installer ISO matching the pinned Talos version (secure-boot build).
  cdrom {
    file_id = format("local:iso/talos-%s-nocloud-amd64-secureboot.iso", var.controlplane.talos_version)
  }

  efi_disk {
    datastore_id = var.controlplane.storagepool
    file_format  = "raw"
    type         = "4m"
  }

  tpm_state {
    datastore_id = var.controlplane.storagepool
    version      = "v2.0"
  }

  initialization {
    datastore_id = var.controlplane.storagepool

    ip_config {
      ipv4 {
        address = var.controlplane.node_ipv4_address # expected to already include the prefix length
        gateway = var.controlplane.ipv4_gateway
      }
    }

    dns {
      servers = ["10.13.37.2"] # hard-coded resolver
    }
  }

  network_device {
    bridge = "vmbr1"
  }
}

# Dead code: never enabled while this module existed.
# resource "talos_machine_secrets" "controlplane" {
#   talos_version = var.controlplane.talos_version
# }
#
# data "talos_client_configuration" "controlplane" {
#   cluster_name         = var.controlplane.cluster_name
#   client_configuration = talos_machine_secrets.controlplane.client_configuration
#   nodes                = [for k, v in var.controlplane : v.ip]
#   endpoints            = [var.controlplane.cluster_endpoint]
# }

View file

@ -1,17 +0,0 @@
# Input contract for the (removed) controlplane module.
variable "controlplane" {
  description = "Control plane node configuration"
  type = object({
    cpu = number # vCPU cores
    memory = number # scaled by 1024 in main.tf (presumably GiB)
    disk = string # NOTE(review): multiplied by 1024 in main.tf despite being a string
    storagepool = string # datastore for disk, EFI vars and TPM state
    talos_version = string # selects the installer ISO
    cluster_name = string # only read by commented-out code in main.tf
    kubernetes_version = string # NOTE(review): not referenced in main.tf
    node_name = string # VM name in Proxmox
    node_ipv4_address = string # address including prefix length
    cluster_endpoint = string # only read by commented-out code in main.tf
    ipv4_gateway = string
  })
}

View file

@ -0,0 +1,77 @@
# modules/talos-node/main.tf
#
# Provisions a single Talos Linux VM on Proxmox VE. The same module is used
# for control-plane and worker nodes; everything node-specific is supplied
# through var.node (see variables.tf in this module).
terraform {
  required_providers {
    proxmox = {
      source  = "bpg/proxmox"
      version = "0.85.1"
    }
  }
}

resource "proxmox_virtual_environment_vm" "talos-node" {
  name      = var.node.name
  node_name = var.node.proxmox_node # Proxmox cluster member to place the VM on
  tags      = ["tofu"]              # marks the VM as OpenTofu-managed

  # UEFI (OVMF) firmware on a q35 machine, paired with the secure-boot ISO
  # and the TPM state volume configured further down.
  bios            = "ovmf"
  on_boot         = true
  machine         = "q35"
  stop_on_destroy = true

  operating_system {
    type = "l26" # modern Linux guest
  }

  # Guest agent enabled; assumes the guest OS actually runs one.
  agent {
    enabled = true
  }

  cpu {
    cores   = var.node.cpu
    sockets = 1
    type    = "x86-64-v2-AES"
  }

  memory {
    # Scaled by 1024 — presumably var.node.memory is GiB and the provider
    # expects MiB; confirm against the provider docs.
    dedicated = var.node.memory * 1024
  }

  disk {
    datastore_id = var.node.storagepool
    interface    = "virtio0"
    aio          = "io_uring"
    # NOTE(review): passed through unscaled, while the removed
    # controlplane/worker modules multiplied disk by 1024 — confirm callers
    # now pass the final size the provider expects.
    size        = var.node.disk
    file_format = "raw"
  }

  # Installer ISO matching the pinned Talos version (secure-boot build).
  cdrom {
    file_id = format("local:iso/talos-%s-nocloud-amd64-secureboot.iso", var.node.talos_version)
  }

  efi_disk {
    datastore_id = var.node.storagepool
    file_format  = "raw"
    type         = "4m"
  }

  # Explicit boot preference: system disk first, then ide3 (presumably the
  # cdrom above), then network boot.
  boot_order = ["virtio0", "ide3", "net0"]

  # TPM 2.0 state volume.
  tpm_state {
    datastore_id = var.node.storagepool
    version      = "v2.0"
  }

  initialization {
    datastore_id = var.node.storagepool

    ip_config {
      ipv4 {
        # NOTE(review): the /24 prefix and the DNS server below are
        # hard-coded here while similar values live in the root module's
        # locals — confirm they are meant to stay in sync.
        address = format("%s/24", var.node.ipv4_address)
        gateway = var.node.ipv4_gateway
      }
    }

    dns {
      servers = ["10.13.37.2"]
    }
  }

  network_device {
    bridge = "vmbr1"
  }
}

View file

@ -0,0 +1,19 @@
# Input contract for the talos-node module: one object describing a single VM.
variable "node" {
  description = "Virtual node configuration"
  type = object({
    name         = string # VM name in Proxmox
    ipv4_address = string # bare address; main.tf appends the /24 prefix
    ipv4_gateway = string
    cpu          = number # vCPU cores
    memory       = number # scaled by 1024 in main.tf (presumably GiB)
    disk         = string # passed straight to the disk `size` argument
    storagepool  = string # datastore for the system disk, EFI vars and TPM state
    talos_version = string # selects the installer ISO image
    cluster_name = string # NOTE(review): not referenced in this module's main.tf — verify it is used elsewhere
    kubernetes_version = string # NOTE(review): not referenced in this module's main.tf
    cluster_endpoint = string # NOTE(review): not referenced in this module's main.tf
    proxmox_node = string # Proxmox cluster member hosting the VM
    controlplane_addresses = list(string) # NOTE(review): not referenced in this module's main.tf
    worker_addresses = list(string) # NOTE(review): not referenced in this module's main.tf
  })
}

View file

@ -1,34 +0,0 @@
debug: false
machine:
install:
disk: ${install_disk}
network:
hostname: ${hostname}
nameservers:
- 10.13.37.2
interfaces:
- interface: eth0
dhcp: false
kubelet:
extraArgs:
pod-max-pids: 1000
extraConfig:
imageGCHighThresholdPercent: 75
imageGCLowThresholdPercent: 70
cluster:
apiServer:
auditPolicy:
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
# Log only metadata in audit logs
- level: Metadata
network:
hostname: ${hostname}
cni:
name: none
nodeLabels:
topology.kubernetes.io/region: ${cluster_name}
topology.kubernetes.io/zone: ${node_name}
proxy:
disabled: true

View file

@ -1,93 +0,0 @@
# modules/worker/main.tf — provisions one Talos worker VM on Proxmox VE
# (removed in this commit in favor of the generic talos-node module).
terraform {
  required_providers {
    proxmox = {
      source  = "bpg/proxmox"
      version = "0.85.1"
    }
    # Only needed by the commented-out talos_* blocks at the bottom.
    talos = {
      source  = "siderolabs/talos"
      version = "0.9.0"
    }
  }
}

resource "proxmox_virtual_environment_vm" "worker" {
  name      = var.worker.node_name
  node_name = "pve01" # hard-coded placement target
  tags      = ["tofu"]

  # UEFI (OVMF) firmware on a q35 machine, paired with the secure-boot ISO
  # and TPM state volume below.
  bios            = "ovmf"
  on_boot         = true
  machine         = "q35"
  stop_on_destroy = true

  operating_system {
    type = "l26" # modern Linux guest
  }

  agent {
    enabled = true
  }

  cpu {
    cores   = var.worker.cpu
    sockets = 1
    type    = "x86-64-v2-AES"
  }

  memory {
    # Scaled by 1024 — presumably GiB in, MiB expected by the provider.
    dedicated = var.worker.memory * 1024
  }

  disk {
    datastore_id = var.worker.storagepool
    interface    = "virtio0"
    aio          = "io_uring"
    # NOTE(review): disk is typed string in variables.tf yet multiplied
    # here — relies on implicit string-to-number conversion; verify units.
    size        = var.worker.disk * 1024
    file_format = "raw"
  }

  # Installer ISO matching the pinned Talos version (secure-boot build).
  cdrom {
    file_id = format("local:iso/talos-%s-nocloud-amd64-secureboot.iso", var.worker.talos_version)
  }

  efi_disk {
    datastore_id = var.worker.storagepool
    file_format  = "raw"
    type         = "4m"
  }

  tpm_state {
    datastore_id = var.worker.storagepool
    version      = "v2.0"
  }

  initialization {
    datastore_id = var.worker.storagepool

    ip_config {
      ipv4 {
        address = var.worker.node_ipv4_address # expected to already include the prefix length
        gateway = var.worker.ipv4_gateway
      }
    }

    dns {
      servers = ["10.13.37.2"] # hard-coded resolver
    }
  }

  network_device {
    bridge = "vmbr1"
  }
}

# Dead code copied from the controlplane module (note the stale
# "controlplane" names and var references): never enabled.
# resource "talos_machine_secrets" "controlplane" {
#   talos_version = var.controlplane.talos_version
# }
#
# data "talos_client_configuration" "controlplane" {
#   cluster_name         = var.controlplane.cluster_name
#   client_configuration = talos_machine_secrets.controlplane.client_configuration
#   nodes                = [for k, v in var.controlplane : v.ip]
#   endpoints            = [var.controlplane.cluster_endpoint]
# }

View file

@ -1,17 +0,0 @@
# Input contract for the (removed) worker module; mirrors the controlplane
# module's variable shape.
variable "worker" {
  description = "Worker node configuration"
  type = object({
    cpu = number # vCPU cores
    memory = number # scaled by 1024 in main.tf (presumably GiB)
    disk = string # NOTE(review): multiplied by 1024 in main.tf despite being a string
    storagepool = string # datastore for disk, EFI vars and TPM state
    talos_version = string # selects the installer ISO
    cluster_name = string # NOTE(review): not referenced by live code in main.tf
    kubernetes_version = string # NOTE(review): not referenced in main.tf
    node_name = string # VM name in Proxmox
    node_ipv4_address = string # address including prefix length
    cluster_endpoint = string # NOTE(review): not referenced by live code in main.tf
    ipv4_gateway = string
  })
}

View file

@ -8,5 +8,9 @@ terraform {
source = "bpg/proxmox"
version = "0.85.1"
}
time = {
source = "hashicorp/time"
version = "0.11.1"
}
}
}