chore: Refactor using new Proxmox provider

This commit is contained in:
Peter 2025-10-26 18:14:46 +01:00
parent 7d4baad8ff
commit 310f6fb29d
Signed by: Peter
SSH key fingerprint: SHA256:B5tYaxBExaDm74r1px9iVeZ6F/ZDiyiy9SbBqfZYrvg
7 changed files with 320 additions and 97 deletions

View file

@@ -0,0 +1,34 @@
# Talos Linux machine configuration template, rendered via Terraform/OpenTofu
# templatefile(): ${install_disk}, ${hostname}, ${cluster_name} and ${node_name}
# are template variables supplied at render time.
# NOTE(review): YAML indentation was stripped in this diff view — the keys below
# are flattened; restore nesting against the Talos machine-config schema before use.
debug: false
machine:
install:
# Disk Talos installs itself to — supplied by Terraform
disk: ${install_disk}
network:
hostname: ${hostname}
nameservers:
- 10.13.37.2
interfaces:
- interface: eth0
# Static addressing — DHCP explicitly disabled on eth0
dhcp: false
kubelet:
extraArgs:
# Cap PIDs per pod to limit runaway-process blast radius
pod-max-pids: 1000
extraConfig:
# Image garbage-collection thresholds (percent of disk usage)
imageGCHighThresholdPercent: 75
imageGCLowThresholdPercent: 70
cluster:
apiServer:
auditPolicy:
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
# Log only metadata in audit logs
- level: Metadata
network:
hostname: ${hostname}
cni:
# CNI disabled here — presumably installed out-of-band (e.g. Cilium); verify
name: none
# Region/zone topology labels derived from cluster and node names
nodeLabels:
topology.kubernetes.io/region: ${cluster_name}
topology.kubernetes.io/zone: ${node_name}
proxy:
# kube-proxy disabled — assumes the CNI provides service proxying; confirm
disabled: true

View file

@@ -1,44 +1,93 @@
# NOTE(review): diff residue — removed lines (old telmate/proxmox, var.workers)
# and added lines (new bpg/proxmox, var.worker) are interleaved below with no
# +/- markers, and indentation was lost in this view; the text is not valid HCL
# as displayed. "OLD"/"NEW" annotations below mark each run.
terraform {
required_providers {
proxmox = {
# OLD provider pin (removed):
source = "telmate/proxmox"
version = ">= 3.0.2-rc05"
# NEW provider pin (added):
source = "bpg/proxmox"
version = "0.85.1"
}
talos = {
source = "siderolabs/talos"
version = "0.9.0"
}
}
}
# OLD resource (removed) — telmate qemu VM, one per entry in var.workers.nodes:
resource "proxmox_vm_qemu" "worker" {
for_each = { for node in var.workers.nodes : node.name => node }
name = format("k8s-wheatley-%s", each.value.name)
target_node = "pve01"
tags = "k8s-wheatley,worker"
onboot = true
bios = "ovmf"
boot = "order=virtio0;net0"
clone = format("talos-%s", var.workers.talos_version)
scsihw = "virtio-scsi-pci"
# NEW resource (added) — bpg/proxmox VM:
resource "proxmox_virtual_environment_vm" "worker" {
# OLD disk block (removed, telmate syntax):
disk {
size = var.workers.disk
storage = var.workers.storagepool
type = "disk"
slot = "virtio0"
format = "raw"
# NEW VM attributes (added).
# NOTE(review): the old per-node for_each and the "k8s-wheatley,worker" tags
# were dropped — a single VM named from var.worker.node_name, tagged only
# "tofu"; confirm that is intentional.
name = var.worker.node_name
node_name = "pve01"
tags = ["tofu"]
bios = "ovmf"
on_boot = true
machine = "q35"
stop_on_destroy = true
operating_system {
type = "l26"
}
agent {
enabled = true
}
cpu {
# OLD core count (var.workers) followed by NEW (var.worker):
cores = var.workers.cpu
cores = var.worker.cpu
sockets = 1
type = "x86-64-v2-AES"
}
# OLD memory attribute (removed):
memory = var.workers.memory * 1024
# NEW memory block (added) — * 1024 converts a GiB value to the MiB the
# provider expects; verify var.worker.memory really is in GiB:
memory {
dedicated = var.worker.memory * 1024
}
# OLD network block (removed, telmate syntax):
network {
id = 0
model = "virtio"
# NEW disk block (added, bpg syntax).
# NOTE(review): bpg's disk `size` is in GB — the "* 1024" here mirrors the
# memory conversion but would inflate the disk 1024x, and var.worker.disk is
# typed `string` in variables.tf; verify units and type before applying.
disk {
datastore_id = var.worker.storagepool
interface = "virtio0"
aio = "io_uring"
size = var.worker.disk * 1024
file_format = "raw"
}
# Boot the Talos installer ISO (secure-boot variant) from "local" storage:
cdrom {
file_id = format("local:iso/talos-%s-nocloud-amd64-secureboot.iso", var.worker.talos_version)
}
# UEFI vars and TPM state — required for the secure-boot ISO above:
efi_disk {
datastore_id = var.worker.storagepool
file_format = "raw"
type = "4m"
}
tpm_state {
datastore_id = var.worker.storagepool
version = "v2.0"
}
# Cloud-init style initialization: static IPv4 plus DNS server:
initialization {
datastore_id = var.worker.storagepool
ip_config {
ipv4 {
address = var.worker.node_ipv4_address
gateway = var.worker.ipv4_gateway
}
}
dns {
servers = ["10.13.37.2"]
}
}
network_device {
bridge = "vmbr1"
}
# OLD static IP config (removed, telmate syntax):
ipconfig0 = format("ip=%s/24,gw=10.13.38.1", each.value.ip_address)
skip_ipv6 = true
}
# Scaffolding for Talos cluster bootstrap — added commented-out, not yet enabled:
# resource "talos_machine_secrets" "controlplane" {
# talos_version = var.controlplane.talos_version
# }
#
# data "talos_client_configuration" "controlplane" {
# cluster_name = var.controlplane.cluster_name
# client_configuration = talos_machine_secrets.controlplane.client_configuration
# nodes = [for k, v in var.controlplane : v.ip]
# endpoints = [var.controlplane.cluster_endpoint]
# }

View file

@@ -1,15 +1,17 @@
# NOTE(review): diff residue — the old "workers" variable (removed) and the new
# "worker" variable (added) are interleaved below; not valid HCL as displayed.
# OLD declaration (removed) — plural, carrying an embedded per-node list:
variable "workers" {
# NEW declaration (added) — singular, one flat object per worker:
variable "worker" {
description = "Worker node configuration"
type = object({
# OLD attribute list (removed):
cpu = number
memory = number
disk = string
storagepool = string
talos_version = string
nodes = list(object({
name = string
ip_address = string
}))
# NEW attribute list (added) — per-node identity/addressing now lives here.
# NOTE(review): disk is still typed `string`, but main.tf computes
# var.worker.disk * 1024 — consider `number`; verify.
cpu = number
memory = number
disk = string
storagepool = string
talos_version = string
cluster_name = string
kubernetes_version = string
node_name = string
node_ipv4_address = string
cluster_endpoint = string
ipv4_gateway = string
})
}