chore: Refactor using new Proxmox provider
parent 7d4baad8ff
commit 310f6fb29d
7 changed files with 320 additions and 97 deletions
@@ -1,44 +1,93 @@
 terraform {
   required_providers {
     proxmox = {
-      source  = "telmate/proxmox"
-      version = ">= 3.0.2-rc05"
+      source  = "bpg/proxmox"
+      version = "0.85.1"
     }
     talos = {
       source  = "siderolabs/talos"
       version = "0.9.0"
     }
   }
 }
 
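Note: the bpg/proxmox provider needs its own provider configuration block, which is not shown in this diff. A minimal sketch of what it could look like, assuming API-token auth; the endpoint URL and the var.proxmox_api_token variable are placeholders, not part of this commit:

provider "proxmox" {
  endpoint  = "https://pve01.example.com:8006/" # hypothetical URL, replace with the real PVE host
  api_token = var.proxmox_api_token             # assumed variable, not defined in this diff

  # Some operations (e.g. ISO uploads) go over SSH; agent auth is assumed here.
  ssh {
    agent = true
  }
}
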
resource "proxmox_vm_qemu" "worker" {
|
||||
for_each = { for node in var.workers.nodes : node.name => node }
|
||||
|
||||
name = format("k8s-wheatley-%s", each.value.name)
|
||||
target_node = "pve01"
|
||||
tags = "k8s-wheatley,worker"
|
||||
onboot = true
|
||||
bios = "ovmf"
|
||||
boot = "order=virtio0;net0"
|
||||
clone = format("talos-%s", var.workers.talos_version)
|
||||
scsihw = "virtio-scsi-pci"
|
||||
resource "proxmox_virtual_environment_vm" "worker" {
|
||||
|
||||
disk {
|
||||
size = var.workers.disk
|
||||
storage = var.workers.storagepool
|
||||
type = "disk"
|
||||
slot = "virtio0"
|
||||
format = "raw"
|
||||
name = var.worker.node_name
|
||||
node_name = "pve01"
|
||||
tags = ["tofu"]
|
||||
bios = "ovmf"
|
||||
on_boot = true
|
||||
machine = "q35"
|
||||
stop_on_destroy = true
|
||||
|
||||
operating_system {
|
||||
type = "l26"
|
||||
}
|
||||
agent {
|
||||
enabled = true
|
||||
}
|
||||
|
||||
cpu {
|
||||
cores = var.workers.cpu
|
||||
cores = var.worker.cpu
|
||||
sockets = 1
|
||||
type = "x86-64-v2-AES"
|
||||
}
|
||||
|
||||
memory = var.workers.memory * 1024
|
||||
memory {
|
||||
dedicated = var.worker.memory * 1024
|
||||
}
|
||||
|
||||
network {
|
||||
id = 0
|
||||
model = "virtio"
|
||||
disk {
|
||||
datastore_id = var.worker.storagepool
|
||||
interface = "virtio0"
|
||||
aio = "io_uring"
|
||||
size = var.worker.disk * 1024
|
||||
file_format = "raw"
|
||||
}
|
||||
|
||||
cdrom {
|
||||
file_id = format("local:iso/talos-%s-nocloud-amd64-secureboot.iso", var.worker.talos_version)
|
||||
}
|
||||
|
||||
efi_disk {
|
||||
datastore_id = var.worker.storagepool
|
||||
file_format = "raw"
|
||||
type = "4m"
|
||||
}
|
||||
|
||||
tpm_state {
|
||||
datastore_id = var.worker.storagepool
|
||||
version = "v2.0"
|
||||
}
|
||||
|
||||
initialization {
|
||||
datastore_id = var.worker.storagepool
|
||||
ip_config {
|
||||
ipv4 {
|
||||
address = var.worker.node_ipv4_address
|
||||
gateway = var.worker.ipv4_gateway
|
||||
}
|
||||
}
|
||||
dns {
|
||||
servers = ["10.13.37.2"]
|
||||
}
|
||||
}
|
||||
|
||||
network_device {
|
||||
bridge = "vmbr1"
|
||||
}
|
||||
ipconfig0 = format("ip=%s/24,gw=10.13.38.1", each.value.ip_address)
|
||||
skip_ipv6 = true
|
||||
|
||||
}
|
||||
|
||||
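The new resource reads everything from a singular var.worker object instead of iterating over var.workers.nodes. A sketch of the variable shape it implies, reconstructed from the attributes referenced above; the attribute names match the diff, but the object type itself is an assumption, since the variables file is not part of this hunk:

variable "worker" {
  type = object({
    node_name         = string # VM name
    cpu               = number # core count
    memory            = number # apparently GiB, multiplied by 1024 in the resource
    disk              = number # multiplied by 1024 in the resource
    storagepool       = string # Proxmox datastore id
    talos_version     = string # selects the Talos secureboot ISO
    node_ipv4_address = string # CIDR notation, e.g. "10.13.38.10/24"
    ipv4_gateway      = string
  })
}
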
 # resource "talos_machine_secrets" "controlplane" {
 #   talos_version = var.controlplane.talos_version
 # }
 #
 # data "talos_client_configuration" "controlplane" {
 #   cluster_name         = var.controlplane.cluster_name
 #   client_configuration = talos_machine_secrets.controlplane.client_configuration
 #   nodes                = [for k, v in var.controlplane : v.ip]
 #   endpoints            = [var.controlplane.cluster_endpoint]
 # }
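Since the resource dropped its for_each over var.workers.nodes, one plausible way to keep multiple workers is to move the iteration up to the module call. This is a hypothetical sketch of such a caller, not something in this commit; the module path is invented, and the attribute mapping reuses the expressions the old resource had:

module "worker" {
  source   = "./modules/worker"                                    # hypothetical path
  for_each = { for node in var.workers.nodes : node.name => node } # same expression the old resource used

  worker = {
    node_name         = format("k8s-wheatley-%s", each.value.name)
    cpu               = var.workers.cpu
    memory            = var.workers.memory
    disk              = var.workers.disk
    storagepool       = var.workers.storagepool
    talos_version     = var.workers.talos_version
    node_ipv4_address = format("%s/24", each.value.ip_address) # from the old ipconfig0
    ipv4_gateway      = "10.13.38.1"                            # from the old ipconfig0
  }
}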