Compare commits

...

3 commits

17 changed files with 477 additions and 277 deletions

5
.gitignore vendored
View file

@ -37,3 +37,8 @@ override.tf.json
.terraformrc
terraform.rc
# ---> End Terraform
.idea/
.vscode/
.DS_Store

View file

@ -0,0 +1,5 @@
# Environment for the k8s-wheatley Talos/OpenTofu workflow (direnv-style exports).
# KUBERNETES_API_SERVER_ADDRESS / KUBECONFIG / TALOSCONFIG point tooling at this cluster;
# the TF_VAR_* values feed the bpg/proxmox provider.
export KUBERNETES_API_SERVER_ADDRESS="10.13.38.11"
export KUBECONFIG=$HOME/.kube/k8s-wheatley
export TALOSCONFIG=$HOME/.talos/k8s-wheatley
export TF_VAR_proxmox_endpoint="https://10.167.84.10:8006/api2/json"
# NOTE(review): a live Proxmox API token is committed below in version control —
# rotate this credential and source it from a secret store or an untracked file instead.
export TF_VAR_proxmox_api_token="tofu-prov@pve!apitoken123=486691dd-2527-4b25-b5a7-cdfff6ae841d"

View file

@ -41,6 +41,24 @@ provider "registry.opentofu.org/hashicorp/http" {
]
}
provider "registry.opentofu.org/hashicorp/time" {
version = "0.11.1"
constraints = "0.11.1"
hashes = [
"h1:+S9YvR/HeCxFGMS3ITjOFqlWrR6DdarWWowT9Cz18/M=",
"zh:048c56f9f810f67a7460363a26bf3ef939d64f0d7b7342b9e7f24cc85ee1491b",
"zh:49f949cc5cb50fbb65f7b4578b79fbe02b6bafe9e3f5f1c2936114dd445b84b3",
"zh:553174a4fa88f6e186800d7ee155a6b5b4c6c81793643f1a20eab26becc7f823",
"zh:5cae304e21f77091d4b50389c655afd5e4e2e8d4cd9c06de139a31b8e7d343a9",
"zh:7aae20832bd9885f034831aa44db3a6ffcec034a2d5a2815d92c42c40c14ca1d",
"zh:93d715610dce777474b5eff1d7dbe797e72ca0b679cd8636efb3aa45d1cb589e",
"zh:bd29e04645775851eb10e7f3b39104ae57ca3632dec4ae07328d33d4182e7fb5",
"zh:d6ad6a4d52a6989b8452466f2ec3dbcdb00cc44a96bd1ca618d91a5d74895f49",
"zh:e68cfad3ec526631410fa9406938d624fd56b9ab065c76525cb3f731d106fbfe",
"zh:ffee8aa6b7ce56f4b8fdc0c492404be0041137a278388eb1d1180b637fb5b3de",
]
}
provider "registry.opentofu.org/siderolabs/talos" {
version = "0.9.0"
constraints = "0.9.0"

View file

@ -8,16 +8,17 @@ locals {
cluster_name = "k8s-wheatley"
kubernetes_version = "1.34.1"
talos_version = "1.11.3"
cilium_version = "1.14.1"
ipv4_gateway = "10.13.38.1"
ipv4_cidr = "/24"
cluster_endpoint_ip = "10.13.38.10"
proxmox_node = "pve01"
controlplanes = {
cpu = 4
memory = 4
disk = 40
storagepool = "local-zfs"
storagepool = "nvme-fastpool"
talos_version = local.talos_version
cluster_name = local.cluster_name
kubernetes_version = local.kubernetes_version
@ -34,15 +35,19 @@ locals {
{
name = "cp03"
ip_address = "10.13.38.13"
}
},
]
}
controlplane_addresses = [
for node in local.controlplanes.nodes : node.ip_address
]
workers = {
cpu = 4
memory = 4
disk = 40
storagepool = "local-zfs"
storagepool = "nvme-fastpool"
talos_version = local.talos_version
cluster_name = local.cluster_name
kubernetes_version = local.kubernetes_version
@ -50,18 +55,30 @@ locals {
nodes = [
{
name = "worker01"
ip_address = "10.13.38.20"
},
{
name = "worker02"
ip_address = "10.13.38.21"
},
{
name = "worker03"
name = "worker02"
ip_address = "10.13.38.22"
}
},
{
name = "worker03"
ip_address = "10.13.38.23"
},
{
name = "worker04"
ip_address = "10.13.38.24"
},
{
name = "worker05"
ip_address = "10.13.38.25"
},
]
}
worker_addresses = [
for node in local.workers.nodes : node.ip_address
]
}
module "talos-image" {
@ -73,10 +90,12 @@ module "talos-image" {
module "controlplanes" {
depends_on = [module.talos-image]
source = "./modules/controlplane"
source = "./modules/talos-node"
for_each = { for node in local.controlplanes.nodes : node.name => node }
controlplane = {
node = {
name = format("k8s-wheatley-%s", each.value.name)
ipv4_address = each.value.ip_address
cpu = local.controlplanes.cpu
memory = local.controlplanes.memory
disk = local.controlplanes.disk
@ -84,20 +103,29 @@ module "controlplanes" {
talos_version = local.talos_version
cluster_name = local.cluster_name
kubernetes_version = local.kubernetes_version
node_name = format("k8s-wheatley-%s", each.value.name)
cluster_endpoint = format("https://%s:6443", local.cluster_endpoint_ip)
node_ipv4_address = format("%s%s", each.value.ip_address, local.ipv4_cidr)
cluster_endpoint = local.cluster_endpoint_ip
ipv4_gateway = local.ipv4_gateway
proxmox_node = local.proxmox_node
controlplane_addresses = local.controlplane_addresses
worker_addresses = local.worker_addresses
}
}
module "workers" {
depends_on = [module.controlplanes]
resource "time_sleep" "delay_before_workers" {
depends_on = [module.talos-image]
source = "./modules/worker"
create_duration = "1s"
}
module "workers" {
depends_on = [time_sleep.delay_before_workers]
source = "./modules/talos-node"
for_each = { for node in local.workers.nodes : node.name => node }
worker = {
node = {
name = format("k8s-wheatley-%s", each.value.name)
ipv4_address = each.value.ip_address
cpu = local.workers.cpu
memory = local.workers.memory
disk = local.workers.disk
@ -105,9 +133,48 @@ module "workers" {
talos_version = local.talos_version
cluster_name = local.cluster_name
kubernetes_version = local.kubernetes_version
node_name = format("k8s-wheatley-%s", each.value.name)
cluster_endpoint = format("https://%s:6443", local.cluster_endpoint_ip)
node_ipv4_address = format("%s%s", each.value.ip_address, local.ipv4_cidr)
cluster_endpoint = local.cluster_endpoint_ip
ipv4_gateway = local.ipv4_gateway
proxmox_node = local.proxmox_node
controlplane_addresses = local.controlplane_addresses
worker_addresses = local.worker_addresses
}
}
module "talos-bootstrap" {
depends_on = [
module.controlplanes,
module.workers
]
source = "./modules/talos-bootstrap"
node_config = {
ipv4_gateway = local.ipv4_gateway
talos_version = local.talos_version
cilium_version = local.cilium_version
cluster_name = local.cluster_name
kubernetes_version = local.kubernetes_version
cluster_endpoint = local.cluster_endpoint_ip
proxmox_node = local.proxmox_node
controlplane_addresses = local.controlplane_addresses
worker_addresses = local.worker_addresses
}
talos_nodes = concat(
[
for node in local.controlplanes.nodes : {
name = format("k8s-wheatley-%s", node.name)
type = "controlplane"
ipv4_address = node.ip_address
}
],
[
for node in local.workers.nodes : {
name = format("k8s-wheatley-%s", node.name)
type = "worker"
ipv4_address = node.ip_address
}
]
)
}

View file

@ -1,93 +0,0 @@
# (Deleted in this change.) Former standalone control-plane module: one Proxmox VM
# per Talos control-plane node. Superseded by the shared modules/talos-node module.
terraform {
required_providers {
proxmox = {
source = "bpg/proxmox"
version = "0.85.1"
}
talos = {
source = "siderolabs/talos"
version = "0.9.0"
}
}
}
# Secure-boot OVMF VM booted from the Talos nocloud ISO.
resource "proxmox_virtual_environment_vm" "controlplane" {
name = var.controlplane.node_name
# NOTE(review): Proxmox host hard-coded here instead of being passed in —
# the replacement module takes it via var.node.proxmox_node.
node_name = "pve01"
tags = ["tofu"]
bios = "ovmf"
on_boot = true
machine = "q35"
stop_on_destroy = true
operating_system {
type = "l26"
}
# QEMU guest agent must be running inside the VM for IP reporting.
agent {
enabled = true
}
cpu {
cores = var.controlplane.cpu
sockets = 1
type = "x86-64-v2-AES"
}
# Memory is specified in GiB by the caller; provider expects MiB.
memory {
dedicated = var.controlplane.memory * 1024
}
disk {
datastore_id = var.controlplane.storagepool
interface = "virtio0"
aio = "io_uring"
# NOTE(review): variables.tf types disk as string yet it is multiplied here,
# and the replacement module passes the value through unscaled — confirm the
# intended unit (GB vs MB) before reusing this pattern.
size = var.controlplane.disk * 1024
file_format = "raw"
}
cdrom {
file_id = format("local:iso/talos-%s-nocloud-amd64-secureboot.iso", var.controlplane.talos_version)
}
efi_disk {
datastore_id = var.controlplane.storagepool
file_format = "raw"
type = "4m"
}
tpm_state {
datastore_id = var.controlplane.storagepool
version = "v2.0"
}
# Cloud-init style network/DNS bootstrap for the node.
initialization {
datastore_id = var.controlplane.storagepool
ip_config {
ipv4 {
address = var.controlplane.node_ipv4_address
gateway = var.controlplane.ipv4_gateway
}
}
dns {
servers = ["10.13.37.2"]
}
}
network_device {
bridge = "vmbr1"
}
}
# Dead code: early draft of the Talos client configuration, never enabled.
# resource "talos_machine_secrets" "controlplane" {
# talos_version = var.controlplane.talos_version
# }
#
# data "talos_client_configuration" "controlplane" {
# cluster_name = var.controlplane.cluster_name
# client_configuration = talos_machine_secrets.controlplane.client_configuration
# nodes = [for k, v in var.controlplane : v.ip]
# endpoints = [var.controlplane.cluster_endpoint]
# }

View file

@ -1,17 +0,0 @@
# (Deleted in this change.) Input object for the former controlplane module.
variable "controlplane" {
description = "Control plane node configuration"
type = object({
cpu = number
memory = number
# NOTE(review): declared string but main.tf multiplied it numerically
# (disk * 1024) — the type relied on implicit string→number conversion.
disk = string
storagepool = string
talos_version = string
cluster_name = string
kubernetes_version = string
node_name = string
node_ipv4_address = string
cluster_endpoint = string
ipv4_gateway = string
})
}

View file

@ -0,0 +1,95 @@
# modules/talos-bootstrap: generates Talos machine secrets and configs,
# applies them to every node, then bootstraps the first control plane.
terraform {
required_providers {
talos = {
source = "siderolabs/talos"
version = "0.9.0"
}
}
}
locals {
# Kubernetes API endpoint derived from the cluster VIP (port 6443).
cluster_endpoint_full = format("https://%s:6443", var.node_config.cluster_endpoint)
}
# Cluster-wide secrets shared by all generated machine configurations.
resource "talos_machine_secrets" "machine_secrets" {}
# Base machine configuration for control-plane nodes.
data "talos_machine_configuration" "machineconfig-cp" {
cluster_name = var.node_config.cluster_name
machine_type = "controlplane"
cluster_endpoint = local.cluster_endpoint_full
kubernetes_version = var.node_config.kubernetes_version
talos_version = talos_machine_secrets.machine_secrets.talos_version
machine_secrets = talos_machine_secrets.machine_secrets.machine_secrets
}
# Base machine configuration for worker nodes.
data "talos_machine_configuration" "machineconfig-worker" {
cluster_name = var.node_config.cluster_name
machine_type = "worker"
cluster_endpoint = local.cluster_endpoint_full
kubernetes_version = talos_machine_secrets.machine_secrets.talos_version == null ? var.node_config.kubernetes_version : var.node_config.kubernetes_version
talos_version = talos_machine_secrets.machine_secrets.talos_version
machine_secrets = talos_machine_secrets.machine_secrets.machine_secrets
}
# talosconfig for the operator: control planes as endpoints, all nodes reachable.
data "talos_client_configuration" "talosconfig" {
cluster_name = var.node_config.cluster_name
endpoints = var.node_config.controlplane_addresses
client_configuration = talos_machine_secrets.machine_secrets.client_configuration
nodes = concat(var.node_config.controlplane_addresses, var.node_config.worker_addresses)
}
# Apply the control-plane config (with templated patches) to each CP node.
resource "talos_machine_configuration_apply" "config_apply_cp" {
for_each = {
for talos_node in var.talos_nodes : talos_node.name => talos_node
if talos_node.type == "controlplane"
}
client_configuration = talos_machine_secrets.machine_secrets.client_configuration
machine_configuration_input = data.talos_machine_configuration.machineconfig-cp.machine_configuration
endpoint = each.value.ipv4_address
node = each.value.ipv4_address
config_patches = [
templatefile("${path.module}/templates/machineconfig-cp.yaml.tmpl", {
hostname = each.value.name
cluster_name = var.node_config.cluster_name
vip_address = var.node_config.cluster_endpoint
node_name = format("%s.wheatley.in", var.node_config.proxmox_node)
machine_type = each.value.type
talos_version = var.node_config.talos_version
cilium_version = var.node_config.cilium_version
# Inlined as a Talos bootstrap manifest so Cilium installs without kubectl.
cilium_install_file = file("${path.module}/templates/cilium-install.yaml.tmpl")
})
]
}
# Apply the worker config (with templated patches) to each worker node.
resource "talos_machine_configuration_apply" "config_apply_worker" {
for_each = {
for talos_node in var.talos_nodes : talos_node.name => talos_node
if talos_node.type == "worker"
}
client_configuration = talos_machine_secrets.machine_secrets.client_configuration
machine_configuration_input = data.talos_machine_configuration.machineconfig-worker.machine_configuration
endpoint = each.value.ipv4_address
node = each.value.ipv4_address
config_patches = [
templatefile("${path.module}/templates/machineconfig-worker.yaml.tmpl", {
hostname = each.value.name
cluster_name = var.node_config.cluster_name
vip_address = var.node_config.cluster_endpoint
node_name = format("%s.wheatley.in", var.node_config.proxmox_node)
machine_type = each.value.type
talos_version = var.node_config.talos_version
cilium_version = var.node_config.cilium_version
cilium_install_file = file("${path.module}/templates/cilium-install.yaml.tmpl")
})
]
}
# One-shot etcd bootstrap against a single node after all configs are applied.
resource "talos_machine_bootstrap" "talos_machine_bootstrap" {
depends_on = [
talos_machine_configuration_apply.config_apply_cp,
talos_machine_configuration_apply.config_apply_worker
]
client_configuration = talos_machine_secrets.machine_secrets.client_configuration
# NOTE(review): assumes talos_nodes[0] is a control plane — the root module
# builds the list with control planes first; verify that ordering is preserved.
node = var.talos_nodes[0].ipv4_address
}

View file

@ -0,0 +1,96 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cilium-install
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: cilium-install
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cilium-install
namespace: kube-system
---
apiVersion: batch/v1
kind: Job
metadata:
name: cilium-install
namespace: kube-system
spec:
backoffLimit: 10
template:
metadata:
labels:
app: cilium-install
spec:
restartPolicy: OnFailure
tolerations:
- operator: Exists
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
- effect: PreferNoSchedule
operator: Exists
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoExecute
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: PreferNoSchedule
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
serviceAccount: cilium-install
serviceAccountName: cilium-install
hostNetwork: true
containers:
- name: cilium-install
image: quay.io/cilium/cilium-cli:latest
env:
- name: KUBERNETES_SERVICE_HOST
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: KUBERNETES_SERVICE_PORT
value: "6443"
command:
- cilium
- install
- --set
- ipam.mode=kubernetes
- --set
- kubeProxyReplacement=true
- --set
- securityContext.capabilities.ciliumAgent={CHOWN,KILL,NET_ADMIN,NET_RAW,IPC_LOCK,SYS_ADMIN,SYS_RESOURCE,DAC_OVERRIDE,FOWNER,SETGID,SETUID}
- --set
- securityContext.capabilities.cleanCiliumState={NET_ADMIN,SYS_ADMIN,SYS_RESOURCE}
- --set
- cgroup.autoMount.enabled=false
- --set
- cgroup.hostRoot=/sys/fs/cgroup
- --set
- k8sServiceHost=10.13.38.11
- --set
- k8sServicePort=6443
- --set
- gatewayAPI.enabled=true
- --set
- gatewayAPI.enableAlpn=true
- --set
- gatewayAPI.enableAppProtocol=true

View file

@ -1,34 +1,43 @@
debug: false
machine:
type: ${machine_type}
install:
disk: ${install_disk}
disk: /dev/vda
image: factory.talos.dev/nocloud-installer-secureboot/ce4c980550dd2ab1b17bbf2b08801c7eb59418eafe8f279833297925d67c7515:v${talos_version}
network:
hostname: ${hostname}
nameservers:
- 10.13.37.2
%{ if machine_type == "controlplane" }
interfaces:
- interface: eth0
dhcp: false
vip:
ip: ${vip_address}
%{ endif }
kubelet:
extraArgs:
pod-max-pids: 1000
extraConfig:
imageGCHighThresholdPercent: 75
imageGCLowThresholdPercent: 70
nodeLabels:
topology.kubernetes.io/region: ${cluster_name}
topology.kubernetes.io/zone: ${node_name}
cluster:
apiServer:
auditPolicy:
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
# Log only metadata in audit logs
- level: Metadata
network:
hostname: ${hostname}
cni:
name: none
nodeLabels:
topology.kubernetes.io/region: ${cluster_name}
topology.kubernetes.io/zone: ${node_name}
proxy:
disabled: true
inlineManifests:
- name: cilium-bootstrap
contents: |
${indent(6, cilium_install_file)}

View file

@ -1,34 +1,36 @@
debug: false
machine:
type: ${machine_type}
install:
disk: ${install_disk}
disk: /dev/vda
image: factory.talos.dev/nocloud-installer-secureboot/ce4c980550dd2ab1b17bbf2b08801c7eb59418eafe8f279833297925d67c7515:v${talos_version}
network:
hostname: ${hostname}
nameservers:
- 10.13.37.2
interfaces:
- interface: eth0
dhcp: false
kubelet:
extraArgs:
pod-max-pids: 1000
extraConfig:
imageGCHighThresholdPercent: 75
imageGCLowThresholdPercent: 70
nodeLabels:
topology.kubernetes.io/region: ${cluster_name}
topology.kubernetes.io/zone: ${node_name}
cluster:
apiServer:
auditPolicy:
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
# Log only metadata in audit logs
- level: Metadata
network:
hostname: ${hostname}
cni:
name: none
nodeLabels:
topology.kubernetes.io/region: ${cluster_name}
topology.kubernetes.io/zone: ${node_name}
proxy:
disabled: true
inlineManifests:
- name: cilium-bootstrap
contents: |
${indent(6, cilium_install_file)}

View file

@ -0,0 +1,23 @@
# Cluster-wide settings shared by every node the bootstrap module touches.
variable "node_config" {
description = "Talos node configuration"
type = object({
ipv4_gateway = string
talos_version = string
cilium_version = string
cluster_name = string
kubernetes_version = string
# Bare VIP address; the module appends https:// and :6443 itself.
cluster_endpoint = string
proxmox_node = string
controlplane_addresses = list(string)
worker_addresses = list(string)
})
}
# Per-node inventory; `type` selects the controlplane or worker config path.
variable "talos_nodes" {
description = "List of Talos nodes to bootstrap"
type = list(object({
name = string
type = string
ipv4_address = string
}))
}

View file

@ -0,0 +1,77 @@
terraform {
required_providers {
proxmox = {
source = "bpg/proxmox"
version = "0.85.1"
}
}
}
resource "proxmox_virtual_environment_vm" "talos-node" {
name = var.node.name
node_name = var.node.proxmox_node
tags = ["tofu"]
bios = "ovmf"
on_boot = true
machine = "q35"
stop_on_destroy = true
operating_system {
type = "l26"
}
agent {
enabled = true
}
cpu {
cores = var.node.cpu
sockets = 1
type = "x86-64-v2-AES"
}
memory {
dedicated = var.node.memory * 1024
}
disk {
datastore_id = var.node.storagepool
interface = "virtio0"
aio = "io_uring"
size = var.node.disk
file_format = "raw"
}
cdrom {
file_id = format("local:iso/talos-%s-nocloud-amd64-secureboot.iso", var.node.talos_version)
}
efi_disk {
datastore_id = var.node.storagepool
file_format = "raw"
type = "4m"
}
boot_order = ["virtio0", "ide3", "net0"]
tpm_state {
datastore_id = var.node.storagepool
version = "v2.0"
}
initialization {
datastore_id = var.node.storagepool
ip_config {
ipv4 {
address = format("%s/24", var.node.ipv4_address)
gateway = var.node.ipv4_gateway
}
}
dns {
servers = ["10.13.37.2"]
}
}
network_device {
bridge = "vmbr1"
}
}

View file

@ -0,0 +1,19 @@
# Per-VM settings consumed by the talos-node module. All sizing fields are
# numeric; callers in the root module pass numbers (cpu = 4, disk = 40, …).
variable "node" {
description = "Virtual node configuration"
type = object({
name = string
ipv4_address = string
ipv4_gateway = string
cpu = number
memory = number
# Disk size as a number (fed directly to the provider's numeric `size`
# argument). Was `string`, which only worked via implicit conversion;
# `number` still accepts callers passing "40" thanks to Terraform's
# automatic string→number conversion, so this is backward-compatible.
disk = number
storagepool = string
talos_version = string
cluster_name = string
kubernetes_version = string
cluster_endpoint = string
proxmox_node = string
controlplane_addresses = list(string)
worker_addresses = list(string)
})
}

View file

@ -1,93 +0,0 @@
# (Deleted in this change.) Former standalone worker module — a near-verbatim
# copy of the controlplane module; superseded by modules/talos-node.
terraform {
required_providers {
proxmox = {
source = "bpg/proxmox"
version = "0.85.1"
}
talos = {
source = "siderolabs/talos"
version = "0.9.0"
}
}
}
resource "proxmox_virtual_environment_vm" "worker" {
name = var.worker.node_name
# NOTE(review): Proxmox host hard-coded; the replacement module parameterizes it.
node_name = "pve01"
tags = ["tofu"]
bios = "ovmf"
on_boot = true
machine = "q35"
stop_on_destroy = true
operating_system {
type = "l26"
}
agent {
enabled = true
}
cpu {
cores = var.worker.cpu
sockets = 1
type = "x86-64-v2-AES"
}
# Caller supplies GiB; provider expects MiB.
memory {
dedicated = var.worker.memory * 1024
}
disk {
datastore_id = var.worker.storagepool
interface = "virtio0"
aio = "io_uring"
# NOTE(review): disk is typed string in variables.tf yet multiplied here —
# same unit ambiguity as the deleted controlplane module; confirm GB vs MB.
size = var.worker.disk * 1024
file_format = "raw"
}
cdrom {
file_id = format("local:iso/talos-%s-nocloud-amd64-secureboot.iso", var.worker.talos_version)
}
efi_disk {
datastore_id = var.worker.storagepool
file_format = "raw"
type = "4m"
}
tpm_state {
datastore_id = var.worker.storagepool
version = "v2.0"
}
initialization {
datastore_id = var.worker.storagepool
ip_config {
ipv4 {
address = var.worker.node_ipv4_address
gateway = var.worker.ipv4_gateway
}
}
dns {
servers = ["10.13.37.2"]
}
}
network_device {
bridge = "vmbr1"
}
}
# Dead code copy-pasted from the controlplane module (still references
# var.controlplane, which does not exist in this module); never enabled.
# resource "talos_machine_secrets" "controlplane" {
# talos_version = var.controlplane.talos_version
# }
#
# data "talos_client_configuration" "controlplane" {
# cluster_name = var.controlplane.cluster_name
# client_configuration = talos_machine_secrets.controlplane.client_configuration
# nodes = [for k, v in var.controlplane : v.ip]
# endpoints = [var.controlplane.cluster_endpoint]
# }

View file

@ -1,17 +0,0 @@
# (Deleted in this change.) Input object for the former worker module.
variable "worker" {
description = "Worker node configuration"
type = object({
cpu = number
memory = number
# NOTE(review): declared string but multiplied numerically in main.tf —
# relied on implicit string→number conversion.
disk = string
storagepool = string
talos_version = string
cluster_name = string
kubernetes_version = string
node_name = string
node_ipv4_address = string
cluster_endpoint = string
ipv4_gateway = string
})
}

View file

@ -8,5 +8,9 @@ terraform {
source = "bpg/proxmox"
version = "0.85.1"
}
time = {
source = "hashicorp/time"
version = "0.11.1"
}
}
}