diff --git a/config.toml b/config.toml
index 5b6a2b7..4af4349 100644
--- a/config.toml
+++ b/config.toml
@@ -1,34 +1,38 @@
-[k0s] # Global settings.
+[k0s]
 version = "v1.34.3+k0s.0"
-private_ip_range = "10.1.0.0/24" # Node private IPv4 range in CIDR format.
+private_ip_range = "10.1.0.0/24" # Node private IPv4 range in CIDR format.
 
-[metallb] # Load balancer for external IP access
+[metallb]
 version = "v0.15.3"
-public_ip_range = "192.168.1.0/24" # External IP range for public access in CIDR format.
+public_ip_range = "192.168.1.0/24" # Public IP range for external IPs in CIDR format.
 
-[haproxy] # HAProxy nodes.
-keepalived.id = "100" # VRRP ID Number.
-keepalived.auth = "4PycOh7xM" # VRRP Password.
-proxy.one.ip = "10.1.0.8" # Proxy one IPv4
-proxy.two.ip = "10.1.0.9" # Proxy two IPv4
-proxy.kube.ip = "10.1.0.10" # VIP for kubernetes cluster API high-availability.
+[haproxy]
+keepalived.id = "100" # Keepalived VRRP ID number.
+keepalived.auth = "4PycOh7xM" # Keepalived VRRP password.
+proxy.1.ip = "10.1.0.8" # HAProxy one IPv4
+proxy.2.ip = "10.1.0.9" # HAProxy two IPv4
+proxy.kube.ip = "10.1.0.10" # VIP for kubernetes cluster API high-availability.
 
-[nodes] # Cluster hardware deployment configuartion
-ssh.user = "root" # controller and worker node SSH login information.
+[nodes]
+ssh.user = "root" # Controller and worker node login information.
 ssh.port = "22"
 ssh.key = "~/.ssh/id_rsa"
-interface.private = "eth0" # Cluster internal network interface name.
-interface.public = "eth1" # Cluster public network interface name.
-controller.one.ip = "10.1.0.11" # Controller nodes IPv4 addresses.
-controller.two.ip = "10.1.0.12"
-controller.three.ip = "10.1.0.13"
-worker.1.ip = "10.1.0.14" # Worker nodes IPv4 addresses. ( 1-250 )
-worker.2.ip = "10.1.0.15"
-worker.3.ip = "10.1.0.16"
-worker.4.ip = "10.1.0.17"
-worker.5.ip = "10.1.0.18"
+network.private.interface = "eth0" # Cluster internal network interface name.
+network.private.gateway = "10.1.0.1" # Private network gateway
+network.private.netmask = "24" # Private network netmask/cidr
+network.public.interface = "eth1" # Cluster public network interface name.
+network.public.gateway = "192.168.1.1" # Public network gateway
+network.public.netmask = "24" # Public network netmask/cidr
+controller.1 = { mac = "", ip = "10.1.0.11" } # Controller nodes IPv4 addresses.
+controller.2 = { mac = "", ip = "10.1.0.12" }
+controller.3 = { mac = "", ip = "10.1.0.13" }
+worker.1 = { mac = "", ip = "10.1.0.14" } # Worker nodes IPv4 addresses. ( 1-250 )
+worker.2 = { mac = "", ip = "10.1.0.15" }
+worker.3 = { mac = "", ip = "10.1.0.16" }
+worker.4 = { mac = "", ip = "10.1.0.17" }
+worker.5 = { mac = "", ip = "10.1.0.18" }
 
-[bastion] # Enable for accessing nodes on secure networks that requires a bastion/jumphost for ssh access
+[bastion] # Enable for accessing nodes on secure networks that require a bastion/jumphost for ssh access
 enable = false
 ssh.user = "example_user"
 ssh.port = "22"