Defining the libvirt network

vandomej 2025-10-03 09:58:04 -07:00
parent 292a0d036b
commit f9433ad477
5 changed files with 37 additions and 313 deletions


@@ -0,0 +1,17 @@
<network ipv6='yes'>
<name>my-talos-net</name>
<bridge name="talos-bridge" stp="on" delay="0"/>
<forward mode='nat'>
<nat ipv6='yes'/>
</forward>
<ip address="10.5.0.1" netmask="255.255.255.0">
<dhcp>
<range start="10.5.0.2" end="10.5.0.254"/>
</dhcp>
</ip>
<ip family='ipv6' address='2001:db8:b84b:5::1' prefix='64'>
<dhcp>
<range start='2001:db8:b84b:5::2' end='2001:db8:b84b:5::ffff'/>
</dhcp>
</ip>
</network>
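The XML above can be sanity-checked directly with virsh before Terraform manages it; a minimal sketch, assuming the file is saved as control-plane-net.xml (as referenced by the libvirt_network resource below) and torn down again before terraform apply so the two don't conflict:

virsh net-define control-plane-net.xml    # register the network from the XML above
virsh net-start my-talos-net              # bring up the talos-bridge bridge
virsh net-dumpxml my-talos-net            # confirm the IPv4/IPv6 DHCP ranges took effect
virsh net-destroy my-talos-net && virsh net-undefine my-talos-net   # clean up before Terraform takes over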


@@ -1,5 +1,19 @@
terraform {
required_version = ">= 1.0"
required_providers {
talos = {
source = "siderolabs/talos"
version = "~> 0.9"
}
libvirt = {
source = "dmacvicar/libvirt"
version = "~> 0.7"
}
}
}
provider "libvirt" {
uri = "qemu:///system"
}
# Load variables from a .tfvars file
@@ -27,85 +41,11 @@ variable "talos_version" {
default = "v1.11.1"
}
resource "null_resource" "talos_cluster" {
triggers = {
cluster_name = var.cluster_name
memory_mb = var.memory_mb
vcpu_count = var.vcpu_count
talos_version = var.talos_version
config_hash = sha1(join("", [
var.cluster_name,
tostring(var.memory_mb),
tostring(var.vcpu_count),
var.talos_version,
filesha1("${path.module}/cleanup.sh") # Recreate if cleanup script changes
]))
resource "libvirt_network" "talos_bridge" {
name = "my-talos-net"
autostart = true
xml {
file = "control-plane-net.xml"
}
# Download Talos kernel and initramfs
provisioner "local-exec" {
command = <<EOT
mkdir -p _out/
curl -L -o _out/vmlinuz-arm64 https://github.com/siderolabs/talos/releases/download/${self.triggers.talos_version}/vmlinuz-arm64
curl -L -o _out/initramfs-arm64.xz https://github.com/siderolabs/talos/releases/download/${self.triggers.talos_version}/initramfs-arm64.xz
EOT
}
provisioner "local-exec" {
command = <<EOT
set -e # Exit on error
# Cleaning up any existing configs for this cluster
chmod +x ${path.module}/cleanup.sh
${path.module}/cleanup.sh ${self.triggers.cluster_name}
# Now create the cluster
echo "Creating new cluster..."
sudo --preserve-env=HOME talosctl cluster create \
--provisioner=qemu \
--name ${self.triggers.cluster_name} \
--controlplanes 1 \
--workers 0 \
--memory ${self.triggers.memory_mb} \
--cpus ${self.triggers.vcpu_count}
# Change ownership of the generated files in ~/.talos and ~/.kube
sudo chown -R $(id -u):$(id -g) $HOME/.talos
sudo chown -R $(id -u):$(id -g) $HOME/.kube
EOT
}
provisioner "local-exec" {
when = destroy
command = <<EOT
chmod +x ${path.module}/cleanup.sh
${path.module}/cleanup.sh ${self.triggers.cluster_name}
EOT
}
}
resource "null_resource" "talos_cluster_patches" {
depends_on = [null_resource.talos_cluster]
triggers = {
patches_hash = filesha1("${path.module}/patches/controlplane-patch-1.yaml")
}
provisioner "local-exec" {
command = <<EOT
# Apply ONLY your custom patches (not the full config)
talosctl patch machineconfig \
--patch-file patches/controlplane-patch-1.yaml \
--nodes 10.5.0.2
EOT
}
}
output "next_steps" {
value = <<EOT
After applying:
1. The control plane will be available at 10.5.0.2
2. The configuration is embedded in the kernel parameters
3. Terraform will automatically apply the config and bootstrap the cluster
4. Check status with: talosctl --talosconfig talosconfig version --nodes 10.5.0.2
EOT
}
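With the libvirt_network resource in place, a typical run initializes the providers, applies, and then checks the network from the libvirt side; a rough sequence, assuming the host libvirt daemon is reachable at qemu:///system as configured above:

terraform init        # installs the siderolabs/talos and dmacvicar/libvirt providers
terraform apply       # creates my-talos-net along with the remaining resources
virsh -c qemu:///system net-info my-talos-net                  # bridge talos-bridge should show as active
talosctl --talosconfig talosconfig version --nodes 10.5.0.2    # per the next_steps output above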


@@ -1,29 +0,0 @@
Vagrant.configure("2") do |config|
# You need to specify a base box for VirtualBox
# Using a minimal dummy box - we'll override most settings
config.vm.box = "generic/alpine316"
config.vm.define "control-plane-node-1" do |vm|
vm.vm.provider :virtualbox do |vb|
vb.cpus = 2
vb.memory = 2048
# Boot from the attached DVD (Talos ISO) instead of the base box's default disk
vb.customize ["modifyvm", :id, "--boot1", "dvd"]
vb.customize ["modifyvm", :id, "--boot2", "none"]
# Attach the Talos ISO
vb.customize ["storageattach", :id, "--storagectl", "IDE Controller", "--port", "1", "--device", "0", "--type", "dvddrive", "--medium", "/tmp/vagrant/metal-arm64.iso"]
end
end
config.vm.define "worker-node-1" do |vm|
vm.vm.provider :virtualbox do |vb|
vb.cpus = 2
vb.memory = 2048
# Boot from the attached DVD (Talos ISO) instead of the base box's default disk
vb.customize ["modifyvm", :id, "--boot1", "dvd"]
vb.customize ["modifyvm", :id, "--boot2", "none"]
# Attach the Talos ISO
vb.customize ["storageattach", :id, "--storagectl", "IDE Controller", "--port", "1", "--device", "0", "--type", "dvddrive", "--medium", "/tmp/vagrant/metal-arm64.iso"]
end
end
end


@@ -1,2 +0,0 @@
mkdir -p /tmp/vagrant
curl https://factory.talos.dev/image/4a0d65c669d46663f377e7161e50cfd570c401f26fd9e7bda34a0216b6f1922b/v1.11.1/metal-arm64.iso -L -o /tmp/vagrant/metal-arm64.iso
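For reference, the removed Vagrant setup expected the ISO at /tmp/vagrant/metal-arm64.iso, so the two files were used together roughly as follows; a sketch, assuming the two-line script above was saved as download-iso.sh (its real file name is not shown in this diff):

bash download-iso.sh                          # download-iso.sh is a placeholder name for the script above
ls -lh /tmp/vagrant/metal-arm64.iso           # sanity-check that the ISO actually downloaded
vagrant up control-plane-node-1 worker-node-1
vagrant status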


@@ -1,202 +0,0 @@
#!/bin/bash
set -e
# Configuration
VM_DIR="${VM_DIR:-$(pwd)/control-plane-vm}"
CONTROLPLANE_NAME="talos-controlplane"
MEMORY="2048" # 2GB per VM
CPUS="2"
DISK_SIZE="10G" # 10GB disk
TALOS_VERSION="v1.11.1"
KERNEL_URL="https://github.com/siderolabs/talos/releases/download/${TALOS_VERSION}/vmlinuz-arm64"
INITRAMFS_URL="https://github.com/siderolabs/talos/releases/download/${TALOS_VERSION}/initramfs-arm64.xz"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'
log_info() {
echo -e "${GREEN}[INFO]${NC} $1"
}
log_warn() {
echo -e "${YELLOW}[WARN]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# Create VM directory
create_vm_dir() {
local vm_name=$1
local vm_path="$VM_DIR/$vm_name"
mkdir -p "$vm_path"
echo "$vm_path"
}
# Download Talos kernel and initramfs
download_kernel_file() {
local kernel_path="$VM_DIR/vmlinuz-arm64"
if [[ ! -f "$kernel_path" ]]; then
curl -L -o "$kernel_path" "$KERNEL_URL"
fi
echo "$kernel_path"
}
download_initramfs_file() {
local initramfs_path="$VM_DIR/initramfs-arm64.xz"
if [[ ! -f "$initramfs_path" ]]; then
curl -L -o "$initramfs_path" "$INITRAMFS_URL"
fi
echo "$initramfs_path"
}
# Create disk image
create_disk_image() {
local vm_path=$1
local vm_name=$2
local disk_path="$vm_path/disk.qcow2"
if [[ ! -f "$disk_path" ]]; then
qemu-img create -f qcow2 "$disk_path" "$DISK_SIZE"
fi
echo "$disk_path"
}
# Generate unique MAC address
generate_mac() {
local vm_name=$1
local hash=$(echo "$vm_name" | md5sum | cut -c1-6)
printf "52:54:00:%s:%s:%s" "${hash:0:2}" "${hash:2:2}" "${hash:4:2}"
}
# Create VM startup script
create_vm_script() {
local vm_path=$1
local vm_name=$2
local mac_address=$3
local kernel_path=$4
local initramfs_path=$5
local disk_path=$6
local script_path="$vm_path/start.sh"
cat > "$script_path" << EOF
#!/bin/bash
OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES qemu-system-aarch64 \\
-name "$vm_name" \\
-machine virt,highmem=off \\
-accel hvf \\
-cpu host \\
-smp "$CPUS" \\
-m "${MEMORY}M" \\
-drive file="$disk_path",if=virtio,format=qcow2 \\
-kernel "$kernel_path" \\
-initrd "$initramfs_path" \\
-append "talos.platform=metal console=tty0 console=ttyS0" \\
-netdev user,id=net0 \\
-device virtio-net-pci,netdev=net0,mac=$mac_address \\
-nographic \\
-serial mon:stdio
EOF
chmod +x "$script_path"
echo "$script_path"
}
# Create systemd service file (optional)
create_service_file() {
local vm_path=$1
local vm_name=$2
local script_path=$3
local service_path="$vm_path/$vm_name.service"
cat > "$service_path" << EOF
[Unit]
Description=Talos VM - $vm_name
After=network.target
[Service]
Type=simple
ExecStart=$script_path
WorkingDirectory=$vm_path
Restart=always
User=$USER
[Install]
WantedBy=multi-user.target
EOF
echo "$service_path"
}
# Main execution
main() {
log_info "Creating Talos VMs with QEMU..."
if ! command -v qemu-system-aarch64 &> /dev/null; then
log_error "QEMU is not installed. Install with: brew install qemu"
exit 1
fi
log_info "Creating controlplane VM..."
local controlplane_path=$(create_vm_dir "$CONTROLPLANE_NAME")
local controlplane_disk=$(create_disk_image "$controlplane_path" "$CONTROLPLANE_NAME")
local controlplane_mac=$(generate_mac "$CONTROLPLANE_NAME")
local kernel_path=$(download_kernel_file)
local initramfs_path=$(download_initramfs_file)
local controlplane_script=$(create_vm_script "$controlplane_path" "$CONTROLPLANE_NAME" "$controlplane_mac" "$kernel_path" "$initramfs_path" "$controlplane_disk")
create_service_file "$controlplane_path" "$CONTROLPLANE_NAME" "$controlplane_script"
local manage_script="$VM_DIR/manage-vm.sh"
cat > "$manage_script" << 'EOF'
#!/bin/bash
VM_DIR="$(cd "$(dirname "$0")" && pwd)"
start_vm() {
echo "Starting Talos VM..."
"$VM_DIR/talos-controlplane/start.sh" &
}
stop_vm() {
echo "Stopping Talos VM..."
pkill -f "qemu-system-aarch64.*talos-controlplane"
}
case "$1" in
start)
start_vm
;;
stop)
stop_vm
;;
status)
pgrep -f "qemu-system-aarch64.*talos-controlplane" > /dev/null && echo "VM is running" || echo "VM is stopped"
;;
*)
echo "Usage: $0 {start|stop|status}"
exit 1
;;
esac
EOF
chmod +x "$manage_script"
log_info "VM creation complete!"
log_info "VM files located at: $VM_DIR"
log_info ""
log_info "To start VM: $manage_script start"
log_info "To stop VM: $manage_script stop"
log_info ""
log_info "Controlplane MAC: $controlplane_mac"
}
main "$@"