Adding cleanup script

vandomej 2025-09-28 11:26:14 -07:00
parent cdb19b2aaa
commit c1b0b95c13
5 changed files with 59 additions and 72 deletions

README.md

@@ -1,25 +1,15 @@
 # Setup Instructions
+Start out in the control_plane directory.
 Ensure you have [Homebrew](https://brew.sh/) installed on your machine.
 `/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"`
-Ensure you have the dependencies installed:
-`brew install terraform`
-Run the following commands to initialize and apply the Terraform configuration:
-```bash
-terraform init
-terraform plan
-terraform apply
-```
+After you spin up the control plane, then move on to the hc_vault directory.
+After you spin up Vault, then move on to the teamcity-agent directory.
+Finally, you can spin up worker nodes in the worker_nodes directory.
 # Development Environment Setup
 `brew install minikube`
 ```bash
 minikube start --driver=podman --container-runtime=cri-o
 ```
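For context on the `minikube start` line above: the podman driver assumes a podman machine is already running. A minimal prerequisite sketch on macOS, assuming Homebrew (the CPU and memory sizes are illustrative, not values from this repo):

```bash
# Install podman and create the VM it runs containers in
brew install podman
podman machine init --cpus 2 --memory 4096   # sizes are illustrative
podman machine start

# Then start minikube against the podman driver with the CRI-O runtime
minikube start --driver=podman --container-runtime=cri-o
```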

control_plane/README.md (new file, +25)

@@ -0,0 +1,25 @@
+# Setup Instructions
+Ensure you have [Homebrew](https://brew.sh/) installed on your machine.
+`/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"`
+Ensure you have the dependencies installed:
+`brew install terraform`
+Run the following commands to initialize and apply the Terraform configuration:
+```bash
+terraform init
+terraform plan
+terraform apply
+```
+# Development Environment Setup
+`brew install minikube`
+```bash
+minikube start --driver=podman --container-runtime=cri-o
+```
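As a usage note on the init/plan/apply sequence above: saving the plan to a file and applying exactly that plan avoids drift between what was reviewed and what gets applied. A minimal sketch (the `cluster_name` override is illustrative; the variable itself is defined in this module's Terraform configuration):

```bash
# Download providers and set up the working directory
terraform init

# Write the plan to a file, review it, then apply exactly that plan
terraform plan -out=tfplan -var 'cluster_name=dev-talos'
terraform apply tfplan
```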

control_plane/cleanup.sh (new file, +14)

@@ -0,0 +1,14 @@
+#!/bin/bash
+cluster_name="$1"
+# Guard against an empty argument, which would otherwise point the
+# `rm -rf` below at the whole clusters directory
+if [ -z "$cluster_name" ]; then
+  echo "Usage: $0 <cluster-name>" >&2
+  exit 1
+fi
+# Remove kubeconfig entries
+kubectl config delete-context "$cluster_name" 2>/dev/null || true
+kubectl config delete-cluster "admin@$cluster_name" 2>/dev/null || true
+kubectl config delete-user "admin@$cluster_name" 2>/dev/null || true
+talosctl config remove "$cluster_name" 2>/dev/null || true
+# Remove files
+rm -rf "${HOME}/.talos/clusters/${cluster_name}" 2>/dev/null || true
+echo "Cleanup completed for cluster: $cluster_name"

control_plane Terraform configuration

@@ -8,6 +8,7 @@ terraform {
   }
 }
+
 # Load variables from a .tfvars file
 variable "cluster_name" {
   description = "The name of the Talos cluster"
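Since this change introduces a `random_id` resource (from the hashicorp/random provider), an existing working directory needs to fetch that provider before the next plan:

```bash
# Install the newly required hashicorp/random provider
terraform init
```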
@@ -33,30 +34,19 @@ variable "talos_version" {
   default = "v1.11.1"
 }
-# Configure providers
-provider "talos" {}
-# Generate machine secrets
-resource "talos_machine_secrets" "this" {}
-# Create control plane configuration
-data "talos_machine_configuration" "this" {
-  cluster_name     = var.cluster_name
-  cluster_endpoint = "https://10.5.0.2:6443"
-  machine_type     = "controlplane"
-  machine_secrets  = talos_machine_secrets.this.machine_secrets
-  talos_version    = var.talos_version
-}
-data "talos_client_configuration" "this" {
-  cluster_name         = var.cluster_name
-  client_configuration = talos_machine_secrets.this.client_configuration
-  nodes                = ["10.5.0.2"]
-}
+locals {
+  # Generate unique cluster name each time
+  unique_cluster_name = "${var.cluster_name}-${random_id.cluster_suffix.hex}"
+}
+resource "random_id" "cluster_suffix" {
+  byte_length = 4
+}
 resource "null_resource" "talos_cluster" {
   triggers = {
-    cluster_name = var.cluster_name
+    cluster_name = local.unique_cluster_name
     memory_mb    = var.memory_mb
     vcpu_count   = var.vcpu_count
   }
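For intuition about the suffix: `byte_length = 4` yields four random bytes, which the `.hex` attribute renders as eight hex characters. A shell approximation (the `talos-cluster` base name is illustrative, not the module's default):

```bash
# Four random bytes -> eight hex characters, like random_id's .hex attribute
suffix="$(openssl rand -hex 4)"
echo "talos-cluster-${suffix}"   # e.g. talos-cluster-9f3a1c2e
```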
@@ -74,7 +64,8 @@ resource "null_resource" "talos_cluster" {
     command = <<EOT
 sudo --preserve-env=HOME talosctl cluster create \
   --provisioner=qemu \
-  --name ${var.cluster_name} \
+  --name ${self.triggers.cluster_name} \
+  --talosconfig ${path.module}/talosconfig \
   --controlplanes 1 \
   --workers 0 \
   --memory ${var.memory_mb} \
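After `apply`, the QEMU-provisioned cluster can be inspected with the same provisioner flag (the placeholder below stands for the suffixed name recorded in the triggers):

```bash
# Show the nodes talosctl provisioned under QEMU
sudo --preserve-env=HOME talosctl cluster show \
  --provisioner=qemu --name "<cluster-name>"
```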
@@ -85,49 +76,16 @@ resource "null_resource" "talos_cluster" {
   provisioner "local-exec" {
     when    = destroy
     command = <<EOT
-sudo --preserve-env=HOME talosctl cluster destroy \
+sudo --preserve-env=HOME talosctl cluster destroy --provisioner=qemu \
   --name ${self.triggers.cluster_name}
+# Call cleanup script
+chmod +x ${path.module}/cleanup.sh
+${path.module}/cleanup.sh ${self.triggers.cluster_name}
 EOT
   }
 }
-# resource "talos_machine_configuration_apply" "this" {
-#   client_configuration        = talos_machine_secrets.this.client_configuration
-#   machine_configuration_input = data.talos_machine_configuration.this.machine_configuration
-#   node                        = "10.5.0.2"
-# }
-# resource "talos_machine_bootstrap" "this" {
-#   depends_on = [
-#     talos_machine_configuration_apply.this
-#   ]
-#   node                 = "10.5.0.2"
-#   client_configuration = talos_machine_secrets.this.client_configuration
-# }
-# resource "talos_cluster_kubeconfig" "this" {
-#   depends_on = [
-#     talos_machine_bootstrap.this
-#   ]
-#   client_configuration = talos_machine_secrets.this.client_configuration
-#   node                 = "10.5.0.2"
-# }
-# Output important information
-output "controlplane_ip" {
-  value = "10.5.0.2"
-}
-output "talos_client_config" {
-  value     = data.talos_client_configuration.this.talos_config
-  sensitive = true
-}
-output "machine_config" {
-  value     = data.talos_machine_configuration.this.machine_configuration
-  sensitive = true
-}
 output "next_steps" {
   value = <<EOT
 After applying: