# Files
# worldskills_scripts/common/template/debian13-gui/debian.pkr.hcl
#
# 338 lines
# 13 KiB
# HCL
packer {
  # Minimum Packer CLI version this template is validated against.
  required_version = ">= 1.9.0"

  required_plugins {
    # Official HashiCorp vSphere plugin (provides the vsphere-iso builder).
    vsphere = {
      source  = "github.com/hashicorp/vsphere"
      version = "~> 1"
    }
  }
}
# ── vCenter connection ───────────────────────────────────────────────────────

variable "vcenter_server" {
  type        = string
  description = "vCenter server hostname or IP address"
}

variable "vcenter_username" {
  type        = string
  description = "vCenter username (e.g. administrator@vsphere.local)"
}

variable "vcenter_password" {
  type        = string
  description = "vCenter password"
  sensitive   = true
}

variable "vcenter_insecure_connection" {
  type        = bool
  description = "Skip TLS certificate verification (set false in production)"
  default     = true
}
# ── vSphere placement ────────────────────────────────────────────────────────

variable "datacenter" {
  type        = string
  description = "vSphere datacenter name"
}

variable "cluster" {
  type        = string
  description = "vSphere cluster or standalone ESXi host name"
}

variable "datastore" {
  type        = string
  description = "Datastore name where the VM will be stored"
}

variable "network" {
  type        = string
  description = "Port group / network name (used for Packer SSH — not saved in template)"
}

variable "vm_folder" {
  type        = string
  description = "VM inventory folder path (empty = root datacenter folder)"
  default     = ""
}

variable "resource_pool" {
  type        = string
  description = "Resource pool name (use 'Resources' for standalone ESXi hosts)"
  default     = "Resources"
}

variable "http_ip" {
  type        = string
  description = "IP of the Packer host NIC reachable by the build VM (required on multi-NIC hosts)"
  default     = ""
}
# ── VM hardware ──────────────────────────────────────────────────────────────

variable "vm_name" {
  type        = string
  description = "Name of the resulting VM template"
  default     = "debian-13-gui-template"
}

variable "vm_cpus" {
  type        = number
  description = "Number of vCPUs"
  default     = 2
}

variable "vm_memory_mb" {
  type        = number
  description = "RAM in MB"
  default     = 2048
}

variable "vm_disk_size_mb" {
  type        = number
  description = "System disk size in MB"
  default     = 40960 # 40 GB
}
# ── ISO paths on the vSphere datastore ──────────────────────────────────────
# Format: "[<datastore-name>] <folder/filename.iso>"
# All six BDs are mounted simultaneously so apt can resolve packages from any
# disc without manual disc swapping. BD-1 is the only bootable disc.

variable "iso_bd1" {
  type        = string
  description = "Datastore path to debian-13-amd64-BD-1.iso (bootable installer)"
}

variable "iso_bd2" {
  type        = string
  description = "Datastore path to debian-13-amd64-BD-2.iso"
}

variable "iso_bd3" {
  type        = string
  description = "Datastore path to debian-13-amd64-BD-3.iso"
}

variable "iso_bd4" {
  type        = string
  description = "Datastore path to debian-13-amd64-BD-4.iso"
}

variable "iso_bd5" {
  type        = string
  description = "Datastore path to debian-13-amd64-BD-5.iso"
}

variable "iso_bd6" {
  type        = string
  description = "Datastore path to debian-13-amd64-BD-6.iso"
}
# ── Preseed / SSH credentials ────────────────────────────────────────────────
# Must match the values in http/preseed.cfg

variable "ssh_username" {
  type    = string
  default = "root"
}

variable "ssh_password" {
  type      = string
  sensitive = true
  default   = "packer"
}
# ── Source definition ────────────────────────────────────────────────────────
# ── Source definition ────────────────────────────────────────────────────────
source "vsphere-iso" "debian" {
  # vCenter endpoint
  vcenter_server      = var.vcenter_server
  username            = var.vcenter_username
  password            = var.vcenter_password
  insecure_connection = var.vcenter_insecure_connection

  # Placement
  datacenter    = var.datacenter
  cluster       = var.cluster
  datastore     = var.datastore
  folder        = var.vm_folder
  resource_pool = var.resource_pool

  # VM identity
  vm_name       = var.vm_name
  guest_os_type = "debian10_64Guest" # highest Debian guest ID recognised by ESXi 6.5
  notes         = "Debian 13 (Trixie) — built with Packer on ${formatdate("YYYY-MM-DD", timestamp())}"
  firmware      = "efi"

  # CPU / RAM
  CPUs            = var.vm_cpus
  RAM             = var.vm_memory_mb
  RAM_reserve_all = false

  # Disk — thin-provisioned on a PVSCSI controller (best practice for vSphere)
  disk_controller_type = ["pvscsi"]
  storage {
    disk_size             = var.vm_disk_size_mb
    disk_thin_provisioned = true
    disk_controller_index = 0
  }

  # Network adapter — needed only so Packer can SSH in after install.
  # The preseed does not write any persistent network configuration.
  network_adapters {
    network      = var.network
    network_card = "vmxnet3"
  }

  # SATA CD-ROM drives — SATA supports up to 30 slots, avoiding the 4-device
  # IDE limit. All six BD ISOs are mounted simultaneously so apt-cdrom can
  # scan every disc without manual swapping.
  cdrom_type = "sata"
  iso_paths = [
    var.iso_bd1, # CD-ROM 0 — bootable installer disc
    var.iso_bd2, # CD-ROM 1
    var.iso_bd3, # CD-ROM 2
    var.iso_bd4, # CD-ROM 3
    var.iso_bd5, # CD-ROM 4
    var.iso_bd6, # CD-ROM 5
  ]

  # Packer starts a local HTTP server to serve the preseed file.
  http_directory = "${path.root}/http"
  http_ip        = var.http_ip
  http_port_min  = 8100
  http_port_max  = 8199

  # Boot sequence (UEFI / GRUB edit mode):
  #   1. Wait for the GRUB menu to appear.
  #   2. Press down once — default is "Graphical install"; one down selects
  #      the plain "Install" (TUI) entry.
  #   3. Press 'e' to open the entry editor.
  #   4. Navigate down to the linux kernel line (3rd line in the entry) and
  #      jump to its end, then append preseed parameters.
  #   5. Press Ctrl+X to boot.
  boot_wait = "10s"
  boot_command = [
    "<down><wait>",
    "e<wait2>",
    "<down><down><down><end>",
    " auto=true priority=critical",
    " locale=en_US.UTF-8",
    " keymap=us",
    " keyboard-configuration/xkb-keymap=us",
    " keyboard-configuration/layoutcode=us",
    " preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/preseed.cfg",
    "<wait><leftCtrlOn>x<leftCtrlOff>"
  ]

  # SSH communicator — Packer waits here until the VM finishes installing,
  # reboots, and becomes reachable (open-vm-tools reports the DHCP address).
  communicator           = "ssh"
  ssh_username           = var.ssh_username
  ssh_password           = var.ssh_password
  ssh_timeout            = "120m"
  ssh_handshake_attempts = 100

  # Keep the six CD-ROM drives in the resulting VM so the discs can be
  # remounted later (e.g. to install additional packages on cloned VMs).
  # ISOs remain attached; disconnect them in the vSphere UI if not needed.
  remove_cdrom = false

  # NOTE(review): the VM is intentionally NOT converted to a template —
  # it is left as a regular VM with a "Start" snapshot taken at the end of
  # the build, so it can be cloned and reverted to a clean baseline.
  # Flip convert_to_template to true if a real vSphere template is wanted.
  convert_to_template = false
  create_snapshot     = true
  snapshot_name       = "Start"
}
# ── Build ─────────────────────────────────────────────────────────────────────
# ── Build ─────────────────────────────────────────────────────────────────────
build {
  name    = "debian-13-gui-template"
  sources = ["source.vsphere-iso.debian"]

  # ── Provisioner 1: VS Code + YAML extension and Zeal docsets ───────────────
  # Requires outbound internet via DHCP. Runs as the ssh_username (root by
  # default), so the sudo prefixes below are harmless no-ops in that case.
  provisioner "shell" {
    inline = [
      "set -e",
      # BDs are mounted at boot via fstab (installer keeps trays closed via
      # cdrom-detect/eject=false in preseed).
      "for n in 1 2 3 4 5 6; do mountpoint -q /mnt/bd$n || mount /mnt/bd$n; done",
      "sudo apt-get update",
      # ── VS Code ─────────────────────────────────────────────────────────────
      # Download the .deb directly — avoids apt-get update hitting stale CD-ROM sources
      "curl -fL --retry 3 'https://code.visualstudio.com/sha/download?build=stable&os=linux-deb-x64' -o /tmp/vscode.deb",
      "file /tmp/vscode.deb | grep -q 'Debian binary package' || { echo 'VS Code download is not a valid .deb'; head -c 500 /tmp/vscode.deb; exit 1; }",
      # If dpkg fails on unmet deps, `apt-get install -f` completes the install.
      "dpkg -i /tmp/vscode.deb || apt-get install -fy",
      "rm -f /tmp/vscode.deb",
      "code --install-extension redhat.vscode-yaml --no-sandbox --user-data-dir /root/.vscode --force",
      # Patch the .desktop launcher so VS Code opens without the root sandbox error
      "sed -i 's|Exec=/usr/share/code/code|Exec=/usr/share/code/code --no-sandbox|g' /usr/share/applications/code.desktop",
      # ── Zeal ────────────────────────────────────────────────────────────────
      "apt-get install -y zeal",
      # Qt WebEngine (Chromium) refuses to run as root without disabling sandbox.
      # Patch the launcher to pass the env var instead of --no-sandbox flag.
      "sed -i 's|^Exec=zeal|Exec=env QTWEBENGINE_CHROMIUM_FLAGS=--no-sandbox zeal|' /usr/share/applications/org.zealdocs.zeal.desktop",
      "mkdir -p /root/.local/share/Zeal/Zeal/docsets",
      "wget -qO /tmp/Python.tgz https://kapeli.com/feeds/Python.tgz && tar -xz -C /root/.local/share/Zeal/Zeal/docsets -f /tmp/Python.tgz && rm /tmp/Python.tgz",
      "wget -qO /tmp/Ansible.tgz https://kapeli.com/feeds/Ansible.tgz && tar -xz -C /root/.local/share/Zeal/Zeal/docsets -f /tmp/Ansible.tgz && rm /tmp/Ansible.tgz",
      "wget -qO /tmp/Nginx.tgz https://kapeli.com/feeds/Nginx.tgz && tar -xz -C /root/.local/share/Zeal/Zeal/docsets -f /tmp/Nginx.tgz && rm /tmp/Nginx.tgz",
      "wget -qO /tmp/Bash.tgz https://kapeli.com/feeds/Bash.tgz && tar -xz -C /root/.local/share/Zeal/Zeal/docsets -f /tmp/Bash.tgz && rm /tmp/Bash.tgz",
    ]
  }

  # ── Provisioner 2: generalise the VM so every clone starts clean ───────────
  # NOTE(review): no `set -e` here, so cleanup is best-effort — individual
  # failures do not abort the build. Add one if hard failure is preferred.
  provisioner "shell" {
    inline = [
      # Remove SSH host keys — regenerated on first boot of each clone
      "sudo rm -f /etc/ssh/ssh_host_*",
      # Generate root SSH key
      "sudo mkdir -p /root/.ssh",
      "sudo chmod 700 /root/.ssh",
      "sudo ssh-keygen -t ed25519 -N '' -f /root/.ssh/id_ed25519",
      "sudo chmod 600 /root/.ssh/id_ed25519",
      "sudo chmod 644 /root/.ssh/id_ed25519.pub",
      "sudo cp /root/.ssh/id_ed25519.pub /root/.ssh/authorized_keys",
      "sudo chmod 600 /root/.ssh/authorized_keys",
      # Clear machine-id — systemd and DHCP clients use it as a unique identifier
      "sudo truncate -s0 /etc/machine-id",
      "sudo rm -f /var/lib/dbus/machine-id",
      "sudo ln -s /etc/machine-id /var/lib/dbus/machine-id",
      # Network left as DHCP — no wipe needed for GUI template
      "sudo rm -f /etc/udev/rules.d/70-persistent-net.rules",
      "sudo truncate -s0 /etc/hostname",
      # ── cloud-init ──────────────────────────────────────────────────────────
      # Set VMware as the preferred datasource (open-vm-tools is installed)
      "echo 'datasource_list: [VMware, NoCloud, None]' | sudo tee /etc/cloud/cloud.cfg.d/99-datasource.cfg",
      # Remove any cloud-init state so it runs fresh on first boot of each clone
      # Services are enabled automatically by the package — no explicit enable needed
      "sudo cloud-init clean --logs",
      # ── Cleanup ─────────────────────────────────────────────────────────────
      # Clean apt package cache
      "sudo apt-get clean -y",
      "sudo apt-get autoremove --purge -y",
      # Truncate log files (keep the files so rsyslog doesn't complain).
      # FIX: use `find -exec … {} +` instead of `find | xargs` — the pipe form
      # word-splits paths containing whitespace and, when find matches nothing,
      # runs `truncate -s0` with no operands (a usage error).
      "sudo find /var/log -type f -exec truncate -s0 {} +",
      "sudo truncate -s0 /var/log/wtmp",
      "sudo truncate -s0 /var/log/lastlog",
      # Clear shell histories
      "sudo truncate -s0 /root/.bash_history",
      "truncate -s0 ~/.bash_history",
    ]
  }
}