|
#!/usr/bin/env bash |
|
|
|
# Proxmox Tailscale LXC Creator |
|
# Creates a minimal Debian LXC container with Tailscale pre-installed |
|
# Configured for exit node / subnet router use |
|
# |
|
# Usage: ./create-tailscale-lxc.sh [OPTIONS] |
|
# or: bash -c "$(curl -fsSL URL)" -- [OPTIONS] |
|
# |
|
# Options: |
|
# --ctid <id> Container ID (default: auto-detect next available) |
|
# --hostname <name> Hostname (default: tailscale) |
|
# --storage <name> Storage pool (default: auto-detect) |
|
# --auth-key <key> Tailscale auth key for automatic authentication |
|
# --memory <mb> Memory in MB (default: 512) |
|
# --disk <gb> Disk size in GB (default: 2) |
|
# -h, --help Show this help message |
|
|
|
set -Eeuo pipefail

# ANSI colors and formatting (constants — never reassigned)
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly CYAN='\033[0;36m'
readonly NC='\033[0m' # No Color
readonly BOLD='\033[1m'

# Defaults, overridable via CLI flags (see usage)
readonly DEFAULT_HOSTNAME="tailscale"
readonly DEFAULT_MEMORY=512
readonly DEFAULT_DISK=2
# Template base name matched against the pveam catalog / storage listing
readonly TEMPLATE_NAME="debian-12-standard"

# Script-level state, populated by parse_args and prompt_values
CTID=""
HOSTNAME=""
STORAGE=""
AUTH_KEY=""
MEMORY=""
DISK=""
|
|
|
banner() {
  # Splash header: colored ASCII art followed by the tool name.
  printf '%b\n' "${CYAN}"
  cat << 'EOF'
╔╦╗┌─┐┬┬  ┌─┐┌─┐┌─┐┬  ┌─┐  ╦  ═╗ ╦╔═╗
 ║ ├─┤││  └─┐│  ├─┤│  ├┤   ║  ╔╩╦╝║
 ╩ ┴ ┴┴┴─┘└─┘└─┘┴ ┴┴─┘└─┘  ╩═╝╩ ╚═╚═╝
EOF
  printf '%b\n' "${NC}${BOLD} Proxmox LXC Creator${NC}"
  printf '\n'
}
|
|
|
# Logging helpers. All diagnostics go to stderr so that functions which
# return data on stdout (e.g. get_template, consumed via command
# substitution in main) do not get log lines mixed into their result.
msg_info()  { echo -e "${BLUE}[INFO]${NC} $1" >&2; }
msg_ok()    { echo -e "${GREEN}[OK]${NC} $1" >&2; }
msg_warn()  { echo -e "${YELLOW}[WARN]${NC} $1" >&2; }
msg_error() { echo -e "${RED}[ERROR]${NC} $1" >&2; }
|
|
|
usage() {
  # Print help text and exit.
  # $1 (optional) — exit status, default 0. Error paths (unknown option,
  # missing argument) pass a non-zero status so callers/scripts can detect
  # failure; plain -h/--help still exits 0.
  cat << EOF
Usage: $0 [OPTIONS]

Creates a minimal Debian LXC container with Tailscale pre-installed,
configured for use as an exit node or subnet router.

Options:
  --ctid <id>        Container ID (default: next available)
  --hostname <name>  Hostname (default: $DEFAULT_HOSTNAME)
  --storage <name>   Storage pool (default: auto-detect)
  --auth-key <key>   Tailscale auth key for automatic authentication
  --memory <mb>      Memory in MB (default: $DEFAULT_MEMORY)
  --disk <gb>        Disk size in GB (default: $DEFAULT_DISK)
  -h, --help         Show this help message

Examples:
  $0                                # Interactive mode
  $0 --auth-key tskey-auth-xxxxx    # Automated with auth key
  $0 --ctid 200 --hostname ts-exit  # Custom CTID and hostname

EOF
  exit "${1:-0}"
}
|
|
|
check_root() {
  # Container management (pct/pveam, /etc/pve edits) requires root on the
  # Proxmox host; bail out early otherwise.
  (( EUID == 0 )) || { msg_error "This script must be run as root on the Proxmox host"; exit 1; }
}
|
|
|
check_proxmox() {
  # Verify we are on a Proxmox VE host by probing for the pveversion CLI.
  if command -v pveversion &>/dev/null; then
    msg_ok "Running on Proxmox VE $(pveversion --version | head -1)"
    return 0
  fi
  msg_error "This script must be run on a Proxmox VE host"
  exit 1
}
|
|
|
parse_args() {
  # Parse long-form CLI options into the script-level variables.
  # Every value-taking option first verifies its argument is present, so a
  # trailing flag fails with a clear message instead of tripping `set -u`
  # ("$2: unbound variable") or silently swallowing the next option.
  while [[ $# -gt 0 ]]; do
    case $1 in
      --ctid|--hostname|--storage|--auth-key|--memory|--disk)
        if [[ $# -lt 2 ]]; then
          msg_error "Option $1 requires an argument"
          usage 1
        fi
        case $1 in
          --ctid)     CTID="$2" ;;
          --hostname) HOSTNAME="$2" ;;
          --storage)  STORAGE="$2" ;;
          --auth-key) AUTH_KEY="$2" ;;
          --memory)   MEMORY="$2" ;;
          --disk)     DISK="$2" ;;
        esac
        shift 2
        ;;
      -h|--help)
        usage
        ;;
      --)
        # Tolerate a stray "--" (e.g. from `bash -c "$(curl ...)" -- ...`).
        shift
        ;;
      *)
        msg_error "Unknown option: $1"
        usage 1
        ;;
    esac
  done
}
|
|
|
get_next_ctid() {
  # Ask the cluster for the next free VMID; fall back to 100 (the
  # conventional first container id) when pvesh is unavailable or errors.
  local next_id
  if next_id=$(pvesh get /cluster/nextid 2>/dev/null); then
    echo "$next_id"
  else
    echo "100"
  fi
}
|
|
|
detect_storage() {
  # Non-interactive storage pick: print the first active pool that can
  # hold container root filesystems (with exactly one pool, that pool).
  # NOTE(review): currently unused — main goes through select_storage,
  # which covers both the single- and multi-pool cases.
  local pools
  pools=$(pvesm status -content rootdir 2>/dev/null | awk 'NR>1 && $2=="active" {print $1}')

  if [[ -z "$pools" ]]; then
    msg_error "No storage pools available for containers"
    exit 1
  fi

  # One pool or many, the answer is the first line of the listing.
  echo "$pools" | head -1
}
|
|
|
select_storage() {
  # Resolve $STORAGE: auto-pick when exactly one active rootdir-capable
  # pool exists, otherwise show a numbered menu and prompt until the user
  # picks a valid entry (empty input defaults to the first pool).
  local pools pool_count
  pools=$(pvesm status -content rootdir 2>/dev/null | awk 'NR>1 && $2=="active" {print $1}')

  if [[ -z "$pools" ]]; then
    msg_error "No storage pools available for containers"
    exit 1
  fi

  pool_count=$(echo "$pools" | wc -l)

  if [[ $pool_count -eq 1 ]]; then
    STORAGE="$pools"
    msg_info "Using storage: $STORAGE"
    return 0
  fi

  msg_info "Available storage pools:"
  local idx=1 pool
  declare -A menu
  while IFS= read -r pool; do
    echo "  $idx) $pool"
    menu[$idx]="$pool"
    idx=$((idx + 1))
  done <<< "$pools"

  local choice
  while true; do
    read -rp "Select storage [1]: " choice
    choice=${choice:-1}
    if [[ -n "${menu[$choice]:-}" ]]; then
      STORAGE="${menu[$choice]}"
      return 0
    fi
    echo "Invalid selection"
  done
}
|
|
|
prompt_values() {
  # Fill in any values not provided on the command line, prompting
  # interactively where a sensible default cannot be chosen silently.

  # Container ID: default to the cluster's next free VMID.
  if [[ -z "$CTID" ]]; then
    local next_id
    next_id=$(get_next_ctid)
    read -rp "Container ID [$next_id]: " CTID
    CTID=${CTID:-$next_id}
  fi

  # Reject non-numeric IDs early — `pct create` would otherwise fail much
  # later with a far less helpful error.
  if ! [[ "$CTID" =~ ^[0-9]+$ ]]; then
    msg_error "Container ID must be numeric: $CTID"
    exit 1
  fi

  # Refuse to clobber an existing container.
  if pct status "$CTID" &>/dev/null; then
    msg_error "Container $CTID already exists"
    exit 1
  fi

  # Hostname
  if [[ -z "$HOSTNAME" ]]; then
    read -rp "Hostname [$DEFAULT_HOSTNAME]: " HOSTNAME
    HOSTNAME=${HOSTNAME:-$DEFAULT_HOSTNAME}
  fi

  # Storage pool (interactive menu when several are available)
  if [[ -z "$STORAGE" ]]; then
    select_storage
  fi

  # Memory / disk fall back to defaults without prompting.
  MEMORY=${MEMORY:-$DEFAULT_MEMORY}
  DISK=${DISK:-$DEFAULT_DISK}
}
|
|
|
get_template() {
  # Locate (downloading if necessary) the newest debian-12-standard LXC
  # template on the given storage and print its volid on stdout.
  #
  # IMPORTANT: this function is consumed via command substitution
  # (template=$(get_template ...)), so every progress/diagnostic line is
  # routed to stderr — the original version let msg_info and pveam's
  # download progress leak into the captured volid, breaking `pct create`.
  local template_storage="$1"
  local template_file

  template_file=$(pveam list "$template_storage" 2>/dev/null \
    | grep -E "${TEMPLATE_NAME}.*tar\.(gz|zst)" | tail -1 | awk '{print $1}')

  if [[ -z "$template_file" ]]; then
    msg_info "Downloading Debian 12 template..." >&2
    pveam update >/dev/null 2>&1 || true

    # Newest matching template in the 'system' section of the catalog.
    local available_template
    available_template=$(pveam available -section system 2>/dev/null \
      | grep -E "$TEMPLATE_NAME" | tail -1 | awk '{print $2}')

    if [[ -z "$available_template" ]]; then
      msg_error "Could not find Debian 12 template" >&2
      exit 1
    fi

    # pveam prints download progress on stdout — keep it off our stdout.
    pveam download "$template_storage" "$available_template" >&2
    template_file=$(pveam list "$template_storage" 2>/dev/null \
      | grep -E "${TEMPLATE_NAME}.*tar\.(gz|zst)" | tail -1 | awk '{print $1}')
  fi

  # Fail loudly here rather than letting pct create choke on "".
  if [[ -z "$template_file" ]]; then
    msg_error "Template not found on $template_storage after download" >&2
    exit 1
  fi

  echo "$template_file"
}
|
|
|
create_container() {
  # Create the unprivileged container from the resolved template volid.
  # nesting+keyctl keep systemd/tailscaled happy inside an unprivileged
  # CT; --start 0 leaves it stopped so TUN access can be configured
  # before first boot.
  local template="$1"

  msg_info "Creating container $CTID..."

  local create_args=(
    --hostname "$HOSTNAME"
    --memory "$MEMORY"
    --cores 1
    --rootfs "${STORAGE}:${DISK}"
    --net0 name=eth0,bridge=vmbr0,ip=dhcp
    --unprivileged 1
    --features nesting=1,keyctl=1
    --onboot 1
    --start 0
  )
  pct create "$CTID" "$template" "${create_args[@]}"

  msg_ok "Container created"
}
|
|
|
configure_tun() {
  # Grant the container access to /dev/net/tun (required by tailscaled).
  # Both lines are appended to the CT config on the host, idempotently:
  # re-running the script does not duplicate entries.
  local conf="/etc/pve/lxc/${CTID}.conf"

  msg_info "Configuring TUN device access..."

  # Character device 10:200 is TUN/TAP.
  local allow_line="lxc.cgroup2.devices.allow: c 10:200 rwm"
  if ! grep -q "$allow_line" "$conf" 2>/dev/null; then
    echo "$allow_line" >> "$conf"
  fi

  # Bind-mount the host's /dev/net/tun into the container.
  if ! grep -q "lxc.mount.entry: /dev/net/tun" "$conf" 2>/dev/null; then
    echo "lxc.mount.entry: /dev/net/tun dev/net/tun none bind,create=file" >> "$conf"
  fi

  msg_ok "TUN device configured"
}
|
|
|
start_container() {
  # Boot the container, then wait (bounded) for init and for network.
  msg_info "Starting container..."
  pct start "$CTID"

  local max_wait=30
  local waited=0

  # Wait until the container's filesystem is reachable via pct exec.
  # NOTE: must not use ((waited++)) here — with waited=0 the expression
  # evaluates to 0, which is exit status 1, and `set -e` would abort the
  # whole script on the very first wait iteration.
  while [[ $waited -lt $max_wait ]]; do
    if pct exec "$CTID" -- test -f /etc/os-release 2>/dev/null; then
      break
    fi
    sleep 1
    waited=$((waited + 1))
  done

  if [[ $waited -ge $max_wait ]]; then
    msg_error "Container failed to start properly"
    exit 1
  fi

  # Wait for outbound connectivity (needed for the Tailscale install).
  waited=0
  while [[ $waited -lt $max_wait ]]; do
    if pct exec "$CTID" -- ping -c1 1.1.1.1 &>/dev/null; then
      break
    fi
    sleep 1
    waited=$((waited + 1))
  done

  # Best-effort: warn instead of failing so ICMP-filtered networks (where
  # apt may still work) can proceed — previously the timeout was silent.
  if [[ $waited -ge $max_wait ]]; then
    msg_warn "Network check timed out after ${max_wait}s; continuing anyway"
  fi

  msg_ok "Container started"
}
|
|
|
install_tailscale() {
  # Install Tailscale inside the container from the official apt repo.
  # The whole sequence runs inside the CT via a single pct exec.
  msg_info "Installing Tailscale..."

  pct exec "$CTID" -- bash -c '
set -e
export DEBIAN_FRONTEND=noninteractive

# Source os-release for ID / VERSION_CODENAME. Sourcing handles quoted
# values correctly, unlike the previous grep|cut approach, which would
# keep the quotes on distros that quote these fields.
. /etc/os-release
VER="${VERSION_CODENAME}"

# Prerequisites for fetching the repo signing key
apt-get update -qq
apt-get install -y -qq curl gnupg >/dev/null

# Add the Tailscale apt repository and its signing key
curl -fsSL "https://pkgs.tailscale.com/stable/${ID}/${VER}.noarmor.gpg" \
  | tee /usr/share/keyrings/tailscale-archive-keyring.gpg >/dev/null

echo "deb [signed-by=/usr/share/keyrings/tailscale-archive-keyring.gpg] https://pkgs.tailscale.com/stable/${ID} ${VER} main" \
  > /etc/apt/sources.list.d/tailscale.list

# Install Tailscale
apt-get update -qq
apt-get install -y -qq tailscale >/dev/null

# Ensure the daemon is running now and starts on boot
systemctl enable tailscaled >/dev/null 2>&1
systemctl start tailscaled
'

  msg_ok "Tailscale installed"
}
|
|
|
configure_ip_forwarding() {
  # Enable IPv4 + IPv6 forwarding inside the container — both are needed
  # for Tailscale exit-node / subnet-router operation. The settings are
  # written to sysctl.d so they persist across container reboots, then
  # applied immediately with sysctl -p.
  msg_info "Configuring IP forwarding for exit node support..."

  pct exec "$CTID" -- bash -c '
# Enable IP forwarding
cat > /etc/sysctl.d/99-tailscale.conf << EOF
# Enable IP forwarding for Tailscale exit node / subnet router
net.ipv4.ip_forward = 1
net.ipv6.conf.all.forwarding = 1
EOF
sysctl -p /etc/sysctl.d/99-tailscale.conf >/dev/null
'

  msg_ok "IP forwarding enabled"
}
|
|
|
authenticate_tailscale() {
  # Bring the tailnet up non-interactively when an auth key was supplied;
  # otherwise the user authenticates later (see print_summary).
  if [[ -z "$AUTH_KEY" ]]; then
    return 0
  fi
  msg_info "Authenticating with Tailscale..."
  # NOTE(review): the key is passed on the command line and is briefly
  # visible in the host process list — acceptable for ephemeral keys.
  pct exec "$CTID" -- tailscale up --auth-key="$AUTH_KEY"
  msg_ok "Tailscale authenticated"
}
|
|
|
add_proxmox_tag() {
  # Tag the container "tailscale" in the Proxmox UI, preserving any
  # existing tags. Tags are a semicolon-separated list in the CT config.
  local current_tags
  current_tags=$(pct config "$CTID" | awk -F': ' '/^tags:/ {print $2}')

  # Idempotence: don't append a duplicate tag when the script is re-run
  # against a container that already carries it.
  if [[ ";${current_tags};" == *";tailscale;"* ]]; then
    return 0
  fi

  if [[ -n "$current_tags" ]]; then
    pct set "$CTID" -tags "${current_tags};tailscale"
  else
    pct set "$CTID" -tags "tailscale"
  fi
}
|
|
|
print_summary() {
  # Final report: container facts plus copy-paste commands for the
  # remaining manual steps (authentication, exit node, subnet routes).
  printf '\n'
  printf '%b\n' "${GREEN}${BOLD}═══════════════════════════════════════════════════════════${NC}"
  printf '%b\n' "${GREEN}${BOLD} Container created successfully!${NC}"
  printf '%b\n' "${GREEN}${BOLD}═══════════════════════════════════════════════════════════${NC}"
  printf '\n'
  printf '%b\n' " ${BOLD}Container ID:${NC} $CTID"
  printf '%b\n' " ${BOLD}Hostname:${NC} $HOSTNAME"
  printf '%b\n' " ${BOLD}Storage:${NC} $STORAGE"
  printf '%b\n' " ${BOLD}Memory:${NC} ${MEMORY}MB"
  printf '%b\n' " ${BOLD}Disk:${NC} ${DISK}GB"
  printf '\n'

  if [[ -z "$AUTH_KEY" ]]; then
    printf '%b\n' "${YELLOW}${BOLD}Next Steps:${NC}"
    printf '\n'
    printf '%s\n' " 1. Authenticate with Tailscale:"
    printf '%b\n' " ${CYAN}pct exec $CTID -- tailscale up${NC}"
    printf '\n'
  else
    printf '%b\n' "${YELLOW}${BOLD}Tailscale Status:${NC}"
    printf '\n'
    # Best-effort: the daemon may still be settling right after `up`.
    pct exec "$CTID" -- tailscale status 2>/dev/null || true
    printf '\n'
  fi

  printf '%b\n' "${YELLOW}${BOLD}Enable as Exit Node:${NC}"
  printf '\n'
  printf '%s\n' " 1. Advertise as exit node:"
  printf '%b\n' " ${CYAN}pct exec $CTID -- tailscale up --advertise-exit-node${NC}"
  printf '\n'
  printf '%s\n' " 2. Approve in Tailscale admin console:"
  printf '%b\n' " ${CYAN}https://login.tailscale.com/admin/machines${NC}"
  printf '%s\n' " Find the machine → Edit route settings → Enable 'Use as exit node'"
  printf '\n'
  printf '%b\n' "${YELLOW}${BOLD}Enable as Subnet Router:${NC}"
  printf '\n'
  printf '%s\n' " Advertise routes (replace with your subnet):"
  printf '%b\n' " ${CYAN}pct exec $CTID -- tailscale up --advertise-routes=192.168.1.0/24${NC}"
  printf '\n'
  printf '%b\n' "${YELLOW}${BOLD}Combined (Exit Node + Subnet Router):${NC}"
  printf '\n'
  printf '%b\n' " ${CYAN}pct exec $CTID -- tailscale up --advertise-exit-node --advertise-routes=192.168.1.0/24${NC}"
  printf '\n'
}
|
|
|
main() {
  banner
  # Parse arguments BEFORE the root check so `--help` works for
  # unprivileged users (previously -h/--help required root).
  parse_args "$@"
  check_root
  check_proxmox
  prompt_values

  # get_template returns the template volid on stdout.
  local template
  template=$(get_template "$STORAGE")

  create_container "$template"
  configure_tun   # must run while the container is still stopped
  start_container
  install_tailscale
  configure_ip_forwarding
  authenticate_tailscale
  add_proxmox_tag
  print_summary
}

main "$@"