ZFS Debian installation
#!/bin/bash
#
# Install Debian GNU/Linux 11 Bullseye (or 12 Bookworm) to a ZFS root filesystem
# https://openzfs.github.io/openzfs-docs/Getting%20Started/Debian/index.html
#
set -e +H
RED="\033[1;31m"
GREEN="\033[1;32m"
YELLOW="\033[1;33m"
NOCOL="\033[00m"
# Ensure all tool locations are present in PATH.
export PATH=/usr/sbin:/usr/bin:/sbin:/bin
###############################################################################
### Static settings, overridable by TARGET_* environment variables
# Name of the ZFS pool for /boot, a pool with a limited set of features that are supported by GRUB.
BPOOL=${TARGET_BPOOL:-bpool}
# Name of the ZFS root pool, the main OS filesystem and all other datasets within.
RPOOL=${TARGET_RPOOL:-rpool}
# The codename of the Debian release to install on disk.
DIST=${TARGET_DIST:-bullseye}
# When partitioning TARGET_DISKS, how much space is reserved for system swap.
SIZESWAP=${TARGET_SIZESWAP:-2G}
# When partitioning TARGET_DISKS, the size of the TARGET_RPOOL partition (defaults to the rest of the disk).
SIZEROOT=${TARGET_SIZEROOT:-0}
# Comma-separated list of device names to install the TARGET_DIST on (e.g. sda,sdb,...).
DISKS=${TARGET_DISKS}
# ZFS RAID level to use when creating TARGET_BPOOL and TARGET_RPOOL (raid0, raid1, raidz, raidz2 or raidz3).
RAIDLEVEL=${TARGET_RAIDLEVEL:-raid1}
# When true, wipes existing RAID and filesystem signatures from the disks before creating new ZFS pools.
# Not to be confused with a secure disk erasure that overwrites all data with zeroes or a bit pattern.
ZAPALL=${TARGET_ZAPALL:-false}
# Hostname and domain of the target system. When omitted, the installer uses debian-$(hostid)
NEWHOST=${TARGET_HOSTNAME}
DOMAIN=${TARGET_DOMAIN}
# Password to give the root account. When omitted, the installer will generate a random password.
PASSWORD=${TARGET_PASSWORD}
# A comma-separated list of extra APT packages to be preinstalled on the target system.
PACKAGES=${TARGET_PACKAGES}
# The HTTP proxy server (e.g. squid) to use when the installer has no direct internet access.
PROXY=${TARGET_PROXY}
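# Example invocation (illustrative; device names, hostname and script name are
# placeholders for your own values):
#   TARGET_DISKS=sda,sdb TARGET_RAIDLEVEL=raid1 TARGET_HOSTNAME=zfshost ./install.sh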
###############################################################################
### Validate user settings and resolve disks to stable by-id paths
declare -A BYID
while read -r IDLINK; do
BYID["${IDLINK#* }"]="${IDLINK% *}"
done < <(
for IDPATH in /dev/disk/by-id/*; do
[ -e "$IDPATH" ] || continue
printf '%s %s\n' "$IDPATH" "$(basename "$(readlink "$IDPATH")")"
done | sort -k2 -s -u
)
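# BYID now maps kernel device names to stable by-id paths, e.g. (illustrative):
#   BYID[sda]=/dev/disk/by-id/ata-SomeVendor_SomeModel_SERIAL123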
for DISK in $(echo "$DISKS" | tr , ' '); do
if [ -z "${BYID[$DISK]}" ]; then
echo -e "${RED}[ERROR] ${YELLOW}Supplied disk does not exist:${NOCOL}"
echo -e " $DISK"
exit 1
fi
ZFSDISKS+=("${BYID[$DISK]}")
BOOTPARTITIONS+=("${BYID[$DISK]}-part3")
ZFSPARTITIONS+=("${BYID[$DISK]}-part4")
done
if [ ${#ZFSDISKS[@]} -eq 0 ]; then
echo -e "${RED}[ERROR] ${YELLOW}Need at least one disk set by TARGET_DISKS${NOCOL}"
exit 1
fi
case "$RAIDLEVEL" in
raid0)
BOOTRAIDDEF=("${BOOTPARTITIONS[@]}")
ZFSRAIDDEF=("${ZFSPARTITIONS[@]}")
;;
raid1)
if [ $((${#ZFSPARTITIONS[@]} % 2)) -ne 0 ]; then
echo -e "${RED}[ERROR] ${YELLOW}Need an even number of disks for RAID level '$RAIDLEVEL':${NOCOL}"
echo -e " ${ZFSPARTITIONS[*]}"
exit 1
fi
I=0
for BOOTPARTITION in "${BOOTPARTITIONS[@]}"; do
if [ $((I % 2)) -eq 0 ]; then
BOOTRAIDDEF+=(mirror)
fi
BOOTRAIDDEF+=("$BOOTPARTITION")
((I++)) || true
done
I=0
for ZFSPARTITION in "${ZFSPARTITIONS[@]}"; do
if [ $((I % 2)) -eq 0 ]; then
ZFSRAIDDEF+=(mirror)
fi
ZFSRAIDDEF+=("$ZFSPARTITION")
((I++)) || true
done
;;
raidz|raidz1)
if [ ${#ZFSPARTITIONS[@]} -lt 3 ]; then
echo -e "${RED}[ERROR] ${YELLOW}Need at least 3 disks for RAID level '$RAIDLEVEL':${NOCOL}"
echo -e " ${ZFSPARTITIONS[*]}"
exit 1
fi
BOOTRAIDDEF=("$RAIDLEVEL" "${BOOTPARTITIONS[@]}")
ZFSRAIDDEF=("$RAIDLEVEL" "${ZFSPARTITIONS[@]}")
;;
raidz2)
if [ ${#ZFSPARTITIONS[@]} -lt 5 ]; then
echo -e "${RED}[ERROR] ${YELLOW}Need at least 5 disks for RAID level '$RAIDLEVEL':${NOCOL}"
echo -e " ${ZFSPARTITIONS[*]}"
exit 1
fi
BOOTRAIDDEF=("$RAIDLEVEL" "${BOOTPARTITIONS[@]}")
ZFSRAIDDEF=("$RAIDLEVEL" "${ZFSPARTITIONS[@]}")
;;
raidz3)
if [ ${#ZFSPARTITIONS[@]} -lt 8 ]; then
echo -e "${RED}[ERROR] ${YELLOW}Need at least 8 disks for RAID level '$RAIDLEVEL':${NOCOL}"
echo -e " ${ZFSPARTITIONS[*]}"
exit 1
fi
BOOTRAIDDEF=("$RAIDLEVEL" "${BOOTPARTITIONS[@]}")
ZFSRAIDDEF=("$RAIDLEVEL" "${ZFSPARTITIONS[@]}")
;;
*)
echo -e "${RED}[ERROR] ${YELLOW}Unknown RAID level '$RAIDLEVEL':${NOCOL}"
exit 1
;;
esac
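# For example (illustrative), four disks at the default raid1 level expand to a
# striped pair of mirrors:
#   ZFSRAIDDEF=(mirror <disk1>-part4 <disk2>-part4 mirror <disk3>-part4 <disk4>-part4)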
#######################################
# Wait for a period of time to allow backing out before the irreversible
# process starts.
echo -e "${RED}WARNING:"
echo -e "${YELLOW} Starting destruction in 20 seconds ..."
echo -e "${YELLOW} Press X to continue immediately ...${NOCOL}"
echo -e "${RED} Process will DELETE ALL DATA ON DISK(s)!"
echo -e "${YELLOW} Creating a new ZFS pool on: ${ZFSDISKS[*]}"
echo -e "${YELLOW} Press CTRL-C to abort now!${NOCOL}"
echo -n " => "
for ((i=1; i<=20; i++)); do
echo -n "."
read -r -t1 -n1 anykey || true
if [ "$anykey" = "x" ] || [ "$anykey" = "X" ]; then
break
fi
done
echo ""
###############################################################################
### Prepare The Install Environment
# A hostid beginning with 007f01 was derived from the loopback address 127.0.1.1
# and is therefore not unique; replace it with four random bytes.
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=595790
if [ "$(hostid | cut -b-6)" == "007f01" ]; then
dd if=/dev/urandom of=/etc/hostid bs=1 count=4
fi
# Setup and update the repositories
if [ -n "$PROXY" ]; then
export http_proxy="$PROXY"
echo "Acquire::http::Proxy \"${PROXY}\";" > /etc/apt/apt.conf.d/proxy.conf
fi
DEBCODENAME=$(lsb_release -sc)
echo "deb http://deb.debian.org/debian/ ${DEBCODENAME} contrib non-free" > /etc/apt/sources.list.d/contrib-non-free.list
test -f "/var/lib/apt/lists/deb.debian.org_debian_dists_${DEBCODENAME}_non-free_binary-amd64_Packages" || \
apt-get -qq update
# Install ZFS in the Host environment
if [ ! -f /usr/sbin/debootstrap ]; then NEED_PACKAGES+=(debootstrap); fi
if [ ! -f /sbin/sgdisk ]; then NEED_PACKAGES+=(gdisk); fi
if [ ! -d /usr/share/doc/zfs-dkms ]; then NEED_PACKAGES+=(zfs-dkms); fi
if [ ! -f /sbin/zpool ]; then NEED_PACKAGES+=(zfsutils-linux); fi
if [ ! -f /sbin/mkfs.btrfs ]; then NEED_PACKAGES+=(btrfs-progs); fi
if [ ! -f /sbin/mkdosfs ]; then NEED_PACKAGES+=(dosfstools); fi
if [ ! -f /usr/sbin/dmidecode ]; then NEED_PACKAGES+=(dmidecode); fi
if [ ! -f /usr/sbin/mdadm ]; then NEED_PACKAGES+=(mdadm); fi
if [ ! -f /usr/sbin/partprobe ]; then NEED_PACKAGES+=(parted); fi
if [ -n "${NEED_PACKAGES[*]}" ]; then
echo -e "${YELLOW}Need packages: ${NEED_PACKAGES[*]}${NOCOL}"
DEBIAN_FRONTEND=noninteractive apt-get -qq install --yes "${NEED_PACKAGES[@]}";
fi
modprobe zfs || {
echo -e "${RED}[ERROR] ${YELLOW}Unable to load ZFS kernel module${NOCOL}"
exit 1
}
###############################################################################
### Disk Formatting
test -d "/proc/spl/kstat/zfs/$BPOOL" && zpool destroy "$BPOOL"
test -d "/proc/spl/kstat/zfs/$RPOOL" && zpool destroy "$RPOOL"
# When re-using a disk, clear it as necessary
if [ "$ZAPALL" == "true" ]; then
for DISK in $(echo "$DISKS" | tr , ' '); do
# If the disk was previously used in an MD array
while read -r MDRAID; do
MDDEVS=()
for DEV in $(mdadm -vDs "$MDRAID" | awk -F= '/^[ ]+devices/ {print $2}' | tr , ' '); do
MDDEVS+=("$DEV")
done
mdadm --stop "$MDRAID"
mdadm --zero-superblock "${MDDEVS[@]}"
done < <(grep "^md.* ${DISK}" /proc/mdstat | awk '{print "/dev/"$1}')
# In case of lingering ZFS partitions, run wipefs on all drives.
while [ "$(wipefs -a -f "/dev/$DISK" | wc -c)" -ne 0 ]; do
sleep 1;
done
# wipefs can miss some residual ZFS metadata (ZFS keeps multiple label copies
# on the device). Formatting with btrfs overwrites that residue; then wipe again.
mkfs.btrfs -f "/dev/$DISK"
while [ "$(wipefs -a -f "/dev/$DISK" | wc -c)" -ne 0 ]; do
sleep 1;
done
done
fi
for DISK in "${ZFSDISKS[@]}"; do
echo -e "\n${YELLOW}Partitioning disk: $DISK${NOCOL}"
# Clear the partition table
sgdisk --zap-all "$DISK"
# Partition the disks
if [ -d /sys/firmware/efi ]; then
sgdisk -n1:2048:+512M -t1:EF00 "$DISK"
else
sgdisk -a1 -n1:24K:+1000K -t1:EF02 "$DISK"
fi
sgdisk -n2:0:+"$SIZESWAP" -t2:8200 \
-n3:0:+2G -t3:BE00 \
-n4:0:+"$SIZEROOT" -t4:BF00 "$DISK"
done
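# Resulting layout on each disk (on non-UEFI systems part1 is instead a 1000K
# BIOS boot partition of type EF02):
#   part1  512M       EF00  EFI system partition
#   part2  $SIZESWAP  8200  swap
#   part3  2G         BE00  boot pool ($BPOOL)
#   part4  $SIZEROOT  BF00  root pool ($RPOOL)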
udevadm trigger --subsystem-match=block
udevadm settle
partprobe -s
# Sometimes we need to wait a bit longer for the newly created partition
# links to appear under /dev/disk/by-id.
for PART in "${BOOTPARTITIONS[@]}" "${ZFSPARTITIONS[@]}"; do
if [ ! -e "$PART" ]; then
sleep 1
fi
done
###############################################################################
# Create the boot pool
# Feature notes:
# GRUB does not support all of the zpool features. This step creates a separate
# boot pool for /boot with the features limited to only those that GRUB
# supports, allowing the root pool to use any/all features.
# See `spa_feature_names` in grub-core/fs/zfs/zfs.c:
# https://git.savannah.gnu.org/cgit/grub.git/tree/grub-core/fs/zfs/zfs.c?id=7259d55ffcf124e32eafb61aa381f9856e98a708#n280
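# Which features ended up active can be inspected later with, for example:
#   zpool get all "$BPOOL" | grep feature@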
zpool create -f \
-o cachefile=/etc/zfs/zpool.cache \
-o ashift=12 -d \
-o feature@async_destroy=enabled \
-o feature@bookmarks=enabled \
-o feature@embedded_data=enabled \
-o feature@empty_bpobj=enabled \
-o feature@enabled_txg=enabled \
-o feature@extensible_dataset=enabled \
-o feature@filesystem_limits=enabled \
-o feature@hole_birth=enabled \
-o feature@large_blocks=enabled \
-o feature@lz4_compress=enabled \
-o feature@spacemap_histogram=enabled \
-o feature@zpool_checkpoint=enabled \
-O acltype=posixacl -O canmount=off -O compression=lz4 \
-O devices=off -O normalization=formD -O relatime=on -O xattr=sa \
-O mountpoint=/boot -R /target \
"$BPOOL" "${BOOTRAIDDEF[@]}" || {
echo -e "${RED}[ERROR] ${YELLOW}Unable to create zpool:${NOCOL}"
echo -e " $BPOOL"
exit 1
}
###############################################################################
# Create the root pool
# Feature notes:
# - The use of `ashift=12` is recommended here because many drives today have 4KiB (or larger)
# physical sectors, even though they present 512B logical sectors.
# - The `large_dnode` feature is explicitly disabled because otherwise GRUB won't correctly
# identify the ZFS pool name when generating grub.cfg from `update-grub`.
# - Setting `acltype=posixacl` enables POSIX ACLs globally (e.g. systemd-journald requires ACLs).
# - Setting `canmount=off` disables the mounting of the pool as a file system.
# - Setting `compression=lz4` because it has low CPU overhead and can improve IO efficiency.
# ZFS will skip compression of objects that it sees as having no effect anyway.
# - Setting `normalization=formD` eliminates some corner cases relating to UTF-8 filename
# normalization. It also implies utf8only=on, which means that only UTF-8 filenames are allowed.
# - Setting `relatime=on` follows the default for other Linux filesystems.
# - Setting `xattr=sa` vastly improves the performance of extended attributes.
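# The resulting dataset properties can be verified afterwards with, for example:
#   zfs get acltype,compression,normalization,relatime,xattr "$RPOOL"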
zpool create -f \
-o ashift=12 \
-o feature@large_dnode=disabled \
-O acltype=posixacl -O canmount=off -O compression=lz4 \
-O normalization=formD -O relatime=on -O xattr=sa \
-O mountpoint=/ -R /target \
"$RPOOL" "${ZFSRAIDDEF[@]}" || {
echo -e "${RED}[ERROR] ${YELLOW}Unable to create zpool:${NOCOL}"
echo -e " $RPOOL"
exit 1
}
# Create filesystem datasets to act as containers
zfs create -o canmount=off -o mountpoint=none "$RPOOL/ROOT"
zfs create -o canmount=off -o mountpoint=none "$BPOOL/BOOT"
# Create filesystem datasets for the root and boot filesystems
zfs create -o canmount=noauto -o mountpoint=/ "$RPOOL/ROOT/debian"
zfs mount "$RPOOL/ROOT/debian"
zfs create -o mountpoint=/boot "$BPOOL/BOOT/debian"
# Create datasets
zfs create "$RPOOL/home"
zfs create -o mountpoint=/root "$RPOOL/home/root"
chmod 700 /target/root
zfs create -o canmount=off "$RPOOL/var"
zfs create "$RPOOL/var/lib"
zfs create "$RPOOL/var/log"
zfs create "$RPOOL/var/spool"
zfs create -o com.sun:auto-snapshot=false "$RPOOL/var/cache"
zfs create -o com.sun:auto-snapshot=false "$RPOOL/var/tmp"
chmod 1777 /target/var/tmp
zfs create -o com.sun:auto-snapshot=false "$RPOOL/tmp"
chmod 1777 /target/tmp
zfs create "$RPOOL/opt"
zfs create "$RPOOL/srv"
zfs create -o canmount=off "$RPOOL/usr"
zfs create "$RPOOL/usr/local"
zfs create "$RPOOL/var/mail"
zfs create "$RPOOL/var/snap"
###############################################################################
### System Installation
# Install the minimal system
debootstrap --include=console-setup,dbus,htop,less,locales,openssh-server,net-tools,psmisc \
--components main,contrib,non-free "$DIST" /target http://deb.debian.org/debian/
# Copy in zpool.cache
mkdir -p /target/etc/zfs
cp -va /etc/zfs/zpool.cache /target/etc/zfs/
# Copy the hostid; without it the target system would consider the pools
# foreign (created under a different hostid) and fail to import them at boot
cp -va /etc/hostid /target/etc/
###############################################################################
### System Configuration
# Configure the hostname
test -n "$NEWHOST" || NEWHOST=debian-$(hostid)
echo "$NEWHOST" >/target/etc/hostname
if [ -n "$DOMAIN" ]; then
sed -i "1s/^/127.0.1.1\t$NEWHOST.$DOMAIN $NEWHOST\n/" /target/etc/hosts
else
sed -i "1s/^/127.0.1.1\t$NEWHOST\n/" /target/etc/hosts
fi
# Bind the virtual filesystems from the host environment to the new system
mount --rbind /dev /target/dev
mount --rbind /proc /target/proc
mount --rbind /sys /target/sys
mkdir -p /target/run
mount -t tmpfs tmpfs /target/run
mkdir /target/run/lock
# Install and configure a basic system environment
if [ "$DIST" = "bullseye" ]; then
ln -s /proc/mounts /target/etc/mtab
fi
# Bookworm moved firmware packages from 'non-free' into the separate
# 'non-free-firmware' component
if [ "${DIST}" = "bookworm" ]; then
NONFREE="non-free non-free-firmware"
else
NONFREE="non-free"
fi
cat << EOF > "/target/etc/apt/sources.list"
deb http://deb.debian.org/debian $DIST main contrib $NONFREE
deb http://security.debian.org/ $DIST-security main contrib $NONFREE
EOF
if [ -n "$PROXY" ]; then
echo "Acquire::http::Proxy \"${PROXY}\";" > /target/etc/apt/apt.conf.d/proxy.conf
fi
cat << EOF > /target/etc/apt/apt.conf.d/50norecommends
Apt::Install-Recommends 0;
Apt::AutoRemove::RecommendsImportant 0;
Apt::AutoRemove::SuggestsImportant 0;
EOF
chroot /target /usr/bin/apt-get update -qq
sed -i 's/# \(en_US.UTF-8\)/\1/' /target/etc/locale.gen
echo 'LANG="en_US.UTF-8"' > /target/etc/default/locale
chroot /target /usr/sbin/locale-gen
for PACKAGE in $(echo "$PACKAGES" | tr , ' '); do
EXTRA_PACKAGES+=("$PACKAGE")
done
DEBIAN_FRONTEND=noninteractive chroot /target /usr/bin/apt-get -qq install --yes \
apt-transport-https ca-certificates console-setup dbus gnupg htop less locales net-tools \
openssh-server psmisc sudo bind9-dnsutils etckeeper ethtool mtr-tiny vim \
"${EXTRA_PACKAGES[@]}"
if [ -d /proc/acpi ]; then
DEBIAN_FRONTEND=noninteractive chroot /target /usr/bin/apt-get -qq install --yes acpi acpid
chroot /target /usr/sbin/service acpid stop
fi
if dmidecode --type 38 | grep -q IPMI; then
DEBIAN_FRONTEND=noninteractive chroot /target /usr/bin/apt-get install --yes ipmitool
fi
DEBIAN_FRONTEND=noninteractive chroot /target /usr/sbin/dpkg-reconfigure tzdata
if [ "$DIST" = "bookworm" ]; then
# systemd-resolved was moved into its own package in Bookworm
DEBIAN_FRONTEND=noninteractive chroot /target /usr/bin/apt-get -qq install --yes \
systemd-resolved
mkdir -p /target/run/systemd/resolve
cp -va /etc/resolv.conf /target/run/systemd/resolve/stub-resolv.conf
fi
#######################################
# Install ZFS in the chroot environment for the new system
DEBIAN_FRONTEND=noninteractive chroot /target /usr/bin/apt-get -qq install --yes \
linux-headers-amd64 linux-image-amd64
DEBIAN_FRONTEND=noninteractive chroot /target /usr/bin/apt-get -qq install --yes \
zfs-initramfs
DEBIAN_FRONTEND=noninteractive chroot /target /usr/bin/apt-get -qq install --yes \
zfs-dkms
echo REMAKE_INITRD=yes > /target/etc/dkms/zfs.conf
DEBIAN_FRONTEND=noninteractive chroot /target /usr/bin/apt-get -qq install --yes \
zfs-zed
# Enable importing bpool. This ensures that bpool is always imported,
# regardless of whether /etc/zfs/zpool.cache exists, whether it is in the
# cachefile or not, or whether zfs-import-scan.service is enabled.
cat << EOF > /target/etc/systemd/system/zfs-import-bpool.service
[Unit]
DefaultDependencies=no
Before=zfs-import-scan.service
Before=zfs-import-cache.service

[Service]
Type=oneshot
RemainAfterExit=yes
# Work-around to preserve the zpool cache across the import:
ExecStartPre=-/bin/mv /etc/zfs/zpool.cache /etc/zfs/preboot_zpool.cache
ExecStart=/sbin/zpool import -N -o cachefile=none $BPOOL
ExecStartPost=-/bin/mv /etc/zfs/preboot_zpool.cache /etc/zfs/zpool.cache

[Install]
WantedBy=zfs-import.target
EOF
chroot /target /usr/bin/systemctl enable zfs-import-bpool.service
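# On the installed system the import can be checked with, for example:
#   systemctl status zfs-import-bpool.service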
# We need to activate zfs-mount-generator. This makes systemd aware of the
# separate mountpoints, which is important for things like /var/log and /var/tmp.
if [ ! -d /target/etc/zfs/zfs-list.cache ]; then
mkdir "/target/etc/zfs/zfs-list.cache"
touch "/target/etc/zfs/zfs-list.cache/$BPOOL"
touch "/target/etc/zfs/zfs-list.cache/$RPOOL"
fi
chroot /target /usr/sbin/zed -p /run/zed.pid
while true; do
if grep -q "$BPOOL" "/target/etc/zfs/zfs-list.cache/$BPOOL" && \
grep -q "$RPOOL" "/target/etc/zfs/zfs-list.cache/$RPOOL"; then
break
fi
sleep 1
done
# Terminate zed and wait before editing zfs-list.cache.
ZEDPID=$(cat /target/run/zed.pid)
kill -SIGTERM "$ZEDPID"
while ps -p "$ZEDPID" > /dev/null; do
sleep 1
done
# Fix the paths to eliminate /target
sed -Ei "s|/target/?|/|" "/target/etc/zfs/zfs-list.cache/$BPOOL"
sed -Ei "s|/target/?|/|" "/target/etc/zfs/zfs-list.cache/$RPOOL"
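# Each zfs-list.cache line holds tab-separated dataset properties (dataset name,
# mountpoint, canmount, ...) which zfs-mount-generator translates into systemd
# mount units at boot.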
#######################################
# Configure the network interface
if [ -z "$(ls -A /etc/systemd/network)" ]; then
ETHDEVS=$(udevadm info -e | grep "ID_NET_NAME_PATH=" | cut -d= -f2)
for ETHDEV in $ETHDEVS; do
echo -e "[Match]\nName=$ETHDEV\n\n[Network]\nDHCP=yes" > "/target/etc/systemd/network/$ETHDEV.network"
done
else
cp -a /etc/systemd/network/* /target/etc/systemd/network/
fi
chroot /target /usr/bin/systemctl enable systemd-networkd
chroot /target /usr/bin/systemctl enable systemd-resolved
DEBIAN_FRONTEND=noninteractive chroot /target /usr/bin/apt-get -qq purge --yes ifupdown
#######################################
# Configure swap
cat << EOF > /target/etc/fstab
# /etc/fstab: static file system information.
#
# Use 'blkid' to print the universally unique identifier for a
# device; this may be used with UUID= as a more robust way to name devices
# that works even if disks are added and removed. See fstab(5).
#
# <file system> <mount point> <type> <options> <dump> <pass>
EOF
for DISK in "${ZFSDISKS[@]}"; do
mkswap -f "$DISK-part2"
echo "UUID=$(blkid -s UUID -o value "$DISK-part2") none swap discard 0 1" >> /target/etc/fstab
done
#######################################
# Set a root password
test -n "$PASSWORD" || PASSWORD=$(< /dev/urandom tr -dc 'A-Za-z0-9!$%^_+=?' | head -c"${1:-24}")
echo "root:$PASSWORD" | chroot /target /usr/sbin/chpasswd
sed -i 's/^#\(PermitRootLogin\) .*$/\1 yes/' /target/etc/ssh/sshd_config
###############################################################################
### GRUB Installation
GRUBPKG=(grub-pc)
if [ -d /sys/firmware/efi ]; then
GRUBPKG=(grub-efi-amd64 shim-signed efibootmgr)
fi
DEBIAN_FRONTEND=noninteractive chroot /target /usr/bin/apt-get -qq install --yes \
grub2-common "${GRUBPKG[@]}"
DEBIAN_FRONTEND=noninteractive chroot /target /usr/bin/apt-get -qq remove --purge os-prober
# Refresh the initrd files
chroot /target /usr/sbin/update-initramfs -c -k all
# Update the boot configuration and install the boot loader
if [ -d /sys/firmware/efi ]; then
# This is arguably a design flaw in the UEFI specification - the ESP is
# a single point of failure on one disk.
# https://wiki.debian.org/UEFI#RAID_for_the_EFI_System_Partition
I=1
for DISK in "${ZFSDISKS[@]}"; do
mkdosfs -F 32 -n EFI "$DISK-part1"
EFIUUID=$(blkid -s UUID -o value "$DISK-part1")
if [ $I -eq 1 ]; then
echo "UUID=$EFIUUID /boot/efi vfat defaults 0 0" >> /target/etc/fstab
if [ ! -d /target/boot/efi ]; then
mkdir -pv /target/boot/efi
fi
chroot /target /usr/bin/mount /boot/efi
chroot /target /usr/sbin/update-grub
chroot /target /usr/sbin/grub-install --target=x86_64-efi \
--efi-directory=/boot/efi --bootloader-id="debian" \
--recheck --no-floppy
else
if [ ! -d /target/boot/efibackup ]; then
mkdir -pv /target/boot/efibackup
fi
mount "$DISK-part1" /target/boot/efibackup
cp -av /target/boot/efi/* /target/boot/efibackup/
umount "$DISK-part1"
chroot /target /usr/bin/efibootmgr -c -d "$DISK" -p 1 \
-L "debian-$I" -l '\EFI\debian\grubx64.efi'
if [ $I -gt 2 ]; then
C="#"
fi
echo "${C}UUID=$EFIUUID /boot/efibackup vfat defaults 0 0" >> /target/etc/fstab
fi
((I++)) || true
done
else
chroot /target /usr/sbin/update-grub
for DISK in "${ZFSDISKS[@]}"; do
chroot /target /usr/sbin/grub-install "$DISK"
done
# Let debconf know which disks we installed grub on.
printf -v INSTALLED_DEVICES "%s, " "${ZFSDISKS[@]}"
chroot /target debconf-set-selections \
<<< "grub-pc grub-pc/install_devices multiselect ${INSTALLED_DEVICES%, }"
fi
###############################################################################
### Final Cleanup
# Snapshot the initial installation
zfs snapshot "$BPOOL/BOOT/debian@install"
zfs snapshot "$RPOOL/ROOT/debian@install"
# Unmount all filesystems
sync
mount | grep -v zfs | tac | awk '/\/target/ {print $3}' | xargs -I{} umount -lf {}
# The recursive lazy unmount above also detached these virtual filesystems from
# the live host environment, so remount them to keep the host usable.
mount -t devpts -o rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 devpts /dev/pts
mount -t securityfs -o rw,nosuid,nodev,noexec,relatime securityfs /sys/kernel/security
mount -t tmpfs -o rw,nosuid,nodev tmpfs /dev/shm
mount -t cgroup2 -o rw,nosuid,nodev,noexec,relatime,nsdelegate,memory_recursiveprot cgroup2 /sys/fs/cgroup
mount -t pstore -o rw,nosuid,nodev,noexec,relatime pstore /sys/fs/pstore
mount -t bpf -o rw,nosuid,nodev,noexec,relatime,mode=700 none /sys/fs/bpf
mount -t tracefs -o rw,nosuid,nodev,noexec,relatime tracefs /sys/kernel/tracing
mount -t debugfs -o rw,nosuid,nodev,noexec,relatime debugfs /sys/kernel/debug
mount -t mqueue -o rw,nosuid,nodev,noexec,relatime mqueue /dev/mqueue
mount -t hugetlbfs -o rw,relatime,pagesize=2M hugetlbfs /dev/hugepages
mount -t configfs -o rw,nosuid,nodev,noexec,relatime configfs /sys/kernel/config
mount -t fusectl -o rw,nosuid,nodev,noexec,relatime fusectl /sys/fs/fuse/connections
if [ -d /sys/firmware/efi ]; then mount -t efivarfs -o rw,relatime efivarfs /sys/firmware/efi/efivars; fi
zpool export -a
echo -e "${GREEN}===================================================${NOCOL}"
echo -e "${GREEN}Installation complete${NOCOL}"
echo -e "${GREEN}New root password:${NOCOL} $PASSWORD"
echo -e "${GREEN}===================================================${NOCOL}"
## reboot