Nomad is designed to be a highly available, quorum-based cluster scheduler. Nomad deployments should ideally have three or more server nodes and one or more client nodes.
However, Nomad also provides a dev mode that runs everything in a single agent process.
Note: Nomad dev agents behave slightly differently from non-dev agents (for one, all state is kept in memory). For a more authentic experience, you can instead run Nomad with a configuration that enables both the client and the server in one agent.
This configuration lets you run a single-node Nomad "cluster".
Running Nomad this way comes with several tradeoffs and caveats.
For example:
- There is no high availability, which is one of the main reasons to run a scheduler in the first place.
- If the server state is corrupted, you have to wipe it and start over.
- If the node's IP address changes, you have to perform peers.json recovery (see the sketch after this list) or wipe the state.
- You can't run more than one instance of a workload that uses static ports.
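For reference, a minimal sketch of the peers.json recovery mentioned above, assuming the data_dir and systemd unit used later in this guide and Raft protocol v3; the ID placeholder is the server's node ID (kept in the node-id file under the server data directory):
systemctl stop nomad
cat <<EOF > /opt/nomad/data/server/raft/peers.json
[
  {
    "id": "<server-node-id>",
    "address": "<new-ip>:4647",
    "non_voter": false
  }
]
EOF
systemctl start nomad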
References:
- Nomad install script: https://github.com/hashicorp/guides-configuration/blob/master/nomad/scripts/install-nomad.sh
- HashiCorp releases: https://releases.hashicorp.com/
- Nomad Podman driver releases: https://releases.hashicorp.com/nomad-driver-podman/
- hashi-up: https://github.com/jsiebens/hashi-up/tree/main
- CNI plugins releases: https://github.com/containernetworking/plugins/releases
- HashiCorp Homebrew tap: https://github.com/hashicorp/homebrew-tap
Host preparation:
cat <<EOF >> /etc/hosts
172.25.100.50 nomad-master
172.25.100.51 nomad-minion
EOF
echo "49152 65535" > /proc/sys/net/ipv4/ip_local_port_range
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
echo 1 > /proc/sys/net/bridge/bridge-nf-call-arptables
echo 1 > /proc/sys/net/bridge/bridge-nf-call-ip6tables
echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables
cat <<EOF > /etc/sysctl.d/98-hashicorp-nomad.conf
net.bridge.bridge-nf-call-arptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
cat /proc/sys/net/ipv4/ip_local_port_range
Create the nomad user and necessary directories:
mkdir -p /opt/cni/bin
mkdir -p /opt/nomad/{plugins,data}
mkdir -p /etc/nomad.d
chmod 700 /etc/nomad.d
touch /etc/nomad.d/{nomad,server,client}.hcl
groupadd -r nomad
useradd --system -g nomad --home /etc/nomad.d -s /bin/false nomad
#useradd -Mr -g nomad -s /usr/sbin/nologin nomad
chown -R nomad:nomad /opt/nomad /etc/nomad.d
Ubuntu:
apt-get update \
&& apt-get upgrade -y
apt-get install -y \
ca-certificates \
curl \
wget \
gnupg \
lsb-release \
git \
ufw \
seccomp \
jq \
unzip \
vim
Ubuntu repos:
curl -fsSL https://apt.releases.hashicorp.com/gpg | gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" > /etc/apt/sources.list.d/hashicorp.list
apt-get update \
&& apt-get install -y nomad
RHEL-based systems:
yum update -y
yum install -y \
yum-utils \
curl \
wget \
jq \
unzip \
firewalld \
vim \
git
Repos for RHEL-based systems:
yum-config-manager --add-repo https://rpm.releases.hashicorp.com/RHEL/hashicorp.repo
yum update -y \
&& yum -y install nomad
Manually download binaries:
export NOMAD_VERSION="1.1.0"
curl -sL "https://releases.hashicorp.com/nomad/${NOMAD_VERSION}/nomad_${NOMAD_VERSION}_linux_amd64.zip" \
  -o "nomad_${NOMAD_VERSION}_linux_amd64.zip"
unzip "nomad_${NOMAD_VERSION}_linux_amd64.zip"
sudo chown root:root nomad
sudo mv nomad /usr/local/bin/
nomad version
#nomad -v
nomad -autocomplete-install
complete -C /usr/local/bin/nomad nomad
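Optionally verify the download against HashiCorp's published checksums (run this while the zip from above is still in the working directory):
curl -sL -o "nomad_${NOMAD_VERSION}_SHA256SUMS" \
  "https://releases.hashicorp.com/nomad/${NOMAD_VERSION}/nomad_${NOMAD_VERSION}_SHA256SUMS"
grep linux_amd64 "nomad_${NOMAD_VERSION}_SHA256SUMS" | sha256sum -c -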
Nomad Podman driver:
curl -sL https://releases.hashicorp.com/nomad-driver-podman/0.5.1/nomad-driver-podman_0.5.1_linux_amd64.zip \
  -o nomad-driver-podman_linux_amd64.zip
unzip nomad-driver-podman_linux_amd64.zip
#mv nomad-driver-podman /var/nomad/plugins/
mv nomad-driver-podman /opt/nomad/plugins/
CNI plugins:
curl -sL -o cni-plugins.tgz https://github.com/containernetworking/plugins/releases/download/v1.3.0/cni-plugins-linux-amd64-v1.3.0.tgz
tar -C /opt/cni/bin -xzf cni-plugins.tgz
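Nomad looks for CNI plugins in /opt/cni/bin by default (overridable with the client's cni_path setting); quick check that the unpack worked:
ls /opt/cni/bin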
Ubuntu firewall:
ufw allow 4646:4647/tcp
ufw allow 4648
ufw reload
ufw status verbose
RHEL-based host firewall:
sudo firewall-cmd --permanent --zone=public --add-port=4646/tcp --add-port=4647/tcp --add-port=4648/tcp --add-port=4648/udp
sudo firewall-cmd --reload
sudo firewall-cmd --zone=public --list-ports
Systemd service:
cat <<'EOF' > /etc/systemd/system/nomad.service
[Unit]
Description=Nomad
Documentation=https://www.nomadproject.io/docs/
Wants=network-online.target
After=network-online.target
# When using Nomad with Consul it is not necessary to start Consul first. These
# lines start Consul before Nomad as an optimization to avoid Nomad logging
# that Consul is unavailable at startup.
#Wants=consul.service
#After=consul.service
[Service]
# Nomad servers should be run as the nomad user. Nomad clients
# should be run as root, so for the combined client+server agent in
# this guide you will likely need User=root for task drivers to work.
User=nomad
Group=nomad
ExecReload=/bin/kill -HUP $MAINPID
# Package installs place the binary at /usr/bin/nomad; adjust the path as needed.
ExecStart=/usr/local/bin/nomad agent -config /etc/nomad.d
KillMode=process
KillSignal=SIGINT
LimitNOFILE=65536
LimitNPROC=infinity
Restart=on-failure
RestartSec=2
## Configure unit start rate limiting. Units which are started more than
## *burst* times within an *interval* time span are not permitted to start any
## more. Use `StartLimitIntervalSec` or `StartLimitInterval` (depending on
## systemd version) to configure the checking interval and `StartLimitBurst`
## to configure how many starts per interval are allowed. The values in the
## commented lines are defaults.
# StartLimitBurst = 5
## StartLimitIntervalSec is used for systemd versions >= 230
# StartLimitIntervalSec = 10s
## StartLimitInterval is used for systemd versions < 230
# StartLimitInterval = 10s
TasksMax=infinity
OOMScoreAdjust=-1000
[Install]
WantedBy=multi-user.target
EOF
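Reload systemd so it picks up the new unit:
systemctl daemon-reload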
Nomad agent configuration. The systemd unit starts the agent with nomad agent -config /etc/nomad.d, which loads every .hcl file in that directory. To experiment with dev mode instead, skip the config files and run nomad agent -dev -bind 0.0.0.0 directly; dev mode keeps all state in memory.
cat <<EOF > /etc/nomad.d/nomad.hcl
## https://www.nomadproject.io/docs/agent/configuration
datacenter = "iad1"
#name = "nomad-master"
bind_addr = "0.0.0.0"
data_dir = "/opt/nomad/data"
plugin_dir = "/opt/nomad/plugins"
disable_update_check = true
leave_on_interrupt = true
leave_on_terminate = true
## https://www.nomadproject.io/docs/agent/configuration/index.html#log_level
## [WARN|INFO|DEBUG]
log_level = "INFO"
#enable_syslog = true
#log_file = "/var/log/nomad.log"
#log_rotate_bytes = 10485760
#log_rotate_max_files = 5
#addresses {
# http = "0.0.0.0"
#}
#server {
# enabled = true
# bootstrap_expect = 1
#}
#client {
# enabled = true
# servers = ["127.0.0.1"]
#}
#acl {
# enabled = true
#}
#ui {
# enabled = false
#}
EOF
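If you later uncomment the acl block, bootstrap ACLs once the agent is up to obtain the initial management token:
nomad acl bootstrap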
Server:
cat <<EOF > /etc/nomad.d/server.hcl
server {
  enabled          = true
  bootstrap_expect = 1
  #encrypt = "<nomad_key>"
  #server_join {
  #  retry_join = ["<server_ip>"]
  #  retry_max  = 0
  #}
}
EOF
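The commented encrypt value expects a base64-encoded gossip encryption key; generate one with:
nomad operator keygen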
Client:
cat <<EOF > /etc/nomad.d/client.hcl
client {
  enabled = true
  servers = ["127.0.0.1"]
  #servers = ["nomad-master"]
}
#plugin "nomad-driver-podman" {
#  config {
#    socket_path = "unix:///var/run/podman.sock"
#  }
#}
EOF
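To use the commented podman plugin block, the driver needs the Podman API socket; on a systemd host running rootful Podman this is typically:
systemctl enable --now podman.socket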
Enable and start the systemd service:
systemctl unmask nomad
systemctl enable --now nomad
systemctl status nomad
Verify:
nomad server members
nomad server members -detailed
nomad node status
nomad agent-info
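The HTTP API exposes the same health information, which is handy for scripting:
curl -s http://127.0.0.1:4646/v1/agent/health | jq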
Admin tasks:
nomad monitor
nomad node eligibility -disable <node-id>
nomad node drain -enable <node-id>
nomad job init
vim example.nomad
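Syntax-check a job file before submitting it:
nomad job validate example.nomad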
cat <<EOF > redis.nomad
job "redis" {
  datacenters = ["iad1"]
  group "cache" {
    network {
      port "redis" { to = 6379 }
    }
    task "redis" {
      driver = "podman"
      config {
        image = "redis:6.2"
        ports = ["redis"]
      }
      resources {
        cpu    = 500
        memory = 256
      }
    }
  }
}
EOF
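Submit the Redis job and check which dynamic port it was assigned:
nomad job run redis.nomad
nomad job status redis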
cat <<EOF > http-echo.nomad
job "http-echo" {
  datacenters = ["iad1"]
  group "echo" {
    network {
      mode = "bridge"
      port "http" {
        static = 8080
        to     = 80
      }
    }
    # With a static port, only one instance fits on a single node
    # (see the caveats above); a higher count cannot place.
    count = 1
    task "server" {
      # Nomad's built-in Docker driver; requires Docker on the host.
      driver = "docker"
      config {
        image = "nginx"
        ports = ["http"]
      }
    }
  }
}
EOF
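Run the echo job and test it from the host; assuming the Docker driver is available, nginx should answer on the static port:
nomad job run http-echo.nomad
curl -s http://localhost:8080 | head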
nomad job run example.nomad
nomad status example
nomad alloc status <allocation_id>
nomad alloc logs <allocation_id> redis
nomad job plan example.nomad
nomad job stop example
nomad job status example
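To remove a stopped job completely, including its history:
nomad job stop -purge example
nomad system gc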