Skip to content

Instantly share code, notes, and snippets.

@briantd
Last active August 31, 2017 23:45
Show Gist options
  • Save briantd/2f60a0550ce8ac6f1ab04b4737668773 to your computer and use it in GitHub Desktop.
JenkinsWorld2017 Docker + Jenkins
version: "3.1"
# See https://docs.docker.com/compose/compose-file/
#
# Purpose -- stackfile to spin up a jenkins stack (master + workers)
# Stack needs volumes and secrets to be already defined in the Swarm.
# (c) Docker 2017
#
#
# Define services that are part of this stack.
# Invocation:
#   $ docker stack deploy -c docker-compose.yml <stackname>
#
# Notes:
# <stackname> in this case is user-defined; resulting service names will be based on the <stackname> provided
#
# e.g.
#   $ docker stack ls
#   NAME     SERVICES
#   mystack  3
#   docker@manager1:~$ docker stack ps mystack
#   ID            NAME                   IMAGE                        NODE      DESIRED STATE  CURRENT STATE        ERROR  PORTS
#   r05brbtu5x00  mystack_viz.1          manomarks/visualizer:latest  worker1   Running        Running 5 hours ago
#   wfegndqhdpx2  mystack_jenkins_app.1  jenkins:latest               manager1  Running        Running 8 hours ago
#   i9yw3e8putft  \_ mystack_jenkins_app.1  jenkins:latest            manager1  Shutdown       Shutdown 8 hours ago
#
services:
  #
  # Docker Swarm visualizer
  #
  # See https://docs.docker.com/engine/tutorials/dockervolumes/#volume-labels for info on the ":z"
  viz:
    image: dockersamples/visualizer
    # Port mappings are quoted: an unquoted digits-and-colon scalar can be
    # misparsed as a base-60 integer by YAML 1.1 parsers.
    ports:
      - "8081:8080"
    networks:
      - jenkins-net
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:z
    deploy:
      placement:
        constraints: [node.role==manager]
  #
  # Jenkins master
  # * Requires persistent filesystem for state
  # * Single container. Leverages jenkins_workers for load/concurrency
  #
  jenkins_master:
    image: jenkins
    # Expose 8080 (HTTP UI), 9443 (HTTPS), and 50000 (JNLP agent port)
    ports:
      - "8080:8080"
      - "9443:9443"
      - "50000:50000"
    # Attach to the custom, named network
    networks:
      - jenkins-net
    # Mount the named data volumes "jenkins-data"/"jenkins-backups", as well as the host's docker socket
    volumes:
      - jenkins-data:/var/jenkins_home/:z
      - jenkins-backups:/var/backups/:z
      - /var/run/docker.sock:/var/run/docker.sock:z
      # Depending on the runtime used (e.g. LinuxKit, Centos, etc...)
      # this might need to be changed. If the node does NOT already have the docker client binaries installed,
      # or if they're not statically linked, download and install binaries.
      # For CE, look here --> https://docs.docker.com/engine/installation/linux/docker-ce/binaries/
      - /usr/local/bin/docker:/usr/local/bin/docker
    # Deploy settings
    deploy:
      # Just 1 instance
      mode: replicated
      replicas: 1
      labels: [APP=JENKINS]
      # Only run container on a manager node
      placement:
        constraints: [node.role==manager]
      restart_policy:
        condition: any
  #
  # Jenkins worker
  # * Allows for scale-out of work
  # * java -jar swarm-client-3.3.jar -master http://XXXXX.elb.amazonaws.com:8080 -username admin -password XXXX
  #
  jenkins_worker:
    # This image needs to be built/deployed (using the Dockerfile "jenkins_slave_Dockerfile" below)
    # on all nodes where a jenkins worker container may be scheduled. To properly build the image,
    # create a dir, and put the "jenkins_slave_Dockerfile" in the dir as "Dockerfile", alongside
    # "jenkins_slave_join_jenkins_swarm.sh" renamed to "join_jenkins_swarm.sh".
    #
    image: jw2017_worker
    networks:
      - jenkins-net
    depends_on:
      - jenkins_master
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:z
      # See note above in jenkins_master service concerning docker client binaries
      - /usr/local/bin/docker:/usr/local/bin/docker
    deploy:
      # Run an instance on every worker node
      mode: global
      labels: [APP=JENKINS_WORKER]
      placement:
        constraints: [node.role==worker]
    secrets:
      # Used to find jenkins_master
      - jenkins_join_url
      - jenkins_join_password
networks:
  #
  # Declare jenkins-net named network as using the "overlay" network plugin
  # See: https://docs.docker.com/engine/userguide/networking/
  # ** Note: broadcasts are NOT supported
  #
  jenkins-net:
    driver: overlay
volumes:
  #
  # Declare jenkins-data named volume as using the "rexray" volume plugin
  # See: https://docs.docker.com/engine/extend/plugins_volume/
  # **RexRay plugin used -- needs to be installed beforehand
  # See https://rexray.readthedocs.io/en/stable/user-guide/docker-plugins/
  #
  # Create volumes via "docker volume create --driver=rexray jenkins-data"
  #
  jenkins-data:
    external: true
  #
  # Backup repository
  #
  jenkins-backups:
    external: true
#
# Declare secrets used by services (must run the secrets_install.sh script for this stack definition to work)
# - Noteworthy, when the secrets have a "." (period) in the name, the stack application name is prefixed to it.
#
secrets:
  jenkins_join_url:
    external: true
  jenkins_join_password:
    external: true
#!/bin/bash -x
#
# Purpose -- script installing a docker swarm cluster on a laptop using "docker-machine".
# (c) Docker 2017
#
# Source shared config for IP info (defines MANAGERS, WORKERS, IP_BASE,
# IP_LAST_BYTE, LEAD_MANAGER_IP -- see shared_config.sh).
# NOTE(review): the error message says "./shared_config" but the file tested is
# "./shared_config.sh" -- consider making the message match the actual filename.
if [[ ! -e ./shared_config.sh ]]; then
echo "Missing ./shared_config"
exit 1
else
. shared_config.sh
fi
# Experimental features on by default (only if DOCKER_ENG_OPT is not already set)
: ${DOCKER_ENG_OPT:="--engine-opt experimental=true"}
#
# VM Cleanup -- !! DO NOT USE !! for now. Need to determine how to automate un-mounting
# existing rexray volumes before calling docker-machine rm -- as that seems to remove
# all volumes mounted on the machine in question.
#
# May throw errors if the machines do not already exist.
# TODO: Need to use docker-machine ls and filter to get valid list of machines
# TODO: Need to figure out how to script unmounting rexray volumes to allow for "docker-machine rm"
#KNOWN_MACHINES=`docker-machine ls -q`
#for machine in ${KNOWN_MACHINES}; do
#
# # Only affect expected machines
# for expected in ${MANAGERS} ${WORKERS}; do
# if [[ ${machine} == ${expected} ]]; then
# docker-machine stop ${machine}
# break
# fi
# done
#done
#
# Provision machines:
# * IP assignment (use bootsync.sh startup script to enforce)
# * RexRay startup
#
KNOWN_MACHINES=`docker-machine ls -q`
for MACHINE in ${MANAGERS} ${WORKERS}; do
echo "Configure ${MACHINE}"
#
# CREATE/RE-PROVISION machine
# * Depending on the state of the machine (i.e. Running, Stopped, not defined)
#
MACHINE_CREATED=0
for seen in ${KNOWN_MACHINES}; do
# Match element of list
if [[ ${MACHINE} == ${seen} ]]; then
STATUS=`docker-machine status ${MACHINE}`
# Running? Just go ahead and "provision" (restart to re-apply bootsync config)
if [[ ${STATUS} == "Running" ]]; then
docker-machine restart ${MACHINE}
else
# Else, presume it's "Stopped", so start it...
docker-machine start ${MACHINE}
fi
# !!! Break out of loop and continue with configuration setup
MACHINE_CREATED=1
break
fi
done
# Machine not seen, create
if [[ ${MACHINE_CREATED} == 0 ]]; then
# So machine is NOT already defined. So create the node.
# Managers get 2 GB of RAM; workers get 1 GB.
if [[ $MACHINE =~ ^manager[0-9]+$ ]]; then
MEMORY=2048
else
MEMORY=1024
fi
docker-machine create --driver virtualbox --virtualbox-memory ${MEMORY} ${DOCKER_ENG_OPT} ${MACHINE}
fi
#
# Create a bootsync.sh (init) script
# From http://stackoverflow.com/questions/34336218/is-there-a-way-to-force-docker-machine-to-create-vm-with-a-specific-ip/34336489
# ----
# First kill dhcp (so the static IP assigned below is not replaced on boot)
echo "kill \`cat /var/run/udhcpc.eth1.pid\`" \
| docker-machine ssh ${MACHINE} sudo tee /var/lib/boot2docker/bootsync.sh > /dev/null 2>&1
# Set IP address (presumes no more than 9 managers, and that the IPs are available)
echo ifconfig eth1 ${IP_BASE}.${IP_LAST_BYTE} netmask 255.255.255.0 broadcast ${IP_BASE}.255 up \
| docker-machine ssh ${MACHINE} sudo tee -a /var/lib/boot2docker/bootsync.sh > /dev/null 2>&1
# ----
# restart machine (activates the IP)
docker-machine restart ${MACHINE}
# regenerate the certs -- because the IP may have changed
docker-machine regenerate-certs -f ${MACHINE}
# Leave swarm (if actually part of one)
docker-machine ssh ${MACHINE} docker swarm leave -f
# Increment IP (next machine gets the following address in the /24)
IP_LAST_BYTE=$((IP_LAST_BYTE+1))
done
#
# Init/Join swarm
#
for MACHINE in ${MANAGERS}; do
# Init swarm (only manager1)
# Remember the respective manager and worker tokens
if [[ "${MACHINE}" == "manager1" ]]; then
docker-machine ssh ${MACHINE} docker swarm init --advertise-addr eth1
#LEAD_MANAGER_IP=`docker-machine ip ${MACHINE}`
MANAGER_TOKEN=`docker-machine ssh ${MACHINE} docker swarm join-token manager -q`
WORKER_TOKEN=`docker-machine ssh ${MACHINE} docker swarm join-token worker -q`
else
# Join other manager nodes (if any exist) to swarm as managers.
# LEAD_MANAGER_IP comes from shared_config.sh (no port -- swarm defaults to 2377).
docker-machine ssh ${MACHINE} docker swarm join --token ${MANAGER_TOKEN} ${LEAD_MANAGER_IP}
fi
done
# Join workers
for MACHINE in ${WORKERS}; do
docker-machine ssh ${MACHINE} docker swarm join --token ${WORKER_TOKEN} ${LEAD_MANAGER_IP}
done
#!/bin/bash -x
#
# Purpose -- script for installing rexray service on docker machines. This is necessary for certain platforms such
# as Docker Machines running w/ a Virtualbox driver, or Azure's Premium Storage. For virtualbox, there's a dependency
# on a SOAP service to coordinate with mounting/unmounting virtual disks. For Azure, as of the time of this writing
# (2017/08/31), there is currently no pre-built managed docker plugin (i.e. no "docker plugin install rexray/azured").
#
# (c) Docker 2017
#
#
# RexRay needs to be re-installed after every docker-machine restart as the machine has a READONLY filesystem.
# See config in /etc/rexray/config.yml
# See logs in /var/log/rexray/rexray.log
#
# Source shared config for IP info (defines MANAGERS, WORKERS -- see shared_config.sh)
# NOTE(review): the error message says "./shared_config" but the file tested is
# "./shared_config.sh" -- consider making the message match the actual filename.
if [[ ! -e ./shared_config.sh ]]; then
echo "Missing ./shared_config"
exit 1
else
. shared_config.sh
fi
# VirtualBox SOAP API log
VBOXWEBLOG=~/vboxwebsrv.log
# RexRay config
# NOTE(review): REXRAY_CONFIG is defined but the install function below
# hard-codes "rexray_config.yml" -- keep the two in sync.
REXRAY_CONFIG=rexray_config.yml
#
# Define a function that sets up RexRay on the docker-machine named by $1:
# installs the stable rexray release, renders the local rexray_config.yml
# (expanding $VAR references from the current environment via perl) onto the
# machine as /etc/rexray/config.yml, then (re)starts the rexray service.
#
function install_rex_ray() {
export MACHINE=$1
# Install rexray
docker-machine ssh ${MACHINE} "sudo curl -sSL https://dl.bintray.com/emccode/rexray/install | sh -s -- stable"
#docker-machine scp ${REXRAY_CONFIG} ${MACHINE}:
# Interpolate $VAR tokens in the config with local environment values before copying
cat rexray_config.yml | perl -pe 's#\$(\w+)#$ENV{$1}#e' | docker-machine ssh ${MACHINE} sudo tee /etc/rexray/config.yml > /dev/null 2>&1
# Start service
# * Use restart to re-load the config if service is already running
docker-machine ssh ${MACHINE} "sudo rexray service restart"
}
#
# (Re)start Host (i.e. laptop) VirtualBox SOAPAPI process. RexRay uses this to handle volume mounts
#
# Kill existing process
ps auxwww | grep "vboxwebsrv" | grep -v grep | awk '{ print $2 }' | xargs kill
sleep 5
# Configure no auth (ok for dev purposes, otherwise credentials need to be in rexray config)
VBoxManage setproperty websrvauthlibrary null
# Start the SOAPAPI process in the background (-b), logging to ${VBOXWEBLOG}
vboxwebsrv -H 0.0.0.0 -v -b > ${VBOXWEBLOG} 2>&1
#
# Provision machines:
# * IP assignment (use bootsync.sh startup script to enforce)
# * RexRay startup
#
for MACHINE in ${MANAGERS} ${WORKERS}; do
# Setup RexRay
install_rex_ray ${MACHINE}
done
#!/bin/bash -x
#
# Purpose -- demonstrate changing placement constraints for Jenkins master such that the master is migrated from manager nodes
# to worker nodes.
#
# (c) Docker 2017
#
# Remove the manager-role constraint, and pin the service to a specific worker host.
# NOTE(review): the original command had --constraint-add/--constraint-rm swapped
# (it re-added node.role==manager and removed node.hostname==worker1), which moves
# the master *to* a manager -- contradicting the stated purpose above and the
# RESULTS capture below. Flags corrected to match both.
# Use "docker service inspect mystack_jenkins_master" to see placement constraints. May need to sort those.
docker-machine ssh manager1 docker service update --detach=true --constraint-rm "node.role==manager" --constraint-add "node.hostname==worker1" mystack_jenkins_master
# Investigate container status using:
# docker service ps mystack_jenkins_master
# AND
# docker service inspect mystack_jenkins_master
#
# RESULTS:
#
# "Placement": {
# "Constraints": [
# "node.hostname==worker1",
# "node.role==worker"
# ]
# },
#
# "UpdateStatus": {
# "State": "updating",
# "StartedAt": "2017-02-16T18:19:46.208484688Z",
# "CompletedAt": "1970-01-01T00:00:00Z",
# "Message": "update in progress"
# }
#
#!/bin/bash -x
#
# Purpose -- demonstrate changing placement constraints for Jenkins master such that the master is migrated from worker nodes
# to manager nodes.
#
# (c) Docker 2017
#
# Remove the worker-host pin and restore the manager-role constraint.
# NOTE(review): the original command had --constraint-add/--constraint-rm swapped
# (it removed node.role==manager and added node.hostname==worker1), which moves
# the master *to* worker1 -- the opposite of the stated purpose. Flags corrected.
# Use "docker service inspect mystack_jenkins_master" to see placement constraints. May need to sort those.
docker-machine ssh manager1 docker service update --detach=true --constraint-rm "node.hostname==worker1" --constraint-add "node.role==manager" mystack_jenkins_master
FROM evarga/jenkins-slave
#
# Purpose -- Dockerfile adds additional dependencies and uses docker secrets to authenticate against Jenkins master
# to add Jenkins workers.
#
# (c) Docker 2017
#
# Install git, python, pip, curl, and jq in a single layer (fewer layers, one
# shared apt cache), then drop the apt lists to keep the image small.
RUN apt-get -q update \
    && apt-get install -y git python python-pip curl jq \
    && rm -rf /var/lib/apt/lists/*
# Pull swarm-client
RUN curl -O https://repo.jenkins-ci.org/releases/org/jenkins-ci/plugins/swarm-client/3.3/swarm-client-3.3.jar
# Add jenkins to staff group so it can use the docker.sock from bind mount
RUN addgroup jenkins staff
ADD join_jenkins_swarm.sh /
RUN chmod +x /join_jenkins_swarm.sh
# Standard SSH port
EXPOSE 22
# Default command -- join the Jenkins master via the swarm plugin.
# NOTE(review): the original file ended with a "# Default command" comment but no
# CMD, so the base image's sshd CMD would run and the worker would never join the
# master (contradicting the jenkins_worker stack service's design). Restored here.
CMD ["/join_jenkins_swarm.sh"]
#!/usr/bin/env bash
#
# Purpose -- Make the jenkins worker attempt to connect to jenkins master using the swarm plugin and JNLP.
#
# (c) Docker 2017
#
# The master URL and join password arrive as Docker secrets mounted under
# /var/run/secrets/ (declared in the stack file's jenkins_worker service).
# The command substitutions are quoted so a password containing whitespace or
# glob characters is still passed as a single argument.
java -jar swarm-client-3.3.jar -master "$(cat /var/run/secrets/jenkins_join_url)" -username admin -password "$(cat /var/run/secrets/jenkins_join_password)"
#/usr/sbin/sshd -D
rexray:
  logLevel: info
libstorage:
  logLevel: info
  service: virtualbox
  integration:
    volume:
      operations:
        create:
          default:
            # Default new-volume size -- presumably GB; TODO confirm against rexray docs
            size: 25
        mount:
          # Use preempt feature so Volume follows container across VM Hosts
          preempt: true
virtualbox:
  volumePath: $HOME/rexray/Volumes # MUST NOT LEAVE AS A VARIABLE -- script that copies this to HostVM has an interpolation step.
  # Default VirtualBox IP:port
  endpoint: http://10.0.2.3:18083
  controller: SATA
#!/bin/bash
#
# Define sets of swarm manager and worker nodes
#
# Newline-separated node name lists: MANAGERS="manager1", WORKERS="worker1\nworker2".
# $(...) used instead of legacy backticks (nestable, easier to read).
MANAGERS=$(seq 1 | xargs -I{} echo manager{})
WORKERS=$(seq 2 | xargs -I{} echo worker{})
#
# Create manager nodes with STATIC IPs so that Swarm doesn't freak out
#
IP_BASE=192.168.99 # First 3 bytes
IP_LAST_BYTE=100 # Last byte
# Address of the lead manager (manager1) -- first IP in the assigned range
LEAD_MANAGER_IP=${IP_BASE}.${IP_LAST_BYTE}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment