GitLab Auto DevOps template
# This file is a template, and might need editing before it works on your project.
# Auto DevOps
# This CI/CD configuration provides a standard pipeline for
# * building a Docker image (using a buildpack if necessary),
# * storing the image in the container registry,
# * running tests from a buildpack,
# * running code quality analysis,
# * creating a review app for each topic branch,
# * and continuous deployment to production
#
# In order to deploy, you must have a Kubernetes cluster configured either
# via a project integration, or via group/project variables.
# AUTO_DEVOPS_DOMAIN must also be set as a variable at the group or project
# level, or manually added below.
#
# If you want to deploy to staging first, or enable canary deploys,
# uncomment the relevant jobs in the pipeline below.
#
# If Auto DevOps fails to detect the proper buildpack, or if you want to
# specify a custom buildpack, set a project variable `BUILDPACK_URL` to the
# repository URL of the buildpack.
# e.g. BUILDPACK_URL=https://github.com/heroku/heroku-buildpack-ruby.git#v142
# If you need multiple buildpacks, add a file to your project called
# `.buildpacks` that contains the URLs, one on each line, in order.
# Note: Auto CI does not work with multiple buildpacks yet
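#
# Illustrative example (not part of the upstream template): a `.buildpacks`
# file for an app that needs both the Node.js and Ruby buildpacks could list
# them in build order, one URL per line:
#
#   https://github.com/heroku/heroku-buildpack-nodejs.git
#   https://github.com/heroku/heroku-buildpack-ruby.git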

image: alpine:latest

variables:
  # AUTO_DEVOPS_DOMAIN is the application deployment domain and should be set as a variable at the group or project level.
  # AUTO_DEVOPS_DOMAIN: domain.example.com

  POSTGRES_USER: user
  POSTGRES_PASSWORD: testing-password
  POSTGRES_ENABLED: "true"
  POSTGRES_DB: $CI_ENVIRONMENT_SLUG

  KUBERNETES_VERSION: 1.8.6
  HELM_VERSION: 2.6.1

  CODECLIMATE_VERSION: 0.69.0

stages:
  - build
  - test
  - review
  - dast
  - staging
  - canary
  - production
  - performance
  - cleanup

build:
  stage: build
  image: docker:git
  services:
    - docker:dind
  variables:
    DOCKER_DRIVER: overlay2
  script:
    - setup_docker
    - build
  only:
    - branches

test:
  services:
    - postgres:latest
  variables:
    POSTGRES_DB: test
  stage: test
  image: gliderlabs/herokuish:latest
  script:
    - setup_test_db
    - cp -R . /tmp/app
    - /bin/herokuish buildpack test
  only:
    - branches

codequality:
  image: docker:latest
  variables:
    DOCKER_DRIVER: overlay2
  allow_failure: true
  services:
    - docker:dind
  script:
    - setup_docker
    - codeclimate
  artifacts:
    paths: [codeclimate.json]

performance:
  stage: performance
  image: docker:latest
  variables:
    DOCKER_DRIVER: overlay2
  allow_failure: true
  services:
    - docker:dind
  script:
    - setup_docker
    - performance
  artifacts:
    paths:
      - performance.json
  only:
    refs:
      - branches
    kubernetes: active

sast:
  image: registry.gitlab.com/gitlab-org/gl-sast:latest
  variables:
    POSTGRES_DB: "false"
  allow_failure: true
  script:
    - sast .
  artifacts:
    paths: [gl-sast-report.json]

sast:container:
  image: docker:latest
  variables:
    DOCKER_DRIVER: overlay2
  allow_failure: true
  services:
    - docker:dind
  script:
    - setup_docker
    - sast_container
  artifacts:
    paths: [gl-sast-container-report.json]

dast:
  stage: dast
  allow_failure: true
  image: owasp/zap2docker-stable
  variables:
    POSTGRES_DB: "false"
  script:
    - dast
  artifacts:
    paths: [gl-dast-report.json]
  only:
    refs:
      - branches
    kubernetes: active
  except:
    - master

review:
  stage: review
  script:
    - check_kube_domain
    - install_dependencies
    - download_chart
    - ensure_namespace
    - install_tiller
    - create_secret
    - deploy
    - persist_environment_url
  environment:
    name: review/$CI_COMMIT_REF_NAME
    url: http://$CI_PROJECT_PATH_SLUG-$CI_ENVIRONMENT_SLUG.$AUTO_DEVOPS_DOMAIN
    on_stop: stop_review
  artifacts:
    paths: [environment_url.txt]
  only:
    refs:
      - branches
    kubernetes: active
  except:
    - master

stop_review:
  stage: cleanup
  variables:
    GIT_STRATEGY: none
  script:
    - install_dependencies
    - delete
  environment:
    name: review/$CI_COMMIT_REF_NAME
    action: stop
  when: manual
  allow_failure: true
  only:
    refs:
      - branches
    kubernetes: active
  except:
    - master

# Keys that start with a dot (.) will not be processed by GitLab CI.
# Staging and canary jobs are disabled by default; to enable them,
# remove the dot (.) before the job name.
# https://docs.gitlab.com/ee/ci/yaml/README.html#hidden-keys

# Staging deploys are disabled by default since
# continuous deployment to production is enabled by default.
# If you prefer to automatically deploy to staging and
# only manually promote to production, enable this job by removing the dot (.),
# and uncomment the `when: manual` line in the `production` job.
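#
# Illustrative sketch (not applied here) of what that change looks like:
#
#   staging:            # dot removed, the job now runs on master
#     stage: staging
#     ...
#
#   production:
#     ...
#     when: manual      # uncommented, production becomes a manual promotion step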

.staging:
  stage: staging
  script:
    - check_kube_domain
    - install_dependencies
    - download_chart
    - ensure_namespace
    - install_tiller
    - create_secret
    - deploy
  environment:
    name: staging
    url: http://$CI_PROJECT_PATH_SLUG-staging.$AUTO_DEVOPS_DOMAIN
  only:
    refs:
      - master
    kubernetes: active

# Canaries are disabled by default, but if you want them,
# and know what the downsides are, enable this job by removing the dot (.),
# and uncomment the `when: manual` line in the `production` job.
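#
# Note (added for clarity; see the `deploy` function below): `deploy canary`
# installs a second Helm release named `$CI_ENVIRONMENT_SLUG-canary`. The canary
# track gets no database of its own (it reuses the stable one) and no separate
# Service, so the chart is expected to route a share of production traffic to it.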

.canary:
  stage: canary
  script:
    - check_kube_domain
    - install_dependencies
    - download_chart
    - ensure_namespace
    - install_tiller
    - create_secret
    - deploy canary
  environment:
    name: production
    url: http://$CI_PROJECT_PATH_SLUG.$AUTO_DEVOPS_DOMAIN
  when: manual
  only:
    refs:
      - master
    kubernetes: active

# This job continuously deploys to production on every push to `master`.
# To make this a manual process, either because you're enabling `staging`
# or `canary` deploys, or you simply want more control over when you deploy
# to production, uncomment the `when: manual` line in the `production` job.
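#
# Note (added for clarity): the `delete canary` step below removes the
# `$CI_ENVIRONMENT_SLUG-canary` release, if one exists, once the stable
# production deploy has succeeded.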

production:
  stage: production
  script:
    - check_kube_domain
    - install_dependencies
    - download_chart
    - ensure_namespace
    - install_tiller
    - create_secret
    - deploy
    - delete canary
    - persist_environment_url
  environment:
    name: production
    url: http://$CI_PROJECT_PATH_SLUG.$AUTO_DEVOPS_DOMAIN
  artifacts:
    paths: [environment_url.txt]
#  when: manual
  only:
    refs:
      - master
    kubernetes: active

# ---------------------------------------------------------------------------

.auto_devops: &auto_devops |
  # Auto DevOps variables and functions
  [[ "$TRACE" ]] && set -x
  auto_database_url=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${CI_ENVIRONMENT_SLUG}-postgres:5432/${POSTGRES_DB}
  export DATABASE_URL=${DATABASE_URL-$auto_database_url}
  export CI_APPLICATION_REPOSITORY=$CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG
  export CI_APPLICATION_TAG=$CI_COMMIT_SHA
  export CI_CONTAINER_NAME=ci_job_build_${CI_JOB_ID}
  export TILLER_NAMESPACE=$KUBE_NAMESPACE
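  # Worked example (added for clarity; values are hypothetical): for a project at
  # registry.gitlab.com/group/app on branch feature-x, the image is pushed as
  # registry.gitlab.com/group/app/feature-x:<commit SHA>, and DATABASE_URL falls
  # back to postgres://user:testing-password@<env-slug>-postgres:5432/<env-slug>
  # unless a DATABASE_URL variable is provided explicitly.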

  function sast_container() {
    docker run -d --name db arminc/clair-db:latest
    docker run -p 6060:6060 --link db:postgres -d --name clair arminc/clair-local-scan:v2.0.1
    apk add -U wget ca-certificates
    docker pull ${CI_APPLICATION_REPOSITORY}:${CI_APPLICATION_TAG}
    wget https://github.com/arminc/clair-scanner/releases/download/v8/clair-scanner_linux_amd64
    mv clair-scanner_linux_amd64 clair-scanner
    chmod +x clair-scanner
    touch clair-whitelist.yml
    ./clair-scanner -c http://docker:6060 --ip $(hostname -i) -r gl-sast-container-report.json -l clair.log -w clair-whitelist.yml ${CI_APPLICATION_REPOSITORY}:${CI_APPLICATION_TAG} || true
  }

  function codeclimate() {
    cc_opts="--env CODECLIMATE_CODE="$PWD" \
             --volume "$PWD":/code \
             --volume /var/run/docker.sock:/var/run/docker.sock \
             --volume /tmp/cc:/tmp/cc"

    docker run ${cc_opts} "codeclimate/codeclimate:${CODECLIMATE_VERSION}" init
    docker run ${cc_opts} "codeclimate/codeclimate:${CODECLIMATE_VERSION}" analyze -f json > codeclimate.json
  }

  function sast() {
    case "$CI_SERVER_VERSION" in
      *-ee)
        /app/bin/run "$@"
        ;;
      *)
        echo "GitLab EE is required"
        ;;
    esac
  }

  function deploy() {
    track="${1-stable}"
    name="$CI_ENVIRONMENT_SLUG"

    if [[ "$track" != "stable" ]]; then
      name="$name-$track"
    fi

    replicas="1"
    service_enabled="false"
    postgres_enabled="$POSTGRES_ENABLED"
    # canary uses the stable track's database
    [[ "$track" == "canary" ]] && postgres_enabled="false"

    env_track=$( echo $track | tr -s '[:lower:]' '[:upper:]' )
    env_slug=$( echo ${CI_ENVIRONMENT_SLUG//-/_} | tr -s '[:lower:]' '[:upper:]' )

    if [[ "$track" == "stable" ]]; then
      # for the stable track, get the number of replicas from `<ENV>_REPLICAS`, e.g. `PRODUCTION_REPLICAS`
      eval new_replicas=\$${env_slug}_REPLICAS
      service_enabled="true"
    else
      # for other tracks, get the number of replicas from `<TRACK>_<ENV>_REPLICAS`, e.g. `CANARY_PRODUCTION_REPLICAS`
      eval new_replicas=\$${env_track}_${env_slug}_REPLICAS
    fi
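
    # Example (added for clarity; variable names follow the pattern above): setting a
    # CI variable PRODUCTION_REPLICAS=3 scales the stable production release to three
    # pods, while CANARY_PRODUCTION_REPLICAS=1 controls the canary track.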
    if [[ -n "$new_replicas" ]]; then
      replicas="$new_replicas"
    fi

    helm upgrade --install \
      --wait \
      --set service.enabled="$service_enabled" \
      --set releaseOverride="$CI_ENVIRONMENT_SLUG" \
      --set image.repository="$CI_APPLICATION_REPOSITORY" \
      --set image.tag="$CI_APPLICATION_TAG" \
      --set image.pullPolicy=IfNotPresent \
      --set application.track="$track" \
      --set application.database_url="$DATABASE_URL" \
      --set service.url="$CI_ENVIRONMENT_URL" \
      --set replicaCount="$replicas" \
      --set postgresql.enabled="$postgres_enabled" \
      --set postgresql.nameOverride="postgres" \
      --set postgresql.postgresUser="$POSTGRES_USER" \
      --set postgresql.postgresPassword="$POSTGRES_PASSWORD" \
      --set postgresql.postgresDatabase="$POSTGRES_DB" \
      --namespace="$KUBE_NAMESPACE" \
      --version="$CI_PIPELINE_ID-$CI_JOB_ID" \
      "$name" \
      chart/
  }

  function install_dependencies() {
    apk add -U openssl curl tar gzip bash ca-certificates git
    wget -q -O /etc/apk/keys/sgerrand.rsa.pub https://raw.githubusercontent.com/sgerrand/alpine-pkg-glibc/master/sgerrand.rsa.pub
    wget https://github.com/sgerrand/alpine-pkg-glibc/releases/download/2.23-r3/glibc-2.23-r3.apk
    apk add glibc-2.23-r3.apk
    rm glibc-2.23-r3.apk

    curl "https://kubernetes-helm.storage.googleapis.com/helm-v${HELM_VERSION}-linux-amd64.tar.gz" | tar zx
    mv linux-amd64/helm /usr/bin/
    helm version --client

    curl -L -o /usr/bin/kubectl "https://storage.googleapis.com/kubernetes-release/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"
    chmod +x /usr/bin/kubectl
    kubectl version --client
  }

  function setup_docker() {
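    # Added note: when the job runs on a Kubernetes executor, the docker:dind service
    # shares the pod network, so the Docker client is pointed at tcp://localhost:2375
    # whenever DOCKER_HOST was not set explicitly (KUBERNETES_PORT being set is used
    # here as the hint that we are on that executor).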
    if ! docker info &>/dev/null; then
      if [ -z "$DOCKER_HOST" -a "$KUBERNETES_PORT" ]; then
        export DOCKER_HOST='tcp://localhost:2375'
      fi
    fi
  }

  function setup_test_db() {
    if [ -z ${KUBERNETES_PORT+x} ]; then
      DB_HOST=postgres
    else
      DB_HOST=localhost
    fi
    export DATABASE_URL="postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${DB_HOST}:5432/${POSTGRES_DB}"
  }
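  # Worked example (added for clarity): with the variables defined at the top of this
  # file, the `test` job on a Kubernetes executor ends up with
  # DATABASE_URL=postgres://user:testing-password@localhost:5432/test, while on other
  # executors the postgres service is reachable via the `postgres` host name instead.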

  function download_chart() {
    if [[ ! -d chart ]]; then
      auto_chart=${AUTO_DEVOPS_CHART:-gitlab/auto-deploy-app}
      auto_chart_name=$(basename $auto_chart)
      auto_chart_name=${auto_chart_name%.tgz}
    else
      auto_chart="chart"
      auto_chart_name="chart"
    fi

    helm init --client-only
    helm repo add gitlab https://charts.gitlab.io
    if [[ ! -d "$auto_chart" ]]; then
      helm fetch ${auto_chart} --untar
    fi
    if [ "$auto_chart_name" != "chart" ]; then
      mv ${auto_chart_name} chart
    fi

    helm dependency update chart/
    helm dependency build chart/
  }

  function ensure_namespace() {
    kubectl describe namespace "$KUBE_NAMESPACE" || kubectl create namespace "$KUBE_NAMESPACE"
  }

  function check_kube_domain() {
    if [ -z ${AUTO_DEVOPS_DOMAIN+x} ]; then
      echo "In order to deploy or use Review Apps, the AUTO_DEVOPS_DOMAIN variable must be set"
      echo "You can set it in the Auto DevOps project settings, define it as a secret variable at the group or project level,"
      echo "or add it manually in .gitlab-ci.yml"
      false
    else
      true
    fi
  }

  function build() {
    if [[ -n "$CI_REGISTRY_USER" ]]; then
      echo "Logging in to GitLab Container Registry with CI credentials..."
      docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" "$CI_REGISTRY"
      echo ""
    fi

    if [[ -f Dockerfile ]]; then
      echo "Building Dockerfile-based application..."
      docker build -t "$CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG" .
    else
      echo "Building Heroku-based application using gliderlabs/herokuish docker image..."
      docker run -i --name="$CI_CONTAINER_NAME" -v "$(pwd):/tmp/app:ro" gliderlabs/herokuish /bin/herokuish buildpack build
      docker commit "$CI_CONTAINER_NAME" "$CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG"
      docker rm "$CI_CONTAINER_NAME" >/dev/null
      echo ""

      echo "Configuring $CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG docker image..."
      docker create --expose 5000 --env PORT=5000 --name="$CI_CONTAINER_NAME" "$CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG" /bin/herokuish procfile start web
      docker commit "$CI_CONTAINER_NAME" "$CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG"
      docker rm "$CI_CONTAINER_NAME" >/dev/null
      echo ""
    fi

    echo "Pushing to GitLab Container Registry..."
    docker push "$CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG"
    echo ""
  }

  function install_tiller() {
    echo "Checking Tiller..."
    helm init --upgrade
    kubectl rollout status -n "$TILLER_NAMESPACE" -w "deployment/tiller-deploy"
    if ! helm version --debug; then
      echo "Failed to init Tiller."
      return 1
    fi
    echo ""
  }

  function create_secret() {
    echo "Create secret..."
    kubectl create secret -n "$KUBE_NAMESPACE" \
      docker-registry gitlab-registry \
      --docker-server="$CI_REGISTRY" \
      --docker-username="$CI_REGISTRY_USER" \
      --docker-password="$CI_REGISTRY_PASSWORD" \
      --docker-email="$GITLAB_USER_EMAIL" \
      -o yaml --dry-run | kubectl replace -n "$KUBE_NAMESPACE" --force -f -
  }

  function dast() {
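    # Added note: environment_url.txt is written by `persist_environment_url` in the
    # `review` job and handed to this job through the artifacts mechanism.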
    export CI_ENVIRONMENT_URL=$(cat environment_url.txt)

    mkdir /zap/wrk/
    /zap/zap-baseline.py -J gl-dast-report.json -t "$CI_ENVIRONMENT_URL" || true
    cp /zap/wrk/gl-dast-report.json .
  }

  function performance() {
    export CI_ENVIRONMENT_URL=$(cat environment_url.txt)

    mkdir gitlab-exporter
    wget -O gitlab-exporter/index.js https://gitlab.com/gitlab-org/gl-performance/raw/10-3/index.js

    mkdir sitespeed-results

    if [ -f .gitlab-urls.txt ]
    then
      sed -i -e 's@^@'"$CI_ENVIRONMENT_URL"'@' .gitlab-urls.txt
      docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io sitespeedio/sitespeed.io:6.0.3 --plugins.add ./gitlab-exporter --outputFolder sitespeed-results .gitlab-urls.txt
    else
      docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io sitespeedio/sitespeed.io:6.0.3 --plugins.add ./gitlab-exporter --outputFolder sitespeed-results "$CI_ENVIRONMENT_URL"
    fi

    mv sitespeed-results/data/performance.json performance.json
  }

  function persist_environment_url() {
    echo $CI_ENVIRONMENT_URL > environment_url.txt
  }

  function delete() {
    track="${1-stable}"
    name="$CI_ENVIRONMENT_SLUG"

    if [[ "$track" != "stable" ]]; then
      name="$name-$track"
    fi

    if [[ -n "$(helm ls -q "^$name$")" ]]; then
      helm delete "$name"
    fi
  }

before_script:
  - *auto_devops