#!/usr/bin/env bash
#
# This script ports deployed Helm v2 packages from k8s API v1.15 to 1.16
#
# When upgrading k8s clusters to a new minor API version it is possible that
# already deprecated API endpoints are removed, e.g.,
# extensions/v1beta1/DaemonSet
# K8s handles this transparently for already deployed resources from the
# deprecated paths, making them available via their new API. They are no longer
# available through the removed paths though.
# Unfortunately, already deployed Helm v2 packages can no longer be upgraded when
# this happens. This is due to the nature of the stored deployment information.
#
# Helm v2 packages in deployed state are stored as k8s ConfigMaps in tiller's
# namespace. The ConfigMap contains a base64-encoded, gzipped protobuf message
# with the package metadata, e.g., NOTES and template files, the contents of the
# Chart.yaml file and all default and additionally provided values.
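#
# For example, the ConfigMaps storing a release's revisions can be listed like
# this (hypothetical release name "my-release", tiller running in kube-system):
#   kubectl get configmaps --namespace kube-system \
#     --selector "NAME=my-release" --show-labels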
#
# Because the templates of a package are stored in its deployment information,
# Helm v2 needs to recalculate the actual resources generated by it (called a
# manifest) whenever status information is requested about it, e.g., when
# running helm get manifest. Because constructing the resources goes through the
# k8s API, this will fail if the Helm chart template contains deprecated API paths.
# The resulting error will look similar to this:
#
# UPGRADE FAILED
# Error: failed decoding reader into objects: [unable to recognize "": no matches for kind "Deployment" in version "extensions/v1beta1", unable to recognize "": no matches for kind "StatefulSet" in version "apps/v1beta2"]
# Error: UPGRADE FAILED: failed decoding reader into objects: [unable to recognize "": no matches for kind "Deployment" in version "extensions/v1beta1", unable to recognize "": no matches for kind "StatefulSet" in version "apps/v1beta2"]
#
# This is Helm's (and k8s's) way of telling you that there is no object under
# the requested API path, because that path no longer exists.
#
# This can be fixed either by upgrading the Helm chart before upgrading k8s
# with a breaking API change, or by downgrading k8s, upgrading the chart and
# then upgrading k8s again.
#
##############################################################################
#
# Or you could just change the existing deployment information so it produces
# a valid manifest again and then upgrade the Helm chart.
#
# To do this, you need to download the ConfigMap that stores your Helm
# deployment. Then you need to extract the deployment data, decode the base64
# blob, gunzip it and, last but not least, decode the contained protobuf.
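#
# For example (hypothetical release name "my-release", tiller in kube-system;
# helmprotod is the protoc decode alias defined further down):
#   kubectl get configmaps --namespace kube-system \
#     --selector "NAME=my-release,STATUS=DEPLOYED" \
#     --output jsonpath='{ .items[0].data.release }' \
#     | base64 --decode | gunzip | helmprotod > my-release_data.yaml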
#
# Now you can edit the data and replace changed API paths or add missing
# attributes.
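#
# A single replacement could look roughly like this (hypothetical file name;
# _replace_removed_k8s_1_15_APIs below does this for a whole list of paths):
#   sed -i -e 's|\\napiVersion: extensions/v1beta1\\nkind: Deployment|\\napiVersion: apps/v1\\nkind: Deployment|g' my-release_data.yaml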
#
# Afterwards you need to re-encode, gzip and base64-encode the data, and
# integrate it into the ConfigMap again.
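#
# Roughly (helmprotoe is the protoc encode alias defined further down; the
# file name is hypothetical):
#   helmprotoe < my-release_data.yaml | gzip | base64 --wrap 0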
#
# Finally you can kubectl apply the changed ConfigMap and Helm will now use
# the new APIs. You can check via: helm get manifest $YOURDEPLOYMENTNAMEHERE
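#
# For example (hypothetical names, mirroring _apply_patched_config and
# _show_patched_manifest below):
#   kubectl apply --namespace kube-system --filename my-release_latest_helm_configmap.yaml
#   helm get manifest my-release --tiller-namespace kube-system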
###############################################################################
# Directory we do all the hacky file manipulation in. Change it to your liking.
HM_WORKDIR="${HOME}/.helmmigration"
readonly HM_WORKDIR
# You need the specific protobuf schema of Helm to decode the message content.
# It is part of the GitHub helm repo, so you need to clone that somewhere local:
# git clone https://github.com/helm/helm.git
# NOTE: You need to check out the Helm v2 branch/tag matching your tiller
# deployment, e.g. v2.16.1. Otherwise the proto definitions won't be there
# (they were removed with Helm v3).
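# For example, assuming the clone lives at ~/repos/helm (matching the default
# path below):
#   git -C "${HOME}/repos/helm" checkout v2.16.1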
HELM_PROTOBUF_SCHEMA="${HOME}/repos/helm/_proto/"
readonly HELM_PROTOBUF_SCHEMA
# You also need the protobuf base schema, so clone that as well:
# git clone https://github.com/google/protobuf
PROTOBUF_SCHEMA="${HOME}/repos/protobuf/src"
readonly PROTOBUF_SCHEMA
# Hacky workaround for macOS, because we really want to use GNU tools there
SED_BIN="$(command -v gsed || command -v sed)"
readonly SED_BIN
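# (Assumption: on macOS, GNU sed is expected to be available as gsed, e.g.
# installed via Homebrew: brew install gnu-sed)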
# We need these to do our work
HM_DEPENDENCIES=(git kubectl helm gzip gunzip base64 protoc)
readonly HM_DEPENDENCIES
shopt -s expand_aliases
shopt -s extglob
# Decode a stored hapi.release.Release protobuf message to plaintext
# shellcheck disable=SC2139
alias helmprotod="protoc --proto_path ${HELM_PROTOBUF_SCHEMA} --proto_path ${PROTOBUF_SCHEMA} --decode hapi.release.Release ${HELM_PROTOBUF_SCHEMA}/hapi/**/*"
# Encode the edited plaintext back into a hapi.release.Release protobuf message
# shellcheck disable=SC2139
alias helmprotoe="protoc --proto_path ${HELM_PROTOBUF_SCHEMA} --proto_path ${PROTOBUF_SCHEMA} --encode hapi.release.Release ${HELM_PROTOBUF_SCHEMA}/hapi/**/*"
_test_dependencies() {
  printf "Testing if all dependencies are installed ...\n"
  for dep in "${HM_DEPENDENCIES[@]}"; do
    command -v "${dep}" || { printf "Missing dependency %s\n" "${dep}" && exit 1; }
  done
}
_usage() {
  cat << EOF
Usage: ${0} <helm_deployment_name> [tiller-namespace]
EOF
}
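# Example invocation (hypothetical script file name):
#   ./helm-v2-api-migration.sh my-release kube-system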
_get_configmap() {
  # We make a backup of the original config, in case anything goes wrong.
  # You can restore it by running kubectl apply -f on the file below.
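  # For example:
  #   kubectl apply --namespace "${tiller_namespace}" \
  #     --filename "${HM_WORKDIR}/${helm_deployment_name}_latest_helm_configmap.yaml"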
  kubectl get configmaps \
    --namespace "${tiller_namespace}" \
    --selector "NAME=${helm_deployment_name},STATUS=DEPLOYED" \
    --output yaml \
    > "${HM_WORKDIR}/${helm_deployment_name}_latest_helm_configmap.yaml"
  test -s "${HM_WORKDIR}/${helm_deployment_name}_latest_helm_configmap.yaml" || exit 1
  _git_store 'Original Configmap Backup'
}
_get_configmap_data() {
  # extract the deployment data to plaintext
  kubectl get configmaps \
    --namespace "${tiller_namespace}" \
    --selector "NAME=${helm_deployment_name},STATUS=DEPLOYED" \
    --output jsonpath='{ .items[0].data.release }' \
    | base64 --decode \
    | gunzip \
    | helmprotod \
    > "${HM_WORKDIR}/${helm_deployment_name}_data.yaml"
  test -s "${HM_WORKDIR}/${helm_deployment_name}_data.yaml" || exit 1
  _git_store 'Original Configmap Data'
}
_initialize_workdir() {
  test -d "${HM_WORKDIR}" || mkdir --parents "${HM_WORKDIR}"
  cd "${HM_WORKDIR}" || exit 1
  git init
}
_git_store() {
  cd "${HM_WORKDIR}" || exit 1
  git add .
  git commit --message "${1}"
}
_replace_removed_k8s_1_15_APIs() {
  # In-place edit of the decoded release data; GNU sed (gsed on macOS) is assumed here.
  "${SED_BIN}" -i \
    -e 's|\\napiVersion: extensions/v1beta1\\nkind: Deployment|\\napiVersion: apps/v1\\nkind: Deployment|g' \
    -e 's|\\napiVersion: extensions/v1beta1\\nkind: DaemonSet|\\napiVersion: apps/v1\\nkind: DaemonSet|g' \
    -e 's|\\napiVersion: extensions/v1beta1\\nkind: StatefulSet|\\napiVersion: apps/v1\\nkind: StatefulSet|g' \
    -e 's|\\napiVersion: extensions/v1beta1\\nkind: ReplicaSet|\\napiVersion: apps/v1\\nkind: ReplicaSet|g' \
    -e 's|\\napiVersion: apps/v1beta2\\nkind: Deployment|\\napiVersion: apps/v1\\nkind: Deployment|g' \
    -e 's|\\napiVersion: apps/v1beta2\\nkind: DaemonSet|\\napiVersion: apps/v1\\nkind: DaemonSet|g' \
    -e 's|\\napiVersion: apps/v1beta2\\nkind: StatefulSet|\\napiVersion: apps/v1\\nkind: StatefulSet|g' \
    -e 's|\\napiVersion: apps/v1beta2\\nkind: ReplicaSet|\\napiVersion: apps/v1\\nkind: ReplicaSet|g' \
    -e 's|\\napiVersion: extensions/v1beta1\\nkind: NetworkPolicy|\\napiVersion: networking.k8s.io/v1\\nkind: NetworkPolicy|g' \
    -e 's|\\napiVersion: extensions/v1beta1\\nkind: PodSecurityPolicy|\\napiVersion: policy/v1beta1\\nkind: PodSecurityPolicy|g' \
    "${HM_WORKDIR}/${helm_deployment_name}_data.yaml"
  _git_store 'Patched Configmap Data' || { printf "Nothing to do! Exiting ...\n" && exit 1; }
}
_show_changes() {
  git diff HEAD^
  read -rp 'Press enter to continue or ctrl-c to abort'
}
_set_configmap_data() {
  patched_data="$(helmprotoe < "${helm_deployment_name}_data.yaml" | gzip | base64 --wrap 0)"
  readonly patched_data
  "${SED_BIN}" -Ei "s|^([ ]+)release:([ ]+).*$|\1release:\2${patched_data}|" "${helm_deployment_name}_latest_helm_configmap.yaml"
  _git_store 'Patched Configmap'
}
_apply_patched_config() {
  read -rp 'Press enter to apply changes or ctrl-c to abort'
  kubectl apply --dry-run \
    --namespace "${tiller_namespace}" \
    --filename "${helm_deployment_name}_latest_helm_configmap.yaml" \
    && kubectl apply \
      --namespace "${tiller_namespace}" \
      --filename "${helm_deployment_name}_latest_helm_configmap.yaml"
}
_show_patched_manifest() {
  helm get manifest "${helm_deployment_name}" --tiller-namespace "${tiller_namespace}"
}
main() {
  declare helm_deployment_name
  declare tiller_namespace
  helm_deployment_name="${1?"No Helm deployment name provided to ${FUNCNAME[0]}"}"
  tiller_namespace="${2:-kube-system}"
  printf "Welcome to the helm deployment k8s API migration. We will now start looking for your helm deployment. Enjoy the ride.\n"
  _test_dependencies
  _initialize_workdir
  _get_configmap
  _get_configmap_data
  _replace_removed_k8s_1_15_APIs
  _show_changes
  _set_configmap_data
  _apply_patched_config
  _show_patched_manifest
}
trap _usage ERR
[[ "${BASH_SOURCE[0]}" == "${0}" ]] && main "${@}"