-
-
Save Nastradamus/b7499af5d25a23a6a75fd8eb0ffa84ce to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env bash | |
# | |
# This script ports deployed Helm v2 Packages from k8s API v1.15 to 1.16 | |
# | |
# When upgrading K8s clusters to a new minor API version it is possible that | |
# already deprecated API Endpoints are removed e.g, | |
# extensions/v1beta1/DaemonSet | |
# K8s handles this transparently for already deployed resources from the
# deprecated paths, making them available via their new API. They are no longer
# available through the removed paths though.
# Unfortunately deployed Helm v2 Packages can no longer be upgraded when this | |
# happens. This is due to the nature of the stored deployment information. | |
# | |
# Helm v2 packages in deployed state are stored as k8s configmaps in the tillers | |
# namespace. The ConfigMap contains a base64 encoded, gzipped protobuf message | |
# with the package metadata e.g., NOTES and template files, the contents of the | |
# chart.yaml file and all default and additionally provided values. | |
# | |
# Because the templates of a package are stored in its deployment information, | |
# helm v2 needs to recalculate the actual resources generated by it (called a
# manifest) whenever status information is requested about it, e.g., when
# running helm get manifest. Because the resources are constructed using the k8s
# API, this will fail if the helm chart templates contain deprecated API paths.
# The resulting error will look similar to this: | |
# | |
# UPGRADE FAILED | |
# Error: failed decoding reader into objects: [unable to recognize "": no matches for kind "Deployment" in version "extensions/v1beta1", unable to recognize "": no matches for kind "StatefulSet" in version "apps/v1beta2"] | |
# Error: UPGRADE FAILED: failed decoding reader into objects: [unable to recognize "": no matches for kind "Deployment" in version "extensions/v1beta1", unable to recognize "": no matches for kind "StatefulSet" in version "apps/v1beta2"] | |
# | |
# This is helm/k8s way of telling you that there is no object under the
# requested API path, because that path no longer exists.
# | |
# This can be fixed either by upgrading the helm chart before upgrading k8s | |
# with a breaking API change or by downgrading k8s, upgrading the chart and | |
# then upgrading k8s again. | |
# | |
# Or you could just change the existing deployment information so they produce | |
# a valid manifest again and then upgrade the helm chart. | |
# | |
# To do this, you need to download the configmap that stores your helm
# deployment. Then you need to extract the deployment data, decode the base64
# blob, unzip it and last but not least decode the contained protobuf. Then you
# can edit the changed api paths and re-encode, pack, base64-encode and
# integrate it into the configmap again.
# | |
# Afterwards you can kubectl apply the changed configmap; helm will now use
# the new APIs. You can check via helm get manifest $YOURDEPLOYMENTNAMEHERE
############################################################################### | |
# Scratch/working directory; it also becomes a git repo so every migration
# step can be committed, diffed and rolled back.
HM_WORKDIR="${HOME}/.helmmigration"
readonly HM_WORKDIR
# You need the specific protobuf schema of helm to decode the message content
# it is part of the github helm repo, so you need to clone that somewhere local.
# git clone https://github.com/helm/helm.git
# NOTE: You need to checkout the corresponding helm v2 branch of your tiller
# deployment, e.g. v2.16.1 Otherwise the headers won't be there (they were
# removed with helmv3)
HELM_PROTOBUF_SCHEMA="${HOME}/repos/helm/_proto/"
readonly HELM_PROTOBUF_SCHEMA
# You also need the protobuf base schema, so clone that as well
# git clone https://github.com/google/protobuf
PROTOBUF_SCHEMA="${HOME}/repos/protobuf/src"
readonly PROTOBUF_SCHEMA
# Hacky workaround for MacOS, because we really want to use gnu tools there
# (prefer gsed, fall back to the system sed).
SED_BIN="$(type -fp gsed || type -fp sed)"
readonly SED_BIN
# We need these to do our work
HM_DEPENDENCIES=(git kubectl helm gunzip base64 protoc)
readonly HM_DEPENDENCIES
# Aliases are disabled in non-interactive shells by default; enable them so
# the helmproto* shortcuts below also work inside functions.
shopt -s expand_aliases
shopt -s extglob
# Decode / encode a hapi.release.Release protobuf message via protoc using
# the two schema trees configured above.
# NOTE(review): the trailing **/* glob expands without globstar set, so it
# only matches the two-level hapi/<package>/<file> layout — confirm if the
# helm schema layout ever changes.
alias helmprotod="protoc -I ${HELM_PROTOBUF_SCHEMA} -I ${PROTOBUF_SCHEMA} --decode hapi.release.Release ${HELM_PROTOBUF_SCHEMA}/hapi/**/*"
alias helmprotoe="protoc -I ${HELM_PROTOBUF_SCHEMA} -I ${PROTOBUF_SCHEMA} --encode hapi.release.Release ${HELM_PROTOBUF_SCHEMA}/hapi/**/*"
_test_dependencies() {
  # Verify that every external tool this script relies on is available.
  # Reads the global HM_DEPENDENCIES array; exits 1 on the first missing tool.
  printf "Testing if all dependencies are installed ...\n"
  local dep
  for dep in "${HM_DEPENDENCIES[@]}"; do
    # type -fp prints the resolved path (kept on stdout, as before) and fails
    # when the command cannot be found. Fix: error message gained the missing
    # trailing newline and now goes to stderr.
    type -fp "${dep}" || { printf "Missing %s\n" "${dep}" >&2; exit 1; }
  done
}
_usage() {
  # Print a short usage summary. ${0} (the script path) is expanded inside
  # the heredoc. Fix: the old text ended in a literal "\n" — heredocs do not
  # interpret backslash escapes, so it was printed verbatim.
  cat << EOF
Usage: ${0} <helm_deployment_name> [tiller-namespace]
EOF
}
_get_configmap() {
  # Back up the DEPLOYED helm release configmap to the workdir so the original
  # state can be restored later with: kubectl apply -f <backup file>
  local backup_file="${HM_WORKDIR}/${helm_deployment_name}_latest_helm_configmap.yaml"
  kubectl get configmaps \
    -n "${tiller_namespace}" \
    -l "NAME=${helm_deployment_name},STATUS=DEPLOYED" \
    -o yaml \
    > "${backup_file}"
  # Abort when nothing was fetched (an empty file means no matching configmap).
  test -s "${backup_file}" || exit 1
  _git_store 'Original Configmap Backup'
}
_get_configmap_data() {
  # Extract the release payload from the configmap and turn it into plaintext:
  # jsonpath -> base64 decode -> gunzip -> protobuf decode (helmprotod alias).
  local data_file="${HM_WORKDIR}/${helm_deployment_name}_data.yaml"
  kubectl get configmaps \
    -n "${tiller_namespace}" \
    -l "NAME=${helm_deployment_name},STATUS=DEPLOYED" \
    -o jsonpath='{ .items[0].data.release }' \
    | base64 --decode \
    | gunzip \
    | helmprotod \
    > "${data_file}"
  # Bail out when decoding produced no data.
  test -s "${data_file}" || exit 1
  _git_store 'Original Configmap Data'
}
_initialize_workdir() {
  # Create the scratch directory if needed, enter it, and start a git repo
  # there so each migration stage can be committed and diffed.
  if [[ ! -d "${HM_WORKDIR}" ]]; then
    mkdir "${HM_WORKDIR}"
  fi
  cd "${HM_WORKDIR}" || exit 1
  git init
}
_git_store() {
  # Stage everything in the workdir and record it as a commit labelled with
  # $1, so every migration step can be reviewed and reverted via git.
  # Returns git commit's status (non-zero when there is nothing to commit).
  local message="${1}"
  git add .
  git commit -m "${message}"
}
_replace_removed_k8s_1_15_APIs() {
  # Rewrite API paths removed in k8s 1.16 to their replacements inside the
  # decoded release data. The manifests are embedded as single-line strings,
  # so the patterns match literal two-character "\n" sequences, not newlines.
  # Bug fix: the original passed `-i ''` (BSD sed syntax), but SED_BIN
  # deliberately prefers GNU gsed/sed, which treats the separate '' as an
  # input filename and fails — plain -i is the GNU in-place form.
  ${SED_BIN} -i \
  -e 's|\\napiVersion: extensions/v1beta1\\nkind: Deployment|\\napiVersion: apps/v1\\nkind: Deployment|g' \
  -e 's|\\napiVersion: extensions/v1beta1\\nkind: DaemonSet|\\napiVersion: apps/v1\\nkind: DaemonSet|g' \
  -e 's|\\napiVersion: extensions/v1beta1\\nkind: StatefulSet|\\napiVersion: apps/v1\\nkind: StatefulSet|g' \
  -e 's|\\napiVersion: extensions/v1beta1\\nkind: ReplicaSet|\\napiVersion: apps/v1\\nkind: ReplicaSet|g' \
  -e 's|\\napiVersion: apps/v1beta2\\nkind: Deployment|\\napiVersion: apps/v1\\nkind: Deployment|g' \
  -e 's|\\napiVersion: apps/v1beta2\\nkind: DaemonSet|\\napiVersion: apps/v1\\nkind: DaemonSet|g' \
  -e 's|\\napiVersion: apps/v1beta2\\nkind: StatefulSet|\\napiVersion: apps/v1\\nkind: StatefulSet|g' \
  -e 's|\\napiVersion: apps/v1beta2\\nkind: ReplicaSet|\\napiVersion: apps/v1\\nkind: ReplicaSet|g' \
  -e 's|\\napiVersion: extensions/v1beta1\\nkind: NetworkPolicy|\\napiVersion: networking.k8s.io/v1\\nkind: NetworkPolicy|g' \
  -e 's|\\napiVersion: extensions/v1beta1\\nkind: PodSecurityPolicy|\\napiVersion: policy/v1beta1\\nkind: PodSecurityPolicy|g' \
  "${HM_WORKDIR}/${helm_deployment_name}_data.yaml"
  # git commit fails when sed changed nothing, i.e. there was no work to do.
  _git_store 'Patched Configmap Data' || { printf "Nothing to do! Exiting ...\n" && exit 1; }
}
_show_changes() {
  # Show what the API patching changed (diff against the previous commit),
  # then wait for the operator to confirm before continuing.
  git diff 'HEAD^'
  read -r -p 'Press enter to continue or ctrl-c to abort'
}
_set_configmap_data() {
  # Re-encode the patched plaintext (protobuf encode -> gzip -> base64) and
  # splice it back into the "release:" field of the backed-up configmap yaml.
  # Relies on the cwd being HM_WORKDIR (entered by _initialize_workdir).
  # NOTE(review): base64 -w0 is GNU-only; on macOS this needs coreutils'
  # base64 — confirm on BSD systems.
  patched_data="$(helmprotoe < "${helm_deployment_name}_data.yaml" | gzip | base64 -w0)"
  readonly patched_data
  # Fix: removed the unused (and malformed) json_patch variable — dead code.
  # Fix: `-Ei ''` is BSD syntax; SED_BIN prefers GNU sed/gsed, where the
  # separate '' would be treated as an input file, so use -E -i instead.
  ${SED_BIN} -E -i "s|^([ ]+)release:([ ]+).*$|\1release:\2${patched_data}|" "${helm_deployment_name}_latest_helm_configmap.yaml"
  _git_store 'Patched Configmap'
}
_apply_patched_config() {
  # Validate the patched configmap with a dry-run first, then apply it for
  # real; a final confirmation is required before anything touches the cluster.
  # Bug fix: the namespace was hard-coded to kube-system, silently ignoring
  # the tiller-namespace argument accepted by main — use ${tiller_namespace}.
  read -r -p 'Press enter to apply changes or ctrl-c to abort'
  kubectl apply --dry-run -f "${helm_deployment_name}_latest_helm_configmap.yaml" -n "${tiller_namespace}" \
    && kubectl apply -f "${helm_deployment_name}_latest_helm_configmap.yaml" -n "${tiller_namespace}"
}
_show_patched_manifest() {
  # Render the manifest from the (patched) stored release; succeeding here
  # confirms helm can decode the release against the new API paths.
  # Fix: pass the tiller namespace explicitly so deployments managed by a
  # tiller outside the default namespace are found too.
  helm get manifest "${helm_deployment_name}" --tiller-namespace "${tiller_namespace}"
}
main() {
  # Entry point: validate arguments, then run every migration stage in order.
  #   $1 - helm deployment name (required)
  #   $2 - tiller namespace (optional, defaults to kube-system)
  # These are dynamically scoped and read by the helper functions below.
  local helm_deployment_name
  local tiller_namespace
  # ${1?...} aborts with a message when no deployment name was supplied.
  helm_deployment_name="${1?"No Helm deployment name provided to ${FUNCNAME[0]}"}"
  tiller_namespace="${2:-kube-system}"
  printf "Welcome to the helm deployment k8s API migration. We will now start looking for your helm deployment. Enjoy the ride.\n"
  _test_dependencies
  _initialize_workdir
  _get_configmap
  _get_configmap_data
  _replace_removed_k8s_1_15_APIs
  _show_changes
  _set_configmap_data
  _apply_patched_config
  _show_patched_manifest
}
# Show usage information when a command fails (ERR condition).
# Bug fix: the trap referenced an undefined function "usage"; the function
# defined above is "_usage". Also use the canonical ERR spelling.
trap _usage ERR
# Run main only when the script is executed directly, not when sourced.
[[ ${BASH_SOURCE[0]} == "${0}" ]] && main "$@"
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment