-
-
Save zoidyzoidzoid/6af8c80cc5b706e2adcf25df3dc2f7e1 to your computer and use it in GitHub Desktop.
#!/usr/bin/env bash
# export.sh -- export every top-level Kubernetes resource in the current
# kubectl context to one YAML file per resource, named <name>.<Kind>.yaml.
# Requires: kubectl, jq, and jy (a JSON -> YAML converter).
set -e
# Skip replicasets and pods: they are owned by higher-level controllers,
# which get exported instead.
for full_name in $(kubectl get all -o name | grep -vE 'replicaset|pod'); do
    name="${full_name##*/}"   # part after the slash (resource name)
    type="${full_name%%/*}"   # part before the slash (resource type)
    # Map kubectl's lowercase resource types to proper manifest Kind names.
    case "${type}" in
        deployment*)
            type=Deployment
            ;;
        "service")
            type=Service
            ;;
        "configmap")
            type=ConfigMap
            ;;
        "job.batch")
            type=Job
            ;;
        "cronjob.batch")
            type=CronJob
            ;;
    esac
    filename="${name}.${type}.yaml"
    echo "Exporting ${filename}"
    # --export strips cluster-specific fields; selfLink survives it, so
    # delete that separately with jq before converting to YAML.
    kubectl get "$full_name" --export -o json | jq 'del(.metadata.selfLink)' | jy > "${filename}"
done
| # coding: utf-8 | |
| import json | |
| from subprocess import check_output | |
| import ruamel.yaml as yaml | |
def trim_labels(data):
    """Remove all labels from *data* (in place) except 'app' and 'k8s-app'.

    :param data: a metadata-style mapping that may contain a 'labels' dict.

    If the 'labels' mapping ends up empty it is removed entirely. Unlike
    the original, this is safe on a dict with no 'labels' key at all
    (the original raised KeyError on ``data['labels']`` in that case,
    and the callers routinely pass ``{}``).
    """
    labels = data.get('labels')
    if labels is None:
        return
    for key in list(labels):
        if key not in ('app', 'k8s-app'):
            del labels[key]
    if not labels:
        del data['labels']
def main():
    """Export every top-level resource from ``kubectl get all`` to YAML files.

    One file per resource, named ``<name>.<Kind>.yaml``. Resources owned by
    a controller (those with ownerReferences, e.g. pods and replicasets)
    are skipped -- their owner is exported instead. Cluster-managed
    metadata (uid, resourceVersion, status, ...) is stripped so the output
    can be re-applied cleanly.
    """

    def my_represent_none(self, data):
        # Emit an explicit `null` instead of ruamel.yaml's default empty scalar.
        return self.represent_scalar(u'tag:yaml.org,2002:null', u'null')

    yaml.RoundTripRepresenter.add_representer(type(None), my_represent_none)
    output = check_output('kubectl get all -o json'.split(' '))
    resources = json.loads(output)
    for resource in resources['items']:
        # Skip anything owned by a controller.
        if resource['metadata'].get('ownerReferences'):
            continue
        fn = '{}.{}.yaml'.format(resource['metadata']['name'], resource['kind'])
        resource['metadata'].pop('annotations', None)
        trim_labels(resource['metadata'])
        # Some kinds (e.g. ConfigMap) have no 'spec' at all; the original
        # resource['spec'] raised KeyError for them.
        spec = resource.get('spec', {})
        trim_labels(spec.get('template', {}).get('metadata', {}))
        trim_labels(spec.get('jobTemplate', {}).get('metadata', {}))
        trim_labels(spec.get('jobTemplate', {}).get('spec', {}).get('template', {}).get('metadata', {}))
        # Drop cluster-specific metadata so the manifest is portable.
        resource['metadata']['creationTimestamp'] = None
        for key in ('generation', 'namespace', 'resourceVersion', 'selfLink', 'uid'):
            resource['metadata'].pop(key, None)
        resource['status'] = {}
        with open(fn, 'w') as f:
            print('Exporting {}'.format(fn))
            f.write(yaml.round_trip_dump(resource))


if __name__ == '__main__':
    main()
| #!/usr/bin/env python3 | |
| # coding: utf-8 | |
| import json | |
| import sys | |
| from subprocess import check_output | |
def trim_labels(data):
    """Remove all labels from *data* (in place) except 'app' and 'k8s-app'.

    :param data: a metadata-style mapping that may contain a 'labels' dict.

    If the 'labels' mapping ends up empty it is removed entirely. Unlike
    the original, this is safe on a dict with no 'labels' key at all
    (the original raised KeyError on ``data['labels']`` in that case,
    and the callers routinely pass ``{}``).
    """
    labels = data.get('labels')
    if labels is None:
        return
    for key in list(labels):
        if key not in ('app', 'k8s-app'):
            del labels[key]
    if not labels:
        del data['labels']
def main():
    """Fetch the resources named on the command line and print cleaned JSON.

    Usage: ``kubectl-export <resource> [<resource> ...]`` (anything
    ``kubectl get`` accepts, e.g. ``deploy/nginx``). Controller-owned
    resources and cluster-managed metadata are stripped; pipe the output
    through a JSON->YAML converter (e.g. ``jy``) for a manifest file.
    Exits with status 1 when called without arguments.
    """
    args = sys.argv[1:]
    if not args:
        print('No arguments')
        sys.exit(1)
    # Reuse `args` (the original re-read sys.argv[1:] here).
    output = check_output('kubectl get {} -o json'.format(' '.join(args)).split(' '))
    resources = json.loads(output)
    for resource in resources['items']:
        # Skip anything owned by a controller.
        if resource['metadata'].get('ownerReferences'):
            continue
        resource['metadata'].pop('annotations', None)
        trim_labels(resource['metadata'])
        # Some kinds (e.g. ConfigMap) have no 'spec' at all; the original
        # resource['spec'] raised KeyError for them.
        spec = resource.get('spec', {})
        trim_labels(spec.get('template', {}).get('metadata', {}))
        trim_labels(spec.get('jobTemplate', {}).get('metadata', {}))
        trim_labels(spec.get('jobTemplate', {}).get('spec', {}).get('template', {}).get('metadata', {}))
        # Drop cluster-specific metadata so the manifest is portable.
        resource['metadata']['creationTimestamp'] = None
        for key in ('generation', 'namespace', 'resourceVersion', 'selfLink', 'uid'):
            resource['metadata'].pop(key, None)
        resource['status'] = {}
    print(json.dumps(resources, indent=2, separators=(',', ': ')))


if __name__ == '__main__':
    main()
| #!/usr/bin/env python3 | |
| # coding: utf-8 | |
| import sys | |
| import ruamel.yaml as yaml | |
def split_yaml_file(multiple_document_file):
    """Split a multi-document YAML file into one file per document.

    :param multiple_document_file: path to a YAML file containing several
        ``---``-separated Kubernetes manifests.

    Each document is written to ``<name-prefix>-<kind>.yaml`` (lowercased,
    with 'customresourcedefinition' shortened to 'crd'), where the name
    prefix is ``metadata.name`` up to the first dot.
    """
    with open(multiple_document_file) as f:
        docs = list(yaml.round_trip_load_all(f))
    for doc in docs:
        fn = '{}-{}.yaml'.format(
            doc['metadata']['name'].split('.')[0], doc['kind']
        ).lower().replace('customresourcedefinition', 'crd')
        print('Writing {}'.format(fn))
        with open(fn, 'w') as out:
            # Bug fix: the original dumped the bare name `file` (a NameError
            # on Python 3) instead of the current document.
            out.write(yaml.round_trip_dump(doc))


if __name__ == '__main__':
    if len(sys.argv) <= 1:
        print(
            "Specify file name for file to split\n"
            "e.g. ./split_yaml_resources.py calico.yaml")
        sys.exit(1)
    split_yaml_file(sys.argv[1])
Hey, minor intro how to use kubectl-export ? some docstring ? Looks very useful, thanks!!!
Thanks for the suggestion, @Moulick!
So I originally used an export.sh script, which I've added to this diff now. Sometimes I fiddle with stuff and then want to regenerate the Kubernetes manifests since I've kubectl edited them to get them into a working state, or updated them with kubectl set.
export_resources.py matches the use of export.sh, where you call it with no arguments and then it exports all your resources of all resource types/kinds to YAML files from your current kubectl context.
kubectl-export is a bit different where it does a single resource. One of the reasons is so I didn't need PyYAML or any other non standard library dependencies for it.
I generally use it in a similar way to export_resources.py though, except for a single deployment or service, e.g.:
kubectl-export -f gitserver.StatefulSet.yaml | jy > gitserver.StatefulSet.yaml
kubectl-export deploy/nginx | jy > nginx.Deployment.yaml
These rely on @sourcegraph's useful jy tool that converts JSON to YAML, but for an example that doesn't require something like that:
# kubectl-export-v2 deploy/pgsql | jq '.spec.template.spec.containers[].image'
"sourcegraph/postgres-11.1:19-02-13_22d74790@sha256:10c2ff7a4da422cd75e022b51bef3a0c935f4b3ded335d9679a4f1202db605d2"
"wrouesnel/postgres_exporter:v0.4.7@sha256:dd8051322ceb8995d3d7f116041a2116815e01e88232a90f635ebde8dcc4d3f4"
What about removing nodePort?
@MosheM123 That should be easy enough. I'm planning on moving this to a repo, so folks can PR it.
@MosheM123 That should be easy enough. I'm planning on moving this to a repo, so folks can PR it.
aight, let me know, I'd love to contribute.
More Kubernetes fun!