Enable kubectl completion (needs the bash-completion package):
source <(kubectl completion bash)
Dry run: with --expose, outputs a Service and a Deployment in YAML:
kubectl run --image=apache \
--port=80 \
| /** | |
| * This is a sample webhook server that listens for webhook | |
| * callbacks coming from Trello, and updates any cards that are | |
| * added or modified so everyone knows they are "PRIORITY" | |
| * | |
| * To get started | |
| * * Add your key and token below | |
| * * Install dependencies via `npm install express request body-parser` | |
| * * Run `node app.js` on a publicly visible IP | |
| * * Register your webhook and point to http://<ip or domain>:3123/priority |
| #expose console from 443 | |
| oc create route reencrypt master --service kubernetes --port 443 -n default | |
| #nested loops, tag count per image stream (IS) | |
| oc get is --template='{{range $i, $is:=.items}}{{ $total :=0 }}{{ range $index, $element :=$is.status.tags }}{{if eq $element.tag "latest"}}{{$is.metadata.name}}{{"\t"}}{{ $index }}{{end}}{{end}}{{"\n"}}{{end}}' | |
| #get complex labels | |
| oc get nodes --template='{{ with $i := index .items 0 }}{{ index $i.metadata.labels "failure-domain.beta.kubernetes.io/region" }}{{ end }}' | |
| #patching resource limits |
# NOTE(review): bash fragment pasted from a larger script. It is truncated in
# this file (the while loop, the if/elif, and the function body are never
# closed) and still carries markdown-table "|" residue, so it cannot run as-is.
| #!/bin/bash | |
| # Scale DOWN all Turbonetes PODs to 0 | |
# Scales every deployment in the "turbonomic" namespace to 0 replicas, then
# polls until the pods have terminated.
| function turbo_stop_all_pods { | |
| turbo_stop_all_pods=$(kubectl get deploy -n turbonomic --no-headers=true | cut -d ' ' -f1 | xargs -I % kubectl scale --replicas=0 deployment/% -n turbonomic) | |
| while true; do | |
# NOTE(review): broken test syntax — presumably intended as:
#   if [ "$(kubectl get pods -n turbonomic | grep -v STATUS | wc -l)" -gt 0 ]; then
| if `kubectl get pods -n turbonomic | grep -v STATUS | wc -l` -gt 0 then | |
| echo -e "turbo_STOP_all_pods: Waiting on Turbonetes POD(s) to TERMINATE, so far: \n`kubectl get pods -n turbonomic | grep -v NAME`" | |
# NOTE(review): "$counter" is never initialized or incremented anywhere in the
# visible fragment — the watchdog condition below can never trigger as shown.
| elif | |
# NOTE(review): the stray ";" after "then" is a syntax error.
| [[ "$counter" -gt 30 ]]; then; | |
| echo "MAX Counter Reached! One or more PODs are stuck ##TERMINATING##, intervening to kill it/them" |
| Add the following in .zshrc: | |
| ... | |
| plugins=(osx git zsh-autosuggestions zsh-syntax-highlighting zsh-nvm docker kubectl) | |
| ... | |
| ### Fix slowness of pastes with zsh-syntax-highlighting.zsh | |
# NOTE(review): zsh snippet for .zshrc — mitigates slow pastes caused by
# zsh-syntax-highlighting by saving the current self-insert widget and
# temporarily rebinding self-insert to url-quote-magic. The function is
# truncated here: the closing "}" and the matching restore hook (which would
# re-install OLD_SELF_INSERT after the paste) are outside this view — confirm
# against the full snippet before use.
| pasteinit() { | |
| OLD_SELF_INSERT=${${(s.:.)widgets[self-insert]}[2,3]} | |
| zle -N self-insert url-quote-magic # I wonder if you'd need `.url-quote-magic`? |
# Copy every Secret from namespace "old" into namespace "new".
# Strip server-assigned metadata (uid, resourceVersion, creationTimestamp)
# in addition to rewriting .namespace — `kubectl create` rejects objects
# that still carry a resourceVersion from the export.
kubectl get secrets -o json --namespace old \
  | jq '.items[].metadata |= (.namespace = "new" | del(.uid, .resourceVersion, .creationTimestamp))' \
  | kubectl create -f -
| #!/usr/bin/python | |
| # connect2beam is a script that finds Erlang nodes and connects to them or | |
| # kills them. | |
| # | |
| # Example for connection: | |
| # | |
| # $ connect2beam | |
| # Available nodes: index name (cookie, pid) | |
| # |
#!/bin/bash
#
# usage ./clean.docker.registry.sh registryUrl login filterString
#
# Prompt for the registry password without echoing it.
# printf replaces the non-portable `echo -n`; -r keeps backslashes in the
# password literal; IFS= preserves leading/trailing whitespace.
printf 'Password: '
IFS= read -rs password
# -s suppresses the user's Enter keystroke, so emit the newline ourselves.
printf '\n'
# curl-style credentials: "<login>:<password>" built from $2 (login).
user="$2:${password}"
// NOTE(review): Scala/Spark snippet (not shell, despite the file's extension).
// Configures a local SparkContext so s3:// paths resolve through the legacy
// NativeS3FileSystem, with s3n-style credential keys. Lines still carry
// markdown-table "|" residue from the paste.
val sparkConf = new SparkConf().setMaster("local").setAppName("text") | |
| val sc = new SparkContext(sparkConf) | |
| val hadoopConf = sc.hadoopConfiguration | |
| //set the aws secret information | |
| hadoopConf.set("fs.s3.impl", "org.apache.hadoop.fs.s3native.NativeS3FileSystem") | |
// SECURITY: the two values below are placeholders — never commit real AWS
// keys; prefer environment variables or instance profiles/IAM roles.
| hadoopConf.set("fs.s3n.awsAccessKeyId","youraccesskeyid") | |
| hadoopConf.set("fs.s3n.awsSecretAccessKey","secretkey") | |