OpenShift oc command line cheatsheet
images/
layers/
node_modules/
.cache/
manifests/
#!/usr/bin/env bash
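# Caches the stdout of an arbitrary command under .cache/, keyed by the md5 of its arguments,
# so repeated expensive oc/jq invocations can be replayed instead of re-run.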
set -o errexit #fail on first error
set -o pipefail
mkdir -p .cache
HASH=$(md5sum <<< "$@" | awk '{print $1}')
if [ ! -f ".cache/${HASH}" ]; then
"$@" | tee ".cache/${HASH}"
else
cat ".cache/${HASH}"
fi
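# Usage sketch (assuming this file is saved as ./cache-stdout and made executable):
#   ./cache-stdout oc get pods -o json   # first call runs the command and caches its output
#   ./cache-stdout oc get pods -o json   # later calls replay .cache/<md5 of the arguments>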
#!/usr/bin/env bash
set -o errexit #fail on first error
set -o pipefail
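# Lists every image referenced by every image stream in the current project, fetches each
# imagestreamimage (via ./cache-stdout), de-duplicates layers by digest, and sums their sizes (in GB).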
./cache-stdout oc get imagestreams -o json | jq -r '.items[] | {imageStream:.metadata.name, image:.status.tags[]?.items[]?.image} | (.imageStream + "@" + .image)' | xargs -I {} ./cache-stdout oc get 'imagestreamimage/{}' -o json | jq -r '.image.dockerImageLayers[] | {name: .name, size: .size}' | jq -sr 'unique_by(.name) | .[].size' | awk '{ sum += $1 } END { print (sum / 1024 / 1024 / 1024) "GB" }'
#!/usr/bin/env bash
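# Driver script: snapshots all objects, then walks every tag of every image stream in the
# snapshot and fetches the layer list for each image via ./fetch-image-layers.sh.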
set -o errexit #fail on first error
set -o pipefail
./save-all-objects.sh
test -d layers || mkdir layers
test -d images || mkdir images
find images -empty -delete
jq -Mrc '.items[] | . as $item | .status.tags // [] | .[] | . as $tag | .items // [] | .[] | [$item.metadata.namespace, $item.metadata.name, $tag.tag, .image] | @tsv' all-image-streams.json |
while IFS=$'\t' read -r namespace name tag image; do
./fetch-image-layers.sh "${namespace}" "${name}" "${image}"
done
#!/usr/bin/env bash
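# Presumably saved as fetch-image-layers.sh (it is invoked above with three arguments):
# fetches the imagestreamimage JSON for <namespace>/<imagestream>@<image>; for schema v1
# manifests it downloads the registry manifest, otherwise it writes the layer digests from
# the imagestreamimage JSON to images/<image>-layers.txt.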
set -o errexit #fail on first error
set -o pipefail
./save-all-objects.sh
test -d layers || mkdir layers
test -d layers/v1 || mkdir layers/v1
test -d layers/v2 || mkdir layers/v2
test -d manifests || mkdir manifests
test -d manifests/v1 || mkdir manifests/v1
test -d manifests/v2 || mkdir manifests/v2
test -d images || mkdir images
namespace="$1"
imagestream="$2"
image="$3"
#while IFS=$'\t' read -r namespace name image; do
test -f "images/${image}.json" || (echo "fetching ${namespace}/${imagestream}@${image}"; oc -n "${namespace}" get "isimage/${imagestream}@${image}" -o json > "images/${image}.json")
dockerImageManifestMediaType="$(jq -crM '.image.dockerImageManifestMediaType' "images/${image}.json")"
if [ "$dockerImageManifestMediaType" == "application/vnd.docker.distribution.manifest.v1+json" ]; then
test -f "manifests/v1/${image}.json" || ( echo "Fetching manifest ${namespace}/${imagestream}/manifests/${image}"; curl -kfsSL -H "Authorization: Bearer $(oc whoami -t)" "https://docker-registry.pathfinder.gov.bc.ca/v2/${namespace}/${imagestream}/manifests/${image}" -o "manifests/v1/${image}.json" || echo "Error downloading ${namespace}/${imagestream}/manifests/${image}")
else
#test -f "manifests/v2/${image}.json" || curl -kfsSL -H "Authorization: Bearer $(oc whoami -t)" "https://docker-registry.pathfinder.gov.bc.ca/v2/${namespace}/${imagestream}/manifests/${image}" -o "manifests/v2/${image}.json"
test -f "images/${image}-layers.txt" || jq -Mr ".image.dockerImageLayers // [] | .[].name" "images/${image}.json" > "images/${image}-layers.txt"
fi
#done
#!/usr/bin/env bash
# Example:
# ./find-image-usage-by-is.sh openshift/jenkins-2-centos7
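# For every image of the given image stream, takes the first layer listed for that image and
# counts how many other images' layer files also contain it (a rough measure of image reuse).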
./save-all-objects.sh
test -d layers || mkdir layers
test -d images || mkdir images
_ns_is="$1"
_ns="$(cut -d / -f1 <<< "$_ns_is")"
_is="$(cut -d / -f2 <<< "$_ns_is")"
jq -Mrc --arg ns "${_ns}" --arg is "${_is}" '.items[] | select(.metadata.namespace == $ns and .metadata.name == $is) | . as $item | .status.tags // [] | .[] | . as $tag | .items[] | [$item.metadata.namespace, $item.metadata.name, $tag.tag, .image] | @tsv' all-image-streams.json |
while IFS=$'\t' read -r namespace name tag image; do
toplayer="$(head -1 "images/${image}-layers.txt")"
echo "top layer of ${image} is ${toplayer}"
find images -type f -name '*-layers.txt' | grep -v "images/${image}-layers.txt" | xargs grep -oh -F "${toplayer}" | wc -l
done
#!/usr/bin/env bash
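# Finds PVCs that are not referenced by any pod, cron job, deployment/deploymentconfig,
# or stateful set. Assumes all-pods.json already exists; if it does not, one way to
# generate it (mirroring the guards below) is:
if [ ! -f all-pods.json ]; then
echo "Generating list of Pods"
oc get pod --all-namespaces -o json > all-pods.json
fi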
jq -r '.items[] | . as $pod | select(.spec.volumes !=null) | .spec.volumes[] | select(.persistentVolumeClaim !=null ) | [$pod.metadata.namespace, .persistentVolumeClaim.claimName] | @csv' all-pods.json | sort | uniq > pod-volumes.csv
if [ ! -f cronjobs.json ]; then
echo "Generating list of CrobJobs"
oc get cronjob --all-namespaces -o json > cronjobs.json
fi
jq -r '.items[] | . as $item | select(.spec.jobTemplate.spec.template.spec.volumes !=null) | .spec.jobTemplate.spec.template.spec.volumes[] | select(.persistentVolumeClaim !=null ) | [$item.metadata.namespace, .persistentVolumeClaim.claimName] | @csv' cronjobs.json | sort | uniq > cronjob-volumes.csv
if [ ! -f deployments.json ]; then
echo "Generating list of Deployments"
oc get deployment,deploymentConfig --all-namespaces -o json > deployments.json
fi
jq -r '.items[] | . as $item | select(.spec.template.spec.volumes !=null) | .spec.template.spec.volumes[] | select(.persistentVolumeClaim !=null ) | [$item.metadata.namespace, .persistentVolumeClaim.claimName] | @csv' deployments.json | sort | uniq > deployment-volumes.csv
if [ ! -f statefulsets.json ]; then
echo "Generating list of Statefulsets"
oc get statefulset --all-namespaces -o json > statefulsets.json
fi
jq -r '.items[] | . as $item | select(.spec.template.spec.volumes !=null) | .spec.template.spec.volumes[] | select(.persistentVolumeClaim !=null ) | [$item.metadata.namespace, .persistentVolumeClaim.claimName] | @csv' statefulsets.json | sort | uniq > statefulset-volumes.csv
jq -r '.items[] | . as $item | select(.spec.volumeClaimTemplates !=null) | .spec.volumeClaimTemplates[] | [$item.metadata.namespace, $item.metadata.name + "-" + .metadata.name + "-0"] | @csv' statefulsets.json | sort | uniq > statefulset-vct.csv
if [ ! -f pvc.json ]; then
echo "Generating list of PVCs"
oc get pvc --all-namespaces -o json > pvc.json
fi
jq -r '.items[] | [.metadata.namespace, .metadata.name] | @csv' 'pvc.json' | sort | uniq > pvc.csv
# Includes only existing PVCs
cat statefulset-vct.csv pvc.csv | sort | uniq -c | awk '$1>1' | cut -c 6- > statefulset-vct2.csv
cat pod-volumes.csv cronjob-volumes.csv deployment-volumes.csv statefulset-volumes.csv statefulset-vct2.csv | sort | uniq > pvc-ref.csv
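# Entries appearing only once are PVCs with no matching reference (or references to PVCs that no longer exist)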
cat pvc-ref.csv pvc.csv | sort | uniq -c | awk '$1<2' | cut -c 6- > orphan-pvc.csv
wc -l orphan-pvc.csv
#!/usr/bin/env bash
set -o errexit #fail on first error
set -o pipefail
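# Finds references to images from the shared "openshift" namespace (internal registry hosts
# listed below) across Builds and BuildConfigs; the pod/deployment variants are kept as
# commented-out alternatives.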
#docker-registry.pathfinder.gov.bc.ca/
#docker-registry.default.svc:5000/
#172.50.0.2:5000/
# Pods
#jq -cr '.items[].spec.containers[].image | select( . | contains("/openshift/"))' all-Pod.json | sort | uniq
#jq -cr '.items[] | .status // {} | .containerStatuses // [] | .[].image | select( . | contains("/openshift/")) ' all-Pod.json | sort | uniq
#jq -cr '.items[] | .status // {} | .containerStatuses // [] | .[].imageID | select( . | contains("/openshift/"))' all-Pod.json | sort | uniq | sed 's|docker-pullable://||g'
# Deployment
#jq -cr '.items[].spec.template.spec.containers[].image | select( . | contains("/openshift/"))' all-Deployment.json | sort | uniq
#jq -cr '.items[].spec.template.spec.containers[].image | select( . | contains("/openshift/"))' all-ReplicaSet.json | sort | uniq
# DeploymentConfig
#jq -cr '.items[].spec.template.spec.containers[].image | select( . | contains("/openshift/"))' all-DeploymentConfig.json | sort | uniq
#jq -cr '.items[].spec.template.spec.containers[].image | select( . | contains("/openshift/"))' all-ReplicationController.json | sort | uniq
# Build
## jq -cr '.items[].spec.strategy.type' all-Build.json | sort | uniq -c
## jq -cr '.items[].spec.strategy | select(.dockerStrategy and .dockerStrategy.from) | .dockerStrategy.from.kind' all-Build.json | sort | uniq -c
## jq -cr '.items[].spec.strategy | select(.sourceStrategy and .sourceStrategy.from) | .sourceStrategy.from.kind' all-Build.json | sort | uniq -c
## jq -cr '.items[] | . as $item | select(.spec.strategy.type == "Docker" or .spec.strategy.type == "Source") | [.spec.strategy.dockerStrategy, .spec.strategy.sourceStrategy] | .[] | select(.from) | .from | . as $from | {kind:$item.kind, metadata:{name:$item.metadata.name, namespace:$item.metadata.namespace}, from:.}' all-Build.json | sort | uniq
jq -cr '.items[] | . as $item | select(.spec.strategy.type == "Docker" or .spec.strategy.type == "Source") | .spec.strategy.dockerStrategy // .spec.strategy.sourceStrategy | select(.from) | .from | . as $from | {kind:$item.kind, metadata:{name:$item.metadata.name, namespace:$item.metadata.namespace}, from:.}' all-Build.json > image-ref.json
jq -cr '.items[] | . as $item | select(.spec | .source // {} | .images) | .spec.source.images[] | .from | . as $from | {kind:$item.kind, metadata:{name:$item.metadata.name, namespace:$item.metadata.namespace}, from:.}' all-Build.json >> image-ref.json
jq -cr '.from | select(.kind == "DockerImage") | .name | select( . | contains("/openshift/"))' image-ref.json | sort | uniq
# BuildConfig
## jq -cr '.items[].spec.strategy.type' all-BuildConfig.json | sort | uniq -c
jq -cr '.items[] | . as $item | select(.spec.strategy.type == "Docker" or .spec.strategy.type == "Source") | .spec.strategy.dockerStrategy // .spec.strategy.sourceStrategy | select(.from) | .from | . as $from | {kind:$item.kind, metadata:{name:$item.metadata.name, namespace:$item.metadata.namespace}, from:.}' all-BuildConfig.json > image-ref.json
jq -cr '.items[] | . as $item | select(.spec | .source // {} | .images) | .spec.source.images[] | .from | . as $from | {kind:$item.kind, metadata:{name:$item.metadata.name, namespace:$item.metadata.namespace}, from:.}' all-BuildConfig.json >> image-ref.json
jq -cr '.from | select(.kind == "DockerImage") | .name | select( . | contains("/openshift/"))' image-ref.json | sort | uniq
const neo4j = require('neo4j-driver');
const fs = require('fs');
const readline = require('readline');
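// Builds a Neo4j graph from the snapshots produced by the shell scripts above:
// ImageStream -[:HAS]-> ImageStreamTag -[:HAS]-> ImageStreamImage -[:POINTS_TO]-> ImageLayer,
// with ImageLayer -[:EXTENDS]-> ImageLayer edges between consecutive layers.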
const driver = neo4j.driver(
'neo4j://localhost',
neo4j.auth.basic(process.env.NEO4J_USERNAME, process.env.NEO4J_PASSWORD)
);
const session = driver.session({ defaultAccessMode: neo4j.session.WRITE }) // the queries below are MERGEs, so the session needs write access
function createImageStreamImageLayersV2(imageStream, imageStreamTag, imageStreamImage){
const imageLayers = (imageStreamImage.image.dockerImageLayers || []);
return imageLayers.slice().reverse().reduce((promise, layer) => {
return promise.then((baseLayer) => {
//console.log(`Creating ImageLayer ${layer.name}`)
return session.run(`MERGE (n:ImageLayer:ImageLayerV2 {key: '${layer.name}', name: '${layer.name}'}) RETURN id(n) as _id, n.key as key`).then((thisLayer)=>{
if (baseLayer){
return session.run(`MATCH (left:ImageLayer { key: '${thisLayer.records[0].get("key")}' }),(right:ImageLayer { key: '${baseLayer.records[0].get("key")}' }) MERGE (left)-[r:EXTENDS]->(right)`).then(()=>{
return thisLayer;
})
//console.dir({base:baseLayer.records[0].get("_id"), this:thisLayer.records[0].get("_id")})
}
return thisLayer;
})
})
}, Promise.resolve()).then((result)=>{
if (imageLayers.length > 0){
return session.run(`MATCH (left:ImageStreamImage { key: '${imageStream.metadata.namespace}/${imageStream.metadata.name}@${imageStreamImage.image.metadata.name}' }),(right:ImageLayer { key: '${imageLayers[0].name}' }) MERGE (left)-[r:POINTS_TO]->(right)`).then(()=>{ return result})
}
return result;
})
}
//.image.dockerImageManifestMediaType=application/vnd.docker.distribution.manifest.v2+json
function createImageStreamImageLayers(imageStream, imageStreamTag, imageStreamImage){
if (imageStreamImage.image.dockerImageManifestMediaType === 'application/vnd.docker.distribution.manifest.v2+json'){
return createImageStreamImageLayersV2(imageStream, imageStreamTag, imageStreamImage)
//}else{
// console.log(`Unsupported dockerImageManifestMediaType ('${imageStreamImage.image.dockerImageManifestMediaType}')`)
}
return Promise.resolve(imageStreamImage)
}
function createImageStreamImage(imageStream, imageStreamTag, image){
const imageStreamImageFileName = `images/${image.image}.json`
const imageStreamImage = JSON.parse(fs.readFileSync(imageStreamImageFileName, 'utf8'));
const imageStreamImageKey = `${imageStream.metadata.namespace}/${imageStream.metadata.name}@${imageStreamImage.image.metadata.name}`
return session.run(`MERGE (n:ImageStreamImage {key: '${imageStreamImageKey}', namespace: '${imageStream.metadata.namespace}', name: '${imageStream.metadata.name}:${image.image}'}) RETURN n.key as key`)
.then( (result)=>{
//console.log(`Processing ${imageStreamImageFileName}`)
return session.run(`MATCH (left:ImageStreamTag { key: '${imageStream.metadata.namespace}/${imageStream.metadata.name}:${imageStreamTag.tag}' }), (right:ImageStreamImage { key: '${imageStreamImageKey}' }) MERGE (left)-[r:HAS]->(right)`).then(()=>{ return result})
})
.then(()=>{
//console.log(`Processing Layers from ${imageStreamImageFileName}`)
return createImageStreamImageLayers(imageStream, imageStreamTag, imageStreamImage)
})
}
function createImageStreamTagRef(imageStream, imageStreamTag){
return session.run(`MERGE (n:ImageStreamTag {key: '${imageStream.metadata.namespace}/${imageStream.metadata.name}:${imageStreamTag.tag}', namespace: '${imageStream.metadata.namespace}', name: '${imageStream.metadata.name}:${imageStreamTag.tag}'}) RETURN n.key as key`)
.then(()=>{
return (imageStreamTag.items || []).reduce((promise, image) => {
return promise.then( () => {
return createImageStreamImage(imageStream, imageStreamTag, image)
})
.then( (result)=>{
return session.run(`MATCH (left:ImageStream { key: '${imageStream.metadata.namespace}/${imageStream.metadata.name}' }), (right:ImageStreamTag { key: '${imageStream.metadata.namespace}/${imageStream.metadata.name}:${imageStreamTag.tag}' }) MERGE (left)-[r:HAS]->(right)`).then(()=>{ return result})
})
}, Promise.resolve())
})
}
function createImageStream(imageStream){
return session.run(`MERGE (n:ImageStream {key: '${imageStream.metadata.namespace}/${imageStream.metadata.name}', namespace: '${imageStream.metadata.namespace}', name: '${imageStream.metadata.name}'}) RETURN n.key AS key`)
.then(() => {
return ((imageStream.status || {}).tags || []).reduce((promise, imageStreamTag) => {
return promise.then( () => {
return createImageStreamTagRef(imageStream, imageStreamTag)
})
}, Promise.resolve());
})
}
const obj = JSON.parse(fs.readFileSync('all-image-streams.json', 'utf8'));
Promise.resolve(obj.items)
.then ((items)=>{
return items.reduce(function (promise, imageStream) {
if (imageStream.metadata.namespace === "openshift" && imageStream.metadata.name.startsWith('jenkins')){
return promise.then(function () {
return createImageStream(imageStream);
});
}
return promise;
}, Promise.resolve());
})
.catch(error => {
console.log(error)
})
.then(() => session.close())
.then(() => driver.close())

Find available object types

# at namespace level
oc api-resources --namespaced=true

# at cluster level
oc api-resources --namespaced=false
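
# narrowed to a single API group (a sketch; same flags as kubectl api-resources)
oc api-resources --api-group=apps --namespaced=true -o name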

Object type documentation

oc explain bc
oc explain bc.spec
oc explain bc.spec.completionDeadlineSeconds

oc explain dc
oc explain dc.spec
oc explain dc.spec.template
oc explain dc.spec.template.spec
oc explain dc.spec.template.spec.volumes

Find the node a pod is assigned to (node name)

oc get pod/<pod> -o custom-columns=namespace:metadata.namespace,name:metadata.name,nodeName:spec.nodeName
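
# alternatively, -o wide includes the assigned node for every pod in the current project
oc get pods -o wide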

Find routes that have not been admitted

oc get route --all-namespaces -o json | jq '.items[] | select ( .status.ingress[].conditions[] | .type == "Admitted" and .status == "False")'

## Prettier output (columns)
oc get route --all-namespaces -o json | jq -r '["namespace", "name", "reason"], (.items[] | select ( .status.ingress[].conditions[] | .type == "Admitted" and .status == "False") | [.metadata.namespace, (.kind +"/"+.metadata.name), ( .status.ingress[].conditions[].reason)]) | @csv' | column -ts,

What does "all" mean?

# as of 3.9
oc get '--raw=/apis?limit=500' | jq  -crM '.groups[] | .preferredVersion.groupVersion' | ( xargs -I {} oc get '--raw=/apis/{}?limit=500' | jq -crM '.resources[] | select(.categories | values | .[]  | . == "all" ) | .name' )

Reference: openshift/origin#18396 (comment)

List ALL resources/types

oc get '--raw=/apis?limit=500' | jq  -crM '.groups[] | .preferredVersion.groupVersion' | ( xargs -I {} oc get '--raw=/apis/{}?limit=500' | jq -crM '.resources[] | .name' )

List of Projects

oc get project -o custom-columns=name:.metadata.name,project-set:.metadata.labels.name,product:.metadata.labels.product,product-owner:.metadata.annotations.product-owner,product-lead:.metadata.annotations.product-lead


oc get project -o custom-columns=NAME:.metadata.name,PROJECT-SET:.metadata.labels.name,DISPLAY-NAME:.metadata.annotations.openshift.io/display-name

oc get project -o 'custom-columns=NAME:.metadata.name,PROJECT-SET:.metadata.labels.name,TEAM:.metadata.labels.team,DISPLAY-NAME:.metadata.annotations.openshift\.io/display-name'

oc get project -o 'custom-columns=NAME:.metadata.name,PROJECT-SET:.metadata.labels.name,TEAM:.metadata.labels.team,MCIO:.metadata.labels.mcio,DISPLAY-NAME:.metadata.annotations.openshift\.io/display-name'


# to a CSV file:
oc get namespace -l environment=prod -o json | jq -r '.items[] | . as $p | ["namespace", "product", "MCIO", "ProductOwner", "ProductLead", "DisplayName", "Description"], [$p.metadata.name, $p.metadata.labels.product // "<null>", $p.metadata.labels.mcio, $p.metadata.annotations["product-owner"], $p.metadata.annotations["product-lead"], $p.metadata.annotations["openshift.io/display-name"], $p.metadata.annotations["openshift.io/description"]] | @csv' > all-prod-products.csv


oc get namespace -l environment=prod -o json | jq -r '[.items[] | . as $p | [$p.metadata.name, $p.metadata.labels.product // "<null>", $p.metadata.labels.mcio, $p.metadata.annotations["product-owner"], $p.metadata.annotations["product-lead"], $p.metadata.annotations["openshift.io/display-name"], $p.metadata.annotations["openshift.io/description"]]] | ["namespace", "product", "MCIO", "ProductOwner", "ProductLead", "DisplayName", "Description"], .[] | @csv'


Number of PVCs per namespace

oc get pvc -o custom-columns=namespace:.metadata.namespace --all-namespaces --no-headers | sort | uniq -c | sort -n

# Top 10 namespaces using the most PVCs
oc get pvc -o custom-columns=namespace:.metadata.namespace --all-namespaces --no-headers | sort | uniq -c | awk '$1>4' | sort -rn | head -n 10

# 10 or more PVCs per namespace
oc get pvc -o custom-columns=namespace:.metadata.namespace --all-namespaces --no-headers | sort | uniq -c | awk '$1>=10' | sort -rn

Estimated disk consumption of images for a namespace

#currently selected project/namespace

# Option 1
oc get imagestreams -o json | jq -r '.items[] | {imageStream:.metadata.name, image:.status.tags[]?.items[].image} | (.imageStream + "@" + .image)' | xargs -I {} oc get 'imagestreamimage/{}' -o json |  jq -r '.image.dockerImageLayers[] | (.name + "\t" + (.size | tostring))'  | sort | uniq | awk '{ sum += $2 } END { print sum }' | awk '{ foo = $1 / 1024 / 1024 / 1024 ; print foo "GB" }'

# Option 2
oc get imagestreams -o json | jq -r '.items[] | {imageStream:.metadata.name, image:.status.tags[]?.items[].image} | (.imageStream + "@" + .image)' | xargs -I {} oc get 'imagestreamimage/{}' -o json |  jq -r '.image.dockerImageLayers[] | {name: .name, size: .size}' | jq -sr 'unique_by(.name) | .[].size' | awk '{ sum += $1 } END { print (sum / 1024 / 1024 / 1024) "GB" }'

Delete pods that are not running

oc delete pods --field-selector 'status.phase!=Running'
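
# a variant for cleaning up finished pods only (sketch; adjust the phase as needed)
oc delete pods --field-selector 'status.phase==Succeeded'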
{
"name": "openshift-cheatsheet",
"version": "1.0.0",
"description": "",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"repository": {
"type": "git",
"url": "git+https://gist.github.com/a841b050b5cabe697619c937375727b2.git"
},
"author": "Clecio Varjao",
"license": "Apache-2.0",
"bugs": {
"url": "https://gist.github.com/a841b050b5cabe697619c937375727b2"
},
"homepage": "https://gist.github.com/a841b050b5cabe697619c937375727b2",
"dependencies": {
"neo4j-driver": "^4.0.1"
}
}
#!/usr/bin/env bash
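# Presumably save-all-objects.sh (referenced by the scripts above): snapshots cluster objects
# to all-*.json files, stripping container env vars and annotations to keep the files small.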
find . -type f -name 'all-*.json' -empty -delete # drop empty snapshot files from failed runs so they get re-fetched
test -f all-image-streams.json || oc get is -o json --all-namespaces > all-image-streams.json
test -f all-images.json || oc -n openshift get Image -o json --all-namespaces > all-images.json
test -f all-Pod.json || oc get Pod -o json --all-namespaces | jq 'del(.items[].spec.containers[].env) | del(.items[].metadata.annotations)' > all-Pod.json
test -f all-BuildConfig.json || oc get BuildConfig -o json --all-namespaces > all-BuildConfig.json
test -f all-Build.json || oc get Build -o json --all-namespaces > all-Build.json
test -f all-Deployment.json || oc get Deployment -o json --all-namespaces | jq 'del(.items[].spec.template.spec.containers[].env) | del(.items[].metadata.annotations)' > all-Deployment.json
test -f all-ReplicaSet.json || oc get ReplicaSet --all-namespaces -o json | jq 'del(.items[].spec.template.spec.containers[].env) | del(.items[].metadata.annotations)' > all-ReplicaSet.json
test -f all-DeploymentConfig.json || oc get DeploymentConfig -o json --all-namespaces | jq 'del(.items[].spec.template.spec.containers[].env) | del(.items[].metadata.annotations)' > all-DeploymentConfig.json
test -f all-ReplicationController.json || oc get ReplicationController --all-namespaces -o json | jq 'del(.items[].spec.template.spec.containers[].env) | del(.items[].metadata.annotations)' > all-ReplicationController.json
test -f all-CronJob.json || oc get CronJob --all-namespaces -o json > all-CronJob.json
test -f all-Job.json || oc get Job --all-namespaces -o json > all-Job.json
test -f all-StatefulSet.json || oc get StatefulSet --all-namespaces -o json > all-StatefulSet.json