OC-RECIPES
export current_pod=
export current_project=
export current_application=
export current_cicd_project=
export current_template=
# show / explore all openshift resources
oc get all
# examine the deployment configuration of a pod
oc describe dc ${current_pod}
# show imagestream of an application
oc describe is ${current_application}
# show route of an application
oc describe route ${current_application}
# show template used to create an application
oc describe template eap70-basic-s2i -n openshift
# list all parameters from a template
$ oc process --parameters=true -n openshift ${current_template}
# show all templates from a project
oc get templates -n openshift
# show current events
oc get events
# show current events and wait
oc get events -w
# export service definition to yaml
oc get service ${current_application} -o yaml
# export service definition to json
oc get service ${current_application} -o json
# set a number of pods running for the application
oc scale dc ${current_application} --replicas=3
# disable all pods running for the application
oc scale dc ${current_application} --replicas=0
# log verbosity configuration (append to any oc command)
--loglevel=10
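# a minimal example: the verbosity flag appended to an ordinary oc command
oc get pods --loglevel=10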
# show pod log (tail the pod log)
oc logs -f ${current_pod}
# show build config log (tail the build log)
oc logs -f bc/${current_application}
# show docker container log
docker logs -f <containerId>
# show all pods from project
oc get pods
# execute a shell command into the pod
oc exec ${current_pod} tail /opt/eap/standalone/configuration/standalone.xml
# debug the pod (enable ssh access)
oc debug ${current_pod}
# open remote shell in container
oc rsh ${current_pod}
# examine all running processes in the container (after remote shell access)
ps -ef
# directory of s2i configuration
cd /usr/local/s2i
# rsync inside a container
> all content is lost if the pod is restarted, unless a persistent volume is used
oc rsync /home/rabreu/Documents/_redhat/_banestes/theme-banestes-2.0/_banestes/_dev/ rhsso-1-fwtj8:/opt/eap/themes/banestes-development
# directory of service account information
> The directory contains the token of the service account, as well as certificates required to connect to the OpenShift master API.
cd /var/run/secrets/kubernetes.io/serviceaccount
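# sketch (run inside a pod): use the mounted token to query the master API
# assumes the in-cluster DNS name kubernetes.default.svc and a service account allowed to list pods
TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
curl -sk -H "Authorization: Bearer $TOKEN" https://kubernetes.default.svc/api/v1/namespaces/$NAMESPACE/pods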
# directory of build information used to build the container (all Dockerfile files)
> All xPaaS images include the Dockerfile in the image in the /root/buildinfo directory.
cat /root/buildinfo/Dockerfile- ...
# search docker images
docker search centos
docker search wildfly
# search docker images on red hat registry
docker search registry.access.redhat.com/rhel7.4
# download an image from the red hat registry
docker pull registry.access.redhat.com/jboss-eap-7/eap70-openshift:latest
# stop docker container from running
docker stop <containerId>
# inspect a docker container
docker inspect <containerId>
# delete docker container
docker rm -f <containerId>
# delete docker image
docker rmi <imageId>
# inspect docker container volume path
docker inspect -f '{{json .Mounts}}' <containerId> | jq
# create a route
oc expose svc <serviceId>
# show all routes
oc get route
# delete the application (imagestream, deploymentconfig, route, pod, service, and so on)
oc delete all -lapp=${current_application}
# show project builds
oc get build
# tag an image
oc tag nodejs-ex:latest nodejs-ex:testing
# start a new build
oc start-build ${current_application}
# allocate resources (requests and limits) for a pod
oc set resources dc ${current_application} --requests=cpu=500m,memory=1Gi --limits=cpu=500m,memory=1Gi
# process a template and create the resulting objects
$ oc process -f $HOME/deployment-example.json | oc create -f -
# edit a deploymentconfiguration
oc edit dc ${current_application} -o json
# environment variable setting
$ oc set env dc/${current_application} STORAGE=/data
$ oc set env dc/${current_application} --overwrite STORAGE=/opt
$ env | grep RAILS_ | oc set env rc/${current_application} -e -
# environment variable unsetting
$ oc set env dc/${current_application} ENV1- ENV2-
$ oc set env rc --all ENV3-
# list all environment variables for current pod
$ oc set env pod/${current_application} --list
# list all environment variables for current dc
$ oc set env dc/${current_application} --list
# creating a secret
$ echo 'admin' > ./user.txt
$ echo 'r3dh4t1!' > ./password.txt
$ oc secret new printenv-secret app_user=user.txt app_password=password.txt
$ oc env dc/printenv --from=secret/printenv-secret --prefix=DB_
$ oc env dc/printenv --from=secret/printenv-secret
$ oc set volume dc/printenv --add --overwrite --name=db-config-volume --mount-path /dbconfig/ --secret-name=printenv-secret
# decode secrets (base64)
echo "cjNkaDR0MSEK" | base64 --decode
echo "YWRtaW4K" | base64 --decode
oc get secret ${current_application} --template '{{ index .data "database-user"}}'|base64 -d
oc get secret ${current_application} --template '{{ index .data "database-password"}}'|base64 -d
# get secret details
oc get secret mongodb
oc get secret mongodb -o yaml
# creating a configMap (individual properties / entire files)
$ oc create configmap special-config --from-literal=APP_USER=admin --from-literal=APP_PASSWORD=passw0rd1
> create a configMap using all files from a directory in file-system
$ oc create configmap game-config --from-file=example-files/
oc get configmaps game-config -o yaml
> create a configMap using a single file
$ oc create configmap game-config-3 --from-file=game-special-key=example-files/game.properties
# creating a configMap and mount as a volume in pod
echo "This is Wolfgang's Config File" > configfile.txt
oc create configmap printenv-config-file --from-file=configfile.txt
oc set volume dc/printenv --add --overwrite --name=config-volume -m /data/ -t configmap --configmap-name=printenv-config-file
$ oc rsh printenv-5-xqgsl
sh-4.2$ ls /data/configfile.txt
/data/configfile.txt
sh-4.2$ cat /data/configfile.txt
This is Wolfgang's Config File
# list files mounted in a pod
$ oc rsh $( oc get pod | grep ${current_application} | grep Running | awk '{ print $1 }' )
# oc create project
oc new-project <project_name> --description="<description>" --display-name="<display_name>"
# turn off all automatic triggers
oc set triggers dc ${current_application} --manual
# set jenkins permissions to edit objects to build and deploy applications in the current project
# When granting permissions it is always necessary to specify the fully qualified path to the service account. For a single service account, this is always system:serviceaccount:<project name>:<service account name>.
oc policy add-role-to-user edit system:serviceaccount:${current_cicd_project}:jenkins -n ${current_project}
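# sketch: double-check afterwards who can edit objects in the target project
oc policy who-can edit deploymentconfigs -n ${current_project}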
# persistent volume tip
# when inspecting a deployment config, a container volume that is currently not backed by a persistent volume shows up as:
# emptyDir: {}
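# sketch: list the volumes of a deployment config to see what backs each mount
oc set volume dc/${current_application}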
# count the total amount of cpu (processors)
cat /proc/cpuinfo | awk '/^processor/{print $3}' | wc -l
# count the total amount of memory (ram)
cat /proc/meminfo | grep MemTotal
# network utilities
yum install -y telnet bind-utils
# about persistent volume access modes (pv/pvc)
ReadWriteOnce (RWO) : The volume can be mounted as read-write by a single node.
ReadOnlyMany (ROX) : The volume can be mounted read-only by many nodes.
ReadWriteMany (RWX) : The volume can be mounted as read-write by many nodes.
The OpenShift Container Platform supports many types of volume plug-ins. However, only the following two volume plug-ins provide the RWX access mode required by 3scale:
- GlusterFS
- NFS
The implication of this requirement is that, for a production environment, an API Manager (3scale) deployment requires either GlusterFS or NFS.
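# sketch: request an RWX-backed claim and mount it on a dc in one step
# (claim name, size and mount path below are illustrative values)
oc set volume dc/${current_application} --add --name=shared-data \
  --type=persistentVolumeClaim --claim-name=shared-data-pvc \
  --claim-size=1Gi --claim-mode=ReadWriteMany --mount-path=/shared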
# cluster quota
oc describe clusterquota clusterquota-rhte-mw-api-mesh-$OCP_USERNAME --as=system:admin
# put pods in a paused state (injects paused: true into a template)
sed -i "s/replicas:\ 1/replicas: 1\n paused: true/" some-template.yml
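# alternative sketch: pause/resume rollouts on an existing dc instead of editing the template
oc rollout pause dc/${current_application}
oc rollout resume dc/${current_application}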
# working with imagestream tags
# https://blog.openshift.com/image-streams-faq/
oc tag -d python:3.5
oc tag python:3.6 python:latest
# --------------------------------------------------
# [example] customize and modify existing template
# --------------------------------------------------
oc get template sso72-mysql-persistent -n openshift -o yaml > sso72-mysql-persistent-cassi.yaml
$ oc export template mysql-ephemeral -o json -n openshift > mysql-ephemeral.json
# ... change the mysql-ephemeral.json file ...
$ oc process -f mysql-ephemeral.json \
-v MYSQL_DATABASE=testdb,MYSQL_USER=testuser,MYSQL_PASSWORD=secret > testdb.json
$ oc create -f testdb.json
# --------------------------------------------------
# [example] new-app
# --------------------------------------------------
oc new-app --template=mongodb-ephemeral --name=mongodb \
--param MONGODB_USER=mongouser --param MONGODB_PASSWORD=redhat \
--param MONGODB_DATABASE=mydb --param DATABASE_SERVICE_NAME=mongodb
oc new-app --image-stream="openshift/jboss-eap64-openshift:1.6" \
https://github.com/tonykay/openshift3mlbparks \
MONGODB_USER=mongouser MONGODB_PASSWORD=redhat \
MONGODB_DATABASE=mydb DATABASE_SERVICE_NAME=mongodb
# --------------------------------------------------
# [example] A/B routing
# --------------------------------------------------
oc new-app --name='cotd2' -l name='cotd2' https://github.com/wkulhanek/cotd.git -e SELECTOR=cities
oc set route-backends cotd cotd=50 cotd2=50
oc describe route cotd
while true; do curl -s http://$(oc get route cotd --template='{{ .spec.host }}')/item.php | grep "data/images" | awk '{print $5}'; sleep 1; done
oc set route-backends cotd cotd=40 cotd2=10
while true; do curl -s http://$(oc get route cotd --template='{{ .spec.host }}')/item.php | grep "data/images" | awk '{print $5}'; sleep 1; done
oc get all -lapp=cotd2
oc delete all -lapp=cotd2
# --------------------------------------------------
# [example] service account
# --------------------------------------------------
> Service accounts are API objects that exist within each OpenShift project. Each project comes with three default service accounts: builder, used to build application containers; deployer, used to deploy applications; and default, used for all running pods.
> It is good practice to create a specific service account with the correct permissions for that particular pod only, since granting permissions to the default service account would grant them to every running pod in the project.
oc create serviceaccount eap7-service-account
oc policy add-role-to-user view system:serviceaccount:$(oc project -q):eap7-service-account -n $(oc project -q)
oc create -f https://raw.githubusercontent.com/jboss-openshift/application-templates/ose-v1.4.8/secrets/eap7-app-secret.json
oc secrets link eap7-service-account eap7-app-secret
> The template does not automatically use a specific service account. Because the template does not specify one, your pod runs with the project's default service account.
> The better approach is therefore to modify the deployment configuration to use your service account instead of the default one.
> The easiest way to figure out how to write the replacement path is to get the object as JSON output (oc get dc session-replication -o json) and then follow the tree.
oc patch dc session-replication -p '{"spec":{"template":{"spec":{"serviceAccountName": "eap7-service-account"}}}}'
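# sketch: confirm the dc now references the new service account
oc get dc session-replication -o jsonpath='{.spec.template.spec.serviceAccountName}'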
# --------------------------------------------------
# [example] s2i
# --------------------------------------------------
> create an application from external git repository using a base image
oc new-app openshift/eap70-basic-s2i -p APPLICATION_NAME=session-replication -p SOURCE_REPOSITORY_URL=https://github.com/wkulhanek/ocp-session-replication.git -p SOURCE_REPOSITORY_REF=3.7 -p CONTEXT_DIR=
# --------------------------------------------------
# [example] database connection using secret as volume point
# --------------------------------------------------
echo 'r3dh4t1!' > ./dbpassword.txt
echo 'admin' > ./dbuser.txt
echo 'http://postgresql:5432' > ./dburl.txt
oc secret new printenv-db-secret app_db_user=dbuser.txt app_db_password=dbpassword.txt app_db_url=dburl.txt
oc set volume dc/printenv --add --overwrite --name=db-config-volume -m /dbconfig/ --secret-name=printenv-db-secret
oc set env dc/printenv READ_FROM_FILE=/dbconfig/app_db_url
curl $(oc get route printenv --template='{{ .spec.host }}')
# --------------------------------------------------
# [example] consuming configMap
# --------------------------------------------------
> updating configMaps does not update pods: you must redeploy them
spec:
  containers:
  - name: test-container
    image: gcr.io/google_containers/busybox
    command: [ "/bin/sh", "-c", "env" ]
    env:
    - name: APP_USER_KEY
      valueFrom:
        configMapKeyRef:
          name: special-config
          key: APP_USER
spec:
  containers:
  - name: test-container
    image: gcr.io/google_containers/busybox
    command: [ "/bin/sh", "-c", "echo $(APP_USER_KEY)" ]
    env:
    - name: APP_USER_KEY
      valueFrom:
        configMapKeyRef:
          name: special-config
          key: APP_USER
spec:
  containers:
  - name: test-container
    image: gcr.io/google_containers/busybox
    command: [ "/bin/sh", "-c", "cat /etc/config/APP_USER" ]
    volumeMounts:
    - name: config-volume
      mountPath: /etc/config
  volumes:
  - name: config-volume
    configMap:
      name: special-config
  restartPolicy: Never
spec:
  containers:
  - name: test-container
    image: gcr.io/google_containers/busybox
    command: [ "/bin/sh", "-c", "cat /etc/config/path/to/special-key" ]
    volumeMounts:
    - name: config-volume
      mountPath: /etc/config
  volumes:
  - name: config-volume
    configMap:
      name: special-config
      items:
      - key: APP_USER
        path: path/to/special-key
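> sketch: after editing a configMap, trigger a new deployment so pods pick up the new values
oc rollout latest dc/printenv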
# --------------------------------------------------
# [example] multi-container pod
# --------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  name: tomcat
spec:
  containers:
  - name: tomcat
    image: tomcat:8.0
    ports:
    - containerPort: 7500
    imagePullPolicy: Always
  - name: database
    image: mongodb
    ports:
    - containerPort: 7501
    imagePullPolicy: Always
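# sketch: create the pod above from a file and list its containers (the file name is illustrative)
oc create -f tomcat-mongodb-pod.yaml
oc get pod tomcat -o jsonpath='{.spec.containers[*].name}'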
# --------------------------------------------------
# [example] Dockerfile sed
# --------------------------------------------------
RUN sed -i -f /opt/app-root/etc/httpdconf.sed /opt/rh/httpd24/root/etc/httpd/conf/httpd.conf
> httpdconf.sed
s/^Listen 80/Listen 0.0.0.0:8080/
s/^User apache/User default/
s/^Group apache/Group root/
s%^DocumentRoot "/opt/rh/httpd24/root/var/www/html"%DocumentRoot "/opt/app-root/src"%
s%^<Directory "/opt/rh/httpd24/root/var/www/html"%<Directory "/opt/app-root/src"%
s%^<Directory "/opt/rh/httpd24/root/var/html"%<Directory "/opt/app-root/src"%
s%^ErrorLog "logs/error_log"%ErrorLog "/tmp/error_log"%
s%CustomLog "logs/access_log"%CustomLog "/tmp/access_log"%
151s%AllowOverride None%AllowOverride All%
# --------------------------------------------------
# [example] create cluster quota
# --------------------------------------------------
oc create clusterquota clusterquota-$OCP_AMP_ADMIN_ID \
--project-annotation-selector=openshift.io/requester=$OCP_AMP_ADMIN_ID \
--hard requests.cpu="4" \
--hard limits.cpu="6" \
--hard requests.memory="16Gi" \
--hard limits.memory="24Gi" \
--hard configmaps="15" \
--hard pods="30" \
--hard persistentvolumeclaims="10" \
--hard services="150" \
--hard secrets="150" \
--hard requests.storage="40Gi" \
--as=system:admin
TODO - adding roles to specific user
# --------------------------------------------------
# [setup] registry.redhat.io (docker images)
# --------------------------------------------------
# Internal error occurred: Get https://registry.redhat.io/v2/jboss-eap-7/eap71-openshift/manifests/latest: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials.
# https://access.redhat.com/articles/3399531
# ssh into openshift host and:
docker login https://registry.redhat.io
#> Username: rabreu-redhat-kcs.com
cp -R ~/.docker/ /var/lib/origin/
chmod -R 755 /var/lib/origin/.docker/
cd /var/lib/origin/.docker/
oc login https://console.arekkusu.io:8443 --token=...
oc delete secret redhat.io -n openshift
oc create secret generic "redhat.io" --from-file=.dockerconfigjson=config.json --type=kubernetes.io/dockerconfigjson -n openshift
oc import-image --all=true jboss-eap-7/eap71-openshift --confirm
oc import-image my-jboss-eap-7/eap71-openshift --from=registry.access.redhat.com/jboss-eap-7/eap71-openshift -n openshift --confirm
# try again: https://console.arekkusu.io:8443/console/project/openshift/browse/images/jboss-eap71-openshift
##################################################
# OPENSHIFT CONCEPTS
##################################################
# image stream
# > Can be used to automatically perform an action when new images are created
# > Builds and deployments can watch an image stream to receive notifications when new images are added, and react by performing a build or deployment
# > Image streams are created after pulling the images. The advantage of an image stream is that it watches for updates to new versions of an image.
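# sketch: wire a dc to redeploy automatically when a new image lands in an image stream
# (the image stream tag and container name below are illustrative)
oc set triggers dc/${current_application} --from-image=${current_application}:latest -c ${current_application}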
# deployment config
# > OpenShift deployments provide fine-grained management over applications, based on a user-defined template called a deployment configuration.
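# sketch: common rollout operations against a deployment config
oc rollout latest dc/${current_application}
oc rollout status dc/${current_application}
oc rollout undo dc/${current_application}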