Created
June 11, 2025 08:16
-
-
Save tommeramber/08ad6ac3af85face1982aa9a3526bbc9 to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# detect-nfs-stale
# Hourly CronJob that runs the generator script from the
# detect-nfs-stale-scripts ConfigMap; the script fans out one
# per-node child CronJob (see the ConfigMap below).
---
apiVersion: batch/v1
kind: CronJob
metadata:
  name: detect-nfs-stale
  namespace: openshift-monitoring
spec:
  concurrencyPolicy: Allow
  failedJobsHistoryLimit: 1
  # NOTE: must be ASCII single quotes — typographic quotes make this an
  # invalid cron expression.
  schedule: '45 * * * *'
  successfulJobsHistoryLimit: 3
  suspend: false
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - command:
            - /bin/sh
            args:
            - /opt/scripts/detect-nfs-stale.sh
            image: {{ .Values.image }} # https://medium.com/@tamber/mini-howto-the-ultimate-container-tool-image-for-openshift-8e338094822e
            imagePullPolicy: IfNotPresent
            name: detect-nfs-stale
            resources: {}
            terminationMessagePath: /dev/termination-log
            terminationMessagePolicy: File
            volumeMounts:
            - mountPath: /opt/scripts
              name: detect-nfs-stale-scripts
            env:
            # Propagated into the generated per-node CronJobs as their image.
            - name: CONTAINER_TOOLS_IMAGE
              value: {{ .Values.image }} # https://medium.com/@tamber/mini-howto-the-ultimate-container-tool-image-for-openshift-8e338094822e
          volumes:
          - configMap:
              name: detect-nfs-stale-scripts
            name: detect-nfs-stale-scripts
          dnsPolicy: ClusterFirst
          restartPolicy: OnFailure
          schedulerName: default-scheduler
          securityContext: {}
          serviceAccount: detect-nfs-stale
          serviceAccountName: detect-nfs-stale
          terminationGracePeriodSeconds: 30
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: detect-nfs-stale-scripts
  namespace: openshift-monitoring
data:
  # Enumerates worker nodes (drops master/infra rows and the header line)
  # and applies one CronJob per node, pinned to that node via nodeSelector,
  # so the stale-NFS detection runs on every worker.
  detect-nfs-stale.sh: |
    # ASCII quotes are required around the awk program — typographic quotes
    # would be passed to awk as part of the program text and fail.
    NODES=$(oc get nodes | grep -v master | grep -v infra | awk '{print $1}' | grep -v NAME)
    for i in $NODES; do
      # Regenerate the child CronJob manifest from scratch each iteration.
      echo "" > /tmp/job.yaml
      echo "apiVersion: batch/v1" >> /tmp/job.yaml
      echo "kind: CronJob" >> /tmp/job.yaml
      echo "metadata:" >> /tmp/job.yaml
      echo "  name: detect-stale-nfs-$i" >> /tmp/job.yaml
      echo "  namespace: openshift-monitoring" >> /tmp/job.yaml
      echo "spec:" >> /tmp/job.yaml
      echo "  concurrencyPolicy: Allow" >> /tmp/job.yaml
      echo "  failedJobsHistoryLimit: 3" >> /tmp/job.yaml
      echo "  schedule: '50 * * * *'" >> /tmp/job.yaml
      echo "  successfulJobsHistoryLimit: 3" >> /tmp/job.yaml
      echo "  suspend: false" >> /tmp/job.yaml
      echo "  jobTemplate:" >> /tmp/job.yaml
      echo "    spec:" >> /tmp/job.yaml
      echo "      template:" >> /tmp/job.yaml
      echo "        spec:" >> /tmp/job.yaml
      echo "          containers:" >> /tmp/job.yaml
      echo "          - command: " >> /tmp/job.yaml
      echo "            - /bin/bash" >> /tmp/job.yaml
      echo "            args:" >> /tmp/job.yaml
      echo "            - /opt/scripts/detect-nfs-delete-job-script.sh" >> /tmp/job.yaml
      echo "            image: ${CONTAINER_TOOLS_IMAGE}" >> /tmp/job.yaml
      echo "            imagePullPolicy: IfNotPresent" >> /tmp/job.yaml
      echo "            name: detect-stale-nfs" >> /tmp/job.yaml
      echo "            volumeMounts:" >> /tmp/job.yaml
      echo "            - mountPath: /opt/scripts" >> /tmp/job.yaml
      echo "              name: detect-nfs-delete-job-script" >> /tmp/job.yaml
      echo "            env:" >> /tmp/job.yaml
      echo "            - name: NODE" >> /tmp/job.yaml
      echo "              value: $i" >> /tmp/job.yaml
      echo "          volumes:" >> /tmp/job.yaml
      echo "          - configMap:" >> /tmp/job.yaml
      echo "              name: detect-nfs-delete-job-script" >> /tmp/job.yaml
      echo "            name: detect-nfs-delete-job-script" >> /tmp/job.yaml
      echo "          serviceAccount: detect-nfs-stale" >> /tmp/job.yaml
      echo "          serviceAccountName: detect-nfs-stale" >> /tmp/job.yaml
      echo "          restartPolicy: Never" >> /tmp/job.yaml
      # Pin the child job to this specific node.
      echo "          nodeSelector:" >> /tmp/job.yaml
      echo "            kubernetes.io/hostname: $i" >> /tmp/job.yaml
      echo "" >> /tmp/job.yaml
      cat /tmp/job.yaml
      oc apply -f /tmp/job.yaml
    done
---
# node-health-check
# Hourly CronJob that runs the generator script from the
# node-health-scripts ConfigMap; the script fans out one per-node
# child CronJob (see the ConfigMap below).
apiVersion: batch/v1
kind: CronJob
metadata:
  name: node-health-check
  namespace: openshift-monitoring
spec:
  concurrencyPolicy: Allow
  failedJobsHistoryLimit: 1
  # NOTE: must be ASCII single quotes — typographic quotes make this an
  # invalid cron expression.
  schedule: '25 * * * *'
  successfulJobsHistoryLimit: 3
  suspend: false
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - command:
            - /bin/sh
            args:
            - /opt/scripts/node-health.sh
            image: {{ .Values.image }} # https://medium.com/@tamber/mini-howto-the-ultimate-container-tool-image-for-openshift-8e338094822e
            imagePullPolicy: IfNotPresent
            name: node-health-check
            resources: {}
            terminationMessagePath: /dev/termination-log
            terminationMessagePolicy: File
            volumeMounts:
            - mountPath: /opt/scripts
              name: node-health-scripts
            env:
            # Propagated into the generated per-node CronJobs as their image.
            - name: CONTAINER_TOOLS_IMAGE
              value: {{ .Values.image }} # https://medium.com/@tamber/mini-howto-the-ultimate-container-tool-image-for-openshift-8e338094822e
          volumes:
          - configMap:
              name: node-health-scripts
            name: node-health-scripts
          dnsPolicy: ClusterFirst
          restartPolicy: OnFailure
          schedulerName: default-scheduler
          securityContext: {}
          serviceAccount: node-health-check
          serviceAccountName: node-health-check
          terminationGracePeriodSeconds: 30
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: node-health-scripts
  namespace: openshift-monitoring
data:
  # Enumerates worker nodes (drops master/infra rows and the header line)
  # and applies one CronJob per node, pinned to that node via nodeSelector,
  # so the health check runs on every worker.
  node-health.sh: |
    # ASCII quotes are required around the awk program — typographic quotes
    # would be passed to awk as part of the program text and fail.
    NODES=$(oc get nodes | grep -v master | grep -v infra | awk '{print $1}' | grep -v NAME)
    for i in $NODES; do
      # Regenerate the child CronJob manifest from scratch each iteration.
      echo "" > /tmp/job.yaml
      echo "apiVersion: batch/v1" >> /tmp/job.yaml
      echo "kind: CronJob" >> /tmp/job.yaml
      echo "metadata:" >> /tmp/job.yaml
      echo "  name: node-health-check-$i" >> /tmp/job.yaml
      echo "  namespace: openshift-monitoring" >> /tmp/job.yaml
      echo "spec:" >> /tmp/job.yaml
      echo "  concurrencyPolicy: Allow" >> /tmp/job.yaml
      echo "  failedJobsHistoryLimit: 3" >> /tmp/job.yaml
      echo "  schedule: '30 * * * *'" >> /tmp/job.yaml
      echo "  successfulJobsHistoryLimit: 3" >> /tmp/job.yaml
      echo "  suspend: false" >> /tmp/job.yaml
      echo "  jobTemplate:" >> /tmp/job.yaml
      echo "    spec:" >> /tmp/job.yaml
      echo "      template:" >> /tmp/job.yaml
      echo "        spec:" >> /tmp/job.yaml
      echo "          containers:" >> /tmp/job.yaml
      echo "          - command: " >> /tmp/job.yaml
      echo "            - /bin/bash" >> /tmp/job.yaml
      echo "            args:" >> /tmp/job.yaml
      echo "            - /opt/scripts/delete-job-script.sh" >> /tmp/job.yaml
      echo "            image: ${CONTAINER_TOOLS_IMAGE}" >> /tmp/job.yaml
      echo "            imagePullPolicy: IfNotPresent" >> /tmp/job.yaml
      echo "            name: node-health-check" >> /tmp/job.yaml
      echo "            volumeMounts:" >> /tmp/job.yaml
      echo "            - mountPath: /opt/scripts" >> /tmp/job.yaml
      echo "              name: delete-job-script" >> /tmp/job.yaml
      echo "            env:" >> /tmp/job.yaml
      echo "            - name: NODE" >> /tmp/job.yaml
      echo "              value: $i" >> /tmp/job.yaml
      echo "          volumes:" >> /tmp/job.yaml
      echo "          - configMap:" >> /tmp/job.yaml
      echo "              name: delete-job-script" >> /tmp/job.yaml
      echo "            name: delete-job-script" >> /tmp/job.yaml
      echo "          serviceAccount: node-health-check" >> /tmp/job.yaml
      echo "          serviceAccountName: node-health-check" >> /tmp/job.yaml
      echo "          restartPolicy: Never" >> /tmp/job.yaml
      # Pin the child job to this specific node.
      echo "          nodeSelector:" >> /tmp/job.yaml
      echo "            kubernetes.io/hostname: $i" >> /tmp/job.yaml
      echo "" >> /tmp/job.yaml
      cat /tmp/job.yaml
      oc apply -f /tmp/job.yaml
    done
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment