A Jenkins script to clean up workspaces on slaves. On every slave with less than X GB free, this script removes /slaves/workspace/* if the slave is idle. If the slave is not idle, it removes /full/job/workspace/path for each job that isn't currently running. There are two versions: a "System Groovy Script" and a Jenkinsfile (Pipeline script).
/**
 Jenkins System Groovy script to clean up workspaces on all slaves.
 Checks whether a slave has less than X GB of free space and performs a cleanup if so.
 If the slave is idle, wipes out everything in the workspace directory, as well as any extra configured directories.
 If the slave is busy, wipes out the individual job workspace directories of jobs that aren't running.
 Either way, also removes custom workspaces if they aren't in use.
**/
import hudson.model.*
import hudson.util.*
import jenkins.model.*
import hudson.FilePath.FileCallable
import hudson.slaves.OfflineCause
import hudson.node_monitors.*

//threshold is in GB and comes from a job parameter
def threshold = Integer.parseInt(build.buildVariableResolver.resolve("CLEAN_THRESHOLD"))
def skippedLabels = [ 'container' ] //don't clean docker slaves
def extraDirectoriesToDelete = [ 'temp' ] //additional paths under the slave's root path that should be removed if found
/** Delete a remote path; with deleteContentsOnly, empty the directory but keep it. Returns false on failure. */
def deleteRemote(def path, boolean deleteContentsOnly) {
    boolean result = true
    def pathAsString = path.getRemote()
    if (path.exists()) {
        try {
            if (deleteContentsOnly) {
                path.deleteContents()
                println ".... deleted ALL contents of ${pathAsString}"
            } else {
                path.deleteRecursive()
                println ".... deleted directory ${pathAsString}"
            }
        } catch (Throwable t) {
            println "Failed to delete ${pathAsString}: ${t}"
            result = false
        }
    }
    return result
}
def failedNodes = []

for (node in Jenkins.instance.nodes) {
    computer = node.toComputer()
    if (computer.getChannel() == null) {
        continue
    }
    if (node.assignedLabels.find{ it.expression in skippedLabels }) {
        println "Skipping ${node.displayName} based on labels"
        continue
    }
    try {
        size = DiskSpaceMonitor.DESCRIPTOR.get(computer).size
        roundedSize = size / (1024 * 1024 * 1024) as int
        println("node: " + node.getDisplayName() + ", free space: " + roundedSize + "GB. Idle: ${computer.isIdle()}")
        if (roundedSize < threshold) {
            def prevOffline = computer.isOffline()
            if (prevOffline && computer.getOfflineCauseReason().startsWith('disk cleanup from job')) {
                prevOffline = false //previous run screwed up, ignore it and clear it at the end
            }
            if (!prevOffline) {
                //don't override any previously set temporarily-offline causes (possibly set by a human)
                computer.setTemporarilyOffline(true, new hudson.slaves.OfflineCause.ByCLI("disk cleanup from job ${build.displayName}"))
            }
            if (computer.isIdle()) {
                //It's idle, so delete everything under workspace
                def workspaceDir = node.rootPath.child('workspace')
                if (!deleteRemote(workspaceDir, true)) {
                    failedNodes << node
                }
                //delete custom workspaces
                Jenkins.instance.getAllItems(TopLevelItem).findAll{ item -> item instanceof Job && !("${item.class}".contains('WorkflowJob')) && item.getCustomWorkspace() }.each{ item ->
                    if (!deleteRemote(node.getRootPath().child(item.customWorkspace), false)) {
                        failedNodes << node
                    }
                }
                extraDirectoriesToDelete.each{
                    if (!deleteRemote(node.getRootPath().child(it), false)) {
                        failedNodes << node
                    }
                }
            } else {
                Jenkins.instance.getAllItems(TopLevelItem).findAll{ item -> item instanceof Job && !item.isBuilding() && !("${item.class}".contains('WorkflowJob')) }.each{ item ->
                    jobName = item.getFullDisplayName()
                    //println(".. checking workspaces of job " + jobName)
                    workspacePath = node.getWorkspaceFor(item)
                    if (!workspacePath) {
                        println(".... could not get workspace path for ${jobName}")
                        return
                    }
                    //println(".... workspace = " + workspacePath)
                    customWorkspace = item.getCustomWorkspace()
                    if (customWorkspace) {
                        workspacePath = node.getRootPath().child(customWorkspace)
                        //println(".... custom workspace = " + workspacePath)
                    }
                    if (!deleteRemote(workspacePath, false)) {
                        failedNodes << node
                    }
                }
            }
            if (!prevOffline) {
                computer.setTemporarilyOffline(false, null)
            }
        }
    } catch (Throwable t) {
        println "Error with ${node.displayName}: ${t}"
        failedNodes << node
    }
}

println "\n\nSUMMARY\n\n"
failedNodes.each{ node ->
    println "\tERRORS with: ${node.displayName}"
}
assert failedNodes.size() == 0
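
The System Groovy version above is meant to run as an "Execute system Groovy script" build step (Groovy plugin) in a parameterized freestyle job, so that build.buildVariableResolver can see CLEAN_THRESHOLD. As a minimal sketch, a Job DSL definition for such a job might look like the following; the job name and script file name are hypothetical, and the systemGroovyScriptFile step assumes the Groovy plugin is installed:

// Hypothetical Job DSL sketch: a parameterized freestyle job that runs the
// System Groovy script above on a schedule. Names here are made up.
job('slave-workspace-cleanup') {
    parameters {
        // Read by the script via build.buildVariableResolver.resolve("CLEAN_THRESHOLD")
        stringParam('CLEAN_THRESHOLD', '20', 'Slave must have less than this # of GB to be cleaned')
    }
    triggers {
        cron('@hourly')
    }
    steps {
        // Requires the Groovy plugin; a system Groovy script runs inside the master JVM
        systemGroovyScriptFile('cleanup-workspaces.groovy')
    }
}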
/**
 Jenkins Pipeline script to clean up workspaces on all slaves.
 Checks whether a slave has less than X GB of free space and performs a cleanup if so.
 If the slave is idle, wipes out everything in the workspace directory, as well as any extra configured directories.
 If the slave is busy, wipes out the individual job workspace directories of jobs that aren't running.
 Either way, also removes custom workspaces if they aren't in use.
**/
import hudson.model.*
import hudson.util.*
import jenkins.model.*
import hudson.node_monitors.*

properties(
    [
        buildDiscarder(logRotator(artifactDaysToKeepStr: '', artifactNumToKeepStr: '', daysToKeepStr: '', numToKeepStr: '25')),
        disableConcurrentBuilds(),
        disableResume(),
        parameters([
            string(defaultValue: '20', description: 'Slave must have less than this # of GB to be cleaned', name: 'CLEAN_THRESHOLD', trim: false),
            booleanParam(defaultValue: false, description: 'Enable verbosity', name: 'DEBUG')
        ]),
        pipelineTriggers([cron('@hourly'), pollSCM('')]) //if SCM triggers are set up
    ]
)
Jenkins.instance.getItemByFullName(env.JOB_NAME).setDescription('''
<pre>
For each slave with low disk space:
 * Empties the workspace directory (if the slave is idle)
 * Deletes individual job workspace directories (if the slave is busy)
 * Deletes directories matching custom workspace directories configured in any job
 * Deletes a hardcoded list of additional directories
Runs at random times to catch recurring jobs
Script from https://gist.github.com/rb2k/8372402
(actually a fork of it with fixes for folders: https://gist.github.com/akomakom/481507c0dd79ec52a395)
</pre>
''')
/** GLOBALS **/
//threshold is in GB and comes from a job parameter
threshold = Integer.parseInt(params.CLEAN_THRESHOLD)
debug = Boolean.valueOf(params.DEBUG)
countFullClean = 0
countPartialClean = 0
failedNodes = []
def skippedLabels = [ 'container' ] //don't clean docker slaves
extraDirectoriesToDelete = [ 'somethingRelative', '/something/absolute' ] //additional paths under the slave's root path (or absolute paths) that should be removed if found
def debug(def string) {
    if (debug) {
        echo "DEBUG: ${string}"
    }
}

/** Delete a remote path; with deleteContentsOnly, empty the directory but keep it. Returns false on failure. */
def deleteRemote(def path, boolean deleteContentsOnly) {
    boolean result = true
    def pathAsString = path.getRemote()
    debug "Asked to delete ${path} deleteContentsOnly=${deleteContentsOnly ? 'yes' : 'no'}"
    if (path.exists()) {
        try {
            if (deleteContentsOnly) {
                path.deleteContents()
                echo ".... deleted ALL contents of ${pathAsString}"
            } else {
                path.deleteRecursive()
                echo ".... deleted directory ${pathAsString}"
            }
        } catch (Throwable t) {
            echo "Failed to delete ${pathAsString}: ${t}"
            result = false
        }
    }
    return result
}
def cleanNode(node, computer) {
    size = DiskSpaceMonitor.DESCRIPTOR.get(computer).size
    roundedSize = size / (1024 * 1024 * 1024) as int
    echo("node: " + node.getDisplayName() + ", free space: " + roundedSize + "GB. Idle: ${computer.isIdle()}")
    if (roundedSize < threshold) {
        def prevOffline = computer.isOffline()
        if (prevOffline && computer.getOfflineCauseReason().startsWith('disk cleanup from job')) {
            prevOffline = false //previous run screwed up, ignore it and clear it at the end
        }
        if (!prevOffline) {
            //don't override any previously set temporarily-offline causes (possibly set by a human)
            computer.setTemporarilyOffline(true, new hudson.slaves.OfflineCause.ByCLI("disk cleanup from job ${currentBuild.displayName}"))
        }
        if (computer.isIdle()) {
            //It's idle, so delete everything under workspace
            countFullClean++
            def workspaceDir = node.rootPath.child('workspace')
            debug "Deleting workspace ${workspaceDir}"
            if (!deleteRemote(workspaceDir, true)) {
                failedNodes << node
            }
            //delete custom workspaces
            Jenkins.instance.getAllItems(TopLevelItem).findAll { item -> item instanceof Job && !("${item.class}".contains('WorkflowJob')) && item.getCustomWorkspace() }.each { item ->
                debug "Deleting custom workspace ${item.customWorkspace}"
                if (!deleteRemote(node.getRootPath().child(item.customWorkspace), false)) {
                    failedNodes << node
                }
            }
            extraDirectoriesToDelete.each {
                def pathToDelete = it.startsWith('/') ? node.createPath(it) : node.getRootPath().child(it)
                debug "Deleting extra dir ${pathToDelete}"
                if (!deleteRemote(pathToDelete, false)) {
                    failedNodes << node
                }
            }
        } else {
            countPartialClean++
            Jenkins.instance.getAllItems(TopLevelItem).findAll { item -> item instanceof Job && !item.isBuilding() && !("${item.class}".contains('WorkflowJob')) }.each { item ->
                jobName = item.getFullDisplayName()
                //echo(".. checking workspaces of job " + jobName)
                workspacePath = node.getWorkspaceFor(item)
                if (!workspacePath) {
                    echo(".... could not get workspace path for ${jobName}")
                    return
                }
                //echo(".... workspace = " + workspacePath)
                customWorkspace = item.getCustomWorkspace()
                if (customWorkspace) {
                    workspacePath = node.getRootPath().child(customWorkspace)
                    //echo(".... custom workspace = " + workspacePath)
                }
                if (!deleteRemote(workspacePath, false)) {
                    failedNodes << node
                }
            }
        }
        if (!prevOffline) {
            computer.setTemporarilyOffline(false, null)
        }
    }
}
timestamps {
    for (node in Jenkins.instance.nodes) {
        def computer = node.toComputer()
        if (computer.getChannel() == null) {
            continue
        }
        if (node.assignedLabels.find { it.expression in skippedLabels }) {
            echo "Skipping ${node.displayName} based on labels"
            continue
        }
        try {
            cleanNode(node, computer)
        } catch (Throwable t) {
            echo "Error with ${node.displayName}: ${t}"
            failedNodes << node
        }
    }

    echo "\n\n"
    failedNodes.each { node ->
        echo "\tERRORS with: ${node.displayName}"
    }
}
assert failedNodes.size() == 0
currentBuild.setDescription("Full: ${countFullClean}, Partial: ${countPartialClean}")
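
Before pointing either version at a busy farm, it can help to see which slaves would actually cross the threshold. Here is a minimal read-only sketch for the Jenkins Script Console, reusing the same DiskSpaceMonitor lookup and GB arithmetic as the scripts above; the null guard is there because the monitor may not have sampled a freshly connected slave yet:

// Minimal Script Console sketch: report free disk space per connected slave.
import jenkins.model.Jenkins
import hudson.node_monitors.DiskSpaceMonitor

Jenkins.instance.nodes.each { node ->
    def computer = node.toComputer()
    if (computer?.channel) {
        def space = DiskSpaceMonitor.DESCRIPTOR.get(computer) //may be null until the monitor has run
        def gb = space ? (space.size / (1024 * 1024 * 1024)) as int : null
        def msg = (gb != null) ? "${gb} GB free" : 'no data yet'
        println "${node.displayName}: ${msg}"
    }
}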
That's because you were trying to run a "System Groovy Script" as a "Jenkins Pipeline Script". I was in fact using this on swarms.
However, I'm adding a Pipeline version today...
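
The practical difference the comment is pointing at: a System Groovy script runs in the master JVM via the Groovy plugin's build step and sees build and println, while a Pipeline script sees params, currentBuild, and echo. A minimal side-by-side sketch, with the parameter name taken from the scripts above:

// System Groovy script (freestyle job, "Execute system Groovy script" step):
def threshold = Integer.parseInt(build.buildVariableResolver.resolve("CLEAN_THRESHOLD"))
println "threshold = ${threshold}"

// Pipeline script (Jenkinsfile / Pipeline job):
def threshold = Integer.parseInt(params.CLEAN_THRESHOLD)
echo "threshold = ${threshold}"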