Forked from petebankhead/QuPath_cells_to_areas.groovy
Created
February 24, 2021 15:45
-
-
Save sbalci/9e44eec4c187fa2f52d8532d510755bf to your computer and use it in GitHub Desktop.
QuPath script to calculate areas (e.g. of tumor) based upon classified cells only
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/**
 * QuPath script to calculate areas (e.g. of tumor) based upon classified cells only.
 *
 * It works by using a distance transform applied to binary images generated from the
 * objects of each class, and then assigns each pixel to the closest object.
 *
 * Having done this, new annotations are generated for these regions.
 *
 * WARNINGS!
 *  - This will remove *all* previous annotations & replace them with new annotations
 *    only around the detections present, with classifications set.
 *  - The new annotations will be generated at a resolution specified by the
 *    requestedPixelSizeMicrons parameter below (you can adjust it if needed)
 *  - For very large images, or many cells, this might be quite slow.
 *  - This doesn't currently support TMAs in any meaningful way.
 *    For each classification, a single (possibly-disconnected) region is created.
 *    However it could be adapted to work for TMA cores if needed.
 *
 * ALWAYS SAVE YOUR DATA BEFORE RUNNING THIS SCRIPT!!!
 *
 * @author Pete Bankhead
 */

//-----------
// Key parameter! Determines the resolution of the final annotations
// Too high a value gives excessive 'blockiness', too low a value
// will be very slow to compute (if it works at all)
double requestedPixelSizeMicrons = 10.0
//-----------

// ImageJ classes used for the binary masks, distance transform & ROI tracing
import ij.measure.Calibration
import ij.plugin.filter.EDM
import ij.plugin.filter.ThresholdToSelection
import ij.process.ByteProcessor
import ij.process.FloatProcessor
import ij.process.ImageProcessor
// QuPath classes for image/object access and IJ<->QuPath ROI conversion
import qupath.imagej.objects.ROIConverterIJ
import qupath.lib.images.servers.ImageServer
import qupath.lib.objects.PathAnnotationObject
import qupath.lib.objects.PathDetectionObject
import qupath.lib.objects.PathObject
import qupath.lib.scripting.QPEx
// Get main data structures
def imageData = QPEx.getCurrentImageData()
def hierarchy = imageData.getHierarchy()
def server = imageData.getServer()

// Get all annotation & detection objects currently in the hierarchy
def annotations = hierarchy.getObjects(null, PathAnnotationObject)
def detections = hierarchy.getObjects(null, PathDetectionObject)

// Find the distinct (base) classifications represented among the detections
// (may contain null if some detections are unclassified)
def classifications = new ArrayList<>(detections.collect {it.getPathClass()?.getBaseClass()} as Set)

// Create a binary mask for all the annotations, at the requested resolution;
// pixels outside this mask are never assigned to any class
double downsample = requestedPixelSizeMicrons / server.getAveragedPixelSizeMicrons()
def bpAnnotations = createObjectMask(server, downsample, annotations, false)
int width = bpAnnotations.getWidth()
int height = bpAnnotations.getHeight()

// Compute a distance transform from the centroids of each detection, split by classification.
// For every pixel inside the annotation mask, record the 1-based label of the closest class
// in bpMinIdx (0 = unassigned), tracking the current minimum distance in fpMinDist.
def distanceMaps = [:]
def bpMinIdx = new ByteProcessor(width, height)
def fpMinDist = new FloatProcessor(width, height)
fpMinDist.set(Float.POSITIVE_INFINITY)
classifications.eachWithIndex { pathClass, label ->
    def pathObjects = detections.findAll {it.getPathClass()?.getBaseClass() == pathClass}
    def bp = createObjectMask(server, downsample, pathObjects, true)
    // Euclidean distance map: distance of each pixel to the nearest centroid of this class
    def fpDist = new EDM().makeFloatEDM(bp, (byte)255, false)
    distanceMaps[pathClass] = fpDist
    for (int i = 0; i < width*height; i++) {
        if (bpAnnotations.getf(i) == 0f)
            continue
        float dist = fpDist.getf(i)
        if (dist < fpMinDist.getf(i)) {
            fpMinDist.setf(i, dist)
            bpMinIdx.setf(i, label+1 as float)
        }
    }
}

// Clear existing objects (needed before we can add new annotations with older detections)
annotations.each {it.clearPathObjects()}
hierarchy.clearAll()

// For each class, threshold the label image to trace one (possibly-disconnected) ROI,
// convert it back to full-resolution QuPath coordinates, and re-attach the detections
def annotationsToAdd = []
classifications.eachWithIndex { pathClass, label ->
    bpMinIdx.setThreshold(label+0.5, label+1.5, ImageProcessor.NO_LUT_UPDATE)
    def roiIJ = new ThresholdToSelection().convert(bpMinIdx)
    def roi = ROIConverterIJ.convertToPathROI(roiIJ, new Calibration(), downsample, -1, 0, 0)
    def annotation = new PathAnnotationObject(roi, pathClass)
    def pathObjects = detections.findAll {it.getPathClass()?.getBaseClass() == pathClass}
    annotation.addPathObjects(pathObjects)
    annotationsToAdd << annotation
}
print annotationsToAdd
hierarchy.addPathObjects(annotationsToAdd, false)
/**
 * Create a binary image from a list of objects, either drawing the objects
 * fully or using their centroids.
 *
 * @param server the ImageServer supplying the full-resolution image dimensions
 * @param downsample factor by which the mask is smaller than the full-resolution image
 * @param pathObjects objects whose ROIs (or centroids) should be drawn into the mask
 * @param centroidsOnly if true, set only the single pixel at each object's centroid;
 *                      if false, fill each object's full ROI
 * @return a ByteProcessor with object pixels set to 255 and background 0
 */
def createObjectMask(ImageServer<?> server, double downsample, List<PathObject> pathObjects, boolean centroidsOnly) {
    def width = server.getWidth() / downsample as int
    def height = server.getHeight() / downsample as int
    def bp = new ByteProcessor(width, height)
    bp.setValue(255.0)
    for (pathObject in pathObjects) {
        def roi = pathObject.getROI()
        if (centroidsOnly) {
            // Clamp to the mask bounds: centroids near the right/bottom edge can round
            // to width/height after downsampling, and ByteProcessor.setf(x, y, v) does
            // not bounds-check, so an unclamped write could corrupt or throw
            int x = Math.min(Math.max(Math.round(roi.getCentroidX() / downsample) as int, 0), width - 1)
            int y = Math.min(Math.max(Math.round(roi.getCentroidY() / downsample) as int, 0), height - 1)
            bp.setf(x, y, 255f)
        } else {
            def roiIJ = ROIConverterIJ.convertToIJRoi(roi, 0, 0, downsample)
            bp.fill(roiIJ)
        }
    }
    return bp
}
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment