-
-
Save rsimon/eac82d0ab15ab3e6c635183c923b67cb to your computer and use it in GitHub Desktop.
import OpenSeadragon from 'openseadragon'; | |
import * as Annotorious from '@recogito/annotorious-openseadragon'; | |
import '@recogito/annotorious-openseadragon/dist/annotorious.min.css'; | |
/************************************************************************* | |
* | |
* Basic concept for this is from the official OpenCV docs: | |
* https://docs.opencv.org/3.4/dc/dcf/tutorial_js_contour_features.html | |
* | |
*************************************************************************/ | |
// Helper: splits a flat array into consecutive sub-arrays of the given size
// (the final chunk may be shorter if the length isn't a multiple of size)
const chunk = (array, size) => {
  const result = [];
  for (let offset = 0; offset < array.length; offset += size) {
    result.push(array.slice(offset, offset + size));
  }
  return result;
}
// Helper: wraps a list of [x, y] pairs into a dummy W3C Web Annotation
// with an SVG polygon selector (fixed placeholder id, empty body)
const toAnnotation = coords => {
  const points = coords.map(([px, py]) => `${px},${py}`).join(' ');
  return {
    "@context": "http://www.w3.org/ns/anno.jsonld",
    "id": "#a88b22d0-6106-4872-9435-c78b5e89fede",
    "type": "Annotation",
    "body": [],
    "target": {
      "selector": [{
        "type": "SvgSelector",
        "value": `<svg><polygon points='${points}'></polygon></svg>`
      }]
    }
  };
};
/** | |
* Cuts the selected image snippet from the OpenSeadragon CANVAS element. | |
*/ | |
const getSnippet = (viewer, annotation) => { | |
// Scale factor for OSD canvas element (physical vs. logical resolution) | |
const { canvas } = viewer.drawer; | |
const canvasBounds = canvas.getBoundingClientRect(); | |
const kx = canvas.width / canvasBounds.width; | |
const ky = canvas.height / canvasBounds.height; | |
// Parse fragment selector (image coordinates) | |
// WARNING: a hack that STRICTLY assumes a box selection | |
// from Annotorious (will break for polygon selections) | |
const [ xi, yi, wi, hi ] = annotation.target.selector.value | |
.split(':')[1] | |
.split(',') | |
.map(str => parseFloat(str)); | |
// Convert image coordinates (=annotation) to viewport coordinates (=OpenSeadragon canvas) | |
const topLeft = viewer.viewport.imageToViewerElementCoordinates(new OpenSeadragon.Point(xi, yi)); | |
const bottomRight = viewer.viewport.imageToViewerElementCoordinates(new OpenSeadragon.Point(xi + wi, yi + hi)); | |
const { x, y } = topLeft; | |
const w = bottomRight.x - x; | |
const h = bottomRight.y - y; | |
// Cut out the image snippet as in-memory canvas element | |
const snippet = document.createElement('CANVAS'); | |
const ctx = snippet.getContext('2d'); | |
snippet.width = w; | |
snippet.height = h; | |
ctx.drawImage(canvas, x * kx, y * ky, w * kx, h * ky, 0, 0, w * kx, h * ky); | |
// Return snippet canvas + basic properties useful for downstream coord translation | |
return { snippet, kx, ky, x: xi, y: yi }; | |
} | |
/**
 * Computer vision magic happens here: detects contours in the given canvas
 * with OpenCV.js (global `cv`) and returns the largest approximated closed
 * polygon.
 *
 * @param {HTMLCanvasElement} canvasInput - binary-ish image to analyze
 * @returns {Array<Array<number>>} polygon as [x, y] pairs in the canvas'
 *   pixel coordinate space; empty array if no contour was found
 */
const findContourPolygon = canvasInput => {
  const src = cv.imread(canvasInput);
  // Convert to grayscale & threshold (Otsu picks the threshold automatically)
  cv.cvtColor(src, src, cv.COLOR_RGB2GRAY, 0);
  cv.threshold(src, src, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU);
  // Find contours
  const contours = new cv.MatVector();
  const hierarchy = new cv.Mat();
  cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_NONE); // CV_RETR_EXTERNAL
  // Approximate closed polygons, keep only the largest
  let largestAreaPolygon = { area: 0, polygon: null };
  for (let i = 0; i < contours.size(); ++i) {
    const polygon = new cv.Mat();
    const contour = contours.get(i);
    cv.approxPolyDP(contour, polygon, 3, true);
    const area = cv.contourArea(polygon);
    if (area > largestAreaPolygon.area) {
      // FIX (was a TODO'd leak): free the previously-kept polygon Mat
      // before replacing it
      largestAreaPolygon.polygon?.delete();
      largestAreaPolygon = { area, polygon };
    } else {
      // FIX: free polygon Mats we don't keep
      polygon.delete();
    }
    contour.delete();
  }
  // FIX: guard against "no contours found" (original crashed on undefined),
  // and copy the vertex data out BEFORE delete() — data32S is a view into
  // WASM memory that becomes invalid once the Mat is freed.
  const flatCoords = largestAreaPolygon.polygon
    ? Array.from(largestAreaPolygon.polygon.data32S)
    : [];
  largestAreaPolygon.polygon?.delete();
  src.delete();
  hierarchy.delete();
  contours.delete();
  // data32S is flat [x0, y0, x1, y1, ...] — pair it up
  return chunk(flatCoords, 2);
}
(function() {
  // Set up the OpenSeadragon viewer
  const viewer = OpenSeadragon({
    id: "openseadragon",
    prefixUrl: "/images/",
    tileSources: {
      type: "image",
      // Attribution: Grundriss der Kaiserl. Königl. Haupt und Residenzstadt Wien, Max von Grimm
      // http://sammlung.woldan.oeaw.ac.at/layers/geonode:ac04382777_grimm_wien_1806
      url: "/ac04382777_grimm_wien_1806.jpg"
    }
  });
  // Attach Annotorious to the viewer
  const anno = Annotorious(viewer, { widgets: [ 'TAG' ] });
  // On selection: cut snippet, find contours, update the annotation
  anno.on('createSelection', async selection => {
    // Extract the image snippet, recording:
    // - image snippet (as canvas element)
    // - x/y coordinate of the snippet top-left (image coordinate space)
    // - kx/ky scale factors between canvas element physical and logical dimensions
    const { snippet, x, y, kx, ky } = getSnippet(viewer, selection);
    // Current image zoom from OSD
    const imageZoom = viewer.viewport.viewportToImageZoom(viewer.viewport.getZoom());
    // Contour polygon, in the snippet element's coordinate space
    const localCoords = findContourPolygon(snippet);
    // Translate each point to image coordinate space
    const coords = localCoords.map(([cx, cy]) => [
      x + (cx / kx) / imageZoom,
      y + (cy / ky) / imageZoom
    ]);
    // Wrap the coords into a W3C Web Annotation
    const annotation = toAnnotation(coords);
    // Replace the current user selection with the detected polygon
    setTimeout(() => {
      anno.setAnnotations([ annotation ]);
      anno.selectAnnotation(annotation);
    }, 10);
  });
})();
See https://github.com/recogito/annotorious-openseadragon for the Annotorious annotation plugin to OpenSeadragon
@rsimon. This is wonderful. We have an implementation of this with Digital Object storage and full annotations CRUD operations working on our repository system. This simplistic JS is the glue https://github.com/esmero/format_strawberryfield/blob/1.0.0-RC1/js/iiif-openseadragon_strawberry.js. I want to give your code a try and see what I can get rolling. If this works, how would you like/prefer to be attributed? Will share my code once done (probably by tomorrow afternoon or Friday, as we're developing on many fronts).
Thanks again, this is inspiring
Wow this is fantastic! I wasn't aware of AnnotoriousOSD already being in production like this.
Concerning attribution: although it's not required, if you mention Annotorious somewhere & point back to the GitHub repo, that would be greatly appreciated of course.
Another question: I'm starting to collect "who's using Annotorious" examples, to add to the Readme/docs. (See here for an example - scroll to the bottom.) Would you be open to letting me list you as a user? Let me know how you want to be attributed + whether you would be happy for me to include a logo image.
P.S.: I'm almost certainly going to evolve that code into an official add-on eventually. (Along with a few other CV/AI-based helpers.)
@rsimon. Great. We have a few live instances already running it and the workflow is making people quite happy. We keep annotations in local cache until people are ready to commit them into the actual digital Object metadata and then those can be used in IIIF manifests too. Happy to share our use case of your work with the world. Feel free to link/share in any way (it's 100% open source) or shoot me an email and I can add more details at [email protected]. Will make sure to add it to our release notes, documentation and in code too and of course link back. Thanks a lot. I feel I will have fun adapting the code on our side too. Cheers
Cool, thanks. Would it be ok to list this under "Who's Using Annotorious" as Archipelago open source digital collections software, by the Metropolitan New York Library Council? Given that Archipelago doesn't have a logo itself, is use of the 'Metro' logo (from https://metro.org/) acceptable?
Of course, thanks @rsimon. Yeah, we do more code than logos lately. If you could use "Archipelago Open Source Digital Object Repository Software" that would be even better, but both work and we really appreciate it. Will share with you our documentation language soon and we will also make a Google group post soon about this integration (already 2-3 months old). If it's easier you can fetch the logo from here https://github.com/mnylc
And our entry point repo (link if you want, not mandatory) is https://github.com/esmero/archipelago-deployment
Wonderful, thanks. I'll do exactly as you suggest. Will ping back when I added this to the Readme.
Hi @DiegoPino, I added the reference to the Annotorious OpenSeadragon Readme.
Hi @rsimon, is the annotation coordinates relative to the aspect ratio of image on the viewer or it is relative to the viewport's aspect ratio?
Hi @SreelakshmiNM, annotation coordinates are pixel coordinates in the original resolution of the base image.
@rsimon is there any proof ?
Ha ha :-) Well how about just trying out https://annotorious.com and see for yourself? ;-) The code above is just a proof of concept though. No guarantees this code will work with the latest version.
@rsimon how to zoom the annotation ,
Zoom so that the long side of the annotation is half the width or height of the image.
Not sure if this is what you mean. But if you want to zoom the viewport to show a specific annotation, with an optional padding around it: https://annotorious.github.io/api-docs/osd-plugin/#fitbounds
There's a not-yet-documented feature. You can call the method with anno.fitBounds(annotation, { padding: 40 })
and that would keep a 40px padding around the annotation when zooming.
@rsimon how did you download opencv in react application?
See https://twitter.com/aboutgeo/status/1335580210037710852 for an example