<docs>
SAM-Kaibu: interactive point-prompt segmentation in the Kaibu viewer.
The plugin loads a demo image, computes image embeddings with an
EfficientSAM encoder served through a BioEngine (Triton) endpoint, and
turns each point the user clicks into a polygon annotation produced by
the EfficientSAM decoder.
</docs>
<config lang="json">
{
  "name": "SAM-Kaibu",
  "type": "iframe",
  "tags": [],
  "ui": "",
  "version": "0.1.0",
  "cover": "",
  "description": "Interactive point-prompt segmentation with EfficientSAM in the Kaibu viewer.",
  "icon": "extension",
  "inputs": null,
  "outputs": null,
  "api_version": "0.1.8",
  "env": "",
  "permissions": [],
  "requirements": [],
  "dependencies": []
}
</config>
<script lang="javascript">
class ImJoyPlugin {
  async setup() {
    api.log('initialized')
    // Load the web-python worker (attached below) that talks to the BioEngine server.
    const py_plugin = await api.loadPlugin(await api.getAttachment("sam-kaibu-py"))
    this.py_plugin = py_plugin
    // Fetch a demo image and let the Python worker decode it into an array.
    const demoImg = "https://images.proteinatlas.org/61448/1319_C10_2_blue_red_green.jpg"
    const imgBytes = await (await fetch(demoImg)).arrayBuffer()
    const img_arr = await py_plugin.load_image_from_bytes("demoImg.jpg", imgBytes)
    // Open a Kaibu viewer window and display the image.
    const viewer = await api.createWindow({src: 'https://kaibu.org/#/app', w: 80, h: 40})
    await viewer.view_image(img_arr, {name: 'demoImg'})
    // Layer that will hold the polygons returned by the segmentation model.
    const annotation_layer = await viewer.add_shapes([], {name: 'annotation_layer'})
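    // The interaction layer below captures user clicks. Kaibu passes each new
    // shape to add_feature_callback as a GeoJSON-style feature; for the point
    // tool this is assumed to look roughly like
    //   { type: "Feature", geometry: { type: "Point", coordinates: [x, y] }, ... }
    // (an assumption based on the fields accessed below). Each clicked point is
    // forwarded to the Python worker, and the returned mask outline is added to
    // annotation_layer as a polygon.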
    const interact_layer = await viewer.add_shapes([], {
      name: 'interact_layer',
      _rintf: true,
      add_feature_callback: async (shape) => {
        console.log(shape)
        if (shape.geometry.type === "Point") {
          console.log("add point at", shape.geometry.coordinates)
          // Run the decoder for this point prompt in the Python worker.
          const coords = await this.py_plugin.run_segmentation(shape.geometry.coordinates)
          console.log("coords", coords)
          // Wrap the returned outline as a GeoJSON Polygon feature.
          const polygon = {
            type: "Feature",
            geometry: {
              type: "Polygon",
              coordinates: [coords],
            },
            properties: {
              edge_color: "#FF0000",
              edge_width: 2,
              face_color: "#FF00000F",
              size: 7,
            },
          }
          annotation_layer.add_feature(polygon)
        }
      },
    })
    // Pre-compute the image embeddings so each point prompt only runs the decoder.
    await py_plugin.run_encoder(img_arr)
  }
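
  // Note: getEmbeddings below is an unused alternative that would call the
  // Triton service directly from JavaScript; it relies on `this.triton`, which
  // is never set up in this plugin. Embeddings are instead computed by the
  // Python worker's run_encoder.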
  async getEmbeddings(img) {
    const embeddings = await this.triton.execute({
      inputs: [img],
      //model_name: "efficientsam-encoder",
      model_name: "cellpose-python",
      decode_json: true,
      _rkwargs: true,
    })
    return embeddings
  }
  async run(ctx) {
    //api.alert('hello world.')
  }
}
api.export(new ImJoyPlugin())
</script>
<attachment name="sam-kaibu-py">
<config lang="json">
{
  "name": "sam-kaibu-py",
  "type": "web-python",
  "tags": [],
  "flags": [],
  "ui": "",
  "version": "0.1.0",
  "cover": "",
  "description": "Connect to the BioEngine server and run model inference.",
  "icon": "extension",
  "inputs": null,
  "outputs": null,
  "api_version": "0.1.8",
  "env": "",
  "permissions": [],
  "requirements": ["imageio", "scikit-image", "numpy"],
  "dependencies": []
}
</config>
<script lang="python">
from imjoy import api
import io
import imageio
from imjoy_rpc.hypha import connect_to_server
import numpy as np
from skimage.measure import find_contours, approximate_polygon


class ImJoyPlugin:
    async def setup(self):
        api.log('initialized')
        self.server = await connect_to_server({
            "server_url": "http://127.0.0.1:9520",
            "name": "client",
            "method_timeout": 30,
        })
        self.triton = await self.server.get_service("triton-client")
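
    # The connection above assumes a BioEngine/Hypha instance running locally
    # on port 9520 that registers a "triton-client" service; change
    # "server_url" if your deployment lives elsewhere.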

    async def run_encoder(self, img):
        # HWC uint8 image -> NCHW float32 in [0, 1], as the encoder expects.
        img = img.transpose(2, 0, 1)[None].astype(np.float32) / 255.0
        print(img.shape)
        ret = await self.triton.execute(
            inputs=[img],
            model_name="efficientsam-encoder",
        )
        embeddings = ret['image_embeddings']
        self.embeddings = embeddings
        self.orig_size = img.shape[2:]
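
    # For reference: the encoder receives a (1, 3, H, W) float32 tensor and the
    # embeddings are read from the "image_embeddings" field of the Triton
    # result; the exact embedding shape depends on the model.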

    async def run_segmentation(self, point):
        # A single positive point prompt: points (1, 1, 1, 2), labels (1, 1, 1).
        input_points = np.array([[[point]]]).astype(np.float32)
        print("input_points", input_points.shape)
        input_labels = np.ones((1, 1, 1)).astype(np.float32)
        print("input_labels", input_labels.shape)
        orig_size = np.array(self.orig_size, dtype=np.int64)
        print("orig_size", orig_size.shape)
        print("embeddings", self.embeddings.shape)
        res = await self.run_decoder(
            self.embeddings, input_labels, orig_size, input_points
        )
        # Binarize the first predicted mask and trace its outline.
        mask = res[0, 0, 0] > 0
        print("mask", mask.shape)
        contours = find_contours(mask)
        print("contours", len(contours), contours[0].shape)
        coords = approximate_polygon(contours[0], tolerance=2.5)
        print("coords", coords.shape)
        res = coords.tolist()
        # find_contours yields (row, col); the viewer expects (x, y).
        res = [[p[1], p[0]] for p in res]
        return res

    async def run_decoder(self, embeddings, labels, orig_size, points):
        ret = await self.triton.execute(
            inputs=[labels, orig_size, points, embeddings],
            model_name="efficientsam-decoder",
        )
        return ret['output_masks']
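
    # Assumption: "output_masks" comes back roughly as
    # (batch, num_prompts, num_masks_per_prompt, H, W); run_segmentation above
    # keeps only the first mask via res[0, 0, 0].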

    async def load_image_from_bytes(self, file_name, img_bytes):
        _file = io.BytesIO(img_bytes)
        _file.name = file_name
        if file_name.endswith(".tif") or file_name.endswith(".tiff"):
            image = imageio.volread(_file)
        else:
            image = imageio.imread(_file)
        await api.log(
            "Image loaded with shape: " + str(image.shape) +
            " and dtype: " + str(image.dtype)
        )
        return image

    def run(self, ctx):
        pass


api.export(ImJoyPlugin())
</script>
</attachment>