<docs lang="markdown">
## Tiktorch Model Loader for BioImage.io

This is a BioEngine App for running models from https://bioimage.io with Tiktorch.
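
Once installed, the app exposes `run_model(model_url, file_path=None)` to other ImJoy plugins. Below is a minimal sketch of calling it from another plugin; the plugin name and the demo model URL are taken from this file, but treat the snippet as an illustration rather than a tested client.

```python
from imjoy import api

class DemoClient():
    async def setup(self):
        pass

    async def run(self, ctx):
        # Assumes the Tiktorch Model Loader app is installed in the same workspace.
        loader = await api.getPlugin("Tiktorch Model Loader")
        result = await loader.run_model(
            "https://github.com/bioimage-io/pytorch-bioimage-io/releases/download/v0.1.1/UNet2DNucleiBroad.model.zip"
        )
        # result["inputs"] and result["outputs"] are base64-encoded PNG data URLs.
        await api.showDialog({
            "type": "imjoy/image-compare",
            "name": "Tiktorch Demo",
            "data": {"first": result["inputs"], "second": result["outputs"]},
        })

api.export(DemoClient())
```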
</docs>

<config lang="json">
{
  "name": "Tiktorch Model Loader",
  "type": "native-python",
  "version": "0.1.3",
  "api_version": "0.1.2",
  "description": "Tiktorch Model Loader for BioImage.io",
  "icon": "https://raw.githubusercontent.com/bioimage-io/models/master/assets/icons/Ilastik-icon.png",
  "tags": [],
  "ui": "",
  "inputs": null,
  "outputs": null,
  "flags": [],
  "env": ["conda create -n ilastik", {"type": "binder", "spec": "oeway/tiktorch-binder-image/master", "skip_requirements": true}],
  "requirements": ["git+git://github.com/bioimage-io/pytorch-bioimage-io.git#egg=pybio.torch", "pip:git+git://github.com/bioimage-io/pytorch-bioimage-io.git#egg=pybio.torch"],
  "dependencies": []
}
</config>

<script lang="python">
import base64
import urllib.request
import zipfile
from io import BytesIO

import numpy as np
import torch
from imageio import imread
from imjoy import api
from PIL import Image
from tiktorch.server.reader import eval_model_zip

def array2base64(img):
    """Convert a 2D numpy array into a base64-encoded PNG data URL."""
    # Scale intensities to 0-255 before converting to 8-bit.
    img = img / img.max() * 255.0
    img = Image.fromarray(img.astype('uint8'))
    byte_io = BytesIO()
    img.save(byte_io, 'PNG')
    result = base64.b64encode(byte_io.getvalue()).decode('ascii')
    imgurl = 'data:image/png;base64,' + result
    return imgurl
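
# Usage sketch (illustrative, not executed by the plugin): the returned data URL
# can be fed straight into ImJoy UI widgets such as the 'imjoy/image-compare'
# dialog used in run() below, e.g.
#   url = array2base64(np.random.rand(256, 256))  # -> 'data:image/png;base64,...'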


class ImJoyPlugin():
    async def setup(self) -> None:
        # Cache the loaded model and input image so repeated runs skip the downloads.
        self.exemplum = None
        self.exemplum_url = None
        self.img_url = None
        self.input_img = None
        api.log("initialized")

    def get_file_manager(self):
        return api.FILE_MANAGER_URL

    def run_model(self, model_url, file_path=None) -> dict:
        # Download and load the model zip only when the URL changes.
        if self.exemplum_url != model_url:
            api.showMessage('Loading model from ' + model_url)
            urllib.request.urlretrieve(model_url, 'tiktorch_model.zip')
            with zipfile.ZipFile('tiktorch_model.zip', 'r') as model_zip:
                # Prefer the available CUDA devices and fall back to the CPU.
                devices = [f"cuda:{i}" for i in range(torch.cuda.device_count())] + ["cpu"]
                self.exemplum = eval_model_zip(model_zip, devices=devices)
            self.exemplum_url = model_url

        api.showMessage('Loading image...')
        image_url = file_path or 'https://raw.githubusercontent.com/bioimage-io/pytorch-bioimage-io/v0.1.1/specs/models/unet2d/nuclei_broad/cover0.png'
        if self.img_url != image_url:
            self.input_img = imread(image_url)
            api.log(str(self.input_img.shape))
            if self.input_img.ndim == 2:
                self.input_img = self.input_img[:, :, np.newaxis]
            self.img_url = image_url

        # Crop to 512x512, take the first channel and add a leading axis (cyx).
        batch = self.input_img[None, :512, :512, 0]
        api.showMessage('Running model inference...')
        prediction = self.exemplum.forward(batch)

        api.showMessage('Displaying results...')
        imgurl1 = array2base64(self.input_img[:512, :512, 0])
        imgurl2 = array2base64(prediction[0])
        api.showMessage('Done.')
        return {"inputs": imgurl1, "outputs": imgurl2, "width": 512, "height": 512}

    def preview_image(self, path):
        # Return a data URL preview of an image file for the UI.
        img = imread(path)
        return array2base64(img)

    def run(self, ctx):
        # Demo entry point: run the bundled UNet2D nuclei model and show the
        # input next to the prediction.
        ret = self.run_model("https://github.com/bioimage-io/pytorch-bioimage-io/releases/download/v0.1.1/UNet2DNucleiBroad.model.zip")
        api.showDialog({'type': 'imjoy/image-compare', 'name': 'Ilastik Demo', 'data': {"first": ret["inputs"], 'second': ret["outputs"], 'name': 'Image Processed with TikTorch'}})


api.export(ImJoyPlugin())
</script>