# APPLY VOLUME MERGE ANNOTATION
#
# This script applies a webKnossos merger mode annotation
# to a given segmentation layer. The script will output a
# WKW layer.
#
# The --set_zero flag will relabel all non-annotated segments
# to 0.
#
# 1. Download the merger mode NML file.
# 2. Install Python 3 (if you don't have it).
# 3. Install the dependencies of this script:
#    pip install -U webknossos fastremap
# 4. Run the script from the terminal:
#    python apply_merger_mode.py \
#      /path/to/input_wkw \
#      merger_mode.nml \
#      /path/to/output_wkw
# 5. The script will output a folder with a WKW layer.
#
# License: MIT, scalable minds
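#
# Optional flags (these simply spell out the argparse setup in main() below):
#   --set_zero        relabel all non-annotated segments to 0
#   --layer_name, -l  segmentation layer name (default: "segmentation")
#
# Example invocation with both flags (paths are placeholders, as above):
#   python apply_merger_mode.py --set_zero -l segmentation \
#     /path/to/input_wkw \
#     merger_mode.nml \
#     /path/to/output_wkw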

from argparse import ArgumentParser
from pathlib import Path

import fastremap
import webknossos as wk


def main():
    # Parse command-line arguments
    parser = ArgumentParser(description="Apply webKnossos volume merge annotations")
    parser.add_argument(
        "--set_zero",
        action="store_true",
        help="Set non-marked segments to zero.",
    )
    parser.add_argument("input", help="Path to input WKW dataset", type=Path)
    parser.add_argument(
        "--layer_name", "-l", help="Segmentation layer name", default="segmentation"
    )
    parser.add_argument("nml", help="Path to NML file", type=Path)
    parser.add_argument("output", help="Path to output WKW dataset", type=Path)
    args = parser.parse_args()
print("Merging merger mode annotations from {} and {}".format(args.input, args.nml)) | |
# Collect equivalence classes from NML | |
nml = wk.Skeleton.load(args.nml) | |
ds_in = wk.Dataset.open(args.input) | |
in_layer = ds_in.get_layer(args.layer_name) | |
in_mag1 = in_layer.get_mag("1") | |
ds_out = wk.Dataset(args.output, scale=ds_in.scale) | |
out_layer = ds_out.add_layer( | |
args.layer_name, | |
wk.SEGMENTATION_CATEGORY, | |
dtype_per_layer=in_layer.dtype_per_layer, | |
largest_segment_id=in_layer.largest_segment_id, | |
) | |
out_mag1 = out_layer.add_mag("1") | |

    # Collect equivalence classes from the NML: one class per merger tree,
    # i.e. the set of segment ids found at the tree's node positions
    equiv_classes = [
        set(
            in_mag1.read(absolute_offset=node.position, size=(1, 1, 1))[0, 0, 0, 0]
            for node in tree.nodes
        )
        for tree in nml.flattened_graphs()
    ]

    # Map every segment id of a class to one representative id of that class
    equiv_map = {}
    for segment_ids in equiv_classes:
        ref_segment_id = next(iter(segment_ids))
        for segment_id in segment_ids:
            equiv_map[segment_id] = ref_segment_id
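
    # For illustration (hypothetical segment ids): if one merger tree covers
    # segments {17, 42, 99}, next(iter(...)) picks an arbitrary representative
    # (sets are unordered), e.g. 17, and equiv_map then contains
    # {17: 17, 42: 17, 99: 17}.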

    print(
        "Found {} equivalence classes with {} segment ids".format(
            len(equiv_classes), len(equiv_map)
        )
    )
    print(equiv_classes)

    # Rewrite the segmentation layer chunk by chunk
    def apply_mapping_for_chunk(func_args):
        (view, _) = func_args
        cube_data = view.read()[0]
        # Relabel the chunk in place according to equiv_map. Segment ids that
        # are not part of any equivalence class are kept unchanged; with
        # --set_zero they are relabeled to 0 instead (see header).
        # pylint: disable=c-extension-no-member
        fastremap.remap(
            cube_data,
            equiv_map,
            preserve_missing_labels=(not args.set_zero),
            in_place=True,
        )
        out_mag1.write(
            cube_data, absolute_offset=view.bounding_box.in_mag(out_mag1.mag).topleft
        )

    in_mag1.for_each_chunk(apply_mapping_for_chunk)

    # Generate the magnification pyramid for the output layer
    out_layer.downsample()

    # Done
    print("Rewrote the segmentation layer to {}".format(args.output))
    print(
        "You may need to copy over additional layers (e.g. color layers) and compress the output segmentation"
    )
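
    # A minimal sketch of the compression step mentioned above, assuming
    # MagView.compress() is available in your webknossos version:
    # out_mag1.compress()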


if __name__ == "__main__":
    main()