Skip to content

Commit d410622

Browse files
Create volume annotations (#1340)
* make VolumeLayer non-protected * add VolumeLayer.edit contextmanager * format * lint * add @pytest.mark.use_proxay to test * format * rechunk mags with Zarr3Config to have expected volume annotations format * Enum for VolumeLayerEditMode * test_edit_volume_annotation, test_save_edited_volume_annotation and test_edited_volume_annotation_upload_download * format and lint * edited annotation uploadable * set zarr3 datatype for wk.Annotation.download * implement VolumeLayerEditMode.MEMORY * get voxel_size from nml * init zip when creating volume layer * store volume layer in zip of zips and save downloaded annotation volumes to disk * use volume_layers_root argument of Annotation.download * update cassettes * load editing layer into tensorstore, as MemoryFileSystem is not compatible with tensorstore (?) * typing and format * avoid tensorstore array memory path collision * rename nml to skeleton in offline_merger_mode.py Co-authored-by: Tom Herold <[email protected]> * replace volume_layers_root with temp dir and incorporate further review * fix format mistakes * update cassettes * extract VolumeLayer to volume_layer.py * update cassette * faster VolumeLayer.edit with SequentialExecutor * improve test_edit_volume_annotation * document changes * update learned_segmenter.py to use VolumeLayer.edit * lint * VolumeLayer.edit kw-only --------- Co-authored-by: Tom Herold <[email protected]>
1 parent 0e4b724 commit d410622

File tree

13 files changed

+658
-180
lines changed

13 files changed

+658
-180
lines changed

webknossos/Changelog.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@ For upgrade instructions, please check the respective _Breaking Changes_ section
1515
### Breaking Changes
1616

1717
### Added
18+
- Added context manager `VolumeLayer.edit` for creating and modifying volume annotations. [#1340](https://github.com/scalableminds/webknossos-libs/pull/1340)
1819

1920
### Changed
2021

webknossos/examples/WIP/merge_trees_at_closest_nodes.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -6,19 +6,19 @@
66

77
import webknossos as wk
88

9-
nml = wk.Skeleton.load("trees-in-groups.nml")
9+
skeleton = wk.Skeleton.load("trees-in-groups.nml")
1010

1111
# Probably we want to keep groups and normal trees in distinct collections (groups/trees).
1212
# For many use-cases a common view groups_and_trees would be great, but not here:
13-
for group in nml.groups.values(): # groups is a dict with the name as keys
13+
for group in skeleton.groups.values(): # groups is a dict with the name as keys
1414
min_distance_graph = G = nx.Graph()
1515
for (tree_idx_a, tree_a), (tree_idx_b, tree_b) in combinations(
1616
enumerate(group.flattened_trees()), 2
1717
):
1818
pos_a = (
19-
tree_a.get_node_positions() * nml.voxel_size
19+
tree_a.get_node_positions() * skeleton.voxel_size
2020
) # or tree_a.get_node_positions_nm?
21-
pos_b = tree_b.get_node_positions() * nml.voxel_size
21+
pos_b = tree_b.get_node_positions() * skeleton.voxel_size
2222
node_idx_a, node_idx_b, distance = wk.geometry.closest_pair(pos_a, pos_b)
2323
G.add_edge((tree_idx_a, node_idx_a), (tree_idx_b, node_idx_b), weight=distance)
2424
new_edges = nx.algorithms.tree.mst.minimum_spanning_edges()
@@ -35,7 +35,7 @@
3535
final_tree.name = group.name
3636
final_tree.group = None
3737

38-
del nml.groups[group.name]
38+
del skeleton.groups[group.name]
3939
# or
4040
group.delete()
4141
# The latter only works if everything is double-linked.
@@ -44,4 +44,4 @@
4444
# to do the double-linking. Simply dict-like insertions can't work then:
4545
# nml["tree-name"] = Tree()
4646

47-
nml.save("merged-trees.nml")
47+
skeleton.save("merged-trees.nml")

webknossos/examples/WIP/offline_merger_mode.py

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -4,21 +4,21 @@
44

55
import webknossos as wk
66

7-
# A merger mode nml with every tree corresponding to a new merged segment is available.
7+
# A merger mode skeleton with every tree corresponding to a new merged segment is available.
88
# All segments in which a node is placed should be merged and saved as a new dataset.
99

10-
# for local nml:
11-
nml = wk.open("merger-mode.nml")
10+
# for local skeleton:
11+
skeleton = wk.open("merger-mode.skeleton")
1212
# wk.Skeleton.load or wk.open_skeleton works, too (and is type-safe)
1313

1414
# for online annotation:
15-
annotation = wk.Annotation.download(
16-
"https://webknossos.org/annotations/Explorational/6114d9410100009f0096c640"
17-
)
18-
nml = annotation.skeleton
15+
skeleton = wk.Annotation.download(
16+
"https://webknossos.org/annotations/Explorational/6114d9410100009f0096c640",
17+
skip_volume_data=True,
18+
).skeleton
1919
# should this save anything to disk, or just happen in memory?
2020

21-
dataset = wk.download(nml.dataset_name, organization=nml.dataset_organization)
21+
dataset = wk.download(skeleton.dataset_name, organization=skeleton.dataset_organization)
2222
# asks for auth token, persisted into .env or similar config file (maybe use xdg-path?)
2323

2424
# sub-part access via dicts or dict-like classes
@@ -28,7 +28,7 @@
2828

2929
segmentation_data = view.read()
3030

31-
for tree in nml.trees(): # nml.trees() is a flattened iterator of all trees
31+
for tree in skeleton.trees: # skeleton.trees is a flattened iterator of all trees
3232
segment_ids_in_tree = set(
3333
segmentation_data[tuple(node.position - view.topleft)] for node in tree.nodes
3434
)

webknossos/examples/apply_merger_mode.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -12,8 +12,9 @@ def main() -> None:
1212
# Opening a merger mode annotation #
1313
####################################
1414

15-
nml = wk.Annotation.download(
16-
"https://webknossos.org/annotations/6748612b0100001101c81156"
15+
skeleton = wk.Annotation.download(
16+
"https://webknossos.org/annotations/6748612b0100001101c81156",
17+
skip_volume_data=True,
1718
).skeleton
1819

1920
###############################################
@@ -33,7 +34,7 @@ def main() -> None:
3334
##############################
3435

3536
segment_id_mapping = {}
36-
for tree in nml.flattened_trees():
37+
for tree in skeleton.flattened_trees():
3738
base = None
3839
for node in tree.nodes:
3940
segment_id = in_mag1.read(
@@ -44,7 +45,7 @@ def main() -> None:
4445
segment_id_mapping[segment_id] = base
4546

4647
print(
47-
f"Found {len(list(nml.flattened_trees()))} segment id groups with {len(segment_id_mapping)} nodes"
48+
f"Found {len(list(skeleton.flattened_trees()))} segment id groups with {len(segment_id_mapping)} nodes"
4849
)
4950
print(segment_id_mapping)
5051

webknossos/examples/learned_segmenter.py

Lines changed: 5 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,4 @@
1-
import os
21
from functools import partial
3-
from tempfile import TemporaryDirectory
42

53
import numpy as np
64
from skimage import feature
@@ -19,7 +17,6 @@ def main() -> None:
1917
# Step 1: Read the training data from the annotation and the dataset's color
2018
# layer (the data will be streamed from WEBKNOSSOS to our local computer)
2119
training_data_bbox = annotation.user_bounding_boxes[0] # type: ignore[index]
22-
new_dataset_name = f"{annotation.dataset_name.replace(' ', '_')}_segmented"
2320
with wk.webknossos_context("https://webknossos.org"):
2421
dataset = annotation.get_remote_annotation_dataset()
2522

@@ -58,28 +55,15 @@ def main() -> None:
5855
assert segmentation.max() < 256
5956
segmentation = segmentation.astype("uint8")
6057

61-
# Step 5: Bundle everything as a WEBKNOSSOS layer and upload to wK for viewing and further work
62-
with TemporaryDirectory() as tempdir:
63-
new_dataset = wk.Dataset(
64-
tempdir, voxel_size=dataset.voxel_size, name=new_dataset_name
65-
)
66-
segmentation_layer = new_dataset.add_layer(
67-
"segmentation",
68-
wk.SEGMENTATION_CATEGORY,
69-
dtype_per_channel=segmentation.dtype,
70-
largest_segment_id=int(segmentation.max()),
71-
)
58+
# Step 5: Upload the segmentation to WEBKNOSSOS
59+
print("Uploading segmentation…")
60+
volume_layer = annotation.add_volume_layer("segmentation", dtype=segmentation.dtype)
61+
with volume_layer.edit() as segmentation_layer:
7262
segmentation_layer.bounding_box = dataset.layers["color"].bounding_box
7363
segmentation_layer.add_mag(mag, compress=True).write(segmentation)
7464
segmentation_layer.downsample(sampling_mode="constant_z")
7565

76-
remote_ds = new_dataset.upload(
77-
layers_to_link=[annotation.get_remote_base_dataset().layers["color"]]
78-
if "PYTEST_CURRENT_TEST" not in os.environ
79-
else None
80-
)
81-
82-
url = remote_ds.url
66+
url = annotation.upload()
8367
print(f"Successfully uploaded {url}")
8468

8569

webknossos/examples/skeleton_path_length.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,9 +6,7 @@
66
def calculate_path_length(annotation_url: str, auth_token: str) -> None:
77
with wk.webknossos_context(token=auth_token):
88
# Download an annotation directly from the WEBKNOSSOS server
9-
annotation = wk.Annotation.download(
10-
annotation_url,
11-
)
9+
annotation = wk.Annotation.download(annotation_url, skip_volume_data=True)
1210

1311
skeleton = annotation.skeleton
1412
voxel_size = annotation.voxel_size

webknossos/tests/cassettes/test_annotation/test_edited_volume_annotation_upload_download.yml

Lines changed: 65 additions & 0 deletions
Large diffs are not rendered by default.

webknossos/tests/test_annotation.py

Lines changed: 162 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,8 +4,11 @@
44

55
import numpy as np
66
import pytest
7+
from cluster_tools import get_executor
78

89
import webknossos as wk
10+
from webknossos import Annotation, SegmentationLayer
11+
from webknossos.annotation.volume_layer import VolumeLayerEditMode
912
from webknossos.dataset import DataFormat
1013
from webknossos.geometry import BoundingBox, Vec3Int
1114

@@ -40,7 +43,10 @@ def test_annotation_from_wkw_zip_file() -> None:
4043
assert len(list(copied_annotation.get_volume_layer_names())) == 1
4144
assert len(list(copied_annotation.skeleton.flattened_trees())) == 1
4245

43-
copied_annotation.add_volume_layer(name="new_volume_layer")
46+
copied_annotation.add_volume_layer(
47+
name="new_volume_layer",
48+
dtype=np.uint32,
49+
)
4450
assert len(list(copied_annotation.get_volume_layer_names())) == 2
4551
copied_annotation.delete_volume_layer(volume_layer_name="new_volume_layer")
4652
assert len(list(copied_annotation.get_volume_layer_names())) == 1
@@ -368,3 +374,158 @@ def test_tree_metadata(tmp_path: Path) -> None:
368374
list(tmp_annotation.skeleton.flattened_trees())[0].metadata["test_tree"]
369375
== "test"
370376
)
377+
378+
379+
@pytest.mark.parametrize(
380+
"edit_mode", [VolumeLayerEditMode.MEMORY, VolumeLayerEditMode.TEMPORARY_DIRECTORY]
381+
)
382+
@pytest.mark.parametrize("executor", ["sequential", "multiprocessing"])
383+
def test_edit_volume_annotation(edit_mode: VolumeLayerEditMode, executor: str) -> None:
384+
dtype = np.uint32
385+
data = np.ones((1, 10, 10, 10), dtype=dtype)
386+
ann = wk.Annotation(
387+
name="my_annotation",
388+
dataset_name="sample_dataset",
389+
voxel_size=(11.2, 11.2, 25.0),
390+
)
391+
392+
volume_layer = ann.add_volume_layer(
393+
name="segmentation",
394+
dtype=dtype,
395+
)
396+
if edit_mode == VolumeLayerEditMode.MEMORY and executor == "multiprocessing":
397+
with pytest.raises(ValueError, match="SequentialExecutor"):
398+
with volume_layer.edit(
399+
edit_mode=edit_mode, executor=get_executor(executor)
400+
) as seg_layer:
401+
pass
402+
else:
403+
with volume_layer.edit(
404+
edit_mode=edit_mode, executor=get_executor(executor)
405+
) as seg_layer:
406+
assert isinstance(seg_layer, SegmentationLayer)
407+
mag = seg_layer.add_mag(1)
408+
mag.write(data, absolute_offset=(0, 0, 0), allow_resize=True)
409+
with volume_layer.edit(edit_mode=edit_mode) as seg_layer:
410+
assert len(seg_layer.mags) == 1
411+
mag = seg_layer.get_mag(1)
412+
read_data = mag.read(absolute_offset=(0, 0, 0), size=(10, 10, 10))
413+
assert np.array_equal(data, read_data)
414+
415+
416+
def test_edited_volume_annotation_format() -> None:
417+
import zipfile
418+
419+
import tensorstore
420+
421+
path = TESTDATA_DIR / "annotations" / "l4_sample__explorational__suser__94b271.zip"
422+
ann = Annotation.load(path)
423+
data = np.ones(shape=(10, 10, 10))
424+
425+
volume_layer = ann.add_volume_layer(
426+
name="segmentation",
427+
dtype=np.uint32,
428+
)
429+
with volume_layer.edit() as seg_layer:
430+
mag_view = seg_layer.add_mag(1)
431+
mag_view.write(data, allow_resize=True)
432+
433+
save_path = TESTOUTPUT_DIR / "saved_annotation.zip"
434+
ann.save(save_path)
435+
unpack_dir = TESTOUTPUT_DIR / "unpacked_annotation"
436+
with zipfile.ZipFile(save_path, "r") as zip_ref:
437+
zip_ref.extractall(unpack_dir)
438+
439+
# test for the format assumptions as mentioned in https://github.com/scalableminds/webknossos/issues/8604
440+
ts = tensorstore.open(
441+
{
442+
"driver": "zarr3",
443+
"kvstore": {
444+
"driver": "zip",
445+
"path": "volumeAnnotationData/1/",
446+
"base": {
447+
"driver": "file",
448+
"path": str(unpack_dir / "data_1_segmentation.zip"),
449+
},
450+
},
451+
},
452+
create=False,
453+
open=True,
454+
).result()
455+
metadata = ts.spec().to_json()["metadata"]
456+
457+
assert metadata["chunk_key_encoding"] == {
458+
"configuration": {"separator": "."},
459+
"name": "default",
460+
}
461+
assert ["transpose", "bytes", "blosc"] == [
462+
codec["name"] for codec in metadata["codecs"]
463+
]
464+
data_read = ts.read().result()[0, :10, :10, :10]
465+
assert np.array_equal(data, data_read)
466+
467+
468+
@pytest.mark.parametrize(
469+
"edit_mode", [VolumeLayerEditMode.MEMORY, VolumeLayerEditMode.TEMPORARY_DIRECTORY]
470+
)
471+
def test_edited_volume_annotation_save_load(edit_mode: VolumeLayerEditMode) -> None:
472+
data = np.ones((1, 10, 10, 10))
473+
474+
ann = wk.Annotation(
475+
name="my_annotation",
476+
dataset_name="sample_dataset",
477+
voxel_size=(11.2, 11.2, 25.0),
478+
)
479+
480+
volume_layer = ann.add_volume_layer(name="segmentation", dtype=np.uint32)
481+
with volume_layer.edit(edit_mode=edit_mode) as seg_layer:
482+
mag_view = seg_layer.add_mag(1)
483+
mag_view.write(data, allow_resize=True)
484+
485+
save_path = TESTOUTPUT_DIR / "annotation_saved.zip"
486+
ann.save(save_path)
487+
ann_loaded = Annotation.load(save_path)
488+
489+
volume_layer_downloaded = ann_loaded.get_volume_layer("segmentation")
490+
491+
with volume_layer_downloaded.edit(edit_mode=edit_mode) as seg_layer:
492+
assert len(seg_layer.mags) == 1
493+
mag = seg_layer.get_mag(1)
494+
read_data = mag.read(absolute_offset=(0, 0, 0), size=(10, 10, 10))
495+
assert np.array_equal(data, read_data)
496+
497+
498+
@pytest.mark.use_proxay
499+
def test_edited_volume_annotation_upload_download() -> None:
500+
data = np.ones((1, 10, 10, 10))
501+
502+
ann = Annotation.load(
503+
TESTDATA_DIR / "annotations" / "l4_sample__explorational__suser__94b271.zip"
504+
)
505+
ann.organization_id = "Organization_X"
506+
507+
volume_layer = ann.add_volume_layer(
508+
name="segmentation",
509+
dtype=np.uint32,
510+
)
511+
with volume_layer.edit() as seg_layer:
512+
mag_view = seg_layer.add_mag(1)
513+
mag_view.write(data, allow_resize=True)
514+
515+
url = ann.upload()
516+
ann_downloaded = Annotation.download(
517+
url,
518+
)
519+
520+
assert {layer.name for layer in ann_downloaded._volume_layers} == {
521+
"Volume",
522+
"segmentation",
523+
}
524+
525+
volume_layer_downloaded = ann_downloaded.get_volume_layer("segmentation")
526+
527+
with volume_layer_downloaded.edit() as seg_layer:
528+
assert len(seg_layer.mags) == 1
529+
mag = seg_layer.get_mag(1)
530+
read_data = mag.read(absolute_offset=(0, 0, 0), size=(10, 10, 10))
531+
assert np.array_equal(data, read_data)

webknossos/tests/test_cli.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -660,14 +660,15 @@ def test_merge_fallback_no_fallback_layer(
660660
)
661661

662662
annotation._volume_layers = [
663-
webknossos.annotation._VolumeLayer( # type: ignore
663+
webknossos.annotation.VolumeLayer( # type: ignore
664664
id=0,
665665
name=tmp_layer.name,
666666
fallback_layer_name=fallback_mag.layer.name,
667667
zip=volume_layer_zip,
668668
segments={},
669669
data_format=DataFormat.WKW,
670670
largest_segment_id=largest_segment_id,
671+
voxel_size=tmp_dataset.voxel_size,
671672
),
672673
]
673674

0 commit comments

Comments
 (0)