Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
22 commits
Select commit Hold shift + click to select a range
c4db027
Merge pull request #1 from davidackerman/master
davidackerman Oct 16, 2021
357fa41
Merge pull request #2 from davidackerman/master
davidackerman Oct 16, 2021
f40cfa8
Merge pull request #3 from davidackerman/master
davidackerman Oct 16, 2021
0dab378
Merge pull request #4 from davidackerman/master
davidackerman Oct 16, 2021
7f8c45c
Merge pull request #5 from davidackerman/master
davidackerman Nov 8, 2021
49c16b3
Merge pull request #6 from davidackerman/master
davidackerman Nov 10, 2021
81ad9ee
Merge pull request #7 from davidackerman/master
davidackerman Dec 7, 2021
5034871
Merge pull request #8 from davidackerman/master
davidackerman Dec 9, 2021
42f01f3
Merge pull request #9 from davidackerman/master
davidackerman Dec 13, 2021
beb0967
Merge pull request #10 from davidackerman/master
davidackerman Dec 13, 2021
c5818da
Merge pull request #11 from davidackerman/master
davidackerman Jan 28, 2022
057b183
Merge pull request #12 from davidackerman/master
davidackerman Dec 5, 2022
5fa444f
Merge pull request #13 from davidackerman/master
davidackerman Mar 9, 2024
2dd2701
Merge branch 'davidackerman:master' into master
davidackerman Aug 7, 2024
edc53fb
Merge branch 'davidackerman:master' into master
davidackerman Aug 7, 2024
beae1b9
Merge branch 'davidackerman:master' into master
davidackerman Feb 1, 2025
8cf2784
Merge branch 'davidackerman:master' into master
davidackerman Feb 1, 2025
61f26b3
Merge branch 'davidackerman:master' into master
davidackerman Feb 1, 2025
7d36ada
add pixi configuration
LucaMarconato May 1, 2025
662658e
cleanup pixi configuration; add lockfile
LucaMarconato May 1, 2025
48a8b9f
rename task
LucaMarconato May 1, 2025
e4b13fb
removed sandbox files
LucaMarconato May 1, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .gitattributes
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
# SCM syntax highlighting & preventing 3-way merges
pixi.lock merge=binary linguist-language=YAML linguist-generated=true
4 changes: 4 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -24,3 +24,7 @@ lsf-config-*
lsf-config_*
local-config-*
local-config_*

# pixi environments
.pixi
*.egg-info
8 changes: 4 additions & 4 deletions local-config/dask-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -21,10 +21,10 @@ distributed:

worker:
memory:
target: 0.0
spill: 0.0
pause: 0.0
terminate: 0.0
target: 0.6
spill: 0.7
pause: 0.8
terminate: 0.95

admin:
log-format: '[%(asctime)s] %(levelname)s %(message)s'
Expand Down
46 changes: 27 additions & 19 deletions multiresolution_mesh_creator/src/create_multiresolution_meshes.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,16 +2,16 @@
import trimesh
from trimesh.intersections import slice_faces_plane
import numpy as np
from dvidutils import encode_faces_to_custom_drc_bytes
import time
import os
from os import listdir
from os.path import isfile, join, splitext
import dask
from dask.distributed import worker_client
import pyfqmr
from ..util import mesh_util, io_util, dask_util
from multiresolution_mesh_creator.util import mesh_util, io_util, dask_util
import logging
import DracoPy

logger = logging.getLogger(__name__)

Expand All @@ -32,7 +32,7 @@ def my_slice_faces_plane(vertices, faces, plane_normal, plane_origin):

if len(vertices) > 0 and len(faces) > 0:
try:
vertices, faces = slice_faces_plane(
vertices, faces, _ = slice_faces_plane(
vertices, faces, plane_normal, plane_origin
)
except ValueError as e:
Expand Down Expand Up @@ -192,16 +192,24 @@ def generate_mesh_decomposition(
# Return combined_fragments_dictionary
for fragment_pos, fragment in combined_fragments_dictionary.items():
current_box_size = lod_0_box_size * 2**current_lod
draco_bytes = encode_faces_to_custom_drc_bytes(
fragment.vertices,
np.zeros(np.shape(fragment.vertices)),
fragment.faces,
np.asarray(3 * [current_box_size]),
np.asarray(fragment_pos) * current_box_size,
position_quantization_bits=10,
)

if len(draco_bytes) > 12:
if len(fragment.vertices) > 0:
quantization_origin = np.asarray(fragment_pos) * current_box_size
draco_bytes = DracoPy.encode(
points=fragment.vertices,
faces=fragment.faces,
quantization_bits=10,
quantization_range=current_box_size,
quantization_origin=quantization_origin,
)
# TODO: check whether len(draco_bytes) > 12 and len(fragment.vertices) > 0 are
# equivalent; if not, the user may run into this warning for small meshes
if not len(draco_bytes) > 12:
print(
'Warning: Draco bytes are less than 12 bytes. This may be due to a '
'small mesh. Skipping this mesh.'
)
continue
# Then the mesh is not empty
fragment = mesh_util.CompressedFragment(
draco_bytes,
Expand Down Expand Up @@ -346,20 +354,20 @@ def generate_neuroglancer_multires_mesh(
grid_origin = np.minimum(
grid_origin, np.floor(vertices.min(axis=0) - 1)
) # subtract 1 in case of rounding issues

if not lod_0_box_size and current_lod == 0:
max_distance_between_vertices = np.ceil(
np.max(vertices.max(axis=0) - vertices.min(axis=0))
)
# arbitrarily say around 100 faces per chunk
heuristic_num_chunks = np.ceil(num_faces/100)
if heuristic_num_chunks==1:
lod_0_box_size = np.ceil(max_distance_between_vertices)+1
lod_0_box_size = np.ceil(max_distance_between_vertices)+1
else:
lod_0_box_size = np.ceil(max_distance_between_vertices/np.ceil(heuristic_num_chunks**(1/2)))+1 # use square root rather than cube root since assuming surface area to volume ratio

previous_num_faces = num_faces

# only need as many lods until mesh stops decimating
lods = lods[:idx]

Expand Down Expand Up @@ -407,7 +415,7 @@ def generate_neuroglancer_multires_mesh(

stride = np.ceil(
1.0 * (end_fragment - start_fragment) / num_chunks
).astype(np.int)
).astype(int)

# Scattering here, unless broadcast=True, causes this issue:
# https://github.com/dask/distributed/issues/4612. But that is
Expand Down Expand Up @@ -502,7 +510,7 @@ def get_number_of_subtask_workers(output_path, ids, original_ext, num_workers):
# lots of small objects are present

total_file_size = 0
file_sizes = np.zeros((len(ids),), dtype=np.int)
file_sizes = np.zeros((len(ids),), dtype=int)
for idx, id in enumerate(ids):
current_size = os.stat(
f"{output_path}/mesh_lods/s0/{id}{original_ext}"
Expand All @@ -511,7 +519,7 @@ def get_number_of_subtask_workers(output_path, ids, original_ext, num_workers):
file_sizes[idx] = current_size

num_workers_per_byte = num_workers / total_file_size
num_subtask_workers = np.ceil(file_sizes * num_workers_per_byte).astype(np.int)
num_subtask_workers = np.ceil(file_sizes * num_workers_per_byte).astype(int)
return num_subtask_workers

num_subtask_workers = get_number_of_subtask_workers(
Expand Down
Loading