Skip to content
Open
Show file tree
Hide file tree
Changes from 6 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 10 additions & 5 deletions corgie/cli/align.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,6 +106,7 @@
default=1,
help="The number of previous sections for which fields will be estimated, then corrected by voting.",
)
@corgie_optgroup("Broadcast Specification")
@corgie_option(
"--decay_dist",
nargs=1,
Expand All @@ -120,6 +121,8 @@
default=0.2,
help="The increase in the size of downsample factor based on distance used in broadcasting a stitching field.",
)
@corgie_option("--broadcast_chunk_z", nargs=1, type=int, default=1)
@corgie_optgroup("Restart Specification")
@corgie_option(
"--restart_stage",
nargs=1,
Expand Down Expand Up @@ -161,6 +164,7 @@ def align(
seethrough_spec_mip,
decay_dist,
blur_rate,
broadcast_chunk_z,
restart_stage,
restart_suffix,
):
Expand Down Expand Up @@ -305,8 +309,8 @@ def align(
else:
seethrough_method = None

#restart_stage = 4
#import pdb; pdb.set_trace()
# restart_stage = 4
# import pdb; pdb.set_trace()
if restart_stage == 0:
corgie_logger.debug("Aligning blocks...")
for block in blocks:
Expand Down Expand Up @@ -404,9 +408,9 @@ def align(
field_to_downsample = stitch_corrected_field
# Hack for fafb
field_info = field_to_downsample.get_info()
for scale in field_info['scales']:
scale['chunk_sizes'][-1][-1] = 1
scale['encoding'] = 'raw'
for scale in field_info["scales"]:
scale["chunk_sizes"][-1][-1] = 1
scale["encoding"] = "raw"
field_to_downsample.cv.store_info(field_info)
field_to_downsample.cv.fetch_info()
downsample_field_job = DownsampleJob(
Expand Down Expand Up @@ -465,6 +469,7 @@ def align(
stitching_fields=stitching_fields,
output_field=composed_field,
chunk_xy=chunk_xy,
chunk_z=broadcast_chunk_z,
bcube=block_bcube,
pad=pad,
z_list=z_list,
Expand Down
44 changes: 32 additions & 12 deletions corgie/cli/broadcast.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ def __init__(
mip,
decay_dist,
blur_rate,
chunk_z,
):
"""
Args:
Expand All @@ -34,6 +35,7 @@ def __init__(
self.block_field = block_field
self.stitching_fields = stitching_fields
self.output_field = output_field
self.chunk_z = chunk_z
self.chunk_xy = chunk_xy
self.bcube = bcube
self.pad = pad
Expand All @@ -45,7 +47,7 @@ def __init__(

def task_generator(self):
chunks = self.output_field.break_bcube_into_chunks(
bcube=self.bcube, chunk_xy=self.chunk_xy, chunk_z=1, mip=self.mip
bcube=self.bcube, chunk_xy=self.chunk_xy, chunk_z=self.chunk_z, mip=self.mip
)

tasks = []
Expand Down Expand Up @@ -130,7 +132,7 @@ def __init__(
"""Compose set of stitching_fields, adjusted by distance, with block_field.

Args:
block_field (Layer): most recent field, that last to be warped
block_field (Layer): most recent field, that is last to be warped
stitching_fields ([Layers]): collection of fields at stitching interfaces,
which are assumed to alternate
output_field (Layer)
Expand Down Expand Up @@ -165,14 +167,32 @@ def execute(self):
input_fields = fmul * input_fields + input_fields[:frem]
input_fields = input_fields[::-1]
input_fields += [self.block_field]
z_list = self.z_list + [self.bcube.z_range()[0]]
corgie_logger.debug(f"input_fields: {input_fields}")
corgie_logger.debug(f"z_list: {z_list}")
pbcube = self.bcube.uncrop(self.pad, self.mip)
corgie_logger.debug(f"pbcube: {pbcube}")
fields = PyramidDistanceFieldSet(
decay_dist=self.decay_dist, blur_rate=self.blur_rate, layers=input_fields
for field in input_fields:
field.cv.set_param(key="cache", value=True)
for z in range(*self.bcube.z_range()):
bcube = self.bcube.reset_coords(zs=z, ze=z + 1, in_place=False)
z_list = self.z_list + [bcube.z_range()[0]]
corgie_logger.debug(f"input_fields: {input_fields}")
corgie_logger.debug(f"z_list: {z_list}")
pbcube = bcube.uncrop(self.pad, self.mip)
corgie_logger.debug(f"pbcube: {pbcube}")
fields = PyramidDistanceFieldSet(
decay_dist=self.decay_dist,
blur_rate=self.blur_rate,
layers=input_fields,
)
field = fields.read(bcube=pbcube, z_list=z_list, mip=self.mip)
cropped_field = helpers.crop(field, self.pad)
self.output_field.write(cropped_field, bcube=bcube, mip=self.mip)

# This task can be used with caching
max_mip = PyramidDistanceFieldSet.get_max_mip(
mip=self.mip, dist=self.decay_dist, blur_rate=self.blur_rate
)
field = fields.read(bcube=pbcube, z_list=z_list, mip=self.mip)
cropped_field = helpers.crop(field, self.pad)
self.output_field.write(cropped_field, bcube=self.bcube, mip=self.mip)
mips = range(self.mip, max_mip)
for layer in input_fields:
for mip in mips:
layer.flush(mip)

for field in input_fields:
field.cv.set_param(key="cache", value=False)
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If you have to temporarily override a setting for an input field in-place, you should ideally keep track of the original value(s) and ensure they are always correctly restored. Right now, if the user passed the fields already with cache=True and the function succeeds, cache will suddenly be set to False. Conversely, if the user passed the fields with cache=False but the broadcasting fails for whatever reason, the fields will suddenly stay at cache=True after the exception is handled outside.

Is it necessary to enforce caching here, rather than leaving it to the user? I could imagine that in the special case of chunk_z=1, caching might actually be slower.

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I overrode the cache value based on feedback from @supersergiy. I agree that restoring the user's original cache value for a CloudVolume is a better practice -- see new commits. Fair point that caching would degrade performance for chunk_z == 1 -- fixed in new commit.

Loading