|
5 | 5 |
|
6 | 6 | import numpy as np |
7 | 7 |
|
8 | | -from chunkflow.lib.cartesian_coordinate import BoundingBox, Cartesian, BoundingBoxes |
9 | | -from chunkflow.chunk import Chunk, load_chunk |
| 8 | +from chunkflow.lib.cartesian_coordinate import BoundingBox, Cartesian |
| 9 | +from chunkflow.chunk import Chunk |
| 10 | +from chunkflow.chunk.utils import load_chunk_or_volume |
10 | 11 | from chunkflow.lib.synapses import Synapses |
11 | 12 |
|
12 | 13 | from neutorch.data.patch import Patch |
13 | 14 | from neutorch.data.transform import * |
14 | | - |
15 | | -from cloudvolume import CloudVolume |
| 15 | +from chunkflow.volume import PrecomputedVolume |
16 | 16 |
|
17 | 17 | DEFAULT_PATCH_SIZE = Cartesian(128, 128, 128) |
18 | 18 | DEFAULT_NUM_CLASSES = 1 |
@@ -48,7 +48,8 @@ def sampling_weight(self) -> int: |
48 | 48 |
|
49 | 49 | class Sample(AbstractSample): |
50 | 50 | def __init__(self, |
51 | | - images: List[Chunk], label: Union[np.ndarray, Chunk], |
| 51 | +        images: List[Union[Chunk, PrecomputedVolume]],
| 52 | + label: Union[Chunk, PrecomputedVolume], |
52 | 53 | output_patch_size: Cartesian, |
53 | 54 | forbbiden_distance_to_boundary: tuple = None) -> None: |
54 | 55 | """Image sample with ground truth annotations |
@@ -351,13 +352,15 @@ def __init__(self, |
351 | 352 | def from_explicit_path(cls, |
352 | 353 | image_paths: list, label_path: str, |
353 | 354 | output_patch_size: Cartesian, |
354 | | - num_classes: int=DEFAULT_NUM_CLASSES): |
355 | | - label = load_chunk(label_path) |
| 355 | + num_classes: int=DEFAULT_NUM_CLASSES, |
| 356 | + **kwargs, |
| 357 | + ): |
| 358 | + label = load_chunk_or_volume(label_path, **kwargs) |
356 | 359 | print(f'label path: {label_path} with size {label.shape}') |
357 | 360 |
|
358 | 361 | images = [] |
359 | 362 | for image_path in image_paths: |
360 | | - image = load_chunk(image_path) |
| 363 | + image = load_chunk_or_volume(image_path, **kwargs) |
361 | 364 | images.append(image) |
362 | 365 | print(f'image path: {image_path} with size {image.shape}') |
363 | 366 | return cls(images, label, output_patch_size, num_classes=num_classes) |
@@ -438,76 +441,6 @@ def transform(self): |
438 | 441 | # MissAlignment(), |
439 | 442 | ]) |
440 | 443 |
|
441 | | -class PrecomputedVolumeSample(AbstractSample): |
442 | | - def __init__(self, |
443 | | - output_patch_size: Union[int, tuple, Cartesian], |
444 | | - volume: Union[str, CloudVolume], |
445 | | - mask: Chunk = None, |
446 | | - forground_weight: int = None): |
447 | | - """Neuroglancer Precomputed Volume Dataset |
448 | | -
|
449 | | - Args: |
450 | | - volume_path (str): cloudvolume precomputed path |
451 | | - patch_size (Union[int, tuple], optional): patch size of network input. Defaults to volume block size. |
452 | | - mask (Chunk, optional): forground mask. Defaults to None. |
453 | | - forground_weight (int, optional): weight of bounding boxes containing forground voxels. Defaults to None. |
454 | | - """ |
455 | | - super.__init__(output_patch_size) |
456 | | - |
457 | | - if isinstance(volume, str): |
458 | | - self.vol = CloudVolume( |
459 | | - volume, |
460 | | - fill_missing=True, |
461 | | - parallel=False, |
462 | | - progress=False, |
463 | | - green_threads = False, |
464 | | - ) |
465 | | - elif isinstance(volume, CloudVolume): |
466 | | - self.vol = volume |
467 | | - else: |
468 | | - raise ValueError("volume should be either an instance of CloudVolume or precomputed volume path.") |
469 | | - |
470 | | - # self.voxel_size = tuple(self.vol.resolution) |
471 | | - |
472 | | - self.bboxes = BoundingBoxes.from_manual_setup( |
473 | | - self.output_patch_size, |
474 | | - roi_start=(0, 0, 0), |
475 | | - roi_stop=self.vol.bounds.maxpt[-3:][::-1], |
476 | | - bounded=True, |
477 | | - ) |
478 | | - print(f'found {len(self.bboxes)} bounding boxes in volume: {volume}') |
479 | | - |
480 | | - if mask is not None: |
481 | | - # find out bboxes containing forground voxels |
482 | | - |
483 | | - if forground_weight is None: |
484 | | - pass |
485 | | - |
486 | | - def __getitem__(self, idx: int): |
487 | | - bbox = self.bboxes[idx] |
488 | | - xyz_slices = bbox.to_slices()[::-1] |
489 | | - print('xyz slices: ', xyz_slices) |
490 | | - image = self.vol[xyz_slices] |
491 | | - image = np.asarray(image) |
492 | | - image = np.transpose(image) |
493 | | - # image = image.astype(np.float32) |
494 | | - # image /= 255. |
495 | | - # chunk = Chunk(arr, voxel_offset=bbox.minpt, voxel_size=self.voxel_size) |
496 | | - # tensor = torch.Tensor(arr) |
497 | | - target = deepcopy(image) |
498 | | - patch = Patch(image, target) |
499 | | - self.transform(patch) |
500 | | - patch.to_tensor() |
501 | | - patch.normalize() |
502 | | - return patch.image, patch.target |
503 | | - |
504 | | - @property |
505 | | - def random_patch(self): |
506 | | - idx = random.randrange(0, len(self.bboxes)) |
507 | | - return self.__getitem__(idx) |
508 | | - |
509 | | - def __len__(self): |
510 | | - return len(self.bboxes) |
511 | 444 |
|
512 | 445 | if __name__ == '__main__': |
513 | 446 | import os |
|
0 commit comments