Merge branch 'Project-MONAI:dev' into dev
KumoLiu authored Jun 25, 2024
2 parents 63f3bd9 + e801540 commit f010b8b
Showing 27 changed files with 538 additions and 218 deletions.
16 changes: 9 additions & 7 deletions .github/workflows/blossom-ci.yml
@@ -29,13 +29,15 @@ jobs:
       args: ${{ env.args }}

     # This job only runs for pull request comments
-    if: contains('\
-      Nic-Ma,\
-      wyli,\
-      pxLi,\
-      YanxuanLiu,\
-      KumoLiu,\
-    ', format('{0},', github.actor)) && github.event.comment.body == '/build'
+    if: |
+      github.event.comment.body == '/build' &&
+      (
+        github.actor == 'Nic-Ma' ||
+        github.actor == 'wyli' ||
+        github.actor == 'pxLi' ||
+        github.actor == 'YanxuanLiu' ||
+        github.actor == 'KumoLiu'
+      )
     steps:
       - name: Check if comment is issued by authorized person
         run: blossom-ci
4 changes: 2 additions & 2 deletions CITATION.cff
@@ -6,8 +6,8 @@ title: "MONAI: Medical Open Network for AI"
 abstract: "AI Toolkit for Healthcare Imaging"
 authors:
   - name: "MONAI Consortium"
-date-released: 2023-10-12
-version: "1.3.0"
+date-released: 2024-05-21
+version: "1.3.1"
 identifiers:
   - description: "This DOI represents all versions of MONAI, and will always resolve to the latest one."
     type: doi
2 changes: 1 addition & 1 deletion monai/apps/deepedit/transforms.py
@@ -30,7 +30,7 @@

 logger = logging.getLogger(__name__)

-distance_transform_cdt, _ = optional_import("scipy.ndimage.morphology", name="distance_transform_cdt")
+distance_transform_cdt, _ = optional_import("scipy.ndimage", name="distance_transform_cdt")


 class DiscardAddGuidanced(MapTransform):
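Note: the same one-line migration recurs below in deepgrow, nuclick, pathology, and metrics. SciPy deprecated the scipy.ndimage.morphology and scipy.ndimage.measurements submodules in favour of the flat scipy.ndimage namespace, so the optional imports now target the public module. A minimal sketch of the pattern (the toy mask is illustrative, not from the commit):

    from monai.utils import optional_import
    import numpy as np

    # returns (function, True) if scipy.ndimage provides the name, else (stub, False)
    distance_transform_cdt, has_cdt = optional_import("scipy.ndimage", name="distance_transform_cdt")

    if has_cdt:
        mask = np.zeros((5, 5), dtype=np.uint8)
        mask[1:4, 1:4] = 1
        print(distance_transform_cdt(mask))  # chamfer distance transform of the binary mask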
2 changes: 1 addition & 1 deletion monai/apps/deepgrow/transforms.py
@@ -27,7 +27,7 @@
 from monai.utils.enums import PostFix

 measure, _ = optional_import("skimage.measure", "0.14.2", min_version)
-distance_transform_cdt, _ = optional_import("scipy.ndimage.morphology", name="distance_transform_cdt")
+distance_transform_cdt, _ = optional_import("scipy.ndimage", name="distance_transform_cdt")

 DEFAULT_POST_FIX = PostFix.meta()
2 changes: 1 addition & 1 deletion monai/apps/nuclick/transforms.py
@@ -24,7 +24,7 @@

 measure, _ = optional_import("skimage.measure")
 morphology, _ = optional_import("skimage.morphology")
-distance_transform_cdt, _ = optional_import("scipy.ndimage.morphology", name="distance_transform_cdt")
+distance_transform_cdt, _ = optional_import("scipy.ndimage", name="distance_transform_cdt")


 class NuclickKeys(StrEnum):
2 changes: 1 addition & 1 deletion monai/apps/pathology/transforms/post/array.py
@@ -33,7 +33,7 @@
 from monai.utils.misc import ensure_tuple_rep
 from monai.utils.type_conversion import convert_to_dst_type, convert_to_tensor

-label, _ = optional_import("scipy.ndimage.measurements", name="label")
+label, _ = optional_import("scipy.ndimage", name="label")
 disk, _ = optional_import("skimage.morphology", name="disk")
 opening, _ = optional_import("skimage.morphology", name="opening")
 watershed, _ = optional_import("skimage.segmentation", name="watershed")
4 changes: 2 additions & 2 deletions monai/apps/pathology/utils.py
@@ -33,10 +33,10 @@ def compute_multi_instance_mask(mask: np.ndarray, threshold: float) -> Any:
     """

     neg = 255 - mask * 255
-    distance = ndimage.morphology.distance_transform_edt(neg)
+    distance = ndimage.distance_transform_edt(neg)
     binary = distance < threshold

-    filled_image = ndimage.morphology.binary_fill_holes(binary)
+    filled_image = ndimage.binary_fill_holes(binary)
     multi_instance_mask = measure.label(filled_image, connectivity=2)

     return multi_instance_mask
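Note: a hedged end-to-end sketch of compute_multi_instance_mask after this change. The mask and threshold values are made up for illustration; only the two ndimage calls come from the diff.

    import numpy as np
    from scipy import ndimage
    from skimage import measure

    mask = np.zeros((64, 64), dtype=np.uint8)
    mask[8:24, 8:24] = 1    # two well-separated square instances
    mask[40:56, 40:56] = 1

    neg = 255 - mask * 255
    distance = ndimage.distance_transform_edt(neg)    # was ndimage.morphology.distance_transform_edt
    binary = distance < 8
    filled_image = ndimage.binary_fill_holes(binary)  # was ndimage.morphology.binary_fill_holes
    multi_instance_mask = measure.label(filled_image, connectivity=2)
    print(multi_instance_mask.max())  # 2 -- one label per instance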
52 changes: 15 additions & 37 deletions monai/data/dataset.py
@@ -36,15 +36,7 @@

 from monai.data.meta_tensor import MetaTensor
 from monai.data.utils import SUPPORTED_PICKLE_MOD, convert_tables_to_dicts, pickle_hashing
-from monai.transforms import (
-    Compose,
-    Randomizable,
-    RandomizableTrait,
-    Transform,
-    apply_transform,
-    convert_to_contiguous,
-    reset_ops_id,
-)
+from monai.transforms import Compose, Randomizable, RandomizableTrait, Transform, convert_to_contiguous, reset_ops_id
 from monai.utils import MAX_SEED, convert_to_tensor, get_seed, look_up_option, min_version, optional_import
 from monai.utils.misc import first
@@ -77,15 +69,19 @@ class Dataset(_TorchDataset):
     }, }, }]
     """

-    def __init__(self, data: Sequence, transform: Callable | None = None) -> None:
+    def __init__(self, data: Sequence, transform: Sequence[Callable] | Callable | None = None) -> None:
         """
         Args:
             data: input data to load and transform to generate dataset for model.
-            transform: a callable data transform on input data.
+            transform: a callable, sequence of callables or None. If transform is not
+                a `Compose` instance, it will be wrapped in a `Compose` instance. Sequences
+                of callables are applied in order and if `None` is passed, the data is returned as is.
         """
         self.data = data
-        self.transform: Any = transform
+        try:
+            self.transform = Compose(transform) if not isinstance(transform, Compose) else transform
+        except Exception as e:
+            raise ValueError("`transform` must be a callable or a list of callables that is Composable") from e

     def __len__(self) -> int:
         return len(self.data)
@@ -95,7 +91,7 @@ def _transform(self, index: int):
         Fetch single data item from `self.data`.
         """
         data_i = self.data[index]
-        return apply_transform(self.transform, data_i) if self.transform is not None else data_i
+        return self.transform(data_i)

     def __getitem__(self, index: int | slice | Sequence[int]):
         """
@@ -264,8 +260,6 @@ def __init__(
             using the cached content and with re-created transform instances.
         """
-        if not isinstance(transform, Compose):
-            transform = Compose(transform)
         super().__init__(data=data, transform=transform)
         self.cache_dir = Path(cache_dir) if cache_dir is not None else None
         self.hash_func = hash_func
@@ -323,9 +317,6 @@ def _pre_transform(self, item_transformed):
             random transform object
         """
-        if not isinstance(self.transform, Compose):
-            raise ValueError("transform must be an instance of monai.transforms.Compose.")
-
         first_random = self.transform.get_index_of_first(
             lambda t: isinstance(t, RandomizableTrait) or not isinstance(t, Transform)
         )
@@ -346,9 +337,6 @@ def _post_transform(self, item_transformed):
             the transformed element through the random transforms
         """
-        if not isinstance(self.transform, Compose):
-            raise ValueError("transform must be an instance of monai.transforms.Compose.")
-
         first_random = self.transform.get_index_of_first(
             lambda t: isinstance(t, RandomizableTrait) or not isinstance(t, Transform)
         )
@@ -501,9 +489,6 @@ def _pre_transform(self, item_transformed):
         Returns:
             the transformed element up to the N transform object
         """
-        if not isinstance(self.transform, Compose):
-            raise ValueError("transform must be an instance of monai.transforms.Compose.")
-
         item_transformed = self.transform(item_transformed, end=self.cache_n_trans, threading=True)

         reset_ops_id(item_transformed)
@@ -519,9 +504,6 @@ def _post_transform(self, item_transformed):
         Returns:
             the final transformed result
         """
-        if not isinstance(self.transform, Compose):
-            raise ValueError("transform must be an instance of monai.transforms.Compose.")
-
         return self.transform(item_transformed, start=self.cache_n_trans)

@@ -809,8 +791,6 @@ def __init__(
             Not following these recommendations may lead to runtime errors or duplicated cache across processes.
         """
-        if not isinstance(transform, Compose):
-            transform = Compose(transform)
         super().__init__(data=data, transform=transform)
         self.set_num = cache_num  # tracking the user-provided `cache_num` option
         self.set_rate = cache_rate  # tracking the user-provided `cache_rate` option
@@ -1282,8 +1262,10 @@ def to_list(x):
         data = []
         for dataset in self.data:
             data.extend(to_list(dataset[index]))
+
         if self.transform is not None:
-            data = apply_transform(self.transform, data, map_items=False)  # transform the list data
+            self.transform.map_items = False  # Compose object map_items to false so transform is applied to list
+            data = self.transform(data)
         # use tuple instead of list as the default collate_fn callback of MONAI DataLoader flattens nested lists
         return tuple(data)
@@ -1432,15 +1414,11 @@ def __len__(self):

     def _transform(self, index: int):
         data = {k: v[index] for k, v in self.arrays.items()}
-
-        if not self.transform:
-            return data
-
-        result = apply_transform(self.transform, data)
+        result = self.transform(data) if self.transform is not None else data

         if isinstance(result, dict) or (isinstance(result, list) and isinstance(result[0], dict)):
             return result
-        raise AssertionError("With a dict supplied to apply_transform, should return a dict or a list of dicts.")
+        raise AssertionError("With a dict supplied to Compose, should return a dict or a list of dicts.")


 class CSVDataset(Dataset):
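Note: a hedged usage sketch of the new Dataset behaviour (toy data, not from the commit). A callable, a sequence of callables, or None is now wrapped in a Compose at construction time, which is why _transform can simply call self.transform(data_i).

    from monai.data import Dataset

    data = [1, 2, 3]

    # a sequence of callables is composed and applied in order
    ds = Dataset(data=data, transform=[lambda x: x + 1, lambda x: x * 10])
    print(ds[0])  # 20

    # None becomes an empty (identity) Compose, so items come back unchanged
    ds_plain = Dataset(data=data, transform=None)
    print(ds_plain[0])  # 1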
2 changes: 1 addition & 1 deletion monai/data/image_reader.py
@@ -1331,7 +1331,7 @@ def get_data(self, img: NrrdImage | list[NrrdImage]) -> tuple[np.ndarray, dict]:
             header[MetaKeys.SPACE] = SpaceKeys.LPS  # assuming LPS if not specified

         header[MetaKeys.AFFINE] = header[MetaKeys.ORIGINAL_AFFINE].copy()
-        header[MetaKeys.SPATIAL_SHAPE] = header["sizes"]
+        header[MetaKeys.SPATIAL_SHAPE] = header["sizes"].copy()
         [header.pop(k) for k in ("sizes", "space origin", "space directions")]  # rm duplicated data in header

         if self.channel_dim is None:  # default to "no_channel" or -1
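Note: why the .copy() matters, shown with a toy header (illustrative only). Without it, SPATIAL_SHAPE aliases the same numpy array as header["sizes"], so a later in-place edit to one silently changes the other.

    import numpy as np

    header = {"sizes": np.array([64, 64, 32])}

    aliased = header["sizes"]             # old behaviour: shares memory
    independent = header["sizes"].copy()  # new behaviour: owns its data

    header["sizes"][0] = 128
    print(aliased[0])      # 128 -- mutated through the alias
    print(independent[0])  # 64  -- unaffected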
6 changes: 3 additions & 3 deletions monai/metrics/utils.py
@@ -35,9 +35,9 @@
     optional_import,
 )

-binary_erosion, _ = optional_import("scipy.ndimage.morphology", name="binary_erosion")
-distance_transform_edt, _ = optional_import("scipy.ndimage.morphology", name="distance_transform_edt")
-distance_transform_cdt, _ = optional_import("scipy.ndimage.morphology", name="distance_transform_cdt")
+binary_erosion, _ = optional_import("scipy.ndimage", name="binary_erosion")
+distance_transform_edt, _ = optional_import("scipy.ndimage", name="distance_transform_edt")
+distance_transform_cdt, _ = optional_import("scipy.ndimage", name="distance_transform_cdt")

 __all__ = [
     "ignore_background",
24 changes: 16 additions & 8 deletions monai/networks/nets/daf3d.py
@@ -13,6 +13,7 @@

 from collections import OrderedDict
 from collections.abc import Callable, Sequence
+from functools import partial

 import torch
 import torch.nn as nn
@@ -25,6 +26,7 @@
 from monai.networks.blocks.convolutions import Convolution
 from monai.networks.blocks.feature_pyramid_network import ExtraFPNBlock, FeaturePyramidNetwork
 from monai.networks.layers.factories import Conv, Norm
+from monai.networks.layers.utils import get_norm_layer
 from monai.networks.nets.resnet import ResNet, ResNetBottleneck

 __all__ = [
@@ -170,27 +172,31 @@ class Daf3dResNetBottleneck(ResNetBottleneck):
         spatial_dims: number of spatial dimensions of the input image.
         stride: stride to use for second conv layer.
         downsample: which downsample layer to use.
+        norm: which normalization layer to use. Defaults to group.
     """

     expansion = 2

-    def __init__(self, in_planes, planes, spatial_dims=3, stride=1, downsample=None):
-        norm_type: Callable = Norm[Norm.GROUP, spatial_dims]
+    def __init__(
+        self, in_planes, planes, spatial_dims=3, stride=1, downsample=None, norm=("group", {"num_groups": 32})
+    ):
         conv_type: Callable = Conv[Conv.CONV, spatial_dims]

+        norm_layer = partial(get_norm_layer, name=norm, spatial_dims=spatial_dims)
+
         # in case downsample uses batch norm, change to group norm
         if isinstance(downsample, nn.Sequential):
             downsample = nn.Sequential(
                 conv_type(in_planes, planes * self.expansion, kernel_size=1, stride=stride, bias=False),
-                norm_type(num_groups=32, num_channels=planes * self.expansion),
+                norm_layer(channels=planes * self.expansion),
             )

         super().__init__(in_planes, planes, spatial_dims, stride, downsample)

         # change norm from batch to group norm
-        self.bn1 = norm_type(num_groups=32, num_channels=planes)
-        self.bn2 = norm_type(num_groups=32, num_channels=planes)
-        self.bn3 = norm_type(num_groups=32, num_channels=planes * self.expansion)
+        self.bn1 = norm_layer(channels=planes)
+        self.bn2 = norm_layer(channels=planes)
+        self.bn3 = norm_layer(channels=planes * self.expansion)

         # adapt second convolution to work with groups
         self.conv2 = conv_type(planes, planes, kernel_size=3, padding=1, stride=stride, groups=32, bias=False)
Expand All @@ -212,8 +218,10 @@ class Daf3dResNetDilatedBottleneck(Daf3dResNetBottleneck):
downsample: which downsample layer to use.
"""

def __init__(self, in_planes, planes, spatial_dims=3, stride=1, downsample=None):
super().__init__(in_planes, planes, spatial_dims, stride, downsample)
def __init__(
self, in_planes, planes, spatial_dims=3, stride=1, downsample=None, norm=("group", {"num_groups": 32})
):
super().__init__(in_planes, planes, spatial_dims, stride, downsample, norm)

# add dilation in second convolution
conv_type: Callable = Conv[Conv.CONV, spatial_dims]
Expand Down
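Note: a minimal sketch of the new normalisation plumbing, assuming a 3D network. get_norm_layer resolves the ("group", {...}) name/kwargs pair into a layer, and functools.partial pre-binds everything except the channel count, letting one norm_layer factory serve bn1/bn2/bn3 and the downsample path.

    from functools import partial
    from monai.networks.layers.utils import get_norm_layer

    norm = ("group", {"num_groups": 32})
    norm_layer = partial(get_norm_layer, name=norm, spatial_dims=3)

    bn1 = norm_layer(channels=64)   # GroupNorm(32, 64); replaces Norm[Norm.GROUP, 3](num_groups=32, num_channels=64)
    bn3 = norm_layer(channels=128)  # GroupNorm(32, 128)
    print(bn1, bn3)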
(Diffs for the remaining changed files were not loaded on this page.)
