From e714c5790339fa91b0eea33219227d0bf1f4cb0b Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Wed, 5 Apr 2023 04:55:23 +0000
Subject: [PATCH] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 monai/networks/blocks/head_controller.py | 20 +++++++--------
 monai/networks/nets/universal_model.py   | 31 ++++++++++++------------
 2 files changed, 24 insertions(+), 27 deletions(-)

diff --git a/monai/networks/blocks/head_controller.py b/monai/networks/blocks/head_controller.py
index e0f8dc9c5b4..a19e9251140 100755
--- a/monai/networks/blocks/head_controller.py
+++ b/monai/networks/blocks/head_controller.py
@@ -18,12 +18,12 @@ class HeadController(nn.Module):
     """
     Text-based controller for segmentation outputs, the text-driven segmentor enables for optional outputs instead of
     fixed output channels. Users can choose and control the number and name of output channels from a mult-class segmentation
-    model. This can enabble incremental learning by adding new classes to a existing pre-trained model without
+    model. This can enable incremental learning by adding new classes to an existing pre-trained model without
     catatrophic forgetting.
-    
+
     Text-dirven segmentor, based on: "Liu et al.,
     CLIP-Driven Universal Model for Organ Segmentation and Tumor Detection "
-    """
+    """
     def __init__(
         self,
         out_channels: int,
@@ -45,11 +45,11 @@ def __init__(
             text_encoding: the text embedding features passed.
         """
         super().__init__()
-        
+
         self.head_hidden_size = head_hidden_size
-        self.bias_nums = [head_hidden_size] * (head_layers - 1) + [1] # defined by segmentor head's hidden size, last element of 1. 
+        self.bias_nums = [head_hidden_size] * (head_layers - 1) + [1] # defined by segmentor head's hidden size, last element of 1.
         self.weight_nums = [head_in_channels*head_hidden_size] + [head_hidden_size*head_hidden_size]*(head_layers-2) + [head_hidden_size] #first+intermediate+last layer
-        
+
         self.class_num = out_channels
         self.text_encoding = text_encoding
         # text-driven controller: connection of bottleneck feature to segmentor features, e.g., from 256(*2) to weights and bias nums
@@ -57,7 +57,7 @@ def __init__(
             self.controller = nn.Conv3d(2*hidden_size, sum(self.weight_nums+self.bias_nums), kernel_size=1, stride=1, padding=0)
         else:
             self.controller = nn.Conv3d(hidden_size, sum(self.weight_nums+self.bias_nums), kernel_size=1, stride=1, padding=0)
-        # convolution layer of backbone output to segmentor head input size, e.g., 48 to 8 
+        # convolution layer of backbone output to segmentor head input size, e.g., 48 to 8
         self.precls_conv = nn.Sequential(
             nn.GroupNorm(16, feature_size),
             nn.ReLU(inplace=True),
@@ -106,7 +106,7 @@ def heads_forward(self, features, weights, biases, num_insts):
                 x = nn.functional.relu(x)
         return x
 
-    def forward(self, x, out, text_encoding=None, logits_options=None): 
+    def forward(self, x, out, text_encoding=None, logits_options=None):
         logits_options = range(self.class_num) if not isinstance(logits_options, list) else logits_options
         b = x.shape[0]
         logits_array = []
@@ -128,8 +128,6 @@ def forward(self, x, out, text_encoding=None, logits_options=None):
             weights, biases = self.parse_dynamic_params(params, self.head_hidden_size, self.weight_nums, self.bias_nums)
             logits = self.heads_forward(head_inputs, weights, biases, N)
             logits_array.append(logits.reshape(1, -1, D, H, W))
-        
+
         out = torch.cat(logits_array,dim=0)
         return out
-
-
diff --git a/monai/networks/nets/universal_model.py b/monai/networks/nets/universal_model.py
index 2bc0505ad6e..4fbfe631fb4 100755
--- a/monai/networks/nets/universal_model.py
+++ b/monai/networks/nets/universal_model.py
@@ -14,16 +14,16 @@ class Universal_model(nn.Module):
     """
     Universal Model for organ and tumor segmentation, based on: "Liu et al.,
     CLIP-Driven Universal Model for Organ Segmentation and Tumor Detection "
-    """ 
+    """
     def __init__(
-        self, 
-        img_size, 
-        in_channels, 
-        out_channels, 
-        bottleneck_size: int = 768, 
-        text_dim: int = 512, 
-        hidden_size: int = 256, 
-        backbone: str = 'swinunetr', 
+        self,
+        img_size,
+        in_channels,
+        out_channels,
+        bottleneck_size: int = 768,
+        text_dim: int = 512,
+        hidden_size: int = 256,
+        backbone: str = 'swinunetr',
         encoding: str = 'clip_embedding',
         logits_options: list = None,
     ):
@@ -41,7 +41,7 @@ def __init__(
                 use_checkpoint=False,
             )
         else:
-            raise Exception('{} backbone is not implemented, please add your own'.format(backbone))
+            raise Exception(f'{backbone} backbone is not implemented, please add your own')
         self.class_num = out_channels
         self.logits_options = logits_options
         # text encoder
@@ -74,7 +74,7 @@ def load_params(self, model_dict):
             self.backbone.load_state_dict(store_dict)
             print('Use swin unetr pretrained weights')
         else:
-            raise Exception('{} backbone is not implemented, please add your own'.format(self.backbone_name))
+            raise Exception(f'{self.backbone_name} backbone is not implemented, please add your own')
 
     def forward(self, x_in):
         # get backbone feature
@@ -89,7 +89,7 @@ def forward(self, x_in):
 
 class SwinUNETR_backbone(SwinUNETR):
     """
-    Universal Model uses SwinUNETR as backbone without the segmentation head based on: 
+    Universal Model uses SwinUNETR as backbone without the segmentation head based on:
     "Hatamizadeh et al.,
     Swin UNETR: Swin Transformers for Semantic Segmentation of Brain Tumors in MRI Images
 
@@ -98,8 +98,8 @@ class SwinUNETR_backbone(SwinUNETR):
     "Tang et al.,
     Self-Supervised Pre-Training of Swin Transformers for 3D Medical Image Analysis
     "
-    """ 
-    def __init__( 
+    """
+    def __init__(
         self,
         img_size: Union[Sequence[int], int],
         in_channels: int,
@@ -115,7 +115,7 @@ def __init__(
         use_checkpoint: bool = False,
         spatial_dims: int = 3,
     ):
-        super(SwinUNETR_backbone, self).__init__(img_size,in_channels,out_channels,feature_size=48)
+        super().__init__(img_size,in_channels,out_channels,feature_size=48)
 
     def forward(self, x_in):
         hidden_states_out = self.swinViT(x_in, self.normalize)
@@ -132,4 +132,3 @@ def forward(self, x_in):
         out = self.decoder1(dec0, enc0)
 
         return dec4, out
-
\ No newline at end of file
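
Below is a brief usage sketch of the Universal_model class touched by this patch; it is illustrative only, assumes the PR branch providing monai.networks.nets.universal_model is installed, and is not part of the patch itself.

# Hypothetical example: instantiate Universal_model with the constructor signature
# shown in the diff above and run a forward pass. The image size, channel counts,
# and input shape are illustrative assumptions; additional setup (e.g., loading
# pretrained weights via load_params or CLIP text embeddings) may be required.
import torch

from monai.networks.nets.universal_model import Universal_model  # assumed import path from this PR

model = Universal_model(
    img_size=(96, 96, 96),      # assumed patch size compatible with the SwinUNETR backbone
    in_channels=1,              # single-modality 3D volume
    out_channels=32,            # number of text-driven output classes (illustrative)
    backbone="swinunetr",       # per the diff, other values raise an Exception
    encoding="clip_embedding",
)

x_in = torch.randn(1, 1, 96, 96, 96)  # (batch, channel, D, H, W)
with torch.no_grad():
    logits = model(x_in)  # per the diff, forward takes a single image tensor x_in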