Commit c3cc3f6 (parent: e3a2550)
Showing 93 changed files with 2,456 additions and 4,293 deletions.
@@ -1,10 +1,6 @@
from .backbone import *
from .efficientdet import *
from .base import *
from .metrics import *
from .neck import *
from .nn import *
from .optim import *
from .modules import *
from .tools import *
from .transformers import *

__version__ = '0.1.0'
@@ -0,0 +1,8 @@
from .blocks import build_block, list_blocks
from .components import build_component, list_components
from .layers import build_layer, list_layers
from .optim import (build_lr_scheduler, build_optimizer, list_lr_schedulers,
                    list_optimizers)
from .power_module import PowerModule
from .utils import (has_children, initialize_weights_, replace_module,
                    replace_module_attr_value)
@@ -0,0 +1,21 @@
import fnmatch

from .conv_block import Conv2dBlock, SeparableConv2dBlock

# from .mamba_block import build_mamba_block
# from .vit_block import build_vit_block


def build_block(name, **kwargs):
    cls = globals().get(name, None)
    if cls is None:
        raise ValueError(f'Block named {name} is not supported.')
    return cls(**kwargs)


def list_blocks(filter=''):
    block_list = [k for k in globals().keys() if 'Block' in k]
    if len(filter):
        return fnmatch.filter(block_list, filter)  # keep only blocks matching the pattern
    else:
        return block_list
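For context, a minimal usage sketch of the two helpers above. The top-level package name `mypkg` is a hypothetical placeholder, and the block names shown come from the conv_block module added later in this commit:

# Hypothetical usage; `mypkg` stands in for the actual package name.
from mypkg.nn.blocks import build_block, list_blocks

# List every class in the module whose name contains 'Block';
# an optional fnmatch pattern narrows the result.
print(list_blocks())              # e.g. ['Conv2dBlock', 'SeparableConv2dBlock']
print(list_blocks('Separable*'))  # ['SeparableConv2dBlock']

# Build a block by class name; keyword arguments go to its constructor.
block = build_block('Conv2dBlock', in_channels=3, out_channels=16)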
@@ -0,0 +1,192 @@
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..components import build_component
from ..power_module import PowerModule


class SeparableConv2dBlock(PowerModule):

    def __init__(
        self,
        in_channels: int,
        out_channels: Optional[int] = None,
        kernel: Union[int, Tuple[int, int]] = 3,
        stride: Union[int, Tuple[int, int]] = 1,
        padding: Union[int, Tuple[int, int]] = 1,
        bias: bool = False,
        inner_norm: Optional[Union[dict, nn.Module]] = None,
        inner_act: Optional[Union[dict, nn.Module]] = None,
        norm: Optional[Union[dict, nn.Module]] = None,
        act: Optional[Union[dict, nn.Module]] = None,
        init_type: str = 'normal',
    ):
        """
        A separable convolution block consisting of a depthwise convolution
        followed by a pointwise convolution.

        Args:
            in_channels (int):
                Number of input channels.
            out_channels (int, optional):
                Number of output channels. If not provided, defaults to `in_channels`.
            kernel (int or Tuple[int, int], optional):
                Size of the convolution kernel. Defaults to 3.
            stride (int or Tuple[int, int], optional):
                Stride of the convolution. Defaults to 1.
            padding (int or Tuple[int, int], optional):
                Padding added to all four sides of the input. Defaults to 1.
            bias (bool):
                Whether to include a bias term in the pointwise convolution.
                Note: if a normalization layer is given, bias is always set to False.
                Defaults to False.
            inner_norm (dict or nn.Module, optional):
                Normalization layer between the depthwise and pointwise convolutions.
                Defaults to None.
            inner_act (dict or nn.Module, optional):
                Activation layer between the depthwise and pointwise convolutions.
                Defaults to None.
            norm (dict or nn.Module, optional):
                Normalization layer after the pointwise convolution. Defaults to None.
            act (dict or nn.Module, optional):
                Activation layer after the pointwise convolution. Defaults to None.
            init_type (str, optional):
                Initialization method for the model parameters. Defaults to 'normal'.
        """
        super().__init__()
        out_channels = in_channels if out_channels is None else out_channels

        bias = False if norm is not None else bias

        self.block = nn.ModuleDict()

        self.block['dw_conv'] = nn.Conv2d(
            in_channels,
            in_channels,
            kernel_size=kernel,
            stride=stride,
            padding=padding,
            groups=in_channels,
            bias=False,
        )
        if inner_norm is not None:
            self.block['inner_norm'] = build_component(**inner_norm) if isinstance(inner_norm, dict) else inner_norm
        if inner_act is not None:
            self.block['inner_act'] = build_component(**inner_act) if isinstance(inner_act, dict) else inner_act
        self.block['pw_conv'] = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=bias,
        )
        if norm is not None:
            self.block['norm'] = build_component(**norm) if isinstance(norm, dict) else norm
        if act is not None:
            self.block['act'] = build_component(**act) if isinstance(act, dict) else act
        self.initialize_weights_(init_type)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        for _, m in self.block.items():
            x = m(x)
        return x

class Conv2dBlock(PowerModule):

    def __init__(
        self,
        in_channels: Union[float, int],
        out_channels: Union[float, int],
        kernel: Union[int, Tuple[int, int]] = 3,
        stride: Union[int, Tuple[int, int]] = 1,
        padding: Union[int, Tuple[int, int]] = 1,
        dilation: int = 1,
        groups: int = 1,
        bias: bool = False,
        padding_mode: str = 'zeros',
        norm: Optional[Union[dict, nn.Module]] = None,
        act: Optional[Union[dict, nn.Module]] = None,
        init_type: str = 'normal',
    ):
        """
        A 2D convolution block consisting of a convolution layer optionally
        followed by normalization and activation layers.

        Args:
            in_channels (int or float):
                Number of input channels.
            out_channels (int or float):
                Number of output channels.
            kernel (int or tuple, optional):
                Size of the convolutional kernel. Defaults to 3.
            stride (int or tuple, optional):
                Stride size. Defaults to 1.
            padding (int or tuple, optional):
                Padding size. Defaults to 1.
            dilation (int, optional):
                Spacing between kernel elements. Defaults to 1.
            groups (int, optional):
                Number of blocked connections from input channels to output
                channels. Defaults to 1.
            bias (bool, optional):
                Whether to include a bias term in the convolutional layer.
                Note: if a normalization layer is given, bias is always set to False.
                Defaults to False.
            padding_mode (str, optional):
                Options = {'zeros', 'reflect', 'replicate', 'circular'}.
                Defaults to 'zeros'.
            norm (dict or nn.Module, optional):
                Normalization layer or a dictionary of arguments for building a
                normalization layer. Defaults to None.
            act (dict or nn.Module, optional):
                Activation function or a dictionary of arguments for building an
                activation function. Defaults to None.
            init_type (str, optional):
                Method for initializing model parameters.
                Options = {'normal', 'uniform'}. Defaults to 'normal'.

        Examples for using norm and act:
            1. conv_block = Conv2dBlock(in_channels=3,
                                        out_channels=12,
                                        norm=nn.BatchNorm2d(12),
                                        act=nn.ReLU())
            2. conv_block = Conv2dBlock(in_channels=3,
                                        out_channels=12,
                                        norm={'name': 'BatchNorm2d', 'num_features': 12},
                                        act={'name': 'ReLU', 'inplace': True})

        Attributes:
            block (nn.ModuleDict): the assembled conv/norm/act modules.
        """
        super().__init__()
        self.block = nn.ModuleDict()

        bias = False if norm is not None else bias

        self.block['conv'] = nn.Conv2d(
            int(in_channels),
            int(out_channels),
            kernel_size=kernel,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias,
            padding_mode=padding_mode,
        )
        if norm is not None:
            self.block['norm'] = build_component(**norm) if isinstance(norm, dict) else norm
        if act is not None:
            self.block['act'] = build_component(**act) if isinstance(act, dict) else act

        self.initialize_weights_(init_type)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        for _, m in self.block.items():
            x = m(x)
        return x
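A short usage sketch for the two blocks above; the import path is a hypothetical placeholder, and the shapes and norm/act choices are illustrative only:

import torch
import torch.nn as nn

from mypkg.nn.blocks import Conv2dBlock, SeparableConv2dBlock  # hypothetical path

x = torch.randn(2, 3, 64, 64)

# Conv -> BatchNorm -> ReLU, with norm/act passed as ready-made modules.
conv = Conv2dBlock(in_channels=3, out_channels=12,
                   norm=nn.BatchNorm2d(12), act=nn.ReLU())
print(conv(x).shape)  # torch.Size([2, 12, 64, 64])

# Depthwise-separable variant, with norm/act given as dict configs that
# build_component resolves by class name.
sep = SeparableConv2dBlock(
    in_channels=3,
    out_channels=12,
    inner_norm={'name': 'BatchNorm2d', 'num_features': 3},
    inner_act={'name': 'ReLU', 'inplace': True},
    norm={'name': 'BatchNorm2d', 'num_features': 12},
    act={'name': 'ReLU', 'inplace': True},
)
print(sep(x).shape)  # torch.Size([2, 12, 64, 64])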
@@ -0,0 +1,2 @@
def build_mamba_block(**kwargs):
    raise NotImplementedError('build_mamba_block is not implemented yet.')
@@ -0,0 +1,2 @@
def build_vit_block(**kwargs):
    raise NotImplementedError('build_vit_block is not implemented yet.')
@@ -0,0 +1,29 @@
import fnmatch

from .activation import *
from .dropout import *
from .loss import *
from .norm import *
from .pooling import *


def build_component_cls(name):
    cls = globals().get(name, None)
    if cls is None:
        raise ValueError(f'Component named {name} is not supported.')
    return cls


def build_component(name, **options):
    cls = globals().get(name, None)
    if cls is None:
        raise ValueError(f'Component named {name} is not supported.')
    return cls(**options)


def list_components(filter=''):
    component_list = [k for k in globals().keys() if 'Component' in k]
    if len(filter):
        return fnmatch.filter(component_list, filter)  # keep only components matching the pattern
    else:
        return component_list
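For context, a hedged sketch of how the component factory might be used. The component names come from the Conv2dBlock docstring earlier in this commit, and the import path is a hypothetical placeholder:

from mypkg.nn.components import build_component, build_component_cls  # hypothetical path

# Resolve a component class by name without instantiating it.
norm_cls = build_component_cls('BatchNorm2d')

# Or build an instance directly, forwarding constructor options.
norm = build_component(name='BatchNorm2d', num_features=12)
act = build_component(name='ReLU', inplace=True)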
@@ -0,0 +1,6 @@
import torch.nn as nn
from torch.nn import AlphaDropout, Dropout, Dropout2d, Dropout3d

__all__ = [
    'Dropout', 'Dropout2d', 'Dropout3d', 'AlphaDropout',
]
@@ -0,0 +1,22 @@
import fnmatch

from .aspp import ASPP
from .grl import GradientReversalLayer
from .selayer import SELayer
from .vae import VAE
from .weighted_sum import WeightedSum


def build_layer(name, **options):
    cls = globals().get(name, None)
    if cls is None:
        raise ValueError(f'Layer named {name} is not supported.')
    return cls(**options)


def list_layers(filter=''):
    layer_list = [k for k in globals().keys() if 'Layer' in k]
    if len(filter):
        return fnmatch.filter(layer_list, filter)  # keep only layers matching the pattern
    else:
        return layer_list
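As with the other factories, a short usage sketch; the import path is a hypothetical placeholder, and the SELayer constructor argument is an assumption for illustration, not part of this commit:

from mypkg.nn.layers import build_layer, list_layers  # hypothetical path

# Only class names containing 'Layer' are listed; fnmatch patterns filter them.
print(list_layers())       # e.g. ['GradientReversalLayer', 'SELayer']
print(list_layers('SE*'))  # ['SELayer']

# Options are forwarded to the constructor; `in_channels` is an assumed
# SELayer argument shown only for illustration.
se = build_layer('SELayer', in_channels=64)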