@@ -35,6 +35,7 @@ def xavier_init(module, gain=1, bias=0, distribution='normal'):
     if hasattr(module, 'bias') and module.bias is not None:
         nn.init.constant_(module.bias, bias)
 
+
 class ConvModule(nn.Module):
     def __init__(self,
                  in_channels,
@@ -78,7 +79,7 @@ class MultiLevelNeck(nn.Module):
 
     Args:
         in_channels (List[int]): Number of input channels per scale.
-        out_channels (int): Number of output channels (used at each scale).
+        out_channels (List[int]): Number of output channels per scale.
         scales (List[float]): Scale factors for each input feature map.
             Default: [0.5, 1, 2, 4]
         norm_cfg (dict): Config dict for normalization layer. Default: None.
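For reference, a minimal construction sketch under the updated interface documented above; the channel counts are illustrative assumptions, not values taken from this commit:

# Hypothetical usage of MultiLevelNeck as patched here (all values assumed).
neck = MultiLevelNeck(
    in_channels=[768, 768, 768, 768],
    out_channels=[96, 192, 384, 768],
    scales=[0.5, 1, 2, 4])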
@@ -94,25 +95,26 @@ def __init__(self,
                  act_cfg=None):
         super().__init__()
         assert isinstance(in_channels, list)
+        assert isinstance(out_channels, list)
         self.in_channels = in_channels
         self.out_channels = out_channels
         self.scales = scales
         self.num_outs = len(scales)
         self.lateral_convs = nn.ModuleList()
         self.convs = nn.ModuleList()
-        for in_channel in in_channels:
+        for in_channel, out_channel in zip(in_channels, out_channels):
             self.lateral_convs.append(
                 ConvModule(
                     in_channel,
-                    out_channels,
+                    out_channel,
                     kernel_size=1,
                     norm_cfg=norm_cfg,
                     act_cfg=act_cfg))
-        for _ in range(self.num_outs):
+        for out_channel in out_channels:
             self.convs.append(
                 ConvModule(
-                    out_channels,
-                    out_channels,
+                    out_channel,
+                    out_channel,
                     kernel_size=3,
                     padding=1,
                     stride=1,
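To illustrate what the rewritten loops build, here is a self-contained sketch that mirrors them with plain nn.Conv2d in place of this file's ConvModule wrapper; the channel lists and tensor size are assumptions for illustration only:

# Mirror of the per-scale channel mapping introduced above, using plain
# nn.Conv2d instead of the ConvModule wrapper (all values assumed).
import torch
import torch.nn as nn

in_channels = [768, 768, 768, 768]   # assumed backbone outputs, one per scale
out_channels = [96, 192, 384, 768]   # per-scale outputs enabled by this change

# 1x1 lateral convs map in_channel -> out_channel, one pair per scale.
lateral_convs = nn.ModuleList(
    nn.Conv2d(c_in, c_out, kernel_size=1)
    for c_in, c_out in zip(in_channels, out_channels))
# 3x3 convs keep each scale at its own out_channel.
convs = nn.ModuleList(
    nn.Conv2d(c_out, c_out, kernel_size=3, padding=1, stride=1)
    for c_out in out_channels)

x = torch.randn(2, 768, 32, 32)
for lateral, conv, c_out in zip(lateral_convs, convs, out_channels):
    y = conv(lateral(x))
    assert y.shape[1] == c_out   # each level now has its own channel count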