 from .nn import NN
 from .. import activations
 from .. import initializers
+from .. import regularizers


 class FNN(NN):
     """Fully-connected neural network."""

-    def __init__(self, layer_sizes, activation, kernel_initializer):
+    def __init__(
+        self,
+        layer_sizes,
+        activation,
+        kernel_initializer,
+        regularization=None,
+        dropout_rate=0,
+    ):
         super().__init__()
         if isinstance(activation, list):
             if not (len(layer_sizes) - 1) == len(activation):
@@ -20,6 +28,14 @@ def __init__(self, layer_sizes, activation, kernel_initializer):
         self.activation = activations.get(activation)
         initializer = initializers.get(kernel_initializer)
         initializer_zero = initializers.get("zeros")
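+        # Optional weight regularizer; one dropout layer per hidden layer.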
+        self.regularizer = regularizers.get(regularization)
+        self.dropout_rate = dropout_rate
+        if dropout_rate > 0:
+            self.dropouts = [
+                paddle.nn.Dropout(p=dropout_rate)
+                for _ in range(1, len(layer_sizes) - 1)
+            ]

         self.linears = paddle.nn.LayerList()
         for i in range(1, len(layer_sizes)):
@@ -37,6 +53,9 @@ def forward(self, inputs):
                 if isinstance(self.activation, list)
                 else self.activation(linear(x))
             )
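+            # Apply dropout after each hidden-layer activation when enabled.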
+            if self.dropout_rate > 0:
+                x = self.dropouts[j](x)
         x = self.linears[-1](x)
         if self._output_transform is not None:
             x = self._output_transform(inputs, x)
@@ -58,11 +77,14 @@ class PFNN(NN):
         kernel_initializer: Initializer for the kernel weights matrix.
     """

-    def __init__(self, layer_sizes, activation, kernel_initializer):
+    def __init__(
+        self, layer_sizes, activation, kernel_initializer, regularization=None
+    ):
         super().__init__()
         self.activation = activations.get(activation)
         initializer = initializers.get(kernel_initializer)
         initializer_zero = initializers.get("zeros")
+        self.regularizer = regularizers.get(regularization)

         if len(layer_sizes) <= 1:
             raise ValueError("must specify input and output sizes")
@@ -73,7 +95,6 @@ def __init__(self, layer_sizes, activation, kernel_initializer):

         n_output = layer_sizes[-1]

-
         def make_linear(n_input, n_output):
             linear = paddle.nn.Linear(n_input, n_output)
             initializer(linear.weight)
@@ -92,18 +113,22 @@ def make_linear(n_input, n_output):
                 if isinstance(prev_layer_size, (list, tuple)):
                     # e.g. [8, 8, 8] -> [16, 16, 16]
                     self.layers.append(
-                        paddle.nn.LayerList([
-                            make_linear(prev_layer_size[j], curr_layer_size[j])
-                            for j in range(n_output)
-                        ])
+                        paddle.nn.LayerList(
+                            [
+                                make_linear(prev_layer_size[j], curr_layer_size[j])
+                                for j in range(n_output)
+                            ]
+                        )
                     )
                 else:
                     # e.g. 64 -> [8, 8, 8]
                     self.layers.append(
-                        paddle.nn.LayerList([
-                            make_linear(prev_layer_size, curr_layer_size[j])
-                            for j in range(n_output)
-                        ])
+                        paddle.nn.LayerList(
+                            [
+                                make_linear(prev_layer_size, curr_layer_size[j])
+                                for j in range(n_output)
+                            ]
+                        )
                     )
             else:  # e.g. 64 -> 64
                 if not isinstance(prev_layer_size, int):
@@ -115,10 +140,9 @@ def make_linear(n_input, n_output):
         # output layers
         if isinstance(layer_sizes[-2], (list, tuple)):  # e.g. [3, 3, 3] -> 3
             self.layers.append(
-                paddle.nn.LayerList([
-                    make_linear(layer_sizes[-2][j], 1)
-                    for j in range(n_output)
-                ])
+                paddle.nn.LayerList(
+                    [make_linear(layer_sizes[-2][j], 1) for j in range(n_output)]
+                )
             )
         else:
             self.layers.append(make_linear(layer_sizes[-2], n_output))
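
For reference, a minimal sketch of how the updated constructor might be called. The layer sizes, the "tanh" and "Glorot uniform" identifiers, and the ["l2", 1e-5] regularization spec are illustrative assumptions in DeepXDE's usual style, not part of this diff:

    import deepxde as dde  # assumes the paddle backend is selected

    net = dde.nn.FNN(
        [2, 64, 64, 1],               # 2 inputs, two hidden layers of 64, 1 output
        "tanh",                       # activation, resolved via activations.get
        "Glorot uniform",             # kernel initializer, resolved via initializers.get
        regularization=["l2", 1e-5],  # resolved via regularizers.get; None disables it
        dropout_rate=0.1,             # dropout after each hidden activation; 0 disables it
    )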