@@ -22,12 +22,14 @@ def __init__(
         activation,
         kernel_initializer,
         regularization=None,
+        residue=False,
     ):
         self.layer_size_lo = layer_size_low_fidelity
         self.layer_size_hi = layer_size_high_fidelity
         self.activation = activations.get(activation)
         self.kernel_initializer = initializers.get(kernel_initializer)
         self.regularizer = regularizers.get(regularization)
+        self.residue = residue

         self.training = None
         self.dropout = None
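The new `residue` flag is simply stored on the instance here; it only changes how the high-fidelity output is assembled in `build()` (second hunk below). A minimal usage sketch, assuming the constructor shown here belongs to a multi-fidelity network class (called `MfNN` in this sketch) whose leading arguments match the attributes assigned above; the layer sizes, activation, and initializer are placeholders, not values from this change:

```python
# Hypothetical call; only the residue keyword comes from this diff.
net = MfNN(
    layer_size_low_fidelity=[1, 20, 20, 1],
    layer_size_high_fidelity=[2, 10, 10, 1],
    activation="tanh",
    kernel_initializer="Glorot uniform",
    residue=True,  # opt in to the residual combination added in this change
)
```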
@@ -88,9 +90,16 @@ def build(self):
             y, self.layer_size_hi[-1], use_bias=False, regularizer=self.regularizer
         )
         # Linear + nonlinear
-        alpha = tf.Variable(0, dtype=config.real(tf))
-        alpha = activations.get("tanh")(alpha)
-        self.y_hi = y_hi_l + alpha * y_hi_nl
+        if not self.residue:
+            alpha = tf.Variable(0, dtype=config.real(tf))
+            alpha = activations.get("tanh")(alpha)
+            self.y_hi = y_hi_l + alpha * y_hi_nl
+        else:
+            alpha1 = tf.Variable(0, dtype=config.real(tf))
+            alpha1 = activations.get("tanh")(alpha1)
+            alpha2 = tf.Variable(0, dtype=config.real(tf))
+            alpha2 = activations.get("tanh")(alpha2)
+            self.y_hi = self.y_lo + 0.1 * (alpha1 * y_hi_l + alpha2 * y_hi_nl)

         self.target_lo = tf.placeholder(config.real(tf), [None, self.layer_size_lo[-1]])
         self.target_hi = tf.placeholder(config.real(tf), [None, self.layer_size_hi[-1]])
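In the default path, the high-fidelity output is `y_hi_l + alpha * y_hi_nl`, the linear branch plus a tanh-bounded share of the nonlinear branch. With `residue=True`, the output instead starts from the low-fidelity prediction `self.y_lo` and adds a correction scaled by 0.1, built from both high-fidelity branches with separately trainable coefficients. A framework-free NumPy sketch of the two rules; the `alpha` arguments stand in for the trainable scalars after the tanh squashing:

```python
import numpy as np

def combine_high_fidelity(y_lo, y_hi_l, y_hi_nl, alpha1, alpha2, residue=False):
    """Restates the two output-combination rules from this diff on plain arrays."""
    if not residue:
        # default: linear branch plus a tanh-bounded share of the nonlinear branch
        return y_hi_l + alpha1 * y_hi_nl
    # residue mode: low-fidelity prediction plus a small learnable correction
    return y_lo + 0.1 * (alpha1 * y_hi_l + alpha2 * y_hi_nl)

y_lo = np.array([[0.9]])
y_hi_l = np.array([[0.3]])
y_hi_nl = np.array([[0.2]])
print(combine_high_fidelity(y_lo, y_hi_l, y_hi_nl, 0.5, 0.5, residue=True))
# [[0.925]]  -> stays close to y_lo, nudged by the scaled correction
```

Anchoring the output at `self.y_lo` and scaling the correction by 0.1 presumably keeps training close to the low-fidelity solution early on: both trainable scalars start at 0, so tanh(0) = 0 and the residue-mode output initially equals `self.y_lo` exactly.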