     Conv3DTranspose,
     AveragePooling3D,
     MaxPooling3D,
-    GlobalMaxPooling1D,
-    GlobalMaxPooling2D,
-    GlobalMaxPooling3D,
     UpSampling1D,
     UpSampling2D,
     UpSampling3D,
-    BatchNormalization,
     LayerNormalization,
 )
 from keras_flops import get_flops
@@ -135,26 +131,6 @@ def test_maxpooling1d2d3d():
     assert flops == in_w * in_h * in_z * kernel
 
 
-@pytest.mark.xfail
-def test_global_maxpooling1d2d3d():
-    in_w = 32
-    in_h = 32
-    in_z = 32
-    kernel = 32
-
-    model = Sequential(GlobalMaxPooling1D(input_shape=(in_w, kernel)))
-    flops = get_flops(model, batch_size=1)
-    assert flops == in_w * kernel
-
-    model = Sequential(GlobalMaxPooling2D(input_shape=(in_w, in_h, kernel)))
-    flops = get_flops(model, batch_size=1)
-    assert flops == in_w * in_h * kernel
-
-    model = Sequential(GlobalMaxPooling3D(input_shape=(in_w, in_h, in_z, kernel)))
-    flops = get_flops(model, batch_size=1)
-    assert flops == in_w * in_h * in_z * kernel
-
-
 @pytest.mark.xfail
 def test_upsampling1d2d3d():
     in_w = 32
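(Not part of the diff.) The removed global-max-pooling test above encoded the expectation that a global max pool performs roughly one comparison per input element, so the expected FLOPs equal the input element count. A minimal sketch of that expectation, mirroring the removed 2D case and assuming keras_flops is installed; the test was marked xfail, so get_flops may report a different value:

from tensorflow.keras import Sequential
from tensorflow.keras.layers import GlobalMaxPooling2D
from keras_flops import get_flops

in_w, in_h, kernel = 32, 32, 32
# One comparison per input element across the pooled axes and channels.
expected = in_w * in_h * kernel  # 32768
model = Sequential(GlobalMaxPooling2D(input_shape=(in_w, in_h, kernel)))
flops = get_flops(model, batch_size=1)
print(flops, expected)  # may differ: the original test was xfail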
@@ -182,28 +158,6 @@ def test_upsampling1d2d3d():
     assert flops == in_w * in_h * in_z * kernel
 
 
-@pytest.mark.xfail
-def test_batchnormalization():
-    """
-    batch normalization in tf uses gen_nn_ops.fused_batch_norm_v3 if the input shape is 4D
-    """
-    in_w = 32
-    in_h = 32
-    in_ch = 3
-
-    model = Sequential(
-        BatchNormalization(
-            beta_initializer="ones",
-            gamma_initializer="ones",
-            input_shape=(in_w, in_h, in_ch),
-        )
-    )
-    flops = get_flops(model, batch_size=1)
-    assert (
-        flops == 5 * in_ch + in_w * in_h * in_ch
-    ), "fused is True, fused_batch_norm_v3 is not supported"
-
-
 @pytest.mark.xfail
 def test_layernormalization():
     """
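(Not part of the diff.) The removed batch-normalization test above asserted the fused batch norm cost as 5 ops per channel plus one op per input element. A small sketch of that arithmetic under the shapes the removed test used:

# Per-channel work (rsqrt(var + eps), inv *= gamma, shift terms): ~5 ops per channel.
# Per-element work (x' = inv * x + shifted beta): one op per input element.
in_w, in_h, in_ch = 32, 32, 3
expected = 5 * in_ch + in_w * in_h * in_ch
print(expected)  # 3087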
@@ -213,11 +167,12 @@ def test_layernormalization():
     2. (1 ops * |var|) inv *= gamma (scale)
     3. (|x| + |mean| + |var| ops) x' = inv * x + beta (shift) - mean * inv
     , where |var| = |mean| = 1 by default
-    Thus, 5 channel size + input element size.
+    Thus, 5 + input element size.
 
-    Use nn.fused_batch_norm (gen_nn_ops.fused_batch_norm_v3) for layer normalization, as calculated above,
-    but gen_nn_ops.fused_batch_norm_v3 is not registered yet, so the correct FLOPs cannot be evaluated.
+    Use nn.fused_batch_norm (gen_nn_ops.fused_batch_norm_v3) for layer normalization, as calculated above.
+    gen_nn_ops.fused_batch_norm_v3 supports only 4D inputs, so the data is reshaped to 4D before being fed in.
     squeezed_shape (ndim ops), scale (|x| ops) and shift (not float ops) are calculated.
+    NOTE: is_training is True even if the trainable attribute of the tf.keras.Model instance is set to False, so the statistics will be incorrect.
     """
     in_w = 32
     in_h = 32
@@ -244,6 +199,13 @@ def test_layernormalization():
         )
     )
     flops = get_flops(model, batch_size=1)
-    assert flops == len(input_shape) + 1 + in_w * in_h * in_ch, "fused is True"
+    assert (
+        flops
+        == len(input_shape)
+        + 1
+        + 5
+        + in_w * in_h * in_ch
+        + 5 * in_ch
+        + in_w * in_h * in_ch
+    ), "fused is True. check gen_nn_ops.fused_batch_norm_v3"
 
-    assert flops == len(input_shape) + 1 + 5 * in_ch + in_w * in_h * in_ch
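(Not part of the diff.) A worked sketch of the updated assertion above, assuming in_ch = 3 and input_shape = (in_w, in_h, in_ch) as in the other tests in this file; the term grouping follows the docstring's fused_batch_norm_v3 reading and is an interpretation, not part of the commit:

in_w, in_h, in_ch = 32, 32, 3
input_shape = (in_w, in_h, in_ch)
expected = (
    len(input_shape) + 1       # squeezed_shape (ndim ops) + scale
    + 5                        # per-group normalization ops (|mean| = |var| = 1)
    + in_w * in_h * in_ch      # elementwise normalization
    + 5 * in_ch                # per-channel gamma/beta ops
    + in_w * in_h * in_ch      # elementwise scale and shift
)
print(expected)  # 6168 under these assumptions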