@@ -86,19 +86,8 @@ def bbox_ohem(bbox_pred,bbox_target,label):
8686 return tf .reduce_mean (square_error )
8787
def landmark_ohem(landmark_pred, landmark_target, label):
    """Landmark regression loss over landmark samples only.

    Restored from the deleted hunk of this diff: the function had been
    gutted to `pass`, which breaks every network head that calls it.

    Args:
        landmark_pred: batch x 10 tensor of predicted landmark offsets.
        landmark_target: batch x 10 tensor of ground-truth landmark offsets.
        label: batch tensor of sample labels; landmark samples are tagged -2.

    Returns:
        Scalar mean squared error over the kept landmark samples.
    """
    # Only samples with label == -2 participate in landmark regression;
    # build a 0/1 mask selecting them.
    ones = tf.ones_like(label, dtype=tf.float32)
    zeros = tf.zeros_like(label, dtype=tf.float32)
    valid_inds = tf.where(tf.equal(label, -2), ones, zeros)
    # Per-sample squared error summed over the 10 landmark coordinates.
    square_error = tf.square(landmark_pred - landmark_target)
    square_error = tf.reduce_sum(square_error, axis=1)
    num_valid = tf.reduce_sum(valid_inds)
    # NOTE: unlike cls_ohem, no hard-example ratio is applied here —
    # every valid landmark sample is kept.
    # keep_num = tf.cast(num_valid*num_keep_radio, dtype=tf.int32)
    keep_num = tf.cast(num_valid, dtype=tf.int32)
    # Zero out invalid samples so top_k only selects valid ones.
    square_error = square_error * valid_inds
    _, k_index = tf.nn.top_k(square_error, k=keep_num)
    square_error = tf.gather(square_error, k_index)
    return tf.reduce_mean(square_error)
10291
10392def cal_accuracy (cls_prob ,label ):
10493 pred = tf .argmax (cls_prob ,axis = 1 )
@@ -206,51 +195,8 @@ def R_Net(inputs,label=None,bbox_target=None,landmark_target=None,training=True)
206195 return cls_prob ,bbox_pred ,landmark_pred
207196
def O_Net(inputs, label=None, bbox_target=None, landmark_target=None, training=True):
    """O-Net (output network) of the MTCNN cascade, fed 48x48 crops.

    Restored from the deleted hunk of this diff: the function had been
    gutted to `pass`.

    Args:
        inputs: batch x 48 x 48 x 3 image tensor — presumably; TODO confirm
            against the caller, the architecture below implies this shape.
        label: batch class labels (used only when training).
        bbox_target: batch x 4 bbox regression targets (training only).
        landmark_target: batch x 10 landmark targets (training only).
        training: when True, return the losses; otherwise the predictions.

    Returns:
        training=True:  (cls_loss, bbox_loss, landmark_loss, L2_loss, accuracy)
        training=False: (cls_prob, bbox_pred, landmark_pred)
    """
    with slim.arg_scope([slim.conv2d],
                        activation_fn=prelu,
                        weights_initializer=slim.xavier_initializer(),
                        biases_initializer=tf.zeros_initializer(),
                        weights_regularizer=slim.l2_regularizer(0.0005),
                        padding='valid'):
        # Shape prints kept from the original for architecture debugging.
        print(inputs.get_shape())
        net = slim.conv2d(inputs, num_outputs=32, kernel_size=[3, 3], stride=1, scope="conv1")
        print(net.get_shape())
        net = slim.max_pool2d(net, kernel_size=[3, 3], stride=2, scope="pool1", padding='SAME')
        print(net.get_shape())
        net = slim.conv2d(net, num_outputs=64, kernel_size=[3, 3], stride=1, scope="conv2")
        print(net.get_shape())
        net = slim.max_pool2d(net, kernel_size=[3, 3], stride=2, scope="pool2")
        print(net.get_shape())
        net = slim.conv2d(net, num_outputs=64, kernel_size=[3, 3], stride=1, scope="conv3")
        print(net.get_shape())
        net = slim.max_pool2d(net, kernel_size=[2, 2], stride=2, scope="pool3", padding='SAME')
        print(net.get_shape())
        net = slim.conv2d(net, num_outputs=128, kernel_size=[2, 2], stride=1, scope="conv4")
        print(net.get_shape())
        fc_flatten = slim.flatten(net)
        print(fc_flatten.get_shape())
        fc1 = slim.fully_connected(fc_flatten, num_outputs=256, scope="fc1", activation_fn=prelu)
        print(fc1.get_shape())
        # batch*2 — face / non-face probability.
        cls_prob = slim.fully_connected(fc1, num_outputs=2, scope="cls_fc", activation_fn=tf.nn.softmax)
        print(cls_prob.get_shape())
        # batch*4 — bounding-box regression offsets.
        bbox_pred = slim.fully_connected(fc1, num_outputs=4, scope="bbox_fc", activation_fn=None)
        print(bbox_pred.get_shape())
        # batch*10 — five (x, y) facial landmark offsets.
        landmark_pred = slim.fully_connected(fc1, num_outputs=10, scope="landmark_fc", activation_fn=None)
        print(landmark_pred.get_shape())
        # Training path: compute OHEM losses + regularization; inference
        # path: return the raw head outputs.
        if training:
            cls_loss = cls_ohem(cls_prob, label)
            bbox_loss = bbox_ohem(bbox_pred, bbox_target, label)
            accuracy = cal_accuracy(cls_prob, label)
            landmark_loss = landmark_ohem(landmark_pred, landmark_target, label)
            L2_loss = tf.add_n(slim.losses.get_regularization_losses())
            return cls_loss, bbox_loss, landmark_loss, L2_loss, accuracy
        else:
            return cls_prob, bbox_pred, landmark_pred
254200
255201
256202
0 commit comments