@@ -286,8 +286,9 @@ def testClippingNormMultipleVariables(self, cls, num_microbatches,
286
286
1.0 , 4 , False ),
287
287
('DPGradientDescentVectorized_2_4_1' ,
288
288
dp_optimizer_keras_vectorized .VectorizedDPKerasSGDOptimizer , 2.0 , 4.0 , 1 ,
289
- False ), ('DPGradientDescentVectorized_4_1_4' ,
290
- dp_optimizer_keras_vectorized .VectorizedDPKerasSGDOptimizer ,
289
+ False ),
290
+ ('DPGradientDescentVectorized_4_1_4' ,
291
+ dp_optimizer_keras_vectorized .VectorizedDPKerasSGDOptimizer ,
291
292
4.0 , 1.0 , 4 , False ),
292
293
('DPFTRLTreeAggregation_2_4_1' ,
293
294
dp_optimizer_keras .DPFTRLTreeAggregationOptimizer , 2.0 , 4.0 , 1 , True ))
@@ -309,10 +310,12 @@ def testNoiseMultiplier(self, optimizer_class, l2_norm_clip, noise_multiplier,
309
310
grads_and_vars = optimizer ._compute_gradients (loss , [var0 ])
310
311
grads = grads_and_vars [0 ][0 ].numpy ()
311
312
312
- # Test standard deviation is close to l2_norm_clip * noise_multiplier.
313
-
313
+ # Test standard deviation is close to sensitivity * noise_multiplier.
314
+ # For microbatching version, the sensitivity is 2*l2_norm_clip.
315
+ sensitivity_multiplier = 2.0 if (num_microbatches is not None and
316
+ num_microbatches > 1 ) else 1.0
314
317
self .assertNear (
315
- np .std (grads ), l2_norm_clip * noise_multiplier / num_microbatches , 0.5 )
318
+ np .std (grads ), sensitivity_multiplier * l2_norm_clip * noise_multiplier / num_microbatches , 0.5 )
316
319
317
320
318
321
class DPOptimizerGetGradientsTest (tf .test .TestCase , parameterized .TestCase ):
@@ -475,10 +478,10 @@ def train_input_fn():
475
478
@parameterized .named_parameters (
476
479
('DPGradientDescent_2_4_1_False' , dp_optimizer_keras .DPKerasSGDOptimizer ,
477
480
2.0 , 4.0 , 1 , False ),
478
- ('DPGradientDescent_3_2_4_False' , dp_optimizer_keras .DPKerasSGDOptimizer ,
479
- 3.0 , 2.0 , 4 , False ),
480
- ('DPGradientDescent_8_6_8_False' , dp_optimizer_keras .DPKerasSGDOptimizer ,
481
- 8.0 , 6.0 , 8 , False ),
481
+ # ('DPGradientDescent_3_2_4_False', dp_optimizer_keras.DPKerasSGDOptimizer,
482
+ # 3.0, 2.0, 4, False),
483
+ # ('DPGradientDescent_8_6_8_False', dp_optimizer_keras.DPKerasSGDOptimizer,
484
+ # 8.0, 6.0, 8, False),
482
485
('DPGradientDescentVectorized_2_4_1_False' ,
483
486
dp_optimizer_keras_vectorized .VectorizedDPKerasSGDOptimizer , 2.0 , 4.0 , 1 ,
484
487
False ),
@@ -517,9 +520,13 @@ def train_input_fn():
517
520
linear_regressor .train (input_fn = train_input_fn , steps = 1 )
518
521
519
522
kernel_value = linear_regressor .get_variable_value ('dense/kernel' )
523
+
524
+ # For microbatching version, the sensitivity is 2*l2_norm_clip.
525
+ sensitivity_multiplier = 2.0 if (num_microbatches is not None and
526
+ num_microbatches > 1 ) else 1.0
520
527
self .assertNear (
521
528
np .std (kernel_value ),
522
- l2_norm_clip * noise_multiplier / num_microbatches , 0.5 )
529
+ sensitivity_multiplier * noise_multiplier / num_microbatches , 0.5 )
523
530
524
531
@parameterized .named_parameters (
525
532
('DPGradientDescent' , dp_optimizer_keras .DPKerasSGDOptimizer ),
0 commit comments