@@ -38,6 +38,70 @@ static bool IsErrorWithinTolerance(float error, float tolerance) {
 #define EXPECT_IS_TINY(max_error) \
   EXPECT_IS_TINIER_THAN(max_error, 1.5e-2f)
 
+static void RunReductionTests(const OpDef& op_def) {
+
+  TestDataVector test_data(
+      // Input X
+      {
+          {{4, 3, 2}},
+          {{4, 3, 2}},
+          {{4, 3, 2}},
+          {{4, 3, 2}},
+          {{4, 3, 2}},
+          {{4, 3, 2}},
+          {{4, 3, 2}},
+          {{4, 3, 2}},
+      },
+      // Input Y
+      {
+          {{1, 1, 1}},
+          {{}},
+          {{1, 3, 1}},
+          {{2}},
+          {{4, 1, 2}},
+          {{4, 3}},
+          {{4, 1, 2}},
+          {{4}}
+      },
+      // Attributes
+      {
+          // default
+          {},
+          // axes = [0, 1, 2], keepdims = 0
+          {MakeAttribute("axes", std::vector<int64_t>{0, 1, 2}),
+           MakeAttribute("keepdims", int64_t(0))},
+          // axes = [0, 2], keepdims = 1
+          {MakeAttribute("axes", std::vector<int64_t>{0, 2})},
+          // axes = [0, 1], keepdims = 0
+          {MakeAttribute("axes", std::vector<int64_t>{0, 1}),
+           MakeAttribute("keepdims", int64_t(0))},
+          // axes = [1], keepdims = 1
+          {MakeAttribute("axes", std::vector<int64_t>{1}),
+           MakeAttribute("keepdims", int64_t(1))},
+          // axes = [2], keepdims = 0
+          {MakeAttribute("axes", std::vector<int64_t>{2}),
+           MakeAttribute("keepdims", int64_t(0))},
+          // axes = [-2], keepdims = 1
+          {MakeAttribute("axes", std::vector<int64_t>{-2}),
+           MakeAttribute("keepdims", int64_t(1))},
+          // axes = [-2, -1], keepdims = 0
+          {MakeAttribute("axes", std::vector<int64_t>{-2, -1}),
+           MakeAttribute("keepdims", int64_t(0))}
+      });
+
+  GradientChecker<float, float, float> gradient_checker;
+
+  float max_error;
+
+  for (size_t i = 0; i < std::get<0>(test_data).size(); i++) {
+    max_error = 0;
+    gradient_checker.ComputeGradientError(op_def, std::get<0>(test_data)[i],
+                                          std::get<1>(test_data)[i], &max_error,
+                                          std::get<2>(test_data)[i]);
+    EXPECT_IS_TINY(max_error);
+  }
+}
+
 template <typename T>
 void GenerateRandomDataWithOneHot(
     std::vector<std::vector<float>>& x_datas,
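Note on the helper added above: TestDataVector is not defined in this hunk. Judging from how RunReductionTests indexes it with std::get<0>, std::get<1>, and std::get<2> and forwards each element to ComputeGradientError, it is presumably a three-element tuple of per-case input shapes, expected output shapes, and attribute lists. A minimal sketch is given below; the element types (TensorInfo, ONNX_NAMESPACE::AttributeProto) are assumptions and may differ from the actual alias defined elsewhere in the test file.

// Hypothetical sketch of the alias assumed by RunReductionTests; not part of this diff.
using TestDataVector = std::tuple<
    std::vector<std::vector<TensorInfo>>,                       // per-case input (X) shapes
    std::vector<std::vector<TensorInfo>>,                       // per-case expected output (Y) shapes
    std::vector<std::vector<ONNX_NAMESPACE::AttributeProto>>>;  // per-case attribute sets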
@@ -426,149 +490,24 @@ TEST(GradientCheckerTest, GemmGrad) {
 }
 
 TEST(GradientCheckerTest, ReduceMeanGrad) {
-  float max_error;
-  GradientChecker<float, float, float> gradient_checker;
   // Attribute axes supports negative values from opset 11.
   OpDef op_def{"ReduceMean", kOnnxDomain, 11};
 
-  // default
-  {
-    gradient_checker.ComputeGradientError(op_def, {{4, 3, 2}}, {{1, 1, 1}}, &max_error);
-    EXPECT_IS_TINY(max_error);
-  }
-
-  // TODO: Fix forward kernel behavior for default axes
-  // default axes, keepdims = 0
-  /*
-  {
-    gradient_checker.ComputeGradientError(op_def, {{4, 3, 2}}, {{}}, &max_error,
-                                          {MakeAttribute("keepdims", int64_t(0))});
-    EXPECT_IS_TINY(max_error);
-  }
-  */
-
-  // axes = [0, 1, 2], keepdims = 0
-  {
-    gradient_checker.ComputeGradientError(op_def, {{4, 3, 2}}, {{}}, &max_error,
-                                          {MakeAttribute("axes", std::vector<int64_t>{0, 1, 2}),
-                                           MakeAttribute("keepdims", int64_t(0))});
-    EXPECT_IS_TINY(max_error);
-  }
-
-  // axes = [0, 2], keepdims = 1
-  {
-    gradient_checker.ComputeGradientError(op_def, {{4, 3, 2}}, {{1, 3, 1}}, &max_error,
-                                          {MakeAttribute("axes", std::vector<int64_t>{0, 2})});
-    EXPECT_IS_TINY(max_error);
-  }
-
-  // axes = [0, 1], keepdims = 0
-  {
-    gradient_checker.ComputeGradientError(op_def, {{4, 3, 2}}, {{2}}, &max_error,
-                                          {MakeAttribute("axes", std::vector<int64_t>{0, 1}),
-                                           MakeAttribute("keepdims", int64_t(0))});
-    EXPECT_IS_TINY(max_error);
-  }
-
-  // axes = [1], keepdims = 1
-  {
-    gradient_checker.ComputeGradientError(op_def, {{4, 3, 2}}, {{4, 1, 2}}, &max_error,
-                                          {MakeAttribute("axes", std::vector<int64_t>{1}),
-                                           MakeAttribute("keepdims", int64_t(1))});
-    EXPECT_IS_TINY(max_error);
-  }
-
-  // axes = [2], keepdims = 0
-  {
-    gradient_checker.ComputeGradientError(op_def, {{4, 3, 2}}, {{4, 3}}, &max_error,
-                                          {MakeAttribute("axes", std::vector<int64_t>{2}),
-                                           MakeAttribute("keepdims", int64_t(0))});
-    EXPECT_IS_TINY(max_error);
-  }
-
-  // axes = [-2], keepdims = 1
-  {
-    gradient_checker.ComputeGradientError(op_def, {{4, 3, 2}}, {{4, 1, 2}}, &max_error,
-                                          {MakeAttribute("axes", std::vector<int64_t>{-2}),
-                                           MakeAttribute("keepdims", int64_t(1))});
-    EXPECT_IS_TINY(max_error);
-  }
-
-  // axes = [-2, -1], keepdims = 0
-  {
-    gradient_checker.ComputeGradientError(op_def, {{4, 3, 2}}, {{4}}, &max_error,
-                                          {MakeAttribute("axes", std::vector<int64_t>{-2, -1}),
-                                           MakeAttribute("keepdims", int64_t(0))});
-    EXPECT_IS_TINY(max_error);
-  }
+  RunReductionTests(op_def);
 }
 
 TEST(GradientCheckerTest, ReduceSumGrad) {
-  float max_error;
-  GradientChecker<float, float, float> gradient_checker;
   // Attribute axes supports negative values from opset 11.
   OpDef op_def{"ReduceSum", kOnnxDomain, 11};
 
-  // default
-  {
-    gradient_checker.ComputeGradientError(op_def, {{4, 3, 2}}, {{1, 1, 1}}, &max_error);
-    EXPECT_IS_TINY(max_error);
-  }
-
-  // axes = [0, 1, 2], keepdims = 0
-  {
-    gradient_checker.ComputeGradientError(op_def, {{4, 3, 2}}, {{}}, &max_error,
-                                          {MakeAttribute("axes", std::vector<int64_t>{0, 1, 2}),
-                                           MakeAttribute("keepdims", int64_t(0))});
-    EXPECT_IS_TINY(max_error);
-  }
-
-  // axes = [0, 2], keepdims = 1
-  {
-    gradient_checker.ComputeGradientError(op_def, {{4, 3, 2}}, {{1, 3, 1}}, &max_error,
-                                          {MakeAttribute("axes", std::vector<int64_t>{0, 2})});
-    EXPECT_IS_TINY(max_error);
-  }
-
-  // axes = [0, 1], keepdims = 0
-  {
-    gradient_checker.ComputeGradientError(op_def, {{4, 3, 2}}, {{2}}, &max_error,
-                                          {MakeAttribute("axes", std::vector<int64_t>{0, 1}),
-                                           MakeAttribute("keepdims", int64_t(0))});
-    EXPECT_IS_TINY(max_error);
-  }
-
-  // axes = [1], keepdims = 1
-  {
-    gradient_checker.ComputeGradientError(op_def, {{4, 3, 2}}, {{4, 1, 2}}, &max_error,
-                                          {MakeAttribute("axes", std::vector<int64_t>{1}),
-                                           MakeAttribute("keepdims", int64_t(1))});
-    EXPECT_IS_TINY(max_error);
-  }
-
-  // axes = [2], keepdims = 0
-  {
-    gradient_checker.ComputeGradientError(op_def, {{4, 3, 2}}, {{4, 3}}, &max_error,
-                                          {MakeAttribute("axes", std::vector<int64_t>{2}),
-                                           MakeAttribute("keepdims", int64_t(0))});
-    EXPECT_IS_TINY(max_error);
-  }
+  RunReductionTests(op_def);
+}
 
-  // axes = [-2], keepdims = 1
-  {
-    gradient_checker.ComputeGradientError(op_def, {{4, 3, 2}}, {{4, 1, 2}}, &max_error,
-                                          {MakeAttribute("axes", std::vector<int64_t>{-2}),
-                                           MakeAttribute("keepdims", int64_t(1))});
-    EXPECT_IS_TINY(max_error);
-  }
+TEST(GradientCheckerTest, ReduceLogSumExpGrad) {
+  // Attribute axes supports negative values from opset 11.
+  OpDef op_def{"ReduceLogSumExp", kOnnxDomain, 11};
 
-  // axes = [-1, -3], keepdims = 0
-  {
-    gradient_checker.ComputeGradientError(op_def, {{4, 3, 2}}, {{3}}, &max_error,
-                                          {MakeAttribute("axes", std::vector<int64_t>{-1, -3}),
-                                           MakeAttribute("keepdims", int64_t(0))});
-    EXPECT_IS_TINY(max_error);
-  }
+  RunReductionTests(op_def);
 }
 
 #ifndef USE_CUDA
@@ -1960,3 +1899,4 @@ TEST(GradientCheckerTest, ExpandGrad) {
 }  // namespace onnxruntime
 
 #endif  // NDEBUG
+