Skip to content

Commit 9f9f69c

Browse files
dsharletg authored and xnnpack-bot committed
Fix more warnings and type safety issues
PiperOrigin-RevId: 728877342
1 parent 6b08d6e commit 9f9f69c

14 files changed

+74
-52
lines changed

bench/models/qd8-attention.cc

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,8 @@ xnn_subgraph_t QD8Attention(size_t batch_size, size_t seq_len,
3333
// Scales must be positive.
3434
auto f32rng = std::bind(std::uniform_real_distribution<float>(0.01f, +1.0f),
3535
std::ref(rng));
36+
auto i8rng = std::bind(std::uniform_int_distribution<int>(-127, 127),
37+
std::ref(rng));
3638

3739
// External inputs and outputs.
3840
uint32_t input_id = XNN_INVALID_VALUE_ID;
@@ -83,7 +85,7 @@ xnn_subgraph_t QD8Attention(size_t batch_size, size_t seq_len,
8385
std::generate(weights.value_scale.begin(), weights.value_scale.end(),
8486
std::ref(f32rng));
8587
std::generate(weights.value_data.begin(), weights.value_data.end(),
86-
std::ref(f32rng));
88+
std::ref(i8rng));
8789
status = xnn_define_channelwise_quantized_tensor_value(
8890
subgraph, xnn_datatype_qcint8, weights.value_scale.data(),
8991
value_dims.size(), value_dims.size() - 2, value_dims.data(),
@@ -100,7 +102,7 @@ xnn_subgraph_t QD8Attention(size_t batch_size, size_t seq_len,
100102
std::generate(weights.query_scale.begin(), weights.query_scale.end(),
101103
std::ref(f32rng));
102104
std::generate(weights.query_data.begin(), weights.query_data.end(),
103-
std::ref(f32rng));
105+
std::ref(i8rng));
104106
status = xnn_define_channelwise_quantized_tensor_value(
105107
subgraph, xnn_datatype_qcint8, weights.query_scale.data(),
106108
query_dims.size(), query_dims.size() - 2, query_dims.data(),
@@ -117,7 +119,7 @@ xnn_subgraph_t QD8Attention(size_t batch_size, size_t seq_len,
117119
std::generate(weights.key_scale.begin(), weights.key_scale.end(),
118120
std::ref(f32rng));
119121
std::generate(weights.key_data.begin(), weights.key_data.end(),
120-
std::ref(f32rng));
122+
std::ref(i8rng));
121123
status = xnn_define_channelwise_quantized_tensor_value(
122124
subgraph, xnn_datatype_qcint8, weights.key_scale.data(), key_dims.size(),
123125
key_dims.size() - 2, key_dims.data(), weights.key_data.data(),
@@ -311,7 +313,7 @@ xnn_subgraph_t QD8Attention(size_t batch_size, size_t seq_len,
311313
std::generate(weights.post_proj_scale.begin(), weights.post_proj_scale.end(),
312314
std::ref(f32rng));
313315
std::generate(weights.post_proj_data.begin(), weights.post_proj_data.end(),
314-
std::ref(f32rng));
316+
std::ref(i8rng));
315317
status = xnn_define_channelwise_quantized_tensor_value(
316318
subgraph, xnn_datatype_qcint8, weights.post_proj_scale.data(),
317319
post_proj_dims.size(), post_proj_dims.size() - 2, post_proj_dims.data(),

bench/unary.cc

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -163,7 +163,8 @@ static void benchmark_unary_operator(benchmark::State& state,
163163

164164
xnnpack::Buffer<In> input(batch_size + XNN_EXTRA_BYTES / sizeof(In));
165165
xnnpack::Buffer<Out> output(batch_size);
166-
std::generate(input.begin(), input.end(), [&]() { return f32dist(rng); });
166+
std::generate(input.begin(), input.end(),
167+
[&]() { return static_cast<In>(f32dist(rng)); });
167168

168169
xnn_status status = xnn_initialize(nullptr /* allocator */);
169170
if (status != xnn_status_success) {

test/binary-elementwise-nd.cc

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -312,7 +312,6 @@ class BinaryElementwiseOperatorTester {
312312
MinMaxLow limits = DatatypeMinMaxLow(datatype());
313313

314314
xnnpack::ReplicableRandomDevice rng;
315-
std::uniform_real_distribution<double> dist(limits.min, limits.max);
316315

317316
// Compute generalized shapes.
318317
std::array<size_t, XNN_MAX_TENSOR_DIMS> input1_dims;
@@ -358,8 +357,8 @@ class BinaryElementwiseOperatorTester {
358357
xnnpack::Buffer<T> input2(XNN_EXTRA_BYTES + num_input2_elements());
359358
xnnpack::Buffer<T> output(num_output_elements);
360359
for (size_t iteration = 0; iteration < iterations(); iteration++) {
361-
xnnpack::randomize_buffer(datatype(), rng, dist, input1);
362-
xnnpack::randomize_buffer(datatype(), rng, dist, input2);
360+
xnnpack::randomize_buffer(datatype(), rng, limits.min, limits.max, input1);
361+
xnnpack::randomize_buffer(datatype(), rng, limits.min, limits.max, input2);
363362

364363
if (mode == RunMode::kCreateReshapeRun) {
365364
// Create, setup, run, and destroy a binary elementwise operator.

test/binary.cc

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -183,9 +183,8 @@ void MatchesOperatorApi(xnn_datatype datatype, xnn_binary_operator binary_op) {
183183
assert(false);
184184
break;
185185
}
186-
std::uniform_real_distribution<double> dist(datatype_min, datatype_max);
187-
randomize_buffer(datatype, rng, dist, input0);
188-
randomize_buffer(datatype, rng, dist, input1);
186+
randomize_buffer(datatype, rng, datatype_min, datatype_max, input0);
187+
randomize_buffer(datatype, rng, datatype_min, datatype_max, input1);
189188

190189
ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));
191190

test/dwconv-microkernel-tester.cc

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -972,7 +972,7 @@ void DWConvMicrokernelTester::Test(
972972
xnnpack::Buffer<xnn_float16> bias(channels());
973973
xnnpack::Buffer<xnn_float16, XNN_ALLOCATION_ALIGNMENT> packed_weights(
974974
(kernel_tile() + 1) * packed_channels());
975-
xnnpack::Buffer<xnn_float16> zero(channels() + XNN_EXTRA_BYTES / sizeof(xnn_float16), 0);
975+
xnnpack::Buffer<xnn_float16> zero(channels() + XNN_EXTRA_BYTES / sizeof(xnn_float16), 0.0f);
976976
xnnpack::Buffer<xnn_float16> output((width() - 1) * output_stride() + channels());
977977
xnnpack::Buffer<float> output_ref(width() * channels());
978978

@@ -984,7 +984,7 @@ void DWConvMicrokernelTester::Test(
984984
std::generate(bias.begin(), bias.end(),
985985
[&]() { return f32dist(rng); });
986986

987-
std::fill(packed_weights.begin(), packed_weights.end(), 0);
987+
std::fill(packed_weights.begin(), packed_weights.end(), 0.0f);
988988
xnn_pack_f16_dwconv_ghw_w(
989989
kernel_tile(), 0, 0, kernel_tile(), 1, channels(), channel_tile(),
990990
channel_tile(), channel_tile(),
@@ -1101,7 +1101,7 @@ void DWConvMicrokernelTester::Test(
11011101
std::generate(bias.begin(), bias.end(),
11021102
[&]() { return f32dist(rng); });
11031103

1104-
std::fill(packed_weights.begin(), packed_weights.end(), 0);
1104+
std::fill(packed_weights.begin(), packed_weights.end(), 0.0f);
11051105
xnn_pack_f16_dwconv_ghw_w(
11061106
first_pass_tile(), middle_pass_tile(), last_pass_tile(), kernel_size(),
11071107
1, channels(), channel_tile(), channel_subtile(), channel_round(),

test/fully-connected-operator-tester.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -447,7 +447,7 @@ class FullyConnectedOperatorTester {
447447
}
448448

449449
// Compute reference results, without renormalization.
450-
std::fill(output_ref.begin(), output_ref.end(), 0);
450+
std::fill(output_ref.begin(), output_ref.end(), 0.0f);
451451

452452
// TODO: Not supported right now.
453453
assert (transpose_weights() == false);

test/fully-connected.cc

Lines changed: 11 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1416,9 +1416,12 @@ TEST_F(FullyConnectedTestBF16F32, matches_operator_api) {
14161416
// }); std::generate(bias.begin(), bias.end(), [&]() { return f32dist(rng);
14171417
// });
14181418
int counter = 0;
1419-
std::generate(input.begin(), input.end(), [&]() { return counter++ % 10; });
1420-
std::generate(kernel.begin(), kernel.end(), [&]() { return counter++ % 10; });
1421-
std::generate(bias.begin(), bias.end(), [&]() { return counter++ % 10; });
1419+
std::generate(input.begin(), input.end(),
1420+
[&]() { return static_cast<float>(counter++ % 10); });
1421+
std::generate(kernel.begin(), kernel.end(),
1422+
[&]() { return static_cast<float>(counter++ % 10); });
1423+
std::generate(bias.begin(), bias.end(),
1424+
[&]() { return static_cast<float>(counter++ % 10); });
14221425

14231426
// Call operator API.
14241427
const xnn_status status = xnn_create_fully_connected_nc_bf16_f32(
@@ -3607,8 +3610,9 @@ TEST_F(FullyConnectedTestQD8F32QC4W,
36073610
// 2nd inference: The dq-params should be properly allocated to handle a
36083611
// resize without memory retrigger
36093612
input_dims[0] += 2;
3610-
size_t batch_size2 = std::accumulate(input_dims.begin(), input_dims.end() - 1,
3611-
1, std::multiplies<size_t>());
3613+
size_t batch_size2 =
3614+
std::accumulate(input_dims.begin(), input_dims.end() - 1,
3615+
static_cast<size_t>(1), std::multiplies<size_t>());
36123616
xnnpack::Buffer<float> convert_input2(batch_size2 * input_channels +
36133617
XNN_EXTRA_BYTES / sizeof(float));
36143618
std::generate(convert_input2.begin(), convert_input2.end(),
@@ -3629,7 +3633,8 @@ TEST_F(FullyConnectedTestQD8F32QC4W,
36293633
// retrigger
36303634
input_dims[0] += 2; // +4 total
36313635
size_t batch_size3 = std::accumulate(input_dims.begin(), input_dims.end() - 1,
3632-
1, std::multiplies<size_t>());
3636+
static_cast<size_t>(1),
3637+
std::multiplies<size_t>());
36333638
xnnpack::Buffer<float> convert_input3(batch_size3 * input_channels +
36343639
XNN_EXTRA_BYTES / sizeof(float));
36353640
std::generate(convert_input3.begin(), convert_input3.end(),

test/gemm-microkernel-tester.cc

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1316,7 +1316,7 @@ void GemmMicrokernelTester::Test(
13161316
/* bias */ packed_n() * sizeof(float));
13171317

13181318
xnnpack::Buffer<xnn_float16> c((mr() - 1) * cm_stride() + ((n() - 1) / nr()) * nr() + (n() - 1) % nr() + 1);
1319-
xnnpack::Buffer<float> c_ref(m() * n(), 0);
1319+
xnnpack::Buffer<float> c_ref(m() * n());
13201320

13211321
for (size_t iteration = 0; iteration < kIterations; iteration++) {
13221322
std::generate(input.begin(), input.end(), std::ref(f32rng));
@@ -1377,7 +1377,7 @@ void GemmMicrokernelTester::Test(
13771377
(void*) start);
13781378

13791379
// Compute 32-bit results and output quantization arguments.
1380-
std::fill(c_ref.begin(), c_ref.end(), 0);
1380+
std::fill(c_ref.begin(), c_ref.end(), 0.0f);
13811381
for (size_t m_index = 0; m_index < m(); m_index++) {
13821382
for (size_t n_index = 0; n_index < n(); n_index++) {
13831383
float kfsum = 0.0;
@@ -1473,7 +1473,7 @@ void GemmMicrokernelTester::Test(
14731473
packed_n() * (sizeof(int32_t) + sizeof(float) * 2));
14741474
xnnpack::Buffer<float> c((mr() - 1) * cm_stride() + ((n() - 1) / nr()) * nr() + (n() - 1) % nr() + 1);
14751475
xnnpack::Buffer<int32_t> acc(m() * n());
1476-
xnnpack::Buffer<float> c_ref(m() * n(), 0);
1476+
xnnpack::Buffer<float> c_ref(m() * n());
14771477

14781478
for (size_t iteration = 0; iteration < kIterations; iteration++) {
14791479
std::generate(input.begin(), input.end(), std::ref(f32rng));
@@ -1526,7 +1526,7 @@ void GemmMicrokernelTester::Test(
15261526
(void*) ((uintptr_t) packed_w.data() + nr() * (ks() * packed_k_bytes + 2 * sizeof(float))));
15271527

15281528
// Compute 32-bit results and output quantization arguments.
1529-
std::fill(c_ref.begin(), c_ref.end(), 0);
1529+
std::fill(c_ref.begin(), c_ref.end(), 0.0f);
15301530
for (size_t m_index = 0; m_index < m(); m_index++) {
15311531
for (size_t n_index = 0; n_index < n(); n_index++) {
15321532
int32_t ksum = 0;
@@ -1625,7 +1625,7 @@ void GemmMicrokernelTester::Test(
16251625
/* bias */ packed_n() * sizeof(float));
16261626

16271627
xnnpack::Buffer<float> c((mr() - 1) * cm_stride() + ((n() - 1) / nr()) * nr() + (n() - 1) % nr() + 1);
1628-
xnnpack::Buffer<float> c_ref(m() * n(), 0);
1628+
xnnpack::Buffer<float> c_ref(m() * n());
16291629

16301630
for (size_t iteration = 0; iteration < 1 /* kIterations */; iteration++) {
16311631
std::generate(input.begin(), input.end(), std::ref(f32rng));
@@ -1685,7 +1685,7 @@ void GemmMicrokernelTester::Test(
16851685
(void*) start);
16861686

16871687
// Compute 32-bit results and output quantization arguments.
1688-
std::fill(c_ref.begin(), c_ref.end(), 0);
1688+
std::fill(c_ref.begin(), c_ref.end(), 0.0f);
16891689
for (size_t m_index = 0; m_index < m(); m_index++) {
16901690
for (size_t n_index = 0; n_index < n(); n_index++) {
16911691
float kfsum = 0.0;
@@ -2589,7 +2589,7 @@ void GemmMicrokernelTester::Test(
25892589
std::generate(bias.begin(), bias.end(), [&] { return f32rng(rng); });
25902590
std::fill(c_ref.begin(), c_ref.end(), 0.0f);
25912591

2592-
std::fill(packed_w.begin(), packed_w.end(), 0);
2592+
std::fill(packed_w.begin(), packed_w.end(), 0.0f);
25932593
pack(/*g=*/1, n(), k(), nr(), kr(), sr(),
25942594
b.data(),
25952595
bias.data(), /*scale=*/nullptr,
@@ -2669,7 +2669,7 @@ void GemmMicrokernelTester::Test(
26692669
std::generate(bias.begin(), bias.end(), [&] { return f32rng(rng); });
26702670
std::fill(c_ref.begin(), c_ref.end(), 0.0f);
26712671

2672-
std::fill(packed_w.begin(), packed_w.end(), 0);
2672+
std::fill(packed_w.begin(), packed_w.end(), 0.0f);
26732673
pack(/*g=*/1, n(), k(), nr(), kr(), sr(),
26742674
reinterpret_cast<const uint16_t*>(b.data()),
26752675
reinterpret_cast<const uint16_t*>(bias.data()), /*scale=*/nullptr,
@@ -2749,7 +2749,7 @@ void GemmMicrokernelTester::Test(
27492749
std::generate(bias.begin(), bias.end(), f32rng);
27502750
std::fill(c_ref.begin(), c_ref.end(), 0.0f);
27512751

2752-
std::fill(packed_w.begin(), packed_w.end(), 0);
2752+
std::fill(packed_w.begin(), packed_w.end(), 0.0f);
27532753
pack(/*g=*/1, n(), k(), nr(), kr(), sr(),
27542754
reinterpret_cast<const uint16_t*>(b.data()),
27552755
reinterpret_cast<const uint16_t*>(bias.data()),

test/operator-test-utils.h

Lines changed: 30 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -13,38 +13,52 @@
1313

1414
namespace xnnpack {
1515

16+
template <typename T, typename Buffer>
17+
void randomize_int_buffer(xnn_datatype datatype,
18+
xnnpack::ReplicableRandomDevice& rng, double min,
19+
double max, Buffer& buf) {
20+
std::uniform_int_distribution<int> dist(static_cast<int>(min),
21+
static_cast<int>(max));
22+
const auto f = [&]() { return static_cast<T>(dist(rng)); };
23+
std::generate(reinterpret_cast<T*>(buf.begin()),
24+
reinterpret_cast<T*>(buf.end()), f);
25+
}
26+
27+
template <typename T, typename Buffer>
28+
void randomize_float_buffer(xnn_datatype datatype,
29+
xnnpack::ReplicableRandomDevice& rng, double min,
30+
double max, Buffer& buf) {
31+
std::uniform_real_distribution<float> dist(static_cast<float>(min),
32+
static_cast<float>(max));
33+
const auto f = [&]() { return dist(rng); };
34+
std::generate(reinterpret_cast<T*>(buf.begin()),
35+
reinterpret_cast<T*>(buf.end()), f);
36+
}
37+
1638
// Given an xnnpack::Buffer<char> type, initialize it with
1739
// the given datatype using the given RNG and distribution.
1840
template <typename Buffer>
1941
void randomize_buffer(xnn_datatype datatype,
20-
xnnpack::ReplicableRandomDevice& rng,
21-
std::uniform_real_distribution<double>& dist,
22-
Buffer& buf) {
23-
const auto f = [&]() { return dist(rng); };
42+
xnnpack::ReplicableRandomDevice& rng, double min,
43+
double max, Buffer& buf) {
2444
switch (datatype) {
2545
case xnn_datatype_quint8:
26-
std::generate(reinterpret_cast<uint8_t*>(buf.begin()),
27-
reinterpret_cast<uint8_t*>(buf.end()), f);
46+
randomize_int_buffer<uint8_t>(datatype, rng, min, max, buf);
2847
break;
2948
case xnn_datatype_qint8:
30-
std::generate(reinterpret_cast<int8_t*>(buf.begin()),
31-
reinterpret_cast<int8_t*>(buf.end()), f);
49+
randomize_int_buffer<int8_t>(datatype, rng, min, max, buf);
3250
break;
3351
case xnn_datatype_int32:
34-
std::generate(reinterpret_cast<int32_t*>(buf.begin()),
35-
reinterpret_cast<int32_t*>(buf.end()), f);
52+
randomize_int_buffer<int32_t>(datatype, rng, min, max, buf);
3653
break;
3754
case xnn_datatype_fp16:
38-
std::generate(reinterpret_cast<xnn_float16*>(buf.begin()),
39-
reinterpret_cast<xnn_float16*>(buf.end()), f);
55+
randomize_float_buffer<xnn_float16>(datatype, rng, min, max, buf);
4056
break;
4157
case xnn_datatype_bf16:
42-
std::generate(reinterpret_cast<xnn_bfloat16*>(buf.begin()),
43-
reinterpret_cast<xnn_bfloat16*>(buf.end()), f);
58+
randomize_float_buffer<xnn_bfloat16>(datatype, rng, min, max, buf);
4459
break;
4560
case xnn_datatype_fp32:
46-
std::generate(reinterpret_cast<float*>(buf.begin()),
47-
reinterpret_cast<float*>(buf.end()), f);
61+
randomize_float_buffer<float>(datatype, rng, min, max, buf);
4862
break;
4963
default:
5064
assert(false);

test/packing.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4214,7 +4214,7 @@ TEST(PACK_QS8_MULTIPASS_DWCONV_GHW_W, one_middle_pass_channel_subtile_rounded) {
42144214
// c rounded to channel_subtile is 8, so we will have 2 channel_tile loops in first and middle pass.
42154215

42164216
std::vector<int32_t> b(c);
4217-
std::iota(b.begin(), b.end(), 0.0f); // b = [0, 1, 2, 3, 4, 5, 6]
4217+
std::iota(b.begin(), b.end(), 0); // b = [0, 1, 2, 3, 4, 5, 6]
42184218
std::vector<int8_t> k(c * h * w); // k = [7, 8, // first 2x2 kernel
42194219
// 9, 10,
42204220
// 11, 12, // second 2x2 kernel
@@ -4429,7 +4429,7 @@ TEST(PACK_QS8_MULTIPASS_DWCONV_HWG_W, one_middle_pass_tile) {
44294429
const size_t cr = 2;
44304430

44314431
std::vector<int32_t> b(c);
4432-
std::iota(b.begin(), b.end(), 0.0f); // b = [0, 1]
4432+
std::iota(b.begin(), b.end(), 0); // b = [0, 1]
44334433
std::vector<int8_t> k(c * h * w); // k = [2, 3,
44344434
// 4, 5,
44354435
// 6, 7,

0 commit comments

Comments
 (0)