Commit 4be18b5

dsharletg authored and xnnpack-bot committed

Fix use of integer constants with float/float16 types

This generates warnings on MSVC (and probably other compilers too).

PiperOrigin-RevId: 728489631

1 parent d94d6ec · commit 4be18b5
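The change is mechanical: wherever a test or benchmark seeded std::iota over a float or float16 buffer with an integer constant, the seed becomes a float literal, and integer "poison" fill values for float16 buffers are either dropped or built explicitly from their bit pattern. A minimal standalone sketch of why the integer seed warns (plain C++, not XNNPACK code):

    #include <numeric>
    #include <vector>

    int main() {
      std::vector<float> input(8);

      // Before: std::iota deduces its counter type from the seed, so an int
      // seed makes every store do an implicit int -> float conversion, which
      // MSVC reports (e.g. as C4244, "conversion from 'int' to 'float',
      // possible loss of data").
      std::iota(input.begin(), input.end(), 1);

      // After: a float seed keeps the whole sequence in float, so there is
      // no implicit narrowing conversion and no warning.
      std::iota(input.begin(), input.end(), 1.0f);
    }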

Showing 8 changed files with 17 additions and 18 deletions.

bench/rsum-benchmark.h

Lines changed: 3 additions & 3 deletions
@@ -38,7 +38,7 @@ void f16_rsum(
   xnnpack::Buffer<xnn_float16, XNN_ALLOCATION_ALIGNMENT> input(
       rows * channels + XNN_EXTRA_BYTES / sizeof(xnn_float16));
   xnnpack::Buffer<xnn_float16> output(rows);
-  std::iota(input.begin(), input.end(), 1);
+  std::iota(input.begin(), input.end(), 1.0f);
 
   // Prepare parameters.
   xnn_f16_scale_params params;

@@ -71,7 +71,7 @@ void f16_f32acc_rsum(
   xnnpack::Buffer<xnn_float16, XNN_ALLOCATION_ALIGNMENT> input(
       rows * channels + XNN_EXTRA_BYTES / sizeof(xnn_float16));
   xnnpack::Buffer<float> output(rows);
-  std::iota(input.begin(), input.end(), 1);
+  std::iota(input.begin(), input.end(), 1.0f);
 
   // Prepare parameters.
   xnn_f16_f32acc_scale_params params;

@@ -104,7 +104,7 @@ void f32_rsum(
   xnnpack::Buffer<float, XNN_ALLOCATION_ALIGNMENT> input(
       rows * channels + XNN_EXTRA_BYTES / sizeof(float));
   xnnpack::Buffer<float> output(rows);
-  std::iota(input.begin(), input.end(), 1);
+  std::iota(input.begin(), input.end(), 1.0f);
 
   // Prepare parameters.
   xnn_f32_scale_params params;

test/convolution-operator-tester.h

Lines changed: 1 addition & 1 deletion
@@ -1021,7 +1021,7 @@ class ConvolutionOperatorTester {
     std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)>
         auto_convolution_op(convolution_op2, xnn_delete_operator);
 
-    xnnpack::Buffer<xnn_float16> output2(output.size(), UINT16_C(0xDEAD));
+    xnnpack::Buffer<xnn_float16> output2(output.size());
     size_t workspace_size = SIZE_MAX;
     size_t workspace_alignment = SIZE_MAX;
     ASSERT_EQ(xnn_status_success,

test/fully-connected-operator-tester.h

Lines changed: 1 addition & 1 deletion
@@ -1786,7 +1786,7 @@ class FullyConnectedOperatorTester {
                   batch_size(),
                   /*threadpool=*/nullptr));
 
-    xnnpack::Buffer<xnn_float16> output2(output.size(), UINT16_C(0xDEAD));
+    xnnpack::Buffer<xnn_float16> output2(output.size());
     ASSERT_EQ(xnn_status_success,
               xnn_setup_fully_connected_nc_qd8_f16_qc8w(
                   fully_connected_op2,
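In the convolution and fully-connected tests above, the removed UINT16_C(0xDEAD) argument was an integer "poison" fill for a float16 output buffer. Initializing xnn_float16 elements from an integer constant invites the same class of conversion warning, and the conversion is by value anyway: the elements end up holding (approximately) the number 57005.0, not the 0xDEAD bit pattern, and 57005 is not even exactly representable in half precision. Since the fill carried no meaning, it is simply dropped. A standalone sketch of the value-vs-bits distinction, using plain float as a stand-in:

    #include <cstdint>
    #include <vector>

    int main() {
      // Value conversion: every element holds the *number* 57005.0f (the
      // value of 0xDEAD), not the *bit pattern* 0xDEAD; the implicit
      // integer -> floating-point conversion is what draws the warning.
      std::vector<float> poisoned(8, UINT16_C(0xDEAD));

      // The fix here: no fill at all -- the operator under test is expected
      // to write every element before anything reads it.
      std::vector<float> output2(8);
    }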

test/indirection.cc

Lines changed: 1 addition & 1 deletion
@@ -168,7 +168,7 @@ class IndirectionTester {
         kernel_size + (output_width - 1) * step_width * kernel_height_;
 
     input_ = xnnpack::Buffer<float>(channels_ * input_height_ * input_width_);
-    std::iota(input_.begin(), input_.end(), 0);
+    std::iota(input_.begin(), input_.end(), 0.0f);
     zero_buffer_ = xnnpack::Buffer<float>(channels_, 0.0f);
 
     const size_t indirect_top_height = divide_round_up(padding_height_ / 2, subsampling_);

test/packq-microkernel-tester.cc

Lines changed: 1 addition & 1 deletion
@@ -31,7 +31,7 @@ void PackQMicrokernelTester::Test(xnn_x8_packq_f32qp8_ukernel_fn packq) const {
   xnnpack::Buffer<int8_t, XNN_ALLOCATION_ALIGNMENT> packed_w_ref(packed_size);
 
   // Populate the input and output data.
-  std::iota(input.begin(), input.end(), 0);
+  std::iota(input.begin(), input.end(), 0.0f);
   // TODO(b/372820266): Remove these fill calls that hide uninitialized memory bugs.
   std::fill(packed_w.begin(), packed_w.end(), INT8_C(0x12));
   std::fill(packed_w_ref.begin(), packed_w_ref.end(), INT8_C(0x7B));

test/packw-microkernel-tester.h

Lines changed: 5 additions & 6 deletions
@@ -343,10 +343,9 @@ class PackWMicrokernelTester {
         g() * (packed_n() * packed_k() + packed_n()));
     xnnpack::Buffer<xnn_float16> packed_w_ref(g() * (packed_n() * packed_k() + packed_n()));
 
-    const xnn_float16 pad_value = std::max(sr(), kr()) == 1 ? UINT16_C(0xDEAD) : 0;
-    std::iota(weights.begin(), weights.end(), UINT16_C(0x0001));
-    std::iota(bias.begin(), bias.end(), UINT16_C(0x8000));
-    std::fill(packed_w.begin(), packed_w.end(), UINT16_C(0xBEEF));
+    const xnn_float16 pad_value = xnn_float16_from_bits(std::max(sr(), kr()) == 1 ? UINT16_C(0xDEAD) : 0);
+    std::iota(weights.begin(), weights.end(), 1.0f);
+    std::iota(bias.begin(), bias.end(), 0.5f);
     std::fill(packed_w_ref.begin(), packed_w_ref.end(), pad_value);
 
     // Mandate zero-padding of weights to packed_k() in K dimension.

@@ -371,8 +370,8 @@ class PackWMicrokernelTester {
 
     // Call optimized micro-kernel.
     packw(g(), n(), k(), nr(), kr(), sr(),
-        reinterpret_cast<const uint16_t*>(weights.data()),
-        reinterpret_cast<const uint16_t*>(bias_data), /*scale=*/nullptr,
+          reinterpret_cast<const uint16_t*>(weights.data()),
+          reinterpret_cast<const uint16_t*>(bias_data), /*scale=*/nullptr,
           reinterpret_cast<uint16_t*>(packed_w.data()),
           /*extra_bytes=*/0, /*params=*/nullptr);
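The packw change above is the one spot where a bit pattern really is intended: the pad value keeps its 0xDEAD pattern but is now constructed explicitly with xnn_float16_from_bits instead of leaking through an implicit integer conversion, while the weight and bias seeds become ordinary float values. A standalone sketch of the "from bits" idea for 32-bit float (float_from_bits is a hypothetical helper, not an XNNPACK API):

    #include <cstdint>
    #include <cstring>

    // Yields "the float whose bits are X", whereas `float f = x;` would
    // yield "the float whose value is X" via an arithmetic conversion.
    float float_from_bits(uint32_t bits) {
      static_assert(sizeof(float) == sizeof(uint32_t), "expected 32-bit float");
      float f;
      std::memcpy(&f, &bits, sizeof(f));
      return f;
    }

    int main() {
      const float poison = float_from_bits(UINT32_C(0xDEADBEEF));
      (void)poison;
    }

In C++20 the same reinterpretation can be written as std::bit_cast<float>(bits).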

test/static-constant-pad.cc

Lines changed: 3 additions & 3 deletions
@@ -397,7 +397,7 @@ TEST_F(StaticConstantPadTestF16, matches_operator_api)
   operator_output = xnnpack::Buffer<xnn_float16>(NumElements(output_dims));
   subgraph_output = xnnpack::Buffer<xnn_float16>(operator_output.size());
 
-  std::iota(input.begin(), input.end(), 0);
+  std::iota(input.begin(), input.end(), 0.0f);
 
   ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));
 

@@ -470,7 +470,7 @@ TEST_F(StaticConstantPadTestF32, matches_operator_api)
   operator_output = xnnpack::Buffer<float>(NumElements(output_dims));
   subgraph_output = xnnpack::Buffer<float>(operator_output.size());
 
-  std::iota(input.begin(), input.end(), 0);
+  std::iota(input.begin(), input.end(), 0.0f);
 
   ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));
 

@@ -540,7 +540,7 @@ TEST_F(StaticConstantPadTestF32, reshape_output)
   }
   subgraph_output = xnnpack::Buffer<float>(NumElements(output_dims));
 
-  std::iota(input.begin(), input.end(), 0);
+  std::iota(input.begin(), input.end(), 0.0f);
 
   ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));
 

test/transpose-operator-tester.h

Lines changed: 2 additions & 2 deletions
@@ -144,7 +144,7 @@ class TransposeOperatorTester {
     }
     ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
     xnn_operator_t transpose_op = nullptr;
-    std::iota(input.begin(), input.end(), 0);
+    std::iota(input.begin(), input.end(), 0.0f);
 
     ASSERT_EQ(xnn_status_success,
               xnn_create_transpose_nd_x16(0, &transpose_op));

@@ -185,7 +185,7 @@ class TransposeOperatorTester {
       input_stride[i - 1] = input_stride[i] * shape_[i];
       output_stride[i - 1] = output_stride[i] * shape_[perm()[i]];
     }
-    std::iota(input.begin(), input.end(), 0);
+    std::iota(input.begin(), input.end(), 0.0f);
 
     // Call transpose eager API
     ASSERT_EQ(xnn_status_success,
