
Commit 692e0fa

dsharletg authored and xnnpack-bot committed
Fix use of integer constants with float16 types
This generates warnings on MSVC (and probably on other compilers too).

PiperOrigin-RevId: 728489631
1 parent e4113ae commit 692e0fa
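
The root of the warning: xnn_float16 converts an integer constant by numeric value, not by bit pattern, so UINT16_C(0xDEAD) is rounded from 57005 to the nearest representable half (and MSVC flags the lossy conversion) instead of producing the 0xDEAD sentinel encoding. A minimal sketch of the distinction, using the xnn_float16_from_bits helper this commit switches to; the include path is an assumption, and xnn_float16's implicit conversions are inferred from the diff below:

    // Sketch only: xnn_float16's float conversions are taken from this
    // diff, and the header location is a guess.
    #include <cstdint>
    #include <cstdio>
    #include "xnnpack/math.h"  // assumed home of xnn_float16 / xnn_float16_from_bits

    int main() {
      // By value: 57005 is rounded to the nearest representable half.
      // This int -> float16 narrowing is what MSVC warns about.
      xnn_float16 by_value = UINT16_C(0xDEAD);

      // By bits: 0xDEAD is taken verbatim as the IEEE half encoding
      // (it decodes to -426.75), which is what a poison fill wants.
      xnn_float16 by_bits = xnn_float16_from_bits(UINT16_C(0xDEAD));

      std::printf("%f vs %f\n", (double)(float)by_value, (double)(float)by_bits);
      return 0;
    }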

3 files changed (+7, -8 lines)

test/convolution-operator-tester.h
Lines changed: 1 addition & 1 deletion

@@ -1021,7 +1021,7 @@ class ConvolutionOperatorTester {
     std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)>
         auto_convolution_op(convolution_op2, xnn_delete_operator);
 
-    xnnpack::Buffer<xnn_float16> output2(output.size(), UINT16_C(0xDEAD));
+    xnnpack::Buffer<xnn_float16> output2(output.size());
     size_t workspace_size = SIZE_MAX;
     size_t workspace_alignment = SIZE_MAX;
     ASSERT_EQ(xnn_status_success,
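
In this tester (and the fully-connected one below) the second Buffer argument is a fill value of the element type, so the integer sentinel was converted by value and warned about. A sketch of the alternatives, with Buffer's constructors inferred from this diff:

    const size_t n = 128;  // stand-in for output.size() from the test
    xnnpack::Buffer<xnn_float16> old_way(n, UINT16_C(0xDEAD));  // int converted by value; MSVC warns
    xnnpack::Buffer<xnn_float16> with_bits(n, xnn_float16_from_bits(UINT16_C(0xDEAD)));  // warning-free sentinel
    xnnpack::Buffer<xnn_float16> fixed(n);  // the commit's choice: drop the fill entirely

The commit takes the third option here, keeping a bit-pattern sentinel only where it is still load-bearing (the pad_value in the packw tester below).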

test/fully-connected-operator-tester.h
Lines changed: 1 addition & 1 deletion

@@ -1786,7 +1786,7 @@ class FullyConnectedOperatorTester {
         batch_size(),
         /*threadpool=*/nullptr));
 
-    xnnpack::Buffer<xnn_float16> output2(output.size(), UINT16_C(0xDEAD));
+    xnnpack::Buffer<xnn_float16> output2(output.size());
     ASSERT_EQ(xnn_status_success,
               xnn_setup_fully_connected_nc_qd8_f16_qc8w(
                   fully_connected_op2,

test/packw-microkernel-tester.h
Lines changed: 5 additions & 6 deletions

@@ -343,10 +343,9 @@ class PackWMicrokernelTester {
         g() * (packed_n() * packed_k() + packed_n()));
     xnnpack::Buffer<xnn_float16> packed_w_ref(g() * (packed_n() * packed_k() + packed_n()));
 
-    const xnn_float16 pad_value = std::max(sr(), kr()) == 1 ? UINT16_C(0xDEAD) : 0;
-    std::iota(weights.begin(), weights.end(), UINT16_C(0x0001));
-    std::iota(bias.begin(), bias.end(), UINT16_C(0x8000));
-    std::fill(packed_w.begin(), packed_w.end(), UINT16_C(0xBEEF));
+    const xnn_float16 pad_value = xnn_float16_from_bits(std::max(sr(), kr()) == 1 ? UINT16_C(0xDEAD) : 0);
+    std::iota(weights.begin(), weights.end(), 1.0f);
+    std::iota(bias.begin(), bias.end(), 0.5f);
     std::fill(packed_w_ref.begin(), packed_w_ref.end(), pad_value);
 
     // Mandate zero-padding of weights to packed_k() in K dimension.
@@ -371,8 +370,8 @@ class PackWMicrokernelTester {
 
     // Call optimized micro-kernel.
    packw(g(), n(), k(), nr(), kr(), sr(),
-        reinterpret_cast<const uint16_t*>(weights.data()),
-        reinterpret_cast<const uint16_t*>(bias_data), /*scale=*/nullptr,
+          reinterpret_cast<const uint16_t*>(weights.data()),
+          reinterpret_cast<const uint16_t*>(bias_data), /*scale=*/nullptr,
         reinterpret_cast<uint16_t*>(packed_w.data()),
         /*extra_bytes=*/0, /*params=*/nullptr);
378377
