Skip to content

Commit

Permalink
Fix use of integer constants with float16 types
Browse files Browse the repository at this point in the history
This generates warnings on MSVC (and probably other compilers too).

PiperOrigin-RevId: 728489631
  • Loading branch information
dsharletg authored and xnnpack-bot committed Feb 19, 2025
1 parent e4113ae commit 692e0fa
Show file tree
Hide file tree
Showing 3 changed files with 7 additions and 8 deletions.
2 changes: 1 addition & 1 deletion test/convolution-operator-tester.h
Original file line number Diff line number Diff line change
Expand Up @@ -1021,7 +1021,7 @@ class ConvolutionOperatorTester {
std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)>
auto_convolution_op(convolution_op2, xnn_delete_operator);

xnnpack::Buffer<xnn_float16> output2(output.size(), UINT16_C(0xDEAD));
xnnpack::Buffer<xnn_float16> output2(output.size());
size_t workspace_size = SIZE_MAX;
size_t workspace_alignment = SIZE_MAX;
ASSERT_EQ(xnn_status_success,
Expand Down
2 changes: 1 addition & 1 deletion test/fully-connected-operator-tester.h
Original file line number Diff line number Diff line change
Expand Up @@ -1786,7 +1786,7 @@ class FullyConnectedOperatorTester {
batch_size(),
/*threadpool=*/nullptr));

xnnpack::Buffer<xnn_float16> output2(output.size(), UINT16_C(0xDEAD));
xnnpack::Buffer<xnn_float16> output2(output.size());
ASSERT_EQ(xnn_status_success,
xnn_setup_fully_connected_nc_qd8_f16_qc8w(
fully_connected_op2,
Expand Down
11 changes: 5 additions & 6 deletions test/packw-microkernel-tester.h
Original file line number Diff line number Diff line change
Expand Up @@ -343,10 +343,9 @@ class PackWMicrokernelTester {
g() * (packed_n() * packed_k() + packed_n()));
xnnpack::Buffer<xnn_float16> packed_w_ref(g() * (packed_n() * packed_k() + packed_n()));

const xnn_float16 pad_value = std::max(sr(), kr()) == 1 ? UINT16_C(0xDEAD) : 0;
std::iota(weights.begin(), weights.end(), UINT16_C(0x0001));
std::iota(bias.begin(), bias.end(), UINT16_C(0x8000));
std::fill(packed_w.begin(), packed_w.end(), UINT16_C(0xBEEF));
const xnn_float16 pad_value = xnn_float16_from_bits(std::max(sr(), kr()) == 1 ? UINT16_C(0xDEAD) : 0);
std::iota(weights.begin(), weights.end(), 1.0f);
std::iota(bias.begin(), bias.end(), 0.5f);
std::fill(packed_w_ref.begin(), packed_w_ref.end(), pad_value);

// Mandate zero-padding of weights to packed_k() in K dimension.
Expand All @@ -371,8 +370,8 @@ class PackWMicrokernelTester {

// Call optimized micro-kernel.
packw(g(), n(), k(), nr(), kr(), sr(),
reinterpret_cast<const uint16_t*>(weights.data()),
reinterpret_cast<const uint16_t*>(bias_data), /*scale=*/nullptr,
reinterpret_cast<const uint16_t*>(weights.data()),
reinterpret_cast<const uint16_t*>(bias_data), /*scale=*/nullptr,
reinterpret_cast<uint16_t*>(packed_w.data()),
/*extra_bytes=*/0, /*params=*/nullptr);

Expand Down

0 comments on commit 692e0fa

Please sign in to comment.