Fix use of integer constants with float/float16 types
This generates warnings on MSVC (and probably other compilers too).

PiperOrigin-RevId: 728489631
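
For context, a minimal sketch of the pattern being fixed, using std::vector<float> as a stand-in for xnnpack::Buffer; the exact MSVC warning number is not part of the commit and is assumed here, and the xnn_float16 buffers in the diff hit the same conversion through that type's converting constructor.

#include <cstddef>
#include <numeric>
#include <vector>

void fill_example(size_t n) {
  std::vector<float> input(n);
  // Before: the init value is an int, so std::iota keeps an int running value
  // and each element assignment converts int -> float, which MSVC flags as a
  // possible-loss-of-data conversion.
  std::iota(input.begin(), input.end(), 1);
  // After: a float literal keeps the running value in float, so there is
  // nothing to convert and no warning.
  std::iota(input.begin(), input.end(), 1.0f);
}
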
dsharletg authored and xnnpack-bot committed Feb 19, 2025
1 parent d94d6ec commit 4be18b5
Showing 8 changed files with 17 additions and 18 deletions.

bench/rsum-benchmark.h (3 additions & 3 deletions)

@@ -38,7 +38,7 @@ void f16_rsum(
xnnpack::Buffer<xnn_float16, XNN_ALLOCATION_ALIGNMENT> input(
rows * channels + XNN_EXTRA_BYTES / sizeof(xnn_float16));
xnnpack::Buffer<xnn_float16> output(rows);
- std::iota(input.begin(), input.end(), 1);
+ std::iota(input.begin(), input.end(), 1.0f);

// Prepare parameters.
xnn_f16_scale_params params;
@@ -71,7 +71,7 @@ void f16_f32acc_rsum(
xnnpack::Buffer<xnn_float16, XNN_ALLOCATION_ALIGNMENT> input(
rows * channels + XNN_EXTRA_BYTES / sizeof(xnn_float16));
xnnpack::Buffer<float> output(rows);
- std::iota(input.begin(), input.end(), 1);
+ std::iota(input.begin(), input.end(), 1.0f);

// Prepare parameters.
xnn_f16_f32acc_scale_params params;
@@ -104,7 +104,7 @@ void f32_rsum(
xnnpack::Buffer<float, XNN_ALLOCATION_ALIGNMENT> input(
rows * channels + XNN_EXTRA_BYTES / sizeof(float));
xnnpack::Buffer<float> output(rows);
- std::iota(input.begin(), input.end(), 1);
+ std::iota(input.begin(), input.end(), 1.0f);

// Prepare parameters.
xnn_f32_scale_params params;

test/convolution-operator-tester.h (1 addition & 1 deletion)

@@ -1021,7 +1021,7 @@ class ConvolutionOperatorTester {
std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)>
auto_convolution_op(convolution_op2, xnn_delete_operator);

- xnnpack::Buffer<xnn_float16> output2(output.size(), UINT16_C(0xDEAD));
+ xnnpack::Buffer<xnn_float16> output2(output.size());
size_t workspace_size = SIZE_MAX;
size_t workspace_alignment = SIZE_MAX;
ASSERT_EQ(xnn_status_success,
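
The change above (and the matching one in the fully-connected tester below) simply drops the UINT16_C(0xDEAD) fill argument: that integer constant had to be converted to xnn_float16, and the operator presumably writes all of output2 before it is compared, so the poison fill adds nothing. If a poison fill were still wanted, a warning-free spelling could build the value from its bit pattern explicitly, using the xnn_float16_from_bits helper that appears later in this same commit; a sketch only:

xnnpack::Buffer<xnn_float16> output2(output.size(),
                                     xnn_float16_from_bits(UINT16_C(0xDEAD)));
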

test/fully-connected-operator-tester.h (1 addition & 1 deletion)

@@ -1786,7 +1786,7 @@ class FullyConnectedOperatorTester {
batch_size(),
/*threadpool=*/nullptr));

- xnnpack::Buffer<xnn_float16> output2(output.size(), UINT16_C(0xDEAD));
+ xnnpack::Buffer<xnn_float16> output2(output.size());
ASSERT_EQ(xnn_status_success,
xnn_setup_fully_connected_nc_qd8_f16_qc8w(
fully_connected_op2,

test/indirection.cc (1 addition & 1 deletion)

@@ -168,7 +168,7 @@ class IndirectionTester {
kernel_size + (output_width - 1) * step_width * kernel_height_;

input_ = xnnpack::Buffer<float>(channels_ * input_height_ * input_width_);
- std::iota(input_.begin(), input_.end(), 0);
+ std::iota(input_.begin(), input_.end(), 0.0f);
zero_buffer_ = xnnpack::Buffer<float>(channels_, 0.0f);

const size_t indirect_top_height = divide_round_up(padding_height_ / 2, subsampling_);

test/packq-microkernel-tester.cc (1 addition & 1 deletion)

@@ -31,7 +31,7 @@ void PackQMicrokernelTester::Test(xnn_x8_packq_f32qp8_ukernel_fn packq) const {
xnnpack::Buffer<int8_t, XNN_ALLOCATION_ALIGNMENT> packed_w_ref(packed_size);

// Populate the input and output data.
- std::iota(input.begin(), input.end(), 0);
+ std::iota(input.begin(), input.end(), 0.0f);
// TODO(b/372820266): Remove these fill calls that hide uninitialized memory bugs.
std::fill(packed_w.begin(), packed_w.end(), INT8_C(0x12));
std::fill(packed_w_ref.begin(), packed_w_ref.end(), INT8_C(0x7B));

test/packw-microkernel-tester.h (5 additions & 6 deletions)

@@ -343,10 +343,9 @@ class PackWMicrokernelTester {
g() * (packed_n() * packed_k() + packed_n()));
xnnpack::Buffer<xnn_float16> packed_w_ref(g() * (packed_n() * packed_k() + packed_n()));

- const xnn_float16 pad_value = std::max(sr(), kr()) == 1 ? UINT16_C(0xDEAD) : 0;
- std::iota(weights.begin(), weights.end(), UINT16_C(0x0001));
- std::iota(bias.begin(), bias.end(), UINT16_C(0x8000));
- std::fill(packed_w.begin(), packed_w.end(), UINT16_C(0xBEEF));
+ const xnn_float16 pad_value = xnn_float16_from_bits(std::max(sr(), kr()) == 1 ? UINT16_C(0xDEAD) : 0);
+ std::iota(weights.begin(), weights.end(), 1.0f);
+ std::iota(bias.begin(), bias.end(), 0.5f);
std::fill(packed_w_ref.begin(), packed_w_ref.end(), pad_value);

// Mandate zero-padding of weights to packed_k() in K dimension.
@@ -371,8 +370,8 @@

// Call optimized micro-kernel.
packw(g(), n(), k(), nr(), kr(), sr(),
- reinterpret_cast<const uint16_t*>(weights.data()),
- reinterpret_cast<const uint16_t*>(bias_data), /*scale=*/nullptr,
+ reinterpret_cast<const uint16_t*>(weights.data()),
+ reinterpret_cast<const uint16_t*>(bias_data), /*scale=*/nullptr,
reinterpret_cast<uint16_t*>(packed_w.data()),
/*extra_bytes=*/0, /*params=*/nullptr);

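
One detail in this file worth calling out: the weights and bias are now seeded with ordinary float values (1.0f, 0.5f), while the pad value is the one place a specific 16-bit pattern is still intended, so it goes through xnn_float16_from_bits rather than an integer constant converting by value. A rough fragment illustrating the difference, assuming the XNNPACK float16 helpers are in scope and that xnn_float16 has the converting constructor from float that the std::iota calls above also rely on:

// Value conversion: 1.0f is encoded as the half-precision value 1.0 (bits 0x3C00).
xnn_float16 one = 1.0f;
// Bit reinterpretation: the 16-bit pattern 0xDEAD is used verbatim as the
// half-precision payload, which is what the padding checks rely on.
xnn_float16 pad = xnn_float16_from_bits(UINT16_C(0xDEAD));
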

test/static-constant-pad.cc (3 additions & 3 deletions)

@@ -397,7 +397,7 @@ TEST_F(StaticConstantPadTestF16, matches_operator_api)
operator_output = xnnpack::Buffer<xnn_float16>(NumElements(output_dims));
subgraph_output = xnnpack::Buffer<xnn_float16>(operator_output.size());

- std::iota(input.begin(), input.end(), 0);
+ std::iota(input.begin(), input.end(), 0.0f);

ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));

@@ -470,7 +470,7 @@ TEST_F(StaticConstantPadTestF32, matches_operator_api)
operator_output = xnnpack::Buffer<float>(NumElements(output_dims));
subgraph_output = xnnpack::Buffer<float>(operator_output.size());

- std::iota(input.begin(), input.end(), 0);
+ std::iota(input.begin(), input.end(), 0.0f);

ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));

@@ -540,7 +540,7 @@ TEST_F(StaticConstantPadTestF32, reshape_output)
}
subgraph_output = xnnpack::Buffer<float>(NumElements(output_dims));

- std::iota(input.begin(), input.end(), 0);
+ std::iota(input.begin(), input.end(), 0.0f);

ASSERT_EQ(xnn_status_success, xnn_initialize(/*allocator=*/nullptr));


test/transpose-operator-tester.h (2 additions & 2 deletions)

@@ -144,7 +144,7 @@ class TransposeOperatorTester {
}
ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
xnn_operator_t transpose_op = nullptr;
- std::iota(input.begin(), input.end(), 0);
+ std::iota(input.begin(), input.end(), 0.0f);

ASSERT_EQ(xnn_status_success,
xnn_create_transpose_nd_x16(0, &transpose_op));
@@ -185,7 +185,7 @@ class TransposeOperatorTester {
input_stride[i - 1] = input_stride[i] * shape_[i];
output_stride[i - 1] = output_stride[i] * shape_[perm()[i]];
}
- std::iota(input.begin(), input.end(), 0);
+ std::iota(input.begin(), input.end(), 0.0f);

// Call transpose eager API
ASSERT_EQ(xnn_status_success,
