9 changes: 5 additions & 4 deletions src/main/cpp/src/from_json_to_structs.cu

@@ -17,6 +17,7 @@
 #include "cast_string.hpp"
 #include "json_utils.hpp"
 #include "nvtx_ranges.hpp"
+#include "utilities/iterator.cuh"
 
 #include <cudf/column/column_device_view.cuh>
 #include <cudf/column/column_factories.hpp>
@@ -324,7 +325,7 @@ std::pair<std::unique_ptr<cudf::column>, bool> try_remove_quotes_for_floats(
     return {str, size};
   });
 
-  auto const size_it = cudf::detail::make_counting_transform_iterator(
+  auto const size_it = spark_rapids_jni::util::make_counting_transform_iterator(
     0,
     cuda::proclaim_return_type<cudf::size_type>(
       [string_pairs = string_pairs.begin()] __device__(cudf::size_type idx) -> cudf::size_type {
@@ -398,7 +399,7 @@ std::unique_ptr<cudf::column> cast_strings_to_decimals(cudf::column_view const&
 
   {
     using count_type = cuda::std::tuple<int8_t, int8_t>;
-    auto const check_it = cudf::detail::make_counting_transform_iterator(
+    auto const check_it = spark_rapids_jni::util::make_counting_transform_iterator(
       0,
       cuda::proclaim_return_type<count_type>(
         [chars = input_sv.chars_begin(stream)] __device__(auto idx) {
@@ -440,7 +441,7 @@ std::unique_ptr<cudf::column> cast_strings_to_decimals(cudf::column_view const&
       stream.value());
   }
 
-  auto const out_size_it = cudf::detail::make_counting_transform_iterator(
+  auto const out_size_it = spark_rapids_jni::util::make_counting_transform_iterator(
    0,
    cuda::proclaim_return_type<cudf::size_type>(
      [offsets = in_offsets,
@@ -557,7 +558,7 @@ std::pair<std::unique_ptr<cudf::column>, bool> try_remove_quotes(
     return {chars + start_offset, size};
   });
 
-  auto const size_it = cudf::detail::make_counting_transform_iterator(
+  auto const size_it = spark_rapids_jni::util::make_counting_transform_iterator(
     0,
     cuda::proclaim_return_type<cudf::size_type>(
       [string_pairs = string_pairs.begin()] __device__(cudf::size_type idx) -> cudf::size_type {
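
The header added at the top of each file, src/main/cpp/src/utilities/iterator.cuh, is not itself shown in this diff. Presumably it exists so call sites stop depending on cudf's detail namespace, which is internal API with no stability guarantees. A minimal sketch of what the new helper plausibly looks like, assuming it mirrors the cudf::detail::make_counting_transform_iterator it replaces (the real header may differ):

#pragma once

#include <cudf/types.hpp>

#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>

namespace spark_rapids_jni::util {

// Returns a random-access iterator that lazily yields f(start), f(start + 1), ...
// on dereference; a drop-in for cudf::detail::make_counting_transform_iterator.
template <typename UnaryFunction>
auto make_counting_transform_iterator(cudf::size_type start, UnaryFunction f)
{
  return thrust::make_transform_iterator(thrust::counting_iterator<cudf::size_type>(start), f);
}

}  // namespace spark_rapids_jni::util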
3 changes: 2 additions & 1 deletion src/main/cpp/src/get_json_object.cu

@@ -17,6 +17,7 @@
 #include "get_json_object.hpp"
 #include "json_parser.cuh"
 #include "nvtx_ranges.hpp"
+#include "utilities/iterator.cuh"
 
 #include <cudf/column/column_device_view.cuh>
 #include <cudf/column/column_factories.hpp>
@@ -1098,7 +1099,7 @@ std::vector<std::unique_ptr<cudf::column>> get_json_object_batch(
 
   // The string sizes computed in the previous kernel call will be used to allocate a new char
   // buffer to store the output.
-  auto const size_it = cudf::detail::make_counting_transform_iterator(
+  auto const size_it = spark_rapids_jni::util::make_counting_transform_iterator(
     0,
     cuda::proclaim_return_type<cudf::size_type>(
       [string_pairs = out_sview.data()] __device__(auto const idx) {
18 changes: 10 additions & 8 deletions src/main/cpp/src/hyper_log_log_plus_plus.cu

@@ -16,16 +16,18 @@
 #include "hash/hash.hpp"
 #include "hyper_log_log_plus_plus.hpp"
 #include "hyper_log_log_plus_plus_const.hpp"
+#include "utilities/iterator.cuh"
 
 #include <cudf/column/column_device_view.cuh>
 #include <cudf/column/column_factories.hpp>
 #include <cudf/detail/iterator.cuh>
 #include <cudf/detail/utilities/cuda.cuh>
 #include <cudf/detail/utilities/grid_1d.cuh>
 #include <cudf/detail/utilities/integer_utils.hpp>
 #include <cudf/detail/utilities/vector_factories.hpp>
 #include <cudf/null_mask.hpp>
 #include <cudf/scalar/scalar.hpp>
 #include <cudf/structs/structs_column_view.hpp>
 #include <cudf/table/table.hpp>
 #include <cudf/table/table_view.hpp>
 #include <cudf/utilities/span.hpp>
@@ -445,7 +447,7 @@ std::unique_ptr<cudf::column> group_hllpp(cudf::column_view const& input,
 
   // 4. create output columns
   auto num_long_cols = num_registers_per_sketch / REGISTERS_PER_LONG + 1;
-  auto const results_iter = cudf::detail::make_counting_transform_iterator(0, [&](int i) {
+  auto const results_iter = spark_rapids_jni::util::make_counting_transform_iterator(0, [&](int i) {
     return cudf::make_numeric_column(
       cudf::data_type{cudf::type_id::INT64}, num_groups, cudf::mask_state::UNALLOCATED, stream, mr);
   });
@@ -609,7 +611,7 @@ std::unique_ptr<cudf::column> group_merge_hllpp(
     rmm::device_uvector<int32_t>(num_threads_per_col_phase1, stream, default_mr);
 
   cudf::structs_column_view scv(hll_input);
-  auto const input_iter = cudf::detail::make_counting_transform_iterator(
+  auto const input_iter = spark_rapids_jni::util::make_counting_transform_iterator(
     0, [&](int i) { return scv.get_sliced_child(i, stream).begin<int64_t>(); });
   auto input_cols = std::vector<int64_t const*>(input_iter, input_iter + num_long_cols);
   auto d_inputs = cudf::detail::make_device_uvector(input_cols, stream, default_mr);
@@ -637,7 +639,7 @@ std::unique_ptr<cudf::column> group_merge_hllpp(
   }
 
   // create output columns
-  auto const results_iter = cudf::detail::make_counting_transform_iterator(0, [&](int i) {
+  auto const results_iter = spark_rapids_jni::util::make_counting_transform_iterator(0, [&](int i) {
     return cudf::make_numeric_column(
       cudf::data_type{cudf::type_id::INT64}, num_groups, cudf::mask_state::UNALLOCATED, stream, mr);
   });
@@ -740,7 +742,7 @@ std::unique_ptr<cudf::scalar> reduce_hllpp(cudf::column_view const& input,
 
   // 2. generate long columns, the size of each long column is 1
   auto num_long_cols = num_registers_per_sketch / REGISTERS_PER_LONG + 1;
-  auto const results_iter = cudf::detail::make_counting_transform_iterator(0, [&](int i) {
+  auto const results_iter = spark_rapids_jni::util::make_counting_transform_iterator(0, [&](int i) {
     return cudf::make_numeric_column(cudf::data_type{cudf::type_id::INT64},
                                      1 /**num_groups*/,
                                      cudf::mask_state::UNALLOCATED,
@@ -804,14 +806,14 @@ std::unique_ptr<cudf::scalar> reduce_merge_hllpp(cudf::column_view const& input,
   int64_t num_registers_per_sketch = 1 << precision;
   auto num_long_cols = num_registers_per_sketch / REGISTERS_PER_LONG + 1;
   cudf::structs_column_view scv(input);
-  auto const input_iter = cudf::detail::make_counting_transform_iterator(
+  auto const input_iter = spark_rapids_jni::util::make_counting_transform_iterator(
     0, [&](int i) { return scv.get_sliced_child(i, stream).begin<int64_t>(); });
   auto input_cols = std::vector<int64_t const*>(input_iter, input_iter + num_long_cols);
   auto const default_mr = cudf::get_current_device_resource_ref();
   auto d_inputs = cudf::detail::make_device_uvector(input_cols, stream, default_mr);
 
   // create one row output
-  auto const results_iter = cudf::detail::make_counting_transform_iterator(0, [&](int i) {
+  auto const results_iter = spark_rapids_jni::util::make_counting_transform_iterator(0, [&](int i) {
     return cudf::make_numeric_column(cudf::data_type{cudf::type_id::INT64},
                                      1 /** num_rows */,
                                      cudf::mask_state::UNALLOCATED,
@@ -951,7 +953,7 @@ std::unique_ptr<cudf::column> estimate_from_hll_sketches(cudf::column_view const
     CUDF_EXPECTS(input.child(i).type().id() == cudf::type_id::INT64,
                  "HyperLogLogPlusPlus buffer type must be a STRUCT of long columns.");
   }
-  auto const input_iter = cudf::detail::make_counting_transform_iterator(
+  auto const input_iter = spark_rapids_jni::util::make_counting_transform_iterator(
     0, [&](int i) { return input.child(i).begin<int64_t>(); });
   auto const h_input_ptrs =
     std::vector<int64_t const*>(input_iter, input_iter + input.num_children());
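
As the hyper_log_log_plus_plus.cu hunks above show, the helper is used with plain host lambdas as well as device lambdas: a std::vector is constructed directly from an iterator pair, invoking the lambda once per index on the CPU. A self-contained illustration of that idiom (names here are hypothetical, not from the PR):

#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>

#include <string>
#include <vector>

int main()
{
  // Host-side lambda: each dereference runs the functor on the CPU, so the
  // vector constructor below materializes "col0", "col1", ..., "col4".
  auto const label_it = thrust::make_transform_iterator(
    thrust::counting_iterator<int>(0), [](int i) { return "col" + std::to_string(i); });
  auto const labels = std::vector<std::string>(label_it, label_it + 5);
  return labels.size() == 5 ? 0 : 1;
}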
8 changes: 5 additions & 3 deletions src/main/cpp/src/hyper_log_log_plus_plus_host_udf.cu

@@ -16,9 +16,11 @@
 #include "hyper_log_log_plus_plus.hpp"
 #include "hyper_log_log_plus_plus_const.hpp"
 #include "hyper_log_log_plus_plus_host_udf.hpp"
+#include "utilities/iterator.cuh"
 
 #include <cudf/column/column_factories.hpp>
 #include <cudf/detail/iterator.cuh>
 #include <cudf/scalar/scalar.hpp>
 #include <cudf/table/table.hpp>
 
 namespace spark_rapids_jni {
 
@@ -55,7 +57,7 @@ struct hllpp_groupby_udf : cudf::groupby_host_udf {
   {
     int num_registers = 1 << precision;
     int num_long_cols = num_registers / REGISTERS_PER_LONG + 1;
-    auto const results_iter = cudf::detail::make_counting_transform_iterator(
+    auto const results_iter = spark_rapids_jni::util::make_counting_transform_iterator(
       0, [&](int i) { return cudf::make_empty_column(cudf::data_type{cudf::type_id::INT64}); });
     auto children =
       std::vector<std::unique_ptr<cudf::column>>(results_iter, results_iter + num_long_cols);
@@ -99,7 +101,7 @@ struct hllpp_reduct_udf : cudf::reduce_host_udf {
   {
     int num_registers = 1 << precision;
     int num_long_cols = num_registers / REGISTERS_PER_LONG + 1;
-    auto const results_iter = cudf::detail::make_counting_transform_iterator(
+    auto const results_iter = spark_rapids_jni::util::make_counting_transform_iterator(
      0, [&](int i) { return cudf::make_empty_column(cudf::data_type{cudf::type_id::INT64}); });
     auto children =
       std::vector<std::unique_ptr<cudf::column>>(results_iter, results_iter + num_long_cols);
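
For the device-side call sites (the size_it and check_it definitions in the first two files), the swap is a drop-in because the surrounding code only requires a random-access iterator over f(0), f(1), .... A minimal standalone sketch of that pattern, using thrust directly rather than the project helper; it needs nvcc with --extended-lambda:

#include <cuda/functional>

#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/reduce.h>

#include <cstdio>

int main()
{
  // Same shape as the size_it definitions above: a __device__ lambda whose
  // return type is pinned with cuda::proclaim_return_type.
  auto const square_it = thrust::make_transform_iterator(
    thrust::counting_iterator<int>(0),
    cuda::proclaim_return_type<int>([] __device__(int i) { return i * i; }));
  // Reduce on the GPU: 0*0 + 1*1 + ... + 9*9 = 285.
  int const sum = thrust::reduce(thrust::device, square_it, square_it + 10);
  std::printf("sum = %d\n", sum);
  return 0;
}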