Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
54 changes: 52 additions & 2 deletions datafusion/physical-plan/benches/aggregate_vectorized.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,8 @@
// specific language governing permissions and limitations
// under the License.

use arrow::array::ArrayRef;
use arrow::datatypes::{Int32Type, StringViewType};
use arrow::array::{ArrayRef, UInt64Array};
use arrow::datatypes::{Field, Int32Type, Schema, StringViewType};
use arrow::util::bench_util::{
create_primitive_array, create_string_view_array_with_len,
create_string_view_array_with_max_len,
Expand All @@ -30,6 +30,8 @@ use criterion::{
use datafusion_physical_plan::aggregates::group_values::multi_group_by::GroupColumn;
use datafusion_physical_plan::aggregates::group_values::multi_group_by::bytes_view::ByteViewGroupValueBuilder;
use datafusion_physical_plan::aggregates::group_values::multi_group_by::primitive::PrimitiveGroupValueBuilder;
use datafusion_physical_plan::aggregates::group_values::new_group_values;
use datafusion_physical_plan::aggregates::order::GroupOrdering;
use rand::distr::{Bernoulli, Distribution};
use std::hint::black_box;
use std::sync::Arc;
Expand All @@ -40,6 +42,7 @@ const NULL_DENSITIES: [f32; 3] = [0.0, 0.1, 0.5];
/// Criterion entry point: registers every group-values benchmark in this file.
///
/// Registration order here determines the order benchmarks appear in the
/// criterion report.
fn bench_vectorized_append(c: &mut Criterion) {
byte_view_vectorized_append(c);
primitive_vectorized_append(c);
single_group_by_primitive_intern(c);
}

fn byte_view_vectorized_append(c: &mut Criterion) {
Expand Down Expand Up @@ -179,6 +182,53 @@ fn primitive_vectorized_append(c: &mut Criterion) {
group.finish();
}

fn single_group_by_primitive_intern(c: &mut Criterion) {
const BATCH_SIZE: usize = 4096;
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

should be batch size be like default?

const BATCHES: usize = 16;

let cases = [
("low_cardinality", 128),
("high_cardinality", BATCH_SIZE * BATCHES),
];

let schema = Arc::new(Schema::new(vec![Field::new(
"group_key",
DataType::UInt64,
true,
)]));
let mut group = c.benchmark_group("GroupValuesPrimitive_intern");

for (case_name, distinct) in cases {
let batches = (0..BATCHES)
.map(|batch| {
let start = batch * BATCH_SIZE;
let values = (start..start + BATCH_SIZE)
.map(|value| Some((value % distinct) as u64));
Arc::new(UInt64Array::from_iter(values)) as ArrayRef
})
.collect::<Vec<_>>();

group.bench_function(case_name, |b| {
b.iter(|| {
let mut group_values =
new_group_values(Arc::clone(&schema), &GroupOrdering::None).unwrap();
let mut groups = Vec::with_capacity(BATCH_SIZE);

for batch in &batches {
group_values
.intern(std::slice::from_ref(batch), &mut groups)
.unwrap();
black_box(&groups);
}

black_box(group_values.len());
});
});
}

group.finish();
}

fn bench_single_primitive<const NULLABLE: bool>(
group: &mut BenchmarkGroup<WallTime>,
size: usize,
Expand Down
45 changes: 44 additions & 1 deletion datafusion/physical-plan/src/aggregates/group_values/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ use arrow::array::types::{
use arrow::array::{ArrayRef, downcast_primitive};
use arrow::datatypes::{DataType, SchemaRef, TimeUnit};
use datafusion_common::Result;
use std::mem::size_of;

use datafusion_expr::EmitTo;

Expand All @@ -51,6 +52,32 @@ mod null_builder;

pub(crate) use metrics::GroupByMetrics;

/// Upper bound, in bytes, on the dense lookup table a hint may request.
pub(crate) const DENSE_LOOKUP_MAX_BYTES: usize = 2 * 1024 * 1024;
/// Numerator of the minimum estimated fill ratio (rows / range length).
pub(crate) const DENSE_LOOKUP_MIN_FILL_NUMERATOR: usize = 1;
/// Denominator of the minimum estimated fill ratio (rows / range length).
pub(crate) const DENSE_LOOKUP_MIN_FILL_DENOMINATOR: usize = 5;

/// Hint that the group-by key values fall into a small, known integer range,
/// enabling a dense (array-indexed) lookup instead of a hash table.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub(crate) struct DenseLookupHint {
    /// Smallest key value in the range.
    pub(crate) min: i128,
    /// Number of slots covering `min..=max` inclusive.
    pub(crate) range_len: usize,
}

impl DenseLookupHint {
    /// Builds a hint for the inclusive range `min..=max`.
    ///
    /// Returns `None` when the range length overflows, does not fit in
    /// `usize`, or would require a lookup table larger than
    /// [`DENSE_LOOKUP_MAX_BYTES`].
    pub(crate) fn try_new(min: i128, max: i128) -> Option<Self> {
        let span = max.checked_sub(min)?.checked_add(1)?;
        let range_len = usize::try_from(span).ok()?;
        let table_bytes = range_len.checked_mul(size_of::<usize>())?;
        if table_bytes > DENSE_LOOKUP_MAX_BYTES {
            return None;
        }
        Some(Self { min, range_len })
    }

    /// Returns `true` when `rows_per_partition / range_len` is expected to
    /// exceed the minimum fill ratio, i.e. the dense table would not be too
    /// sparse to be worthwhile.
    pub(crate) fn has_minimum_estimated_fill(self, rows_per_partition: usize) -> bool {
        let scaled_range = self.range_len * DENSE_LOOKUP_MIN_FILL_NUMERATOR;
        // Multiplication overflow means the row count is enormous relative to
        // the (bounded) range, so the table is certainly filled enough.
        match rows_per_partition.checked_mul(DENSE_LOOKUP_MIN_FILL_DENOMINATOR) {
            Some(scaled_rows) => scaled_rows > scaled_range,
            None => true,
        }
    }
}

/// Stores the group values during hash aggregation.
///
/// # Background
Expand Down Expand Up @@ -134,13 +161,29 @@ pub trait GroupValues: Send {
/// Creates a [`GroupValues`] implementation appropriate for `schema` and the
/// given `group_ordering`, without supplying a dense-lookup hint.
///
/// Delegates to `new_group_values_with_hint` with `dense_lookup_hint = None`.
pub fn new_group_values(
schema: SchemaRef,
group_ordering: &GroupOrdering,
) -> Result<Box<dyn GroupValues>> {
new_group_values_with_hint(schema, group_ordering, None)
}

pub(crate) fn new_group_values_with_hint(
schema: SchemaRef,
group_ordering: &GroupOrdering,
dense_lookup_hint: Option<DenseLookupHint>,
) -> Result<Box<dyn GroupValues>> {
if schema.fields.len() == 1 {
let d = schema.fields[0].data_type();

macro_rules! downcast_helper {
($t:ty, $d:ident) => {
return Ok(Box::new(GroupValuesPrimitive::<$t>::new($d.clone())))
return Ok(match dense_lookup_hint {
Some(hint) => {
Box::new(GroupValuesPrimitive::<$t>::new_with_dense_lookup_hint(
$d.clone(),
Some(hint),
))
}
None => Box::new(GroupValuesPrimitive::<$t>::new($d.clone())),
})
};
}

Expand Down
Loading
Loading