From 47fe66aca3c7a5c807d6ada8faf956a0f50be713 Mon Sep 17 00:00:00 2001 From: Daniel Gomez Date: Mon, 5 Jan 2026 17:14:41 -0500 Subject: [PATCH 1/6] Implement segmentation mask support for tensors --- Cargo.lock | 1 + .../definitions/rerun/archetypes/tensor.fbs | 5 + .../re_sdk_types/src/archetypes/tensor.rs | 102 ++++++-- .../store/re_sdk_types/src/reflection/mod.rs | 7 + crates/viewer/re_data_ui/src/image.rs | 1 + .../re_view_spatial/src/picking_ui_pixel.rs | 2 + crates/viewer/re_view_tensor/Cargo.toml | 3 +- .../re_view_tensor/src/tensor_slice_to_gpu.rs | 72 +++++- .../viewer/re_view_tensor/src/view_class.rs | 218 +++++++++++++----- .../re_view_tensor/src/visualizer_system.rs | 41 +++- .../src/gpu_bridge/colormap.rs | 1 + .../re_viewer_context/src/gpu_bridge/mod.rs | 3 +- .../reference/types/archetypes/tensor.md | 1 + .../reference/types/components/opacity.md | 1 + rerun_cpp/src/rerun/archetypes/tensor.cpp | 15 +- rerun_cpp/src/rerun/archetypes/tensor.hpp | 27 +++ rerun_py/rerun_sdk/rerun/archetypes/tensor.py | 26 ++- .../rerun_sdk/rerun/archetypes/tensor_ext.py | 8 +- 18 files changed, 438 insertions(+), 96 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 05bdbb19aada..2364c5daf63d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10161,6 +10161,7 @@ dependencies = [ "bytemuck", "egui", "half", + "macaw", "ndarray", "re_chunk_store", "re_data_ui", diff --git a/crates/store/re_sdk_types/definitions/rerun/archetypes/tensor.fbs b/crates/store/re_sdk_types/definitions/rerun/archetypes/tensor.fbs index 434e46f83007..f95e7fd20f4f 100644 --- a/crates/store/re_sdk_types/definitions/rerun/archetypes/tensor.fbs +++ b/crates/store/re_sdk_types/definitions/rerun/archetypes/tensor.fbs @@ -36,4 +36,9 @@ table Tensor ( /// E.g. if all values are positive, some bigger than 1.0 and all smaller than 255.0, /// the Viewer will guess that the data likely came from an 8bit image, thus assuming a range of 0-255. 
value_range: rerun.components.ValueRange ("attr.rerun.component_optional", nullable, order: 2000); + + /// Opacity of the tensor for 2D views. + /// + /// Only applied when the tensor is displayed as a 2D slice. + opacity: rerun.components.Opacity ("attr.rerun.component_optional", nullable, order: 3000); } diff --git a/crates/store/re_sdk_types/src/archetypes/tensor.rs b/crates/store/re_sdk_types/src/archetypes/tensor.rs index e212c3c31abd..39dc196dc918 100644 --- a/crates/store/re_sdk_types/src/archetypes/tensor.rs +++ b/crates/store/re_sdk_types/src/archetypes/tensor.rs @@ -70,6 +70,11 @@ pub struct Tensor { /// E.g. if all values are positive, some bigger than 1.0 and all smaller than 255.0, /// the Viewer will guess that the data likely came from an 8bit image, thus assuming a range of 0-255. pub value_range: Option, + + /// Opacity of the tensor for 2D views. + /// + /// Only applied when the tensor is displayed as a 2D slice. + pub opacity: Option, } impl Tensor { @@ -96,6 +101,18 @@ impl Tensor { component_type: Some("rerun.components.ValueRange".into()), } } + + /// Returns the [`ComponentDescriptor`] for [`Self::opacity`]. + /// + /// The corresponding component is [`crate::components::Opacity`]. 
+ #[inline] + pub fn descriptor_opacity() -> ComponentDescriptor { + ComponentDescriptor { + archetype: Some("rerun.archetypes.Tensor".into()), + component: "Tensor:opacity".into(), + component_type: Some("rerun.components.Opacity".into()), + } + } } static REQUIRED_COMPONENTS: std::sync::LazyLock<[ComponentDescriptor; 1usize]> = @@ -104,15 +121,26 @@ static REQUIRED_COMPONENTS: std::sync::LazyLock<[ComponentDescriptor; 1usize]> = static RECOMMENDED_COMPONENTS: std::sync::LazyLock<[ComponentDescriptor; 0usize]> = std::sync::LazyLock::new(|| []); -static OPTIONAL_COMPONENTS: std::sync::LazyLock<[ComponentDescriptor; 1usize]> = - std::sync::LazyLock::new(|| [Tensor::descriptor_value_range()]); +static OPTIONAL_COMPONENTS: std::sync::LazyLock<[ComponentDescriptor; 2usize]> = + std::sync::LazyLock::new(|| { + [ + Tensor::descriptor_value_range(), + Tensor::descriptor_opacity(), + ] + }); -static ALL_COMPONENTS: std::sync::LazyLock<[ComponentDescriptor; 2usize]> = - std::sync::LazyLock::new(|| [Tensor::descriptor_data(), Tensor::descriptor_value_range()]); +static ALL_COMPONENTS: std::sync::LazyLock<[ComponentDescriptor; 3usize]> = + std::sync::LazyLock::new(|| { + [ + Tensor::descriptor_data(), + Tensor::descriptor_value_range(), + Tensor::descriptor_opacity(), + ] + }); impl Tensor { - /// The total number of components in the archetype: 1 required, 0 recommended, 1 optional - pub const NUM_COMPONENTS: usize = 2usize; + /// The total number of components in the archetype: 1 required, 0 recommended, 2 optional + pub const NUM_COMPONENTS: usize = 3usize; } impl ::re_types_core::Archetype for Tensor { @@ -161,7 +189,14 @@ impl ::re_types_core::Archetype for Tensor { .map(|array| { SerializedComponentBatch::new(array.clone(), Self::descriptor_value_range()) }); - Ok(Self { data, value_range }) + let opacity = arrays_by_descr + .get(&Self::descriptor_opacity()) + .map(|array| SerializedComponentBatch::new(array.clone(), Self::descriptor_opacity())); + Ok(Self { + data, + 
value_range, + opacity, + }) } } @@ -169,10 +204,14 @@ impl ::re_types_core::AsComponents for Tensor { #[inline] fn as_serialized_batches(&self) -> Vec { use ::re_types_core::Archetype as _; - [self.data.clone(), self.value_range.clone()] - .into_iter() - .flatten() - .collect() + [ + self.data.clone(), + self.value_range.clone(), + self.opacity.clone(), + ] + .into_iter() + .flatten() + .collect() } } @@ -185,6 +224,7 @@ impl Tensor { Self { data: try_serialize_field(Self::descriptor_data(), [data]), value_range: None, + opacity: None, } } @@ -207,6 +247,10 @@ impl Tensor { crate::components::ValueRange::arrow_empty(), Self::descriptor_value_range(), )), + opacity: Some(SerializedComponentBatch::new( + crate::components::Opacity::arrow_empty(), + Self::descriptor_opacity(), + )), } } @@ -235,6 +279,9 @@ impl Tensor { self.value_range .map(|value_range| value_range.partitioned(_lengths.clone())) .transpose()?, + self.opacity + .map(|opacity| opacity.partitioned(_lengths.clone())) + .transpose()?, ]; Ok(columns.into_iter().flatten()) } @@ -249,7 +296,12 @@ impl Tensor { ) -> SerializationResult> { let len_data = self.data.as_ref().map(|b| b.array.len()); let len_value_range = self.value_range.as_ref().map(|b| b.array.len()); - let len = None.or(len_data).or(len_value_range).unwrap_or(0); + let len_opacity = self.opacity.as_ref().map(|b| b.array.len()); + let len = None + .or(len_data) + .or(len_value_range) + .or(len_opacity) + .unwrap_or(0); self.columns(std::iter::repeat_n(1, len)) } @@ -305,11 +357,35 @@ impl Tensor { self.value_range = try_serialize_field(Self::descriptor_value_range(), value_range); self } + + /// Opacity of the tensor for 2D views. + /// + /// Only applied when the tensor is displayed as a 2D slice. 
+ #[inline] + pub fn with_opacity(mut self, opacity: impl Into) -> Self { + self.opacity = try_serialize_field(Self::descriptor_opacity(), [opacity]); + self + } + + /// This method makes it possible to pack multiple [`crate::components::Opacity`] in a single component batch. + /// + /// This only makes sense when used in conjunction with [`Self::columns`]. [`Self::with_opacity`] should + /// be used when logging a single row's worth of data. + #[inline] + pub fn with_many_opacity( + mut self, + opacity: impl IntoIterator>, + ) -> Self { + self.opacity = try_serialize_field(Self::descriptor_opacity(), opacity); + self + } } impl ::re_byte_size::SizeBytes for Tensor { #[inline] fn heap_size_bytes(&self) -> u64 { - self.data.heap_size_bytes() + self.value_range.heap_size_bytes() + self.data.heap_size_bytes() + + self.value_range.heap_size_bytes() + + self.opacity.heap_size_bytes() } } diff --git a/crates/store/re_sdk_types/src/reflection/mod.rs b/crates/store/re_sdk_types/src/reflection/mod.rs index 95e1ae1b50ee..5d6466bf3095 100644 --- a/crates/store/re_sdk_types/src/reflection/mod.rs +++ b/crates/store/re_sdk_types/src/reflection/mod.rs @@ -3122,6 +3122,13 @@ fn generate_archetype_reflection() -> ArchetypeReflectionMap { docstring_md: "The expected range of values.\n\nThis is typically the expected range of valid values.\nEverything outside of the range is clamped to the range for the purpose of colormpaping.\nAny colormap applied for display, will map this range.\n\nIf not specified, the range will be automatically estimated from the data.\nNote that the Viewer may try to guess a wider range than the minimum/maximum of values\nin the contents of the tensor.\nE.g. 
if all values are positive, some bigger than 1.0 and all smaller than 255.0,\nthe Viewer will guess that the data likely came from an 8bit image, thus assuming a range of 0-255.", is_required: false, }, + ArchetypeFieldReflection { + name: "opacity", + display_name: "Opacity", + component_type: "rerun.components.Opacity".into(), + docstring_md: "Opacity of the tensor for 2D views.\n\nOnly applied when the tensor is displayed as a 2D slice.", + is_required: false, + }, ], }, ), diff --git a/crates/viewer/re_data_ui/src/image.rs b/crates/viewer/re_data_ui/src/image.rs index 36066a5c0e87..b4604b27bbdf 100644 --- a/crates/viewer/re_data_ui/src/image.rs +++ b/crates/viewer/re_data_ui/src/image.rs @@ -166,6 +166,7 @@ fn show_image_preview( ..Default::default() }, debug_name.into(), + None, ) { let color = ui.visuals().error_fg_color; painter.text( diff --git a/crates/viewer/re_view_spatial/src/picking_ui_pixel.rs b/crates/viewer/re_view_spatial/src/picking_ui_pixel.rs index db4b463ee1f2..71c06f4e0c97 100644 --- a/crates/viewer/re_view_spatial/src/picking_ui_pixel.rs +++ b/crates/viewer/re_view_spatial/src/picking_ui_pixel.rs @@ -234,6 +234,7 @@ fn try_show_zoomed_image_region( colormapped_texture.clone(), egui::TextureOptions::NEAREST, interaction_id.debug_label("zoomed_region"), + None, )?; } @@ -293,6 +294,7 @@ fn try_show_zoomed_image_region( colormapped_texture, egui::TextureOptions::NEAREST, interaction_id.debug_label("single_pixel"), + None, ) }) .inner?; diff --git a/crates/viewer/re_view_tensor/Cargo.toml b/crates/viewer/re_view_tensor/Cargo.toml index 959ad2e33c5c..de81a8d889bb 100644 --- a/crates/viewer/re_view_tensor/Cargo.toml +++ b/crates/viewer/re_view_tensor/Cargo.toml @@ -30,7 +30,7 @@ re_sdk_types.workspace = true re_ui.workspace = true re_viewer_context.workspace = true re_viewport_blueprint.workspace = true - +macaw.workspace = true anyhow.workspace = true bytemuck.workspace = true egui.workspace = true @@ -39,6 +39,7 @@ ndarray.workspace = true 
thiserror.workspace = true wgpu.workspace = true + [dev-dependencies] re_test_context.workspace = true re_test_viewport.workspace = true diff --git a/crates/viewer/re_view_tensor/src/tensor_slice_to_gpu.rs b/crates/viewer/re_view_tensor/src/tensor_slice_to_gpu.rs index ac9b1790d219..7b2229eca71c 100644 --- a/crates/viewer/re_view_tensor/src/tensor_slice_to_gpu.rs +++ b/crates/viewer/re_view_tensor/src/tensor_slice_to_gpu.rs @@ -1,11 +1,12 @@ use re_chunk_store::RowId; -use re_renderer::renderer::ColormappedTexture; -use re_renderer::resource_managers::{GpuTexture2D, ImageDataDesc, TextureManager2DError}; -use re_sdk_types::components::GammaCorrection; +use re_renderer::renderer::{ColorMapper, ColormappedTexture}; +use re_renderer::resource_managers::{GpuTexture2D, ImageDataDesc, SourceImageDataFormat, TextureManager2DError}; +use re_sdk_types::components::{ClassId, GammaCorrection}; use re_sdk_types::datatypes::TensorData; use re_sdk_types::tensor_data::{TensorCastError, TensorDataType}; -use re_viewer_context::ColormapWithRange; +use re_viewer_context::{Annotations, ColormapWithRange}; use re_viewer_context::gpu_bridge::{self, colormap_to_re_renderer}; +use wgpu::TextureFormat; use crate::dimension_mapping::TensorSliceSelection; use crate::view_class::selected_tensor_slice; @@ -24,11 +25,74 @@ pub fn colormapped_texture( tensor_data_row_id: RowId, tensor: &TensorData, slice_selection: &TensorSliceSelection, + annotations: &Annotations, colormap: &ColormapWithRange, gamma: GammaCorrection, ) -> Result> { re_tracing::profile_function!(); + if tensor.dtype().is_integer() { + // If it's an integer tensor, check if we have annotations for it. + // We check if any of the values in the tensor (approximated by checking if the class map is non-empty) + // has a description. + // Note: This is a bit of a heuristic. Ideally we'd check if the values in the tensor + // actually match the annotations, or if the user explicitly requested segmentation. 
+ // For now, we assume if there's *any* annotation info, we want to treat it as segmentation. + // This mirrors how `segmentation_image_to_gpu` works implicitly via `ImageKind::Segmentation`. + + // We use the row_id of the tensor for the texture cache key. + // And the row_id of the annotations for the colormap cache key. + + // Note: We should probably look at the actual values in the slice to determine the range + // for the colormap, similar to how segmentation images work. + // For now, we just support u8 and u16. + + let should_use_segmentation = annotations.row_id() != RowId::ZERO; + + if should_use_segmentation { + let colormap_key = egui::util::hash((annotations.row_id(), "tensor_segmentation_colormap")); + + // We only support u8 and u16 class ids for now. + // Any values greater than this will be unmapped. + let max_class_id = 65535; + let num_colors = (max_class_id + 1) as usize; + let colormap_width = 256; + let colormap_height = num_colors.div_ceil(colormap_width); + + let colormap_texture_handle = gpu_bridge::try_get_or_create_texture(render_ctx, colormap_key, || { + let data: Vec = (0..(colormap_width * colormap_height)) + .flat_map(|id| { + let color = annotations + .resolved_class_description(Some(ClassId::from(id as u16))) + .annotation_info() + .color() + .unwrap_or(re_renderer::Color32::TRANSPARENT); + color.to_array() // premultiplied! 
+ }) + .collect(); + + Ok::<_, TensorUploadError>(ImageDataDesc { + label: "class_id_colormap".into(), + data: data.into(), + format: SourceImageDataFormat::WgpuCompatible(TextureFormat::Rgba8UnormSrgb), + width_height: [colormap_width as u32, colormap_height as u32], + }) + })?; + + let texture = upload_texture_slice_to_gpu(render_ctx, tensor_data_row_id, tensor, slice_selection)?; + + return Ok(ColormappedTexture { + texture, + range: [0.0, (colormap_width * colormap_height) as f32], + decode_srgb: false, + texture_alpha: re_renderer::renderer::TextureAlpha::AlreadyPremultiplied, + gamma: 1.0, + color_mapper: ColorMapper::Texture(colormap_texture_handle), + shader_decoding: None, + }); + } + } + let texture = upload_texture_slice_to_gpu(render_ctx, tensor_data_row_id, tensor, slice_selection)?; diff --git a/crates/viewer/re_view_tensor/src/view_class.rs b/crates/viewer/re_view_tensor/src/view_class.rs index 9610b43d372f..b0c8069f4d25 100644 --- a/crates/viewer/re_view_tensor/src/view_class.rs +++ b/crates/viewer/re_view_tensor/src/view_class.rs @@ -4,12 +4,17 @@ use ndarray::Axis; use re_data_ui::tensor_summary_ui_grid_contents; use re_log_types::EntityPath; use re_log_types::hash::Hash64; +use re_renderer::{ + renderer::{RectangleOptions, TexturedRect}, + ViewBuilder, +}; use re_sdk_types::blueprint::archetypes::{self, TensorScalarMapping, TensorViewFit}; use re_sdk_types::blueprint::components::ViewFit; use re_sdk_types::components::{ Colormap, GammaCorrection, MagnificationFilter, TensorDimensionIndexSelection, }; -use re_sdk_types::datatypes::TensorData; +use macaw; +use re_sdk_types::external::glam; use re_sdk_types::{View as _, ViewClassIdentifier}; use re_ui::{Help, UiExt as _, list_item}; use re_view::view_property_ui; @@ -34,9 +39,9 @@ type ViewType = re_sdk_types::blueprint::views::TensorView; #[derive(Default)] pub struct ViewTensorState { - /// Last viewed tensor, copied each frame. + /// Last viewed tensors, copied each frame. 
/// Used for the selection view. - tensor: Option, + tensors: Vec, } impl ViewState for ViewTensorState { @@ -129,11 +134,11 @@ Set the displayed dimensions in a selection panel.", // TODO(andreas): Listitemify ui.selection_grid("tensor_selection_ui").show(ui, |ui| { - if let Some(TensorVisualization { + for TensorVisualization { tensor, tensor_row_id, .. - }) = &state.tensor + } in &state.tensors { let tensor_stats = ctx.store_context.caches.entry(|c: &mut TensorStatsCache| { c.entry(Hash64::hash(*tensor_row_id), tensor) @@ -150,7 +155,7 @@ Set the displayed dimensions in a selection panel.", }); // TODO(#6075): Listitemify - if let Some(TensorVisualization { tensor, .. }) = &state.tensor { + if let Some(TensorVisualization { tensor, .. }) = state.tensors.first() { let slice_property = ViewProperty::from_archetype::< re_sdk_types::blueprint::archetypes::TensorSliceSelection, >(ctx.blueprint_db(), ctx.blueprint_query, view_id); @@ -214,40 +219,26 @@ Set the displayed dimensions in a selection panel.", ) -> Result<(), ViewSystemExecutionError> { re_tracing::profile_function!(); - let tokens = ui.tokens(); - let state = state.downcast_mut::()?; - state.tensor = None; + state.tensors.clear(); let tensors = &system_output.view_systems.get::()?.tensors; let response = { let mut ui = ui.new_child(egui::UiBuilder::new().sense(egui::Sense::click())); - if tensors.len() > 1 { - egui::Frame { - inner_margin: tokens.view_padding().into(), - ..egui::Frame::default() - } - .show(&mut ui, |ui| { - ui.error_label(format!( - "Can only show one tensor at a time; was given {}. 
Update the query so that it \ - returns a single tensor entity and create additional views for the others.", - tensors.len() - )); - }); - } else if let Some(tensor_view) = tensors.first() { - state.tensor = Some(tensor_view.clone()); + if tensors.is_empty() { + ui.centered_and_justified(|ui| ui.label("(empty)")); + } else { + state.tensors = tensors.clone(); self.view_tensor( ctx, &mut ui, state, query.view_id, query.space_origin, - &tensor_view.tensor, + tensors, )?; - } else { - ui.centered_and_justified(|ui| ui.label("(empty)")); } ui.response() @@ -274,10 +265,13 @@ impl TensorView { state: &ViewTensorState, view_id: ViewId, space_origin: &EntityPath, - tensor: &TensorData, + tensors: &[TensorVisualization], ) -> Result<(), ViewSystemExecutionError> { re_tracing::profile_function!(); + // Use the first tensor for slice selection + let tensor = &tensors[0].tensor; + let slice_property = ViewProperty::from_archetype::< re_sdk_types::blueprint::archetypes::TensorSliceSelection, >(ctx.blueprint_db(), ctx.blueprint_query, view_id); @@ -328,7 +322,7 @@ impl TensorView { egui::ScrollArea::both().auto_shrink(false).show(ui, |ui| { let ctx = self.view_context(ctx, view_id, state, space_origin); if let Err(err) = - Self::tensor_slice_ui(&ctx, ui, state, dimension_labels, &slice_selection) + Self::tensor_slice_ui(&ctx, ui, tensors, dimension_labels, &slice_selection) { ui.error_label(err.to_string()); } @@ -340,11 +334,11 @@ impl TensorView { fn tensor_slice_ui( ctx: &ViewContext<'_>, ui: &mut egui::Ui, - state: &ViewTensorState, + tensors: &[TensorVisualization], dimension_labels: [Option<(String, bool)>; 2], slice_selection: &TensorSliceSelection, ) -> anyhow::Result<()> { - let (response, image_rect) = Self::paint_tensor_slice(ctx, ui, state, slice_selection)?; + let (response, image_rect) = Self::paint_tensor_slice(ctx, ui, tensors, slice_selection)?; if !response.hovered() { let font_id = egui::TextStyle::Body.resolve(ui.style()); @@ -357,19 +351,24 @@ impl 
TensorView { fn paint_tensor_slice( ctx: &ViewContext<'_>, ui: &mut egui::Ui, - state: &ViewTensorState, + tensors: &[TensorVisualization], slice_selection: &TensorSliceSelection, ) -> anyhow::Result<(egui::Response, egui::Rect)> { re_tracing::profile_function!(); - let Some(tensor_view) = state.tensor.as_ref() else { - anyhow::bail!("No tensor data available."); - }; + if tensors.is_empty() { + anyhow::bail!("No tensor data available."); + } + + // We use the first tensor to determine size and placement + let first_tensor_view = &tensors[0]; let TensorVisualization { - tensor_row_id, - tensor, - data_range, - } = &tensor_view; + tensor_row_id: first_tensor_row_id, + tensor: first_tensor, + data_range: first_data_range, + annotations: first_annotations, + .. + } = first_tensor_view; let scalar_mapping = ViewProperty::from_archetype::( ctx.blueprint_db(), @@ -383,19 +382,21 @@ impl TensorView { let mag_filter: MagnificationFilter = scalar_mapping .component_or_fallback(ctx, TensorScalarMapping::descriptor_mag_filter().component)?; - let colormap = ColormapWithRange { + let first_colormap = ColormapWithRange { colormap, - value_range: [data_range.start() as f32, data_range.end() as f32], + value_range: [first_data_range.start() as f32, first_data_range.end() as f32], }; - let colormapped_texture = super::tensor_slice_to_gpu::colormapped_texture( + // Load the first texture up front: its dimensions drive the layout, and it is reused when rendering below. + let first_colormapped_texture = super::tensor_slice_to_gpu::colormapped_texture( ctx.render_ctx(), - *tensor_row_id, - tensor, + *first_tensor_row_id, + first_tensor, slice_selection, - &colormap, + first_annotations, + &first_colormap, gamma, )?; - let [width, height] = colormapped_texture.width_height(); + let [width, height] = first_colormapped_texture.width_height(); let view_fit: ViewFit = ViewProperty::from_archetype::( ctx.blueprint_db(), @@ -416,31 +417,122 @@ impl TensorView { }; let (response, painter) = ui.allocate_painter(desired_size, egui::Sense::hover());
- let rect = response.rect; - let image_rect = egui::Rect::from_min_max(rect.min, rect.max); - let texture_options = egui::TextureOptions { - magnification: match mag_filter { - MagnificationFilter::Nearest => egui::TextureFilter::Nearest, - MagnificationFilter::Linear => egui::TextureFilter::Linear, - }, - minification: egui::TextureFilter::Linear, // TODO(andreas): allow for mipmapping based filter - wrap_mode: egui::TextureWrapMode::ClampToEdge, - mipmap_mode: None, + let image_rect = egui::Rect::from_min_max(response.rect.min, response.rect.max); + + let texture_filter_magnification = match mag_filter { + MagnificationFilter::Nearest => re_renderer::renderer::TextureFilterMag::Nearest, + MagnificationFilter::Linear => re_renderer::renderer::TextureFilterMag::Linear, }; + // TODO(andreas): allow for mipmapping based filter + let texture_filter_minification = re_renderer::renderer::TextureFilterMin::Linear; - gpu_bridge::render_image( - ctx.render_ctx(), - &painter, - image_rect, - colormapped_texture, - texture_options, - re_renderer::DebugLabel::from("tensor_slice"), - )?; + // Prepare all textured rects + let mut textured_rects = Vec::with_capacity(tensors.len()); + + let space_rect = egui::Rect::from_min_size(egui::Pos2::ZERO, image_rect.size()); + + for (i, tensor_view) in tensors.iter().enumerate() { + let TensorVisualization { + tensor_row_id, + tensor, + data_range, + annotations, + opacity, + } = tensor_view; + + let colormap_with_range = ColormapWithRange { + colormap, + value_range: [data_range.start() as f32, data_range.end() as f32], + }; + + // Optimization: Reuse first texture if it's the first one + let colormapped_texture = if i == 0 { + first_colormapped_texture.clone() + } else { + super::tensor_slice_to_gpu::colormapped_texture( + ctx.render_ctx(), + *tensor_row_id, + tensor, + slice_selection, + annotations, + &colormap_with_range, + gamma, + )? + }; + + // TODO(andreas): Check if dimensions match. 
If not, maybe we should warn or try to center? + // For now, we assume they match (e.g. an MRI volume overlaid with its segmentation mask). + // We scale all subsequent tensors to the first one's destination rect. + + let multiplicative_tint = egui::Rgba::from_white_alpha(*opacity); + + textured_rects.push(TexturedRect { + top_left_corner_position: glam::vec3(space_rect.min.x, space_rect.min.y, 0.0), + extent_u: glam::Vec3::X * space_rect.width(), + extent_v: glam::Vec3::Y * space_rect.height(), + colormapped_texture, + options: RectangleOptions { + texture_filter_magnification, + texture_filter_minification, + multiplicative_tint, + ..Default::default() + }, + }); + } + + // --- Render the batch --- + let viewport = painter.clip_rect().intersect(image_rect); + if viewport.is_positive() { + let pixels_per_point = painter.ctx().pixels_per_point(); + let resolution_in_pixel = gpu_bridge::viewport_resolution_in_pixels(viewport, pixels_per_point); + + if resolution_in_pixel[0] > 0 && resolution_in_pixel[1] > 0 { + let ui_from_space = egui::emath::RectTransform::from_to(space_rect, image_rect); + let space_from_ui = ui_from_space.inverse(); + let space_from_points = space_from_ui.scale().y; + let points_from_pixels = 1.0 / pixels_per_point; + let space_from_pixel = space_from_points * points_from_pixels; + + let camera_position_space = space_from_ui.transform_pos(viewport.min); + let top_left_position = glam::vec2(camera_position_space.x, camera_position_space.y); + + let target_config = re_renderer::view_builder::TargetConfiguration { + name: re_renderer::DebugLabel::from("tensor_slice_batch"), + resolution_in_pixel, + view_from_world: macaw::IsoTransform::from_translation(-top_left_position.extend(0.0)), + projection_from_view: re_renderer::view_builder::Projection::Orthographic { + camera_mode: re_renderer::view_builder::OrthographicCameraMode::TopLeftCornerAndExtendZ, + vertical_world_size: space_from_pixel * resolution_in_pixel[1] as f32, + far_plane_distance: 1000.0, + },
+ viewport_transformation: re_renderer::RectTransform::IDENTITY, + pixels_per_point, + ..Default::default() + }; + + let mut view_builder = ViewBuilder::new(ctx.render_ctx(), target_config)?; + + view_builder.queue_draw( + ctx.render_ctx(), + re_renderer::renderer::RectangleDrawData::new(ctx.render_ctx(), &textured_rects)?, + ); + + painter.add(gpu_bridge::new_renderer_callback( + view_builder, + viewport, + re_renderer::Rgba::TRANSPARENT, + )); + } + } Ok((response, image_rect)) } } + +// The remaining helpers below (`selectors_ui`, `selected_tensor_slice`, ...) +// are unchanged by the multi-tensor rendering changes above. + // ---------------------------------------------------------------------------- pub fn selected_tensor_slice<'a, T: Copy>( diff --git a/crates/viewer/re_view_tensor/src/visualizer_system.rs b/crates/viewer/re_view_tensor/src/visualizer_system.rs index 39b5e1975550..4285103408b9 100644 --- a/crates/viewer/re_view_tensor/src/visualizer_system.rs +++ b/crates/viewer/re_view_tensor/src/visualizer_system.rs @@ -1,11 +1,14 @@ +use std::sync::Arc; + use re_chunk_store::{LatestAtQuery, RowId}; use re_sdk_types::Archetype as _; use re_sdk_types::archetypes::Tensor; -use re_sdk_types::components::{TensorData, ValueRange}; +use re_sdk_types::components::{Opacity, TensorData, ValueRange}; use re_view::{RangeResultsExt as _, latest_at_with_blueprint_resolved_data}; use re_viewer_context::{ - IdentifiedViewSystem, ViewContext, ViewContextCollection, ViewQuery, ViewSystemExecutionError, - VisualizerExecutionOutput, VisualizerQueryInfo, VisualizerSystem, typed_fallback_for, + AnnotationMap, Annotations, IdentifiedViewSystem, ViewContext, ViewContextCollection, ViewQuery, + ViewSystemExecutionError, VisualizerExecutionOutput, VisualizerQueryInfo, VisualizerSystem, + typed_fallback_for, }; #[derive(Clone)] @@ -13,6 +16,8 @@ pub struct TensorVisualization { pub tensor_row_id: RowId, pub tensor: TensorData, pub data_range: ValueRange, + pub annotations: Arc, + pub opacity: f32, }
#[derive(Default)] @@ -39,14 +44,16 @@ impl VisualizerSystem for TensorSystem { ) -> Result { re_tracing::profile_function!(); - for data_result in query.iter_visible_data_results(Self::identifier()) { - let timeline_query = LatestAtQuery::new(query.timeline, query.latest_at); + let timeline_query = LatestAtQuery::new(query.timeline, query.latest_at); + let mut annotation_map = AnnotationMap::default(); + annotation_map.load(ctx.viewer_ctx, &timeline_query); - let annotations = None; + for data_result in query.iter_visible_data_results(Self::identifier()) { + let annotations = annotation_map.find(&data_result.entity_path); let query_shadowed_defaults = false; let results = latest_at_with_blueprint_resolved_data( ctx, - annotations, + Some(&annotations), &timeline_query, data_result, Tensor::all_component_identifiers(), @@ -66,10 +73,13 @@ impl VisualizerSystem for TensorSystem { .zip(chunk.iter_component::()) }); let all_ranges = results.iter_as(timeline, Tensor::descriptor_value_range().component); + let all_opacities = results.iter_as(timeline, Tensor::descriptor_opacity().component); - for ((_, tensor_row_id), tensors, data_ranges) in - re_query::range_zip_1x1(all_tensors_indexed, all_ranges.slice::<[f64; 2]>()) - { + for ((_, tensor_row_id), tensors, data_ranges, opacities) in re_query::range_zip_1x2( + all_tensors_indexed, + all_ranges.slice::<[f64; 2]>(), + all_opacities.slice::(), + ) { let Some(tensor) = tensors.first() else { continue; }; @@ -87,10 +97,21 @@ impl VisualizerSystem for TensorSystem { ) }); + let opacity = opacities + .and_then(|ops| ops.first().copied().map(Opacity::from)) + .unwrap_or_else(|| { + typed_fallback_for( + &ctx.query_context(data_result, &query.latest_at_query()), + Tensor::descriptor_opacity().component, + ) + }); + self.tensors.push(TensorVisualization { tensor_row_id, tensor: tensor.clone(), data_range, + annotations: annotations.clone(), + opacity: *opacity.0, }); } } diff --git 
a/crates/viewer/re_viewer_context/src/gpu_bridge/colormap.rs b/crates/viewer/re_viewer_context/src/gpu_bridge/colormap.rs index a20f1a099d51..d5ce2ecac691 100644 --- a/crates/viewer/re_viewer_context/src/gpu_bridge/colormap.rs +++ b/crates/viewer/re_viewer_context/src/gpu_bridge/colormap.rs @@ -62,6 +62,7 @@ fn colormap_preview_ui( colormapped_texture, egui::TextureOptions::LINEAR, debug_name.into(), + None, )?; Ok(response) diff --git a/crates/viewer/re_viewer_context/src/gpu_bridge/mod.rs b/crates/viewer/re_viewer_context/src/gpu_bridge/mod.rs index 19312670fd10..b44a7ac39554 100644 --- a/crates/viewer/re_viewer_context/src/gpu_bridge/mod.rs +++ b/crates/viewer/re_viewer_context/src/gpu_bridge/mod.rs @@ -106,6 +106,7 @@ pub fn render_image( colormapped_texture: ColormappedTexture, texture_options: egui::TextureOptions, debug_name: re_renderer::DebugLabel, + multiplicative_tint: Option, ) -> anyhow::Result<()> { re_tracing::profile_function!(); @@ -134,7 +135,7 @@ pub fn render_image( egui::TextureFilter::Nearest => TextureFilterMin::Nearest, egui::TextureFilter::Linear => TextureFilterMin::Linear, }, - multiplicative_tint: egui::Rgba::WHITE, + multiplicative_tint: multiplicative_tint.unwrap_or(egui::Rgba::WHITE), ..Default::default() }, }; diff --git a/docs/content/reference/types/archetypes/tensor.md b/docs/content/reference/types/archetypes/tensor.md index e5fadd2f48ba..1c3119d7f862 100644 --- a/docs/content/reference/types/archetypes/tensor.md +++ b/docs/content/reference/types/archetypes/tensor.md @@ -11,6 +11,7 @@ An N-dimensional array of numbers. 
### Optional * `value_range`: [`ValueRange`](../components/value_range.md) +* `opacity`: [`Opacity`](../components/opacity.md) ## Can be shown in diff --git a/docs/content/reference/types/components/opacity.md b/docs/content/reference/types/components/opacity.md index 9c8b78aaf72a..b2439ff793d4 100644 --- a/docs/content/reference/types/components/opacity.md +++ b/docs/content/reference/types/components/opacity.md @@ -28,5 +28,6 @@ float32 * [`EncodedImage`](../archetypes/encoded_image.md) * [`Image`](../archetypes/image.md) * [`SegmentationImage`](../archetypes/segmentation_image.md) +* [`Tensor`](../archetypes/tensor.md) * [`VideoFrameReference`](../archetypes/video_frame_reference.md) * [`VideoStream`](../archetypes/video_stream.md) diff --git a/rerun_cpp/src/rerun/archetypes/tensor.cpp b/rerun_cpp/src/rerun/archetypes/tensor.cpp index 27089dc151e4..579b4ef8d085 100644 --- a/rerun_cpp/src/rerun/archetypes/tensor.cpp +++ b/rerun_cpp/src/rerun/archetypes/tensor.cpp @@ -13,18 +13,23 @@ namespace rerun::archetypes { archetype.value_range = ComponentBatch::empty(Descriptor_value_range) .value_or_throw(); + archetype.opacity = + ComponentBatch::empty(Descriptor_opacity).value_or_throw(); return archetype; } Collection Tensor::columns(const Collection& lengths_) { std::vector columns; - columns.reserve(2); + columns.reserve(3); if (data.has_value()) { columns.push_back(data.value().partitioned(lengths_).value_or_throw()); } if (value_range.has_value()) { columns.push_back(value_range.value().partitioned(lengths_).value_or_throw()); } + if (opacity.has_value()) { + columns.push_back(opacity.value().partitioned(lengths_).value_or_throw()); + } return columns; } @@ -35,6 +40,9 @@ namespace rerun::archetypes { if (value_range.has_value()) { return columns(std::vector(value_range.value().length(), 1)); } + if (opacity.has_value()) { + return columns(std::vector(opacity.value().length(), 1)); + } return Collection(); } } // namespace rerun::archetypes @@ -46,7 +54,7 @@ 
namespace rerun { ) { using namespace archetypes; std::vector cells; - cells.reserve(2); + cells.reserve(3); if (archetype.data.has_value()) { cells.push_back(archetype.data.value()); @@ -54,6 +62,9 @@ namespace rerun { if (archetype.value_range.has_value()) { cells.push_back(archetype.value_range.value()); } + if (archetype.opacity.has_value()) { + cells.push_back(archetype.opacity.value()); + } return rerun::take_ownership(std::move(cells)); } diff --git a/rerun_cpp/src/rerun/archetypes/tensor.hpp b/rerun_cpp/src/rerun/archetypes/tensor.hpp index 1a0239dba80b..9ab76a30976b 100644 --- a/rerun_cpp/src/rerun/archetypes/tensor.hpp +++ b/rerun_cpp/src/rerun/archetypes/tensor.hpp @@ -6,6 +6,7 @@ #include "../collection.hpp" #include "../component_batch.hpp" #include "../component_column.hpp" +#include "../components/opacity.hpp" #include "../components/tensor_data.hpp" #include "../components/value_range.hpp" #include "../result.hpp" @@ -68,6 +69,11 @@ namespace rerun::archetypes { /// the Viewer will guess that the data likely came from an 8bit image, thus assuming a range of 0-255. std::optional value_range; + /// Opacity of the tensor for 2D views. + /// + /// Only applied when the tensor is displayed as a 2D slice. + std::optional opacity; + public: /// The name of the archetype as used in `ComponentDescriptor`s. static constexpr const char ArchetypeName[] = "rerun.archetypes.Tensor"; @@ -81,6 +87,10 @@ namespace rerun::archetypes { ArchetypeName, "Tensor:value_range", Loggable::ComponentType ); + /// `ComponentDescriptor` for the `opacity` field. + static constexpr auto Descriptor_opacity = ComponentDescriptor( + ArchetypeName, "Tensor:opacity", Loggable::ComponentType + ); public: // START of extensions from tensor_ext.cpp: RR_DISABLE_MAYBE_UNINITIALIZED_PUSH @@ -174,6 +184,23 @@ namespace rerun::archetypes { return std::move(*this); } + /// Opacity of the tensor for 2D views. + /// + /// Only applied when the tensor is displayed as a 2D slice. 
+ Tensor with_opacity(const rerun::components::Opacity& _opacity) && { + opacity = ComponentBatch::from_loggable(_opacity, Descriptor_opacity).value_or_throw(); + return std::move(*this); + } + + /// This method makes it possible to pack multiple `opacity` in a single component batch. + /// + /// This only makes sense when used in conjunction with `columns`. `with_opacity` should + /// be used when logging a single row's worth of data. + Tensor with_many_opacity(const Collection& _opacity) && { + opacity = ComponentBatch::from_loggable(_opacity, Descriptor_opacity).value_or_throw(); + return std::move(*this); + } + /// Partitions the component data into multiple sub-batches. /// /// Specifically, this transforms the existing `ComponentBatch` data into `ComponentColumn`s diff --git a/rerun_py/rerun_sdk/rerun/archetypes/tensor.py b/rerun_py/rerun_sdk/rerun/archetypes/tensor.py index 35d33495ae9e..654a04a7112c 100644 --- a/rerun_py/rerun_sdk/rerun/archetypes/tensor.py +++ b/rerun_py/rerun_sdk/rerun/archetypes/tensor.py @@ -62,6 +62,7 @@ def __attrs_clear__(self) -> None: self.__attrs_init__( data=None, value_range=None, + opacity=None, ) @classmethod @@ -78,6 +79,7 @@ def from_fields( clear_unset: bool = False, data: datatypes.TensorDataLike | None = None, value_range: datatypes.Range1DLike | None = None, + opacity: datatypes.Float32Like | None = None, ) -> Tensor: """ Update only some specific fields of a `Tensor`. @@ -100,6 +102,10 @@ def from_fields( in the contents of the tensor. E.g. if all values are positive, some bigger than 1.0 and all smaller than 255.0, the Viewer will guess that the data likely came from an 8bit image, thus assuming a range of 0-255. + opacity: + Opacity of the tensor for 2D views. + + Only applied when the tensor is displayed as a 2D slice. 
""" @@ -108,6 +114,7 @@ def from_fields( kwargs = { "data": data, "value_range": value_range, + "opacity": opacity, } if clear_unset: @@ -130,6 +137,7 @@ def columns( *, data: datatypes.TensorDataArrayLike | None = None, value_range: datatypes.Range1DArrayLike | None = None, + opacity: datatypes.Float32ArrayLike | None = None, ) -> ComponentColumnList: """ Construct a new column-oriented component bundle. @@ -155,6 +163,10 @@ def columns( in the contents of the tensor. E.g. if all values are positive, some bigger than 1.0 and all smaller than 255.0, the Viewer will guess that the data likely came from an 8bit image, thus assuming a range of 0-255. + opacity: + Opacity of the tensor for 2D views. + + Only applied when the tensor is displayed as a 2D slice. """ @@ -163,13 +175,14 @@ def columns( inst.__attrs_init__( data=data, value_range=value_range, + opacity=opacity, ) batches = inst.as_component_batches() if len(batches) == 0: return ComponentColumnList([]) - kwargs = {"Tensor:data": data, "Tensor:value_range": value_range} + kwargs = {"Tensor:data": data, "Tensor:value_range": value_range, "Tensor:opacity": opacity} columns = [] for batch in batches: @@ -227,5 +240,16 @@ def columns( # # (Docstring intentionally commented out to hide this field from the docs) + opacity: components.OpacityBatch | None = field( + metadata={"component": True}, + default=None, + converter=components.OpacityBatch._converter, # type: ignore[misc] + ) + # Opacity of the tensor for 2D views. + # + # Only applied when the tensor is displayed as a 2D slice. 
+ # + # (Docstring intentionally commented out to hide this field from the docs) + __str__ = Archetype.__str__ __repr__ = Archetype.__repr__ # type: ignore[assignment] diff --git a/rerun_py/rerun_sdk/rerun/archetypes/tensor_ext.py b/rerun_py/rerun_sdk/rerun/archetypes/tensor_ext.py index b7711769004e..e24c5337217a 100644 --- a/rerun_py/rerun_sdk/rerun/archetypes/tensor_ext.py +++ b/rerun_py/rerun_sdk/rerun/archetypes/tensor_ext.py @@ -8,6 +8,7 @@ from collections.abc import Sequence from rerun.datatypes.range1d import Range1DLike + from rerun.datatypes.float32 import Float32Like from ..datatypes import TensorDataLike from ..datatypes.tensor_data_ext import TensorLike @@ -22,6 +23,7 @@ def __init__( *, dim_names: Sequence[str] | None = None, value_range: Range1DLike | None = None, + opacity: Float32Like | None = None, ) -> None: """ Construct a `Tensor` archetype. @@ -46,6 +48,10 @@ def __init__( The range of values to use for colormapping. If not specified, the range will be estimated from the data. + opacity: + Opacity of the tensor for 2D views. + + Only applied when the tensor is displayed as a 2D slice. 
""" from ..datatypes import TensorData @@ -56,7 +62,7 @@ def __init__( elif dim_names is not None: data = TensorData(buffer=data.buffer, dim_names=dim_names) - self.__attrs_init__(data=data, value_range=value_range) + self.__attrs_init__(data=data, value_range=value_range, opacity=opacity) return self.__attrs_clear__() From 05e2bb08fb76faad0646185a89c522a5a54dee97 Mon Sep 17 00:00:00 2001 From: Daniel Gomez Date: Mon, 5 Jan 2026 17:46:39 -0500 Subject: [PATCH 2/6] implement rudimentary hover --- crates/viewer/re_view_tensor/src/lib.rs | 1 + .../re_view_tensor/src/tensor_slice_hover.rs | 237 ++++++++++++++++++ .../viewer/re_view_tensor/src/view_class.rs | 16 +- .../re_view_tensor/src/visualizer_system.rs | 3 + 4 files changed, 256 insertions(+), 1 deletion(-) create mode 100644 crates/viewer/re_view_tensor/src/tensor_slice_hover.rs diff --git a/crates/viewer/re_view_tensor/src/lib.rs b/crates/viewer/re_view_tensor/src/lib.rs index 68b6af74fa29..9aa13e14f9bd 100644 --- a/crates/viewer/re_view_tensor/src/lib.rs +++ b/crates/viewer/re_view_tensor/src/lib.rs @@ -4,6 +4,7 @@ mod dimension_mapping; mod tensor_dimension_mapper; +mod tensor_slice_hover; mod tensor_slice_to_gpu; mod view_class; mod visualizer_system; diff --git a/crates/viewer/re_view_tensor/src/tensor_slice_hover.rs b/crates/viewer/re_view_tensor/src/tensor_slice_hover.rs new file mode 100644 index 000000000000..e1fb969d88fd --- /dev/null +++ b/crates/viewer/re_view_tensor/src/tensor_slice_hover.rs @@ -0,0 +1,237 @@ +use re_data_ui::item_ui; +use re_sdk_types::components::TensorData; +use re_sdk_types::tensor_data::{TensorDataType, TensorElement}; +use re_viewer_context::{Annotations, ViewerContext}; + +use crate::{dimension_mapping::TensorSliceSelection, view_class::selected_tensor_slice, visualizer_system::TensorVisualization}; + +pub fn show_tensor_hover_ui( + ctx: &ViewerContext<'_>, + ui: &mut egui::Ui, + tensors: &[TensorVisualization], + slice_selection: &TensorSliceSelection, + image_rect: 
egui::Rect, + pointer_pos: egui::Pos2, +) { + let Some(first_tensor) = tensors.first() else { + return; + }; + + let Some((height, width)) = get_tensor_slice_shape(&first_tensor.tensor, slice_selection) else { + return; + }; + + let x_float = (pointer_pos.x - image_rect.min.x) / image_rect.width() * (width as f32); + let y_float = (pointer_pos.y - image_rect.min.y) / image_rect.height() * (height as f32); + + let x = x_float.floor() as isize; + let y = y_float.floor() as isize; + + if x < 0 || y < 0 || x >= width as isize || y >= height as isize { + return; + } + + let x = x as usize; + let y = y as usize; + + egui::Grid::new("tensor_hover_ui") + .num_columns(2) + .show(ui, |ui| { + ui.label("Position:"); + ui.monospace(format!("{x}, {y}")); + ui.end_row(); + + for tensor_view in tensors { + let TensorVisualization { + entity_path, + tensor, + annotations, + .. + } = tensor_view; + + ui.separator(); + ui.end_row(); + + ui.label(entity_path.to_string()); + + if let Some((label, value_text)) = + get_tensor_value_text(tensor, slice_selection, x, y) + { + ui.label(label); + ui.monospace(value_text); + ui.end_row(); + } + + // If integer, check annotations, and show it in a separate row. + if let Some(value) = get_tensor_value_at(tensor, slice_selection, x, y) { + if let Some(label) = get_annotation_label(value, annotations) { + ui.label("Label:"); + ui.label(label); + ui.end_row(); + } + } + } + }); +} + +fn get_tensor_slice_shape( + tensor: &TensorData, + slice_selection: &TensorSliceSelection, +) -> Option<(usize, usize)> { + // The slice shape depends on the mapping. + // The most robust way to get the shape is to create a dummy slice. + macro_rules! 
get_shape { + ($T:ty) => {{ + let view = ndarray::ArrayViewD::<$T>::try_from(&tensor.0).ok()?; + let slice = selected_tensor_slice(slice_selection, &view); + let shape = slice.shape(); + if shape.len() >= 2 { + Some((shape[0], shape[1])) + } else { + None + } + }}; + } + + match tensor.dtype() { + TensorDataType::U8 => get_shape!(u8), + TensorDataType::U16 => get_shape!(u16), + TensorDataType::U32 => get_shape!(u32), + TensorDataType::U64 => get_shape!(u64), + TensorDataType::I8 => get_shape!(i8), + TensorDataType::I16 => get_shape!(i16), + TensorDataType::I32 => get_shape!(i32), + TensorDataType::I64 => get_shape!(i64), + TensorDataType::F16 => get_shape!(half::f16), + TensorDataType::F32 => get_shape!(f32), + TensorDataType::F64 => get_shape!(f64), + } +} + +fn get_tensor_value_text( + tensor: &TensorData, + slice_selection: &TensorSliceSelection, + x: usize, + y: usize, +) -> Option<(String, String)> { + macro_rules! get_text { + ($T:ty, $variant:ident) => {{ + let view = ndarray::ArrayViewD::<$T>::try_from(&tensor.0).ok()?; + let slice = selected_tensor_slice(slice_selection, &view); + + if slice.ndim() == 2 { + let slice = slice.into_dimensionality::().ok()?; + if x >= slice.shape()[1] || y >= slice.shape()[0] { + return None; + } + let value = slice[[y, x]]; + Some(( + "Val:".to_owned(), + format_tensor_element(TensorElement::$variant(value)), + )) + } else if slice.ndim() == 3 { + let slice = slice.into_dimensionality::().ok()?; + if x >= slice.shape()[1] || y >= slice.shape()[0] { + return None; + } + + let num_channels = slice.shape()[2]; + if num_channels == 3 || num_channels == 4 { + let mut elements = Vec::new(); + for c in 0..num_channels { + elements.push(TensorElement::$variant(slice[[y, x, c]])); + } + Some(format_pixel_value(elements)) + } else { + None // Not a color image + } + } else { + None + } + }}; + } + + match tensor.dtype() { + TensorDataType::U8 => get_text!(u8, U8), + TensorDataType::U16 => get_text!(u16, U16), + TensorDataType::U32 => 
get_text!(u32, U32), + TensorDataType::U64 => get_text!(u64, U64), + TensorDataType::I8 => get_text!(i8, I8), + TensorDataType::I16 => get_text!(i16, I16), + TensorDataType::I32 => get_text!(i32, I32), + TensorDataType::I64 => get_text!(i64, I64), + TensorDataType::F16 => get_text!(half::f16, F16), + TensorDataType::F32 => get_text!(f32, F32), + TensorDataType::F64 => get_text!(f64, F64), + } +} + +fn format_tensor_element(el: TensorElement) -> String { + el.format_padded() +} + +fn format_pixel_value(elements: Vec) -> (String, String) { + let values_str = elements + .iter() + .map(|e| e.format_padded()) + .collect::>() + .join(", "); + + if elements.len() == 3 { + ("RGB:".to_owned(), values_str) + } else if elements.len() == 4 { + ("RGBA:".to_owned(), values_str) + } else { + ("Val:".to_owned(), values_str) + } +} + +fn get_tensor_value_at( + tensor: &TensorData, + slice_selection: &TensorSliceSelection, + x: usize, + y: usize, +) -> Option { + macro_rules! get_value { + ($T:ty, $variant:ident) => {{ + let view = ndarray::ArrayViewD::<$T>::try_from(&tensor.0).ok()?; + let slice = selected_tensor_slice(slice_selection, &view); + let slice = slice.into_dimensionality::().ok()?; + if x >= slice.shape()[1] || y >= slice.shape()[0] { + return None; + } + Some(TensorElement::$variant(slice[[y, x]])) + }}; + } + + match tensor.dtype() { + TensorDataType::U8 => get_value!(u8, U8), + TensorDataType::U16 => get_value!(u16, U16), + TensorDataType::U32 => get_value!(u32, U32), + TensorDataType::U64 => get_value!(u64, U64), + TensorDataType::I8 => get_value!(i8, I8), + TensorDataType::I16 => get_value!(i16, I16), + TensorDataType::I32 => get_value!(i32, I32), + TensorDataType::I64 => get_value!(i64, I64), + TensorDataType::F16 => get_value!(half::f16, F16), + TensorDataType::F32 => get_value!(f32, F32), + TensorDataType::F64 => get_value!(f64, F64), + } +} + +fn get_annotation_label(value: TensorElement, annotations: &Annotations) -> Option { + let class_id = match value { + 
TensorElement::U8(v) => Some(v as u16), + TensorElement::U16(v) => Some(v), + TensorElement::U32(v) => u16::try_from(v).ok(), + TensorElement::U64(v) => u16::try_from(v).ok(), + TensorElement::I8(v) if v >= 0 => Some(v as u16), + TensorElement::I16(v) if v >= 0 => Some(v as u16), + TensorElement::I32(v) => u16::try_from(v).ok(), + TensorElement::I64(v) => u16::try_from(v).ok(), + _ => None, + }?; + + let desc = annotations.resolved_class_description(Some(re_sdk_types::components::ClassId::from(class_id))); + desc.annotation_info().label(None) +} \ No newline at end of file diff --git a/crates/viewer/re_view_tensor/src/view_class.rs b/crates/viewer/re_view_tensor/src/view_class.rs index b0c8069f4d25..548941846891 100644 --- a/crates/viewer/re_view_tensor/src/view_class.rs +++ b/crates/viewer/re_view_tensor/src/view_class.rs @@ -340,7 +340,20 @@ impl TensorView { ) -> anyhow::Result<()> { let (response, image_rect) = Self::paint_tensor_slice(ctx, ui, tensors, slice_selection)?; - if !response.hovered() { + if response.hovered() { + if let Some(pointer_pos) = ui.input(|i| i.pointer.hover_pos()) { + response.on_hover_ui_at_pointer(|ui| { + crate::tensor_slice_hover::show_tensor_hover_ui( + ctx.viewer_ctx, + ui, + tensors, + slice_selection, + image_rect, + pointer_pos, + ); + }); + } + } else { let font_id = egui::TextStyle::Body.resolve(ui.style()); paint_axis_names(ui, image_rect, font_id, dimension_labels); } @@ -438,6 +451,7 @@ impl TensorView { data_range, annotations, opacity, + .. 
} = tensor_view; let colormap_with_range = ColormapWithRange { diff --git a/crates/viewer/re_view_tensor/src/visualizer_system.rs b/crates/viewer/re_view_tensor/src/visualizer_system.rs index 4285103408b9..e6470435bd0c 100644 --- a/crates/viewer/re_view_tensor/src/visualizer_system.rs +++ b/crates/viewer/re_view_tensor/src/visualizer_system.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use re_chunk_store::{LatestAtQuery, RowId}; +use re_log_types::EntityPath; use re_sdk_types::Archetype as _; use re_sdk_types::archetypes::Tensor; use re_sdk_types::components::{Opacity, TensorData, ValueRange}; @@ -13,6 +14,7 @@ use re_viewer_context::{ #[derive(Clone)] pub struct TensorVisualization { + pub entity_path: EntityPath, pub tensor_row_id: RowId, pub tensor: TensorData, pub data_range: ValueRange, @@ -107,6 +109,7 @@ impl VisualizerSystem for TensorSystem { }); self.tensors.push(TensorVisualization { + entity_path: data_result.entity_path.clone(), tensor_row_id, tensor: tensor.clone(), data_range, From 97cd61c3fcfb0af0a0cfb7ff27f7d4825d06de7a Mon Sep 17 00:00:00 2001 From: Daniel Gomez Date: Tue, 6 Jan 2026 15:25:34 -0500 Subject: [PATCH 3/6] improve hover to more closely match rr.Image --- .../src/actions/add_entities_to_new_view.rs | 68 +- .../re_view_tensor/src/tensor_slice_hover.rs | 303 ++++-- .../viewer/re_view_tensor/src/view_class.rs | 981 +++++++++--------- 3 files changed, 781 insertions(+), 571 deletions(-) diff --git a/crates/viewer/re_context_menu/src/actions/add_entities_to_new_view.rs b/crates/viewer/re_context_menu/src/actions/add_entities_to_new_view.rs index 389adbb85c81..ff4759c9603f 100644 --- a/crates/viewer/re_context_menu/src/actions/add_entities_to_new_view.rs +++ b/crates/viewer/re_context_menu/src/actions/add_entities_to_new_view.rs @@ -2,7 +2,9 @@ use egui::{Response, Ui}; use itertools::Itertools as _; use nohash_hasher::IntSet; use re_log_types::{EntityPath, EntityPathFilter, EntityPathRule, RuleEffect}; +use re_sdk_types::View as _; use 
re_sdk_types::ViewClassIdentifier; +use re_sdk_types::blueprint::views::TensorView; use re_ui::UiExt as _; use re_viewer_context::{Item, RecommendedView, SystemCommand, SystemCommandSender as _}; use re_viewport_blueprint::ViewBlueprint; @@ -126,15 +128,14 @@ fn create_view_for_selected_entities( .filter_map(|(item, _)| item.entity_path().cloned()) .collect::>(); - let origin = ctx + let view_class = ctx .viewer_context .view_class_registry() - .get_class_or_log_error(identifier) + .get_class_or_log_error(identifier); + let origin = view_class .recommended_origin_for_entities(&entities_of_interest, ctx.viewer_context.recording()) .unwrap_or_else(EntityPath::root); - let mut query_filter = EntityPathFilter::default(); - let target_container_id = ctx .clicked_item_enclosing_container_id_and_position() .map(|(id, _)| id); @@ -143,25 +144,48 @@ fn create_view_for_selected_entities( // relative to the origin. This makes sense since if you create a view and // then change the origin you likely wanted those entities to still be there. 
- #[expect(clippy::iter_over_hash_type)] // Order of rule insertion does not matter here - for path in entities_of_interest { - query_filter.insert_rule( - RuleEffect::Include, - EntityPathRule::including_entity_subtree(&path), - ); - } - let recommended = RecommendedView { - origin, - query_filter, - }; + if identifier == TensorView::identifier() && entities_of_interest.len() > 1 { + let mut entities = entities_of_interest.into_iter().collect::>(); + entities.sort(); - let view = ViewBlueprint::new(identifier, recommended); - let view_id = view.id; - ctx.viewport_blueprint - .add_views(std::iter::once(view), target_container_id, None); - ctx.viewer_context - .command_sender() - .send_system(SystemCommand::set_selection(Item::View(view_id))); + let mut views = Vec::with_capacity(entities.len()); + for entity_path in entities { + views.push(ViewBlueprint::new( + identifier, + RecommendedView::new_single_entity(entity_path), + )); + } + + if let Some(view_id) = views.first().map(|view| view.id) { + ctx.viewport_blueprint + .add_views(views.into_iter(), target_container_id, None); + ctx.viewer_context + .command_sender() + .send_system(SystemCommand::set_selection(Item::View(view_id))); + } + } else { + let mut query_filter = EntityPathFilter::default(); + + #[expect(clippy::iter_over_hash_type)] // Order of rule insertion does not matter here + for path in entities_of_interest { + query_filter.insert_rule( + RuleEffect::Include, + EntityPathRule::including_entity_subtree(&path), + ); + } + let recommended = RecommendedView { + origin, + query_filter, + }; + + let view = ViewBlueprint::new(identifier, recommended); + let view_id = view.id; + ctx.viewport_blueprint + .add_views(std::iter::once(view), target_container_id, None); + ctx.viewer_context + .command_sender() + .send_system(SystemCommand::set_selection(Item::View(view_id))); + } ctx.viewport_blueprint .mark_user_interaction(ctx.viewer_context); } diff --git 
a/crates/viewer/re_view_tensor/src/tensor_slice_hover.rs b/crates/viewer/re_view_tensor/src/tensor_slice_hover.rs index e1fb969d88fd..b19a3701190d 100644 --- a/crates/viewer/re_view_tensor/src/tensor_slice_hover.rs +++ b/crates/viewer/re_view_tensor/src/tensor_slice_hover.rs @@ -1,23 +1,35 @@ use re_data_ui::item_ui; -use re_sdk_types::components::TensorData; -use re_sdk_types::tensor_data::{TensorDataType, TensorElement}; -use re_viewer_context::{Annotations, ViewerContext}; +use re_sdk_types::{ + components::TensorData, + tensor_data::{TensorDataType, TensorElement}, +}; +use re_ui::UiExt as _; +use re_viewer_context::{Annotations, ViewContext}; -use crate::{dimension_mapping::TensorSliceSelection, view_class::selected_tensor_slice, visualizer_system::TensorVisualization}; +use crate::{ + dimension_mapping::TensorSliceSelection, view_class::selected_tensor_slice, + visualizer_system::TensorVisualization, +}; + +const ZOOMED_IMAGE_TEXEL_RADIUS: i64 = 10; +const POINTS_PER_TEXEL: f32 = 5.0; pub fn show_tensor_hover_ui( - ctx: &ViewerContext<'_>, + ctx: &ViewContext<'_>, ui: &mut egui::Ui, tensors: &[TensorVisualization], slice_selection: &TensorSliceSelection, image_rect: egui::Rect, pointer_pos: egui::Pos2, + _mag_filter: re_sdk_types::components::MagnificationFilter, ) { let Some(first_tensor) = tensors.first() else { return; }; - let Some((height, width)) = get_tensor_slice_shape(&first_tensor.tensor, slice_selection) else { + let Some((height, width)) = + crate::view_class::tensor_slice_shape(&first_tensor.tensor, slice_selection) + else { return; }; @@ -34,78 +46,139 @@ pub fn show_tensor_hover_ui( let x = x as usize; let y = y as usize; - egui::Grid::new("tensor_hover_ui") - .num_columns(2) - .show(ui, |ui| { - ui.label("Position:"); - ui.monospace(format!("{x}, {y}")); - ui.end_row(); - - for tensor_view in tensors { - let TensorVisualization { - entity_path, - tensor, - annotations, - .. 
- } = tensor_view; + let mag_filter = re_sdk_types::components::MagnificationFilter::Nearest; + let (textured_rects, texture_filter_magnification) = + match crate::view_class::create_textured_rects_for_batch( + ctx, + tensors, + slice_selection, + mag_filter, + ) { + Ok(textured_rects) => textured_rects, + Err(err) => { + ui.error_with_details_on_hover(err.to_string()); + return; + } + }; - ui.separator(); - ui.end_row(); + let zoom_result = ui + .horizontal(|ui| { + let zoom_result = show_zoomed_image_region( + ui, + ctx, + &textured_rects, + (x, y), + texture_filter_magnification, + ); - ui.label(entity_path.to_string()); + ui.separator(); - if let Some((label, value_text)) = - get_tensor_value_text(tensor, slice_selection, x, y) - { - ui.label(label); - ui.monospace(value_text); - ui.end_row(); + ui.vertical(|ui| { + ui.style_mut().wrap_mode = Some(egui::TextWrapMode::Extend); + tensor_hover_value_ui(ctx, ui, tensors, slice_selection, x, y); + if let Err(err) = show_single_pixel_sample( + ui, + ctx, + &textured_rects, + (x, y), + texture_filter_magnification, + ) { + ui.error_with_details_on_hover(err.to_string()); } + }); - // If integer, check annotations, and show it in a separate row. - if let Some(value) = get_tensor_value_at(tensor, slice_selection, x, y) { - if let Some(label) = get_annotation_label(value, annotations) { - ui.label("Label:"); - ui.label(label); - ui.end_row(); - } - } - } - }); -} + zoom_result + }) + .inner; -fn get_tensor_slice_shape( - tensor: &TensorData, - slice_selection: &TensorSliceSelection, -) -> Option<(usize, usize)> { - // The slice shape depends on the mapping. - // The most robust way to get the shape is to create a dummy slice. - macro_rules! 
get_shape { - ($T:ty) => {{ - let view = ndarray::ArrayViewD::<$T>::try_from(&tensor.0).ok()?; - let slice = selected_tensor_slice(slice_selection, &view); - let shape = slice.shape(); - if shape.len() >= 2 { - Some((shape[0], shape[1])) - } else { - None - } - }}; + if let Err(err) = zoom_result { + ui.error_with_details_on_hover(err.to_string()); } +} - match tensor.dtype() { - TensorDataType::U8 => get_shape!(u8), - TensorDataType::U16 => get_shape!(u16), - TensorDataType::U32 => get_shape!(u32), - TensorDataType::U64 => get_shape!(u64), - TensorDataType::I8 => get_shape!(i8), - TensorDataType::I16 => get_shape!(i16), - TensorDataType::I32 => get_shape!(i32), - TensorDataType::I64 => get_shape!(i64), - TensorDataType::F16 => get_shape!(half::f16), - TensorDataType::F32 => get_shape!(f32), - TensorDataType::F64 => get_shape!(f64), - } +#[allow(clippy::too_many_arguments)] +fn show_zoomed_image_region( + ui: &mut egui::Ui, + ctx: &ViewContext<'_>, + textured_rects: &[re_renderer::renderer::TexturedRect], + (center_x, center_y): (usize, usize), + texture_filter_magnification: re_renderer::renderer::TextureFilterMag, +) -> anyhow::Result<()> { + use crate::view_class::render_tensor_slice_batch; + + let zoom_rect_size = + egui::Vec2::splat(((ZOOMED_IMAGE_TEXEL_RADIUS * 2 + 1) as f32) * POINTS_PER_TEXEL); + let (_id, zoom_rect) = ui.allocate_space(zoom_rect_size); + + let painter = ui.painter(); + painter.rect_filled(zoom_rect, 0.0, ui.visuals().extreme_bg_color); + + let space_rect = egui::Rect::from_min_max( + egui::pos2( + center_x as f32 - ZOOMED_IMAGE_TEXEL_RADIUS as f32, + center_y as f32 - ZOOMED_IMAGE_TEXEL_RADIUS as f32, + ), + egui::pos2( + center_x as f32 + ZOOMED_IMAGE_TEXEL_RADIUS as f32 + 1.0, + center_y as f32 + ZOOMED_IMAGE_TEXEL_RADIUS as f32 + 1.0, + ), + ); + + render_tensor_slice_batch( + ctx, + &painter.with_clip_rect(zoom_rect), + textured_rects, + zoom_rect, + space_rect, + texture_filter_magnification, + )?; + + let center_texel_rect = + 
egui::Rect::from_center_size(zoom_rect.center(), egui::Vec2::splat(POINTS_PER_TEXEL)); + painter.rect_stroke( + center_texel_rect.expand(1.0), + 0.0, + (1.0, egui::Color32::BLACK), + egui::StrokeKind::Outside, + ); + painter.rect_stroke( + center_texel_rect, + 0.0, + (1.0, egui::Color32::WHITE), + egui::StrokeKind::Outside, + ); + + Ok(()) +} + +fn show_single_pixel_sample( + ui: &mut egui::Ui, + ctx: &ViewContext<'_>, + textured_rects: &[re_renderer::renderer::TexturedRect], + (center_x, center_y): (usize, usize), + texture_filter_magnification: re_renderer::renderer::TextureFilterMag, +) -> anyhow::Result<()> { + use crate::view_class::render_tensor_slice_batch; + + let (rect, _) = ui.allocate_exact_size( + egui::Vec2::splat(ui.available_height()), + egui::Sense::hover(), + ); + let space_rect = egui::Rect::from_min_max( + egui::pos2(center_x as f32, center_y as f32), + egui::pos2(center_x as f32 + 1.0, center_y as f32 + 1.0), + ); + + render_tensor_slice_batch( + ctx, + &ui.painter().with_clip_rect(rect), + textured_rects, + rect, + space_rect, + texture_filter_magnification, + )?; + + Ok(()) } fn get_tensor_value_text( @@ -136,14 +209,20 @@ fn get_tensor_value_text( } let num_channels = slice.shape()[2]; - if num_channels == 3 || num_channels == 4 { + if num_channels == 1 { + let value = slice[[y, x, 0]]; + Some(( + "Val:".to_owned(), + format_tensor_element(TensorElement::$variant(value)), + )) + } else if num_channels == 3 || num_channels == 4 { let mut elements = Vec::new(); for c in 0..num_channels { elements.push(TensorElement::$variant(slice[[y, x, c]])); } Some(format_pixel_value(elements)) } else { - None // Not a color image + None } } else { None @@ -186,6 +265,63 @@ fn format_pixel_value(elements: Vec) -> (String, String) { } } +fn tensor_hover_value_ui( + ctx: &ViewContext<'_>, + ui: &mut egui::Ui, + tensors: &[TensorVisualization], + slice_selection: &TensorSliceSelection, + x: usize, + y: usize, +) { + egui::Grid::new("tensor_hover_ui") + 
.num_columns(2) + .show(ui, |ui| { + ui.label("Position:"); + ui.monospace(format!("{x}, {y}")); + ui.end_row(); + + for tensor_view in tensors { + let TensorVisualization { + entity_path, + tensor, + annotations, + .. + } = tensor_view; + + ui.separator(); + ui.label(""); + ui.end_row(); + + item_ui::entity_path_button( + ctx.viewer_ctx, + &ctx.current_query(), + ctx.recording(), + ui, + Some(ctx.view_id), + entity_path, + ); + ui.label(""); + ui.end_row(); + + if let Some((label, value_text)) = + get_tensor_value_text(tensor, slice_selection, x, y) + { + ui.label(label); + ui.monospace(value_text); + ui.end_row(); + } + + if let Some(value) = get_tensor_value_at(tensor, slice_selection, x, y) { + if let Some(label) = get_annotation_label(value, annotations) { + ui.label("Label:"); + ui.label(label); + ui.end_row(); + } + } + } + }); +} + fn get_tensor_value_at( tensor: &TensorData, slice_selection: &TensorSliceSelection, @@ -196,11 +332,24 @@ fn get_tensor_value_at( ($T:ty, $variant:ident) => {{ let view = ndarray::ArrayViewD::<$T>::try_from(&tensor.0).ok()?; let slice = selected_tensor_slice(slice_selection, &view); - let slice = slice.into_dimensionality::().ok()?; - if x >= slice.shape()[1] || y >= slice.shape()[0] { - return None; + if slice.ndim() == 2 { + let slice = slice.into_dimensionality::().ok()?; + if x >= slice.shape()[1] || y >= slice.shape()[0] { + return None; + } + Some(TensorElement::$variant(slice[[y, x]])) + } else if slice.ndim() == 3 { + let slice = slice.into_dimensionality::().ok()?; + if x >= slice.shape()[1] || y >= slice.shape()[0] { + return None; + } + if slice.shape()[2] != 1 { + return None; + } + Some(TensorElement::$variant(slice[[y, x, 0]])) + } else { + None } - Some(TensorElement::$variant(slice[[y, x]])) }}; } @@ -234,4 +383,4 @@ fn get_annotation_label(value: TensorElement, annotations: &Annotations) -> Opti let desc = annotations.resolved_class_description(Some(re_sdk_types::components::ClassId::from(class_id))); 
desc.annotation_info().label(None) -} \ No newline at end of file +} diff --git a/crates/viewer/re_view_tensor/src/view_class.rs b/crates/viewer/re_view_tensor/src/view_class.rs index 548941846891..dc68385b0bb1 100644 --- a/crates/viewer/re_view_tensor/src/view_class.rs +++ b/crates/viewer/re_view_tensor/src/view_class.rs @@ -11,8 +11,9 @@ use re_renderer::{ use re_sdk_types::blueprint::archetypes::{self, TensorScalarMapping, TensorViewFit}; use re_sdk_types::blueprint::components::ViewFit; use re_sdk_types::components::{ - Colormap, GammaCorrection, MagnificationFilter, TensorDimensionIndexSelection, + Colormap, GammaCorrection, MagnificationFilter, TensorData, TensorDimensionIndexSelection, }; +use re_sdk_types::tensor_data::TensorDataType; use macaw; use re_sdk_types::external::glam; use re_sdk_types::{View as _, ViewClassIdentifier}; @@ -32,6 +33,304 @@ use crate::dimension_mapping::TensorSliceSelection; use crate::tensor_dimension_mapper::dimension_mapping_ui; use crate::visualizer_system::{TensorSystem, TensorVisualization}; +// --- Helper functions for TensorView --- + +pub fn selected_tensor_slice<'a, T: Copy>( + slice_selection: &TensorSliceSelection, + tensor: &'a ndarray::ArrayViewD<'_, T>, +) -> ndarray::ArrayViewD<'a, T> { + let TensorSliceSelection { + width, + height, + indices, + slider: _, + } = slice_selection; + + let (dwidth, dheight) = if let (Some(width), Some(height)) = (width, height) { + (width.dimension, height.dimension) + } else if let Some(width) = width { + // If height is missing, create a 1D row. + (width.dimension, 1) + } else if let Some(height) = height { + // If width is missing, create a 1D column. + (1, height.dimension) + } else { + // If both are missing, give up. + return tensor.view(); + }; + + let view = if tensor.shape().len() == 1 { + // We want 2D slices, so for "pure" 1D tensors add a dimension. + // This is important for above width/height conversion to work since this assumes at least 2 dimensions. 
+ tensor + .view() + .into_shape_with_order(ndarray::IxDyn(&[tensor.len(), 1])) + .expect("Tensor.shape.len() is not actually 1!") + } else { + tensor.view() + }; + + let axis = [dheight as usize, dwidth as usize] + .into_iter() + .chain(indices.iter().map(|s| s.dimension as usize)) + .collect::>(); + let mut slice = view.permuted_axes(axis); + + for index_selection in indices { + // 0 and 1 are width/height, the rest are rearranged by dimension_mapping.selectors + // This call removes Axis(2), so the next iteration of the loop does the right thing again. + slice.index_axis_inplace(Axis(2), index_selection.index as usize); + } + if height.unwrap_or_default().invert { + slice.invert_axis(Axis(0)); + } + if width.unwrap_or_default().invert { + slice.invert_axis(Axis(1)); + } + + slice +} + +pub fn tensor_slice_shape( + tensor: &TensorData, + slice_selection: &TensorSliceSelection, +) -> Option<(usize, usize)> { + macro_rules! get_shape { + ($T:ty) => {{ + let view = ndarray::ArrayViewD::<$T>::try_from(&tensor.0).ok()?; + let slice = selected_tensor_slice(slice_selection, &view); + let shape = slice.shape(); + if shape.len() >= 2 { + Some((shape[0], shape[1])) + } else { + None + } + }}; + } + + match tensor.dtype() { + TensorDataType::U8 => get_shape!(u8), + TensorDataType::U16 => get_shape!(u16), + TensorDataType::U32 => get_shape!(u32), + TensorDataType::U64 => get_shape!(u64), + TensorDataType::I8 => get_shape!(i8), + TensorDataType::I16 => get_shape!(i16), + TensorDataType::I32 => get_shape!(i32), + TensorDataType::I64 => get_shape!(i64), + TensorDataType::F16 => get_shape!(half::f16), + TensorDataType::F32 => get_shape!(f32), + TensorDataType::F64 => get_shape!(f64), + } +} + +fn dimension_name(shape: &[TensorDimension], dim_idx: u32) -> String { + let dim = &shape[dim_idx as usize]; + dim.name.as_ref().map_or_else( + || format!("Dimension {dim_idx} (size={})", dim.size), + |name| format!("{name} (size={})", dim.size), + ) +} + +fn paint_axis_names( + ui: 
&egui::Ui, + rect: egui::Rect, + font_id: egui::FontId, + dimension_labels: [Option<(String, bool)>; 2], +) { + let painter = ui.painter(); + let tokens = ui.tokens(); + + let [width, height] = dimension_labels; + let (width_name, invert_width) = + width.map_or((None, false), |(label, invert)| (Some(label), invert)); + let (height_name, invert_height) = + height.map_or((None, false), |(label, invert)| (Some(label), invert)); + + let text_color = ui.visuals().text_color(); + + let rounding = tokens.normal_corner_radius(); + let inner_margin = rounding as f32; + let outer_margin = 8.0; + + let rect = rect.shrink(outer_margin + inner_margin); + + let paint_text_bg = |text_background, text_rect: egui::Rect| { + painter.set( + text_background, + egui::Shape::rect_filled( + text_rect.expand(inner_margin), + rounding, + ui.visuals().panel_fill, + ), + ); + }; + + // Label for X axis: + if let Some(width_name) = width_name { + let text_background = painter.add(egui::Shape::Noop); + let text_rect = if invert_width { + // On left, pointing left: + let (pos, align) = if invert_height { + (rect.left_bottom(), Align2::LEFT_BOTTOM) + } else { + (rect.left_top(), Align2::LEFT_TOP) + }; + painter.text( + pos, + align, + format!("{width_name} ⬅"), + font_id.clone(), + text_color, + ) + } else { + // On right, pointing right: + let (pos, align) = if invert_height { + (rect.right_bottom(), Align2::RIGHT_BOTTOM) + } else { + (rect.right_top(), Align2::RIGHT_TOP) + }; + painter.text( + pos, + align, + format!("➡ {width_name}"), + font_id.clone(), + text_color, + ) + }; + paint_text_bg(text_background, text_rect); + } + + // Label for Y axis: + if let Some(height_name) = height_name { + let text_background = painter.add(egui::Shape::Noop); + let text_rect = if invert_height { + // On top, pointing up: + let galley = painter.layout_no_wrap(format!("➡ {height_name}"), font_id, text_color); + let galley_size = galley.size(); + let pos = if invert_width { + rect.right_top() + 
egui::vec2(-galley_size.y, galley_size.x) + } else { + rect.left_top() + egui::vec2(0.0, galley_size.x) + }; + painter.add( + TextShape::new(pos, galley, text_color).with_angle(-std::f32::consts::TAU / 4.0), + ); + egui::Rect::from_min_size( + pos - galley_size.x * egui::Vec2::Y, + egui::vec2(galley_size.y, galley_size.x), + ) + } else { + // On bottom, pointing down: + let galley = painter.layout_no_wrap(format!("{height_name} ⬅"), font_id, text_color); + let galley_size = galley.size(); + let pos = if invert_width { + rect.right_bottom() - egui::vec2(galley_size.y, 0.0) + } else { + rect.left_bottom() + }; + painter.add( + TextShape::new(pos, galley, text_color).with_angle(-std::f32::consts::TAU / 4.0), + ); + egui::Rect::from_min_size( + pos - galley_size.x * egui::Vec2::Y, + egui::vec2(galley_size.y, galley_size.x), + ) + }; + paint_text_bg(text_background, text_rect); + } +} + +pub fn index_for_dimension_mut( + indices: &mut [TensorDimensionIndexSelection], + dimension: u32, +) -> Option<&mut u64> { + indices + .iter_mut() + .find(|index| index.dimension == dimension) + .map(|index| &mut index.index) +} + +fn selectors_ui( + ctx: &ViewerContext<'_>, + ui: &mut egui::Ui, + shape: &[TensorDimension], + slice_selection: &TensorSliceSelection, + slice_property: &ViewProperty, +) { + let Some(slider) = &slice_selection.slider else { + return; + }; + + let mut changed_indices = false; + let mut indices = slice_selection.indices.clone(); + + for index_slider in slider { + let dim = &shape[index_slider.dimension as usize]; + let size = dim.size; + if size <= 1 { + continue; + } + + let Some(selector_value) = index_for_dimension_mut(&mut indices, index_slider.dimension) + else { + // There should be an entry already via `load_tensor_slice_selection_and_make_valid` + continue; + }; + + ui.horizontal(|ui| { + let name = dim.name.clone().map_or_else( + || index_slider.dimension.to_string(), + |name| name.to_string(), + ); + + let slider_tooltip = format!("Adjust the 
selected slice for the {name} dimension"); + ui.label(&name).on_hover_text(&slider_tooltip); + + // If the range is big (say, 2048) then we would need + // a slider that is 2048 pixels wide to get the good precision. + // So we add a high-precision drag-value instead: + if ui + .add( + egui::DragValue::new(selector_value) + .range(0..=size - 1) + .speed(0.5), + ) + .on_hover_text(format!( + "Drag to precisely control the slice index of the {name} dimension" + )) + .changed() + { + changed_indices = true; + } + + // Make the slider as big as needed: + const MIN_SLIDER_WIDTH: f32 = 64.0; + if ui.available_width() >= MIN_SLIDER_WIDTH { + ui.spacing_mut().slider_width = ((size as f32) * 4.0) + .at_least(MIN_SLIDER_WIDTH) + .at_most(ui.available_width()); + if ui + .add(egui::Slider::new(selector_value, 0..=size - 1).show_value(false)) + .on_hover_text(slider_tooltip) + .changed() + { + changed_indices = true; + } + } + }); + } + + if changed_indices { + slice_property.save_blueprint_component( + ctx, + &archetypes::TensorSliceSelection::descriptor_indices(), + &indices, + ); + } +} + +// --- Main TensorView impl --- + #[derive(Default)] pub struct TensorView; @@ -325,508 +624,246 @@ impl TensorView { Self::tensor_slice_ui(&ctx, ui, tensors, dimension_labels, &slice_selection) { ui.error_label(err.to_string()); - } - }); - - Ok(()) - } - - fn tensor_slice_ui( - ctx: &ViewContext<'_>, - ui: &mut egui::Ui, - tensors: &[TensorVisualization], - dimension_labels: [Option<(String, bool)>; 2], - slice_selection: &TensorSliceSelection, - ) -> anyhow::Result<()> { - let (response, image_rect) = Self::paint_tensor_slice(ctx, ui, tensors, slice_selection)?; - - if response.hovered() { - if let Some(pointer_pos) = ui.input(|i| i.pointer.hover_pos()) { - response.on_hover_ui_at_pointer(|ui| { - crate::tensor_slice_hover::show_tensor_hover_ui( - ctx.viewer_ctx, - ui, - tensors, - slice_selection, - image_rect, - pointer_pos, - ); - }); - } - } else { - let font_id = 
egui::TextStyle::Body.resolve(ui.style()); - paint_axis_names(ui, image_rect, font_id, dimension_labels); - } - - Ok(()) - } - - fn paint_tensor_slice( - ctx: &ViewContext<'_>, - ui: &mut egui::Ui, - tensors: &[TensorVisualization], - slice_selection: &TensorSliceSelection, - ) -> anyhow::Result<(egui::Response, egui::Rect)> { - re_tracing::profile_function!(); - - if tensors.is_empty() { - anyhow::bail!("No tensor data available."); - } - - // We use the first tensor to determine size and placement - let first_tensor_view = &tensors[0]; - let TensorVisualization { - tensor_row_id: first_tensor_row_id, - tensor: first_tensor, - data_range: first_data_range, - annotations: first_annotations, - .. - } = first_tensor_view; - - let scalar_mapping = ViewProperty::from_archetype::( - ctx.blueprint_db(), - ctx.blueprint_query(), - ctx.view_id, - ); - let colormap: Colormap = scalar_mapping - .component_or_fallback(ctx, TensorScalarMapping::descriptor_colormap().component)?; - let gamma: GammaCorrection = scalar_mapping - .component_or_fallback(ctx, TensorScalarMapping::descriptor_gamma().component)?; - let mag_filter: MagnificationFilter = scalar_mapping - .component_or_fallback(ctx, TensorScalarMapping::descriptor_mag_filter().component)?; - - let first_colormap = ColormapWithRange { - colormap, - value_range: [first_data_range.start() as f32, first_data_range.end() as f32], - }; - // We load the first texture just to get dimensions - let first_colormapped_texture = super::tensor_slice_to_gpu::colormapped_texture( - ctx.render_ctx(), - *first_tensor_row_id, - first_tensor, - slice_selection, - first_annotations, - &first_colormap, - gamma, - )?; - let [width, height] = first_colormapped_texture.width_height(); - - let view_fit: ViewFit = ViewProperty::from_archetype::( - ctx.blueprint_db(), - ctx.blueprint_query(), - ctx.view_id, - ) - .component_or_fallback(ctx, TensorViewFit::descriptor_scaling().component)?; - - let img_size = egui::vec2(width as _, height as _); - 
let img_size = Vec2::max(Vec2::splat(1.0), img_size); // better safe than sorry - let desired_size = match view_fit { - ViewFit::Original => img_size, - ViewFit::Fill => ui.available_size(), - ViewFit::FillKeepAspectRatio => { - let scale = (ui.available_size() / img_size).min_elem(); - img_size * scale - } - }; - - let (response, painter) = ui.allocate_painter(desired_size, egui::Sense::hover()); - let image_rect = egui::Rect::from_min_max(response.rect.min, response.rect.max); - - let texture_filter_magnification = match mag_filter { - MagnificationFilter::Nearest => re_renderer::renderer::TextureFilterMag::Nearest, - MagnificationFilter::Linear => re_renderer::renderer::TextureFilterMag::Linear, - }; - // TODO(andreas): allow for mipmapping based filter - let texture_filter_minification = re_renderer::renderer::TextureFilterMin::Linear; - - // Prepare all textured rects - let mut textured_rects = Vec::with_capacity(tensors.len()); - - let space_rect = egui::Rect::from_min_size(egui::Pos2::ZERO, image_rect.size()); - - for (i, tensor_view) in tensors.iter().enumerate() { - let TensorVisualization { - tensor_row_id, - tensor, - data_range, - annotations, - opacity, - .. - } = tensor_view; - - let colormap_with_range = ColormapWithRange { - colormap, - value_range: [data_range.start() as f32, data_range.end() as f32], - }; - - // Optimization: Reuse first texture if it's the first one - let colormapped_texture = if i == 0 { - first_colormapped_texture.clone() - } else { - super::tensor_slice_to_gpu::colormapped_texture( - ctx.render_ctx(), - *tensor_row_id, - tensor, - slice_selection, - annotations, - &colormap_with_range, - gamma, - )? - }; - - // TODO(andreas): Check if dimensions match. If not, maybe we should warn or try to center? - // For now, we assume they match as per user request context (MRI + Segmentation). - // We scale all subsequent tensors to the first one's destination rect. 
- - let multiplicative_tint = egui::Rgba::from_white_alpha(*opacity); - - textured_rects.push(TexturedRect { - top_left_corner_position: glam::vec3(space_rect.min.x, space_rect.min.y, 0.0), - extent_u: glam::Vec3::X * space_rect.width(), - extent_v: glam::Vec3::Y * space_rect.height(), - colormapped_texture, - options: RectangleOptions { - texture_filter_magnification, - texture_filter_minification, - multiplicative_tint, - ..Default::default() - }, - }); - } - - // --- Render the batch --- - let viewport = painter.clip_rect().intersect(image_rect); - if viewport.is_positive() { - let pixels_per_point = painter.ctx().pixels_per_point(); - let resolution_in_pixel = gpu_bridge::viewport_resolution_in_pixels(viewport, pixels_per_point); - - if resolution_in_pixel[0] > 0 && resolution_in_pixel[1] > 0 { - let ui_from_space = egui::emath::RectTransform::from_to(space_rect, image_rect); - let space_from_ui = ui_from_space.inverse(); - let space_from_points = space_from_ui.scale().y; - let points_from_pixels = 1.0 / pixels_per_point; - let space_from_pixel = space_from_points * points_from_pixels; - - let camera_position_space = space_from_ui.transform_pos(viewport.min); - let top_left_position = glam::vec2(camera_position_space.x, camera_position_space.y); - - let target_config = re_renderer::view_builder::TargetConfiguration { - name: re_renderer::DebugLabel::from("tensor_slice_batch"), - resolution_in_pixel, - view_from_world: macaw::IsoTransform::from_translation(-top_left_position.extend(0.0)), - projection_from_view: re_renderer::view_builder::Projection::Orthographic { - camera_mode: re_renderer::view_builder::OrthographicCameraMode::TopLeftCornerAndExtendZ, - vertical_world_size: space_from_pixel * resolution_in_pixel[1] as f32, - far_plane_distance: 1000.0, - }, - viewport_transformation: re_renderer::RectTransform::IDENTITY, - pixels_per_point, - ..Default::default() - }; - - let mut view_builder = ViewBuilder::new(ctx.render_ctx(), target_config)?; - - 
view_builder.queue_draw( - ctx.render_ctx(), - re_renderer::renderer::RectangleDrawData::new(ctx.render_ctx(), &textured_rects)?, - ); - - painter.add(gpu_bridge::new_renderer_callback( - view_builder, - viewport, - re_renderer::Rgba::TRANSPARENT, - )); - } - } - - Ok((response, image_rect)) - } -} - - -// ... rest of file (selectors_ui, etc.) -// ... (I'm cutting it short for brevity but the rest is unchanged) - -// ---------------------------------------------------------------------------- - -pub fn selected_tensor_slice<'a, T: Copy>( - slice_selection: &TensorSliceSelection, - tensor: &'a ndarray::ArrayViewD<'_, T>, -) -> ndarray::ArrayViewD<'a, T> { - let TensorSliceSelection { - width, - height, - indices, - slider: _, - } = slice_selection; - - let (dwidth, dheight) = if let (Some(width), Some(height)) = (width, height) { - (width.dimension, height.dimension) - } else if let Some(width) = width { - // If height is missing, create a 1D row. - (width.dimension, 1) - } else if let Some(height) = height { - // If width is missing, create a 1D column. - (1, height.dimension) - } else { - // If both are missing, give up. - return tensor.view(); - }; - - let view = if tensor.shape().len() == 1 { - // We want 2D slices, so for "pure" 1D tensors add a dimension. - // This is important for above width/height conversion to work since this assumes at least 2 dimensions. - tensor - .view() - .into_shape_with_order(ndarray::IxDyn(&[tensor.len(), 1])) - .expect("Tensor.shape.len() is not actually 1!") - } else { - tensor.view() - }; - - let axis = [dheight as usize, dwidth as usize] - .into_iter() - .chain(indices.iter().map(|s| s.dimension as usize)) - .collect::>(); - let mut slice = view.permuted_axes(axis); + } + }); - for index_selection in indices { - // 0 and 1 are width/height, the rest are rearranged by dimension_mapping.selectors - // This call removes Axis(2), so the next iteration of the loop does the right thing again. 
- slice.index_axis_inplace(Axis(2), index_selection.index as usize); - } - if height.unwrap_or_default().invert { - slice.invert_axis(Axis(0)); - } - if width.unwrap_or_default().invert { - slice.invert_axis(Axis(1)); + Ok(()) } - slice -} + fn tensor_slice_ui( + ctx: &ViewContext<'_>, + ui: &mut egui::Ui, + tensors: &[TensorVisualization], + dimension_labels: [Option<(String, bool)>; 2], + slice_selection: &TensorSliceSelection, + ) -> anyhow::Result<()> { + let mag_filter = ViewProperty::from_archetype::( + ctx.blueprint_db(), + ctx.blueprint_query(), + ctx.view_id, + ) + .component_or_fallback(ctx, TensorScalarMapping::descriptor_mag_filter().component)?; -fn dimension_name(shape: &[TensorDimension], dim_idx: u32) -> String { - let dim = &shape[dim_idx as usize]; - dim.name.as_ref().map_or_else( - || format!("Dimension {dim_idx} (size={})", dim.size), - |name| format!("{name} (size={})", dim.size), - ) + let (response, image_rect) = paint_tensor_slice(ctx, ui, tensors, slice_selection, mag_filter)?; + + if response.hovered() { + if let Some(pointer_pos) = ui.input(|i| i.pointer.hover_pos()) { + response.on_hover_ui_at_pointer(|ui| { + crate::tensor_slice_hover::show_tensor_hover_ui( + ctx, + ui, + tensors, + slice_selection, + image_rect, + pointer_pos, + mag_filter, + ); + }); + } + } else { + let font_id = egui::TextStyle::Body.resolve(ui.style()); + paint_axis_names(ui, image_rect, font_id, dimension_labels); + } + + Ok(()) + } } -fn paint_axis_names( - ui: &egui::Ui, - rect: egui::Rect, - font_id: egui::FontId, - dimension_labels: [Option<(String, bool)>; 2], -) { - let painter = ui.painter(); - let tokens = ui.tokens(); +#[allow(clippy::too_many_arguments)] +pub fn paint_tensor_slice( + ctx: &ViewContext<'_>, + ui: &mut egui::Ui, + tensors: &[TensorVisualization], + slice_selection: &TensorSliceSelection, + mag_filter: MagnificationFilter, +) -> anyhow::Result<(egui::Response, egui::Rect)> { + re_tracing::profile_function!(); - let [width, height] = 
dimension_labels; - let (width_name, invert_width) = - width.map_or((None, false), |(label, invert)| (Some(label), invert)); - let (height_name, invert_height) = - height.map_or((None, false), |(label, invert)| (Some(label), invert)); + if tensors.is_empty() { + anyhow::bail!("No tensor data available."); + } - let text_color = ui.visuals().text_color(); + let first_tensor_view = &tensors[0]; + let Some((height, width)) = tensor_slice_shape(&first_tensor_view.tensor, slice_selection) + else { + anyhow::bail!("Expected a 2D tensor slice."); + }; - let rounding = tokens.normal_corner_radius(); - let inner_margin = rounding as f32; - let outer_margin = 8.0; + let view_fit: ViewFit = ViewProperty::from_archetype::( + ctx.blueprint_db(), + ctx.blueprint_query(), + ctx.view_id, + ) + .component_or_fallback(ctx, TensorViewFit::descriptor_scaling().component)?; + + let img_size = egui::vec2(width as _, height as _); + let img_size = Vec2::max(Vec2::splat(1.0), img_size); // better safe than sorry + let desired_size = match view_fit { + ViewFit::Original => img_size, + ViewFit::Fill => ui.available_size(), + ViewFit::FillKeepAspectRatio => { + let scale = (ui.available_size() / img_size).min_elem(); + img_size * scale + } + }; - let rect = rect.shrink(outer_margin + inner_margin); + let (response, painter) = ui.allocate_painter(desired_size, egui::Sense::hover()); + let image_rect = egui::Rect::from_min_max(response.rect.min, response.rect.max); - // We make sure that the label for the X axis is always at Y=0, - // and that the label for the Y axis is always at X=0, no matter what inversions. - // - // For instance, with origin in the top right: - // - // foo ⬅ - // .......... - // .......... - // .......... - // .......... ↓ - // .......... b - // .......... a - // .......... 
r - - // TODO(emilk): draw actual arrows behind the text instead of the ugly emoji arrows + let space_rect = egui::Rect::from_min_size(egui::Pos2::ZERO, img_size); - let paint_text_bg = |text_background, text_rect: egui::Rect| { - painter.set( - text_background, - egui::Shape::rect_filled( - text_rect.expand(inner_margin), - rounding, - ui.visuals().panel_fill, - ), - ); - }; + let (textured_rects, texture_filter_magnification) = + create_textured_rects_for_batch(ctx, tensors, slice_selection, mag_filter)?; - // Label for X axis: - if let Some(width_name) = width_name { - let text_background = painter.add(egui::Shape::Noop); - let text_rect = if invert_width { - // On left, pointing left: - let (pos, align) = if invert_height { - (rect.left_bottom(), Align2::LEFT_BOTTOM) - } else { - (rect.left_top(), Align2::LEFT_TOP) - }; - painter.text( - pos, - align, - format!("{width_name} ⬅"), - font_id.clone(), - text_color, - ) - } else { - // On right, pointing right: - let (pos, align) = if invert_height { - (rect.right_bottom(), Align2::RIGHT_BOTTOM) - } else { - (rect.right_top(), Align2::RIGHT_TOP) - }; - painter.text( - pos, - align, - format!("➡ {width_name}"), - font_id.clone(), - text_color, - ) - }; - paint_text_bg(text_background, text_rect); - } + render_tensor_slice_batch(ctx, &painter, &textured_rects, image_rect, space_rect, texture_filter_magnification)?; - // Label for Y axis: - if let Some(height_name) = height_name { - let text_background = painter.add(egui::Shape::Noop); - let text_rect = if invert_height { - // On top, pointing up: - let galley = painter.layout_no_wrap(format!("➡ {height_name}"), font_id, text_color); - let galley_size = galley.size(); - let pos = if invert_width { - rect.right_top() + egui::vec2(-galley_size.y, galley_size.x) - } else { - rect.left_top() + egui::vec2(0.0, galley_size.x) - }; - painter.add( - TextShape::new(pos, galley, text_color).with_angle(-std::f32::consts::TAU / 4.0), - ); - egui::Rect::from_min_size( - pos - 
galley_size.x * egui::Vec2::Y, - egui::vec2(galley_size.y, galley_size.x), - ) - } else { - // On bottom, pointing down: - let galley = painter.layout_no_wrap(format!("{height_name} ⬅"), font_id, text_color); - let galley_size = galley.size(); - let pos = if invert_width { - rect.right_bottom() - egui::vec2(galley_size.y, 0.0) - } else { - rect.left_bottom() - }; - painter.add( - TextShape::new(pos, galley, text_color).with_angle(-std::f32::consts::TAU / 4.0), - ); - egui::Rect::from_min_size( - pos - galley_size.x * egui::Vec2::Y, - egui::vec2(galley_size.y, galley_size.x), - ) - }; - paint_text_bg(text_background, text_rect); - } + Ok((response, image_rect)) } -pub fn index_for_dimension_mut( - indices: &mut [TensorDimensionIndexSelection], - dimension: u32, -) -> Option<&mut u64> { - indices - .iter_mut() - .find(|index| index.dimension == dimension) - .map(|index| &mut index.index) -} -fn selectors_ui( - ctx: &ViewerContext<'_>, - ui: &mut egui::Ui, - shape: &[TensorDimension], +#[allow(clippy::too_many_arguments)] +pub fn create_textured_rects_for_batch( + ctx: &ViewContext<'_>, + tensors: &[TensorVisualization], slice_selection: &TensorSliceSelection, - slice_property: &ViewProperty, -) { - let Some(slider) = &slice_selection.slider else { - return; + mag_filter: MagnificationFilter, +) -> anyhow::Result<(Vec, re_renderer::renderer::TextureFilterMag)> { + let first_tensor_view = &tensors[0]; + let TensorVisualization { + tensor: first_tensor, + .. 
+ } = first_tensor_view; + + let scalar_mapping = ViewProperty::from_archetype::( + ctx.blueprint_db(), + ctx.blueprint_query(), + ctx.view_id, + ); + let colormap: Colormap = + scalar_mapping.component_or_fallback(ctx, TensorScalarMapping::descriptor_colormap().component)?; + let gamma: GammaCorrection = + scalar_mapping.component_or_fallback(ctx, TensorScalarMapping::descriptor_gamma().component)?; + + let texture_filter_magnification = match mag_filter { + MagnificationFilter::Nearest => re_renderer::renderer::TextureFilterMag::Nearest, + MagnificationFilter::Linear => re_renderer::renderer::TextureFilterMag::Linear, }; + let texture_filter_minification = re_renderer::renderer::TextureFilterMin::Linear; - let mut changed_indices = false; - let mut indices = slice_selection.indices.clone(); - - for index_slider in slider { - let dim = &shape[index_slider.dimension as usize]; - let size = dim.size; - if size <= 1 { - continue; - } + let mut textured_rects = Vec::with_capacity(tensors.len()); - let Some(selector_value) = index_for_dimension_mut(&mut indices, index_slider.dimension) - else { - // There should be an entry already via `load_tensor_slice_selection_and_make_valid` - continue; - }; + let Some((height, width)) = tensor_slice_shape(first_tensor, slice_selection) else { + anyhow::bail!("Expected a 2D tensor slice."); + }; + let space_rect = egui::Rect::from_min_size( + egui::Pos2::ZERO, + egui::vec2(width as f32, height as f32), + ); - ui.horizontal(|ui| { - let name = dim.name.clone().map_or_else( - || index_slider.dimension.to_string(), - |name| name.to_string(), - ); + for tensor_view in tensors { + let TensorVisualization { + tensor_row_id, + tensor, + data_range, + annotations, + opacity, + .. 
+ } = tensor_view; - let slider_tooltip = format!("Adjust the selected slice for the {name} dimension"); - ui.label(&name).on_hover_text(&slider_tooltip); + let colormap_with_range = ColormapWithRange { + colormap, + value_range: [data_range.start() as f32, data_range.end() as f32], + }; - // If the range is big (say, 2048) then we would need - // a slider that is 2048 pixels wide to get the good precision. - // So we add a high-precision drag-value instead: - if ui - .add( - egui::DragValue::new(selector_value) - .range(0..=size - 1) - .speed(0.5), - ) - .on_hover_text(format!( - "Drag to precisely control the slice index of the {name} dimension" - )) - .changed() - { - changed_indices = true; - } + let colormapped_texture = + crate::tensor_slice_to_gpu::colormapped_texture( + ctx.render_ctx(), + *tensor_row_id, + tensor, + slice_selection, + annotations, + &colormap_with_range, + gamma, + )?; - // Make the slider as big as needed: - const MIN_SLIDER_WIDTH: f32 = 64.0; - if ui.available_width() >= MIN_SLIDER_WIDTH { - ui.spacing_mut().slider_width = ((size as f32) * 4.0) - .at_least(MIN_SLIDER_WIDTH) - .at_most(ui.available_width()); - if ui - .add(egui::Slider::new(selector_value, 0..=size - 1).show_value(false)) - .on_hover_text(slider_tooltip) - .changed() - { - changed_indices = true; - } - } + let multiplicative_tint = egui::Rgba::from_white_alpha(*opacity); + + textured_rects.push(TexturedRect { + top_left_corner_position: glam::vec3(space_rect.min.x, space_rect.min.y, 0.0), + extent_u: glam::Vec3::X * space_rect.width(), + extent_v: glam::Vec3::Y * space_rect.height(), + colormapped_texture, + options: RectangleOptions { + texture_filter_magnification, + texture_filter_minification, + multiplicative_tint, + ..Default::default() + }, }); } - if changed_indices { - slice_property.save_blueprint_component( - ctx, - &archetypes::TensorSliceSelection::descriptor_indices(), - &indices, - ); + Ok((textured_rects, texture_filter_magnification)) +} + +pub fn 
render_tensor_slice_batch( + ctx: &ViewContext<'_>, + painter: &egui::Painter, + textured_rects: &[TexturedRect], + image_rect: egui::Rect, + space_rect: egui::Rect, + _texture_filter_magnification: re_renderer::renderer::TextureFilterMag, +) -> anyhow::Result<()> { + let viewport = painter.clip_rect().intersect(image_rect); + if viewport.is_positive() { + let pixels_per_point = painter.ctx().pixels_per_point(); + let resolution_in_pixel = + gpu_bridge::viewport_resolution_in_pixels(viewport, pixels_per_point); + + if resolution_in_pixel[0] > 0 && resolution_in_pixel[1] > 0 { + let ui_from_space = egui::emath::RectTransform::from_to(space_rect, image_rect); + let space_from_ui = ui_from_space.inverse(); + + let camera_position_space = space_from_ui.transform_pos(viewport.min); + let top_left_position = + glam::vec2(camera_position_space.x, camera_position_space.y); + + let target_config = re_renderer::view_builder::TargetConfiguration { + name: "tensor_slice_batch".into(), + resolution_in_pixel, + view_from_world: macaw::IsoTransform::from_translation( + -top_left_position.extend(0.0), + ), + projection_from_view: re_renderer::view_builder::Projection::Orthographic { + camera_mode: + re_renderer::view_builder::OrthographicCameraMode::TopLeftCornerAndExtendZ, + vertical_world_size: space_rect.height(), + far_plane_distance: 1000.0, + }, + viewport_transformation: re_renderer::RectTransform::IDENTITY, + pixels_per_point, + ..Default::default() + }; + + let mut view_builder = ViewBuilder::new(ctx.render_ctx(), target_config)?; + + view_builder.queue_draw( + ctx.render_ctx(), + re_renderer::renderer::RectangleDrawData::new(ctx.render_ctx(), textured_rects)?, + ); + + painter.add(gpu_bridge::new_renderer_callback( + view_builder, + viewport, + re_renderer::Rgba::TRANSPARENT, + )); + } } + + Ok(()) } +// ---------------------------------------------------------------------------- + #[test] fn test_help_view() { re_test_context::TestContext::test_help_view(|ctx| 
TensorView.help(ctx)); From 564dae8c211664f9c0d2673c3a773eb02937155e Mon Sep 17 00:00:00 2001 From: Daniel Gomez Date: Tue, 6 Jan 2026 15:41:53 -0500 Subject: [PATCH 4/6] better naming --- .../re_view_tensor/src/tensor_slice_hover.rs | 27 +++++++++++-------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/crates/viewer/re_view_tensor/src/tensor_slice_hover.rs b/crates/viewer/re_view_tensor/src/tensor_slice_hover.rs index b19a3701190d..55a5f6325f4b 100644 --- a/crates/viewer/re_view_tensor/src/tensor_slice_hover.rs +++ b/crates/viewer/re_view_tensor/src/tensor_slice_hover.rs @@ -1,4 +1,3 @@ -use re_data_ui::item_ui; use re_sdk_types::{ components::TensorData, tensor_data::{TensorDataType, TensorElement}, @@ -266,7 +265,7 @@ fn format_pixel_value(elements: Vec) -> (String, String) { } fn tensor_hover_value_ui( - ctx: &ViewContext<'_>, + _ctx: &ViewContext<'_>, ui: &mut egui::Ui, tensors: &[TensorVisualization], slice_selection: &TensorSliceSelection, @@ -289,18 +288,24 @@ fn tensor_hover_value_ui( } = tensor_view; ui.separator(); - ui.label(""); ui.end_row(); - item_ui::entity_path_button( - ctx.viewer_ctx, - &ctx.current_query(), - ctx.recording(), - ui, - Some(ctx.view_id), - entity_path, - ); ui.label(""); + ui.scope(|ui| { + let small = ui + .style() + .text_styles + .get(&egui::TextStyle::Small) + .cloned() + .unwrap_or_else(|| egui::FontId::proportional(10.0)); + ui.style_mut().text_styles.insert(egui::TextStyle::Body, small); + let display_name = entity_path + .last() + .map(|part| part.ui_string()) + .unwrap_or_else(|| "/".to_owned()); + ui.label(display_name) + .on_hover_text(entity_path.to_string()); + }); ui.end_row(); if let Some((label, value_text)) = From 006b0fe83217f6622b7256681fe90fe1b5767b8e Mon Sep 17 00:00:00 2001 From: Daniel Gomez Date: Tue, 6 Jan 2026 16:15:51 -0500 Subject: [PATCH 5/6] use mouse-scroll to scroll tensors. 
--- .../viewer/re_view_tensor/src/view_class.rs | 50 +++++++++++++++++-- 1 file changed, 46 insertions(+), 4 deletions(-) diff --git a/crates/viewer/re_view_tensor/src/view_class.rs b/crates/viewer/re_view_tensor/src/view_class.rs index dc68385b0bb1..700ecca64ce8 100644 --- a/crates/viewer/re_view_tensor/src/view_class.rs +++ b/crates/viewer/re_view_tensor/src/view_class.rs @@ -618,10 +618,23 @@ impl TensorView { }), ]; - egui::ScrollArea::both().auto_shrink(false).show(ui, |ui| { + egui::ScrollArea::both() + .auto_shrink(false) + .scroll_source( + egui::scroll_area::ScrollSource::SCROLL_BAR + | egui::scroll_area::ScrollSource::DRAG, + ) + .show(ui, |ui| { let ctx = self.view_context(ctx, view_id, state, space_origin); - if let Err(err) = - Self::tensor_slice_ui(&ctx, ui, tensors, dimension_labels, &slice_selection) + if let Err(err) = Self::tensor_slice_ui( + &ctx, + ui, + tensors, + dimension_labels, + &slice_selection, + &slice_property, + tensor, + ) { ui.error_label(err.to_string()); } @@ -636,6 +649,8 @@ impl TensorView { tensors: &[TensorVisualization], dimension_labels: [Option<(String, bool)>; 2], slice_selection: &TensorSliceSelection, + slice_property: &ViewProperty, + tensor: &TensorData, ) -> anyhow::Result<()> { let mag_filter = ViewProperty::from_archetype::( ctx.blueprint_db(), @@ -644,9 +659,36 @@ impl TensorView { ) .component_or_fallback(ctx, TensorScalarMapping::descriptor_mag_filter().component)?; - let (response, image_rect) = paint_tensor_slice(ctx, ui, tensors, slice_selection, mag_filter)?; + let (response, image_rect) = + paint_tensor_slice(ctx, ui, tensors, slice_selection, mag_filter)?; if response.hovered() { + let scroll_delta = ui.input(|i| i.raw_scroll_delta.y); + if scroll_delta.abs() > 0.0 { + if let Some(sliders) = &slice_selection.slider + && let Some(first_slider) = sliders.first() + { + let mut indices = slice_selection.indices.clone(); + if let Some(index) = + index_for_dimension_mut(&mut indices, first_slider.dimension) + && 
let Some(dim) = + TensorDimension::from_tensor_data(tensor).get(first_slider.dimension as usize) + { + let max_index = dim.size.saturating_sub(1) as i64; + let direction = if scroll_delta > 0.0 { 1_i64 } else { -1_i64 }; + let current = *index as i64; + let new_index = (current + direction).clamp(0, max_index) as u64; + if *index != new_index { + *index = new_index; + slice_property.save_blueprint_component( + ctx.viewer_ctx, + &archetypes::TensorSliceSelection::descriptor_indices(), + &indices, + ); + } + } + } + } if let Some(pointer_pos) = ui.input(|i| i.pointer.hover_pos()) { response.on_hover_ui_at_pointer(|ui| { crate::tensor_slice_hover::show_tensor_hover_ui( From c45f190d8d13176140607e16aabda1790c30bd17 Mon Sep 17 00:00:00 2001 From: Daniel Gomez Date: Tue, 6 Jan 2026 16:35:57 -0500 Subject: [PATCH 6/6] Implement pan+zoom by left/right mouse drag --- .../viewer/re_view_tensor/src/view_class.rs | 98 +++++++++++++++---- 1 file changed, 78 insertions(+), 20 deletions(-) diff --git a/crates/viewer/re_view_tensor/src/view_class.rs b/crates/viewer/re_view_tensor/src/view_class.rs index 700ecca64ce8..99f29330af19 100644 --- a/crates/viewer/re_view_tensor/src/view_class.rs +++ b/crates/viewer/re_view_tensor/src/view_class.rs @@ -336,11 +336,22 @@ pub struct TensorView; type ViewType = re_sdk_types::blueprint::views::TensorView; -#[derive(Default)] pub struct ViewTensorState { /// Last viewed tensors, copied each frame. /// Used for the selection view. 
tensors: Vec, + pan: egui::Vec2, + zoom: f32, +} + +impl Default for ViewTensorState { + fn default() -> Self { + Self { + tensors: Vec::new(), + pan: egui::Vec2::ZERO, + zoom: 1.0, + } + } } impl ViewState for ViewTensorState { @@ -561,7 +572,7 @@ impl TensorView { &self, ctx: &ViewerContext<'_>, ui: &mut egui::Ui, - state: &ViewTensorState, + state: &mut ViewTensorState, view_id: ViewId, space_origin: &EntityPath, tensors: &[TensorVisualization], @@ -618,6 +629,9 @@ impl TensorView { }), ]; + let mut pan = state.pan; + let mut zoom = state.zoom; + egui::ScrollArea::both() .auto_shrink(false) .scroll_source( @@ -625,20 +639,24 @@ impl TensorView { | egui::scroll_area::ScrollSource::DRAG, ) .show(ui, |ui| { - let ctx = self.view_context(ctx, view_id, state, space_origin); - if let Err(err) = Self::tensor_slice_ui( - &ctx, - ui, - tensors, - dimension_labels, - &slice_selection, - &slice_property, - tensor, - ) - { - ui.error_label(err.to_string()); - } - }); + let ctx = self.view_context(ctx, view_id, state, space_origin); + if let Err(err) = Self::tensor_slice_ui( + &ctx, + ui, + tensors, + dimension_labels, + &slice_selection, + &slice_property, + tensor, + &mut pan, + &mut zoom, + ) { + ui.error_label(err.to_string()); + } + }); + + state.pan = pan; + state.zoom = zoom; Ok(()) } @@ -651,6 +669,8 @@ impl TensorView { slice_selection: &TensorSliceSelection, slice_property: &ViewProperty, tensor: &TensorData, + pan: &mut egui::Vec2, + zoom: &mut f32, ) -> anyhow::Result<()> { let mag_filter = ViewProperty::from_archetype::( ctx.blueprint_db(), @@ -660,7 +680,7 @@ impl TensorView { .component_or_fallback(ctx, TensorScalarMapping::descriptor_mag_filter().component)?; let (response, image_rect) = - paint_tensor_slice(ctx, ui, tensors, slice_selection, mag_filter)?; + paint_tensor_slice(ctx, ui, tensors, slice_selection, mag_filter, *pan, *zoom)?; if response.hovered() { let scroll_delta = ui.input(|i| i.raw_scroll_delta.y); @@ -689,8 +709,29 @@ impl TensorView { } } } 
+ if response.dragged_by(egui::PointerButton::Primary) { + if let Some((height, width)) = tensor_slice_shape(tensor, slice_selection) { + let image_size = egui::vec2(width as f32, height as f32); + let space_size = image_size / *zoom; + let image_rect_size = image_rect.size(); + if image_rect_size.x > 0.0 && image_rect_size.y > 0.0 { + let scale = egui::vec2( + space_size.x / image_rect_size.x, + space_size.y / image_rect_size.y, + ); + *pan += response.drag_delta() * scale; + } + } + } + if response.dragged_by(egui::PointerButton::Secondary) { + let delta = response.drag_delta().y; + if delta.abs() > 0.0 { + let factor = (1.0 + delta * 0.01).clamp(0.2, 5.0); + *zoom = (*zoom * factor).clamp(1.0, 20.0); + } + } if let Some(pointer_pos) = ui.input(|i| i.pointer.hover_pos()) { - response.on_hover_ui_at_pointer(|ui| { + response.clone().on_hover_ui_at_pointer(|ui| { crate::tensor_slice_hover::show_tensor_hover_ui( ctx, ui, @@ -707,6 +748,11 @@ impl TensorView { paint_axis_names(ui, image_rect, font_id, dimension_labels); } + if response.double_clicked() { + *pan = egui::Vec2::ZERO; + *zoom = 1.0; + } + Ok(()) } } @@ -718,6 +764,8 @@ pub fn paint_tensor_slice( tensors: &[TensorVisualization], slice_selection: &TensorSliceSelection, mag_filter: MagnificationFilter, + pan: egui::Vec2, + zoom: f32, ) -> anyhow::Result<(egui::Response, egui::Rect)> { re_tracing::profile_function!(); @@ -749,10 +797,20 @@ pub fn paint_tensor_slice( } }; - let (response, painter) = ui.allocate_painter(desired_size, egui::Sense::hover()); + let (response, painter) = ui.allocate_painter(desired_size, egui::Sense::click_and_drag()); let image_rect = egui::Rect::from_min_max(response.rect.min, response.rect.max); - let space_rect = egui::Rect::from_min_size(egui::Pos2::ZERO, img_size); + let zoom = zoom.max(1.0); + let space_size = img_size / zoom; + let image_center = img_size * 0.5; + let mut space_center = egui::pos2(image_center.x + pan.x, image_center.y + pan.y); + let half_size = 
space_size * 0.5; + let max_center = img_size - half_size; + space_center = egui::pos2( + space_center.x.clamp(half_size.x, max_center.x), + space_center.y.clamp(half_size.y, max_center.y), + ); + let space_rect = egui::Rect::from_center_size(space_center, space_size); let (textured_rects, texture_filter_magnification) = create_textured_rects_for_batch(ctx, tensors, slice_selection, mag_filter)?;