Commit c602a36

add example loading an entire RNTuple into a LLAMA view
1 parent: cbe5eea

File tree: 3 files changed (+70, -0 lines)

CMakeLists.txt (+6 lines)

@@ -68,6 +68,12 @@ if (LLAMA_BUILD_EXAMPLES)
         message(WARNING "Could not find alpaka. Alpaka examples are disabled.")
     endif()
 
+    # ROOT examples
+    find_package(ROOT QUIET)
+    if (ROOT_FOUND)
+        add_subdirectory("examples/hep_rntuple")
+    endif()
+
     # CUDA examples
     include(CheckLanguage)
     check_language(CUDA)

examples/hep_rntuple/CMakeLists.txt (+11 lines)

@@ -0,0 +1,11 @@
+cmake_minimum_required (VERSION 3.15)
+project(llama-hep_rntuple)
+
+set(CMAKE_CXX_STANDARD 17)
+
+find_package(ROOT REQUIRED)
+if (NOT TARGET llama::llama)
+    find_package(llama REQUIRED)
+endif()
+add_executable(${PROJECT_NAME} hep_rntuple.cpp)
+target_link_libraries(${PROJECT_NAME} PRIVATE ROOT::Hist ROOT::Graf ROOT::Gpad ROOT::ROOTNTuple llama::llama)
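
The `if (NOT TARGET llama::llama)` guard lets this file work both inside the LLAMA source tree, where the target already exists, and as a standalone project against an installed LLAMA. A minimal sketch of such a standalone consumer, assuming ROOT and LLAMA are installed where find_package can locate them (the project name here is illustrative, not part of the commit):

cmake_minimum_required(VERSION 3.15)
project(rntuple-to-llama)                 # hypothetical standalone project
set(CMAKE_CXX_STANDARD 17)                # ROOT's RNTuple headers require C++17
find_package(ROOT REQUIRED)               # provides the ROOT::* imported targets
find_package(llama REQUIRED)              # provides llama::llama
add_executable(rntuple-to-llama hep_rntuple.cpp)
target_link_libraries(rntuple-to-llama PRIVATE ROOT::ROOTNTuple llama::llama)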

examples/hep_rntuple/hep_rntuple.cpp (+53 lines)

@@ -0,0 +1,53 @@
+// This example uses a non-public CMS NanoAOD file called: ttjet_13tev_june2019_lzma.
+// Please contact us if you need it.
+
+#include "../common/ttjet_13tev_june2019.hpp"
+
+#include <RConfigure.h>
+#define R__HAS_STD_STRING_VIEW
+#include <ROOT/RNTuple.hxx>
+#include <ROOT/RNTupleDS.hxx>
+#include <ROOT/RNTupleModel.hxx>
+#include <ROOT/RNTupleOptions.hxx>
+#include <ROOT/RNTupleView.hxx>
+#include <chrono>
+#include <llama/DumpMapping.hpp>
+#include <llama/llama.hpp>
+
+int main(int argc, const char* argv[])
+{
+    if (argc != 2)
+    {
+        fmt::print("Please specify input file!\n");
+        return 1;
+    }
+
+    using namespace std::chrono;
+    using namespace ROOT::Experimental;
+
+    auto ntuple = RNTupleReader::Open(RNTupleModel::Create(), "NTuple", argv[1]);
+    const auto n = ntuple->GetNEntries();
+
+    auto start = steady_clock::now();
+    auto view = llama::allocView(llama::mapping::SoA<llama::ArrayDims<1>, Event, true>{llama::ArrayDims{n}});
+    fmt::print("Alloc LLAMA view: {}ms\n", duration_cast<milliseconds>(steady_clock::now() - start).count());
+
+    std::size_t totalSize = 0;
+    for (auto i = 0u; i < view.mapping.blobCount; i++)
+        totalSize += view.mapping.blobSize(i);
+    fmt::print("Total LLAMA view memory: {}MiB in {} blobs\n", totalSize / 1024 / 1024, view.mapping.blobCount);
+
+    start = steady_clock::now();
+    llama::forEachLeaf<Event>(
+        [&](auto coord)
+        {
+            using Name = llama::GetTag<Event, decltype(coord)>;
+            using Type = llama::GetType<Event, decltype(coord)>;
+            auto column = ntuple->GetView<Type>(llama::structName<Name>());
+            for (std::size_t i = 0; i < n; i++)
+                view(i)(coord) = column(i);
+        });
+    fmt::print("Copy RNTuple -> LLAMA view: {}ms\n", duration_cast<milliseconds>(steady_clock::now() - start).count());
+
+    start = steady_clock::now();
+}
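
The Event record dimension comes from the common header and is not shown in this commit. A minimal sketch of its likely shape, with hypothetical NanoAOD-style fields (the tags and types below are illustrative assumptions, not the real CMS schema): each tag struct's name is what llama::structName<Name>() yields in the copy loop above, so it must match the RNTuple column name passed to GetView<Type>().

// Hypothetical stand-in for ../common/ttjet_13tev_june2019.hpp; the real
// header mirrors the full CMS NanoAOD schema. Tag names double as RNTuple
// column names via llama::structName<Tag>().
#include <cstdint>
#include <llama/llama.hpp>

struct run{};              // assumed column "run"
struct luminosityBlock{};  // assumed column "luminosityBlock"
struct MET_pt{};           // assumed column "MET_pt"

using Event = llama::Record<
    llama::Field<run, std::uint32_t>,
    llama::Field<luminosityBlock, std::uint32_t>,
    llama::Field<MET_pt, float>>;

With a record dimension like this, llama::forEachLeaf<Event> invokes the lambda once per leaf field, so the copy proceeds column by column; the true passed as the SoA mapping's third template argument appears to select one blob per field, which is consistent with the per-blob size loop and the "in {} blobs" printout above.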
