From ab79f17f3127dbe59a70a0e9bbd7ab2d0441821b Mon Sep 17 00:00:00 2001 From: Google AI Edge Date: Tue, 17 Mar 2026 21:06:24 -0700 Subject: [PATCH] Integrate the new auto registration control option into benchmark litert model LiteRT-PiperOrigin-RevId: 885358805 --- litert/tools/benchmark_litert_model.cc | 45 ++++++++++++++++----- litert/tools/benchmark_litert_model_test.cc | 21 ++++++++++ 2 files changed, 57 insertions(+), 9 deletions(-) diff --git a/litert/tools/benchmark_litert_model.cc b/litert/tools/benchmark_litert_model.cc index ed0433ed83..0f70eaf7f3 100644 --- a/litert/tools/benchmark_litert_model.cc +++ b/litert/tools/benchmark_litert_model.cc @@ -14,6 +14,7 @@ limitations under the License. ==============================================================================*/ #include "litert/tools/benchmark_litert_model.h" +#include <cstdint> #include #include #include @@ -27,7 +28,6 @@ limitations under the License. #include "litert/c/internal/litert_logging.h" #include "litert/c/litert_common.h" #include "litert/c/options/litert_mediatek_options.h" -#include "litert/c/options/litert_qualcomm_options.h" #include "litert/cc/internal/litert_compiled_model_next.h" #include "litert/cc/internal/litert_tflite_error_status_builder.h" #include "litert/cc/litert_common.h" @@ -58,6 +58,22 @@ using ::litert::Options; using ::litert::RuntimeOptions; using ::litert::TensorBuffer; +HwAcceleratorSet GetRequestedHardwareAccelerators( + const BenchmarkParams& params) { + HwAcceleratorSet hardware_accelerators(HwAccelerators::kNone); + if (params.Get<bool>("use_npu")) { + hardware_accelerators |= HwAccelerators::kNpu; + } + if (params.Get<bool>("use_gpu")) { + hardware_accelerators |= HwAccelerators::kGpu; + } + if (params.Get<bool>("use_cpu") || + !params.Get<bool>("require_full_delegation")) { + hardware_accelerators |= HwAccelerators::kCpu; + } + return hardware_accelerators; +} + Options CreateCompiledModelOptions(const BenchmarkParams& params) { auto use_gpu = params.Get<bool>("use_gpu"); auto use_npu = 
params.Get<bool>("use_npu"); @@ -81,10 +97,10 @@ Options CreateCompiledModelOptions(const BenchmarkParams& params) { std::abort(); } - HwAcceleratorSet hardware_accelerators(HwAccelerators::kNone); + HwAcceleratorSet hardware_accelerators = + GetRequestedHardwareAccelerators(params); if (use_npu) { - hardware_accelerators |= HwAccelerators::kNpu; // QNN options LITERT_ASSIGN_OR_ABORT(auto& qnn_opts, compilation_options.GetQualcommOptions()); @@ -113,7 +129,6 @@ Options CreateCompiledModelOptions(const BenchmarkParams& params) { } if (use_gpu) { - hardware_accelerators |= HwAccelerators::kGpu; LITERT_ASSIGN_OR_ABORT(auto& gpu_options, compilation_options.GetGpuOptions()); // Enable benchmark mode to run clFinish() after each inference. @@ -139,9 +154,7 @@ Options CreateCompiledModelOptions(const BenchmarkParams& params) { } } - if (use_cpu || !require_full_delegation) { - hardware_accelerators |= HwAccelerators::kCpu; - + if (hardware_accelerators & HwAccelerators::kCpu) { if (num_threads > 0) { LITERT_ASSIGN_OR_ABORT(auto& cpu_options, compilation_options.GetCpuOptions()); @@ -162,9 +175,19 @@ Options CreateCompiledModelOptions(const BenchmarkParams& params) { litert::Expected<litert::Environment> CreateDefaultEnvironment( const BenchmarkParams& params) { + const int64_t requested_hardware_accelerators = + GetRequestedHardwareAccelerators(params).value; if (!params.Get<bool>("use_npu")) { - // If NPU is not used, we don't need to set the dispatch library directory. - return litert::Environment::Create({}); + // Only auto-register accelerators required by the selected benchmark path. 
+ const std::vector<litert::EnvironmentOptions::Option> environment_options = + { + litert::EnvironmentOptions::Option{ + litert::EnvironmentOptions::Tag::kAutoRegisterAccelerators, + requested_hardware_accelerators, + }, + }; + return litert::Environment::Create( + litert::EnvironmentOptions(absl::MakeConstSpan(environment_options))); } auto dispatch_library_path = params.Get<std::string>("dispatch_library_path"); LITERT_LOG(LITERT_INFO, "dispatch_library_path: %s", @@ -190,6 +213,10 @@ litert::Expected<litert::Environment> CreateDefaultEnvironment( litert::EnvironmentOptions::Tag::kCompilerCacheDir, compiler_cache_path.c_str(), }, + litert::EnvironmentOptions::Option{ + litert::EnvironmentOptions::Tag::kAutoRegisterAccelerators, + requested_hardware_accelerators, + }, }; return litert::Environment::Create( litert::EnvironmentOptions(absl::MakeConstSpan(environment_options))); diff --git a/litert/tools/benchmark_litert_model_test.cc b/litert/tools/benchmark_litert_model_test.cc index af8640e2e2..0a2b9f55bc 100644 --- a/litert/tools/benchmark_litert_model_test.cc +++ b/litert/tools/benchmark_litert_model_test.cc @@ -88,6 +88,27 @@ TEST_F(BenchmarkLiteRtModelTest, GetModelSizeFromPathSucceeded) { EXPECT_GE(listener.results_.model_size_mb(), 0); } +TEST_F(BenchmarkLiteRtModelTest, CpuOnlyInitDoesNotProbeGpuAccelerator) { +#if defined(LITERT_DISABLE_GPU) + GTEST_SKIP() << "GPU accelerator auto-registration is disabled."; +#endif + BenchmarkParams params = BenchmarkLiteRtModel::DefaultParams(); + params.Set<std::string>("graph", kModelPath); + params.Set<std::string>("signature_to_run_for", kSignatureToRunFor); + params.Set<bool>("use_cpu", true); + params.Set<bool>("use_gpu", false); + params.Set<bool>("use_npu", false); + params.Set<bool>("require_full_delegation", false); + + BenchmarkLiteRtModel benchmark = BenchmarkLiteRtModel(std::move(params)); + + testing::internal::CaptureStderr(); + EXPECT_EQ(benchmark.Init(), kTfLiteOk); + const std::string logs = testing::internal::GetCapturedStderr(); + + EXPECT_EQ(logs.find("Loading GPU accelerator("), std::string::npos) << logs; +} + 
TEST_F(BenchmarkLiteRtModelTest, BenchmarkWithResultFilePath) { BenchmarkParams params = BenchmarkLiteRtModel::DefaultParams(); params.Set<std::string>("graph", kModelPath);