From 8efca195f35c4695ecde9c302b0bd182a6839fb0 Mon Sep 17 00:00:00 2001
From: ingyukoh
Date: Thu, 12 Feb 2026 16:28:41 +0900
Subject: [PATCH 1/2] [Node.js] Fix float16 tensor input support

Accept both Uint16Array and Float16Array for float16 tensors in the
Node.js binding. Float16Array is a newer JavaScript type (ES2024) that
N-API supports as napi_float16_array (type 11) in Node.js 23+. For
older Node.js versions, define napi_float16_array to ensure
compatibility when users pass Float16Array data.

Fixes #26791

Co-Authored-By: Claude Opus 4.5
---
 js/node/src/tensor_helper.cc | 22 +++++++++++++++++++---
 1 file changed, 19 insertions(+), 3 deletions(-)

diff --git a/js/node/src/tensor_helper.cc b/js/node/src/tensor_helper.cc
index f6b9f3132ec31..8f23f74a960a7 100644
--- a/js/node/src/tensor_helper.cc
+++ b/js/node/src/tensor_helper.cc
@@ -11,6 +11,12 @@
 #include "tensor_helper.h"
 #include "inference_session_wrap.h"
 
+// napi_float16_array was added in Node.js 23 (N-API version 10).
+// Define it for older Node.js versions to support Float16Array input tensors.
+#ifndef napi_float16_array
+#define napi_float16_array static_cast<napi_typedarray_type>(11)
+#endif
+
 // make sure consistent with origin definition
 static_assert(ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED == 0, "definition not consistent with OnnxRuntime");
 static_assert(ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT == 1, "definition not consistent with OnnxRuntime");
@@ -196,9 +202,19 @@ Ort::Value NapiValueToOrtValue(Napi::Env env, Napi::Value value, OrtMemoryInfo*
 
   auto tensorDataTypedArray = tensorDataValue.As<Napi::TypedArray>();
   std::underlying_type_t<napi_typedarray_type> typedArrayType = tensorDataValue.As<Napi::TypedArray>().TypedArrayType();
-  ORT_NAPI_THROW_TYPEERROR_IF(DATA_TYPE_TYPEDARRAY_MAP[elemType] != typedArrayType, env,
-                              "Tensor.data must be a typed array (", DATA_TYPE_TYPEDARRAY_MAP[elemType], ") for ",
-                              tensorTypeString, " tensors, but got typed array (", typedArrayType, ").");
+
+  // For float16 tensors, accept both Uint16Array and Float16Array.
+  // Float16Array is a newer JavaScript type (ES2024) that may be passed by users.
+  // Both use 16-bit storage, so they are compatible at the binary level.
+  bool isValidTypedArray = (DATA_TYPE_TYPEDARRAY_MAP[elemType] == typedArrayType);
+  if (!isValidTypedArray && elemType == ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16) {
+    // Accept Float16Array (napi_float16_array = 11) for float16 tensors
+    isValidTypedArray = (typedArrayType == napi_float16_array);
+  }
+
+  ORT_NAPI_THROW_TYPEERROR_IF(!isValidTypedArray, env,
+                              "Tensor.data must be a typed array (", DATA_TYPE_TYPEDARRAY_MAP[elemType],
+                              " or Float16Array) for ", tensorTypeString, " tensors, but got typed array (", typedArrayType, ").");
 
   char* buffer = reinterpret_cast<char*>(tensorDataTypedArray.ArrayBuffer().Data());
   size_t bufferByteOffset = tensorDataTypedArray.ByteOffset();
 

From d25716c1931fa591fc1d7230005d2a0f8a36d372 Mon Sep 17 00:00:00 2001
From: ingyukoh
Date: Mon, 16 Feb 2026 16:30:46 +0900
Subject: [PATCH 2/2] Fix error message to only mention Float16Array for float16 tensors

Address review comment: the "or Float16Array" text in the type mismatch
error was shown for all tensor types. Now it only appears when the
expected type is float16.
---
 js/node/src/tensor_helper.cc | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/js/node/src/tensor_helper.cc b/js/node/src/tensor_helper.cc
index 8f23f74a960a7..c4bf5628ce6fe 100644
--- a/js/node/src/tensor_helper.cc
+++ b/js/node/src/tensor_helper.cc
@@ -214,7 +214,8 @@ Ort::Value NapiValueToOrtValue(Napi::Env env, Napi::Value value, OrtMemoryInfo*
 
   ORT_NAPI_THROW_TYPEERROR_IF(!isValidTypedArray, env,
                               "Tensor.data must be a typed array (", DATA_TYPE_TYPEDARRAY_MAP[elemType],
-                              " or Float16Array) for ", tensorTypeString, " tensors, but got typed array (", typedArrayType, ").");
+                              elemType == ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16 ? " or Float16Array" : "",
+                              ") for ", tensorTypeString, " tensors, but got typed array (", typedArrayType, ").");
 
   char* buffer = reinterpret_cast<char*>(tensorDataTypedArray.ArrayBuffer().Data());
   size_t bufferByteOffset = tensorDataTypedArray.ByteOffset();
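
Note for reviewers (not part of the patch): a minimal JavaScript usage sketch of
what the native-side change is meant to enable. The model path "model_fp16.onnx"
and the input name "input" are hypothetical placeholders, and whether the
JS-level Tensor constructor in onnxruntime-common also accepts Float16Array is
outside the scope of this patch, which only relaxes the check in the binding.

    // Sketch only, assuming the onnxruntime-node package and a model with a
    // single float16 input named "input" (both hypothetical).
    const ort = require('onnxruntime-node');

    async function main() {
      const session = await ort.InferenceSession.create('model_fp16.onnx');

      // Raw 16-bit storage via Uint16Array (the form accepted before this patch).
      // 0x3c00, 0x4000, 0x4200, 0x4400 encode 1.0, 2.0, 3.0, 4.0 in IEEE 754 half precision.
      const raw = new Uint16Array([0x3c00, 0x4000, 0x4200, 0x4400]);
      await session.run({ input: new ort.Tensor('float16', raw, [2, 2]) });

      // Float16Array (ES2024), available in Node.js 23+; the native binding
      // accepts it for float16 tensors after this patch.
      const Float16ArrayCtor = globalThis.Float16Array;
      if (Float16ArrayCtor) {
        const f16 = new Float16ArrayCtor([1.0, 2.0, 3.0, 4.0]);
        await session.run({ input: new ort.Tensor('float16', f16, [2, 2]) });
      }
    }

    main();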