@@ -242,29 +242,28 @@ void OnnxPredict::compute() {
242242 const Pool& poolIn = _poolIn.get ();
243243 Pool& poolOut = _poolOut.get ();
244244
245- std:: vector<int64_t > shape ;
245+ std::vector<std::vector<int64_t>> shapes;
246246
247247 // Parse the input tensors from the pool into ONNX Runtime tensors.
248248 for (size_t i = 0 ; i < _nInputs; i++) {
249249
250- cout << " _inputs[i ]: " << _inputs[i] << endl;
250+ cout << " _inputs[" << i << " ]: " << _inputs[i] << endl;
251251 const Tensor<Real>& inputData = poolIn.value <Tensor<Real> >(_inputs[i]);
252-
253- // Convert data to float32
254- std::vector<float > float_data (inputData.size ());
255- for (size_t j = 0 ; j < inputData.size (); ++j) {
256- float_data[j] = static_cast <float >(inputData.data ()[j]);
257- }
258-
259- // Step 2: Get shape
252+ cout << " inputData.size(): " << inputData.size () << endl;
253+
254+ // Step 1: Get tensor shape
255+ std::vector<int64_t > shape;
260256 int dims = 1 ;
261257
262258 shape.push_back ((int64_t )inputData.dimension (0 ));
263259
264260 if (_squeeze) {
265- for (int i = 1 ; i < inputData.rank (); i++) {
266- if (inputData.dimension (i) > 1 ) {
267- shape.push_back ((int64_t )inputData.dimension (i));
261+ // cout << "Applying squeeze!!!" << endl;
262+ // cout << "inputData.rank(): " << inputData.rank() << endl;
263+
264+ for (int j = 1 ; j < inputData.rank (); j++) {
265+ if (inputData.dimension (j) > 1 ) {
266+ shape.push_back ((int64_t )inputData.dimension (j));
268267 dims++;
269268 }
270269 }
@@ -274,23 +273,46 @@ void OnnxPredict::compute() {
274273 shape.push_back ((int64_t ) 1 );
275274 dims++;
276275 }
276+
277277 } else {
278278 dims = inputData.rank ();
279- for (int j = 1 ; j < dims; j++) { // HERE we need to jump i = 1 - 4D tensor input
280- // cout << inputData.dimension(j) << endl;
279+ for (int j = 1 ; j < dims; j++) {
280+ // cout << inputData.dimension(j) << endl;
281281 shape.push_back ((int64_t )inputData.dimension (j));
282282 }
283283 }
284-
284+
285+ // Step 2: Convert data to float32
286+ std::vector<float > float_data (inputData.size ());
287+ for (size_t j = 0 ; j < inputData.size (); ++j) {
288+ float_data[j] = static_cast <float >(inputData.data ()[j]);
289+ }
290+
285291 // Step 3: Create ONNX Runtime tensor
286292 _memoryInfo = Ort::MemoryInfo::CreateCpu (OrtDeviceAllocator, OrtMemTypeCPU);
287293
288294 if (_memoryInfo == NULL ) {
289295 throw EssentiaException (" OnnxRuntimePredict: Error allocating memory for input tensor." );
290296 }
291297
298+ // display shape
299+ cout << " shape: [" ;
300+ for (size_t j = 0 ; j < shape.size (); ++j) {
301+ cout << shape[j];
302+ if (j + 1 < shape.size ()) std::cout << " , " ;
303+ }
304+ cout << " ]" << endl;
305+
306+ // display float_data
307+ cout << " float_data: [" ;
308+ for (size_t j = 0 ; j < float_data.size (); ++j) {
309+ cout << float_data[j];
310+ if (j + 1 < float_data.size ()) std::cout << " , " ;
311+ }
312+ cout << " ]" << endl;
313+
292314 input_tensors.emplace_back (Ort::Value::CreateTensor<float >(_memoryInfo, float_data.data (), float_data.size (), shape.data (), shape.size ()));
293-
315+ shapes. push_back (shape);
294316 }
295317
296318 // Define input and output names
@@ -303,40 +325,55 @@ void OnnxPredict::compute() {
303325 }
304326
305327 // Run the Onnxruntime session.
306- auto output_tensors = _session.Run (_runOptions, // Run options.
307- input_names.data (), // Input node names.
308- input_tensors.data (), // Input tensor values.
309- _nInputs, // Number of inputs.
310- output_names.data (), // Output node names.
311- _nOutputs // Number of outputs.
328+ auto output_tensors = _session.Run (_runOptions, // Run options.
329+ input_names.data (), // Input node names.
330+ input_tensors.data (), // Input tensor values.
331+ _nInputs, // Number of inputs.
332+ output_names.data (), // Output node names.
333+ _nOutputs // Number of outputs.
312334 );
313335
314336 // Map output tensors to pool
315337 for (size_t i = 0 ; i < output_tensors.size (); ++i) {
316338
317339 const Real* outputData = output_tensors[i].GetTensorData <Real>();
318340
319- // Create and array to store the tensor shape.
341+
342+ auto outputInfo = output_tensors[i].GetTensorTypeAndShapeInfo ();
343+ cout << " GetElementType: " << outputInfo.GetElementType () << endl;
344+ cout << " Dimensions of the output: " << outputInfo.GetShape ().size () << endl;
345+ cout << " Shape of the output: [" ;
346+ auto tensor_size = 1 ;
347+ for (unsigned int shapeI = 0 ; shapeI < outputInfo.GetShape ().size (); shapeI++){
348+ tensor_size *= outputInfo.GetShape ()[shapeI];
349+ cout << outputInfo.GetShape ()[shapeI];
350+ if (shapeI + 1 <outputInfo.GetShape ().size ()) cout << " , " ;
351+ }
352+ cout << " ]" << endl;
353+
354+ cout << " tensor_size: " << tensor_size << endl;
355+
356+ // display outputData
357+ cout << " outputData" << i << " : [" ;
358+ for (size_t j = 0 ; j < tensor_size; ++j) {
359+ cout << outputData[j];
360+ if (j + 1 < tensor_size) std::cout << " , " ;
361+ }
362+ cout << " ]" << endl;
363+
364+ // Create an array to store the output tensor shape.
320365 array<long int , 4 > _shape {1 , 1 , 1 , 1 };
321- // _shape[0] = (int)outputShapes [0];
322- _shape[ 0 ] = ( int )shape[ 0 ];
366+ _shape[0] = (int)shapes[0][0];
367+
323368 for (size_t j = 1 ; j < _outputNodes[i].shape .size (); j++){
324- _shape[j+1 ] = (int )_outputNodes[i].shape [j];
369+ int shape_idx = _shape.size () - j;
370+ _shape[shape_idx] = (int )_outputNodes[i].shape [_outputNodes[i].shape .size () - j];
325371 }
326372
327373 // Store tensor in pool
328374 const Tensor<Real> tensorMap = TensorMap<const Real>(outputData, _shape);
329375 poolOut.set (_outputs[i], tensorMap);
330376 }
331-
332- /* Cleanup
333- for (const auto& tensorInfo : all_input_infos) {
334- _allocator.Free((void*)tensorInfo.name.c_str());
335- }
336-
337- for (const auto& tensorInfo : all_output_infos) {
338- _allocator.Free((void*)tensorInfo.name.c_str());
339- }*/
340377
341378}
342379
0 commit comments