@@ -102,6 +102,60 @@ void TfLiteVarArrayFree(T* a) {
   free(a);
 }
 
+#ifndef TF_LITE_STATIC_MEMORY
+
+TfLiteQuantization TfLiteQuantizationClone(const TfLiteQuantization& src) {
+  TfLiteQuantization dst;
+  dst.type = src.type;
+  switch (src.type) {
+    case kTfLiteNoQuantization:
+      break;
+    case kTfLiteAffineQuantization: {
+      dst.params = calloc(1, sizeof(TfLiteAffineQuantization));
+      const TfLiteAffineQuantization* const src_params =
+          (TfLiteAffineQuantization*)(src.params);
+      TfLiteAffineQuantization* const dst_params =
+          (TfLiteAffineQuantization*)(dst.params);
+      dst_params->quantized_dimension = src_params->quantized_dimension;
+      dst_params->scale = TfLiteFloatArrayCopy(src_params->scale);
+      dst_params->zero_point = TfLiteIntArrayCopy(src_params->zero_point);
+      break;
+    }
+  }
+  return dst;
+}
+
+TfLiteSparsity TfLiteSparsityClone(const TfLiteSparsity& src) {
+  TfLiteSparsity dst = src;
+  dst.traversal_order = TfLiteIntArrayCopy(src.traversal_order);
+  dst.block_map = TfLiteIntArrayCopy(src.block_map);
+  if (src.dim_metadata) {
+    dst.dim_metadata = reinterpret_cast<TfLiteDimensionMetadata*>(
+        calloc(1, sizeof(TfLiteDimensionMetadata) * src.dim_metadata_size));
+    for (int i = 0; i < src.dim_metadata_size; ++i) {
+      dst.dim_metadata[i] = src.dim_metadata[i];
+      dst.dim_metadata[i].array_segments =
+          TfLiteIntArrayCopy(src.dim_metadata[i].array_segments);
+      dst.dim_metadata[i].array_indices =
+          TfLiteIntArrayCopy(src.dim_metadata[i].array_indices);
+    }
+  }
+  return dst;
+}
+
+// Clones the source sparsity to a newly allocated object.
+TfLiteSparsity* TfLiteSparsityClone(const TfLiteSparsity* const src) {
+  if (!src) {
+    return nullptr;
+  }
+  TfLiteSparsity* dst =
+      reinterpret_cast<TfLiteSparsity*>(calloc(1, sizeof(TfLiteSparsity)));
+  *dst = TfLiteSparsityClone(*src);
+  return dst;
+}
+
+#endif  // TF_LITE_STATIC_MEMORY
+
 }  // namespace
 
 extern "C" {
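The helpers above give TfLiteQuantization and TfLiteSparsity deep-copy semantics: every nested allocation is duplicated rather than shared. A minimal sketch of the resulting invariant for the affine case; the helpers themselves are file-local to common.cc, so the check below is a hypothetical test-side function, not part of the API:

#include <cassert>

#include "tensorflow/lite/core/c/common.h"

// Hypothetical post-condition check: after cloning, the affine params and
// their arrays are distinct allocations that carry the same values.
void CheckAffineCloneIsDeep(const TfLiteQuantization& src,
                            const TfLiteQuantization& dst) {
  assert(src.type == kTfLiteAffineQuantization && dst.type == src.type);
  const auto* sp = static_cast<const TfLiteAffineQuantization*>(src.params);
  const auto* dp = static_cast<const TfLiteAffineQuantization*>(dst.params);
  assert(dp != sp);                          // separate params blocks
  assert(dp->scale != sp->scale);            // independent scale array
  assert(dp->zero_point != sp->zero_point);  // independent zero_point array
  assert(dp->quantized_dimension == sp->quantized_dimension);
}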
@@ -234,6 +288,55 @@ void TfLiteTensorFree(TfLiteTensor* t) {
   t->sparsity = nullptr;
 }
 
+TfLiteTensor TfLiteTensorClone(const TfLiteTensor src) {
+  // We copy all of the source data first, then we clone the fields that can't
+  // be shared between two tensor instances.
+  TfLiteTensor dst = src;
+  // Data that is owned by the original tensor must be cloned. Check
+  // TfLiteTensorFree to find out which members are owned.
+  if (src.data.data) {
+    const TfLiteAllocationStrategy allocation_strategy =
+        TfLiteTensorGetAllocationStrategy(&src);
+    switch (allocation_strategy) {
+      case kTfLiteAllocationStrategyUnknown:
+        // We don't know the allocation strategy, which means that the tensor
+        // doesn't own its data: we keep the copied pointer to the data.
+        break;
+      case kTfLiteAllocationStrategyNone:
+        break;
+      case kTfLiteAllocationStrategyMMap:
+        // Mmapped data is read-only and external to the interpreter. We keep
+        // the copied pointer to the data.
+        break;
+      case kTfLiteAllocationStrategyArena:
+        // Arena tensors are allocated when the graph is prepared. There is no
+        // data associated with such a tensor between runs, so we don't care
+        // about the value of `data`.
+        break;
+      case kTfLiteAllocationStrategyMalloc:
+        dst.data.data = malloc(src.bytes);
+        std::memcpy(dst.data.data, src.data.data, src.bytes);
+        break;
+      case kTfLiteAllocationStrategyNew:
+        // Special case for variant objects. They are allocated using
+        // new/delete but require using the `CloneTo` function.
+        if (src.allocation_type == kTfLiteVariantObject) {
+          dst.data.data = reinterpret_cast<const VariantData*>(src.data.data)
+                              ->CloneTo(nullptr);
+        } else {
+          dst.data.data = new char[src.bytes];
+          std::memcpy(dst.data.data, src.data.data, src.bytes);
+        }
+        break;
+    }
+  }
+  dst.dims = TfLiteIntArrayCopy(src.dims);
+  dst.dims_signature = TfLiteIntArrayCopy(src.dims_signature);
+  dst.quantization = TfLiteQuantizationClone(src.quantization);
+  dst.sparsity = TfLiteSparsityClone(src.sparsity);
+  return dst;
+}
+
 void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
                        TfLiteQuantizationParams quantization, char* buffer,
                        size_t size, TfLiteAllocationType allocation_type,
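A hedged usage sketch of the new entry point, assuming `src` is a kTfLiteDynamic tensor whose buffer was heap-allocated (the kTfLiteAllocationStrategyMalloc branch above); after cloning, both tensors own their members and can be freed independently:

#include "tensorflow/lite/core/c/common.h"

// Sketch only: the clone receives its own copies of the data buffer, dims,
// dims_signature, quantization, and sparsity, so no member is shared.
void CloneAndRelease(TfLiteTensor* src) {
  TfLiteTensor copy = TfLiteTensorClone(*src);
  TfLiteTensorFree(src);    // leaves `copy` untouched
  TfLiteTensorFree(&copy);  // releases the cloned members
}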
@@ -334,6 +437,14 @@ TfLiteStatus TfLiteTensorResizeMaybeCopy(size_t num_bytes, TfLiteTensor* tensor,
 TfLiteStatus TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor) {
   return TfLiteTensorResizeMaybeCopy(num_bytes, tensor, true);
 }
+
+const TfLiteIntArray* TfLiteTensorGetDimsSignature(const TfLiteTensor* t) {
+  if (t->dims_signature != nullptr && t->dims_signature->size != 0) {
+    return t->dims_signature;
+  } else {
+    return t->dims;
+  }
+}
 #endif  // TF_LITE_STATIC_MEMORY
 
 const char* TfLiteTypeGetName(TfLiteType type) {
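TfLiteTensorGetDimsSignature lets callers iterate a single array whether or not a signature is stored. A short sketch, assuming an input with a dynamic batch dimension; PrintShapeSignature is illustrative, not part of the API:

#include <cstdio>

#include "tensorflow/lite/core/c/common.h"

// For a [?, 224, 224, 3] input this prints "-1 224 224 3"; for a tensor
// without a stored signature it falls back to the concrete dims.
void PrintShapeSignature(const TfLiteTensor* t) {
  const TfLiteIntArray* sig = TfLiteTensorGetDimsSignature(t);
  for (int i = 0; i < sig->size; ++i) {
    printf("%d ", sig->data[i]);  // -1 marks a dynamic dimension
  }
  printf("\n");
}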
@@ -399,11 +510,13 @@ TfLiteAllocationStrategy TfLiteTensorGetAllocationStrategy(
     case kTfLiteDynamic:
       return kTfLiteAllocationStrategyMalloc;
     case kTfLitePersistentRo:
-      return kTfLiteAllocationStrategyUnknown;
+      return kTfLiteAllocationStrategyMalloc;
     case kTfLiteCustom:
       return kTfLiteAllocationStrategyUnknown;
     case kTfLiteVariantObject:
       return kTfLiteAllocationStrategyNew;
+    case kTfLiteNonCpu:
+      return kTfLiteAllocationStrategyUnknown;
   }
   return kTfLiteAllocationStrategyUnknown;
 }
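With kTfLitePersistentRo now mapped to malloc and the new kTfLiteNonCpu type mapped to Unknown, callers can reason about buffer ownership from the strategy alone. A hedged sketch; TensorOwnsItsBuffer is a hypothetical helper, not part of the API:

#include "tensorflow/lite/core/c/common.h"

// Hypothetical helper: true when the tensor itself is expected to release
// its data buffer (malloc-backed tensors and new-allocated variant objects).
bool TensorOwnsItsBuffer(const TfLiteTensor* t) {
  switch (TfLiteTensorGetAllocationStrategy(t)) {
    case kTfLiteAllocationStrategyMalloc:
    case kTfLiteAllocationStrategyNew:
      return true;
    default:  // none, mmap, arena, unknown: managed outside the tensor
      return false;
  }
}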
@@ -428,6 +541,8 @@ TfLiteRunStability TfLiteTensorGetBufferAddressStability(
       return kTfLiteRunStabilityUnknown;
     case kTfLiteVariantObject:
       return kTfLiteRunStabilityAcrossRuns;
+    case kTfLiteNonCpu:
+      return kTfLiteRunStabilityUnknown;
   }
   return kTfLiteRunStabilityUnknown;
 }
@@ -451,6 +566,8 @@ TfLiteRunStability TfLiteTensorGetDataStability(const TfLiteTensor* const t) {
       return kTfLiteRunStabilityUnknown;
     case kTfLiteVariantObject:
       return kTfLiteRunStabilitySingleRun;
+    case kTfLiteNonCpu:
+      return kTfLiteRunStabilityUnknown;
   }
   return kTfLiteRunStabilityUnknown;
 }
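The two stability queries compose at call sites. A hedged sketch of the kind of check a delegate might perform; CanCacheDataPointer is hypothetical:

#include "tensorflow/lite/core/c/common.h"

// Hypothetical check: a caller may keep a pointer to t->data.data across
// runs only when the buffer address is stable; requiring stable contents as
// well lets it also reuse results derived from that data. kTfLiteNonCpu
// tensors report Unknown for both and therefore fail this check.
bool CanCacheDataPointer(const TfLiteTensor* t) {
  return TfLiteTensorGetBufferAddressStability(t) ==
             kTfLiteRunStabilityAcrossRuns &&
         TfLiteTensorGetDataStability(t) == kTfLiteRunStabilityAcrossRuns;
}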
@@ -477,11 +594,13 @@ TfLiteRunStep TfLiteTensorGetDataKnownStep(const TfLiteTensor* t) {
       return kTfLiteRunStepUnknown;
     case kTfLiteVariantObject:
       return kTfLiteRunStepEval;
+    case kTfLiteNonCpu:
+      return kTfLiteRunStepUnknown;
   }
   return kTfLiteRunStepUnknown;
 }
 
-// Returns the operation steop when the shape of a tensor is computed.
+// Returns the operation step when the shape of a tensor is computed.
 //
 // Some operations can precompute the shape of their results before the
 // evaluation step. This makes the shape available earlier for subsequent
@@ -504,6 +623,8 @@ TfLiteRunStep TfLiteTensorGetShapeKnownStep(const TfLiteTensor* t) {
       return kTfLiteRunStepUnknown;
     case kTfLiteVariantObject:
       return kTfLiteRunStepEval;
+    case kTfLiteNonCpu:
+      return kTfLiteRunStepUnknown;
   }
   return kTfLiteRunStepUnknown;
 }
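Because kTfLiteNonCpu reports kTfLiteRunStepUnknown from both step queries, shape-dependent work must take the conservative path for such tensors. A minimal sketch; ShapeKnownBeforeEval is illustrative, not part of the API:

#include "tensorflow/lite/core/c/common.h"

// Illustrative guard: treat the shape as available ahead of time only when
// the query gives a definite pre-eval step; Unknown (e.g. kTfLiteNonCpu
// tensors) and Eval both fall through to the conservative path.
bool ShapeKnownBeforeEval(const TfLiteTensor* t) {
  const TfLiteRunStep step = TfLiteTensorGetShapeKnownStep(t);
  return step != kTfLiteRunStepUnknown && step != kTfLiteRunStepEval;
}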