Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 5 additions & 5 deletions tensorflow/lite/micro/compression/metadata_saved.h
Original file line number Diff line number Diff line change
Expand Up @@ -8,9 +8,9 @@

// Ensure the included flatbuffers.h is the same version as when this file was
// generated, otherwise it may not be compatible.
static_assert(FLATBUFFERS_VERSION_MAJOR == 23 &&
FLATBUFFERS_VERSION_MINOR == 5 &&
FLATBUFFERS_VERSION_REVISION == 26,
static_assert(FLATBUFFERS_VERSION_MAJOR == 25 &&
FLATBUFFERS_VERSION_MINOR == 9 &&
FLATBUFFERS_VERSION_REVISION == 23,
"Non-compatible flatbuffers version included");

namespace tflite {
Expand Down Expand Up @@ -272,7 +272,7 @@ inline void Metadata::UnPackTo(MetadataT *_o, const ::flatbuffers::resolver_func
(void)_o;
(void)_resolver;
{ auto _e = schema_version(); _o->schema_version = _e; }
{ auto _e = subgraphs(); if (_e) { _o->subgraphs.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->subgraphs[_i]) { _e->Get(_i)->UnPackTo(_o->subgraphs[_i].get(), _resolver); } else { _o->subgraphs[_i] = std::unique_ptr<tflite::micro::compression::SubgraphT>(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->subgraphs.resize(0); } }
{ auto _e = subgraphs(); if (_e) { _o->subgraphs.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->subgraphs[_i]) { _e->Get(_i)->UnPackTo(_o->subgraphs[_i].get(), _resolver); } else { _o->subgraphs[_i] = std::unique_ptr<tflite::micro::compression::SubgraphT>(_e->Get(_i)->UnPack(_resolver)); } } } else { _o->subgraphs.resize(0); } }
}

inline ::flatbuffers::Offset<Metadata> Metadata::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const MetadataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
Expand Down Expand Up @@ -310,7 +310,7 @@ inline SubgraphT *Subgraph::UnPack(const ::flatbuffers::resolver_function_t *_re
// Deep-unpacks this flatbuffer Subgraph table into the object-API SubgraphT.
// For each element of lut_tensors: reuse an already-allocated LutTensorT slot
// in-place, otherwise allocate a fresh one via UnPack(). An absent vector
// clears the destination. (Reconstructed from the diff: the pre-change and
// post-change variants of the lut_tensors line were both present; only the
// corrected line — stray empty statement `};` removed — is kept.)
inline void Subgraph::UnPackTo(SubgraphT *_o, const ::flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = lut_tensors(); if (_e) { _o->lut_tensors.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->lut_tensors[_i]) { _e->Get(_i)->UnPackTo(_o->lut_tensors[_i].get(), _resolver); } else { _o->lut_tensors[_i] = std::unique_ptr<tflite::micro::compression::LutTensorT>(_e->Get(_i)->UnPack(_resolver)); } } } else { _o->lut_tensors.resize(0); } }
}

inline ::flatbuffers::Offset<Subgraph> Subgraph::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SubgraphT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) {
Expand Down
4 changes: 2 additions & 2 deletions tensorflow/lite/micro/kernels/if_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -135,7 +135,7 @@ TF_LITE_MICRO_TEST(IfShouldInvokeSubgraphConditionTrue) {
}

TF_LITE_MICRO_TEST(IfShouldInvokeSubgraphConditionFalse) {
constexpr int kArenaSize = 5000;
constexpr int kArenaSize = 16384;
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@veblush That's a big increase in arena size. Any ideas why this expansion happened?

uint8_t arena[kArenaSize];

const tflite::Model* model =
Expand Down Expand Up @@ -163,7 +163,7 @@ TF_LITE_MICRO_TEST(IfShouldInvokeSubgraphConditionFalse) {
}

TF_LITE_MICRO_TEST(IfShouldNotOverwriteTensorAcrossSubgraphs) {
constexpr int kArenaSize = 5000;
constexpr int kArenaSize = 16384;
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@veblush Same question as on line 138.

uint8_t arena[kArenaSize];

const tflite::Model* model =
Expand Down
4 changes: 2 additions & 2 deletions tensorflow/lite/micro/micro_allocation_info_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ TF_LITE_MICRO_TEST(TestSingleSubgraphWithIntermediates) {
}

TF_LITE_MICRO_TEST(TestMultiSubgraphWithIf) {
constexpr int kArenaSize = 1024;
constexpr int kArenaSize = 2048;
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@veblush Did the updated flatbuffers library cause the model schema to expand in size?

uint8_t arena[kArenaSize];
const tflite::Model* model =
tflite::testing::GetSimpleModelWithSubgraphsAndIf();
Expand Down Expand Up @@ -114,7 +114,7 @@ TF_LITE_MICRO_TEST(TestMultiSubgraphWithIf) {
}

TF_LITE_MICRO_TEST(TestMultiSubgraphWithIfAndEmptySubgraph) {
constexpr int kArenaSize = 1024;
constexpr int kArenaSize = 2048;
uint8_t arena[kArenaSize];
const tflite::Model* model =
tflite::testing::GetSimpleModelWithIfAndEmptySubgraph();
Expand Down
32 changes: 16 additions & 16 deletions tensorflow/lite/micro/micro_allocator_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -164,7 +164,7 @@ size_t GetArenaUsedBytesBySimpleMockModel(bool is_memory_planner_injected) {
TF_LITE_MICRO_TESTS_BEGIN

TF_LITE_MICRO_TEST(TestInitializeRuntimeTensor) {
constexpr size_t arena_size = 1024;
constexpr size_t arena_size = 4096;
uint8_t arena[arena_size];
tflite::SingleArenaBufferAllocator* simple_allocator =
tflite::SingleArenaBufferAllocator::Create(arena, arena_size);
Expand Down Expand Up @@ -193,7 +193,7 @@ TF_LITE_MICRO_TEST(TestInitializeRuntimeTensor) {
// always allocates from temp (interpreter returns buffers from
// TfLiteEvalTensor):
TF_LITE_MICRO_TEST(TestInitializeTempRuntimeTensor) {
constexpr size_t arena_size = 1024;
constexpr size_t arena_size = 4096;
uint8_t arena[arena_size];
tflite::SingleArenaBufferAllocator* simple_allocator =
tflite::SingleArenaBufferAllocator::Create(arena, arena_size);
Expand All @@ -220,7 +220,7 @@ TF_LITE_MICRO_TEST(TestInitializeTempRuntimeTensor) {
}

TF_LITE_MICRO_TEST(TestInitializeQuantizedTensor) {
constexpr size_t arena_size = 1024;
constexpr size_t arena_size = 4096;
uint8_t arena[arena_size];
tflite::SingleArenaBufferAllocator* simple_allocator =
tflite::SingleArenaBufferAllocator::Create(arena, arena_size);
Expand All @@ -247,7 +247,7 @@ TF_LITE_MICRO_TEST(TestInitializeQuantizedTensor) {
}

TF_LITE_MICRO_TEST(TestMissingQuantization) {
constexpr size_t arena_size = 1024;
constexpr size_t arena_size = 4096;
uint8_t arena[arena_size];
tflite::SingleArenaBufferAllocator* simple_allocator =
tflite::SingleArenaBufferAllocator::Create(arena, arena_size);
Expand Down Expand Up @@ -275,7 +275,7 @@ TF_LITE_MICRO_TEST(TestFailsWhenModelStartsTwice) {
tflite::testing::TestingOpResolver op_resolver;
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk,
tflite::testing::GetTestingOpResolver(op_resolver));
constexpr size_t arena_size = 1024;
constexpr size_t arena_size = 4096;
uint8_t arena[arena_size];
tflite::MicroAllocator* allocator =
tflite::MicroAllocator::Create(arena, arena_size);
Expand All @@ -291,7 +291,7 @@ TF_LITE_MICRO_TEST(TestFailsWithWrongSequence) {
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk,
tflite::testing::GetTestingOpResolver(op_resolver));
tflite::SubgraphAllocations* subgraph_allocations = nullptr;
constexpr size_t arena_size = 1024;
constexpr size_t arena_size = 4096;
uint8_t arena[arena_size];
tflite::MicroAllocator* allocator =
tflite::MicroAllocator::Create(arena, arena_size);
Expand All @@ -313,7 +313,7 @@ TF_LITE_MICRO_TEST(TestMockModelAllocation) {
tflite::testing::TestingOpResolver op_resolver;
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk,
tflite::testing::GetTestingOpResolver(op_resolver));
constexpr size_t arena_size = 1024 + 16;
constexpr size_t arena_size = 2048 + 16;
uint8_t arena[arena_size];
tflite::MicroAllocator* allocator =
tflite::MicroAllocator::Create(arena, arena_size);
Expand Down Expand Up @@ -361,7 +361,7 @@ TF_LITE_MICRO_TEST(TestMockModelAllocationInTwoSeparateArenas) {
tflite::testing::TestingOpResolver op_resolver;
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk,
tflite::testing::GetTestingOpResolver(op_resolver));
constexpr size_t arena_size = 1024;
constexpr size_t arena_size = 4096;
uint8_t persistent_arena[arena_size];
uint8_t non_persistent_arena[arena_size];

Expand Down Expand Up @@ -407,7 +407,7 @@ TF_LITE_MICRO_TEST(TestMockModelAllocationWithGivenMemoryPlanner) {
tflite::testing::TestingOpResolver op_resolver;
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk,
tflite::testing::GetTestingOpResolver(op_resolver));
constexpr size_t arena_size = 1024;
constexpr size_t arena_size = 4096;
uint8_t arena[arena_size];
tflite::GreedyMemoryPlanner memory_planner;
tflite::MicroAllocator* allocator =
Expand Down Expand Up @@ -590,7 +590,7 @@ TF_LITE_MICRO_TEST(TestAllocationForComplexModelAllocation) {
tflite::testing::TestingOpResolver op_resolver;
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk,
tflite::testing::GetTestingOpResolver(op_resolver));
constexpr size_t arena_size = 2048;
constexpr size_t arena_size = 4096;
uint8_t arena[arena_size];
tflite::MicroAllocator* allocator =
tflite::MicroAllocator::Create(arena, arena_size);
Expand Down Expand Up @@ -940,7 +940,7 @@ TF_LITE_MICRO_TEST(TestFailAllocatePersistentTfLiteTensor) {

TF_LITE_MICRO_TEST(TestAllocateSingleTempTfLiteTensor) {
const tflite::Model* model = tflite::testing::GetSimpleMockModel();
constexpr size_t arena_size = 1024;
constexpr size_t arena_size = 4096;
uint8_t arena[arena_size];
tflite::MicroAllocator* allocator =
tflite::MicroAllocator::Create(arena, arena_size);
Expand All @@ -954,7 +954,7 @@ TF_LITE_MICRO_TEST(TestAllocateSingleTempTfLiteTensor) {

TF_LITE_MICRO_TEST(TestAllocateChainOfTfLiteTensor) {
const tflite::Model* model = tflite::testing::GetSimpleMockModel();
constexpr size_t arena_size = 1024;
constexpr size_t arena_size = 4096;
uint8_t arena[arena_size];
tflite::MicroAllocator* allocator =
tflite::MicroAllocator::Create(arena, arena_size);
Expand All @@ -977,7 +977,7 @@ TF_LITE_MICRO_TEST(TestAllocateChainOfTfLiteTensor) {

TF_LITE_MICRO_TEST(TestAllocateAndDeallocateChainOfTfLiteTensor) {
const tflite::Model* model = tflite::testing::GetSimpleMockModel();
constexpr size_t arena_size = 1024;
constexpr size_t arena_size = 4096;
uint8_t arena[arena_size];
tflite::MicroAllocator* allocator =
tflite::MicroAllocator::Create(arena, arena_size);
Expand Down Expand Up @@ -1007,7 +1007,7 @@ TF_LITE_MICRO_TEST(TestAllocateAndDeallocateChainOfTfLiteTensor) {
}

TF_LITE_MICRO_TEST(TestAllocateAndDeallocateTempBuffer) {
constexpr size_t arena_size = 1024;
constexpr size_t arena_size = 4096;
uint8_t arena[arena_size];
tflite::MicroAllocator* allocator =
tflite::MicroAllocator::Create(arena, arena_size);
Expand All @@ -1023,7 +1023,7 @@ TF_LITE_MICRO_TEST(TestAllocateAndDeallocateTempBuffer) {

TF_LITE_MICRO_TEST(TestAllocateTfLiteTensorWithReset) {
const tflite::Model* model = tflite::testing::GetSimpleMockModel();
constexpr size_t arena_size = 1024;
constexpr size_t arena_size = 4096;
uint8_t arena[arena_size];
tflite::MicroAllocator* allocator =
tflite::MicroAllocator::Create(arena, arena_size);
Expand Down Expand Up @@ -1287,7 +1287,7 @@ TF_LITE_MICRO_TEST(TestMockModelAllocationByNonPersistentMemoryPlannerShim) {
tflite::testing::TestingOpResolver op_resolver;
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk,
tflite::testing::GetTestingOpResolver(op_resolver));
constexpr size_t arena_size = 1024;
constexpr size_t arena_size = 4096;
uint8_t arena[arena_size];
tflite::MicroAllocator* allocator =
tflite::MicroAllocator::Create(arena, arena_size, &planner);
Expand Down
22 changes: 11 additions & 11 deletions tensorflow/lite/micro/micro_interpreter_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,7 @@ TF_LITE_MICRO_TEST(TestInterpreter) {
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk,
tflite::testing::GetTestingOpResolver(op_resolver));

constexpr size_t allocator_buffer_size = 2000;
constexpr size_t allocator_buffer_size = 4096;
uint8_t allocator_buffer[allocator_buffer_size];

// Create a new scope so that we can test the destructor.
Expand Down Expand Up @@ -119,7 +119,7 @@ TF_LITE_MICRO_TEST(TestInterpreterCompression) {
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk,
tflite::testing::GetTestingOpResolver(op_resolver));

constexpr size_t kAllocatorBufferSize = 2000;
constexpr size_t kAllocatorBufferSize = 4096;
uint8_t allocator_buffer[kAllocatorBufferSize];

// Create a new scope so that we can test the destructor.
Expand Down Expand Up @@ -167,7 +167,7 @@ TF_LITE_MICRO_TEST(TestInterpreterCompressionAltMemoryAfterInit) {
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk,
tflite::testing::GetTestingOpResolver(op_resolver));

constexpr size_t kAllocatorBufferSize = 2000;
constexpr size_t kAllocatorBufferSize = 4096;
uint8_t allocator_buffer[kAllocatorBufferSize];
constexpr size_t kAltMemSize = 10;
int16_t alt_mem_1[kAltMemSize];
Expand Down Expand Up @@ -195,7 +195,7 @@ TF_LITE_MICRO_TEST(TestInterpreterCompressionAltMemoryTooSmall) {
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk,
tflite::testing::GetTestingOpResolver(op_resolver));

constexpr size_t kAllocatorBufferSize = 2000;
constexpr size_t kAllocatorBufferSize = 4096;
uint8_t allocator_buffer[kAllocatorBufferSize];
constexpr size_t kAltMemSize = 10;
int16_t alt_mem_1[kAltMemSize] = {};
Expand Down Expand Up @@ -257,7 +257,7 @@ TF_LITE_MICRO_TEST(TestInterpreterCompressionAltMemory) {
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk,
tflite::testing::GetTestingOpResolver(op_resolver));

constexpr size_t kAllocatorBufferSize = 2000;
constexpr size_t kAllocatorBufferSize = 4096;
uint8_t allocator_buffer[kAllocatorBufferSize];
constexpr size_t kAltMemSize = 10;
int16_t alt_mem_1[kAltMemSize] = {};
Expand Down Expand Up @@ -419,7 +419,7 @@ TF_LITE_MICRO_TEST(TestKernelMemoryPlanning) {
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk,
tflite::testing::GetTestingOpResolver(op_resolver));

constexpr size_t allocator_buffer_size = 4096 + 1024;
constexpr size_t allocator_buffer_size = 8192 + 2048;
uint8_t allocator_buffer[allocator_buffer_size];

tflite::RecordingMicroAllocator* allocator =
Expand Down Expand Up @@ -472,7 +472,7 @@ TF_LITE_MICRO_TEST(TestIncompleteInitialization) {
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk,
tflite::testing::GetTestingOpResolver(op_resolver));

constexpr size_t allocator_buffer_size = 2048;
constexpr size_t allocator_buffer_size = 4096;
uint8_t allocator_buffer[allocator_buffer_size];

tflite::MicroInterpreter interpreter(model, op_resolver, allocator_buffer,
Expand All @@ -489,7 +489,7 @@ TF_LITE_MICRO_TEST(InterpreterWithProfilerShouldProfileOps) {
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk,
tflite::testing::GetTestingOpResolver(op_resolver));

constexpr size_t allocator_buffer_size = 2048;
constexpr size_t allocator_buffer_size = 4096;
uint8_t allocator_buffer[allocator_buffer_size];
tflite::MockProfiler profiler;
tflite::MicroInterpreter interpreter(model, op_resolver, allocator_buffer,
Expand Down Expand Up @@ -651,7 +651,7 @@ TF_LITE_MICRO_TEST(TestInterpreterMultipleInputs) {
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk,
tflite::testing::GetTestingOpResolver(op_resolver));

constexpr size_t allocator_buffer_size = 2000;
constexpr size_t allocator_buffer_size = 4096;
uint8_t allocator_buffer[allocator_buffer_size];

// Create a new scope so that we can test the destructor.
Expand Down Expand Up @@ -717,7 +717,7 @@ TF_LITE_MICRO_TEST(TestInterpreterNullInputsAndOutputs) {
tflite::testing::GetTestingOpResolver(op_resolver));
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, op_resolver.AddCallOnce());

constexpr size_t allocator_buffer_size = 2000;
constexpr size_t allocator_buffer_size = 4096;
uint8_t allocator_buffer[allocator_buffer_size];

tflite::MicroInterpreter interpreter(model, op_resolver, allocator_buffer,
Expand Down Expand Up @@ -786,7 +786,7 @@ TF_LITE_MICRO_TEST(TestDynamicTensorFails) {
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk,
tflite::testing::GetTestingOpResolver(op_resolver));

constexpr size_t kAllocatorBufferSize = 2000;
constexpr size_t kAllocatorBufferSize = 4096;
uint8_t allocator_buffer[kAllocatorBufferSize];

// Use a new scope for each MicroInterpreter
Expand Down
16 changes: 6 additions & 10 deletions tensorflow/lite/micro/test_helpers.cc
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ class StackAllocator : public flatbuffers::Allocator {
return *inst;
}

static constexpr size_t kStackAllocatorSize = 8192;
static constexpr size_t kStackAllocatorSize = 64 * 1024;

private:
uint8_t data_backing_[kStackAllocatorSize];
Expand All @@ -80,12 +80,9 @@ class StackAllocator : public flatbuffers::Allocator {
};

// Returns the process-wide FlatBufferBuilder singleton used by the model
// builders in this file.
//
// NOTE(review): the diff's replacement body (`return new
// flatbuffers::FlatBufferBuilder(1024, ...)`) allocates a fresh builder on
// every call — leaking one FlatBufferBuilder per invocation and breaking the
// shared-instance semantics callers of BuilderInstance() depend on — and it
// shrinks the initial buffer from the StackAllocator's full capacity to 1024,
// which forces growth out of a fixed-size stack-backed allocator. The
// original singleton is restored below.
//
// Placement-new into static storage is deliberate: it avoids registering a
// static destructor (the builder lives for the whole process), and the
// builder is sized to the full StackAllocator capacity up front so the
// stack-based allocator never needs to grow the buffer.
flatbuffers::FlatBufferBuilder* BuilderInstance() {
  static char inst_memory[sizeof(flatbuffers::FlatBufferBuilder)];
  static flatbuffers::FlatBufferBuilder* inst =
      new (inst_memory) flatbuffers::FlatBufferBuilder(
          StackAllocator::kStackAllocatorSize,
          &StackAllocator::instance(MicroArenaBufferAlignment()));
  return inst;
}

// A wrapper around FlatBuffer API to help build model easily.
Expand Down Expand Up @@ -1939,10 +1936,9 @@ const Model* GetNoOpModelWithTensorShape(
const Tensor* Create1dFlatbufferTensor(int size, bool is_variable) {
using flatbuffers::Offset;
flatbuffers::FlatBufferBuilder* builder = BuilderInstance();
constexpr size_t tensor_shape_size = 1;
const int32_t tensor_shape[tensor_shape_size] = {size};
std::vector<int32_t> shape = {size};
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@veblush will the verboten std::vector port to non x86 platforms?

const Offset<Tensor> tensor_offset = CreateTensor(
*builder, builder->CreateVector(tensor_shape, tensor_shape_size),
*builder, builder->CreateVector(shape),
TensorType_INT32, 0, builder->CreateString("test_tensor"), 0,
is_variable);
builder->Finish(tensor_offset);
Expand Down
Loading
Loading