Skip to content

Commit 9fe6071

Browse files
authored
[TFL FE] Validate quantized_dimension to prevent out-of-bounds write (#34394)
## Summary

- Add bounds check for quantized_dimension in get_quant_shape() before using it as a vector index
- Reject negative quantized_dimension at parse time in get_quantization()
- Add test models and unit tests for both cases

## Details

The get_quant_shape() function in tflite_quantize_resolver.cpp uses the quantized_dimension field from the FlatBuffer directly as an index into a std::vector without verifying it is within [0, rank). A crafted .tflite model with quantized_dimension >= tensor rank causes a heap out-of-bounds write during model loading (CWE-787).

The fix adds validation at two points:
- In get_quant_shape(): check axis >= 0 and axis < rank before indexing
- In get_quantization(): reject negative axis values at parse time

### Tickets:
- 181564
1 parent d1006cb commit 9fe6071

File tree

4 files changed

+246
-1
lines changed

4 files changed

+246
-1
lines changed

src/frontends/tensorflow_lite/src/tflite_transformations/tflite_quantize_resolver.cpp

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -63,8 +63,14 @@ ov::Shape get_quant_shape(const ov::Output<ov::Node>& output,
6363
FRONT_END_GENERAL_CHECK(output.get_partial_shape().rank().is_static(),
6464
"Per-Channel Quantization of tensor with dynamic rank");
6565
auto rank = output.get_partial_shape().size();
66+
auto axis = quantization->get_axis();
67+
FRONT_END_GENERAL_CHECK(axis >= 0 && static_cast<size_t>(axis) < rank,
68+
"Per-Channel Quantization axis ",
69+
axis,
70+
" is out of range for tensor of rank ",
71+
rank);
6672
shape = ov::Shape(rank, 1);
67-
shape[quantization->get_axis()] = size;
73+
shape[static_cast<size_t>(axis)] = size;
6874
}
6975
return shape;
7076
}

src/frontends/tensorflow_lite/src/utils.cpp

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,9 @@ std::shared_ptr<ov::frontend::tensorflow_lite::QuantizationInfo> ov::frontend::t
2525
if (quantization->get_zero_point().empty() && quantization->get_scale().empty())
2626
return {};
2727
quantization->set_axis(tf_quantization->quantized_dimension());
28+
FRONT_END_GENERAL_CHECK(quantization->get_axis() >= 0,
29+
"Quantized dimension must be non-negative, got: ",
30+
quantization->get_axis());
2831
return quantization;
2932
}
3033

src/frontends/tensorflow_lite/tests/convert_unsupported.cpp

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,3 +60,14 @@ INSTANTIATE_TEST_SUITE_P(OobIndices,
6060
"malformed_indices/oob_opcode_index.tflite",
6161
"malformed_indices/oob_graph_io_tensor_index.tflite",
6262
"malformed_indices/oob_buffer_index.tflite"));
63+
64+
// quantized_dimension=-1: load() throws in get_quantization() (non-negative axis check)
65+
INSTANTIATE_TEST_SUITE_P(NegativeQuantDim,
66+
MalformedModelLoadTest,
67+
::testing::Values("oob_quant_dim/negative_axis.tflite"));
68+
69+
// quantized_dimension=100 on rank-2 tensor: load() succeeds, convert() throws
70+
// in get_quant_shape() (axis >= rank check)
71+
INSTANTIATE_TEST_SUITE_P(OobQuantDim,
72+
MalformedModelConvertTest,
73+
::testing::Values("oob_quant_dim/axis_exceeds_rank.tflite"));
Lines changed: 225 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,225 @@
1+
# Copyright (C) 2018-2026 Intel Corporation
2+
# SPDX-License-Identifier: Apache-2.0
3+
4+
# Generate crafted .tflite model files with out-of-bounds quantized_dimension
5+
# field values. These models test that the TFLite frontend properly validates
6+
# the quantized_dimension field in QuantizationParameters before using it as
7+
# a vector index, preventing CWE-787 (Out-of-bounds Write).
8+
#
9+
# The vulnerability is in get_quant_shape() at tflite_quantize_resolver.cpp:67
10+
# where shape[quantization->get_axis()] = size; uses the quantized_dimension
11+
# directly as an array index without bounds checking.
12+
#
13+
# Model structure:
14+
# input [1,3] float32 -> QUANTIZE -> output [1,3] int8
15+
# Output tensor has per-channel quantization (3 scale values) with a malicious
16+
# quantized_dimension value. When TFLQuantizeReplacer processes the TFLQuantize
17+
# node wrapping the output tensor, it calls get_quant_shape() which uses
18+
# quantized_dimension as a vector index.
19+
#
20+
# Models generated:
21+
# oob_quant_dim/axis_exceeds_rank.tflite - quantized_dimension=100 on rank-2 tensor
22+
# oob_quant_dim/negative_axis.tflite - quantized_dimension=-1 on rank-2 tensor
23+
24+
import os
25+
import sys
26+
27+
import flatbuffers
28+
29+
30+
def create_float_vector(builder, values):
    """Serialize ``values`` into the builder as a FlatBuffer float32 vector.

    FlatBuffers are written back-to-front, so elements are prepended in
    reverse order to keep the original sequence.

    :param builder: a ``flatbuffers.Builder`` currently open for writing.
    :param values: iterable of floats to store.
    :return: offset of the finished vector within the buffer.
    """
    builder.StartVector(4, len(values), 4)  # element size 4, alignment 4
    for value in reversed(values):
        builder.PrependFloat32(value)
    return builder.EndVector()
36+
37+
38+
def create_int64_vector(builder, values):
    """Serialize ``values`` into the builder as a FlatBuffer int64 vector.

    FlatBuffers are written back-to-front, so elements are prepended in
    reverse order to keep the original sequence.

    :param builder: a ``flatbuffers.Builder`` currently open for writing.
    :param values: iterable of ints to store.
    :return: offset of the finished vector within the buffer.
    """
    builder.StartVector(8, len(values), 8)  # element size 8, alignment 8
    for value in reversed(values):
        builder.PrependInt64(value)
    return builder.EndVector()
44+
45+
46+
def build_tflite_with_quantization(quantized_dimension=0):
    """Craft a minimal .tflite FlatBuffer whose output tensor carries
    per-channel quantization with a caller-controlled quantized_dimension.

    Model layout:
      * tensor 0: input  [1,3] float32, no quantization
      * tensor 1: output [1,3] int8, per-channel quantization (3 scales)
        with ``quantized_dimension`` written verbatim from the argument
      * 3 empty buffers (0 = empty sentinel, 1 = input, 2 = output)
      * one operator code (QUANTIZE, builtin_code=114)
      * one QUANTIZE operator: inputs=[0], outputs=[1]
      * subgraph inputs=[0], outputs=[1]

    Because the output tensor has more than one scale value, the TFLite
    frontend treats it as per-channel quantized; TFLQuantizeReplacer then
    calls get_quant_shape(), which indexes a shape vector with
    ``quantized_dimension`` — the code path under test.

    FlatBuffer table slots used below (from schema.fbs):
      QuantizationParameters: 0=min, 1=max, 2=scale, 3=zero_point,
                              4=details_type, 5=details, 6=quantized_dimension
      Tensor:   0=shape, 1=type, 2=buffer, 3=name, 4=quantization, ...
      Operator: 0=opcode_index, 1=inputs, 2=outputs, ...
      OperatorCode: 0=deprecated_builtin_code, 1=custom_code, 2=version,
                    3=builtin_code
      SubGraph: 0=tensors, 1=inputs, 2=outputs, 3=operators, 4=name
      Model:    0=version, 1=operator_codes, 2=subgraphs, 3=description,
                4=buffers

    :param quantized_dimension: axis for per-channel quantization. The valid
        range for the rank-2 output tensor is [0, 1]; anything else is the
        malicious case.
    :return: the serialized model as ``bytes``.
    """
    builder = flatbuffers.Builder(2048)

    # Three empty buffers: index 0 is the conventional empty sentinel,
    # 1 and 2 back the input and output tensors.
    empty_buffers = []
    for _ in range(3):
        builder.StartObject(1)  # Buffer: single 'data' field, left unset
        empty_buffers.append(builder.EndObject())

    builder.StartVector(4, len(empty_buffers), 4)
    for table in reversed(empty_buffers):
        builder.PrependUOffsetTRelative(table)
    buffers_vec = builder.EndVector()

    # Single operator code: QUANTIZE (builtin_code 114).
    builder.StartObject(4)
    builder.PrependInt8Slot(0, 114, 0)   # deprecated_builtin_code = QUANTIZE
    builder.PrependInt32Slot(2, 1, 1)    # version = 1
    builder.PrependInt32Slot(3, 114, 0)  # builtin_code = QUANTIZE
    quantize_opcode = builder.EndObject()

    builder.StartVector(4, 1, 4)
    builder.PrependUOffsetTRelative(quantize_opcode)
    opcodes_vec = builder.EndVector()

    # Per-channel QuantizationParameters for the output tensor: three
    # scale/zero-point pairs (size > 1 makes it per-channel) plus the
    # possibly out-of-range axis.
    scale_vec = create_float_vector(builder, [0.1, 0.2, 0.3])
    zero_point_vec = create_int64_vector(builder, [0, 0, 0])
    builder.StartObject(7)  # 7 slots: the details union occupies two
    builder.PrependUOffsetTRelativeSlot(2, scale_vec, 0)       # scale
    builder.PrependUOffsetTRelativeSlot(3, zero_point_vec, 0)  # zero_point
    builder.PrependInt32Slot(6, quantized_dimension, 0)        # the OOB axis
    output_quant = builder.EndObject()

    # Tensor 0: float32 input of shape [1, 3], no quantization.
    input_name = builder.CreateString("input")
    builder.StartVector(4, 2, 4)
    builder.PrependInt32(3)
    builder.PrependInt32(1)
    input_shape_vec = builder.EndVector()

    builder.StartObject(11)  # Tensor has up to 11 fields
    builder.PrependUOffsetTRelativeSlot(0, input_shape_vec, 0)  # shape [1,3]
    builder.PrependInt8Slot(1, 0, 0)                            # FLOAT32
    builder.PrependUint32Slot(2, 1, 0)                          # buffer 1
    builder.PrependUOffsetTRelativeSlot(3, input_name, 0)
    input_tensor = builder.EndObject()

    # Tensor 1: int8 output of shape [1, 3] with the crafted quantization.
    output_name = builder.CreateString("output")
    builder.StartVector(4, 2, 4)
    builder.PrependInt32(3)
    builder.PrependInt32(1)
    output_shape_vec = builder.EndVector()

    builder.StartObject(11)
    builder.PrependUOffsetTRelativeSlot(0, output_shape_vec, 0)  # shape [1,3]
    builder.PrependInt8Slot(1, 9, 0)                             # INT8
    builder.PrependUint32Slot(2, 2, 0)                           # buffer 2
    builder.PrependUOffsetTRelativeSlot(3, output_name, 0)
    builder.PrependUOffsetTRelativeSlot(4, output_quant, 0)      # per-channel!
    output_tensor = builder.EndObject()

    builder.StartVector(4, 2, 4)
    builder.PrependUOffsetTRelative(output_tensor)
    builder.PrependUOffsetTRelative(input_tensor)
    tensors_vec = builder.EndVector()

    # The lone QUANTIZE operator: tensor 0 in, tensor 1 out.
    builder.StartVector(4, 1, 4)
    builder.PrependInt32(0)
    op_inputs = builder.EndVector()

    builder.StartVector(4, 1, 4)
    builder.PrependInt32(1)
    op_outputs = builder.EndVector()

    builder.StartObject(11)  # Operator
    builder.PrependUint32Slot(0, 0, 0)  # opcode_index = 0
    builder.PrependUOffsetTRelativeSlot(1, op_inputs, 0)
    builder.PrependUOffsetTRelativeSlot(2, op_outputs, 0)
    quantize_op = builder.EndObject()

    builder.StartVector(4, 1, 4)
    builder.PrependUOffsetTRelative(quantize_op)
    operators_vec = builder.EndVector()

    # SubGraph wiring: the same tensors serve as graph inputs/outputs.
    builder.StartVector(4, 1, 4)
    builder.PrependInt32(0)
    graph_inputs = builder.EndVector()

    builder.StartVector(4, 1, 4)
    builder.PrependInt32(1)
    graph_outputs = builder.EndVector()

    graph_name = builder.CreateString("main")

    builder.StartObject(7)  # SubGraph
    builder.PrependUOffsetTRelativeSlot(0, tensors_vec, 0)
    builder.PrependUOffsetTRelativeSlot(1, graph_inputs, 0)
    builder.PrependUOffsetTRelativeSlot(2, graph_outputs, 0)
    builder.PrependUOffsetTRelativeSlot(3, operators_vec, 0)
    builder.PrependUOffsetTRelativeSlot(4, graph_name, 0)
    subgraph = builder.EndObject()

    builder.StartVector(4, 1, 4)
    builder.PrependUOffsetTRelative(subgraph)
    subgraphs_vec = builder.EndVector()

    description = builder.CreateString("oob_quant_dim_test_model")

    # Top-level Model table.
    builder.StartObject(8)
    builder.PrependUint32Slot(0, 3, 0)  # schema version 3
    builder.PrependUOffsetTRelativeSlot(1, opcodes_vec, 0)
    builder.PrependUOffsetTRelativeSlot(2, subgraphs_vec, 0)
    builder.PrependUOffsetTRelativeSlot(3, description, 0)
    builder.PrependUOffsetTRelativeSlot(4, buffers_vec, 0)
    model = builder.EndObject()

    builder.Finish(model, b"TFL3")  # "TFL3" is the .tflite file identifier
    return bytes(builder.Output())
205+
206+
207+
if __name__ == "__main__":
    # Expect the target directory as the single CLI argument.
    if len(sys.argv) < 2:
        print(f"Usage: {sys.argv[0]} <output_directory>")
        sys.exit(1)

    out_dir = os.path.join(sys.argv[1], "oob_quant_dim")
    os.makedirs(out_dir, exist_ok=True)

    # Two malicious values for quantized_dimension on the rank-2 [1,3] tensor:
    #   100 -> axis >= rank: load() succeeds, get_quant_shape() must reject
    #          (otherwise shape[100] = 3 writes past a size-2 vector)
    #   -1  -> negative axis: get_quantization() should reject at parse time
    cases = {
        'axis_exceeds_rank.tflite': 100,
        'negative_axis.tflite': -1,
    }
    for filename, axis in cases.items():
        with open(os.path.join(out_dir, filename), 'wb') as f:
            f.write(build_tflite_with_quantization(quantized_dimension=axis))

0 commit comments

Comments
 (0)