
Commit c75681a

Address CodeQL security issues on comparison of different types (#23276)
### Description

Fix comparisons of a narrow type with a wide type in loop conditions.

### Motivation and Context

Comparing a narrow loop counter against a wider bound can cause the loop to fail to terminate: the counter wraps (or, for signed types, overflows with undefined behavior) before it can ever reach the bound.
Parent: d8e8d4f
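For readers unfamiliar with the CodeQL rule, here is a minimal standalone sketch of the hazard; the names are illustrative, not code from the patch:

```cpp
#include <cstdint>

// Hypothetical illustration of the finding; not code from this commit.
// `i` is a 32-bit int but `count` is 64-bit. The comparison promotes `i`
// to int64_t, so if count > INT_MAX the condition stays true until `i`
// overflows -- undefined behavior for signed int; in practice the counter
// wraps negative and the loop never terminates.
void process_bad(int64_t count) {
  for (int i = 0; i < count; ++i) {
    // ... work ...
  }
}

// The fix applied throughout this commit: match the counter to the bound.
void process_fixed(int64_t count) {
  for (int64_t i = 0; i < count; ++i) {
    // ... work ...
  }
}
```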

File tree: 5 files changed, +16 −16 lines


onnxruntime/core/framework/transpose_helper.cc

Lines changed: 4 additions & 4 deletions
```diff
@@ -27,7 +27,7 @@ typename std::enable_if<!has_mlas_transpose<T>::value, void>::type SimpleTranspo
   for (int64_t l = 0; l < num_loops; ++l) {
     T* output_for_first_writer = output_data;
 
-    for (auto wwpl = 0; wwpl < writes_per_writer_per_loop; ++wwpl) {
+    for (int64_t wwpl = 0; wwpl < writes_per_writer_per_loop; ++wwpl) {
       T* output_for_current_writer = output_for_first_writer;
 
       end = input_data + num_writers;
@@ -130,7 +130,7 @@ typename std::enable_if<!has_mlas_transpose<T>::value, void>::type SimpleTranspo
   for (int64_t l = 0; l < num_loops; ++l) {
     const T* input_for_first_reader = input_data;
 
-    for (auto rrpl = 0; rrpl < reads_per_reader_per_loop; ++rrpl) {
+    for (int64_t rrpl = 0; rrpl < reads_per_reader_per_loop; ++rrpl) {
       const T* input_for_current_reader = input_for_first_reader;
 
       end = output_data + num_readers;
@@ -210,7 +210,7 @@ void TransposeSingleAxisInwards(gsl::span<const size_t> permutations, const Tens
   for (int64_t l = 0; l < num_loops; ++l) {
     const uint8_t* input_for_first_reader = input_data;
 
-    for (auto rrpl = 0; rrpl < reads_per_reader_per_loop; ++rrpl) {
+    for (int64_t rrpl = 0; rrpl < reads_per_reader_per_loop; ++rrpl) {
       const uint8_t* input_for_current_reader = input_for_first_reader;
 
       for (int64_t r = 0; r < num_readers; ++r) {
@@ -309,4 +309,4 @@ bool IsTransposeMovingSingleAxis(gsl::span<const size_t> permutations, size_t& f
   return single_axis_moved;
 }
 
-} // namespace onnxruntime
+} // namespace onnxruntime
```
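The subtlety in the first three hunks is that `auto wwpl = 0` deduces `int` from the literal `0`, not the `int64_t` of the bound. A small self-contained sketch of the deduction (variable names are illustrative):

```cpp
#include <cstdint>
#include <type_traits>

int main() {
  int64_t writes_per_writer_per_loop = 10;  // wide bound, as in the patch
  auto wwpl = 0;                            // deduced as int, not int64_t
  static_assert(std::is_same_v<decltype(wwpl), int>);

  auto wide = int64_t{0};                   // forces the wide type explicitly
  static_assert(std::is_same_v<decltype(wide), int64_t>);
  (void)writes_per_writer_per_loop;
  (void)wwpl;
  (void)wide;
  return 0;
}
```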

onnxruntime/core/graph/contrib_ops/contrib_defs.cc

Lines changed: 4 additions & 4 deletions
```diff
@@ -2890,15 +2890,15 @@ void RegisterContribSchemas() {
         if (ctx.getNumOutputs() > 1) {
           auto saved_mean_shape = ctx.getOutputType(1)->mutable_tensor_type()->mutable_shape();
           saved_mean_shape->CopyFrom(input_shape);
-          for (int d = static_cast<int>(axis); d < input_ndim; ++d)
-            saved_mean_shape->mutable_dim(d)->set_dim_value(1);
+          for (int64_t d = axis; d < input_ndim; ++d)
+            saved_mean_shape->mutable_dim(static_cast<int>(d))->set_dim_value(1);
         }
 
         if (ctx.getNumOutputs() > 2) {
           auto saved_inv_std_dev_shape = ctx.getOutputType(2)->mutable_tensor_type()->mutable_shape();
           saved_inv_std_dev_shape->CopyFrom(input_shape);
-          for (int d = static_cast<int>(axis); d < input_ndim; ++d)
-            saved_inv_std_dev_shape->mutable_dim(d)->set_dim_value(1);
+          for (int64_t d = axis; d < input_ndim; ++d)
+            saved_inv_std_dev_shape->mutable_dim(static_cast<int>(d))->set_dim_value(1);
         }
       })
       .SetContextDependentFunctionBodyBuilder(
```
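This file uses the inverse pattern: iterate with the wide type and push the narrowing `static_cast<int>` down to the call site, since the protobuf-generated `mutable_dim()` accepts an `int` index. A simplified sketch of the idea; the `Shape` type below is a stand-in, not the ONNX proto:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Stand-in for a protobuf-style shape whose accessor is int-indexed.
struct Shape {
  std::vector<int64_t> dims;
  int64_t* mutable_dim(int d) { return &dims[static_cast<std::size_t>(d)]; }
};

// Loop in int64_t so the condition compares equal widths; narrow only
// where the API forces an int.
void set_ones_from_axis(Shape& shape, int64_t axis, int64_t ndim) {
  for (int64_t d = axis; d < ndim; ++d)
    *shape.mutable_dim(static_cast<int>(d)) = 1;
}
```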

onnxruntime/core/graph/graph_utils.cc

Lines changed: 4 additions & 4 deletions
```diff
@@ -869,13 +869,13 @@ bool RemoveNodesWithOneOutputBottomUp(Graph& graph, const Node& start_node) {
     }
 
     // push the parents of current node to the queue.
-    for (unsigned int i = 0; i < cur_node.InputDefs().size(); ++i) {
-      const std::string& input_name = GetNodeInputName(cur_node, i);
-      if (IsInitializer(graph, input_name, true) || IsGraphInput(graph, cur_node.InputDefs()[i])) {
+    for (size_t i = 0; i < cur_node.InputDefs().size(); ++i) {
+      const std::string& input_name = GetNodeInputName(cur_node, static_cast<int>(i));
+      if (IsInitializer(graph, input_name, true) || IsGraphInput(graph, cur_node.InputDefs()[static_cast<int>(i)])) {
         // skip initializers and graph inputs
         continue;
       }
-      const Node* parent_node = GetInputNode(cur_node, i);
+      const Node* parent_node = GetInputNode(cur_node, static_cast<int>(i));
       if (nullptr == parent_node) {
         continue;
       }
```
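Here the counter was `unsigned int` against a `size_t` bound; on 64-bit platforms a container larger than UINT_MAX would make the counter wrap to 0 (well-defined for unsigned types, so the loop silently never terminates). A minimal sketch of the rule as applied, with illustrative stand-ins for the graph helpers:

```cpp
#include <cstddef>
#include <string>
#include <vector>

// Illustrative stand-in for an int-indexed helper like GetNodeInputName.
std::string input_name_at(const std::vector<std::string>& defs, int i) {
  return defs[static_cast<std::size_t>(i)];
}

void visit_inputs(const std::vector<std::string>& input_defs) {
  // size_t counter matches the size_t bound from size(); narrow to int
  // only at the APIs that require it.
  for (std::size_t i = 0; i < input_defs.size(); ++i) {
    std::string name = input_name_at(input_defs, static_cast<int>(i));
    (void)name;
  }
}
```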

onnxruntime/core/optimizer/attention_fusion_helper.h

Lines changed: 2 additions & 2 deletions
```diff
@@ -281,8 +281,8 @@ bool ValidateUnidirMask(std::vector<T> mask_data, int64_t w, bool& is_undirectio
   is_undirectional = true;
 
   const T* p = mask_data.data();
-  for (int i = 0; i < w; i++) {
-    for (int j = 0; j < w; j++) {
+  for (int64_t i = 0; i < w; i++) {
+    for (int64_t j = 0; j < w; j++) {
       if (*p != static_cast<T>(1)) {
         is_one = false;
       }
```
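For context, this loop scans a flattened w×w attention mask to classify it as all-ones or unidirectional (causal). A simplified sketch of such a check with the widened indices; the row-major layout and "1 = attend" convention are assumptions for illustration, not the exact ORT logic:

```cpp
#include <cstdint>
#include <vector>

// Simplified causal-mask check; assumes row-major w*w data where 1 means
// "attend". Not the exact ValidateUnidirMask implementation.
template <typename T>
bool IsLowerTriangularMask(const std::vector<T>& mask_data, int64_t w) {
  const T* p = mask_data.data();
  for (int64_t i = 0; i < w; i++) {    // int64_t matches the width of w
    for (int64_t j = 0; j < w; j++) {
      const T expected = (j <= i) ? static_cast<T>(1) : static_cast<T>(0);
      if (*p++ != expected) return false;
    }
  }
  return true;
}
```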

onnxruntime/core/util/qmath.h

Lines changed: 2 additions & 2 deletions
```diff
@@ -64,7 +64,7 @@ void GetQuantizationParameter(const float* data, int64_t num_of_elements, float&
     block_size = onnxruntime::narrow<std::ptrdiff_t>(num_of_elements);
   }
 
-  for (int i = 0; i < num_blocks; i++) {
+  for (int i = 0; i < narrow<int>(num_blocks); i++) {
     aggregate[i].min = std::numeric_limits<float>::max();
     aggregate[i].max = std::numeric_limits<float>::lowest();
   }
@@ -79,7 +79,7 @@ void GetQuantizationParameter(const float* data, int64_t num_of_elements, float&
 
   float& min = aggregate[0].min;
   float& max = aggregate[0].max;
-  for (int i = 1; i < num_blocks; i++) {
+  for (int i = 1; i < narrow<int>(num_blocks); i++) {
     min = std::min(min, aggregate[i].min);
     max = std::max(max, aggregate[i].max);
  }
```
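This file takes a third approach: instead of widening the counter, it clamps the bound through `narrow<int>`, a checked cast in the spirit of `gsl::narrow` that fails loudly if the value does not fit. A sketch of what such a checked narrowing looks like; the throwing error path is an assumption for illustration, and ORT's `onnxruntime::narrow` may handle failure differently:

```cpp
#include <cstdint>
#include <stdexcept>

// gsl::narrow-style checked cast (simplified: same-signedness types only).
template <typename To, typename From>
To checked_narrow(From value) {
  To result = static_cast<To>(value);
  if (static_cast<From>(result) != value)  // round-trip detects lost bits
    throw std::runtime_error("narrowing conversion lost information");
  return result;
}

int main() {
  int64_t num_blocks = 42;
  const int n = checked_narrow<int>(num_blocks);  // fails if > INT_MAX
  for (int i = 0; i < n; i++) {
    // int-vs-int comparison; no mixed-width hazard
  }
  return 0;
}
```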
