Skip to content

Commit ff14dbc

Browse files
authored
Fix clang-tidy warnings (onnx#7392)
### Description <!-- - Describe your changes. --> This PR fixes several clang-tidy warnings. Some readability rules were not enabled due to a missing trailing comma; this PR fixes them. Two C++20 rules are also disabled. ### Motivation and Context Better code. <!-- - Why is this change required? What problem does it solve? --> <!-- - If it fixes an open issue, please link to the issue here. --> Signed-off-by: Yuanyuan Chen <cyyever@outlook.com>
1 parent 232bdbb commit ff14dbc

File tree

18 files changed

+62
-72
lines changed

18 files changed

+62
-72
lines changed

.clang-tidy

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -30,14 +30,16 @@ Checks: >-
3030
-modernize-concat-nested-namespaces,
3131
-modernize-return-braced-init-list,
3232
-modernize-use-auto,
33+
-modernize-use-integer-sign-comparison,
34+
-modernize-use-ranges,
3335
-modernize-use-trailing-return-type,
3436
-modernize-use-nodiscard,
3537
readability-container-size-empty,
3638
readability-delete-null-pointer,
37-
readability-duplicate-include
39+
readability-duplicate-include,
3840
readability-misplaced-array-index,
3941
readability-non-const-parameter,
40-
readability-redundant*
42+
readability-redundant*,
4143
readability-simplify*,
4244
readability-static-accessed-through-instance,
4345
readability-static-definition-in-anonymous-namespace,

onnx/common/ir.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -839,7 +839,7 @@ class OpSetID final {
839839
explicit OpSetID(const OperatorSetIdProto& proto) : domain_(proto.domain()), version_(proto.version()) {}
840840

841841
// Default Domain Constructor
842-
explicit OpSetID(const int64_t version) : domain_(""), version_(version) {}
842+
explicit OpSetID(const int64_t version) : version_(version) {}
843843

844844
explicit OpSetID(std::string domain, int64_t version) : domain_(std::move(domain)), version_(version) {}
845845

onnx/common/ir_pb_converter.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -112,7 +112,7 @@ static Tensor tensorProtoToTensor(const ONNX_NAMESPACE::TensorProto& tp) {
112112
}
113113

114114
for (int i = 0; i < tp.external_data_size(); i++) {
115-
ret.external_data().push_back(std::make_pair(tp.external_data(i).key(), tp.external_data(i).value()));
115+
ret.external_data().emplace_back(tp.external_data(i).key(), tp.external_data(i).value());
116116
}
117117
if (tp.has_data_location()) {
118118
ret.data_location() = tp.data_location();

onnx/defs/controlflow/old.cc

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3222,8 +3222,6 @@ values are computed in the outer graph, they need to be passed in as extra state
32223222
32233223
)DOC";
32243224

3225-
extern void ScanInferenceFunction(InferenceContext& ctx);
3226-
32273225
ONNX_OPERATOR_SET_SCHEMA(
32283226
Scan,
32293227
11,

onnx/defs/nn/defs.cc

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -101,7 +101,7 @@ ONNX_API void convPoolShapeInference(
101101
}
102102

103103
std::vector<int64_t> effective_kernel_shape = kernel_shape;
104-
for (int i = 0; i < static_cast<int>(kernel_shape.size()); i++) {
104+
for (size_t i = 0; i < kernel_shape.size(); i++) {
105105
// accounting for dilation, how big is the kernel in this dimension
106106
effective_kernel_shape[i] = (effective_kernel_shape[i] - 1) * dilations[i] + 1;
107107
}
@@ -427,8 +427,8 @@ static void maxUnpoolShapeInference(InferenceContext& ctx) {
427427
if (output_shape.dim_size() != 1) {
428428
fail_type_inference("'output_shape' must be rank 1 tensor.");
429429
}
430-
if (output_shape.dim((int)0).has_dim_value() &&
431-
static_cast<int>(output_shape.dim((int)0).dim_value()) != input_shape.dim_size()) {
430+
if (output_shape.dim(0).has_dim_value() &&
431+
static_cast<int>(output_shape.dim(0).dim_value()) != input_shape.dim_size()) {
432432
fail_shape_inference("'output_shape' must have same number of elements as the shape of input tensor X.");
433433
}
434434
}
@@ -1158,7 +1158,7 @@ ONNX_API void convTransposeShapeInference(InferenceContext& ctx) {
11581158
}
11591159

11601160
std::vector<int64_t> effective_kernel_shape = kernel_shape;
1161-
for (int i = 0; i < static_cast<int>(kernel_shape.size()); i++) {
1161+
for (size_t i = 0; i < kernel_shape.size(); i++) {
11621162
// accounting for dilation, how big is the kernel in this dimension
11631163
effective_kernel_shape[i] = (effective_kernel_shape[i] - 1) * dilations[i] + 1;
11641164
}
@@ -2359,7 +2359,6 @@ static void col2imShapeInference(InferenceContext& ctx) {
23592359
}
23602360
*final_image_shape->add_dim() = image_dim_i;
23612361
}
2362-
return;
23632362
}
23642363

23652364
static const char* Col2Im_ver18_doc = R"DOC(

onnx/defs/nn/old.cc

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -179,7 +179,7 @@ static void convPoolShapeInference_opset19(
179179
}
180180

181181
std::vector<int64_t> effective_kernel_shape = kernel_shape;
182-
for (int i = 0; i < static_cast<int>(kernel_shape.size()); i++) {
182+
for (size_t i = 0; i < kernel_shape.size(); i++) {
183183
// accounting for dilation, how big is the kernel in this dimension
184184
effective_kernel_shape[i] = (effective_kernel_shape[i] - 1) * dilations[i] + 1;
185185
}
@@ -193,8 +193,8 @@ static void convPoolShapeInference_opset19(
193193
pads.assign(n_input_dims * 2, 0);
194194
const auto* auto_pad_attr = ctx.getAttribute("auto_pad");
195195
if ((nullptr != auto_pad_attr) && (auto_pad_attr->s() != "VALID")) {
196-
int input_dims_size = static_cast<int>(n_input_dims);
197-
for (int i = 0; i < input_dims_size; ++i) {
196+
auto input_dims_size = n_input_dims;
197+
for (size_t i = 0; i < input_dims_size; ++i) {
198198
int64_t residual = 0;
199199
int64_t stride = strides[i];
200200
if (stride > 1) {
@@ -206,7 +206,7 @@ static void convPoolShapeInference_opset19(
206206
residual -= stride;
207207
}
208208
}
209-
if (i >= static_cast<int>(effective_kernel_shape.size())) {
209+
if (i >= effective_kernel_shape.size()) {
210210
fail_shape_inference("kernel shape should have ", input_dims_size, " values in ", ctx.getDisplayName(), ".");
211211
}
212212
int64_t total_pad = residual == 0 ? effective_kernel_shape[i] - stride : effective_kernel_shape[i] - residual;
@@ -505,8 +505,8 @@ static void maxUnpoolShapeInference_opset11(InferenceContext& ctx) {
505505
if (output_shape.dim_size() != 1) {
506506
fail_type_inference("'output_shape' must be rank 1 tensor.");
507507
}
508-
if (output_shape.dim(static_cast<int>(0)).has_dim_value() &&
509-
static_cast<int>(output_shape.dim(static_cast<int>(0)).dim_value()) != input_shape.dim_size()) {
508+
if (output_shape.dim(0).has_dim_value() &&
509+
static_cast<int>(output_shape.dim(0).dim_value()) != input_shape.dim_size()) {
510510
fail_shape_inference("'output_shape' must have same number of elements as the shape of input tensor X.");
511511
}
512512
}
@@ -670,7 +670,7 @@ static void convTransposeShapeInference_opset11(InferenceContext& ctx) {
670670
}
671671

672672
std::vector<int64_t> effective_kernel_shape = kernel_shape;
673-
for (int i = 0; i < static_cast<int>(kernel_shape.size()); i++) {
673+
for (size_t i = 0; i < kernel_shape.size(); i++) {
674674
// accounting for dilation, how big is the kernel in this dimension
675675
effective_kernel_shape[i] = (effective_kernel_shape[i] - 1) * dilations[i] + 1;
676676
}
@@ -2005,7 +2005,7 @@ static void convPoolShapeInference1(
20052005
}
20062006

20072007
std::vector<int64_t> effective_kernel_shape = kernel_shape;
2008-
for (int i = 0; i < static_cast<int>(kernel_shape.size()); i++) {
2008+
for (size_t i = 0; i < kernel_shape.size(); i++) {
20092009
// accounting for dilation, how big is the kernel in this dimension
20102010
effective_kernel_shape[i] = (effective_kernel_shape[i] - 1) * dilations[i] + 1;
20112011
}
@@ -2606,8 +2606,8 @@ static void maxUnpoolShapeInference1(InferenceContext& ctx) {
26062606
if (output_shape.dim_size() != 1) {
26072607
fail_type_inference("'output_shape' must be rank 1 tensor.");
26082608
}
2609-
if (output_shape.dim(static_cast<int>(0)).has_dim_value() &&
2610-
static_cast<int>(output_shape.dim(static_cast<int>(0)).dim_value()) != input_shape.dim_size()) {
2609+
if (output_shape.dim(0).has_dim_value() &&
2610+
static_cast<int>(output_shape.dim(0).dim_value()) != input_shape.dim_size()) {
26112611
fail_shape_inference("'output_shape' must have same number of elements as the shape of input tensor X.");
26122612
}
26132613
}
@@ -3016,7 +3016,7 @@ static void convTransposeShapeInference1(InferenceContext& ctx) {
30163016
}
30173017

30183018
std::vector<int64_t> effective_kernel_shape = kernel_shape;
3019-
for (int i = 0; i < static_cast<int>(kernel_shape.size()); i++) {
3019+
for (size_t i = 0; i < kernel_shape.size(); i++) {
30203020
// accounting for dilation, how big is the kernel in this dimension
30213021
effective_kernel_shape[i] = (effective_kernel_shape[i] - 1) * dilations[i] + 1;
30223022
}

onnx/defs/parser.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -569,7 +569,7 @@ Status OnnxParser::ParseSingleAttributeValue(AttributeProto& attr, AttributeProt
569569
Literal literal;
570570
PARSE_TOKEN(literal);
571571
attr.set_type(AttributeProto_AttributeType_FLOAT);
572-
attr.set_f(static_cast<float>(std::stof(literal.value)));
572+
attr.set_f(std::stof(literal.value));
573573
} else {
574574
attr.set_type(AttributeProto_AttributeType_GRAPH);
575575
PARSE(*attr.mutable_g());
@@ -591,7 +591,7 @@ Status OnnxParser::ParseSingleAttributeValue(AttributeProto& attr, AttributeProt
591591
break;
592592
case LiteralType::FLOAT_LITERAL:
593593
attr.set_type(AttributeProto_AttributeType_FLOAT);
594-
attr.set_f(static_cast<float>(std::stof(literal.value)));
594+
attr.set_f(std::stof(literal.value));
595595
break;
596596
case LiteralType::STRING_LITERAL:
597597
attr.set_type(AttributeProto_AttributeType_STRING);

onnx/defs/printer.cc

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -89,7 +89,7 @@ class ProtoPrinter {
8989
}
9090

9191
template <typename T>
92-
inline void print(const T& prim) {
92+
void print(const T& prim) {
9393
output_ << prim;
9494
}
9595

@@ -104,21 +104,21 @@ class ProtoPrinter {
104104
}
105105

106106
template <typename T>
107-
inline void printKeyValuePair(KeyWordMap::KeyWord key, const T& val, bool addsep = true) {
107+
void printKeyValuePair(KeyWordMap::KeyWord key, const T& val, bool addsep = true) {
108108
if (addsep)
109109
output_ << "," << '\n';
110110
output_ << std::setw(indent_level) << ' ' << KeyWordMap::ToString(key) << ": ";
111111
print(val);
112112
}
113113

114-
inline void printKeyValuePair(KeyWordMap::KeyWord key, const std::string& val) {
114+
void printKeyValuePair(KeyWordMap::KeyWord key, const std::string& val) {
115115
output_ << "," << '\n';
116116
output_ << std::setw(indent_level) << ' ' << KeyWordMap::ToString(key) << ": ";
117117
printQuoted(val);
118118
}
119119

120120
template <typename Collection>
121-
inline void printSet(const char* open, const char* separator, const char* close, const Collection& coll) {
121+
void printSet(const char* open, const char* separator, const char* close, const Collection& coll) {
122122
const char* sep = "";
123123
output_ << open;
124124
for (auto& elt : coll) {
@@ -130,7 +130,7 @@ class ProtoPrinter {
130130
}
131131

132132
template <typename Collection>
133-
inline void printIdSet(const char* open, const char* separator, const char* close, const Collection& coll) {
133+
void printIdSet(const char* open, const char* separator, const char* close, const Collection& coll) {
134134
const char* sep = "";
135135
output_ << open;
136136
for (auto& elt : coll) {

onnx/defs/schema.cc

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -28,8 +28,6 @@ namespace ONNX_NAMESPACE {
2828
// Other positive integer means the ONNX schemas for the specified version have been loaded
2929
int OpSchemaRegistry::loaded_schema_version = -1;
3030

31-
constexpr int OpSchema::kUninitializedSinceVersion;
32-
3331
// By default if opset_version_to_load=0, it registers all opset schema for all opset versions
3432
// Otherwise, it only registers the latest schema according to opset_version_to_load
3533
void RegisterSchema(

onnx/defs/schema.h

Lines changed: 5 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -427,11 +427,7 @@ class OpSchema final {
427427

428428
struct Attribute final {
429429
Attribute(std::string name_, std::string description_, AttributeProto::AttributeType type_, bool required_)
430-
: name(std::move(name_)),
431-
description(std::move(description_)),
432-
type(type_),
433-
required(required_),
434-
default_value() {}
430+
: name(std::move(name_)), description(std::move(description_)), type(type_), required(required_) {}
435431

436432
Attribute(std::string name_, std::string description_, AttributeProto default_value_)
437433
: name(std::move(name_)),
@@ -729,11 +725,11 @@ class OpSchema final {
729725
}
730726

731727
ONNX_API bool has_type_and_shape_inference_function() const {
732-
return tensor_inference_function_ ? true : false;
728+
return static_cast<bool>(tensor_inference_function_);
733729
}
734730

735731
ONNX_API bool has_data_propagation_function() const {
736-
return data_propagation_function_ ? true : false;
732+
return static_cast<bool>(data_propagation_function_);
737733
}
738734

739735
ONNX_API std::vector<int> function_opset_versions() const {
@@ -852,7 +848,7 @@ class OpSchema final {
852848
std::string doc_;
853849
// Default domain value ("") means it's ONNX domain.
854850
std::string domain_ = ONNX_DOMAIN;
855-
std::unordered_map<std::string, Attribute> attributes_{};
851+
std::unordered_map<std::string, Attribute> attributes_;
856852
bool allows_unchecked_attributes_ = false;
857853
std::vector<FormalParameter> inputs_;
858854
std::vector<FormalParameter> outputs_;
@@ -1091,7 +1087,7 @@ class OpSchemaRegistry final : public ISchemaRegistry {
10911087
}
10921088
auto lower_bound_incl = ver_range_it->second.first;
10931089
auto upper_bound_incl = ver_range_it->second.second;
1094-
if (!(lower_bound_incl <= ver && upper_bound_incl >= ver)) {
1090+
if (lower_bound_incl > ver || upper_bound_incl < ver) {
10951091
std::stringstream err;
10961092
err << "Trying to register schema with name " << op_name << " (domain: " << op_domain << " version: " << ver
10971093
<< ") from file " << op_schema.file() << " line " << op_schema.line() << ", but its version is not "
@@ -1349,9 +1345,6 @@ size_t ReplaceAll(std::string& s, const char* from, const char* to);
13491345
static ONNX_NAMESPACE::OpSchemaRegistry::OpSchemaRegisterOnce(op_schema_register_once##name##Counter) ONNX_UNUSED = \
13501346
OpSchema(#name, __FILE__, __LINE__)
13511347

1352-
// Helper function
1353-
size_t ReplaceAll(std::string& s, const char* from, const char* to);
1354-
13551348
ONNX_API inline std::string GenerateOptionalArgumentsDoc() {
13561349
return "This operator has **optional** inputs/outputs. "
13571350
"See [the doc](IR.md) for more details about the representation of "

0 commit comments

Comments
 (0)