Fix clang-tidy warnings (onnx#6378)
### Description
This PR fixes more clang-tidy warnings and adds more checks.
### Motivation and Context
Cleaner, more uniform C++: the newly enabled checks enforce the modern idioms (make_unique/make_shared, nullptr, pass-by-value with std::move, defaulted special members, override) that the rest of this diff applies.

Signed-off-by: cyy <[email protected]>
cyyever authored Sep 21, 2024
1 parent 8f38dcb commit 0649a3c
Showing 21 changed files with 70 additions and 70 deletions.
8 changes: 8 additions & 0 deletions .clang-tidy
@@ -17,6 +17,7 @@ Checks: >-
clang-analyzer-alpha.security.*,
cppcoreguidelines-avoid-goto,
cppcoreguidelines-interfaces-global-init,
cppcoreguidelines-init-variables,
cppcoreguidelines-no-malloc,
cppcoreguidelines-prefer-member-initializer,
cppcoreguidelines-pro-type-member-init,
@@ -33,6 +34,13 @@
google-default-arguments,
google-global-names-in-headers,
google-explicit-constructor,
modernize-make-shared,
modernize-make-unique,
modernize-pass-by-value,
modernize-use-equals-default,
modernize-use-equals-delete,
modernize-use-nullptr,
modernize-use-override,
modernize-use-emplace
CheckOptions:
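For context, a minimal sketch (not taken from this diff; `Widget` is a hypothetical type) of the patterns two of the newly enabled checks flag, and the fix-its they suggest:

```cpp
#include <cstddef>
#include <memory>
#include <string>
#include <utility>

// Hypothetical type, used only to illustrate the checks.
struct Widget {
  explicit Widget(std::string n) : name(std::move(n)) {}
  std::string name;
};

// Before: the patterns the new checks flag.
std::unique_ptr<Widget> before(Widget* maybe) {
  if (maybe == NULL) {                                // modernize-use-nullptr
    return std::unique_ptr<Widget>(new Widget("w"));  // modernize-make-unique
  }
  return nullptr;
}

// After: the form clang-tidy's fix-its produce.
std::unique_ptr<Widget> after(Widget* maybe) {
  if (maybe == nullptr) {
    return std::make_unique<Widget>("w");
  }
  return nullptr;
}
```
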
6 changes: 3 additions & 3 deletions onnx/checker.h
@@ -10,11 +10,8 @@
#include <unordered_set>
#include <utility>

#include "onnx/defs/function.h"
#include "onnx/defs/schema.h"
#include "onnx/onnx-data_pb.h"
#include "onnx/onnx-operators_pb.h"
#include "onnx/onnx_pb.h"
#include "onnx/string_utils.h"

namespace ONNX_NAMESPACE {
@@ -107,6 +104,7 @@ class CheckerContext final {
class LexicalScopeContext final {
public:
LexicalScopeContext() = default;
~LexicalScopeContext() = default;

// Construct an instance with the lexical scope from the parent graph to allow
// lookup of names from that scope via this_or_ancestor_graph_has.
@@ -119,6 +117,8 @@ class LexicalScopeContext final {
parent_context_ = &parent_context;
return *this;
}
LexicalScopeContext(LexicalScopeContext&&) = delete;
LexicalScopeContext& operator=(LexicalScopeContext&&) = delete;

void add(const std::string& name) {
output_names.insert(name);
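The LexicalScopeContext hunk rounds out the special member functions: the destructor is defaulted and the moves are explicitly deleted, presumably so a moved-from context can never be left behind as someone's parent_context_. A sketch of the same shape, with a hypothetical `Scope` class (copyable to rebind a parent, deliberately not movable):

```cpp
#include <string>
#include <unordered_set>

class Scope {
 public:
  Scope() = default;
  ~Scope() = default;

  // Copying is allowed and rebinds the parent pointer explicitly,
  // mirroring the LexicalScopeContext copy operations above.
  explicit Scope(const Scope& parent) : parent_(&parent) {}
  Scope& operator=(const Scope& parent) {
    names_.clear();
    parent_ = &parent;
    return *this;
  }

  // Moves are deleted: a moved-from Scope could otherwise dangle
  // as the parent of a child scope.
  Scope(Scope&&) = delete;
  Scope& operator=(Scope&&) = delete;

  void add(const std::string& name) { names_.insert(name); }

 private:
  std::unordered_set<std::string> names_;
  const Scope* parent_ = nullptr;
};
```
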
18 changes: 9 additions & 9 deletions onnx/common/ir.h
@@ -132,10 +132,10 @@ struct ScalarAttributeValue final : public AttributeValue {
ValueType& value() {
return value_;
}
virtual Ptr clone() const override {
Ptr clone() const override {
return std::make_unique<ScalarAttributeValue>(name, value_);
}
virtual AttributeKind kind() const override {
AttributeKind kind() const override {
return Kind;
}

@@ -147,14 +147,14 @@ template <typename T, AttributeKind Kind>
struct VectorAttributeValue final : public AttributeValue {
using ConstructorType = const std::vector<T>&&;
using ValueType = std::vector<T>;
VectorAttributeValue(Symbol name, ConstructorType value_) : AttributeValue(name), value_(std::move(value_)) {}
VectorAttributeValue(Symbol name, ValueType value_) : AttributeValue(name), value_(std::move(value_)) {}
ValueType& value() {
return value_;
}
virtual AttributeKind kind() const override {
AttributeKind kind() const override {
return Kind;
}
virtual std::unique_ptr<AttributeValue> clone() const override {
std::unique_ptr<AttributeValue> clone() const override {
return std::make_unique<VectorAttributeValue>(name, ValueType(value_));
}

@@ -181,7 +181,7 @@ using TypeProtosAttr = VectorAttributeValue<TypeProto, AttributeKind::tps>;
// we return Derived* pointers because Nodes are normally held as pointers.
template <typename Derived>
struct Attributes {
Attributes() {}
Attributes() = default;
void copyAttributes(const Attributes& rhs) {
values_.clear();
values_.reserve(rhs.values_.size());
@@ -375,7 +375,7 @@ struct Value final {
Graph* owningGraph();
const Graph* owningGraph() const;
// TODO: make this more const correct
const use_list uses() const;
use_list uses() const;

// Replaces all uses of this node with 'newValue'.
//
@@ -839,7 +839,7 @@ class OpSetID final {
// Default Domain Constructor
explicit OpSetID(const int64_t version) : domain_(""), version_(version) {}

explicit OpSetID(const std::string& domain, int64_t version) : domain_(domain), version_(version) {}
explicit OpSetID(std::string domain, int64_t version) : domain_(std::move(domain)), version_(version) {}

// target must be in the form "<domain>&<version>"
std::string toString() const {
@@ -1425,7 +1425,7 @@ inline const_graph_node_list_iterator Node::reverseIterator() const {
// nodes in subgraph are also included.
// This method is usually used to check whether it is
// safe to delete a Value.
inline const use_list Value::uses() const {
inline use_list Value::uses() const {
use_list all_uses = uses_in_current_graph_;
owningGraph()->forEachNode([this, &all_uses](const Node* node) {
if (node->owningGraph() == this->owningGraph()) {
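The OpSetID constructor change is the standard modernize-pass-by-value refactor: take the sink argument by value and std::move it into the member, so rvalues cost one move instead of a copy. A minimal sketch with a hypothetical `Id` class:

```cpp
#include <cstdint>
#include <string>
#include <utility>

class Id {
 public:
  // Before: Id(const std::string& domain, int64_t version)
  //             : domain_(domain), version_(version) {}   // always copies
  // After: by-value sink parameter plus std::move, so temporaries and
  // moved lvalues are moved all the way into the member.
  explicit Id(std::string domain, int64_t version)
      : domain_(std::move(domain)), version_(version) {}

 private:
  std::string domain_;
  int64_t version_;
};

int main() {
  Id id("ai.onnx", 21);  // the temporary string is moved into domain_, not copied
  (void)id;
  return 0;
}
```
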
2 changes: 1 addition & 1 deletion onnx/common/ir_pb_converter.cc
@@ -227,7 +227,7 @@ void createDummyValue(
}

std::unique_ptr<Graph> graphProtoToGraph(const ONNX_NAMESPACE::GraphProto& gp, bool nested, const int ir_version) {
std::unique_ptr<Graph> g(new Graph());
auto g = std::make_unique<Graph>();

if (gp.has_name()) {
g->setName(gp.name());
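A sketch of why modernize-make-unique prefers this form (`Graph` reduced to an empty stand-in):

```cpp
#include <memory>

struct Graph {};  // stand-in for the onnx Graph class

std::unique_ptr<Graph> build() {
  // Before: std::unique_ptr<Graph> g(new Graph());
  // make_unique avoids a naked `new`, names the type once, and cannot
  // leak if another argument in the same full-expression throws before
  // the pointer is owned.
  auto g = std::make_unique<Graph>();
  return g;
}
```
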
2 changes: 1 addition & 1 deletion onnx/common/model_helpers.cc
@@ -14,7 +14,7 @@ Common::Status BuildNode(
std::vector<std::string> const& inputs,
std::vector<std::string> const& outputs,
NodeProto* node) {
if (node == NULL) {
if (node == nullptr) {
return Common::Status(Common::CHECKER, Common::INVALID_ARGUMENT, "node_proto should not be nullptr.");
}
node->set_name(name);
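modernize-use-nullptr is about more than spelling: NULL is an integer constant, so it can pick the wrong overload where nullptr resolves correctly. A self-contained illustration with hypothetical overloads:

```cpp
#include <cstdio>

// Hypothetical overload pair showing what the check guards against.
static void take(int) { std::puts("take(int)"); }
static void take(const char*) { std::puts("take(const char*)"); }

int main() {
  // take(NULL);  // NULL is 0 or 0L: picks take(int) or is ambiguous
  take(nullptr);  // nullptr has its own type: always the pointer overload
  return 0;
}
```
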
6 changes: 4 additions & 2 deletions onnx/common/status.cc
@@ -8,20 +8,22 @@

#include <assert.h>

#include <memory>

#include "onnx/string_utils.h"

namespace ONNX_NAMESPACE {
namespace Common {

Status::Status(StatusCategory category, int code, const std::string& msg) {
assert(static_cast<int>(StatusCode::OK) != code);
state_.reset(new State(category, code, msg));
state_ = std::make_unique<State>(category, code, msg);
}

Status::Status(StatusCategory category, int code) : Status(category, code, EmptyString()) {}

bool Status::IsOK() const noexcept {
return (state_ == NULL);
return (state_ == nullptr);
}

StatusCategory Status::Category() const noexcept {
2 changes: 1 addition & 1 deletion onnx/common/status.h
@@ -44,7 +44,7 @@ class Status {
if (nullptr == other.state_) {
state_.reset();
} else if (state_ != other.state_) {
state_.reset(new State(*other.state_));
state_ = std::make_unique<State>(*other.state_);
}
}
}
16 changes: 6 additions & 10 deletions onnx/defs/data_type_utils.cc
@@ -186,8 +186,7 @@ void DataTypeUtils::FromString(const std::string& type_str, TypeProto& type_prot
s.LStrip(key_size);
s.LStrip(",");
StringRange v(s.Data(), s.Size());
int32_t key_type;
FromDataTypeString(key, key_type);
auto key_type = FromDataTypeString(key);
type_proto.mutable_map_type()->set_key_type(key_type);
return FromString(std::string(v.Data(), v.Size()), *type_proto.mutable_map_type()->mutable_value_type());
} else
@@ -211,18 +210,15 @@ void DataTypeUtils::FromString(const std::string& type_str, TypeProto& type_prot
#endif
if (s.LStrip("sparse_tensor")) {
s.ParensWhitespaceStrip();
int32_t e;
FromDataTypeString(std::string(s.Data(), s.Size()), e);
auto e = FromDataTypeString(std::string(s.Data(), s.Size()));
type_proto.mutable_sparse_tensor_type()->set_elem_type(e);
} else if (s.LStrip("tensor")) {
s.ParensWhitespaceStrip();
int32_t e;
FromDataTypeString(std::string(s.Data(), s.Size()), e);
auto e = FromDataTypeString(std::string(s.Data(), s.Size()));
type_proto.mutable_tensor_type()->set_elem_type(e);
} else {
// Scalar
int32_t e;
FromDataTypeString(std::string(s.Data(), s.Size()), e);
auto e = FromDataTypeString(std::string(s.Data(), s.Size()));
TypeProto::Tensor* t = type_proto.mutable_tensor_type();
t->set_elem_type(e);
// Call mutable_shape() to initialize a shape with no dimension.
@@ -236,14 +232,14 @@ bool DataTypeUtils::IsValidDataTypeString(const std::string& type_str) {
return (allowedSet.find(type_str) != allowedSet.end());
}

void DataTypeUtils::FromDataTypeString(const std::string& type_str, int32_t& tensor_data_type) {
int32_t DataTypeUtils::FromDataTypeString(const std::string& type_str) {
if (!IsValidDataTypeString(type_str)) {
ONNX_THROW_EX(std::invalid_argument(
"DataTypeUtils::FromDataTypeString - Received invalid data type string '" + type_str + "'."));
}

TypesWrapper& t = TypesWrapper::GetTypesWrapper();
tensor_data_type = t.TypeStrToTensorDataType()[type_str];
return t.TypeStrToTensorDataType()[type_str];
}

StringRange::StringRange() : data_(""), size_(0), start_(data_), end_(data_) {}
2 changes: 1 addition & 1 deletion onnx/defs/data_type_utils.h
@@ -52,7 +52,7 @@ class DataTypeUtils final {
private:
static void FromString(const std::string& type_str, TypeProto& type_proto);

static void FromDataTypeString(const std::string& type_str, int32_t& tensor_data_type);
static int32_t FromDataTypeString(const std::string& type_str);

static std::string ToString(const TypeProto& type_proto, const std::string& left = "", const std::string& right = "");

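Returning the value instead of writing through an out-parameter also removes the uninitialized `int32_t e;` locals at every FromDataTypeString call site, which is exactly what cppcoreguidelines-init-variables complains about. A sketch of the refactor's shape, with a hypothetical lookup table standing in for TypeStrToTensorDataType():

```cpp
#include <cstdint>
#include <stdexcept>
#include <string>
#include <unordered_map>

// Hypothetical table; the real mapping lives in TypesWrapper.
static const std::unordered_map<std::string, int32_t> kTypeMap = {
    {"float", 1}, {"int64", 7}};

// Before: void FromTypeString(const std::string& s, int32_t& out);
// every caller declared `int32_t e;` uninitialized before the call.
// After: return the value, so the uninitialized local disappears.
int32_t FromTypeString(const std::string& s) {
  const auto it = kTypeMap.find(s);
  if (it == kTypeMap.end()) {
    throw std::invalid_argument("unknown type string: " + s);
  }
  return it->second;
}

// Usage: auto e = FromTypeString("float");
```
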
2 changes: 1 addition & 1 deletion onnx/defs/function.h
@@ -29,7 +29,7 @@ class FunctionBodyHelper {

AttributeProtoWrapper() = default;

AttributeProtoWrapper(const AttributeProto& attr_prot) : proto(attr_prot) {}
AttributeProtoWrapper(AttributeProto attr_prot) : proto(std::move(attr_prot)) {}

template <typename T>
AttributeProtoWrapper(const std::string& attr_name, const T& value) : proto(MakeAttribute(attr_name, value)) {}
4 changes: 2 additions & 2 deletions onnx/defs/math/old.cc
@@ -3579,8 +3579,8 @@ ONNX_OPERATOR_SET_SCHEMA(

// Check for compatible matrix multiply dimensions
{
auto dimL = shapeL.dim(shapeL.dim_size() - 1);
auto dimR = shapeR.dim(shapeR.dim_size() - 2);
auto const& dimL = shapeL.dim(shapeL.dim_size() - 1);
auto const& dimR = shapeR.dim(shapeR.dim_size() - 2);
if (dimL.has_dim_value() && dimR.has_dim_value() && dimL.dim_value() != dimR.dim_value()) {
fail_shape_inference("Incompatible dimensions for matrix multiplication");
;
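The dimL/dimR change is easy to miss: plain `auto` deduces a value type, so the old code copied the dimension message on every call, while `auto const&` binds by reference. A minimal illustration with a hypothetical Dim/Shape pair:

```cpp
#include <vector>

struct Dim {  // hypothetical stand-in for TensorShapeProto::Dimension
  long long value = 0;
};

struct Shape {
  std::vector<Dim> dims;
  const Dim& dim(int i) const { return dims[i]; }
};

void check(const Shape& s) {
  auto d0 = s.dim(0);         // `auto` deduces Dim: silently copies the message
  const auto& d1 = s.dim(1);  // binds to the stored Dim, no copy
  (void)d0;
  (void)d1;
}
```
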
2 changes: 1 addition & 1 deletion onnx/defs/nn/defs.cc
@@ -1215,7 +1215,7 @@ void convTransposeShapeInference(InferenceContext& ctx) {
ctx.getInputType(1)->tensor_type().shape().dim(1) * group; // channels should be the second dim of second input
// multiply group.

int size_of_output;
int size_of_output = 0;
if (output_shape_presented) {
size_of_output = static_cast<int>(output_shape.size());
for (int i = 0; i < size_of_output; ++i) {
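The `= 0` initializers in this and the following files address cppcoreguidelines-init-variables: a local that is only assigned on some paths makes any later read undefined behavior, and an initializer at the declaration closes that window. A distilled example:

```cpp
int risky(bool present) {
  int size;      // flagged by cppcoreguidelines-init-variables
  if (present) {
    size = 4;
  }
  return size;   // undefined behavior when present == false
}

int safe(bool present) {
  int size = 0;  // initialized at declaration: every read is well-defined
  if (present) {
    size = 4;
  }
  return size;
}
```
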
8 changes: 4 additions & 4 deletions onnx/defs/nn/old.cc
@@ -251,7 +251,7 @@ void convPoolShapeInference_opset19(

// how many times we can move the kernel from it's initial position, based
// on the stride
int64_t strided_kernel_positions;
int64_t strided_kernel_positions = 0;

if (ceil_mode == 1)
strided_kernel_positions =
@@ -726,7 +726,7 @@ void convTransposeShapeInference_opset11(InferenceContext& ctx) {
ctx.getInputType(1)->tensor_type().shape().dim(1) * group; // channels should be the second dim of second input
// multiply group.

int size_of_output;
int size_of_output = 0;
if (output_shape_presented) {
size_of_output = static_cast<int>(output_shape.size());
for (int i = 0; i < size_of_output; ++i) {
@@ -2013,7 +2013,7 @@ void convPoolShapeInference1(

// how many times we can move the kernel from it's initial position, based
// on the stride
int64_t strided_kernel_positions;
int64_t strided_kernel_positions = 0;

if (ceil_mode == 1)
strided_kernel_positions =
@@ -3004,7 +3004,7 @@ void convTransposeShapeInference1(InferenceContext& ctx) {
ctx.getInputType(1)->tensor_type().shape().dim(1) * group; // channels should be the second dim of second input
// multiply group.

int size_of_output;
int size_of_output = 0;
if (output_shape_presented) {
size_of_output = static_cast<int>(output_shape.size());
for (int i = 0; i < size_of_output; ++i) {
4 changes: 2 additions & 2 deletions onnx/defs/parser.cc
@@ -444,7 +444,7 @@ Status OnnxParser::Parse(TensorProto& tensorProto, const TypeProto& tensorTypePr
// tensorProto.mutable_int64_data()->Reserve(n);
// Parse the actual values:

int64_t intval;
int64_t intval = 0;
uint64_t uintval = 0;
float floatval = 0.0;
double dblval = 0.0;
@@ -833,7 +833,7 @@ Status OnnxParser::Parse(OpsetIdList& opsets) {
Status OnnxParser::Parse(ModelProto& model) {
model.Clear();
std::string strval;
int64_t intval;
int64_t intval = 0;
if (Matches('<')) {
do {
KeyWordMap::KeyWord keyword = KeyWordMap::KeyWord::NONE;
2 changes: 0 additions & 2 deletions onnx/defs/parser.h
@@ -9,8 +9,6 @@

#include <ctype.h>

#include <iostream>
#include <stdexcept>
#include <string>
#include <unordered_map>

1 change: 0 additions & 1 deletion onnx/defs/printer.cc
@@ -5,7 +5,6 @@
#include "onnx/defs/printer.h"

#include <iomanip>
#include <vector>

#include "onnx/defs/tensor_proto_util.h"

12 changes: 6 additions & 6 deletions onnx/defs/schema.cc
@@ -247,7 +247,7 @@ void OpSchema::Verify(const NodeProto& node) const {
};

const auto& search = attributes_.find(name);
AttributeProto::AttributeType expected_type;
AttributeProto::AttributeType expected_type{};
if (search != attributes_.end()) {
expected_type = search->second.type;
} else if (allows_unchecked_attributes_ || isInternalSymbol(name)) {
@@ -802,7 +802,7 @@ OpSchema& OpSchema::FunctionBody(const char* func_body, int opset_version) {
if (opset_version == OpSchema::kUninitializedSinceVersion && since_version_ != OpSchema::kUninitializedSinceVersion) {
opset_version = since_version_;
}
std::shared_ptr<FunctionProto> function_proto(new FunctionProto());
auto function_proto = std::make_shared<FunctionProto>();
OnnxParser parser(func_body);
auto status = parser.Parse(*function_proto->mutable_node());
if (!status.IsOK())
@@ -822,7 +822,7 @@ OpSchema& OpSchema::FunctionBody(const std::vector<NodeProto>& func_nodes, int o
if (opset_version == OpSchema::kUninitializedSinceVersion && since_version_ != OpSchema::kUninitializedSinceVersion) {
opset_version = since_version_;
}
std::shared_ptr<FunctionProto> function_proto(new FunctionProto());
auto function_proto = std::make_shared<FunctionProto>();
for (const auto& node : func_nodes) {
auto new_node = function_proto->add_node();
new_node->CopyFrom(node);
Expand All @@ -831,7 +831,7 @@ OpSchema& OpSchema::FunctionBody(const std::vector<NodeProto>& func_nodes, int o
// opset import may have been set
// we may need to update its version with the specified opset_version
UpdateFunctionProtoOpsetImportVersion(*function_proto, opset_version);
opset_version_to_function_body_.emplace(opset_version, function_proto);
opset_version_to_function_body_.emplace(opset_version, std::move(function_proto));
return *this;
}

@@ -843,7 +843,7 @@ OpSchema& OpSchema::FunctionBody(
opset_version = since_version_;
}

std::shared_ptr<FunctionProto> function_proto(new FunctionProto());
auto function_proto = std::make_shared<FunctionProto>();
for (auto& relied_opset : relied_opsets) {
*(function_proto->mutable_opset_import()->Add()) = relied_opset;
}
Expand All @@ -855,7 +855,7 @@ OpSchema& OpSchema::FunctionBody(
// opset import may have been set
// we may need to update its version with the specified opset_version
UpdateFunctionProtoOpsetImportVersion(*function_proto, opset_version);
opset_version_to_function_body_.emplace(opset_version, function_proto);
opset_version_to_function_body_.emplace(opset_version, std::move(function_proto));
return *this;
}

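The schema.cc hunks also pass the shared_ptr to emplace with std::move, trading two atomic reference-count operations for a plain pointer handoff. A minimal sketch (`FunctionProto` reduced to an empty stand-in):

```cpp
#include <map>
#include <memory>
#include <utility>

struct FunctionProto {};  // stand-in for the protobuf message

int main() {
  std::map<int, std::shared_ptr<FunctionProto>> bodies;

  auto fp = std::make_shared<FunctionProto>();
  // emplace(key, fp) would copy the shared_ptr: one atomic increment here
  // plus a decrement when fp is destroyed. Moving hands over the control
  // block with no reference-count traffic.
  bodies.emplace(21, std::move(fp));  // fp is now null
  return 0;
}
```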