From d2b85153d91193f30eac430d034990a3104ec706 Mon Sep 17 00:00:00 2001
From: liuxianxk
Date: Thu, 19 May 2022 15:00:58 +0800
Subject: [PATCH] fix misra 0518

---
 tf_adapter/kernels/geop_npu.cc                          |  5 +++--
 tf_adapter/util/ge_plugin.cc                            |  2 +-
 tf_adapter/util/host_allocator.h                        |  2 +-
 tf_adapter/util/memory_pool.cc                          |  4 ++--
 tf_adapter/util/memory_pool.h                           |  2 +-
 tf_adapter_2.x/npu_device/core/npu_device.cpp           |  2 +-
 .../core/op_executors/npu_concrete_graph.cpp            |  5 ++---
 .../npu_device/core/optimizers/runtime/node_placer.cpp  | 10 +++++-----
 .../npu_device/core/optimizers/runtime/node_placer.h    |  6 +++---
 .../optimizers/runtime/npu_build_npu_op_optimizer.cpp   |  4 ++--
 10 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/tf_adapter/kernels/geop_npu.cc b/tf_adapter/kernels/geop_npu.cc
index 82b230172..3cf252b18 100644
--- a/tf_adapter/kernels/geop_npu.cc
+++ b/tf_adapter/kernels/geop_npu.cc
@@ -1614,7 +1614,7 @@ Status GeOp::GraphInputConvertToConst(OpKernelContext *ctx) {
       }
     }
-    if (check_value == true) {
+    if (check_value) {
       int32_t index = 0;
       TF_RETURN_IF_ERROR(GetNodeAttr(node->attrs(), "index", &index));
       Tensor tensor(ctx->input(index));
@@ -1680,7 +1680,7 @@ Status GeOp::GraphCheckInputEqualConstOp(OpKernelContext *ctx, Tensor &tensor, i
     char *tensor_input = ge::PtrToPtr(DMAHelper::base(&tensor));
     is_equal = ((it.first.TotalBytes() == tensor.TotalBytes()) &&
                 (memcmp(tensor_const, tensor_input, tensor.TotalBytes()) == 0));
-    if (is_equal != true) {
+    if (!is_equal) {
       return errors::Internal("Const input not equal with the input tensor value.");
     }
   }
@@ -1690,6 +1690,7 @@ Status GeOp::GraphCheckInputEqualConstOp(OpKernelContext *ctx, Tensor &tensor, i
 
 Status GeOp::BuildInputTensorInfo(OpKernelContext *ctx, std::vector &input_vec,
                                   std::vector &input_shapes, std::vector &inputs) {
+  (void) ctx; // ctx is not nullptr
   int num_inputs = ctx->num_inputs();
   std::string cur_input_shapes;
diff --git a/tf_adapter/util/ge_plugin.cc b/tf_adapter/util/ge_plugin.cc
index 34fc8e163..e2421bf9d 100644
--- a/tf_adapter/util/ge_plugin.cc
+++ b/tf_adapter/util/ge_plugin.cc
@@ -75,7 +75,7 @@ GePlugin *GePlugin::GetInstance() {
   return &instance;
 }
 
-void GePlugin::Init(std::map &init_options, bool is_global) {
+void GePlugin::Init(std::map &init_options, const bool is_global) {
   std::lock_guard lock(mutex_);
   if (isInit_) {
     ADP_LOG(INFO) << "[GePlugin] Ge has already initialized";
diff --git a/tf_adapter/util/host_allocator.h b/tf_adapter/util/host_allocator.h
index 429b4791b..99262edf9 100644
--- a/tf_adapter/util/host_allocator.h
+++ b/tf_adapter/util/host_allocator.h
@@ -17,8 +17,8 @@
 #ifndef TENSORFLOW_HOST_ALLOCATOR_H_
 #define TENSORFLOW_HOST_ALLOCATOR_H_
 
-#include "tensorflow/core/framework/allocator.h"
 #include
+#include "tensorflow/core/framework/allocator.h"
 
 namespace tensorflow {
 class HostAllocator : public Allocator {
diff --git a/tf_adapter/util/memory_pool.cc b/tf_adapter/util/memory_pool.cc
index aebebf827..e3f2e29d6 100644
--- a/tf_adapter/util/memory_pool.cc
+++ b/tf_adapter/util/memory_pool.cc
@@ -15,9 +15,9 @@
  */
 
 #include "memory_pool.h"
-#include "securec.h"
 #include
 #include
+#include "securec.h"
 #include "tf_adapter/common/adp_logger.h"
 
 namespace tensorflow {
@@ -79,7 +79,7 @@ namespace tensorflow {
     return Status::OK();
   }
 
-  bool MemoryPool::FreeMemoryList(std::list &memory_list) {
+  bool MemoryPool::FreeMemoryList(std::list &memory_list) const {
    auto memory_it = memory_list.begin();
    while (memory_it != memory_list.end()) {
      free(memory_it->ptr);
diff --git a/tf_adapter/util/memory_pool.h b/tf_adapter/util/memory_pool.h
index 25160a738..ed080a62d 100644
--- a/tf_adapter/util/memory_pool.h
+++ b/tf_adapter/util/memory_pool.h
@@ -46,7 +46,7 @@ class MemoryPool {
   Status FreeAllMemory();
   ~MemoryPool();
 private:
-  bool FreeMemoryList(std::list &memory_list);
+  bool FreeMemoryList(std::list &memory_list) const;
   std::mutex memory_pool_lock_;
   std::list used_memory_list_;
   std::list free_memory_list_;
diff --git a/tf_adapter_2.x/npu_device/core/npu_device.cpp b/tf_adapter_2.x/npu_device/core/npu_device.cpp
index f8c5884d5..595a6a823 100644
--- a/tf_adapter_2.x/npu_device/core/npu_device.cpp
+++ b/tf_adapter_2.x/npu_device/core/npu_device.cpp
@@ -1226,7 +1226,7 @@ tensorflow::Status NpuDevice::LoadSupportedOps(std::unordered_set &
   }
   fs.close();
   const static std::vector kAddonOps{"IteratorV2", "IteratorGetNext"};
-  ops.insert(kAddonOps.begin(), kAddonOps.end());
+  ops.insert(kAddonOps.cbegin(), kAddonOps.cend());
   return tensorflow::Status::OK();
 }
diff --git a/tf_adapter_2.x/npu_device/core/op_executors/npu_concrete_graph.cpp b/tf_adapter_2.x/npu_device/core/op_executors/npu_concrete_graph.cpp
index 65e20fc36..d19d43f22 100644
--- a/tf_adapter_2.x/npu_device/core/op_executors/npu_concrete_graph.cpp
+++ b/tf_adapter_2.x/npu_device/core/op_executors/npu_concrete_graph.cpp
@@ -175,9 +175,8 @@ void NpuConcreteGraph::Load(TFE_Context *context, NpuDevice *device, TF_Status *
     {ge::OPTION_EXEC_DYNAMIC_INPUT, "1"},
     {ge::OPTION_EXEC_DYNAMIC_EXECUTE_MODE, "dynamic_execute"},
     {ge::SHAPE_GENERALIZED_BUILD_MODE, "shape_generalized"}};
-  if (kEmptyGeGraphId == device->AddGeGraphInner(context, GeGraphId(), Op(), GraphDef(),
-                                                 (loop_type_ == LoopType::NPU_LOOP), status,
-                                                 (NeedFuzzCompile() ? kFuzzCompileOptions : kOptions))) {
+  if (device->AddGeGraphInner(context, GeGraphId(), Op(), GraphDef(), (loop_type_ == LoopType::NPU_LOOP), status,
+                              (NeedFuzzCompile() ? kFuzzCompileOptions : kOptions)) == kEmptyGeGraphId) {
     empty_ge_graph_ = true;
   }
   NPU_REQUIRES_TFE_OK(status);
diff --git a/tf_adapter_2.x/npu_device/core/optimizers/runtime/node_placer.cpp b/tf_adapter_2.x/npu_device/core/optimizers/runtime/node_placer.cpp
index 0133e9eee..266bd7a1b 100644
--- a/tf_adapter_2.x/npu_device/core/optimizers/runtime/node_placer.cpp
+++ b/tf_adapter_2.x/npu_device/core/optimizers/runtime/node_placer.cpp
@@ -137,7 +137,7 @@ std::set> NodePlacer::GetNpuClusters() {
   return clusters;
 }
 
-bool NodePlacer::IsNpuMeaningLessNode(tensorflow::Node *node) {
+bool NodePlacer::IsNpuMeaningLessNode(const tensorflow::Node *node) {
   const static std::unordered_set kNpuMeaningLessNodes{"Identity", "NoOp", "Const"};
   return kNpuMeaningLessNodes.count(node->type_string()) != 0U;
 }
@@ -168,7 +168,7 @@ tensorflow::Status NodePlacer::BuildNpuOp() {
     std::vector input_edges;
     std::vector output_edges;
 
-    std::vector nodes(cluster->nodes.begin(), cluster->nodes.end());
+    std::vector nodes(cluster->nodes.cbegin(), cluster->nodes.cend());
     std::sort(nodes.begin(), nodes.end(), StableNodeCompartor{});
 
     auto cluster_graph = std::make_unique(tensorflow::OpRegistry::Global());
@@ -480,7 +480,7 @@ tensorflow::Status NodePlacer::BuildConcreteCluster() {
      continue;
    }
    auto cluster = iter->second;
-    auto found = std::find_if(cluster->nodes.begin(), cluster->nodes.end(),
+    auto found = std::find_if(cluster->nodes.cbegin(), cluster->nodes.cend(),
                               [this](tensorflow::Node *node) { return !IsNodeCanPlacedOn(node, Placement::NPU); });
    if (found != cluster->nodes.end()) {
      for (auto iter2 = concrete_clusters_.begin(); iter2 != concrete_clusters_.end();) {
@@ -575,7 +575,7 @@ tensorflow::Status NodePlacer::SpreadNpuNodeFromPlacement(Placement placement) {
   DLOG() << "Start spread npu from " << GetNodesPlacedOn(placement).size() << " nodes placed on "
          << kPlacementString[placement] << ", npu node size " << GetNodesPlacedOn(Placement::NPU).size();
-  const auto enter = [](tensorflow::Node *node) {};
+  const auto enter = [](tensorflow::Node *node) { (void) node; };
   tensorflow::DFSFrom(*graph_, starts, enter, {}, {},
                       [this](const tensorflow::Edge &edge) { return SpreadNpuEdge(edge, true); });
   tensorflow::ReverseDFSFrom(*graph_, starts, enter, {}, {},
@@ -786,7 +786,7 @@ bool NodePlacer::ColocateNpu(tensorflow::Node *src, tensorflow::Node *dst) {
 }
 
 // Weather the edge can be npu bound
-bool NodePlacer::IsSupportedNpuBound(const tensorflow::Edge &edge) {
+bool NodePlacer::IsSupportedNpuBound(const tensorflow::Edge &edge) const {
   return edge.IsControlEdge() || device_->SupportedInputAndOutputType(EdgeDataType(edge));
 }
diff --git a/tf_adapter_2.x/npu_device/core/optimizers/runtime/node_placer.h b/tf_adapter_2.x/npu_device/core/optimizers/runtime/node_placer.h
index 3ae168b5f..c70938eaf 100644
--- a/tf_adapter_2.x/npu_device/core/optimizers/runtime/node_placer.h
+++ b/tf_adapter_2.x/npu_device/core/optimizers/runtime/node_placer.h
@@ -64,7 +64,7 @@ struct NodeOrCluster {
 };
 
 struct StableNodeCompartor {
-  bool operator()(const tensorflow::Node *a, const tensorflow::Node *b) { return a->id() < b->id(); }
+  bool operator()(const tensorflow::Node *a, const tensorflow::Node *b) const { return a->id() < b->id(); }
 };
 
 class NodePlacer {
@@ -98,9 +98,9 @@ class NodePlacer {
   uint64_t Topo(tensorflow::Node *node) const { return node_topo_.at(node); }
 
  private:
-  static bool IsNpuMeaningLessNode(tensorflow::Node *node);
+  static bool IsNpuMeaningLessNode(const tensorflow::Node *node);
   // Weather the edge can be npu bound
-  bool IsSupportedNpuBound(const tensorflow::Edge &edge);
+  bool IsSupportedNpuBound(const tensorflow::Edge &edge) const;
   // is this node placed in surely device
   bool IsNodePlaced(tensorflow::Node *node);
   // Check weather the node has placed on placement device
diff --git a/tf_adapter_2.x/npu_device/core/optimizers/runtime/npu_build_npu_op_optimizer.cpp b/tf_adapter_2.x/npu_device/core/optimizers/runtime/npu_build_npu_op_optimizer.cpp
index dd2bdba81..9f31f714f 100644
--- a/tf_adapter_2.x/npu_device/core/optimizers/runtime/npu_build_npu_op_optimizer.cpp
+++ b/tf_adapter_2.x/npu_device/core/optimizers/runtime/npu_build_npu_op_optimizer.cpp
@@ -57,7 +57,7 @@ tensorflow::Status SetShapeToOutputDesc(const std::vector &input_sh
   return tensorflow::Status::OK();
 }
 
-void GetOutputDataIndex(tensorflow::Node *node, std::vector &ordered_indexes) {
+void GetOutputDataIndex(const tensorflow::Node *node, std::vector &ordered_indexes) {
   std::set out_index;
   for (const auto &out_edge : node->out_edges()) {
     if (!out_edge->IsControlEdge()) {
@@ -66,7 +66,7 @@ void GetOutputDataIndex(tensorflow::Node *node, std::vector &ordered_in
     }
   }
   ordered_indexes.clear();
-  (void)ordered_indexes.insert(ordered_indexes.end(), out_index.begin(), out_index.end());
+  (void) ordered_indexes.insert(ordered_indexes.cend(), out_index.cbegin(), out_index.cend());
 }
 
 tensorflow::Status BuildGetNextShape(tensorflow::Graph *graph, tensorflow::Node *node,
--
Gitee