From 1c1de396286b3920a682a08bfdef74289ae9234e Mon Sep 17 00:00:00 2001 From: yan Date: Sat, 18 Jun 2022 18:14:44 +0800 Subject: [PATCH] support subgraph multi dims --- tf_adapter/kernels/geop_npu.cc | 183 ++++++++++++++++++++------------- tf_adapter/kernels/geop_npu.h | 14 ++- tf_adapter/util/util.cc | 173 +++++++++++++++++++++++++++++++ tf_adapter/util/util.h | 21 ++++ 4 files changed, 319 insertions(+), 72 deletions(-) diff --git a/tf_adapter/kernels/geop_npu.cc b/tf_adapter/kernels/geop_npu.cc index b26ead751..e2d73862a 100644 --- a/tf_adapter/kernels/geop_npu.cc +++ b/tf_adapter/kernels/geop_npu.cc @@ -40,6 +40,7 @@ #include "tf_adapter/util/generate_report.h" #include "tf_adapter/util/npu_ops_identifier.h" #include "tf_adapter/util/session_manager.h" +#include "tf_adapter/util/util.h" #ifdef TF_VERSION_TF2 #include "tensorflow/compiler/tf2xla/functionalize_control_flow_util.h" @@ -57,6 +58,7 @@ #include "framework/common/ge_inner_error_codes.h" #include "framework/common/types.h" #include "framework/common/scope_guard.h" +#include "framework/common/string_util.h" #include "framework/omg/omg_inner_types.h" #include "framework/omg/parser/model_parser.h" #include "framework/omg/parser/parser_api.h" @@ -86,6 +88,10 @@ Status FunctionalizeControlFlow(Graph *graph, FunctionLibraryDefinition *library #endif namespace { const std::string ATTR_NAME_CONST_INPUT_NAME = "_const_input"; +const std::string ATTR_NAME_SUBGRAPH_MULTI_DIMS_INDEX = "_subgraph_multi_dims_index"; +const std::string ATTR_NAME_SUBGRAPH_MULTI_DIMS_INPUT_SHAPE = "_subgraph_multi_dims_input_shape"; +const std::string ATTR_NAME_SUBGRAPH_MULTI_DIMS_INPUT_DIMS = "_subgraph_multi_dims_input_dims"; +const std::string ATTR_NAME_OP_MAX_SHAPE = "_op_max_shape"; using geDataUniquePtr = std::unique_ptr>; class NpuHostFixedAllocator : public tensorflow::Allocator, public tensorflow::core::RefCounted { @@ -624,7 +630,7 @@ void GeOp::ComputeAsync(OpKernelContext *ctx, DoneCallback done) { // if input shapes changed, cache graphs uint32_t cache_graph_id = graph_id_; - bool is_set_dynamic_config = (!sess_options_["ge.inputShape"].empty()) && (!sess_options_["ge.dynamicDims"].empty()); + bool is_set_dynamic_config = IsDynamicConfig(); bool is_tuning = (!init_options_["ge.jobType"].empty()) && (!init_options_["ge.tuningPath"].empty()); bool is_lazy_recompile_mode = (dynamic_input_ == "1") && (dynamic_graph_execute_mode_ == "lazy_recompile"); ADP_LOG(INFO) << "is_set_dynamic_config: " << is_set_dynamic_config << " is_tuning: " << is_tuning @@ -1073,10 +1079,9 @@ Status GeOp::BuildGraphDef(FunctionLibraryDefinition &flib_def, const std::vecto return ret; } - bool is_set_dynamic_config = !sess_options_["ge.inputShape"].empty() && !sess_options_["ge.dynamicDims"].empty() && - !sess_options_["ge.dynamicNodeType"].empty(); + bool is_set_dynamic_config = IsDynamicConfig(); if (is_set_dynamic_config) { - BuildShapeNodeAndCacheArgNodes(graph); + CollectDynamicNodes(graph); } for (Node *node : graph.nodes()) { @@ -1104,10 +1109,10 @@ Status GeOp::BuildGraphDef(FunctionLibraryDefinition &flib_def, const std::vecto } // set input_shape to dynamic nodes shape desc if (is_set_dynamic_config) { - ret = ChangeInputsShapeDesc(); + ret = UpdateDynamicConfigAttrs(graph); if (!ret.ok()) { - ADP_LOG(ERROR) << "[GEOP] ChangeInputsShapeDesc failed, " << ret.error_message(); - LOG(ERROR) << "[GEOP] ChangeInputsShapeDesc failed, " << ret.error_message(); + ADP_LOG(ERROR) << "[GEOP] UpdateDynamicConfigAttrs failed, " << ret.error_message(); + LOG(ERROR) << "[GEOP] 
UpdateDynamicConfigAttrs failed, " << ret.error_message(); return ret; } } @@ -1171,45 +1176,25 @@ Status GeOp::ParseOnnxGraphOpAttr(Node *&node) const { return Status::OK(); } -void GeOp::BuildShapeNodeAndCacheArgNodes(Graph &graph) { +void GeOp::CollectDynamicNodes(Graph &graph) { std::string dynamic_node_type = sess_options_["ge.dynamicNodeType"]; for (Node *node : graph.nodes()) { // add shape node to get getnext node real shape if (dynamic_node_type == "0" && node->type_string() == "IteratorGetNext") { dynamic_shape_nodes_.emplace_back(node); - std::set out_index; - for (auto out_edge : node->out_edges()) { - if (!out_edge->IsControlEdge()) { - std::string msg = "Src:" + out_edge->src()->name() + ":" + std::to_string(out_edge->src_output()) + - ", Dst:" + out_edge->dst()->name() + ":" + std::to_string(out_edge->dst_input()); - ADP_LOG(INFO) << "[GEOP] GetNext node in out info:" << msg; - out_index.insert(out_edge->src_output()); - } - } - for (int idx : out_index) { - std::string shape_name = "getnext_shape_" + std::to_string(idx); - Node *shape_node = nullptr; - TF_CHECK_OK(NodeBuilder(shape_name, "Shape") - .Input(node, idx) - .Device(node->def().device()) - .Finalize(&graph, &shape_node)); - std::string identity_name = "shape_identity_" + std::to_string(idx); - Node *identity_node = nullptr; - TF_CHECK_OK(NodeBuilder(identity_name, "Identity") - .Input(shape_node, 0) - .Device(shape_node->def().device()) - .Finalize(&graph, &identity_node)); - } + ADP_LOG(INFO) << "push in dynamic shape nodes, node: " << node->name() << ", type:" << node->type_string(); } // count data args and getnext args for dynamic dims if (node->type_string() == "_Arg") { if (node->name().find("IteratorGetNext_") != std::string::npos) { if (dynamic_node_type == "0") { dynamic_shape_nodes_.emplace_back(node); + ADP_LOG(INFO) << "push in dynamic shape nodes, node: " << node->name() << ", type:" << node->type_string(); } } else { if (dynamic_node_type == "1") { dynamic_shape_nodes_.emplace_back(node); + ADP_LOG(INFO) << "push in dynamic shape nodes, node: " << node->name() << ", type:" << node->type_string(); } } } @@ -1218,44 +1203,6 @@ void GeOp::BuildShapeNodeAndCacheArgNodes(Graph &graph) { std::sort(dynamic_shape_nodes_.begin(), dynamic_shape_nodes_.end(), CmpVecValue); } -Status GeOp::ChangeInputsShapeDesc() { - std::vector result; - std::string input_shapes = sess_options_["ge.inputShape"]; - Split(input_shapes, result, ";"); // e.g. 
result:["data:2,3", "data1:3,4"] - - if (dynamic_shape_nodes_.size() == 1 && dynamic_shape_nodes_[0]->type_string() == "IteratorGetNext") { - ADP_LOG(INFO) << "[GEOP] change " << dynamic_shape_nodes_[0]->name() << " shape desc."; - if (dynamic_shape_nodes_[0]->num_outputs() != static_cast(result.size())) { - return errors::InvalidArgument("input_shape is not match inputs num in graph"); - } - NodeDef &node_def = const_cast(dynamic_shape_nodes_[0]->def()); - AttrValue &output_tensor_descs = (*node_def.mutable_attr())[OUTPUT_DESC]; - for (int32 i = 0; i < dynamic_shape_nodes_[0]->num_outputs(); ++i) { - AttrValue attr_shape_value; - attr_shape_value.set_type(DT_INT32); - SetShapesToOutputDesc(result, i, attr_shape_value); - (*output_tensor_descs.mutable_list()->mutable_func(i)->mutable_attr())[SERIALIZE_SHAPE] = attr_shape_value; - } - } else { - if (!dynamic_shape_nodes_.empty()) { - if (dynamic_shape_nodes_.size() != result.size()) { - return errors::InvalidArgument("input_shape is not match inputs num in graph"); - } - } - for (size_t i = 0; i < dynamic_shape_nodes_.size(); ++i) { - ADP_LOG(INFO) << "[GEOP] change " << dynamic_shape_nodes_[i]->name() << " shape desc."; - NodeDef &node_def = const_cast(dynamic_shape_nodes_[i]->def()); - AttrValue &output_tensor_descs = (*node_def.mutable_attr())[OUTPUT_DESC]; - AttrValue attr_shape_value; - attr_shape_value.set_type(DT_INT32); - SetShapesToOutputDesc(result, i, attr_shape_value); - (*output_tensor_descs.mutable_list()->mutable_func(0)->mutable_attr())[SERIALIZE_SHAPE] = attr_shape_value; - } - } - ADP_LOG(INFO) << "[GEOP] change input shapes desc success."; - return Status::OK(); -} - void GeOp::SetShapesToOutputDesc(const std::vector &input_shapes, const int &index, AttrValue &attr_shape_value) const { if (input_shapes.empty()) { @@ -1287,6 +1234,99 @@ void GeOp::SetShapesToOutputDesc(const std::vector &input_shapes, c } } +Status GeOp::UpdateDynamicConfigAttrs(Graph &graph) { + if (dynamic_shape_nodes_.empty()) { + ADP_LOG(INFO) << "dynamic_shape_nodes_ empty, skip parse dynamic config"; + return Status::OK(); + } + + std::vector>> user_shape_map; + std::vector>> max_shape_map; + std::vector> dynamic_dims_vec;; + TF_RETURN_IF_ERROR(ParseDynamicShapesAndDims(sess_options_["ge.inputShape"], sess_options_["ge.dynamicDims"], + user_shape_map, dynamic_dims_vec, max_shape_map)); + + if (user_shape_map.size() != dynamic_shape_nodes_.size()) { + return errors::Internal("user_shape_map size[", user_shape_map.size(), "] and dynamic_shape_nodes_ size[", + dynamic_shape_nodes_.size(), " not match"); + } + std::vector subgraph_multi_dims_input_shape; + std::vector subgraph_multi_dims_input_dims; + TF_RETURN_IF_ERROR(BuildSubgraphMuliDimsInput(user_shape_map, dynamic_dims_vec, + subgraph_multi_dims_input_shape, subgraph_multi_dims_input_dims)); + SetMaxShapeRangeAttr(max_shape_map); + for (size_t i = 0; i < dynamic_shape_nodes_.size(); ++i) { + Node *src_node = dynamic_shape_nodes_[i]; + for (auto out : src_node->out_edges()) { + int idx = out->dst_input(); + Node *dst_node = out->dst(); + std::string pre_subgraph_input_shape; + std::string pre_subgraph_input_dims; + bool input_shape_exist = TryGetNodeAttr(dst_node->attrs(), ATTR_NAME_SUBGRAPH_MULTI_DIMS_INPUT_SHAPE, &pre_subgraph_input_shape); + bool input_dims_exist = TryGetNodeAttr(dst_node->attrs(), ATTR_NAME_SUBGRAPH_MULTI_DIMS_INPUT_DIMS, &pre_subgraph_input_dims); + if (input_shape_exist ^ input_dims_exist) { + return errors::Internal("input_shape_exist[%d] and input_dims_exist[%d] not match", 
+                                input_dims_exist, "] not match");
+      }
+      std::string subgraph_input_shape = std::to_string(idx) + ":" + subgraph_multi_dims_input_shape[i];
+      if (!input_shape_exist && !input_dims_exist) {
+        dst_node->AddAttr(ATTR_NAME_SUBGRAPH_MULTI_DIMS_INPUT_SHAPE, subgraph_input_shape);
+        dst_node->AddAttr(ATTR_NAME_SUBGRAPH_MULTI_DIMS_INPUT_DIMS, subgraph_multi_dims_input_dims[i]);
+      } else {
+        TF_RETURN_IF_ERROR(
+            UpdateSubgraphMultiDimsAttr(dst_node, pre_subgraph_input_shape, pre_subgraph_input_dims,
+                                        subgraph_input_shape, subgraph_multi_dims_input_dims[i]));
+      }
+    }
+  }
+  // for (Node *node : graph.nodes()) {
+  //   node->AddAttr(ATTR_NAME_SUBGRAPH_MULTI_DIMS_INDEX, 0);
+  // }
+  return Status::OK();
+}
+
+Status GeOp::SetMaxShapeRangeAttr(std::vector<std::pair<std::string, std::vector<int64_t>>> &max_shape_range_map) {
+  for (size_t i = 0; i < dynamic_shape_nodes_.size(); ++i) {
+    Node *node = dynamic_shape_nodes_[i];
+    if (node->name() != max_shape_range_map[i].first) {
+      ADP_LOG(ERROR) << "node name[" << node->name() << "] and [" << max_shape_range_map[i].first << "] not match";
+    }
+    std::vector<int64_t> &shape_info = max_shape_range_map[i].second;
+    std::string shape_str;
+    for (auto &shape : shape_info) {
+      shape_str.append(std::to_string(shape)).append(",");
+    }
+    shape_str = shape_str.substr(0, shape_str.size() - 1);
+    ADP_LOG(INFO) << "node[" << node->name() << "] max shape range is [" << shape_str << "]";
+    node->AddAttr(ATTR_NAME_OP_MAX_SHAPE, shape_str);
+  }
+  return Status::OK();
+}
+
+Status GeOp::UpdateSubgraphMultiDimsAttr(Node *node, const std::string &pre_input_shape,
+                                         const std::string &pre_input_dims, const std::string &new_input_shape,
+                                         const std::string &new_input_dims) {
+  std::vector<std::string> pre_input_dims_vec = ge::StringUtils::Split(pre_input_dims, ';');
+  std::vector<std::string> new_input_dims_vec = ge::StringUtils::Split(new_input_dims, ';');
+  if (pre_input_dims_vec.size() != new_input_dims_vec.size()) {
+    return errors::Internal("pre_input_dims size[", pre_input_dims_vec.size(), "] and new_input_dims size[",
+                            new_input_dims_vec.size(), "] not match");
+  }
+  std::string update_input_dims;
+  for (size_t i = 0; i < new_input_dims_vec.size(); ++i) {
+    update_input_dims.append(pre_input_dims_vec[i]).append(",").append(new_input_dims_vec[i]).append(";");
+  }
+  update_input_dims = update_input_dims.substr(0, update_input_dims.size() - 1);
+  std::string update_input_shape = pre_input_shape + ";" + new_input_shape;
+  node->ClearAttr(ATTR_NAME_SUBGRAPH_MULTI_DIMS_INPUT_SHAPE);
+  node->ClearAttr(ATTR_NAME_SUBGRAPH_MULTI_DIMS_INPUT_DIMS);
+  node->AddAttr(ATTR_NAME_SUBGRAPH_MULTI_DIMS_INPUT_SHAPE, update_input_shape);
+  node->AddAttr(ATTR_NAME_SUBGRAPH_MULTI_DIMS_INPUT_DIMS, update_input_dims);
+  ADP_LOG(INFO) << "update " << ATTR_NAME_SUBGRAPH_MULTI_DIMS_INPUT_SHAPE << " to [" << update_input_shape << "]";
+  ADP_LOG(INFO) << "update " << ATTR_NAME_SUBGRAPH_MULTI_DIMS_INPUT_DIMS << " to [" << update_input_dims << "]";
+  return Status::OK();
+}
+
 int GeOp::RunTuning(std::vector<Tensor> &input_vec, std::vector<ge::Tensor> &inputs, const OpKernelContext *const ctx) {
   if (tuned_flag_.test_and_set()) {
     ADP_LOG(INFO) << ctx->op_kernel().name() << " has tuned.";
@@ -1901,6 +1941,11 @@ Status GeOp::DomiFormatFromString(std::string format, int32_t &domi_format) cons
   }
   return errors::Unavailable("DomiFormatFromString, not supported format, format = ", format);
 }
+
+bool GeOp::IsDynamicConfig() {
+  return !sess_options_["ge.inputShape"].empty() && !sess_options_["ge.dynamicDims"].empty() &&
+         !sess_options_["ge.dynamicNodeType"].empty();
+}
 } // namespace tensorflow
 
 namespace tensorflow {
diff --git a/tf_adapter/kernels/geop_npu.h b/tf_adapter/kernels/geop_npu.h
index 38fe9e094..f4cb0f1ab 100644
--- a/tf_adapter/kernels/geop_npu.h
+++ b/tf_adapter/kernels/geop_npu.h
@@ -104,9 +104,7 @@ class GeOp : public AsyncOpKernel {
   void SetShapesToOutputDesc(const std::vector<std::string> &input_shapes, const int &index,
                              AttrValue &attr_shape_value) const;
 
-  void BuildShapeNodeAndCacheArgNodes(Graph &graph);
-
-  Status ChangeInputsShapeDesc();
+  void CollectDynamicNodes(Graph &graph);
 
   void AnalyzeInputDesc(void *tensor_ptr, ge::Tensor &input, ge::DataType type,
                         std::vector<std::string> &input_shapes);
@@ -126,6 +124,16 @@ class GeOp : public AsyncOpKernel {
 
   void ChangeChannelNameAttr(NodeDef &node_def) const;
 
+  bool IsDynamicConfig();
+
+  Status UpdateDynamicConfigAttrs(Graph &graph);
+
+  Status UpdateSubgraphMultiDimsAttr(Node *node, const std::string &pre_input_shape,
+                                     const std::string &pre_input_dims, const std::string &new_input_shape,
+                                     const std::string &new_input_dims);
+
+  Status SetMaxShapeRangeAttr(std::vector<std::pair<std::string, std::vector<int64_t>>> &max_shape_range_map);
+
   static const std::string INPUT_DESC;
   static const std::string OUTPUT_DESC;
   static const std::string SERIALIZE_FORMAT;
diff --git a/tf_adapter/util/util.cc b/tf_adapter/util/util.cc
index 024256a39..25a721c31 100644
--- a/tf_adapter/util/util.cc
+++ b/tf_adapter/util/util.cc
@@ -18,15 +18,28 @@
 #include 
 #include 
+#include 
 #include "tf_adapter/common/adp_logger.h"
 #include "tf_adapter/common/common.h"
 #include "tf_adapter/common/compat_tf1_tf2.h"
 #include "inc/metadef/inc/graph/def_types.h"
 #include "graph/def_types.h"
 #include "securec.h"
+#include "framework/common/string_util.h"
 
 namespace tensorflow {
 namespace {
 const std::string ATTR_VALUE_SCOPE_NAME = "_without_npu_compile";
+const size_t kMaxDynamicDimNum = 100;
+
+std::vector<std::string> SplitInputShape(const std::string &input_shape) {
+  std::vector<std::string> shape_pair_vec;
+  size_t pos = input_shape.rfind(":");
+  if (pos != std::string::npos) {
+    shape_pair_vec.emplace_back(input_shape.substr(0, pos));
+    shape_pair_vec.emplace_back(input_shape.substr(pos + 1, input_shape.size() - pos));
+  }
+  return shape_pair_vec;
+}
 }
 
 Status GetDtStringTensorData(const Tensor &tensor, uint8_t *&data_ptr, uint64_t &data_size,
@@ -105,4 +118,164 @@ bool IsWithoutNpuScope(const NodeDef &node_def) {
 bool IsWithoutNpuScope(const Node *node) {
   return IsWithoutNpuScope(node->def());
 }
+
+Status BuildSubgraphMuliDimsInput(const std::vector<std::pair<std::string, std::vector<int64_t>>> &user_shape_map,
+                                  const std::vector<std::vector<std::string>> &dynamic_dims_vec,
+                                  std::vector<std::string> &subgraph_multi_dims_input_shape,
+                                  std::vector<std::string> &subgraph_multi_dims_input_dims) {
+  size_t nodes_num = user_shape_map.size();
+  size_t count = 0;
+  size_t dynamic_count = dynamic_dims_vec.size();
+  for (size_t i = 0; i < nodes_num; ++i) {
+    std::vector<std::string> tmp(dynamic_count);
+    auto &nodes_shape = user_shape_map[i].second;
+    for (auto &dim : nodes_shape) {
+      if (dim != -1) { continue; }
+      for (size_t j = 0; j < dynamic_count; ++j) {
+        tmp[j].append(dynamic_dims_vec[j][count]).append(",");
+      }
+      ++count;
+    }
+    std::string tmp_dims;
+    for (size_t j = 0; j < dynamic_count; ++j) {
+      if (tmp[j].empty()) {
+        return errors::Internal("build subgraph multi dims input dims failed");
+      }
+      tmp_dims.append(tmp[j].substr(0, tmp[j].size() - 1)).append(";");
+    }
+    std::string tmp_shape;
+    for (size_t j = 0; j < nodes_shape.size(); ++j) {
+      tmp_shape.append(std::to_string(nodes_shape[j])).append(",");
+    }
+    subgraph_multi_dims_input_dims.push_back(tmp_dims.substr(0, tmp_dims.size() - 1));
+    subgraph_multi_dims_input_shape.push_back(tmp_shape.substr(0, tmp_shape.size() - 1));
+    ADP_LOG(INFO) << "subgraph_multi_dims_input_dims index: " << i << " : " << subgraph_multi_dims_input_dims[i];
+    ADP_LOG(INFO) << "subgraph_multi_dims_input_shape index: " << i << " : " << subgraph_multi_dims_input_shape[i];
+  }
+  return Status::OK();
+}
+
+Status ParseDynamicShapesAndDims(const std::string &input_shapes, const std::string &dynamic_dims,
+                                 std::vector<std::pair<std::string, std::vector<int64_t>>> &user_shape_map,
+                                 std::vector<std::vector<std::string>> &dynamic_dims_vec,
+                                 std::vector<std::pair<std::string, std::vector<int64_t>>> &max_shape_range_map) {
+  TF_RETURN_IF_ERROR(ParseDynamicShapes(input_shapes, user_shape_map));
+  std::vector<std::vector<int64_t>> dynamic_dims_digit_vec;
+  TF_RETURN_IF_ERROR(ParseDynamicDims(dynamic_dims, dynamic_dims_vec, dynamic_dims_digit_vec, user_shape_map));
+  TF_RETURN_IF_ERROR(ParseMaxShapeRange(user_shape_map, dynamic_dims_digit_vec, max_shape_range_map));
+  return Status::OK();
+}
+
+Status ParseMaxShapeRange(const std::vector<std::pair<std::string, std::vector<int64_t>>> &user_shape_map,
+                          const std::vector<std::vector<int64_t>> &dynamic_dims_digit_vec,
+                          std::vector<std::pair<std::string, std::vector<int64_t>>> &max_shape_range_map) {
+  size_t num = dynamic_dims_digit_vec[0].size();
+  std::vector<int64_t> tmp(num, 0);
+  for (auto &digit_vec : dynamic_dims_digit_vec) {
+    for (size_t i = 0; i < num; ++i) {
+      tmp[i] = std::max(tmp[i], digit_vec[i]);
+    }
+  }
+
+  size_t count = 0;
+  max_shape_range_map = user_shape_map;
+  for (auto &shape_range : max_shape_range_map) {
+    std::vector<int64_t> &shapes = shape_range.second;
+    for (size_t i = 0; i < shapes.size(); ++i) {
+      if (shapes[i] == -1) shapes[i] = tmp[count++];
+    }
+  }
+  return Status::OK();
+}
+
+Status ParseDynamicDims(const std::string &dynamic_dims, std::vector<std::vector<std::string>> &dynamic_dims_vec,
+                        std::vector<std::vector<int64_t>> &dynamic_dims_digit_vec,
+                        const std::vector<std::pair<std::string, std::vector<int64_t>>> &user_shape_map) {
+  int32_t dynamic_dim_num = 0;
+  for (auto &info_shapes : user_shape_map) {
+    auto &shapes = info_shapes.second;
+    dynamic_dim_num += std::count(shapes.begin(), shapes.end(), -1);
+  }
+  ADP_LOG(INFO) << "dynamic dim num: " << dynamic_dim_num;
+  if (dynamic_dims.empty()) {
+    return errors::Internal("dynamic_dims can not be empty.");
+  }
+  // Different parameter sets are split by ';'
+  std::vector<std::string> split_set = ge::StringUtils::Split(dynamic_dims, ';');
+  if (split_set.size() > kMaxDynamicDimNum) {
+    return errors::Internal("dynamic_dims's num of parameter set can not exceed ", kMaxDynamicDimNum, ".");
+  }
+  for (auto split_dim : split_set) {
+    std::vector<std::string> one_set = ge::StringUtils::Split(split_dim, ',');
+    if (one_set.size() != static_cast<size_t>(dynamic_dim_num)) {
+      return errors::Internal("dynamic_dims:", dynamic_dims,
+                              " invalid. reason: Each gear setting needs to be consistent"
+                              " with the number of -1 in the inputshape.");
" + "reason: Each gear setting needs to be consistent with the number of -1 in the inputshape.", + dynamic_dims.c_str()); + } + std::vector digit_vec; + for (auto dim : one_set) { + for (auto c : dim) { + if (!isdigit(c)) { + return errors::Internal("dynamic_dims:%s parameter must be positive integer.", + dynamic_dims.c_str()); + } + digit_vec.push_back(std::strtol(dim.c_str(), nullptr, 10)); + } + } + dynamic_dims_vec.push_back(one_set); + dynamic_dims_digit_vec.push_back(digit_vec); + } + return Status::OK(); +} + +Status ParseDynamicShapes(const std::string &input_shapes, + std::vector>> &user_shape_map) { + std::vector shape_vec = ge::StringUtils::Split(input_shapes, ';'); + const int32_t DEFAULT_SHAPE_PAIR_SIZE = 2; + for (const auto &shape : shape_vec) { + std::vector shape_pair_vec = SplitInputShape(shape); + if (shape_pair_vec.size() != DEFAULT_SHAPE_PAIR_SIZE) { + return errors::Internal("parse input_shape failed."); + } + + if (shape_pair_vec[1].empty()) { + return errors::Internal("parse input_shape failed."); + } + + std::vector shape_value_strs = ge::StringUtils::Split(shape_pair_vec[1], ','); + std::vector shape_values; + for (auto &shape_value_str : shape_value_strs) { + // stoul: The method may throw an exception: invalid_argument/out_of_range + if (shape_value_str.find('.') != std::string::npos) { + return errors::Internal("unsupport float config value."); + } + + long result = 0; + try { + result = stol(ge::StringUtils::Trim(shape_value_str)); + if (!shape_value_str.empty() && (shape_value_str.front() == '-')) { + // The value maybe dynamic shape [-1], need substr it and verify isdigit. + shape_value_str = shape_value_str.substr(1); + } + for (char c : shape_value_str) { + if (!isdigit(c)) { + return errors::Internal("shape value[%s] is not digit", shape_value_str.c_str()); + } + } + } catch (const std::out_of_range &) { + return errors::Internal("value[%s] cause out of range execption!", shape_value_str.c_str()); + } catch (const std::invalid_argument &) { + return errors::Internal("value[%s] cause invalid argument!", shape_value_str.c_str()); + } catch (...) 
+        return errors::Internal("value[", shape_value_str, "] cause unknown exception!");
+      }
+
+      shape_values.push_back(result);
+    }
+
+    user_shape_map.push_back(make_pair(ge::StringUtils::Trim(shape_pair_vec[0]), shape_values));
+  }
+
+  return Status::OK();
+}
 } // namespace tensorflow
\ No newline at end of file
diff --git a/tf_adapter/util/util.h b/tf_adapter/util/util.h
index caa45c37d..4f7564025 100644
--- a/tf_adapter/util/util.h
+++ b/tf_adapter/util/util.h
@@ -35,5 +35,26 @@ Status MappingDtStringTensor2AclDataItem(const Tensor &tensor, acltdtDataItem *&
 bool IsWithoutNpuScope(const NodeDef &node_def);
 bool IsWithoutNpuScope(const Node *node);
+
+Status BuildSubgraphMuliDimsInput(const std::vector<std::pair<std::string, std::vector<int64_t>>> &user_shape_map,
+                                  const std::vector<std::vector<std::string>> &dynamic_dims_vec,
+                                  std::vector<std::string> &subgraph_multi_dims_input_shape,
+                                  std::vector<std::string> &subgraph_multi_dims_input_dims);
+
+Status ParseDynamicShapesAndDims(const std::string &input_shapes, const std::string &dynamic_dims,
+                                 std::vector<std::pair<std::string, std::vector<int64_t>>> &user_shape_map,
+                                 std::vector<std::vector<std::string>> &dynamic_dims_vec,
+                                 std::vector<std::pair<std::string, std::vector<int64_t>>> &max_shape_range_map);
+
+Status ParseDynamicDims(const std::string &dynamic_dims, std::vector<std::vector<std::string>> &dynamic_dims_vec,
+                        std::vector<std::vector<int64_t>> &dynamic_dims_digit_vec,
+                        const std::vector<std::pair<std::string, std::vector<int64_t>>> &user_shape_map);
+
+Status ParseDynamicShapes(const std::string &input_shapes,
+                          std::vector<std::pair<std::string, std::vector<int64_t>>> &user_shape_map);
+
+Status ParseMaxShapeRange(const std::vector<std::pair<std::string, std::vector<int64_t>>> &user_shape_map,
+                          const std::vector<std::vector<int64_t>> &dynamic_dims_digit_vec,
+                          std::vector<std::pair<std::string, std::vector<int64_t>>> &max_shape_range_map);
 } // namespace tensorflow
 #endif // TENSORFLOW_UTILS_H_
\ No newline at end of file
-- Gitee
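Note (not part of the patch, placed after the trailer): a minimal standalone sketch of the option format the new parsers consume. The `ge.inputShape` / `ge.dynamicDims` values, node names and gear values below are invented examples; the helper names Split/main are mine. It mirrors my reading of ParseDynamicShapesAndDims(), BuildSubgraphMuliDimsInput() and ParseMaxShapeRange() on plain strings, whereas the patch attaches the equivalent results to graph nodes as _subgraph_multi_dims_input_dims / _op_max_shape attributes.

// Standalone illustration only -- not part of the patch or the tf_adapter build.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

static std::vector<std::string> Split(const std::string &s, char delim) {
  std::vector<std::string> parts;
  std::stringstream ss(s);
  std::string item;
  while (std::getline(ss, item, delim)) { parts.push_back(item); }
  return parts;
}

int main() {
  // Hypothetical session options: -1 marks a dynamic dim, ';' separates gears.
  const std::string input_shape = "data:-1,3,224,224;label:-1";
  const std::string dynamic_dims = "1,16;8,16;32,32";

  // node name -> shape with -1 placeholders (what ParseDynamicShapes builds).
  std::vector<std::pair<std::string, std::vector<int64_t>>> user_shape_map;
  for (const auto &entry : Split(input_shape, ';')) {
    const size_t pos = entry.rfind(':');
    std::vector<int64_t> dims;
    for (const auto &d : Split(entry.substr(pos + 1), ',')) { dims.push_back(std::stoll(d)); }
    user_shape_map.emplace_back(entry.substr(0, pos), dims);
  }

  // Each gear lists one value per -1, in the order the -1s appear across nodes
  // (what ParseDynamicDims builds as dynamic_dims_digit_vec).
  std::vector<std::vector<int64_t>> gears;
  for (const auto &gear : Split(dynamic_dims, ';')) {
    std::vector<int64_t> vals;
    for (const auto &v : Split(gear, ',')) { vals.push_back(std::stoll(v)); }
    gears.push_back(vals);
  }

  size_t slot = 0;  // global index over all -1 slots, in node order
  for (const auto &node : user_shape_map) {
    const size_t ndyn =
        static_cast<size_t>(std::count(node.second.begin(), node.second.end(), int64_t{-1}));

    // Per-gear dims string, e.g. "1;8;32" for "data".
    std::string per_gear;
    for (size_t g = 0; g < gears.size(); ++g) {
      std::string one;
      for (size_t k = 0; k < ndyn; ++k) {
        one += (k ? "," : "") + std::to_string(gears[g][slot + k]);
      }
      per_gear += (g ? ";" : "") + one;
    }

    // Max shape: each -1 replaced by the largest gear value for its slot.
    std::vector<int64_t> max_shape = node.second;
    size_t k = 0;
    for (auto &d : max_shape) {
      if (d != -1) { continue; }
      int64_t m = 0;
      for (const auto &g : gears) { m = std::max(m, g[slot + k]); }
      d = m;
      ++k;
    }
    slot += ndyn;

    std::cout << node.first << ": gear dims \"" << per_gear << "\", max shape";
    for (int64_t d : max_shape) { std::cout << ' ' << d; }
    std::cout << '\n';
  }
  return 0;
}

For these example inputs the sketch prints gear dims "1;8;32" for data and "16;16;32" for label, with max shapes 32,3,224,224 and 32 respectively, which is the per-node information the patch serializes into the subgraph multi-dims and max-shape attributes.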