From 47beba8ab92bbcb93023f9e4c8a95e66c11b17d0 Mon Sep 17 00:00:00 2001
From: zhao-lupeng
Date: Thu, 12 Jan 2023 15:59:25 +0800
Subject: [PATCH] fixed e8017d2 from
 https://gitee.com/dengtao24/tensorflow/pulls/1937 add validation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../tests/st/util/testcase/npu_attrs_test.cc  | 16 ++++++++++++++++
 .../tests/ut/util/testcase/npu_attrs_test.cc  | 16 ++++++++++++++++
 tf_adapter/util/npu_attrs.cc                  | 10 ++++++++++
 tf_adapter_2.x/npu_device/core/npu_device.cpp |  2 +-
 tf_adapter_2.x/npu_device/core/npu_micros.h   |  4 ++--
 5 files changed, 45 insertions(+), 3 deletions(-)

diff --git a/tf_adapter/tests/st/util/testcase/npu_attrs_test.cc b/tf_adapter/tests/st/util/testcase/npu_attrs_test.cc
index aff1db6be..1b9b91e13 100644
--- a/tf_adapter/tests/st/util/testcase/npu_attrs_test.cc
+++ b/tf_adapter/tests/st/util/testcase/npu_attrs_test.cc
@@ -84,6 +84,22 @@ TEST_F(NpuAttrTest, CheckAoeMode) {
   EXPECT_EQ(s.ok(), false);
 }
 
+TEST_F(NpuAttrTest, CheckPrecisionMode) {
+  GraphOptimizationPassOptions options;
+  SessionOptions session_options;
+  session_options.config.mutable_graph_options()->mutable_optimizer_options()->set_do_function_inlining(true);
+  auto *custom_config =
+      session_options.config.mutable_graph_options()->mutable_rewrite_options()->add_custom_optimizers();
+  custom_config->set_name("NpuOptimizer");
+  options.session_options = &session_options;
+
+  AttrValue precision_mode = AttrValue();
+  precision_mode.set_s("force_Dp32");
+  (*custom_config->mutable_parameter_map())["precision_mode"] = precision_mode;
+  Status s = NpuAttrs::SetNpuOptimizerAttr(options, nullptr);
+  EXPECT_EQ(s.ok(), false);
+}
+
 TEST_F(NpuAttrTest, GetDumpPath) {
   setenv("DUMP_GRAPH_PATH", "./", 1);
   string path = GetDumpPath();
diff --git a/tf_adapter/tests/ut/util/testcase/npu_attrs_test.cc b/tf_adapter/tests/ut/util/testcase/npu_attrs_test.cc
index cf6b30c70..dc796242b 100644
--- a/tf_adapter/tests/ut/util/testcase/npu_attrs_test.cc
+++ b/tf_adapter/tests/ut/util/testcase/npu_attrs_test.cc
@@ -85,6 +85,22 @@ TEST_F(NpuAttrTest, CheckAoeMode) {
   EXPECT_EQ(s.ok(), false);
 }
 
+TEST_F(NpuAttrTest, CheckPrecisionMode) {
+  GraphOptimizationPassOptions options;
+  SessionOptions session_options;
+  session_options.config.mutable_graph_options()->mutable_optimizer_options()->set_do_function_inlining(true);
+  auto *custom_config =
+      session_options.config.mutable_graph_options()->mutable_rewrite_options()->add_custom_optimizers();
+  custom_config->set_name("NpuOptimizer");
+  options.session_options = &session_options;
+
+  AttrValue precision_mode = AttrValue();
+  precision_mode.set_s("force_Dp32");
+  (*custom_config->mutable_parameter_map())["precision_mode"] = precision_mode;
+  Status s = NpuAttrs::SetNpuOptimizerAttr(options, nullptr);
+  EXPECT_EQ(s.ok(), false);
+}
+
 TEST_F(NpuAttrTest, GetDumpPath) {
   setenv("DUMP_GRAPH_PATH", "./", 1);
   string path = GetDumpPath();
diff --git a/tf_adapter/util/npu_attrs.cc b/tf_adapter/util/npu_attrs.cc
index 630da0290..9c214dcf8 100644
--- a/tf_adapter/util/npu_attrs.cc
+++ b/tf_adapter/util/npu_attrs.cc
@@ -1732,6 +1732,16 @@ Status NpuAttrs::SetNpuOptimizerAttr(const GraphOptimizationPassOptions &options
   }
   if (params.count("precision_mode") > 0) {
     precision_mode = params.at("precision_mode").s();
+    if (precision_mode != "force_fp32" && precision_mode != "allow_fp32_to_fp16" &&
+        precision_mode != "force_fp16" && precision_mode != "must_keep_origin_dtype" &&
+        precision_mode != "allow_mix_precision") {
+      ADP_LOG(ERROR) << "precision_mode should be force_fp32, allow_fp32_to_fp16, force_fp16, "
+                        "must_keep_origin_dtype or allow_mix_precision";
+      LOG(ERROR) << "precision_mode should be force_fp32, allow_fp32_to_fp16, force_fp16, must_keep_origin_dtype "
+                    "or allow_mix_precision";
+      return errors::Internal("precision_mode should be force_fp32, allow_fp32_to_fp16, force_fp16, "
+                              "must_keep_origin_dtype or allow_mix_precision");
+    }
   } else {
     if (static_cast<bool>(graph_run_mode)) {
       precision_mode = "allow_fp32_to_fp16";
diff --git a/tf_adapter_2.x/npu_device/core/npu_device.cpp b/tf_adapter_2.x/npu_device/core/npu_device.cpp
index f1d6f0f8e..1849e34dd 100644
--- a/tf_adapter_2.x/npu_device/core/npu_device.cpp
+++ b/tf_adapter_2.x/npu_device/core/npu_device.cpp
@@ -808,7 +808,7 @@ void NpuDevice::RunGeGraphAsync(TFE_Context *context, uint64_t graph_id, int num
     if (err_msg.empty()) {
       err_msg = " code:" + std::to_string(s);
     }
-    done(tensorflow::errors::Internal("Graph engine process graph failed: ", err_msg));
+    done(tensorflow::errors::Internal("Graph engine process graph failed:\n", err_msg));
     return;
   } else if (ge_outputs.size() != static_cast<std::size_t>(num_outputs)) {
     done(tensorflow::errors::Internal("Graph engine process graph succeed but output num ", ge_outputs.size(),
diff --git a/tf_adapter_2.x/npu_device/core/npu_micros.h b/tf_adapter_2.x/npu_device/core/npu_micros.h
index cfeeb4ee0..933ef44a8 100644
--- a/tf_adapter_2.x/npu_device/core/npu_micros.h
+++ b/tf_adapter_2.x/npu_device/core/npu_micros.h
@@ -79,7 +79,7 @@
     if (err_msg.empty()) {                                                 \
       err_msg = " code:" + std::to_string(_status);                       \
     }                                                                     \
-    CTX->status = tensorflow::errors::Internal(PREFIX, ":", err_msg);     \
+    CTX->status = tensorflow::errors::Internal(PREFIX, ":\n", err_msg);   \
     LOG(ERROR) << CTX->status.ToString();                                 \
     return;                                                               \
   }                                                                       \
@@ -93,7 +93,7 @@
     if (err_msg.empty()) {                                                 \
       err_msg = " code:" + std::to_string(_status);                       \
     }                                                                     \
-    (CTX)->status = tensorflow::errors::Internal(PREFIX, ":", err_msg);   \
+    (CTX)->status = tensorflow::errors::Internal(PREFIX, ":\n", err_msg); \
     LOG(ERROR) << (CTX)->status.ToString();                               \
     return RET;                                                           \
   }                                                                       \
--
Gitee