diff --git a/tf_adapter/tests/st/util/testcase/npu_attrs_test.cc b/tf_adapter/tests/st/util/testcase/npu_attrs_test.cc
index 8a46d0094f54b46001c4cac245aa47d388bfd452..1a00a753bc3c0b207d754e61fde99a0b5d833d21 100644
--- a/tf_adapter/tests/st/util/testcase/npu_attrs_test.cc
+++ b/tf_adapter/tests/st/util/testcase/npu_attrs_test.cc
@@ -601,6 +601,21 @@ TEST_F(NpuAttrTest, SetNpuOptimizerAttr_compile_hybrid_mode) {
   EXPECT_EQ(s.ok(), false);
 }
 
+TEST_F(NpuAttrTest, SetNpuOptimizerAttr_compile_hybrid_mode_no_set_dynamic_option) {
+  GraphOptimizationPassOptions options;
+  SessionOptions session_options;
+  session_options.config.mutable_graph_options()->mutable_optimizer_options()->set_do_function_inlining(true);
+  auto *custom_config =
+      session_options.config.mutable_graph_options()->mutable_rewrite_options()->add_custom_optimizers();
+  custom_config->set_name("NpuOptimizer");
+  options.session_options = &session_options;
+  AttrValue compile_hybrid_mode_value = AttrValue();
+  compile_hybrid_mode_value.set_i(1);
+  (*custom_config->mutable_parameter_map())["compile_hybrid_mode"] = compile_hybrid_mode_value;
+  Status s = NpuAttrs::SetNpuOptimizerAttr(options, reinterpret_cast<Node *>(1));
+  EXPECT_EQ(s.ok(), false);
+}
+
 TEST_F(NpuAttrTest, GetAllAttrOptions_oo_level) {
   AttrValueMap attr_map;
 
diff --git a/tf_adapter/tests/ut/util/testcase/npu_attrs_test.cc b/tf_adapter/tests/ut/util/testcase/npu_attrs_test.cc
index 6a4e614a6b8c8db7a2c48fbb0bbf424c3c49a7f9..980f3a1b4aeb8342a63dd8285d6b6d8611cc3a73 100644
--- a/tf_adapter/tests/ut/util/testcase/npu_attrs_test.cc
+++ b/tf_adapter/tests/ut/util/testcase/npu_attrs_test.cc
@@ -740,6 +740,21 @@ TEST_F(NpuAttrTest, SetNpuOptimizerAttr_compile_hybrid_mode) {
   EXPECT_EQ(s.ok(), false);
 }
 
+TEST_F(NpuAttrTest, SetNpuOptimizerAttr_compile_hybrid_mode_no_set_dynamic_option) {
+  GraphOptimizationPassOptions options;
+  SessionOptions session_options;
+  session_options.config.mutable_graph_options()->mutable_optimizer_options()->set_do_function_inlining(true);
+  auto *custom_config =
+      session_options.config.mutable_graph_options()->mutable_rewrite_options()->add_custom_optimizers();
+  custom_config->set_name("NpuOptimizer");
+  options.session_options = &session_options;
+  AttrValue compile_hybrid_mode_value = AttrValue();
+  compile_hybrid_mode_value.set_i(1);
+  (*custom_config->mutable_parameter_map())["compile_hybrid_mode"] = compile_hybrid_mode_value;
+  Status s = NpuAttrs::SetNpuOptimizerAttr(options, reinterpret_cast<Node *>(1));
+  EXPECT_EQ(s.ok(), false);
+}
+
 TEST_F(NpuAttrTest, SetNpuOptimizerAttr_oo_level) {
   GraphOptimizationPassOptions options;
   SessionOptions session_options;
diff --git a/tf_adapter/util/npu_attrs.cc b/tf_adapter/util/npu_attrs.cc
index 67406594f34c6bc283889478d43b0d26395cbda4..c07ae9009f9851274461179d4dfdc3f420c12a35 100644
--- a/tf_adapter/util/npu_attrs.cc
+++ b/tf_adapter/util/npu_attrs.cc
@@ -2340,7 +2340,16 @@ Status NpuAttrs::SetNpuOptimizerAttr(const GraphOptimizationPassOptions &options
       }
     } else if (params.count("input_shape") == 0 && params.count("dynamic_dims") == 0 &&
                params.count("dynamic_node_type") == 0) {
-      // the three parameters are not set normally.
+      if (params.count("compile_hybrid_mode") > 0) {
+        compile_hybrid_mode = std::to_string(params.at("compile_hybrid_mode").i());
+        if (compile_hybrid_mode == "1") {
+          ADP_LOG(ERROR)
+              << "input_shape, dynamic_dims and dynamic_node_type should be set when compile_hybrid_mode is 1";
+          LOG(ERROR) << "input_shape, dynamic_dims and dynamic_node_type should be set when compile_hybrid_mode is 1";
+          return errors::Internal(
+              "input_shape, dynamic_dims and dynamic_node_type should be set when compile_hybrid_mode is 1");
+        }
+      }
     } else {
       ADP_LOG(FATAL) << "input_shape, dynamic_dims and dynamic_node_type should use together.";
       LOG(FATAL) << "input_shape, dynamic_dims and dynamic_node_type should use together.";