From c93eac84cd31b23809bd3be411af2fa49946688b Mon Sep 17 00:00:00 2001 From: tianye Date: Tue, 23 Nov 2021 11:33:45 +0800 Subject: [PATCH 01/21] tf rnn dynamic --- tf_adapter/ops/aicore/npu_aicore_ops.cc | 2 ++ .../python/npu_bridge/estimator/npu/npu_dynamic_rnn.py | 5 +++++ 2 files changed, 7 insertions(+) diff --git a/tf_adapter/ops/aicore/npu_aicore_ops.cc b/tf_adapter/ops/aicore/npu_aicore_ops.cc index c56e4f9e4..2fc3fa8f6 100644 --- a/tf_adapter/ops/aicore/npu_aicore_ops.cc +++ b/tf_adapter/ops/aicore/npu_aicore_ops.cc @@ -168,6 +168,8 @@ REGISTER_OP("DynamicRnn") .Output("o: T") .Output("tanhc: T") .Attr("T: {float16, float32}") + .Attr("has_seq: bool = false") + .Attr("use_init: bool = false") .Attr("cell_type: string") .Attr("direction: string") .Attr("cell_depth: int = 1") diff --git a/tf_adapter/python/npu_bridge/estimator/npu/npu_dynamic_rnn.py b/tf_adapter/python/npu_bridge/estimator/npu/npu_dynamic_rnn.py index 5cf30e368..7fff8ee6c 100644 --- a/tf_adapter/python/npu_bridge/estimator/npu/npu_dynamic_rnn.py +++ b/tf_adapter/python/npu_bridge/estimator/npu/npu_dynamic_rnn.py @@ -57,6 +57,8 @@ class _DynamicBasic(base_layer.Layer): self._dtype = dtype self._args = { "direction": self._direction, + "has_seq": False, + "use_init": False, "cell_depth": self._cell_depth, "keep_prob": self._keep_prob, "cell_clip": self._cell_clip, @@ -302,9 +304,12 @@ class DynamicRNN(_DynamicBasic): """Dynamic GRU. """ super(DynamicRNN, self).call(x, seq_length=seq_length) + if seq_length is not None: + self._args["has_seq"] = True if init_h is None: init_h = self._init_h else: + self._args["use_init"] = True init_h_shape = tensor_shape.TensorShape(init_h) if init_h_shape.ndims == 2: init_h = tf.reshape(init_h, [1, init_h_shape[0], init_h_shape[1]]) -- Gitee From 289cf3d1e416f2f9ce3d0f4154e9900196f091bd Mon Sep 17 00:00:00 2001 From: tianye Date: Mon, 29 Nov 2021 17:32:26 +0800 Subject: [PATCH 02/21] fix tf --- tf_adapter/ops/aicore/npu_aicore_ops.cc | 4 ++-- .../estimator/npu/npu_dynamic_rnn.py | 21 ++++++++++++------- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/tf_adapter/ops/aicore/npu_aicore_ops.cc b/tf_adapter/ops/aicore/npu_aicore_ops.cc index 2fc3fa8f6..c60d0bff4 100644 --- a/tf_adapter/ops/aicore/npu_aicore_ops.cc +++ b/tf_adapter/ops/aicore/npu_aicore_ops.cc @@ -168,8 +168,8 @@ REGISTER_OP("DynamicRnn") .Output("o: T") .Output("tanhc: T") .Attr("T: {float16, float32}") - .Attr("has_seq: bool = false") - .Attr("use_init: bool = false") + .Attr("is_clean_seq_length: bool = false") + .Attr("is_clean_init: bool = false") .Attr("cell_type: string") .Attr("direction: string") .Attr("cell_depth: int = 1") diff --git a/tf_adapter/python/npu_bridge/estimator/npu/npu_dynamic_rnn.py b/tf_adapter/python/npu_bridge/estimator/npu/npu_dynamic_rnn.py index 7fff8ee6c..decb2ec46 100644 --- a/tf_adapter/python/npu_bridge/estimator/npu/npu_dynamic_rnn.py +++ b/tf_adapter/python/npu_bridge/estimator/npu/npu_dynamic_rnn.py @@ -57,8 +57,8 @@ class _DynamicBasic(base_layer.Layer): self._dtype = dtype self._args = { "direction": self._direction, - "has_seq": False, - "use_init": False, + "is_clean_seq_length": False, + "is_clean_init": False, "cell_depth": self._cell_depth, "keep_prob": self._keep_prob, "cell_clip": self._cell_clip, @@ -298,18 +298,21 @@ class DynamicRNN(_DynamicBasic): def call(self, x, + weight=None, + bias=None, seq_length=None, init_h=None, init_c=None): """Dynamic GRU. 
""" super(DynamicRNN, self).call(x, seq_length=seq_length) - if seq_length is not None: - self._args["has_seq"] = True + if seq_length is None: + self._args["is_clean_seq_length"] = True if init_h is None: init_h = self._init_h + if x.shape[0].value is None: + self._args["is_clean_init"] = True else: - self._args["use_init"] = True init_h_shape = tensor_shape.TensorShape(init_h) if init_h_shape.ndims == 2: init_h = tf.reshape(init_h, [1, init_h_shape[0], init_h_shape[1]]) @@ -321,8 +324,12 @@ class DynamicRNN(_DynamicBasic): init_c = tf.reshape(init_c, [1, init_c_shape[0], init_c_shape[1]]) if init_c is None: init_c = self._init_c - self._args["w"] = self._rnn_w - self._args["b"] = self._rnn_b + if weight is None: + weight = self._rnn_w + if bias is None: + bias = self._rnn_b + self._args["w"] = weight + self._args["b"] = bias self._args["init_h"] = init_h self._args["init_c"] = init_c return gen_npu_ops.dynamic_rnn(**self._args) -- Gitee From 5ebff68f6a28f9065773460d5fcf840b82469325 Mon Sep 17 00:00:00 2001 From: tianye Date: Mon, 29 Nov 2021 19:32:21 +0800 Subject: [PATCH 03/21] fix tf rnn --- .../kernels/aicore/dynamic_rnn_v2_ops.cc | 30 ++++++++ tf_adapter/ops/aicore/npu_aicore_ops.cc | 68 ++++++++++++++++++- .../estimator/npu/npu_dynamic_rnn.py | 17 +++-- .../python/npu_bridge/estimator/npu_ops.py | 19 ++++++ 4 files changed, 123 insertions(+), 11 deletions(-) create mode 100644 tf_adapter/kernels/aicore/dynamic_rnn_v2_ops.cc diff --git a/tf_adapter/kernels/aicore/dynamic_rnn_v2_ops.cc b/tf_adapter/kernels/aicore/dynamic_rnn_v2_ops.cc new file mode 100644 index 000000000..ccaf8d2c0 --- /dev/null +++ b/tf_adapter/kernels/aicore/dynamic_rnn_v2_ops.cc @@ -0,0 +1,30 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2019-2020. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/register_types.h" + +namespace tensorflow { +template +class DynamicRnnV2OP : public OpKernel { + public: + explicit DynamicRnnV2OP(OpKernelConstruction *ctx) : OpKernel(ctx) { LOG(INFO) << "new DynamicRnnV2OP"; } + ~DynamicRnnV2OP() { LOG(INFO) << "del DynamicRnnV2OP"; } + void Compute(OpKernelContext *ctx) override { LOG(INFO) << "in DynamicRnnV2OP"; } + bool IsExpensive() override { return false; } +}; + +REGISTER_KERNEL_BUILDER(Name("DynamicRnnV2").Device(DEVICE_CPU), DynamicRnnV2OP); +} // namespace tensorflow \ No newline at end of file diff --git a/tf_adapter/ops/aicore/npu_aicore_ops.cc b/tf_adapter/ops/aicore/npu_aicore_ops.cc index c60d0bff4..e76449704 100644 --- a/tf_adapter/ops/aicore/npu_aicore_ops.cc +++ b/tf_adapter/ops/aicore/npu_aicore_ops.cc @@ -168,8 +168,72 @@ REGISTER_OP("DynamicRnn") .Output("o: T") .Output("tanhc: T") .Attr("T: {float16, float32}") - .Attr("is_clean_seq_length: bool = false") - .Attr("is_clean_init: bool = false") + .Attr("cell_type: string") + .Attr("direction: string") + .Attr("cell_depth: int = 1") + .Attr("use_peephole: bool = false") + .Attr("keep_prob: float = 1.0") + .Attr("cell_clip: float = -1.0") + .Attr("num_proj: int = 0") + .Attr("time_major: bool = true") + .Attr("activation: string") + .Attr("forget_bias: float = 0.0") + .Attr("is_training: bool = true") + .SetIsStateful() + .SetShapeFn([](InferenceContext* c) { + auto input_shape = c->input(0); + auto num_step = c->Dim(input_shape, 0); + auto batch_size = c->Dim(input_shape, 1); + auto input_size = c->Dim(input_shape, 2); + auto w = c->input(1); + auto hidden_size_total = c->Dim(w, 0); + DimensionHandle hidden_size; + TF_RETURN_IF_ERROR(c->Subtract(hidden_size_total, input_size, &hidden_size)); + int num_proj = 0; + TF_RETURN_IF_ERROR(c->GetAttr("num_proj", &num_proj)); + ShapeHandle output_y_shape; + if (num_proj == 0) { + output_y_shape = c->MakeShape({num_step, batch_size, hidden_size}); + } else { + std::vector num_projs; + num_projs.reserve(num_proj); + auto num_proj_shape = c->MakeShape(num_projs); + DimensionHandle num_proj_size = c->Dim(num_proj_shape, 0); + DimensionHandle output_hidden_size; + TF_RETURN_IF_ERROR(c->Min(num_proj_size, hidden_size, &output_hidden_size)); + output_y_shape = c->MakeShape({num_step, batch_size, output_hidden_size}); + } + auto output_h_shape = + c->MakeShape({num_step, batch_size, hidden_size}); + auto output_c_shape = + c->MakeShape({num_step, batch_size, hidden_size}); + + c->set_output(0, output_y_shape); + c->set_output(1, output_h_shape); + c->set_output(2, output_c_shape); + c->set_output(3, c->UnknownShape()); + c->set_output(4, c->UnknownShape()); + c->set_output(5, c->UnknownShape()); + c->set_output(6, c->UnknownShape()); + c->set_output(7, c->UnknownShape()); + return Status::OK(); + }); + +REGISTER_OP("DynamicRnnV2") + .Input("x: T") + .Input("w: T") + .Input("b: T") + .Input("init_h: T") + .Input("init_c: T") + .Output("y: T") + .Output("output_h: T") + .Output("output_c: T") + .Output("i: T") + .Output("j: T") + .Output("f: T") + .Output("o: T") + .Output("tanhc: T") + .Attr("T: {float16, float32}") .Attr("cell_type: string") .Attr("direction: string") .Attr("cell_depth: int = 1") diff --git a/tf_adapter/python/npu_bridge/estimator/npu/npu_dynamic_rnn.py b/tf_adapter/python/npu_bridge/estimator/npu/npu_dynamic_rnn.py index decb2ec46..0955c9f12 100644 --- a/tf_adapter/python/npu_bridge/estimator/npu/npu_dynamic_rnn.py +++ 
b/tf_adapter/python/npu_bridge/estimator/npu/npu_dynamic_rnn.py @@ -57,8 +57,6 @@ class _DynamicBasic(base_layer.Layer): self._dtype = dtype self._args = { "direction": self._direction, - "is_clean_seq_length": False, - "is_clean_init": False, "cell_depth": self._cell_depth, "keep_prob": self._keep_prob, "cell_clip": self._cell_clip, @@ -292,8 +290,6 @@ class DynamicRNN(_DynamicBasic): shape=[4 * self._hidden_size], dtype=self._dtype, initializer=init_ops.zeros_initializer(dtype=self._dtype)) - self._init_h = array_ops.zeros([1, batch_size, self._hidden_size], dtype=self._dtype) - self._init_c = array_ops.zeros([1, batch_size, self._hidden_size], dtype=self._dtype) super(DynamicRNN, self).build(input_shape) def call(self, @@ -306,17 +302,17 @@ class DynamicRNN(_DynamicBasic): """Dynamic GRU. """ super(DynamicRNN, self).call(x, seq_length=seq_length) - if seq_length is None: - self._args["is_clean_seq_length"] = True + batch_size = x.shape[0].value + if init_h is None: + self._init_h = array_ops.zeros([1, batch_size, self._hidden_size], dtype=self._dtype) init_h = self._init_h - if x.shape[0].value is None: - self._args["is_clean_init"] = True else: init_h_shape = tensor_shape.TensorShape(init_h) if init_h_shape.ndims == 2: init_h = tf.reshape(init_h, [1, init_h_shape[0], init_h_shape[1]]) if init_c is None: + self._init_c = array_ops.zeros([1, batch_size, self._hidden_size], dtype=self._dtype) init_c = self._init_c else: init_c_shape = tensor_shape.TensorShape(init_c) @@ -332,4 +328,7 @@ class DynamicRNN(_DynamicBasic): self._args["b"] = bias self._args["init_h"] = init_h self._args["init_c"] = init_c - return gen_npu_ops.dynamic_rnn(**self._args) + if seq_length is None: + return gen_npu_ops.dynamic_rnn_v2(**self._args) + else: + return gen_npu_ops.dynamic_rnn(**self._args) diff --git a/tf_adapter/python/npu_bridge/estimator/npu_ops.py b/tf_adapter/python/npu_bridge/estimator/npu_ops.py index 43b326742..24205e867 100644 --- a/tf_adapter/python/npu_bridge/estimator/npu_ops.py +++ b/tf_adapter/python/npu_bridge/estimator/npu_ops.py @@ -269,6 +269,25 @@ def dynamic_rnn_grad(op, dy, dh, dc, di, dj, df, do, dtanhc): return (dx, dw, db, seq_length, dh_prev, dc_prev) +@ops.RegisterGradient("DynamicRnnV2") +def dynamic_rnn_v2_grad(op, dy, dh, dc, di, dj, df, do, dtanhc): + (x, w, b, seq_length, init_h, init_c) = op.inputs + (y, output_h, output_c, i, j, f, o, tanhc) = op.outputs + (dw, db, dx, dh_prev, dc_prev) = gen_npu_ops.dynamic_rnn_grad(x, w, b, y, init_h[-1], init_c[-1], output_h, + output_c, dy, dh[-1], dc[-1], i, j, f, o, tanhc, + cell_type=op.get_attr("cell_type"), + direction=op.get_attr("direction"), + cell_depth=op.get_attr("cell_depth"), + use_peephole=op.get_attr("use_peephole"), + keep_prob=op.get_attr("keep_prob"), + cell_clip=op.get_attr("cell_clip"), + num_proj=op.get_attr("num_proj"), + time_major=op.get_attr("time_major"), + forget_bias=op.get_attr("forget_bias")) + + return (dx, dw, db, seq_length, dh_prev, dc_prev) + + def scatter_elements(data, indices, updates, axis=0, name=None): data = ops.convert_to_tensor(data, name="data") indices = ops.convert_to_tensor(indices, name="indices") -- Gitee From 2468a19f7104f493739bc23c5ca087918851a8e9 Mon Sep 17 00:00:00 2001 From: tianye Date: Mon, 29 Nov 2021 19:40:07 +0800 Subject: [PATCH 04/21] fix tf rnn --- tf_adapter/python/npu_bridge/estimator/npu/npu_dynamic_rnn.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tf_adapter/python/npu_bridge/estimator/npu/npu_dynamic_rnn.py 
b/tf_adapter/python/npu_bridge/estimator/npu/npu_dynamic_rnn.py index 0955c9f12..99da10bae 100644 --- a/tf_adapter/python/npu_bridge/estimator/npu/npu_dynamic_rnn.py +++ b/tf_adapter/python/npu_bridge/estimator/npu/npu_dynamic_rnn.py @@ -329,6 +329,7 @@ class DynamicRNN(_DynamicBasic): self._args["init_h"] = init_h self._args["init_c"] = init_c if seq_length is None: + self._args.pop("seq_length") return gen_npu_ops.dynamic_rnn_v2(**self._args) else: return gen_npu_ops.dynamic_rnn(**self._args) -- Gitee From 890e983c9666c8b6293af1d7f385a69ccb2cd00d Mon Sep 17 00:00:00 2001 From: tianye Date: Tue, 30 Nov 2021 16:10:59 +0800 Subject: [PATCH 05/21] fix --- .../npu_bridge/estimator/npu/npu_dynamic_rnn.py | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/tf_adapter/python/npu_bridge/estimator/npu/npu_dynamic_rnn.py b/tf_adapter/python/npu_bridge/estimator/npu/npu_dynamic_rnn.py index 99da10bae..ef9fe0d01 100644 --- a/tf_adapter/python/npu_bridge/estimator/npu/npu_dynamic_rnn.py +++ b/tf_adapter/python/npu_bridge/estimator/npu/npu_dynamic_rnn.py @@ -302,24 +302,15 @@ class DynamicRNN(_DynamicBasic): """Dynamic GRU. """ super(DynamicRNN, self).call(x, seq_length=seq_length) - batch_size = x.shape[0].value + batch_size = array_ops.shape(x)[1] if init_h is None: self._init_h = array_ops.zeros([1, batch_size, self._hidden_size], dtype=self._dtype) init_h = self._init_h - else: - init_h_shape = tensor_shape.TensorShape(init_h) - if init_h_shape.ndims == 2: - init_h = tf.reshape(init_h, [1, init_h_shape[0], init_h_shape[1]]) if init_c is None: self._init_c = array_ops.zeros([1, batch_size, self._hidden_size], dtype=self._dtype) init_c = self._init_c - else: - init_c_shape = tensor_shape.TensorShape(init_c) - if init_c_shape.ndims == 2: - init_c = tf.reshape(init_c, [1, init_c_shape[0], init_c_shape[1]]) - if init_c is None: - init_c = self._init_c + if weight is None: weight = self._rnn_w if bias is None: -- Gitee From 89d15d0e83757f11e67ec0392806ba20ecbdf5d7 Mon Sep 17 00:00:00 2001 From: tianye Date: Wed, 1 Dec 2021 09:36:05 +0800 Subject: [PATCH 06/21] fix --- tf_adapter/python/npu_bridge/estimator/npu_ops.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tf_adapter/python/npu_bridge/estimator/npu_ops.py b/tf_adapter/python/npu_bridge/estimator/npu_ops.py index 24205e867..a5cd2fd5e 100644 --- a/tf_adapter/python/npu_bridge/estimator/npu_ops.py +++ b/tf_adapter/python/npu_bridge/estimator/npu_ops.py @@ -271,7 +271,7 @@ def dynamic_rnn_grad(op, dy, dh, dc, di, dj, df, do, dtanhc): @ops.RegisterGradient("DynamicRnnV2") def dynamic_rnn_v2_grad(op, dy, dh, dc, di, dj, df, do, dtanhc): - (x, w, b, seq_length, init_h, init_c) = op.inputs + (x, w, b, init_h, init_c) = op.inputs (y, output_h, output_c, i, j, f, o, tanhc) = op.outputs (dw, db, dx, dh_prev, dc_prev) = gen_npu_ops.dynamic_rnn_grad(x, w, b, y, init_h[-1], init_c[-1], output_h, output_c, dy, dh[-1], dc[-1], i, j, f, o, tanhc, @@ -285,7 +285,7 @@ def dynamic_rnn_v2_grad(op, dy, dh, dc, di, dj, df, do, dtanhc): time_major=op.get_attr("time_major"), forget_bias=op.get_attr("forget_bias")) - return (dx, dw, db, seq_length, dh_prev, dc_prev) + return (dx, dw, db, dh_prev, dc_prev) def scatter_elements(data, indices, updates, axis=0, name=None): -- Gitee From 2a454ac38afbcf3e090644af2b4b855e2e30c2a5 Mon Sep 17 00:00:00 2001 From: tianye Date: Wed, 1 Dec 2021 17:31:22 +0800 Subject: [PATCH 07/21] sc --- tf_adapter/kernels/aicore/dynamic_rnn_v2_ops.cc | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/tf_adapter/kernels/aicore/dynamic_rnn_v2_ops.cc b/tf_adapter/kernels/aicore/dynamic_rnn_v2_ops.cc index ccaf8d2c0..d6cde0460 100644 --- a/tf_adapter/kernels/aicore/dynamic_rnn_v2_ops.cc +++ b/tf_adapter/kernels/aicore/dynamic_rnn_v2_ops.cc @@ -19,7 +19,7 @@ namespace tensorflow { template class DynamicRnnV2OP : public OpKernel { - public: +public: explicit DynamicRnnV2OP(OpKernelConstruction *ctx) : OpKernel(ctx) { LOG(INFO) << "new DynamicRnnV2OP"; } ~DynamicRnnV2OP() { LOG(INFO) << "del DynamicRnnV2OP"; } void Compute(OpKernelContext *ctx) override { LOG(INFO) << "in DynamicRnnV2OP"; } -- Gitee From 4978bb174ff35ad016314fdf8f265047e230d35f Mon Sep 17 00:00:00 2001 From: tianye Date: Thu, 2 Dec 2021 10:14:14 +0800 Subject: [PATCH 08/21] sc --- .../kernels/testcase/dynamic_rnn_v2_test.cc | 57 +++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc diff --git a/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc b/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc new file mode 100644 index 000000000..890be659d --- /dev/null +++ b/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc @@ -0,0 +1,57 @@ +#include +#include "tf_adapter/kernels/aicore/dynamic_rnn_v2_ops.cc" +#include "gtest/gtest.h" + +namespace tensorflow { +PartialTensorShape TShape(std::initializer_list dims) { + return PartialTensorShape(dims); +} + +class DynamicRnnV2OpTest : public testing::Test { + protected: + virtual void SetUp() {} + virtual void TearDown() {} +}; + +TEST_F(DynamicRnnV2OpTest, TestDynamicRnnV2) { + DataTypeSlice input_types({DT_FLOAT16,DT_FLOAT16,DT_FLOAT16,DT_FLOAT16,DT_FLOAT16}); + MemoryTypeSlice input_memory_types; + DataTypeSlice output_types({DT_FLOAT16,DT_FLOAT16,DT_FLOAT16,DT_FLOAT16, + DT_FLOAT16,DT_FLOAT16,DT_FLOAT16,DT_FLOAT16}); + MemoryTypeSlice output_memory_types; + DeviceBase *device = new DeviceBase(Env::Default()); + NodeDef *node_def = new NodeDef(); + OpDef *op_def = new OpDef(); + OpKernelConstruction *context = new OpKernelConstruction(DEVICE_CPU, device, nullptr, node_def, op_def, nullptr, + input_types, input_memory_types, output_types, output_memory_types, + 1, nullptr); + DynamicRnnV2OP dynamic_rnn_v2(context); + OpKernelContext *ctx = nullptr; + dynamic_rnn_v2.Compute(ctx); + dynamic_rnn_v2.IsExpensive(); + delete device; + delete node_def; + delete op_def; + delete context; +} + +TEST(DynamicRnnV2OpTest, TestDynamicRnnV2ShapeInference) { + const OpRegistrationData* reg; + TF_CHECK_OK(OpRegistry::Global()->LookUp("DynamicRnnV2", ®)); + OpDef op_def = reg->op_def; + NodeDef def; + TF_CHECK_OK(NodeDefBuilder("dummy", &op_def) + .Attr("T", DT_FLOAT16) + .Attr("direction", "BIDIRECTIONAL") + .Input(FakeInputStub(DT_FLOAT16)) + .Input(FakeInputStub(DT_FLOAT16)) + .Input(FakeInputStub(DT_FLOAT16)) + .Input(FakeInputStub(DT_FLOAT16)) + .Input(FakeInputStub(DT_FLOAT16)) + .Finalize(&def)); + shape_inference::InferenceContext c(0, &def, op_def,{TShape({1,16,16}), TShape({32,64}), TShape({64}), + TShape({1,16,16}, TShape({1,16,16})}, {}, {}, {}); + std::vector input_shapes; + TF_CHECK_OK(reg->shape_inference_fn(&c)); +} +} \ No newline at end of file -- Gitee From 2ca1973d00b97e296994f128cb04f6d89eba07a3 Mon Sep 17 00:00:00 2001 From: tianye Date: Thu, 2 Dec 2021 10:29:56 +0800 Subject: [PATCH 09/21] ut,st --- .../kernels/testcase/dynamic_rnn_v2_test.cc | 24 ++++++++++--------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git 
a/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc b/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc index 890be659d..e9b547836 100644 --- a/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc +++ b/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc @@ -1,4 +1,7 @@ #include +#include "tensorflow/core/framework/fake_input.h" +#include "tensorflow/core/framework/shape_inference.h" +#include "tensorflow/core/platform/test.h" #include "tf_adapter/kernels/aicore/dynamic_rnn_v2_ops.cc" #include "gtest/gtest.h" @@ -14,10 +17,10 @@ class DynamicRnnV2OpTest : public testing::Test { }; TEST_F(DynamicRnnV2OpTest, TestDynamicRnnV2) { - DataTypeSlice input_types({DT_FLOAT16,DT_FLOAT16,DT_FLOAT16,DT_FLOAT16,DT_FLOAT16}); + DataTypeSlice input_types({DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT}); MemoryTypeSlice input_memory_types; - DataTypeSlice output_types({DT_FLOAT16,DT_FLOAT16,DT_FLOAT16,DT_FLOAT16, - DT_FLOAT16,DT_FLOAT16,DT_FLOAT16,DT_FLOAT16}); + DataTypeSlice output_types({DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT, + DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT}); MemoryTypeSlice output_memory_types; DeviceBase *device = new DeviceBase(Env::Default()); NodeDef *node_def = new NodeDef(); @@ -41,17 +44,16 @@ TEST(DynamicRnnV2OpTest, TestDynamicRnnV2ShapeInference) { OpDef op_def = reg->op_def; NodeDef def; TF_CHECK_OK(NodeDefBuilder("dummy", &op_def) - .Attr("T", DT_FLOAT16) + .Attr("T", DT_FLOAT) .Attr("direction", "BIDIRECTIONAL") - .Input(FakeInputStub(DT_FLOAT16)) - .Input(FakeInputStub(DT_FLOAT16)) - .Input(FakeInputStub(DT_FLOAT16)) - .Input(FakeInputStub(DT_FLOAT16)) - .Input(FakeInputStub(DT_FLOAT16)) + .Input(FakeInputStub(DT_FLOAT)) + .Input(FakeInputStub(DT_FLOAT)) + .Input(FakeInputStub(DT_FLOAT)) + .Input(FakeInputStub(DT_FLOAT)) + .Input(FakeInputStub(DT_FLOAT)) .Finalize(&def)); shape_inference::InferenceContext c(0, &def, op_def,{TShape({1,16,16}), TShape({32,64}), TShape({64}), - TShape({1,16,16}, TShape({1,16,16})}, {}, {}, {}); - std::vector input_shapes; + TShape({1,16,16}), TShape({1,16,16})}, {}, {}, {}); TF_CHECK_OK(reg->shape_inference_fn(&c)); } } \ No newline at end of file -- Gitee From 98469a9a3dbdc4d7a1b34a5e28fdcdcc07e72473 Mon Sep 17 00:00:00 2001 From: tianye Date: Thu, 2 Dec 2021 10:42:04 +0800 Subject: [PATCH 10/21] ut,st --- .../kernels/testcase/dynamic_rnn_v2_test.cc | 21 +++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc b/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc index e9b547836..46cffe076 100644 --- a/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc +++ b/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc @@ -1,20 +1,29 @@ #include #include "tensorflow/core/framework/fake_input.h" -#include "tensorflow/core/framework/shape_inference.h" #include "tensorflow/core/platform/test.h" #include "tf_adapter/kernels/aicore/dynamic_rnn_v2_ops.cc" #include "gtest/gtest.h" +#include "tensorflow/core/framework/attr_value.pb.h" +#include "tensorflow/core/framework/attr_value_util.h" +#include "tensorflow/core/framework/node_def.pb.h" +#include "tensorflow/core/framework/node_def_builder.h" +#include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/shape_inference.h" namespace tensorflow { PartialTensorShape TShape(std::initializer_list dims) { return PartialTensorShape(dims); } -class DynamicRnnV2OpTest : public testing::Test { - protected: - virtual void SetUp() {} - virtual void TearDown() 
{} -}; +FakeInputFunctor FakeInputStub(DataType dt) { + return [dt](const OpDef& op_def, int in_index, const NodeDef& node_def, + NodeDefBuilder* builder) { + char c = 'a' + (in_index % 26); + string in_node = string(&c, 1); + builder->Input(in_node, 0, dt); + return Status::OK(); + }; +} TEST_F(DynamicRnnV2OpTest, TestDynamicRnnV2) { DataTypeSlice input_types({DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT}); -- Gitee From 7f1e7b4736b7ee389f4c01da3554e9913bf82811 Mon Sep 17 00:00:00 2001 From: tianye Date: Thu, 2 Dec 2021 10:51:04 +0800 Subject: [PATCH 11/21] ut,st --- tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc b/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc index 46cffe076..be52262f0 100644 --- a/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc +++ b/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc @@ -25,6 +25,12 @@ FakeInputFunctor FakeInputStub(DataType dt) { }; } +class DynamicRnnV2OpTest : public testing::Test { + protected: + virtual void SetUp() {} + virtual void TearDown() {} +}; + TEST_F(DynamicRnnV2OpTest, TestDynamicRnnV2) { DataTypeSlice input_types({DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT}); MemoryTypeSlice input_memory_types; -- Gitee From 7f2ac646a5558a0c6bebda7289698f161f9d2e45 Mon Sep 17 00:00:00 2001 From: tianye Date: Thu, 2 Dec 2021 10:56:23 +0800 Subject: [PATCH 12/21] ut,st --- tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc b/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc index be52262f0..112f755b7 100644 --- a/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc +++ b/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc @@ -11,6 +11,7 @@ #include "tensorflow/core/framework/shape_inference.h" namespace tensorflow { +namespace{ PartialTensorShape TShape(std::initializer_list dims) { return PartialTensorShape(dims); } @@ -71,4 +72,6 @@ TEST(DynamicRnnV2OpTest, TestDynamicRnnV2ShapeInference) { TShape({1,16,16}), TShape({1,16,16})}, {}, {}, {}); TF_CHECK_OK(reg->shape_inference_fn(&c)); } + +} } \ No newline at end of file -- Gitee From d613deee2f8b4fcad1d3229d122b3e2ccfc36a98 Mon Sep 17 00:00:00 2001 From: tianye Date: Thu, 2 Dec 2021 11:01:57 +0800 Subject: [PATCH 13/21] ut --- .../tests/{st => ut}/kernels/testcase/dynamic_rnn_v2_test.cc | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tf_adapter/tests/{st => ut}/kernels/testcase/dynamic_rnn_v2_test.cc (100%) diff --git a/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc b/tf_adapter/tests/ut/kernels/testcase/dynamic_rnn_v2_test.cc similarity index 100% rename from tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc rename to tf_adapter/tests/ut/kernels/testcase/dynamic_rnn_v2_test.cc -- Gitee From 3429c971e27a03ad3523312dcc2935e333977c84 Mon Sep 17 00:00:00 2001 From: tianye Date: Thu, 2 Dec 2021 11:11:58 +0800 Subject: [PATCH 14/21] ut --- .../kernels/testcase/dynamic_rnn_v2_test.cc | 70 +++++++++++++++++++ .../testcase/layer_norm_grad_ops_test.cc | 2 + 2 files changed, 72 insertions(+) create mode 100644 tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc diff --git a/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc b/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc new file mode 100644 index 000000000..7cc31f76d --- /dev/null +++ 
b/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc @@ -0,0 +1,70 @@ +#include "tensorflow/core/framework/fake_input.h" +#include "tensorflow/core/framework/shape_inference.h" +#include "tensorflow/core/platform/test.h" +#include "tf_adapter/kernels/aicore/dynamic_rnn_v2_ops.cc" +#include "gtest/gtest.h" +#include +namespace tensorflow { +namespace { + +PartialTensorShape TShape(std::initializer_list dims) { + return PartialTensorShape(dims); +} + +FakeInputFunctor FakeInputStub(DataType dt) { + return [dt](const OpDef &op_def, int in_index, const NodeDef &node_def, + NodeDefBuilder *builder) { + char c = 'a' + (in_index % 26); + string in_node = string(&c, 1); + builder->Input(in_node, 0, dt); + return Status::OK(); + }; +} + +TEST_F(DynamicRnnV2OpTest, TestDynamicRnnV2) { + DataTypeSlice input_types({DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT}); + MemoryTypeSlice input_memory_types; + DataTypeSlice output_types({DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT, + DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT}); + MemoryTypeSlice output_memory_types; + DeviceBase *device = new DeviceBase(Env::Default()); + NodeDef *node_def = new NodeDef(); + OpDef *op_def = new OpDef(); + OpKernelConstruction *context = new OpKernelConstruction(DEVICE_CPU, device, nullptr, node_def, op_def, nullptr, + input_types, input_memory_types, output_types, output_memory_types, + 1, nullptr); + DynamicRnnV2OP dynamic_rnn_v2(context); + OpKernelContext *ctx = nullptr; + dynamic_rnn_v2.Compute(ctx); + dynamic_rnn_v2.IsExpensive(); + delete device; + delete node_def; + delete op_def; + delete context; +} + +// TEST(LayerNormGradOpTest, TestLayerNormGradShapeInference) { +// const OpRegistrationData *reg; +// TF_CHECK_OK(OpRegistry::Global()->LookUp("FusedLayerNormGrad", ®)); +// OpDef op_def = reg->op_def; +// NodeDef def; +// TF_CHECK_OK(NodeDefBuilder("dummy", &op_def) +// .Attr("T", DT_FLOAT) +// .Input(FakeInputStub(DT_FLOAT)) +// .Input(FakeInputStub(DT_FLOAT)) +// .Input(FakeInputStub(DT_FLOAT)) +// .Input(FakeInputStub(DT_FLOAT)) +// .Input(FakeInputStub(DT_FLOAT)) +// .Finalize(&def)); +// shape_inference::InferenceContext c(0, &def, op_def, +// {TShape({16, 32}), TShape({16, 32}), TShape({16, 1}), +// TShape({16, 1}), TShape({32})}, +// {}, {}, {}); +// std::vector input_shapes; +// TF_CHECK_OK(reg->shape_inference_fn(&c)); +// ASSERT_EQ("[16,32]", c.DebugString(c.output(0))); +// ASSERT_EQ("[32]", c.DebugString(c.output(1))); +// ASSERT_EQ("[32]", c.DebugString(c.output(2))); +// } +} // namespace +} // namespace tensorflow diff --git a/tf_adapter/tests/st/kernels/testcase/layer_norm_grad_ops_test.cc b/tf_adapter/tests/st/kernels/testcase/layer_norm_grad_ops_test.cc index 3ee42c2fc..532abc7d4 100644 --- a/tf_adapter/tests/st/kernels/testcase/layer_norm_grad_ops_test.cc +++ b/tf_adapter/tests/st/kernels/testcase/layer_norm_grad_ops_test.cc @@ -21,6 +21,8 @@ FakeInputFunctor FakeInputStub(DataType dt) { }; } + + TEST(LayerNormGradOpTest, TestLayerNormGrad) { DataTypeSlice input_types({DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT}); MemoryTypeSlice input_memory_types; -- Gitee From fa3031ae0747496e60bfcbb7ab2ca8f4d663b981 Mon Sep 17 00:00:00 2001 From: tianye Date: Thu, 2 Dec 2021 11:17:58 +0800 Subject: [PATCH 15/21] ut --- .../kernels/testcase/dynamic_rnn_v2_test.cc | 42 +++++----- .../kernels/testcase/dynamic_rnn_v2_test.cc | 77 ------------------- 2 files changed, 21 insertions(+), 98 deletions(-) delete mode 100644 tf_adapter/tests/ut/kernels/testcase/dynamic_rnn_v2_test.cc diff --git 
a/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc b/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc index 7cc31f76d..f87476178 100644 --- a/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc +++ b/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc @@ -21,27 +21,27 @@ FakeInputFunctor FakeInputStub(DataType dt) { }; } -TEST_F(DynamicRnnV2OpTest, TestDynamicRnnV2) { - DataTypeSlice input_types({DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT}); - MemoryTypeSlice input_memory_types; - DataTypeSlice output_types({DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT, - DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT}); - MemoryTypeSlice output_memory_types; - DeviceBase *device = new DeviceBase(Env::Default()); - NodeDef *node_def = new NodeDef(); - OpDef *op_def = new OpDef(); - OpKernelConstruction *context = new OpKernelConstruction(DEVICE_CPU, device, nullptr, node_def, op_def, nullptr, - input_types, input_memory_types, output_types, output_memory_types, - 1, nullptr); - DynamicRnnV2OP dynamic_rnn_v2(context); - OpKernelContext *ctx = nullptr; - dynamic_rnn_v2.Compute(ctx); - dynamic_rnn_v2.IsExpensive(); - delete device; - delete node_def; - delete op_def; - delete context; -} +// TEST_F(DynamicRnnV2OpTest, TestDynamicRnnV2) { +// DataTypeSlice input_types({DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT}); +// MemoryTypeSlice input_memory_types; +// DataTypeSlice output_types({DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT, +// DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT}); +// MemoryTypeSlice output_memory_types; +// DeviceBase *device = new DeviceBase(Env::Default()); +// NodeDef *node_def = new NodeDef(); +// OpDef *op_def = new OpDef(); +// OpKernelConstruction *context = new OpKernelConstruction(DEVICE_CPU, device, nullptr, node_def, op_def, nullptr, +// input_types, input_memory_types, output_types, output_memory_types, +// 1, nullptr); +// DynamicRnnV2OP dynamic_rnn_v2(context); +// OpKernelContext *ctx = nullptr; +// dynamic_rnn_v2.Compute(ctx); +// dynamic_rnn_v2.IsExpensive(); +// delete device; +// delete node_def; +// delete op_def; +// delete context; +// } // TEST(LayerNormGradOpTest, TestLayerNormGradShapeInference) { // const OpRegistrationData *reg; diff --git a/tf_adapter/tests/ut/kernels/testcase/dynamic_rnn_v2_test.cc b/tf_adapter/tests/ut/kernels/testcase/dynamic_rnn_v2_test.cc deleted file mode 100644 index 112f755b7..000000000 --- a/tf_adapter/tests/ut/kernels/testcase/dynamic_rnn_v2_test.cc +++ /dev/null @@ -1,77 +0,0 @@ -#include -#include "tensorflow/core/framework/fake_input.h" -#include "tensorflow/core/platform/test.h" -#include "tf_adapter/kernels/aicore/dynamic_rnn_v2_ops.cc" -#include "gtest/gtest.h" -#include "tensorflow/core/framework/attr_value.pb.h" -#include "tensorflow/core/framework/attr_value_util.h" -#include "tensorflow/core/framework/node_def.pb.h" -#include "tensorflow/core/framework/node_def_builder.h" -#include "tensorflow/core/framework/op_kernel.h" -#include "tensorflow/core/framework/shape_inference.h" - -namespace tensorflow { -namespace{ -PartialTensorShape TShape(std::initializer_list dims) { - return PartialTensorShape(dims); -} - -FakeInputFunctor FakeInputStub(DataType dt) { - return [dt](const OpDef& op_def, int in_index, const NodeDef& node_def, - NodeDefBuilder* builder) { - char c = 'a' + (in_index % 26); - string in_node = string(&c, 1); - builder->Input(in_node, 0, dt); - return Status::OK(); - }; -} - -class DynamicRnnV2OpTest : public testing::Test { - protected: - virtual void SetUp() {} - virtual void TearDown() {} -}; - 
-TEST_F(DynamicRnnV2OpTest, TestDynamicRnnV2) { - DataTypeSlice input_types({DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT}); - MemoryTypeSlice input_memory_types; - DataTypeSlice output_types({DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT, - DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT}); - MemoryTypeSlice output_memory_types; - DeviceBase *device = new DeviceBase(Env::Default()); - NodeDef *node_def = new NodeDef(); - OpDef *op_def = new OpDef(); - OpKernelConstruction *context = new OpKernelConstruction(DEVICE_CPU, device, nullptr, node_def, op_def, nullptr, - input_types, input_memory_types, output_types, output_memory_types, - 1, nullptr); - DynamicRnnV2OP dynamic_rnn_v2(context); - OpKernelContext *ctx = nullptr; - dynamic_rnn_v2.Compute(ctx); - dynamic_rnn_v2.IsExpensive(); - delete device; - delete node_def; - delete op_def; - delete context; -} - -TEST(DynamicRnnV2OpTest, TestDynamicRnnV2ShapeInference) { - const OpRegistrationData* reg; - TF_CHECK_OK(OpRegistry::Global()->LookUp("DynamicRnnV2", ®)); - OpDef op_def = reg->op_def; - NodeDef def; - TF_CHECK_OK(NodeDefBuilder("dummy", &op_def) - .Attr("T", DT_FLOAT) - .Attr("direction", "BIDIRECTIONAL") - .Input(FakeInputStub(DT_FLOAT)) - .Input(FakeInputStub(DT_FLOAT)) - .Input(FakeInputStub(DT_FLOAT)) - .Input(FakeInputStub(DT_FLOAT)) - .Input(FakeInputStub(DT_FLOAT)) - .Finalize(&def)); - shape_inference::InferenceContext c(0, &def, op_def,{TShape({1,16,16}), TShape({32,64}), TShape({64}), - TShape({1,16,16}), TShape({1,16,16})}, {}, {}, {}); - TF_CHECK_OK(reg->shape_inference_fn(&c)); -} - -} -} \ No newline at end of file -- Gitee From af14e017381b80cfd86b00afe2b35ec83e8bc718 Mon Sep 17 00:00:00 2001 From: tianye Date: Thu, 2 Dec 2021 11:24:53 +0800 Subject: [PATCH 16/21] ut --- .../kernels/testcase/dynamic_rnn_v2_test.cc | 42 +++++++++---------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc b/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc index f87476178..93b2a5f13 100644 --- a/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc +++ b/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc @@ -21,27 +21,27 @@ FakeInputFunctor FakeInputStub(DataType dt) { }; } -// TEST_F(DynamicRnnV2OpTest, TestDynamicRnnV2) { -// DataTypeSlice input_types({DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT}); -// MemoryTypeSlice input_memory_types; -// DataTypeSlice output_types({DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT, -// DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT}); -// MemoryTypeSlice output_memory_types; -// DeviceBase *device = new DeviceBase(Env::Default()); -// NodeDef *node_def = new NodeDef(); -// OpDef *op_def = new OpDef(); -// OpKernelConstruction *context = new OpKernelConstruction(DEVICE_CPU, device, nullptr, node_def, op_def, nullptr, -// input_types, input_memory_types, output_types, output_memory_types, -// 1, nullptr); -// DynamicRnnV2OP dynamic_rnn_v2(context); -// OpKernelContext *ctx = nullptr; -// dynamic_rnn_v2.Compute(ctx); -// dynamic_rnn_v2.IsExpensive(); -// delete device; -// delete node_def; -// delete op_def; -// delete context; -// } +TEST(DynamicRnnV2OpTest, TestDynamicRnnV2) { + DataTypeSlice input_types({DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT}); + MemoryTypeSlice input_memory_types; + DataTypeSlice output_types({DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT, + DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT}); + MemoryTypeSlice output_memory_types; + DeviceBase *device = new DeviceBase(Env::Default()); + NodeDef *node_def = new NodeDef(); + OpDef *op_def = 
new OpDef(); + OpKernelConstruction *context = new OpKernelConstruction( + DEVICE_CPU, device, nullptr, node_def, op_def, nullptr, input_types, + input_memory_types, output_types, output_memory_types, 1, nullptr); + DynamicRnnV2OP dynamic_rnn_v2(context); + OpKernelContext *ctx = nullptr; + dynamic_rnn_v2.Compute(ctx); + dynamic_rnn_v2.IsExpensive(); + delete device; + delete node_def; + delete op_def; + delete context; +} // TEST(LayerNormGradOpTest, TestLayerNormGradShapeInference) { // const OpRegistrationData *reg; -- Gitee From 6b8feaacc9df7adc23442e2377545a92a88b9b3f Mon Sep 17 00:00:00 2001 From: tianye Date: Thu, 2 Dec 2021 11:34:24 +0800 Subject: [PATCH 17/21] ut --- .../kernels/testcase/dynamic_rnn_v2_test.cc | 47 ++++++------------- 1 file changed, 15 insertions(+), 32 deletions(-) diff --git a/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc b/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc index 93b2a5f13..850512092 100644 --- a/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc +++ b/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc @@ -1,9 +1,15 @@ +#include "tf_adapter/kernels/aicore/dynamic_rnn_v2_ops.cc" +#include +#include "gtest/gtest.h" +#include "tensorflow/core/framework/attr_value.pb.h" +#include "tensorflow/core/framework/attr_value_util.h" #include "tensorflow/core/framework/fake_input.h" +#include "tensorflow/core/framework/node_def.pb.h" +#include "tensorflow/core/framework/node_def_builder.h" +#include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/shape_inference.h" #include "tensorflow/core/platform/test.h" -#include "tf_adapter/kernels/aicore/dynamic_rnn_v2_ops.cc" -#include "gtest/gtest.h" -#include + namespace tensorflow { namespace { @@ -12,10 +18,10 @@ PartialTensorShape TShape(std::initializer_list dims) { } FakeInputFunctor FakeInputStub(DataType dt) { - return [dt](const OpDef &op_def, int in_index, const NodeDef &node_def, - NodeDefBuilder *builder) { + return [dt](const OpDef& op_def, int in_index, const NodeDef& node_def, + NodeDefBuilder* builder) { char c = 'a' + (in_index % 26); - string in_node = string(&c, 1); + string in_node = string(&c, 1); builder->Input(in_node, 0, dt); return Status::OK(); }; @@ -30,9 +36,9 @@ TEST(DynamicRnnV2OpTest, TestDynamicRnnV2) { DeviceBase *device = new DeviceBase(Env::Default()); NodeDef *node_def = new NodeDef(); OpDef *op_def = new OpDef(); - OpKernelConstruction *context = new OpKernelConstruction( - DEVICE_CPU, device, nullptr, node_def, op_def, nullptr, input_types, - input_memory_types, output_types, output_memory_types, 1, nullptr); + OpKernelConstruction *context = new OpKernelConstruction(DEVICE_CPU, device, nullptr, node_def, op_def, nullptr, + input_types, input_memory_types, output_types, output_memory_types, + 1, nullptr); DynamicRnnV2OP dynamic_rnn_v2(context); OpKernelContext *ctx = nullptr; dynamic_rnn_v2.Compute(ctx); @@ -43,28 +49,5 @@ TEST(DynamicRnnV2OpTest, TestDynamicRnnV2) { delete context; } -// TEST(LayerNormGradOpTest, TestLayerNormGradShapeInference) { -// const OpRegistrationData *reg; -// TF_CHECK_OK(OpRegistry::Global()->LookUp("FusedLayerNormGrad", ®)); -// OpDef op_def = reg->op_def; -// NodeDef def; -// TF_CHECK_OK(NodeDefBuilder("dummy", &op_def) -// .Attr("T", DT_FLOAT) -// .Input(FakeInputStub(DT_FLOAT)) -// .Input(FakeInputStub(DT_FLOAT)) -// .Input(FakeInputStub(DT_FLOAT)) -// .Input(FakeInputStub(DT_FLOAT)) -// .Input(FakeInputStub(DT_FLOAT)) -// .Finalize(&def)); -// shape_inference::InferenceContext 
c(0, &def, op_def, -// {TShape({16, 32}), TShape({16, 32}), TShape({16, 1}), -// TShape({16, 1}), TShape({32})}, -// {}, {}, {}); -// std::vector input_shapes; -// TF_CHECK_OK(reg->shape_inference_fn(&c)); -// ASSERT_EQ("[16,32]", c.DebugString(c.output(0))); -// ASSERT_EQ("[32]", c.DebugString(c.output(1))); -// ASSERT_EQ("[32]", c.DebugString(c.output(2))); -// } } // namespace } // namespace tensorflow -- Gitee From 4a4552dcf7c694e6bdeab6c51e36a489846dc268 Mon Sep 17 00:00:00 2001 From: tianye Date: Thu, 2 Dec 2021 11:43:49 +0800 Subject: [PATCH 18/21] ut --- tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc b/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc index 850512092..fa4451a6e 100644 --- a/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc +++ b/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc @@ -39,7 +39,7 @@ TEST(DynamicRnnV2OpTest, TestDynamicRnnV2) { OpKernelConstruction *context = new OpKernelConstruction(DEVICE_CPU, device, nullptr, node_def, op_def, nullptr, input_types, input_memory_types, output_types, output_memory_types, 1, nullptr); - DynamicRnnV2OP dynamic_rnn_v2(context); + DynamicRnnV2OP dynamic_rnn_v2(context); OpKernelContext *ctx = nullptr; dynamic_rnn_v2.Compute(ctx); dynamic_rnn_v2.IsExpensive(); -- Gitee From f8017883c80db81f2eed87c3aac55eb704ff5028 Mon Sep 17 00:00:00 2001 From: tianye Date: Thu, 2 Dec 2021 11:48:36 +0800 Subject: [PATCH 19/21] ut --- .../kernels/testcase/dynamic_rnn_v2_test.cc | 53 +++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 tf_adapter/tests/ut/kernels/testcase/dynamic_rnn_v2_test.cc diff --git a/tf_adapter/tests/ut/kernels/testcase/dynamic_rnn_v2_test.cc b/tf_adapter/tests/ut/kernels/testcase/dynamic_rnn_v2_test.cc new file mode 100644 index 000000000..fa4451a6e --- /dev/null +++ b/tf_adapter/tests/ut/kernels/testcase/dynamic_rnn_v2_test.cc @@ -0,0 +1,53 @@ +#include "tf_adapter/kernels/aicore/dynamic_rnn_v2_ops.cc" +#include +#include "gtest/gtest.h" +#include "tensorflow/core/framework/attr_value.pb.h" +#include "tensorflow/core/framework/attr_value_util.h" +#include "tensorflow/core/framework/fake_input.h" +#include "tensorflow/core/framework/node_def.pb.h" +#include "tensorflow/core/framework/node_def_builder.h" +#include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/shape_inference.h" +#include "tensorflow/core/platform/test.h" + +namespace tensorflow { +namespace { + +PartialTensorShape TShape(std::initializer_list dims) { + return PartialTensorShape(dims); +} + +FakeInputFunctor FakeInputStub(DataType dt) { + return [dt](const OpDef& op_def, int in_index, const NodeDef& node_def, + NodeDefBuilder* builder) { + char c = 'a' + (in_index % 26); + string in_node = string(&c, 1); + builder->Input(in_node, 0, dt); + return Status::OK(); + }; +} + +TEST(DynamicRnnV2OpTest, TestDynamicRnnV2) { + DataTypeSlice input_types({DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT}); + MemoryTypeSlice input_memory_types; + DataTypeSlice output_types({DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT, + DT_FLOAT,DT_FLOAT,DT_FLOAT,DT_FLOAT}); + MemoryTypeSlice output_memory_types; + DeviceBase *device = new DeviceBase(Env::Default()); + NodeDef *node_def = new NodeDef(); + OpDef *op_def = new OpDef(); + OpKernelConstruction *context = new OpKernelConstruction(DEVICE_CPU, device, nullptr, node_def, op_def, nullptr, + input_types, 
input_memory_types, output_types, output_memory_types, + 1, nullptr); + DynamicRnnV2OP dynamic_rnn_v2(context); + OpKernelContext *ctx = nullptr; + dynamic_rnn_v2.Compute(ctx); + dynamic_rnn_v2.IsExpensive(); + delete device; + delete node_def; + delete op_def; + delete context; +} + +} // namespace +} // namespace tensorflow -- Gitee From 6bfffea881fdb20dddc07d84bc7eac89c5496c28 Mon Sep 17 00:00:00 2001 From: tianye Date: Thu, 2 Dec 2021 12:23:46 +0800 Subject: [PATCH 20/21] ut --- .../kernels/testcase/dynamic_rnn_v2_test.cc | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/tf_adapter/tests/ut/kernels/testcase/dynamic_rnn_v2_test.cc b/tf_adapter/tests/ut/kernels/testcase/dynamic_rnn_v2_test.cc index fa4451a6e..7654dcc08 100644 --- a/tf_adapter/tests/ut/kernels/testcase/dynamic_rnn_v2_test.cc +++ b/tf_adapter/tests/ut/kernels/testcase/dynamic_rnn_v2_test.cc @@ -49,5 +49,24 @@ TEST(DynamicRnnV2OpTest, TestDynamicRnnV2) { delete context; } +TEST(DynamicRnnV2OpTest, TestDynamicRnnV2ShapeInference) { + const OpRegistrationData* reg; + TF_CHECK_OK(OpRegistry::Global()->LookUp("DynamicRnnV2", ®)); + OpDef op_def = reg->op_def; + NodeDef def; + TF_CHECK_OK(NodeDefBuilder("dummy", &op_def) + .Attr("T", DT_FLOAT) + .Attr("direction", "BIDIRECTIONAL") + .Input(FakeInputStub(DT_FLOAT)) + .Input(FakeInputStub(DT_FLOAT)) + .Input(FakeInputStub(DT_FLOAT)) + .Input(FakeInputStub(DT_FLOAT)) + .Input(FakeInputStub(DT_FLOAT)) + .Finalize(&def)); + shape_inference::InferenceContext c(0, &def, op_def,{TShape({1,16,16}), TShape({32,64}), TShape({64}), + TShape({1,16,16}), TShape({1,16,16})}, {}, {}, {}); + TF_CHECK_OK(reg->shape_inference_fn(&c)); +} + } // namespace } // namespace tensorflow -- Gitee From d85921ebb5e14379dd1ad8d19674a15b36357095 Mon Sep 17 00:00:00 2001 From: tianye Date: Thu, 2 Dec 2021 14:38:13 +0800 Subject: [PATCH 21/21] ut st --- .../kernels/testcase/dynamic_rnn_v2_test.cc | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc b/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc index fa4451a6e..7654dcc08 100644 --- a/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc +++ b/tf_adapter/tests/st/kernels/testcase/dynamic_rnn_v2_test.cc @@ -49,5 +49,24 @@ TEST(DynamicRnnV2OpTest, TestDynamicRnnV2) { delete context; } +TEST(DynamicRnnV2OpTest, TestDynamicRnnV2ShapeInference) { + const OpRegistrationData* reg; + TF_CHECK_OK(OpRegistry::Global()->LookUp("DynamicRnnV2", ®)); + OpDef op_def = reg->op_def; + NodeDef def; + TF_CHECK_OK(NodeDefBuilder("dummy", &op_def) + .Attr("T", DT_FLOAT) + .Attr("direction", "BIDIRECTIONAL") + .Input(FakeInputStub(DT_FLOAT)) + .Input(FakeInputStub(DT_FLOAT)) + .Input(FakeInputStub(DT_FLOAT)) + .Input(FakeInputStub(DT_FLOAT)) + .Input(FakeInputStub(DT_FLOAT)) + .Finalize(&def)); + shape_inference::InferenceContext c(0, &def, op_def,{TShape({1,16,16}), TShape({32,64}), TShape({64}), + TShape({1,16,16}), TShape({1,16,16})}, {}, {}, {}); + TF_CHECK_OK(reg->shape_inference_fn(&c)); +} + } // namespace } // namespace tensorflow -- Gitee
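
A minimal usage sketch of the reworked Python wrapper after this series, assuming the npu_bridge package from tf_adapter is importable and that DynamicRNN's constructor accepts hidden_size and dtype (the constructor itself is not shown in these patches, so those arguments are assumptions). Calling the layer without seq_length now pops "seq_length" from the op arguments and emits DynamicRnnV2, whose only inputs are x, w, b, init_h and init_c; passing seq_length keeps the original DynamicRnn path.

    # Hypothetical sketch, TF 1.x graph mode; constructor arguments are assumed.
    import tensorflow as tf
    from npu_bridge.estimator.npu.npu_dynamic_rnn import DynamicRNN

    # Time-major input: [num_step, batch_size, input_size].
    x = tf.ones([16, 8, 32], dtype=tf.float32)

    # hidden_size / dtype keyword names are assumptions about the constructor.
    rnn = DynamicRNN(hidden_size=64, dtype=tf.float32)

    # seq_length omitted -> the layer drops "seq_length" from its args and
    # calls gen_npu_ops.dynamic_rnn_v2(...); y is expected to have shape
    # [num_step, batch_size, hidden_size] = [16, 8, 64].
    y, output_h, output_c, i, j, f, o, tanhc = rnn(x)

Passing a seq_length tensor instead leaves the original gen_npu_ops.dynamic_rnn call in place, so existing callers that rely on sequence-length masking are unaffected by the dispatch added here.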