diff --git a/test/test_network_ops/test_reflection_pad2d.py b/test/test_network_ops/test_reflection_pad2d.py
new file mode 100644
index 0000000000000000000000000000000000000000..dff95bf8c3065ee8fb5fc481e0d39f6704e42cde
--- /dev/null
+++ b/test/test_network_ops/test_reflection_pad2d.py
@@ -0,0 +1,104 @@
+# Copyright (c) 2020, Huawei Technologies. All rights reserved.
+#
+# Licensed under the BSD 3-Clause License (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://opensource.org/licenses/BSD-3-Clause
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import torch_npu
+import numpy as np
+
+from torch_npu.testing.common_utils import TestCase, run_tests
+from torch_npu.testing.common_device_type import instantiate_device_type_tests
+from torch_npu.testing.util_test import create_common_tensor
+
+
+class TestReflectionPad2d(TestCase):
+    def cpu_op_out_exec(self, input1, pad, output):
+        m = torch._C._nn.reflection_pad2d(input1, pad, out=output)
+        m = m.numpy()
+        return m
+
+    def npu_op_out_exec(self, input1, pad, output):
+        m_n = torch._C._nn.reflection_pad2d(input1, pad, out=output)
+        m_n = m_n.to("cpu")
+        m_n = m_n.numpy()
+        return m_n
+
+    def cpu_op_exec(self, input1, pad):
+        m = torch.nn.ReflectionPad2d(pad)
+        output = m(input1)
+        output = output.numpy()
+        return output
+
+    def npu_op_exec(self, input1, pad):
+        m = torch.nn.ReflectionPad2d(pad).to("npu")
+        output = m(input1)
+        output = output.to("cpu")
+        output = output.numpy()
+        return output
+
+    def cpu_op_out_exec_fp16(self, input1, pad, output):
+        input1 = input1.to(torch.float32)
+        m = torch._C._nn.reflection_pad2d(input1, pad, out=output)
+        m = m.numpy()
+        m = m.astype(np.float16)
+        return m
+
+    def cpu_op_exec_fp16(self, input1, pad):
+        input1 = input1.to(torch.float32)
+        m = torch.nn.ReflectionPad2d(pad)
+        output = m(input1)
+        output = output.numpy()
+        output = output.astype(np.float16)
+        return output
+
+    def test_reflection_pad2d_out_shape_format_fp16(self, device):
+        shape_format = [
+            [[np.float16, 0, (1, 1, 4, 3)], [2, 2, 2, 2]],
+            [[np.float16, 3, (1, 1, 4, 3)], 2]
+        ]
+
+        for item in shape_format:
+            cpu_input1, npu_input1 = create_common_tensor(item[0], 1, 100)
+            cpuout = torch.randn(1, 1, 3, 3)
+            npuout = cpuout.to(npu_input1.dtype).npu()
+            cpu_output = self.cpu_op_out_exec_fp16(cpu_input1, item[1], cpuout)
+            npu_output = self.npu_op_out_exec(npu_input1, item[1], npuout)
+            self.assertRtolEqual(cpu_output, npu_output)
+
+    def test_reflection_pad2d_shape_format_fp16(self, device):
+        shape_format = [
+            [[np.float16, 0, (1, 1, 4, 3)], [2, 2, 2, 2]],
+            [[np.float16, 3, (1, 1, 4, 3)], 2]
+        ]
+
+        for item in shape_format:
+            cpu_input1, npu_input1 = create_common_tensor(item[0], 1, 100)
+            cpu_output = self.cpu_op_exec_fp16(cpu_input1, item[1])
+            npu_output = self.npu_op_exec(npu_input1, item[1])
+            self.assertRtolEqual(cpu_output, npu_output)
+
+    def test_reflection_pad2d_shape_format_fp32(self, device):
+        shape_format = [
+            [[np.float32, 0, (1, 1, 37, 37)], [2, 2, 2, 2]],
+            [[np.float32, 3, (1, 1, 17, 17)], 2]
+        ]
+
+        for item in shape_format:
+            cpu_input1, npu_input1 = create_common_tensor(item[0], 1, 100)
+            cpu_output = self.cpu_op_exec(cpu_input1, item[1])
+            npu_output = self.npu_op_exec(npu_input1, item[1])
+            self.assertRtolEqual(cpu_output, npu_output)
+
+instantiate_device_type_tests(TestReflectionPad2d, globals(), except_for="cpu")
+if __name__ == "__main__":
+    run_tests()
\ No newline at end of file
diff --git a/torch_npu/csrc/aten/ops/ReflectionPad2dKernelNpu.cpp b/torch_npu/csrc/aten/ops/ReflectionPad2dKernelNpu.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..fedabd8fdc12bbd644810c4ed892d2007fb94846
--- /dev/null
+++ b/torch_npu/csrc/aten/ops/ReflectionPad2dKernelNpu.cpp
@@ -0,0 +1,94 @@
+// Copyright (c) 2020 Huawei Technologies Co., Ltd
+// Copyright (c) 2019, Facebook CORPORATION.
+// All rights reserved.
+//
+// Licensed under the BSD 3-Clause License (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://opensource.org/licenses/BSD-3-Clause
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "torch_npu/csrc/framework/utils/OpAdapter.h"
+#include "torch_npu/csrc/aten/NPUNativeFunctions.h"
+
+namespace at_npu {
+namespace native {
+
+c10::SmallVector<int64_t, SIZE> reflection_pad2d_npu_output_size(const at::Tensor& self, at::IntArrayRef padding) {
+  int64_t N = self.size(0);
+  int64_t C = self.size(1);
+  int64_t H = self.size(2);
+  int64_t W = self.size(3);
+  int64_t padding_l = padding[0];
+  int64_t padding_r = padding[1];
+  int64_t padding_t = padding[2];
+  int64_t padding_b = padding[3];
+  int64_t Ho = H + padding_t + padding_b;
+  int64_t Wo = W + padding_l + padding_r;
+  c10::SmallVector<int64_t, SIZE> outputSize = {N, C, Ho, Wo};
+  return outputSize;
+}
+
+at::Tensor& reflection_pad2d_out_npu_nocheck(
+    const at::Tensor& self,
+    at::IntArrayRef padding,
+    at::Tensor& out) {
+  TORCH_CHECK(padding.size() == 4, "padding size is expected to be 4");
+  c10::SmallVector<int64_t, SIZE> vectorInt;
+  c10::SmallVector<int64_t, SIZE> paddingsVector = array_to_small_vector(padding);
+  paddingsVector.resize(2 * self.dim(), 0);
+  for (int64_t i = paddingsVector.size(); i > 1; i -= 2) {
+    vectorInt.emplace_back(paddingsVector[i - 2]);
+    vectorInt.emplace_back(paddingsVector[i - 1]);
+  }
+  c10::SmallVector<int64_t, SIZE> value_tensor = {(int64_t)0};
+  OpCommand cmd;
+  if (self.dtype() == at::kHalf) {
+    cmd.Name("PadV3")
+        .Input(self)
+        .Input(vectorInt, at::kInt)
+        .Input(value_tensor, self.scalar_type())
+        .Output(out)
+        .Attr("mode", (string)"reflect")
+        .Attr("paddings_contiguous", true)
+        .Run();
+  } else {
+    cmd.Name("MirrorPad")
+        .Input(self)
+        .Input(vectorInt, at::kInt)
+        .Output(out)
+        .Attr("mode", (string)"REFLECT")
+        .Run();
+  }
+  return out;
+}
+
+at::Tensor& NPUNativeFunctions::reflection_pad2d_out(
+    const at::Tensor& self,
+    at::IntArrayRef padding,
+    at::Tensor& result) {
+  auto outputSize = reflection_pad2d_npu_output_size(self, padding);
+  OpPreparation::CheckOut(
+      {self},
+      result,
+      self,
+      outputSize);
+  reflection_pad2d_out_npu_nocheck(self, padding, result);
+  return result;
+}
+
+at::Tensor NPUNativeFunctions::reflection_pad2d(const at::Tensor& self, at::IntArrayRef padding) {
+  TORCH_CHECK(padding.size() == 4, "padding size is expected to be 4");
+  auto outputSize = reflection_pad2d_npu_output_size(self, padding);
+  at::Tensor out = OpPreparation::ApplyTensor(self, outputSize);
+  reflection_pad2d_out_npu_nocheck(self, padding, out);
+  return out;
+}
+} // namespace native
+} // namespace at_npu
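Note on the padding layout (not part of the patch): the loop in `reflection_pad2d_out_npu_nocheck` takes PyTorch's `(left, right, top, bottom)` padding, zero-extends it to two entries per dimension, and walks it back-to-front, producing per-dimension `(before, after)` pairs in NCHW order. A minimal sketch of that layout, checked against `np.pad` with illustrative shapes and pad sizes:

```python
import numpy as np
import torch

# PyTorch's reflection pad takes (left, right, top, bottom),
# applied to the last two dims of an NCHW tensor.
x = torch.randn(1, 1, 4, 3)
left, right, top, bottom = 2, 2, 2, 2
torch_out = torch.nn.functional.pad(x, (left, right, top, bottom), mode="reflect")

# The kernel's loop reorders those values into per-dimension pairs
# [[0, 0], [0, 0], [top, bottom], [left, right]] for N, C, H, W --
# exactly the pad_width layout np.pad expects.
np_out = np.pad(x.numpy(), [(0, 0), (0, 0), (top, bottom), (left, right)],
                mode="reflect")

assert np.allclose(torch_out.numpy(), np_out)
```

Both the `PadV3` (kHalf) and `MirrorPad` branches consume the same reordered `vectorInt`; the patch does not say why half precision is routed through `PadV3`, so the check above only covers the padding layout the two paths share.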