diff --git a/README.en.md b/README.en.md index 427b8a264563f09c3e4c770b69f4917039365457..a7519564f452c31ec6b985b3e5702c6294b21636 100644 --- a/README.en.md +++ b/README.en.md @@ -73,7 +73,6 @@ The following environment variables are function classes used in NPU scenarios o ``` export TASK_QUEUE_ENABLE=1 # Delivered by an asynchronous task to asynchronously call the ACL interface. You are advised to enable this environment variable and set its value to 1. -export PTCOPY_ENABLE=1 # Use the PTCopy operator mode to accelerate continuous rotation and copy. You are advised to enable this environment variable and set its value to 1. ``` The following are optional environment variables that may affect running models: diff --git a/README.zh.md b/README.zh.md index 262022df682d0c43b0d915e8997c3f646d7ad8c8..e9fcb196e595c1c03b35aea0b0fa9d26b20c2bc5 100644 --- a/README.zh.md +++ b/README.zh.md @@ -77,7 +77,6 @@ source pytorch/env.sh ``` export TASK_QUEUE_ENABLE=1 # 使用异步任务下发,异步调用acl接口,建议默认开启,开启设置为1 -export PTCOPY_ENABLE=1 # 使用PTCopy算子模式,加速转连续及copy等过程,建议默认开启,开启设置为1 ``` 可选的环境变量可能会对运行的模型产生影响: diff --git a/env.sh b/env.sh index d35a02be766772bb21fa33fd4b3bd2fe4db367a8..d17e4862485d21a814361f1ac4ff67e10e9a60d5 100644 --- a/env.sh +++ b/env.sh @@ -96,7 +96,6 @@ export LD_LIBRARY_PATH=${path_lib}:$LD_LIBRARY_PATH # pytorch 自定义环境变量 export TASK_QUEUE_ENABLE=0 # 使用异步任务下发,异步调用acl接口,建议默认开启,开启设置为1 -export PTCOPY_ENABLE=1 # 使用PTCopy算子模式,加速转连续及copy等过程,建议默认开启,开启设置为1 #export DYNAMIC_COMPILE_ENABLE=1 # 动态shape特性功能,针对shape变化场景,可选 开启设置为1 # log diff --git a/test/test_network_ops/test_rotated_box.py.py b/test/test_network_ops/test_rotated_box.py.py new file mode 100644 index 0000000000000000000000000000000000000000..91c690fd6955b9a750bd601050d6b47eaabbe077 --- /dev/null +++ b/test/test_network_ops/test_rotated_box.py.py @@ -0,0 +1,66 @@ +# Copyright (c) 2020, Huawei Technologies.All rights reserved. +# +# Licensed under the BSD 3-Clause License (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://opensource.org/licenses/BSD-3-Clause +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import torch +import torch_npu + +from torch_npu.testing.common_utils import TestCase, run_tests +from torch_npu.testing.common_device_type import instantiate_device_type_tests + +class TestRotatedBox(TestCase): + def npu_op_encode_exec(self, anchor_boxes, gt_bboxes, weight): + out = torch_npu.npu_rotated_box_encode(anchor_boxes, gt_bboxes, weight) + out = out.to("cpu") + return out.detach().numpy() + + def npu_op_decode_exec(self, anchor_boxes, deltas, weight): + out = torch_npu.npu_rotated_box_decode(anchor_boxes, deltas, weight) + out = out.to("cpu") + return out.detach().numpy() + + def test_rotated_boxes_encode_fp32(self, device): + anchor_boxes = torch.tensor([[[44.2877], [9.1412], [88.7575], [25.8879], [64.8047]]]).to("npu") + gt_bboxes = torch.tensor([[[39.1763], [0.9838], [78.1028], [29.5997], [51.5907]]]).to("npu") + weight = torch.tensor([1., 1., 1., 1., 1.]).npu() + expect_cpu = torch.tensor([[[-0.1773], [-0.1327], [-0.1331], [0.5358], [-0.8643]]]) + npu_output = self.npu_op_encode_exec(anchor_boxes, gt_bboxes, weight) + self.assertRtolEqual(expect_cpu.numpy(), npu_output) + + def test_rotated_boxes_decode_fp32(self, device): + anchor_boxes = torch.tensor([[[32.1855], [41.9922], [64.1435], [62.5325], [34.607]]]).to("npu") + deltas = torch.tensor([[[1.8725], [-1.8915], [0.2395], [-0.4622], [-34.6539]]]).to("npu") + weight = torch.tensor([1., 1., 1., 1., 1.]).npu() + expect_cpu = torch.tensor([[[87.70366], [6.9412346], [128.31055], [19.879467], [-88.313515]]]) + npu_output = self.npu_op_decode_exec(anchor_boxes, deltas, weight) + self.assertRtolEqual(expect_cpu.numpy(), npu_output) + + def test_rotated_boxes_encode_fp16(self, device): + anchor_boxes = torch.tensor([[[30.69], [32.6], [45.94], [59.88], [-44.53]]], dtype=torch.float16).to("npu") + gt_bboxes = torch.tensor([[[30.44], [18.72], [33.22], [45.56], [8.5]]], dtype=torch.float16).to("npu") + weight = torch.tensor([1., 1., 1., 1., 1.], dtype=torch.float16).npu() + expect_cpu = torch.tensor([[[-0.4253], [-0.5166], [-1.702], [-0.0162], [1.133]]], dtype=torch.float16) + npu_output = self.npu_op_encode_exec(anchor_boxes, gt_bboxes, weight) + self.assertRtolEqual(expect_cpu.numpy(), npu_output) + + def test_rotated_boxes_decode_fp16(self, device): + anchor_boxes = torch.tensor([[[4.137],[33.72],[29.4], [54.06], [41.28]]], dtype=torch.float16).to("npu") + deltas = torch.tensor([[[0.0244], [-1.992], [0.2109], [0.315], [-37.25]]], dtype=torch.float16).to("npu") + weight = torch.tensor([1., 1., 1., 1., 1.], dtype=torch.float16).npu() + expect_cpu = torch.tensor([[[1.786], [-10.58], [33.], [17.3], [-88.44]]], dtype=torch.float16) + npu_output = self.npu_op_decode_exec(anchor_boxes, deltas, weight) + self.assertRtolEqual(expect_cpu.numpy(), npu_output) + +instantiate_device_type_tests(TestRotatedBox, globals(), except_for='cpu') +if __name__ == "__main__": + run_tests() \ No newline at end of file diff --git a/test/test_network_ops/test_rotated_iou.py.py b/test/test_network_ops/test_rotated_iou.py.py new file mode 100644 index 0000000000000000000000000000000000000000..081b107ceee84d39bbe19d6f1f58a44c1e7c3925 --- /dev/null +++ b/test/test_network_ops/test_rotated_iou.py.py @@ -0,0 +1,83 @@ +# Copyright (c) 2020, Huawei Technologies.All rights reserved. +# +# Licensed under the BSD 3-Clause License (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://opensource.org/licenses/BSD-3-Clause +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import torch +import torch_npu +import numpy as np + +from torch_npu.testing.common_utils import TestCase, run_tests +from torch_npu.testing.common_device_type import instantiate_device_type_tests +from torch_npu.testing.util_test import create_common_tensor + +class TestRotatedIou(TestCase): + def generate_rto_data(self, item): + minValue, maxValue = 20, 60 + scope = 20 + dtype = item[0][0] + shape_one = item[0][-1] + shape_two = item[1][-1] + trans = item[-1] + + boxes_array1 = np.random.uniform(minValue, maxValue, shape_one[:2]+[2]).astype(dtype) + boxes_wh = np.random.randint(1, scope, size=shape_one[:2]+[2]) + boxes_angle = np.random.randint(-180, 180, size=shape_one[:2]+[1]) + boxes = np.concatenate([boxes_array1, boxes_wh, boxes_angle], dtype=dtype, axis=-1) + #query_boxes + query_boxes_array1 = np.random.uniform(minValue, maxValue, shape_two[:2]+[2]).astype(dtype) + query_boxes_wh = np.random.randint(1, scope, size=shape_two[:2]+[2] ) + query_boxes_angle = np.random.randint(-180, 180, size=shape_two[:2]+[1]) + query_boxes = np.concatenate([query_boxes_array1, query_boxes_wh, query_boxes_angle], dtype=dtype, axis=-1) + + cpu_input1 = torch.from_numpy(boxes) + cpu_input2 = torch.from_numpy(query_boxes) + npu_input1 = cpu_input1.npu() + npu_input2 = cpu_input2.npu() + return boxes, query_boxes, npu_input1, npu_input2 + + def cpu_expect_result(self, dtype): + if dtype == np.float32: + output = np.array([[[0., 0.00045966, 0.],[0., 0., 0.]], + [[0., 0., 0.],[0., 0., 0.]], + [[0., 0., 0.],[0.00600622, 0.10504241, 0.]], + [[0., 0., 0.],[0., 0., 0.]]], dtype=np.float32) + else: + output = np.array([[[0., 0.00045966, 0.],[0., 0., 0.]], + [[0., 0., 0.],[0., 0., 0.]], + [[0., 0., 0.],[0.00600622, 0.10504241, 0.]], + [[0., 0., 0.],[0., 0., 0.]]], dtype=np.float16) + return output + + def npu_op_exec(self, box1, box2, trans=False): + output = torch_npu.npu_rotated_iou(box1, box2, trans, 0, True, 0.0, 0.0) + output = output.detach().cpu().numpy() + return output + + def test_rotated_iou_shape_format_fp32(self, device): + dtype = np.float32 + shape_format = [[dtype, -1, [4,2,5]],[dtype, -1, [4,3,5]], False] + cpu_input1, cpu_input2, npu_input1, npu_input2 = self.generate_rto_data(shape_format) + cpu_output = self.cpu_expect_result(dtype) + npu_output = self.npu_op_exec(npu_input1, npu_input2, shape_format[-1]) + self.assertRtolEqual(cpu_output, npu_output) + + def test_rotated_iou_shape_format_fp16(self, device): + dtype = np.float16 + shape_format = [[dtype, -1, [4,2,5]],[dtype, -1, [4,3,5]], False] + cpu_input1, cpu_input2, npu_input1, npu_input2 = self.generate_rto_data(shape_format) + cpu_output = self.cpu_expect_result(dtype) + npu_output = self.npu_op_exec(npu_input1, npu_input2, shape_format[-1]) + self.assertRtolEqual(cpu_output, npu_output) + +instantiate_device_type_tests(TestRotatedIou, globals(), except_for="cpu") +if __name__ == "__main__": + run_tests() \ No newline at end of file diff --git a/test/test_network_ops/test_rotated_overlaps.py.py b/test/test_network_ops/test_rotated_overlaps.py.py new file mode 100644 index 
0000000000000000000000000000000000000000..7e799c2d6884809bce455678b63603bf77a6f7e0 --- /dev/null +++ b/test/test_network_ops/test_rotated_overlaps.py.py @@ -0,0 +1,97 @@ +# Copyright (c) 2020, Huawei Technologies.All rights reserved. +# +# Licensed under the BSD 3-Clause License (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://opensource.org/licenses/BSD-3-Clause +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import torch +import torch_npu +import numpy as np + +from torch_npu.testing.common_utils import TestCase, run_tests +from torch_npu.testing.common_device_type import instantiate_device_type_tests +from torch_npu.testing.util_test import create_common_tensor + +class TestRotatedOverlaps(TestCase): + def generate_rto_data(self, item): + min_value, max_value = 30, 60 + scope = 20 + dtype = item[0][0] + shape_one = item[0][-1] + shape_two = item[1][-1] + + boxes_center = np.random.uniform(min_value, max_value, shape_one[:2] + [2]).astype(dtype) + boxes_wh = np.random.randint(1, scope, size=shape_one[:2] + [2]) + boxes_angle = np.random.randint(-180, 180, size=shape_one[:2] + [1]) + boxes = np.concatenate([boxes_center, boxes_wh, boxes_angle], axis=-1, dtype=dtype) + # query_boxes + query_boxes_center = np.random.uniform(min_value, max_value, shape_two[:2] + [2]).astype(dtype) + query_boxes_wh = np.random.randint(1, scope, size=shape_two[:2] + [2]) + query_boxes_angle = np.random.randint(-180, 180, size=shape_two[:2] + [1]) + query_boxes = np.concatenate([query_boxes_center, query_boxes_wh, query_boxes_angle], axis=-1, dtype=dtype) + + cpu_input1 = torch.from_numpy(boxes) + cpu_input2 = torch.from_numpy(query_boxes) + npu_input1 = cpu_input1.npu() + npu_input2 = cpu_input2.npu() + return cpu_input1, cpu_input2, npu_input1, npu_input2 + + def cpu_expect_result(self, dtype): + if dtype == np.float16: + output = np.array([[[0., 13.27, 1.022, 0.], + [0., 0., 54.12, 0.], + [0., 0., 0., 19.17]]], dtype=np.float16) + else: + output = np.array([[[0., 10.289731], + [0., 0.], + [0., 0.]]], dtype=np.float32) + return output + + def npu_op_exec(self, box1, box2, trans=False): + output = torch_npu.npu_rotated_overlaps(box1, box2, trans) + output = output.detach().cpu().numpy() + return output + + def test_rotated_overlaps_shape_format_fp32(self, device): + dtype = np.float32 + shape_list = [ + [[1, 3, 5], [1, 2, 5]], + ] + is_trans_list = [False] + shape_format = [[[dtype, -1, m[0]],[dtype, -1, m[1]], k] + for m in shape_list + for k in is_trans_list] + + for item in shape_format: + cpu_input1, cpu_input2, npu_input1, npu_input2 = self.generate_rto_data(item[:-1]) + cpu_output = self.cpu_expect_result(dtype) + npu_output = self.npu_op_exec(npu_input1, npu_input2, item[-1]) + # fp32 has't enough precission, but match model need currently. 
+ self.assertRtolEqual(cpu_output, npu_output, prec=0.00005) + + def test_rotated_overlaps_shape_format_fp16(self, device): + dtype = np.float16 + shape_list = [ + [[1, 3, 5], [1, 4, 5]], + ] + # true is xyxyt, false is xywh format + is_trans_list = [False] + shape_format = [[[dtype, -1, m[0]],[dtype, -1, m[1]], k] + for m in shape_list + for k in is_trans_list] + for item in shape_format: + cpu_input1, cpu_input2, npu_input1, npu_input2 = self.generate_rto_data(item) + cpu_output = self.cpu_expect_result(dtype) + npu_output = self.npu_op_exec(npu_input1, npu_input2, item[-1]) + self.assertRtolEqual(cpu_output, npu_output) + +instantiate_device_type_tests(TestRotatedOverlaps, globals(), except_for="cpu") +if __name__ == "__main__": + run_tests() \ No newline at end of file diff --git a/test/test_trans_contiguous/test_as_strided_copy_to_contiguous.py b/test/test_trans_contiguous/test_as_strided_copy_to_contiguous.py new file mode 100644 index 0000000000000000000000000000000000000000..78d05c1681d58d283d81e2747e234597c27d9a08 --- /dev/null +++ b/test/test_trans_contiguous/test_as_strided_copy_to_contiguous.py @@ -0,0 +1,70 @@ +# Copyright (c) 2020, Huawei Technologies.All rights reserved. +# +# Licensed under the BSD 3-Clause License (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://opensource.org/licenses/BSD-3-Clause +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import torch +import torch_npu +import numpy as np + +from torch_npu.testing.common_utils import TestCase, run_tests +from torch_npu.testing.common_device_type import instantiate_device_type_tests +from torch_npu.testing.util_test import create_common_tensor, check_operators_in_prof + +os.environ["COMBINED_ENABLE"] = "1" # Open combined-view cases optimization + +class TestAsStridedCopyToContiguous(TestCase): + def cpu_op_exec(self, input1, size, stride, storage_offset): + output = torch.as_strided(input1, size, stride, storage_offset).contiguous() + output = output.numpy() + return output + + def npu_op_exec(self,input1, size, stride, storage_offset): + with torch.autograd.profiler.profile(use_npu=True) as prof: + output = torch.as_strided(input1, size, stride, storage_offset).contiguous() + self.assertEqual(check_operators_in_prof(['npuAsStrided'], prof, ['npuCombined']) \ + , True, "Error operators called!") + output = output.cpu().numpy() + return output + + def test_as_strided(self, device): + dtype_list = [np.bool, np.int32, np.float16, np.float32, np.int8, np.uint8, np.int64] + format_list = [-1] + small_shape_list = [ + [5, 5] + ] + small_shape_format = [ + [i, j, k] for i in dtype_list for j in format_list for k in small_shape_list + ] + + for item in small_shape_format: + cpu_input, npu_input = create_common_tensor(item, -100, 100) + cpu_output = self.cpu_op_exec(cpu_input, (3, 3), (1, 2), 1) + npu_output = self.npu_op_exec(npu_input, (3, 3), (1, 2), 1) + self.assertRtolEqual(cpu_output, npu_output) + + other_shape_format = [ + [[np.float16, 0, [13, 23]], (10, 15), (1, 2), 1], + [[np.float16, 3, [2, 13, 23]], (10, 15), (1, 2), 2], + [[np.float32, 29, [6, 32, 8, 2]], (8, 6, 2), (5, 4, 1), 3], + ] + + for item in other_shape_format: + cpu_input, 
npu_input = create_common_tensor(item[0], -100, 100) + cpu_output = self.cpu_op_exec(cpu_input, item[1], item[2], item[3]) + npu_output = self.npu_op_exec(npu_input, item[1], item[2], item[3]) + self.assertRtolEqual(cpu_output, npu_output) + +instantiate_device_type_tests(TestAsStridedCopyToContiguous, globals(), except_for="cpu") +if __name__ == "__main__": + run_tests() \ No newline at end of file diff --git a/test/test_trans_contiguous/test_tri_combined_views_copy_to_contiguous.py b/test/test_trans_contiguous/test_tri_combined_views_copy_to_contiguous.py new file mode 100644 index 0000000000000000000000000000000000000000..115d6059c9f3b953bfff5ea8eeb53412a98f320d --- /dev/null +++ b/test/test_trans_contiguous/test_tri_combined_views_copy_to_contiguous.py @@ -0,0 +1,97 @@ +# Copyright (c) 2020, Huawei Technologies.All rights reserved. +# +# Licensed under the BSD 3-Clause License (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://opensource.org/licenses/BSD-3-Clause +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import torch +import torch_npu +import numpy as np + +from torch_npu.testing.common_utils import TestCase, run_tests +from torch_npu.testing.common_device_type import instantiate_device_type_tests +from torch_npu.testing.util_test import create_common_tensor, check_operators_in_prof + +os.environ["COMBINED_ENABLE"] = "1" # Open combined-view cases optimization + +class TestTriCombinedViewsCopyToContiguous(TestCase): + def test_view_narrow_permute_copy_contiguous(self, device): + dtype_list1 = [np.float16, np.float32] + format_list1 = [-1] + shape_list1 = [ + [200, 30, 40, 16], + ] + shape_format = [ + [i, j, k] for i in dtype_list1 for j in format_list1 for k in shape_list1 + ] + + for item in shape_format: + cpu_input, npu_input = create_common_tensor(item, 0, 100) + # case 1: view+narrow+permute ==> cannot be optimized + with torch.autograd.profiler.profile(use_npu=True) as prof: + npu_out1 = npu_input.view(npu_input.size(0) * npu_input.size(1), npu_input.size(2), npu_input.size(3)) \ + [:,1:10].transpose(0, 1).contiguous() + self.assertEqual(check_operators_in_prof(['npuAsStrided'], prof, ['npuCombined']), \ + True, "Error operators called!") + cpu_out1 = cpu_input.view(cpu_input.size(0) * cpu_input.size(1), cpu_input.size(2), cpu_input.size(3)) \ + [:,1:10].transpose(0, 1).contiguous() + self.assertRtolEqual(npu_out1.to("cpu").numpy(), cpu_out1.numpy()) + + # case 2: permute+view+narrow ==> cannot be optimized + with torch.autograd.profiler.profile(use_npu=True) as prof: + npu_out2 = npu_input.permute(1, 0, 2, 3). \ + view(npu_input.size(1), npu_input.size(0), npu_input.size(2)*npu_input.size(3)) \ + [:,:,1:10].contiguous() + self.assertEqual(check_operators_in_prof(['npuAsStrided'], prof, ['npuCombined']), \ + True, "Error operators called!") + cpu_out2 = cpu_input.permute(1, 0, 2, 3). 
\ + view(cpu_input.size(1), cpu_input.size(0), cpu_input.size(2)*cpu_input.size(3)) \ + [:,:,1:10].contiguous() + self.assertRtolEqual(npu_out2.to("cpu").numpy(), cpu_out2.numpy()) + + def test_view_select_permute_copy_contiguous(self, device): + dtype_list2 = [np.float16, np.float32] + format_list2 = [-1] + shape_list2 = [ + [200, 30, 40, 16], + ] + shape_format = [ + [i, j, k] for i in dtype_list2 for j in format_list2 for k in shape_list2 + ] + + for item in shape_format: + cpu_input, npu_input = create_common_tensor(item, 0, 100) + # case 1: view+select+permute ==> cannot be optimized + with torch.autograd.profiler.profile(use_npu=True) as prof: + npu_out1 = npu_input.view(npu_input.size(0) * npu_input.size(1), npu_input.size(2), npu_input.size(3)) \ + [:,1].transpose(0, 1).contiguous() + self.assertEqual(check_operators_in_prof(['npuAsStrided'], prof, ['npuCombined']), \ + True, "Error operators called!") + cpu_out1 = cpu_input.view(cpu_input.size(0) * cpu_input.size(1), cpu_input.size(2), cpu_input.size(3)) \ + [:,1].transpose(0, 1).contiguous() + self.assertRtolEqual(npu_out1.to("cpu").numpy(), cpu_out1.numpy()) + + # case 2: permute+view+select ==> cannot be optimized + with torch.autograd.profiler.profile(use_npu=True) as prof: + npu_out2 = npu_input.permute(1, 0, 2, 3). \ + view(npu_input.size(1), npu_input.size(0), npu_input.size(2)*npu_input.size(3)) \ + [:,:,2].contiguous() + self.assertEqual(check_operators_in_prof(['npuAsStrided'], prof, ['npuCombined']), \ + True, "Error operators called!") + cpu_out2 = cpu_input.permute(1, 0, 2, 3). \ + view(cpu_input.size(1), cpu_input.size(0), cpu_input.size(2)*cpu_input.size(3)) \ + [:,:,2].contiguous() + self.assertRtolEqual(npu_out2.to("cpu").numpy(), cpu_out2.numpy()) + +instantiate_device_type_tests(TestTriCombinedViewsCopyToContiguous, globals(), except_for='cpu') +if __name__ == "__main__": + run_tests() \ No newline at end of file diff --git a/torch_npu/csrc/aten/common/CopyKernel.cpp b/torch_npu/csrc/aten/common/CopyKernel.cpp index e511fb30dd288f0ed08752de8fce9ba72d3ddc97..935b225506c089fe737db98a77570a2f1e609c62 100644 --- a/torch_npu/csrc/aten/common/CopyKernel.cpp +++ b/torch_npu/csrc/aten/common/CopyKernel.cpp @@ -33,75 +33,6 @@ namespace at_npu { namespace native { namespace { -// src : host <-- device -// | copy src to dst on cpu -// dst : host --> device -void copy_d2d_via_host(at::Tensor& self, const at::Tensor& src, bool same_type) { - c10::npu::NPUStream copy_stream = c10::npu::getCurrentNPUStream(); - aclError error = aclrtSynchronizeStream(copy_stream); - if (error != ACL_ERROR_NONE) { - AT_ERROR("ACL stream synchronize failed."); - return; - } - - int64_t real_bytes = - StorageDescHelper::GetValidMemorySize(src) * src.element_size(); - auto cpu_src = at::empty( - real_bytes / src.element_size(), src.options().device(at::kCPU)); - cpu_src = cpu_src.as_strided(src.sizes(), src.strides()); - - error = aclrtMemcpy( - cpu_src.data_ptr(), - real_bytes, - src.data_ptr(), - real_bytes, - ACL_MEMCPY_DEVICE_TO_HOST); - if (error != ACL_ERROR_NONE) { - AT_ERROR("aclrtMemcpy device to cpu_src error."); - return; - } - - real_bytes = - StorageDescHelper::GetValidMemorySize(self) * self.element_size(); - auto cpu_dst = at::empty( - real_bytes / self.element_size(), self.options().device(at::kCPU)); - cpu_dst = cpu_dst.as_strided(self.sizes(), self.strides()); - - if (!same_type) { - cpu_src = cpu_src.to(cpu_dst.dtype()); - } - - // sometimes npu_dst just need part of cpu_dst's elements, so we do memory - // copy from npu to cpu 
here, let npu_dst cover cpu_dst, to avoid unneeded - // cpu_dst's elements cover npu_dst's original elements - if ((!cpu_dst.is_contiguous()) && (self.defined())) { - error = aclrtMemcpy( - cpu_dst.data_ptr(), - real_bytes, - self.data_ptr(), - real_bytes, - ACL_MEMCPY_DEVICE_TO_HOST); - if (error != ACL_ERROR_NONE) { - AT_ERROR("ACL_Memcpy device to cpu_dst error."); - return; - } - } - - cpu_dst.copy_(cpu_src); - - error = aclrtMemcpy( - self.data_ptr(), - real_bytes, - cpu_dst.data_ptr(), - real_bytes, - ACL_MEMCPY_HOST_TO_DEVICE); - if (error != ACL_ERROR_NONE) { - AT_ERROR("aclrtMemcpy cpu_dst to device error."); - return; - } - NPU_LOGD("Src or dst is not contiguous when do device to device copy."); -} - // NOTE: helper function of copy, the input parameter is not checked, The caller // needs to ensure that the parameters are correct. @@ -132,14 +63,8 @@ void copy_d2d_last_method( bool same_type, bool non_blocking) { // general copy method but Low performance - if (torch_npu::option::OptionsManager::CheckPTcopy_Enable()) { - RECORD_FUNCTION("d2dCopyWithPTCopy", std::vector({src})); - copy_kernel_npu(self, src, non_blocking); - } else { - RECORD_FUNCTION( - "d2dCopyWithStreamSynchronize", std::vector({src})); - copy_d2d_via_host(self, src, same_type); - } + RECORD_FUNCTION("d2dCopyWithPTCopy", std::vector({src})); + copy_kernel_npu(self, src, non_blocking); } // the dst and src are same format now @@ -150,15 +75,22 @@ void copy_d2d_dtype_baseformat( const at::Tensor& src, bool non_blocking) { if (!self.is_contiguous()) { + // Contiguous/discontiguous source tensor copy to discontiguous self tensor return copy_d2d_last_method(self, src, true, non_blocking); } if (!src.is_contiguous()) { - // discontiguous + // Discontiguous source tensor copy to contiguous self tensor if (TransContiguous::ContiguousOptimizeWithBaseFormat(self, src)) { + // Optimized trans-contiguous method + return; + } else { + // General trans-contiguous method + NPUNativeFunctions::npu_stride_copy_out(src, src.sizes(), src.strides(), src.storage_offset(), self); return; } } else { + // Contiguous source tensor copy to contiguous self tensor int64_t numel = self.numel(); if (numel == src.numel()) { RECORD_FUNCTION("d2dCopyAsync", std::vector({src})); diff --git a/torch_npu/csrc/aten/common/CopyKernelNpu.cpp b/torch_npu/csrc/aten/common/CopyKernelNpu.cpp index a283c31d63de486cc0bcabdaefb75761a2109eac..cb669c4b2df742c7c2f0a3c1370f736c839bc198 100644 --- a/torch_npu/csrc/aten/common/CopyKernelNpu.cpp +++ b/torch_npu/csrc/aten/common/CopyKernelNpu.cpp @@ -28,7 +28,7 @@ c10::SmallVector get_view_value( static c10::SmallVector value; // It is determined by the definition of view attr value.resize(strides.size() + 3); - value[0] = t.numel(); // storageImpl numel + value[0] = t.storage().nbytes() / t.element_size(); // storageImpl numel value[1] = t.storage_offset(); // default to 0 value[2] = strides.size(); for (size_t i = 0; i < strides.size(); i++) { diff --git a/torch_npu/csrc/aten/npu_native_functions.yaml b/torch_npu/csrc/aten/npu_native_functions.yaml index 167c196324270d51a977cc7ee802bda23166e854..6ebbebcf8efe0d2689afb7a59c5638b50267e48d 100644 --- a/torch_npu/csrc/aten/npu_native_functions.yaml +++ b/torch_npu/csrc/aten/npu_native_functions.yaml @@ -1916,6 +1916,12 @@ custom: variants: function, method - func: npu_softmax_cross_entropy_with_logits_backward(Tensor grad, Tensor self, Tensor labels) -> Tensor variants: function, method + - func: npu_stride_copy(Tensor self, int[] shape, int[] stride, Scalar 
storage_offset) -> Tensor + - func: npu_stride_copy.out(Tensor self, int[] shape, int[] stride, Scalar storage_offset, *, Tensor(a!) out) -> Tensor(a!) + - func: npu_rotated_box_encode(Tensor self, Tensor gt_bboxes, Tensor weight) -> Tensor + - func: npu_rotated_box_decode(Tensor self, Tensor deltas, Tensor weight) -> Tensor + - func: npu_rotated_iou(Tensor self, Tensor query_boxes, bool trans=False, int mode=0, bool is_cross=True, float v_threshold=0.0, float e_threshold=0.0) -> Tensor + - func: npu_rotated_overlaps(Tensor self, Tensor query_boxes, bool trans=False) -> Tensor custom_autograd: - func: npu_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor - func: npu_convolution_transpose(Tensor input, Tensor weight, Tensor? bias, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups) -> Tensor diff --git a/torch_npu/csrc/aten/ops/AsStridedKernelNpu.cpp b/torch_npu/csrc/aten/ops/AsStridedKernelNpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3fb30d4b22dc379b82dfb5b0ff8cddba21523f9e --- /dev/null +++ b/torch_npu/csrc/aten/ops/AsStridedKernelNpu.cpp @@ -0,0 +1,63 @@ +// Copyright (c) 2020, Huawei Technologies.All rights reserved. +// +// Licensed under the BSD 3-Clause License (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://opensource.org/licenses/BSD-3-Clause +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "torch_npu/csrc/framework/utils/OpAdapter.h" +#include "torch_npu/csrc/aten/NPUNativeFunctions.h" +#include + +namespace at_npu { +namespace native { + +at::Tensor& stride_copy_out_npu_nocheck( + at::Tensor& result, + const at::Tensor& self, + at::IntArrayRef shape, + at::IntArrayRef stride, + at::Scalar storage_offset) { + RECORD_FUNCTION("npuAsStrided", std::vector({self})); + OpCommand cmd; + cmd.Name("AsStrided") + .InputWithoutContiguous(self) + .Input(shape) + .Input(stride) + .Input(storage_offset, at::kLong) + .Output(result) + .Run(); + return result; +} + +at::Tensor& NPUNativeFunctions::npu_stride_copy_out( + const at::Tensor& self, + c10::IntArrayRef shape, + c10::IntArrayRef stride, + c10::Scalar storage_offset, + at::Tensor& result) { + stride_copy_out_npu_nocheck(result, self, shape, stride, storage_offset); + return result; +} + +at::Tensor NPUNativeFunctions::npu_stride_copy( + const at::Tensor& self, + c10::IntArrayRef shape, + c10::IntArrayRef stride, + c10::Scalar storage_offset) { + // AsStrided OP only supports ND input + at::Tensor result = OpPreparation::ApplyTensorWithFormat( + shape, self.options(), ACL_FORMAT_ND); + stride_copy_out_npu_nocheck(result, self, shape, stride, storage_offset); + return result; +} + +} // namespace native +} // namespace at_npu \ No newline at end of file diff --git a/torch_npu/csrc/aten/ops/RotatedBoxDecodeKernelNpu.cpp.cpp b/torch_npu/csrc/aten/ops/RotatedBoxDecodeKernelNpu.cpp.cpp new file mode 100644 index 0000000000000000000000000000000000000000..09a880ea6d51c88623508844f0bf1db1567012b6 --- /dev/null +++ b/torch_npu/csrc/aten/ops/RotatedBoxDecodeKernelNpu.cpp.cpp @@ -0,0 +1,41 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. +// All rights reserved. +// +// Licensed under the BSD 3-Clause License (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://opensource.org/licenses/BSD-3-Clause +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "torch_npu/csrc/framework/utils/OpAdapter.h" +#include "torch_npu/csrc/aten/NPUNativeFunctions.h" + +namespace at_npu { +namespace native { + +at::Tensor NPUNativeFunctions::npu_rotated_box_decode( + const at::Tensor& self, + const at::Tensor& deltas, + const at::Tensor& weight){ + at::Tensor result = OpPreparation::ApplyTensor(self); + at::Tensor weightContiguous = weight.to(Device(at::kCPU), at::kFloat); + at::ArrayRef weightList(weightContiguous.data_ptr(), weightContiguous.numel()); + + OpCommand cmd; + cmd.Name("RotatedBoxDecode") + .Input(self) + .Input(deltas) + .Output(result) + .Attr("weight", weightList) + .Run(); + return result; +} +} //namespace native +} //namespace at_npu \ No newline at end of file diff --git a/torch_npu/csrc/aten/ops/RotatedBoxEncodeKernelNpu.cpp.cpp b/torch_npu/csrc/aten/ops/RotatedBoxEncodeKernelNpu.cpp.cpp new file mode 100644 index 0000000000000000000000000000000000000000..41f4bd85b07043356e3b0db1db2563350043539a --- /dev/null +++ b/torch_npu/csrc/aten/ops/RotatedBoxEncodeKernelNpu.cpp.cpp @@ -0,0 +1,41 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. 
+// All rights reserved. +// +// Licensed under the BSD 3-Clause License (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://opensource.org/licenses/BSD-3-Clause +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "torch_npu/csrc/framework/utils/OpAdapter.h" +#include "torch_npu/csrc/aten/NPUNativeFunctions.h" + +namespace at_npu { +namespace native { + +at::Tensor NPUNativeFunctions::npu_rotated_box_encode( + const at::Tensor& self, + const at::Tensor& gtBox, + const at::Tensor& weight){ + at::Tensor result = OpPreparation::ApplyTensor(self); + at::Tensor weightContiguous = weight.to(Device(at::kCPU), at::kFloat); + at::ArrayRef weightList(weightContiguous.data_ptr(), weightContiguous.numel()); + + OpCommand cmd; + cmd.Name("RotatedBoxEncode") + .Input(self) + .Input(gtBox) + .Output(result) + .Attr("weight", weightList) + .Run(); + return result; +} +} //namespace native +} //namespace at_npu \ No newline at end of file diff --git a/torch_npu/csrc/aten/ops/RotatedIouKernelNpu.cpp.cpp b/torch_npu/csrc/aten/ops/RotatedIouKernelNpu.cpp.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7d6f050729fef63e848f0b5ac6fdca7fdfa904a1 --- /dev/null +++ b/torch_npu/csrc/aten/ops/RotatedIouKernelNpu.cpp.cpp @@ -0,0 +1,81 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. +// All rights reserved. +// +// Licensed under the BSD 3-Clause License (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://opensource.org/licenses/BSD-3-Clause +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "torch_npu/csrc/framework/utils/OpAdapter.h" +#include "torch_npu/csrc/aten/NPUNativeFunctions.h" + +namespace at_npu { +namespace native { + +at::Tensor& rotated_iou_npu_nocheck( + at::Tensor& iou, + const at::Tensor& boxes, + const at::Tensor& query_boxes, + bool trans, + int64_t mode, + bool is_cross, + double v_threshold, + double e_threshold) { + string mode_str = (mode == 0) ? 
"iou" : "iof"; + + OpCommand cmd; + cmd.Name("RotatedIou") + .Input(boxes) + .Input(query_boxes) + .Output(iou) + .Attr("trans", trans) + .Attr("mode", mode_str) + .Attr("is_cross", is_cross) + .Attr("value", static_cast(v_threshold)) + .Attr("value", static_cast(e_threshold)) + .Run(); + return iou; +} + +at::Tensor NPUNativeFunctions::npu_rotated_iou( + const at::Tensor& boxes, + const at::Tensor& query_boxes, + bool trans, + int64_t mode, + bool is_cross, + double v_threshold, + double e_threshold) { + TORCH_CHECK(boxes.ndimension() == 3 && query_boxes.ndimension() == 3); + + auto origin_dtype = boxes.scalar_type(); + + at::Tensor boxesOk = boxes.permute({0, 2, 1}); + if (boxesOk.scalar_type() == at::kHalf){ + boxesOk = NPUNativeFunctions::npu_dtype_cast(boxesOk, at::kFloat); + } + Tensor query_boxesOk = query_boxes.permute({0, 2, 1}); + if (query_boxesOk.scalar_type() == at::kHalf){ + query_boxesOk = NPUNativeFunctions::npu_dtype_cast(query_boxesOk, at::kFloat); + } + + int64_t B = boxesOk.size(0); + int64_t N = boxesOk.size(-1); + int64_t K = query_boxesOk.size(-1); + + c10::SmallVector output_size({B, N, K}); + at::Tensor iou = OpPreparation::ApplyTensor(boxesOk, output_size); + + rotated_iou_npu_nocheck(iou, boxesOk, query_boxesOk, trans, mode, is_cross, v_threshold, e_threshold); + iou = NPUNativeFunctions::npu_dtype_cast(iou, origin_dtype); + return iou; +} +} // namespace native +} // namespace at \ No newline at end of file diff --git a/torch_npu/csrc/aten/ops/RotatedOverlapsKernelNpu.cpp.cpp b/torch_npu/csrc/aten/ops/RotatedOverlapsKernelNpu.cpp.cpp new file mode 100644 index 0000000000000000000000000000000000000000..df7e916ce04cb51dd0e4ae52c127cc21991e9f9b --- /dev/null +++ b/torch_npu/csrc/aten/ops/RotatedOverlapsKernelNpu.cpp.cpp @@ -0,0 +1,63 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. +// All rights reserved. +// +// Licensed under the BSD 3-Clause License (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://opensource.org/licenses/BSD-3-Clause +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "torch_npu/csrc/framework/utils/OpAdapter.h" +#include "torch_npu/csrc/aten/NPUNativeFunctions.h" + +namespace at_npu { +namespace native { + +at::Tensor& rotated_overlaps_npu_nocheck( + at::Tensor& overlaps, + const at::Tensor& self, + const at::Tensor& query_boxes, + bool trans) { + OpCommand cmd; + cmd.Name("RotatedOverlaps") + .Input(self) + .Input(query_boxes) + .Output(overlaps) + .Attr("trans", trans) + .Run(); + return overlaps; +} + +at::Tensor NPUNativeFunctions::npu_rotated_overlaps( + const at::Tensor& self, + const at::Tensor& query_boxes, + bool trans) { + TORCH_CHECK(self.ndimension() == 3 && query_boxes.ndimension() == 3, + "boxes' dim should be equal to query_boxes' ndimension() ", + "and equal to 3!"); + auto origin_dtype = self.scalar_type(); + // the Op only support fp32 currently! 
+ at::Tensor selfCp = NPUNativeFunctions::npu_dtype_cast(self, at::kFloat).permute({0, 2, 1}); + at::Tensor queryBoxesCp = NPUNativeFunctions::npu_dtype_cast(query_boxes, at::kFloat).permute({0, 2, 1}); + + int64_t B = selfCp.size(0); + int64_t N = selfCp.size(-1); + int64_t K = queryBoxesCp.size(-1); + + c10::SmallVector<int64_t, SIZE> output_size({B, N, K}); + at::Tensor overlaps = OpPreparation::ApplyTensor(selfCp, output_size); + + rotated_overlaps_npu_nocheck(overlaps, selfCp, queryBoxesCp, trans); + overlaps = NPUNativeFunctions::npu_dtype_cast(overlaps, origin_dtype); + + return overlaps; +} +} // namespace native +} // namespace at_npu \ No newline at end of file diff --git a/torch_npu/csrc/framework/OpCommandBase.h b/torch_npu/csrc/framework/OpCommandBase.h index 069c2e4e8211226a682f16b0f703cd73e1fd4c4e..bb27053170fe61b9266db18ce1b2774893d98268 100644 --- a/torch_npu/csrc/framework/OpCommandBase.h +++ b/torch_npu/csrc/framework/OpCommandBase.h @@ -72,6 +72,14 @@ namespace at_npu return static_cast<Derived &>(*this); } + + Derived &InputWithoutContiguous( + const at::Tensor &input, + const string &descName = "", + const string &realData = "") + { + return AddTensorInput(const_cast<at::Tensor &>(input), at::ScalarType::Undefined, descName, realData); + } + + Derived &Input() { return AddNoneTensor(); diff --git a/torch_npu/csrc/framework/contiguous/slice_opt.cpp b/torch_npu/csrc/framework/contiguous/slice_opt.cpp index 0eeea31d2ce6233d13c7e570814276e68877e005..c43cb5cf6fac49027ae121a8e2dcc191191ffecd 100644 --- a/torch_npu/csrc/framework/contiguous/slice_opt.cpp +++ b/torch_npu/csrc/framework/contiguous/slice_opt.cpp @@ -27,7 +27,7 @@ namespace at_npu public: bool Optimizer(const at::Tensor &src, at::Tensor &self) override { - // Pattern slice. Current pattern should be used before PTcopy process. + // Pattern slice. // Current pattern does not directly depend on other patterns. // The relative sequence of this pattern and other patterns is not important.
c10::SmallVector offsets; diff --git a/torch_npu/csrc/register/OptionsManager.cpp b/torch_npu/csrc/register/OptionsManager.cpp index 0afa6ce4d49bdd48ee26026256829ea18559574f..cb93b59f9df31efe5906b2167567bdb06ae9bd7a 100644 --- a/torch_npu/csrc/register/OptionsManager.cpp +++ b/torch_npu/csrc/register/OptionsManager.cpp @@ -30,14 +30,6 @@ bool OptionsManager::CheckQueueEnable() { return (queue_enable == 1); } -bool OptionsManager::CheckPTcopy_Enable() { - static int32_t PTcopy__enable = -1; - if (PTcopy__enable == -1) { - PTcopy__enable = GetBoolTypeOption("PTCOPY_ENABLE"); - } - return (PTcopy__enable == 1); -} - bool OptionsManager::CheckCombinedOptimizerEnable() { static int32_t combined_optimize = -1; if (combined_optimize == -1) { diff --git a/torch_npu/csrc/register/OptionsManager.h b/torch_npu/csrc/register/OptionsManager.h index dd59ea6513cb6d4da2518f3c728e6ea30a39e598..0bae5ab2005ea57d95afc7a922ddca3f6ac8283c 100644 --- a/torch_npu/csrc/register/OptionsManager.h +++ b/torch_npu/csrc/register/OptionsManager.h @@ -27,7 +27,6 @@ namespace option { class OptionsManager { public: static bool CheckQueueEnable(); - static bool CheckPTcopy_Enable(); static bool CheckCombinedOptimizerEnable(); static bool CheckTriCombinedOptimizerEnable(); static bool CheckAclDumpDateEnable(); diff --git a/torch_npu/testing/util_test.py b/torch_npu/testing/util_test.py index 40c00f3d3bb46db226af85a6b5414e507beef100..6294c267847020b51a0ad001c468720c43040a61 100644 --- a/torch_npu/testing/util_test.py +++ b/torch_npu/testing/util_test.py @@ -141,4 +141,22 @@ def create_dtype_tensor(shape, dtype, npu_format=-1, min_value=-5, max_value=5, npu_input = torch.from_numpy(x).to(device) if npu_format != -1 and (dtype in [torch.float, torch.half]): npu_input = torch_npu.npu_format_cast(npu_input, npu_format) - return cpu_input, npu_input \ No newline at end of file + return cpu_input, npu_input + +def check_operators_in_prof(expected_operators, prof, unexpected_operators=None): + unexpected_operators = unexpected_operators or [] + prof_key_averages = prof.key_averages() + if not prof_key_averages: + return print("torch profiling is empty, please check it") + for prof_item in prof_key_averages: + if prof_item.key in unexpected_operators: + # if an unexpected operator is called, pattern inference in trans-contiguous has failed + return False + elif prof_item.key in expected_operators: + # if an expected operator is called, remove it from the expected_operators list + expected_operators.remove(prof_item.key) + + # if the expected_operators list is empty, all expected operators have been called + if not expected_operators: + return True + return False
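
For reference (not part of the diff above), a minimal sketch of how the new `check_operators_in_prof` helper is meant to be used: profile a trans-contiguous copy of an NPU tensor and assert which copy operators were dispatched. The shapes, strides, and the `npuAsStrided`/`npuCombined` operator names are taken from the tests in this change; an NPU device and a torch_npu build with these patches applied are assumed.

```python
import torch
import torch_npu

from torch_npu.testing.util_test import check_operators_in_prof

# Build a discontiguous view of an NPU tensor via as_strided, then force a
# contiguous copy while profiling (mirrors test_as_strided above).
x = torch.arange(25, dtype=torch.float32).reshape(5, 5).npu()
with torch.autograd.profiler.profile(use_npu=True) as prof:
    y = torch.as_strided(x, (3, 3), (1, 2), 1).contiguous()

# Expect the AsStrided kernel to service the copy and the combined-view
# optimization to stay out of the way.
assert check_operators_in_prof(['npuAsStrided'], prof, ['npuCombined'])
```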