diff --git a/test/test_network_ops/test_upsample_linear1d_backward.py b/test/test_network_ops/test_upsample_linear1d_backward.py
index 4cbfa26a31fd57e62824e18504af0527a0ec92d6..ff7531669ab1b14161eeccd5dc04544598e95b73 100644
--- a/test/test_network_ops/test_upsample_linear1d_backward.py
+++ b/test/test_network_ops/test_upsample_linear1d_backward.py
@@ -51,8 +51,8 @@ class TestUpsampleLinear1DBackward(TestCase):
         output = output.to("cpu")
         return output.detach().numpy(), gradnpu.detach().numpy()
 
-    def test_upsample_linear1d_backward(self, device="npu"):
-        for item in self.creat_shape_format(device):
+    def test_upsample_linear1d_backward(self):
+        for item in self.creat_shape_format():
             cpu_input, npu_input = create_common_tensor(item, 0, 100)
             size = list(item[2])
 
diff --git a/test/test_network_ops/test_var.py b/test/test_network_ops/test_var.py
index 6ef4ffd19384601ae3f89a280c23b830d11ae7a7..12482c7dc92d68210b8185996e308f335d949d22 100644
--- a/test/test_network_ops/test_var.py
+++ b/test/test_network_ops/test_var.py
@@ -143,7 +143,7 @@ class TestVar(TestCase):
             npu_input = npu_input.npu_format_cast(npuformat)
         return cpu_input, npu_input
 
-    def test_var_shape_format_fp16(self, device="npu"):
+    def test_var_shape_format_fp16(self):
         format_list = [-1]
         shape_list = [[32, 24], [32, 8, 24]]
         unbiased_list = [True, False]
@@ -158,7 +158,7 @@ class TestVar(TestCase):
             npu_output = self.npu_op_exec(npu_input, item[3])
             self.assertRtolEqual(cpu_output, npu_output)
 
-    def test_var_shape_format_fp32(self, device="npu"):
+    def test_var_shape_format_fp32(self):
         format_list = [-1]
         shape_list = [[32, 24], [32, 8, 24]]
         unbiased_list = [True, False]
@@ -171,7 +171,7 @@ class TestVar(TestCase):
             npu_output = self.npu_op_exec(npu_input, item[3])
             self.assertRtolEqual(cpu_output, npu_output)
 
-    def test_var_dim_shape_format_fp16(self, device="npu"):
+    def test_var_dim_shape_format_fp16(self):
         format_list = [-1]
         shape_list = [[32, 24], [32, 8, 24]]
         dim_list = [0]
@@ -189,7 +189,7 @@ class TestVar(TestCase):
             npu_output = self.npu_op_dim_exec(npu_input, item[3], item[4], item[5])
             self.assertRtolEqual(cpu_output, npu_output)
 
-    def test_var_dim_shape_format_fp32(self, device="npu"):
+    def test_var_dim_shape_format_fp32(self):
         format_list = [-1]
         shape_list = [[32, 24], [32, 8, 24]]
         dim_list = [0]
@@ -205,7 +205,7 @@ class TestVar(TestCase):
             npu_output = self.npu_op_dim_exec(npu_input, item[3], item[4], item[5])
             self.assertRtolEqual(cpu_output, npu_output)
 
-    def test_var_names_dim_shape_format_fp16(self, device="npu"):
+    def test_var_names_dim_shape_format_fp16(self):
         format_list = [-1]
         shape_list1 = [[32, 24], [32, 8, 24]]
         dim_list = [0]
@@ -223,7 +223,7 @@ class TestVar(TestCase):
             npu_output = self.npu_op_names_dim_exec(npu_input, item[3], item[4], item[5])
             self.assertRtolEqual(cpu_output, npu_output)
 
-    def test_var_names_dim_shape_format_fp32(self, device="npu"):
+    def test_var_names_dim_shape_format_fp32(self):
         format_list = [-1]
         shape_list1 = [[32, 24], [32, 8, 24]]
         dim_list = [0]
@@ -240,7 +240,7 @@ class TestVar(TestCase):
             self.assertRtolEqual(cpu_output, npu_output)
 
 
-    def test_var_out_shape_format_fp16(self, device="npu"):
+    def test_var_out_shape_format_fp16(self):
         format_list1 = [-1]
         shape_list = [[32, 24], [32, 8, 24]]
         dim_list = [0]
@@ -261,7 +261,7 @@ class TestVar(TestCase):
             cpu_output1 = cpu_output1.astype(np.float16)
             self.assertRtolEqual(cpu_output1, npu_output1)
 
-    def test_var_out_shape_format_fp32(self, device="npu"):
+    def test_var_out_shape_format_fp32(self):
         format_list1 = [-1]
         shape_list = [[32, 24], [32, 8, 24]]
         dim_list = [0]
@@ -279,7 +279,7 @@ class TestVar(TestCase):
             npu_output1 = self.npu_op_out_exec(npu_input1, item[3], npu_output, item[4], item[5])
             self.assertRtolEqual(cpu_output1, npu_output1)
 
-    def test__var_shape_format_fp16(self, device="npu"):
+    def test__var_shape_format_fp16(self):
         format_list = [-1]
         shape_list = [[32, 24], [32, 8, 24]]
         unbiased_list = [True, False]
@@ -295,7 +295,7 @@ class TestVar(TestCase):
             npu_output = self.npu_op_var_exec(npu_input, item[3])
             self.assertRtolEqual(cpu_output, npu_output)
 
-    def test__var_shape_format_fp32(self,device):
+    def test__var_shape_format_fp32(self):
         format_list = [-1]
         shape_list = [[32, 24], [32, 8, 24]]
         unbiased_list = [True, False]
@@ -309,7 +309,7 @@ class TestVar(TestCase):
             npu_output = self.npu_op_var_exec(npu_input, item[3])
             self.assertRtolEqual(cpu_output, npu_output)
 
-    def test_var_mean_shape_format_fp16(self, device="npu"):
+    def test_var_mean_shape_format_fp16(self):
         format_list = [-1]
         shape_list = [[32, 24], [32, 8, 24]]
         unbiased_list = [True, False]
@@ -327,7 +327,7 @@ class TestVar(TestCase):
             self.assertRtolEqual(cpu_output1, npu_output1)
             self.assertRtolEqual(cpu_output2, npu_output2)
 
-    def test_var_mean_shape_format_fp32(self, device="npu"):
+    def test_var_mean_shape_format_fp32(self):
         format_list = [-1]
         shape_list = [[32, 24], [32, 8, 24]]
         unbiased_list = [True, False]
@@ -342,7 +342,7 @@ class TestVar(TestCase):
             self.assertRtolEqual(cpu_output1, npu_output1)
             self.assertRtolEqual(cpu_output2, npu_output2)
 
-    def test_var_mean_dim_shape_format_fp16(self, device="npu"):
+    def test_var_mean_dim_shape_format_fp16(self):
         format_list1 = [-1]
         shape_list1 = [[32, 24], [32, 8, 24]]
         dim_list = [0]
@@ -362,7 +362,7 @@ class TestVar(TestCase):
             self.assertRtolEqual(cpu_output1, npu_output1)
             self.assertRtolEqual(cpu_output2, npu_output2)
 
-    def test_var_mean_dim_shape_format_fp32(self, device="npu"):
+    def test_var_mean_dim_shape_format_fp32(self):
         format_list = [-1]
         shape_list = [[32, 1024], [32, 8, 1024]]
         dim_list = [0]
@@ -379,7 +379,7 @@ class TestVar(TestCase):
             self.assertRtolEqual(cpu_output1, npu_output1)
             self.assertRtolEqual(cpu_output2, npu_output2)
 
-    def test_var_mean_names_dim_shape_format_fp16(self, device="npu"):
+    def test_var_mean_names_dim_shape_format_fp16(self):
         shape = (1024, 8, 32)
         dimlist = ['N', 'C', 'H']
         cpu_input = torch.rand(shape, dtype=torch.float32)
@@ -395,7 +395,7 @@ class TestVar(TestCase):
         self.assertRtolEqual(cpu_output1, npu_output1)
         self.assertRtolEqual(cpu_output2, npu_output2)
 
-    def test_var_mean_names_dim_shape_format_fp32(self, device="npu"):
+    def test_var_mean_names_dim_shape_format_fp32(self):
         shape = (1024, 8, 32)
         dimlist = ['N', 'C', 'H']
         cpu_input = torch.rand(shape, dtype=torch.float32, names=('N', 'C', 'H'))
@@ -407,7 +407,7 @@ class TestVar(TestCase):
         self.assertRtolEqual(cpu_output1, npu_output1)
         self.assertRtolEqual(cpu_output2, npu_output2)
 
-    def test_var_dim_shape_format_5d_fp16(self, device="npu"):
+    def test_var_dim_shape_format_5d_fp16(self):
         format_list = [-1]
         shape_list = [[2, 94, 4, 52, 192]]
         dim_list = [0]
diff --git a/torch_npu/csrc/aten/ops/UpsampleLinear1dBackwardKernelNpu.cpp b/torch_npu/csrc/aten/ops/UpsampleLinear1dBackwardKernelNpu.cpp
index fdd5f5465dd29bcf5b167558b28fc376ab89402f..aed4a194d6c14bc77f94bd38c847e121cd1d2418 100644
--- a/torch_npu/csrc/aten/ops/UpsampleLinear1dBackwardKernelNpu.cpp
+++ b/torch_npu/csrc/aten/ops/UpsampleLinear1dBackwardKernelNpu.cpp
@@ -119,5 +119,6 @@ at::Tensor NPUNativeFunctions::upsample_linear1d_backward(
 
   return result;
 }
+
 } // namespace native
 } // namespace at_npu
\ No newline at end of file
diff --git a/torch_npu/csrc/aten/ops/UpsampleLinear1dKernelNpu.cpp b/torch_npu/csrc/aten/ops/UpsampleLinear1dKernelNpu.cpp
index e4d7aac4552fcdde8603ea1b35f8ff61a10fb531..bdd6d8adcd41420015786d39750289aa9e6336c7 100644
--- a/torch_npu/csrc/aten/ops/UpsampleLinear1dKernelNpu.cpp
+++ b/torch_npu/csrc/aten/ops/UpsampleLinear1dKernelNpu.cpp
@@ -105,7 +105,7 @@ at::Tensor& NPUNativeFunctions::upsample_linear1d_out(
 
   if (!NpuUtils::check_match(&result)) {
     at::Tensor contiguousResult = NpuUtils::format_contiguous(result);
-    at::Tensor newResult = upsample_linear1d_out_nocheck(self, output_size, align_corners, scales, result);
+    at::Tensor newResult = upsample_linear1d_out_nocheck(self, output_size, align_corners, scales, contiguousResult);
     NpuUtils::format_fresh_view(result, newResult);
   } else {
     upsample_linear1d_out_nocheck(self, output_size, align_corners, scales, result);
diff --git a/torch_npu/csrc/aten/ops/VarKernelNpu.cpp b/torch_npu/csrc/aten/ops/VarKernelNpu.cpp
index 2c3302aa7ae3e6bfc444d8cc31766482fb62a654..9c93fb023c87009e351f6ba0f56386b736d3eccb 100644
--- a/torch_npu/csrc/aten/ops/VarKernelNpu.cpp
+++ b/torch_npu/csrc/aten/ops/VarKernelNpu.cpp
@@ -221,5 +221,6 @@ tuple<at::Tensor, at::Tensor> NPUNativeFunctions::var_mean(const at::Tensor& sel
 
   return NPUNativeFunctions::var_mean(self, dim, unbiased, false);
 }
+
 } // namespace native
 } // namespace at_npu
\ No newline at end of file
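
Below the patch, for illustration only (not part of the diff): a minimal standalone sketch of the fp16 comparison pattern the var tests above rely on, where the CPU reference is computed in float32 and cast back to float16 before assertRtolEqual. The shape, dim, unbiased and keepdim values are placeholder assumptions, and plain torch/numpy stand in for the torch_npu test harness.

import numpy as np
import torch

# Illustrative input; the real tests draw shapes, dims and flags from item tuples.
x_fp16 = torch.rand(32, 8, 24, dtype=torch.float16)

# CPU reference: upcast to float32, run var_mean, then cast the result back to
# float16, mirroring the cpu_output1.astype(np.float16) step in the tests above.
var32, mean32 = torch.var_mean(x_fp16.float(), dim=0, unbiased=True, keepdim=False)
cpu_var = var32.numpy().astype(np.float16)
cpu_mean = mean32.numpy().astype(np.float16)

# On an NPU build, the device result would be computed directly in float16 and
# compared with assertRtolEqual(cpu_var, npu_var); only the reference side is shown here.
print(cpu_var.shape, cpu_mean.shape)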