diff --git a/test/network_ops/test_upsample_scale_bicubic2d.py b/test/network_ops/test_upsample_scale_bicubic2d.py
index 0dd68b485fd676239a355a1960e75db70ddd42f9..ddbfa73ad76f2b92acc8931c9171b247faf994c3 100644
--- a/test/network_ops/test_upsample_scale_bicubic2d.py
+++ b/test/network_ops/test_upsample_scale_bicubic2d.py
@@ -58,7 +58,6 @@ class TestUpsampleBicubic2d(TestCase):
         return shape_format1
 
 
-    @unittest.skip("skip test_upsample_bicubic2d_scale_common_shape_format now")
     def test_upsample_bicubic2d_scale_common_shape_format(self):
         for item in self.create_scale_shape_format32():
             cpu_input1, npu_input1 = create_common_tensor(item[0], 0, 255)
@@ -66,7 +65,6 @@ class TestUpsampleBicubic2d(TestCase):
             npu_output = self.npu_op_scale_exec(npu_input1, item[1])
             self.assertRtolEqual(cpu_output, npu_output)
 
-    @unittest.skip("skip test_upsample_bicubic2d_float16_scale_shape_format now")
     def test_upsample_bicubic2d_float16_scale_shape_format(self):
         def cpu_op_exec_fp16(input1, size):
             input1 = input1.to(torch.float32)