diff --git a/test/network_ops/test_upsample_scale_bicubic2d.py b/test/network_ops/test_upsample_scale_bicubic2d.py
index a6c47cbdd338c0c432b32bf32c44aa99ebb55a48..e182815b2c1b3f1ad27dd4c41df9ce0732ec69ed 100644
--- a/test/network_ops/test_upsample_scale_bicubic2d.py
+++ b/test/network_ops/test_upsample_scale_bicubic2d.py
@@ -43,7 +43,6 @@ class TestUpsampleBicubic2d(TestCase):
 
         return shape_format1
 
-    @unittest.skip("skip test_upsample_bicubic2d_scale_common_shape_format now")
     def test_upsample_bicubic2d_scale_common_shape_format(self):
         for item in self.create_scale_shape_format32():
             cpu_input1, npu_input1 = create_common_tensor(item[0], 0, 255)
@@ -51,7 +50,6 @@ class TestUpsampleBicubic2d(TestCase):
             npu_output = self.npu_op_scale_exec(npu_input1, item[1])
             self.assertRtolEqual(cpu_output, npu_output)
 
-    @unittest.skip("skip test_upsample_bicubic2d_float16_scale_shape_format now")
     def test_upsample_bicubic2d_float16_scale_shape_format(self):
         def cpu_op_exec_fp16(input1, size):
             input1 = input1.to(torch.float32)