diff --git a/test/network_ops/test_upsample_scale_bicubic2d.py b/test/network_ops/test_upsample_scale_bicubic2d.py
index a6c47cbdd338c0c432b32bf32c44aa99ebb55a48..a9b0d9e6bf9cab85fe753720df91c032fc7760d4 100644
--- a/test/network_ops/test_upsample_scale_bicubic2d.py
+++ b/test/network_ops/test_upsample_scale_bicubic2d.py
@@ -1,4 +1,3 @@
-import unittest
 import torch
 import numpy as np
 import torch_npu
@@ -43,7 +42,6 @@ class TestUpsampleBicubic2d(TestCase):
 
         return shape_format1
 
-    @unittest.skip("skip test_upsample_bicubic2d_scale_common_shape_format now")
     def test_upsample_bicubic2d_scale_common_shape_format(self):
         for item in self.create_scale_shape_format32():
             cpu_input1, npu_input1 = create_common_tensor(item[0], 0, 255)
@@ -51,7 +49,6 @@ class TestUpsampleBicubic2d(TestCase):
             npu_output = self.npu_op_scale_exec(npu_input1, item[1])
             self.assertRtolEqual(cpu_output, npu_output)
 
-    @unittest.skip("skip test_upsample_bicubic2d_float16_scale_shape_format now")
     def test_upsample_bicubic2d_float16_scale_shape_format(self):
        def cpu_op_exec_fp16(input1, size):
            input1 = input1.to(torch.float32)