diff --git a/test/npu/test_jit_script.py b/test/npu/test_jit_script.py
index 1c96265eb9a8604ce132e4eaf30d43ef43bfb396..026ce43cbc9a90a3e9cdebc6ea90456c693e3259 100644
--- a/test/npu/test_jit_script.py
+++ b/test/npu/test_jit_script.py
@@ -86,6 +86,7 @@ class TestJitTrace(TestCase):
         output2 = script_model(x, r1, r2)
         self.assertRtolEqual(output1, output2)
 
-
+#test
 if __name__ == '__main__':
     run_tests()
+#test
\ No newline at end of file
diff --git a/torch_npu/onnx/wrapper_ops_combined.py b/torch_npu/onnx/wrapper_ops_combined.py
index aee88fa11b775b94bae5cb60fe73f3fc9f0e410e..9a844c69f3d49258d3005f7efff3adb5bb623741 100644
--- a/torch_npu/onnx/wrapper_ops_combined.py
+++ b/torch_npu/onnx/wrapper_ops_combined.py
@@ -1,11 +1,9 @@
-import torch
+
 import torch_npu
 from torch_npu.utils._error_code import ErrCode, pta_error
 
-
+import torch
 __all__ = []
-
-
 class _NPULinearOP(object):
 
     @staticmethod
@@ -14,9 +12,7 @@ class _NPULinearOP(object):
             return torch._C._nn.linear(input_, weight, bias)
         return torch.ops.npu.npu_linear(input_, weight, bias)
 
-
 class _NPUTransposeOP(object):
-
     @staticmethod
     def forward(self, perm, require_contiguous=True, out=None):
         if torch.onnx.is_in_onnx_export():
@@ -28,8 +24,6 @@ class _NPUTransposeOP(object):
             out = torch.ops.npu.npu_transpose(
                 self, perm, require_contiguous)
         return out
-
-
 class _NPUBroadcastOP(object):
 
     @staticmethod
diff --git a/torch_npu/testing/testcase.py b/torch_npu/testing/testcase.py
index 487420f6b009dea07e42bf6691e725c67bf839e7..c739d0ae82c4941941751e8e417f53b1751bfede 100644
--- a/torch_npu/testing/testcase.py
+++ b/torch_npu/testing/testcase.py
@@ -522,3 +522,4 @@ class TestCase(expecttest.TestCase):
 
         if baseline is None or runtime < baseline * 0.9:
             PerfBaseline.set_baseline(methodId, runtime)
+# test
\ No newline at end of file
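
Note on the wrapper_ops_combined.py hunks: the wrapper classes touched here all share one dispatch pattern, branching on torch.onnx.is_in_onnx_export() so that ONNX export sees a stock ATen op while normal execution goes to the NPU custom kernel. A minimal sketch of that pattern, taken from the _NPULinearOP hunk above and assuming torch_npu is installed so that torch.ops.npu.npu_linear is registered (the non-export branch will fail on a CPU-only build):

    import torch


    class _NPULinearOP(object):
        # Export-aware dispatch, mirroring the pattern in wrapper_ops_combined.py.

        @staticmethod
        def forward(input_, weight, bias=None):
            if torch.onnx.is_in_onnx_export():
                # During ONNX export, fall back to plain aten::linear so the
                # exported graph only contains ops that ONNX understands.
                return torch._C._nn.linear(input_, weight, bias)
            # Outside export, call the NPU custom kernel; this requires
            # `import torch_npu` and an NPU device at runtime.
            return torch.ops.npu.npu_linear(input_, weight, bias)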