diff --git a/torch_npu/csrc/aten/ops/GluGradKernelNpu.cpp b/torch_npu/csrc/aten/ops/GluGradKernelNpu.cpp
index 912c3d6e57e779a7ed3c9fb8958943a7ca4fb7cb..9ec04528db0c99168a1f47df549f7590686c0609 100644
--- a/torch_npu/csrc/aten/ops/GluGradKernelNpu.cpp
+++ b/torch_npu/csrc/aten/ops/GluGradKernelNpu.cpp
@@ -23,6 +23,12 @@ namespace at_npu {
 namespace native {
 
 at::Tensor& NPUNativeFunctions::glu_backward_out(const at::Tensor &grad_output, const at::Tensor &self,
     int64_t dim, at::Tensor &result) {
+  auto outputSize = input_same_output_size(self);
+  OpPreparation::CheckOut(
+      {grad_output, self},
+      result,
+      grad_output,
+      outputSize);
   TORCH_CHECK(self.dim() > 0, "glu does not support 0-dimensional Tensors");
   auto wrap_dim = at::maybe_wrap_dim(dim, self.dim());
diff --git a/torch_npu/csrc/aten/ops/GluKernelNpu.cpp b/torch_npu/csrc/aten/ops/GluKernelNpu.cpp
index c0514c39015fa0451f769f3f6335380f7fd5d09f..c444e51d7b395c6db86621ca2472ce9f1f3852fa 100644
--- a/torch_npu/csrc/aten/ops/GluKernelNpu.cpp
+++ b/torch_npu/csrc/aten/ops/GluKernelNpu.cpp
@@ -23,6 +23,11 @@ namespace at_npu {
 namespace native {
 
 at::Tensor& NPUNativeFunctions::glu_out(const at::Tensor& self, int64_t dim, at::Tensor& result) {
+  OpPreparation::CheckOut(
+      {self},
+      result,
+      self);
+
   TORCH_CHECK(self.dim() > 0, "glu does not support 0-dimensional at::Tensors");
   auto wrap_dim = at::maybe_wrap_dim(dim, self.dim());
   const int64_t nIn = self.size(wrap_dim);