diff --git a/test/test_optimized_lib/test_activations.py b/test/test_optimized_lib/test_activations.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7f4c26b4643d7c811fd95c631dcd866e494b406
--- /dev/null
+++ b/test/test_optimized_lib/test_activations.py
@@ -0,0 +1,136 @@
+# Copyright (c) 2020 Huawei Technologies Co., Ltd
+# All rights reserved.
+#
+# Licensed under the BSD 3-Clause License (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://opensource.org/licenses/BSD-3-Clause
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import torch_npu
+import torch.nn.functional as F
+import numpy as np
+from torch_npu.testing.testcase import TestCase, run_tests
+from torch_npu.testing.common_utils import create_common_tensor
+
+from torch_npu.contrib.optimized_lib.module import Mish, SiLU
+
+random_seed = 123
+torch.manual_seed(random_seed)
+
+class TestActivations(TestCase):
+
+    def cpu_mish(self, input1):
+        """
+        Official implementation based on PyTorch link:
+        https://github.com/digantamisra98/Mish/blob/master/Mish/Torch/mish.py
+
+        Applies the mish function element-wise:
+        mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + exp(x)))
+        See additional documentation for mish class.
+        """
+        input1.requires_grad = True
+        res = input1 * torch.tanh(F.softplus(input1))
+        l = res.sum()
+        l.backward()
+        return res.detach(), input1.grad
+
+    def npu_mish(self, input1):
+        input1.requires_grad = True
+        model = Mish()
+        res = model(input1)
+        l = res.sum()
+        l.backward()
+        return res.detach().cpu(), input1.grad.cpu()
+
+    def test_mish(self):
+        dtype_list = [np.float32]
+        format_list = [-1]
+        shape_list = [
+            [4],
+            [2, 3],
+            [6, 5, 8, 10],
+            [1, 2, 3, 6, 6],
+            [2, 5, 6, 8, 9, 2],
+            [2, 5, 6, 8, 9, 2, 2],
+        ]
+        shape_format = [
+            [i, j, k] for i in dtype_list for j in format_list for k in shape_list
+        ]
+
+        for item in shape_format:
+            cpu_input, npu_input = create_common_tensor(item, 1, 10)
+            if cpu_input.dtype == torch.float16:
+                cpu_input = cpu_input.float()
+                cpu_output, cpu_inputgrad = self.cpu_mish(cpu_input)
+                cpu_output = cpu_output.half()
+                cpu_inputgrad = cpu_inputgrad.half()
+            else:
+                cpu_output, cpu_inputgrad = self.cpu_mish(cpu_input)
+
+            npu_output, npu_inputgrad = self.npu_mish(npu_input)
+
+            self.assertRtolEqual(cpu_output, npu_output)
+            self.assertRtolEqual(cpu_inputgrad, npu_inputgrad)
+
+    def cpu_silu(self, input1):
+        """
+        CPU reference implementation of SiLU (also known as Swish).
+
+        Applies the SiLU function element-wise:
+        silu(x) = x * sigmoid(x)
+
+        See additional documentation for the SiLU class.
+        """
+        input1.requires_grad = True
+        res = input1 * torch.sigmoid(input1)
+        l = res.sum()
+        l.backward()
+        return res.detach(), input1.grad
+
+    def npu_silu(self, input1):
+        input1.requires_grad = True
+        model = SiLU()
+        res = model(input1)
+        l = res.sum()
+        l.backward()
+        return res.detach().cpu(), input1.grad.cpu()
+
+    def test_silu(self):
+        dtype_list = [np.float32]
+        format_list = [-1]
+        shape_list = [
+            [4],
+            [2, 3],
+            [6, 5, 8, 10],
+            [1, 2, 3, 6, 6],
+            [2, 5, 6, 8, 9, 2],
+            [2, 5, 6, 8, 9, 2, 2],
+        ]
+        shape_format = [
+            [i, j, k] for i in dtype_list for j in format_list for k in shape_list
+        ]
+        for item in shape_format:
+            cpu_input, npu_input = create_common_tensor(item, 1, 10)
+            if cpu_input.dtype == torch.float16:
+                cpu_input = cpu_input.float()
+                cpu_output, cpu_inputgrad = self.cpu_silu(cpu_input)
+                cpu_output = cpu_output.half()
+                cpu_inputgrad = cpu_inputgrad.half()
+            else:
+                cpu_output, cpu_inputgrad = self.cpu_silu(cpu_input)
+
+            npu_output, npu_inputgrad = self.npu_silu(npu_input)
+
+            self.assertRtolEqual(cpu_output, npu_output)
+            self.assertRtolEqual(cpu_inputgrad, npu_inputgrad)
+
+if __name__ == "__main__":
+    run_tests()
\ No newline at end of file
diff --git a/test/test_optimized_lib/test_bidirectional_lstm.py b/test/test_optimized_lib/test_bidirectional_lstm.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a3da3118fc2faff8f596b054be7cddf97d8f247
--- /dev/null
+++ b/test/test_optimized_lib/test_bidirectional_lstm.py
@@ -0,0 +1,57 @@
+# Copyright (c) 2020 Huawei Technologies Co., Ltd
+# All rights reserved.
+#
+# Licensed under the BSD 3-Clause License (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://opensource.org/licenses/BSD-3-Clause
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import torch_npu
+
+from torch_npu.testing.testcase import TestCase, run_tests
+from torch_npu.testing.common_utils import create_common_tensor
+from torch_npu.contrib.optimized_lib.module import BiLSTM
+
+
+random_seed = 123
+torch.manual_seed(random_seed)
+
+class TestBidirectionalLstm(TestCase):
+
+    def npu_bidirectional_lstm(self, input):
+        input = input.npu()
+        input.requires_grad = True
+        rnn = BiLSTM(8, 4).npu()
+        input.retain_grad()
+        output = rnn(input)
+        output.backward(torch.ones(input.size(), dtype=torch.float).npu())
+        input_grad = input.grad.cpu()
+        return output.detach().cpu(), input_grad.cpu()
+
+    def test_bidirectional_lstm(self):
+        cpu_input = torch.rand(2, 2, 8)
+        npu_input = cpu_input.npu()
+
+        npu_output, npu_inputgrad = self.npu_bidirectional_lstm(npu_input)
+        expected_cpu_output = torch.tensor([[[-0.1025, -0.1874, 0.0458, -0.1486, -0.0266, 0.1953, -0.1688, 0.0765],
+                                             [-0.1941, -0.2162, -0.2046, -0.1855, -0.0262, 0.1460, -0.1729, 0.1274]],
+                                            [[-0.2140, -0.2439, -0.0682, -0.1685, -0.0381, 0.1166, -0.1262, 0.1035],
+                                             [-0.2947, -0.2786, -0.2559, -0.1584, -0.0176, 0.1005, -0.1135, 0.1113]]], dtype=torch.float32)
+        expected_cpu_inputgrad = torch.tensor([[[-0.1387, 0.4024, -0.2715, -0.0965, -0.4193, 0.3688, 0.1259, -0.3764],
+                                                [-0.1760, 0.4257, -0.2285, -0.2325, -0.3738, 0.3502, -0.0338, -0.3057]],
+                                               [[-0.0190, 0.4121, -0.2733, -0.1313, -0.1804, 0.3720, -0.0196, -0.1863],
+                                                [-0.0638, 0.4532, -0.2258, -0.2342, -0.1488, 0.3592, -0.0708, -0.2137]]], dtype=torch.float32)
+
+        self.assertRtolEqual(expected_cpu_output, npu_output)
+        self.assertRtolEqual(expected_cpu_inputgrad, npu_inputgrad)
+
+if __name__ == "__main__":
+    run_tests()
\ No newline at end of file
diff --git a/test/test_optimized_lib/test_channel_shuffle.py b/test/test_optimized_lib/test_channel_shuffle.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb54e68f86a931c81958f92d45006fbd4ae09c49
--- /dev/null
+++ b/test/test_optimized_lib/test_channel_shuffle.py
@@ -0,0 +1,121 @@
+# Copyright (c) 2020 Huawei Technologies Co., Ltd
+# All rights reserved.
+#
+# Licensed under the BSD 3-Clause License (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://opensource.org/licenses/BSD-3-Clause
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import torch_npu
+from torch_npu.testing.testcase import TestCase, run_tests
+from torch_npu.testing.common_utils import create_common_tensor
+from torch_npu.contrib.optimized_lib.module import ChannelShuffle
+
+random_seed = 123
+torch.manual_seed(random_seed)
+
+class TestChannelShuffle(TestCase):
+    def cpu_channel_shuffle(self, x, groups, split_shuffle):
+
+        # The CPU reference only supports the split_shuffle=False case
+        batchsize, num_channels, height, width = x.size()
+        channels_per_group = num_channels // groups
+        x.requires_grad_(True)
+        # reshape
+        x = x.view(batchsize, groups, channels_per_group, height, width)
+
+        x = torch.transpose(x, 1, 2).contiguous()
+
+        # flatten
+        x = x.view(batchsize, -1, height, width)
+        output = x.view(batchsize, -1, height, width)
+        return output.detach().numpy()
+
+    def npu_channel_shuffle(self, x, groups, split_shuffle):
+        model = ChannelShuffle(groups, split_shuffle=split_shuffle)
+        x = x.npu()
+        model = model.npu()
+        output = model(x, x)
+
+        return output.detach().cpu().numpy()
+
+    def npu_channel_shuffle_backward(self, x, groups, split_shuffle):
+
+        model = ChannelShuffle(groups, split_shuffle=split_shuffle)
+        x = x.npu()
+        x.requires_grad_(True)
+        model = model.npu()
+        output = model(x, x)
+
+        loss = sum([i.sum() for i in output]) if split_shuffle else output.sum()
+        loss.backward()
+
+        return output[0], output[1]
+
+    def test_channel_shuffle_1_False(self):
+        split_shuffle = False
+        x = torch.randn(2, 2, 3, 3)
+        conv = torch.nn.Conv2d(2, 2, 1)
+        x1 = conv(x)
+        cpu_out = self.cpu_channel_shuffle(x1, groups=2, split_shuffle=False)
+        x1 = x1.npu()
+        npu_out = self.npu_channel_shuffle(x1, groups=2, split_shuffle=False)
+
+        self.assertRtolEqual(cpu_out, npu_out)
+
+
+    def test_npu_channel_shuffle_2_True(self):
+        # There is no CPU benchmark when split_shuffle=True, so compare against precomputed values
+        x = torch.randn(2, 2, 3, 3)
+        conv = torch.nn.Conv2d(2, 2, 1)
+        x1 = conv(x)
+        x1 = x1.npu()
+        npu_output1, npu_output2 = self.npu_channel_shuffle_backward(x1, groups=4, split_shuffle=True)
+
+        expected_cpu_output1 = torch.tensor([[[[ 0.0385, -0.3217, -0.0174],
+                                               [ 0.1337, -0.1197, -0.0415],
+                                               [ 0.0843,  0.1638, -0.0149]],
+
+                                              [[ 0.0385, -0.3217, -0.0174],
+                                               [ 0.1337, -0.1197, -0.0415],
+                                               [ 0.0843,  0.1638, -0.0149]]],
+
+
+                                             [[[-0.0203, -0.3950, -0.1230],
+                                               [ 0.2059,  0.0822,  0.6951],
+                                               [-0.0773,  0.0535, -0.0462]],
+
+                                              [[-0.0203, -0.3950, -0.1230],
+                                               [ 0.2059,  0.0822,  0.6951],
+                                               [-0.0773,  0.0535, -0.0462]]]], dtype=torch.float32)
+
+        expected_cpu_output2 = torch.tensor([[[[ 0.5454, -0.0463,  0.4660],
+                                               [ 0.7197,  0.2986,  0.4197],
+                                               [ 0.6225,  0.7925,  0.4614]],
+
+                                              [[ 0.5454, -0.0463,  0.4660],
+                                               [ 0.7197,  0.2986,  0.4197],
+                                               [ 0.6225,  0.7925,  0.4614]]],
+
+
+                                             [[[ 0.4537, -0.1535,  0.3048],
+                                               [ 0.8306,  0.6178,  1.7047],
+                                               [ 0.3617,  0.5625,  0.4009]],
+
+                                              [[ 0.4537, -0.1535,  0.3048],
+                                               [ 0.8306,  0.6178,  1.7047],
+                                               [ 0.3617,  0.5625,  0.4009]]]], dtype=torch.float32)
+        self.assertRtolEqual(expected_cpu_output1.numpy(), npu_output1.detach().cpu().numpy())
+        self.assertRtolEqual(expected_cpu_output2.numpy(), npu_output2.detach().cpu().numpy())
+
+
+if __name__ == "__main__":
+    run_tests()
\ No newline at end of file
diff --git a/test/test_optimized_lib/test_crossentropy.py b/test/test_optimized_lib/test_crossentropy.py
new file mode 100644
index 0000000000000000000000000000000000000000..8489e6d3afe5c4a922db40ea436f855f80f973e4
--- /dev/null
+++ b/test/test_optimized_lib/test_crossentropy.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2020 Huawei Technologies Co., Ltd
+# All rights reserved.
+#
+# Licensed under the BSD 3-Clause License (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://opensource.org/licenses/BSD-3-Clause
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import torch_npu
+
+from torch_npu.testing.testcase import TestCase, run_tests
+from torch_npu.testing.common_utils import create_common_tensor
+from torch_npu.contrib.optimized_lib.module import LabelSmoothingCrossEntropy
+
+random_seed = 123
+torch.manual_seed(random_seed)
+
+class TestCrossentropy(TestCase):
+
+    def test_npu_crossentropy_1(self):
+        x = torch.randn(2, 10)
+        y = torch.randint(0, 10, size=(2,))
+
+        x = x.npu()
+        y = y.npu()
+        x.requires_grad = True
+        m = LabelSmoothingCrossEntropy(10)
+        npu_output = m(x, y)
+        npu_output.backward()
+        expected_cpu_xgrad = torch.tensor([[ 0.0465,  0.0317,  0.0612,  0.0215,  0.0695,
+                                             0.0849,  0.0354,  0.0255, -0.4017,  0.0255],
+                                           [ 0.0133,  0.0225,  0.0104,  0.0787,  0.0202,
+                                             0.1322, -0.4969,  0.1719,  0.0331,  0.0145]], dtype=torch.float32)
+        self.assertRtolEqual(torch.tensor(3.3496), npu_output.detach().cpu())
+        self.assertRtolEqual(expected_cpu_xgrad, x.grad.cpu())
+
+    def test_npu_crossentropy_2(self):
+        x = torch.randn(2, 10)
+        y = torch.randint(0, 10, size=(2,))
+
+        x = x.npu()
+        y = y.npu()
+        x.requires_grad = True
+        m = LabelSmoothingCrossEntropy(10, 0.1)
+        npu_output = m(x, y)
+        npu_output.backward()
+        expected_cpu_xgrad = torch.tensor([[ 0.0410,  0.0261,  0.0557,  0.0160,  0.0639,
+                                             0.0793,  0.0298,  0.0200, -0.3517,  0.0199],
+                                           [ 0.0077,  0.0170,  0.0049,  0.0732,  0.0146,
+                                             0.1267, -0.4469,  0.1663,  0.0275,  0.0090]], dtype=torch.float32)
+        self.assertRtolEqual(torch.tensor(3.2760), npu_output.detach().cpu())
+        self.assertRtolEqual(expected_cpu_xgrad, x.grad.cpu())
+
+if __name__ == "__main__":
+    run_tests()
\ No newline at end of file
diff --git a/test/test_optimized_lib/test_ps_roi_pooling.py b/test/test_optimized_lib/test_ps_roi_pooling.py
new file mode 100644
index 0000000000000000000000000000000000000000..49333dd0f8a0f48acae3c04d65329490804e8237
--- /dev/null
+++ b/test/test_optimized_lib/test_ps_roi_pooling.py
@@ -0,0 +1,66 @@
+# Copyright (c) 2020 Huawei Technologies Co., Ltd
+# All rights reserved.
+#
+# Licensed under the BSD 3-Clause License (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://opensource.org/licenses/BSD-3-Clause
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import torch_npu
+
+from torch_npu.testing.testcase import TestCase, run_tests
+from torch_npu.testing.common_utils import create_common_tensor
+from torch_npu.contrib.optimized_lib.module import PSROIPool
+
+random_seed = 123
+torch.manual_seed(random_seed)
+
+class TestPsRoiPooling(TestCase):
+    def get_random_rois(self, shape):
+        rois_init = torch.zeros(shape)
+        for i in range(shape[0]):
+            for j in range(shape[1]):
+                pi1 = torch.rand(1, 2).uniform_(0, 10)
+                pi2 = torch.rand(1, 2).uniform_(10, 100)
+                boxi = torch.cat((pi1, pi2), 1)
+                n = torch.tensor([[float(i)]])
+                boxi = torch.cat((n, boxi), 1)
+                rois_init[i, j, :] = boxi
+        return rois_init
+
+    def npu_ps_roi_align(self, cls_feat, rois_tensor, pooled_height, pooled_width, spatial_scale, group_size, output_dim):
+        cls_feat.requires_grad = True
+        model = PSROIPool(pooled_height, pooled_width, spatial_scale, group_size, output_dim)
+        output = model(cls_feat, rois_tensor)  # output shape: (512, 22, 7, 7)
+        l = output.sum()
+        l.backward()
+        return output.detach().cpu(), cls_feat.grad.cpu()
+
+
+    def test_npu_ps_roi_pooling_1(self):
+        cls_feat = torch.randn(4, 1078, 84, 84).float().npu()
+        rois_tensor = self.get_random_rois((4, 128, 5)).permute(0, 2, 1).float().npu()
+        pooled_height = 7
+        pooled_width = 7
+        spatial_scale = 1 / 16.0
+        group_size = 7
+        output_dim = 22
+
+        npu_output, npu_inputgrad = self.npu_ps_roi_align(cls_feat, rois_tensor, pooled_height, pooled_width, spatial_scale, group_size, output_dim)
+
+        expected_cpu_output_shape = torch.Size([512, 22, 7, 7])
+        expected_cpu_inputgrad_shape = cls_feat.shape
+
+        self.assertEqual(expected_cpu_output_shape, npu_output.shape)
+        self.assertEqual(expected_cpu_inputgrad_shape, npu_inputgrad.shape)
+
+if __name__ == "__main__":
+    run_tests()
\ No newline at end of file
diff --git a/test/test_optimized_lib/test_roi_align.py b/test/test_optimized_lib/test_roi_align.py
new file mode 100644
index 0000000000000000000000000000000000000000..215ecd0550a0e59cbef664e083b7208b3a051ccd
--- /dev/null
+++ b/test/test_optimized_lib/test_roi_align.py
@@ -0,0 +1,95 @@
+# Copyright (c) 2020 Huawei Technologies Co., Ltd
+# All rights reserved.
+#
+# Licensed under the BSD 3-Clause License (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://opensource.org/licenses/BSD-3-Clause
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import torch_npu
+
+from torch_npu.testing.testcase import TestCase, run_tests
+from torch_npu.testing.common_utils import create_common_tensor
+from torch_npu.contrib.optimized_lib.module import ROIAlign
+
+random_seed = 123
+torch.manual_seed(random_seed)
+
+class TestRoiAlign(TestCase):
+
+    def npu_roi_align(self, input, roi, output_size, spatial_scale, sampling_ratio, aligned):
+
+        input.requires_grad_(True)
+        roi.requires_grad_(True)
+        model = ROIAlign(output_size, spatial_scale, sampling_ratio, aligned).npu()
+        output = model(input, roi)
+        l = output.sum()
+        l.backward()
+        return output.detach().cpu(), input.grad.cpu()
+
+    def test_npu_roi_align_1(self):
+
+        input = torch.FloatTensor([[[[1, 2, 3, 4, 5, 6],
+                                     [7, 8, 9, 10, 11, 12],
+                                     [13, 14, 15, 16, 17, 18],
+                                     [19, 20, 21, 22, 23, 24],
+                                     [25, 26, 27, 28, 29, 30],
+                                     [31, 32, 33, 34, 35, 36]]]]).npu()
+        roi = torch.tensor([[0, -2.0, -2.0, 22.0, 22.0]]).npu()
+        output_size = (3, 3)
+        spatial_scale = 0.25
+        sampling_ratio = 2
+        aligned = False
+
+        npu_output, npu_inputgrad = self.npu_roi_align(input, roi, output_size, spatial_scale, sampling_ratio, aligned)
+        expected_cpu_output = torch.tensor([[[[ 4.5000,  6.5000,  8.5000],
+                                              [16.5000, 18.5000, 20.5000],
+                                              [28.5000, 30.5000, 32.5000]]]], dtype=torch.float32)
+        expected_cpu_inputgrad = torch.tensor([[[[0.2397, 0.2346, 0.2346, 0.2346, 0.2346, 0.2907],
+                                                 [0.2346, 0.2296, 0.2296, 0.2296, 0.2296, 0.2845],
+                                                 [0.2346, 0.2296, 0.2296, 0.2296, 0.2296, 0.2845],
+                                                 [0.2346, 0.2296, 0.2296, 0.2296, 0.2296, 0.2845],
+                                                 [0.2346, 0.2296, 0.2296, 0.2296, 0.2296, 0.2845],
+                                                 [0.2907, 0.2845, 0.2845, 0.2845, 0.2845, 0.3525]]]], dtype=torch.float32)
+
+        self.assertRtolEqual(expected_cpu_output, npu_output)
+        self.assertRtolEqual(expected_cpu_inputgrad, npu_inputgrad)
+
+    def test_npu_roi_align_2(self):
+        input = torch.FloatTensor([[[[1, 2, 3, 4, 5, 6],
+                                     [7, 8, 9, 10, 11, 12],
+                                     [13, 14, 15, 16, 17, 18],
+                                     [19, 20, 21, 22, 23, 24],
+                                     [25, 26, 27, 28, 29, 30],
+                                     [31, 32, 33, 34, 35, 36]]]]).npu()
+        roi = torch.tensor([[0, -2.0, -2.0, 22.0, 22.0]]).npu()
+        output_size = (3, 3)
+        spatial_scale = 0.25
+        sampling_ratio = 2
+        aligned = True
+
+        npu_output, npu_inputgrad = self.npu_roi_align(input, roi, output_size, spatial_scale, sampling_ratio, aligned)
+
+        expected_cpu_output = torch.tensor([[[[ 2.7500,  4.5000,  6.5000],
+                                              [13.2500, 15.0000, 17.0000],
+                                              [25.2500, 27.0000, 29.0000]]]], dtype=torch.float32)
+        expected_cpu_inputgrad = torch.tensor([[[[0.2397, 0.2346, 0.2346, 0.2346, 0.2346, 0.2907],
+                                                 [0.2346, 0.2296, 0.2296, 0.2296, 0.2296, 0.2845],
+                                                 [0.2346, 0.2296, 0.2296, 0.2296, 0.2296, 0.2845],
+                                                 [0.2346, 0.2296, 0.2296, 0.2296, 0.2296, 0.2845],
+                                                 [0.2346, 0.2296, 0.2296, 0.2296, 0.2296, 0.2845],
+                                                 [0.2907, 0.2845, 0.2845, 0.2845, 0.2845, 0.3525]]]], dtype=torch.float32)
+
+        self.assertRtolEqual(expected_cpu_output, npu_output)
+        self.assertRtolEqual(expected_cpu_inputgrad, npu_inputgrad)
+
+if __name__ == "__main__":
+    run_tests()
\ No newline at end of file
diff --git a/torch_npu/contrib/optimized_lib/__init__.py b/torch_npu/contrib/optimized_lib/__init__.py
index e5fec8412c72e960ea9a50889753a1772e3d12d1..8b8896cbbfd2a7c606506d082191c4c9c0228e65 100644
--- a/torch_npu/contrib/optimized_lib/__init__.py
+++ b/torch_npu/contrib/optimized_lib/__init__.py
@@ -12,10 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from .function import npu_iou, npu_ptiou, npu_giou, npu_multiclass_nms, npu_batched_multiclass_nms, \
-    npu_single_level_responsible_flags, npu_fast_condition_index_put, npu_bbox_coder_encode_yolo, \
-    npu_bbox_coder_encode_xyxy2xywh, npu_bbox_coder_decode_xywh2xyxy
-from .module import ChannelShuffle, LabelSmoothingCrossEntropy, ROIAlign, , Mish, BiLSTM, PSROIPool, SiLU, Swish
+from .module import ChannelShuffle, LabelSmoothingCrossEntropy, ROIAlign, Mish, BiLSTM, PSROIPool, SiLU, Swish
 
 __all__ = [
     # from function