From 0e0f4bc88895896b1b4e2ececc501d8f96a170be Mon Sep 17 00:00:00 2001 From: wangyicheng Date: Wed, 23 Mar 2022 18:02:09 +0800 Subject: [PATCH 1/4] test/test_c10d.py change import order --- test/test_c10d.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_c10d.py b/test/test_c10d.py index 428280e9430..1046bab6ad1 100644 --- a/test/test_c10d.py +++ b/test/test_c10d.py @@ -17,10 +17,10 @@ from enum import IntEnum, unique import os import unittest import torch -import torch_npu import torch.distributed as c10d import torch.distributed as dist import torch.multiprocessing as mp +import torch_npu from torch_npu.testing.testcase import TestCase, run_tests -- Gitee From 850fd469bfcf26d94308a21e278eb959b9153226 Mon Sep 17 00:00:00 2001 From: wangyicheng Date: Thu, 24 Mar 2022 14:44:25 +0800 Subject: [PATCH 2/4] fix redundant code problem --- test/test_npu.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/test/test_npu.py b/test/test_npu.py index b8f63f24abd..a3bf1d87ee6 100644 --- a/test/test_npu.py +++ b/test/test_npu.py @@ -91,9 +91,7 @@ class TestNpu(TestCase): return torch.npu.FloatTensor(*size) def assert_change(comp=1, empty_cache=False, reset_peak=False): - # comp > 0: increased - # comp = 0: equal - # comp < 0: decreased + # comp > 0: increased, comp = 0: equal, comp < 0: decreased new_m = torch_npu.npu.memory_allocated(device) new_max_m = torch_npu.npu.max_memory_allocated(device) if comp > 0: -- Gitee From 15211d7e867d13b90236957caaaf5b726ecf7c37 Mon Sep 17 00:00:00 2001 From: wangyicheng Date: Thu, 24 Mar 2022 15:00:45 +0800 Subject: [PATCH 3/4] fix redundant code problem --- test/test_npu.py | 1 - 1 file changed, 1 deletion(-) diff --git a/test/test_npu.py b/test/test_npu.py index a3bf1d87ee6..751e9039803 100644 --- a/test/test_npu.py +++ b/test/test_npu.py @@ -91,7 +91,6 @@ class TestNpu(TestCase): return torch.npu.FloatTensor(*size) def assert_change(comp=1, empty_cache=False, reset_peak=False): - # comp > 0: 
increased, comp = 0: equal, comp < 0: decreased new_m = torch_npu.npu.memory_allocated(device) new_max_m = torch_npu.npu.max_memory_allocated(device) if comp > 0: -- Gitee From bcca9e28b132fb3a69501e6b9758995e5f2353f1 Mon Sep 17 00:00:00 2001 From: wangyicheng Date: Thu, 24 Mar 2022 15:52:59 +0800 Subject: [PATCH 4/4] fix torch_npu distributed_c10d --- torch_npu/distributed/distributed_c10d.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torch_npu/distributed/distributed_c10d.py b/torch_npu/distributed/distributed_c10d.py index bbb894fe183..468bbd5ca07 100644 --- a/torch_npu/distributed/distributed_c10d.py +++ b/torch_npu/distributed/distributed_c10d.py @@ -775,7 +775,7 @@ def isend(tensor, """ _check_single_tensor(tensor, "tensor") if _rank_not_in_group(group): - return + return None if group is None or group is GroupMember.WORLD: default_pg = _get_default_group() -- Gitee