diff --git a/test/test_c10d.py b/test/test_c10d.py
index 428280e9430430e36ab24b471c132d6f4dc9fcd2..1046bab6ad14aebe7ac1b29362fc54741f49a84e 100644
--- a/test/test_c10d.py
+++ b/test/test_c10d.py
@@ -17,10 +17,10 @@
 from enum import IntEnum, unique
 import os
 import unittest
 import torch
-import torch_npu
 import torch.distributed as c10d
 import torch.distributed as dist
 import torch.multiprocessing as mp
+import torch_npu
 
 from torch_npu.testing.testcase import TestCase, run_tests
diff --git a/test/test_npu.py b/test/test_npu.py
index b8f63f24abdfba51a1b89f1f56771ec814e0c6b0..751e9039803041a0b6942080a530ebc8c2616dfe 100644
--- a/test/test_npu.py
+++ b/test/test_npu.py
@@ -91,9 +91,6 @@ class TestNpu(TestCase):
             return torch.npu.FloatTensor(*size)
 
         def assert_change(comp=1, empty_cache=False, reset_peak=False):
-            # comp > 0: increased
-            # comp = 0: equal
-            # comp < 0: decreased
             new_m = torch_npu.npu.memory_allocated(device)
             new_max_m = torch_npu.npu.max_memory_allocated(device)
             if comp > 0:
diff --git a/torch_npu/distributed/distributed_c10d.py b/torch_npu/distributed/distributed_c10d.py
index bbb894fe18353ade918f5241739ac95ab73f68be..468bbd5ca07b6c83337086274432fe5cd306248b 100644
--- a/torch_npu/distributed/distributed_c10d.py
+++ b/torch_npu/distributed/distributed_c10d.py
@@ -775,7 +775,7 @@ def isend(tensor,
     """
     _check_single_tensor(tensor, "tensor")
    if _rank_not_in_group(group):
-        return
+        return None
 
     if group is None or group is GroupMember.WORLD:
         default_pg = _get_default_group()
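
Note (not part of the patch): the `return` -> `return None` change in `isend` makes both exit paths return the same explicit value, since `isend` normally returns a work handle that callers may wait on. The sketch below is a self-contained toy model of that contract; `isend_sketch` and `_Work` are hypothetical stand-ins, not the real torch.distributed API.

    from typing import Optional


    class _Work:
        """Hypothetical stand-in for the async work handle isend returns."""

        def wait(self) -> None:
            pass


    def isend_sketch(rank_in_group: bool) -> Optional[_Work]:
        # Mirrors the patched control flow: the early exit now says
        # `return None` explicitly instead of a bare `return`, so both
        # branches visibly satisfy the Optional[_Work] return type.
        if not rank_in_group:
            return None
        return _Work()


    # Caller-side pattern: guard against None before waiting on the handle.
    work = isend_sketch(rank_in_group=True)
    if work is not None:
        work.wait()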