From 36c93ed1ef3e38eb37d963f46f83d50e5fb8522a Mon Sep 17 00:00:00 2001
From: dongwenbo6
Date: Sat, 27 Jan 2024 10:25:33 +0800
Subject: [PATCH 1/2] delete ip address

---
 test/test_torch.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/test_torch.py b/test/test_torch.py
index 68ce5aabba..39453247d6 100644
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -9303,7 +9303,7 @@ tensor([[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
 
         self.assertRaises(RuntimeError, lambda: x.sigmoid())
 
-    @skipIfTorchDynamo("https://github.com/pytorch/torchdynamo/issues/1993")
+    @skipIfTorchDynamo("pytorch torchdynamo issues 1993")
     def test_storage_dead_weak_ref(self):
         x = torch.UntypedStorage(2)
         w_x = weakref.ref(x)
-- 
Gitee

From 7c9927f68959b88d946c221b8da46df3ac140d18 Mon Sep 17 00:00:00 2001
From: dongwenbo6
Date: Sat, 27 Jan 2024 14:48:41 +0800
Subject: [PATCH 2/2] delete public IP address

---
 test/distributed/test_zero_redundancy_optimizer.py | 1 -
 test/test_binary_ufuncs.py                          | 5 -----
 test/test_npu.py                                    | 9 +--------
 test/test_npu_multinpu.py                           | 2 --
 test/test_transformers.py                           | 2 --
 5 files changed, 1 insertion(+), 18 deletions(-)

diff --git a/test/distributed/test_zero_redundancy_optimizer.py b/test/distributed/test_zero_redundancy_optimizer.py
index d3abbc6dcb..7f5f92ef1b 100644
--- a/test/distributed/test_zero_redundancy_optimizer.py
+++ b/test/distributed/test_zero_redundancy_optimizer.py
@@ -1133,7 +1133,6 @@ class TestZeroRedundancyOptimizerDistributed(TestZeroRedundancyOptimizer):
             ddp_loss = cast(torch.Tensor, zero_optim.step(closure=closure_ddp))
 
             # Increased tolerances are needed to pass when using TF32
-            # See: https://github.com/pytorch/pytorch/issues/67764
             torch.testing.assert_close(
                 local_loss.cpu(),
                 ddp_loss.cpu(),
diff --git a/test/test_binary_ufuncs.py b/test/test_binary_ufuncs.py
index 3626f774c3..4d25af62fe 100644
--- a/test/test_binary_ufuncs.py
+++ b/test/test_binary_ufuncs.py
@@ -125,7 +125,6 @@ class TestBinaryUfuncs(TestCase):
                 exact_dtype = False
 
             if dtype is torch.bfloat16 and expected.dtype == np.float32:
-                # Ref: https://github.com/pytorch/pytorch/blob/master/torch/testing/_internal/common_utils.py#L1149
                 self.assertEqualHelper(
                     actual,
                     expected,
@@ -1878,7 +1877,6 @@ class TestBinaryUfuncs(TestCase):
             return a // 5
 
         # NOTE: this fails if the input is not an integer tensor
-        # See https://github.com/pytorch/pytorch/issues/45199
         def _wrapped_rfloordiv_scalar(a):
             return 5 // a
 
@@ -2672,7 +2670,6 @@ class TestBinaryUfuncs(TestCase):
             self.assertTrue(torch.all(fn(x, 0.0).isnan()))
             self.assertTrue(torch.all(fn(x, zero).isnan()))
 
-    # Check Issue https://github.com/pytorch/pytorch/issues/48130
     @dtypes(*integral_types())
     def test_fmod_remainder_by_zero_integral(self, device, dtype):
         fn_list = (torch.fmod, torch.remainder)
@@ -3990,7 +3987,6 @@ class TestBinaryUfuncs(TestCase):
         base = make_tensor((30,), dtype=base_dtype, device=device, low=1, high=100)
 
         # Complex and real results do not agree between PyTorch and NumPy when computing negative and zero power of 0
-        # Related: https://github.com/pytorch/pytorch/issues/48000
         # base[0] = base[3] = base[7] = 0
         exp = make_tensor((30,), dtype=exp_dtype, device=device, low=-2, high=2)
         exp[0] = exp[4] = exp[6] = 0
@@ -4355,7 +4351,6 @@ class TestBinaryUfuncs(TestCase):
         # `index_select` which is not implemented for `complex32` at the
         # time of writing this test.
         # Remove this test once above issue is fixed.
-        # Ref: https://github.com/pytorch/pytorch/pull/76364
         x = make_tensor((2, 2), device=device, dtype=dtype)
         self.assertEqual(x * 2.5, x * torch.tensor(2.5, device=device, dtype=dtype))
 
diff --git a/test/test_npu.py b/test/test_npu.py
index 54e8988aee..ee33fe0cf7 100644
--- a/test/test_npu.py
+++ b/test/test_npu.py
@@ -891,7 +891,6 @@ except RuntimeError as e:
         MultiplyInStream = self._make_multiply_in_stream()
 
         # Tests using grads outside the backward() stream context
-        # See "Stream semantics of backward passes" on https://pytorch.org/docs/stable/notes/cuda.html
        x = torch.randn(5, 5, device='npu', requires_grad=True)
         with torch_npu.npu.stream(stream):
             stream.wait_stream(default_stream)
@@ -918,8 +917,7 @@ except RuntimeError as e:
         self.assertEqual(x.grad, torch.ones_like(x) * 3)
         self.assertEqual(torch_npu.npu.current_stream(), bwd_ambient_stream)
 
-    # Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
-    @skipIfRocm(msg="flakey on ROCm https://github.com/pytorch/pytorch/issues/53190")
+    @skipIfRocm(msg="flakey on ROCm pytorch issues 53190")
     def test_streaming_backwards_multiple_streams(self):
         MultiplyInStream = self._make_multiply_in_stream()
 
@@ -967,7 +965,6 @@ except RuntimeError as e:
                 x_grad = torch.autograd.grad((loss,), (x,))[0]
             else:
                 loss.backward()
-            # See "Stream semantics of backward passes" on https://pytorch.org/docs/stable/notes/cuda.html
             torch_npu.npu.current_stream().wait_stream(stream)
 
             if out_of_place:
@@ -1011,7 +1008,6 @@ except RuntimeError as e:
                     # bwd ops don't sync with bwd_ambient_stream before consuming grad.
                     torch.autograd.backward(tensors=c, grad_tensors=grad)
 
-                # See https://github.com/pytorch/pytorch/issues/47028
                 # assertEquals below run on bwd_ambient_stream, so this test may also fail
                 # if backward() fails to sync with bwd_ambient_stream at the end.
                 # Synchronizing here works around the issue until a proper fix can be made.
@@ -1626,7 +1622,6 @@ torch_npu.npu.synchronize()
         for t in range(num_threads):
             self.assertEqual(results[t].sum().item(), size * size)
 
-    # Test is flaky on Windows (https://github.com/pytorch/pytorch/issues/57401)
     @unittest.skipIf(IS_WINDOWS, 'Test is flaky on Windows (see issue 57401)')
     @unittest.skipIf(not TEST_PRIVATEUSE1, 'NPU not available')
     @skipIfRocm
@@ -2017,7 +2012,6 @@ torch_npu.npu.synchronize()
             loss.backward()
 
     def test_autocast_cat_jit(self):
-        # Reported at https://github.com/pytorch/pytorch/issues/38958
         class Model(torch.nn.Module):
 
             def forward(self):
@@ -2117,7 +2111,6 @@ torch_npu.npu.synchronize()
         self.assertEqual(grad.half(), grad_control)
 
     def test_autocast_cache_leak(self):
-        # Reported at https://github.com/pytorch/pytorch/issues/48049
         # Test is used to check, if autocast recaches the same parameters
         # when executed in a `torch.no_grad()` block.
 
diff --git a/test/test_npu_multinpu.py b/test/test_npu_multinpu.py
index bf33a4702c..0b98f85aa0 100644
--- a/test/test_npu_multinpu.py
+++ b/test/test_npu_multinpu.py
@@ -740,7 +740,6 @@ class TestNpuMultiNpu(TestCase):
             p2c.get()
             c2p.put(sync_func(self, TestNpuMultiNpu.FIFTY_MIL_CYCLES))
 
-    # Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
     @skipIfRocm
     @unittest.skipIf(not TEST_MULTINPU, "detected only one NPU")
     def test_stream_event_nogil(self):
@@ -1064,7 +1063,6 @@ class TestNpuMultiNpu(TestCase):
     @unittest.skipIf(not TEST_MULTINPU, "only one NPU detected")
     @unittest.skipIf(IS_SANDCASTLE or IS_REMOTE_GPU, "Does not work on Sandcastle")
     def test_cuda_init_race(self):
-        # See https://github.com/pytorch/pytorch/issues/16559
         import subprocess
         subprocess.check_call([sys.executable, '-c', """\
 import torch
diff --git a/test/test_transformers.py b/test/test_transformers.py
index b244fb0f11..c03a942da3 100644
--- a/test/test_transformers.py
+++ b/test/test_transformers.py
@@ -96,7 +96,6 @@ def get_tolerances(
     atol = fudge_factor * max(atol, default_atol[computed_value.dtype])
     rtol = fudge_factor * max(rtol, default_rtol[computed_value.dtype])
     # torch.isclose() has weird behavior around see:
-    # https://github.com/pytorch/pytorch/issues/102400
     if rtol > 1e30:
         rtol = default_rtol[computed_value.dtype]
     return atol, rtol
@@ -1754,7 +1753,6 @@ class TestSDPA(NNTestCase):
 
     @parametrize("kernel", [SDPBackend.MATH])
     def test_scaled_dot_product_attention_math_with_negative_scale(self, device, kernel: SDPBackend):
-        # https://github.com/pytorch/pytorch/issues/105190.
         def ref(x):
             v1 = torch.matmul(x, x.transpose(-1, -2))
             v2 = v1 / -0.0001
-- 
Gitee