shu_gzy/pytorch (forked from src-openEuler/pytorch)
0001-Remove-breakpad-dependency.patch (21.24 KB)
Xie Zhipeng committed on 2022-06-13 12:50 +08:00: upgrade to pytorch v1.11.0
From d862908738d31f3d5e5dc2caabe51930e7cf020e Mon Sep 17 00:00:00 2001
From: Nikita Shulga <nshulga@fb.com>
Date: Tue, 3 May 2022 20:21:55 +0000
Subject: [PATCH] Remove breakpad dependency

This functionality does not seem to be used, and there are requests to
update the dependency.

Add `third_party` to torch_cpu include directories if compiling with
Caffe2 support, as `caffe2/quantization/server/conv_dnnlowp_op.cc` depends
on `third_party/fbgemm/src/RefImplementations.h`.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/75394
Approved by: https://github.com/janeyx99, https://github.com/seemethere
---
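Note: for downstream users, the practical effect of this patch is that the
crash-handler bindings disappear from torch._C. A minimal sketch of how a
caller can probe for the removed API instead of assuming it exists
(hypothetical guard code, not part of this patch; "/tmp/pytorch_crashes" is
the default directory documented in the deleted crash_handler.h below):

    import torch

    # Probe for the binding this patch deletes (initCrashHandlerBindings
    # used to register _enable_minidumps on torch._C) instead of assuming
    # breakpad support was compiled in.
    if hasattr(torch._C, "_enable_minidumps"):
        import torch.utils._crash_handler as crash_handler
        crash_handler.enable_minidumps("/tmp/pytorch_crashes")
    else:
        print("torch built without breakpad; minidump support unavailable")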
CMakeLists.txt | 5 -
caffe2/CMakeLists.txt | 8 +-
cmake/Dependencies.cmake | 4 -
cmake/Summary.cmake | 1 -
test/test_cpp_extensions_jit.py | 79 +----------
test/test_utils.py | 30 +----
tools/build_variables.bzl | 1 -
torch/csrc/Module.cpp | 7 -
torch/csrc/api/include/torch/utils.h | 1 -
torch/csrc/utils/crash_handler.cpp | 169 ------------------------
torch/csrc/utils/crash_handler.h | 35 -----
torch/csrc/utils/init.cpp | 14 --
torch/csrc/utils/init.h | 5 -
torch/testing/_internal/common_utils.py | 15 ---
15 files changed, 5 insertions(+), 372 deletions(-)
delete mode 100644 torch/csrc/utils/crash_handler.cpp
delete mode 100644 torch/csrc/utils/crash_handler.h
diff --git a/CMakeLists.txt b/CMakeLists.txt
index e51760b5..7f62e1c6 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -209,7 +209,6 @@ cmake_dependent_option(
"USE_CUDNN" OFF)
option(USE_FBGEMM "Use FBGEMM (quantized 8-bit server operators)" ON)
option(USE_KINETO "Use Kineto profiling library" ON)
-option(USE_BREAKPAD "Use breakpad crash dump library" ON)
option(USE_CUPTI_SO "Use CUPTI as a shared library" OFF)
option(USE_FAKELOWP "Use FakeLowp operators" OFF)
option(USE_FFMPEG "Use ffmpeg" OFF)
@@ -273,10 +272,6 @@ if(NOT DEFINED USE_VULKAN)
"ANDROID" OFF)
endif()
-if(IOS)
- set(USE_BREAKPAD OFF)
-endif()
-
option(USE_SOURCE_DEBUG_ON_MOBILE "Enable " ON)
option(USE_LITE_INTERPRETER_PROFILER "Enable " ON)
option(USE_VULKAN_FP16_INFERENCE "Vulkan - Use fp16 inference" OFF)
diff --git a/caffe2/CMakeLists.txt b/caffe2/CMakeLists.txt
index d57d7ebb..cfcc1ae2 100644
--- a/caffe2/CMakeLists.txt
+++ b/caffe2/CMakeLists.txt
@@ -709,7 +709,6 @@ if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
${TORCH_SRC_DIR}/csrc/api/src/optim/schedulers/step_lr.cpp
${TORCH_SRC_DIR}/csrc/api/src/serialize/input-archive.cpp
${TORCH_SRC_DIR}/csrc/api/src/serialize/output-archive.cpp
- ${TORCH_SRC_DIR}/csrc/utils/crash_handler.cpp
)
endif()
@@ -1066,10 +1065,9 @@ if(USE_TBB)
target_link_libraries(torch_cpu PUBLIC TBB::tbb)
endif()
-if(USE_BREAKPAD)
- target_compile_definitions(torch_cpu PRIVATE ADD_BREAKPAD_SIGNAL_HANDLER)
- target_include_directories(torch_cpu PRIVATE ${CMAKE_CURRENT_LIST_DIR}/../third_party ${CMAKE_CURRENT_LIST_DIR}/../third_party/breakpad/src)
- target_link_libraries(torch_cpu PRIVATE breakpad)
+if(BUILD_CAFFE2 AND BUILD_CAFFE2_OPS AND USE_FBGEMM)
+ # FIXME: quantization/server/conv_dnnlowp_op.cc depends on fbgemm/src/RefImplementations.h
+ target_include_directories(torch_cpu PRIVATE ${CMAKE_CURRENT_LIST_DIR}/../third_party)
endif()
target_include_directories(torch_cpu PRIVATE ${ATen_CPU_INCLUDE})
diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake
index 557ab649..525fea7a 100644
--- a/cmake/Dependencies.cmake
+++ b/cmake/Dependencies.cmake
@@ -1824,10 +1824,6 @@ set_target_properties(fmt-header-only PROPERTIES INTERFACE_COMPILE_FEATURES "")
list(APPEND Caffe2_DEPENDENCY_LIBS fmt::fmt-header-only)
set(BUILD_SHARED_LIBS ${TEMP_BUILD_SHARED_LIBS} CACHE BOOL "Build shared libs" FORCE)
-if(USE_BREAKPAD)
- add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../third_party/breakpad)
-endif()
-
# ---[ Kineto
# edge profiler depends on KinetoProfiler but it only does cpu
# profiling. Thus we dont need USE_CUDA/USE_ROCM
diff --git a/cmake/Summary.cmake b/cmake/Summary.cmake
index 20401207..1b15ec86 100644
--- a/cmake/Summary.cmake
+++ b/cmake/Summary.cmake
@@ -185,7 +185,6 @@ function(caffe2_print_configuration_summary)
message(STATUS " SELECTED_OP_LIST : ${SELECTED_OP_LIST}")
endif()
message(STATUS " USE_DEPLOY : ${USE_DEPLOY}")
- message(STATUS " USE_BREAKPAD : ${USE_BREAKPAD}")
message(STATUS " Public Dependencies : ${Caffe2_PUBLIC_DEPENDENCY_LIBS}")
message(STATUS " Private Dependencies : ${Caffe2_DEPENDENCY_LIBS}")
# coreml
diff --git a/test/test_cpp_extensions_jit.py b/test/test_cpp_extensions_jit.py
index 06b1133d..9875f4ee 100644
--- a/test/test_cpp_extensions_jit.py
+++ b/test/test_cpp_extensions_jit.py
@@ -10,15 +10,12 @@ import tempfile
import subprocess
import glob
-import textwrap
-from multiprocessing import Process
-
import torch.testing._internal.common_utils as common
import torch
import torch.backends.cudnn
import torch.utils.cpp_extension
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
-from torch.testing._internal.common_utils import gradcheck, TEST_WITH_ASAN, has_breakpad
+from torch.testing._internal.common_utils import gradcheck
TEST_CUDA = torch.cuda.is_available() and CUDA_HOME is not None
@@ -869,80 +866,6 @@ class TestCppExtensionJIT(common.TestCase):
gradcheck(torch.ops.my.add, [a, b], eps=1e-2)
- @staticmethod
- def _crash_handler_test_process(stderr_file, destination):
- # Code to enable dumps and trigger a segfault
- if sys.platform == "win32":
- destination = destination.replace("\\", "\\\\")
- csrc = textwrap.dedent(f"""
- #include <torch/torch.h>
- #include <locale>
- #include <iostream>
- #include <codecvt>
- #include <string>
-
- int fail() {{
- std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>> converter;
- std::string narrow("{destination}");
- std::wstring wide = converter.from_bytes(narrow);
- torch::crash_handler::enable_minidumps(wide.c_str());
-
- volatile int* bad = nullptr;
- return *bad;
- }}
- """)
- else:
- csrc = textwrap.dedent(f"""
- #include <torch/torch.h>
-
- int fail() {{
- torch::crash_handler::enable_minidumps("{destination}");
-
- volatile int* bad = nullptr;
- return *bad;
- }}
- """)
-
- # Some special stuff to overwrite stderr for a C++ extension
- # Copied from: https://stackoverflow.com/questions/8804893/redirect-stdout-from-python-for-c-calls
- sys.stdout.flush()
- newstdout = os.dup(2)
- devnull = os.open(stderr_file, os.O_WRONLY)
- os.dup2(devnull, 2)
- os.close(devnull)
- sys.stdout = os.fdopen(newstdout, 'w')
-
- module = torch.utils.cpp_extension.load_inline(
- name="segfault",
- cpp_sources=csrc,
- functions=["fail"],
- )
- module.fail()
-
- @unittest.skipIf(TEST_WITH_ASAN, "ASAN disables the crash handler's signal handler")
- @unittest.skipIf(not has_breakpad(), "Built without breakpad")
- @unittest.skipIf(os.environ.get("TEST_CONFIG") == "force_on_cpu", "fails on force_on_cpu config, tracked w/ #65253")
- def test_crash_handler(self):
- with tempfile.TemporaryDirectory() as temp_dir, tempfile.NamedTemporaryFile(delete=not sys.platform == "win32") as stderr:
- # Use multiprocessing to spin up a separate process to make catching
- # the segfault easier
- p = Process(target=self._crash_handler_test_process, args=(stderr.name, temp_dir))
- p.start()
- p.join()
-
- with open(stderr.name) as f:
- result = f.read().strip()
-
- # Check that the signal handler was called
- self.assertTrue(result.startswith(f"Wrote minidump to {temp_dir}"))
-
- with open(result.replace("Wrote minidump to ", ""), "rb") as dump_file:
- dump_bytes = dump_file.read()
-
- # Check that the file has the correct magic number
- self.assertEqual(b"MDMP", dump_bytes[0:4])
-
-
if __name__ == "__main__":
common.run_tests()
diff --git a/test/test_utils.py b/test/test_utils.py
index c8f4e3aa..5dd34b72 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -2,8 +2,6 @@
import sys
import os
-import contextlib
-import io
import re
import shutil
import random
@@ -21,7 +19,7 @@ import torch.utils.cpp_extension
import torch.hub as hub
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
-from torch.testing._internal.common_utils import has_breakpad, load_tests, retry, IS_SANDCASTLE, IS_WINDOWS, TEST_WITH_ASAN
+from torch.testing._internal.common_utils import load_tests, retry, IS_SANDCASTLE, IS_WINDOWS
# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
@@ -757,32 +755,6 @@ class TestAssert(TestCase):
ms(torch.tensor([False], dtype=torch.bool))
-class TestCrashHandler(TestCase):
- @unittest.skipIf(TEST_WITH_ASAN, "ASAN disables the crash handler's signal handler")
- @unittest.skipIf(not has_breakpad(), "Built without breakpad")
- def test_python_exception_writing(self):
- with tempfile.TemporaryDirectory() as temp_dir:
- torch.utils._crash_handler.enable_minidumps(temp_dir)
- torch.utils._crash_handler.enable_minidumps_on_exceptions()
-
- files = os.listdir(temp_dir)
- self.assertEqual(len(files), 0)
-
- f = io.StringIO()
- with contextlib.redirect_stderr(f):
- try:
- @torch.jit.script
- def x(i: int):
- return i + "2" # type: ignore[operator]
- except RuntimeError as e:
- pass
-
- files = os.listdir(temp_dir)
- self.assertEqual(len(files), 1)
- self.assertTrue(files[0].endswith(".dmp"))
- torch.utils._crash_handler.disable_minidumps()
-
-
@unittest.skipIf(IS_SANDCASTLE, "cpp_extension is OSS only")
class TestStandaloneCPPJIT(TestCase):
def test_load_standalone(self):
diff --git a/tools/build_variables.bzl b/tools/build_variables.bzl
index f78b7cb0..dc705d3e 100644
--- a/tools/build_variables.bzl
+++ b/tools/build_variables.bzl
@@ -772,7 +772,6 @@ torch_cpp_srcs = [
"torch/csrc/api/src/optim/schedulers/step_lr.cpp",
"torch/csrc/api/src/serialize/input-archive.cpp",
"torch/csrc/api/src/serialize/output-archive.cpp",
- "torch/csrc/utils/crash_handler.cpp",
]
libtorch_python_cuda_core_sources = [
diff --git a/torch/csrc/Module.cpp b/torch/csrc/Module.cpp
index 49f51790..86aea308 100644
--- a/torch/csrc/Module.cpp
+++ b/torch/csrc/Module.cpp
@@ -55,7 +55,6 @@
#include <torch/csrc/utils/tensor_new.h>
#include <torch/csrc/utils/tensor_numpy.h>
#include <torch/csrc/utils/python_dispatch.h>
-#include <torch/csrc/utils/crash_handler.h>
#include <torch/csrc/utils/python_arg_parser.h>
#include <torch/csrc/utils/pycfunction_helpers.h>
#include <torch/csrc/jit/python/python_tracer.h>
@@ -64,7 +63,6 @@
#include <torch/csrc/monitor/python_init.h>
#include <torch/csrc/onnx/init.h>
#include <torch/csrc/utils/init.h>
-#include <torch/csrc/utils/crash_handler.h>
#include <torch/csrc/api/include/torch/python/init.h>
#ifdef USE_DISTRIBUTED
@@ -839,7 +837,6 @@ PyObject* initModule() {
torch::monitor::initMonitorBindings(module);
torch::impl::dispatch::initDispatchBindings(module);
torch::throughput_benchmark::initThroughputBenchmarkBindings(module);
- torch::crash_handler::initCrashHandlerBindings(module);
torch::autograd::initReturnTypes(module);
torch::autograd::initNNFunctions(module);
torch::autograd::initFFTFunctions(module);
@@ -895,10 +892,6 @@ PyObject* initModule() {
// Automatically translate errors thrown from pybind11 functions
py::register_exception_translator([](std::exception_ptr e) { // NOLINT
- if (torch::crash_handler::is_enabled_on_exceptions()) {
- torch::crash_handler::write_minidump();
- }
-
try {
if (e) {
std::rethrow_exception(e);
diff --git a/torch/csrc/api/include/torch/utils.h b/torch/csrc/api/include/torch/utils.h
index f664074d..3bb6363a 100644
--- a/torch/csrc/api/include/torch/utils.h
+++ b/torch/csrc/api/include/torch/utils.h
@@ -5,7 +5,6 @@
#include <torch/csrc/autograd/grad_mode.h>
#include <torch/csrc/autograd/profiler.h>
#include <torch/csrc/api/include/torch/types.h>
-#include <torch/csrc/utils/crash_handler.h>
#include <cstdint>
namespace torch {
diff --git a/torch/csrc/utils/crash_handler.cpp b/torch/csrc/utils/crash_handler.cpp
deleted file mode 100644
index 8fb318b2..00000000
--- a/torch/csrc/utils/crash_handler.cpp
+++ /dev/null
@@ -1,169 +0,0 @@
-#include <cstdlib>
-#include <cstring>
-#include <iostream>
-
-#ifdef ADD_BREAKPAD_SIGNAL_HANDLER
-#ifdef __linux__
-#include <breakpad/src/client/linux/handler/exception_handler.h>
-#include <csignal>
-#elif __APPLE__
-#include <breakpad/src/client/mac/handler/exception_handler.h>
-#elif _WIN32
-#include <breakpad/src/client/windows/handler/exception_handler.h>
-#else
-#error unsupported platform
-#endif
-#endif
-
-#include <c10/util/Exception.h>
-#include <torch/csrc/utils/crash_handler.h>
-
-namespace torch {
-namespace crash_handler {
-
-#ifdef ADD_BREAKPAD_SIGNAL_HANDLER
-
-static std::unique_ptr<google_breakpad::ExceptionHandler> handler; // NOLINT
-static STRING_TYPE minidump_directory; // NOLINT
-static bool enabled_for_exceptions = false; // NOLINT
-
-#if __linux__
-bool dump_callback(
- const google_breakpad::MinidumpDescriptor& descriptor,
- void* context,
- bool succeeded) {
- if (succeeded) {
- std::cerr << "Wrote minidump to " << descriptor.path() << std::endl;
- }
- return succeeded;
-}
-#elif __APPLE__
-
-bool dump_callback(
- const char* dump_dir,
- const char* minidump_id,
- void* context,
- bool succeeded) {
- if (succeeded) {
- std::cerr << "Wrote minidump to " << dump_dir << "/" << minidump_id
- << ".dmp" << std::endl;
- }
- return succeeded;
-}
-#elif _WIN32
-bool dump_callback(
- const wchar_t* dump_path,
- const wchar_t* minidump_id,
- void* context,
- EXCEPTION_POINTERS* exinfo,
- MDRawAssertionInfo* assertion,
- bool succeeded) {
- if (succeeded) {
- // Printing with wcerr inserts spaces between all the characters for some
- // reason. If someone figures that out then we can get rid of the std::string
- // conversions here.
- std::wstring dump_path_ws(dump_path);
- std::string dump_path_string(dump_path_ws.begin(), dump_path_ws.end());
- std::wstring minidump_id_ws(minidump_id);
- std::string minidump_id_string(minidump_id_ws.begin(), minidump_id_ws.end());
- std::cerr << "Wrote minidump to " << dump_path_string << "\\" << minidump_id_string << ".dmp" << std::endl;
- }
- return succeeded;
-}
-#endif
-
-void enable_minidumps(const STRING_TYPE& dir) {
- minidump_directory = dir;
-// The constructor here registers the actual signal handler
-#ifdef __linux__
- handler = std::make_unique<google_breakpad::ExceptionHandler>(
- google_breakpad::MinidumpDescriptor(minidump_directory),
- nullptr,
- dump_callback,
- nullptr,
- true,
- -1);
-#elif __APPLE__
- handler = std::make_unique<google_breakpad::ExceptionHandler>(
- /*dump_path=*/minidump_directory.c_str(),
- /*filter=*/nullptr,
- /*callback=*/dump_callback,
- /*callback_context=*/nullptr,
- /*install_handler=*/true,
- /*port_name=*/nullptr);
-#elif _WIN32
- handler = std::make_unique<google_breakpad::ExceptionHandler>(
- /*dump_path=*/minidump_directory.c_str(),
- /*filter=*/nullptr,
- /*callback=*/dump_callback,
- /*callback_context=*/nullptr,
- /*handler_types=*/
- google_breakpad::ExceptionHandler::HandlerType::HANDLER_ALL);
-#endif
-}
-
-void disable_minidumps() {
- handler.reset();
-}
-
-const STRING_TYPE& get_minidump_directory() {
- if (handler == nullptr) {
- AT_ERROR(
- "Minidump handler is uninintialized, make sure to call enable_minidumps first");
- }
- return minidump_directory;
-}
-
-bool is_enabled_on_exceptions() {
- if (handler == nullptr) {
- return false;
- }
-
- return enabled_for_exceptions;
-}
-
-void write_minidump() {
- TORCH_CHECK(
- handler != nullptr,
- "Minidump handler is uninintialized, make sure to call enable_minidumps first");
- handler->WriteMinidump();
-}
-
-void enable_minidumps_on_exceptions() {
- if (handler == nullptr) {
- AT_ERROR(
- "Minidump handler is uninintialized, make sure to call enable_minidumps first");
- }
- enabled_for_exceptions = true;
-}
-
-#else
-// On unspported systems we can't do anything, so stub out everything.
-void enable_minidumps(const STRING_TYPE& dir) {
- AT_ERROR("Compiled without minidump support");
-}
-
-void disable_minidumps() {
- // Purposefully do nothing
-}
-
-const STRING_TYPE& get_minidump_directory() {
- AT_ERROR("Compiled without minidump support");
-}
-
-bool is_enabled_on_exceptions() {
- return false;
-}
-
-void write_minidump() {
- AT_ERROR("Compiled without minidump support");
-}
-
-void enable_minidumps_on_exceptions() {
- AT_ERROR("Compiled without minidump support");
-}
-
-#endif
-
-} // namespace crash_handler
-} // namespace torch
diff --git a/torch/csrc/utils/crash_handler.h b/torch/csrc/utils/crash_handler.h
deleted file mode 100644
index 93f03380..00000000
--- a/torch/csrc/utils/crash_handler.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#pragma once
-#include <torch/csrc/Export.h>
-#include <string>
-
-namespace torch {
-namespace crash_handler {
-
-#ifdef _WIN32
-typedef std::wstring STRING_TYPE;
-#else
-typedef std::string STRING_TYPE;
-#endif
-
-// Set up a handler that writes minidumps to 'dir' on signals. This is not
-// necessary to call unless you want to change 'dir' to something other than
-// the default '/tmp/pytorch_crashes'.
-TORCH_API void enable_minidumps(const STRING_TYPE& dir);
-
-// Enable minidumps when passing exceptions up to Python. By default these don't
-// do anything special, but it can be useful to write out a minidump on
-// exceptions for debugging purposes. This has no effect in C++.
-TORCH_API void enable_minidumps_on_exceptions();
-
-// Disable all minidump writing and un-register the signal handler
-TORCH_API void disable_minidumps();
-
-// Get the directory that minidumps will be written to
-TORCH_API const STRING_TYPE& get_minidump_directory();
-
-// These are TORCH_API'ed since they are used from libtorch_python.so
-TORCH_API bool is_enabled_on_exceptions();
-TORCH_API void write_minidump();
-
-} // namespace crash_handler
-} // namespace torch
diff --git a/torch/csrc/utils/init.cpp b/torch/csrc/utils/init.cpp
index 8472f208..04a5d1b3 100644
--- a/torch/csrc/utils/init.cpp
+++ b/torch/csrc/utils/init.cpp
@@ -1,7 +1,6 @@
#include <ATen/core/ivalue.h>
#include <torch/csrc/utils/init.h>
#include <torch/csrc/utils/throughput_benchmark.h>
-#include <torch/csrc/utils/crash_handler.h>
#include <pybind11/functional.h>
@@ -51,17 +50,4 @@ void initThroughputBenchmarkBindings(PyObject* module) {
}
} // namespace throughput_benchmark
-
-namespace crash_handler {
-
-void initCrashHandlerBindings(PyObject* module) {
- auto m = pybind11::handle(module).cast<pybind11::module>();
-
- m.def("_enable_minidumps", enable_minidumps)
- .def("_is_enabled_on_exceptions", is_enabled_on_exceptions)
- .def("_enable_minidumps_on_exceptions", enable_minidumps_on_exceptions)
- .def("_disable_minidumps", disable_minidumps)
- .def("_get_minidump_directory", get_minidump_directory);
-}
-} // namespace crash_handler
} // namespace torch
diff --git a/torch/csrc/utils/init.h b/torch/csrc/utils/init.h
index 45a1a3a7..bf6dd216 100644
--- a/torch/csrc/utils/init.h
+++ b/torch/csrc/utils/init.h
@@ -8,9 +8,4 @@ namespace throughput_benchmark {
void initThroughputBenchmarkBindings(PyObject* module);
} // namespace throughput_benchmark
-
-namespace crash_handler {
-void initCrashHandlerBindings(PyObject* module);
-
-} // namespace crash_handler
} // namespace torch
diff --git a/torch/testing/_internal/common_utils.py b/torch/testing/_internal/common_utils.py
index 6e67b776..81656500 100644
--- a/torch/testing/_internal/common_utils.py
+++ b/torch/testing/_internal/common_utils.py
@@ -3061,21 +3061,6 @@ def bytes_to_scalar(byte_list: List[int], dtype: torch.dtype, device: torch.devi
return torch.tensor(res, device=device, dtype=dtype)
-def has_breakpad():
- # We always build with breakpad in CI
- if IS_IN_CI:
- return True
-
- # If not on a special build, check that the library was actually linked in
- try:
- torch._C._get_minidump_directory() # type: ignore[attr-defined]
- return True
- except RuntimeError as e:
- if "Minidump handler is uninintialized" in str(e):
- return True
- return False
-
-
def sandcastle_skip_if(condition, reason):
"""
Similar to unittest.skipIf, however in the sandcastle environment it just
--
2.27.0
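Note for packagers: downstream test code that still imports has_breakpad from
torch.testing._internal.common_utils will fail to import once this patch is
applied. A hypothetical compatibility shim, modeled on the deleted helper
above (the misspelling "uninintialized" is deliberate, matching the error
message in the deleted crash_handler.cpp):

    import torch

    def has_breakpad() -> bool:
        # After this patch, torch._C._get_minidump_directory no longer
        # exists, so an AttributeError means breakpad was removed; the
        # "uninintialized" RuntimeError means it was linked in but the
        # handler has not been enabled yet.
        try:
            torch._C._get_minidump_directory()  # type: ignore[attr-defined]
            return True
        except AttributeError:
            return False
        except RuntimeError as e:
            return "Minidump handler is uninintialized" in str(e)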