diff --git a/1000-add-loongarch64-support-not-upstream-modified.patch b/1000-add-loongarch64-support-not-upstream-modified.patch new file mode 100644 index 0000000000000000000000000000000000000000..2c467abf0fb8ef583232c6944335b2175ed13a3f --- /dev/null +++ b/1000-add-loongarch64-support-not-upstream-modified.patch @@ -0,0 +1,3273 @@ +From 37b00491c7550c4ed0d2a17d565e1f355cc41d67 Mon Sep 17 00:00:00 2001 +From: herengui +Date: Thu, 31 Aug 2023 09:46:45 +0800 +Subject: [PATCH 1000/1001] add loongarch64 support not upstream modified + +Signed-off-by: herengui +--- + cmake/Modules/CompilerRTUtils.cmake | 3 + + cmake/base-config-ix.cmake | 2 + + cmake/builtin-config-ix.cmake | 3 +- + cmake/config-ix.cmake | 27 +- + lib/asan/asan_interceptors.h | 9 +- + lib/asan/asan_interceptors_vfork.S | 1 + + lib/asan/asan_malloc_linux.cpp | 4 + + lib/asan/asan_mapping.h | 14 +- + lib/asan/scripts/asan_symbolize.py | 2 +- + lib/asan/tests/asan_test.cpp | 14 +- + lib/builtins/CMakeLists.txt | 5 + + lib/builtins/clear_cache.c | 2 + + lib/crt/crtbegin.c | 8 + + lib/dfsan/dfsan_platform.h | 7 + + lib/fuzzer/FuzzerTracePC.cpp | 7 +- + lib/fuzzer/FuzzerUtil.h | 3 +- + lib/interception/tests/CMakeLists.txt | 2 +- + lib/lsan/lsan_allocator.cpp | 2 +- + lib/lsan/lsan_allocator.h | 2 +- + lib/lsan/lsan_common.cpp | 8 +- + lib/lsan/lsan_common.h | 4 +- + lib/msan/msan.h | 29 +- + lib/msan/msan_allocator.cpp | 2 +- + lib/msan/msan_interceptors.cpp | 4 + + lib/msan/tests/msan_test.cpp | 10 +- + lib/safestack/safestack_platform.h | 6 + + lib/sanitizer_common/CMakeLists.txt | 1 + + lib/sanitizer_common/sanitizer_common.h | 5 +- + .../sanitizer_common_syscalls.inc | 4 +- + .../sanitizer_coverage_libcdep_new.cpp | 14 +- + lib/sanitizer_common/sanitizer_linux.cpp | 514 +++++++++++------- + lib/sanitizer_common/sanitizer_linux.h | 2 +- + .../sanitizer_linux_libcdep.cpp | 31 +- + lib/sanitizer_common/sanitizer_platform.h | 42 +- + .../sanitizer_platform_interceptors.h | 7 +- + .../sanitizer_platform_limits_linux.cpp | 6 +- + .../sanitizer_platform_limits_posix.cpp | 71 +-- + .../sanitizer_platform_limits_posix.h | 25 +- + lib/sanitizer_common/sanitizer_ring_buffer.h | 9 +- + lib/sanitizer_common/sanitizer_stacktrace.cpp | 7 +- + lib/sanitizer_common/sanitizer_stacktrace.h | 7 +- + .../sanitizer_stoptheworld_linux_libcdep.cpp | 108 ++-- + .../sanitizer_symbolizer_libcdep.cpp | 2 + + .../sanitizer_tls_get_addr.cpp | 8 +- + lib/sanitizer_common/tests/CMakeLists.txt | 2 +- + .../tests/sanitizer_allocator_test.cpp | 6 +- + .../tests/sanitizer_ring_buffer_test.cpp | 5 +- + .../tests/sanitizer_stacktrace_test.cpp | 16 +- + lib/scudo/scudo_utils.cpp | 2 + + lib/scudo/standalone/checksum.cpp | 2 + + lib/scudo/standalone/common.h | 4 + + lib/tsan/CMakeLists.txt | 4 +- + lib/tsan/rtl/tsan_interceptors_posix.cpp | 2 + + lib/tsan/rtl/tsan_platform.h | 47 +- + lib/tsan/rtl/tsan_platform_linux.cpp | 13 +- + lib/tsan/rtl/tsan_rtl.cpp | 4 +- + lib/tsan/rtl/tsan_rtl.h | 3 +- + lib/xray/CMakeLists.txt | 6 + + lib/xray/tests/CMakeLists.txt | 1 + + lib/xray/xray_interface.cpp | 2 + + lib/xray/xray_tsc.h | 3 +- + test/asan/CMakeLists.txt | 2 +- + test/asan/TestCases/Linux/ptrace.cpp | 9 +- + test/asan/TestCases/Linux/segv_read_write.c | 2 +- + .../Posix/unpoison-alternate-stack.cpp | 4 +- + test/builtins/Unit/addtf3_test.c | 2 +- + test/builtins/Unit/subtf3_test.c | 2 +- + test/fuzzer/disable-leaks.test | 2 +- + test/fuzzer/exit_on_src_pos.test | 2 + + test/fuzzer/fork-ubsan.test | 2 +- + test/lit.common.cfg.py | 2 +- + 
test/lsan/TestCases/strace_test.cpp | 1 + + test/lsan/TestCases/swapcontext.cpp | 2 +- + test/lsan/TestCases/use_registers.cpp | 4 + + test/lsan/lit.common.cfg.py | 2 +- + test/msan/allocator_mapping.cpp | 2 +- + test/msan/fstat.cpp | 2 +- + test/msan/lit.cfg.py | 2 +- + test/msan/mmap.cpp | 6 +- + test/msan/mmap_below_shadow.cpp | 2 +- + test/msan/param_tls_limit.cpp | 4 +- + test/msan/preinit_array.cpp | 3 + + test/msan/strlen_of_shadow.cpp | 4 +- + test/msan/vararg.cpp | 3 +- + test/msan/vector_select.cpp | 2 +- + test/msan/wcsncpy.cpp | 2 +- + .../TestCases/Linux/pthread_mutex.cpp | 3 +- + .../TestCases/Linux/ptrace.cpp | 22 +- + .../Linux/sysconf_interceptor_bypass_test.cpp | 3 +- + .../TestCases/Posix/lstat.cpp | 2 +- + test/sanitizer_common/print_address.h | 2 +- + test/tsan/map32bit.cpp | 1 + + test/tsan/mmap_large.cpp | 2 + + test/tsan/test.h | 2 + + .../TestCases/Posix/arg1-arg0-logging.cpp | 2 +- + test/xray/TestCases/Posix/arg1-logger.cpp | 2 +- + .../Posix/arg1-logging-implicit-this.cpp | 2 +- + .../TestCases/Posix/argv0-log-file-name.cpp | 1 + + test/xray/TestCases/Posix/coverage-sample.cpp | 1 + + .../TestCases/Posix/fixedsize-logging.cpp | 1 + + test/xray/TestCases/Posix/func-id-utils.cpp | 1 + + test/xray/TestCases/Posix/logging-modes.cpp | 1 + + .../TestCases/Posix/optional-inmemory-log.cpp | 1 + + .../TestCases/Posix/patching-unpatching.cpp | 1 + + test/xray/TestCases/Posix/pic_test.cpp | 1 + + 105 files changed, 845 insertions(+), 441 deletions(-) + +diff --git a/compiler-rt.orig/cmake/Modules/CompilerRTUtils.cmake b/compiler-rt.new/cmake/Modules/CompilerRTUtils.cmake +index f61d487..49d0eac 100644 +--- a/compiler-rt.orig/cmake/Modules/CompilerRTUtils.cmake ++++ b/compiler-rt.new/cmake/Modules/CompilerRTUtils.cmake +@@ -162,6 +162,7 @@ macro(detect_target_arch) + check_symbol_exists(__powerpc64__ "" __PPC64) + check_symbol_exists(__powerpc64le__ "" __PPC64LE) + check_symbol_exists(__riscv "" __RISCV) ++ check_symbol_exists(__loongarch64 "" __LOONGARCH64) + check_symbol_exists(__s390x__ "" __S390X) + check_symbol_exists(__sparc "" __SPARC) + check_symbol_exists(__sparcv9 "" __SPARCV9) +@@ -194,6 +195,8 @@ macro(detect_target_arch) + else() + message(FATAL_ERROR "Unsupport XLEN for RISC-V") + endif() ++ elseif(__LOONGARCH64) ++ add_default_target_arch(loongarch64) + elseif(__S390X) + add_default_target_arch(s390x) + elseif(__SPARCV9) +diff --git a/compiler-rt.orig/cmake/base-config-ix.cmake b/compiler-rt.new/cmake/base-config-ix.cmake +index 1edab43..5fdcdab 100644 +--- a/compiler-rt.orig/cmake/base-config-ix.cmake ++++ b/compiler-rt.new/cmake/base-config-ix.cmake +@@ -224,6 +224,8 @@ macro(test_targets) + test_target_arch(wasm64 "" "--target=wasm64-unknown-unknown") + elseif("${COMPILER_RT_DEFAULT_TARGET_ARCH}" MATCHES "ve") + test_target_arch(ve "__ve__" "--target=ve-unknown-none") ++ elseif("${COMPILER_RT_DEFAULT_TARGET_ARCH}" MATCHES "loongarch64") ++ test_target_arch(loongarch64 "" "") + endif() + set(COMPILER_RT_OS_SUFFIX "") + endif() +diff --git a/compiler-rt.orig/cmake/builtin-config-ix.cmake b/compiler-rt.new/cmake/builtin-config-ix.cmake +index ad3b987..18b35f1 100644 +--- a/compiler-rt.orig/cmake/builtin-config-ix.cmake ++++ b/compiler-rt.new/cmake/builtin-config-ix.cmake +@@ -52,6 +52,7 @@ set(SPARCV9 sparcv9) + set(WASM32 wasm32) + set(WASM64 wasm64) + set(VE ve) ++set(LOONGARCH64 loongarch64) + + if(APPLE) + set(ARM64 arm64 arm64e) +@@ -63,7 +64,7 @@ set(ALL_BUILTIN_SUPPORTED_ARCH + ${X86} ${X86_64} ${ARM32} ${ARM64} + ${HEXAGON} ${MIPS32} ${MIPS64} 
${PPC32} ${PPC64} + ${RISCV32} ${RISCV64} ${SPARC} ${SPARCV9} +- ${WASM32} ${WASM64} ${VE}) ++ ${WASM32} ${WASM64} ${VE} ${LOONGARCH64}) + + include(CompilerRTUtils) + include(CompilerRTDarwinUtils) +diff --git a/compiler-rt.orig/cmake/config-ix.cmake b/compiler-rt.new/cmake/config-ix.cmake +index f81b838..979e6c9 100644 +--- a/compiler-rt.orig/cmake/config-ix.cmake ++++ b/compiler-rt.new/cmake/config-ix.cmake +@@ -288,6 +288,7 @@ set(SPARCV9 sparcv9) + set(WASM32 wasm32) + set(WASM64 wasm64) + set(VE ve) ++set(LOONGARCH64 loongarch64) + + if(APPLE) + set(ARM64 arm64) +@@ -296,11 +297,11 @@ if(APPLE) + endif() + + set(ALL_SANITIZER_COMMON_SUPPORTED_ARCH ${X86} ${X86_64} ${PPC64} ${RISCV64} +- ${ARM32} ${ARM64} ${MIPS32} ${MIPS64} ${S390X} ${SPARC} ${SPARCV9}) ++ ${ARM32} ${ARM64} ${MIPS32} ${MIPS64} ${S390X} ${SPARC} ${SPARCV9} ${LOONGARCH64}) + set(ALL_ASAN_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${RISCV64} +- ${MIPS32} ${MIPS64} ${PPC64} ${S390X} ${SPARC} ${SPARCV9}) ++ ${MIPS32} ${MIPS64} ${PPC64} ${S390X} ${SPARC} ${SPARCV9} ${LOONGARCH64}) + set(ALL_CRT_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${RISCV32} ${RISCV64} ${VE}) +-set(ALL_DFSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64}) ++set(ALL_DFSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64} ${LOONGARCH64}) + + if(ANDROID) + set(OS_NAME "Android") +@@ -309,7 +310,7 @@ else() + endif() + + if(OS_NAME MATCHES "Linux") +- set(ALL_FUZZER_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM64} ${S390X}) ++ set(ALL_FUZZER_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM64} ${S390X} ${LOONGARCH64}) + elseif (OS_NAME MATCHES "Windows") + set(ALL_FUZZER_SUPPORTED_ARCH ${X86} ${X86_64}) + elseif(OS_NAME MATCHES "Android") +@@ -322,24 +323,24 @@ set(ALL_GWP_ASAN_SUPPORTED_ARCH ${X86} ${X86_64}) + if(APPLE) + set(ALL_LSAN_SUPPORTED_ARCH ${X86} ${X86_64} ${MIPS64} ${ARM64}) + else() +- set(ALL_LSAN_SUPPORTED_ARCH ${X86} ${X86_64} ${MIPS64} ${ARM64} ${ARM32} ${PPC64} ${S390X}) ++ set(ALL_LSAN_SUPPORTED_ARCH ${X86} ${X86_64} ${MIPS64} ${ARM64} ${ARM32} ${PPC64} ${S390X} ${LOONGARCH64}) + endif() +-set(ALL_MSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64} ${PPC64} ${S390X}) ++set(ALL_MSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64} ${PPC64} ${S390X} ${LOONGARCH64}) + set(ALL_HWASAN_SUPPORTED_ARCH ${X86_64} ${ARM64}) + set(ALL_MEMPROF_SUPPORTED_ARCH ${X86_64}) + set(ALL_PROFILE_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${PPC32} ${PPC64} +- ${MIPS32} ${MIPS64} ${S390X} ${SPARC} ${SPARCV9}) +-set(ALL_TSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64} ${PPC64}) ++ ${MIPS32} ${MIPS64} ${S390X} ${SPARC} ${SPARCV9} ${LOONGARCH64}) ++set(ALL_TSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64} ${PPC64} ${LOONGARCH64}) + set(ALL_UBSAN_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${RISCV64} +- ${MIPS32} ${MIPS64} ${PPC64} ${S390X} ${SPARC} ${SPARCV9}) +-set(ALL_SAFESTACK_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM64} ${MIPS32} ${MIPS64}) +-set(ALL_CFI_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${MIPS64}) +-set(ALL_SCUDO_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64} ${PPC64}) ++ ${MIPS32} ${MIPS64} ${PPC64} ${S390X} ${SPARC} ${SPARCV9} ${LOONGARCH64}) ++set(ALL_SAFESTACK_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM64} ${MIPS32} ${MIPS64} ${LOONGARCH64}) ++set(ALL_CFI_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${MIPS64} ${LOONGARCH64}) ++set(ALL_SCUDO_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64} ${PPC64} ${LOONGARCH64}) + set(ALL_SCUDO_STANDALONE_SUPPORTED_ARCH ${X86} ${X86_64}) + if(APPLE) + 
set(ALL_XRAY_SUPPORTED_ARCH ${X86_64}) + else() +-set(ALL_XRAY_SUPPORTED_ARCH ${X86_64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64} powerpc64le) ++set(ALL_XRAY_SUPPORTED_ARCH ${X86_64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64} powerpc64le ${LOONGARCH64}) + endif() + set(ALL_SHADOWCALLSTACK_SUPPORTED_ARCH ${ARM64}) + +diff --git a/compiler-rt.orig/lib/asan/asan_interceptors.h b/compiler-rt.new/lib/asan/asan_interceptors.h +index 45cdb80..2c6d208 100644 +--- a/compiler-rt.orig/lib/asan/asan_interceptors.h ++++ b/compiler-rt.new/lib/asan/asan_interceptors.h +@@ -112,10 +112,11 @@ void InitializePlatformInterceptors(); + # define ASAN_INTERCEPT___STRDUP 0 + #endif + +-#if SANITIZER_LINUX && \ +- (defined(__arm__) || defined(__aarch64__) || defined(__i386__) || \ +- defined(__x86_64__) || SANITIZER_RISCV64) +-# define ASAN_INTERCEPT_VFORK 1 ++#if SANITIZER_LINUX && \ ++ (defined(__arm__) || defined(__aarch64__) || defined(__i386__) || \ ++ defined(__x86_64__) || SANITIZER_RISCV64) || \ ++ SANITIZER_LOONGARCH64 ++#define ASAN_INTERCEPT_VFORK 1 + #else + # define ASAN_INTERCEPT_VFORK 0 + #endif +diff --git a/compiler-rt.orig/lib/asan/asan_interceptors_vfork.S b/compiler-rt.new/lib/asan/asan_interceptors_vfork.S +index 3ae5503..74cbf3c 100644 +--- a/compiler-rt.orig/lib/asan/asan_interceptors_vfork.S ++++ b/compiler-rt.new/lib/asan/asan_interceptors_vfork.S +@@ -7,6 +7,7 @@ + #include "sanitizer_common/sanitizer_common_interceptors_vfork_arm.inc.S" + #include "sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S" + #include "sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S" ++#include "sanitizer_common/sanitizer_common_interceptors_vfork_loongarch64.inc.S" + #include "sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S" + #endif + +diff --git a/compiler-rt.orig/lib/asan/asan_malloc_linux.cpp b/compiler-rt.new/lib/asan/asan_malloc_linux.cpp +index 9c3f0a5..88a68c0 100644 +--- a/compiler-rt.orig/lib/asan/asan_malloc_linux.cpp ++++ b/compiler-rt.new/lib/asan/asan_malloc_linux.cpp +@@ -31,7 +31,11 @@ using namespace __asan; + + static uptr allocated_for_dlsym; + static uptr last_dlsym_alloc_size_in_words; ++#if SANITIZER_LOONGARCH64 ++static const uptr kDlsymAllocPoolSize = 4096 * 4; ++#else + static const uptr kDlsymAllocPoolSize = SANITIZER_RTEMS ? 
4096 : 1024; ++#endif + static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize]; + + static inline bool IsInDlsymAllocPool(const void *ptr) { +diff --git a/compiler-rt.orig/lib/asan/asan_mapping.h b/compiler-rt.new/lib/asan/asan_mapping.h +index f239c3e..0b81c62 100644 +--- a/compiler-rt.orig/lib/asan/asan_mapping.h ++++ b/compiler-rt.new/lib/asan/asan_mapping.h +@@ -178,6 +178,8 @@ static const u64 kAArch64_ShadowOffset64 = 1ULL << 36; + static const u64 kRiscv64_ShadowOffset64 = 0x20000000; + static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000; + static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37; ++static const u64 kLoongArch32_ShadowOffset32 = 0x0aaa0000; ++static const u64 kLoongArch64_ShadowOffset64 = 1ULL << 46; + static const u64 kPPC64_ShadowOffset64 = 1ULL << 44; + static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52; + static const u64 kSPARC64_ShadowOffset64 = 1ULL << 43; // 0x80000000000 +@@ -205,6 +207,8 @@ static const u64 kMyriadCacheBitMask32 = 0x40000000ULL; + # define SHADOW_OFFSET __asan_shadow_memory_dynamic_address + # elif defined(__mips__) + # define SHADOW_OFFSET kMIPS32_ShadowOffset32 ++#elif SANITIZER_LOONGARCH ++# define SHADOW_OFFSET kLoongArch32_ShadowOffset32 + # elif SANITIZER_FREEBSD + # define SHADOW_OFFSET kFreeBSD_ShadowOffset32 + # elif SANITIZER_NETBSD +@@ -239,12 +243,14 @@ static const u64 kMyriadCacheBitMask32 = 0x40000000ULL; + # define SHADOW_OFFSET kDefaultShadowOffset64 + # elif defined(__mips64) + # define SHADOW_OFFSET kMIPS64_ShadowOffset64 +-#elif defined(__sparc__) +-#define SHADOW_OFFSET kSPARC64_ShadowOffset64 ++#elif SANITIZER_LOONGARCH64 ++# define SHADOW_OFFSET kLoongArch64_ShadowOffset64 ++# elif defined(__sparc__) ++# define SHADOW_OFFSET kSPARC64_ShadowOffset64 + # elif SANITIZER_WINDOWS64 +-# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address ++# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address + # else +-# define SHADOW_OFFSET kDefaultShort64bitShadowOffset ++# define SHADOW_OFFSET kDefaultShort64bitShadowOffset + # endif + #endif + +diff --git a/compiler-rt.orig/lib/asan/scripts/asan_symbolize.py b/compiler-rt.new/lib/asan/scripts/asan_symbolize.py +index 5c4001a..8e28462 100755 +--- a/compiler-rt.orig/lib/asan/scripts/asan_symbolize.py ++++ b/compiler-rt.new/lib/asan/scripts/asan_symbolize.py +@@ -50,7 +50,7 @@ def fix_filename(file_name): + def is_valid_arch(s): + return s in ["i386", "x86_64", "x86_64h", "arm", "armv6", "armv7", "armv7s", + "armv7k", "arm64", "powerpc64", "powerpc64le", "s390x", "s390", +- "riscv64"] ++ "riscv64" "loongarch64"] + + def guess_arch(addr): + # Guess which arch we're running. 10 = len('0x') + 8 hex digits. +diff --git a/compiler-rt.orig/lib/asan/tests/asan_test.cpp b/compiler-rt.new/lib/asan/tests/asan_test.cpp +index c0b79bb..a0e0f83 100644 +--- a/compiler-rt.orig/lib/asan/tests/asan_test.cpp ++++ b/compiler-rt.new/lib/asan/tests/asan_test.cpp +@@ -621,9 +621,9 @@ NOINLINE void SigLongJmpFunc1(sigjmp_buf buf) { + siglongjmp(buf, 1); + } + +-#if !defined(__ANDROID__) && !defined(__arm__) && !defined(__aarch64__) && \ +- !defined(__mips__) && !defined(__mips64) && !defined(__s390__) && \ +- !defined(__riscv) ++# if !defined(__ANDROID__) && !defined(__arm__) && !defined(__aarch64__) && \ ++ !defined(__mips__) && !defined(__mips64) && !defined(__s390__) && \ ++ !defined(__riscv) && !defined(__loongarch__) + NOINLINE void BuiltinLongJmpFunc1(jmp_buf buf) { + // create three red zones for these two stack objects. 
+ int a; +@@ -645,10 +645,10 @@ TEST(AddressSanitizer, BuiltinLongJmpTest) { + TouchStackFunc(); + } + } +-#endif // !defined(__ANDROID__) && !defined(__arm__) && +- // !defined(__aarch64__) && !defined(__mips__) +- // !defined(__mips64) && !defined(__s390__) +- // !defined(__riscv) ++# endif // !defined(__ANDROID__) && !defined(__arm__) && ++ // !defined(__aarch64__) && !defined(__mips__) ++ // !defined(__mips64) && !defined(__s390__) ++ // !defined(__riscv) && !defined(__loongarch__) + + TEST(AddressSanitizer, UnderscopeLongJmpTest) { + static jmp_buf buf; +diff --git a/compiler-rt.orig/lib/builtins/CMakeLists.txt b/compiler-rt.new/lib/builtins/CMakeLists.txt +index 73b6bea..8c1274c 100644 +--- a/compiler-rt.orig/lib/builtins/CMakeLists.txt ++++ b/compiler-rt.new/lib/builtins/CMakeLists.txt +@@ -599,6 +599,11 @@ set(mips64_SOURCES ${GENERIC_TF_SOURCES} + ${mips_SOURCES}) + set(mips64el_SOURCES ${GENERIC_TF_SOURCES} + ${mips_SOURCES}) ++set(loongarch64_SOURCES ++ loongarch/fp_mode.c ++ ${GENERIC_TF_SOURCES} ++ ${GENERIC_SOURCES} ++) + + set(powerpc_SOURCES ${GENERIC_SOURCES}) + +diff --git a/compiler-rt.orig/lib/builtins/clear_cache.c b/compiler-rt.new/lib/builtins/clear_cache.c +index 5a443dd..bc0865b 100644 +--- a/compiler-rt.orig/lib/builtins/clear_cache.c ++++ b/compiler-rt.new/lib/builtins/clear_cache.c +@@ -163,6 +163,8 @@ void __clear_cache(void *start, void *end) { + : "=r"(start_reg) + : "r"(start_reg), "r"(end_reg), "r"(flags), "r"(syscall_nr)); + assert(start_reg == 0 && "Cache flush syscall failed."); ++#elif defined(__linux__) && defined(__loongarch__) ++ __asm__ volatile("ibar 0"); + #else + #if __APPLE__ + // On Darwin, sys_icache_invalidate() provides this functionality +diff --git a/compiler-rt.orig/lib/crt/crtbegin.c b/compiler-rt.new/lib/crt/crtbegin.c +index 481c158..bbdc711 100644 +--- a/compiler-rt.orig/lib/crt/crtbegin.c ++++ b/compiler-rt.new/lib/crt/crtbegin.c +@@ -56,6 +56,10 @@ __asm__(".pushsection .init,\"ax\",@progbits\n\t" + __asm__(".pushsection .init,\"ax\",%progbits\n\t" + "call " __USER_LABEL_PREFIX__ "__do_init\n\t" + ".popsection"); ++#elif defined(__loongarch__) ++__asm__(".pushsection .init,\"ax\",%progbits\n\t" ++ "bl " __USER_LABEL_PREFIX__ "__do_init\n\t" ++ ".popsection"); + #elif defined(__arm__) || defined(__aarch64__) + __asm__(".pushsection .init,\"ax\",%progbits\n\t" + "bl " __USER_LABEL_PREFIX__ "__do_init\n\t" +@@ -118,6 +122,10 @@ __asm__(".pushsection .fini,\"ax\",@progbits\n\t" + __asm__(".pushsection .fini,\"ax\",@progbits\n\t" + "call " __USER_LABEL_PREFIX__ "__do_fini\n\t" + ".popsection"); ++#elif defined(__loongarch__) ++__asm__(".pushsection .fini,\"ax\",@progbits\n\t" ++ "bl " __USER_LABEL_PREFIX__ "__do_fini\n\t" ++ ".popsection"); + #elif defined(__sparc__) + __asm__(".pushsection .fini,\"ax\",@progbits\n\t" + "call " __USER_LABEL_PREFIX__ "__do_fini\n\t" +diff --git a/compiler-rt.orig/lib/dfsan/dfsan_platform.h b/compiler-rt.new/lib/dfsan/dfsan_platform.h +index 4ff68b9..43006d4 100644 +--- a/compiler-rt.orig/lib/dfsan/dfsan_platform.h ++++ b/compiler-rt.new/lib/dfsan/dfsan_platform.h +@@ -54,6 +54,13 @@ struct Mapping48 { + + extern int vmaSize; + # define DFSAN_RUNTIME_VMA 1 ++#elif defined(__loongarch__) ++struct Mapping { ++ static const uptr kShadowAddr = 0x10000; ++ static const uptr kUnionTableAddr = 0x8000000000; ++ static const uptr kAppAddr = 0x7fff00008000; ++ static const uptr kShadowMask = ~0x7ffff0000000; ++}; + #else + # error "DFSan not supported for this platform!" 
+ #endif +diff --git a/compiler-rt.orig/lib/fuzzer/FuzzerTracePC.cpp b/compiler-rt.new/lib/fuzzer/FuzzerTracePC.cpp +index 91e94d8..152780b 100644 +--- a/compiler-rt.orig/lib/fuzzer/FuzzerTracePC.cpp ++++ b/compiler-rt.new/lib/fuzzer/FuzzerTracePC.cpp +@@ -124,7 +124,8 @@ inline ALWAYS_INLINE uintptr_t GetPreviousInstructionPc(uintptr_t PC) { + // so we return (pc-2) in that case in order to be safe. + // For A32 mode we return (pc-4) because all instructions are 32 bit long. + return (PC - 3) & (~1); +-#elif defined(__powerpc__) || defined(__powerpc64__) || defined(__aarch64__) ++#elif defined(__powerpc__) || defined(__powerpc64__) || \ ++ defined(__aarch64__) || defined(__loongarch__) + // PCs are always 4 byte aligned. + return PC - 4; + #elif defined(__sparc__) || defined(__mips__) +@@ -139,8 +140,8 @@ inline ALWAYS_INLINE uintptr_t GetPreviousInstructionPc(uintptr_t PC) { + ALWAYS_INLINE uintptr_t TracePC::GetNextInstructionPc(uintptr_t PC) { + #if defined(__mips__) + return PC + 8; +-#elif defined(__powerpc__) || defined(__sparc__) || defined(__arm__) || \ +- defined(__aarch64__) ++#elif defined(__powerpc__) || defined(__sparc__) || defined(__arm__) || \ ++ defined(__aarch64__) || defined(__loongarch__) + return PC + 4; + #else + return PC + 1; +diff --git a/compiler-rt.orig/lib/fuzzer/FuzzerUtil.h b/compiler-rt.new/lib/fuzzer/FuzzerUtil.h +index e90be08..d7d0490 100644 +--- a/compiler-rt.orig/lib/fuzzer/FuzzerUtil.h ++++ b/compiler-rt.new/lib/fuzzer/FuzzerUtil.h +@@ -15,6 +15,7 @@ + #include "FuzzerBuiltinsMsvc.h" + #include "FuzzerCommand.h" + #include "FuzzerDefs.h" ++#include + + namespace fuzzer { + +@@ -92,7 +93,7 @@ size_t SimpleFastHash(const uint8_t *Data, size_t Size); + + inline uint32_t Log(uint32_t X) { return 32 - Clz(X) - 1; } + +-inline size_t PageSize() { return 4096; } ++inline size_t PageSize() { return getpagesize(); } + inline uint8_t *RoundUpByPage(uint8_t *P) { + uintptr_t X = reinterpret_cast(P); + size_t Mask = PageSize() - 1; +diff --git a/compiler-rt.orig/lib/interception/tests/CMakeLists.txt b/compiler-rt.new/lib/interception/tests/CMakeLists.txt +index 06184ee..5525510 100644 +--- a/compiler-rt.orig/lib/interception/tests/CMakeLists.txt ++++ b/compiler-rt.new/lib/interception/tests/CMakeLists.txt +@@ -1,6 +1,6 @@ + include(CompilerRTCompile) + +-filter_available_targets(INTERCEPTION_UNITTEST_SUPPORTED_ARCH x86_64 i386 mips64 mips64el) ++filter_available_targets(INTERCEPTION_UNITTEST_SUPPORTED_ARCH x86_64 i386 mips64 mips64el loongarch64) + + set(INTERCEPTION_UNITTESTS + interception_linux_test.cpp +diff --git a/compiler-rt.orig/lib/lsan/lsan_allocator.cpp b/compiler-rt.new/lib/lsan/lsan_allocator.cpp +index 7042295..ec90a62 100644 +--- a/compiler-rt.orig/lib/lsan/lsan_allocator.cpp ++++ b/compiler-rt.new/lib/lsan/lsan_allocator.cpp +@@ -28,7 +28,7 @@ extern "C" void *memset(void *ptr, int value, uptr num); + namespace __lsan { + #if defined(__i386__) || defined(__arm__) + static const uptr kMaxAllowedMallocSize = 1UL << 30; +-#elif defined(__mips64) || defined(__aarch64__) ++#elif defined(__mips64) || defined(__aarch64__) || defined(__loongarch64) + static const uptr kMaxAllowedMallocSize = 4UL << 30; + #else + static const uptr kMaxAllowedMallocSize = 8UL << 30; +diff --git a/compiler-rt.orig/lib/lsan/lsan_allocator.h b/compiler-rt.new/lib/lsan/lsan_allocator.h +index 17e13cd..733d16f 100644 +--- a/compiler-rt.orig/lib/lsan/lsan_allocator.h ++++ b/compiler-rt.new/lib/lsan/lsan_allocator.h +@@ -50,7 +50,7 @@ struct ChunkMetadata { + }; + + #if 
defined(__mips64) || defined(__aarch64__) || defined(__i386__) || \ +- defined(__arm__) ++ defined(__arm__) || defined(__loongarch64) + template + struct AP32 { + static const uptr kSpaceBeg = 0; +diff --git a/compiler-rt.orig/lib/lsan/lsan_common.cpp b/compiler-rt.new/lib/lsan/lsan_common.cpp +index d5b4132..f147036 100644 +--- a/compiler-rt.orig/lib/lsan/lsan_common.cpp ++++ b/compiler-rt.new/lib/lsan/lsan_common.cpp +@@ -167,13 +167,15 @@ static inline bool CanBeAHeapPointer(uptr p) { + return ((p >> 47) == 0); + #elif defined(__mips64) + return ((p >> 40) == 0); +-#elif defined(__aarch64__) ++# elif defined(__loongarch64) ++ return ((p >> 47) == 0); ++# elif defined(__aarch64__) + unsigned runtimeVMA = + (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1); + return ((p >> runtimeVMA) == 0); +-#else ++# else + return true; +-#endif ++# endif + } + + // Scans the memory range, looking for byte patterns that point into allocator +diff --git a/compiler-rt.orig/lib/lsan/lsan_common.h b/compiler-rt.new/lib/lsan/lsan_common.h +index b0ae6f0..9326471 100644 +--- a/compiler-rt.orig/lib/lsan/lsan_common.h ++++ b/compiler-rt.new/lib/lsan/lsan_common.h +@@ -35,8 +35,8 @@ + #define CAN_SANITIZE_LEAKS 0 + #elif (SANITIZER_LINUX || SANITIZER_MAC) && (SANITIZER_WORDSIZE == 64) && \ + (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \ +- defined(__powerpc64__) || defined(__s390x__)) +-#define CAN_SANITIZE_LEAKS 1 ++ defined(__powerpc64__) || defined(__s390x__) || defined(__loongarch64)) ++# define CAN_SANITIZE_LEAKS 1 + #elif defined(__i386__) && (SANITIZER_LINUX || SANITIZER_MAC) + #define CAN_SANITIZE_LEAKS 1 + #elif defined(__arm__) && SANITIZER_LINUX +diff --git a/compiler-rt.orig/lib/msan/msan.h b/compiler-rt.new/lib/msan/msan.h +index e794c7c..ccd53d0 100644 +--- a/compiler-rt.orig/lib/msan/msan.h ++++ b/compiler-rt.new/lib/msan/msan.h +@@ -60,8 +60,33 @@ const MappingDesc kMemoryLayout[] = { + {0x00c000000000ULL, 0x00e200000000ULL, MappingDesc::INVALID, "invalid"}, + {0x00e200000000ULL, 0x00ffffffffffULL, MappingDesc::APP, "app-3"}}; + +-#define MEM_TO_SHADOW(mem) (((uptr)(mem)) ^ 0x8000000000ULL) +-#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x2000000000ULL) ++# define MEM_TO_SHADOW(mem) (((uptr)(mem)) ^ 0x8000000000ULL) ++# define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x2000000000ULL) ++ ++#elif SANITIZER_LINUX && defined(__loongarch64) ++ ++// LOONGARCH64 maps: ++// - 0x000000000000-0x000200000000: Program own segments ++// - 0x555500000000-0x555600000000: PIE program segments ++// - 0x7fff00000000-0x7fffffffffff: libraries segments. 
++const MappingDesc kMemoryLayout[] = { ++ {0x000000000000ULL, 0x000200000000ULL, MappingDesc::APP, "app-1"}, ++ {0x000200000000ULL, 0x010000000000ULL, MappingDesc::INVALID, "invalid"}, ++ {0x010000000000ULL, 0x100000000000ULL, MappingDesc::SHADOW, "shadow-2"}, ++ {0x100000000000ULL, 0x110000000000ULL, MappingDesc::INVALID, "invalid"}, ++ {0x110000000000ULL, 0x200000000000ULL, MappingDesc::ORIGIN, "origin-2"}, ++ {0x200000000000ULL, 0x300000000000ULL, MappingDesc::SHADOW, "shadow-3"}, ++ {0x300000000000ULL, 0x400000000000ULL, MappingDesc::ORIGIN, "origin-3"}, ++ {0x400000000000ULL, 0x500000000000ULL, MappingDesc::INVALID, "invalid"}, ++ {0x500000000000ULL, 0x500200000000ULL, MappingDesc::SHADOW, "shadow-1"}, ++ {0x500200000000ULL, 0x510000000000ULL, MappingDesc::INVALID, "invalid"}, ++ {0x510000000000ULL, 0x600000000000ULL, MappingDesc::APP, "app-2"}, ++ {0x600000000000ULL, 0x600200000000ULL, MappingDesc::ORIGIN, "origin-1"}, ++ {0x600200000000ULL, 0x700000000000ULL, MappingDesc::INVALID, "invalid"}, ++ {0x700000000000ULL, 0x800000000000ULL, MappingDesc::APP, "app-3"}}; ++ ++#define MEM_TO_SHADOW(mem) (((uptr)(mem)) ^ 0x500000000000ULL) ++#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x100000000000ULL) + + #elif SANITIZER_LINUX && defined(__aarch64__) + +diff --git a/compiler-rt.orig/lib/msan/msan_allocator.cpp b/compiler-rt.new/lib/msan/msan_allocator.cpp +index 68be794..a9dcf12 100644 +--- a/compiler-rt.orig/lib/msan/msan_allocator.cpp ++++ b/compiler-rt.new/lib/msan/msan_allocator.cpp +@@ -44,7 +44,7 @@ struct MsanMapUnmapCallback { + } + }; + +-#if defined(__mips64) ++#if defined(__mips64) || defined(__loongarch64) + static const uptr kMaxAllowedMallocSize = 2UL << 30; + + struct AP32 { +diff --git a/compiler-rt.orig/lib/msan/msan_interceptors.cpp b/compiler-rt.new/lib/msan/msan_interceptors.cpp +index 4eea94f..fa42922 100644 +--- a/compiler-rt.orig/lib/msan/msan_interceptors.cpp ++++ b/compiler-rt.new/lib/msan/msan_interceptors.cpp +@@ -74,7 +74,11 @@ bool IsInInterceptorScope() { + } + + static uptr allocated_for_dlsym; ++#if SANITIZER_LOONGARCH64 ++static const uptr kDlsymAllocPoolSize = 4096 * 4; ++#else + static const uptr kDlsymAllocPoolSize = 1024; ++#endif + static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize]; + + static bool IsInDlsymAllocPool(const void *ptr) { +diff --git a/compiler-rt.orig/lib/msan/tests/msan_test.cpp b/compiler-rt.new/lib/msan/tests/msan_test.cpp +index 5dc9090..b8a08a1 100644 +--- a/compiler-rt.orig/lib/msan/tests/msan_test.cpp ++++ b/compiler-rt.new/lib/msan/tests/msan_test.cpp +@@ -3159,13 +3159,15 @@ static void GetPathToLoadable(char *buf, size_t sz) { + static const char basename[] = "libmsan_loadable.mips64.so"; + #elif defined(__mips64) + static const char basename[] = "libmsan_loadable.mips64el.so"; +-#elif defined(__aarch64__) ++# elif defined(__loongarch64) ++ static const char basename[] = "libmsan_loadable.loongarch64.so"; ++# elif defined(__aarch64__) + static const char basename[] = "libmsan_loadable.aarch64.so"; +-#elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ++# elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + static const char basename[] = "libmsan_loadable.powerpc64.so"; +-#elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ ++# elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + static const char basename[] = "libmsan_loadable.powerpc64le.so"; +-#endif ++# endif + int res = snprintf(buf, sz, "%.*s/%s", + (int)dir_len, program_path, 
basename); + ASSERT_GE(res, 0); +diff --git a/compiler-rt.orig/lib/safestack/safestack_platform.h b/compiler-rt.new/lib/safestack/safestack_platform.h +index 81e4c26..7987bdc 100644 +--- a/compiler-rt.orig/lib/safestack/safestack_platform.h ++++ b/compiler-rt.new/lib/safestack/safestack_platform.h +@@ -96,6 +96,8 @@ inline void *Mmap(void *addr, size_t length, int prot, int flags, int fd, + return __mmap(addr, length, prot, flags, fd, 0, offset); + #elif defined(__x86_64__) && (SANITIZER_FREEBSD) + return (void *)__syscall(SYS_mmap, addr, length, prot, flags, fd, offset); ++#elif SANITIZER_LOONGARCH64 ++ return mmap(addr, length, prot, flags, fd, offset); + #else + return (void *)syscall(SYS_mmap, addr, length, prot, flags, fd, offset); + #endif +@@ -105,6 +107,8 @@ inline int Munmap(void *addr, size_t length) { + #if SANITIZER_NETBSD + DEFINE__REAL(int, munmap, void *a, size_t b); + return _REAL(munmap, addr, length); ++#elif SANITIZER_LOONGARCH64 ++ return munmap(addr, length); + #else + return syscall(SYS_munmap, addr, length); + #endif +@@ -114,6 +118,8 @@ inline int Mprotect(void *addr, size_t length, int prot) { + #if SANITIZER_NETBSD + DEFINE__REAL(int, mprotect, void *a, size_t b, int c); + return _REAL(mprotect, addr, length, prot); ++#elif SANITIZER_LOONGARCH64 ++ return mprotect(addr, length, prot); + #else + return syscall(SYS_mprotect, addr, length, prot); + #endif +diff --git a/compiler-rt.orig/lib/sanitizer_common/CMakeLists.txt b/compiler-rt.new/lib/sanitizer_common/CMakeLists.txt +index 674835a..82ab3c7 100644 +--- a/compiler-rt.orig/lib/sanitizer_common/CMakeLists.txt ++++ b/compiler-rt.new/lib/sanitizer_common/CMakeLists.txt +@@ -184,6 +184,7 @@ set(SANITIZER_IMPL_HEADERS + sanitizer_syscall_linux_arm.inc + sanitizer_syscall_linux_x86_64.inc + sanitizer_syscall_linux_riscv64.inc ++ sanitizer_syscall_linux_loongarch64.inc + sanitizer_syscalls_netbsd.inc + sanitizer_thread_registry.h + sanitizer_tls_get_addr.h +diff --git a/compiler-rt.orig/lib/sanitizer_common/sanitizer_common.h b/compiler-rt.new/lib/sanitizer_common/sanitizer_common.h +index a6532ee..877f120 100644 +--- a/compiler-rt.orig/lib/sanitizer_common/sanitizer_common.h ++++ b/compiler-rt.new/lib/sanitizer_common/sanitizer_common.h +@@ -679,7 +679,8 @@ enum ModuleArch { + kModuleArchARMV7S, + kModuleArchARMV7K, + kModuleArchARM64, +- kModuleArchRISCV64 ++ kModuleArchRISCV64, ++ kModuleArchLOONGARCH64 + }; + + // Sorts and removes duplicates from the container. 
+@@ -746,6 +747,8 @@ inline const char *ModuleArchToString(ModuleArch arch) { + return "arm64"; + case kModuleArchRISCV64: + return "riscv64"; ++ case kModuleArchLOONGARCH64: ++ return "loongarch64"; + } + CHECK(0 && "Invalid module arch"); + return ""; +diff --git a/compiler-rt.orig/lib/sanitizer_common/sanitizer_common_syscalls.inc b/compiler-rt.new/lib/sanitizer_common/sanitizer_common_syscalls.inc +index 1b89d6e..4c64931 100644 +--- a/compiler-rt.orig/lib/sanitizer_common/sanitizer_common_syscalls.inc ++++ b/compiler-rt.new/lib/sanitizer_common/sanitizer_common_syscalls.inc +@@ -2297,7 +2297,7 @@ PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) { + #if !SANITIZER_ANDROID && \ + (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ + defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \ +- SANITIZER_RISCV64) ++ SANITIZER_RISCV64 || SANITIZER_LOONGARCH64) + if (data) { + if (request == ptrace_setregs) { + PRE_READ((void *)data, struct_user_regs_struct_sz); +@@ -2319,7 +2319,7 @@ POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) { + #if !SANITIZER_ANDROID && \ + (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ + defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \ +- SANITIZER_RISCV64) ++ SANITIZER_RISCV64 || SANITIZER_LOONGARCH64) + if (res >= 0 && data) { + // Note that this is different from the interceptor in + // sanitizer_common_interceptors.inc. +diff --git a/compiler-rt.orig/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cpp b/compiler-rt.new/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cpp +index 73ebeb5..a280402 100644 +--- a/compiler-rt.orig/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cpp ++++ b/compiler-rt.new/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cpp +@@ -10,11 +10,12 @@ + #include "sanitizer_platform.h" + + #if !SANITIZER_FUCHSIA +-#include "sancov_flags.h" +-#include "sanitizer_allocator_internal.h" +-#include "sanitizer_atomic.h" +-#include "sanitizer_common.h" +-#include "sanitizer_file.h" ++# include "sancov_flags.h" ++# include "sanitizer_allocator_internal.h" ++# include "sanitizer_atomic.h" ++# include "sanitizer_common.h" ++# include "sanitizer_common/sanitizer_stacktrace.h" ++# include "sanitizer_file.h" + + using namespace __sanitizer; + +@@ -173,7 +174,8 @@ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage(const uptr* pcs, + + SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard, u32* guard) { + if (!*guard) return; +- __sancov::pc_guard_controller.TracePcGuard(guard, GET_CALLER_PC() - 1); ++ __sancov::pc_guard_controller.TracePcGuard( ++ guard, StackTrace::GetPreviousInstructionPc(GET_CALLER_PC())); + } + + SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard_init, +diff --git a/compiler-rt.orig/lib/sanitizer_common/sanitizer_linux.cpp b/compiler-rt.new/lib/sanitizer_common/sanitizer_linux.cpp +index 379f6d9..faba5b3 100644 +--- a/compiler-rt.orig/lib/sanitizer_common/sanitizer_linux.cpp ++++ b/compiler-rt.new/lib/sanitizer_common/sanitizer_linux.cpp +@@ -78,6 +78,10 @@ + #include + #endif + ++#if SANITIZER_LINUX && defined(__loongarch__) ++#include ++#endif ++ + #if SANITIZER_FREEBSD + #include + #include +@@ -156,6 +160,8 @@ namespace __sanitizer { + #include "sanitizer_syscall_linux_riscv64.inc" + #elif SANITIZER_LINUX && defined(__aarch64__) + #include "sanitizer_syscall_linux_aarch64.inc" ++#elif SANITIZER_LINUX && SANITIZER_LOONGARCH64 ++#include 
"sanitizer_syscall_linux_loongarch64.inc" + #elif SANITIZER_LINUX && defined(__arm__) + #include "sanitizer_syscall_linux_arm.inc" + #else +@@ -170,12 +176,17 @@ uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd, + #if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS + return internal_syscall(SYSCALL(mmap), (uptr)addr, length, prot, flags, fd, + offset); ++#else ++#if SANITIZER_LOONGARCH64 ++ return internal_syscall(SYSCALL(mmap), (uptr)addr, length, prot, flags, fd, ++ offset); + #else + // mmap2 specifies file offset in 4096-byte units. + CHECK(IsAligned(offset, 4096)); + return internal_syscall(SYSCALL(mmap2), addr, length, prot, flags, fd, + offset / 4096); + #endif ++#endif + } + #endif // !SANITIZER_S390 + +@@ -252,6 +263,28 @@ static void stat64_to_stat(struct stat64 *in, struct stat *out) { + } + #endif + ++#if SANITIZER_LINUX && SANITIZER_LOONGARCH64 ++static void statx_to_stat(struct statx *in, struct stat *out) { ++ internal_memset(out, 0, sizeof(*out)); ++ out->st_dev = makedev(in->stx_dev_major, in->stx_dev_minor); ++ out->st_ino = in->stx_ino; ++ out->st_mode = in->stx_mode; ++ out->st_nlink = in->stx_nlink; ++ out->st_uid = in->stx_uid; ++ out->st_gid = in->stx_gid; ++ out->st_rdev = makedev(in->stx_rdev_major, in->stx_rdev_minor); ++ out->st_size = in->stx_size; ++ out->st_blksize = in->stx_blksize; ++ out->st_blocks = in->stx_blocks; ++ out->st_atime = in->stx_atime.tv_sec; ++ out->st_atim.tv_nsec = in->stx_atime.tv_nsec; ++ out->st_mtime = in->stx_mtime.tv_sec; ++ out->st_mtim.tv_nsec = in->stx_mtime.tv_nsec; ++ out->st_ctime = in->stx_ctime.tv_sec; ++ out->st_ctim.tv_nsec = in->stx_ctime.tv_nsec; ++} ++#endif ++ + #if defined(__mips64) + // Undefine compatibility macros from + // so that they would not clash with the kernel_stat +@@ -306,8 +339,16 @@ uptr internal_stat(const char *path, void *buf) { + #if SANITIZER_FREEBSD + return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf, 0); + #elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS ++#if SANITIZER_LINUX && SANITIZER_LOONGARCH64 ++ struct statx bufx; ++ int res = internal_syscall(SYSCALL(statx), AT_FDCWD, (uptr)path, ++ AT_NO_AUTOMOUNT, STATX_BASIC_STATS, (uptr)&bufx); ++ statx_to_stat(&bufx, (struct stat *)buf); ++ return res; ++#else + return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf, + 0); ++#endif + #elif SANITIZER_LINUX_USES_64BIT_SYSCALLS + # if defined(__mips64) + // For mips64, stat syscall fills buffer in the format of kernel_stat +@@ -331,8 +372,17 @@ uptr internal_lstat(const char *path, void *buf) { + return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf, + AT_SYMLINK_NOFOLLOW); + #elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS ++#if SANITIZER_LINUX && SANITIZER_LOONGARCH64 ++ struct statx bufx; ++ int res = internal_syscall(SYSCALL(statx), AT_FDCWD, (uptr)path, ++ AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT, ++ STATX_BASIC_STATS, (uptr)&bufx); ++ statx_to_stat(&bufx, (struct stat *)buf); ++ return res; ++#else + return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf, + AT_SYMLINK_NOFOLLOW); ++#endif + #elif SANITIZER_LINUX_USES_64BIT_SYSCALLS + # if SANITIZER_MIPS64 + // For mips64, lstat syscall fills buffer in the format of kernel_stat +@@ -359,6 +409,12 @@ uptr internal_fstat(fd_t fd, void *buf) { + int res = internal_syscall(SYSCALL(fstat), fd, &kbuf); + kernel_stat_to_stat(&kbuf, (struct stat *)buf); + return res; ++#elif SANITIZER_LINUX && SANITIZER_LOONGARCH64 ++ struct statx bufx; ++ int res = 
internal_syscall(SYSCALL(statx), fd, "", AT_EMPTY_PATH, ++ STATX_BASIC_STATS, (uptr)&bufx); ++ statx_to_stat(&bufx, (struct stat *)buf); ++ return res; + # else + return internal_syscall(SYSCALL(fstat), fd, (uptr)buf); + # endif +@@ -407,7 +463,7 @@ uptr internal_unlink(const char *path) { + } + + uptr internal_rename(const char *oldpath, const char *newpath) { +-#if defined(__riscv) ++#if defined(__riscv) || defined(__loongarch__) + return internal_syscall(SYSCALL(renameat2), AT_FDCWD, (uptr)oldpath, AT_FDCWD, + (uptr)newpath, 0); + #elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS +@@ -455,7 +511,11 @@ bool FileExists(const char *filename) { + return false; + struct stat st; + #if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS ++#if SANITIZER_LINUX && SANITIZER_LOONGARCH64 ++ if (internal_stat(filename, &st)) ++#else + if (internal_syscall(SYSCALL(newfstatat), AT_FDCWD, filename, &st, 0)) ++#endif + #else + if (internal_stat(filename, &st)) + #endif +@@ -687,7 +747,8 @@ void BlockingMutex::CheckLocked() { + // Not used + #else + struct linux_dirent { +-#if SANITIZER_X32 || defined(__aarch64__) || SANITIZER_RISCV64 ++#if SANITIZER_X32 || defined(__aarch64__) || SANITIZER_RISCV64 || \ ++ SANITIZER_LOONGARCH64 + u64 d_ino; + u64 d_off; + #else +@@ -695,9 +756,9 @@ struct linux_dirent { + unsigned long d_off; + #endif + unsigned short d_reclen; +-#if defined(__aarch64__) || SANITIZER_RISCV64 ++#if defined(__aarch64__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64 + unsigned char d_type; +-#endif ++# endif + char d_name[256]; + }; + #endif +@@ -1041,13 +1102,15 @@ uptr GetMaxVirtualAddress() { + return (1ULL << 38) - 1; + # elif defined(__mips64) + return (1ULL << 40) - 1; // 0x000000ffffffffffUL; +-# elif defined(__s390x__) ++#elif SANITIZER_LOONGARCH64 ++ return (1ULL << 47) - 1; // 0x00007fffffffffffUL; ++# elif defined(__s390x__) + return (1ULL << 53) - 1; // 0x001fffffffffffffUL; +-#elif defined(__sparc__) ++# elif defined(__sparc__) + return ~(uptr)0; +-# else ++# else + return (1ULL << 47) - 1; // 0x00007fffffffffffUL; +-# endif ++# endif + #else // SANITIZER_WORDSIZE == 32 + # if defined(__s390__) + return (1ULL << 31) - 1; // 0x7fffffff; +@@ -1380,7 +1443,63 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, + : "ra", "memory"); + return res; + } +-#elif defined(__aarch64__) ++# elif defined(__loongarch__) && SANITIZER_LINUX ++uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, ++ int *parent_tidptr, void *newtls, int *child_tidptr) { ++ long long res; ++ if (!fn || !child_stack) ++ return -EINVAL; ++ CHECK_EQ(0, (uptr)child_stack % 16); ++ child_stack = (char *)child_stack - 2 * sizeof(unsigned long long); ++ ((unsigned long long *)child_stack)[0] = (uptr)fn; ++ ((unsigned long long *)child_stack)[1] = (uptr)arg; ++ ++ register int (*__fn)(void *) __asm__("$a0") = fn; ++ register void *__stack __asm__("$a1") = child_stack; ++ register int __flags __asm__("$a2") = flags; ++ register void *__arg __asm__("$a3") = arg; ++ register int *__ptid __asm__("$a4") = parent_tidptr; ++ register void *__tls __asm__("$a5") = newtls; ++ register int *__ctid __asm__("$a6") = child_tidptr; ++ ++ __asm__ __volatile__( ++ /* $a0 = syscall($a7 = SYSCALL(clone), ++ * $a0 = flags, ++ * $a1 = child_stack, ++ * $a2 = parent_tidptr, ++ * $a3 = child_tyidptr, ++ * $a4 = new_tls) ++ */ ++ ++ /* Do the system call */ ++ "move $a0, $a2\n" /* flags */ ++ "move $a2, $a4\n" /* parent_tidptr */ ++ "move $a3, $a6\n" /* child_tidptr */ ++ "move $a4, $a5\n" /* tls */ ++ 
"addi.d $a7, $r0, %9\n" ++ "syscall 0\n" ++ ++ "bne $a0, $r0, 1f\n" ++ ++ /* In the child, now. Call "fn(arg)". */ ++ "ld.d $a6, $sp, 0\n" ++ "ld.d $a0, $sp, 8\n" ++ ++ "jirl $r1, $a6, 0\n" ++ ++ /* Call _exit($v0) */ ++ "addi.d $a7, $r0, %10\n" ++ "syscall 0\n" ++ ++ "1:\n" ++ "move %0, $a0\n" ++ : "=r"(res) ++ : "i"(-EINVAL), "r"(__fn), "r"(__stack), "r"(__flags), "r"(__arg), ++ "r"(__ptid), "r"(__tls), "r"(__ctid), "i"(__NR_clone), "i"(__NR_exit) ++ : "$r1", "memory"); ++ return res; ++} ++# elif defined(__aarch64__) + uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, + int *parent_tidptr, void *newtls, int *child_tidptr) { + long long res; +@@ -1431,12 +1550,12 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, + : "x30", "memory"); + return res; + } +-#elif defined(__powerpc64__) ++# elif defined(__powerpc64__) + uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, + int *parent_tidptr, void *newtls, int *child_tidptr) { + long long res; + // Stack frame structure. +-#if SANITIZER_PPC64V1 ++# if SANITIZER_PPC64V1 + // Back chain == 0 (SP + 112) + // Frame (112 bytes): + // Parameter save area (SP + 48), 8 doublewords +@@ -1446,20 +1565,20 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, + // LR save area (SP + 16) + // CR save area (SP + 8) + // Back chain (SP + 0) +-# define FRAME_SIZE 112 +-# define FRAME_TOC_SAVE_OFFSET 40 +-#elif SANITIZER_PPC64V2 ++# define FRAME_SIZE 112 ++# define FRAME_TOC_SAVE_OFFSET 40 ++# elif SANITIZER_PPC64V2 + // Back chain == 0 (SP + 32) + // Frame (32 bytes): + // TOC save area (SP + 24) + // LR save area (SP + 16) + // CR save area (SP + 8) + // Back chain (SP + 0) +-# define FRAME_SIZE 32 +-# define FRAME_TOC_SAVE_OFFSET 24 +-#else +-# error "Unsupported PPC64 ABI" +-#endif ++# define FRAME_SIZE 32 ++# define FRAME_TOC_SAVE_OFFSET 24 ++# else ++# error "Unsupported PPC64 ABI" ++# endif + if (!fn || !child_stack) + return -EINVAL; + CHECK_EQ(0, (uptr)child_stack % 16); +@@ -1472,75 +1591,65 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, + register void *__newtls __asm__("r8") = newtls; + register int *__ctidptr __asm__("r9") = child_tidptr; + +- __asm__ __volatile__( +- /* fn and arg are saved across the syscall */ +- "mr 28, %5\n\t" +- "mr 27, %8\n\t" +- +- /* syscall +- r0 == __NR_clone +- r3 == flags +- r4 == child_stack +- r5 == parent_tidptr +- r6 == newtls +- r7 == child_tidptr */ +- "mr 3, %7\n\t" +- "mr 5, %9\n\t" +- "mr 6, %10\n\t" +- "mr 7, %11\n\t" +- "li 0, %3\n\t" +- "sc\n\t" +- +- /* Test if syscall was successful */ +- "cmpdi cr1, 3, 0\n\t" +- "crandc cr1*4+eq, cr1*4+eq, cr0*4+so\n\t" +- "bne- cr1, 1f\n\t" +- +- /* Set up stack frame */ +- "li 29, 0\n\t" +- "stdu 29, -8(1)\n\t" +- "stdu 1, -%12(1)\n\t" +- /* Do the function call */ +- "std 2, %13(1)\n\t" +-#if SANITIZER_PPC64V1 +- "ld 0, 0(28)\n\t" +- "ld 2, 8(28)\n\t" +- "mtctr 0\n\t" +-#elif SANITIZER_PPC64V2 +- "mr 12, 28\n\t" +- "mtctr 12\n\t" +-#else +-# error "Unsupported PPC64 ABI" +-#endif +- "mr 3, 27\n\t" +- "bctrl\n\t" +- "ld 2, %13(1)\n\t" +- +- /* Call _exit(r3) */ +- "li 0, %4\n\t" +- "sc\n\t" +- +- /* Return to parent */ +- "1:\n\t" +- "mr %0, 3\n\t" +- : "=r" (res) +- : "0" (-1), +- "i" (EINVAL), +- "i" (__NR_clone), +- "i" (__NR_exit), +- "r" (__fn), +- "r" (__cstack), +- "r" (__flags), +- "r" (__arg), +- "r" (__ptidptr), +- "r" (__newtls), +- "r" (__ctidptr), +- "i" (FRAME_SIZE), +- "i" (FRAME_TOC_SAVE_OFFSET) +- 
: "cr0", "cr1", "memory", "ctr", "r0", "r27", "r28", "r29"); ++ __asm__ __volatile__( ++ /* fn and arg are saved across the syscall */ ++ "mr 28, %5\n\t" ++ "mr 27, %8\n\t" ++ ++ /* syscall ++ r0 == __NR_clone ++ r3 == flags ++ r4 == child_stack ++ r5 == parent_tidptr ++ r6 == newtls ++ r7 == child_tidptr */ ++ "mr 3, %7\n\t" ++ "mr 5, %9\n\t" ++ "mr 6, %10\n\t" ++ "mr 7, %11\n\t" ++ "li 0, %3\n\t" ++ "sc\n\t" ++ ++ /* Test if syscall was successful */ ++ "cmpdi cr1, 3, 0\n\t" ++ "crandc cr1*4+eq, cr1*4+eq, cr0*4+so\n\t" ++ "bne- cr1, 1f\n\t" ++ ++ /* Set up stack frame */ ++ "li 29, 0\n\t" ++ "stdu 29, -8(1)\n\t" ++ "stdu 1, -%12(1)\n\t" ++ /* Do the function call */ ++ "std 2, %13(1)\n\t" ++# if SANITIZER_PPC64V1 ++ "ld 0, 0(28)\n\t" ++ "ld 2, 8(28)\n\t" ++ "mtctr 0\n\t" ++# elif SANITIZER_PPC64V2 ++ "mr 12, 28\n\t" ++ "mtctr 12\n\t" ++# else ++# error "Unsupported PPC64 ABI" ++# endif ++ "mr 3, 27\n\t" ++ "bctrl\n\t" ++ "ld 2, %13(1)\n\t" ++ ++ /* Call _exit(r3) */ ++ "li 0, %4\n\t" ++ "sc\n\t" ++ ++ /* Return to parent */ ++ "1:\n\t" ++ "mr %0, 3\n\t" ++ : "=r"(res) ++ : "0"(-1), "i"(EINVAL), "i"(__NR_clone), "i"(__NR_exit), "r"(__fn), ++ "r"(__cstack), "r"(__flags), "r"(__arg), "r"(__ptidptr), "r"(__newtls), ++ "r"(__ctidptr), "i"(FRAME_SIZE), "i"(FRAME_TOC_SAVE_OFFSET) ++ : "cr0", "cr1", "memory", "ctr", "r0", "r27", "r28", "r29"); + return res; + } +-#elif defined(__i386__) && SANITIZER_LINUX ++# elif defined(__i386__) && SANITIZER_LINUX + uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, + int *parent_tidptr, void *newtls, int *child_tidptr) { + int res; +@@ -1553,59 +1662,56 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, + ((unsigned int *)child_stack)[2] = (uptr)fn; + ((unsigned int *)child_stack)[3] = (uptr)arg; + __asm__ __volatile__( +- /* %eax = syscall(%eax = SYSCALL(clone), +- * %ebx = flags, +- * %ecx = child_stack, +- * %edx = parent_tidptr, +- * %esi = new_tls, +- * %edi = child_tidptr) +- */ ++ /* %eax = syscall(%eax = SYSCALL(clone), ++ * %ebx = flags, ++ * %ecx = child_stack, ++ * %edx = parent_tidptr, ++ * %esi = new_tls, ++ * %edi = child_tidptr) ++ */ + +- /* Obtain flags */ +- "movl (%%ecx), %%ebx\n" +- /* Do the system call */ +- "pushl %%ebx\n" +- "pushl %%esi\n" +- "pushl %%edi\n" +- /* Remember the flag value. */ +- "movl %%ebx, (%%ecx)\n" +- "int $0x80\n" +- "popl %%edi\n" +- "popl %%esi\n" +- "popl %%ebx\n" +- +- /* if (%eax != 0) +- * return; +- */ +- +- "test %%eax,%%eax\n" +- "jnz 1f\n" +- +- /* terminate the stack frame */ +- "xorl %%ebp,%%ebp\n" +- /* Call FN. */ +- "call *%%ebx\n" +-#ifdef PIC +- "call here\n" +- "here:\n" +- "popl %%ebx\n" +- "addl $_GLOBAL_OFFSET_TABLE_+[.-here], %%ebx\n" +-#endif +- /* Call exit */ +- "movl %%eax, %%ebx\n" +- "movl %2, %%eax\n" +- "int $0x80\n" +- "1:\n" +- : "=a" (res) +- : "a"(SYSCALL(clone)), "i"(SYSCALL(exit)), +- "c"(child_stack), +- "d"(parent_tidptr), +- "S"(newtls), +- "D"(child_tidptr) +- : "memory"); ++ /* Obtain flags */ ++ "movl (%%ecx), %%ebx\n" ++ /* Do the system call */ ++ "pushl %%ebx\n" ++ "pushl %%esi\n" ++ "pushl %%edi\n" ++ /* Remember the flag value. */ ++ "movl %%ebx, (%%ecx)\n" ++ "int $0x80\n" ++ "popl %%edi\n" ++ "popl %%esi\n" ++ "popl %%ebx\n" ++ ++ /* if (%eax != 0) ++ * return; ++ */ ++ ++ "test %%eax,%%eax\n" ++ "jnz 1f\n" ++ ++ /* terminate the stack frame */ ++ "xorl %%ebp,%%ebp\n" ++ /* Call FN. 
*/ ++ "call *%%ebx\n" ++# ifdef PIC ++ "call here\n" ++ "here:\n" ++ "popl %%ebx\n" ++ "addl $_GLOBAL_OFFSET_TABLE_+[.-here], %%ebx\n" ++# endif ++ /* Call exit */ ++ "movl %%eax, %%ebx\n" ++ "movl %2, %%eax\n" ++ "int $0x80\n" ++ "1:\n" ++ : "=a"(res) ++ : "a"(SYSCALL(clone)), "i"(SYSCALL(exit)), "c"(child_stack), ++ "d"(parent_tidptr), "S"(newtls), "D"(child_tidptr) ++ : "memory"); + return res; + } +-#elif defined(__arm__) && SANITIZER_LINUX ++# elif defined(__arm__) && SANITIZER_LINUX + uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, + int *parent_tidptr, void *newtls, int *child_tidptr) { + unsigned int res; +@@ -1621,22 +1727,22 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, + register int *r4 __asm__("r4") = child_tidptr; + register int r7 __asm__("r7") = __NR_clone; + +-#if __ARM_ARCH > 4 || defined (__ARM_ARCH_4T__) +-# define ARCH_HAS_BX +-#endif +-#if __ARM_ARCH > 4 +-# define ARCH_HAS_BLX +-#endif +- +-#ifdef ARCH_HAS_BX +-# ifdef ARCH_HAS_BLX +-# define BLX(R) "blx " #R "\n" +-# else +-# define BLX(R) "mov lr, pc; bx " #R "\n" +-# endif +-#else +-# define BLX(R) "mov lr, pc; mov pc," #R "\n" +-#endif ++# if __ARM_ARCH > 4 || defined(__ARM_ARCH_4T__) ++# define ARCH_HAS_BX ++# endif ++# if __ARM_ARCH > 4 ++# define ARCH_HAS_BLX ++# endif ++ ++# ifdef ARCH_HAS_BX ++# ifdef ARCH_HAS_BLX ++# define BLX(R) "blx " # R "\n" ++# else ++# define BLX(R) "mov lr, pc; bx " # R "\n" ++# endif ++# else ++# define BLX(R) "mov lr, pc; mov pc," # R "\n" ++# endif + + __asm__ __volatile__( + /* %r0 = syscall(%r7 = SYSCALL(clone), +@@ -1671,9 +1777,9 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, + : "memory"); + return res; + } +-#endif // defined(__x86_64__) && SANITIZER_LINUX ++# endif // defined(__x86_64__) && SANITIZER_LINUX + +-#if SANITIZER_LINUX ++# if SANITIZER_LINUX + int internal_uname(struct utsname *buf) { + return internal_syscall(SYSCALL(uname), buf); + } +@@ -1875,36 +1981,43 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const { + #endif + } + return SignalContext::UNKNOWN; +-#elif defined(__arm__) ++# elif defined(__loongarch__) ++ u32 flags = ucontext->uc_mcontext.__flags; ++ if (flags & SC_ADDRERR_RD) ++ return SignalContext::READ; ++ if (flags & SC_ADDRERR_WR) ++ return SignalContext::WRITE; ++ return SignalContext::UNKNOWN; ++# elif defined(__arm__) + static const uptr FSR_WRITE = 1U << 11; + uptr fsr = ucontext->uc_mcontext.error_code; + return fsr & FSR_WRITE ? WRITE : READ; +-#elif defined(__aarch64__) ++# elif defined(__aarch64__) + static const u64 ESR_ELx_WNR = 1U << 6; + u64 esr; + if (!Aarch64GetESR(ucontext, &esr)) return UNKNOWN; + return esr & ESR_ELx_WNR ? WRITE : READ; +-#elif defined(__sparc__) ++# elif defined(__sparc__) + // Decode the instruction to determine the access type. + // From OpenSolaris $SRC/uts/sun4/os/trap.c (get_accesstype). +-#if SANITIZER_SOLARIS ++# if SANITIZER_SOLARIS + uptr pc = ucontext->uc_mcontext.gregs[REG_PC]; +-#else ++# else + // Historical BSDism here. + struct sigcontext *scontext = (struct sigcontext *)context; +-#if defined(__arch64__) ++# if defined(__arch64__) + uptr pc = scontext->sigc_regs.tpc; +-#else ++# else + uptr pc = scontext->si_regs.pc; +-#endif +-#endif ++# endif ++# endif + u32 instr = *(u32 *)pc; + return (instr >> 21) & 1 ? 
WRITE: READ; +-#elif defined(__riscv) ++# elif defined(__riscv) + unsigned long pc = ucontext->uc_mcontext.__gregs[REG_PC]; + unsigned faulty_instruction = *(uint16_t *)pc; + +-#if defined(__riscv_compressed) ++# if defined(__riscv_compressed) + if ((faulty_instruction & 0x3) != 0x3) { // it's a compressed instruction + // set op_bits to the instruction bits [1, 0, 15, 14, 13] + unsigned op_bits = +@@ -1912,38 +2025,38 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const { + unsigned rd = faulty_instruction & 0xF80; // bits 7-11, inclusive + switch (op_bits) { + case 0b10'010: // c.lwsp (rd != x0) +-#if __riscv_xlen == 64 ++# if __riscv_xlen == 64 + case 0b10'011: // c.ldsp (rd != x0) +-#endif ++# endif + return rd ? SignalContext::READ : SignalContext::UNKNOWN; + case 0b00'010: // c.lw +-#if __riscv_flen >= 32 && __riscv_xlen == 32 ++# if __riscv_flen >= 32 && __riscv_xlen == 32 + case 0b10'011: // c.flwsp +-#endif +-#if __riscv_flen >= 32 || __riscv_xlen == 64 ++# endif ++# if __riscv_flen >= 32 || __riscv_xlen == 64 + case 0b00'011: // c.flw / c.ld +-#endif +-#if __riscv_flen == 64 ++# endif ++# if __riscv_flen == 64 + case 0b00'001: // c.fld + case 0b10'001: // c.fldsp +-#endif ++# endif + return SignalContext::READ; + case 0b00'110: // c.sw + case 0b10'110: // c.swsp +-#if __riscv_flen >= 32 || __riscv_xlen == 64 ++# if __riscv_flen >= 32 || __riscv_xlen == 64 + case 0b00'111: // c.fsw / c.sd + case 0b10'111: // c.fswsp / c.sdsp +-#endif +-#if __riscv_flen == 64 ++# endif ++# if __riscv_flen == 64 + case 0b00'101: // c.fsd + case 0b10'101: // c.fsdsp +-#endif ++# endif + return SignalContext::WRITE; + default: + return SignalContext::UNKNOWN; + } + } +-#endif ++# endif + + unsigned opcode = faulty_instruction & 0x7f; // lower 7 bits + unsigned funct3 = (faulty_instruction >> 12) & 0x7; // bits 12-14, inclusive +@@ -1953,9 +2066,9 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const { + case 0b000: // lb + case 0b001: // lh + case 0b010: // lw +-#if __riscv_xlen == 64 ++# if __riscv_xlen == 64 + case 0b011: // ld +-#endif ++# endif + case 0b100: // lbu + case 0b101: // lhu + return SignalContext::READ; +@@ -1967,20 +2080,20 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const { + case 0b000: // sb + case 0b001: // sh + case 0b010: // sw +-#if __riscv_xlen == 64 ++# if __riscv_xlen == 64 + case 0b011: // sd +-#endif ++# endif + return SignalContext::WRITE; + default: + return SignalContext::UNKNOWN; + } +-#if __riscv_flen >= 32 ++# if __riscv_flen >= 32 + case 0b0000111: // floating-point loads + switch (funct3) { + case 0b010: // flw +-#if __riscv_flen == 64 ++# if __riscv_flen == 64 + case 0b011: // fld +-#endif ++# endif + return SignalContext::READ; + default: + return SignalContext::UNKNOWN; +@@ -1988,21 +2101,21 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const { + case 0b0100111: // floating-point stores + switch (funct3) { + case 0b010: // fsw +-#if __riscv_flen == 64 ++# if __riscv_flen == 64 + case 0b011: // fsd +-#endif ++# endif + return SignalContext::WRITE; + default: + return SignalContext::UNKNOWN; + } +-#endif ++# endif + default: + return SignalContext::UNKNOWN; + } +-#else ++# else + (void)ucontext; + return UNKNOWN; // FIXME: Implement. 
+-#endif ++# endif + } + + bool SignalContext::IsTrueFaultingAddress() const { +@@ -2109,23 +2222,28 @@ static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { + *pc = ucontext->uc_mcontext.pc; + *bp = ucontext->uc_mcontext.gregs[30]; + *sp = ucontext->uc_mcontext.gregs[29]; +-#elif defined(__s390__) ++#elif defined(__loongarch__) ++ ucontext_t *ucontext = (ucontext_t *)context; ++ *pc = ucontext->uc_mcontext.__pc; ++ *bp = ucontext->uc_mcontext.__gregs[22]; ++ *sp = ucontext->uc_mcontext.__gregs[3]; ++# elif defined(__s390__) + ucontext_t *ucontext = (ucontext_t*)context; +-# if defined(__s390x__) ++# if defined(__s390x__) + *pc = ucontext->uc_mcontext.psw.addr; +-# else ++# else + *pc = ucontext->uc_mcontext.psw.addr & 0x7fffffff; +-# endif ++# endif + *bp = ucontext->uc_mcontext.gregs[11]; + *sp = ucontext->uc_mcontext.gregs[15]; +-#elif defined(__riscv) ++# elif defined(__riscv) + ucontext_t *ucontext = (ucontext_t*)context; + *pc = ucontext->uc_mcontext.__gregs[REG_PC]; + *bp = ucontext->uc_mcontext.__gregs[REG_S0]; + *sp = ucontext->uc_mcontext.__gregs[REG_SP]; +-#else +-# error "Unsupported arch" +-#endif ++# else ++# error "Unsupported arch" ++# endif + } + + void SignalContext::InitPcSpBp() { GetPcSpBp(context, &pc, &sp, &bp); } +diff --git a/compiler-rt.orig/lib/sanitizer_common/sanitizer_linux.h b/compiler-rt.new/lib/sanitizer_common/sanitizer_linux.h +index 24902d1..1ead3a1 100644 +--- a/compiler-rt.orig/lib/sanitizer_common/sanitizer_linux.h ++++ b/compiler-rt.new/lib/sanitizer_common/sanitizer_linux.h +@@ -61,7 +61,7 @@ int internal_sigaction_norestorer(int signum, const void *act, void *oldact); + void internal_sigdelset(__sanitizer_sigset_t *set, int signum); + #if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \ + defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \ +- defined(__arm__) || SANITIZER_RISCV64 ++ defined(__arm__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64 + uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, + int *parent_tidptr, void *newtls, int *child_tidptr); + #endif +diff --git a/compiler-rt.orig/lib/sanitizer_common/sanitizer_linux_libcdep.cpp b/compiler-rt.new/lib/sanitizer_common/sanitizer_linux_libcdep.cpp +index f20b900..6a53b09 100644 +--- a/compiler-rt.orig/lib/sanitizer_common/sanitizer_linux_libcdep.cpp ++++ b/compiler-rt.new/lib/sanitizer_common/sanitizer_linux_libcdep.cpp +@@ -259,7 +259,7 @@ void InitTlsSize() { } + + #if (defined(__x86_64__) || defined(__i386__) || defined(__mips__) || \ + defined(__aarch64__) || defined(__powerpc64__) || defined(__s390__) || \ +- defined(__arm__) || SANITIZER_RISCV64) && \ ++ defined(__arm__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64) && \ + SANITIZER_LINUX && !SANITIZER_ANDROID + // sizeof(struct pthread) from glibc. + static atomic_uintptr_t thread_descriptor_size; +@@ -301,7 +301,10 @@ uptr ThreadDescriptorSize() { + #elif defined(__mips__) + // TODO(sagarthakur): add more values as per different glibc versions. + val = FIRST_32_SECOND_64(1152, 1776); +-#elif SANITIZER_RISCV64 ++#elif SANITIZER_LOONGARCH64 ++ // val = 1856; ++ val = 1984; // get this from glibc 2.34 ++# elif SANITIZER_RISCV64 + int major; + int minor; + int patch; +@@ -316,10 +319,10 @@ uptr ThreadDescriptorSize() { + val = 1936; // tested against glibc 2.32 + } + +-#elif defined(__aarch64__) ++# elif defined(__aarch64__) + // The sizeof (struct pthread) is the same from GLIBC 2.17 to 2.22. 
+ val = 1776; +-#elif defined(__powerpc64__) ++# elif defined(__powerpc64__) + val = 1776; // from glibc.ppc64le 2.20-8.fc21 + #elif defined(__s390__) + val = FIRST_32_SECOND_64(1152, 1776); // valid for glibc 2.22 +@@ -336,17 +339,20 @@ uptr ThreadSelfOffset() { + return kThreadSelfOffset; + } + +-#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64 ++#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64 || \ ++ SANITIZER_LOONGARCH64 + // TlsPreTcbSize includes size of struct pthread_descr and size of tcb + // head structure. It lies before the static tls blocks. + static uptr TlsPreTcbSize() { + #if defined(__mips__) + const uptr kTcbHead = 16; // sizeof (tcbhead_t) +-#elif defined(__powerpc64__) ++#elif defined(__loongarch__) ++ const uptr kTcbHead = 16; // sizeof (tcbhead_t) ++# elif defined(__powerpc64__) + const uptr kTcbHead = 88; // sizeof (tcbhead_t) +-#elif SANITIZER_RISCV64 ++# elif SANITIZER_RISCV64 + const uptr kTcbHead = 16; // sizeof (tcbhead_t) +-#endif ++# endif + const uptr kTlsAlign = 16; + const uptr kTlsPreTcbSize = + RoundUpTo(ThreadDescriptorSize() + kTcbHead, kTlsAlign); +@@ -379,6 +385,10 @@ uptr ThreadSelf() { + // https://github.com/riscv/riscv-elf-psabi-doc/issues/53 + uptr thread_pointer = reinterpret_cast(__builtin_thread_pointer()); + descr_addr = thread_pointer - TlsPreTcbSize(); ++#elif defined(__loongarch__) ++ uptr thread_pointer; ++ asm("or %0,$r2,$r0" : "=r"(thread_pointer)); ++ descr_addr = thread_pointer - TlsPreTcbSize(); + #elif defined(__s390__) + descr_addr = reinterpret_cast(__builtin_thread_pointer()); + #elif defined(__powerpc64__) +@@ -473,7 +483,7 @@ static void GetTls(uptr *addr, uptr *size) { + *addr -= *size; + *addr += ThreadDescriptorSize(); + #elif defined(__mips__) || defined(__aarch64__) || defined(__powerpc64__) || \ +- defined(__arm__) || SANITIZER_RISCV64 ++ defined(__arm__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64 + *addr = ThreadSelf(); + *size = GetTlsSize(); + #else +@@ -525,7 +535,8 @@ uptr GetTlsSize() { + GetTls(&addr, &size); + return size; + #elif SANITIZER_GLIBC +-#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64 ++#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64 || \ ++ SANITIZER_LOONGARCH64 + return RoundUpTo(g_tls_size + TlsPreTcbSize(), 16); + #else + return g_tls_size; +diff --git a/compiler-rt.orig/lib/sanitizer_common/sanitizer_platform.h b/compiler-rt.new/lib/sanitizer_common/sanitizer_platform.h +index 96c01ba..43a9d6d 100644 +--- a/compiler-rt.orig/lib/sanitizer_common/sanitizer_platform.h ++++ b/compiler-rt.new/lib/sanitizer_common/sanitizer_platform.h +@@ -232,6 +232,17 @@ + #define SANITIZER_RISCV64 0 + #endif + ++#if defined(__loongarch__) ++#define SANITIZER_LOONGARCH 1 ++#if defined(__loongarch64) ++#define SANITIZER_LOONGARCH32 0 ++#define SANITIZER_LOONGARCH64 1 ++#else ++#define SANITIZER_LOONGARCH32 1 ++#define SANITIZER_LOONGARCH64 0 ++#endif ++#endif ++ + // By default we allow to use SizeClassAllocator64 on 64-bit platform. + // But in some cases (e.g. AArch64's 39-bit address space) SizeClassAllocator64 + // does not work well and we need to fallback to SizeClassAllocator32. 
+@@ -240,22 +251,22 @@ + #ifndef SANITIZER_CAN_USE_ALLOCATOR64 + # if (SANITIZER_ANDROID && defined(__aarch64__)) || SANITIZER_FUCHSIA + # define SANITIZER_CAN_USE_ALLOCATOR64 1 +-# elif defined(__mips64) || defined(__aarch64__) +-# define SANITIZER_CAN_USE_ALLOCATOR64 0 +-# else +-# define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64) +-# endif ++#elif defined(__mips64) || defined(__aarch64__) ++# define SANITIZER_CAN_USE_ALLOCATOR64 0 ++# else ++# define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64) ++# endif + #endif + + // The range of addresses which can be returned my mmap. + // FIXME: this value should be different on different platforms. Larger values + // will still work but will consume more memory for TwoLevelByteMap. + #if defined(__mips__) +-#if SANITIZER_GO && defined(__mips64) +-#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47) +-#else +-# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40) +-#endif ++# if SANITIZER_GO && defined(__mips64) ++# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47) ++# else ++# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40) ++# endif + #elif SANITIZER_RISCV64 + #define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 38) + #elif defined(__aarch64__) +@@ -284,11 +295,12 @@ + // mandated by the upstream linux community for all new ports. Other ports + // may still use legacy syscalls. + #ifndef SANITIZER_USES_CANONICAL_LINUX_SYSCALLS +-# if (defined(__aarch64__) || defined(__riscv)) && SANITIZER_LINUX +-# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 1 +-# else +-# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 0 +-# endif ++#if (defined(__aarch64__) || defined(__riscv) || defined(__loongarch__)) && \ ++ SANITIZER_LINUX ++# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 1 ++# else ++# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 0 ++# endif + #endif + + // udi16 syscalls can only be used when the following conditions are +diff --git a/compiler-rt.orig/lib/sanitizer_common/sanitizer_platform_interceptors.h b/compiler-rt.new/lib/sanitizer_common/sanitizer_platform_interceptors.h +index 068fc98..643a8b6 100644 +--- a/compiler-rt.orig/lib/sanitizer_common/sanitizer_platform_interceptors.h ++++ b/compiler-rt.new/lib/sanitizer_common/sanitizer_platform_interceptors.h +@@ -274,8 +274,8 @@ + #if SI_LINUX_NOT_ANDROID && \ + (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ + defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \ +- defined(__s390__) || SANITIZER_RISCV64) +-#define SANITIZER_INTERCEPT_PTRACE 1 ++ defined(__s390__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64) ++# define SANITIZER_INTERCEPT_PTRACE 1 + #else + #define SANITIZER_INTERCEPT_PTRACE 0 + #endif +@@ -486,7 +486,8 @@ + (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && SI_NOT_RTEMS) + #define SANITIZER_INTERCEPT___LIBC_MEMALIGN SI_GLIBC + #define SANITIZER_INTERCEPT_PVALLOC (SI_GLIBC || SI_ANDROID) +-#define SANITIZER_INTERCEPT_CFREE SI_GLIBC ++#define SANITIZER_INTERCEPT_CFREE \ ++ (SI_GLIBC && !SANITIZER_LOONGARCH) + #define SANITIZER_INTERCEPT_REALLOCARRAY SI_POSIX + #define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC && SI_NOT_RTEMS) + #define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC && !SI_NETBSD) +diff --git a/compiler-rt.orig/lib/sanitizer_common/sanitizer_platform_limits_linux.cpp b/compiler-rt.new/lib/sanitizer_common/sanitizer_platform_limits_linux.cpp +index c51327e..c54b960 100644 +--- 
a/compiler-rt.orig/lib/sanitizer_common/sanitizer_platform_limits_linux.cpp ++++ b/compiler-rt.new/lib/sanitizer_common/sanitizer_platform_limits_linux.cpp +@@ -63,9 +63,9 @@ namespace __sanitizer { + #endif + } // namespace __sanitizer + +-#if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__aarch64__)\ +- && !defined(__mips__) && !defined(__s390__)\ +- && !defined(__sparc__) && !defined(__riscv) ++# if !defined(__powerpc64__) && !defined(__x86_64__) && \ ++ !defined(__aarch64__) && !defined(__mips__) && !defined(__s390__) && \ ++ !defined(__sparc__) && !defined(__riscv) && !defined(__loongarch__) + COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat)); + #endif + +diff --git a/compiler-rt.orig/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp b/compiler-rt.new/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp +index 7abaeb8..be80248 100644 +--- a/compiler-rt.orig/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp ++++ b/compiler-rt.new/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp +@@ -92,9 +92,9 @@ + # include + # include + #if defined(__mips64) || defined(__aarch64__) || defined(__arm__) || \ +- SANITIZER_RISCV64 +-# include +-# ifdef __arm__ ++ SANITIZER_RISCV64 || SANITIZER_LOONGARCH64 ++# include ++# ifdef __arm__ + typedef struct user_fpregs elf_fpregset_t; + # define ARM_VFPREGS_SIZE_ASAN (32 * 8 /*fpregs*/ + 4 /*fpscr*/) + # if !defined(ARM_VFPREGS_SIZE) +@@ -139,20 +139,20 @@ typedef struct user_fpregs elf_fpregset_t; + #include + #include + #include +-#if defined(__mips64) +-# include +-#endif +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include ++# if defined(__mips64) || defined(__loongarch64) ++# include ++# endif ++# include ++# include ++# include ++# include ++# include ++# include ++# include ++# include ++# include ++# include ++# include + #endif // SANITIZER_ANDROID + + #include +@@ -240,13 +240,17 @@ namespace __sanitizer { + #if defined(__aarch64__) || defined(__s390x__) || defined(__mips64) || \ + defined(__powerpc64__) || defined(__arch64__) || defined(__sparcv9) || \ + defined(__x86_64__) || SANITIZER_RISCV64 +-#define SIZEOF_STRUCT_USTAT 32 +-#elif defined(__arm__) || defined(__i386__) || defined(__mips__) \ +- || defined(__powerpc__) || defined(__s390__) || defined(__sparc__) +-#define SIZEOF_STRUCT_USTAT 20 +-#else +-#error Unknown size of struct ustat +-#endif ++# define SIZEOF_STRUCT_USTAT 32 ++#elif defined(__arm__) || defined(__i386__) || defined(__mips__) || \ ++ defined(__powerpc__) || defined(__s390__) || defined(__sparc__) ++# define SIZEOF_STRUCT_USTAT 20 ++#elif defined(__loongarch__) ++ // Not used. The minimum Glibc version available for LoongArch is 2.36 ++ // so ustat() wrapper is already gone. 
++#define SIZEOF_STRUCT_USTAT 0 ++# else ++# error Unknown size of struct ustat ++# endif + unsigned struct_ustat_sz = SIZEOF_STRUCT_USTAT; + unsigned struct_rlimit64_sz = sizeof(struct rlimit64); + unsigned struct_statvfs64_sz = sizeof(struct statvfs64); +@@ -314,8 +318,8 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr); + #if SANITIZER_LINUX && !SANITIZER_ANDROID && \ + (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ + defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \ +- defined(__s390__) || SANITIZER_RISCV64) +-#if defined(__mips64) || defined(__powerpc64__) || defined(__arm__) ++ defined(__s390__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64) ++# if defined(__mips64) || defined(__powerpc64__) || defined(__arm__) + unsigned struct_user_regs_struct_sz = sizeof(struct pt_regs); + unsigned struct_user_fpregs_struct_sz = sizeof(elf_fpregset_t); + #elif SANITIZER_RISCV64 +@@ -324,22 +328,25 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr); + #elif defined(__aarch64__) + unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs); + unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpsimd_state); +-#elif defined(__s390__) ++#elif SANITIZER_LOONGARCH64 ++ unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs); ++ unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fp_state); ++# elif defined(__s390__) + unsigned struct_user_regs_struct_sz = sizeof(struct _user_regs_struct); + unsigned struct_user_fpregs_struct_sz = sizeof(struct _user_fpregs_struct); +-#else ++# else + unsigned struct_user_regs_struct_sz = sizeof(struct user_regs_struct); + unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpregs_struct); +-#endif // __mips64 || __powerpc64__ || __aarch64__ ++# endif // __mips64 || __powerpc64__ || __aarch64__ || __loongarch64 + #if defined(__x86_64) || defined(__mips64) || defined(__powerpc64__) || \ + defined(__aarch64__) || defined(__arm__) || defined(__s390__) || \ +- SANITIZER_RISCV64 ++ SANITIZER_RISCV64 || SANITIZER_LOONGARCH64 + unsigned struct_user_fpxregs_struct_sz = 0; + #else + unsigned struct_user_fpxregs_struct_sz = sizeof(struct user_fpxregs_struct); + #endif // __x86_64 || __mips64 || __powerpc64__ || __aarch64__ || __arm__ +-// || __s390__ +-#ifdef __arm__ ++ // || __s390__ || __loongarch64 ++# ifdef __arm__ + unsigned struct_user_vfpregs_struct_sz = ARM_VFPREGS_SIZE; + #else + unsigned struct_user_vfpregs_struct_sz = 0; +diff --git a/compiler-rt.orig/lib/sanitizer_common/sanitizer_platform_limits_posix.h b/compiler-rt.new/lib/sanitizer_common/sanitizer_platform_limits_posix.h +index 8a156b7..a7c745e 100644 +--- a/compiler-rt.orig/lib/sanitizer_common/sanitizer_platform_limits_posix.h ++++ b/compiler-rt.new/lib/sanitizer_common/sanitizer_platform_limits_posix.h +@@ -85,24 +85,27 @@ const unsigned struct_kernel_stat_sz = SANITIZER_ANDROID + ? 
FIRST_32_SECOND_64(104, 128) + : FIRST_32_SECOND_64(160, 216); + const unsigned struct_kernel_stat64_sz = 104; +-#elif defined(__s390__) && !defined(__s390x__) ++# elif defined(__loongarch__) ++const unsigned struct_kernel_stat_sz = 128; ++const unsigned struct_kernel_stat64_sz = 0; // LoongArch does not use stat64 ++# elif defined(__s390__) && !defined(__s390x__) + const unsigned struct_kernel_stat_sz = 64; + const unsigned struct_kernel_stat64_sz = 104; +-#elif defined(__s390x__) ++# elif defined(__s390x__) + const unsigned struct_kernel_stat_sz = 144; + const unsigned struct_kernel_stat64_sz = 0; +-#elif defined(__sparc__) && defined(__arch64__) ++# elif defined(__sparc__) && defined(__arch64__) + const unsigned struct___old_kernel_stat_sz = 0; + const unsigned struct_kernel_stat_sz = 104; + const unsigned struct_kernel_stat64_sz = 144; +-#elif defined(__sparc__) && !defined(__arch64__) ++# elif defined(__sparc__) && !defined(__arch64__) + const unsigned struct___old_kernel_stat_sz = 0; + const unsigned struct_kernel_stat_sz = 64; + const unsigned struct_kernel_stat64_sz = 104; +-#elif SANITIZER_RISCV64 ++# elif SANITIZER_RISCV64 + const unsigned struct_kernel_stat_sz = 128; + const unsigned struct_kernel_stat64_sz = 0; // RISCV64 does not use stat64 +-#endif ++# endif + struct __sanitizer_perf_event_attr { + unsigned type; + unsigned size; +@@ -122,7 +125,7 @@ const unsigned struct_kexec_segment_sz = 4 * sizeof(unsigned long); + + #if SANITIZER_LINUX + +-#if defined(__powerpc64__) || defined(__s390__) ++#if defined(__powerpc64__) || defined(__s390__) || defined(__loongarch__) + const unsigned struct___old_kernel_stat_sz = 0; + #elif !defined(__sparc__) + const unsigned struct___old_kernel_stat_sz = 32; +@@ -803,10 +806,10 @@ typedef void __sanitizer_FILE; + # define SANITIZER_HAS_STRUCT_FILE 0 + #endif + +-#if SANITIZER_LINUX && !SANITIZER_ANDROID && \ +- (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ +- defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \ +- defined(__s390__) || SANITIZER_RISCV64) ++# if SANITIZER_LINUX && !SANITIZER_ANDROID && \ ++ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ ++ defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \ ++ defined(__s390__) || SANITIZER_RISCV64 || defined(__loongarch64)) + extern unsigned struct_user_regs_struct_sz; + extern unsigned struct_user_fpregs_struct_sz; + extern unsigned struct_user_fpxregs_struct_sz; +diff --git a/compiler-rt.orig/lib/sanitizer_common/sanitizer_ring_buffer.h b/compiler-rt.new/lib/sanitizer_common/sanitizer_ring_buffer.h +index 2a46e93..a154d4a 100644 +--- a/compiler-rt.orig/lib/sanitizer_common/sanitizer_ring_buffer.h ++++ b/compiler-rt.new/lib/sanitizer_common/sanitizer_ring_buffer.h +@@ -84,18 +84,21 @@ template + class CompactRingBuffer { + // Top byte of long_ stores the buffer size in pages. + // Lower bytes store the address of the next buffer element. 
+- static constexpr int kPageSizeBits = 12; + static constexpr int kSizeShift = 56; + static constexpr uptr kNextMask = (1ULL << kSizeShift) - 1; + +- uptr GetStorageSize() const { return (long_ >> kSizeShift) << kPageSizeBits; } ++ uptr GetStorageSize() const { ++ unsigned kPageSizeBits = Log2(GetPageSizeCached()); ++ return (long_ >> kSizeShift) << kPageSizeBits; ++ } + + void Init(void *storage, uptr size) { ++ unsigned kPageSizeBits = Log2(GetPageSizeCached()); + CHECK_EQ(sizeof(CompactRingBuffer), sizeof(void *)); + CHECK(IsPowerOfTwo(size)); + CHECK_GE(size, 1 << kPageSizeBits); + CHECK_LE(size, 128 << kPageSizeBits); +- CHECK_EQ(size % 4096, 0); ++ CHECK_EQ(size % GetPageSizeCached(), 0); + CHECK_EQ(size % sizeof(T), 0); + CHECK_EQ((uptr)storage % (size * 2), 0); + long_ = (uptr)storage | ((size >> kPageSizeBits) << kSizeShift); +diff --git a/compiler-rt.orig/lib/sanitizer_common/sanitizer_stacktrace.cpp b/compiler-rt.new/lib/sanitizer_common/sanitizer_stacktrace.cpp +index b0487d8..451c966 100644 +--- a/compiler-rt.orig/lib/sanitizer_common/sanitizer_stacktrace.cpp ++++ b/compiler-rt.new/lib/sanitizer_common/sanitizer_stacktrace.cpp +@@ -21,7 +21,8 @@ namespace __sanitizer { + uptr StackTrace::GetNextInstructionPc(uptr pc) { + #if defined(__sparc__) || defined(__mips__) + return pc + 8; +-#elif defined(__powerpc__) || defined(__arm__) || defined(__aarch64__) ++#elif defined(__powerpc__) || defined(__arm__) || defined(__aarch64__) || \ ++ defined(__loongarch__) + return pc + 4; + #elif SANITIZER_RISCV64 + // Current check order is 4 -> 2 -> 6 -> 8 +@@ -118,7 +119,7 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top, + uhwptr pc1 = caller_frame[2]; + #elif defined(__s390__) + uhwptr pc1 = frame[14]; +-#elif defined(__riscv) ++#elif defined(__riscv) || defined(__loongarch__) + // frame[-1] contains the return address + uhwptr pc1 = frame[-1]; + #else +@@ -133,7 +134,7 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top, + trace_buffer[size++] = (uptr) pc1; + } + bottom = (uptr)frame; +-#if defined(__riscv) ++#if defined(__riscv) || defined(__loongarch__) + // frame[-2] contain fp of the previous frame + uptr new_bp = (uptr)frame[-2]; + #else +diff --git a/compiler-rt.orig/lib/sanitizer_common/sanitizer_stacktrace.h b/compiler-rt.new/lib/sanitizer_common/sanitizer_stacktrace.h +index 15616f8..e605c7e 100644 +--- a/compiler-rt.orig/lib/sanitizer_common/sanitizer_stacktrace.h ++++ b/compiler-rt.new/lib/sanitizer_common/sanitizer_stacktrace.h +@@ -21,8 +21,8 @@ struct BufferedStackTrace; + + static const u32 kStackTraceMax = 256; + +-#if SANITIZER_LINUX && defined(__mips__) +-# define SANITIZER_CAN_FAST_UNWIND 0 ++#if (SANITIZER_LINUX && defined(__mips__)) ++# define SANITIZER_CAN_FAST_UNWIND 0 + #elif SANITIZER_WINDOWS + # define SANITIZER_CAN_FAST_UNWIND 0 + #else +@@ -77,7 +77,8 @@ uptr StackTrace::GetPreviousInstructionPc(uptr pc) { + // so we return (pc-2) in that case in order to be safe. + // For A32 mode we return (pc-4) because all instructions are 32 bit long. + return (pc - 3) & (~1); +-#elif defined(__powerpc__) || defined(__powerpc64__) || defined(__aarch64__) ++#elif defined(__powerpc__) || defined(__powerpc64__) || \ ++ defined(__aarch64__) || defined(__loongarch__) + // PCs are always 4 byte aligned. 
+ return pc - 4; + #elif defined(__sparc__) || defined(__mips__) +diff --git a/compiler-rt.orig/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp b/compiler-rt.new/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp +index 53cfddc..9e277b4 100644 +--- a/compiler-rt.orig/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp ++++ b/compiler-rt.new/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp +@@ -16,45 +16,48 @@ + #if SANITIZER_LINUX && \ + (defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \ + defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \ +- defined(__arm__) || SANITIZER_RISCV64) +- +-#include "sanitizer_stoptheworld.h" +- +-#include "sanitizer_platform_limits_posix.h" +-#include "sanitizer_atomic.h" +- +-#include +-#include // for CLONE_* definitions +-#include +-#include // for PR_* definitions +-#include // for PTRACE_* definitions +-#include // for pid_t +-#include // for iovec +-#include // for NT_PRSTATUS +-#if (defined(__aarch64__) || SANITIZER_RISCV64) && !SANITIZER_ANDROID ++ defined(__arm__) || SANITIZER_RISCV64 || defined(__loongarch__)) ++ ++# include "sanitizer_atomic.h" ++# include "sanitizer_platform_limits_posix.h" ++# include "sanitizer_stoptheworld.h" ++ ++# if defined(__loongarch__) ++# include ++# endif ++ ++# include // for NT_PRSTATUS ++# include ++# include // for CLONE_* definitions ++# include ++# include // for PR_* definitions ++# include // for PTRACE_* definitions ++# include // for pid_t ++# include // for iovec ++# if (defined(__aarch64__) || SANITIZER_RISCV64) && !SANITIZER_ANDROID + // GLIBC 2.20+ sys/user does not include asm/ptrace.h + # include + #endif + #include // for user_regs_struct +-#if SANITIZER_ANDROID && SANITIZER_MIPS +-# include // for mips SP register in sys/user.h +-#endif +-#include // for signal-related stuff +- +-#ifdef sa_handler +-# undef sa_handler +-#endif +- +-#ifdef sa_sigaction +-# undef sa_sigaction +-#endif +- +-#include "sanitizer_common.h" +-#include "sanitizer_flags.h" +-#include "sanitizer_libc.h" +-#include "sanitizer_linux.h" +-#include "sanitizer_mutex.h" +-#include "sanitizer_placement_new.h" ++# if (SANITIZER_ANDROID && SANITIZER_MIPS) || SANITIZER_LOONGARCH ++# include // for mips SP register in sys/user.h ++# endif ++# include // for signal-related stuff ++ ++# ifdef sa_handler ++# undef sa_handler ++# endif ++ ++# ifdef sa_sigaction ++# undef sa_sigaction ++# endif ++ ++# include "sanitizer_common.h" ++# include "sanitizer_flags.h" ++# include "sanitizer_libc.h" ++# include "sanitizer_linux.h" ++# include "sanitizer_mutex.h" ++# include "sanitizer_placement_new.h" + + // Sufficiently old kernel headers don't provide this value, but we can still + // call prctl with it. 
If the runtime kernel is new enough, the prctl call will +@@ -508,29 +511,38 @@ typedef struct user regs_struct; + # define REG_SP regs[EF_REG29] + # endif + +-#elif defined(__aarch64__) ++# elif defined(__loongarch__) ++typedef struct user_regs_struct regs_struct; ++static constexpr uptr kExtraRegs[] = {0}; ++# define ARCH_IOVEC_FOR_GETREGSET ++ ++# if SANITIZER_LOONGARCH ++#define REG_SP regs[3] ++# endif ++ ++# elif defined(__aarch64__) + typedef struct user_pt_regs regs_struct; +-#define REG_SP sp ++# define REG_SP sp + static constexpr uptr kExtraRegs[] = {0}; +-#define ARCH_IOVEC_FOR_GETREGSET ++# define ARCH_IOVEC_FOR_GETREGSET + +-#elif SANITIZER_RISCV64 ++# elif SANITIZER_RISCV64 + typedef struct user_regs_struct regs_struct; + // sys/ucontext.h already defines REG_SP as 2. Undefine it first. +-#undef REG_SP +-#define REG_SP sp ++# undef REG_SP ++# define REG_SP sp + static constexpr uptr kExtraRegs[] = {0}; +-#define ARCH_IOVEC_FOR_GETREGSET ++# define ARCH_IOVEC_FOR_GETREGSET + +-#elif defined(__s390__) ++# elif defined(__s390__) + typedef _user_regs_struct regs_struct; +-#define REG_SP gprs[15] ++# define REG_SP gprs[15] + static constexpr uptr kExtraRegs[] = {0}; +-#define ARCH_IOVEC_FOR_GETREGSET ++# define ARCH_IOVEC_FOR_GETREGSET + +-#else +-#error "Unsupported architecture" +-#endif // SANITIZER_ANDROID && defined(__arm__) ++# else ++# error "Unsupported architecture" ++# endif // SANITIZER_ANDROID && defined(__arm__) + + tid_t SuspendedThreadsListLinux::GetThreadID(uptr index) const { + CHECK_LT(index, thread_ids_.size()); +diff --git a/compiler-rt.orig/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp b/compiler-rt.new/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp +index 710da4c..ea9aae0 100644 +--- a/compiler-rt.orig/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp ++++ b/compiler-rt.new/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp +@@ -263,6 +263,8 @@ class LLVMSymbolizerProcess final : public SymbolizerProcess { + const char *const kSymbolizerArch = "--default-arch=riscv64"; + #elif defined(__aarch64__) + const char* const kSymbolizerArch = "--default-arch=arm64"; ++#elif SANITIZER_LOONGARCH64 ++ const char *const kSymbolizerArch = "--default-arch=loongarch64"; + #elif defined(__arm__) + const char* const kSymbolizerArch = "--default-arch=arm"; + #elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +diff --git a/compiler-rt.orig/lib/sanitizer_common/sanitizer_tls_get_addr.cpp b/compiler-rt.new/lib/sanitizer_common/sanitizer_tls_get_addr.cpp +index 1f664b6..25d3305 100644 +--- a/compiler-rt.orig/lib/sanitizer_common/sanitizer_tls_get_addr.cpp ++++ b/compiler-rt.new/lib/sanitizer_common/sanitizer_tls_get_addr.cpp +@@ -98,14 +98,16 @@ void DTLS_Destroy() { + // "Dynamic thread vector pointers point 0x8000 past the start of each + // TLS block." (sysdeps//dl-tls.h) + static const uptr kDtvOffset = 0x8000; +-#elif defined(__riscv) ++# elif defined(__loongarch__) ++static const uptr kDtvOffset = 0; ++# elif defined(__riscv) + // This is glibc's TLS_DTV_OFFSET: + // "Dynamic thread vector pointers point 0x800 past the start of each + // TLS block." 
(sysdeps/riscv/dl-tls.h) + static const uptr kDtvOffset = 0x800; +-#else ++# else + static const uptr kDtvOffset = 0; +-#endif ++# endif + + DTLS::DTV *DTLS_on_tls_get_addr(void *arg_void, void *res, + uptr static_tls_begin, uptr static_tls_end) { +diff --git a/compiler-rt.orig/lib/sanitizer_common/tests/CMakeLists.txt b/compiler-rt.new/lib/sanitizer_common/tests/CMakeLists.txt +index abd73ca..a68058b 100644 +--- a/compiler-rt.orig/lib/sanitizer_common/tests/CMakeLists.txt ++++ b/compiler-rt.new/lib/sanitizer_common/tests/CMakeLists.txt +@@ -3,7 +3,7 @@ include(CompilerRTCompile) + clang_compiler_add_cxx_check() + + # FIXME: use SANITIZER_COMMON_SUPPORTED_ARCH here +-filter_available_targets(SANITIZER_UNITTEST_SUPPORTED_ARCH x86_64 i386 mips64 mips64el riscv64) ++filter_available_targets(SANITIZER_UNITTEST_SUPPORTED_ARCH x86_64 i386 mips64 mips64el riscv64 loongarch64) + if(APPLE) + darwin_filter_host_archs(SANITIZER_UNITTEST_SUPPORTED_ARCH SANITIZER_UNITTEST_SUPPORTED_ARCH) + endif() +diff --git a/compiler-rt.orig/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp b/compiler-rt.new/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp +index 26593c0..7a5a32d 100644 +--- a/compiler-rt.orig/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp ++++ b/compiler-rt.new/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp +@@ -146,9 +146,11 @@ static const u64 kAddressSpaceSize = 1ULL << 39; + static const u64 kAddressSpaceSize = 1ULL << 53; + #elif defined(__s390__) + static const u64 kAddressSpaceSize = 1ULL << 31; +-#else ++#elif defined(__loongarch__) ++static const u64 kAddressSpaceSize = 1ULL << 47; ++# else + static const u64 kAddressSpaceSize = 1ULL << 32; +-#endif ++# endif + + static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24); + +diff --git a/compiler-rt.orig/lib/sanitizer_common/tests/sanitizer_ring_buffer_test.cpp b/compiler-rt.new/lib/sanitizer_common/tests/sanitizer_ring_buffer_test.cpp +index 91ec2f9..cbaefe1 100644 +--- a/compiler-rt.orig/lib/sanitizer_common/tests/sanitizer_ring_buffer_test.cpp ++++ b/compiler-rt.new/lib/sanitizer_common/tests/sanitizer_ring_buffer_test.cpp +@@ -10,7 +10,9 @@ + // + //===----------------------------------------------------------------------===// + #include "sanitizer_common/sanitizer_ring_buffer.h" ++ + #include "gtest/gtest.h" ++#include "sanitizer_common/sanitizer_common.h" + + namespace __sanitizer { + +@@ -84,9 +86,10 @@ CompactRingBuffer *AllocCompactRingBuffer(size_t count) { + + TEST(CompactRingBuffer, int64) { + const size_t page_sizes[] = {1, 2, 4, 128}; ++ size_t page_size = GetPageSizeCached(); + + for (size_t pages : page_sizes) { +- size_t count = 4096 * pages / sizeof(int64_t); ++ size_t count = page_size * pages / sizeof(int64_t); + auto R = AllocCompactRingBuffer(count); + int64_t top = count * 3 + 13; + for (int64_t i = 0; i < top; ++i) R->push(i); +diff --git a/compiler-rt.orig/lib/sanitizer_common/tests/sanitizer_stacktrace_test.cpp b/compiler-rt.new/lib/sanitizer_common/tests/sanitizer_stacktrace_test.cpp +index 9a47b4e..4f01054 100644 +--- a/compiler-rt.orig/lib/sanitizer_common/tests/sanitizer_stacktrace_test.cpp ++++ b/compiler-rt.new/lib/sanitizer_common/tests/sanitizer_stacktrace_test.cpp +@@ -32,6 +32,14 @@ class FastUnwindTest : public ::testing::Test { + uhwptr fake_top; + uhwptr fake_bottom; + BufferedStackTrace trace; ++ ++#if defined(__loongarch__) || defined(__riscv) ++ const uptr kFpOffset = 4; ++ const uptr kBpOffset = 2; ++#else ++ const uptr kFpOffset = 2; ++ const uptr kBpOffset = 0; 
++#endif + }; + + static uptr PC(uptr idx) { +@@ -49,17 +57,17 @@ void FastUnwindTest::SetUp() { + // Fill an array of pointers with fake fp+retaddr pairs. Frame pointers have + // even indices. + for (uptr i = 0; i + 1 < fake_stack_size; i += 2) { +- fake_stack[i] = (uptr)&fake_stack[i+2]; // fp ++ fake_stack[i] = (uptr)&fake_stack[i + kFpOffset]; // fp + fake_stack[i+1] = PC(i + 1); // retaddr + } + // Mark the last fp point back up to terminate the stack trace. + fake_stack[RoundDownTo(fake_stack_size - 1, 2)] = (uhwptr)&fake_stack[0]; + + // Top is two slots past the end because UnwindFast subtracts two. +- fake_top = (uhwptr)&fake_stack[fake_stack_size + 2]; ++ fake_top = (uhwptr)&fake_stack[fake_stack_size + kFpOffset]; + // Bottom is one slot before the start because UnwindFast uses >. + fake_bottom = (uhwptr)mapping; +- fake_bp = (uptr)&fake_stack[0]; ++ fake_bp = (uptr)&fake_stack[kBpOffset]; + start_pc = PC(0); + } + +@@ -120,7 +128,7 @@ TEST_F(FastUnwindTest, OneFrameStackTrace) { + trace.Unwind(start_pc, fake_bp, nullptr, true, 1); + EXPECT_EQ(1U, trace.size); + EXPECT_EQ(start_pc, trace.trace[0]); +- EXPECT_EQ((uhwptr)&fake_stack[0], trace.top_frame_bp); ++ EXPECT_EQ((uhwptr)&fake_stack[kBpOffset], trace.top_frame_bp); + } + + TEST_F(FastUnwindTest, ZeroFramesStackTrace) { +diff --git a/compiler-rt.orig/lib/scudo/scudo_utils.cpp b/compiler-rt.new/lib/scudo/scudo_utils.cpp +index b7ce8f9..fd31f6c 100644 +--- a/compiler-rt.orig/lib/scudo/scudo_utils.cpp ++++ b/compiler-rt.new/lib/scudo/scudo_utils.cpp +@@ -138,6 +138,8 @@ bool hasHardwareCRC32() { + return hasHardwareCRC32ARMPosix(); + #endif // SANITIZER_FUCHSIA + } ++#elif defined(__loongarch__) ++bool hasHardwareCRC32() { return true; } + #else + bool hasHardwareCRC32() { return false; } + #endif // defined(__x86_64__) || defined(__i386__) +diff --git a/compiler-rt.orig/lib/scudo/standalone/checksum.cpp b/compiler-rt.new/lib/scudo/standalone/checksum.cpp +index 05d4ba5..56c639f 100644 +--- a/compiler-rt.orig/lib/scudo/standalone/checksum.cpp ++++ b/compiler-rt.new/lib/scudo/standalone/checksum.cpp +@@ -74,6 +74,8 @@ bool hasHardwareCRC32() { + return !!(getauxval(AT_HWCAP) & HWCAP_CRC32); + #endif // SCUDO_FUCHSIA + } ++#elif defined(__loongarch__) ++bool hasHardwareCRC32() { return true; } + #else + // No hardware CRC32 implemented in Scudo for other architectures. 
+ bool hasHardwareCRC32() { return false; } +diff --git a/compiler-rt.orig/lib/scudo/standalone/common.h b/compiler-rt.new/lib/scudo/standalone/common.h +index 662b733..30162ee 100644 +--- a/compiler-rt.orig/lib/scudo/standalone/common.h ++++ b/compiler-rt.new/lib/scudo/standalone/common.h +@@ -109,6 +109,10 @@ inline void yieldProcessor(u8 Count) { + __asm__ __volatile__("" ::: "memory"); + for (u8 I = 0; I < Count; I++) + __asm__ __volatile__("yield"); ++#elif defined(__loongarch__) ++ __asm__ __volatile__("" ::: "memory"); ++ for (u8 I = 0; I < Count; I++) ++ __asm__ __volatile__("dbar 0"); + #endif + __asm__ __volatile__("" ::: "memory"); + } +diff --git a/compiler-rt.orig/lib/tsan/CMakeLists.txt b/compiler-rt.new/lib/tsan/CMakeLists.txt +index 88c6f09..8fc45af 100644 +--- a/compiler-rt.orig/lib/tsan/CMakeLists.txt ++++ b/compiler-rt.new/lib/tsan/CMakeLists.txt +@@ -122,7 +122,7 @@ if(APPLE) + message(FATAL_ERROR "Building the TSan runtime requires at least macOS SDK 10.12 (or aligned SDK on other platforms)") + endif() + +- add_asm_sources(TSAN_ASM_SOURCES rtl/tsan_rtl_amd64.S rtl/tsan_rtl_aarch64.S) ++ add_asm_sources(TSAN_ASM_SOURCES rtl/tsan_rtl_amd64.S rtl/tsan_rtl_aarch64.S rtl/tsan_rtl_loongarch64.S) + + set(TSAN_LINK_LIBS ${SANITIZER_COMMON_LINK_LIBS}) + +@@ -199,6 +199,8 @@ else() + VERBATIM) + elseif(arch MATCHES "mips64|mips64le") + add_asm_sources(TSAN_ASM_SOURCES rtl/tsan_rtl_mips64.S) ++ elseif(arch MATCHES "loongarch64") ++ add_asm_sources(TSAN_ASM_SOURCES rtl/tsan_rtl_loongarch64.S) + else() + set(TSAN_ASM_SOURCES) + endif() +diff --git a/compiler-rt.orig/lib/tsan/rtl/tsan_interceptors_posix.cpp b/compiler-rt.new/lib/tsan/rtl/tsan_interceptors_posix.cpp +index 6c49ccd..a596d74 100644 +--- a/compiler-rt.orig/lib/tsan/rtl/tsan_interceptors_posix.cpp ++++ b/compiler-rt.new/lib/tsan/rtl/tsan_interceptors_posix.cpp +@@ -75,6 +75,8 @@ struct ucontext_t { + #define PTHREAD_ABI_BASE "GLIBC_2.3.2" + #elif defined(__aarch64__) || SANITIZER_PPC64V2 + #define PTHREAD_ABI_BASE "GLIBC_2.17" ++#elif SANITIZER_LOONGARCH64 ++#define PTHREAD_ABI_BASE "GLIBC_2.34" + #endif + + extern "C" int pthread_attr_init(void *attr); +diff --git a/compiler-rt.orig/lib/tsan/rtl/tsan_platform.h b/compiler-rt.new/lib/tsan/rtl/tsan_platform.h +index 81d345d..f2342b2 100644 +--- a/compiler-rt.orig/lib/tsan/rtl/tsan_platform.h ++++ b/compiler-rt.new/lib/tsan/rtl/tsan_platform.h +@@ -113,6 +113,47 @@ struct Mapping { + static const uptr kVdsoBeg = 0xfffff00000ull; + }; + ++# define TSAN_RUNTIME_VMA 1 ++# elif defined(__loongarch64) ++// clang-format off ++/* ++C/C++ on linux/loongarch64 (48-bit VMA) ++0000 0000 0000 - 0001 0000 0000: - (4 GB) ++0001 0000 0000 - 0002 0000 0000: main binary (4 GB) ++0002 0000 0000 - 0020 0000 0000: - (120 GB) ++0020 0000 0000 - 0040 0000 0000: shadow (128 GB) ++0040 0000 0000 - 0050 0000 0000: metainfo (memory blocks and sync objects) (64 GB) ++0050 0000 0000 - 5555 0000 0000: - (360 GB) ++5555 0000 0000 - 5556 0000 0000: main binary (PIE) (4 GB) ++5556 0000 0000 - 7fb0 0000 0000: - (20 GB) ++7fb0 0000 0000 - 7fb2 0000 0000: traces (8 GB) ++7fb2 0000 0000 - 7ffe 0000 0000: - (304 GB) ++7ffe 0000 0000 - 7fff 0000 0000: heap (4 GB) ++7fff 0000 0000 - 7fff 8000 0000: - (2 GB) ++7fff 8000 0000 - 7fff ffff ffff: modules and main thread stack (<2 GB) ++*/ ++ ++struct Mapping { ++ static const uptr kLoAppMemBeg = 0x000000004000ull; ++ static const uptr kLoAppMemEnd = 0x000200000000ull; ++ static const uptr kShadowBeg = 0x002000000000ull; ++ static const uptr kShadowEnd = 
0x004000000000ull; ++ static const uptr kMetaShadowBeg = 0x005000000000ull; ++ static const uptr kMetaShadowEnd = 0x555500000000ull; ++ static const uptr kMidAppMemBeg = 0x555500000000ull; ++ static const uptr kMidAppMemEnd = 0x555600000000ull; ++ static const uptr kTraceMemBeg = 0x7fb000000000ull; ++ static const uptr kTraceMemEnd = 0x7fb200000000ull; ++ static const uptr kHeapMemBeg = 0x7ffe00000000ull; ++ static const uptr kHeapMemEnd = 0x7fff00000000ull; ++ static const uptr kHiAppMemBeg = 0x7fff80000000ull; ++ static const uptr kHiAppMemEnd = 0x800000000000ull; ++ static const uptr kAppMemMsk = 0x7ff800000000ull; ++ static const uptr kAppMemXor = 0x000800000000ull; ++ static const uptr kVdsoBeg = 0x7fffffffc000ull; ++}; ++// clang-format on ++ + #define TSAN_MID_APP_RANGE 1 + #elif defined(__aarch64__) && defined(__APPLE__) + /* +@@ -244,10 +285,10 @@ struct Mapping48 { + }; + + // Indicates the runtime will define the memory regions at runtime. +-#define TSAN_RUNTIME_VMA 1 ++# define TSAN_RUNTIME_VMA 1 + // Indicates that mapping defines a mid range memory segment. +-#define TSAN_MID_APP_RANGE 1 +-#elif defined(__powerpc64__) ++# define TSAN_MID_APP_RANGE 1 ++# elif defined(__powerpc64__) + // PPC64 supports multiple VMA which leads to multiple address transformation + // functions. To support these multiple VMAS transformations and mappings TSAN + // runtime for PPC64 uses an external memory read (vmaSize) to select which +diff --git a/compiler-rt.orig/lib/tsan/rtl/tsan_platform_linux.cpp b/compiler-rt.new/lib/tsan/rtl/tsan_platform_linux.cpp +index 5e8879d..60b0c77 100644 +--- a/compiler-rt.orig/lib/tsan/rtl/tsan_platform_linux.cpp ++++ b/compiler-rt.new/lib/tsan/rtl/tsan_platform_linux.cpp +@@ -66,7 +66,8 @@ extern "C" void *__libc_stack_end; + void *__libc_stack_end = 0; + #endif + +-#if SANITIZER_LINUX && defined(__aarch64__) && !SANITIZER_GO ++#if SANITIZER_LINUX && (defined(__aarch64__) || defined(__loongarch__)) && \ ++ !SANITIZER_GO + # define INIT_LONGJMP_XOR_KEY 1 + #else + # define INIT_LONGJMP_XOR_KEY 0 +@@ -296,6 +297,8 @@ void InitializePlatform() { + CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1); + reexec = true; + } ++#endif ++#if SANITIZER_LINUX && (defined(__aarch64__) || defined(__loongarch__)) + // Initialize the xor key used in {sig}{set,long}jump. + InitializeLongjmpXorKey(); + #endif +@@ -377,6 +380,8 @@ static uptr UnmangleLongJmpSp(uptr mangled_sp) { + return mangled_sp ^ xor_key; + #elif defined(__mips__) + return mangled_sp; ++# elif defined(__loongarch__) ++ return mangled_sp ^ longjmp_xor_key; + #else + #error "Unknown platform" + #endif +@@ -395,7 +400,7 @@ static uptr UnmangleLongJmpSp(uptr mangled_sp) { + #elif SANITIZER_LINUX + # ifdef __aarch64__ + # define LONG_JMP_SP_ENV_SLOT 13 +-# elif defined(__mips64) ++#elif defined(__mips64) || defined(__loongarch__) + # define LONG_JMP_SP_ENV_SLOT 1 + # else + # define LONG_JMP_SP_ENV_SLOT 6 +@@ -419,7 +424,11 @@ static void InitializeLongjmpXorKey() { + + // 2. Retrieve vanilla/mangled SP. + uptr sp; ++#ifdef __aarch64__ + asm("mov %0, sp" : "=r" (sp)); ++#else // loongarch ++ asm("move %0, $sp" : "=r"(sp)); ++#endif + uptr mangled_sp = ((uptr *)&env)[LONG_JMP_SP_ENV_SLOT]; + + // 3. xor SPs to obtain key. 
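Illustrative sketch, not part of the patch: the InitializeLongjmpXorKey() hunk above recovers glibc's longjmp pointer-mangling key by calling setjmp, reading the mangled stack pointer back out of the jmp_buf, and xor-ing it with the live stack pointer. The standalone C++ sketch below shows the same trick in isolation. It assumes LoongArch glibc mangles the saved SP with a plain xor (no rotate, unlike x86_64) and that the mangled SP sits in jmp_buf slot 1, i.e. the LONG_JMP_SP_ENV_SLOT value used in the hunk above; RecoverLongjmpXorKey is a hypothetical helper name.

```cpp
// Illustrative sketch (assumptions noted above); LoongArch-only because of the
// inline asm. Not part of the patch.
#include <setjmp.h>
#include <stdint.h>
#include <stdio.h>

static uintptr_t RecoverLongjmpXorKey(void) {
  jmp_buf env;
  // 1. Ask glibc to fill env with (mangled) register values; we never jump back.
  setjmp(env);
  // 2. Read the live stack pointer, which equals the SP setjmp just saved.
  uintptr_t sp;
  asm("move %0, $sp" : "=r"(sp));
  uintptr_t mangled_sp = ((uintptr_t *)&env)[1];  // slot 1 = saved SP (assumed)
  // 3. mangled = sp ^ key, so xor-ing the two values yields the key.
  return mangled_sp ^ sp;
}

int main(void) {
  printf("longjmp xor key: 0x%lx\n", (unsigned long)RecoverLongjmpXorKey());
  return 0;
}
```

UnmangleLongJmpSp() in the hunk above then simply xors this key back into a mangled SP fetched from a captured jmp_buf.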
+diff --git a/compiler-rt.orig/lib/tsan/rtl/tsan_rtl.cpp b/compiler-rt.new/lib/tsan/rtl/tsan_rtl.cpp +index 3d721eb..3ef1a08 100644 +--- a/compiler-rt.orig/lib/tsan/rtl/tsan_rtl.cpp ++++ b/compiler-rt.new/lib/tsan/rtl/tsan_rtl.cpp +@@ -227,7 +227,7 @@ static void StartBackgroundThread() { + ctx->background_thread = internal_start_thread(&BackgroundThread, 0); + } + +-#ifndef __mips__ ++# if !(defined(__mips__) || defined(__loongarch__)) + static void StopBackgroundThread() { + atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed); + internal_join_thread(ctx->background_thread); +@@ -432,7 +432,7 @@ void MaybeSpawnBackgroundThread() { + // On MIPS, TSan initialization is run before + // __pthread_initialize_minimal_internal() is finished, so we can not spawn + // new threads. +-#if !SANITIZER_GO && !defined(__mips__) ++#if !SANITIZER_GO && !(defined(__mips__) || defined(__loongarch__)) + static atomic_uint32_t bg_thread = {}; + if (atomic_load(&bg_thread, memory_order_relaxed) == 0 && + atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) { +diff --git a/compiler-rt.orig/lib/tsan/rtl/tsan_rtl.h b/compiler-rt.new/lib/tsan/rtl/tsan_rtl.h +index 04d474e..cfa4746 100644 +--- a/compiler-rt.orig/lib/tsan/rtl/tsan_rtl.h ++++ b/compiler-rt.new/lib/tsan/rtl/tsan_rtl.h +@@ -54,7 +54,8 @@ namespace __tsan { + + #if !SANITIZER_GO + struct MapUnmapCallback; +-#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__) ++# if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__) || \ ++ defined(__loongarch64) + + struct AP32 { + static const uptr kSpaceBeg = 0; +diff --git a/compiler-rt.orig/lib/xray/CMakeLists.txt b/compiler-rt.new/lib/xray/CMakeLists.txt +index 54f2ad8..e8f64d5 100644 +--- a/compiler-rt.orig/lib/xray/CMakeLists.txt ++++ b/compiler-rt.new/lib/xray/CMakeLists.txt +@@ -47,6 +47,11 @@ set(aarch64_SOURCES + xray_trampoline_AArch64.S + ) + ++set(loongarch64_SOURCES ++ xray_loongarch.cpp ++ xray_trampoline_loongarch.S ++ ) ++ + set(mips_SOURCES + xray_mips.cpp + xray_trampoline_mips.S +@@ -111,6 +116,7 @@ set(XRAY_ALL_SOURCE_FILES + ${x86_64_SOURCES} + ${arm_SOURCES} + ${armhf_SOURCES} ++ ${loongarch64_SOURCES} + ${mips_SOURCES} + ${mipsel_SOURCES} + ${mips64_SOURCES} +diff --git a/compiler-rt.orig/lib/xray/tests/CMakeLists.txt b/compiler-rt.new/lib/xray/tests/CMakeLists.txt +index 96a9db1..0f034f9 100644 +--- a/compiler-rt.orig/lib/xray/tests/CMakeLists.txt ++++ b/compiler-rt.new/lib/xray/tests/CMakeLists.txt +@@ -65,6 +65,7 @@ if (NOT APPLE) + ${LLVM_TESTINGSUPPORT_LDFLAGS} XRAY_UNITTEST_LINK_FLAGS) + append_list_if(COMPILER_RT_HAS_LLVMTESTINGSUPPORT + ${LLVM_TESTINGSUPPORT_LIBLIST} XRAY_UNITTEST_LINK_FLAGS) ++ list(APPEND XRAY_UNITTEST_LINK_FLAGS -lLLVMXRay -lLLVMSupport -lLLVMDemangle -lLLVMTestingSupport) + else() + # We add the library directories one at a time in our CFLAGS. 
+ foreach (DIR ${LLVM_LIBRARY_DIR}) +diff --git a/compiler-rt.orig/lib/xray/xray_interface.cpp b/compiler-rt.new/lib/xray/xray_interface.cpp +index 7669b9a..cc0e925 100644 +--- a/compiler-rt.orig/lib/xray/xray_interface.cpp ++++ b/compiler-rt.new/lib/xray/xray_interface.cpp +@@ -50,6 +50,8 @@ static const int16_t cSledLength = 28; + static const int16_t cSledLength = 48; + #elif SANITIZER_MIPS64 + static const int16_t cSledLength = 64; ++#elif SANITIZER_LOONGARCH64 ++static const int16_t cSledLength = 48; + #elif defined(__powerpc64__) + static const int16_t cSledLength = 8; + #else +diff --git a/compiler-rt.orig/lib/xray/xray_tsc.h b/compiler-rt.new/lib/xray/xray_tsc.h +index bd7e191..d1c1d78 100644 +--- a/compiler-rt.orig/lib/xray/xray_tsc.h ++++ b/compiler-rt.new/lib/xray/xray_tsc.h +@@ -42,7 +42,8 @@ inline uint64_t getTSCFrequency() XRAY_NEVER_INSTRUMENT { + #include "xray_x86_64.inc" + #elif defined(__powerpc64__) + #include "xray_powerpc64.inc" +-#elif defined(__arm__) || defined(__aarch64__) || defined(__mips__) ++#elif defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ ++ defined(__loongarch__) + // Emulated TSC. + // There is no instruction like RDTSCP in user mode on ARM. ARM's CP15 does + // not have a constant frequency like TSC on x86(_64), it may go faster +diff --git a/compiler-rt.orig/test/asan/CMakeLists.txt b/compiler-rt.new/test/asan/CMakeLists.txt +index 855fac4..a7e1730 100644 +--- a/compiler-rt.orig/test/asan/CMakeLists.txt ++++ b/compiler-rt.new/test/asan/CMakeLists.txt +@@ -14,7 +14,7 @@ if(OS_NAME MATCHES "Windows" AND CMAKE_SIZEOF_VOID_P EQUAL 8 AND + endif() + + macro(get_bits_for_arch arch bits) +- if (${arch} MATCHES "x86_64|powerpc64|powerpc64le|aarch64|arm64|mips64|mips64el|s390x|sparcv9|riscv64") ++ if (${arch} MATCHES "x86_64|powerpc64|powerpc64le|aarch64|arm64|mips64|mips64el|s390x|sparcv9|riscv64|loongarch64") + set(${bits} 64) + elseif (${arch} MATCHES "i386|arm|mips|mipsel|sparc") + set(${bits} 32) +diff --git a/compiler-rt.orig/test/asan/TestCases/Linux/ptrace.cpp b/compiler-rt.new/test/asan/TestCases/Linux/ptrace.cpp +index 21743cf..a39b7a0 100644 +--- a/compiler-rt.orig/test/asan/TestCases/Linux/ptrace.cpp ++++ b/compiler-rt.new/test/asan/TestCases/Linux/ptrace.cpp +@@ -14,7 +14,7 @@ + #include + #include // for iovec + #include // for NT_PRSTATUS +-#ifdef __aarch64__ ++#if defined(__aarch64__) || defined(__loongarch__) + # include + #endif + +@@ -37,6 +37,13 @@ typedef struct user_fpsimd_state fpregs_struct; + #define PRINT_REG_FP(__fpregs) printf ("%x\n", (unsigned) (__fpregs.fpsr)) + #define ARCH_IOVEC_FOR_GETREGSET + ++#elif defined(__loongarch__) ++typedef struct user_pt_regs regs_struct; ++typedef struct user_fp_state fpregs_struct; ++#define PRINT_REG_PC(__regs) printf("%lx\n", (unsigned long)(__regs.csr_era)) ++#define PRINT_REG_FP(__fpregs) printf("%x\n", (unsigned)(__fpregs.fcsr)) ++#define ARCH_IOVEC_FOR_GETREGSET ++ + #elif defined(__powerpc64__) + typedef struct pt_regs regs_struct; + typedef elf_fpregset_t fpregs_struct; +diff --git a/compiler-rt.orig/test/asan/TestCases/Linux/segv_read_write.c b/compiler-rt.new/test/asan/TestCases/Linux/segv_read_write.c +index b137970..d6c4fb1 100644 +--- a/compiler-rt.orig/test/asan/TestCases/Linux/segv_read_write.c ++++ b/compiler-rt.new/test/asan/TestCases/Linux/segv_read_write.c +@@ -1,7 +1,7 @@ + // RUN: %clangxx_asan -std=c++11 -O0 %s -o %t + // RUN: not %run %t 2>&1 | FileCheck %s --check-prefix=READ + // RUN: not %run %t write 2>&1 | FileCheck %s --check-prefix=WRITE +-// 
UNSUPPORTED: powerpc64,mips,s390 ++// UNSUPPORTED: powerpc64,mips,s390,loongarch + + #include + +diff --git a/compiler-rt.orig/test/asan/TestCases/Posix/unpoison-alternate-stack.cpp b/compiler-rt.new/test/asan/TestCases/Posix/unpoison-alternate-stack.cpp +index 4774993..438d72f 100644 +--- a/compiler-rt.orig/test/asan/TestCases/Posix/unpoison-alternate-stack.cpp ++++ b/compiler-rt.new/test/asan/TestCases/Posix/unpoison-alternate-stack.cpp +@@ -3,7 +3,7 @@ + + // Don't optimize, otherwise the variables which create redzones might be + // dropped. +-// RUN: %clangxx_asan -std=c++20 -fexceptions -O0 %s -o %t -pthread ++// RUN: %clangxx_asan -fexceptions -O0 %s -o %t -pthread + // RUN: %run %t + + // XFAIL: ios && !iossim +@@ -140,7 +140,7 @@ void *threadFun(void *AltStack) { + int main() { + size_t const PageSize = sysconf(_SC_PAGESIZE); + // The Solaris defaults of 4k (32-bit) and 8k (64-bit) are too small. +- size_t const MinStackSize = std::max(PTHREAD_STACK_MIN, 16 * 1024); ++ size_t const MinStackSize = std::max(PTHREAD_STACK_MIN, 16 * 1024); + // To align the alternate stack, we round this up to page_size. + size_t const DefaultStackSize = + (MinStackSize - 1 + PageSize) & ~(PageSize - 1); +diff --git a/compiler-rt.orig/test/builtins/Unit/addtf3_test.c b/compiler-rt.new/test/builtins/Unit/addtf3_test.c +index 82a8020..e47821d 100644 +--- a/compiler-rt.orig/test/builtins/Unit/addtf3_test.c ++++ b/compiler-rt.new/test/builtins/Unit/addtf3_test.c +@@ -66,7 +66,7 @@ int main() + return 1; + + #if (defined(__arm__) || defined(__aarch64__)) && defined(__ARM_FP) || \ +- defined(i386) || defined(__x86_64__) ++ defined(i386) || defined(__x86_64__) || (defined(__loongarch__) && __loongarch_frlen != 0) + // Rounding mode tests on supported architectures + const long double m = 1234.0L, n = 0.01L; + +diff --git a/compiler-rt.orig/test/builtins/Unit/subtf3_test.c b/compiler-rt.new/test/builtins/Unit/subtf3_test.c +index c06a0ba..df5c393 100644 +--- a/compiler-rt.orig/test/builtins/Unit/subtf3_test.c ++++ b/compiler-rt.new/test/builtins/Unit/subtf3_test.c +@@ -59,7 +59,7 @@ int main() + return 1; + + #if (defined(__arm__) || defined(__aarch64__)) && defined(__ARM_FP) || \ +- defined(i386) || defined(__x86_64__) ++ defined(i386) || defined(__x86_64__) || (defined(__loongarch__) && __loongarch_frlen != 0) + // Rounding mode tests on supported architectures + const long double m = 1234.02L, n = 0.01L; + +diff --git a/compiler-rt.orig/test/fuzzer/disable-leaks.test b/compiler-rt.new/test/fuzzer/disable-leaks.test +index 1c65884..fc762d2 100644 +--- a/compiler-rt.orig/test/fuzzer/disable-leaks.test ++++ b/compiler-rt.new/test/fuzzer/disable-leaks.test +@@ -1,5 +1,5 @@ + REQUIRES: lsan +-UNSUPPORTED: aarch64 ++UNSUPPORTED: aarch64, loongarch + RUN: %cpp_compiler %S/AccumulateAllocationsTest.cpp -o %t-AccumulateAllocationsTest + RUN: %run %t-AccumulateAllocationsTest -detect_leaks=1 -runs=100000 2>&1 | FileCheck %s --check-prefix=ACCUMULATE_ALLOCS + ACCUMULATE_ALLOCS: INFO: libFuzzer disabled leak detection after every mutation +diff --git a/compiler-rt.orig/test/fuzzer/exit_on_src_pos.test b/compiler-rt.new/test/fuzzer/exit_on_src_pos.test +index d8fb662..de4da32 100644 +--- a/compiler-rt.orig/test/fuzzer/exit_on_src_pos.test ++++ b/compiler-rt.new/test/fuzzer/exit_on_src_pos.test +@@ -3,6 +3,8 @@ + # TODO: Find out why test fails on Darwin with -O2. 
# Binaries must end in .exe or else symbolization will break on Windows because of how periods + # in expansion of %t cause the compiler to overwrite .lib and .exp files. ++UNSUPPORTED: loongarch ++ + RUN: %cpp_compiler -O0 %S/SimpleTest.cpp -o %t-SimpleTest.exe -mllvm -use-unknown-locations=Disable + RUN: %cpp_compiler -O0 %S/ShrinkControlFlowTest.cpp -o %t-ShrinkControlFlowTest.exe + +diff --git a/compiler-rt.orig/test/fuzzer/fork-ubsan.test b/compiler-rt.new/test/fuzzer/fork-ubsan.test +index 16be90d..09af1f9 100644 +--- a/compiler-rt.orig/test/fuzzer/fork-ubsan.test ++++ b/compiler-rt.new/test/fuzzer/fork-ubsan.test +@@ -1,4 +1,4 @@ +-# UNSUPPORTED: darwin, freebsd, aarch64 ++# UNSUPPORTED: darwin, freebsd, aarch64, loongarch + # Tests how the fork mode works together with ubsan. + RUN: %cpp_compiler %S/IntegerOverflowTest.cpp -o %t-IntegerOverflowTest -fsanitize=signed-integer-overflow -fno-sanitize-recover=signed-integer-overflow + RUN: not %run %t-IntegerOverflowTest -fork=1 -ignore_crashes=1 -runs=10000 2>&1 | FileCheck %s --check-prefix=UBSAN_FORK +diff --git a/compiler-rt.orig/test/lit.common.cfg.py b/compiler-rt.new/test/lit.common.cfg.py +index 30cfdbe..8a1b518 100644 +--- a/compiler-rt.orig/test/lit.common.cfg.py ++++ b/compiler-rt.new/test/lit.common.cfg.py +@@ -390,7 +390,7 @@ if config.host_os == 'Linux': + if not config.android and len(ver_lines) and ver_lines[0].startswith(b"ldd "): + from distutils.version import LooseVersion + ver = LooseVersion(ver_lines[0].split()[-1].decode()) +- for required in ["2.27", "2.30"]: ++ for required in ["2.27", "2.30", "2.34"]: + if ver >= LooseVersion(required): + config.available_features.add("glibc-" + required) + +diff --git a/compiler-rt.orig/test/lsan/TestCases/strace_test.cpp b/compiler-rt.new/test/lsan/TestCases/strace_test.cpp +index 18c809c..2b4835d 100644 +--- a/compiler-rt.orig/test/lsan/TestCases/strace_test.cpp ++++ b/compiler-rt.new/test/lsan/TestCases/strace_test.cpp +@@ -5,6 +5,7 @@ + // FIXME: This technically works in practice but cannot be tested because the + // fatal-error caused adb to failed. Could not be captured to stderr to lit-check. + // XFAIL: android ++// UNSUPPORTED: loongarch + + #include + #include +diff --git a/compiler-rt.orig/test/lsan/TestCases/swapcontext.cpp b/compiler-rt.new/test/lsan/TestCases/swapcontext.cpp +index d099959..5f23cd3 100644 +--- a/compiler-rt.orig/test/lsan/TestCases/swapcontext.cpp ++++ b/compiler-rt.new/test/lsan/TestCases/swapcontext.cpp +@@ -5,7 +5,7 @@ + // RUN: %env_lsan_opts= %run %t 2>&1 + // RUN: %env_lsan_opts= not %run %t foo 2>&1 | FileCheck %s + // Missing 'getcontext' and 'makecontext' on Android.
+-// UNSUPPORTED: arm,powerpc64,android ++// UNSUPPORTED: arm,powerpc64,android,loongarch + + #include "sanitizer_common/sanitizer_ucontext.h" + #include +diff --git a/compiler-rt.orig/test/lsan/TestCases/use_registers.cpp b/compiler-rt.new/test/lsan/TestCases/use_registers.cpp +index dcf3bb9..21a88a7 100644 +--- a/compiler-rt.orig/test/lsan/TestCases/use_registers.cpp ++++ b/compiler-rt.new/test/lsan/TestCases/use_registers.cpp +@@ -32,6 +32,10 @@ extern "C" void *registers_thread_func(void *arg) { + asm("move $16, %0" + : + : "r"(p)); ++#elif defined(__loongarch__) ++ asm("move $r23, %0" ++ : ++ : "r"(p)); + #elif defined(__arm__) + asm("mov r5, %0" + : +diff --git a/compiler-rt.orig/test/lsan/lit.common.cfg.py b/compiler-rt.new/test/lsan/lit.common.cfg.py +index 5adeec3..9a3370a 100644 +--- a/compiler-rt.orig/test/lsan/lit.common.cfg.py ++++ b/compiler-rt.new/test/lsan/lit.common.cfg.py +@@ -76,7 +76,7 @@ config.substitutions.append( ("%clangxx_lsan ", build_invocation(clang_lsan_cxxf + # LeakSanitizer tests are currently supported on + # Android{aarch64, x86, x86_64}, x86-64 Linux, PowerPC64 Linux, arm Linux, mips64 Linux, s390x Linux and x86_64 Darwin. + supported_android = config.android and config.target_arch in ['x86_64', 'i386', 'aarch64'] and 'android-thread-properties-api' in config.available_features +-supported_linux = (not config.android) and config.host_os == 'Linux' and config.host_arch in ['x86_64', 'ppc64', 'ppc64le', 'mips64', 'arm', 'armhf', 'armv7l', 's390x'] ++supported_linux = (not config.android) and config.host_os == 'Linux' and config.host_arch in ['x86_64', 'ppc64', 'ppc64le', 'mips64', 'arm', 'armhf', 'armv7l', 's390x', 'loongarch64'] + supported_darwin = config.host_os == 'Darwin' and config.target_arch in ['x86_64'] + supported_netbsd = config.host_os == 'NetBSD' and config.target_arch in ['x86_64', 'i386'] + if not (supported_android or supported_linux or supported_darwin or supported_netbsd): +diff --git a/compiler-rt.orig/test/msan/allocator_mapping.cpp b/compiler-rt.new/test/msan/allocator_mapping.cpp +index 533128f..6bc4db3 100644 +--- a/compiler-rt.orig/test/msan/allocator_mapping.cpp ++++ b/compiler-rt.new/test/msan/allocator_mapping.cpp +@@ -8,7 +8,7 @@ + // This test only makes sense for the 64-bit allocator. The 32-bit allocator + // does not have a fixed mapping. Exclude platforms that use the 32-bit + // allocator. +-// UNSUPPORTED: target-is-mips64,target-is-mips64el,aarch64 ++// UNSUPPORTED: target-is-mips64,target-is-mips64el,aarch64, loongarch + + #include + #include +diff --git a/compiler-rt.orig/test/msan/fstat.cpp b/compiler-rt.new/test/msan/fstat.cpp +index 83f9705..1338663 100644 +--- a/compiler-rt.orig/test/msan/fstat.cpp ++++ b/compiler-rt.new/test/msan/fstat.cpp +@@ -4,7 +4,7 @@ + #include + + int main(void) { +- struct stat st; ++ struct stat st = {}; + if (fstat(0, &st)) + exit(1); + +diff --git a/compiler-rt.orig/test/msan/lit.cfg.py b/compiler-rt.new/test/msan/lit.cfg.py +index 8ec1614..2565fca 100644 +--- a/compiler-rt.orig/test/msan/lit.cfg.py ++++ b/compiler-rt.new/test/msan/lit.cfg.py +@@ -44,7 +44,7 @@ if config.host_os not in ['Linux', 'NetBSD', 'FreeBSD']: + # For mips64, mips64el we have forced store_context_size to 1 because these + # archs use slow unwinder which is not async signal safe. Therefore we only + # check the first frame since store_context size is 1. 
+-if config.host_arch in ['mips64', 'mips64el']: ++if config.host_arch in ['mips64', 'mips64el', 'loongarch64']: + config.substitutions.append( ('CHECK-%short-stack', 'CHECK-SHORT-STACK')) + else: + config.substitutions.append( ('CHECK-%short-stack', 'CHECK-FULL-STACK')) +diff --git a/compiler-rt.orig/test/msan/mmap.cpp b/compiler-rt.new/test/msan/mmap.cpp +index 2e7e883..54cde8f 100644 +--- a/compiler-rt.orig/test/msan/mmap.cpp ++++ b/compiler-rt.new/test/msan/mmap.cpp +@@ -22,6 +22,10 @@ bool AddrIsApp(void *p) { + return (addr >= 0x0000000000ULL && addr <= 0x0200000000ULL) || + (addr >= 0xa200000000ULL && addr <= 0xc000000000ULL) || + addr >= 0xe200000000ULL; ++#elif defined(__loongarch64) ++ return (addr >= 0x000000000000ULL && addr < 0x000200000000ULL) || ++ (addr >= 0x510000000000ULL && addr < 0x600000000000ULL) || ++ (addr >= 0x700000000000ULL && addr < 0x800000000000ULL); + #elif defined(__powerpc64__) + return addr < 0x000100000000ULL || addr >= 0x300000000000ULL; + #elif defined(__s390x__) +@@ -60,7 +64,7 @@ bool AddrIsApp(void *p) { + + int main() { + // Large enough to quickly exhaust the entire address space. +-#if defined(__mips64) || defined(__aarch64__) ++#if defined(__mips64) || defined(__aarch64__) || defined(__loongarch64) + const size_t kMapSize = 0x100000000ULL; + #else + const size_t kMapSize = 0x1000000000ULL; +diff --git a/compiler-rt.orig/test/msan/mmap_below_shadow.cpp b/compiler-rt.new/test/msan/mmap_below_shadow.cpp +index 46d948c..97c416b 100644 +--- a/compiler-rt.orig/test/msan/mmap_below_shadow.cpp ++++ b/compiler-rt.new/test/msan/mmap_below_shadow.cpp +@@ -21,7 +21,7 @@ int main(void) { + #elif defined(__x86_64__) + uintptr_t hint = 0x4f0000000000ULL; + const uintptr_t app_start = 0x600000000000ULL; +-#elif defined (__mips64) ++#elif defined(__mips64) || defined(__loongarch64) + uintptr_t hint = 0x4f00000000ULL; + const uintptr_t app_start = 0x6000000000ULL; + #elif defined (__powerpc64__) +diff --git a/compiler-rt.orig/test/msan/param_tls_limit.cpp b/compiler-rt.new/test/msan/param_tls_limit.cpp +index 43e6685..cd7ed31 100644 +--- a/compiler-rt.orig/test/msan/param_tls_limit.cpp ++++ b/compiler-rt.new/test/msan/param_tls_limit.cpp +@@ -5,9 +5,9 @@ + // RUN: %clangxx_msan -fsanitize-memory-track-origins -O0 %s -o %t && %run %t + // RUN: %clangxx_msan -fsanitize-memory-track-origins=2 -O0 %s -o %t && %run %t + // +-// AArch64 fails with: ++// AArch64 and LoongArch fails with: + // void f801(S<801>): Assertion `__msan_test_shadow(&s, sizeof(s)) == -1' failed +-// XFAIL: aarch64 ++// XFAIL: aarch64 || loongarch + // When passing huge structs by value, SystemZ uses pointers, therefore this + // test in its present form is unfortunately not applicable. + // ABI says: "A struct or union of any other size . Replace such an +diff --git a/compiler-rt.orig/test/msan/preinit_array.cpp b/compiler-rt.new/test/msan/preinit_array.cpp +index 6f877ba..c72004e 100644 +--- a/compiler-rt.orig/test/msan/preinit_array.cpp ++++ b/compiler-rt.new/test/msan/preinit_array.cpp +@@ -1,5 +1,8 @@ + // RUN: %clangxx_msan -O0 %s -o %t && %run %t + ++// FIXME: Something changed in glibc 2.34, maybe earier. 
++// UNSUPPORTED: glibc-2.34 ++ + #include + + volatile int global; +diff --git a/compiler-rt.orig/test/msan/strlen_of_shadow.cpp b/compiler-rt.new/test/msan/strlen_of_shadow.cpp +index 5e7c89c..3b0365a 100644 +--- a/compiler-rt.orig/test/msan/strlen_of_shadow.cpp ++++ b/compiler-rt.new/test/msan/strlen_of_shadow.cpp +@@ -13,9 +13,9 @@ + #include "test.h" + + const char *mem_to_shadow(const char *p) { +-#if defined(__x86_64__) ++#if defined(__x86_64__) || defined(__loongarch64) + return (char *)((uintptr_t)p ^ 0x500000000000ULL); +-#elif defined (__mips64) ++#elif defined(__mips64) + return (char *)((uintptr_t)p ^ 0x8000000000ULL); + #elif defined(__powerpc64__) + #define LINEARIZE_MEM(mem) \ +diff --git a/compiler-rt.orig/test/msan/vararg.cpp b/compiler-rt.new/test/msan/vararg.cpp +index e1a7b12..ef4e40c 100644 +--- a/compiler-rt.orig/test/msan/vararg.cpp ++++ b/compiler-rt.new/test/msan/vararg.cpp +@@ -16,10 +16,11 @@ + + // Check that shadow and origin are passed through va_args. + +-// Copying origins on AArch64, MIPS and PowerPC isn't supported yet. ++// Copying origins on AArch64, LoongArch, MIPS and PowerPC isn't supported yet. + // XFAIL: aarch64 + // XFAIL: mips + // XFAIL: powerpc64 ++// XFAIL: loongarch + + #include + #include +diff --git a/compiler-rt.orig/test/msan/vector_select.cpp b/compiler-rt.new/test/msan/vector_select.cpp +index 0cf1164..8173b86 100644 +--- a/compiler-rt.orig/test/msan/vector_select.cpp ++++ b/compiler-rt.new/test/msan/vector_select.cpp +@@ -11,7 +11,7 @@ __m128d select(bool b, __m128d c, __m128d d) + { + return b ? c : d; + } +-#elif defined (__mips64) || defined (__powerpc64__) ++#elif defined(__mips64) || defined(__powerpc64__) || defined(__loongarch64) + typedef double __w64d __attribute__ ((vector_size(16))); + + __w64d select(bool b, __w64d c, __w64d d) +diff --git a/compiler-rt.orig/test/msan/wcsncpy.cpp b/compiler-rt.new/test/msan/wcsncpy.cpp +index f448ab2..a5f31f4 100644 +--- a/compiler-rt.orig/test/msan/wcsncpy.cpp ++++ b/compiler-rt.new/test/msan/wcsncpy.cpp +@@ -1,7 +1,7 @@ + // RUN: %clangxx_msan -fsanitize-memory-track-origins -O0 %s -o %t && not %run %t >%t.out 2>&1 + // RUN: FileCheck %s < %t.out && FileCheck %s < %t.out + +-// XFAIL: mips ++// XFAIL: mips || loongarch + + #include + #include +diff --git a/compiler-rt.orig/test/sanitizer_common/TestCases/Linux/pthread_mutex.cpp b/compiler-rt.new/test/sanitizer_common/TestCases/Linux/pthread_mutex.cpp +index 6109581..45bff96 100644 +--- a/compiler-rt.orig/test/sanitizer_common/TestCases/Linux/pthread_mutex.cpp ++++ b/compiler-rt.new/test/sanitizer_common/TestCases/Linux/pthread_mutex.cpp +@@ -4,7 +4,8 @@ + + #include + +-#ifdef USE_GLIBC ++#if defined(USE_GLIBC) && !__GLIBC_PREREQ(2, 34) ++// They were removed from GLIBC 2.34 + extern "C" int __pthread_mutex_lock(pthread_mutex_t *__mutex); + extern "C" int __pthread_mutex_unlock(pthread_mutex_t *__mutex); + #define LOCK __pthread_mutex_lock +diff --git a/compiler-rt.orig/test/sanitizer_common/TestCases/Linux/ptrace.cpp b/compiler-rt.new/test/sanitizer_common/TestCases/Linux/ptrace.cpp +index 82532c3..a4645c0 100644 +--- a/compiler-rt.orig/test/sanitizer_common/TestCases/Linux/ptrace.cpp ++++ b/compiler-rt.new/test/sanitizer_common/TestCases/Linux/ptrace.cpp +@@ -16,7 +16,7 @@ + #include + #include + #endif +-#ifdef __aarch64__ ++#if defined(__aarch64__) || defined(__loongarch__) + // GLIBC 2.20+ sys/user does not include asm/ptrace.h + #include + #endif +@@ -114,6 +114,26 @@ int main(void) { + printf("%x\n", fpregs.fpc); + #endif // 
(__s390__) + ++#if (__loongarch64) ++ struct iovec regset_io; ++ ++ struct user_pt_regs regs; ++ regset_io.iov_base = ®s; ++ regset_io.iov_len = sizeof(regs); ++ res = ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, (void *)®set_io); ++ assert(!res); ++ if (regs.csr_era) ++ printf("%lx\n", regs.csr_era); ++ ++ struct user_fp_state fpregs; ++ regset_io.iov_base = &fpregs; ++ regset_io.iov_len = sizeof(fpregs); ++ res = ptrace(PTRACE_GETREGSET, pid, (void *)NT_FPREGSET, (void *)®set_io); ++ assert(!res); ++ if (fpregs.fcsr) ++ printf("%lx\n", fpregs.fcsr); ++#endif // (__loongarch64) ++ + siginfo_t siginfo; + res = ptrace(PTRACE_GETSIGINFO, pid, NULL, &siginfo); + assert(!res); +diff --git a/compiler-rt.orig/test/sanitizer_common/TestCases/Linux/sysconf_interceptor_bypass_test.cpp b/compiler-rt.new/test/sanitizer_common/TestCases/Linux/sysconf_interceptor_bypass_test.cpp +index 0ffb346..43e21e2 100644 +--- a/compiler-rt.orig/test/sanitizer_common/TestCases/Linux/sysconf_interceptor_bypass_test.cpp ++++ b/compiler-rt.new/test/sanitizer_common/TestCases/Linux/sysconf_interceptor_bypass_test.cpp +@@ -8,7 +8,8 @@ + + // getauxval() used instead of sysconf() in GetPageSize() is defined starting + // glbc version 2.16. +-#if __GLIBC_PREREQ(2, 16) ++// Does not work with 2.31 and above at it calls sysconf for SIGSTKSZ. ++#if __GLIBC_PREREQ(2, 16) && !__GLIBC_PREREQ(2, 31) + extern "C" long sysconf(int name) { + fprintf(stderr, "sysconf wrapper called\n"); + return 0; +diff --git a/compiler-rt.orig/test/sanitizer_common/TestCases/Posix/lstat.cpp b/compiler-rt.new/test/sanitizer_common/TestCases/Posix/lstat.cpp +index 75b1961..f2ca240 100644 +--- a/compiler-rt.orig/test/sanitizer_common/TestCases/Posix/lstat.cpp ++++ b/compiler-rt.new/test/sanitizer_common/TestCases/Posix/lstat.cpp +@@ -5,7 +5,7 @@ + #include + + int main(void) { +- struct stat st; ++ struct stat st = {}; + + assert(!lstat("/dev/null", &st)); + #if defined(__sun__) && defined(__svr4__) +diff --git a/compiler-rt.orig/test/sanitizer_common/print_address.h b/compiler-rt.new/test/sanitizer_common/print_address.h +index e7bb1a3..7935f7b 100644 +--- a/compiler-rt.orig/test/sanitizer_common/print_address.h ++++ b/compiler-rt.new/test/sanitizer_common/print_address.h +@@ -8,7 +8,7 @@ void print_address(const char *str, int n, ...) { + while (n--) { + void *p = va_arg(ap, void *); + #if defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__) || \ +- defined(__s390x__) ++ defined(__s390x__) || defined(__loongarch__) + // On FreeBSD, the %p conversion specifier works as 0x%x and thus does not + // match to the format used in the diagnotic message. + fprintf(stderr, "0x%012lx ", (unsigned long) p); +diff --git a/compiler-rt.orig/test/tsan/map32bit.cpp b/compiler-rt.new/test/tsan/map32bit.cpp +index 8aef27b..0629ae2 100644 +--- a/compiler-rt.orig/test/tsan/map32bit.cpp ++++ b/compiler-rt.new/test/tsan/map32bit.cpp +@@ -11,6 +11,7 @@ + // XFAIL: mips + // XFAIL: aarch64 + // XFAIL: powerpc64 ++// XFAIL: loongarch + + // MAP_32BIT doesn't exist on OS X and NetBSD. 
+ // UNSUPPORTED: darwin,netbsd +diff --git a/compiler-rt.orig/test/tsan/mmap_large.cpp b/compiler-rt.new/test/tsan/mmap_large.cpp +index c8d258e..10ae9a0 100644 +--- a/compiler-rt.orig/test/tsan/mmap_large.cpp ++++ b/compiler-rt.new/test/tsan/mmap_large.cpp +@@ -19,6 +19,8 @@ int main() { + const size_t kLog2Size = 39; + #elif defined(__mips64) || defined(__aarch64__) + const size_t kLog2Size = 32; ++#elif defined(__loongarch64) ++ const size_t kLog2Size = 32; + #elif defined(__powerpc64__) + const size_t kLog2Size = 39; + #endif +diff --git a/compiler-rt.orig/test/tsan/test.h b/compiler-rt.new/test/tsan/test.h +index 4c75572..4b2ddc0 100644 +--- a/compiler-rt.orig/test/tsan/test.h ++++ b/compiler-rt.new/test/tsan/test.h +@@ -61,6 +61,8 @@ unsigned long long monotonic_clock_ns() { + const int kPCInc = 4; + #elif defined(__sparc__) || defined(__mips__) + const int kPCInc = 8; ++#elif defined(__loongarch__) ++const int kPCInc = 4; + #else + const int kPCInc = 1; + #endif +diff --git a/compiler-rt.orig/test/xray/TestCases/Posix/arg1-arg0-logging.cpp b/compiler-rt.new/test/xray/TestCases/Posix/arg1-arg0-logging.cpp +index 757f81a..a531622 100644 +--- a/compiler-rt.orig/test/xray/TestCases/Posix/arg1-arg0-logging.cpp ++++ b/compiler-rt.new/test/xray/TestCases/Posix/arg1-arg0-logging.cpp +@@ -6,7 +6,7 @@ + // RUN: XRAY_OPTIONS="patch_premain=true verbosity=1 xray_logfile_base=arg0-arg1-logging-" %run %t + // + // TODO: Support these in ARM and PPC +-// XFAIL: arm || aarch64 || mips ++// XFAIL: arm || aarch64 || mips || loongarch + // UNSUPPORTED: powerpc64le + + #include "xray/xray_interface.h" +diff --git a/compiler-rt.orig/test/xray/TestCases/Posix/arg1-logger.cpp b/compiler-rt.new/test/xray/TestCases/Posix/arg1-logger.cpp +index 48544c3..0d7c9a2 100644 +--- a/compiler-rt.orig/test/xray/TestCases/Posix/arg1-logger.cpp ++++ b/compiler-rt.new/test/xray/TestCases/Posix/arg1-logger.cpp +@@ -11,7 +11,7 @@ + // RUN: rm -f arg1-logger-* + // + // At the time of writing, the ARM trampolines weren't written yet. +-// XFAIL: arm || aarch64 || mips ++// XFAIL: arm || aarch64 || mips || loongarch + // See the mailing list discussion of r296998. 
+ // UNSUPPORTED: powerpc64le + +diff --git a/compiler-rt.orig/test/xray/TestCases/Posix/arg1-logging-implicit-this.cpp b/compiler-rt.new/test/xray/TestCases/Posix/arg1-logging-implicit-this.cpp +index d8dd622..26129a8 100644 +--- a/compiler-rt.orig/test/xray/TestCases/Posix/arg1-logging-implicit-this.cpp ++++ b/compiler-rt.new/test/xray/TestCases/Posix/arg1-logging-implicit-this.cpp +@@ -4,7 +4,7 @@ + // RUN: rm -f log-args-this-* + // RUN: XRAY_OPTIONS="patch_premain=true verbosity=1 xray_logfile_base=log-args-this-" %run %t + // +-// XFAIL: FreeBSD || arm || aarch64 || mips ++// XFAIL: FreeBSD || arm || aarch64 || mips || loongarch + // UNSUPPORTED: powerpc64le + #include "xray/xray_interface.h" + #include +diff --git a/compiler-rt.orig/test/xray/TestCases/Posix/argv0-log-file-name.cpp b/compiler-rt.new/test/xray/TestCases/Posix/argv0-log-file-name.cpp +index bd48693..f364151 100644 +--- a/compiler-rt.orig/test/xray/TestCases/Posix/argv0-log-file-name.cpp ++++ b/compiler-rt.new/test/xray/TestCases/Posix/argv0-log-file-name.cpp +@@ -7,6 +7,7 @@ + // RUN: rm xray-log.argv0-log-file-name.* xray.log.file.name + + // UNSUPPORTED: target-is-mips64,target-is-mips64el ++// UNSUPPORTED: target-is-loongarch64 + + #include + #include +diff --git a/compiler-rt.orig/test/xray/TestCases/Posix/coverage-sample.cpp b/compiler-rt.new/test/xray/TestCases/Posix/coverage-sample.cpp +index 1903ad6..70dfd46 100644 +--- a/compiler-rt.orig/test/xray/TestCases/Posix/coverage-sample.cpp ++++ b/compiler-rt.new/test/xray/TestCases/Posix/coverage-sample.cpp +@@ -6,6 +6,7 @@ + // RUN: XRAY_OPTIONS="patch_premain=false" %run %t | FileCheck %s + + // UNSUPPORTED: target-is-mips64,target-is-mips64el ++// UNSUPPORTED: target-is-loongarch64 + + #include "xray/xray_interface.h" + +diff --git a/compiler-rt.orig/test/xray/TestCases/Posix/fixedsize-logging.cpp b/compiler-rt.new/test/xray/TestCases/Posix/fixedsize-logging.cpp +index e4462c8..d9cdad5 100644 +--- a/compiler-rt.orig/test/xray/TestCases/Posix/fixedsize-logging.cpp ++++ b/compiler-rt.new/test/xray/TestCases/Posix/fixedsize-logging.cpp +@@ -8,6 +8,7 @@ + // RUN: rm fixedsize-logging-* + + // UNSUPPORTED: target-is-mips64,target-is-mips64el ++// UNSUPPORTED: target-is-loongarch64 + + #include + +diff --git a/compiler-rt.orig/test/xray/TestCases/Posix/func-id-utils.cpp b/compiler-rt.new/test/xray/TestCases/Posix/func-id-utils.cpp +index ab0c5b0..b2631f1 100644 +--- a/compiler-rt.orig/test/xray/TestCases/Posix/func-id-utils.cpp ++++ b/compiler-rt.new/test/xray/TestCases/Posix/func-id-utils.cpp +@@ -7,6 +7,7 @@ + // RUN: XRAY_OPTIONS="patch_premain=false" %run %t + + // UNSUPPORTED: target-is-mips64,target-is-mips64el ++// UNSUPPORTED: target-is-loongarch64 + + #include "xray/xray_interface.h" + #include +diff --git a/compiler-rt.orig/test/xray/TestCases/Posix/logging-modes.cpp b/compiler-rt.new/test/xray/TestCases/Posix/logging-modes.cpp +index f839ba5..2302995 100644 +--- a/compiler-rt.orig/test/xray/TestCases/Posix/logging-modes.cpp ++++ b/compiler-rt.new/test/xray/TestCases/Posix/logging-modes.cpp +@@ -5,6 +5,7 @@ + // RUN: %run %t | FileCheck %s + // + // UNSUPPORTED: target-is-mips64,target-is-mips64el ++// UNSUPPORTED: target-is-loongarch64 + + #include "xray/xray_interface.h" + #include "xray/xray_log_interface.h" +diff --git a/compiler-rt.orig/test/xray/TestCases/Posix/optional-inmemory-log.cpp b/compiler-rt.new/test/xray/TestCases/Posix/optional-inmemory-log.cpp +index a32c874..59d4c53 100644 +--- 
a/compiler-rt.orig/test/xray/TestCases/Posix/optional-inmemory-log.cpp ++++ b/compiler-rt.new/test/xray/TestCases/Posix/optional-inmemory-log.cpp +@@ -9,6 +9,7 @@ + // RUN: rm -f optional-inmemory-log.xray-* + + // UNSUPPORTED: target-is-mips64,target-is-mips64el ++// UNSUPPORTED: target-is-loongarch64 + + #include + +diff --git a/compiler-rt.orig/test/xray/TestCases/Posix/patching-unpatching.cpp b/compiler-rt.new/test/xray/TestCases/Posix/patching-unpatching.cpp +index 978a897..267c431 100644 +--- a/compiler-rt.orig/test/xray/TestCases/Posix/patching-unpatching.cpp ++++ b/compiler-rt.new/test/xray/TestCases/Posix/patching-unpatching.cpp +@@ -7,6 +7,7 @@ + // RUN: XRAY_OPTIONS="patch_premain=false" %run %t 2>&1 | FileCheck %s + + // UNSUPPORTED: target-is-mips64,target-is-mips64el ++// UNSUPPORTED: target-is-loongarch64 + + #include "xray/xray_interface.h" + +diff --git a/compiler-rt.orig/test/xray/TestCases/Posix/pic_test.cpp b/compiler-rt.new/test/xray/TestCases/Posix/pic_test.cpp +index fbf6bdc..161567b 100644 +--- a/compiler-rt.orig/test/xray/TestCases/Posix/pic_test.cpp ++++ b/compiler-rt.new/test/xray/TestCases/Posix/pic_test.cpp +@@ -10,6 +10,7 @@ + // RUN: rm -f pic-test-logging-* + + // UNSUPPORTED: target-is-mips64,target-is-mips64el ++// UNSUPPORTED: target-is-loongarch64 + + #include + +-- +2.41.0 + diff --git a/1001-add-loongarch64-support-not-upstream-new.patch b/1001-add-loongarch64-support-not-upstream-new.patch new file mode 100644 index 0000000000000000000000000000000000000000..33f46b745bc5bdbc23e43f75bd8a97c4d0dd68a0 --- /dev/null +++ b/1001-add-loongarch64-support-not-upstream-new.patch @@ -0,0 +1,790 @@ +From 4ba7d34a059c3ce0738940f293fded7f3096d016 Mon Sep 17 00:00:00 2001 +From: herengui +Date: Thu, 31 Aug 2023 09:46:51 +0800 +Subject: [PATCH 1001/1001] add loongarch64 support not upstream new + +Signed-off-by: herengui +--- + lib/builtins/loongarch/fp_mode.c | 61 ++++++ + ...ommon_interceptors_vfork_loongarch64.inc.S | 57 ++++++ + .../sanitizer_syscall_linux_loongarch64.inc | 168 +++++++++++++++++ + lib/tsan/rtl/tsan_rtl_loongarch64.S | 142 ++++++++++++++ + lib/xray/xray_loongarch.cpp | 173 ++++++++++++++++++ + lib/xray/xray_trampoline_loongarch.S | 129 +++++++++++++ + 6 files changed, 730 insertions(+) + create mode 100644 lib/builtins/loongarch/fp_mode.c + create mode 100644 lib/sanitizer_common/sanitizer_common_interceptors_vfork_loongarch64.inc.S + create mode 100644 lib/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc + create mode 100644 lib/tsan/rtl/tsan_rtl_loongarch64.S + create mode 100644 lib/xray/xray_loongarch.cpp + create mode 100644 lib/xray/xray_trampoline_loongarch.S + +diff --git a/compiler-rt.orig/lib/builtins/loongarch/fp_mode.c b/compiler-rt.new/lib/builtins/loongarch/fp_mode.c +new file mode 100644 +index 0000000..6471cc3 +--- /dev/null ++++ b/compiler-rt.new/lib/builtins/loongarch/fp_mode.c +@@ -0,0 +1,61 @@ ++//=== lib/builtins/loongarch/fp_mode.c - Floaing-point mode utilities -*- C ++//-*-===// ++// ++// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. ++// See https://llvm.org/LICENSE.txt for license information. 
++// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception ++// ++//===----------------------------------------------------------------------===// ++#include "../fp_mode.h" ++ ++#define LOONGARCH_TONEAREST 0x0000 ++#define LOONGARCH_TOWARDZERO 0x0100 ++#define LOONGARCH_UPWARD 0x0200 ++#define LOONGARCH_DOWNWARD 0x0300 ++ ++#define LOONGARCH_RMODE_MASK \ ++ (LOONGARCH_TONEAREST | LOONGARCH_TOWARDZERO | LOONGARCH_UPWARD | \ ++ LOONGARCH_DOWNWARD) ++ ++#define LOONGARCH_INEXACT 0x10000 ++ ++FE_ROUND_MODE __fe_getround(void) { ++#if __loongarch_frlen != 0 ++ int fcsr; ++#if __clang__ ++ __asm__ __volatile__("movfcsr2gr %0, $fcsr0" : "=r"(fcsr)); ++#else ++ /* FIXME: gcc cannot recognise $fcsr0, use $r0 as a workaround. */ ++ __asm__ __volatile__("movfcsr2gr %0, $r0" : "=r"(fcsr)); ++#endif ++ fcsr &= LOONGARCH_RMODE_MASK; ++ switch (fcsr) { ++ case LOONGARCH_TOWARDZERO: ++ return FE_TOWARDZERO; ++ case LOONGARCH_DOWNWARD: ++ return FE_DOWNWARD; ++ case LOONGARCH_UPWARD: ++ return FE_UPWARD; ++ case LOONGARCH_TONEAREST: ++ default: ++ return FE_TONEAREST; ++ } ++#else ++ return FE_TONEAREST; ++#endif ++} ++ ++int __fe_raise_inexact(void) { ++#if __loongarch_frlen != 0 ++ int fcsr; ++#if __clang__ ++ __asm__ __volatile__("movfcsr2gr %0, $fcsr0" : "=r"(fcsr)); ++ __asm__ __volatile__("movgr2fcsr $fcr0, %0" ::"r"(fcsr | LOONGARCH_INEXACT)); ++#else ++ /* FIXME: gcc cannot recognise $fcsr0, use $r0 as a workaround. */ ++ __asm__ __volatile__("movfcsr2gr %0, $r0" : "=r"(fcsr)); ++ __asm__ __volatile__("movgr2fcsr $r0, %0" ::"r"(fcsr | LOONGARCH_INEXACT)); ++#endif ++#endif ++ return 0; ++} +diff --git a/compiler-rt.orig/lib/sanitizer_common/sanitizer_common_interceptors_vfork_loongarch64.inc.S b/compiler-rt.new/lib/sanitizer_common/sanitizer_common_interceptors_vfork_loongarch64.inc.S +new file mode 100644 +index 0000000..dae72b5 +--- /dev/null ++++ b/compiler-rt.new/lib/sanitizer_common/sanitizer_common_interceptors_vfork_loongarch64.inc.S +@@ -0,0 +1,57 @@ ++#if defined(__loongarch64) && defined(__linux__) ++ ++#include "sanitizer_common/sanitizer_asm.h" ++ ++ASM_HIDDEN(COMMON_INTERCEPTOR_SPILL_AREA) ++ASM_HIDDEN(_ZN14__interception10real_vforkE) ++ ++.text ++.globl ASM_WRAPPER_NAME(vfork) ++ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork)) ++ASM_WRAPPER_NAME(vfork): ++ // Save ra in the off-stack spill area. ++ // allocate space on stack ++ addi.d $sp, $sp, -16 ++ // store $ra value ++ st.d $ra, $sp, 8 ++ bl COMMON_INTERCEPTOR_SPILL_AREA ++ // restore previous values from stack ++ ld.d $ra, $sp, 8 ++ // adjust stack ++ addi.d $sp, $sp, 16 ++ // store $ra by $a0 ++ st.d $ra, $a0, 0 ++ ++ // Call real vfork. This may return twice. User code that runs between the first and the second return ++ // may clobber the stack frame of the interceptor; that's why it does not have a frame. ++ la.local $a0, _ZN14__interception10real_vforkE ++ ld.d $a0, $a0, 0 ++ jirl $ra, $a0, 0 ++ ++ // adjust stack ++ addi.d $sp, $sp, -16 ++ // store $a0 by adjusted stack ++ st.d $a0, $sp, 8 ++ // jump to exit label if $a0 is 0 ++ beqz $a0, .L_exit ++ ++ // $a0 != 0 => parent process. Clear stack shadow. 
++ // put old $sp to $a0 ++ addi.d $a0, $sp, 16 ++ bl %plt(COMMON_INTERCEPTOR_HANDLE_VFORK) ++ ++.L_exit: ++ // Restore $ra ++ bl COMMON_INTERCEPTOR_SPILL_AREA ++ ld.d $ra, $a0, 0 ++ // load value by stack ++ ld.d $a0, $sp, 8 ++ // adjust stack ++ addi.d $sp, $sp, 16 ++ jr $ra ++ASM_SIZE(vfork) ++ ++.weak vfork ++.set vfork, ASM_WRAPPER_NAME(vfork) ++ ++#endif +diff --git a/compiler-rt.orig/lib/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc b/compiler-rt.new/lib/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc +new file mode 100644 +index 0000000..0d8d530 +--- /dev/null ++++ b/compiler-rt.new/lib/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc +@@ -0,0 +1,168 @@ ++//===-- sanitizer_syscall_linux_loongarch64.inc -----------------*- C++ -*-===// ++// ++// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. ++// See https://llvm.org/LICENSE.txt for license information. ++// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception ++// ++//===----------------------------------------------------------------------===// ++// ++// Implementations of internal_syscall and internal_iserror for ++// Linux/loongarch64. ++// ++//===----------------------------------------------------------------------===// ++ ++// About local register variables: ++// https://gcc.gnu.org/onlinedocs/gcc/Local-Register-Variables.html#Local-Register-Variables ++// ++// Kernel ABI... ++// syscall number is passed in a7 ++// (http://man7.org/linux/man-pages/man2/syscall.2.html) results are return in ++// a0 and a1 (http://man7.org/linux/man-pages/man2/syscall.2.html) arguments ++// are passed in: a0-a7 (confirmed by inspecting glibc sources). ++#define SYSCALL(name) __NR_##name ++ ++#define INTERNAL_SYSCALL_CLOBBERS \ ++ "memory", "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8" ++ ++static uptr __internal_syscall(u64 nr) { ++ register u64 a7 asm("a7") = nr; ++ register u64 a0 asm("a0"); ++ __asm__ volatile("syscall 0\n\t" ++ : "=r"(a0) ++ : "r"(a7) ++ : INTERNAL_SYSCALL_CLOBBERS); ++ return a0; ++} ++#define __internal_syscall0(n) (__internal_syscall)(n) ++ ++static uptr __internal_syscall(u64 nr, u64 arg1) { ++ register u64 a7 asm("a7") = nr; ++ register u64 a0 asm("a0") = arg1; ++ __asm__ volatile("syscall 0\n\t" ++ : "+r"(a0) ++ : "r"(a7) ++ : INTERNAL_SYSCALL_CLOBBERS); ++ return a0; ++} ++#define __internal_syscall1(n, a1) (__internal_syscall)(n, (u64)(a1)) ++ ++static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) { ++ register u64 a7 asm("a7") = nr; ++ register u64 a0 asm("a0") = arg1; ++ register u64 a1 asm("a1") = arg2; ++ __asm__ volatile("syscall 0\n\t" ++ : "+r"(a0) ++ : "r"(a7), "r"(a1) ++ : INTERNAL_SYSCALL_CLOBBERS); ++ return a0; ++} ++#define __internal_syscall2(n, a1, a2) \ ++ (__internal_syscall)(n, (u64)(a1), (long)(a2)) ++ ++static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) { ++ register u64 a7 asm("a7") = nr; ++ register u64 a0 asm("a0") = arg1; ++ register u64 a1 asm("a1") = arg2; ++ register u64 a2 asm("a2") = arg3; ++ __asm__ volatile("syscall 0\n\t" ++ : "+r"(a0) ++ : "r"(a7), "r"(a1), "r"(a2) ++ : INTERNAL_SYSCALL_CLOBBERS); ++ return a0; ++} ++#define __internal_syscall3(n, a1, a2, a3) \ ++ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3)) ++ ++static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, ++ u64 arg4) { ++ register u64 a7 asm("a7") = nr; ++ register u64 a0 asm("a0") = arg1; ++ register u64 a1 asm("a1") = arg2; ++ register u64 a2 asm("a2") = arg3; ++ register u64 a3 
asm("a3") = arg4; ++ __asm__ volatile("syscall 0\n\t" ++ : "+r"(a0) ++ : "r"(a7), "r"(a1), "r"(a2), "r"(a3) ++ : INTERNAL_SYSCALL_CLOBBERS); ++ return a0; ++} ++#define __internal_syscall4(n, a1, a2, a3, a4) \ ++ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4)) ++ ++static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4, ++ long arg5) { ++ register u64 a7 asm("a7") = nr; ++ register u64 a0 asm("a0") = arg1; ++ register u64 a1 asm("a1") = arg2; ++ register u64 a2 asm("a2") = arg3; ++ register u64 a3 asm("a3") = arg4; ++ register u64 a4 asm("a4") = arg5; ++ __asm__ volatile("syscall 0\n\t" ++ : "+r"(a0) ++ : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4) ++ : INTERNAL_SYSCALL_CLOBBERS); ++ return a0; ++} ++#define __internal_syscall5(n, a1, a2, a3, a4, a5) \ ++ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \ ++ (u64)(a5)) ++ ++static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4, ++ long arg5, long arg6) { ++ register u64 a7 asm("a7") = nr; ++ register u64 a0 asm("a0") = arg1; ++ register u64 a1 asm("a1") = arg2; ++ register u64 a2 asm("a2") = arg3; ++ register u64 a3 asm("a3") = arg4; ++ register u64 a4 asm("a4") = arg5; ++ register u64 a5 asm("a5") = arg6; ++ __asm__ volatile("syscall 0\n\t" ++ : "+r"(a0) ++ : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5) ++ : INTERNAL_SYSCALL_CLOBBERS); ++ return a0; ++} ++#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \ ++ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \ ++ (u64)(a5), (long)(a6)) ++ ++static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4, ++ long arg5, long arg6, long arg7) { ++ register u64 a7 asm("a7") = nr; ++ register u64 a0 asm("a0") = arg1; ++ register u64 a1 asm("a1") = arg2; ++ register u64 a2 asm("a2") = arg3; ++ register u64 a3 asm("a3") = arg4; ++ register u64 a4 asm("a4") = arg5; ++ register u64 a5 asm("a5") = arg6; ++ register u64 a6 asm("a6") = arg7; ++ __asm__ volatile("syscall 0\n\t" ++ : "+r"(a0) ++ : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5), ++ "r"(a6) ++ : INTERNAL_SYSCALL_CLOBBERS); ++ return a0; ++} ++#define __internal_syscall7(n, a1, a2, a3, a4, a5, a6, a7) \ ++ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \ ++ (u64)(a5), (long)(a6), (long)(a7)) ++ ++#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n ++#define __SYSCALL_NARGS(...) \ ++ __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, ) ++#define __SYSCALL_CONCAT_X(a, b) a##b ++#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b) ++#define __SYSCALL_DISP(b, ...) \ ++ __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__) ++ ++#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__) ++ ++// Helper function used to avoid clobbering of errno. 
++bool internal_iserror(uptr retval, int *internal_errno) { ++ if (retval >= (uptr)-4095) { ++ if (internal_errno) ++ *internal_errno = -retval; ++ return true; ++ } ++ return false; ++} +diff --git a/compiler-rt.orig/lib/tsan/rtl/tsan_rtl_loongarch64.S b/compiler-rt.new/lib/tsan/rtl/tsan_rtl_loongarch64.S +new file mode 100644 +index 0000000..66b16e4 +--- /dev/null ++++ b/compiler-rt.new/lib/tsan/rtl/tsan_rtl_loongarch64.S +@@ -0,0 +1,142 @@ ++.section .text ++ ++.hidden __tsan_setjmp ++.comm _ZN14__interception11real_setjmpE,8,8 ++.globl setjmp ++.type setjmp, @function ++setjmp: ++ ++ # save env parameters ++ addi.d $sp, $sp, -32 ++ st.d $ra, $sp, 24 ++ st.d $fp, $sp, 16 ++ ++ # save jmp_buf ++ st.d $a0, $sp, 0 ++ ++ # obtain $sp and call tsan interceptor ++ addi.d $a0, $sp, 32 ++ bl __tsan_setjmp ++ ++ # restore jmp_buf ++ ld.d $a0, $sp, 0 ++ ++ # load libc setjmp to t0 ++ la.got $t0, _ZN14__interception11real_setjmpE ++ ++ # restore env parameters ++ ld.d $fp, $sp, 16 ++ ld.d $ra, $sp, 24 ++ addi.d $sp, $sp, 32 ++ ++ # tail jump to libc setjmp ++ ld.d $t0, $t0, 0 ++ jr $t0 ++ ++.size setjmp, .-setjmp ++ ++.globl _setjmp ++.comm _ZN14__interception12real__setjmpE,8,8 ++.type _setjmp, @function ++_setjmp: ++ ++ # Save env parameters ++ addi.d $sp, $sp, -32 ++ st.d $ra, $sp, 24 ++ st.d $fp, $sp, 16 ++ ++ # save jmp_buf ++ st.d $a0, $sp, 0 ++ ++ # obtain $sp and call tsan interceptor ++ addi.d $a0, $sp, 32 ++ bl __tsan_setjmp ++ ++ # restore jmp_buf ++ ld.d $a0, $sp, 0 ++ ++ # load libc setjmp to t0 ++ la.got $t0, _ZN14__interception12real__setjmpE ++ ++ # restore env parameters ++ ld.d $fp, $sp, 16 ++ ld.d $ra, $sp, 24 ++ addi.d $sp, $sp, 32 ++ ++ # tail jump to libc setjmp ++ ld.d $t0, $t0, 0 ++ jr $t0 ++ ++.size _setjmp, .-_setjmp ++ ++.globl sigsetjmp ++.comm _ZN14__interception14real_sigsetjmpE,8,8 ++.type sigsetjmp, @function ++sigsetjmp: ++ ++ # Save env parameters ++ addi.d $sp, $sp, -32 ++ st.d $ra, $sp, 24 ++ st.d $fp, $sp, 16 ++ ++ # save jmp_buf and savesigs ++ st.d $a0, $sp, 0 ++ st.d $a1, $sp, 8 ++ ++ # obtain $sp and call tsan interceptor ++ addi.d $a0, $sp, 32 ++ bl __tsan_setjmp ++ ++ # restore jmp_buf and savesigs ++ ld.d $a0, $sp, 0 ++ ld.d $a1, $sp, 8 ++ ++ # load libc setjmp to t0 ++ la.got $t0, _ZN14__interception14real_sigsetjmpE ++ ++ # restore env parameters ++ ld.d $fp, $sp, 16 ++ ld.d $ra, $sp, 24 ++ addi.d $sp, $sp, 32 ++ ++ # tail jump to libc setjmp ++ ld.d $t0, $t0, 0 ++ jr $t0 ++ ++.size sigsetjmp, .-sigsetjmp ++ ++.comm _ZN14__interception16real___sigsetjmpE,8,8 ++.globl __sigsetjmp ++.type __sigsetjmp, @function ++__sigsetjmp: ++ ++ # Save env parameters ++ addi.d $sp, $sp, -32 ++ st.d $ra, $sp, 24 ++ st.d $fp, $sp, 16 ++ ++ # save jmp_buf and savesigs ++ st.d $a0, $sp, 0 ++ st.d $a1, $sp, 8 ++ ++ # obtain $sp and call tsan interceptor ++ addi.d $a0, $sp, 32 ++ bl __tsan_setjmp ++ ++ # restore jmp_buf and savesigs ++ ld.d $a0, $sp, 0 ++ ld.d $a1, $sp, 8 ++ ++ # load libc setjmp to t0 ++ la.got $t0, _ZN14__interception16real___sigsetjmpE ++ ++ # restore env parameters ++ ld.d $fp, $sp, 16 ++ ld.d $ra, $sp, 24 ++ addi.d $sp, $sp, 32 ++ ++ # tail jump to libc setjmp ++ ld.d $t0, $t0, 0 ++ jr $t0 ++ ++.size __sigsetjmp, .-__sigsetjmp +diff --git a/compiler-rt.orig/lib/xray/xray_loongarch.cpp b/compiler-rt.new/lib/xray/xray_loongarch.cpp +new file mode 100644 +index 0000000..c72bb2d +--- /dev/null ++++ b/compiler-rt.new/lib/xray/xray_loongarch.cpp +@@ -0,0 +1,173 @@ ++//===-- xray_loongarch.cpp -----------------------------------------*- C++ ++//-*-===// ++// 
++// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. ++// See https://llvm.org/LICENSE.txt for license information. ++// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception ++// ++//===----------------------------------------------------------------------===// ++// ++// This file is a part of XRay, a dynamic runtime instrumentation system. ++// ++// Implementation of loongarch-specific routines. ++// ++//===----------------------------------------------------------------------===// ++#include "sanitizer_common/sanitizer_common.h" ++#include "xray_defs.h" ++#include "xray_interface_internal.h" ++#include ++ ++namespace __xray { ++ ++// The machine codes for some instructions used in runtime patching. ++enum PatchOpcodes : uint32_t { ++ PO_ADDID = 0x02c00000, // addi.d rt, rs, imm ++ PO_SD = 0x29c00000, // st.d rt, base, offset ++ PO_LU12IW = 0x14000000, // lu12i.w rt, imm ++ PO_ORI = 0x03800000, // ori rt, rs, imm ++ PO_LU32ID = 0x16000000, // lu32i.d rd, imm ++ PO_LU52ID = 0x03000000, // lu52i.d rd, rj, imm ++ PO_JIRL = 0x4c000000, // jirl rd, rj, 0 ++ PO_LD = 0x28c00000, // ld.d rt, base, offset ++ PO_B44 = 0x50002c00, // b #44 ++}; ++ ++enum RegNum : uint32_t { ++ RN_T0 = 0xC, ++ RN_T1 = 0xD, ++ RN_RA = 0x1, ++ RN_SP = 0x3, ++}; ++ ++// addi.d lu521.d ori ld.d st.d ++inline static uint32_t ++encodeInstruction_i12(uint32_t Opcode, uint32_t Rd, uint32_t Rj, ++ uint32_t Imm) XRAY_NEVER_INSTRUMENT { ++ return (Opcode | Rj << 5 | Rd | Imm << 10); ++} ++ ++// lu12i.w lu32i.d ++inline static uint32_t ++encodeInstruction_si20(uint32_t Opcode, uint32_t Rd, ++ uint32_t Imm) XRAY_NEVER_INSTRUMENT { ++ return (Opcode | Rd | Imm << 5); ++} ++ ++// jirl ++inline static uint32_t ++encodeInstruction_si16(uint32_t Opcode, uint32_t Rd, uint32_t Rj, ++ uint32_t Imm) XRAY_NEVER_INSTRUMENT { ++ return (Opcode | Rj << 5 | Rd | Imm << 10); ++} ++ ++inline static bool patchSled(const bool Enable, const uint32_t FuncId, ++ const XRaySledEntry &Sled, ++ void (*TracingHook)()) XRAY_NEVER_INSTRUMENT { ++ // When |Enable| == true, ++ // We replace the following compile-time stub (sled): ++ // ++ // xray_sled_n: ++ // B .tmpN ++ // 11 NOPs (44 bytes) ++ // .tmpN ++ // ++ // With the following runtime patch: ++ // xray_sled_n (64-bit): ++ // addi.d sp,sp, -16 ;create stack frame ++ // st.d ra, sp, 8 ;save return address ++ // lu12i.w t0,%%abs_hi20(__xray_FunctionEntry/Exit) ++ // ori %1,t0,%%abs_lo12(__xray_FunctionEntry/Exit) ++ // lu32i.d t0,%%abs64_lo20(__xray_FunctionEntry/Exit) ++ // lu52i.d t0,t0,%%abs64_hi12(__xray_FunctionEntry/Exit) ++ // lu12i.w t1,%%abs_hi20(function_id) ++ // ori %1,t1,%%abs_lo12(function_id) ;pass function id ++ // jirl ra, t0, 0 ;call Tracing hook ++ // ld.d ra, sp, 8 ;restore return address ++ // addi.d sp, sp, 16 ;delete stack frame ++ // ++ // Replacement of the first 4-byte instruction should be the last and atomic ++ // operation, so that the user code which reaches the sled concurrently ++ // either jumps over the whole sled, or executes the whole sled when the ++ // latter is ready. 
++ // ++ // When |Enable|==false, we set back the first instruction in the sled to be ++ // B #44 ++ ++ uint32_t *Address = reinterpret_cast(Sled.address()); ++ if (Enable) { ++ uint32_t LoTracingHookAddr = reinterpret_cast(TracingHook) & 0xfff; ++ uint32_t HiTracingHookAddr = ++ (reinterpret_cast(TracingHook) >> 12) & 0xfffff; ++ uint32_t HigherTracingHookAddr = ++ (reinterpret_cast(TracingHook) >> 32) & 0xfffff; ++ uint32_t HighestTracingHookAddr = ++ (reinterpret_cast(TracingHook) >> 52) & 0xfff; ++ uint32_t LoFunctionID = FuncId & 0xfff; ++ uint32_t HiFunctionID = (FuncId >> 12) & 0xfffff; ++ Address[2] = encodeInstruction_i12(PatchOpcodes::PO_SD, RegNum::RN_RA, ++ RegNum::RN_SP, 0x8); ++ Address[3] = encodeInstruction_si20(PatchOpcodes::PO_LU12IW, RegNum::RN_T0, ++ HiTracingHookAddr); ++ Address[4] = encodeInstruction_i12(PatchOpcodes::PO_ORI, RegNum::RN_T0, ++ RegNum::RN_T0, LoTracingHookAddr); ++ Address[5] = encodeInstruction_si20(PatchOpcodes::PO_LU32ID, RegNum::RN_T0, ++ HigherTracingHookAddr); ++ Address[6] = encodeInstruction_i12(PatchOpcodes::PO_LU52ID, RegNum::RN_T0, ++ RegNum::RN_T0, HighestTracingHookAddr); ++ Address[7] = encodeInstruction_si20(PatchOpcodes::PO_LU12IW, RegNum::RN_T1, ++ HiFunctionID); ++ Address[8] = encodeInstruction_i12(PatchOpcodes::PO_ORI, RegNum::RN_T1, ++ RegNum::RN_T1, LoFunctionID); ++ Address[9] = encodeInstruction_si16(PatchOpcodes::PO_JIRL, RegNum::RN_RA, ++ RegNum::RN_T0, 0); ++ Address[10] = encodeInstruction_i12(PatchOpcodes::PO_LD, RegNum::RN_RA, ++ RegNum::RN_SP, 0x8); ++ Address[11] = encodeInstruction_i12(PatchOpcodes::PO_LD, RegNum::RN_SP, ++ RegNum::RN_SP, 0x10); ++ uint32_t CreateStackSpace = encodeInstruction_i12( ++ PatchOpcodes::PO_ADDID, RegNum::RN_SP, RegNum::RN_SP, 0xfff0); ++ std::atomic_store_explicit( ++ reinterpret_cast *>(Address), CreateStackSpace, ++ std::memory_order_release); ++ } else { ++ std::atomic_store_explicit( ++ reinterpret_cast *>(Address), ++ uint32_t(PatchOpcodes::PO_B44), std::memory_order_release); ++ } ++ return true; ++} ++ ++bool patchFunctionEntry(const bool Enable, const uint32_t FuncId, ++ const XRaySledEntry &Sled, ++ void (*Trampoline)()) XRAY_NEVER_INSTRUMENT { ++ return patchSled(Enable, FuncId, Sled, Trampoline); ++} ++ ++bool patchFunctionExit(const bool Enable, const uint32_t FuncId, ++ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT { ++ return patchSled(Enable, FuncId, Sled, __xray_FunctionExit); ++} ++ ++bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId, ++ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT { ++ // FIXME: In the future we'd need to distinguish between non-tail exits and ++ // tail exits for better information preservation. ++ return patchSled(Enable, FuncId, Sled, __xray_FunctionExit); ++} ++ ++bool patchCustomEvent(const bool Enable, const uint32_t FuncId, ++ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT { ++ // FIXME: Implement in loongarch? ++ return false; ++} ++ ++bool patchTypedEvent(const bool Enable, const uint32_t FuncId, ++ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT { ++ // FIXME: Implement in loongarch? 
++ return false; ++} ++} // namespace __xray ++ ++extern "C" void __xray_ArgLoggerEntry() XRAY_NEVER_INSTRUMENT { ++ // FIXME: this will have to be implemented in the trampoline assembly file ++} +diff --git a/compiler-rt.orig/lib/xray/xray_trampoline_loongarch.S b/compiler-rt.new/lib/xray/xray_trampoline_loongarch.S +new file mode 100644 +index 0000000..7ac019b +--- /dev/null ++++ b/compiler-rt.new/lib/xray/xray_trampoline_loongarch.S +@@ -0,0 +1,129 @@ ++//===-- xray_trampoline_loongarch.s -----------------------------*- ASM -*-===// ++// ++// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. ++// See https://llvm.org/LICENSE.txt for license information. ++// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception ++// ++//===----------------------------------------------------------------------===// ++// ++// This file is a part of XRay, a dynamic runtime instrumentation system. ++// ++// This implements the loongarch-specific assembler for the trampolines. ++// ++//===----------------------------------------------------------------------===// ++ ++ .text ++ .file "xray_trampoline_loongarch.S" ++ .globl __xray_FunctionEntry ++ .p2align 2 ++ .type __xray_FunctionEntry,@function ++__xray_FunctionEntry: ++ .cfi_startproc ++ // Save argument registers before doing any actual work. ++ .cfi_def_cfa_offset 136 ++ addi.d $sp, $sp, -136 ++ st.d $ra, $sp, 128 ++ .cfi_offset 1, -8 ++ st.d $a7, $sp, 120 ++ st.d $a6, $sp, 112 ++ st.d $a5, $sp, 104 ++ st.d $a4, $sp, 96 ++ st.d $a3, $sp, 88 ++ st.d $a2, $sp, 80 ++ st.d $a1, $sp, 72 ++ st.d $a0, $sp, 64 ++ fst.d $f7, $sp, 56 ++ fst.d $f6, $sp, 48 ++ fst.d $f5, $sp, 40 ++ fst.d $f4, $sp, 32 ++ fst.d $f3, $sp, 24 ++ fst.d $f2, $sp, 16 ++ fst.d $f1, $sp, 8 ++ fst.d $f0, $sp, 0 ++ ++ ++ lu12i.w $t2, %got_hi20(_ZN6__xray19XRayPatchedFunctionE) ++ ori $t2, $t2, %got_lo12(_ZN6__xray19XRayPatchedFunctionE) ++ lu32i.d $t2, %got64_lo20(_ZN6__xray19XRayPatchedFunctionE) ++ lu52i.d $t2, $t2, %got64_hi12(_ZN6__xray19XRayPatchedFunctionE) ++ ld.d $t2, $t2, 0 ++ ++ beqz $t2, FunctionEntry_restore ++ ++ // a1=0 means that we are tracing an entry event ++ move $a1, $zero ++ // Function ID is in t1 (the first parameter). ++ move $a0, $t1 ++ jirl $ra, $t2, 0 ++ ++FunctionEntry_restore: ++ // Restore argument registers ++ fld.d $f0, $sp, 0 ++ fld.d $f1, $sp, 8 ++ fld.d $f2, $sp, 16 ++ fld.d $f3, $sp, 24 ++ fld.d $f4, $sp, 32 ++ fld.d $f5, $sp, 40 ++ fld.d $f6, $sp, 48 ++ fld.d $f7, $sp, 56 ++ ld.d $a0, $sp, 64 ++ ld.d $a1, $sp, 72 ++ ld.d $a2, $sp, 80 ++ ld.d $a3, $sp, 88 ++ ld.d $a4, $sp, 96 ++ ld.d $a5, $sp, 104 ++ ld.d $a6, $sp, 112 ++ ld.d $a7, $sp, 120 ++ ld.d $ra, $sp, 128 ++ addi.d $sp, $sp, 136 ++ jr $ra ++FunctionEntry_end: ++ .size __xray_FunctionEntry, FunctionEntry_end-__xray_FunctionEntry ++ .cfi_endproc ++ ++ .text ++ .globl __xray_FunctionExit ++ .p2align 2 ++ .type __xray_FunctionExit,@function ++__xray_FunctionExit: ++ .cfi_startproc ++ // Save return registers before doing any actual work. 
++ .cfi_def_cfa_offset 48 ++ addi.d $sp, $sp, -48 ++ st.d $ra, $sp, 40 ++ .cfi_offset 1, -8 ++ st.d $fp, $sp, 32 ++ st.d $a1, $sp, 24 ++ st.d $a0, $sp, 16 ++ fst.d $f1, $sp, 8 ++ fst.d $f0, $sp, 0 ++ ++ lu12i.w $t2, %got_hi20(_ZN6__xray19XRayPatchedFunctionE) ++ ori $t2, $t2, %got_lo12(_ZN6__xray19XRayPatchedFunctionE) ++ lu32i.d $t2, %got64_lo20(_ZN6__xray19XRayPatchedFunctionE) ++ lu52i.d $t2, $t2, %got64_hi12(_ZN6__xray19XRayPatchedFunctionE) ++ ld.d $t2, $t2, 0 ++ ++ beqz $t2, FunctionExit_restore ++ ++ // a1=1 means that we are tracing an exit event ++ ori $a1, $zero, 1 ++ // Function ID is in t1 (the first parameter). ++ move $a0, $t1 ++ jirl $ra, $t2, 0 ++ ++FunctionExit_restore: ++ // Restore return registers ++ fld.d $f0, $sp, 0 ++ fld.d $f1, $sp, 8 ++ ld.d $a1, $sp, 24 ++ ld.d $a0, $sp, 16 ++ ld.d $fp, $sp, 32 ++ ld.d $ra, $sp, 40 ++ addi.d $sp, $sp, 48 ++ jr $ra ++ ++FunctionExit_end: ++ .size __xray_FunctionExit, FunctionExit_end-__xray_FunctionExit ++ .cfi_endproc ++ +-- +2.41.0 + diff --git a/compiler-rt.spec b/compiler-rt.spec index cf2c79d8c211dd502d2850517e41a48e144e1862..65233d6980579a49646d775a513078bc2bb729ad 100644 --- a/compiler-rt.spec +++ b/compiler-rt.spec @@ -4,7 +4,7 @@ Name: compiler-rt Version: 12.0.1 -Release: 2 +Release: 3 Summary: LLVM "compiler-rt" runtime libraries License: NCSA or MIT @@ -13,6 +13,9 @@ Source0: https://github.com/llvm/llvm-project/releases/download/llvmorg-%{versio Source2: tstellar-gpg-key.asc Patch0: 0001-PATCH-compiler-rt-Workaround-libstdc-limitation-wrt..patch +Patch1000: 1000-add-loongarch64-support-not-upstream-modified.patch +Patch1001: 1001-add-loongarch64-support-not-upstream-new.patch + BuildRequires: gcc BuildRequires: gcc-c++ BuildRequires: cmake @@ -120,6 +123,9 @@ fi %endif %changelog +* Thu Aug 10 2023 herengui - 12.0.1-3 +- add loongarch64 support + * Tue Dec 20 2022 eastb233 - 12.0.1-2 - Delete run path in DSO