diff --git a/compiler-rt/lib/hwasan/CMakeLists.txt b/compiler-rt/lib/hwasan/CMakeLists.txt index 9082f60058ee7a2a6bda477298ff102b0a0dbb96..a9f6006ac05cc8d323e443759401a25ddce5de0e 100644 --- a/compiler-rt/lib/hwasan/CMakeLists.txt +++ b/compiler-rt/lib/hwasan/CMakeLists.txt @@ -14,6 +14,7 @@ set(HWASAN_RTL_SOURCES hwasan_linux.cpp hwasan_memintrinsics.cpp hwasan_poisoning.cpp + hwasan_quarantine.cpp # OHOS_LOCAL hwasan_report.cpp hwasan_setjmp_aarch64.S hwasan_setjmp_x86_64.S @@ -42,6 +43,7 @@ set(HWASAN_RTL_HEADERS hwasan_malloc_bisect.h hwasan_mapping.h hwasan_poisoning.h + hwasan_quarantine.h # OHOS_LOCAL hwasan_report.h hwasan_thread.h hwasan_thread_list.h diff --git a/compiler-rt/lib/hwasan/hwasan_allocator.cpp b/compiler-rt/lib/hwasan/hwasan_allocator.cpp index 842455150c7b3362f3940e14e10f69b544c75e4f..ebb5b4923ba6426b5a0565146d3dd920b44c91ed 100644 --- a/compiler-rt/lib/hwasan/hwasan_allocator.cpp +++ b/compiler-rt/lib/hwasan/hwasan_allocator.cpp @@ -20,6 +20,7 @@ #include "hwasan_mapping.h" #include "hwasan_malloc_bisect.h" #include "hwasan_thread.h" +#include "hwasan_thread_list.h" // OHOS_LOCAL #include "hwasan_report.h" namespace __hwasan { @@ -53,6 +54,14 @@ static uptr AlignRight(uptr addr, uptr requested_size) { return addr + kShadowAlignment - tail_size; } +// OHOS_LOCAL begin +int HwasanChunkView::AllocatedByThread() const { + if (metadata_) + return metadata_->thread_id; + return -1; +} +// OHOS_LOCAL end + uptr HwasanChunkView::Beg() const { if (metadata_ && metadata_->right_aligned) return AlignRight(block_, metadata_->get_requested_size()); @@ -80,6 +89,14 @@ void GetAllocatorStats(AllocatorStatCounters s) { allocator.GetStats(s); } +// OHOS_LOCAL begin +void SimpleThreadDeallocate(void *ptr, AllocatorCache *cache) { + CHECK(ptr); + CHECK(cache); + allocator.Deallocate(cache, ptr); +} +// OHOS_LOCAL end + uptr GetAliasRegionStart() { #if defined(HWASAN_ALIASING_MODE) constexpr uptr kAliasRegionOffset = 1ULL << (kTaggableRegionCheckShift - 1); @@ 
-160,6 +177,12 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment, meta->set_requested_size(orig_size); meta->alloc_context_id = StackDepotPut(*stack); meta->right_aligned = false; + // OHOS_LOCAL begin + if (t) + meta->thread_id = t->tid(); + else + meta->thread_id = -1; + // OHOS_LOCAL end if (zeroise) { internal_memset(allocated, 0, size); } else if (flags()->max_malloc_fill_size > 0) { @@ -295,16 +318,42 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) { TagMemoryAligned(reinterpret_cast(aligned_ptr), TaggedSize(orig_size), tag); } + +// OHOS_LOCAL begin + int aid = meta->thread_id; if (t) { - allocator.Deallocate(t->allocator_cache(), aligned_ptr); - if (auto *ha = t->heap_allocations()) - ha->push({reinterpret_cast(tagged_ptr), alloc_context_id, - free_context_id, static_cast(orig_size)}); + if (!t->TryPutInQuarantineWithDealloc(reinterpret_cast(aligned_ptr), + TaggedSize(orig_size), + alloc_context_id, free_context_id)) { + allocator.Deallocate(t->allocator_cache(), aligned_ptr); + } + if (t->AllowTracingHeapAllocation()) { + if (auto *ha = t->heap_allocations()) { + if ((flags()->heap_record_max == 0 || + orig_size <= flags()->heap_record_max) && + (flags()->heap_record_min == 0 || + orig_size >= flags()->heap_record_min)) { + ha->push({reinterpret_cast(tagged_ptr), alloc_context_id, + free_context_id, static_cast(orig_size), aid, t->tid()}); + } + } + } } else { SpinMutexLock l(&fallback_mutex); AllocatorCache *cache = &fallback_allocator_cache; + if (hwasanThreadList().AllowTracingHeapAllocation()) { + if ((flags()->heap_record_max == 0 || + orig_size <= flags()->heap_record_max) && + (flags()->heap_record_min == 0 || + orig_size >= flags()->heap_record_min)) { + hwasanThreadList().RecordFallBack( + {reinterpret_cast(tagged_ptr), alloc_context_id, + free_context_id, static_cast(orig_size), aid, 0}); + } + } allocator.Deallocate(cache, aligned_ptr); } +// OHOS_LOCAL end } static void 
*HwasanReallocate(StackTrace *stack, void *tagged_ptr_old, diff --git a/compiler-rt/lib/hwasan/hwasan_allocator.h b/compiler-rt/lib/hwasan/hwasan_allocator.h index 35c3d6b4bf434148a9a21c1fac928fd94144156c..28f4f74bdb937d80da9f134d156d0a36c60027f6 100644 --- a/compiler-rt/lib/hwasan/hwasan_allocator.h +++ b/compiler-rt/lib/hwasan/hwasan_allocator.h @@ -35,6 +35,7 @@ struct Metadata { u32 requested_size_high : 31; u32 right_aligned : 1; u32 alloc_context_id; + int thread_id; // OHOS_LOCAL u64 get_requested_size() { return (static_cast(requested_size_high) << 32) + requested_size_low; } @@ -88,6 +89,7 @@ class HwasanChunkView { uptr ActualSize() const; // Size allocated by the allocator. u32 GetAllocStackId() const; bool FromSmallHeap() const; + int AllocatedByThread() const; // OHOS_LOCAL private: uptr block_; Metadata *const metadata_; @@ -104,12 +106,16 @@ struct HeapAllocationRecord { u32 alloc_context_id; u32 free_context_id; u32 requested_size; + int alloc_thread; // OHOS_LOCAL + int free_thread; // OHOS_LOCAL }; typedef RingBuffer HeapAllocationsRingBuffer; void GetAllocatorStats(AllocatorStatCounters s); +void SimpleThreadDeallocate(void *ptr, AllocatorCache *cache); // OHOS_LOCAL + inline bool InTaggableRegion(uptr addr) { #if defined(HWASAN_ALIASING_MODE) // Aliases are mapped next to shadow so that the upper bits match the shadow diff --git a/compiler-rt/lib/hwasan/hwasan_flags.inc b/compiler-rt/lib/hwasan/hwasan_flags.inc index 18ea47f981bec9538c54130c09a4bfe2ec31305a..15f399a0a7f01ba6d436e84a94f25c2c0634cdd8 100644 --- a/compiler-rt/lib/hwasan/hwasan_flags.inc +++ b/compiler-rt/lib/hwasan/hwasan_flags.inc @@ -10,7 +10,7 @@ // //===----------------------------------------------------------------------===// #ifndef HWASAN_FLAG -# error "Define HWASAN_FLAG prior to including this file!" +# error "Define HWASAN_FLAG prior to including this file!" 
#endif // HWASAN_FLAG(Type, Name, DefaultValue, Description) @@ -38,21 +38,27 @@ HWASAN_FLAG( "bytes that will be filled with malloc_fill_byte on malloc.") HWASAN_FLAG(bool, free_checks_tail_magic, 1, - "If set, free() will check the magic values " - "to the right of the allocated object " - "if the allocation size is not a divident of the granule size") + "If set, free() will check the magic values " + "to the right of the allocated object " + "if the allocation size is not a divident of the granule size") HWASAN_FLAG( int, max_free_fill_size, 0, "HWASan allocator flag. max_free_fill_size is the maximal amount of " "bytes that will be filled with free_fill_byte during free.") HWASAN_FLAG(int, malloc_fill_byte, 0xbe, - "Value used to fill the newly allocated memory.") -HWASAN_FLAG(int, free_fill_byte, 0x55, - "Value used to fill deallocated memory.") + "Value used to fill the newly allocated memory.") +HWASAN_FLAG(int, free_fill_byte, 0x55, "Value used to fill deallocated memory.") HWASAN_FLAG(int, heap_history_size, 1023, - "The number of heap (de)allocations remembered per thread. " - "Affects the quality of heap-related reports, but not the ability " - "to find bugs.") + "The number of heap (de)allocations remembered per thread. " + "Affects the quality of heap-related reports, but not the ability " + "to find bugs.") +// OHOS_LOCAL begin +HWASAN_FLAG( + int, heap_history_size_main_thread, 102300, + "The number of heap (de)allocations remembered for the main thread. " + "Affects the quality of heap-related reports, but not the ability " + "to find bugs.") +// OHOS_LOCAL end HWASAN_FLAG(bool, export_memory_stats, true, "Export up-to-date memory stats through /proc") HWASAN_FLAG(int, stack_history_size, 1024, @@ -73,7 +79,6 @@ HWASAN_FLAG(bool, malloc_bisect_dump, false, "Print all allocations within [malloc_bisect_left, " "malloc_bisect_right] range ") - // Exit if we fail to enable the AArch64 kernel ABI relaxation which allows // tagged pointers in syscalls. 
This is the default, but being able to disable // that behaviour is useful for running the testsuite on more platforms (the @@ -81,3 +86,42 @@ HWASAN_FLAG(bool, malloc_bisect_dump, false, // are untagged before the call. HWASAN_FLAG(bool, fail_without_syscall_abi, true, "Exit if fail to request relaxed syscall ABI.") + +// OHOS_LOCAL begin +HWASAN_FLAG( + bool, print_uaf_stacks_with_same_tag, true, + "Control the output content of use-after-free, deciding whether to print " + "all stack traces of matched allocations with the same tag restriction.") + +// Heap allocation information for freed threads +HWASAN_FLAG(uptr, freed_threads_history_size, 100, + "The number of freed threads can be recorded.") +HWASAN_FLAG(bool, verbose_freed_threads, false, + "Print the heap allocation information of freed threads.") + +// Limits the size of the heap memory allocated to be recorded in order to +// reduce the data. As a result, information may be missing. By default, the +// minimum/maximum threshold is not set. +HWASAN_FLAG(int, heap_record_min, 0, + "Only recording the heap memory allocation information that is >= " + "heap_record_min.") +HWASAN_FLAG(int, heap_record_max, 0, + "Only recording the heap memory allocation information that is <= " + "heap_record_max.") + +HWASAN_FLAG(int, memory_around_register_size, 128, + "When reporting, the memory content of the address in register " + "±memory_around_register_size is printed.") + +// Set the quarantine area for freed heap, which is used to detect UAF-Write and +// Overflow-Write. Provide the detection capability for dynamic libraries +// compiled without hwasan option. 
+HWASAN_FLAG(int, heap_quarantine_thread_max_count, 128, + "Set the maximum count for heap quarantine per thread.") +HWASAN_FLAG(int, heap_quarantine_min, 0, + "The freed heap size should be larger than the minimum size before " + "it is placed into the heap quarantine.") +HWASAN_FLAG(int, heap_quarantine_max, 0, + "The freed heap size should be smaller than the maximum size before " + "it is placed into the heap quarantine.") +// OHOS_LOCAL end \ No newline at end of file diff --git a/compiler-rt/lib/hwasan/hwasan_quarantine.cpp b/compiler-rt/lib/hwasan/hwasan_quarantine.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ad4488670d0a07a94c9361678cf8a5523fb08388 --- /dev/null +++ b/compiler-rt/lib/hwasan/hwasan_quarantine.cpp @@ -0,0 +1,117 @@ +//===-- hwasan_quarantine.cpp -----------------------------------*- C++-*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +// Copyright (c) 2025 Huawei Device Co., Ltd. All rights reserved. +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file is a part of HWAddressSanitizer. Provide allocation quarantine +/// ability. 
+/// +//===----------------------------------------------------------------------===// +#include "hwasan_quarantine.h" + +#include "hwasan_allocator.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +namespace __hwasan { + +void HeapQuarantineController::ClearHeapQuarantine(AllocatorCache *cache) { + CHECK(cache); + if (heap_quarantine_list_) { + DeallocateWithHeapQuarantcheck(heap_quarantine_tail_, cache); + size_t sz = RoundUpTo( + flags()->heap_quarantine_thread_max_count * sizeof(HeapQuarantine), + GetPageSizeCached()); + UnmapOrDie(heap_quarantine_list_, sz); + heap_quarantine_tail_ = 0; + heap_quarantine_list_ = nullptr; + } + heap_quarantine_list_ = nullptr; +} + +bool HeapQuarantineController::TryPutInQuarantineWithDealloc( + uptr ptr, size_t s, u32 aid, u32 fid, AllocatorCache *cache) { + if (IsInPrintf()) { + return false; + } + if ((flags()->heap_quarantine_max > 0) && + (flags()->heap_quarantine_max > s && flags()->heap_quarantine_min <= s)) { + if (UNLIKELY(flags()->heap_quarantine_thread_max_count == 0)) { + return false; + } + if (UNLIKELY(heap_quarantine_list_ == nullptr)) { + size_t sz = RoundUpTo( + flags()->heap_quarantine_thread_max_count * sizeof(HeapQuarantine), + GetPageSizeCached()); + heap_quarantine_list_ = reinterpret_cast( + MmapOrDie(sz, "HeapQuarantine", 0)); + if (UNLIKELY(heap_quarantine_list_ == nullptr)) { + return false; + } + } + PutInQuarantineWithDealloc(ptr, s, aid, fid, cache); + return true; + } + return false; +} + +void HeapQuarantineController::PutInQuarantineWithDealloc( + uptr ptr, size_t s, u32 aid, u32 fid, AllocatorCache *cache) { + if (UNLIKELY(heap_quarantine_tail_ >= + flags()->heap_quarantine_thread_max_count)) { + // free 1/3 heap_quarantine_list + u32 free_count = heap_quarantine_tail_ / 3; + u32 left_count = heap_quarantine_tail_ - free_count; + DeallocateWithHeapQuarantcheck(free_count, cache); + internal_memcpy( + (char *)heap_quarantine_list_, + (char 
*)heap_quarantine_list_ + free_count * sizeof(HeapQuarantine), + left_count * sizeof(HeapQuarantine)); + internal_memset( + (char *)heap_quarantine_list_ + left_count * sizeof(HeapQuarantine), 0, + free_count * sizeof(HeapQuarantine)); + heap_quarantine_tail_ -= free_count; + } + + heap_quarantine_list_[heap_quarantine_tail_].ptr = ptr; + heap_quarantine_list_[heap_quarantine_tail_].s = s; + heap_quarantine_list_[heap_quarantine_tail_].alloc_context_id = aid; + heap_quarantine_list_[heap_quarantine_tail_].free_context_id = fid; + heap_quarantine_tail_++; + return; +} + +void HeapQuarantineController::DeallocateWithHeapQuarantcheck( + u32 free_count, AllocatorCache *cache) { + static u64 magic; + internal_memset(&magic, flags()->free_fill_byte, sizeof(magic)); + for (u32 i = 0; i < free_count; i++) { + u64 *ptrBeg = reinterpret_cast(heap_quarantine_list_[i].ptr); + if (heap_quarantine_list_[i].s > sizeof(u64)) { + if (flags()->max_free_fill_size > 0) { + uptr fill_size = + Min(heap_quarantine_list_[i].s, (uptr)flags()->max_free_fill_size); + for (size_t j = 0; j < (fill_size - 1) / sizeof(u64); j++) { + if (ptrBeg[j] != magic) { + Printf( + "ptrBeg was re-written after free %p[%zu], %p " + "%016llx:%016llx, freed by:\n", + ptrBeg, j, &ptrBeg[j], ptrBeg[j], magic); + StackDepotGet(heap_quarantine_list_[i].free_context_id).Print(); + Printf("allocated by:\n"); + StackDepotGet(heap_quarantine_list_[i].alloc_context_id).Print(); + break; + } + } + } + } + SimpleThreadDeallocate((void *)ptrBeg, cache); + } +} + +} // namespace __hwasan \ No newline at end of file diff --git a/compiler-rt/lib/hwasan/hwasan_quarantine.h b/compiler-rt/lib/hwasan/hwasan_quarantine.h new file mode 100644 index 0000000000000000000000000000000000000000..1c4c86a26168a51a0f504169c96dc075cf85153e --- /dev/null +++ b/compiler-rt/lib/hwasan/hwasan_quarantine.h @@ -0,0 +1,49 @@ +//===-- hwasan_quarantine.cpp -----------------------------------*- C++-*-===// +// +// Part of the LLVM Project, under the 
Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +// Copyright (c) 2025 Huawei Device Co., Ltd. All rights reserved. +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file is a part of HWAddressSanitizer. Provide allocation quarantine +/// ability header. +/// +//===----------------------------------------------------------------------===// +#ifndef HWASAN_QUARANTINE_H +#define HWASAN_QUARANTINE_H +#include "hwasan_allocator.h" +namespace __hwasan { +struct HeapQuarantine { + uptr ptr; + size_t s; + u32 alloc_context_id; + u32 free_context_id; +}; +// provide heap quarant for per thread, no data race. +class HeapQuarantineController { + private: + u32 heap_quarantine_tail_; + HeapQuarantine *heap_quarantine_list_; + void PutInQuarantineWithDealloc(uptr ptr, size_t s, u32 aid, u32 fid, + AllocatorCache *cache); + void DeallocateWithHeapQuarantcheck(u32 free_count, AllocatorCache *cache); + + public: + void Init() { + heap_quarantine_tail_ = 0; + heap_quarantine_list_ = nullptr; + } + + void ClearHeapQuarantine(AllocatorCache *cache); + + bool TryPutInQuarantineWithDealloc(uptr ptr, size_t s, u32 aid, u32 fid, + AllocatorCache *cache); +}; + +} // namespace __hwasan + +#endif // HWASAN_QUARANTINE_H \ No newline at end of file diff --git a/compiler-rt/lib/hwasan/hwasan_report.cpp b/compiler-rt/lib/hwasan/hwasan_report.cpp index a59d5fef9791c2f0f2e71d80a583b5f71876733b..051c68ee2e846f5f37347652cf9321ba5ff0417b 100644 --- a/compiler-rt/lib/hwasan/hwasan_report.cpp +++ b/compiler-rt/lib/hwasan/hwasan_report.cpp @@ -139,45 +139,6 @@ class Decorator: public __sanitizer::SanitizerCommonDecorator { const char *Thread() { return Green(); } }; -static bool FindHeapAllocation(HeapAllocationsRingBuffer *rb, uptr tagged_addr, - HeapAllocationRecord *har, uptr *ring_index, - uptr *num_matching_addrs, - 
uptr *num_matching_addrs_4b) { - if (!rb) return false; - - *num_matching_addrs = 0; - *num_matching_addrs_4b = 0; - for (uptr i = 0, size = rb->size(); i < size; i++) { - auto h = (*rb)[i]; - if (h.tagged_addr <= tagged_addr && - h.tagged_addr + h.requested_size > tagged_addr) { - *har = h; - *ring_index = i; - return true; - } - - // Measure the number of heap ring buffer entries that would have matched - // if we had only one entry per address (e.g. if the ring buffer data was - // stored at the address itself). This will help us tune the allocator - // implementation for MTE. - if (UntagAddr(h.tagged_addr) <= UntagAddr(tagged_addr) && - UntagAddr(h.tagged_addr) + h.requested_size > UntagAddr(tagged_addr)) { - ++*num_matching_addrs; - } - - // Measure the number of heap ring buffer entries that would have matched - // if we only had 4 tag bits, which is the case for MTE. - auto untag_4b = [](uptr p) { - return p & ((1ULL << 60) - 1); - }; - if (untag_4b(h.tagged_addr) <= untag_4b(tagged_addr) && - untag_4b(h.tagged_addr) + h.requested_size > untag_4b(tagged_addr)) { - ++*num_matching_addrs_4b; - } - } - return false; -} - static void PrintStackAllocations(StackAllocationsRingBuffer *sa, tag_t addr_tag, uptr untagged_addr) { uptr frames = Min((uptr)flags()->stack_history_size, sa->size()); @@ -386,13 +347,22 @@ void PrintAddressDescription( if (uptr beg = chunk.Beg()) { uptr size = chunk.ActualSize(); Printf("%s[%p,%p) is a %s %s heap chunk; " - "size: %zd offset: %zd\n%s", + "size: %zd offset: %zd, Allocated By %u\n%s", // OHOS_LOCAL d.Location(), beg, beg + size, chunk.FromSmallHeap() ? "small" : "large", chunk.IsAllocated() ? 
"allocated" : "unallocated", size, untagged_addr - beg, + chunk.AllocatedByThread(), // OHOS_LOCAL d.Default()); +// OHOS_LOCAL begin + if (chunk.IsAllocated() && chunk.GetAllocStackId()) { + Printf("%s", d.Allocation()); + Printf("Currently allocated here:\n"); + Printf("%s", d.Default()); + GetStackTraceFromId(chunk.GetAllocStackId()).Print(); + } +// OHOS_LOCAL end } tag_t addr_tag = GetTagFromPointer(tagged_addr); @@ -408,8 +378,8 @@ void PrintAddressDescription( Printf("%s", d.Error()); Printf("\nCause: stack tag-mismatch\n"); Printf("%s", d.Location()); - Printf("Address %p is located in stack of thread T%zd\n", untagged_addr, - t->unique_id()); + Printf("Address %p is located in stack of thread %d\n", untagged_addr, + t->tid()); // OHOS_LOCAL Printf("%s", d.Default()); t->Announce(); @@ -447,53 +417,120 @@ void PrintAddressDescription( if (!on_stack && candidate && candidate_distance <= kCloseCandidateDistance) { ShowHeapOrGlobalCandidate(untagged_addr, candidate, left, right); + candidate = nullptr; // OHOS_LOCAL num_descriptions_printed++; } - hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { - // Scan all threads' ring buffers to find if it's a heap-use-after-free. 
- HeapAllocationRecord har; - uptr ring_index, num_matching_addrs, num_matching_addrs_4b; - if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har, - &ring_index, &num_matching_addrs, - &num_matching_addrs_4b)) { - Printf("%s", d.Error()); - Printf("\nCause: use-after-free\n"); - Printf("%s", d.Location()); - Printf("%p is located %zd bytes inside of %zd-byte region [%p,%p)\n", - untagged_addr, untagged_addr - UntagAddr(har.tagged_addr), - har.requested_size, UntagAddr(har.tagged_addr), - UntagAddr(har.tagged_addr) + har.requested_size); - Printf("%s", d.Allocation()); - Printf("freed by thread T%zd here:\n", t->unique_id()); - Printf("%s", d.Default()); - GetStackTraceFromId(har.free_context_id).Print(); - - Printf("%s", d.Allocation()); - Printf("previously allocated here:\n", t); - Printf("%s", d.Default()); - GetStackTraceFromId(har.alloc_context_id).Print(); +// OHOS_LOCAL begin + auto PrintUAF = [&](Thread *t, uptr ring_index, HeapAllocationRecord &har) { + uptr ha_untagged_addr = UntagAddr(har.tagged_addr); + Printf("%s", d.Error()); + Printf("\nPotential Cause: use-after-free\n"); + Printf("%s", d.Location()); + Printf( + "%p (rb[%d] tags:%02x) is located %zd bytes inside of %zd-byte region " + "[%p,%p)\n", + untagged_addr, ring_index, GetTagFromPointer(har.tagged_addr), + untagged_addr - ha_untagged_addr, har.requested_size, ha_untagged_addr, + ha_untagged_addr + har.requested_size); + Printf("%s", d.Allocation()); + Printf("freed by thread %d here:\n", t->tid()); + Printf("%s", d.Default()); + GetStackTraceFromId(har.free_context_id).Print(); - // Print a developer note: the index of this heap object - // in the thread's deallocation ring buffer. 
- Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ring_index + 1, - flags()->heap_history_size); - Printf("hwasan_dev_note_num_matching_addrs: %zd\n", num_matching_addrs); - Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n", - num_matching_addrs_4b); + Printf("%s", d.Allocation()); + Printf("previously allocated by thread %d here:\n", har.alloc_thread); + Printf("%s", d.Default()); + GetStackTraceFromId(har.alloc_context_id).Print(); - t->Announce(); - num_descriptions_printed++; + Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ring_index + 1, + t->IsMainThread() ? flags()->heap_history_size_main_thread + : flags()->heap_history_size); + t->Announce(); + num_descriptions_printed++; + }; + u64 record_searched = 0; + u64 record_matched = 0; + hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { + // Scan all threads' ring buffers to find if it's a heap-use-after-free. + auto *rb = t->heap_allocations(); + if (!rb) + return; + t->DisableTracingHeapAllocation(); + for (uptr i = 0, size = rb->realsize(); i < size; i++) { + auto h = (*rb)[i]; + record_searched++; + if (flags()->print_uaf_stacks_with_same_tag) { + if (h.tagged_addr <= tagged_addr && + h.tagged_addr + h.requested_size > tagged_addr) { + record_matched++; + PrintUAF(t, i, h); + } + } else { + uptr ha_untagged_addr = UntagAddr(h.tagged_addr); + if (ha_untagged_addr <= untagged_addr && + ha_untagged_addr + h.requested_size > untagged_addr) { + record_matched++; + PrintUAF(t, i, h); + } + } } + t->EnableTracingHeapAllocation(); }); - if (candidate && num_descriptions_printed == 0) { + auto PrintUAFinFreedThread = [&](HeapAllocationRecord &har) { + uptr ha_untagged_addr = UntagAddr(har.tagged_addr); + Printf( + "%p (Previously freed thread ptr tags: %02x) is located %zd " + "bytes inside of %zd-byte region [%p,%p)\n", + untagged_addr, GetTagFromPointer(har.tagged_addr), + untagged_addr - ha_untagged_addr, har.requested_size, ha_untagged_addr, + ha_untagged_addr + har.requested_size); + 
Printf("freed by thread %d here:\n", har.free_thread); + GetStackTraceFromId(har.free_context_id).Print(); + Printf("previously allocated by thread %d here:\n", har.alloc_thread); + GetStackTraceFromId(har.alloc_context_id).Print(); + num_descriptions_printed++; + }; + hwasanThreadList().VisitAllFreedRingBuffer( + [&](HeapAllocationsRingBuffer *rb) { + for (uptr i = 0, size = rb->realsize(); i < size; i++) { + auto har = (*rb)[i]; + record_searched++; + if (flags()->print_uaf_stacks_with_same_tag) { + if (har.tagged_addr <= tagged_addr && + har.tagged_addr + har.requested_size > tagged_addr) { + record_matched++; + PrintUAFinFreedThread(har); + } + } else { + if (UntagAddr(har.tagged_addr) <= untagged_addr && + UntagAddr(har.tagged_addr) + har.requested_size > + untagged_addr) { + record_matched++; + PrintUAFinFreedThread(har); + } + } + } + }); + Printf("Searched %lu records, find %lu with same addr %p\n\n", + record_searched, record_matched, untagged_addr); + if (!on_stack && candidate) { ShowHeapOrGlobalCandidate(untagged_addr, candidate, left, right); num_descriptions_printed++; } // Print the remaining threads, as an extra information, 1 line per thread. hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); }); + hwasanThreadList().PrintFreedRingBufferSummary(); + if (flags()->verbose_freed_threads) { + u32 freed_idx = 0; + hwasanThreadList().VisitAllFreedRingBuffer( + [&](HeapAllocationsRingBuffer *rb) { + Printf("RB %u: (%zd/%zu)\n", freed_idx++, rb->realsize(), rb->size()); + }); + } +// OHOS_LOCAL end if (!num_descriptions_printed) // We exhausted our possibilities. Bail out. 
@@ -583,8 +620,10 @@ void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) { const char *bug_type = "invalid-free"; const Thread *thread = GetCurrentThread(); if (thread) { - Report("ERROR: %s: %s on address %p at pc %p on thread T%zd\n", - SanitizerToolName, bug_type, untagged_addr, pc, thread->unique_id()); +// OHOS_LOCAL begin + Report("ERROR: %s: %s on address %p at pc %p on thread %d\n", + SanitizerToolName, bug_type, untagged_addr, pc, thread->tid()); +// OHOS_LOCAL end } else { Report("ERROR: %s: %s on address %p at pc %p on unknown thread\n", SanitizerToolName, bug_type, untagged_addr, pc); @@ -719,14 +758,18 @@ void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size, offset += mem_tag - in_granule_offset; } } +// OHOS_LOCAL begin Printf( - "%s of size %zu at %p tags: %02x/%02x(%02x) (ptr/mem) in thread T%zd\n", + "%s of size %zu at %p tags: %02x/%02x(%02x) (ptr/mem) in thread %d\n", is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag, - mem_tag, short_tag, t->unique_id()); + mem_tag, short_tag, t->tid()); +// OHOS_LOCAL end } else { - Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n", +// OHOS_LOCAL begin + Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread %d\n", is_store ? 
"WRITE" : "READ", access_size, untagged_addr, ptr_tag, - mem_tag, t->unique_id()); + mem_tag, t->tid()); +// OHOS_LOCAL end } if (offset != 0) Printf("Invalid access starting at offset %zu\n", offset); @@ -740,12 +783,60 @@ void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size, PrintTagsAroundAddr(tag_ptr); - if (registers_frame) + if (registers_frame) { ReportRegisters(registers_frame, pc); + ReportMemoryNearRegisters(registers_frame, pc); // OHOS_LOCAL + } ReportErrorSummary(bug_type, stack); } +// OHOS_LOCAL begin +void PrintMemoryAroundAddress(MemoryMappingLayout &proc_maps, int reg_num, + uptr addr, uptr len, bool is_pc) { + const sptr kBufSize = 4095; + char *filename = (char *)MmapOrDie(kBufSize, __func__); + MemoryMappedSegment segment(filename, kBufSize); + while (proc_maps.Next(&segment)) { + if (segment.start <= addr && addr < segment.end && segment.IsReadable()) { + if (!is_pc) { + if (reg_num < 31) + Printf("x%d(%s):\n", reg_num, segment.filename); + else + Printf("sp(%s):\n", segment.filename); + } else { + Printf("pc(%s):\n", segment.filename); + } + uptr beg = RoundDownTo(addr - (addr < len ? 
addr : len), 8); + if (segment.start > beg) + beg = segment.start; + uptr end = RoundUpTo(addr + len, 8); + if (segment.end < end) + end = segment.end; + for (uptr pos = beg; pos < end; pos += 8) { + if (pos <= addr && addr < pos + 8) + Printf("==>\t\t%p %016llx\n", pos, *(uptr *)(pos)); + else + Printf("\t\t%p %016llx\n", pos, *(uptr *)(pos)); + } + break; + } + } +} + +void ReportMemoryNearRegisters(uptr *frame, uptr pc) { + Printf("Memory near registers:\n"); + MemoryMappingLayout proc_maps(/*cache_enabled*/ true); + for (int i = 0; i <= 31; ++i) { + PrintMemoryAroundAddress(proc_maps, i, UntagAddr(frame[i]), + flags()->memory_around_register_size); + proc_maps.Reset(); + } + PrintMemoryAroundAddress(proc_maps, -1, pc, + flags()->memory_around_register_size, true); +} +// OHOS_LOCAL end + // See the frame breakdown defined in __hwasan_tag_mismatch (from // hwasan_tag_mismatch_aarch64.S). void ReportRegisters(uptr *frame, uptr pc) { diff --git a/compiler-rt/lib/hwasan/hwasan_report.h b/compiler-rt/lib/hwasan/hwasan_report.h index de86c38fc01f2d88bebcacb2b823f8b22894d4e3..9ba8a1f6658b8440287363b20863bdb8c67a8fa1 100644 --- a/compiler-rt/lib/hwasan/hwasan_report.h +++ b/compiler-rt/lib/hwasan/hwasan_report.h @@ -16,6 +16,7 @@ #define HWASAN_REPORT_H #include "sanitizer_common/sanitizer_internal_defs.h" +#include "sanitizer_common/sanitizer_procmaps.h" // OHOS_LOCAL #include "sanitizer_common/sanitizer_stacktrace.h" namespace __hwasan { @@ -27,9 +28,13 @@ void ReportInvalidFree(StackTrace *stack, uptr addr); void ReportTailOverwritten(StackTrace *stack, uptr addr, uptr orig_size, const u8 *expected); void ReportRegisters(uptr *registers_frame, uptr pc); +// OHOS_LOCAL begin +void ReportMemoryNearRegisters(uptr *registers_frame, uptr pc); +void PrintMemoryAroundAddress(MemoryMappingLayout &proc_maps, int reg_num, + uptr addr, uptr len, bool is_pc = false); +// OHOS_LOCAL end void ReportAtExitStatistics(); - } // namespace __hwasan #endif // HWASAN_REPORT_H diff --git 
a/compiler-rt/lib/hwasan/hwasan_thread.cpp b/compiler-rt/lib/hwasan/hwasan_thread.cpp index c776ae179cec2e5ed38b12a9fa3d9ab93a465c68..4be4929284d9247380093fc9db127a5f003ed374 100644 --- a/compiler-rt/lib/hwasan/hwasan_thread.cpp +++ b/compiler-rt/lib/hwasan/hwasan_thread.cpp @@ -44,9 +44,12 @@ void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size, static atomic_uint64_t unique_id; unique_id_ = atomic_fetch_add(&unique_id, 1, memory_order_relaxed); - if (auto sz = flags()->heap_history_size) +// OHOS_LOCAL begin + if (auto sz = IsMainThread() ? flags()->heap_history_size_main_thread + : flags()->heap_history_size) +// OHOS_LOCAL end heap_allocations_ = HeapAllocationsRingBuffer::New(sz); - + trace_heap_allocation_ = true; // OHOS_LOCAL #if !SANITIZER_FUCHSIA // Do not initialize the stack ring buffer just yet on Fuchsia. Threads will // be initialized before we enter the thread itself, so we will instead call @@ -54,6 +57,8 @@ void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size, InitStackRingBuffer(stack_buffer_start, stack_buffer_size); #endif InitStackAndTls(state); + tid_ = GetTid(); // OHOS_LOCAL + heap_quarantine_controller()->Init(); // OHOS_LOCAL } void Thread::InitStackRingBuffer(uptr stack_buffer_start, @@ -96,6 +101,8 @@ void Thread::ClearShadowForThreadStackAndTLS() { void Thread::Destroy() { if (flags()->verbose_threads) Print("Destroying: "); + // OHOS_LOCAL + heap_quarantine_controller()->ClearHeapQuarantine(allocator_cache()); AllocatorSwallowThreadLocalCache(allocator_cache()); ClearShadowForThreadStackAndTLS(); if (heap_allocations_) @@ -110,9 +117,17 @@ void Thread::Destroy() { } void Thread::Print(const char *Prefix) { - Printf("%sT%zd %p stack: [%p,%p) sz: %zd tls: [%p,%p)\n", Prefix, unique_id_, - (void *)this, stack_bottom(), stack_top(), - stack_top() - stack_bottom(), tls_begin(), tls_end()); +// OHOS_LOCAL begin + Printf( + "%sT%zd %p stack: [%p,%p) sz: %zd tls: [%p,%p) rb:(%zd/%u) " + "records(%llu/o:%llu) tid: 
%d\n", + Prefix, unique_id_, (void *)this, stack_bottom(), stack_top(), + stack_top() - stack_bottom(), tls_begin(), tls_end(), + heap_allocations() ? heap_allocations()->realsize() : 0, + IsMainThread() ? flags()->heap_history_size_main_thread + : flags()->heap_history_size, + all_record_count_, all_record_count_overflow_, tid_); +// OHOS_LOCAL end } static u32 xorshift(u32 state) { @@ -147,4 +162,12 @@ tag_t Thread::GenerateRandomTag(uptr num_bits) { return tag; } +// OHOS_LOCAL begin +bool Thread::TryPutInQuarantineWithDealloc(uptr ptr, size_t s, u32 aid, + u32 fid) { + return heap_quarantine_controller()->TryPutInQuarantineWithDealloc( + ptr, s, aid, fid, allocator_cache()); +} +// OHOS_LOCAL end + } // namespace __hwasan diff --git a/compiler-rt/lib/hwasan/hwasan_thread.h b/compiler-rt/lib/hwasan/hwasan_thread.h index 3db7c1a9454f30211e637c3023e2911be4f931a6..8f1126877aa92d0df59eeab2c894344d9a1d54aa 100644 --- a/compiler-rt/lib/hwasan/hwasan_thread.h +++ b/compiler-rt/lib/hwasan/hwasan_thread.h @@ -14,6 +14,7 @@ #define HWASAN_THREAD_H #include "hwasan_allocator.h" +#include "hwasan_quarantine.h" // OHOS_LOCAL #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_ring_buffer.h" @@ -48,6 +49,15 @@ class Thread { uptr tls_end() { return tls_end_; } bool IsMainThread() { return unique_id_ == 0; } +// OHOS_LOCAL begin + void inc_record(void) { + all_record_count_++; + if (all_record_count_ == 0) { + all_record_count_overflow_++; + } + } +// OHOS_LOCAL end + bool AddrIsInStack(uptr addr) { return addr >= stack_bottom_ && addr < stack_top_; } @@ -61,15 +71,29 @@ class Thread { void DisableTagging() { tagging_disabled_++; } void EnableTagging() { tagging_disabled_--; } +// OHOS_LOCAL begin + void EnableTracingHeapAllocation() { trace_heap_allocation_ = true; } + void DisableTracingHeapAllocation() { trace_heap_allocation_ = false; } + bool AllowTracingHeapAllocation() { return trace_heap_allocation_; } +// OHOS_LOCAL end + u64 
unique_id() const { return unique_id_; } - void Announce() { - if (announced_) return; - announced_ = true; - Print("Thread: "); - } + +// OHOS_LOCAL begin + int tid() const { return tid_; } + void Announce() { Print("Thread: "); } +// OHOS_LOCAL end uptr &vfork_spill() { return vfork_spill_; } +// OHOS_LOCAL begin + HeapQuarantineController *heap_quarantine_controller() { + return &heap_quarantine_controller_; + } + + bool TryPutInQuarantineWithDealloc(uptr ptr, size_t s, u32 aid, u32 fid); +// OHOS_LOCAL end + private: // NOTE: There is no Thread constructor. It is allocated // via mmap() and *must* be valid in zero-initialized state. @@ -89,6 +113,9 @@ class Thread { HeapAllocationsRingBuffer *heap_allocations_; StackAllocationsRingBuffer *stack_allocations_; +// OHOS_LOCAL + HeapQuarantineController heap_quarantine_controller_; + u64 unique_id_; // counting from zero. u32 tagging_disabled_; // if non-zero, malloc uses zero tag in this thread. @@ -97,6 +124,15 @@ class Thread { bool random_state_inited_; // Whether InitRandomState() has been called. +// OHOS_LOCAL begin + bool trace_heap_allocation_; + + int tid_ = -1; // Thread ID + + u64 all_record_count_ = 0; // Count record + u64 all_record_count_overflow_ = 0; // Whether all_record_count_ overflow. 
+// OHOS_LOCAL end + friend struct ThreadListHead; }; diff --git a/compiler-rt/lib/hwasan/hwasan_thread_list.h b/compiler-rt/lib/hwasan/hwasan_thread_list.h index 15916a802d6ee4c50c6189ad144d195f86ceba78..e039854ccaf83567ac71bd1723333752fa263c82 100644 --- a/compiler-rt/lib/hwasan/hwasan_thread_list.h +++ b/compiler-rt/lib/hwasan/hwasan_thread_list.h @@ -83,6 +83,16 @@ class HwasanThreadList { ring_buffer_size_ = RingBufferSize(); thread_alloc_size_ = RoundUpTo(ring_buffer_size_ + sizeof(Thread), ring_buffer_size_ * 2); +// OHOS_LOCAL begin + freed_rb_fallback_ = + HeapAllocationsRingBuffer::New(flags()->heap_history_size_main_thread); + + freed_rb_list_ = nullptr; + freed_rb_list_size_ = 0; + freed_rb_count_ = 0; + freed_rb_count_overflow_ = 0; + trace_heap_allocation_ = true; +// OHOS_LOCAL end } Thread *CreateCurrentThread(const Thread::InitState *state = nullptr) { @@ -127,7 +137,55 @@ class HwasanThreadList { CHECK(0 && "thread not found in live list"); } +// OHOS_LOCAL begin + void AddFreedRingBuffer(Thread *t) { + if (t->heap_allocations() == nullptr || + t->heap_allocations()->realsize() == 0) + return; + + SpinMutexLock l(&freed_rb_mutex_); + if (!freed_rb_list_) { + size_t sz = flags()->freed_threads_history_size * + sizeof(HeapAllocationsRingBuffer *); + freed_rb_list_ = reinterpret_cast<HeapAllocationsRingBuffer **>( + MmapOrDie(sz, "FreedRingBufferList")); + if (UNLIKELY(freed_rb_list_ == nullptr)) { + return; + } + } + if (freed_rb_list_size_ >= flags()->freed_threads_history_size) { + auto sz = flags()->freed_threads_history_size / 3; + for (uptr i = 0; i < sz; i++) { + if (freed_rb_list_[i]) + freed_rb_list_[i]->Delete(); + } + auto left = flags()->freed_threads_history_size - sz; + for (uptr i = 0; i < left; i++) { + freed_rb_list_[i] = freed_rb_list_[i + sz]; + } + freed_rb_list_size_ = left; + } + HeapAllocationsRingBuffer *freed_allocations_; + freed_allocations_ = HeapAllocationsRingBuffer::New( + t->IsMainThread() ? 
flags()->heap_history_size_main_thread + : flags()->heap_history_size); + + HeapAllocationsRingBuffer *rb = t->heap_allocations(); + for (uptr i = 0, size = rb->realsize(); i < size; i++) { + HeapAllocationRecord h = (*rb)[i]; + freed_allocations_->push({h.tagged_addr, h.alloc_context_id, + h.free_context_id, h.requested_size}); + } + freed_rb_list_[freed_rb_list_size_] = freed_allocations_; + freed_rb_list_size_++; + freed_rb_count_++; + if (freed_rb_count_ == 0) + freed_rb_count_overflow_++; + } +// OHOS_LOCAL end + void ReleaseThread(Thread *t) { + AddFreedRingBuffer(t); // OHOS_LOCAL RemoveThreadStats(t); t->Destroy(); DontNeedThread(t); @@ -154,6 +212,28 @@ class HwasanThreadList { for (Thread *t : live_list_) cb(t); } +// OHOS_LOCAL begin + template <class CB> + void VisitAllFreedRingBuffer(CB cb) { + DisableTracingHeapAllocation(); + SpinMutexLock l(&freed_rb_mutex_); + for (size_t i = 0; i < freed_rb_list_size_; i++) { + cb(freed_rb_list_[i]); + } + if (freed_rb_fallback_) + cb(freed_rb_fallback_); + EnableTracingHeapAllocation(); + } + + void PrintFreedRingBufferSummary(void) { + SpinMutexLock l(&freed_rb_mutex_); + Printf("freed thread count: %llu, overflow %llu, %zd left\n", + freed_rb_count_, freed_rb_count_overflow_, freed_rb_list_size_); + if (freed_rb_fallback_) + Printf("fallback count: %llu\n", freed_rb_fallback_->realsize()); + } +// OHOS_LOCAL end + void AddThreadStats(Thread *t) { SpinMutexLock l(&stats_mutex_); stats_.n_live_threads++; @@ -173,6 +253,18 @@ class HwasanThreadList { uptr GetRingBufferSize() const { return ring_buffer_size_; } +// OHOS_LOCAL begin + void RecordFallBack(HeapAllocationRecord h) { + SpinMutexLock l(&freed_rb_mutex_); + if (freed_rb_fallback_) + freed_rb_fallback_->push(h); + } + + void EnableTracingHeapAllocation() { trace_heap_allocation_ = true; } + void DisableTracingHeapAllocation() { trace_heap_allocation_ = false; } + bool AllowTracingHeapAllocation() { return trace_heap_allocation_; } +// OHOS_LOCAL end + private: Thread 
*AllocThread() { SpinMutexLock l(&free_space_mutex_); @@ -195,6 +287,16 @@ class HwasanThreadList { SpinMutex live_list_mutex_; InternalMmapVector<Thread *> live_list_; +// OHOS_LOCAL begin + SpinMutex freed_rb_mutex_; + HeapAllocationsRingBuffer **freed_rb_list_; + HeapAllocationsRingBuffer *freed_rb_fallback_; + size_t freed_rb_list_size_; + u64 freed_rb_count_; + u64 freed_rb_count_overflow_; + bool trace_heap_allocation_; +// OHOS_LOCAL end + ThreadStats stats_; SpinMutex stats_mutex_; }; diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common.h b/compiler-rt/lib/sanitizer_common/sanitizer_common.h index ed20388016fb10c6908a5315b7a2ba3b6512b97c..1b4f1bf45084233e308973a60cdbe2c1181f6081 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_common.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_common.h @@ -229,6 +229,7 @@ bool ColorizeReports(); void RemoveANSIEscapeSequencesFromString(char *buffer); void Printf(const char *format, ...) FORMAT(1, 2); void Report(const char *format, ...) FORMAT(1, 2); +bool IsInPrintf(); // OHOS_LOCAL void SetPrintfAndReportCallback(void (*callback)(const char *)); #define VReport(level, ...) \ do { \ diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp index ec874e50adc446e41de36e7622677fdc011ac59f..689a94deccc0b0869081d0073cc265ea1103d9ee 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp @@ -36,6 +36,12 @@ #include <sys/wait.h> #include <unistd.h> +// OHOS_LOCAL begin +#if SANITIZER_OHOS +#include <sigchain.h> +#endif +// OHOS_LOCAL end + #if SANITIZER_FREEBSD // The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before // that, it was never implemented. So just define it to zero. 
@@ -195,6 +201,7 @@ static void MaybeInstallSigaction(int signum, SignalHandlerType handler) { if (GetHandleSignalMode(signum) == kHandleSignalNo) return; +#if !SANITIZER_OHOS struct sigaction sigact; internal_memset(&sigact, 0, sizeof(sigact)); sigact.sa_sigaction = (sa_sigaction_t)handler; @@ -204,6 +211,20 @@ static void MaybeInstallSigaction(int signum, if (common_flags()->use_sigaltstack) sigact.sa_flags |= SA_ONSTACK; CHECK_EQ(0, internal_sigaction(signum, &sigact, nullptr)); VReport(1, "Installed the sigaction for signal %d\n", signum); +#else +// OHOS_LOCAL begin + typedef bool (*sc)(int, siginfo_t *, void *); + sc h = (sc)handler; + struct signal_chain_action sigchain = { + .sca_sigaction = h, + .sca_mask = {}, + .sca_flags = SA_SIGINFO | SA_NODEFER, + }; + // This is a void function for OHOS. When there are too many registered + // functions, an internal error is reported. CHECK is not required. + add_special_signal_handler(signum, &sigchain); +// OHOS_LOCAL end +#endif } void InstallDeadlySignalHandlers(SignalHandlerType handler) { diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_printf.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_printf.cpp index 3a9e366d2df952a131634f79bea5e08841faa585..53cc149f662c4ff34007252a8332346807b601ee 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_printf.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_printf.cpp @@ -310,10 +310,16 @@ static void NOINLINE SharedPrintfCode(bool append_pid, const char *format, format, args); } +static thread_local bool is_in_printf; // OHOS_LOCAL + +bool IsInPrintf() { return is_in_printf; } // OHOS_LOCAL + void Printf(const char *format, ...) 
{ va_list args; va_start(args, format); + is_in_printf = true; // OHOS_LOCAL SharedPrintfCode(false, format, args); + is_in_printf = false; // OHOS_LOCAL va_end(args); } diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_ring_buffer.h b/compiler-rt/lib/sanitizer_common/sanitizer_ring_buffer.h index f22e40cac28409512b8b1541afe6c75aa1353787..d2356d69cf9b393b5b7f44762804ca187db49b3e 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_ring_buffer.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_ring_buffer.h @@ -27,6 +27,7 @@ class RingBuffer { RingBuffer *RB = reinterpret_cast<RingBuffer *>(Ptr); uptr End = reinterpret_cast<uptr>(Ptr) + SizeInBytes(Size); RB->last_ = RB->next_ = reinterpret_cast<T *>(End - sizeof(T)); + RB->full_ = false; // OHOS_LOCAL return RB; } void Delete() { @@ -35,11 +36,19 @@ uptr size() const { return last_ + 1 - reinterpret_cast<T *>(reinterpret_cast<char *>(this) + - 2 * sizeof(T *)); + sizeof(RingBuffer) - sizeof(T)); // OHOS_LOCAL } +// OHOS_LOCAL begin + uptr realsize() const { + if (full_) + return size(); + return reinterpret_cast<uptr>((uptr)last_ - (uptr)next_) / sizeof(T); + } +// OHOS_LOCAL end + static uptr SizeInBytes(uptr Size) { - return Size * sizeof(T) + 2 * sizeof(T*); + return Size * sizeof(T) + sizeof(RingBuffer) - sizeof(T); // OHOS_LOCAL } uptr SizeInBytes() { return SizeInBytes(size()); } @@ -48,8 +57,10 @@ *next_ = t; next_--; // The condition below works only if sizeof(T) is divisible by sizeof(T*). - if (next_ <= reinterpret_cast<T *>(&next_)) + if (next_ <= reinterpret_cast<T *>(&next_)) { next_ = last_; + full_ = true; // OHOS_LOCAL + } } T operator[](uptr Idx) const { @@ -66,11 +77,13 @@ RingBuffer(const RingBuffer&) = delete; // Data layout: - // LNDDDDDDDD + // FLNDDDDDDDD + // F: indicates whether the ring buffer is full // D: data elements. // L: last_, always points to the last data element. 
// N: next_, initially equals to last_, is decremented on every push, // wraps around if it's less or equal than its own address. + bool full_; T *last_; T *next_; T data_[1]; // flexible array. diff --git a/compiler-rt/test/hwasan/TestCases/heap-buffer-overflow.c b/compiler-rt/test/hwasan/TestCases/heap-buffer-overflow.c index ff52a4bf298c613623ff233d054af6cdf2e5b134..8bbf1eb9a3969dca97e232d3f288357ed4a8d4ef 100644 --- a/compiler-rt/test/hwasan/TestCases/heap-buffer-overflow.c +++ b/compiler-rt/test/hwasan/TestCases/heap-buffer-overflow.c @@ -50,10 +50,18 @@ int main(int argc, char **argv) { // CHECKm30: is located 30 bytes to the left of 30-byte region // // CHECKMm30: is a large allocated heap chunk; size: 1003520 offset: -30 +// OHOS_LOCAL begin +// CHECKMm30: Currently allocated here: +// CHECKMm30: #0 {{[0x]+}}{{.*}} +// OHOS_LOCAL end // CHECKMm30: Cause: heap-buffer-overflow // CHECKMm30: is located 30 bytes to the left of 1000000-byte region // // CHECKM: is a large allocated heap chunk; size: 1003520 offset: 1000000 +// OHOS_LOCAL begin +// CHECKM: Currently allocated here: +// CHECKM: #0 {{[0x]+}}{{.*}} +// OHOS_LOCAL end // CHECKM: Cause: heap-buffer-overflow // CHECKM: is located 0 bytes to the right of 1000000-byte region // diff --git a/compiler-rt/test/hwasan/TestCases/rich-stack.c b/compiler-rt/test/hwasan/TestCases/rich-stack.c index c6c2f9bca66915abd0fb9b3b41dcea91be3b18fd..8f9f0834d56bdba5f8a9526854ce10fa67f3b225 100644 --- a/compiler-rt/test/hwasan/TestCases/rich-stack.c +++ b/compiler-rt/test/hwasan/TestCases/rich-stack.c @@ -64,4 +64,5 @@ int main(int argc, char **argv) { // R321: in BAR // R321-NEXT: in FOO // R321-NEXT: in main -// R321: is located in stack of thread T0 +// OHOS_LOCAL +// R321: is located in stack of thread {{.*}} diff --git a/compiler-rt/test/hwasan/TestCases/stack-uar.c b/compiler-rt/test/hwasan/TestCases/stack-uar.c index 3663eac5d2685897bd7d31cc1dcf1913bcb62b32..8d01fdecc8f537e061b9972890165589fd36062b 100644 --- 
a/compiler-rt/test/hwasan/TestCases/stack-uar.c +++ b/compiler-rt/test/hwasan/TestCases/stack-uar.c @@ -38,14 +38,16 @@ int main() { // CHECK: is located in stack of thread // CHECK: Potentially referenced stack objects: // CHECK-NEXT: zzz in buggy {{.*}}stack-uar.c:[[@LINE-20]] - // CHECK-NEXT: Memory tags around the buggy address + // OHOS_LOCAL + // CHECK: Memory tags around the buggy address // NOSYM: Previously allocated frames: // NOSYM-NEXT: record_addr:0x{{.*}} record:0x{{.*}} ({{.*}}/stack-uar.c.tmp+0x{{.*}}){{$}} // NOSYM-NEXT: record_addr:0x{{.*}} record:0x{{.*}} ({{.*}}/stack-uar.c.tmp+0x{{.*}}){{$}} // NOSYM-NEXT: record_addr:0x{{.*}} record:0x{{.*}} ({{.*}}/stack-uar.c.tmp+0x{{.*}}){{$}} // NOSYM-NEXT: record_addr:0x{{.*}} record:0x{{.*}} ({{.*}}/stack-uar.c.tmp+0x{{.*}}){{$}} - // NOSYM-NEXT: Memory tags around the buggy address + // OHOS_LOCAL + // NOSYM: Memory tags around the buggy address // CHECK: SUMMARY: HWAddressSanitizer: tag-mismatch {{.*}} in main } diff --git a/compiler-rt/test/hwasan/TestCases/stack-uas.c b/compiler-rt/test/hwasan/TestCases/stack-uas.c index 7f5a6f26d0675bfc86f977cda973e466dd66b2c6..096347479dcf4ae8cc9777290075e8efef2fc202 100644 --- a/compiler-rt/test/hwasan/TestCases/stack-uas.c +++ b/compiler-rt/test/hwasan/TestCases/stack-uas.c @@ -58,14 +58,16 @@ int main() { // CHECK: is located in stack of thread // CHECK: Potentially referenced stack objects: // CHECK-NEXT: zzz in buggy {{.*}}stack-uas.c:[[@LINE-17]] - // CHECK-NEXT: Memory tags around the buggy address + // OHOS_LOCAL + // CHECK: Memory tags around the buggy address // NOSYM: Previously allocated frames: // NOSYM-NEXT: record_addr:0x{{.*}} record:0x{{.*}} ({{.*}}/stack-uas.c.tmp+0x{{.*}}){{$}} // NOSYM-NEXT: record_addr:0x{{.*}} record:0x{{.*}} ({{.*}}/stack-uas.c.tmp+0x{{.*}}){{$}} // NOSYM-NEXT: record_addr:0x{{.*}} record:0x{{.*}} ({{.*}}/stack-uas.c.tmp+0x{{.*}}){{$}} // NOSYM-NEXT: record_addr:0x{{.*}} record:0x{{.*}} ({{.*}}/stack-uas.c.tmp+0x{{.*}}){{$}} 
- // NOSYM-NEXT: Memory tags around the buggy address + // OHOS_LOCAL + // NOSYM: Memory tags around the buggy address // CHECK: SUMMARY: HWAddressSanitizer: tag-mismatch {{.*}} in buggy } diff --git a/compiler-rt/test/hwasan/TestCases/thread-uaf.c b/compiler-rt/test/hwasan/TestCases/thread-uaf.c index c368882f45896f5692cb5c7ead7ae1ced5402c90..dd98013b388f7135248affc36f5663b38fb36880 100644 --- a/compiler-rt/test/hwasan/TestCases/thread-uaf.c +++ b/compiler-rt/test/hwasan/TestCases/thread-uaf.c @@ -28,12 +28,15 @@ void *Deallocate(void *arg) { void *Use(void *arg) { x[5] = 42; // CHECK: ERROR: HWAddressSanitizer: tag-mismatch on address - // CHECK: WRITE of size 1 {{.*}} in thread T3 - // CHECK: thread-uaf.c:[[@LINE-3]] + // OHOS_LOCAL + // CHECK: WRITE of size 1 {{.*}} in thread {{.*}} + // CHECK: thread-uaf.c:[[@LINE-4]] // CHECK: Cause: use-after-free - // CHECK: freed by thread T2 here + // OHOS_LOCAL + // CHECK: freed by thread {{.*}} here // CHECK: in Deallocate - // CHECK: previously allocated here: + // OHOS_LOCAL + // CHECK: previously allocated by thread {{.*}} here: // CHECK: in Allocate // CHECK-DAG: Thread: T2 0x // CHECK-DAG: Thread: T3 0x diff --git a/compiler-rt/test/hwasan/TestCases/use-after-free-and-overflow.c b/compiler-rt/test/hwasan/TestCases/use-after-free-and-overflow.c index c08b00fc35ace6cc28b6786a200e5f386ad8da9a..acdee4762df9d9a29b560f0e2bf142b46430763d 100644 --- a/compiler-rt/test/hwasan/TestCases/use-after-free-and-overflow.c +++ b/compiler-rt/test/hwasan/TestCases/use-after-free-and-overflow.c @@ -58,4 +58,5 @@ int main(int argc, char **argv) { // CHECK-NOT: Cause: heap-buffer-overflow // CHECK: Cause: use-after-free -// CHECK-NOT: Cause: heap-buffer-overflow +// OHOS_LOCAL +// CHECK: Cause: heap-buffer-overflow diff --git a/compiler-rt/test/hwasan/TestCases/wild-free-close.c b/compiler-rt/test/hwasan/TestCases/wild-free-close.c index 51eb949dcdc94e9717716b36403746baca4ca9ae..34bc0192bf159202392f3b868cf2993ce2eea385 100644 --- 
a/compiler-rt/test/hwasan/TestCases/wild-free-close.c +++ b/compiler-rt/test/hwasan/TestCases/wild-free-close.c @@ -11,9 +11,10 @@ int main() { fprintf(stderr, "ALLOC %p\n", __hwasan_tag_pointer(p, 0)); // CHECK: ALLOC {{[0x]+}}[[ADDR:.*]] free(p - 8); - // CHECK: ERROR: HWAddressSanitizer: invalid-free on address {{.*}} at pc {{[0x]+}}[[PC:.*]] on thread T{{[0-9]+}} + // OHOS_LOCAL + // CHECK: ERROR: HWAddressSanitizer: invalid-free on address {{.*}} at pc {{[0x]+}}[[PC:.*]] on thread {{.*}} // CHECK: #0 {{[0x]+}}{{.*}}[[PC]] in {{.*}}free - // CHECK: #1 {{.*}} in main {{.*}}wild-free-close.c:[[@LINE-3]] + // CHECK: #1 {{.*}} in main {{.*}}wild-free-close.c:[[@LINE-4]] // CHECK: is located 8 bytes to the left of 1-byte region [{{[0x]+}}{{.*}}[[ADDR]] // CHECK-NOT: Segmentation fault // CHECK-NOT: SIGSEGV diff --git a/compiler-rt/test/hwasan/TestCases/wild-free-realloc.c b/compiler-rt/test/hwasan/TestCases/wild-free-realloc.c index 19d2943e4c51c2c914eccb2f6c862e5e8f80f90a..8f47e46fca1f3be50bfe7fe900171a7b251b8d19 100644 --- a/compiler-rt/test/hwasan/TestCases/wild-free-realloc.c +++ b/compiler-rt/test/hwasan/TestCases/wild-free-realloc.c @@ -7,9 +7,10 @@ int main() { __hwasan_enable_allocator_tagging(); char *p = (char *)malloc(1); realloc(p + 0x10000000000, 2); - // CHECK: ERROR: HWAddressSanitizer: invalid-free on address {{.*}} at pc {{[0x]+}}[[PC:.*]] on thread T{{[0-9]+}} + // OHOS_LOCAL + // CHECK: ERROR: HWAddressSanitizer: invalid-free on address {{.*}} at pc {{[0x]+}}[[PC:.*]] on thread {{.*}} // CHECK: #0 {{[0x]+}}{{.*}}[[PC]] in {{.*}}realloc - // CHECK: #1 {{.*}} in main {{.*}}wild-free-realloc.c:[[@LINE-3]] + // CHECK: #1 {{.*}} in main {{.*}}wild-free-realloc.c:[[@LINE-4]] // CHECK-NOT: Segmentation fault // CHECK-NOT: SIGSEGV return 0; diff --git a/compiler-rt/test/hwasan/TestCases/wild-free-shadow.c b/compiler-rt/test/hwasan/TestCases/wild-free-shadow.c index 7810e26dfffd1f5d6ea9b8a0dc33565f978c87b9..a6cd82443247c8b4616c311f8d59efc36f8664ac 100644 
--- a/compiler-rt/test/hwasan/TestCases/wild-free-shadow.c +++ b/compiler-rt/test/hwasan/TestCases/wild-free-shadow.c @@ -7,9 +7,10 @@ extern void *__hwasan_shadow_memory_dynamic_address; int main() { char *p = (char *)malloc(1); free(__hwasan_shadow_memory_dynamic_address); - // CHECK: ERROR: HWAddressSanitizer: invalid-free on address {{[0x]+}}[[PTR:.*]] at pc {{[0x]+}}[[PC:.*]] on thread T{{[0-9]+}} + // OHOS_LOCAL + // CHECK: ERROR: HWAddressSanitizer: invalid-free on address {{[0x]+}}[[PTR:.*]] at pc {{[0x]+}}[[PC:.*]] on thread {{.*}} // CHECK: #0 {{[0x]+}}{{.*}}[[PC]] in {{.*}}free - // CHECK: #1 {{.*}} in main {{.*}}wild-free-shadow.c:[[@LINE-3]] + // CHECK: #1 {{.*}} in main {{.*}}wild-free-shadow.c:[[@LINE-4]] // CHECK: {{[0x]+}}{{.*}}[[PTR]] is HWAsan shadow memory. // CHECK-NOT: Segmentation fault // CHECK-NOT: SIGSEGV diff --git a/compiler-rt/test/hwasan/TestCases/wild-free.c b/compiler-rt/test/hwasan/TestCases/wild-free.c index a38822c2f8609b1aeba25141ec19f736a93e39c2..50678142089b21a789e531f7b0f83bb25c8434ed 100644 --- a/compiler-rt/test/hwasan/TestCases/wild-free.c +++ b/compiler-rt/test/hwasan/TestCases/wild-free.c @@ -7,9 +7,10 @@ int main() { __hwasan_enable_allocator_tagging(); char *p = (char *)malloc(1); free(p + 0x10000000000); - // CHECK: ERROR: HWAddressSanitizer: invalid-free on address {{.*}} at pc {{[0x]+}}[[PC:.*]] on thread T{{[0-9]+}} + // OHOS_LOCAL + // CHECK: ERROR: HWAddressSanitizer: invalid-free on address {{.*}} at pc {{[0x]+}}[[PC:.*]] on thread {{.*}} // CHECK: #0 {{[0x]+}}{{.*}}[[PC]] in {{.*}}free - // CHECK: #1 {{.*}} in main {{.*}}wild-free.c:[[@LINE-3]] + // CHECK: #1 {{.*}} in main {{.*}}wild-free.c:[[@LINE-4]] // CHECK-NOT: Segmentation fault // CHECK-NOT: SIGSEGV return 0;