From 50c2c5c0fa7e4f8ba9f2cc1049522b661b88ba5c Mon Sep 17 00:00:00 2001 From: h00799777 Date: Wed, 4 Jun 2025 15:29:06 +0800 Subject: [PATCH] cmc gc generation. Issue: https://gitee.com/openharmony/arkcompiler_ets_runtime/issues/ICAJ0Z Change-Id: I0bd6580966e752449702eaf41b9288f497264c7f Signed-off-by: henrybill30 --- .../base_runtime/base_runtime.cpp | 4 +- common_components/common_runtime/src/BUILD.gn | 4 + .../src/heap/allocator/alloc_buffer.h | 2 +- .../src/heap/allocator/allocator.cpp | 6 +- .../src/heap/allocator/region_desc.h | 73 ++- .../src/heap/allocator/region_manager.cpp | 473 ++++-------------- .../src/heap/allocator/region_manager.h | 185 ++----- .../src/heap/allocator/region_space.cpp | 178 ++++++- .../src/heap/allocator/region_space.h | 168 ++++++- .../src/heap/collector/gc_request.cpp | 1 + .../src/heap/collector/gc_request.h | 1 + .../src/heap/collector/region_bitmap.h | 6 +- .../src/heap/collector/region_rset.h | 109 ++++ .../src/heap/collector/trace_collector.cpp | 51 +- .../src/heap/collector/trace_collector.h | 1 + .../common_runtime/src/heap/heap.cpp | 5 + .../common_runtime/src/heap/heap.h | 4 + .../src/heap/space/from_space.cpp | 183 +++++++ .../src/heap/space/from_space.h | 149 ++++++ .../src/heap/space/mature_space.cpp | 32 ++ .../src/heap/space/mature_space.h | 115 +++++ .../src/heap/space/regional_space.h | 44 ++ .../src/heap/space/to_space.cpp | 46 ++ .../common_runtime/src/heap/space/to_space.h | 118 +++++ .../src/heap/space/young_space.cpp | 40 ++ .../src/heap/space/young_space.h | 127 +++++ .../common_runtime/src/heap/verification.cpp | 23 +- .../src/heap/w_collector/copy_barrier.cpp | 2 +- .../src/heap/w_collector/enum_barrier.cpp | 5 +- .../src/heap/w_collector/idle_barrier.cpp | 26 + .../src/heap/w_collector/idle_barrier.h | 2 + .../heap/w_collector/post_trace_barrier.cpp | 4 +- .../heap/w_collector/preforward_barrier.cpp | 2 +- .../src/heap/w_collector/trace_barrier.cpp | 3 + .../src/heap/w_collector/w_collector.cpp | 120 ++++- .../src/mutator/satb_buffer.cpp | 3 + common_components/heap/heap_allocator.cpp | 16 +- ecmascript/compiler/call_signature.cpp | 3 +- ecmascript/compiler/stub_builder.cpp | 2 +- ecmascript/element_accessor-inl.h | 6 +- ecmascript/element_accessor.cpp | 2 +- ecmascript/ic/profile_type_info.h | 3 +- ecmascript/mem/barriers-inl.h | 3 +- ecmascript/mem/barriers.cpp | 4 +- ecmascript/mem/barriers.h | 2 +- ecmascript/stubs/runtime_stubs.cpp | 4 +- ecmascript/stubs/runtime_stubs.h | 2 +- ecmascript/tagged_array-inl.h | 5 +- ecmascript/tagged_array.h | 5 + 49 files changed, 1723 insertions(+), 649 deletions(-) create mode 100644 common_components/common_runtime/src/heap/collector/region_rset.h create mode 100644 common_components/common_runtime/src/heap/space/from_space.cpp create mode 100644 common_components/common_runtime/src/heap/space/from_space.h create mode 100644 common_components/common_runtime/src/heap/space/mature_space.cpp create mode 100644 common_components/common_runtime/src/heap/space/mature_space.h create mode 100644 common_components/common_runtime/src/heap/space/regional_space.h create mode 100644 common_components/common_runtime/src/heap/space/to_space.cpp create mode 100644 common_components/common_runtime/src/heap/space/to_space.h create mode 100644 common_components/common_runtime/src/heap/space/young_space.cpp create mode 100644 common_components/common_runtime/src/heap/space/young_space.h diff --git a/common_components/base_runtime/base_runtime.cpp b/common_components/base_runtime/base_runtime.cpp index 
488fa93326..4e43776e3e 100755 --- a/common_components/base_runtime/base_runtime.cpp +++ b/common_components/base_runtime/base_runtime.cpp @@ -182,11 +182,11 @@ void BaseRuntime::RequestGC(GcType type) { switch (type) { case GcType::SYNC: { - HeapManager::RequestGC(GC_REASON_USER, false); + HeapManager::RequestGC(GC_REASON_YOUNG, false); break; } case GcType::ASYNC: { - HeapManager::RequestGC(GC_REASON_USER, true); + HeapManager::RequestGC(GC_REASON_YOUNG, true); break; } case GcType::FULL: { diff --git a/common_components/common_runtime/src/BUILD.gn b/common_components/common_runtime/src/BUILD.gn index a9c9d0a323..2486122926 100755 --- a/common_components/common_runtime/src/BUILD.gn +++ b/common_components/common_runtime/src/BUILD.gn @@ -54,6 +54,10 @@ source_Heap = [ "heap/w_collector/preforward_barrier.cpp", "heap/w_collector/post_trace_barrier.cpp", "heap/collector/trace_collector.cpp", + "heap/space/from_space.cpp", + "heap/space/mature_space.cpp", + "heap/space/to_space.cpp", + "heap/space/young_space.cpp", "common/page_cache.cpp", ] diff --git a/common_components/common_runtime/src/heap/allocator/alloc_buffer.h b/common_components/common_runtime/src/heap/allocator/alloc_buffer.h index 7d7c6871cc..8f4cfa7831 100755 --- a/common_components/common_runtime/src/heap/allocator/alloc_buffer.h +++ b/common_components/common_runtime/src/heap/allocator/alloc_buffer.h @@ -43,7 +43,7 @@ public: tlRegion_, tlRegion_->GetRegionStart(), tlRegion_->GetRegionAllocatedSize()); tlRegion_ = RegionDesc::NullRegion(); } - void RefershRegion(); + void ClearThreadLocalRegion(); bool SetPreparedRegion(RegionDesc* newPreparedRegion) { diff --git a/common_components/common_runtime/src/heap/allocator/allocator.cpp b/common_components/common_runtime/src/heap/allocator/allocator.cpp index 9a99c64fc0..50d45fa6d7 100755 --- a/common_components/common_runtime/src/heap/allocator/allocator.cpp +++ b/common_components/common_runtime/src/heap/allocator/allocator.cpp @@ -80,8 +80,8 @@ PagePool& PagePool::Instance() noexcept Allocator* Allocator::CreateAllocator() { - RegionSpace* regionSpace = new (std::nothrow) RegionSpace(); - LOGF_CHECK(regionSpace != nullptr) << "New RegionSpace failed"; - return regionSpace; + RegionSpace* heapSpace = new (std::nothrow) RegionSpace(); + LOGF_CHECK(heapSpace != nullptr) << "New RegionSpace failed"; + return heapSpace; } } // namespace panda diff --git a/common_components/common_runtime/src/heap/allocator/region_desc.h b/common_components/common_runtime/src/heap/allocator/region_desc.h index 77ff33fe08..a7942baf63 100755 --- a/common_components/common_runtime/src/heap/allocator/region_desc.h +++ b/common_components/common_runtime/src/heap/allocator/region_desc.h @@ -36,6 +36,7 @@ #include "common_components/common_runtime/src/heap/collector/copy_data_manager.h" #include "common_components/common_runtime/src/heap/collector/gc_infos.h" #include "common_components/common_runtime/src/heap/collector/region_bitmap.h" +#include "common_components/common_runtime/src/heap/collector/region_rset.h" #include "common_components/log/log.h" #include "securec.h" #ifdef ARKCOMMON_ASAN_SUPPORT @@ -106,6 +107,7 @@ public: metadata.freeSlot = nullptr; metadata.regionEnd = reinterpret_cast(nullptr); metadata.toSpaceRegion = false; + metadata.regionRSet = nullptr; } static inline RegionDesc* NullRegion() { @@ -380,6 +382,28 @@ public: } } + RegionRSet* GetRSet() + { + return metadata.regionRSet; + } + + void ClearRSet() + { + metadata.regionRSet->ClearCardTable(); + } + + bool MarkRSetCardTable(BaseObject* 
obj) + { + size_t offset = GetAddressOffset(reinterpret_cast(obj)); + return metadata.regionRSet->MarkCardTable(offset); + } + + bool IsInRSet(BaseObject* obj) + { + size_t offset = GetAddressOffset(reinterpret_cast(obj)); + return metadata.regionRSet->IsMarkedCard(offset); + } + ALWAYS_INLINE_CC size_t GetAddressOffset(HeapAddress address) { DCHECK_CC(GetRegionStart() <= address); @@ -407,6 +431,7 @@ public: LONE_FROM_REGION, EXEMPTED_FROM_REGION, TO_REGION, + MATURE_REGION, // pinned object will not be forwarded by concurrent copying gc. FULL_PINNED_REGION, @@ -542,10 +567,15 @@ public: void VisitAllObjectsWithFixedSize(size_t cellCount, const std::function&& func); void VisitAllObjectsBeforeFix(const std::function&& func); bool VisitLiveObjectsUntilFalse(const std::function&& func); + void VisitRememberSet(const std::function& func); // reset so that this region can be reused for allocation void InitFreeUnits() { + if (metadata.regionRSet != nullptr) { + delete metadata.regionRSet; + metadata.regionRSet = nullptr; + } size_t nUnit = GetUnitCount(); UnitInfo* unit = reinterpret_cast(this) - (nUnit - 1); for (size_t i = 0; i < nUnit; ++i) { @@ -591,7 +621,7 @@ public: // These interfaces are used to make sure the writing operations of value in C++ Bit Field will be atomic. void SetUnitRole(UnitRole role) { - metadata.unitBits.AtomicSetValue(0, BITS_4, static_cast(role)); + metadata.unitBits.AtomicSetValue(0, BITS_5, static_cast(role)); } void SetRegionType(RegionType type) { @@ -723,6 +753,37 @@ public: return GetFixLine() == GetRegionStart(); } + bool IsInRecentSpace() const + { + RegionType type = GetRegionType(); + return type == RegionType::THREAD_LOCAL_REGION || type == RegionType::RECENT_FULL_REGION; + } + + bool IsInYoungSpace() const + { + RegionType type = GetRegionType(); + return type == RegionType::THREAD_LOCAL_REGION || type == RegionType::RECENT_FULL_REGION || + type == RegionType::FROM_REGION || type == RegionType::EXEMPTED_FROM_REGION; + } + + bool IsInFromSpace() const + { + RegionType type = GetRegionType(); + return type == RegionType::FROM_REGION || type == RegionType::EXEMPTED_FROM_REGION; + } + + bool IsInToSpace() const + { + RegionType type = GetRegionType(); + return type == RegionType::TO_REGION; + } + + bool IsInMatureSpace() const + { + RegionType type = GetRegionType(); + return type == RegionType::MATURE_REGION; + } + int32_t IncRawPointerObjectCount() { int32_t oldCount = __atomic_fetch_add(&metadata.rawPointerObjectCount, 1, __ATOMIC_SEQ_CST); @@ -938,7 +999,7 @@ private: BIT_OFFSET_ENQUEUED_REGION = 6, BIT_OFFSET_RESURRECTED_REGION = 7, BIT_OFFSET_FIXED_REGION = 8, - BIT_OFFSET_REGION_CELLCOUNT = 9, + BIT_OFFSET_REGION_CELLCOUNT = 9 }; struct ObjectSlot { @@ -982,11 +1043,13 @@ private: RegionDesc* ownerRegion; // if unit is SUBORDINATE_UNIT }; + RegionRSet* regionRSet = nullptr;; + // the writing operation in C++ Bit-Field feature is not atomic, the we wants to // change the value, we must use specific interface implenmented by BitFields. union { struct { - uint8_t unitRole : BITS_4; + uint8_t unitRole : BITS_5; }; BitFields unitBits; }; @@ -1063,7 +1126,7 @@ private: ~UnitInfo() = delete; // These interfaces are used to make sure the writing operations of value in C++ Bit Field will be atomic. 
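The hunks above wire a per-region remembered set into RegionDesc: InitRegion now allocates a RegionRSet (declared in the new region_rset.h, whose contents are not shown in this section), and MarkRSetCardTable/IsInRSet translate an object address into an offset-based card lookup. The following is only a minimal sketch of such a card table, matching the calls this patch relies on (ClearCardTable, MarkCardTable, IsMarkedCard, VisitAllMarkedCard); the card size, the atomic flags, and the container are assumptions rather than the actual region_rset.h implementation.

// Illustrative sketch only -- not the RegionRSet shipped in region_rset.h.
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <vector>

class RegionRSetSketch {
public:
    static constexpr size_t CARD_SIZE = 512;  // bytes covered by one card (assumed value)

    explicit RegionRSetSketch(size_t regionSize)
        : cards_((regionSize + CARD_SIZE - 1) / CARD_SIZE) {}

    // Returns true only for the caller that flips the card from clean to dirty,
    // mirroring how MarkRSetCardTable(obj) reports whether a store was newly recorded.
    bool MarkCardTable(size_t offset)
    {
        return !cards_[offset / CARD_SIZE].exchange(true, std::memory_order_relaxed);
    }

    bool IsMarkedCard(size_t offset) const
    {
        return cards_[offset / CARD_SIZE].load(std::memory_order_relaxed);
    }

    void ClearCardTable()
    {
        for (auto& card : cards_) {
            card.store(false, std::memory_order_relaxed);
        }
    }

    // Hands the base address of every dirty card to the visitor; the real
    // VisitAllMarkedCard additionally resolves the objects inside each card.
    void VisitAllMarkedCard(const std::function<void(uintptr_t)>& func, uintptr_t regionStart) const
    {
        for (size_t i = 0; i < cards_.size(); ++i) {
            if (cards_[i].load(std::memory_order_relaxed)) {
                func(regionStart + i * CARD_SIZE);
            }
        }
    }

private:
    std::vector<std::atomic<bool>> cards_;  // one dirty flag per card of this region
};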
- void SetUnitRole(UnitRole role) { metadata_.unitBits.AtomicSetValue(0, BITS_4, static_cast(role)); } + void SetUnitRole(UnitRole role) { metadata_.unitBits.AtomicSetValue(0, BITS_5, static_cast(role)); } void SetRegionType(RegionType type) { metadata_.regionBits.AtomicSetValue(RegionBitOffset::BIT_OFFSET_REGION_TYPE, BITS_5, @@ -1116,6 +1179,7 @@ private: metadata.liveByteCount = 0; metadata.liveInfo = nullptr; metadata.freeSlot = nullptr; + metadata.regionRSet = nullptr; SetRegionType(RegionType::FREE_REGION); SetUnitRole(uClass); ClearTraceCopyFixLine(); @@ -1129,6 +1193,7 @@ private: void InitRegion(size_t nUnit, UnitRole uClass) { InitRegionDesc(nUnit, uClass); + metadata.regionRSet = new RegionRSet(GetRegionSize()); // initialize region's subordinate units. UnitInfo* unit = reinterpret_cast(this) - (nUnit - 1); diff --git a/common_components/common_runtime/src/heap/allocator/region_manager.cpp b/common_components/common_runtime/src/heap/allocator/region_manager.cpp index 0a9ee44038..981ced99a0 100755 --- a/common_components/common_runtime/src/heap/allocator/region_manager.cpp +++ b/common_components/common_runtime/src/heap/allocator/region_manager.cpp @@ -77,30 +77,6 @@ const size_t RegionManager::MAX_UNIT_COUNT_PER_REGION = (128 * KB) / panda::ARK_ // size of huge page is 2048KB. const size_t RegionManager::HUGE_PAGE = (2048 * KB) / panda::ARK_COMMON_PAGE_SIZE; -class CopyTask : public Task { -public: - CopyTask(int32_t id, RegionManager& manager, RegionDesc& region, size_t regionCnt, TaskPackMonitor &monitor) - : Task(id), regionManager_(manager), startRegion_(region), regionCount_(regionCnt), monitor_(monitor) {} - - ~CopyTask() override = default; - - bool Run([[maybe_unused]] uint32_t threadIndex) override - { - // set current thread as a gc thread. 
- ThreadLocal::SetThreadType(ThreadType::GC_THREAD); - regionManager_.ParallelCopyFromRegions(startRegion_, regionCount_); - monitor_.NotifyFinishOne(); - ThreadLocal::SetThreadType(ThreadType::ARK_PROCESSOR); - return true; - } - -private: - RegionManager ®ionManager_; - RegionDesc &startRegion_; - size_t regionCount_; - TaskPackMonitor &monitor_; -}; - #if defined(GCINFO_DEBUG) && GCINFO_DEBUG void RegionDesc::DumpRegionDesc(LogType type) const { @@ -225,6 +201,18 @@ bool RegionDesc::VisitLiveObjectsUntilFalse(const std::function& func) +{ + RegionRSet* rSet = GetRSet(); + if (IsLargeRegion()) { + if (rSet->IsMarkedCard(0)) { + func(reinterpret_cast(GetRegionStart())); + } + return; + } + rSet->VisitAllMarkedCard(func, GetRegionStart()); +} + void RegionList::MergeRegionList(RegionList& srcList, RegionDesc::RegionType regionType) { RegionList regionList("region list cache"); @@ -392,11 +380,6 @@ void RegionManager::SetLargeObjectThreshold() } } -void RegionManager::SetGarbageThreshold() -{ - fromSpaceGarbageThreshold_ = BaseRuntime::GetInstance()->GetGCParam().garbageThreshold; -} - void RegionManager::Initialize(size_t nRegion, uintptr_t regionInfoAddr) { size_t metadataSize = GetMetadataSize(nRegion); @@ -409,11 +392,10 @@ void RegionManager::Initialize(size_t nRegion, uintptr_t regionInfoAddr) inactiveZone_ = regionHeapStart_; SetMaxUnitCountForRegion(); SetLargeObjectThreshold(); - SetGarbageThreshold(); + // SetGarbageThreshold(); // propagate region heap layout RegionDesc::Initialize(nRegion, regionInfoAddr, regionHeapStart_); freeRegionManager_.Initialize(nRegion); - exemptedRegionThreshold_ = BaseRuntime::GetInstance()->GetHeapParam().exemptionThreshold; DLOG(REPORT, "region info @0x%zx+%zu, heap [0x%zx, 0x%zx), unit count %zu", regionInfoAddr, metadataSize, regionHeapStart_, regionHeapEnd_, nRegion); @@ -450,93 +432,42 @@ size_t RegionManager::ReleaseRegion(RegionDesc* region) return res; } -void RegionManager::ReassembleFromSpace() -{ - fromRegionList_.MergeRegionList(toRegionList_, RegionDesc::RegionType::FROM_REGION); - fromRegionList_.MergeRegionList(tlToRegionList_, RegionDesc::RegionType::FROM_REGION); - fromRegionList_.MergeRegionList(exemptedFromRegionList_, RegionDesc::RegionType::FROM_REGION); -} - void RegionManager::CountLiveObject(const BaseObject* obj) { RegionDesc* region = RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); region->AddLiveByteCount(obj->GetSize()); } -void RegionManager::AssembleSmallGarbageCandidates() -{ - fromRegionList_.MergeRegionList(recentFullRegionList_, RegionDesc::RegionType::FROM_REGION); -} - void RegionManager::AssembleLargeGarbageCandidates() { oldLargeRegionList_.MergeRegionList(recentLargeRegionList_, RegionDesc::RegionType::OLD_LARGE_REGION); } -void RegionManager::AssemblePinnedGarbageCandidates(bool collectAll) +void RegionManager::AssemblePinnedGarbageCandidates() { oldPinnedRegionList_.MergeRegionList(recentPinnedRegionList_, RegionDesc::RegionType::FULL_PINNED_REGION); RegionDesc* region = oldPinnedRegionList_.GetHeadRegion(); - while (region != nullptr) { - RegionDesc* nextRegion = region->GetNextRegion(); - if (collectAll && (region->GetRawPointerObjectCount() > 0)) { - oldPinnedRegionList_.DeleteRegion(region); - rawPointerRegionList_.PrependRegion(region, RegionDesc::RegionType::RAW_POINTER_REGION); - } - - region = nextRegion; - } for (size_t i = 0; i < FIXED_PINNED_REGION_COUNT; i++) { oldFixedPinnedRegionList_[i]->MergeRegionList(*fixedPinnedRegionList_[i], RegionDesc::RegionType::FULL_FIXED_PINNED_REGION); } } -// 
copy only regions whose garbage bytes is greater than or equal to exemptedRegionThreshold. -void RegionManager::ExemptFromRegions() +void RegionManager::ClearRSet() { - size_t forwardBytes = 0; - size_t floatingGarbage = 0; - size_t oldFromBytes = fromRegionList_.GetUnitCount() * RegionDesc::UNIT_SIZE; - RegionDesc* fromRegion = fromRegionList_.GetHeadRegion(); - while (fromRegion != nullptr) { - size_t threshold = static_cast(exemptedRegionThreshold_ * fromRegion->GetRegionSize()); - size_t liveBytes = fromRegion->GetLiveByteCount(); - long rawPtrCnt = fromRegion->GetRawPointerObjectCount(); - if (liveBytes > threshold) { // ignore this region - RegionDesc* del = fromRegion; - DLOG(REGION, "region %p @0x%zx+%zu exempted by forwarding: %zu units, %u live bytes", del, - del->GetRegionStart(), del->GetRegionAllocatedSize(), - del->GetUnitCount(), del->GetLiveByteCount()); - - fromRegion = fromRegion->GetNextRegion(); - if (fromRegionList_.TryDeleteRegion(del, RegionDesc::RegionType::FROM_REGION, - RegionDesc::RegionType::EXEMPTED_FROM_REGION)) { - ExemptFromRegion(del); - } - floatingGarbage += (del->GetRegionSize() - del->GetLiveByteCount()); - } else if (rawPtrCnt > 0) { - RegionDesc* del = fromRegion; - DLOG(REGION, "region %p @0x%zx+%zu pinned by forwarding: %zu units, %u live bytes rawPtr cnt %u", - del, del->GetRegionStart(), del->GetRegionAllocatedSize(), - del->GetUnitCount(), del->GetLiveByteCount(), rawPtrCnt); - - fromRegion = fromRegion->GetNextRegion(); - if (fromRegionList_.TryDeleteRegion(del, RegionDesc::RegionType::FROM_REGION, - RegionDesc::RegionType::RAW_POINTER_REGION)) { - rawPointerRegionList_.PrependRegion(del, RegionDesc::RegionType::RAW_POINTER_REGION); - } - floatingGarbage += (del->GetRegionSize() - del->GetLiveByteCount()); - } else { - forwardBytes += fromRegion->GetLiveByteCount(); - fromRegion = fromRegion->GetNextRegion(); - } + auto clearFunc = [](RegionDesc* region) { + region->ClearRSet(); + }; + recentPinnedRegionList_.VisitAllRegions(clearFunc); + oldPinnedRegionList_.VisitAllRegions(clearFunc); + recentLargeRegionList_.VisitAllRegions(clearFunc); + oldLargeRegionList_.VisitAllRegions(clearFunc); + rawPointerRegionList_.VisitAllRegions(clearFunc); + for (size_t i = 0; i < FIXED_PINNED_REGION_COUNT; i++) { + fixedPinnedRegionList_[i]->VisitAllRegions(clearFunc); + oldFixedPinnedRegionList_[i]->VisitAllRegions(clearFunc); } - size_t newFromBytes = fromRegionList_.GetUnitCount() * RegionDesc::UNIT_SIZE; - size_t exemptedFromBytes = exemptedFromRegionList_.GetUnitCount() * RegionDesc::UNIT_SIZE; - VLOG(REPORT, "exempt from-space: %zu B - %zu B -> %zu B, %zu B floating garbage, %zu B to copy", - oldFromBytes, exemptedFromBytes, newFromBytes, floatingGarbage, forwardBytes); } void RegionManager::ForEachObjectUnsafe(const std::function& visitor) const @@ -566,8 +497,14 @@ RegionDesc* RegionManager::TakeRegion(size_t num, RegionDesc::UnitRole type, boo size_t threshold = collector.GetGCStats().GetThreshold(); size_t allocated = Heap::GetHeap().GetAllocator().GetAllocatedBytes(); if (allocated >= threshold) { - DLOG(ALLOC, "request heu gc: allocated %zu, threshold %zu", allocated, threshold); - collector.RequestGC(GC_REASON_HEU, true); + // 30: trigger young gc when threshold less than 30MB. 
+ if (threshold < 30 * MB) { + DLOG(ALLOC, "request heu gc: young %zu, threshold %zu", allocated, threshold); + collector.RequestGC(GC_REASON_YOUNG, true); + } else { + DLOG(ALLOC, "request heu gc: allocated %zu, threshold %zu", allocated, threshold); + collector.RequestGC(GC_REASON_HEU, true); + } } } @@ -625,146 +562,12 @@ RegionDesc* RegionManager::TakeRegion(size_t num, RegionDesc::UnitRole type, boo return nullptr; } -uintptr_t RegionManager::AllocRegion() -{ - RegionDesc* region = TakeRegion(maxUnitCountPerRegion_, RegionDesc::UnitRole::SMALL_SIZED_UNITS, false, false); - DCHECK_CC(region != nullptr); - - GCPhase phase = Mutator::GetMutator()->GetMutatorPhase(); - if (phase == GC_PHASE_ENUM || phase == GC_PHASE_MARK || phase == GC_PHASE_REMARK_SATB || - phase == GC_PHASE_POST_MARK) { - region->SetTraceLine(); - } else if (phase == GC_PHASE_PRECOPY || phase == GC_PHASE_COPY) { - region->SetCopyLine(); - } else if (phase == GC_PHASE_FIX) { - region->SetCopyLine(); - region->SetFixLine(); - } - - DLOG(REGION, "alloc small object region %p @0x%zx+%zu units[%zu+%zu, %zu) type %u", - region, region->GetRegionStart(), region->GetRegionSize(), - region->GetUnitIdx(), region->GetUnitCount(), region->GetUnitIdx() + region->GetUnitCount(), - region->GetRegionType()); - recentFullRegionList_.PrependRegion(region, RegionDesc::RegionType::RECENT_FULL_REGION); - - uintptr_t start = region->GetRegionStart(); - uintptr_t addr = region->Alloc(region->GetRegionSize()); - DCHECK_CC(addr != 0); - - return start; -} - -uintptr_t RegionManager::AllocPinnedRegion() -{ - RegionDesc* region = TakeRegion(maxUnitCountPerRegion_, RegionDesc::UnitRole::SMALL_SIZED_UNITS, false, false); - DCHECK_CC(region != nullptr); - GCPhase phase = Mutator::GetMutator()->GetMutatorPhase(); - if (phase == GC_PHASE_ENUM || phase == GC_PHASE_MARK || phase == GC_PHASE_REMARK_SATB || - phase == GC_PHASE_POST_MARK) { - region->SetTraceLine(); - } else if (phase == GC_PHASE_PRECOPY || phase == GC_PHASE_COPY) { - region->SetCopyLine(); - } else if (phase == GC_PHASE_FIX) { - region->SetCopyLine(); - region->SetFixLine(); - } - - DLOG(REGION, "alloc pinned region @0x%zx+%zu type %u", region->GetRegionStart(), - region->GetRegionAllocatedSize(), - region->GetRegionType()); - recentPinnedRegionList_.PrependRegion(region, RegionDesc::RegionType::RECENT_PINNED_REGION); - - uintptr_t start = region->GetRegionStart(); - uintptr_t addr = region->Alloc(region->GetRegionSize()); - DCHECK_CC(addr != 0); - - return start; -} - -uintptr_t RegionManager::AllocLargeRegion(size_t size) -{ - return AllocLarge(size, false); -} - -void RegionManager::ParallelCopyFromRegions(RegionDesc &startRegion, size_t regionCnt) -{ - RegionDesc *currentRegion = &startRegion; - for (size_t count = 0; (count < regionCnt) && currentRegion != nullptr; ++count) { - RegionDesc *region = currentRegion; - currentRegion = currentRegion->GetNextRegion(); - CopyRegion(region); - } - - AllocationBuffer* allocBuffer = AllocationBuffer::GetAllocBuffer(); - if (LIKELY_CC(allocBuffer != nullptr)) { - allocBuffer->ClearRegion(); // clear thread local region for gc threads. 
- } -} - -uintptr_t RegionManager::AllocJitFortRegion(size_t size) -{ - auto res = AllocLarge(size, false); - return res; -} - -void RegionManager::CopyFromRegions(Taskpool *threadPool) -{ - if (threadPool != nullptr) { - uint32_t parallel = Heap::GetHeap().GetCollectorResources().GetGCThreadCount(true) - 1; - uint32_t threadNum = parallel + 1; - // We won't change fromRegionList during gc, so we can use it without lock. - size_t totalRegionCount = fromRegionList_.GetRegionCount(); - if (UNLIKELY_CC(totalRegionCount == 0)) { - return; - } - size_t regionCntEachTask = totalRegionCount / static_cast(threadNum); - size_t leftRegionCnt = totalRegionCount - regionCntEachTask * parallel; - RegionDesc* region = fromRegionList_.GetHeadRegion(); - TaskPackMonitor monitor(parallel, parallel); - for (uint32_t i = 0; i < parallel; ++i) { - ASSERT_LOGF(region != nullptr, "from region list records wrong region info"); - RegionDesc* startRegion = region; - for (size_t count = 0; count < regionCntEachTask; ++count) { - region = region->GetNextRegion(); - } - threadPool->PostTask(std::make_unique(0, *this, *startRegion, regionCntEachTask, monitor)); - } - ParallelCopyFromRegions(*region, leftRegionCnt); - monitor.WaitAllFinished(); - } else { - CopyFromRegions(); - } -} - -void RegionManager::ExemptFromRegion(RegionDesc* region) -{ - exemptedFromRegionList_.PrependRegion(region, RegionDesc::RegionType::EXEMPTED_FROM_REGION); -} - -void RegionManager::CopyFromRegions() -{ - // iterate each region in fromRegionList - RegionDesc* fromRegion = fromRegionList_.GetHeadRegion(); - while (fromRegion != nullptr) { - ASSERT_LOGF(fromRegion->IsValidRegion(), "region is not head when get head region of from region list"); - RegionDesc* region = fromRegion; - fromRegion = fromRegion->GetNextRegion(); - CopyRegion(region); - } - - VLOG(REPORT, "copy %zu from-region units", fromRegionList_.GetUnitCount()); - - AllocationBuffer* allocBuffer = AllocationBuffer::GetAllocBuffer(); - if (LIKELY_CC(allocBuffer != nullptr)) { - allocBuffer->ClearRegion(); // clear region for next GC - } -} - static void FixRecentRegion(TraceCollector& collector, RegionDesc* region) { // use fixline to skip new region after fix // visit object before fix line to avoid race condition with mutator - region->VisitAllObjectsBeforeFix([&collector, region](BaseObject* object) { + auto gcReason = Heap::GetHeap().GetGCReason(); + region->VisitAllObjectsBeforeFix([&collector, region, &gcReason](BaseObject* object) { if (region->IsNewObjectSinceForward(object)) { // handle dead objects in tl-regions for concurrent gc. 
if (collector.IsToVersion(object)) { @@ -782,7 +585,7 @@ static void FixRecentRegion(TraceCollector& collector, RegionDesc* region) }); } -static void FixRecentRegionList(TraceCollector& collector, RegionList& list) +void RegionManager::FixRecentRegionList(TraceCollector& collector, RegionList& list) { list.VisitAllRegions([&collector](RegionDesc* region) { DLOG(REGION, "fix region %p@%#zx+%zu", region, region->GetRegionStart(), region->GetLiveByteCount()); @@ -797,7 +600,7 @@ static void FixToRegion(TraceCollector& collector, RegionDesc* region) }); } -static void FixToRegionList(TraceCollector& collector, RegionList& list) +void RegionManager::FixToRegionList(TraceCollector& collector, RegionList& list) { list.VisitAllRegions([&collector](RegionDesc* region) { DLOG(REGION, "fix region %p@%#zx+%zu", region, region->GetRegionStart(), region->GetLiveByteCount()); @@ -807,7 +610,8 @@ static void FixToRegionList(TraceCollector& collector, RegionList& list) static void FixOldRegion(TraceCollector& collector, RegionDesc* region) { - region->VisitAllObjects([&collector](BaseObject* object) { + auto gcReason = Heap::GetHeap().GetGCReason(); + region->VisitAllObjects([&collector, &gcReason](BaseObject* object) { if (collector.IsSurvivedObject(object)) { collector.FixObjectRefFields(object); } else { @@ -821,8 +625,10 @@ void RegionManager::FixFixedRegionList(TraceCollector& collector, RegionList& li { size_t garbageSize = 0; RegionDesc* region = list.GetHeadRegion(); + auto gcReason = Heap::GetHeap().GetGCReason(); while (region != nullptr) { - if (region->GetLiveByteCount() == 0) { + auto liveBytes = region->GetLiveByteCount(); + if (liveBytes == 0) { RegionDesc* del = region; region = region->GetNextRegion(); list.DeleteRegion(del); @@ -831,7 +637,7 @@ void RegionManager::FixFixedRegionList(TraceCollector& collector, RegionList& li continue; } region->VisitAllObjectsWithFixedSize(cellCount, - [&collector, ®ion, &cellCount, &garbageSize](BaseObject* object) { + [&collector, ®ion, &cellCount, &garbageSize, &gcReason](BaseObject* object) { if (collector.IsSurvivedObject(object)) { collector.FixObjectRefFields(object); } else { @@ -846,7 +652,7 @@ void RegionManager::FixFixedRegionList(TraceCollector& collector, RegionList& li stats.pinnedGarbageSize += garbageSize; } -static void FixOldRegionList(TraceCollector& collector, RegionList& list) +void RegionManager::FixOldRegionList(TraceCollector& collector, RegionList& list) { list.VisitAllRegions([&collector](RegionDesc* region) { DLOG(REGION, "fix region %p@%#zx+%zu", region, region->GetRegionStart(), region->GetLiveByteCount()); @@ -854,10 +660,29 @@ static void FixOldRegionList(TraceCollector& collector, RegionList& list) }); } +static void FixMatureRegion(TraceCollector& collector, RegionDesc* region) +{ + region->VisitAllObjects([&collector, ®ion](BaseObject* object) { + if (region->IsNewObjectSinceTrace(object) || collector.IsSurvivedObject(object) || region->IsInRSet(object)) { + DLOG(FIX, "fix: mature obj %p<%p>(%zu)", object, object->GetTypeInfo(), object->GetSize()); + collector.FixObjectRefFields(object); + } + }); +} + +void RegionManager::FixMatureRegionList(TraceCollector& collector, RegionList& list) +{ + list.VisitAllRegions([&collector](RegionDesc* region) { + DLOG(REGION, "fix mature region %p@%#zx+%zu", region, region->GetRegionStart(), region->GetLiveByteCount()); + FixMatureRegion(collector, region); + }); +} + void RegionManager::FixOldPinnedRegionList(TraceCollector& collector, RegionList& list, GCStats& stats) { size_t 
garbageSize = 0; RegionDesc* region = list.GetHeadRegion(); + auto gcReason = Heap::GetHeap().GetGCReason(); while (region != nullptr) { if (region->GetLiveByteCount() == 0) { RegionDesc* del = region; @@ -878,23 +703,33 @@ void RegionManager::FixAllRegionLists() { TraceCollector& collector = reinterpret_cast(Heap::GetHeap().GetCollector()); // fix all objects. - FixToRegionList(collector, toRegionList_); - FixToRegionList(collector, tlToRegionList_); + + if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) { + FixMatureRegionList(collector, oldLargeRegionList_); + + // fix survived object but should be with line judgement. + FixMatureRegionList(collector, recentLargeRegionList_); + FixMatureRegionList(collector, recentPinnedRegionList_); + FixMatureRegionList(collector, rawPointerRegionList_); + FixMatureRegionList(collector, oldPinnedRegionList_); + for (size_t i = 0; i < FIXED_PINNED_REGION_COUNT; i++) { + FixMatureRegionList(collector, *fixedPinnedRegionList_[i]); + FixMatureRegionList(collector, *oldFixedPinnedRegionList_[i]); + } + return; + } GCStats& stats = Heap::GetHeap().GetCollector().GetGCStats(); // fix only survived objects. - FixOldRegionList(collector, exemptedFromRegionList_); FixOldRegionList(collector, oldLargeRegionList_); // fix survived object but should be with line judgement. - FixRecentRegionList(collector, tlRegionList_); - FixRecentRegionList(collector, recentFullRegionList_); FixRecentRegionList(collector, recentLargeRegionList_); FixRecentRegionList(collector, recentPinnedRegionList_); FixRecentRegionList(collector, rawPointerRegionList_); FixOldPinnedRegionList(collector, oldPinnedRegionList_, stats); for (size_t i = 0; i < FIXED_PINNED_REGION_COUNT; i++) { - FixFixedRegionList(collector, *oldFixedPinnedRegionList_[i], i, stats); FixRecentRegionList(collector, *fixedPinnedRegionList_[i]); + FixFixedRegionList(collector, *oldFixedPinnedRegionList_[i], i, stats); } } @@ -952,38 +787,13 @@ void RegionManager::DumpRegionDesc() const } #endif -void RegionManager::DumpRegionStats(const char* msg) const +void RegionManager::DumpRegionStats() const { size_t totalSize = regionHeapEnd_ - regionHeapStart_; size_t totalUnits = totalSize / RegionDesc::UNIT_SIZE; size_t activeSize = inactiveZone_ - regionHeapStart_; size_t activeUnits = activeSize / RegionDesc::UNIT_SIZE; - size_t tlRegions = tlRegionList_.GetRegionCount(); - size_t tlUnits = tlRegionList_.GetUnitCount(); - size_t tlSize = tlUnits * RegionDesc::UNIT_SIZE; - size_t allocTLSize = tlRegionList_.GetAllocatedSize(); - - size_t fromRegions = fromRegionList_.GetRegionCount(); - size_t fromUnits = fromRegionList_.GetUnitCount(); - size_t fromSize = fromUnits * RegionDesc::UNIT_SIZE; - size_t allocFromSize = fromRegionList_.GetAllocatedSize(); - - size_t exemptedFromRegions = exemptedFromRegionList_.GetRegionCount(); - size_t exemptedFromUnits = exemptedFromRegionList_.GetUnitCount(); - size_t exemptedFromSize = exemptedFromUnits * RegionDesc::UNIT_SIZE; - size_t allocExemptedFromSize = exemptedFromRegionList_.GetAllocatedSize(); - - size_t toRegions = toRegionList_.GetRegionCount() + tlToRegionList_.GetRegionCount(); - size_t toUnits = toRegionList_.GetUnitCount() + tlToRegionList_.GetUnitCount(); - size_t toSize = toUnits * RegionDesc::UNIT_SIZE; - size_t allocToSize = toRegionList_.GetAllocatedSize() + tlToRegionList_.GetAllocatedSize(); - - size_t recentFullRegions = recentFullRegionList_.GetRegionCount(); - size_t recentFullUnits = recentFullRegionList_.GetUnitCount(); - size_t recentFullSize = 
recentFullUnits * RegionDesc::UNIT_SIZE; - size_t allocRecentFullSize = recentFullRegionList_.GetAllocatedSize(); - size_t garbageRegions = garbageRegionList_.GetRegionCount(); size_t garbageUnits = garbageRegionList_.GetUnitCount(); size_t garbageSize = garbageUnits * RegionDesc::UNIT_SIZE; @@ -1009,24 +819,12 @@ void RegionManager::DumpRegionStats(const char* msg) const size_t recentLargeSize = recentlargeUnits * RegionDesc::UNIT_SIZE; size_t allocRecentLargeSize = recentLargeRegionList_.GetAllocatedSize(); - size_t usedUnits = GetUsedUnitCount(); size_t releasedUnits = freeRegionManager_.GetReleasedUnitCount(); size_t dirtyUnits = freeRegionManager_.GetDirtyUnitCount(); - size_t listedUnits = fromUnits + exemptedFromUnits + toUnits + garbageUnits + - recentFullUnits + largeUnits + recentlargeUnits + pinnedUnits + recentPinnedUnits; - - VLOG(REPORT, msg); VLOG(REPORT, "\ttotal units: %zu (%zu B)", totalUnits, totalSize); VLOG(REPORT, "\tactive units: %zu (%zu B)", activeUnits, activeSize); - VLOG(REPORT, "\ttl-regions %zu: %zu units (%zu B, alloc %zu)", tlRegions, tlUnits, tlSize, allocTLSize); - VLOG(REPORT, "\tfrom-regions %zu: %zu units (%zu B, alloc %zu)", fromRegions, fromUnits, fromSize, allocFromSize); - VLOG(REPORT, "\texempted from-regions %zu: %zu units (%zu B, alloc %zu)", - exemptedFromRegions, exemptedFromUnits, exemptedFromSize, allocExemptedFromSize); - VLOG(REPORT, "\tto-regions %zu: %zu units (%zu B, alloc %zu)", toRegions, toUnits, toSize, allocToSize); - VLOG(REPORT, "\trecent-full regions %zu: %zu units (%zu B, alloc %zu)", - recentFullRegions, recentFullUnits, recentFullSize, allocRecentFullSize); VLOG(REPORT, "\tgarbage regions %zu: %zu units (%zu B, alloc %zu)", garbageRegions, garbageUnits, garbageSize, allocGarbageSize); VLOG(REPORT, "\tpinned regions %zu: %zu units (%zu B, alloc %zu)", @@ -1038,8 +836,6 @@ void RegionManager::DumpRegionStats(const char* msg) const VLOG(REPORT, "\trecent large-object regions %zu: %zu units (%zu B, alloc %zu)", recentlargeRegions, recentlargeUnits, recentLargeSize, allocRecentLargeSize); - VLOG(REPORT, "\tlisted units: %zu (%zu B)", listedUnits, listedUnits * RegionDesc::UNIT_SIZE); - VLOG(REPORT, "\tused units: %zu (%zu B)", usedUnits, usedUnits * RegionDesc::UNIT_SIZE); VLOG(REPORT, "\treleased units: %zu (%zu B)", releasedUnits, releasedUnits * RegionDesc::UNIT_SIZE); VLOG(REPORT, "\tdirty units: %zu (%zu B)", dirtyUnits, dirtyUnits * RegionDesc::UNIT_SIZE); @@ -1047,26 +843,6 @@ void RegionManager::DumpRegionStats(const char* msg) const OHOS_HITRACE_COUNT("Ark_GC_totalUnits", totalUnits); OHOS_HITRACE_COUNT("Ark_GC_activeSize", activeSize); OHOS_HITRACE_COUNT("Ark_GC_activeUnits", activeUnits); - OHOS_HITRACE_COUNT("Ark_GC_tlRegions", tlRegions); - OHOS_HITRACE_COUNT("Ark_GC_tlUnits", tlUnits); - OHOS_HITRACE_COUNT("Ark_GC_tlSize", tlSize); - OHOS_HITRACE_COUNT("Ark_GC_allocTLSize", allocTLSize); - OHOS_HITRACE_COUNT("Ark_GC_fromRegions", fromRegions); - OHOS_HITRACE_COUNT("Ark_GC_fromUnits", fromUnits); - OHOS_HITRACE_COUNT("Ark_GC_fromSize", fromSize); - OHOS_HITRACE_COUNT("Ark_GC_allocFromSize", allocFromSize); - OHOS_HITRACE_COUNT("Ark_GC_exemptedFromRegions", exemptedFromRegions); - OHOS_HITRACE_COUNT("Ark_GC_exemptedFromUnits", exemptedFromUnits); - OHOS_HITRACE_COUNT("Ark_GC_exemptedFromSize", exemptedFromSize); - OHOS_HITRACE_COUNT("Ark_GC_allocExemptedFromSize", allocExemptedFromSize); - OHOS_HITRACE_COUNT("Ark_GC_toRegions", toRegions); - OHOS_HITRACE_COUNT("Ark_GC_toUnits", toUnits); - 
OHOS_HITRACE_COUNT("Ark_GC_toSize", toSize); - OHOS_HITRACE_COUNT("Ark_GC_allocToSize", allocToSize); - OHOS_HITRACE_COUNT("Ark_GC_recentFullRegions", recentFullRegions); - OHOS_HITRACE_COUNT("Ark_GC_recentFullUnits", recentFullUnits); - OHOS_HITRACE_COUNT("Ark_GC_recentFullSize", recentFullSize); - OHOS_HITRACE_COUNT("Ark_GC_allocRecentFullSize", allocRecentFullSize); OHOS_HITRACE_COUNT("Ark_GC_garbageRegions", garbageRegions); OHOS_HITRACE_COUNT("Ark_GC_garbageUnits", garbageUnits); OHOS_HITRACE_COUNT("Ark_GC_garbageSize", garbageSize); @@ -1087,43 +863,8 @@ void RegionManager::DumpRegionStats(const char* msg) const OHOS_HITRACE_COUNT("Ark_GC_recentlargeUnits", recentlargeUnits); OHOS_HITRACE_COUNT("Ark_GC_recentLargeSize", recentLargeSize); OHOS_HITRACE_COUNT("Ark_GC_allocRecentLargeSize", allocRecentLargeSize); - OHOS_HITRACE_COUNT("Ark_GC_usedUnits", usedUnits); OHOS_HITRACE_COUNT("Ark_GC_releasedUnits", releasedUnits); OHOS_HITRACE_COUNT("Ark_GC_dirtyUnits", dirtyUnits); - OHOS_HITRACE_COUNT("Ark_GC_listedUnits", listedUnits); -} - -RegionDesc* RegionManager::AllocateThreadLocalRegion(bool expectPhysicalMem) -{ - RegionDesc* region = TakeRegion(maxUnitCountPerRegion_, RegionDesc::UnitRole::SMALL_SIZED_UNITS, expectPhysicalMem); - if (region != nullptr) { - if (IsGcThread()) { - tlToRegionList_.PrependRegion(region, RegionDesc::RegionType::TO_REGION); - DLOG(REGION, "alloc to-region %p @0x%zx+%zu units[%zu+%zu, %zu) type %u", - region, region->GetRegionStart(), region->GetRegionSize(), - region->GetUnitIdx(), region->GetUnitCount(), region->GetUnitIdx() + region->GetUnitCount(), - region->GetRegionType()); - } else { - GCPhase phase = Mutator::GetMutator()->GetMutatorPhase(); - if (phase == GC_PHASE_ENUM || phase == GC_PHASE_MARK || phase == GC_PHASE_REMARK_SATB || - phase == GC_PHASE_POST_MARK) { - region->SetTraceLine(); - } else if (phase == GC_PHASE_PRECOPY || phase == GC_PHASE_COPY) { - region->SetCopyLine(); - } else if (phase == GC_PHASE_FIX) { - region->SetCopyLine(); - region->SetFixLine(); - } - - tlRegionList_.PrependRegion(region, RegionDesc::RegionType::THREAD_LOCAL_REGION); - DLOG(REGION, "alloc tl-region %p @0x%zx+%zu units[%zu+%zu, %zu) type %u, gc phase: %u", - region, region->GetRegionStart(), region->GetRegionSize(), - region->GetUnitIdx(), region->GetUnitCount(), region->GetUnitIdx() + region->GetUnitCount(), - region->GetRegionType(), phase); - } - } - - return region; } void RegionManager::RequestForRegion(size_t size) @@ -1158,38 +899,6 @@ void RegionManager::RequestForRegion(size_t size) prevRegionAllocTime_ = TimeUtil::NanoSeconds(); } -void RegionManager::CopyRegion(RegionDesc* region) -{ - LOGF_CHECK(region->IsFromRegion()) << "region type " << static_cast(region->GetRegionType()); - - DLOG(COPY, "try copy region %p @0x%zx+%zu type %u, live bytes %u", - region, region->GetRegionStart(), region->GetRegionAllocatedSize(), - region->GetRegionType(), region->GetLiveByteCount()); - - if (region->GetLiveByteCount() == 0) { - return; - } - - int32_t rawPointerCount = region->GetRawPointerObjectCount(); - CHECK_CC(rawPointerCount == 0); - Collector& collector = Heap::GetHeap().GetCollector(); - bool forwarded = region->VisitLiveObjectsUntilFalse( - [&collector](BaseObject* obj) { return collector.ForwardObject(obj); }); - if (forwarded) { - // region->SetRouteState(RegionDesc::RouteState::COPYED) - // CollectRegion(region) - } else { - DLOG(COPY, "failure to copy region %p @0x%zx+%zu units[%zu+%zu, %zu) type %u, %u live bytes", - region, region->GetRegionStart(), 
region->GetRegionAllocatedSize(), - region->GetUnitIdx(), region->GetUnitCount(), region->GetUnitIdx() + region->GetUnitCount(), - region->GetRegionType(), region->GetLiveByteCount()); - - fromRegionList_.DeleteRegion(region); - // since this region is possibly partially-forwarded, treat it as to-region so its state-word can be restored. - EnlistToRegion(region); - } -} - uintptr_t RegionManager::AllocPinnedFromFreeList(size_t cellCount) { GCPhase mutatorPhase = Mutator::GetMutator()->GetMutatorPhase(); @@ -1211,4 +920,24 @@ uintptr_t RegionManager::AllocPinnedFromFreeList(size_t cellCount) (reinterpret_cast(&Heap::GetHeap().GetCollector()))->MarkObject(object, cellCount); return allocPtr; } + +void RegionManager::VisitRememberSet(const std::function& func) +{ + auto visitFunc = [&func](RegionDesc* region) { + region->VisitAllObjects([®ion, &func](BaseObject* obj) { + if (region->IsInRSet(obj)) { + func(obj); + } + }); + }; + recentPinnedRegionList_.VisitAllRegions(visitFunc); + oldPinnedRegionList_.VisitAllRegions(visitFunc); + recentLargeRegionList_.VisitAllRegions(visitFunc); + oldLargeRegionList_.VisitAllRegions(visitFunc); + rawPointerRegionList_.VisitAllRegions(visitFunc); + for (size_t i = 0; i < FIXED_PINNED_REGION_COUNT; i++) { + fixedPinnedRegionList_[i]->VisitAllRegions(visitFunc); + oldFixedPinnedRegionList_[i]->VisitAllRegions(visitFunc); + } +} } // namespace panda diff --git a/common_components/common_runtime/src/heap/allocator/region_manager.h b/common_components/common_runtime/src/heap/allocator/region_manager.h index f84150ebad..2e5aeb1278 100755 --- a/common_components/common_runtime/src/heap/allocator/region_manager.h +++ b/common_components/common_runtime/src/heap/allocator/region_manager.h @@ -66,14 +66,15 @@ public: return RoundUp(metadataSize, panda::ARK_COMMON_PAGE_SIZE); } + static void FixRecentRegionList(TraceCollector& collector, RegionList& list); + static void FixToRegionList(TraceCollector& collector, RegionList& list); + static void FixOldRegionList(TraceCollector& collector, RegionList& list); + static void FixMatureRegionList(TraceCollector& collector, RegionList& list); + void Initialize(size_t regionNum, uintptr_t regionInfoStart); RegionManager() - : freeRegionManager_(*this), tlRegionList_("thread local regions"), - recentFullRegionList_("recent full regions"), fullTraceRegions_("full trace regions"), - fromRegionList_("from regions"), ghostFromRegionList_("ghost from regions"), - exemptedFromRegionList_("escaped from regions"), toRegionList_("to-regions"), - tlToRegionList_("tl-to-regions"), garbageRegionList_("garbage regions"), + : freeRegionManager_(*this), garbageRegionList_("garbage regions"), recentPinnedRegionList_("recent pinned regions"), oldPinnedRegionList_("old pinned regions"), rawPointerRegionList_("raw pointer pinned regions"), oldLargeRegionList_("old large regions"), recentLargeRegionList_("recent large regions"), largeTraceRegions_("large trace regions") @@ -88,25 +89,17 @@ public: RegionManager& operator=(const RegionManager&) = delete; - RegionDesc* AllocateThreadLocalRegion(bool expectPhysicalMem = false); - - void ParallelCopyFromRegions(RegionDesc &startRegion, size_t regionCnt); - void CopyFromRegions(Taskpool *threadPool); - void CopyFromRegions(); - void CopyRegion(RegionDesc* region); void FixAllRegionLists(); void FixOldPinnedRegionList(TraceCollector& collector, RegionList& list, GCStats& stats); void FixFixedRegionList(TraceCollector& collector, RegionList& list, size_t cellCount, GCStats& stats); using RootSet = 
MarkStack; - void ExemptFromRegion(RegionDesc* region); - #if defined(GCINFO_DEBUG) && GCINFO_DEBUG void DumpRegionDesc() const; #endif - void DumpRegionStats(const char* msg) const; + void DumpRegionStats() const; uintptr_t GetInactiveZone() const { return inactiveZone_; } @@ -136,12 +129,15 @@ public: // take a region with *num* units for allocation RegionDesc* TakeRegion(size_t num, RegionDesc::UnitRole, bool expectPhysicalMem = false, bool allowgc = true); - // only used for deserialize allocation, allocate one region and regard it as full region - // adapt for concurrent gc - uintptr_t AllocRegion(); - uintptr_t AllocPinnedRegion(); - uintptr_t AllocLargeRegion(size_t size); - uintptr_t AllocJitFortRegion(size_t size); + RegionDesc* TakeRegion(bool expectPhysicalMem, bool allowgc) + { + return TakeRegion(maxUnitCountPerRegion_, RegionDesc::UnitRole::SMALL_SIZED_UNITS, expectPhysicalMem, allowgc); + } + + void AddRecentPinnedRegion(RegionDesc* region) + { + recentPinnedRegionList_.PrependRegion(region, RegionDesc::RegionType::RECENT_PINNED_REGION); + } uintptr_t AllocPinnedFromFreeList(size_t size); @@ -199,13 +195,6 @@ public: uintptr_t addr = 0; std::mutex& regionListMutex = recentPinnedRegionList_.GetListMutex(); - // enter saferegion when wait lock to avoid gc timeout. - // note that release the mutex when function end. - { - // stw gc may deadlock here - // ScopedEnterSaferegion enterSaferegion(true) - // regionListMutex.lock() - } std::lock_guard lock(regionListMutex); RegionDesc* headRegion = recentPinnedRegionList_.GetHeadRegion(); if (headRegion != nullptr) { @@ -268,41 +257,19 @@ public: return addr; } - void EnlistFullThreadLocalRegion(RegionDesc* region) noexcept - { - ASSERT_LOGF(region->IsThreadLocalRegion(), "unexpected region type"); - if (IsGcThread()) { - EnlistToRegion(region); - } else { - recentFullRegionList_.PrependRegion(region, RegionDesc::RegionType::RECENT_FULL_REGION); - } - } + void CountLiveObject(const BaseObject* obj); - void EnlistToRegion(RegionDesc* region) noexcept - { - ASSERT_LOGF(Heap::GetHeap().IsGcStarted(), "GC is not started"); - toRegionList_.PrependRegion(region, RegionDesc::RegionType::TO_REGION); - } + void AssembleLargeGarbageCandidates(); + void AssemblePinnedGarbageCandidates(); - void RemoveThreadLocalRegion(RegionDesc* region) noexcept + void CollectFromSpaceGarbage(RegionList& fromList) { - ASSERT_LOGF(region->IsThreadLocalRegion(), "unexpected region type"); - if (IsGcThread()) { - tlToRegionList_.DeleteRegion(region); - } else { - tlRegionList_.DeleteRegion(region); - } + garbageRegionList_.MergeRegionList(fromList, RegionDesc::RegionType::GARBAGE_REGION); } - void CountLiveObject(const BaseObject* obj); - - void AssembleSmallGarbageCandidates(); - void AssembleLargeGarbageCandidates(); - void AssemblePinnedGarbageCandidates(bool collectAll); - - void CollectFromSpaceGarbage() + void AddRawPointerRegion(RegionDesc* region) { - garbageRegionList_.MergeRegionList(fromRegionList_, RegionDesc::RegionType::GARBAGE_REGION); + rawPointerRegionList_.PrependRegion(region, RegionDesc::RegionType::RAW_POINTER_REGION); } size_t CollectRegion(RegionDesc* region) @@ -319,30 +286,6 @@ public: } } - void AddRawPointerObject(BaseObject* obj) - { - RegionDesc* region = RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); - region->IncRawPointerObjectCount(); - if (region->IsFromRegion() && - fromRegionList_.TryDeleteRegion(region, RegionDesc::RegionType::FROM_REGION, - RegionDesc::RegionType::RAW_POINTER_REGION)) { - GCPhase phase = 
Heap::GetHeap().GetGCPhase(); - CHECK_CC(phase != GCPhase::GC_PHASE_COPY && phase != GCPhase::GC_PHASE_PRECOPY); - if (phase == GCPhase::GC_PHASE_POST_MARK) { - // region->ClearGhostRegionBit() - }; - rawPointerRegionList_.PrependRegion(region, RegionDesc::RegionType::RAW_POINTER_REGION); - } else { - CHECK_CC(region->GetRegionType() != RegionDesc::RegionType::LONE_FROM_REGION); - } - } - - void RemoveRawPointerObject(BaseObject* obj) - { - RegionDesc* region = RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); - region->DecRawPointerObjectCount(); - } - void ReclaimRegion(RegionDesc* region); size_t ReleaseRegion(RegionDesc* region); @@ -360,12 +303,6 @@ public: // targetSize: size of memory which we do not release and keep it as cache for future allocation. size_t ReleaseGarbageRegions(size_t targetSize) { return freeRegionManager_.ReleaseGarbageRegions(targetSize); } - // these methods are helpers for compaction. Since pinned object can not be moved during compaction, - // we first virtually reclaim all compactable heap memory, which are handled in cartesian tree. So far we get a map - // of heap memory about which region can be used for compaction. - void ExemptFromRegions(); - void ReassembleFromSpace(); - void ForEachObjectUnsafe(const std::function& visitor) const; void ForEachObjectSafe(const std::function& visitor) const; @@ -373,24 +310,19 @@ public: size_t GetRecentAllocatedSize() const { - return recentFullRegionList_.GetAllocatedSize() + recentLargeRegionList_.GetAllocatedSize() + - recentPinnedRegionList_.GetAllocatedSize(); + return recentLargeRegionList_.GetAllocatedSize() + recentPinnedRegionList_.GetAllocatedSize(); } size_t GetSurvivedSize() const { - return fromRegionList_.GetAllocatedSize() + exemptedFromRegionList_.GetAllocatedSize() + - toRegionList_.GetAllocatedSize() + tlToRegionList_.GetAllocatedSize() + - oldPinnedRegionList_.GetAllocatedSize() + oldLargeRegionList_.GetAllocatedSize(); + return oldPinnedRegionList_.GetAllocatedSize() + oldLargeRegionList_.GetAllocatedSize(); } size_t GetUsedUnitCount() const { - return fromRegionList_.GetUnitCount() + exemptedFromRegionList_.GetUnitCount() + toRegionList_.GetUnitCount() + - tlToRegionList_.GetUnitCount() + recentFullRegionList_.GetUnitCount() + oldLargeRegionList_.GetUnitCount() + - recentLargeRegionList_.GetUnitCount() + oldPinnedRegionList_.GetUnitCount() + - recentPinnedRegionList_.GetUnitCount() + rawPointerRegionList_.GetUnitCount() + - largeTraceRegions_.GetUnitCount() + fullTraceRegions_.GetUnitCount() + tlRegionList_.GetUnitCount(); + return oldLargeRegionList_.GetUnitCount() + recentLargeRegionList_.GetUnitCount() + + oldPinnedRegionList_.GetUnitCount() + recentPinnedRegionList_.GetUnitCount() + + rawPointerRegionList_.GetUnitCount(); } size_t GetDirtyUnitCount() const { return freeRegionManager_.GetDirtyUnitCount(); } @@ -404,26 +336,11 @@ public: size_t GetAllocatedSize() const { - size_t threadLocalSize = 0; - AllocBufferVisitor visitor = [&threadLocalSize](AllocationBuffer& regionBuffer) { - RegionDesc* region = regionBuffer.GetRegion(); - if (UNLIKELY_CC(region == RegionDesc::NullRegion())) { - return; - } - threadLocalSize += region->GetRegionAllocatedSize(); - }; - Heap::GetHeap().GetAllocator().VisitAllocBuffers(visitor); - // exclude garbageRegionList for live object set. 
- return fromRegionList_.GetAllocatedSize() + exemptedFromRegionList_.GetAllocatedSize() + - toRegionList_.GetAllocatedSize() + tlToRegionList_.GetAllocatedSize() + - recentFullRegionList_.GetAllocatedSize() + oldLargeRegionList_.GetAllocatedSize() + - recentLargeRegionList_.GetAllocatedSize() + oldPinnedRegionList_.GetAllocatedSize() + - recentPinnedRegionList_.GetAllocatedSize() + rawPointerRegionList_.GetAllocatedSize() + - largeTraceRegions_.GetAllocatedSize() + fullTraceRegions_.GetAllocatedSize() + threadLocalSize; + return oldLargeRegionList_.GetAllocatedSize() + recentLargeRegionList_.GetAllocatedSize() + + oldPinnedRegionList_.GetAllocatedSize() + recentPinnedRegionList_.GetAllocatedSize() + + rawPointerRegionList_.GetAllocatedSize(); } - inline size_t GetFromSpaceSize() const { return fromRegionList_.GetAllocatedSize(); } - inline size_t GetPinnedSpaceSize() const { size_t pinnedSpaceSize = @@ -435,9 +352,6 @@ public: return pinnedSpaceSize; } - // valid only between forwarding phase and flipping phase. - size_t GetToSpaceSize() const { return toRegionList_.GetAllocatedSize() + tlToRegionList_.GetAllocatedSize(); } - RegionDesc* GetNextNeighborRegion(RegionDesc* region) const { HeapAddress address = region->GetRegionEnd(); @@ -455,7 +369,6 @@ public: void SetMaxUnitCountForRegion(); void SetLargeObjectThreshold(); - void SetGarbageThreshold(); void PrepareTrace() { @@ -517,12 +430,7 @@ public: void ClearAllGCInfo() { - ClearGCInfo(toRegionList_); - ClearGCInfo(tlToRegionList_); - ClearGCInfo(exemptedFromRegionList_); ClearGCInfo(oldLargeRegionList_); - ClearGCInfo(tlRegionList_); - ClearGCInfo(recentFullRegionList_); ClearGCInfo(recentLargeRegionList_); ClearGCInfo(recentPinnedRegionList_); ClearGCInfo(rawPointerRegionList_); @@ -532,6 +440,9 @@ public: ClearGCInfo(*oldFixedPinnedRegionList_[i]); } } + + void VisitRememberSet(const std::function& func); + void ClearRSet(); private: static const size_t MAX_UNIT_COUNT_PER_REGION; static const size_t HUGE_PAGE; @@ -554,32 +465,6 @@ private: // region lists actually represent life cycle of regions. // each region must belong to only one list at any time. - // regions for movable (small-sized) objects. - // regions for thread-local allocation. - // regions in this list are already used for allocation but not full yet, i.e. local regions. - RegionList tlRegionList_; - - // recentFullRegionList is a list of regions which is already full, thus escape current gc. - RegionList recentFullRegionList_; - // RegionList fullRegionList; // mimic old space - - // if region is allocated during gc trace phase, it is called a trace-region, it is recorded here when it is full. - RegionCache fullTraceRegions_; - - // fromRegionList is a list of full regions waiting to be collected (i.e. for forwarding). - // region type must be FROM_REGION. - RegionList fromRegionList_; - RegionList ghostFromRegionList_; - - // regions exempted by ExemptFromRegions, which will not be moved during current GC. - RegionList exemptedFromRegionList_; - - // toRegionList is a list of to-space regions produced by gc threads. - // when a region is prepended to this list, the region is probably not full, so the statistics - // of this region-list are not reliable and need to be updated. - RegionList toRegionList_; - RegionList tlToRegionList_; - // cache for fromRegionList after forwarding. 
RegionList garbageRegionList_; @@ -618,8 +503,6 @@ private: std::atomic inactiveZone_ = { 0 }; size_t maxUnitCountPerRegion_ = MAX_UNIT_COUNT_PER_REGION; // max units count for threadLocal buffer. size_t largeObjectThreshold_; - double fromSpaceGarbageThreshold_ = 0.5; // 0.5: default garbage ratio. - double exemptedRegionThreshold_; friend class VerifyIterator; }; } // namespace panda diff --git a/common_components/common_runtime/src/heap/allocator/region_space.cpp b/common_components/common_runtime/src/heap/allocator/region_space.cpp index a341c801d4..185cf6771d 100755 --- a/common_components/common_runtime/src/heap/allocator/region_space.cpp +++ b/common_components/common_runtime/src/heap/allocator/region_space.cpp @@ -25,6 +25,50 @@ #include "common_components/common_runtime/src/heap/heap.h" namespace panda { +RegionDesc* RegionSpace::AllocateThreadLocalRegion(bool expectPhysicalMem) +{ + RegionDesc* region = regionManager_.TakeRegion(expectPhysicalMem, true); + if (region != nullptr) { + if (IsGcThread()) { + toSpace_.AddThreadLocalRegion(region); + DLOG(REGION, "alloc to-region %p @0x%zx+%zu units[%zu+%zu, %zu) type %u", + region, region->GetRegionStart(), region->GetRegionSize(), + region->GetUnitIdx(), region->GetUnitCount(), region->GetUnitIdx() + region->GetUnitCount(), + region->GetRegionType()); + } else { + GCPhase phase = Mutator::GetMutator()->GetMutatorPhase(); + if (phase == GC_PHASE_ENUM || phase == GC_PHASE_MARK || phase == GC_PHASE_REMARK_SATB || + phase == GC_PHASE_POST_MARK) { + region->SetTraceLine(); + } else if (phase == GC_PHASE_PRECOPY || phase == GC_PHASE_COPY) { + region->SetCopyLine(); + } + + youngSpace_.AddThreadLocalRegion(region); + DLOG(REGION, "alloc tl-region %p @0x%zx+%zu units[%zu+%zu, %zu) type %u, gc phase: %u", + region, region->GetRegionStart(), region->GetRegionSize(), + region->GetUnitIdx(), region->GetUnitCount(), region->GetUnitIdx() + region->GetUnitCount(), + region->GetRegionType(), phase); + } + } + + return region; +} + +void RegionSpace::DumpAllRegionStats(const char* msg) const +{ + VLOG(REPORT, msg); + + youngSpace_.DumpRegionStats(); + matureSpace_.DumpRegionStats(); + fromSpace_.DumpRegionStats(); + toSpace_.DumpRegionStats(); + regionManager_.DumpRegionStats(); + + size_t usedUnits = GetUsedUnitCount(); + VLOG(REPORT, "\tused units: %zu (%zu B)", usedUnits, usedUnits * RegionDesc::UNIT_SIZE); + OHOS_HITRACE_COUNT("Ark_GC_usedUnits", usedUnits); +} HeapAddress RegionSpace::TryAllocateOnce(size_t allocSize, AllocType allocType) { if (UNLIKELY_CC(allocType == AllocType::PINNED_OBJECT)) { @@ -45,8 +89,6 @@ bool RegionSpace::ShouldRetryAllocation(size_t& tryTimes) const } if (!IsRuntimeThread() && tryTimes <= static_cast(TryAllocationThreshold::RESCHEDULE)) { - // re-add thread reschedule - // ThreadResched() // reschedule this thread for throughput. 
return true; } else if (tryTimes < static_cast(TryAllocationThreshold::TRIGGER_OOM)) { if (Heap::GetHeap().IsGcStarted()) { @@ -72,6 +114,73 @@ bool RegionSpace::ShouldRetryAllocation(size_t& tryTimes) const } } +uintptr_t RegionSpace::AllocRegion() +{ + RegionDesc* region = regionManager_.TakeRegion(false, false); + ASSERT(region != nullptr); + + GCPhase phase = Mutator::GetMutator()->GetMutatorPhase(); + if (phase == GC_PHASE_ENUM || phase == GC_PHASE_MARK || phase == GC_PHASE_REMARK_SATB || + phase == GC_PHASE_POST_MARK) { + region->SetTraceLine(); + } else if (phase == GC_PHASE_PRECOPY || phase == GC_PHASE_COPY) { + region->SetCopyLine(); + } else if (phase == GC_PHASE_FIX) { + region->SetCopyLine(); + region->SetFixLine(); + } + + DLOG(REGION, "alloc small object region %p @0x%zx+%zu units[%zu+%zu, %zu) type %u", + region, region->GetRegionStart(), region->GetRegionSize(), + region->GetUnitIdx(), region->GetUnitCount(), region->GetUnitIdx() + region->GetUnitCount(), + region->GetRegionType()); + youngSpace_.AddFullRegion(region); + + uintptr_t start = region->GetRegionStart(); + uintptr_t addr = region->Alloc(region->GetRegionSize()); + ASSERT(addr != 0); + + return start; +} + +uintptr_t RegionSpace::AllocPinnedRegion() +{ + RegionDesc* region = regionManager_.TakeRegion(false, false); + ASSERT(region != nullptr); + + GCPhase phase = Mutator::GetMutator()->GetMutatorPhase(); + if (phase == GC_PHASE_ENUM || phase == GC_PHASE_MARK || phase == GC_PHASE_REMARK_SATB || + phase == GC_PHASE_POST_MARK) { + region->SetTraceLine(); + } else if (phase == GC_PHASE_PRECOPY || phase == GC_PHASE_COPY) { + region->SetCopyLine(); + } else if (phase == GC_PHASE_FIX) { + region->SetCopyLine(); + region->SetFixLine(); + } + + DLOG(REGION, "alloc pinned region @0x%zx+%zu type %u", region->GetRegionStart(), + region->GetRegionAllocatedSize(), + region->GetRegionType()); + regionManager_.AddRecentPinnedRegion(region); + + uintptr_t start = region->GetRegionStart(); + uintptr_t addr = region->Alloc(region->GetRegionSize()); + ASSERT(addr != 0); + + return start; +} + +uintptr_t RegionSpace::AllocLargeRegion(size_t size) +{ + return regionManager_.AllocLarge(size, false); +} + +uintptr_t RegionSpace::AllocJitFortRegion(size_t size) +{ + return regionManager_.AllocLarge(size, false); +} + HeapAddress RegionSpace::Allocate(size_t size, AllocType allocType) { size_t tryTimes = 0; @@ -113,6 +222,7 @@ HeapAddress RegionSpace::AllocateNoGC(size_t size, AllocType allocType) internalAddr = allocBuffer->Allocate(allocSize, allocType); } else { // Unreachable for serialization + UNREACHABLE_CC(); } if (internalAddr == 0) { return 0; @@ -123,6 +233,34 @@ HeapAddress RegionSpace::AllocateNoGC(size_t size, AllocType allocType) return internalAddr + HEADER_SIZE; } +void RegionSpace::CopyRegion(RegionDesc* region) +{ + LOGF_CHECK(region->IsFromRegion()) << "region type " << static_cast(region->GetRegionType()); + DLOG(COPY, "try forward region %p @0x%zx+%zu type %u, live bytes %u", + region, region->GetRegionStart(), region->GetRegionAllocatedSize(), + region->GetRegionType(), region->GetLiveByteCount()); + + if (region->GetLiveByteCount() == 0) { + return; + } + + int32_t rawPointerCount = region->GetRawPointerObjectCount(); + CHECK(rawPointerCount == 0); + Collector& collector = Heap::GetHeap().GetCollector(); + bool forwarded = region->VisitLiveObjectsUntilFalse( + [&collector](BaseObject* obj) { return collector.ForwardObject(obj); }); + if (!forwarded) { + DLOG(COPY, "failure to forward region %p @0x%zx+%zu 
units[%zu+%zu, %zu) type %u, %u live bytes", + region, region->GetRegionStart(), region->GetRegionAllocatedSize(), + region->GetUnitIdx(), region->GetUnitCount(), region->GetUnitIdx() + region->GetUnitCount(), + region->GetRegionType(), region->GetLiveByteCount()); + + fromSpace_.DeleteFromRegion(region); + // since this region is possibly partially-forwarded, treat it as to-region. + toSpace_.AddFullRegion(region); + } +} + void RegionSpace::Init(const RuntimeParam& param) { MemoryMap::Option opt = MemoryMap::DEFAULT_OPTIONS; @@ -143,6 +281,7 @@ void RegionSpace::Init(const RuntimeParam& param) #endif HeapAddress metadata = reinterpret_cast(map_->GetBaseAddr()); + fromSpace_.SetExemptedRegionThreshold(param.heapParam.exemptionThreshold); regionManager_.Initialize(regionNum, metadata); reservedStart_ = regionManager_.GetRegionHeapStart(); reservedEnd_ = reinterpret_cast(map_->GetMappedEndAddr()); @@ -165,13 +304,11 @@ AllocationBuffer* AllocationBuffer::GetOrCreateAllocBuffer() } return buffer; } -void AllocationBuffer::RefershRegion() +void AllocationBuffer::ClearThreadLocalRegion() { if (LIKELY_CC(tlRegion_ != RegionDesc::NullRegion())) { - RegionSpace& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); - RegionManager& manager = theAllocator.GetRegionManager(); - manager.RemoveThreadLocalRegion(tlRegion_); - manager.EnlistFullThreadLocalRegion(tlRegion_); + RegionSpace& heap = reinterpret_cast(Heap::GetHeap().GetAllocator()); + heap.HandleFullThreadLocalRegion(tlRegion_); tlRegion_ = RegionDesc::NullRegion(); } } @@ -179,13 +316,7 @@ AllocationBuffer* AllocationBuffer::GetAllocBuffer() { return ThreadLocal::GetAl AllocationBuffer::~AllocationBuffer() { - if (LIKELY_CC(tlRegion_ != RegionDesc::NullRegion())) { - RegionSpace& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); - RegionManager& manager = theAllocator.GetRegionManager(); - manager.RemoveThreadLocalRegion(tlRegion_); - manager.EnlistFullThreadLocalRegion(tlRegion_); - tlRegion_ = RegionDesc::NullRegion(); - } + ClearThreadLocalRegion(); } void AllocationBuffer::Init() @@ -239,8 +370,7 @@ HeapAddress AllocationBuffer::Allocate(size_t totalSize, AllocType allocType) // try an allocation but do not handle failure HeapAddress AllocationBuffer::AllocateImpl(size_t totalSize, AllocType allocType) { - RegionSpace& theAllocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); - RegionManager& manager = theAllocator.GetRegionManager(); + RegionSpace& heapSpace = reinterpret_cast(Heap::GetHeap().GetAllocator()); // allocate from thread local region if (LIKELY_CC(tlRegion_ != RegionDesc::NullRegion())) { @@ -251,21 +381,17 @@ HeapAddress AllocationBuffer::AllocateImpl(size_t totalSize, AllocType allocType // allocation failed because region is full. if (tlRegion_->IsThreadLocalRegion()) { - manager.RemoveThreadLocalRegion(tlRegion_); - manager.EnlistFullThreadLocalRegion(tlRegion_); + heapSpace.HandleFullThreadLocalRegion(tlRegion_); tlRegion_ = RegionDesc::NullRegion(); } } - RegionDesc* r = manager.AllocateThreadLocalRegion(); + RegionDesc* r = heapSpace.AllocateThreadLocalRegion(); if (UNLIKELY_CC(r == nullptr)) { return 0; } tlRegion_ = r; return r->Alloc(totalSize); - // } - // not enough region for thread local buffer. 
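The Init change above now feeds param.heapParam.exemptionThreshold into the from-space. As a worked example with hypothetical numbers (neither the default threshold nor the region size is stated in this patch): with exemptionThreshold = 0.8 and 256 KiB regions, ExemptFromRegions in from_space.cpp below keeps any from-region holding more than 0.8 * 256 KiB ≈ 205 KiB of live bytes where it is, and only the sparser regions are actually copied.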
- return 0; } HeapAddress AllocationBuffer::AllocateRawPointerObject(size_t totalSize) @@ -310,7 +436,7 @@ void RegionSpace::FeedHungryBuffers() for (auto* buffer : hungryBuffers) { if (buffer->GetPreparedRegion() != nullptr) { continue; } if (region == nullptr) { - region = regionManager_.AllocateThreadLocalRegion(true); + region = AllocateThreadLocalRegion(true); if (region == nullptr) { return; } } if (buffer->SetPreparedRegion(region)) { @@ -321,4 +447,10 @@ void RegionSpace::FeedHungryBuffers() regionManager_.CollectRegion(region); } } + +void RegionSpace::VisitOldSpaceRememberSet(const std::function& func) +{ + matureSpace_.VisitRememberSet(func); + regionManager_.VisitRememberSet(func); +} } // namespace panda diff --git a/common_components/common_runtime/src/heap/allocator/region_space.h b/common_components/common_runtime/src/heap/allocator/region_space.h index 709e08c96b..329f07a141 100755 --- a/common_components/common_runtime/src/heap/allocator/region_space.h +++ b/common_components/common_runtime/src/heap/allocator/region_space.h @@ -15,7 +15,7 @@ #ifndef ARK_COMMON_REGION_SPACE_H #define ARK_COMMON_REGION_SPACE_H -#include +#include #include #include #include @@ -25,6 +25,10 @@ #include "common_components/common_runtime/src/heap/allocator/alloc_util.h" #include "common_components/common_runtime/src/heap/allocator/allocator.h" #include "common_components/common_runtime/src/heap/allocator/region_manager.h" +#include "common_components/common_runtime/src/heap/space/young_space.h" +#include "common_components/common_runtime/src/heap/space/mature_space.h" +#include "common_components/common_runtime/src/heap/space/from_space.h" +#include "common_components/common_runtime/src/heap/space/to_space.h" #include "common_components/common_runtime/src/mutator/mutator.h" #if defined(ARKCOMMON_SANITIZER_SUPPORT) #include "common_components/common_runtime/src/sanitizer/sanitizer_interface.h" @@ -37,6 +41,8 @@ class Taskpool; // RegionSpace aims to be the API for other components of runtime // the complication of implementation is delegated to RegionManager // allocator should not depend on any assumptions on the details of RegionManager + +// todo: Allocator -> BaseAllocator, RegionSpace -> RegionalHeap class RegionSpace : public Allocator { public: static size_t ToAllocatedSize(size_t objSize) @@ -51,7 +57,8 @@ public: return ToAllocatedSize(objSize); } - RegionSpace() = default; + RegionSpace() : youngSpace_(regionManager_), matureSpace_(regionManager_), + fromSpace_(regionManager_, *this), toSpace_(regionManager_) {} NO_INLINE_CC virtual ~RegionSpace() { if (allocBufferManager_ != nullptr) { @@ -66,12 +73,39 @@ public: void Init(const RuntimeParam ¶m) override; + RegionDesc* AllocateThreadLocalRegion(bool expectPhysicalMem = false); + + void HandleFullThreadLocalRegion(RegionDesc* region) noexcept + { + ASSERT_LOGF(region->IsThreadLocalRegion(), "unexpected region type"); + if (IsGcThread()) { + toSpace_.HandleFullThreadLocalRegion(region); + } else { + youngSpace_.HandleFullThreadLocalRegion(region); + } + } + + // only used for deserialize allocation, allocate one region and regard it as full region + // todo: adapt for concurrent gc + uintptr_t AllocRegion(); + uintptr_t AllocPinnedRegion(); + uintptr_t AllocLargeRegion(size_t size); + uintptr_t AllocJitFortRegion(size_t size); + HeapAddress Allocate(size_t size, AllocType allocType) override; HeapAddress AllocateNoGC(size_t size, AllocType allocType) override; RegionManager& GetRegionManager() noexcept { return regionManager_; } + 
FromSpace& GetFromSpace() noexcept { return fromSpace_; } + + ToSpace& GetToSpace() noexcept { return toSpace_; } + + MatureSpace& GetMatureSpace() noexcept { return matureSpace_; } + + YoungSpace& GetYoungSpace() noexcept { return youngSpace_; } + HeapAddress GetSpaceStartAddress() const override { return reservedStart_; } HeapAddress GetSpaceEndAddress() const override { return reservedEnd_; } @@ -79,10 +113,23 @@ public: size_t GetCurrentCapacity() const override { return regionManager_.GetInactiveZone() - reservedStart_; } size_t GetMaxCapacity() const override { return reservedEnd_ - reservedStart_; } - inline size_t GetRecentAllocatedSize() const { return regionManager_.GetRecentAllocatedSize(); } + inline size_t GetRecentAllocatedSize() const + { + return youngSpace_.GetRecentAllocatedSize() + regionManager_.GetRecentAllocatedSize(); + } // size of objects survived in previous gc. - inline size_t GetSurvivedSize() const { return regionManager_.GetSurvivedSize(); } + inline size_t GetSurvivedSize() const + { + return fromSpace_.GetSurvivedSize() + toSpace_.GetAllocatedSize() + + youngSpace_.GetAllocatedSize() + matureSpace_.GetAllocatedSize() + regionManager_.GetSurvivedSize(); + } + + inline size_t GetUsedUnitCount() const + { + return fromSpace_.GetUsedUnitCount() + toSpace_.GetUsedUnitCount() + + youngSpace_.GetUsedUnitCount() + matureSpace_.GetUsedUnitCount() + regionManager_.GetUsedUnitCount(); + } size_t GetUsedPageSize() const override { return regionManager_.GetUsedRegionSize(); } @@ -92,16 +139,19 @@ public: return static_cast(GetUsedPageSize() / heapUtilization); } - size_t GetAllocatedBytes() const override { return regionManager_.GetAllocatedSize(); } + size_t GetAllocatedBytes() const override + { + return fromSpace_.GetAllocatedSize() + toSpace_.GetAllocatedSize() + + youngSpace_.GetAllocatedSize() + matureSpace_.GetAllocatedSize() + regionManager_.GetAllocatedSize(); + } size_t LargeObjectSize() const override { return regionManager_.GetLargeObjectSize(); } - size_t FromSpaceSize() const { return regionManager_.GetFromSpaceSize(); } + size_t FromSpaceSize() const { return fromSpace_.GetAllocatedSize(); } + size_t ToSpaceSize() const { return toSpace_.GetAllocatedSize(); } size_t PinnedSpaceSize() const { return regionManager_.GetPinnedSpaceSize(); } - inline size_t ToSpaceSize() const { return regionManager_.GetToSpaceSize(); } - #ifndef NDEBUG bool IsHeapObject(HeapAddress addr) const override; #endif @@ -117,7 +167,7 @@ public: if (releaseAll) { return regionManager_.ReleaseGarbageRegions(0); } else { - size_t size = regionManager_.GetAllocatedSize(); + size_t size = GetAllocatedBytes(); double cachedRatio = 1 - BaseRuntime::GetInstance()->GetHeapParam().heapUtilization; size_t targetCachedSize = static_cast(size * cachedRatio); return regionManager_.ReleaseGarbageRegions(targetCachedSize); @@ -137,7 +187,7 @@ public: void ExemptFromSpace() { ARK_COMMON_PHASE_TIMER("ExemptFromRegions"); - regionManager_.ExemptFromRegions(); + fromSpace_.ExemptFromRegions(); } BaseObject* RouteObject(BaseObject* fromObj, size_t size) @@ -147,17 +197,20 @@ public: return reinterpret_cast(toAddr); } - // void PrepareFromSpace() { regionManager_.PrepareFromRegionList(); } - - // void ClearAllLiveInfo() { regionManager_.ClearAllLiveInfo(); } - void CopyFromSpace(Taskpool *threadPool) { ARK_COMMON_PHASE_TIMER("CopyFromRegions"); - regionManager_.CopyFromRegions(threadPool); + fromSpace_.CopyFromRegions(threadPool); } - void FixHeap() { regionManager_.FixAllRegionLists(); } + void FixHeap() 
+ { + youngSpace_.FixAllRegions(); + matureSpace_.FixAllRegions(); + fromSpace_.FixAllRegions(); + toSpace_.FixAllRegions(); + regionManager_.FixAllRegionLists(); + } using RootSet = MarkStack; @@ -165,23 +218,42 @@ public: void CollectFromSpaceGarbage() { - regionManager_.CollectFromSpaceGarbage(); - regionManager_.ReassembleFromSpace(); + regionManager_.CollectFromSpaceGarbage(fromSpace_.GetFromRegionList()); + } + + void HandlePromotion() + { + fromSpace_.GetPromotedTo(matureSpace_); + toSpace_.GetPromotedTo(matureSpace_); + } + + void AssembleSmallGarbageCandidates() + { + youngSpace_.AssembleGarbageCandidates(fromSpace_); + if (Heap::GetHeap().GetGCReason() != GC_REASON_YOUNG) { + matureSpace_.ClearRSet(); + matureSpace_.AssembleGarbageCandidates(fromSpace_); + regionManager_.ClearRSet(); + } } void ClearAllGCInfo() { regionManager_.ClearAllGCInfo(); + youngSpace_.ClearAllGCInfo(); + matureSpace_.ClearAllGCInfo(); + toSpace_.ClearAllGCInfo(); + fromSpace_.ClearAllGCInfo(); } - void AssembleGarbageCandidates(bool collectAll = false) + void AssembleGarbageCandidates() { - regionManager_.AssembleSmallGarbageCandidates(); - regionManager_.AssemblePinnedGarbageCandidates(collectAll); + AssembleSmallGarbageCandidates(); + regionManager_.AssemblePinnedGarbageCandidates(); regionManager_.AssembleLargeGarbageCandidates(); } - void DumpRegionStats(const char* msg) const { regionManager_.DumpRegionStats(msg); } + void DumpAllRegionStats(const char* msg) const; void CountLiveObject(const BaseObject* obj) { regionManager_.CountLiveObject(obj); } @@ -234,12 +306,50 @@ public: return region->IsNewObjectSinceTrace(object); } - void AddRawPointerObject(BaseObject* obj) { regionManager_.AddRawPointerObject(obj); } + static bool IsYoungSpaceObject(const BaseObject* object) + { + RegionDesc* region = RegionDesc::GetRegionDescAt(reinterpret_cast(object)); + ASSERT_LOGF(region != nullptr, "region is nullptr"); + return region->IsInYoungSpace(); + } - void RemoveRawPointerObject(BaseObject* obj) { regionManager_.RemoveRawPointerObject(obj); } + static bool IsInRememberSet(const BaseObject* object) + { + RegionDesc* region = RegionDesc::GetRegionDescAt(reinterpret_cast(object)); + ASSERT_LOGF(region != nullptr, "region is nullptr"); + return region->IsInRSet(const_cast(object)); + } - friend class Allocator; + void AddRawPointerObject(BaseObject* obj) + { + RegionDesc* region = RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); + region->IncRawPointerObjectCount(); + if (region->IsFromRegion() && fromSpace_.TryDeleteFromRegion(region, RegionDesc::RegionType::FROM_REGION, + RegionDesc::RegionType::RAW_POINTER_REGION)) { + GCPhase phase = Heap::GetHeap().GetGCPhase(); + CHECK(phase != GCPhase::GC_PHASE_COPY && phase != GCPhase::GC_PHASE_PRECOPY); + regionManager_.AddRawPointerRegion(region); + } else { + CHECK(region->GetRegionType() != RegionDesc::RegionType::LONE_FROM_REGION); + } + } + + void RemoveRawPointerObject(BaseObject* obj) + { + RegionDesc* region = RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); + region->DecRawPointerObjectCount(); + } + + void AddRawPointerRegion(RegionDesc* region) + { + regionManager_.AddRawPointerRegion(region); + } + + void CopyRegion(RegionDesc* region); + void VisitOldSpaceRememberSet(const std::function& func); + + friend class Allocator; private: enum class TryAllocationThreshold { RESCHEDULE = 3, @@ -251,6 +361,14 @@ private: HeapAddress reservedEnd_ = 0; RegionManager regionManager_; MemoryMap* map_{ nullptr }; + + YoungSpace youngSpace_; + MatureSpace 
matureSpace_; + + FromSpace fromSpace_; + ToSpace toSpace_; }; + +using RegionalHeap = RegionSpace; } // namespace panda #endif // ARK_COMMON_REGION_SPACE_H diff --git a/common_components/common_runtime/src/heap/collector/gc_request.cpp b/common_components/common_runtime/src/heap/collector/gc_request.cpp index 6ed3b7921a..4453d665b7 100755 --- a/common_components/common_runtime/src/heap/collector/gc_request.cpp +++ b/common_components/common_runtime/src/heap/collector/gc_request.cpp @@ -60,6 +60,7 @@ bool GCRequest::ShouldBeIgnored() const GCRequest g_gcRequests[] = { { GC_REASON_USER, "user", false, true, 0, 0 }, { GC_REASON_OOM, "oom", true, false, 0, 0 }, + { GC_REASON_YOUNG, "young", false, true, 0, 0 }, { GC_REASON_BACKUP, "backup", true, false, 0, 0 }, { GC_REASON_HEU, "heuristic", false, true, LONG_MIN_HEU_GC_INTERVAL_NS, g_initHeuTriggerTimestamp }, { GC_REASON_NATIVE, "native_alloc", false, true, MIN_ASYNC_GC_INTERVAL_NS, g_initNativeTriggerTimestamp }, diff --git a/common_components/common_runtime/src/heap/collector/gc_request.h b/common_components/common_runtime/src/heap/collector/gc_request.h index 1c7968c15f..5da71c9a31 100755 --- a/common_components/common_runtime/src/heap/collector/gc_request.h +++ b/common_components/common_runtime/src/heap/collector/gc_request.h @@ -33,6 +33,7 @@ constexpr uint64_t LONG_MIN_HEU_GC_INTERVAL_NS = 200 * panda::MILLI_SECOND_TO_NA enum GCReason : uint32_t { GC_REASON_USER = 0, // Triggered by user explicitly. GC_REASON_OOM, // Out of memory. Failed to allocate object. + GC_REASON_YOUNG, GC_REASON_BACKUP, // backup gc is triggered if no other reason triggers gc for a long time. GC_REASON_HEU, // Statistics show it is worth doing GC. Does not have to be immediate. GC_REASON_NATIVE, // Native-Allocation-Registry shows it's worth doing GC. diff --git a/common_components/common_runtime/src/heap/collector/region_bitmap.h b/common_components/common_runtime/src/heap/collector/region_bitmap.h index c0009e3a51..9c18351de0 100755 --- a/common_components/common_runtime/src/heap/collector/region_bitmap.h +++ b/common_components/common_runtime/src/heap/collector/region_bitmap.h @@ -23,9 +23,9 @@ #endif namespace panda { -constexpr size_t kBitsPerByte = 8; -constexpr size_t kMarkedBytesPerBit = 8; -constexpr size_t kBitsPerWord = sizeof(uint64_t) * kBitsPerByte; +static constexpr size_t kBitsPerByte = 8; +static constexpr size_t kMarkedBytesPerBit = 8; +static constexpr size_t kBitsPerWord = sizeof(uint64_t) * kBitsPerByte; class RegionDesc; struct RegionBitmap { static constexpr uint8_t factor = 16; diff --git a/common_components/common_runtime/src/heap/collector/region_rset.h b/common_components/common_runtime/src/heap/collector/region_rset.h new file mode 100644 index 0000000000..c4705e76f3 --- /dev/null +++ b/common_components/common_runtime/src/heap/collector/region_rset.h @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef ARK_COMMON_REGION_RSET_H +#define ARK_COMMON_REGION_RSET_H +#include + +namespace panda { +static constexpr size_t CARD_SIZE = 512; +class RegionRSet { +public: + explicit RegionRSet(size_t regionSize) : cardCnt(regionSize / CARD_SIZE) + { +#ifdef _WIN64 + void* startAddress = VirtualAlloc(NULL, cardCnt * sizeof(uint64_t), MEM_RESERVE, PAGE_READWRITE); + if (startAddress == NULL) { + LOG_COMMON(FATAL) << "failed to initialize HeapBitmapManager"; + UNREACHABLE_CC(); + } +#else + void* startAddress = mmap(nullptr, cardCnt * sizeof(uint64_t), + PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (startAddress == MAP_FAILED) { + LOG_COMMON(FATAL) << "failed to initialize HeapBitmapManager"; + UNREACHABLE_CC(); + } else { +#ifndef __APPLE__ + (void)madvise(startAddress, cardCnt * sizeof(uint64_t), MADV_NOHUGEPAGE); + ARK_COMMON_PRCTL(startAddress, cardCnt * sizeof(uint64_t), "forward_data"); +#endif + } +#endif + cardTable = reinterpret_cast*>(startAddress); + } + + ~RegionRSet() + { +#ifdef _WIN64 + if (!VirtualFree(reinterpret_cast(cardTable), 0, MEM_RELEASE)) { + LOG_COMMON(ERROR) << "VirtualFree error for HeapBitmapManager"; + } +#else + if (munmap(reinterpret_cast(cardTable), cardCnt * sizeof(uint64_t)) != 0) { + LOG_COMMON(ERROR) << "munmap error for HeapBitmapManager"; + } +#endif + } + + bool MarkCardTable(size_t offset) + { + size_t cardIdx = (offset / kMarkedBytesPerBit) / kBitsPerWord; + size_t headMaskBitStart = (offset / kMarkedBytesPerBit) % kBitsPerWord; + uint64_t headMaskBits = static_cast(1) << headMaskBitStart; + uint64_t card = cardTable[cardIdx].load(); + bool isMarked = ((card & headMaskBits) != 0); + if (!isMarked) { + card = cardTable[cardIdx].fetch_or(headMaskBits); + isMarked = ((card & headMaskBits) != 0); + return isMarked; + } + return isMarked; + } + + bool IsMarkedCard(size_t offset) + { + size_t cardIdx = (offset / kMarkedBytesPerBit) / kBitsPerWord; + size_t headMaskBitStart = (offset / kMarkedBytesPerBit) % kBitsPerWord; + uint64_t headMaskBits = static_cast(1) << headMaskBitStart; + return (cardTable[cardIdx].load() & headMaskBits) != 0; + } + + void ClearCardTable() + { + LOGF_CHECK(memset_s(cardTable, cardCnt * sizeof(uint64_t), 0, cardCnt * sizeof(uint64_t)) == EOK) + << "memset_s fail"; + } + + void VisitAllMarkedCard(const std::function& func, HeapAddress regionStart) + { + for (size_t i = 0; i < cardCnt.load(); i++) { + uint64_t card = cardTable[i].load(); + for (size_t j = 0; j < kBitsPerWord; j++) { + uint64_t mask = static_cast(1) << j; + if ((card & mask) == 0) { + continue; + } + BaseObject* obj = reinterpret_cast(regionStart + + reinterpret_cast((i * kBitsPerWord) * kBitsPerByte + j * kBitsPerByte)); + func(obj); + } + } + } +private: + std::atomic cardCnt; + std::atomic* cardTable; +}; +} +#endif // ARK_COMMON_REGION_RSET_H \ No newline at end of file diff --git a/common_components/common_runtime/src/heap/collector/trace_collector.cpp b/common_components/common_runtime/src/heap/collector/trace_collector.cpp index 8016fee1a0..a10037888a 100755 --- a/common_components/common_runtime/src/heap/collector/trace_collector.cpp +++ b/common_components/common_runtime/src/heap/collector/trace_collector.cpp @@ -321,6 +321,7 @@ void TraceCollector::TraceRoots(WorkStack& workStack) } { + #ifdef ARK_USE_SATB_BARRIER ARK_COMMON_PHASE_TIMER("Concurrent re-marking"); ConcurrentReMark(workStack, maxWorkers > 0); @@ -334,6 +335,9 @@ void TraceCollector::TraceRoots(WorkStack& workStack) ConcurrentReMark(workStack, maxWorkers > 0); 
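The card geometry of the new RegionRSet above follows from its constants, although the patch never states it directly: one bit covers kMarkedBytesPerBit = 8 bytes of the region, so one 64-bit card word covers 8 * 64 = 512 bytes, which is exactly CARD_SIZE, and cardCnt = regionSize / CARD_SIZE; the table therefore costs 8 bytes of metadata per 512 bytes of region (about 1.6%). A minimal sketch of the index math that MarkCardTable and IsMarkedCard implement:

// For an object at byte offset `off` from the region start:
//   word index = (off / kMarkedBytesPerBit) / kBitsPerWord   (i.e. off / 512)
//   bit index  = (off / kMarkedBytesPerBit) % kBitsPerWord
size_t CardWordIndex(size_t off) { return (off / 8) / 64; }
size_t CardBitIndex(size_t off)  { return (off / 8) % 64; }
// e.g. off = 0x1230 (4656) -> word 9, bit 6.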
ProcessWeakReferences(); } else { + if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) { + LOGF_CHECK(MarkRememberSet(workStack)) << "not cleared\n"; + } ProcessWeakReferences(); } #endif @@ -407,8 +411,50 @@ bool TraceCollector::MarkSatbBuffer(WorkStack& workStack) return true; } +bool TraceCollector::MarkRememberSet(WorkStack& workStack) +{ + ARK_COMMON_PHASE_TIMER("MarkRememberSet"); + if (!workStack.empty()) { + workStack.clear(); + } + auto visitRSetObj = [this, &workStack]() { + auto func = [this, &workStack](BaseObject* object) { + if (Heap::IsHeapAddress(object)) { + object->ForEachRefField([this, &workStack, &object](RefField<>& field) { + BaseObject* targetObj = field.GetTargetObject(); + RegionDesc* region = RegionDesc::GetRegionDescAt(reinterpret_cast(targetObj)); + if (Heap::IsHeapAddress(targetObj) && region->IsInYoungSpace() && + !region->IsNewObjectSinceTrace(targetObj) && !this->MarkObject(targetObj)) { + workStack.push_back(targetObj); + DLOG(TRACE, "remember set trace obj: %p@%p, ref: %p", object, &field, targetObj); + } + }); + } + }; + RegionSpace& space = reinterpret_cast(Heap::GetHeap().GetAllocator()); + space.VisitOldSpaceRememberSet(func); + }; + visitRSetObj(); + const uint32_t maxWorkers = GetGCThreadCount(true) - 1; + do { + if (LIKELY_CC(!workStack.empty())) { + Taskpool *threadPool = GetThreadPool(); + TracingImpl(workStack, (workStack.size() > MAX_MARKING_WORK_SIZE) && (maxWorkers > 0)); + } + visitRSetObj(); + if (workStack.empty()) { + TransitionToGCPhase(GCPhase::GC_PHASE_REMARK_SATB, true); + visitRSetObj(); + } + } while (!workStack.empty()); + return true; +} + void TraceCollector::ConcurrentReMark(WorkStack& remarkStack, bool parallel) { + if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) { + LOGF_CHECK(MarkRememberSet(remarkStack)) << "not cleared\n"; + } LOGF_CHECK(MarkSatbBuffer(remarkStack)) << "not cleared\n"; } @@ -622,6 +668,7 @@ void TraceCollector::RunGarbageCollection(uint64_t gcIndex, GCReason reason) gcReason_ = reason; PreGarbageCollection(true); VLOG(REPORT, "[GC] Start %s %s gcIndex= %lu", GetCollectorName(), g_gcRequests[gcReason_].name, gcIndex); + Heap::GetHeap().SetGCReason(reason); GCStats& gcStats = GetGCStats(); gcStats.collectedBytes = 0; gcStats.gcStartTime = TimeUtil::NanoSeconds(); @@ -629,7 +676,7 @@ void TraceCollector::RunGarbageCollection(uint64_t gcIndex, GCReason reason) DoGarbageCollection(); HeapBitmapManager::GetHeapBitmapManager().ClearHeapBitmap(); - reinterpret_cast(theAllocator_).DumpRegionStats("region statistics when gc ends"); + reinterpret_cast(theAllocator_).DumpAllRegionStats("region statistics when gc ends"); if (reason == GC_REASON_OOM) { Heap::GetHeap().GetAllocator().ReclaimGarbageMemory(true); @@ -659,8 +706,6 @@ void TraceCollector::CopyFromSpace() stats.fromSpaceSize = space.FromSpaceSize(); space.CopyFromSpace(GetThreadPool()); - // CopyFromSpace changes from-space size by exempting from regions, so re-read it. - // to-space is meaningless. 
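MarkRememberSet above pushes a remembered reference only when several conditions all hold; restated as a hypothetical private helper of TraceCollector (illustrative only — the patch keeps this inline in the visitor lambda):

bool TraceCollector::ShouldTraceRememberedRef(BaseObject* target)  // hypothetical helper, not in the patch
{
    if (!Heap::IsHeapAddress(target)) {
        return false;                                    // skip non-heap values
    }
    RegionDesc* region = RegionDesc::GetRegionDescAt(reinterpret_cast<uintptr_t>(target));
    return region->IsInYoungSpace() &&                   // a young GC only traces into the young generation
           !region->IsNewObjectSinceTrace(target) &&     // objects born after the trace line are treated as live already
           !MarkObject(target);                          // MarkObject is assumed to return true when already marked
}

References that stay in the old generation are ignored here because a young GC does not collect old regions.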
stats.smallGarbageSize = space.FromSpaceSize() - space.ToSpaceSize(); } diff --git a/common_components/common_runtime/src/heap/collector/trace_collector.h b/common_components/common_runtime/src/heap/collector/trace_collector.h index 425146e126..cd8e1c5612 100755 --- a/common_components/common_runtime/src/heap/collector/trace_collector.h +++ b/common_components/common_runtime/src/heap/collector/trace_collector.h @@ -300,6 +300,7 @@ protected: void EnumerateAllRoots(WorkStack& workStack); void TraceRoots(WorkStack& workStack); bool MarkSatbBuffer(WorkStack& workStack); + bool MarkRememberSet(WorkStack& workStack); // concurrent marking. void TracingImpl(WorkStack& workStack, bool parallel); diff --git a/common_components/common_runtime/src/heap/heap.cpp b/common_components/common_runtime/src/heap/heap.cpp index 87e9ab4d7e..1f517c5b74 100755 --- a/common_components/common_runtime/src/heap/heap.cpp +++ b/common_components/common_runtime/src/heap/heap.cpp @@ -71,6 +71,10 @@ public: void EnableGC(bool val) override { return isGCEnabled.store(val); } + GCReason GetGCReason() override { return gcReason; } + + void SetGCReason(GCReason reason) override { gcReason = reason; } + HeapAddress Allocate(size_t size, AllocType allocType, bool allowGC = true) override; GCPhase GetGCPhase() const override; @@ -133,6 +137,7 @@ private: StaticRootTable staticRootTable; std::atomic isGCEnabled = { true }; + GCReason gcReason = GCReason::GC_REASON_INVALID; }; // end class HeapImpl static ImmortalWrapper g_heapInstance; diff --git a/common_components/common_runtime/src/heap/heap.h b/common_components/common_runtime/src/heap/heap.h index 72f0ad33b0..b151a6db8f 100755 --- a/common_components/common_runtime/src/heap/heap.h +++ b/common_components/common_runtime/src/heap/heap.h @@ -135,6 +135,10 @@ public: virtual void StopGCWork() = 0; + virtual GCReason GetGCReason() = 0; + + virtual void SetGCReason(GCReason reason) = 0; + static void OnHeapCreated(HeapAddress startAddr) { heapStartAddr = startAddr; diff --git a/common_components/common_runtime/src/heap/space/from_space.cpp b/common_components/common_runtime/src/heap/space/from_space.cpp new file mode 100644 index 0000000000..7898c9d7e1 --- /dev/null +++ b/common_components/common_runtime/src/heap/space/from_space.cpp @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "common_components/common_runtime/src/heap/allocator/region_space.h" +#include "common_components/common_runtime/src/heap/space/from_space.h" +#include "common_components/common_runtime/src/heap/space/mature_space.h" +#include "common_components/common_runtime/src/heap/collector/collector_resources.h" +#include "common_components/taskpool/taskpool.h" +#if defined(ARKCOMMON_SANITIZER_SUPPORT) +#include "common_components/common_runtime/src/sanitizer/sanitizer_interface.h" +#endif + +namespace panda { +void FromSpace::DumpRegionStats() const +{ + size_t fromRegions = fromRegionList_.GetRegionCount(); + size_t fromUnits = fromRegionList_.GetUnitCount(); + size_t fromSize = fromUnits * RegionDesc::UNIT_SIZE; + size_t allocFromSize = fromRegionList_.GetAllocatedSize(); + + size_t exemptedFromRegions = exemptedFromRegionList_.GetRegionCount(); + size_t exemptedFromUnits = exemptedFromRegionList_.GetUnitCount(); + size_t exemptedFromSize = exemptedFromUnits * RegionDesc::UNIT_SIZE; + size_t allocExemptedFromSize = exemptedFromRegionList_.GetAllocatedSize(); + size_t units = fromUnits + exemptedFromUnits; + + VLOG(REPORT, "\tfrom space units: %zu (%zu B)", units, units * RegionDesc::UNIT_SIZE); + VLOG(REPORT, "\tfrom-regions %zu: %zu units (%zu B, alloc %zu)", fromRegions, fromUnits, fromSize, allocFromSize); + VLOG(REPORT, "\texempted from-regions %zu: %zu units (%zu B, alloc %zu)", + exemptedFromRegions, exemptedFromUnits, exemptedFromSize, allocExemptedFromSize); +} + +// forward only regions whose garbage bytes is greater than or equal to exemptedRegionThreshold. +void FromSpace::ExemptFromRegions() +{ + size_t forwardBytes = 0; + size_t floatingGarbage = 0; + size_t oldFromBytes = fromRegionList_.GetUnitCount() * RegionDesc::UNIT_SIZE; + RegionDesc* fromRegion = fromRegionList_.GetHeadRegion(); + while (fromRegion != nullptr) { + size_t threshold = static_cast(exemptedRegionThreshold_ * fromRegion->GetRegionSize()); + size_t liveBytes = fromRegion->GetLiveByteCount(); + long rawPtrCnt = fromRegion->GetRawPointerObjectCount(); + if (liveBytes > threshold) { // ignore this region + RegionDesc* del = fromRegion; + DLOG(REGION, "region %p @0x%zx+%zu exempted by forwarding: %zu units, %u live bytes", del, + del->GetRegionStart(), del->GetRegionAllocatedSize(), + del->GetUnitCount(), del->GetLiveByteCount()); + + fromRegion = fromRegion->GetNextRegion(); + if (fromRegionList_.TryDeleteRegion(del, RegionDesc::RegionType::FROM_REGION, + RegionDesc::RegionType::EXEMPTED_FROM_REGION)) { + ExemptFromRegion(del); + } + floatingGarbage += (del->GetRegionSize() - del->GetLiveByteCount()); + } else if (rawPtrCnt > 0) { + RegionDesc* del = fromRegion; + DLOG(REGION, "region %p @0x%zx+%zu pinned by forwarding: %zu units, %u live bytes rawPtr cnt %u", + del, del->GetRegionStart(), del->GetRegionAllocatedSize(), + del->GetUnitCount(), del->GetLiveByteCount(), rawPtrCnt); + + fromRegion = fromRegion->GetNextRegion(); + if (fromRegionList_.TryDeleteRegion(del, RegionDesc::RegionType::FROM_REGION, + RegionDesc::RegionType::RAW_POINTER_REGION)) { + heap_.AddRawPointerRegion(del); + } + floatingGarbage += (del->GetRegionSize() - del->GetLiveByteCount()); + } else { + forwardBytes += fromRegion->GetLiveByteCount(); + fromRegion = fromRegion->GetNextRegion(); + } + } + + size_t newFromBytes = fromRegionList_.GetUnitCount() * RegionDesc::UNIT_SIZE; + size_t exemptedFromBytes = exemptedFromRegionList_.GetUnitCount() * RegionDesc::UNIT_SIZE; + VLOG(REPORT, "exempt from-space: %zu B - %zu B -> %zu B, %zu 
B floating garbage, %zu B to forward", + oldFromBytes, exemptedFromBytes, newFromBytes, floatingGarbage, forwardBytes); +} + +class CopyTask : public Task { +public: + CopyTask(int32_t id, FromSpace& fromSpace, RegionDesc& region, size_t regionCnt, TaskPackMonitor &monitor) + : Task(id), fromSpace_(fromSpace), startRegion_(region), regionCount_(regionCnt), monitor_(monitor) {} + + ~CopyTask() override = default; + + bool Run([[maybe_unused]] uint32_t threadIndex) override + { + // set current thread as a gc thread. + ThreadLocal::SetThreadType(ThreadType::GC_THREAD); + fromSpace_.ParallelCopyFromRegions(startRegion_, regionCount_); + monitor_.NotifyFinishOne(); + ThreadLocal::SetThreadType(ThreadType::ARK_PROCESSOR); + return true; + } + +private: + FromSpace &fromSpace_; + RegionDesc &startRegion_; + size_t regionCount_; + TaskPackMonitor &monitor_; +}; + +void FromSpace::ParallelCopyFromRegions(RegionDesc &startRegion, size_t regionCnt) +{ + RegionDesc *currentRegion = &startRegion; + for (size_t count = 0; (count < regionCnt) && currentRegion != nullptr; ++count) { + RegionDesc *region = currentRegion; + currentRegion = currentRegion->GetNextRegion(); + heap_.CopyRegion(region); + } + + AllocationBuffer* allocBuffer = AllocationBuffer::GetAllocBuffer(); + if (LIKELY_CC(allocBuffer != nullptr)) { + allocBuffer->ClearRegion(); // clear thread local region for gc threads. + } +} + +void FromSpace::CopyFromRegions() +{ + // iterate each region in fromRegionList + RegionDesc* fromRegion = fromRegionList_.GetHeadRegion(); + while (fromRegion != nullptr) { + ASSERT_LOGF(fromRegion->IsValidRegion(), "region is not head when get head region of from region list"); + RegionDesc* region = fromRegion; + fromRegion = fromRegion->GetNextRegion(); + heap_.CopyRegion(region); + } + + VLOG(REPORT, "forward %zu from-region units", fromRegionList_.GetUnitCount()); + + AllocationBuffer* allocBuffer = AllocationBuffer::GetAllocBuffer(); + if (LIKELY(allocBuffer != nullptr)) { + allocBuffer->ClearRegion(); // clear region for next GC + } +} + +void FromSpace::CopyFromRegions(Taskpool* threadPool) +{ + if (threadPool != nullptr) { + uint32_t parallel = Heap::GetHeap().GetCollectorResources().GetGCThreadCount(true) - 1; + uint32_t threadNum = parallel + 1; + // We won't change fromRegionList during gc, so we can use it without lock. 
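The split computed just below hands each posted CopyTask an equal share and lets the calling thread absorb the remainder: regionCntEachTask = totalRegionCount / threadNum and leftRegionCnt = totalRegionCount - regionCntEachTask * parallel. With hypothetical numbers — 3 GC worker threads (parallel = 3, threadNum = 4) and 10 from-regions — each posted task copies 10 / 4 = 2 regions and the caller copies the remaining 10 - 2 * 3 = 4 before waiting on the monitor.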
+ size_t totalRegionCount = fromRegionList_.GetRegionCount(); + if (UNLIKELY_CC(totalRegionCount == 0)) { + return; + } + size_t regionCntEachTask = totalRegionCount / static_cast(threadNum); + size_t leftRegionCnt = totalRegionCount - regionCntEachTask * parallel; + RegionDesc* region = fromRegionList_.GetHeadRegion(); + TaskPackMonitor monitor(parallel, parallel); + for (uint32_t i = 0; i < parallel; ++i) { + ASSERT_LOGF(region != nullptr, "from region list records wrong region info"); + RegionDesc* startRegion = region; + for (size_t count = 0; count < regionCntEachTask; ++count) { + region = region->GetNextRegion(); + } + threadPool->PostTask(std::make_unique(0, *this, *startRegion, regionCntEachTask, monitor)); + } + ParallelCopyFromRegions(*region, leftRegionCnt); + monitor.WaitAllFinished(); + } else { + CopyFromRegions(); + } +} + +void FromSpace::GetPromotedTo(MatureSpace& mspace) +{ + mspace.PromoteRegionList(exemptedFromRegionList_); +} +} // namespace panda diff --git a/common_components/common_runtime/src/heap/space/from_space.h b/common_components/common_runtime/src/heap/space/from_space.h new file mode 100644 index 0000000000..4cbbaafa54 --- /dev/null +++ b/common_components/common_runtime/src/heap/space/from_space.h @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef ARK_COMMON_FROM_SPACE_H +#define ARK_COMMON_FROM_SPACE_H + +#include +#include +#include +#include +#include +#include + +#include "common_components/common_runtime/src/heap/allocator/alloc_util.h" +#include "common_components/common_runtime/src/heap/allocator/allocator.h" +#include "common_components/common_runtime/src/heap/allocator/region_manager.h" +#include "common_components/common_runtime/src/mutator/mutator.h" +#if defined(ARKCOMMON_SANITIZER_SUPPORT) +#include "common_components/common_runtime/src/sanitizer/sanitizer_interface.h" +#endif + +namespace panda { +class RegionSpace; +class MatureSpace; +class Taskpool; + +// regions for small-sized movable objects, which may be moved during gc. 
+class FromSpace : public RegionalSpace { +public: + FromSpace(RegionManager& regionManager, RegionSpace& heap) : RegionalSpace(regionManager), + fromRegionList_("from-regions"), + exemptedFromRegionList_("exempted from-regions"), heap_(heap) {} + + void DumpRegionStats() const; + + void AssembleGarbageCandidates(RegionList& list) + { + fromRegionList_.MergeRegionList(list, RegionDesc::RegionType::FROM_REGION); + } + + void ExemptFromRegions(); + + void FixAllRegions() + { + TraceCollector& collector = reinterpret_cast(Heap::GetHeap().GetCollector()); + RegionManager::FixOldRegionList(collector, exemptedFromRegionList_); + } + + size_t GetUsedUnitCount() const + { + return fromRegionList_.GetUnitCount() + exemptedFromRegionList_.GetUnitCount(); + } + + size_t GetAllocatedSize() const + { + return fromRegionList_.GetAllocatedSize(); + } + + RegionList& GetFromRegionList() { return fromRegionList_; } + + bool TryDeleteFromRegion(RegionDesc* del, RegionDesc::RegionType oldType, RegionDesc::RegionType newType) + { + return fromRegionList_.TryDeleteRegion(del, oldType, newType); + } + + void DeleteFromRegion(RegionDesc* region) + { + fromRegionList_.DeleteRegion(region); + } + + void ExemptFromRegion(RegionDesc* region) + { + exemptedFromRegionList_.PrependRegion(region, RegionDesc::RegionType::EXEMPTED_FROM_REGION); + } + + size_t GetSurvivedSize() const + { + return exemptedFromRegionList_.GetAllocatedSize(); + } + + RegionSpace& GetHeap() { return heap_; } + + void ParallelCopyFromRegions(RegionDesc &startRegion, size_t regionCnt); + void CopyFromRegions(Taskpool* threadPool); + void CopyFromRegions(); + + void GetPromotedTo(MatureSpace& mspace); + + void SetExemptedRegionThreshold(double threshold) + { + exemptedRegionThreshold_ = threshold; + } + + void ClearAllGCInfo() + { + ClearGCInfo(exemptedFromRegionList_); + RegionDesc* region = fromRegionList_.GetHeadRegion(); + while (region != nullptr) { + region->ClearRSet(); + region = region->GetNextRegion(); + } + } + + void VisitRememberSet(const std::function& func) + { + auto visitFunc = [&func](RegionDesc* region) { + region->VisitRememberSet(func); + }; + fromRegionList_.VisitAllRegions(visitFunc); + exemptedFromRegionList_.VisitAllRegions(visitFunc); + } + + RegionList& GetExemptedRegionList() noexcept { return exemptedFromRegionList_; } + +private: + void ClearGCInfo(RegionList& list) + { + RegionList tmp("temp region list"); + list.CopyListTo(tmp); + tmp.VisitAllRegions([](RegionDesc* region) { + region->ClearTraceCopyFixLine(); + region->ClearLiveInfo(); + region->ResetMarkBit(); + }); + } + // fromRegionList is a list of full regions waiting to be collected (i.e. for forwarding). + // region type must be FROM_REGION. + RegionList fromRegionList_; + + // regions exempted by ExemptFromRegions, which will not be moved during current GC. + RegionList exemptedFromRegionList_; + + RegionSpace& heap_; + + double exemptedRegionThreshold_; +}; +} // namespace panda +#endif // ARK_COMMON_FROM_SPACE_H diff --git a/common_components/common_runtime/src/heap/space/mature_space.cpp b/common_components/common_runtime/src/heap/space/mature_space.cpp new file mode 100644 index 0000000000..75c812f475 --- /dev/null +++ b/common_components/common_runtime/src/heap/space/mature_space.cpp @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "common_components/common_runtime/src/heap/space/mature_space.h" +#if defined(ARKCOMMON_SANITIZER_SUPPORT) +#include "common_components/common_runtime/src/sanitizer/sanitizer_interface.h" +#endif + +namespace panda { +void MatureSpace::DumpRegionStats() const +{ + size_t matureRegions = matureRegionList_.GetRegionCount(); + size_t matureUnits = matureRegionList_.GetUnitCount(); + size_t matureSize = matureUnits * RegionDesc::UNIT_SIZE; + size_t allocFromSize = matureRegionList_.GetAllocatedSize(); + + VLOG(REPORT, "\tmature-regions %zu: %zu units (%zu B, alloc %zu)", + matureRegions, matureUnits, matureSize, allocFromSize); +} +} // namespace panda diff --git a/common_components/common_runtime/src/heap/space/mature_space.h b/common_components/common_runtime/src/heap/space/mature_space.h new file mode 100644 index 0000000000..21f4ad4a3b --- /dev/null +++ b/common_components/common_runtime/src/heap/space/mature_space.h @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef ARK_COMMON_MATURE_SPACE_H +#define ARK_COMMON_MATURE_SPACE_H + +#include +#include +#include +#include +#include +#include + +#include "common_components/common_runtime/src/heap/allocator/alloc_util.h" +#include "common_components/common_runtime/src/heap/allocator/allocator.h" +#include "common_components/common_runtime/src/heap/allocator/region_manager.h" +#include "common_components/common_runtime/src/heap/space/regional_space.h" +#include "common_components/common_runtime/src/heap/space/from_space.h" +#include "common_components/common_runtime/src/heap/space/to_space.h" +#include "common_components/common_runtime/src/mutator/mutator.h" +#if defined(ARKCOMMON_SANITIZER_SUPPORT) +#include "common_components/common_runtime/src/sanitizer/sanitizer_interface.h" +#endif + +namespace panda { +// regions for small-sized movable objects, which may be moved during gc. 
+class MatureSpace : public RegionalSpace { +public: + MatureSpace(RegionManager& regionManager) : RegionalSpace(regionManager), matureRegionList_("mature regions") {} + + void DumpRegionStats() const; + + void FixAllRegions() + { + TraceCollector& collector = reinterpret_cast(Heap::GetHeap().GetCollector()); + if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) { + RegionManager::FixMatureRegionList(collector, matureRegionList_); + } else { + RegionManager::FixOldRegionList(collector, matureRegionList_); + } + } + + void AssembleGarbageCandidates(FromSpace& fromSpace) + { + fromSpace.AssembleGarbageCandidates(matureRegionList_); + } + + size_t GetAllocatedSize() const + { + return matureRegionList_.GetAllocatedSize(); + } + + size_t GetUsedUnitCount() const + { + return matureRegionList_.GetUnitCount(); + } + + void PromoteRegionList(RegionList& list) + { + matureRegionList_.MergeRegionList(list, RegionDesc::RegionType::MATURE_REGION); + } + + void ClearRSet() + { + RegionDesc* region = matureRegionList_.GetHeadRegion(); + while (region != nullptr) { + region->ClearRSet(); + region = region->GetNextRegion(); + } + } + + void ClearAllGCInfo() + { + ClearGCInfo(matureRegionList_); + } + + void VisitRememberSet(const std::function& func) + { + auto visitFunc = [&func](RegionDesc* region) { + region->VisitAllObjects([®ion, &func](BaseObject* obj) { + if (region->IsInRSet(obj)) { + func(obj); + } + }); + }; + matureRegionList_.VisitAllRegions(visitFunc); + } + +private: + void ClearGCInfo(RegionList& list) + { + RegionList tmp("temp region list"); + list.CopyListTo(tmp); + tmp.VisitAllRegions([](RegionDesc* region) { + region->ClearTraceCopyFixLine(); + region->ClearLiveInfo(); + region->ResetMarkBit(); + }); + } + + RegionList matureRegionList_; +}; +} // namespace panda +#endif // ARK_COMMON_MATURE_SPACE_H diff --git a/common_components/common_runtime/src/heap/space/regional_space.h b/common_components/common_runtime/src/heap/space/regional_space.h new file mode 100644 index 0000000000..22734827e9 --- /dev/null +++ b/common_components/common_runtime/src/heap/space/regional_space.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef ARK_COMMON_REGIONAL_SPACE_H +#define ARK_COMMON_REGIONAL_SPACE_H + +#include +#include +#include +#include +#include +#include + +#include "common_components/common_runtime/src/heap/allocator/alloc_util.h" +#include "common_components/common_runtime/src/heap/allocator/allocator.h" +#include "common_components/common_runtime/src/heap/allocator/region_manager.h" +#include "common_components/common_runtime/src/mutator/mutator.h" +#if defined(ARKCOMMON_SANITIZER_SUPPORT) +#include "common_components/common_runtime/src/sanitizer/sanitizer_interface.h" +#endif + +namespace panda { +class RegionalSpace { +public: + RegionalSpace(RegionManager& regionManager) : regionManager_(regionManager) {} + + RegionManager& GetRegionManager() { return regionManager_; } + +protected: + RegionManager& regionManager_; +}; +} // namespace panda +#endif // ARK_COMMON_REGIONAL_SPACE_H diff --git a/common_components/common_runtime/src/heap/space/to_space.cpp b/common_components/common_runtime/src/heap/space/to_space.cpp new file mode 100644 index 0000000000..81befa4700 --- /dev/null +++ b/common_components/common_runtime/src/heap/space/to_space.cpp @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "common_components/common_runtime/src/heap/space/to_space.h" +#include "common_components/common_runtime/src/heap/space/mature_space.h" +#if defined(ARKCOMMON_SANITIZER_SUPPORT) +#include "common_components/common_runtime/src/sanitizer/sanitizer_interface.h" +#endif + +namespace panda { +void ToSpace::DumpRegionStats() const +{ + size_t fullToRegions = fullToRegionList_.GetRegionCount(); + size_t fullToUnits = fullToRegionList_.GetUnitCount(); + size_t fullToSize = fullToUnits * RegionDesc::UNIT_SIZE; + size_t allocfullToSize = fullToRegionList_.GetAllocatedSize(); + + size_t tlToRegions = tlToRegionList_.GetRegionCount(); + size_t tlToUnits = tlToRegionList_.GetUnitCount(); + size_t tlToSize = tlToUnits * RegionDesc::UNIT_SIZE; + size_t allocTLToSize = tlToRegionList_.GetAllocatedSize(); + + VLOG(REPORT, "\tfull to-regions %zu: %zu units (%zu B, alloc %zu)", + fullToRegions, fullToUnits, fullToSize, allocfullToSize); + VLOG(REPORT, "\tthread-local to-regions %zu: %zu units (%zu B, alloc %zu)", + tlToRegions, tlToUnits, tlToSize, allocTLToSize); +} + +void ToSpace::GetPromotedTo(MatureSpace& mspace) +{ + mspace.PromoteRegionList(fullToRegionList_); + mspace.PromoteRegionList(tlToRegionList_); +} +} // namespace panda diff --git a/common_components/common_runtime/src/heap/space/to_space.h b/common_components/common_runtime/src/heap/space/to_space.h new file mode 100644 index 0000000000..a40531f6da --- /dev/null +++ b/common_components/common_runtime/src/heap/space/to_space.h @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef ARK_COMMON_TO_SPACE_H +#define ARK_COMMON_TO_SPACE_H + +#include +#include +#include +#include +#include +#include + +#include "common_components/common_runtime/src/heap/allocator/alloc_util.h" +#include "common_components/common_runtime/src/heap/allocator/allocator.h" +#include "common_components/common_runtime/src/heap/allocator/region_list.h" +#include "common_components/common_runtime/src/heap/allocator/region_manager.h" +#include "common_components/common_runtime/src/heap/space/regional_space.h" +#include "common_components/common_runtime/src/heap/space/from_space.h" +#include "common_components/common_runtime/src/mutator/mutator.h" +#if defined(ARKCOMMON_SANITIZER_SUPPORT) +#include "common_components/common_runtime/src/sanitizer/sanitizer_interface.h" +#endif + +namespace panda { +class MatureSpace; +// regions for small-sized movable objects, which may be moved during gc. +class ToSpace : public RegionalSpace { +public: + ToSpace(RegionManager& regionManager) : RegionalSpace(regionManager), + tlToRegionList_("thread local to-regions"), + fullToRegionList_("full to-regions") {} + + void DumpRegionStats() const; + + void FixAllRegions() + { + TraceCollector& collector = reinterpret_cast(Heap::GetHeap().GetCollector()); + RegionManager::FixToRegionList(collector, fullToRegionList_); + RegionManager::FixToRegionList(collector, tlToRegionList_); + } + + void AddFullRegion(RegionDesc* region) + { + DCHECK_CC(Heap::GetHeap().IsGcStarted()); + fullToRegionList_.PrependRegion(region, RegionDesc::RegionType::TO_REGION); + } + + void AddThreadLocalRegion(RegionDesc* region) + { + tlToRegionList_.PrependRegion(region, RegionDesc::RegionType::TO_REGION); + } + + void HandleFullThreadLocalRegion(RegionDesc* region) + { + DCHECK_CC(Heap::GetHeap().IsGcStarted()); + tlToRegionList_.DeleteRegion(region); + fullToRegionList_.PrependRegion(region, RegionDesc::RegionType::TO_REGION); + } + + size_t GetAllocatedSize() const + { + return tlToRegionList_.GetAllocatedSize() + fullToRegionList_.GetAllocatedSize(); + } + + size_t GetSurvivedSize() const + { + return tlToRegionList_.GetAllocatedSize() + fullToRegionList_.GetAllocatedSize(); + } + + size_t GetUsedUnitCount() const + { + return tlToRegionList_.GetUnitCount() + fullToRegionList_.GetUnitCount(); + } + + void GetPromotedTo(MatureSpace& mspace); + + void ClearAllGCInfo() + { + ClearGCInfo(tlToRegionList_); + ClearGCInfo(fullToRegionList_); + } + + RegionList& GetTlToRegionList() noexcept { return tlToRegionList_; } + + RegionList& GetFullToRegionList() noexcept { return fullToRegionList_; } + +private: + void ClearGCInfo(RegionList& list) + { + RegionList tmp("temp region list"); + list.CopyListTo(tmp); + tmp.VisitAllRegions([](RegionDesc* region) { + region->ClearTraceCopyFixLine(); + region->ClearLiveInfo(); + region->ResetMarkBit(); + }); + } + + // toRegionList is a list of to-space regions produced by gc threads. + // when a region is prepended to this list, the region is probably not full, so the statistics + // of this region-list are not reliable and need to be updated. 
+ RegionList tlToRegionList_; + RegionList fullToRegionList_; +}; +} // namespace panda +#endif // ARK_COMMON_TO_SPACE_H diff --git a/common_components/common_runtime/src/heap/space/young_space.cpp b/common_components/common_runtime/src/heap/space/young_space.cpp new file mode 100644 index 0000000000..81b2c5c416 --- /dev/null +++ b/common_components/common_runtime/src/heap/space/young_space.cpp @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "common_components/common_runtime/src/heap/space/young_space.h" +#if defined(ARKCOMMON_SANITIZER_SUPPORT) +#include "common_components/common_runtime/src/sanitizer/sanitizer_interface.h" +#endif + +namespace panda { +void YoungSpace::DumpRegionStats() const +{ + size_t tlRegions = tlRegionList_.GetRegionCount(); + size_t tlUnits = tlRegionList_.GetUnitCount(); + size_t tlSize = tlUnits * RegionDesc::UNIT_SIZE; + size_t allocTLSize = tlRegionList_.GetAllocatedSize(); + + size_t recentFullRegions = recentFullRegionList_.GetRegionCount(); + size_t recentFullUnits = recentFullRegionList_.GetUnitCount(); + size_t recentFullSize = recentFullUnits * RegionDesc::UNIT_SIZE; + size_t allocRecentFullSize = recentFullRegionList_.GetAllocatedSize(); + + size_t units = tlUnits + recentFullUnits; + VLOG(REPORT, "young space units: %zu (%zu B)", units, units * RegionDesc::UNIT_SIZE); + VLOG(REPORT, "\ttl-regions %zu: %zu units (%zu B, alloc %zu)", tlRegions, tlUnits, tlSize, allocTLSize); + VLOG(REPORT, "\trecent-full regions %zu: %zu units (%zu B, alloc %zu)", + recentFullRegions, recentFullUnits, recentFullSize, allocRecentFullSize); +} +} // namespace panda diff --git a/common_components/common_runtime/src/heap/space/young_space.h b/common_components/common_runtime/src/heap/space/young_space.h new file mode 100644 index 0000000000..a6b4ab73f8 --- /dev/null +++ b/common_components/common_runtime/src/heap/space/young_space.h @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2025 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef ARK_COMMON_YOUNG_SPACE_H +#define ARK_COMMON_YOUNG_SPACE_H + +#include +#include +#include +#include +#include +#include + +#include "common_components/common_runtime/src/heap/allocator/alloc_util.h" +#include "common_components/common_runtime/src/heap/allocator/allocator.h" +#include "common_components/common_runtime/src/heap/allocator/region_manager.h" +#include "common_components/common_runtime/src/heap/space/regional_space.h" +#include "common_components/common_runtime/src/heap/space/from_space.h" +#include "common_components/common_runtime/src/mutator/mutator.h" +#if defined(ARKCOMMON_SANITIZER_SUPPORT) +#include "common_components/common_runtime/src/sanitizer/sanitizer_interface.h" +#endif + +namespace panda { +// regions for small-sized movable objects, which may be moved during gc. +class YoungSpace : public RegionalSpace { +public: + YoungSpace(RegionManager& regionManager) : RegionalSpace(regionManager), + tlRegionList_("thread local regions"), + recentFullRegionList_("recent full regions") {} + + void DumpRegionStats() const; + + void FixAllRegions() + { + TraceCollector& collector = reinterpret_cast(Heap::GetHeap().GetCollector()); + RegionManager::FixRecentRegionList(collector, tlRegionList_); + RegionManager::FixRecentRegionList(collector, recentFullRegionList_); + } + + void AddThreadLocalRegion(RegionDesc* region) + { + tlRegionList_.PrependRegion(region, RegionDesc::RegionType::THREAD_LOCAL_REGION); + } + + void AddFullRegion(RegionDesc* region) + { + recentFullRegionList_.PrependRegion(region, RegionDesc::RegionType::RECENT_FULL_REGION); + } + + void HandleFullThreadLocalRegion(RegionDesc* region) + { + DCHECK_CC(region->IsThreadLocalRegion()); + tlRegionList_.DeleteRegion(region); + recentFullRegionList_.PrependRegion(region, RegionDesc::RegionType::RECENT_FULL_REGION); + } + + void AssembleGarbageCandidates(FromSpace& fromSpace) + { + fromSpace.AssembleGarbageCandidates(recentFullRegionList_); + } + + size_t GetUsedUnitCount() const + { + return tlRegionList_.GetUnitCount() + recentFullRegionList_.GetUnitCount(); + } + + size_t GetAllocatedSize() const + { + return tlRegionList_.GetAllocatedSize() + recentFullRegionList_.GetAllocatedSize(); + } + + size_t GetRecentAllocatedSize() const + { + return recentFullRegionList_.GetAllocatedSize(); + } + + void ClearAllGCInfo() + { + ClearGCInfo(tlRegionList_); + ClearGCInfo(recentFullRegionList_); + } + + void VisitRememberSet(const std::function& func) + { + auto visitFunc = [&func](RegionDesc* region) { + region->VisitRememberSet(func); + }; + tlRegionList_.VisitAllRegions(visitFunc); + recentFullRegionList_.VisitAllRegions(visitFunc); + } + + RegionList& GetTlRegionList() noexcept { return tlRegionList_; } + + RegionList& GetRecentFullRegionList() noexcept { return recentFullRegionList_; } + +private: + void ClearGCInfo(RegionList& list) + { + RegionList tmp("temp region list"); + list.CopyListTo(tmp); + tmp.VisitAllRegions([](RegionDesc* region) { + region->ClearTraceCopyFixLine(); + region->ClearLiveInfo(); + region->ResetMarkBit(); + }); + } + // regions for thread-local allocation. + // regions in this list are already used for allocation but not full yet. + RegionList tlRegionList_; + + // recentFullRegionList is a list of regions which become full . 
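+ // (typically thread-local regions retired via HandleFullThreadLocalRegion or AddFullRegion); + // AssembleGarbageCandidates later merges this list into from-space, making these regions collection candidates.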
+ RegionList recentFullRegionList_; +}; +} // namespace panda +#endif // ARK_COMMON_YOUNG_SPACE_H diff --git a/common_components/common_runtime/src/heap/verification.cpp b/common_components/common_runtime/src/heap/verification.cpp index 7bda1f7eed..55c0334c8f 100755 --- a/common_components/common_runtime/src/heap/verification.cpp +++ b/common_components/common_runtime/src/heap/verification.cpp @@ -57,9 +57,13 @@ public: void VisitObject(BaseObject *obj) override { // check retraced objects, so they must be in one of the states below - ASSERT_LOGF(RegionSpace::IsResurrectedObject(obj) || RegionSpace::IsMarkedObject(obj) || - RegionSpace::IsNewObjectSinceTrace(obj), - "object is resurrected"); + if (!(RegionSpace::IsResurrectedObject(obj) || RegionSpace::IsMarkedObject(obj) || + RegionSpace::IsNewObjectSinceTrace(obj) || !RegionSpace::IsYoungSpaceObject(obj))) { + RegionDesc* region = RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); + DLOG(TRACE, "object is resurrected, object: %p, region: %p, region type: %d", + obj, region, region->GetRegionType()); + std::abort(); + } IsValidObject(obj); } @@ -102,22 +106,21 @@ public: void IterateFromSpace(VerifyVisitor &visitor) { - IterateRegionList(space_.GetRegionManager().fromRegionList_, visitor); + IterateRegionList(space_.GetFromSpace().GetFromRegionList(), visitor); } void IterateToSpace(VerifyVisitor &visitor) { - IterateRegionList(space_.GetRegionManager().toRegionList_, visitor); - IterateRegionList(space_.GetRegionManager().toRegionList_, visitor); - IterateRegionList(space_.GetRegionManager().tlToRegionList_, visitor); + IterateRegionList(space_.GetToSpace().GetTlToRegionList(), visitor); + IterateRegionList(space_.GetToSpace().GetFullToRegionList(), visitor); } void IterateNoForwardSpace(VerifyVisitor &visitor) { - IterateRegionList(space_.GetRegionManager().exemptedFromRegionList_, visitor); + IterateRegionList(space_.GetFromSpace().GetExemptedRegionList(), visitor); - IterateRegionList(space_.GetRegionManager().recentFullRegionList_, visitor); - IterateRegionList(space_.GetRegionManager().tlRegionList_, visitor); + IterateRegionList(space_.GetYoungSpace().GetTlRegionList(), visitor); + IterateRegionList(space_.GetYoungSpace().GetRecentFullRegionList(), visitor); IterateRegionList(space_.GetRegionManager().recentLargeRegionList_, visitor); IterateRegionList(space_.GetRegionManager().oldLargeRegionList_, visitor); diff --git a/common_components/common_runtime/src/heap/w_collector/copy_barrier.cpp b/common_components/common_runtime/src/heap/w_collector/copy_barrier.cpp index a95d1096cf..0d97c12fe8 100755 --- a/common_components/common_runtime/src/heap/w_collector/copy_barrier.cpp +++ b/common_components/common_runtime/src/heap/w_collector/copy_barrier.cpp @@ -73,7 +73,7 @@ void CopyBarrier::ReadStruct(HeapAddress dst, BaseObject* obj, HeapAddress src, BaseObject* CopyBarrier::AtomicReadRefField(BaseObject* obj, RefField& field, MemoryOrder order) const { RefField tmpField(field.GetFieldValue(order)); - BaseObject* target = ReadRefField(nullptr, tmpField); + BaseObject* target = ReadRefField(obj, tmpField); DLOG(FBARRIER, "atomic read obj %p ref-field@%p: %#zx -> %p", obj, &field, tmpField.GetFieldValue(), target); return target; } diff --git a/common_components/common_runtime/src/heap/w_collector/enum_barrier.cpp b/common_components/common_runtime/src/heap/w_collector/enum_barrier.cpp index ca25b946a0..742e596639 100755 --- a/common_components/common_runtime/src/heap/w_collector/enum_barrier.cpp +++ 
b/common_components/common_runtime/src/heap/w_collector/enum_barrier.cpp @@ -37,6 +37,7 @@ void EnumBarrier::ReadStruct(HeapAddress dst, BaseObject* obj, HeapAddress src, void EnumBarrier::WriteRefField(BaseObject* obj, RefField& field, BaseObject* ref) const { + UpdateRememberSet(obj, ref); RefField<> tmpField(field); BaseObject* remeberedObject = nullptr; remeberedObject = tmpField.GetTargetObject(); @@ -62,6 +63,7 @@ void EnumBarrier::WriteBarrier(BaseObject* obj, RefField& field, BaseObje if (!Heap::IsTaggedObject(field.GetFieldValue())) { return; } + UpdateRememberSet(obj, ref); remeberedObject = tmpField.GetTargetObject(); Mutator* mutator = Mutator::GetMutator(); if (remeberedObject != nullptr) { @@ -80,6 +82,7 @@ void EnumBarrier::WriteBarrier(BaseObject* obj, RefField& field, BaseObje if (!Heap::IsTaggedObject((HeapAddress)ref)) { return; } + UpdateRememberSet(obj, ref); ref = (BaseObject*)((uintptr_t)ref & ~(TAG_WEAK)); Mutator* mutator = Mutator::GetMutator(); mutator->RememberObjectInSatbBuffer(ref); @@ -119,7 +122,7 @@ BaseObject* EnumBarrier::AtomicReadRefField(BaseObject* obj, RefField& fie { BaseObject* target = nullptr; RefField oldField(field.GetFieldValue(order)); - target = ReadRefField(nullptr, oldField); + target = ReadRefField(obj, oldField); DLOG(EBARRIER, "atomic read obj %p ref@%p: %#zx -> %p", obj, &field, oldField.GetFieldValue(), target); return target; } diff --git a/common_components/common_runtime/src/heap/w_collector/idle_barrier.cpp b/common_components/common_runtime/src/heap/w_collector/idle_barrier.cpp index 708551b146..62831f8a15 100755 --- a/common_components/common_runtime/src/heap/w_collector/idle_barrier.cpp +++ b/common_components/common_runtime/src/heap/w_collector/idle_barrier.cpp @@ -87,14 +87,40 @@ bool IdleBarrier::CompareAndSwapRefField(BaseObject* obj, RefField& field, return false; } +void IdleBarrier::UpdateRememberSet(BaseObject* object, BaseObject* ref) const +{ + if (!Heap::IsHeapAddress(ref) || object == nullptr) { + return; + } + RegionDesc* objRegion = RegionDesc::GetRegionDescAt(reinterpret_cast((void*)object)); + RegionDesc* refRegion = RegionDesc::GetRegionDescAt(reinterpret_cast((void*)ref)); + if (objRegion->IsFixedRegion()) { + size_t offset = (uintptr_t)(object) - objRegion->GetRegionStart(); + uint8_t cellCount = objRegion->GetRegionCellCount(); + if (offset % cellCount != 0) { + LOG_COMMON(ERROR) << "wrong write!!!"; + std::abort(); + } + } + if ((!objRegion->IsInYoungSpace() && refRegion->IsInYoungSpace()) || + (objRegion->IsInFromSpace() && refRegion->IsInRecentSpace())) { + if (objRegion->MarkRSetCardTable(object)) { + DLOG(BARRIER, "update point-out remember set of region %p, obj %p, ref: %p<%p>", + objRegion, object, ref, ref->GetTypeInfo()); + } + } +} + void IdleBarrier::WriteRefField(BaseObject* obj, RefField& field, BaseObject* ref) const { DLOG(BARRIER, "write obj %p ref@%p: %p => %p", obj, &field, field.GetTargetObject(), ref); + UpdateRememberSet(obj, ref); field.SetTargetObject(ref); } void IdleBarrier::WriteBarrier(BaseObject* obj, RefField& field, BaseObject* ref) const { + UpdateRememberSet(obj, ref); DLOG(BARRIER, "write obj %p ref@%p: %p => %p", obj, &field, field.GetTargetObject(), ref); } diff --git a/common_components/common_runtime/src/heap/w_collector/idle_barrier.h b/common_components/common_runtime/src/heap/w_collector/idle_barrier.h index c3afc90006..1d3c0a6493 100755 --- a/common_components/common_runtime/src/heap/w_collector/idle_barrier.h +++ 
b/common_components/common_runtime/src/heap/w_collector/idle_barrier.h @@ -45,6 +45,8 @@ public: void CopyStructArray(BaseObject* dstObj, HeapAddress dstField, MIndex dstSize, BaseObject* srcObj, HeapAddress srcField, MIndex srcSize) const override; + + void UpdateRememberSet(BaseObject* object, BaseObject* ref) const; }; } // namespace panda #endif // ~ARK_COMMON_IDLE_BARRIER_H diff --git a/common_components/common_runtime/src/heap/w_collector/post_trace_barrier.cpp b/common_components/common_runtime/src/heap/w_collector/post_trace_barrier.cpp index 5f60b9ddba..dab93facaf 100755 --- a/common_components/common_runtime/src/heap/w_collector/post_trace_barrier.cpp +++ b/common_components/common_runtime/src/heap/w_collector/post_trace_barrier.cpp @@ -36,11 +36,13 @@ void PostTraceBarrier::ReadStruct(HeapAddress dst, BaseObject* obj, HeapAddress void PostTraceBarrier::WriteRefField(BaseObject* obj, RefField& field, BaseObject* ref) const { RefField<> newField(ref); + UpdateRememberSet(obj, ref); field.SetFieldValue(newField.GetFieldValue()); } void PostTraceBarrier::WriteBarrier(BaseObject* obj, RefField& field, BaseObject* ref) const { + UpdateRememberSet(obj, ref); } void PostTraceBarrier::WriteStaticRef(RefField& field, BaseObject* ref) const @@ -66,7 +68,7 @@ BaseObject* PostTraceBarrier::AtomicReadRefField(BaseObject* obj, RefField BaseObject* target = nullptr; RefField oldField(field.GetFieldValue(order)); - target = ReadRefField(nullptr, oldField); + target = ReadRefField(obj, oldField); DLOG(TBARRIER, "katomic read obj %p ref@%p: %#zx -> %p", obj, &field, oldField.GetFieldValue(), target); return target; } diff --git a/common_components/common_runtime/src/heap/w_collector/preforward_barrier.cpp b/common_components/common_runtime/src/heap/w_collector/preforward_barrier.cpp index d0f8296806..e79ee6f2ec 100755 --- a/common_components/common_runtime/src/heap/w_collector/preforward_barrier.cpp +++ b/common_components/common_runtime/src/heap/w_collector/preforward_barrier.cpp @@ -76,7 +76,7 @@ void PreforwardBarrier::ReadStruct(HeapAddress dst, BaseObject* obj, HeapAddress BaseObject* PreforwardBarrier::AtomicReadRefField(BaseObject* obj, RefField& field, MemoryOrder order) const { RefField tmpField(field.GetFieldValue(order)); - BaseObject* target = ReadRefField(nullptr, tmpField); + BaseObject* target = ReadRefField(obj, tmpField); DLOG(PBARRIER, "atomic read obj %p ref@%p: %#zx -> %p", obj, &field, tmpField.GetFieldValue(), target); return target; } diff --git a/common_components/common_runtime/src/heap/w_collector/trace_barrier.cpp b/common_components/common_runtime/src/heap/w_collector/trace_barrier.cpp index 63ea1eb94c..3ddb323bdc 100755 --- a/common_components/common_runtime/src/heap/w_collector/trace_barrier.cpp +++ b/common_components/common_runtime/src/heap/w_collector/trace_barrier.cpp @@ -37,6 +37,7 @@ void TraceBarrier::ReadStruct(HeapAddress dst, BaseObject* obj, HeapAddress src, void TraceBarrier::WriteRefField(BaseObject* obj, RefField& field, BaseObject* ref) const { + UpdateRememberSet(obj, ref); RefField<> tmpField(field); BaseObject* rememberedObject = nullptr; rememberedObject = tmpField.GetTargetObject(); @@ -58,6 +59,7 @@ void TraceBarrier::WriteBarrier(BaseObject* obj, RefField& field, BaseObj if (!Heap::IsTaggedObject(field.GetFieldValue())) { return; } + UpdateRememberSet(obj, ref); Mutator* mutator = Mutator::GetMutator(); if (rememberedObject != nullptr) { mutator->RememberObjectInSatbBuffer(rememberedObject); @@ -78,6 +80,7 @@ void 
TraceBarrier::WriteBarrier(BaseObject* obj, RefField& field, BaseObj if (!Heap::IsTaggedObject((HeapAddress)ref)) { return; } + UpdateRememberSet(obj, ref); ref = (BaseObject*)((uintptr_t)ref & ~(TAG_WEAK)); Mutator* mutator = Mutator::GetMutator(); mutator->RememberObjectInSatbBuffer(ref); diff --git a/common_components/common_runtime/src/heap/w_collector/w_collector.cpp b/common_components/common_runtime/src/heap/w_collector/w_collector.cpp index bd89cafb22..cc38f82656 100755 --- a/common_components/common_runtime/src/heap/w_collector/w_collector.cpp +++ b/common_components/common_runtime/src/heap/w_collector/w_collector.cpp @@ -142,11 +142,17 @@ bool WCollector::TryUntagRefField(BaseObject* obj, RefField<>& field, BaseObject void WCollector::EnumRefFieldRoot(RefField<>& field, RootSet& rootSet) const { auto value = field.GetFieldValue(); - auto obj = field.GetTargetObject(); ASSERT_LOGF(Heap::IsTaggedObject(value), "EnumRefFieldRoot failed: Invalid root"); // need fix or clean - rootSet.push_back(field.GetTargetObject()); + BaseObject* obj = field.GetTargetObject(); + RegionDesc* objRegion = RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); + // if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG && !objRegion->IsInYoungSpace()) { + if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG && objRegion->IsInMatureSpace()) { + DLOG(ENUM, "enum: skip mature object %p<%p>(%zu)", obj, obj->GetTypeInfo(), obj->GetSize()); + return; + } + rootSet.push_back(obj); return; // consider remove below @@ -215,22 +221,35 @@ void WCollector::EnumAndTagRawRoot(ObjectRef& ref, RootSet& rootSet) const void WCollector::TraceRefField(BaseObject* obj, RefField<>& field, WorkStack& workStack, WeakStack& weakStack) const { BaseObject* targetObj = field.GetTargetObject(); - auto region = RegionDesc::GetRegionDescAt(reinterpret_cast((void*)targetObj)); - // field is tagged object, should be in heap - DCHECK_CC(Heap::IsHeapAddress(targetObj)); - - DLOG(TRACE, "trace obj %p ref@%p: %p<%p>(%zu)", obj, &field, targetObj, targetObj->GetTypeInfo(), targetObj->GetSize()); - if (region->IsNewObjectSinceTrace(targetObj)) { - DLOG(TRACE, "trace: skip new obj %p<%p>(%zu)", targetObj, targetObj->GetTypeInfo(), targetObj->GetSize()); + if (!Heap::IsHeapAddress(targetObj)) { return; } - if (field.IsWeak()) { + auto targetRegion = RegionDesc::GetRegionDescAt(reinterpret_cast((void*)targetObj)); + + auto gcReason = Heap::GetHeap().GetGCReason(); + + if (gcReason != GC_REASON_YOUNG && field.IsWeak()) { + DLOG(TRACE, "trace: skip weak obj when full gc, object: %p@%p, targetObj: %p", obj, &field, targetObj); weakStack.push_back(&field); return; } - if (region->MarkObject(targetObj)) { + + auto objRegion = RegionDesc::GetRegionDescAt(reinterpret_cast(obj)); + // if (gcReason == GC_REASON_YOUNG && !objRegion->IsInYoungSpace()) { + if (gcReason == GC_REASON_YOUNG && objRegion->IsInMatureSpace()) { + DLOG(TRACE, "trace: skip mature object %p@%p, target object: %p<%p>(%zu)", obj, &field, targetObj, targetObj->GetTypeInfo(), targetObj->GetSize()); + return; + } + + if (targetRegion->IsNewObjectSinceTrace(targetObj)) { + DLOG(TRACE, "trace: skip new obj %p<%p>(%zu)", targetObj, targetObj->GetTypeInfo(), targetObj->GetSize()); + return; + } + if (targetRegion->MarkObject(targetObj)) { + DLOG(TRACE, "trace: obj has been marked %p", targetObj); return; } + DLOG(TRACE, "trace obj %p ref@%p: %p<%p>(%zu)", obj, &field, targetObj, targetObj->GetTypeInfo(), targetObj->GetSize()); workStack.push_back(targetObj); } @@ -254,6 +273,17 @@ void 
WCollector::FixRefField(BaseObject* obj, RefField<>& field) const } BaseObject* latest = FindToVersion(targetObj); + + // update remember set + BaseObject* toObj = latest == nullptr ? targetObj : latest; + RegionDesc* objRegion = RegionDesc::GetRegionDescAt(reinterpret_cast((void*)obj)); + RegionDesc* refRegion = RegionDesc::GetRegionDescAt(reinterpret_cast((void*)toObj)); + if (!objRegion->IsInRecentSpace() && refRegion->IsInRecentSpace()) { + if (objRegion->MarkRSetCardTable(obj)) { + DLOG(TRACE, "fix phase update point-out remember set of region %p, obj %p, ref: %p<%p>", + objRegion, obj, toObj, toObj->GetTypeInfo()); + } + } if (latest == nullptr) { return; } CHECK_CC(latest->IsValidObject()); @@ -311,9 +341,18 @@ void WCollector::PreforwardStaticRoots() panda::WeakRefFieldVisitor weakVisitor = [this](RefField<> &refField) -> bool { RefField<> oldField(refField); BaseObject *oldObj = oldField.GetTargetObject(); - if (!IsMarkedObject(oldObj) && !RegionSpace::IsNewObjectSinceTrace(oldObj)) { - return false; + auto gcReason = Heap::GetHeap().GetGCReason(); + if (gcReason == GC_REASON_YOUNG) { + if (RegionSpace::IsYoungSpaceObject(oldObj) && !IsMarkedObject(oldObj) && + !RegionSpace::IsNewObjectSinceTrace(oldObj)) { + return false; + } + } else { + if (!IsMarkedObject(oldObj) && !RegionSpace::IsNewObjectSinceTrace(oldObj)) { + return false; + } } + DLOG(FIX, "visit weak raw-ref @%p: %p", &refField, oldObj); if (IsFromObject(oldObj)) { BaseObject *toVersion = TryForwardObject(oldObj); @@ -432,7 +471,9 @@ void WCollector::DoGarbageCollection() Preforward(); // reclaim large objects should after preforward(may process weak ref) and // before fix heap(may clear live bit) - CollectLargeGarbage(); + if (Heap::GetHeap().GetGCReason() != GC_REASON_YOUNG) { + CollectLargeGarbage(); + } SweepThreadLocalJitFort(); CopyFromSpace(); @@ -440,7 +481,9 @@ void WCollector::DoGarbageCollection() PrepareFix(); FixHeap(); - CollectPinnedGarbage(); + if (Heap::GetHeap().GetGCReason() != GC_REASON_YOUNG) { + CollectPinnedGarbage(); + } TransitionToGCPhase(GCPhase::GC_PHASE_IDLE, true); ClearAllGCInfo(); @@ -461,7 +504,9 @@ void WCollector::DoGarbageCollection() Preforward(); // reclaim large objects should after preforward(may process weak ref) and // before fix heap(may clear live bit) - CollectLargeGarbage(); + if (Heap::GetHeap().GetGCReason() != GC_REASON_YOUNG) { + CollectLargeGarbage(); + } SweepThreadLocalJitFort(); CopyFromSpace(); @@ -469,7 +514,9 @@ void WCollector::DoGarbageCollection() PrepareFix(); FixHeap(); - CollectPinnedGarbage(); + if (Heap::GetHeap().GetGCReason() != GC_REASON_YOUNG) { + CollectPinnedGarbage(); + } TransitionToGCPhase(GCPhase::GC_PHASE_IDLE, true); ClearAllGCInfo(); @@ -492,7 +539,9 @@ void WCollector::DoGarbageCollection() } // reclaim large objects should after preforward(may process weak ref) // and before fix heap(may clear live bit) - CollectLargeGarbage(); + if (Heap::GetHeap().GetGCReason() != GC_REASON_YOUNG) { + CollectLargeGarbage(); + } SweepThreadLocalJitFort(); CopyFromSpace(); @@ -503,7 +552,9 @@ void WCollector::DoGarbageCollection() PrepareFix(); } FixHeap(); - CollectPinnedGarbage(); + if (Heap::GetHeap().GetGCReason() != GC_REASON_YOUNG) { + CollectPinnedGarbage(); + } TransitionToGCPhase(GCPhase::GC_PHASE_IDLE, true); ClearAllGCInfo(); @@ -526,10 +577,18 @@ void WCollector::ProcessWeakReferences() globalWeakStack_.pop_back(); RefField<> oldField(field); BaseObject* targetObj = oldField.GetTargetObject(); - if (!Heap::IsHeapAddress(targetObj) || 
IsMarkedObject(targetObj) || - RegionSpace::IsNewObjectSinceTrace(targetObj)) { - continue; + if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) { + if (!Heap::IsHeapAddress(targetObj) || IsMarkedObject(targetObj) || + RegionSpace::IsNewObjectSinceTrace(targetObj) || !RegionSpace::IsYoungSpaceObject(targetObj)) { + continue; + } + } else { + if (!Heap::IsHeapAddress(targetObj) || IsMarkedObject(targetObj) || + RegionSpace::IsNewObjectSinceTrace(targetObj)) { + continue; + } } + if (field.ClearRef(oldField.GetFieldValue())) { // fix log // DLOG(FIX, "fix weak obj %p+%zu ref@%p: %#zx => %p<%p>(%zu)", obj, obj->GetSize(), &field, @@ -540,9 +599,17 @@ void WCollector::ProcessWeakReferences() panda::WeakRefFieldVisitor weakVisitor = [this](RefField<> &refField) -> bool { RefField<> oldField(refField); BaseObject *oldObj = oldField.GetTargetObject(); - if (!IsMarkedObject(oldObj) && !RegionSpace::IsNewObjectSinceTrace(oldObj)) { - return false; + if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) { + if (RegionSpace::IsYoungSpaceObject(oldObj) && !IsMarkedObject(oldObj) && + !RegionSpace::IsNewObjectSinceTrace(oldObj)) { + return false; + } + } else { + if (!IsMarkedObject(oldObj) && !RegionSpace::IsNewObjectSinceTrace(oldObj)) { + return false; + } } + return true; }; VisitWeakRoots(weakVisitor); @@ -621,12 +688,14 @@ BaseObject* WCollector::CopyObjectAfterExclusive(BaseObject* obj) } BaseObject* toObj = fwdTable_.RouteObject(obj, size); if (toObj == nullptr) { + RegionSpace& space = reinterpret_cast(Heap::GetHeap().GetAllocator()); + space.DumpAllRegionStats("oom!!!!!"); ASSERT_LOGF(0, "OOM"); // ConcurrentGC obj->UnlockExclusive(panda::BaseStateWord::ForwardState::NORMAL); return toObj; } - DLOG(COPY, "copy obj %p<%p>(%zu) to %p", obj, obj->GetTypeInfo(), size, toObj); + DLOG(FIX, "copy obj %p<%p>(%zu) to %p", obj, obj->GetTypeInfo(), size, toObj); CopyObject(*obj, *toObj, size); if (IsToObject(toObj)) { toObj->SetForwardState(panda::BaseStateWord::ForwardState::NORMAL); @@ -655,6 +724,7 @@ void WCollector::CollectSmallSpace() ARK_COMMON_PHASE_TIMER("CollectFromSpaceGarbage"); stats.collectedBytes += stats.smallGarbageSize; space.CollectFromSpaceGarbage(); + space.HandlePromotion(); } size_t candidateBytes = stats.fromSpaceSize + stats.pinnedSpaceSize + stats.largeSpaceSize; diff --git a/common_components/common_runtime/src/mutator/satb_buffer.cpp b/common_components/common_runtime/src/mutator/satb_buffer.cpp index 9ccf7347c7..368ff91a17 100755 --- a/common_components/common_runtime/src/mutator/satb_buffer.cpp +++ b/common_components/common_runtime/src/mutator/satb_buffer.cpp @@ -28,6 +28,9 @@ bool SatbBuffer::ShouldEnqueue(const BaseObject* obj) if (UNLIKELY_CC(obj == nullptr)) { return false; } + if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG && !RegionSpace::IsYoungSpaceObject(obj)) { + return false; + } if (RegionSpace::IsNewObjectSinceTrace(obj)) { return false; } diff --git a/common_components/heap/heap_allocator.cpp b/common_components/heap/heap_allocator.cpp index d8c3ad4a99..57fe51ba69 100755 --- a/common_components/heap/heap_allocator.cpp +++ b/common_components/heap/heap_allocator.cpp @@ -58,8 +58,8 @@ Address HeapAllocator::AllocateInReadOnly(size_t size, Language language) uintptr_t HeapAllocator::AllocateLargeJitFortRegion(size_t size, Language language) { - RegionManager& manager = reinterpret_cast(Heap::GetHeap().GetAllocator()).GetRegionManager(); - auto address = manager.AllocJitFortRegion(size); + RegionSpace& allocator = 
reinterpret_cast(Heap::GetHeap().GetAllocator()); + auto address = allocator.AllocJitFortRegion(size); BaseObject::Cast(address)->SetLanguage(language); return address; } @@ -77,20 +77,20 @@ Address HeapAllocator::AllocatePinNoGC(size_t size) Address HeapAllocator::AllocateRegion() { - RegionManager& manager = reinterpret_cast(Heap::GetHeap().GetAllocator()).GetRegionManager(); - return manager.AllocRegion(); + RegionSpace& allocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + return allocator.AllocRegion(); } Address HeapAllocator::AllocatePinnedRegion() { - RegionManager& manager = reinterpret_cast(Heap::GetHeap().GetAllocator()).GetRegionManager(); - return manager.AllocPinnedRegion(); + RegionSpace& allocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + return allocator.AllocPinnedRegion(); } Address HeapAllocator::AllocateLargeRegion(size_t size) { - RegionManager& manager = reinterpret_cast(Heap::GetHeap().GetAllocator()).GetRegionManager(); - return manager.AllocLargeRegion(size); + RegionSpace& allocator = reinterpret_cast(Heap::GetHeap().GetAllocator()); + return allocator.AllocLargeRegion(size); } } // namespace panda diff --git a/ecmascript/compiler/call_signature.cpp b/ecmascript/compiler/call_signature.cpp index a32f8546bb..98c0d88a6b 100644 --- a/ecmascript/compiler/call_signature.cpp +++ b/ecmascript/compiler/call_signature.cpp @@ -3475,7 +3475,7 @@ DEF_CALL_SIGNATURE(ReverseBarrier) DEF_CALL_SIGNATURE(ObjectCopy) { - constexpr size_t paramCount = 4; + constexpr size_t paramCount = 5; // 3 : 3 input parameters CallSignature ArrayCopy("ObjectCopy", 0, paramCount, ArgumentsOrder::DEFAULT_ORDER, VariableType::VOID()); @@ -3485,6 +3485,7 @@ DEF_CALL_SIGNATURE(ObjectCopy) VariableType::NATIVE_POINTER(), VariableType::NATIVE_POINTER(), VariableType::NATIVE_POINTER(), + VariableType::NATIVE_POINTER(), VariableType::INT32() }; callSign->SetParameters(params.data()); diff --git a/ecmascript/compiler/stub_builder.cpp b/ecmascript/compiler/stub_builder.cpp index d020740af7..23549cf903 100644 --- a/ecmascript/compiler/stub_builder.cpp +++ b/ecmascript/compiler/stub_builder.cpp @@ -11660,7 +11660,7 @@ void StubBuilder::ArrayCopy(GateRef glue, GateRef srcObj, GateRef srcAddr, GateR Label exit(env); #endif CallNGCRuntime(glue, RTSTUB_ID(ObjectCopy), - {glue, TaggedCastToIntPtr(dstAddr), TaggedCastToIntPtr(srcAddr), taggedValueCount}); + {glue, TaggedCastToIntPtr(dstObj), TaggedCastToIntPtr(dstAddr), TaggedCastToIntPtr(srcAddr), taggedValueCount}); #ifndef USE_CMC_GC Label handleBarrier(env); BRANCH_NO_WEIGHT(needBarrier, &handleBarrier, &exit); diff --git a/ecmascript/element_accessor-inl.h b/ecmascript/element_accessor-inl.h index ed7dc03188..0fd051b862 100644 --- a/ecmascript/element_accessor-inl.h +++ b/ecmascript/element_accessor-inl.h @@ -49,7 +49,8 @@ inline void ElementAccessor::Set(const JSThread *thread, JSHandle rece size_t offset = JSTaggedValue::TaggedTypeSize() * idx; // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon) if (value.GetTaggedValue().IsHeapObject()) { - Barriers::SetObject(thread, elements->GetData(), offset, convertedValue); + Barriers::SetObject(thread, reinterpret_cast(elements), + TaggedArray::DATA_OFFSET + offset, convertedValue); } else { // NOLINTNEXTLINE(readability-misleading-indentation) Barriers::SetPrimitive(elements->GetData(), offset, convertedValue); } @@ -82,7 +83,8 @@ void ElementAccessor::FastSet(const JSThread *thread, JSHandle elem break; case ElementsKind::TAGGED: if 
(value.GetTaggedValue().IsHeapObject()) { - Barriers::SetObject(thread, elements->GetData(), offset, rawValue.GetRawData()); + Barriers::SetObject(thread, elements->GetThis(), + TaggedArray::DATA_OFFSET + offset, rawValue.GetRawData()); } else { // NOLINTNEXTLINE(readability-misleading-indentation) Barriers::SetPrimitive(elements->GetData(), offset, rawValue.GetRawData()); } diff --git a/ecmascript/element_accessor.cpp b/ecmascript/element_accessor.cpp index 5f279d60e1..d6c4aef910 100644 --- a/ecmascript/element_accessor.cpp +++ b/ecmascript/element_accessor.cpp @@ -27,7 +27,7 @@ JSTaggedValue ElementAccessor::Get(const JSThread *thread, JSHandle re // dynamically-typed languages like JavaScript. So we simply skip the read-barrier. size_t offset = JSTaggedValue::TaggedTypeSize() * idx; // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon) - JSTaggedType rawValue = Barriers::GetTaggedValue(elements->GetData(), offset); + JSTaggedType rawValue = Barriers::GetTaggedValue(elements, TaggedArray::DATA_OFFSET + offset); if (UNLIKELY(thread->IsEnableMutantArray())) { ElementsKind kind = receiver->GetClass()->GetElementsKind(); if (!elements->GetClass()->IsMutantTaggedArray()) { diff --git a/ecmascript/ic/profile_type_info.h b/ecmascript/ic/profile_type_info.h index 9eccf9f6a4..ffdb4dde01 100644 --- a/ecmascript/ic/profile_type_info.h +++ b/ecmascript/ic/profile_type_info.h @@ -249,7 +249,8 @@ public: void SetExtraInfoMap(const JSThread *thread, JSHandle extraInfoMap) { - Barriers::SetObject(thread, GetData(), GetExtraInfoMapOffset(), + Barriers::SetObject(thread, reinterpret_cast(this), + TaggedArray::DATA_OFFSET + GetExtraInfoMapOffset(), extraInfoMap.GetTaggedValue().GetRawData()); } diff --git a/ecmascript/mem/barriers-inl.h b/ecmascript/mem/barriers-inl.h index e82c4f09c1..c8a84bc66f 100644 --- a/ecmascript/mem/barriers-inl.h +++ b/ecmascript/mem/barriers-inl.h @@ -25,6 +25,7 @@ #include "ecmascript/mem/region-inl.h" #include "ecmascript/mem/heap.h" #include "ecmascript/ecma_vm.h" +#include "ecmascript/tagged_array.h" namespace panda::ecmascript { template @@ -186,7 +187,7 @@ void Barriers::CopyObject(const JSThread *thread, const TaggedObject *dstObj, JS } #ifdef USE_CMC_GC - Barriers::CMCArrayCopyWriteBarrier(thread, (void*)srcAddr, (void*)dstAddr, count); + Barriers::CMCArrayCopyWriteBarrier(thread, dstObj, (void*)srcAddr, (void*)dstAddr, count); return; #endif diff --git a/ecmascript/mem/barriers.cpp b/ecmascript/mem/barriers.cpp index 834b51136e..b77e77cdbe 100644 --- a/ecmascript/mem/barriers.cpp +++ b/ecmascript/mem/barriers.cpp @@ -120,7 +120,7 @@ void Barriers::CMCWriteBarrier(const JSThread *thread, void *obj, size_t offset, return; } -void Barriers::CMCArrayCopyWriteBarrier(const JSThread *thread, void* src, void* dst, size_t count) +void Barriers::CMCArrayCopyWriteBarrier(const JSThread *thread, const TaggedObject *dstObj, void* src, void* dst, size_t count) { // need opt (void)thread; @@ -129,7 +129,7 @@ void Barriers::CMCArrayCopyWriteBarrier(const JSThread *thread, void* src, void* for (size_t i = 0; i < count; i++) { uintptr_t offset = i * sizeof(uintptr_t); uintptr_t value = srcPtr[i]; - BaseRuntime::WriteBarrier(dst, (void *)((uintptr_t)dst + offset), (void*)value); + BaseRuntime::WriteBarrier(reinterpret_cast(const_cast(dstObj)), (void *)((uintptr_t)dst + offset), (void*)value); } return; } diff --git a/ecmascript/mem/barriers.h b/ecmascript/mem/barriers.h index 0287cc314c..214aff6e73 100644 --- a/ecmascript/mem/barriers.h +++ 
b/ecmascript/mem/barriers.h @@ -154,7 +154,7 @@ public: TaggedObject *value, Region *valueRegion); #ifdef USE_CMC_GC static void PUBLIC_API CMCWriteBarrier(const JSThread *thread, void *obj, size_t offset, JSTaggedType value); - static void PUBLIC_API CMCArrayCopyWriteBarrier(const JSThread *thread, void* src, void* dst, size_t count); + static void PUBLIC_API CMCArrayCopyWriteBarrier(const JSThread *thread, const TaggedObject *dstObj, void* src, void* dst, size_t count); #endif }; } // namespace panda::ecmascript diff --git a/ecmascript/stubs/runtime_stubs.cpp b/ecmascript/stubs/runtime_stubs.cpp index e03aeaf71c..7e2d509868 100644 --- a/ecmascript/stubs/runtime_stubs.cpp +++ b/ecmascript/stubs/runtime_stubs.cpp @@ -4475,14 +4475,14 @@ DEF_RUNTIME_STUBS(SlowSharedObjectStoreBarrier) return publishValue.GetTaggedValue().GetRawData(); } -void RuntimeStubs::ObjectCopy(uintptr_t argGlue, JSTaggedType *dst, JSTaggedType *src, uint32_t count) +void RuntimeStubs::ObjectCopy(uintptr_t argGlue, JSTaggedType *dstObj, JSTaggedType *dst, JSTaggedType *src, uint32_t count) { DISALLOW_GARBAGE_COLLECTION; #ifdef USE_READ_BARRIER auto thread = JSThread::GlueToJSThread(argGlue); // check CMC-GC phase inside Barriers::CopyObject( - thread, nullptr, reinterpret_cast(dst), reinterpret_cast(src), count); + thread, reinterpret_cast(dstObj), reinterpret_cast(dst), reinterpret_cast(src), count); #else (void)argGlue; std::copy_n(src, count, dst); diff --git a/ecmascript/stubs/runtime_stubs.h b/ecmascript/stubs/runtime_stubs.h index dbebfa81e8..819de0163b 100644 --- a/ecmascript/stubs/runtime_stubs.h +++ b/ecmascript/stubs/runtime_stubs.h @@ -162,7 +162,7 @@ public: static void ReverseTypedArray(JSTypedArray *typedArray); static void SortTypedArray(JSTypedArray *typedArray); static inline uintptr_t RuntimeGetNativePcOfstForBaseline(const JSHandle &func, uint64_t bytecodePos); - static void ObjectCopy(uintptr_t argGlue, JSTaggedType *dst, JSTaggedType *src, uint32_t count); + static void ObjectCopy(uintptr_t argGlue, JSTaggedType *dstObj, JSTaggedType *dst, JSTaggedType *src, uint32_t count); static void FillObject(JSTaggedType *dst, JSTaggedType value, uint32_t count); static void ReverseArray(uintptr_t argGlue, JSTaggedType *dst, uint32_t length); private: diff --git a/ecmascript/tagged_array-inl.h b/ecmascript/tagged_array-inl.h index 090178368d..bc4cd48c39 100644 --- a/ecmascript/tagged_array-inl.h +++ b/ecmascript/tagged_array-inl.h @@ -28,14 +28,15 @@ inline void TaggedArray::Set(const JSThread *thread, uint32_t idx, const T &valu if constexpr (std::is_same_v) { if (needBarrier && value.IsHeapObject()) { - Barriers::SetObject(thread, GetData(), offset, value.GetRawData()); + Barriers::SetObject(thread, reinterpret_cast(this), offset + DATA_OFFSET, value.GetRawData()); } else { Barriers::SetPrimitive(GetData(), offset, value.GetRawData()); } } else if constexpr (IsJSHandle::value) { auto taggedValue = value.GetTaggedValue(); if (taggedValue.IsHeapObject()) { - Barriers::SetObject(thread, GetData(), offset, taggedValue.GetRawData()); + Barriers::SetObject(thread, reinterpret_cast(this), + offset + DATA_OFFSET, taggedValue.GetRawData()); } else { Barriers::SetPrimitive(GetData(), offset, taggedValue.GetRawData()); } diff --git a/ecmascript/tagged_array.h b/ecmascript/tagged_array.h index 170cf4f650..8f25ac806b 100644 --- a/ecmascript/tagged_array.h +++ b/ecmascript/tagged_array.h @@ -75,6 +75,11 @@ public: return reinterpret_cast(ToUintPtr(this) + DATA_OFFSET); } + JSTaggedType *GetThis() const + { + return 
reinterpret_cast(ToUintPtr(this)); + } + bool IsDictionaryMode() const; bool HasDuplicateEntry() const; -- Gitee