From f72f6b8ddb7a61e886a31e412314ce487995b53b Mon Sep 17 00:00:00 2001
From: Aleksandr Emelenko
Date: Wed, 20 Sep 2023 15:30:05 +0300
Subject: [PATCH] [G1GC] Free empty memory pools with interrupt check

Change-Id: I142a475b196af882d4da3f19726437cf4463fce9
Signed-off-by: Aleksandr Emelenko
---
 libpandabase/mem/mmap_mem_pool-inl.h      | 32 +++++++++++++++--------
 libpandabase/mem/mmap_mem_pool.h          |  8 ++++--
 runtime/mem/gc/g1/g1-gc.cpp               | 21 ++++++++++++---
 runtime/mem/gc/g1/g1-gc.h                 |  1 +
 runtime/mem/gc/workers/gc_workers_tasks.h | 18 ++++++++++++-
 runtime/mem/heap_space.cpp                |  2 +-
 6 files changed, 63 insertions(+), 19 deletions(-)

diff --git a/libpandabase/mem/mmap_mem_pool-inl.h b/libpandabase/mem/mmap_mem_pool-inl.h
index 596a74b0a..bf3389ef4 100644
--- a/libpandabase/mem/mmap_mem_pool-inl.h
+++ b/libpandabase/mem/mmap_mem_pool-inl.h
@@ -554,19 +554,28 @@ inline size_t MmapMemPool::GetObjectUsedBytes() const
     return common_space_.GetOccupiedMemorySize() - common_space_pools_.GetAllSize();
 }

-inline void MmapMemPool::ReleasePagesInFreePools()
+inline bool MmapMemPool::ReleasePagesInFreePools(bool release_only_one_pool)
 {
     os::memory::LockHolder lk(lock_);
-    common_space_pools_.IterateOverFreePools([](size_t pool_size, MmapPool *pool) {
-        // Iterate over non returned to OS pools:
-        if (!pool->IsReturnedToOS()) {
-            pool->SetReturnedToOS(true);
-            auto pool_start = ToUintPtr(pool->GetMem());
-            LOG_MMAP_MEM_POOL(DEBUG) << "Return pages to OS from Free Pool: start = " << pool->GetMem() << " with size "
-                                     << pool_size;
-            os::mem::ReleasePages(pool_start, pool_start + pool_size);
-        }
-    });
+    bool pool_released = false;
+    common_space_pools_.IterateOverFreePools(
+        [&release_only_one_pool, &pool_released](size_t pool_size, MmapPool *pool) {
+            // Iterate over pools which have not been returned to the OS yet:
+            if (!pool->IsReturnedToOS()) {
+                if (release_only_one_pool && pool_released) {
+                    return;
+                }
+                pool_released = true;
+                pool->SetReturnedToOS(true);
+                auto pool_start = ToUintPtr(pool->GetMem());
+                LOG_MMAP_MEM_POOL(DEBUG) << "Return pages to OS from Free Pool: start = " << pool->GetMem()
+                                         << " with size " << pool_size;
+                os::mem::ReleasePages(pool_start, pool_start + pool_size);
+            }
+        });
+    if (release_only_one_pool && pool_released) {
+        return true;
+    }
     Pool main_pool = common_space_.GetAndClearUnreturnedToOSMemory();
     if (main_pool.GetSize() != 0) {
         auto pool_start = ToUintPtr(main_pool.GetMem());
@@ -574,6 +583,7 @@ inline void MmapMemPool::ReleasePagesInFreePools()
                                  << " with size " << main_pool.GetSize();
         os::mem::ReleasePages(pool_start, pool_start + main_pool.GetSize());
     }
+    return false;
 }

 #undef LOG_MMAP_MEM_POOL
diff --git a/libpandabase/mem/mmap_mem_pool.h b/libpandabase/mem/mmap_mem_pool.h
index 254b88b0c..c89b2ffc1 100644
--- a/libpandabase/mem/mmap_mem_pool.h
+++ b/libpandabase/mem/mmap_mem_pool.h
@@ -188,8 +188,12 @@ public:
     // To check if we can alloc enough pools in object space
     bool HaveEnoughPoolsInObjectSpace(size_t pools_num, size_t pool_size) const;

-    /// Release pages in all cached free pools
-    void ReleasePagesInFreePools();
+    /**
+     * Release pages in cached free pools
+     * @param release_only_one_pool if true, release at most one pool per call; otherwise release all of them
+     * @return true if a pool was released and there may be more free pools left to release
+     */
+    bool ReleasePagesInFreePools(bool release_only_one_pool);

     /// @return used bytes count in object space (so exclude bytes in free pools)
     size_t GetObjectUsedBytes() const;
diff --git a/runtime/mem/gc/g1/g1-gc.cpp b/runtime/mem/gc/g1/g1-gc.cpp
index 15639cd8c..cf7936ab1 100644
--- a/runtime/mem/gc/g1/g1-gc.cpp
+++ b/runtime/mem/gc/g1/g1-gc.cpp
@@ -636,7 +636,13 @@ void G1GC<LanguageConfig>::WorkerTaskProcessing(GCWorkersTask *task, [[maybe_unu
             break;
         }
         case GCWorkersTaskTypes::TASK_RETURN_FREE_PAGES_TO_OS: {
-            PoolManager::GetMmapMemPool()->ReleasePagesInFreePools();
+            size_t counter = task->Cast<GCReturnFreePagesToOSTask>()->GetGcCounter();
+            // Atomic with acquire order reason: data race with gc_counter_
+            while (this->gc_counter_.load(std::memory_order_acquire) == counter &&
+                   PoolManager::GetMmapMemPool()->ReleasePagesInFreePools(true)) {
+            }
+            // Atomic with acquire order reason: data race with gc_counter_
+            need_to_release_pages_to_os_ = (this->gc_counter_.load(std::memory_order_acquire) == counter);
             break;
         }
         case GCWorkersTaskTypes::TASK_ENQUEUE_REMSET_REFS: {
@@ -829,11 +835,12 @@ void G1GC<LanguageConfig>::RunFullGC(panda::GCTask &task)
         ScopedTiming release_pages("Release Pages in Free Pools", *this->GetTiming());
         bool use_gc_workers = this->GetSettings()->GCWorkersCount() != 0;
         if (use_gc_workers) {
-            if (!this->GetWorkersTaskPool()->AddTask(GCWorkersTaskTypes::TASK_RETURN_FREE_PAGES_TO_OS)) {
-                PoolManager::GetMmapMemPool()->ReleasePagesInFreePools();
+            if (!this->GetWorkersTaskPool()->AddTask(  // Atomic with acquire order reason: data race with gc_counter_
+                    GCReturnFreePagesToOSTask(this->gc_counter_.load(std::memory_order_acquire)))) {
+                PoolManager::GetMmapMemPool()->ReleasePagesInFreePools(false);
             }
         } else {
-            PoolManager::GetMmapMemPool()->ReleasePagesInFreePools();
+            PoolManager::GetMmapMemPool()->ReleasePagesInFreePools(false);
         }
     }
     this->SetFullGC(false);
@@ -853,6 +860,12 @@ void G1GC<LanguageConfig>::RunMixedGC(panda::GCTask &task, const CollectionSet &
     g1_pause_tracker_.AddPauseInNanos(start_time, end_time);
     analytics_.ReportCollectionEnd(end_time, collection_set);
     collection_set_.clear();
+    // Continue the release of pages to the OS if it was interrupted
+    if (need_to_release_pages_to_os_ && this->GetSettings()->GCWorkersCount() != 0) {
+        this->GetWorkersTaskPool()->AddTask(
+            // Atomic with acquire order reason: data race with gc_counter_
+            GCReturnFreePagesToOSTask(this->gc_counter_.load(std::memory_order_acquire)));
+    }
 }

 template <class LanguageConfig>
diff --git a/runtime/mem/gc/g1/g1-gc.h b/runtime/mem/gc/g1/g1-gc.h
index fa0a2649f..7e5b6ae2e 100644
--- a/runtime/mem/gc/g1/g1-gc.h
+++ b/runtime/mem/gc/g1/g1-gc.h
@@ -434,6 +434,7 @@ private:
     double g1_promotion_region_alive_rate_ {0.0};
     bool g1_track_freed_objects_ {false};
     bool is_explicit_concurrent_gc_enabled_ {false};
+    bool need_to_release_pages_to_os_ {false};
     CollectionSet collection_set_;
     // Max size of unique_refs_from_remsets_ buffer. It should be enough to store
     // almost all references to the collection set.
diff --git a/runtime/mem/gc/workers/gc_workers_tasks.h b/runtime/mem/gc/workers/gc_workers_tasks.h
index bc44d520d..24cd22d57 100644
--- a/runtime/mem/gc/workers/gc_workers_tasks.h
+++ b/runtime/mem/gc/workers/gc_workers_tasks.h
@@ -62,7 +62,7 @@ class GCWorkersTask : public TaskInterface {
 public:
     explicit GCWorkersTask(GCWorkersTaskTypes type = GCWorkersTaskTypes::TASK_EMPTY) : task_type_(type)
     {
-        ASSERT(type == GCWorkersTaskTypes::TASK_EMPTY || type == GCWorkersTaskTypes::TASK_RETURN_FREE_PAGES_TO_OS);
+        ASSERT(type == GCWorkersTaskTypes::TASK_EMPTY);
     }

     ~GCWorkersTask() = default;
@@ -163,6 +163,22 @@ public:
     }
 };

+class GCReturnFreePagesToOSTask : public GCWorkersTask {
+public:
+    explicit GCReturnFreePagesToOSTask(size_t current_gc_counter)
+        : GCWorkersTask(GCWorkersTaskTypes::TASK_RETURN_FREE_PAGES_TO_OS, ToVoidPtr(current_gc_counter))
+    {
+    }
+    DEFAULT_COPY_SEMANTIC(GCReturnFreePagesToOSTask);
+    DEFAULT_MOVE_SEMANTIC(GCReturnFreePagesToOSTask);
+    ~GCReturnFreePagesToOSTask() = default;
+
+    size_t GetGcCounter() const
+    {
+        return reinterpret_cast<size_t>(storage_);
+    }
+};
+
 }  // namespace panda::mem

 #endif  // PANDA_RUNTIME_MEM_GC_GC_WORKERS_TASKS_H
diff --git a/runtime/mem/heap_space.cpp b/runtime/mem/heap_space.cpp
index 05dd8f78a..f3cc4d9c3 100644
--- a/runtime/mem/heap_space.cpp
+++ b/runtime/mem/heap_space.cpp
@@ -149,7 +149,7 @@ void HeapSpace::ClampCurrentMaxHeapSize()
     os::memory::WriteLockHolder lock(heap_lock_);
     mem_space_.ClampNewMaxSize(
         AlignUp(mem_space_.GetCurrentSize() + PANDA_DEFAULT_POOL_SIZE, PANDA_POOL_ALIGNMENT_IN_BYTES));
-    PoolManager::GetMmapMemPool()->ReleasePagesInFreePools();
+    PoolManager::GetMmapMemPool()->ReleasePagesInFreePools(false);
 }

 inline Pool HeapSpace::TryAllocPoolBase(size_t pool_size, SpaceType space_type, AllocatorType allocator_type,
-- 
Gitee
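The hunks above all implement one cooperative-interruption pattern: the worker task releases cached free pools one at a time and re-reads gc_counter_ between pools, so a newly started GC can cut the page-release work short, and the interrupted work can be picked up again after a later mixed GC. Releasing a single pool per iteration keeps the time between gc_counter_ checks bounded by one ReleasePages call, which is what makes the task interruptible. The snippet below is a minimal standalone sketch of that pattern only; the names gc_counter, free_pools, ReleaseOnePool and ReleaseFreePagesTask are illustrative and are not part of the Panda runtime API.

#include <atomic>
#include <cstddef>
#include <iostream>
#include <vector>

// Stand-ins for the runtime state (illustrative only).
std::atomic<std::size_t> gc_counter {0};  // incremented whenever a new GC starts
std::vector<int> free_pools(8, 0);        // models the cached free pools

// Releases at most one pool per call; returns true if a pool was released.
bool ReleaseOnePool()
{
    if (free_pools.empty()) {
        return false;
    }
    free_pools.pop_back();  // "return the pages of one pool to the OS"
    return true;
}

// Models the TASK_RETURN_FREE_PAGES_TO_OS handler: keep releasing single pools
// while no new GC has started since the task was submitted.
// Returns true if the task was interrupted before all pools were released.
bool ReleaseFreePagesTask(std::size_t counter_at_submit)
{
    while (gc_counter.load(std::memory_order_acquire) == counter_at_submit && ReleaseOnePool()) {
    }
    return gc_counter.load(std::memory_order_acquire) != counter_at_submit;
}

int main()
{
    std::size_t counter_at_submit = gc_counter.load(std::memory_order_acquire);
    gc_counter.fetch_add(1, std::memory_order_release);  // simulate a new GC starting before the task runs
    bool interrupted = ReleaseFreePagesTask(counter_at_submit);
    std::cout << "interrupted = " << std::boolalpha << interrupted << ", pools left = " << free_pools.size() << "\n";
    return 0;
}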