diff --git a/runtime/mem/gc/gc.cpp b/runtime/mem/gc/gc.cpp
index 000b04cef26394d159b0d7cdf3a5b8eddf03f282..9faef11e41f1fb8ba5ba8fe1e68830edcd59d188 100644
--- a/runtime/mem/gc/gc.cpp
+++ b/runtime/mem/gc/gc.cpp
@@ -247,6 +247,17 @@ void GC::RestoreCpuAffinity()
     ResetCpuAffinity(false);
 }
 
+bool GC::NeedRunGCAfterWaiting(size_t counter_before_waiting, const GCTask &task) const
+{
+    // Atomic with acquire order reason: data race with gc_counter_ with dependencies on reads after the load which
+    // should become visible
+    auto new_counter = gc_counter_.load(std::memory_order_acquire);
+    ASSERT(new_counter >= counter_before_waiting);
+    // Atomic with acquire order reason: data race with last_cause_ with dependencies on reads after the load which
+    // should become visible
+    return (new_counter == counter_before_waiting || last_cause_.load(std::memory_order_acquire) < task.reason);
+}
+
 // NOLINTNEXTLINE(performance-unnecessary-value-param)
 void GC::RunPhases(GCTask &task)
 {
@@ -256,10 +267,7 @@ void GC::RunPhases(GCTask &task)
     // should become visible
     auto old_counter = gc_counter_.load(std::memory_order_acquire);
     WaitForIdleGC();
-    // Atomic with acquire order reason: data race with gc_counter_ with dependecies on reads after the load which
-    // should become visible
-    auto new_counter = gc_counter_.load(std::memory_order_acquire);
-    if (new_counter > old_counter) {
+    if (!this->NeedRunGCAfterWaiting(old_counter, task)) {
         SetGCPhase(GCPhase::GC_PHASE_IDLE);
         return;
     }
@@ -652,13 +660,7 @@ bool GC::WaitForGC(GCTask task)
         ScopedTiming t("SuspendThreads", suspend_threads_timing);
         this->GetPandaVm()->GetRendezvous()->SafepointBegin();
     }
-
-    // Atomic with acquire order reason: data race with gc_counter_ with dependecies on reads after the load which
-    // should become visible
-    auto new_counter = this->gc_counter_.load(std::memory_order_acquire);
-    // Atomic with acquire order reason: data race with last_cause_ with dependecies on reads after the load which
-    // should become visible
-    if (new_counter > old_counter && this->last_cause_.load(std::memory_order_acquire) >= task.reason) {
+    if (!this->NeedRunGCAfterWaiting(old_counter, task)) {
         this->GetPandaVm()->GetRendezvous()->SafepointEnd();
         return false;
     }
diff --git a/runtime/mem/gc/gc.h b/runtime/mem/gc/gc.h
index 8b5fb13e48c121073f00dd99ca07c6c59b530d40..0ee433bf6b7bb61e0cb339865b67b0b4f5981172 100644
--- a/runtime/mem/gc/gc.h
+++ b/runtime/mem/gc/gc.h
@@ -648,6 +648,19 @@ private:
      */
     void ResetCpuAffinity(bool before_concurrent);
 
+    /**
+     * Check whether the GC should still be run after waiting for mutator threads. GC tasks may be posted by several
+     * mutator threads, so there is sometimes no need to run the GC again. Also, some GCs run "in place": such a GC
+     * waits for the idle state before running, and another GC may have run in the GC thread in the meantime
+     * @see WaitForIdleGC
+     *
+     * @param counter_before_waiting value of gc counter before waiting for mutator threads
+     * @param task current GC task
+     *
+     * @return true if the GC should be run with the current task after waiting for mutator threads, false otherwise
+     */
+    bool NeedRunGCAfterWaiting(size_t counter_before_waiting, const GCTask &task) const;
+
     /**
      * Entrypoint for GC worker thread
      * @param gc pointer to GC structure