From 0926fc94ee219696deb3854920fb659f325397b1 Mon Sep 17 00:00:00 2001
From: xiaowen
Date: Fri, 30 May 2025 22:53:39 +0800
Subject: [PATCH] Support concurrent roots marking

Signed-off-by: xiaowen
---
 common_components/base_runtime/hooks.h | 3 +-
 .../src/heap/collector/trace_collector.cpp | 108 ++++++++++++++----
 .../src/heap/collector/trace_collector.h | 3 +
 .../common_runtime/src/heap/verification.cpp | 9 +-
 .../src/heap/w_collector/w_collector.cpp | 44 ++++++-
 .../src/heap/w_collector/w_collector.h | 3 +
 .../common_runtime/src/mutator/satb_buffer.h | 12 ++
 common_components/heap/heap_visitor.cpp | 14 ++-
 ecmascript/ecma_context.cpp | 9 ++
 ecmascript/ecma_context.h | 1 +
 ecmascript/js_thread.cpp | 7 ++
 ecmascript/js_thread.h | 2 +
 ecmascript/mem/cmc_gc/hooks.cpp | 16 ++-
 ecmascript/mem/object_xray.h | 5 +
 ecmascript/module/js_module_manager.cpp | 17 +++
 ecmascript/module/js_module_manager.h | 18 +++
 16 files changed, 239 insertions(+), 32 deletions(-)

diff --git a/common_components/base_runtime/hooks.h b/common_components/base_runtime/hooks.h
index d6f2bcba2c..d2e858e813 100644
--- a/common_components/base_runtime/hooks.h
+++ b/common_components/base_runtime/hooks.h
@@ -24,7 +24,8 @@
 // `WorkStack` Should be moved to BaseRT and panda namespace later
 namespace panda {
 // Dynamic VM Roots scanning
-PUBLIC_API void VisitDynamicRoots(const RefFieldVisitor &visitor, bool isMark);
+PUBLIC_API void VisitDynamicSequentialRoots(const RefFieldVisitor &visitor, bool isMark);
+PUBLIC_API void VisitDynamicConcurrentRoots(const RefFieldVisitor &visitor, bool isMark);
 PUBLIC_API void VisitDynamicWeakRoots(const WeakRefFieldVisitor &visitorFunc);
 PUBLIC_API void VisitJSThread(void *jsThread, CommonRootVisitor visitor);
diff --git a/common_components/common_runtime/src/heap/collector/trace_collector.cpp b/common_components/common_runtime/src/heap/collector/trace_collector.cpp
index 8016fee1a0..3356cc2f37 100755
--- a/common_components/common_runtime/src/heap/collector/trace_collector.cpp
+++ b/common_components/common_runtime/src/heap/collector/trace_collector.cpp
@@ -178,34 +178,56 @@ void TraceCollector::ProcessMarkStack([[maybe_unused]] uint32_t threadIndex, Tas
 {
     size_t nNewlyMarked = 0;
     WeakStack weakStack;
-    // loop until work stack empty.
-    for (;;) {
-        ++nNewlyMarked;
-        if (workStack.empty()) {
-            break;
+    TraceCollector::WorkStack remarkStack;
+    auto fetchFromSatbBuffer = [this, &workStack, &remarkStack]() {
+        SatbBuffer::Instance().TryFetchOneRetiredNode(remarkStack);
+        while (!remarkStack.empty()) {
+            BaseObject* obj = remarkStack.back();
+            remarkStack.pop_back();
+            if (Heap::IsHeapAddress(obj) && (!MarkObject(obj))) {
+                workStack.push_back(obj);
+                DLOG(TRACE, "tracing take from satb buffer: obj %p", obj);
+            }
         }
-        // get next object from work stack.
-        BaseObject* obj = workStack.back();
-        workStack.pop_back();
-        auto region = RegionDesc::GetRegionDescAt(reinterpret_cast((void*)obj));
-        region->AddLiveByteCount(obj->GetSize());
+    };
+    size_t iterationCnt = 0;
+    constexpr size_t maxIterationLoopNum = 1000;
+    // loop until work stack empty.
+    do {
+        for (;;) {
+            ++nNewlyMarked;
+            if (workStack.empty()) {
+                break;
+            }
+            // get next object from work stack.
+            BaseObject* obj = workStack.back();
+            workStack.pop_back();
+            auto region = RegionDesc::GetRegionDescAt(reinterpret_cast((void*)obj));
+            region->AddLiveByteCount(obj->GetSize());
 #ifndef USE_CMC_GC
-    if (!obj->HasRefField()) {
-        continue;
-    }
-    TraceObjectRefFields(obj, workStack, weakStack);
-
+            if (!obj->HasRefField()) {
+                continue;
+            }
+            TraceObjectRefFields(obj, workStack, weakStack);
 #else
-    auto beforeSize = workStack.count();
-    TraceObjectRefFields(obj, workStack, weakStack);
-    DLOG(TRACE, "[tracing] visit finished, workstack size: before=%d, after=%d, newly added=%d",
-        beforeSize, workStack.count(), workStack.count() - beforeSize);
+            auto beforeSize = workStack.count();
+            TraceObjectRefFields(obj, workStack, weakStack);
+            DLOG(TRACE, "[tracing] visit finished, workstack size: before=%d, after=%d, newly added=%d", beforeSize,
+                workStack.count(), workStack.count() - beforeSize);
 #endif
-    // try to fork new task if needed.
-    if (threadPool != nullptr) {
-        TryForkTask(threadPool, workStack, globalQueue);
+            // try to fork new task if needed.
+            if (threadPool != nullptr) {
+                TryForkTask(threadPool, workStack, globalQueue);
+            }
         }
-    } // end of mark loop.
+        // Try to fetch tasks from the SATB buffer; bound the loop to make sure it converges in time.
+        if (++iterationCnt < maxIterationLoopNum) {
+            fetchFromSatbBuffer();
+            if (workStack.empty()) {
+                fetchFromSatbBuffer();
+            }
+        }
+    } while (!workStack.empty());
     // newly marked statistics.
     markedObjectCount_.fetch_add(nNewlyMarked, std::memory_order_relaxed);
     MergeWeakStack(weakStack);
@@ -226,7 +248,13 @@ void TraceCollector::EnumConcurrencyModelRoots(RootSet& rootSet) const
 void TraceCollector::EnumStaticRoots(RootSet& rootSet) const
 {
     const RefFieldVisitor& visitor = [&rootSet, this](RefField<>& root) { EnumRefFieldRoot(root, rootSet); };
-    VisitRoots(visitor, true);
+    VisitSequentialRoots(visitor, true);
+}
+
+void TraceCollector::EnumStaticConcurrentRoots(RootSet& rootSet) const
+{
+    const RefFieldVisitor& visitor = [&rootSet, this](RefField<>& root) { EnumRefFieldRoot(root, rootSet); };
+    VisitConcurrentRoots(visitor, true);
 }
 
 void TraceCollector::MergeMutatorRoots(WorkStack& workStack)
@@ -243,6 +271,12 @@ void TraceCollector::MergeMutatorRoots(WorkStack& workStack)
     }
 }
+void TraceCollector::EnumerateAllConcurrentRoots(WorkStack& workStack)
+{
+    OHOS_HITRACE("ARK_RT_GC_ENUM");
+    EnumerateAllConcurrentRootsImpl(GetThreadPool(), workStack);
+}
+
 void TraceCollector::EnumerateAllRoots(WorkStack& workStack)
 {
     OHOS_HITRACE("ARK_RT_GC_ENUM");
@@ -563,6 +597,32 @@ void TraceCollector::EnumerateAllRootsImpl(Taskpool *threadPool, RootSet& rootSe
     VLOG(REPORT, "Total roots: %zu(exclude stack roots)", rootSet.size());
 }
+void TraceCollector::EnumerateAllConcurrentRootsImpl(Taskpool* threadPool, RootSet& rootSet)
+{
+    ASSERT_LOGF(threadPool != nullptr, "thread pool is null");
+
+    const size_t threadCount = static_cast<size_t>(GetGCThreadCount(true));
+    RootSet rootSetsInstance[threadCount];
+    RootSet* rootSets = rootSetsInstance; // work_around the crash of clang parser
+
+    // Only one root task, no need to post task.
+    EnumStaticConcurrentRoots(rootSets[0]);
+
+    MergeMutatorRoots(rootSet);
+    WorkStack tempStack = NewWorkStack();
+    for (size_t i = 0; i < threadCount; ++i) {
+        tempStack.insert(rootSets[i]);
+    }
+    while (!tempStack.empty()) {
+        auto temp = tempStack.back();
+        tempStack.pop_back();
+        if (!this->MarkObject(temp)) {
+            rootSet.push_back(temp);
+        }
+    }
+    VLOG(REPORT, "Total roots: %zu(exclude stack roots)", rootSet.size());
+}
+
 void TraceCollector::VisitStaticRoots(const RefFieldVisitor& visitor) const
 {
     Heap::GetHeap().VisitStaticRoots(visitor);
diff --git a/common_components/common_runtime/src/heap/collector/trace_collector.h b/common_components/common_runtime/src/heap/collector/trace_collector.h
index 425146e126..50532543aa 100755
--- a/common_components/common_runtime/src/heap/collector/trace_collector.h
+++ b/common_components/common_runtime/src/heap/collector/trace_collector.h
@@ -289,6 +289,7 @@ protected:
     Taskpool *GetThreadPool() const { return collectorResources_.GetThreadPool(); }
     // enum all roots.
     void EnumerateAllRootsImpl(Taskpool *threadPool, RootSet& rootSet);
+    void EnumerateAllConcurrentRootsImpl(Taskpool *threadPool, RootSet& rootSet);
     // let finalizerProcessor process finalizers, and mark resurrected if in stw gc
     virtual void ProcessWeakReferences() {}
@@ -298,6 +299,7 @@ protected:
     void MergeMutatorRoots(WorkStack& workStack);
     void EnumerateAllRoots(WorkStack& workStack);
+    void EnumerateAllConcurrentRoots(WorkStack& workStack);
     void TraceRoots(WorkStack& workStack);
     bool MarkSatbBuffer(WorkStack& workStack);
@@ -316,6 +318,7 @@ private:
     void EnumMutatorRoot(ObjectPtr& obj, RootSet& rootSet) const;
     void EnumConcurrencyModelRoots(RootSet& rootSet) const;
     void EnumStaticRoots(RootSet& rootSet) const;
+    void EnumStaticConcurrentRoots(RootSet& rootSet) const;
     void EnumFinalizerProcessorRoots(RootSet& rootSet) const;
     void VisitStaticRoots(const RefFieldVisitor& visitor) const;
diff --git a/common_components/common_runtime/src/heap/verification.cpp b/common_components/common_runtime/src/heap/verification.cpp
index 7bda1f7eed..a1c80f9710 100755
--- a/common_components/common_runtime/src/heap/verification.cpp
+++ b/common_components/common_runtime/src/heap/verification.cpp
@@ -28,7 +28,8 @@
 namespace panda {
-void VisitRoots(const RefFieldVisitor &visitorFunc, bool isMark);
+void VisitSequentialRoots(const RefFieldVisitor &visitorFunc, bool isMark);
+void VisitConcurrentRoots(const RefFieldVisitor& visitorFunc, bool isMark);
 void VisitWeakRoots(const WeakRefFieldVisitor &visitorFunc);
 void IsValidObject(BaseObject *obj)
@@ -136,7 +137,8 @@ public:
         MarkStack roots;
         RefFieldVisitor refVisitor = [&](RefField<> &ref) { visitor.VisitRefField(ref); };
-        VisitRoots(refVisitor, true);
+        VisitSequentialRoots(refVisitor, true);
+        VisitConcurrentRoots(refVisitor, true);
     }
 
     void IterateWeakRoot(VerifyVisitor &visitor)
@@ -201,7 +203,8 @@ private:
             }
         };
-        VisitRoots(refVisitor, true);
+        VisitSequentialRoots(refVisitor, true);
+        VisitConcurrentRoots(refVisitor, true);
     }
 
     void Trace(MarkStack &markStack) {}
diff --git a/common_components/common_runtime/src/heap/w_collector/w_collector.cpp b/common_components/common_runtime/src/heap/w_collector/w_collector.cpp
index bd89cafb22..3dd3e25020 100755
--- a/common_components/common_runtime/src/heap/w_collector/w_collector.cpp
+++ b/common_components/common_runtime/src/heap/w_collector/w_collector.cpp
@@ -291,6 +291,26 @@ BaseObject* WCollector::ForwardUpdateRawRef(ObjectRef& root)
     return oldObj;
 }
+void WCollector::PreforwardConcurrentStaticRoots()
+{
+    panda::RefFieldVisitor visitor = [this](RefField<>& refField) {
+        RefField<> oldField(refField);
+        BaseObject* oldObj = oldField.GetTargetObject();
+        DLOG(FIX, "visit raw-ref @%p: %p", &refField, oldObj);
+        if (IsFromObject(oldObj)) {
+            BaseObject* toVersion = TryForwardObject(oldObj);
+            CHECK(toVersion != nullptr);
+            RefField<> newField(toVersion);
+            // CAS failure means some mutator or gc thread writes a new ref (must be a to-object), no need to retry.
+            if (refField.CompareExchange(oldField.GetFieldValue(), newField.GetFieldValue())) {
+                DLOG(FIX, "fix raw-ref @%p: %p -> %p", &refField, oldObj, toVersion);
+            }
+        }
+    };
+
+    VisitConcurrentRoots(visitor, false);
+}
+
 void WCollector::PreforwardStaticRoots()
 {
     panda::RefFieldVisitor visitor = [this](RefField<>& refField) {
@@ -328,7 +348,7 @@ void WCollector::PreforwardStaticRoots()
         return true;
     };
-    VisitRoots(visitor, false);
+    VisitSequentialRoots(visitor, false);
     VisitWeakRoots(weakVisitor);
     MutatorManager::Instance().VisitAllMutators([](Mutator& mutator) {
         // Request finalize callback in each vm-thread when gc finished.
@@ -357,6 +377,11 @@ void WCollector::PreforwardConcurrencyModelRoots()
     UNREACHABLE_CC();
 }
+void WCollector::EnumConcurrentRoots(WorkStack& workStack)
+{
+    EnumerateAllConcurrentRoots(workStack);
+}
+
 void WCollector::EnumRoots(WorkStack& workStack)
 {
     // assemble garbage candidates.
@@ -389,6 +414,21 @@ void WCollector::PostTrace()
     WVerify::VerifyAfterMark(*this);
 }
+
+void WCollector::ConcurrentPreforward()
+{
+    OHOS_HITRACE("ARK_RT_GC_PREFORWARD");
+    ARK_COMMON_PHASE_TIMER("Preforward");
+    TransitionToGCPhase(GCPhase::GC_PHASE_PRECOPY, true);
+
+    [[maybe_unused]] Taskpool *threadPool = GetThreadPool();
+    ASSERT_LOGF(threadPool != nullptr, "thread pool is null");
+
+    // copy and fix finalizer roots.
+    // Only one root task, no need to post task.
+    PreforwardConcurrentStaticRoots();
+}
+
 void WCollector::Preforward()
 {
     OHOS_HITRACE("ARK_RT_GC_PREFORWARD");
@@ -482,6 +522,7 @@ void WCollector::DoGarbageCollection()
         ScopedStopTheWorld stw("wgc-enumroot");
         EnumRoots(workStack);
     }
+    EnumConcurrentRoots(workStack);
     TraceHeap(workStack);
     PostTrace();
@@ -490,6 +531,7 @@ void WCollector::DoGarbageCollection()
         reinterpret_cast(theAllocator_).PrepareForward();
         Preforward();
     }
+    ConcurrentPreforward();
     // reclaim large objects should after preforward(may process weak ref)
     // and before fix heap(may clear live bit)
     CollectLargeGarbage();
diff --git a/common_components/common_runtime/src/heap/w_collector/w_collector.h b/common_components/common_runtime/src/heap/w_collector/w_collector.h
index 8ddedc0475..2f4a4b888e 100755
--- a/common_components/common_runtime/src/heap/w_collector/w_collector.h
+++ b/common_components/common_runtime/src/heap/w_collector/w_collector.h
@@ -162,12 +162,15 @@ private:
     bool TryUpdateRefFieldImpl(BaseObject* obj, RefField<>& ref, BaseObject*& oldRef, BaseObject*& newRef) const;
     void EnumRoots(WorkStack& workStack);
+    void EnumConcurrentRoots(WorkStack& workStack);
     void TraceHeap(WorkStack& workStack);
     void PostTrace();
     void Preforward();
+    void ConcurrentPreforward();
     void PreforwardStaticRoots();
+    void PreforwardConcurrentStaticRoots();
     void PreforwardConcurrencyModelRoots();
     void PreforwardFinalizerProcessorRoots();
diff --git a/common_components/common_runtime/src/mutator/satb_buffer.h b/common_components/common_runtime/src/mutator/satb_buffer.h
index f099ef51c0..414215cdc6 100755
--- a/common_components/common_runtime/src/mutator/satb_buffer.h
+++ b/common_components/common_runtime/src/mutator/satb_buffer.h
@@ -245,6 +245,18 @@ public:
         }
     }
 
+    template <typename T>
+    void TryFetchOneRetiredNode(T& stack)
+    {
+        TreapNode* node = retiredNodes_.Pop();
+        if (!node) {
+            return;
+        }
+        node->GetObjects(stack);
+        node->Clear();
+        freeNodes_.Push(node);
+    }
+
     void ClearBuffer()
     {
         TreapNode* head = retiredNodes_.PopAll();
diff --git a/common_components/heap/heap_visitor.cpp b/common_components/heap/heap_visitor.cpp
index c69cfb7014..51a7bec78f 100755
--- a/common_components/heap/heap_visitor.cpp
+++ b/common_components/heap/heap_visitor.cpp
@@ -30,9 +30,19 @@ void __attribute__((weak)) SweepStaticRoots(const WeakRefFieldVisitor &visitor)
 {
 }
 
-void VisitRoots(const RefFieldVisitor &visitor, bool isMark)
+void VisitSequentialRoots(const RefFieldVisitor &visitor, bool isMark)
 {
-    VisitDynamicRoots(visitor, isMark);
+    VisitDynamicSequentialRoots(visitor, isMark);
+    if (isMark) {
+        VisitStaticRoots(visitor);
+    } else {
+        UpdateStaticRoots(visitor);
+    }
+}
+
+void VisitConcurrentRoots(const RefFieldVisitor &visitor, bool isMark)
+{
+    VisitDynamicConcurrentRoots(visitor, isMark);
     if (isMark) {
         VisitStaticRoots(visitor);
     } else {
diff --git a/ecmascript/ecma_context.cpp b/ecmascript/ecma_context.cpp
index 8d24dad22e..79be135d19 100644
--- a/ecmascript/ecma_context.cpp
+++ b/ecmascript/ecma_context.cpp
@@ -837,6 +837,13 @@ void EcmaContext::IterateMegaIC(RootVisitor &v)
     }
 }
+void EcmaContext::IterateConcurrentRoots(RootVisitor &v)
+{
+    if (moduleManager_) {
+        moduleManager_->AsyncIterate(v);
+    }
+}
+
 void EcmaContext::Iterate(RootVisitor &v)
 {
     // visit global Constant
@@ -872,9 +879,11 @@ void EcmaContext::Iterate(RootVisitor &v)
     if (functionProtoTransitionTable_) {
         functionProtoTransitionTable_->Iterate(v);
     }
+#ifndef USE_CMC_GC
     if (moduleManager_) {
         moduleManager_->Iterate(v);
     }
+#endif
     if (ptManager_) {
         ptManager_->Iterate(v);
     }
diff --git a/ecmascript/ecma_context.h b/ecmascript/ecma_context.h
index 85ee325c73..1a5d95df00 100644
--- a/ecmascript/ecma_context.h
+++ b/ecmascript/ecma_context.h
@@ -354,6 +354,7 @@ public:
     void IterateMegaIC(RootVisitor &v);
     void Iterate(RootVisitor &v);
+    void IterateConcurrentRoots(RootVisitor &v);
     static void MountContext(JSThread *thread);
     static void UnmountContext(JSThread *thread);
     void SetMicroJobQueue(job::MicroJobQueue *queue);
diff --git a/ecmascript/js_thread.cpp b/ecmascript/js_thread.cpp
index de43136394..e20041694e 100644
--- a/ecmascript/js_thread.cpp
+++ b/ecmascript/js_thread.cpp
@@ -476,6 +476,13 @@ void JSThread::SetJitCodeMap(JSTaggedType exception, MachineCode* machineCode,
     }
 }
 
+void JSThread::IterateConcurrentRoots(RootVisitor &visitor)
+{
+    for (EcmaContext *context : contexts_) {
+        context->IterateConcurrentRoots(visitor);
+    }
+}
+
 void JSThread::Iterate(RootVisitor &visitor)
 {
     if (!glueData_.exception_.IsHole()) {
diff --git a/ecmascript/js_thread.h b/ecmascript/js_thread.h
index 1fdbfa57ee..fa91ea72c2 100644
--- a/ecmascript/js_thread.h
+++ b/ecmascript/js_thread.h
@@ -345,6 +345,8 @@ public:
         glueData_.isTracing_ = isTracing;
     }
 
+    void IterateConcurrentRoots(RootVisitor &visitor);
+
     void Iterate(RootVisitor &visitor);
     void IterateJitCodeMap(const JitCodeMapVisitor &updater);
diff --git a/ecmascript/mem/cmc_gc/hooks.cpp b/ecmascript/mem/cmc_gc/hooks.cpp
index 608da6c919..75cca06ae5 100644
--- a/ecmascript/mem/cmc_gc/hooks.cpp
+++ b/ecmascript/mem/cmc_gc/hooks.cpp
@@ -93,7 +93,7 @@ private:
     const WeakRefFieldVisitor &visitor_;
 };
 
-void VisitDynamicRoots(const RefFieldVisitor &visitorFunc, bool isMark)
+void VisitDynamicSequentialRoots(const RefFieldVisitor &visitorFunc, bool isMark)
 {
     ecmascript::VMRootVisitType type = isMark ? ecmascript::VMRootVisitType::MARK :
                                                 ecmascript::VMRootVisitType::UPDATE_ROOT;
@@ -120,6 +120,20 @@
     });
 }
 
+void VisitDynamicConcurrentRoots(const RefFieldVisitor &visitorFunc, bool isMark)
+{
+    ecmascript::VMRootVisitType type = isMark ? ecmascript::VMRootVisitType::MARK :
+                                                 ecmascript::VMRootVisitType::UPDATE_ROOT;
+    CMCRootVisitor visitor(visitorFunc);
+
+    ecmascript::Runtime *runtime = ecmascript::Runtime::GetInstance();
+
+    runtime->GCIterateThreadList([&](JSThread *thread) {
+        auto vm = thread->GetEcmaVM();
+        ObjectXRay::VisitConcurrentVMRoots(vm, visitor, type);
+    });
+}
+
 void VisitDynamicWeakRoots(const WeakRefFieldVisitor &visitorFunc)
 {
     CMCWeakVisitor visitor(visitorFunc);
diff --git a/ecmascript/mem/object_xray.h b/ecmascript/mem/object_xray.h
index bf2dd00214..e811ee4ce1 100644
--- a/ecmascript/mem/object_xray.h
+++ b/ecmascript/mem/object_xray.h
@@ -128,6 +128,11 @@ public:
     ObjectXRay() = default;
     ~ObjectXRay() = default;
 
+    static inline void VisitConcurrentVMRoots(EcmaVM *vm, RootVisitor &visitor, VMRootVisitType type)
+    {
+        vm->GetAssociatedJSThread()->IterateConcurrentRoots(visitor);
+    }
+
     static inline void VisitVMRoots(EcmaVM *vm, RootVisitor &visitor, VMRootVisitType type)
     {
         vm->Iterate(visitor, type);
diff --git a/ecmascript/module/js_module_manager.cpp b/ecmascript/module/js_module_manager.cpp
index 9dda8b20c1..b5d01a8d34 100644
--- a/ecmascript/module/js_module_manager.cpp
+++ b/ecmascript/module/js_module_manager.cpp
@@ -537,6 +537,14 @@ void ModuleManager::Iterate(RootVisitor &v)
     }
 }
 
+void ModuleManager::AsyncIterate(RootVisitor &v)
+{
+#ifdef USE_CMC_GC
+    LockHolder lock(asyncGCLock);
+#endif
+    Iterate(v);
+}
+
 CString ModuleManager::GetRecordName(JSTaggedValue module)
 {
     CString entry = "";
@@ -701,6 +709,9 @@ JSHandle ModuleManager::TryGetImportedModule(const CString& refer
 void ModuleManager::RemoveModuleFromCache(const CString& recordName)
 {
+#ifdef USE_CMC_GC
+    LockHolder lock(asyncGCLock);
+#endif
     auto entry = resolvedModules_.find(recordName);
     if (entry == resolvedModules_.end()) { // LCOV_EXCL_BR_LINE
         LOG_ECMA(FATAL) << "Can not get module: " << recordName <<
@@ -716,11 +727,17 @@ void ModuleManager::RemoveModuleFromCache(const CString& recordName)
 // this function only remove module's name from resolvedModules List, it's content still needed by sharedmodule
 void ModuleManager::RemoveModuleNameFromList(const CString& recordName)
 {
+#ifdef USE_CMC_GC
+    LockHolder lock(asyncGCLock);
+#endif
     auto entry = resolvedModules_.find(recordName);
     if (entry == resolvedModules_.end()) { // LCOV_EXCL_BR_LINE
         LOG_ECMA(FATAL) << "Can not get module: " << recordName <<
             ", when try to remove the module";
     }
+#ifdef USE_CMC_GC
+    BaseRuntime::WriteBarrier(nullptr, nullptr, (void*)resolvedModules_[recordName].GetRawData());
+#endif
     resolvedModules_.erase(recordName);
 }
 } // namespace panda::ecmascript
diff --git a/ecmascript/module/js_module_manager.h b/ecmascript/module/js_module_manager.h
index fd2116c9bf..9d76a4872f 100644
--- a/ecmascript/module/js_module_manager.h
+++ b/ecmascript/module/js_module_manager.h
@@ -33,6 +33,9 @@ public:
     ~ModuleManager()
     {
         InstantiatingSModuleList_.clear();
+#ifdef USE_CMC_GC
+        LockHolder lock(asyncGCLock);
+#endif
         resolvedModules_.clear();
     }
@@ -89,6 +92,7 @@ public:
     JSHandle TryGetImportedModule(const CString& referencing);
     void Iterate(RootVisitor &v);
+    void AsyncIterate(RootVisitor &v);
     void AddToInstantiatingSModuleList(const CString &record);
     ModuleExecuteMode GetExecuteMode() const
     {
@@ -109,16 +113,27 @@ public:
     }
 
     inline void AddResolveImportedModule(const CString &recordName, JSTaggedValue module)
     {
+#ifdef USE_CMC_GC
+        LockHolder lock(asyncGCLock);
+        BaseRuntime::WriteBarrier(nullptr, nullptr, (void*)module.GetRawData());
+#endif
         resolvedModules_.emplace(recordName, module);
     }
 
     inline void UpdateResolveImportedModule(const CString &recordName, JSTaggedValue module)
     {
+#ifdef USE_CMC_GC
+        LockHolder lock(asyncGCLock);
+        BaseRuntime::WriteBarrier(nullptr, nullptr, (void*)module.GetRawData());
+#endif
         resolvedModules_[recordName] = module;
     }
 
     void NativeObjDestory()
     {
+#ifdef USE_CMC_GC
+        LockHolder lock(asyncGCLock);
+#endif
         for (auto it = resolvedModules_.begin(); it != resolvedModules_.end(); it++) {
             CString key = it->first;
             ASSERT(!key.empty());
@@ -165,6 +180,9 @@ private:
     CUnorderedMap<CString, JSTaggedValue> resolvedModules_;
     std::atomic<ModuleExecuteMode> isExecuteBuffer_ {ModuleExecuteMode::ExecuteZipMode};
     CVector<CString> InstantiatingSModuleList_;
+#ifdef USE_CMC_GC
+    Mutex asyncGCLock;
+#endif
 
     friend class EcmaVM;
     friend class PatchLoader;
--
Gitee