From fe8412f530bdad138dd3e127e8f250a295c65eb1 Mon Sep 17 00:00:00 2001 From: noah Date: Mon, 21 Sep 2020 11:40:32 +0800 Subject: [PATCH] add several patches --- ...ator-to-mainline-and-make-it-default.patch | 909 ++++++++++++++++++ ...t_of_2-work-stealing-queue-selection.patch | 614 ++++++++++++ ...C1-OopMap-inserted-twice-fatal-error.patch | 18 + java-11-openjdk.spec | 15 +- 4 files changed, 1554 insertions(+), 2 deletions(-) create mode 100644 8204947-Port-ShenandoahTaskTerminator-to-mainline-and-make-it-default.patch create mode 100644 8205921-Optimizing-best_of_2-work-stealing-queue-selection.patch create mode 100644 8237483-AArch64-C1-OopMap-inserted-twice-fatal-error.patch diff --git a/8204947-Port-ShenandoahTaskTerminator-to-mainline-and-make-it-default.patch b/8204947-Port-ShenandoahTaskTerminator-to-mainline-and-make-it-default.patch new file mode 100644 index 0000000..6fd91f7 --- /dev/null +++ b/8204947-Port-ShenandoahTaskTerminator-to-mainline-and-make-it-default.patch @@ -0,0 +1,909 @@ +commit 21a400b0cecd92cbaf446ac3581df4ff17b1fde1 +Date: Tue Sep 15 19:52:43 2020 +0800 + + 8204947: Port ShenandoahTaskTerminator to mainline and make it default + + Summary: Improve gc performance, port ShenandoahTaskTerminator to mainline and make it default + LLT: jtreg + Bug url: https://bugs.openjdk.java.net/browse/JDK-8204947 + +diff --git a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp +index c98691ea7..5fc5ee83e 100644 +--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp ++++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp +@@ -55,6 +55,7 @@ + #include "gc/shared/genOopClosures.inline.hpp" + #include "gc/shared/isGCActiveMark.hpp" + #include "gc/shared/oopStorageParState.hpp" ++#include "gc/shared/owstTaskTerminator.hpp" + #include "gc/shared/referencePolicy.hpp" + #include "gc/shared/referenceProcessorPhaseTimes.hpp" + #include "gc/shared/space.inline.hpp" +@@ -2993,7 +2994,7 @@ bool CMSCollector::markFromRootsWork() { + // Forward decl + class CMSConcMarkingTask; + +-class CMSConcMarkingTerminator: public ParallelTaskTerminator { ++class CMSConcMarkingParallelTerminator: public ParallelTaskTerminator { + CMSCollector* _collector; + CMSConcMarkingTask* _task; + public: +@@ -3003,7 +3004,7 @@ class CMSConcMarkingTerminator: public ParallelTaskTerminator { + // "queue_set" is a set of work queues of other threads. + // "collector" is the CMS collector associated with this task terminator. + // "yield" indicates whether we need the gang as a whole to yield. +- CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) : ++ CMSConcMarkingParallelTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) : + ParallelTaskTerminator(n_threads, queue_set), + _collector(collector) { } + +@@ -3012,6 +3013,45 @@ class CMSConcMarkingTerminator: public ParallelTaskTerminator { + } + }; + ++class CMSConcMarkingOWSTTerminator: public OWSTTaskTerminator { ++ CMSCollector* _collector; ++ CMSConcMarkingTask* _task; ++ public: ++ virtual void yield(); ++ ++ // "n_threads" is the number of threads to be terminated. ++ // "queue_set" is a set of work queues of other threads. ++ // "collector" is the CMS collector associated with this task terminator. ++ // "yield" indicates whether we need the gang as a whole to yield.
++ CMSConcMarkingOWSTTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) : ++ OWSTTaskTerminator(n_threads, queue_set), ++ _collector(collector) { } ++ ++ void set_task(CMSConcMarkingTask* task) { ++ _task = task; ++ } ++}; ++ ++class CMSConcMarkingTaskTerminator { ++ private: ++ ParallelTaskTerminator* _term; ++ public: ++ CMSConcMarkingTaskTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) { ++ if (UseOWSTTaskTerminator) { ++ _term = new CMSConcMarkingOWSTTerminator(n_threads, queue_set, collector); ++ } else { ++ _term = new CMSConcMarkingParallelTerminator(n_threads, queue_set, collector); ++ } ++ } ++ ~CMSConcMarkingTaskTerminator() { ++ assert(_term != NULL, "Must not be NULL"); ++ delete _term; ++ } ++ ++ void set_task(CMSConcMarkingTask* task); ++ ParallelTaskTerminator* terminator() const { return _term; } ++}; ++ + class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator { + CMSConcMarkingTask* _task; + public: +@@ -3039,7 +3079,7 @@ class CMSConcMarkingTask: public YieldingFlexibleGangTask { + OopTaskQueueSet* _task_queues; + + // Termination (and yielding) support +- CMSConcMarkingTerminator _term; ++ CMSConcMarkingTaskTerminator _term; + CMSConcMarkingTerminatorTerminator _term_term; + + public: +@@ -3068,7 +3108,7 @@ class CMSConcMarkingTask: public YieldingFlexibleGangTask { + + HeapWord* volatile* global_finger_addr() { return &_global_finger; } + +- CMSConcMarkingTerminator* terminator() { return &_term; } ++ ParallelTaskTerminator* terminator() { return _term.terminator(); } + + virtual void set_for_termination(uint active_workers) { + terminator()->reset_for_reuse(active_workers); +@@ -3086,7 +3126,7 @@ class CMSConcMarkingTask: public YieldingFlexibleGangTask { + void reset(HeapWord* ra) { + assert(_global_finger >= _cms_space->end(), "Postcondition of ::work(i)"); + _restart_addr = _global_finger = ra; +- _term.reset_for_reuse(); ++ _term.terminator()->reset_for_reuse(); + } + + static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk, +@@ -3107,7 +3147,7 @@ bool CMSConcMarkingTerminatorTerminator::should_exit_termination() { + // thread has yielded. + } + +-void CMSConcMarkingTerminator::yield() { ++void CMSConcMarkingParallelTerminator::yield() { + if (_task->should_yield()) { + _task->yield(); + } else { +@@ -3115,6 +3155,22 @@ void CMSConcMarkingTerminator::yield() { + } + } + ++void CMSConcMarkingOWSTTerminator::yield() { ++ if (_task->should_yield()) { ++ _task->yield(); ++ } else { ++ OWSTTaskTerminator::yield(); ++ } ++} ++ ++void CMSConcMarkingTaskTerminator::set_task(CMSConcMarkingTask* task) { ++ if (UseOWSTTaskTerminator) { ++ ((CMSConcMarkingOWSTTerminator*)_term)->set_task(task); ++ } else { ++ ((CMSConcMarkingParallelTerminator*)_term)->set_task(task); ++ } ++} ++ + //////////////////////////////////////////////////////////////// + // Concurrent Marking Algorithm Sketch + //////////////////////////////////////////////////////////////// +@@ -4303,7 +4359,7 @@ class CMSParRemarkTask: public CMSParMarkTask { + + // The per-thread work queues, available here for stealing. 
+ OopTaskQueueSet* _task_queues; +- ParallelTaskTerminator _term; ++ TaskTerminator _term; + StrongRootsScope* _strong_roots_scope; + + public: +@@ -4325,7 +4381,7 @@ class CMSParRemarkTask: public CMSParMarkTask { + + OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); } + +- ParallelTaskTerminator* terminator() { return &_term; } ++ ParallelTaskTerminator* terminator() { return _term.terminator(); } + uint n_workers() { return _n_workers; } + + void work(uint worker_id); +@@ -5014,11 +5070,11 @@ void CMSCollector::do_remark_non_parallel() { + //////////////////////////////////////////////////////// + class AbstractGangTaskWOopQueues : public AbstractGangTask { + OopTaskQueueSet* _queues; +- ParallelTaskTerminator _terminator; ++ TaskTerminator _terminator; + public: + AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues, uint n_threads) : + AbstractGangTask(name), _queues(queues), _terminator(n_threads, _queues) {} +- ParallelTaskTerminator* terminator() { return &_terminator; } ++ ParallelTaskTerminator* terminator() { return _terminator.terminator(); } + OopTaskQueueSet* queues() { return _queues; } + }; + +diff --git a/src/hotspot/share/gc/cms/parNewGeneration.cpp b/src/hotspot/share/gc/cms/parNewGeneration.cpp +index f6c55900f..0febc5292 100644 +--- a/src/hotspot/share/gc/cms/parNewGeneration.cpp ++++ b/src/hotspot/share/gc/cms/parNewGeneration.cpp +@@ -74,7 +74,7 @@ ParScanThreadState::ParScanThreadState(Space* to_space_, + Stack* overflow_stacks_, + PreservedMarks* preserved_marks_, + size_t desired_plab_sz_, +- ParallelTaskTerminator& term_) : ++ TaskTerminator& term_) : + _to_space(to_space_), + _old_gen(old_gen_), + _young_gen(young_gen_), +@@ -92,7 +92,7 @@ ParScanThreadState::ParScanThreadState(Space* to_space_, + _older_gen_closure(young_gen_, this), + _evacuate_followers(this, &_to_space_closure, &_old_gen_closure, + &_to_space_root_closure, young_gen_, &_old_gen_root_closure, +- work_queue_set_, &term_), ++ work_queue_set_, term_.terminator()), + _is_alive_closure(young_gen_), + _scan_weak_ref_closure(young_gen_, this), + _keep_alive_closure(&_scan_weak_ref_closure), +@@ -305,7 +305,7 @@ public: + Stack* overflow_stacks_, + PreservedMarksSet& preserved_marks_set, + size_t desired_plab_sz, +- ParallelTaskTerminator& term); ++ TaskTerminator& term); + + ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); } + +@@ -326,14 +326,14 @@ public: + #endif // TASKQUEUE_STATS + + private: +- ParallelTaskTerminator& _term; ++ TaskTerminator& _term; + ParNewGeneration& _young_gen; + Generation& _old_gen; + ParScanThreadState* _per_thread_states; + const int _num_threads; + public: + bool is_valid(int id) const { return id < _num_threads; } +- ParallelTaskTerminator* terminator() { return &_term; } ++ ParallelTaskTerminator* terminator() { return _term.terminator(); } + }; + + ParScanThreadStateSet::ParScanThreadStateSet(int num_threads, +@@ -344,7 +344,7 @@ ParScanThreadStateSet::ParScanThreadStateSet(int num_threads, + Stack* overflow_stacks, + PreservedMarksSet& preserved_marks_set, + size_t desired_plab_sz, +- ParallelTaskTerminator& term) ++ TaskTerminator& term) + : _young_gen(young_gen), + _old_gen(old_gen), + _term(term), +@@ -378,7 +378,7 @@ void ParScanThreadStateSet::trace_promotion_failed(const YoungGCTracer* gc_trace + } + + void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed) { +- _term.reset_for_reuse(active_threads); ++ _term.terminator()->reset_for_reuse(active_threads); + if (promotion_failed) { + for 
(int i = 0; i < _num_threads; ++i) { + thread_state(i).print_promotion_failure_size(); +@@ -909,7 +909,7 @@ void ParNewGeneration::collect(bool full, + + // Always set the terminator for the active number of workers + // because only those workers go through the termination protocol. +- ParallelTaskTerminator _term(active_workers, task_queues()); ++ TaskTerminator _term(active_workers, task_queues()); + ParScanThreadStateSet thread_state_set(active_workers, + *to(), *this, *_old_gen, *task_queues(), + _overflow_stacks, _preserved_marks_set, +diff --git a/src/hotspot/share/gc/cms/parNewGeneration.hpp b/src/hotspot/share/gc/cms/parNewGeneration.hpp +index b3e7c5a7d..7bf37e535 100644 +--- a/src/hotspot/share/gc/cms/parNewGeneration.hpp ++++ b/src/hotspot/share/gc/cms/parNewGeneration.hpp +@@ -134,7 +134,7 @@ class ParScanThreadState { + Stack* overflow_stacks_, + PreservedMarks* preserved_marks_, + size_t desired_plab_sz_, +- ParallelTaskTerminator& term_); ++ TaskTerminator& term_); + + public: + AgeTable* age_table() {return &_ageTable;} +diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp +index a987377ae..130f8ec0a 100644 +--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp ++++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp +@@ -3241,7 +3241,7 @@ protected: + G1ParScanThreadStateSet* _pss; + RefToScanQueueSet* _queues; + G1RootProcessor* _root_processor; +- ParallelTaskTerminator _terminator; ++ TaskTerminator _terminator; + uint _n_workers; + + public: +@@ -3286,7 +3286,7 @@ public: + size_t evac_term_attempts = 0; + { + double start = os::elapsedTime(); +- G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, &_terminator); ++ G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, _terminator.terminator()); + evac.do_void(); + + evac_term_attempts = evac.term_attempts(); +@@ -3988,8 +3988,8 @@ void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers + assert(_workers->active_workers() >= ergo_workers, + "Ergonomically chosen workers (%u) should be less than or equal to active workers (%u)", + ergo_workers, _workers->active_workers()); +- ParallelTaskTerminator terminator(ergo_workers, _queues); +- G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _pss, _queues, &terminator); ++ TaskTerminator terminator(ergo_workers, _queues); ++ G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _pss, _queues, terminator.terminator()); + + _workers->run_task(&proc_task_proxy, ergo_workers); + } +diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp +index eff6b7b02..920f796b5 100644 +--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp ++++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp +@@ -366,7 +366,7 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, + // _tasks set inside the constructor + + _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)), +- _terminator(ParallelTaskTerminator((int) _max_num_tasks, _task_queues)), ++ _terminator((int) _max_num_tasks, _task_queues), + + _first_overflow_barrier_sync(), + _second_overflow_barrier_sync(), +@@ -581,7 +581,7 @@ void G1ConcurrentMark::set_concurrency(uint active_tasks) { + _num_active_tasks = active_tasks; + // Need to update the three data structures below according to the + // number of active threads for this phase. 
+- _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues); ++ _terminator = TaskTerminator((int) active_tasks, _task_queues); + _first_overflow_barrier_sync.set_n_workers((int) active_tasks); + _second_overflow_barrier_sync.set_n_workers((int) active_tasks); + } +diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp +index 6707537fd..dd253bbdf 100644 +--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp ++++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp +@@ -322,8 +322,8 @@ class G1ConcurrentMark : public CHeapObj { + uint _num_active_tasks; // Number of tasks currently active + G1CMTask** _tasks; // Task queue array (max_worker_id length) + +- G1CMTaskQueueSet* _task_queues; // Task queue set +- ParallelTaskTerminator _terminator; // For termination ++ G1CMTaskQueueSet* _task_queues; // Task queue set ++ TaskTerminator _terminator; // For termination + + // Two sync barriers that are used to synchronize tasks when an + // overflow occurs. The algorithm is the following. All tasks enter +@@ -409,10 +409,10 @@ class G1ConcurrentMark : public CHeapObj { + // Prints all gathered CM-related statistics + void print_stats(); + +- HeapWord* finger() { return _finger; } +- bool concurrent() { return _concurrent; } +- uint active_tasks() { return _num_active_tasks; } +- ParallelTaskTerminator* terminator() { return &_terminator; } ++ HeapWord* finger() { return _finger; } ++ bool concurrent() { return _concurrent; } ++ uint active_tasks() { return _num_active_tasks; } ++ ParallelTaskTerminator* terminator() const { return _terminator.terminator(); } + + // Claims the next available region to be scanned by a marking + // task/thread. It might return NULL if the next region is empty or +diff --git a/src/hotspot/share/gc/g1/g1FullGCMarkTask.cpp b/src/hotspot/share/gc/g1/g1FullGCMarkTask.cpp +index 34025dce0..d2c4b8d60 100644 +--- a/src/hotspot/share/gc/g1/g1FullGCMarkTask.cpp ++++ b/src/hotspot/share/gc/g1/g1FullGCMarkTask.cpp +@@ -60,7 +60,7 @@ void G1FullGCMarkTask::work(uint worker_id) { + } + + // Mark stack is populated, now process and drain it. +- marker->complete_marking(collector()->oop_queue_set(), collector()->array_queue_set(), &_terminator); ++ marker->complete_marking(collector()->oop_queue_set(), collector()->array_queue_set(), _terminator.terminator()); + + // This is the point where the entire marking should have completed. 
+ assert(marker->oop_stack()->is_empty(), "Marking should have completed"); +diff --git a/src/hotspot/share/gc/g1/g1FullGCMarkTask.hpp b/src/hotspot/share/gc/g1/g1FullGCMarkTask.hpp +index 8dfff0555..7223cee50 100644 +--- a/src/hotspot/share/gc/g1/g1FullGCMarkTask.hpp ++++ b/src/hotspot/share/gc/g1/g1FullGCMarkTask.hpp +@@ -36,7 +36,7 @@ + + class G1FullGCMarkTask : public G1FullGCTask { + G1RootProcessor _root_processor; +- ParallelTaskTerminator _terminator; ++ TaskTerminator _terminator; + + public: + G1FullGCMarkTask(G1FullCollector* collector); +diff --git a/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.hpp b/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.hpp +index 492c783b6..d4f06f56e 100644 +--- a/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.hpp ++++ b/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.hpp +@@ -32,6 +32,7 @@ + #include "gc/g1/g1StringDedup.hpp" + #include "gc/g1/heapRegionManager.hpp" + #include "gc/shared/referenceProcessor.hpp" ++#include "gc/shared/taskqueue.hpp" + #include "utilities/ticks.hpp" + + class G1FullGCTracer; +@@ -58,9 +59,9 @@ private: + + class G1RefProcTaskProxy : public AbstractGangTask { + typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; +- ProcessTask& _proc_task; +- G1FullCollector* _collector; +- ParallelTaskTerminator _terminator; ++ ProcessTask& _proc_task; ++ G1FullCollector* _collector; ++ TaskTerminator _terminator; + + public: + G1RefProcTaskProxy(ProcessTask& proc_task, +diff --git a/src/hotspot/share/gc/parallel/pcTasks.cpp b/src/hotspot/share/gc/parallel/pcTasks.cpp +index 57ff23550..30a0f26bc 100644 +--- a/src/hotspot/share/gc/parallel/pcTasks.cpp ++++ b/src/hotspot/share/gc/parallel/pcTasks.cpp +@@ -154,14 +154,15 @@ void RefProcTaskExecutor::execute(ProcessTask& task, uint ergo_workers) + "Ergonomically chosen workers (%u) must be equal to active workers (%u)", + ergo_workers, active_gc_threads); + OopTaskQueueSet* qset = ParCompactionManager::stack_array(); +- ParallelTaskTerminator terminator(active_gc_threads, qset); ++ TaskTerminator terminator(active_gc_threads, qset); ++ + GCTaskQueue* q = GCTaskQueue::create(); + for(uint i=0; ienqueue(new RefProcTaskProxy(task, i)); + } + if (task.marks_oops_alive() && (active_gc_threads>1)) { + for (uint j=0; jenqueue(new StealMarkingTask(&terminator)); ++ q->enqueue(new StealMarkingTask(terminator.terminator())); + } + } + PSParallelCompact::gc_task_manager()->execute_and_wait(q); +diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.cpp b/src/hotspot/share/gc/parallel/psParallelCompact.cpp +index f4cbb9f3a..f6ba28260 100644 +--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp ++++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp +@@ -2075,7 +2075,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm, + uint parallel_gc_threads = heap->gc_task_manager()->workers(); + uint active_gc_threads = heap->gc_task_manager()->active_workers(); + TaskQueueSetSuper* qset = ParCompactionManager::stack_array(); +- ParallelTaskTerminator terminator(active_gc_threads, qset); ++ TaskTerminator terminator(active_gc_threads, qset); + + ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm); + ParCompactionManager::FollowStackClosure follow_stack_closure(cm); +@@ -2104,7 +2104,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm, + + if (active_gc_threads > 1) { + for (uint j = 0; j < active_gc_threads; j++) { +- q->enqueue(new StealMarkingTask(&terminator)); ++ q->enqueue(new 
StealMarkingTask(terminator.terminator())); + } + } + +@@ -2433,12 +2433,12 @@ void PSParallelCompact::compact() { + uint parallel_gc_threads = heap->gc_task_manager()->workers(); + uint active_gc_threads = heap->gc_task_manager()->active_workers(); + TaskQueueSetSuper* qset = ParCompactionManager::region_array(); +- ParallelTaskTerminator terminator(active_gc_threads, qset); ++ TaskTerminator terminator(active_gc_threads, qset); + + GCTaskQueue* q = GCTaskQueue::create(); + prepare_region_draining_tasks(q, active_gc_threads); + enqueue_dense_prefix_tasks(q, active_gc_threads); +- enqueue_region_stealing_tasks(q, &terminator, active_gc_threads); ++ enqueue_region_stealing_tasks(q, terminator.terminator(), active_gc_threads); + + { + GCTraceTime(Trace, gc, phases) tm("Par Compact", &_gc_timer); +diff --git a/src/hotspot/share/gc/parallel/psScavenge.cpp b/src/hotspot/share/gc/parallel/psScavenge.cpp +index 87b2b4cac..9022654da 100644 +--- a/src/hotspot/share/gc/parallel/psScavenge.cpp ++++ b/src/hotspot/share/gc/parallel/psScavenge.cpp +@@ -166,11 +166,11 @@ void PSRefProcTaskExecutor::execute(ProcessTask& task, uint ergo_workers) + for(uint i=0; i < active_workers; i++) { + q->enqueue(new PSRefProcTaskProxy(task, i)); + } +- ParallelTaskTerminator terminator(active_workers, +- (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth()); ++ TaskTerminator terminator(active_workers, ++ (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth()); + if (task.marks_oops_alive() && active_workers > 1) { + for (uint j = 0; j < active_workers; j++) { +- q->enqueue(new StealTask(&terminator)); ++ q->enqueue(new StealTask(terminator.terminator())); + } + } + manager->execute_and_wait(q); +@@ -379,16 +379,15 @@ bool PSScavenge::invoke_no_policy() { + q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti)); + q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache)); + +- ParallelTaskTerminator terminator( +- active_workers, +- (TaskQueueSetSuper*) promotion_manager->stack_array_depth()); ++ TaskTerminator terminator(active_workers, ++ (TaskQueueSetSuper*) promotion_manager->stack_array_depth()); + // If active_workers can exceed 1, add a StrealTask. + // PSPromotionManager::drain_stacks_depth() does not fully drain its + // stacks and expects a StealTask to complete the draining if + // ParallelGCThreads is > 1. + if (gc_task_manager()->workers() > 1) { + for (uint j = 0; j < active_workers; j++) { +- q->enqueue(new StealTask(&terminator)); ++ q->enqueue(new StealTask(terminator.terminator())); + } + } + +diff --git a/src/hotspot/share/gc/shared/gc_globals.hpp b/src/hotspot/share/gc/shared/gc_globals.hpp +index 33ce1dadd..5f39a2671 100644 +--- a/src/hotspot/share/gc/shared/gc_globals.hpp ++++ b/src/hotspot/share/gc/shared/gc_globals.hpp +@@ -348,6 +348,10 @@ + develop(uintx, PromotionFailureALotInterval, 5, \ + "Total collections between promotion failures a lot") \ + \ ++ diagnostic(bool, UseOWSTTaskTerminator, true, \ ++ "Use Optimized Work Stealing Threads task termination " \ ++ "protocol") \ ++ \ + experimental(uintx, WorkStealingSleepMillis, 1, \ + "Sleep time when sleep is used for yields") \ + \ +diff --git a/src/hotspot/share/gc/shared/owstTaskTerminator.cpp b/src/hotspot/share/gc/shared/owstTaskTerminator.cpp +new file mode 100644 +index 000000000..3c32ab627 +--- /dev/null ++++ b/src/hotspot/share/gc/shared/owstTaskTerminator.cpp +@@ -0,0 +1,171 @@ ++/* ++ * Copyright (c) 2018, Red Hat, Inc. All rights reserved. 
++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#include "precompiled.hpp" ++ ++#include "gc/shared/owstTaskTerminator.hpp" ++#include "logging/log.hpp" ++ ++bool OWSTTaskTerminator::exit_termination(size_t tasks, TerminatorTerminator* terminator) { ++ return tasks > 0 || (terminator != NULL && terminator->should_exit_termination()); ++} ++ ++bool OWSTTaskTerminator::offer_termination(TerminatorTerminator* terminator) { ++ assert(_n_threads > 0, "Initialization is incorrect"); ++ assert(_offered_termination < _n_threads, "Invariant"); ++ assert(_blocker != NULL, "Invariant"); ++ ++ // Single worker, done ++ if (_n_threads == 1) { ++ _offered_termination = 1; ++ return true; ++ } ++ ++ _blocker->lock_without_safepoint_check(); ++ // All arrived, done ++ _offered_termination++; ++ if (_offered_termination == _n_threads) { ++ _blocker->notify_all(); ++ _blocker->unlock(); ++ return true; ++ } ++ ++ Thread* the_thread = Thread::current(); ++ while (true) { ++ if (_spin_master == NULL) { ++ _spin_master = the_thread; ++ ++ _blocker->unlock(); ++ ++ if (do_spin_master_work(terminator)) { ++ assert(_offered_termination == _n_threads, "termination condition"); ++ return true; ++ } else { ++ _blocker->lock_without_safepoint_check(); ++ } ++ } else { ++ _blocker->wait(true, WorkStealingSleepMillis); ++ ++ if (_offered_termination == _n_threads) { ++ _blocker->unlock(); ++ return true; ++ } ++ } ++ ++ size_t tasks = tasks_in_queue_set(); ++ if (exit_termination(tasks, terminator)) { ++ _offered_termination--; ++ _blocker->unlock(); ++ return false; ++ } ++ } ++} ++ ++bool OWSTTaskTerminator::do_spin_master_work(TerminatorTerminator* terminator) { ++ uint yield_count = 0; ++ // Number of hard spin loops done since last yield ++ uint hard_spin_count = 0; ++ // Number of iterations in the hard spin loop. ++ uint hard_spin_limit = WorkStealingHardSpins; ++ ++ // If WorkStealingSpinToYieldRatio is 0, no hard spinning is done. ++ // If it is greater than 0, then start with a small number ++ // of spins and increase number with each turn at spinning until ++ // the count of hard spins exceeds WorkStealingSpinToYieldRatio. ++ // Then do a yield() call and start spinning afresh. ++ if (WorkStealingSpinToYieldRatio > 0) { ++ hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio; ++ hard_spin_limit = MAX2(hard_spin_limit, 1U); ++ } ++ // Remember the initial spin limit. ++ uint hard_spin_start = hard_spin_limit; ++ ++ // Loop waiting for all threads to offer termination or ++ // more work. ++ while (true) { ++ // Look for more work. 
++ // Periodically sleep() instead of yield() to give threads ++ // waiting on the cores the chance to grab this code ++ if (yield_count <= WorkStealingYieldsBeforeSleep) { ++ // Do a yield or hardspin. For purposes of deciding whether ++ // to sleep, count this as a yield. ++ yield_count++; ++ ++ // Periodically call yield() instead spinning ++ // After WorkStealingSpinToYieldRatio spins, do a yield() call ++ // and reset the counts and starting limit. ++ if (hard_spin_count > WorkStealingSpinToYieldRatio) { ++ yield(); ++ hard_spin_count = 0; ++ hard_spin_limit = hard_spin_start; ++#ifdef TRACESPINNING ++ _total_yields++; ++#endif ++ } else { ++ // Hard spin this time ++ // Increase the hard spinning period but only up to a limit. ++ hard_spin_limit = MIN2(2*hard_spin_limit, ++ (uint) WorkStealingHardSpins); ++ for (uint j = 0; j < hard_spin_limit; j++) { ++ SpinPause(); ++ } ++ hard_spin_count++; ++#ifdef TRACESPINNING ++ _total_spins++; ++#endif ++ } ++ } else { ++ log_develop_trace(gc, task)("OWSTTaskTerminator::do_spin_master_work() thread " PTR_FORMAT " sleeps after %u yields", ++ p2i(Thread::current()), yield_count); ++ yield_count = 0; ++ ++ MonitorLockerEx locker(_blocker, Mutex::_no_safepoint_check_flag); ++ _spin_master = NULL; ++ locker.wait(Mutex::_no_safepoint_check_flag, WorkStealingSleepMillis); ++ if (_spin_master == NULL) { ++ _spin_master = Thread::current(); ++ } else { ++ return false; ++ } ++ } ++ ++#ifdef TRACESPINNING ++ _total_peeks++; ++#endif ++ size_t tasks = tasks_in_queue_set(); ++ if (exit_termination(tasks, terminator)) { ++ MonitorLockerEx locker(_blocker, Mutex::_no_safepoint_check_flag); ++ if (tasks >= _offered_termination - 1) { ++ locker.notify_all(); ++ } else { ++ for (; tasks > 1; tasks--) { ++ locker.notify(); ++ } ++ } ++ _spin_master = NULL; ++ return false; ++ } else if (_offered_termination == _n_threads) { ++ return true; ++ } ++ } ++} +diff --git a/src/hotspot/share/gc/shared/owstTaskTerminator.hpp b/src/hotspot/share/gc/shared/owstTaskTerminator.hpp +new file mode 100644 +index 000000000..9e6fe135a +--- /dev/null ++++ b/src/hotspot/share/gc/shared/owstTaskTerminator.hpp +@@ -0,0 +1,79 @@ ++/* ++ * Copyright (c) 2018, Red Hat, Inc. All rights reserved. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++#ifndef SHARE_VM_GC_SHARED_OWSTTASKTERMINATOR_HPP ++#define SHARE_VM_GC_SHARED_OWSTTASKTERMINATOR_HPP ++ ++#include "gc/shared/taskqueue.hpp" ++#include "runtime/mutex.hpp" ++#include "runtime/thread.hpp" ++ ++/* ++ * OWST stands for Optimized Work Stealing Threads ++ * ++ * This is an enhanced implementation of Google's work stealing ++ * protocol, which is described in the paper: ++ * "Wessam Hassanein. 2016. Understanding and improving JVM GC work ++ * stealing at the data center scale. In Proceedings of the 2016 ACM ++ * SIGPLAN International Symposium on Memory Management (ISMM 2016). ACM, ++ * New York, NY, USA, 46-54. DOI: https://doi.org/10.1145/2926697.2926706" ++ * ++ * Instead of a dedicated spin-master, our implementation will let spin-master relinquish ++ * the role before it goes to sleep/wait, allowing newly arrived threads to compete for the role. ++ * The intention of above enhancement is to reduce spin-master's latency on detecting new tasks ++ * for stealing and termination condition. ++ */ ++ ++class OWSTTaskTerminator: public ParallelTaskTerminator { ++private: ++ Monitor* _blocker; ++ Thread* _spin_master; ++ ++public: ++ OWSTTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) : ++ ParallelTaskTerminator(n_threads, queue_set), _spin_master(NULL) { ++ _blocker = new Monitor(Mutex::leaf, "OWSTTaskTerminator", false, Monitor::_safepoint_check_never); ++ } ++ ++ virtual ~OWSTTaskTerminator() { ++ assert(_blocker != NULL, "Can not be NULL"); ++ delete _blocker; ++ } ++ ++ bool offer_termination(TerminatorTerminator* terminator); ++ ++protected: ++ // If should exit current termination protocol ++ virtual bool exit_termination(size_t tasks, TerminatorTerminator* terminator); ++ ++private: ++ size_t tasks_in_queue_set() { return _queue_set->tasks(); } ++ ++ /* ++ * Perform spin-master task. ++ * Return true if termination condition is detected, otherwise return false ++ */ ++ bool do_spin_master_work(TerminatorTerminator* terminator); ++}; ++ ++ ++#endif // SHARE_VM_GC_SHARED_OWSTTASKTERMINATOR_HPP +diff --git a/src/hotspot/share/gc/shared/taskqueue.cpp b/src/hotspot/share/gc/shared/taskqueue.cpp +index 024fbbc9b..2738d6823 100644 +--- a/src/hotspot/share/gc/shared/taskqueue.cpp ++++ b/src/hotspot/share/gc/shared/taskqueue.cpp +@@ -24,6 +24,7 @@ + + #include "precompiled.hpp" + #include "gc/shared/taskqueue.hpp" ++#include "gc/shared/owstTaskTerminator.hpp" + #include "oops/oop.inline.hpp" + #include "logging/log.hpp" + #include "runtime/atomic.hpp" +@@ -265,3 +266,25 @@ void ParallelTaskTerminator::reset_for_reuse(uint n_threads) { + reset_for_reuse(); + _n_threads = n_threads; + } ++ ++TaskTerminator::TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) : ++ _terminator(UseOWSTTaskTerminator ? 
new OWSTTaskTerminator(n_threads, queue_set) ++ : new ParallelTaskTerminator(n_threads, queue_set)) { ++} ++ ++TaskTerminator::~TaskTerminator() { ++ if (_terminator != NULL) { ++ delete _terminator; ++ } ++} ++ ++// Move assignment ++TaskTerminator& TaskTerminator::operator=(const TaskTerminator& o) { ++ if (_terminator != NULL) { ++ delete _terminator; ++ } ++ _terminator = o.terminator(); ++ const_cast(o)._terminator = NULL; ++ return *this; ++} ++ +diff --git a/src/hotspot/share/gc/shared/taskqueue.hpp b/src/hotspot/share/gc/shared/taskqueue.hpp +index 877b843ab..1cafcdeb2 100644 +--- a/src/hotspot/share/gc/shared/taskqueue.hpp ++++ b/src/hotspot/share/gc/shared/taskqueue.hpp +@@ -358,6 +358,8 @@ protected: + public: + // Returns "true" if some TaskQueue in the set contains a task. + virtual bool peek() = 0; ++ // Tasks in queue ++ virtual uint tasks() const = 0; + }; + + template class TaskQueueSetSuperImpl: public CHeapObj, public TaskQueueSetSuper { +@@ -389,6 +391,7 @@ public: + bool steal(uint queue_num, int* seed, E& t); + + bool peek(); ++ uint tasks() const; + + uint size() const { return _n; } + }; +@@ -414,6 +417,15 @@ bool GenericTaskQueueSet::peek() { + return false; + } + ++template ++uint GenericTaskQueueSet::tasks() const { ++ uint n = 0; ++ for (uint j = 0; j < _n; j++) { ++ n += _queues[j]->size(); ++ } ++ return n; ++} ++ + // When to terminate from the termination protocol. + class TerminatorTerminator: public CHeapObj { + public: +@@ -425,8 +437,8 @@ public: + + #undef TRACESPINNING + +-class ParallelTaskTerminator: public StackObj { +-private: ++class ParallelTaskTerminator: public CHeapObj { ++protected: + uint _n_threads; + TaskQueueSetSuper* _queue_set; + +@@ -462,7 +474,7 @@ public: + // As above, but it also terminates if the should_exit_termination() + // method of the terminator parameter returns true. If terminator is + // NULL, then it is ignored. +- bool offer_termination(TerminatorTerminator* terminator); ++ virtual bool offer_termination(TerminatorTerminator* terminator); + + // Reset the terminator, so that it may be reused again. 
+ // The caller is responsible for ensuring that this is done +@@ -481,6 +493,38 @@ public: + #endif + }; + ++#ifdef _MSC_VER ++#pragma warning(push) ++// warning C4521: multiple copy constructors specified ++#pragma warning(disable:4521) ++// warning C4522: multiple assignment operators specified ++#pragma warning(disable:4522) ++#endif ++ ++class TaskTerminator : public StackObj { ++private: ++ ParallelTaskTerminator* _terminator; ++ ++ // Disable following copy constructors and assignment operator ++ TaskTerminator(TaskTerminator& o) { } ++ TaskTerminator(const TaskTerminator& o) { } ++ TaskTerminator& operator=(TaskTerminator& o) { return *this; } ++public: ++ TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set); ++ ~TaskTerminator(); ++ ++ // Move assignment ++ TaskTerminator& operator=(const TaskTerminator& o); ++ ++ ParallelTaskTerminator* terminator() const { ++ return _terminator; ++ } ++}; ++#ifdef _MSC_VER ++#pragma warning(pop) ++#endif ++ ++ + typedef GenericTaskQueue OopTaskQueue; + typedef GenericTaskQueueSet OopTaskQueueSet; + diff --git a/8205921-Optimizing-best_of_2-work-stealing-queue-selection.patch b/8205921-Optimizing-best_of_2-work-stealing-queue-selection.patch new file mode 100644 index 0000000..9035b81 --- /dev/null +++ b/8205921-Optimizing-best_of_2-work-stealing-queue-selection.patch @@ -0,0 +1,614 @@ +diff --git a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp +index 5fc5ee8..708e92c 100644 +--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp ++++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp +@@ -574,7 +574,6 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen, + log_warning(gc)("task_queues allocation failure."); + return; + } +- _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC); + typedef Padded PaddedOopTaskQueue; + for (i = 0; i < num_queues; i++) { + PaddedOopTaskQueue *q = new PaddedOopTaskQueue(); +@@ -586,7 +585,6 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen, + } + for (i = 0; i < num_queues; i++) { + _task_queues->queue(i)->initialize(); +- _hash_seed[i] = 17; // copied from ParNew + } + } + } +@@ -3482,7 +3480,6 @@ void CMSConcMarkingTask::do_work_steal(int i) { + oop obj_to_scan; + CMSBitMap* bm = &(_collector->_markBitMap); + CMSMarkStack* ovflw = &(_collector->_markStack); +- int* seed = _collector->hash_seed(i); + ParConcMarkingClosure cl(_collector, this, work_q, bm, ovflw); + while (true) { + cl.trim_queue(0); +@@ -3492,7 +3489,7 @@ void CMSConcMarkingTask::do_work_steal(int i) { + // overflow stack may already have been stolen from us. + // assert(work_q->size() > 0, "Work from overflow stack"); + continue; +- } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) { ++ } else if (task_queues()->steal(i, /* reference */ obj_to_scan)) { + assert(oopDesc::is_oop(obj_to_scan), "Should be an oop"); + assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object"); + obj_to_scan->oop_iterate(&cl); +@@ -4392,7 +4389,7 @@ class CMSParRemarkTask: public CMSParMarkTask { + ParMarkRefsIntoAndScanClosure* cl); + + // ... work stealing for the above +- void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl, int* seed); ++ void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl); + }; + + class RemarkCLDClosure : public CLDClosure { +@@ -4537,7 +4534,7 @@ void CMSParRemarkTask::work(uint worker_id) { + // ---------- ... and drain overflow list. 
+ _timer.reset(); + _timer.start(); +- do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id)); ++ do_work_steal(worker_id, &par_mrias_cl); + _timer.stop(); + log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds()); + } +@@ -4686,8 +4683,7 @@ CMSParRemarkTask::do_dirty_card_rescan_tasks( + + // . see if we can share work_queues with ParNew? XXX + void +-CMSParRemarkTask::do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl, +- int* seed) { ++CMSParRemarkTask::do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl) { + OopTaskQueue* work_q = work_queue(i); + NOT_PRODUCT(int num_steals = 0;) + oop obj_to_scan; +@@ -4718,7 +4714,7 @@ CMSParRemarkTask::do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl, + // Verify that we have no work before we resort to stealing + assert(work_q->size() == 0, "Have work, shouldn't steal"); + // Try to steal from other queues that have work +- if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) { ++ if (task_queues()->steal(i, /* reference */ obj_to_scan)) { + NOT_PRODUCT(num_steals++;) + assert(oopDesc::is_oop(obj_to_scan), "Oops, not an oop!"); + assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?"); +@@ -5108,8 +5104,7 @@ public: + + void do_work_steal(int i, + CMSParDrainMarkingStackClosure* drain, +- CMSParKeepAliveClosure* keep_alive, +- int* seed); ++ CMSParKeepAliveClosure* keep_alive); + + virtual void work(uint worker_id); + }; +@@ -5127,8 +5122,7 @@ void CMSRefProcTaskProxy::work(uint worker_id) { + CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map); + _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack); + if (_task.marks_oops_alive()) { +- do_work_steal(worker_id, &par_drain_stack, &par_keep_alive, +- _collector->hash_seed(worker_id)); ++ do_work_steal(worker_id, &par_drain_stack, &par_keep_alive); + } + assert(work_queue(worker_id)->size() == 0, "work_queue should be empty"); + assert(_collector->_overflow_list == NULL, "non-empty _overflow_list"); +@@ -5147,8 +5141,7 @@ CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector, + // . see if we can share work_queues with ParNew? 
XXX + void CMSRefProcTaskProxy::do_work_steal(int i, + CMSParDrainMarkingStackClosure* drain, +- CMSParKeepAliveClosure* keep_alive, +- int* seed) { ++ CMSParKeepAliveClosure* keep_alive) { + OopTaskQueue* work_q = work_queue(i); + NOT_PRODUCT(int num_steals = 0;) + oop obj_to_scan; +@@ -5177,7 +5170,7 @@ void CMSRefProcTaskProxy::do_work_steal(int i, + // Verify that we have no work before we resort to stealing + assert(work_q->size() == 0, "Have work, shouldn't steal"); + // Try to steal from other queues that have work +- if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) { ++ if (task_queues()->steal(i, /* reference */ obj_to_scan)) { + NOT_PRODUCT(num_steals++;) + assert(oopDesc::is_oop(obj_to_scan), "Oops, not an oop!"); + assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?"); +diff --git a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp +index 62f0d60..4f27239 100644 +--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp ++++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp +@@ -544,8 +544,6 @@ class CMSCollector: public CHeapObj { + Stack _preserved_oop_stack; + Stack _preserved_mark_stack; + +- int* _hash_seed; +- + // In support of multi-threaded concurrent phases + YieldingFlexibleWorkGang* _conc_workers; + +@@ -713,7 +711,6 @@ class CMSCollector: public CHeapObj { + bool stop_world_and_do(CMS_op_type op); + + OopTaskQueueSet* task_queues() { return _task_queues; } +- int* hash_seed(int i) { return &_hash_seed[i]; } + YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; } + + // Support for parallelizing Eden rescan in CMS remark phase +diff --git a/src/hotspot/share/gc/cms/parNewGeneration.cpp b/src/hotspot/share/gc/cms/parNewGeneration.cpp +index 0febc52..1b95cf5 100644 +--- a/src/hotspot/share/gc/cms/parNewGeneration.cpp ++++ b/src/hotspot/share/gc/cms/parNewGeneration.cpp +@@ -106,7 +106,6 @@ ParScanThreadState::ParScanThreadState(Space* to_space_, + #endif // TASKQUEUE_STATS + + _survivor_chunk_array = (ChunkArray*) old_gen()->get_data_recorder(thread_num()); +- _hash_seed = 17; // Might want to take time-based random value. + _start = os::elapsedTime(); + _old_gen_closure.set_generation(old_gen_); + _old_gen_root_closure.set_generation(old_gen_); +@@ -550,7 +549,6 @@ void ParEvacuateFollowersClosure::do_void() { + + // Attempt to steal work from promoted. + if (task_queues()->steal(par_scan_state()->thread_num(), +- par_scan_state()->hash_seed(), + obj_to_scan)) { + bool res = work_q->push(obj_to_scan); + assert(res, "Empty queue should have room for a push."); +diff --git a/src/hotspot/share/gc/cms/parNewGeneration.hpp b/src/hotspot/share/gc/cms/parNewGeneration.hpp +index 7bf37e5..0f3502b 100644 +--- a/src/hotspot/share/gc/cms/parNewGeneration.hpp ++++ b/src/hotspot/share/gc/cms/parNewGeneration.hpp +@@ -96,7 +96,6 @@ class ParScanThreadState { + + HeapWord *_young_old_boundary; + +- int _hash_seed; + int _thread_num; + AgeTable _ageTable; + +@@ -165,7 +164,6 @@ class ParScanThreadState { + // Is new_obj a candidate for scan_partial_array_and_push_remainder method. + inline bool should_be_partially_scanned(oop new_obj, oop old_obj) const; + +- int* hash_seed() { return &_hash_seed; } + int thread_num() { return _thread_num; } + + // Allocate a to-space block of size "sz", or else return NULL. 
+diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp +index 920f796..3749a99 100644 +--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp ++++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp +@@ -2466,8 +2466,8 @@ void G1CMTask::print_stats() { + hits, misses, percent_of(hits, hits + misses)); + } + +-bool G1ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, G1TaskQueueEntry& task_entry) { +- return _task_queues->steal(worker_id, hash_seed, task_entry); ++bool G1ConcurrentMark::try_stealing(uint worker_id, G1TaskQueueEntry& task_entry) { ++ return _task_queues->steal(worker_id, task_entry); + } + + /***************************************************************************** +@@ -2773,7 +2773,7 @@ void G1CMTask::do_marking_step(double time_target_ms, + "only way to reach here"); + while (!has_aborted()) { + G1TaskQueueEntry entry; +- if (_cm->try_stealing(_worker_id, &_hash_seed, entry)) { ++ if (_cm->try_stealing(_worker_id, entry)) { + scan_task_entry(entry); + + // And since we're towards the end, let's totally drain the +@@ -2915,7 +2915,6 @@ G1CMTask::G1CMTask(uint worker_id, + _refs_reached(0), + _refs_reached_limit(0), + _real_refs_reached_limit(0), +- _hash_seed(17), + _has_aborted(false), + _has_timed_out(false), + _draining_satb_buffers(false), +diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp +index dd253bb..b5eb261 100644 +--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp ++++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp +@@ -519,7 +519,7 @@ public: + } + + // Attempts to steal an object from the task queues of other tasks +- bool try_stealing(uint worker_id, int* hash_seed, G1TaskQueueEntry& task_entry); ++ bool try_stealing(uint worker_id, G1TaskQueueEntry& task_entry); + + G1ConcurrentMark(G1CollectedHeap* g1h, + G1RegionToSpaceMapper* prev_bitmap_storage, +@@ -685,8 +685,6 @@ private: + // it was decreased). 
+ size_t _real_refs_reached_limit; + +- // Used by the work stealing +- int _hash_seed; + // If true, then the task has aborted for some reason + bool _has_aborted; + // Set when the task aborts because it has met its time quota +diff --git a/src/hotspot/share/gc/g1/g1FullGCMarker.cpp b/src/hotspot/share/gc/g1/g1FullGCMarker.cpp +index 7738f5c..7041d38 100644 +--- a/src/hotspot/share/gc/g1/g1FullGCMarker.cpp ++++ b/src/hotspot/share/gc/g1/g1FullGCMarker.cpp +@@ -46,15 +46,14 @@ G1FullGCMarker::~G1FullGCMarker() { + void G1FullGCMarker::complete_marking(OopQueueSet* oop_stacks, + ObjArrayTaskQueueSet* array_stacks, + ParallelTaskTerminator* terminator) { +- int hash_seed = 17; + do { + drain_stack(); + ObjArrayTask steal_array; +- if (array_stacks->steal(_worker_id, &hash_seed, steal_array)) { ++ if (array_stacks->steal(_worker_id, steal_array)) { + follow_array_chunk(objArrayOop(steal_array.obj()), steal_array.index()); + } else { + oop steal_oop; +- if (oop_stacks->steal(_worker_id, &hash_seed, steal_oop)) { ++ if (oop_stacks->steal(_worker_id, steal_oop)) { + follow_object(steal_oop); + } + } +diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp +index 1551c70..9da132d 100644 +--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp ++++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp +@@ -47,7 +47,6 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, + _age_table(false), + _tenuring_threshold(g1h->g1_policy()->tenuring_threshold()), + _scanner(g1h, this), +- _hash_seed(17), + _worker_id(worker_id), + _stack_trim_upper_threshold(GCDrainStackTargetSize * 2 + 1), + _stack_trim_lower_threshold(GCDrainStackTargetSize), +diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp +index ed80fb0..706f985 100644 +--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp ++++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp +@@ -57,7 +57,6 @@ class G1ParScanThreadState : public CHeapObj { + uint _tenuring_threshold; + G1ScanEvacuatedObjClosure _scanner; + +- int _hash_seed; + uint _worker_id; + + // Upper and lower threshold to start and end work queue draining. 
+diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp +index 0942675..554bb3f 100644 +--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp ++++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp +@@ -140,7 +140,7 @@ inline void G1ParScanThreadState::dispatch_reference(StarTask ref) { + + void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) { + StarTask stolen_task; +- while (task_queues->steal(_worker_id, &_hash_seed, stolen_task)) { ++ while (task_queues->steal(_worker_id, stolen_task)) { + assert(verify_task(stolen_task), "sanity"); + dispatch_reference(stolen_task); + +diff --git a/src/hotspot/share/gc/parallel/pcTasks.cpp b/src/hotspot/share/gc/parallel/pcTasks.cpp +index 30a0f26..b2464c8 100644 +--- a/src/hotspot/share/gc/parallel/pcTasks.cpp ++++ b/src/hotspot/share/gc/parallel/pcTasks.cpp +@@ -184,13 +184,12 @@ void StealMarkingTask::do_it(GCTaskManager* manager, uint which) { + + oop obj = NULL; + ObjArrayTask task; +- int random_seed = 17; + do { +- while (ParCompactionManager::steal_objarray(which, &random_seed, task)) { ++ while (ParCompactionManager::steal_objarray(which, task)) { + cm->follow_contents((objArrayOop)task.obj(), task.index()); + cm->follow_marking_stacks(); + } +- while (ParCompactionManager::steal(which, &random_seed, obj)) { ++ while (ParCompactionManager::steal(which, obj)) { + cm->follow_contents(obj); + cm->follow_marking_stacks(); + } +@@ -218,10 +217,9 @@ void CompactionWithStealingTask::do_it(GCTaskManager* manager, uint which) { + guarantee(cm->region_stack()->is_empty(), "Not empty"); + + size_t region_index = 0; +- int random_seed = 17; + + while(true) { +- if (ParCompactionManager::steal(which, &random_seed, region_index)) { ++ if (ParCompactionManager::steal(which, region_index)) { + PSParallelCompact::fill_and_update_region(cm, region_index); + cm->drain_region_stacks(); + } else { +diff --git a/src/hotspot/share/gc/parallel/psCompactionManager.hpp b/src/hotspot/share/gc/parallel/psCompactionManager.hpp +index 69d0b5c..a79c360 100644 +--- a/src/hotspot/share/gc/parallel/psCompactionManager.hpp ++++ b/src/hotspot/share/gc/parallel/psCompactionManager.hpp +@@ -159,9 +159,9 @@ private: + // Access function for compaction managers + static ParCompactionManager* gc_thread_compaction_manager(uint index); + +- static bool steal(int queue_num, int* seed, oop& t); +- static bool steal_objarray(int queue_num, int* seed, ObjArrayTask& t); +- static bool steal(int queue_num, int* seed, size_t& region); ++ static bool steal(int queue_num, oop& t); ++ static bool steal_objarray(int queue_num, ObjArrayTask& t); ++ static bool steal(int queue_num, size_t& region); + + // Process tasks remaining on any marking stack + void follow_marking_stacks(); +diff --git a/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp b/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp +index f5f7201..dbac5a0 100644 +--- a/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp ++++ b/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp +@@ -37,16 +37,16 @@ + #include "utilities/debug.hpp" + #include "utilities/globalDefinitions.hpp" + +-inline bool ParCompactionManager::steal(int queue_num, int* seed, oop& t) { +- return stack_array()->steal(queue_num, seed, t); ++inline bool ParCompactionManager::steal(int queue_num, oop& t) { ++ return stack_array()->steal(queue_num, t); + } + +-inline bool ParCompactionManager::steal_objarray(int 
queue_num, int* seed, ObjArrayTask& t) { +- return _objarray_queues->steal(queue_num, seed, t); ++inline bool ParCompactionManager::steal_objarray(int queue_num, ObjArrayTask& t) { ++ return _objarray_queues->steal(queue_num, t); + } + +-inline bool ParCompactionManager::steal(int queue_num, int* seed, size_t& region) { +- return region_array()->steal(queue_num, seed, region); ++inline bool ParCompactionManager::steal(int queue_num, size_t& region) { ++ return region_array()->steal(queue_num, region); + } + + inline void ParCompactionManager::push(oop obj) { +diff --git a/src/hotspot/share/gc/parallel/psPromotionManager.hpp b/src/hotspot/share/gc/parallel/psPromotionManager.hpp +index 58b1756..bff20ac 100644 +--- a/src/hotspot/share/gc/parallel/psPromotionManager.hpp ++++ b/src/hotspot/share/gc/parallel/psPromotionManager.hpp +@@ -159,7 +159,7 @@ class PSPromotionManager { + static PSPromotionManager* gc_thread_promotion_manager(uint index); + static PSPromotionManager* vm_thread_promotion_manager(); + +- static bool steal_depth(int queue_num, int* seed, StarTask& t); ++ static bool steal_depth(int queue_num, StarTask& t); + + PSPromotionManager(); + +diff --git a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp +index 1ef9007..8c590e0 100644 +--- a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp ++++ b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp +@@ -322,8 +322,8 @@ inline void PSPromotionManager::process_popped_location_depth(StarTask p) { + } + } + +-inline bool PSPromotionManager::steal_depth(int queue_num, int* seed, StarTask& t) { +- return stack_array_depth()->steal(queue_num, seed, t); ++inline bool PSPromotionManager::steal_depth(int queue_num, StarTask& t) { ++ return stack_array_depth()->steal(queue_num, t); + } + + #if TASKQUEUE_STATS +diff --git a/src/hotspot/share/gc/parallel/psTasks.cpp b/src/hotspot/share/gc/parallel/psTasks.cpp +index 25ed498..aa2c6b7 100644 +--- a/src/hotspot/share/gc/parallel/psTasks.cpp ++++ b/src/hotspot/share/gc/parallel/psTasks.cpp +@@ -141,10 +141,9 @@ void StealTask::do_it(GCTaskManager* manager, uint which) { + guarantee(pm->stacks_empty(), + "stacks should be empty at this point"); + +- int random_seed = 17; + while(true) { + StarTask p; +- if (PSPromotionManager::steal_depth(which, &random_seed, p)) { ++ if (PSPromotionManager::steal_depth(which, p)) { + TASKQUEUE_STATS_ONLY(pm->record_steal(p)); + pm->process_popped_location_depth(p); + pm->drain_stacks_depth(true); +diff --git a/src/hotspot/share/gc/shared/taskqueue.cpp b/src/hotspot/share/gc/shared/taskqueue.cpp +index 2738d68..47639bd 100644 +--- a/src/hotspot/share/gc/shared/taskqueue.cpp ++++ b/src/hotspot/share/gc/shared/taskqueue.cpp +@@ -112,24 +112,6 @@ void TaskQueueStats::verify() const + #endif // ASSERT + #endif // TASKQUEUE_STATS + +-int TaskQueueSetSuper::randomParkAndMiller(int *seed0) { +- const int a = 16807; +- const int m = 2147483647; +- const int q = 127773; /* m div a */ +- const int r = 2836; /* m mod a */ +- assert(sizeof(int) == 4, "I think this relies on that"); +- int seed = *seed0; +- int hi = seed / q; +- int lo = seed % q; +- int test = a * lo - r * hi; +- if (test > 0) +- seed = test; +- else +- seed = test + m; +- *seed0 = seed; +- return seed; +-} +- + ParallelTaskTerminator:: + ParallelTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) : + _n_threads(n_threads), +diff --git a/src/hotspot/share/gc/shared/taskqueue.hpp 
b/src/hotspot/share/gc/shared/taskqueue.hpp +index 1cafcde..f162ffb 100644 +--- a/src/hotspot/share/gc/shared/taskqueue.hpp ++++ b/src/hotspot/share/gc/shared/taskqueue.hpp +@@ -303,12 +303,30 @@ public: + template void iterate(Fn fn); + + private: ++ DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, 0); + // Element array. + volatile E* _elems; ++ ++ DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(E*)); ++ // Queue owner local variables. Not to be accessed by other threads. ++ ++ static const uint InvalidQueueId = uint(-1); ++ uint _last_stolen_queue_id; // The id of the queue we last stole from ++ ++ int _seed; // Current random seed used for selecting a random queue during stealing. ++ ++ DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(uint) + sizeof(int)); ++public: ++ int next_random_queue_id(); ++ ++ void set_last_stolen_queue_id(uint id) { _last_stolen_queue_id = id; } ++ uint last_stolen_queue_id() const { return _last_stolen_queue_id; } ++ bool is_last_stolen_queue_id_valid() const { return _last_stolen_queue_id != InvalidQueueId; } ++ void invalidate_last_stolen_queue_id() { _last_stolen_queue_id = InvalidQueueId; } + }; + + template +-GenericTaskQueue::GenericTaskQueue() { ++GenericTaskQueue::GenericTaskQueue() : _last_stolen_queue_id(InvalidQueueId), _seed(17 /* random number */) { + assert(sizeof(Age) == sizeof(size_t), "Depends on this."); + } + +@@ -353,8 +371,6 @@ private: + }; + + class TaskQueueSetSuper { +-protected: +- static int randomParkAndMiller(int* seed0); + public: + // Returns "true" if some TaskQueue in the set contains a task. + virtual bool peek() = 0; +@@ -374,21 +390,18 @@ private: + public: + typedef typename T::element_type E; + +- GenericTaskQueueSet(int n); ++ GenericTaskQueueSet(uint n); + ~GenericTaskQueueSet(); + +- bool steal_best_of_2(uint queue_num, int* seed, E& t); ++ bool steal_best_of_2(uint queue_num, E& t); + + void register_queue(uint i, T* q); + + T* queue(uint n); + +- // The thread with queue number "queue_num" (and whose random number seed is +- // at "seed") is trying to steal a task from some other queue. (It may try +- // several queues, according to some configuration parameter.) If some steal +- // succeeds, returns "true" and sets "t" to the stolen task, otherwise returns +- // false. +- bool steal(uint queue_num, int* seed, E& t); ++ // Try to steal a task from some other queue than queue_num. It may perform several attempts at doing so. ++ // Returns if stealing succeeds, and sets "t" to the stolen task. 
++ bool steal(uint queue_num, E& t); + + bool peek(); + uint tasks() const; +diff --git a/src/hotspot/share/gc/shared/taskqueue.inline.hpp b/src/hotspot/share/gc/shared/taskqueue.inline.hpp +index 289dcdc..ac532e9 100644 +--- a/src/hotspot/share/gc/shared/taskqueue.inline.hpp ++++ b/src/hotspot/share/gc/shared/taskqueue.inline.hpp +@@ -34,10 +34,10 @@ + #include "utilities/stack.inline.hpp" + + template +-inline GenericTaskQueueSet::GenericTaskQueueSet(int n) : _n(n) { ++inline GenericTaskQueueSet::GenericTaskQueueSet(uint n) : _n(n) { + typedef T* GenericTaskQueuePtr; + _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n, F); +- for (int i = 0; i < n; i++) { ++ for (uint i = 0; i < n; i++) { + _queues[i] = NULL; + } + } +@@ -232,18 +232,71 @@ bool GenericTaskQueue::pop_global(volatile E& t) { + return resAge == oldAge; + } + ++inline int randomParkAndMiller(int *seed0) { ++ const int a = 16807; ++ const int m = 2147483647; ++ const int q = 127773; /* m div a */ ++ const int r = 2836; /* m mod a */ ++ STATIC_ASSERT(sizeof(int) == 4); ++ int seed = *seed0; ++ int hi = seed / q; ++ int lo = seed % q; ++ int test = a * lo - r * hi; ++ if (test > 0) { ++ seed = test; ++ } else { ++ seed = test + m; ++ } ++ *seed0 = seed; ++ return seed; ++} ++ ++template ++int GenericTaskQueue::next_random_queue_id() { ++ return randomParkAndMiller(&_seed); ++} ++ + template bool +-GenericTaskQueueSet::steal_best_of_2(uint queue_num, int* seed, E& t) { ++GenericTaskQueueSet::steal_best_of_2(uint queue_num, E& t) { + if (_n > 2) { ++ T* const local_queue = _queues[queue_num]; + uint k1 = queue_num; +- while (k1 == queue_num) k1 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n; ++ ++ if (local_queue->is_last_stolen_queue_id_valid()) { ++ k1 = local_queue->last_stolen_queue_id(); ++ assert(k1 != queue_num, "Should not be the same"); ++ } else { ++ while (k1 == queue_num) { ++ k1 = local_queue->next_random_queue_id() % _n; ++ } ++ } ++ + uint k2 = queue_num; +- while (k2 == queue_num || k2 == k1) k2 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n; ++ while (k2 == queue_num || k2 == k1) { ++ k2 = local_queue->next_random_queue_id() % _n; ++ } + // Sample both and try the larger. + uint sz1 = _queues[k1]->size(); + uint sz2 = _queues[k2]->size(); +- if (sz2 > sz1) return _queues[k2]->pop_global(t); +- else return _queues[k1]->pop_global(t); ++ ++ uint sel_k = 0; ++ bool suc = false; ++ ++ if (sz2 > sz1) { ++ sel_k = k2; ++ suc = _queues[k2]->pop_global(t); ++ } else if (sz1 > 0) { ++ sel_k = k1; ++ suc = _queues[k1]->pop_global(t); ++ } ++ ++ if (suc) { ++ local_queue->set_last_stolen_queue_id(sel_k); ++ } else { ++ local_queue->invalidate_last_stolen_queue_id(); ++ } ++ ++ return suc; + } else if (_n == 2) { + // Just try the other one. 
+ uint k = (queue_num + 1) % 2; +@@ -255,9 +308,9 @@ GenericTaskQueueSet::steal_best_of_2(uint queue_num, int* seed, E& t) { + } + + template bool +-GenericTaskQueueSet::steal(uint queue_num, int* seed, E& t) { ++GenericTaskQueueSet::steal(uint queue_num, E& t) { + for (uint i = 0; i < 2 * _n; i++) { +- if (steal_best_of_2(queue_num, seed, t)) { ++ if (steal_best_of_2(queue_num, t)) { + TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(true)); + return true; + } diff --git a/8237483-AArch64-C1-OopMap-inserted-twice-fatal-error.patch b/8237483-AArch64-C1-OopMap-inserted-twice-fatal-error.patch new file mode 100644 index 0000000..10fc9e8 --- /dev/null +++ b/8237483-AArch64-C1-OopMap-inserted-twice-fatal-error.patch @@ -0,0 +1,18 @@ +diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp +index 82abda9..df3addf 100644 +--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp ++++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp +@@ -2045,6 +2045,13 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit + + // get current pc information + // pc is only needed if the method has an exception handler, the unwind code does not need it. ++ if (compilation()->debug_info_recorder()->last_pc_offset() == __ offset()) { ++ // As no instructions have been generated yet for this LIR node it's ++ // possible that an oop map already exists for the current offset. ++ // In that case insert an dummy NOP here to ensure all oop map PCs ++ // are unique. See JDK-8237483. ++ __ nop(); ++ } + int pc_for_athrow_offset = __ offset(); + InternalAddress pc_for_athrow(__ pc()); + __ adr(exceptionPC->as_register(), pc_for_athrow); diff --git a/java-11-openjdk.spec b/java-11-openjdk.spec index c94aed1..89c9202 100644 --- a/java-11-openjdk.spec +++ b/java-11-openjdk.spec @@ -735,7 +735,7 @@ Provides: java-src%{?1} = %{epoch}:%{version}-%{release} Name: java-%{javaver}-%{origin} Version: %{newjavaver}.%{buildver} -Release: 7 +Release: 8 # java-1.5.0-ibm from jpackage.org set Epoch to 1 for unknown reasons # and this change was brought into RHEL-4. java-1.5.0-ibm packages # also included the epoch in their virtual provides. This created a @@ -832,6 +832,9 @@ Patch43: 8243670-Unexpected-test-result-caused-by-C2-MergeMem.patch Patch44: fix-IfNode-s-bugs.patch Patch45: leaf-optimize-in-ParallelScanvageGC.patch Patch46: ZGC-correct-free-heap-size-excluding-waste-in-rule_allocation_rate.patch +Patch47: 8204947-Port-ShenandoahTaskTerminator-to-mainline-and-make-it-default.patch +Patch48: 8205921-Optimizing-best_of_2-work-stealing-queue-selection.patch +Patch49: 8237483-AArch64-C1-OopMap-inserted-twice-fatal-error.patch BuildRequires: autoconf @@ -1096,6 +1099,9 @@ pushd %{top_level_dir_name} %patch44 -p1 %patch45 -p1 %patch46 -p1 +%patch47 -p1 +%patch48 -p1 +%patch49 -p1 popd # openjdk %patch1000 @@ -1598,7 +1604,12 @@ require "copy_jdk_configs.lua" %changelog -* add Fri Sep 11 2020 noah - 1:11.0.8.10-7 +* Mon Sep 21 2020 noah - 1:11.0.8.10-8 +- add 8204947-Port-ShenandoahTaskTerminator-to-mainline-and-make-it-default.patch +- add 8205921-Optimizing-best_of_2-work-stealing-queue-selection.patch +- add 8237483-AArch64-C1-OopMap-inserted-twice-fatal-error.patch + +* Fri Sep 11 2020 noah - 1:11.0.8.10-7 - add 8223667-ASAN-build-broken.patch - add 8229495-SIGILL-in-C2-generated-OSR-compilation.patch - add 8229496-SIGFPE-division-by-zero-in-C2-OSR-compiled-method.patch -- Gitee