From 2512952b698cf05e5b0846d6fd14fe02e77cf6f6 Mon Sep 17 00:00:00 2001 From: jvmboy Date: Fri, 11 Sep 2020 18:20:33 +0800 Subject: [PATCH] Add several patches --- 8223667-ASAN-build-broken.patch | 26 + ...GILL-in-C2-generated-OSR-compilation.patch | 564 ++++++++++++++++++ ...on-by-zero-in-C2-OSR-compiled-method.patch | 377 ++++++++++++ ...ed-test-result-caused-by-C2-MergeMem.patch | 170 ++++++ ...luding-waste-in-rule_allocation_rate.patch | 30 + fix-IfNode-s-bugs.patch | 29 + java-11-openjdk.spec | 26 +- leaf-optimize-in-ParallelScanvageGC.patch | 118 ++++ 8 files changed, 1339 insertions(+), 1 deletion(-) create mode 100644 8223667-ASAN-build-broken.patch create mode 100644 8229495-SIGILL-in-C2-generated-OSR-compilation.patch create mode 100644 8229496-SIGFPE-division-by-zero-in-C2-OSR-compiled-method.patch create mode 100644 8243670-Unexpected-test-result-caused-by-C2-MergeMem.patch create mode 100644 ZGC-correct-free-heap-size-excluding-waste-in-rule_allocation_rate.patch create mode 100644 fix-IfNode-s-bugs.patch create mode 100644 leaf-optimize-in-ParallelScanvageGC.patch diff --git a/8223667-ASAN-build-broken.patch b/8223667-ASAN-build-broken.patch new file mode 100644 index 0000000..c9fd099 --- /dev/null +++ b/8223667-ASAN-build-broken.patch @@ -0,0 +1,26 @@ +From 2d8c2049c7caad19e6cacccea1bc8962ac481f5a Mon Sep 17 00:00:00 2001 +Date: Wed, 17 Jun 2020 10:10:35 +0000 +Subject: [PATCH] 8223667: ASAN build broken + +Summary: : +LLT: N/A +Bug url: https://bugs.openjdk.java.net/browse/JDK-8223667 +--- + make/autoconf/spec.gmk.in | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/make/autoconf/spec.gmk.in b/make/autoconf/spec.gmk.in +index 9473481bc..38fa47d9f 100644 +--- a/make/autoconf/spec.gmk.in ++++ b/make/autoconf/spec.gmk.in +@@ -382,7 +382,7 @@ GCOV_ENABLED=@GCOV_ENABLED@ + export ASAN_ENABLED:=@ASAN_ENABLED@ + export DEVKIT_LIB_DIR:=@DEVKIT_LIB_DIR@ + ifeq ($(ASAN_ENABLED), yes) +- export ASAN_OPTIONS="handle_segv=0 detect_leaks=0" ++ export ASAN_OPTIONS=handle_segv=0 detect_leaks=0 + ifneq ($(DEVKIT_LIB_DIR),) + export LD_LIBRARY_PATH:=$(LD_LIBRARY_PATH):$(DEVKIT_LIB_DIR) + endif +-- +1.8.3.1 diff --git a/8229495-SIGILL-in-C2-generated-OSR-compilation.patch b/8229495-SIGILL-in-C2-generated-OSR-compilation.patch new file mode 100644 index 0000000..88a4f23 --- /dev/null +++ b/8229495-SIGILL-in-C2-generated-OSR-compilation.patch @@ -0,0 +1,564 @@ +diff --git a/src/hotspot/share/opto/classes.hpp b/src/hotspot/share/opto/classes.hpp +index da01e48..6938ed2 100644 +--- a/src/hotspot/share/opto/classes.hpp ++++ b/src/hotspot/share/opto/classes.hpp +@@ -225,6 +225,8 @@ macro(NegF) + macro(NeverBranch) + macro(OnSpinWait) + macro(Opaque1) ++macro(OpaqueLoopInit) ++macro(OpaqueLoopStride) + macro(Opaque2) + macro(Opaque3) + macro(Opaque4) +diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp +index b6980c4..ac7b848 100644 +--- a/src/hotspot/share/opto/compile.cpp ++++ b/src/hotspot/share/opto/compile.cpp +@@ -1981,7 +1981,17 @@ void Compile::remove_opaque4_nodes(PhaseIterGVN &igvn) { + for (int i = opaque4_count(); i > 0; i--) { + Node* opaq = opaque4_node(i-1); + assert(opaq->Opcode() == Op_Opaque4, "Opaque4 only"); ++ // With Opaque4 nodes, the expectation is that the test of input 1 ++ // is always equal to the constant value of input 2. So we can ++ // remove the Opaque4 and replace it by input 2. 
In debug builds, ++ // leave the non constant test in instead to sanity check that it ++ // never fails (if it does, that subgraph was constructed so, at ++ // runtime, a Halt node is executed). ++ #ifdef ASSERT ++ igvn.replace_node(opaq, opaq->in(1)); ++ #else + igvn.replace_node(opaq, opaq->in(2)); ++ #endif + } + assert(opaque4_count() == 0, "should be empty"); + } +diff --git a/src/hotspot/share/opto/loopPredicate.cpp b/src/hotspot/share/opto/loopPredicate.cpp +index ee20972..e255f46 100644 +--- a/src/hotspot/share/opto/loopPredicate.cpp ++++ b/src/hotspot/share/opto/loopPredicate.cpp +@@ -1244,8 +1244,9 @@ ProjNode* PhaseIdealLoop::insert_initial_skeleton_predicate(IfNode* iff, IdealLo + Node* init, Node* limit, jint stride, + Node* rng, bool &overflow, + Deoptimization::DeoptReason reason) { ++ // First predicate for the initial value on first loop iteration + assert(proj->_con && predicate_proj->_con, "not a range check?"); +- Node* opaque_init = new Opaque1Node(C, init); ++ Node* opaque_init = new OpaqueLoopInitNode(C, init); + register_new_node(opaque_init, upper_bound_proj); + BoolNode* bol = rc_predicate(loop, upper_bound_proj, scale, offset, opaque_init, limit, stride, rng, (stride > 0) != (scale > 0), overflow); + Node* opaque_bol = new Opaque4Node(C, bol, _igvn.intcon(1)); // This will go away once loop opts are over +@@ -1253,6 +1254,22 @@ ProjNode* PhaseIdealLoop::insert_initial_skeleton_predicate(IfNode* iff, IdealLo + ProjNode* new_proj = create_new_if_for_predicate(predicate_proj, NULL, reason, overflow ? Op_If : iff->Opcode()); + _igvn.replace_input_of(new_proj->in(0), 1, opaque_bol); + assert(opaque_init->outcnt() > 0, "should be used"); ++ // Second predicate for init + (current stride - initial stride) ++ // This is identical to the previous predicate initially but as ++ // unrolling proceeds current stride is updated. ++ Node* init_stride = loop->_head->as_CountedLoop()->stride(); ++ Node* opaque_stride = new OpaqueLoopStrideNode(C, init_stride); ++ register_new_node(opaque_stride, new_proj); ++ Node* max_value = new SubINode(opaque_stride, init_stride); ++ register_new_node(max_value, new_proj); ++ max_value = new AddINode(opaque_init, max_value); ++ register_new_node(max_value, new_proj); ++ bol = rc_predicate(loop, new_proj, scale, offset, max_value, limit, stride, rng, (stride > 0) != (scale > 0), overflow); ++ opaque_bol = new Opaque4Node(C, bol, _igvn.intcon(1)); ++ register_new_node(opaque_bol, new_proj); ++ new_proj = create_new_if_for_predicate(predicate_proj, NULL, reason, overflow ? Op_If : iff->Opcode()); ++ _igvn.replace_input_of(new_proj->in(0), 1, opaque_bol); ++ assert(max_value->outcnt() > 0, "should be used"); + return new_proj; + } + +diff --git a/src/hotspot/share/opto/loopTransform.cpp b/src/hotspot/share/opto/loopTransform.cpp +index 46790ef..c678544 100644 +--- a/src/hotspot/share/opto/loopTransform.cpp ++++ b/src/hotspot/share/opto/loopTransform.cpp +@@ -1080,7 +1080,7 @@ void PhaseIdealLoop::ensure_zero_trip_guard_proj(Node* node, bool is_main_loop) + // CastII/ConvI2L nodes cause some data paths to die. For consistency, + // the control paths must die too but the range checks were removed by + // predication. The range checks that we add here guarantee that they do. 
+-void PhaseIdealLoop::copy_skeleton_predicates_to_main_loop_helper(Node* predicate, Node* start, Node* end, ++void PhaseIdealLoop::copy_skeleton_predicates_to_main_loop_helper(Node* predicate, Node* init, Node* stride, + IdealLoopTree* outer_loop, LoopNode* outer_main_head, + uint dd_main_head, const uint idx_before_pre_post, + const uint idx_after_post_before_pre, Node* zero_trip_guard_proj_main, +@@ -1098,6 +1098,10 @@ void PhaseIdealLoop::copy_skeleton_predicates_to_main_loop_helper(Node* predicat + predicate = iff->in(0); + Node* current_proj = outer_main_head->in(LoopNode::EntryControl); + Node* prev_proj = current_proj; ++ Node* opaque_init = new OpaqueLoopInitNode(C, init); ++ register_new_node(opaque_init, outer_main_head->in(LoopNode::EntryControl)); ++ Node* opaque_stride = new OpaqueLoopStrideNode(C, stride); ++ register_new_node(opaque_stride, outer_main_head->in(LoopNode::EntryControl)); + while (predicate != NULL && predicate->is_Proj() && predicate->in(0)->is_If()) { + iff = predicate->in(0)->as_If(); + uncommon_proj = iff->proj_out(1 - predicate->as_Proj()->_con); +@@ -1108,11 +1112,10 @@ void PhaseIdealLoop::copy_skeleton_predicates_to_main_loop_helper(Node* predicat + // Clone the skeleton predicate twice and initialize one with the initial + // value of the loop induction variable. Leave the other predicate + // to be initialized when increasing the stride during loop unrolling. +- prev_proj = clone_skeleton_predicate(iff, start, predicate, uncommon_proj, current_proj, outer_loop, prev_proj); +- assert(skeleton_predicate_has_opaque(prev_proj->in(0)->as_If()) == (start->Opcode() == Op_Opaque1), ""); +- prev_proj = clone_skeleton_predicate(iff, end, predicate, uncommon_proj, current_proj, outer_loop, prev_proj); +- assert(skeleton_predicate_has_opaque(prev_proj->in(0)->as_If()) == (end->Opcode() == Op_Opaque1), ""); +- ++ prev_proj = clone_skeleton_predicate(iff, opaque_init, NULL, predicate, uncommon_proj, current_proj, outer_loop, prev_proj); ++ assert(skeleton_predicate_has_opaque(prev_proj->in(0)->as_If()), ""); ++ prev_proj = clone_skeleton_predicate(iff, init, stride, predicate, uncommon_proj, current_proj, outer_loop, prev_proj); ++ assert(!skeleton_predicate_has_opaque(prev_proj->in(0)->as_If()), ""); + // Rewire any control inputs from the cloned skeleton predicates down to the main and post loop for data nodes that are part of the + // main loop (and were cloned to the pre and post loop). 
+ for (DUIterator i = predicate->outs(); predicate->has_out(i); i++) { +@@ -1177,14 +1180,14 @@ bool PhaseIdealLoop::skeleton_predicate_has_opaque(IfNode* iff) { + } + continue; + } +- if (op == Op_Opaque1) { ++ if (n->is_Opaque1()) { + return true; + } + } + return false; + } + +-Node* PhaseIdealLoop::clone_skeleton_predicate(Node* iff, Node* value, Node* predicate, Node* uncommon_proj, ++Node* PhaseIdealLoop::clone_skeleton_predicate(Node* iff, Node* new_init, Node* new_stride, Node* predicate, Node* uncommon_proj, + Node* current_proj, IdealLoopTree* outer_loop, Node* prev_proj) { + Node_Stack to_clone(2); + to_clone.push(iff->in(1), 1); +@@ -1204,12 +1207,19 @@ Node* PhaseIdealLoop::clone_skeleton_predicate(Node* iff, Node* value, Node* pre + to_clone.push(m, 1); + continue; + } +- if (op == Op_Opaque1) { ++ if (m->is_Opaque1()) { + if (n->_idx < current) { + n = n->clone(); ++ register_new_node(n, current_proj); ++ } ++ if (op == Op_OpaqueLoopInit) { ++ n->set_req(i, new_init); ++ } else { ++ assert(op == Op_OpaqueLoopStride, "unexpected opaque node"); ++ if (new_stride != NULL) { ++ n->set_req(i, new_stride); ++ } + } +- n->set_req(i, value); +- register_new_node(n, current_proj); + to_clone.set_node(n); + } + for (;;) { +@@ -1259,7 +1269,7 @@ Node* PhaseIdealLoop::clone_skeleton_predicate(Node* iff, Node* value, Node* pre + return proj; + } + +-void PhaseIdealLoop::copy_skeleton_predicates_to_main_loop(CountedLoopNode* pre_head, Node* start, Node* end, ++void PhaseIdealLoop::copy_skeleton_predicates_to_main_loop(CountedLoopNode* pre_head, Node* init, Node* stride, + IdealLoopTree* outer_loop, LoopNode* outer_main_head, + uint dd_main_head, const uint idx_before_pre_post, + const uint idx_after_post_before_pre, Node* zero_trip_guard_proj_main, +@@ -1279,10 +1289,10 @@ void PhaseIdealLoop::copy_skeleton_predicates_to_main_loop(CountedLoopNode* pre_ + } + } + predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); +- copy_skeleton_predicates_to_main_loop_helper(predicate, start, end, outer_loop, outer_main_head, dd_main_head, ++ copy_skeleton_predicates_to_main_loop_helper(predicate, init, stride, outer_loop, outer_main_head, dd_main_head, + idx_before_pre_post, idx_after_post_before_pre, zero_trip_guard_proj_main, + zero_trip_guard_proj_post, old_new); +- copy_skeleton_predicates_to_main_loop_helper(profile_predicate, start, end, outer_loop, outer_main_head, dd_main_head, ++ copy_skeleton_predicates_to_main_loop_helper(profile_predicate, init, stride, outer_loop, outer_main_head, dd_main_head, + idx_before_pre_post, idx_after_post_before_pre, zero_trip_guard_proj_main, + zero_trip_guard_proj_post, old_new); + } +@@ -1433,10 +1443,8 @@ void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_ + // CastII for the main loop: + Node* castii = cast_incr_before_loop( pre_incr, min_taken, main_head ); + assert(castii != NULL, "no castII inserted"); +- Node* opaque_castii = new Opaque1Node(C, castii); +- register_new_node(opaque_castii, outer_main_head->in(LoopNode::EntryControl)); + assert(post_head->in(1)->is_IfProj(), "must be zero-trip guard If node projection of the post loop"); +- copy_skeleton_predicates_to_main_loop(pre_head, castii, opaque_castii, outer_loop, outer_main_head, dd_main_head, ++ copy_skeleton_predicates_to_main_loop(pre_head, castii, stride, outer_loop, outer_main_head, dd_main_head, + idx_before_pre_post, idx_after_post_before_pre, min_taken, post_head->in(1), old_new); + + // Step B4: Shorten the pre-loop to run only 
1 iteration (for now). +@@ -1722,6 +1730,11 @@ void PhaseIdealLoop::update_main_loop_skeleton_predicates(Node* ctrl, CountedLoo + Node* prev_proj = ctrl; + LoopNode* outer_loop_head = loop_head->skip_strip_mined(); + IdealLoopTree* outer_loop = get_loop(outer_loop_head); ++ // Compute the value of the loop induction variable at the end of the ++ // first iteration of the unrolled loop: init + new_stride_con - init_inc ++ int new_stride_con = stride_con * 2; ++ Node* max_value = _igvn.intcon(new_stride_con); ++ set_ctrl(max_value, C->root()); + while (entry != NULL && entry->is_Proj() && entry->in(0)->is_If()) { + IfNode* iff = entry->in(0)->as_If(); + ProjNode* proj = iff->proj_out(1 - entry->as_Proj()->_con); +@@ -1737,18 +1750,8 @@ void PhaseIdealLoop::update_main_loop_skeleton_predicates(Node* ctrl, CountedLoo + // tell. Kill it in any case. + _igvn.replace_input_of(iff, 1, iff->in(1)->in(2)); + } else { +- // Add back the predicate for the value at the beginning of the first entry +- prev_proj = clone_skeleton_predicate(iff, init, entry, proj, ctrl, outer_loop, prev_proj); +- assert(!skeleton_predicate_has_opaque(prev_proj->in(0)->as_If()), "unexpected"); +- // Compute the value of the loop induction variable at the end of the +- // first iteration of the unrolled loop: init + new_stride_con - init_inc +- int init_inc = stride_con/loop_head->unrolled_count(); +- assert(init_inc != 0, "invalid loop increment"); +- int new_stride_con = stride_con * 2; +- Node* max_value = _igvn.intcon(new_stride_con - init_inc); +- max_value = new AddINode(init, max_value); +- register_new_node(max_value, get_ctrl(iff->in(1))); +- prev_proj = clone_skeleton_predicate(iff, max_value, entry, proj, ctrl, outer_loop, prev_proj); ++ //Add back predicates updated for the new stride. ++ prev_proj = clone_skeleton_predicate(iff, init, max_value, entry, proj, ctrl, outer_loop, prev_proj); + assert(!skeleton_predicate_has_opaque(prev_proj->in(0)->as_If()), "unexpected"); + } + } +@@ -2594,22 +2597,22 @@ int PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) { + // (0-offset)/scale could be outside of loop iterations range. 
+ conditional_rc = true; + Node* init = cl->init_trip(); +- Node* opaque_init = new Opaque1Node(C, init); ++ Node* opaque_init = new OpaqueLoopInitNode(C, init); + register_new_node(opaque_init, predicate_proj); ++ // predicate on first value of first iteration ++ predicate_proj = add_range_check_predicate(loop, cl, predicate_proj, scale_con, offset, limit, stride_con, init); ++ assert(!skeleton_predicate_has_opaque(predicate_proj->in(0)->as_If()), "unexpected"); + // template predicate so it can be updated on next unrolling + predicate_proj = add_range_check_predicate(loop, cl, predicate_proj, scale_con, offset, limit, stride_con, opaque_init); + assert(skeleton_predicate_has_opaque(predicate_proj->in(0)->as_If()), "unexpected"); +- // predicate on first value of first iteration +- predicate_proj = add_range_check_predicate(loop, cl, predicate_proj, scale_con, offset, limit, stride_con, init); +- assert(!skeleton_predicate_has_opaque(predicate_proj->in(0)->as_If()), "unexpected"); +- int init_inc = stride_con/cl->unrolled_count(); +- assert(init_inc != 0, "invalid loop increment"); +- Node* max_value = _igvn.intcon(stride_con - init_inc); +- max_value = new AddINode(init, max_value); ++ Node* opaque_stride = new OpaqueLoopStrideNode(C, cl->stride()); ++ register_new_node(opaque_stride, predicate_proj); ++ Node* max_value = new SubINode(opaque_stride, cl->stride()); ++ register_new_node(max_value, predicate_proj); ++ max_value = new AddINode(opaque_init, max_value); + register_new_node(max_value, predicate_proj); +- // predicate on last value of first iteration (in case unrolling has already happened) + predicate_proj = add_range_check_predicate(loop, cl, predicate_proj, scale_con, offset, limit, stride_con, max_value); +- assert(!skeleton_predicate_has_opaque(predicate_proj->in(0)->as_If()), "unexpected"); ++ assert(skeleton_predicate_has_opaque(predicate_proj->in(0)->as_If()), "unexpected"); + } else { + if (PrintOpto) { + tty->print_cr("missed RCE opportunity"); +diff --git a/src/hotspot/share/opto/loopnode.hpp b/src/hotspot/share/opto/loopnode.hpp +index 3951259..6d68df0 100644 +--- a/src/hotspot/share/opto/loopnode.hpp ++++ b/src/hotspot/share/opto/loopnode.hpp +@@ -747,13 +747,13 @@ private: + #ifdef ASSERT + void ensure_zero_trip_guard_proj(Node* node, bool is_main_loop); + #endif +- void copy_skeleton_predicates_to_main_loop_helper(Node* predicate, Node* start, Node* end, IdealLoopTree* outer_loop, LoopNode* outer_main_head, ++ void copy_skeleton_predicates_to_main_loop_helper(Node* predicate, Node* init, Node* stride, IdealLoopTree* outer_loop, LoopNode* outer_main_head, + uint dd_main_head, const uint idx_before_pre_post, const uint idx_after_post_before_pre, + Node* zero_trip_guard_proj_main, Node* zero_trip_guard_proj_post, const Node_List &old_new); +- void copy_skeleton_predicates_to_main_loop(CountedLoopNode* pre_head, Node* start, Node* end, IdealLoopTree* outer_loop, LoopNode* outer_main_head, ++ void copy_skeleton_predicates_to_main_loop(CountedLoopNode* pre_head, Node* init, Node* stride, IdealLoopTree* outer_loop, LoopNode* outer_main_head, + uint dd_main_head, const uint idx_before_pre_post, const uint idx_after_post_before_pre, + Node* zero_trip_guard_proj_main, Node* zero_trip_guard_proj_post, const Node_List &old_new); +- Node* clone_skeleton_predicate(Node* iff, Node* value, Node* predicate, Node* uncommon_proj, ++ Node* clone_skeleton_predicate(Node* iff, Node* new_init, Node* new_stride, Node* predicate, Node* uncommon_proj, + Node* current_proj, IdealLoopTree* 
outer_loop, Node* prev_proj); + bool skeleton_predicate_has_opaque(IfNode* iff); + void update_main_loop_skeleton_predicates(Node* ctrl, CountedLoopNode* loop_head, Node* init, int stride_con); +diff --git a/src/hotspot/share/opto/loopopts.cpp b/src/hotspot/share/opto/loopopts.cpp +index 6b85529..adbee17 100644 +--- a/src/hotspot/share/opto/loopopts.cpp ++++ b/src/hotspot/share/opto/loopopts.cpp +@@ -890,30 +890,42 @@ void PhaseIdealLoop::try_move_store_after_loop(Node* n) { + Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) { + // Cloning these guys is unlikely to win + int n_op = n->Opcode(); +- if( n_op == Op_MergeMem ) return n; +- if( n->is_Proj() ) return n; ++ if (n_op == Op_MergeMem) { ++ return n; ++ } ++ if (n->is_Proj()) { ++ return n; ++ } + // Do not clone-up CmpFXXX variations, as these are always + // followed by a CmpI +- if( n->is_Cmp() ) return n; ++ if (n->is_Cmp()) { ++ return n; ++ } + // Attempt to use a conditional move instead of a phi/branch +- if( ConditionalMoveLimit > 0 && n_op == Op_Region ) { ++ if (ConditionalMoveLimit > 0 && n_op == Op_Region) { + Node *cmov = conditional_move( n ); +- if( cmov ) return cmov; ++ if (cmov) { ++ return cmov; ++ } + } +- if( n->is_CFG() || n->is_LoadStore() ) ++ if (n->is_CFG() || n->is_LoadStore()) { + return n; +- if( n_op == Op_Opaque1 || // Opaque nodes cannot be mod'd +- n_op == Op_Opaque2 ) { +- if( !C->major_progress() ) // If chance of no more loop opts... ++ } ++ if (n->is_Opaque1() || // Opaque nodes cannot be mod'd ++ n_op == Op_Opaque2) { ++ if (!C->major_progress()) { // If chance of no more loop opts... + _igvn._worklist.push(n); // maybe we'll remove them ++ } + return n; + } + +- if( n->is_Con() ) return n; // No cloning for Con nodes +- ++ if (n->is_Con()) { ++ return n; // No cloning for Con nodes ++ } + Node *n_ctrl = get_ctrl(n); +- if( !n_ctrl ) return n; // Dead node +- ++ if (!n_ctrl) { ++ return n; // Dead node ++ } + Node* res = try_move_store_before_loop(n, n_ctrl); + if (res != NULL) { + return n; +diff --git a/src/hotspot/share/opto/macro.cpp b/src/hotspot/share/opto/macro.cpp +index 9f3cb34..5f5cc75 100644 +--- a/src/hotspot/share/opto/macro.cpp ++++ b/src/hotspot/share/opto/macro.cpp +@@ -2594,9 +2594,10 @@ void PhaseMacroExpand::eliminate_macro_nodes() { + break; + case Node::Class_OuterStripMinedLoop: + break; ++ case Node::Class_Opaque1: ++ break; + default: + assert(n->Opcode() == Op_LoopLimit || +- n->Opcode() == Op_Opaque1 || + n->Opcode() == Op_Opaque2 || + n->Opcode() == Op_Opaque3 || + BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(n), +@@ -2638,7 +2639,7 @@ bool PhaseMacroExpand::expand_macro_nodes() { + C->remove_macro_node(n); + _igvn._worklist.push(n); + success = true; +- } else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) { ++ } else if (n->is_Opaque1() || n->Opcode() == Op_Opaque2) { + _igvn.replace_node(n, n->in(1)); + success = true; + #if INCLUDE_RTM_OPT +diff --git a/src/hotspot/share/opto/node.hpp b/src/hotspot/share/opto/node.hpp +index cf0975e..a36f3bb 100644 +--- a/src/hotspot/share/opto/node.hpp ++++ b/src/hotspot/share/opto/node.hpp +@@ -116,6 +116,7 @@ class MulNode; + class MultiNode; + class MultiBranchNode; + class NeverBranchNode; ++class Opaque1Node; + class OuterStripMinedLoopNode; + class OuterStripMinedLoopEndNode; + class Node; +@@ -608,10 +609,10 @@ public: + // This enum is used only for C2 ideal and mach nodes with is_() methods + // so that it's values fits into 16 bits. 
+ enum NodeClasses { +- Bit_Node = 0x0000, +- Class_Node = 0x0000, +- ClassMask_Node = 0xFFFF, +- ++ Bit_Node = 0x00000000, ++ Class_Node = 0x00000000, ++ ClassMask_Node = 0xFFFFFFFF, ++ + DEFINE_CLASS_ID(Multi, Node, 0) + DEFINE_CLASS_ID(SafePoint, Multi, 0) + DEFINE_CLASS_ID(Call, SafePoint, 0) +@@ -714,6 +715,7 @@ public: + DEFINE_CLASS_ID(Vector, Node, 13) + DEFINE_CLASS_ID(ClearArray, Node, 14) + DEFINE_CLASS_ID(Halt, Node, 15) ++ DEFINE_CLASS_ID(Opaque1, Node, 16) + + _max_classes = ClassMask_Halt + }; +@@ -740,12 +742,12 @@ public: + }; + + private: +- jushort _class_id; ++ juint _class_id; + jushort _flags; + + protected: + // These methods should be called from constructors only. +- void init_class_id(jushort c) { ++ void init_class_id(juint c) { + _class_id = c; // cast out const + } + void init_flags(jushort fl) { +@@ -758,7 +760,7 @@ protected: + } + + public: +- const jushort class_id() const { return _class_id; } ++ const juint class_id() const { return _class_id; } + + const jushort flags() const { return _flags; } + +@@ -859,6 +861,7 @@ public: + DEFINE_CLASS_QUERY(Mul) + DEFINE_CLASS_QUERY(Multi) + DEFINE_CLASS_QUERY(MultiBranch) ++ DEFINE_CLASS_QUERY(Opaque1) + DEFINE_CLASS_QUERY(OuterStripMinedLoop) + DEFINE_CLASS_QUERY(OuterStripMinedLoopEnd) + DEFINE_CLASS_QUERY(Parm) +diff --git a/src/hotspot/share/opto/opaquenode.hpp b/src/hotspot/share/opto/opaquenode.hpp +index f97de4a..4c00528 100644 +--- a/src/hotspot/share/opto/opaquenode.hpp ++++ b/src/hotspot/share/opto/opaquenode.hpp +@@ -38,6 +38,7 @@ class Opaque1Node : public Node { + Opaque1Node(Compile* C, Node *n) : Node(NULL, n) { + // Put it on the Macro nodes list to removed during macro nodes expansion. + init_flags(Flag_is_macro); ++ init_class_id(Class_Opaque1); + C->add_macro_node(this); + } + // Special version for the pre-loop to hold the original loop limit +@@ -45,6 +46,7 @@ class Opaque1Node : public Node { + Opaque1Node(Compile* C, Node *n, Node* orig_limit) : Node(NULL, n, orig_limit) { + // Put it on the Macro nodes list to removed during macro nodes expansion. + init_flags(Flag_is_macro); ++ init_class_id(Class_Opaque1); + C->add_macro_node(this); + } + Node* original_loop_limit() { return req()==3 ? in(2) : NULL; } +@@ -52,6 +54,20 @@ class Opaque1Node : public Node { + virtual const Type *bottom_type() const { return TypeInt::INT; } + virtual Node* Identity(PhaseGVN* phase); + }; ++// Opaque nodes specific to range check elimination handling ++class OpaqueLoopInitNode : public Opaque1Node { ++ public: ++ OpaqueLoopInitNode(Compile* C, Node *n) : Opaque1Node(C, n) { ++ } ++ virtual int Opcode() const; ++}; ++ ++class OpaqueLoopStrideNode : public Opaque1Node { ++ public: ++ OpaqueLoopStrideNode(Compile* C, Node *n) : Opaque1Node(C, n) { ++ } ++ virtual int Opcode() const; ++}; + + //------------------------------Opaque2Node------------------------------------ + // A node to prevent unwanted optimizations. Allows constant folding. 
Stops +diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp +index 413188e..98b8ade 100644 +--- a/src/hotspot/share/runtime/vmStructs.cpp ++++ b/src/hotspot/share/runtime/vmStructs.cpp +@@ -949,7 +949,7 @@ typedef PaddedEnd PaddedObjectMonitor; + c2_nonstatic_field(Node, _outcnt, node_idx_t) \ + c2_nonstatic_field(Node, _outmax, node_idx_t) \ + c2_nonstatic_field(Node, _idx, const node_idx_t) \ +- c2_nonstatic_field(Node, _class_id, jushort) \ ++ c2_nonstatic_field(Node, _class_id, juint) \ + c2_nonstatic_field(Node, _flags, jushort) \ + \ + c2_nonstatic_field(Compile, _root, RootNode*) \ +diff --git a/test/hotspot/jtreg/compiler/loopopts/TestRCEAfterUnrolling.java b/test/hotspot/jtreg/compiler/loopopts/TestRCEAfterUnrolling.java +new file mode 100644 +index 0000000..06bca79 +--- /dev/null ++++ b/test/hotspot/jtreg/compiler/loopopts/TestRCEAfterUnrolling.java +@@ -0,0 +1,78 @@ ++/* ++ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++ ++/* ++ * @test ++ * @bug 8229495 ++ * @summary SIGILL in C2 generated OSR compilation. 
++ * ++ * @run main/othervm -Xcomp -XX:-TieredCompilation -XX:CompileOnly=TestRCEAfterUnrolling::test TestRCEAfterUnrolling ++ * ++ */ ++ ++public class TestRCEAfterUnrolling { ++ ++ public static int iFld = 0; ++ public static short sFld = 1; ++ ++ public static void main(String[] strArr) { ++ test(); ++ } ++ ++ public static int test() { ++ int x = 11; ++ int y = 0; ++ int j = 0; ++ int iArr[] = new int[400]; ++ ++ init(iArr); ++ ++ for (int i = 0; i < 2; i++) { ++ doNothing(); ++ for (j = 10; j > 1; j -= 2) { ++ sFld += (short)j; ++ iArr = iArr; ++ y += (j * 3); ++ x = (iArr[j - 1]/ x); ++ x = sFld; ++ } ++ int k = 1; ++ while (++k < 8) { ++ iFld += x; ++ } ++ } ++ return Float.floatToIntBits(654) + x + j + y; ++ } ++ ++ // Inlined ++ public static void doNothing() { ++ } ++ ++ // Inlined ++ public static void init(int[] a) { ++ for (int j = 0; j < a.length; j++) { ++ a[j] = 0; ++ } ++ } ++} ++ diff --git a/8229496-SIGFPE-division-by-zero-in-C2-OSR-compiled-method.patch b/8229496-SIGFPE-division-by-zero-in-C2-OSR-compiled-method.patch new file mode 100644 index 0000000..c01f704 --- /dev/null +++ b/8229496-SIGFPE-division-by-zero-in-C2-OSR-compiled-method.patch @@ -0,0 +1,377 @@ +diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad +index d6de489..65606e3 100644 +--- a/src/hotspot/cpu/aarch64/aarch64.ad ++++ b/src/hotspot/cpu/aarch64/aarch64.ad +@@ -8315,6 +8315,17 @@ instruct castII(iRegI dst) + ins_pipe(pipe_class_empty); + %} + ++instruct castLL(iRegL dst) ++%{ ++ match(Set dst (CastLL dst)); ++ ++ size(0); ++ format %{ "# castLL of $dst" %} ++ ins_encode(/* empty encoding */); ++ ins_cost(0); ++ ins_pipe(pipe_class_empty); ++%} ++ + // ============================================================================ + // Atomic operation instructions + // +diff --git a/src/hotspot/cpu/arm/arm.ad b/src/hotspot/cpu/arm/arm.ad +index f338006..18e81bd 100644 +--- a/src/hotspot/cpu/arm/arm.ad ++++ b/src/hotspot/cpu/arm/arm.ad +@@ -6901,6 +6901,14 @@ instruct castII( iRegI dst ) %{ + ins_pipe(empty); + %} + ++instruct castLL( iRegL dst ) %{ ++ match(Set dst (CastLL dst)); ++ format %{ "! castLL of $dst" %} ++ ins_encode( /*empty encoding*/ ); ++ ins_cost(0); ++ ins_pipe(empty); ++%} ++ + //----------Arithmetic Instructions-------------------------------------------- + // Addition Instructions + // Register Addition +diff --git a/src/hotspot/cpu/ppc/ppc.ad b/src/hotspot/cpu/ppc/ppc.ad +index 64b2d6b..07bda6d 100644 +--- a/src/hotspot/cpu/ppc/ppc.ad ++++ b/src/hotspot/cpu/ppc/ppc.ad +@@ -10821,6 +10821,14 @@ instruct castII(iRegIdst dst) %{ + ins_pipe(pipe_class_default); + %} + ++instruct castLL(iRegLdst dst) %{ ++ match(Set dst (CastLL dst)); ++ format %{ " -- \t// castLL of $dst" %} ++ size(0); ++ ins_encode( /*empty*/ ); ++ ins_pipe(pipe_class_default); ++%} ++ + instruct checkCastPP(iRegPdst dst) %{ + match(Set dst (CheckCastPP dst)); + format %{ " -- \t// checkcastPP of $dst" %} +diff --git a/src/hotspot/cpu/s390/s390.ad b/src/hotspot/cpu/s390/s390.ad +index e335f47..96c231b 100644 +--- a/src/hotspot/cpu/s390/s390.ad ++++ b/src/hotspot/cpu/s390/s390.ad +@@ -5371,6 +5371,14 @@ instruct castII(iRegI dst) %{ + ins_pipe(pipe_class_dummy); + %} + ++instruct castLL(iRegL dst) %{ ++ match(Set dst (CastLL dst)); ++ size(0); ++ format %{ "# castLL of $dst" %} ++ ins_encode(/*empty*/); ++ ins_pipe(pipe_class_dummy); ++%} ++ + + //----------Conditional_store-------------------------------------------------- + // Conditional-store of the updated heap-top. 
+diff --git a/src/hotspot/cpu/sparc/sparc.ad b/src/hotspot/cpu/sparc/sparc.ad +index 7a2798a..a09c795 100644 +--- a/src/hotspot/cpu/sparc/sparc.ad ++++ b/src/hotspot/cpu/sparc/sparc.ad +@@ -6812,6 +6812,14 @@ instruct castII( iRegI dst ) %{ + ins_pipe(empty); + %} + ++instruct castLL( iRegL dst ) %{ ++ match(Set dst (CastLL dst)); ++ format %{ "# castLL of $dst" %} ++ ins_encode( /*empty encoding*/ ); ++ ins_cost(0); ++ ins_pipe(empty); ++%} ++ + //----------Arithmetic Instructions-------------------------------------------- + // Addition Instructions + // Register Addition +diff --git a/src/hotspot/cpu/x86/x86_32.ad b/src/hotspot/cpu/x86/x86_32.ad +index 93551b8..de256fe 100644 +--- a/src/hotspot/cpu/x86/x86_32.ad ++++ b/src/hotspot/cpu/x86/x86_32.ad +@@ -7324,6 +7324,14 @@ instruct castII( rRegI dst ) %{ + ins_pipe( empty ); + %} + ++instruct castLL( rRegL dst ) %{ ++ match(Set dst (CastLL dst)); ++ format %{ "#castLL of $dst" %} ++ ins_encode( /*empty encoding*/ ); ++ ins_cost(0); ++ ins_pipe( empty ); ++%} ++ + // Load-locked - same as a regular pointer load when used with compare-swap + instruct loadPLocked(eRegP dst, memory mem) %{ + match(Set dst (LoadPLocked mem)); +diff --git a/src/hotspot/cpu/x86/x86_64.ad b/src/hotspot/cpu/x86/x86_64.ad +index 7c5feb4..7879547 100644 +--- a/src/hotspot/cpu/x86/x86_64.ad ++++ b/src/hotspot/cpu/x86/x86_64.ad +@@ -7354,6 +7354,17 @@ instruct castII(rRegI dst) + ins_pipe(empty); + %} + ++instruct castLL(rRegL dst) ++%{ ++ match(Set dst (CastLL dst)); ++ ++ size(0); ++ format %{ "# castLL of $dst" %} ++ ins_encode(/* empty encoding */); ++ ins_cost(0); ++ ins_pipe(empty); ++%} ++ + // LoadP-locked same as a regular LoadP when used with compare-swap + instruct loadPLocked(rRegP dst, memory mem) + %{ +diff --git a/src/hotspot/share/opto/castnode.cpp b/src/hotspot/share/opto/castnode.cpp +index ebd7d33..320d25f 100644 +--- a/src/hotspot/share/opto/castnode.cpp ++++ b/src/hotspot/share/opto/castnode.cpp +@@ -63,6 +63,14 @@ const Type* ConstraintCastNode::Value(PhaseGVN* phase) const { + if (rt->empty()) assert(ft == Type::TOP, "special case #2"); + break; + } ++ case Op_CastLL: ++ { ++ const Type* t1 = phase->type(in(1)); ++ if (t1 == Type::TOP) assert(ft == Type::TOP, "special case #1"); ++ const Type* rt = t1->join_speculative(_type); ++ if (rt->empty()) assert(ft == Type::TOP, "special case #2"); ++ break; ++ } + case Op_CastPP: + if (phase->type(in(1)) == TypePtr::NULL_PTR && + _type->isa_ptr() && _type->is_ptr()->_ptr == TypePtr::NotNull) +@@ -96,6 +104,11 @@ Node* ConstraintCastNode::make_cast(int opcode, Node* c, Node *n, const Type *t, + cast->set_req(0, c); + return cast; + } ++ case Op_CastLL: { ++ Node* cast = new CastLLNode(n, t, carry_dependency); ++ cast->set_req(0, c); ++ return cast; ++ } + case Op_CastPP: { + Node* cast = new CastPPNode(n, t, carry_dependency); + cast->set_req(0, c); +@@ -279,6 +292,45 @@ void CastIINode::dump_spec(outputStream* st) const { + } + #endif + ++Node* CastLLNode::Ideal(PhaseGVN* phase, bool can_reshape) { ++ Node* progress = ConstraintCastNode::Ideal(phase, can_reshape); ++ if (progress != NULL) { ++ return progress; ++ } ++ ++ // Same as in CastIINode::Ideal but for TypeLong instead of TypeInt ++ if (can_reshape && !phase->C->major_progress()) { ++ const TypeLong* this_type = this->type()->is_long(); ++ const TypeLong* in_type = phase->type(in(1))->isa_long(); ++ if (in_type != NULL && this_type != NULL && ++ (in_type->_lo != this_type->_lo || ++ in_type->_hi != this_type->_hi)) { ++ jlong lo1 = 
this_type->_lo; ++ jlong hi1 = this_type->_hi; ++ int w1 = this_type->_widen; ++ ++ if (lo1 >= 0) { ++ // Keep a range assertion of >=0. ++ lo1 = 0; hi1 = max_jlong; ++ } else if (hi1 < 0) { ++ // Keep a range assertion of <0. ++ lo1 = min_jlong; hi1 = -1; ++ } else { ++ lo1 = min_jlong; hi1 = max_jlong; ++ } ++ const TypeLong* wtype = TypeLong::make(MAX2(in_type->_lo, lo1), ++ MIN2(in_type->_hi, hi1), ++ MAX2((int)in_type->_widen, w1)); ++ if (wtype != type()) { ++ set_type(wtype); ++ return this; ++ } ++ } ++ } ++ return NULL; ++} ++ ++ + //============================================================================= + //------------------------------Identity--------------------------------------- + // If input is already higher or equal to cast type, then this is an identity. +diff --git a/src/hotspot/share/opto/castnode.hpp b/src/hotspot/share/opto/castnode.hpp +index eaf3e32..6e48a07 100644 +--- a/src/hotspot/share/opto/castnode.hpp ++++ b/src/hotspot/share/opto/castnode.hpp +@@ -91,6 +91,19 @@ class CastIINode: public ConstraintCastNode { + #endif + }; + ++//------------------------------CastLLNode------------------------------------- ++// cast long to long (different range) ++class CastLLNode: public ConstraintCastNode { ++ public: ++ CastLLNode(Node* n, const Type* t, bool carry_dependency = false) ++ : ConstraintCastNode(n, t, carry_dependency) { ++ init_class_id(Class_CastLL); ++ } ++ virtual int Opcode() const; ++ virtual uint ideal_reg() const { return Op_RegL; } ++ virtual Node* Ideal(PhaseGVN* phase, bool can_reshape); ++}; ++ + //------------------------------CastPPNode------------------------------------- + // cast pointer to pointer (different type) + class CastPPNode: public ConstraintCastNode { +diff --git a/src/hotspot/share/opto/cfgnode.cpp b/src/hotspot/share/opto/cfgnode.cpp +index eec3193..8c59efb 100644 +--- a/src/hotspot/share/opto/cfgnode.cpp ++++ b/src/hotspot/share/opto/cfgnode.cpp +@@ -1727,12 +1727,13 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { + // Wait until after parsing for the type information to propagate from the casts. 
+ assert(can_reshape, "Invalid during parsing"); + const Type* phi_type = bottom_type(); +- assert(phi_type->isa_int() || phi_type->isa_ptr(), "bad phi type"); +- // Add casts to carry the control dependency of the Phi that is +- // going away ++ assert(phi_type->isa_int() || phi_type->isa_long() || phi_type->isa_ptr(), "bad phi type"); ++ // Add casts to carry the control dependency of the Phi that is going away + Node* cast = NULL; + if (phi_type->isa_int()) { + cast = ConstraintCastNode::make_cast(Op_CastII, r, uin, phi_type, true); ++ } else if (phi_type->isa_long()) { ++ cast = ConstraintCastNode::make_cast(Op_CastLL, r, uin, phi_type, true); + } else { + const Type* uin_type = phase->type(uin); + if (!phi_type->isa_oopptr() && !uin_type->isa_oopptr()) { +diff --git a/src/hotspot/share/opto/classes.hpp b/src/hotspot/share/opto/classes.hpp +index 6938ed2..2f435d1 100644 +--- a/src/hotspot/share/opto/classes.hpp ++++ b/src/hotspot/share/opto/classes.hpp +@@ -57,6 +57,7 @@ macro(CallLeafNoFP) + macro(CallRuntime) + macro(CallStaticJava) + macro(CastII) ++macro(CastLL) + macro(CastX2P) + macro(CastP2X) + macro(CastPP) +diff --git a/src/hotspot/share/opto/graphKit.cpp b/src/hotspot/share/opto/graphKit.cpp +index 7a18207..25f5fa6 100644 +--- a/src/hotspot/share/opto/graphKit.cpp ++++ b/src/hotspot/share/opto/graphKit.cpp +@@ -1363,35 +1363,37 @@ Node* GraphKit::null_check_common(Node* value, BasicType type, + + // Cast obj to not-null on this path, if there is no null_control. + // (If there is a null_control, a non-null value may come back to haunt us.) +- if (type == T_OBJECT) { +- Node* cast = cast_not_null(value, false); +- if (null_control == NULL || (*null_control) == top()) +- replace_in_map(value, cast); +- value = cast; +- } +- +- return value; ++ return cast_not_null(value, (null_control == NULL || (*null_control) == top())); + } + + + //------------------------------cast_not_null---------------------------------- + // Cast obj to not-null on this path + Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) { +- const Type *t = _gvn.type(obj); +- const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL); +- // Object is already not-null? +- if( t == t_not_null ) return obj; +- +- Node *cast = new CastPPNode(obj,t_not_null); +- cast->init_req(0, control()); +- cast = _gvn.transform( cast ); ++ Node* cast = NULL; ++ const Type* t = _gvn.type(obj); ++ if (t->make_ptr() != NULL) { ++ const Type* t_not_null = t->join_speculative(TypePtr::NOTNULL); ++ // Object is already not-null? ++ if (t == t_not_null) { ++ return obj; ++ } ++ cast = ConstraintCastNode::make_cast(Op_CastPP, control(), obj, t_not_null, false); ++ } else if (t->isa_int() != NULL) { ++ cast = ConstraintCastNode::make_cast(Op_CastII, control(), obj, TypeInt::INT, true); ++ } else if (t->isa_long() != NULL) { ++ cast = ConstraintCastNode::make_cast(Op_CastLL, control(), obj, TypeLong::LONG, true); ++ } else { ++ fatal("unexpected type: %s", type2name(t->basic_type())); ++ } ++ cast = _gvn.transform(cast); + + // Scan for instances of 'obj' in the current JVM mapping. + // These instances are known to be not-null after the test. 
+- if (do_replace_in_map) ++ if (do_replace_in_map) { + replace_in_map(obj, cast); +- +- return cast; // Return casted value ++ } ++ return cast; + } + + // Sometimes in intrinsics, we implicitly know an object is not null +diff --git a/src/hotspot/share/opto/node.hpp b/src/hotspot/share/opto/node.hpp +index a36f3bb..1a8eeea 100644 +--- a/src/hotspot/share/opto/node.hpp ++++ b/src/hotspot/share/opto/node.hpp +@@ -52,6 +52,7 @@ class CallNode; + class CallRuntimeNode; + class CallStaticJavaNode; + class CastIINode; ++class CastLLNode; + class CatchNode; + class CatchProjNode; + class CheckCastPPNode; +@@ -666,7 +667,8 @@ public: + DEFINE_CLASS_ID(Phi, Type, 0) + DEFINE_CLASS_ID(ConstraintCast, Type, 1) + DEFINE_CLASS_ID(CastII, ConstraintCast, 0) +- DEFINE_CLASS_ID(CheckCastPP, ConstraintCast, 1) ++ DEFINE_CLASS_ID(CastLL, ConstraintCast, 1) ++ DEFINE_CLASS_ID(CheckCastPP, ConstraintCast, 2) + DEFINE_CLASS_ID(CMove, Type, 3) + DEFINE_CLASS_ID(SafePointScalarObject, Type, 4) + DEFINE_CLASS_ID(DecodeNarrowPtr, Type, 5) +@@ -805,6 +807,7 @@ public: + DEFINE_CLASS_QUERY(CatchProj) + DEFINE_CLASS_QUERY(CheckCastPP) + DEFINE_CLASS_QUERY(CastII) ++ DEFINE_CLASS_QUERY(CastLL) + DEFINE_CLASS_QUERY(ConstraintCast) + DEFINE_CLASS_QUERY(ClearArray) + DEFINE_CLASS_QUERY(CMove) +diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp +index 98b8ade..d2e1927 100644 +--- a/src/hotspot/share/runtime/vmStructs.cpp ++++ b/src/hotspot/share/runtime/vmStructs.cpp +@@ -1608,6 +1608,7 @@ typedef PaddedEnd PaddedObjectMonitor; + declare_c2_type(DecodeNKlassNode, TypeNode) \ + declare_c2_type(ConstraintCastNode, TypeNode) \ + declare_c2_type(CastIINode, ConstraintCastNode) \ ++ declare_c2_type(CastLLNode, ConstraintCastNode) \ + declare_c2_type(CastPPNode, ConstraintCastNode) \ + declare_c2_type(CheckCastPPNode, TypeNode) \ + declare_c2_type(Conv2BNode, Node) \ + diff --git a/8243670-Unexpected-test-result-caused-by-C2-MergeMem.patch b/8243670-Unexpected-test-result-caused-by-C2-MergeMem.patch new file mode 100644 index 0000000..245dfa6 --- /dev/null +++ b/8243670-Unexpected-test-result-caused-by-C2-MergeMem.patch @@ -0,0 +1,170 @@ +diff --git a/src/hotspot/share/opto/cfgnode.cpp b/src/hotspot/share/opto/cfgnode.cpp +index 8c59efb..e32b5e7 100644 +--- a/src/hotspot/share/opto/cfgnode.cpp ++++ b/src/hotspot/share/opto/cfgnode.cpp +@@ -1192,6 +1192,30 @@ Node* PhiNode::Identity(PhaseGVN* phase) { + if (id != NULL) return id; + } + ++ // Looking for phis with identical inputs. If we find one that has ++ // type TypePtr::BOTTOM, replace the current phi with the bottom phi. 
++ if (phase->is_IterGVN() && type() == Type::MEMORY && adr_type() != ++ TypePtr::BOTTOM && !adr_type()->is_known_instance()) { ++ uint phi_len = req(); ++ Node* phi_reg = region(); ++ for (DUIterator_Fast imax, i = phi_reg->fast_outs(imax); i < imax; i++) { ++ Node* u = phi_reg->fast_out(i); ++ if (u->is_Phi() && u->as_Phi()->type() == Type::MEMORY && ++ u->adr_type() == TypePtr::BOTTOM && u->in(0) == phi_reg && ++ u->req() == phi_len) { ++ for (uint j = 1; j < phi_len; j++) { ++ if (in(j) != u->in(j)) { ++ u = NULL; ++ break; ++ } ++ } ++ if (u != NULL) { ++ return u; ++ } ++ } ++ } ++ } ++ + return this; // No identity + } + +diff --git a/src/hotspot/share/opto/memnode.cpp b/src/hotspot/share/opto/memnode.cpp +index 9e22011..bc314c0 100644 +--- a/src/hotspot/share/opto/memnode.cpp ++++ b/src/hotspot/share/opto/memnode.cpp +@@ -4554,24 +4554,6 @@ Node *MergeMemNode::Ideal(PhaseGVN *phase, bool can_reshape) { + } + // else preceding memory was not a MergeMem + +- // replace equivalent phis (unfortunately, they do not GVN together) +- if (new_mem != NULL && new_mem != new_base && +- new_mem->req() == phi_len && new_mem->in(0) == phi_reg) { +- if (new_mem->is_Phi()) { +- PhiNode* phi_mem = new_mem->as_Phi(); +- for (uint i = 1; i < phi_len; i++) { +- if (phi_base->in(i) != phi_mem->in(i)) { +- phi_mem = NULL; +- break; +- } +- } +- if (phi_mem != NULL) { +- // equivalent phi nodes; revert to the def +- new_mem = new_base; +- } +- } +- } +- + // maybe store down a new value + Node* new_in = new_mem; + if (new_in == new_base) new_in = empty_mem; +diff --git a/src/hotspot/share/opto/type.hpp b/src/hotspot/share/opto/type.hpp +index 8103c6f..5928f44 100644 +--- a/src/hotspot/share/opto/type.hpp ++++ b/src/hotspot/share/opto/type.hpp +@@ -453,6 +453,7 @@ public: + const Type* maybe_remove_speculative(bool include_speculative) const; + + virtual bool maybe_null() const { return true; } ++ virtual bool is_known_instance() const { return false; } + + private: + // support arrays +@@ -1390,6 +1391,10 @@ public: + return _ptrtype; + } + ++ bool is_known_instance() const { ++ return _ptrtype->is_known_instance(); ++ } ++ + #ifndef PRODUCT + virtual void dump2( Dict &d, uint depth, outputStream *st ) const; + #endif +diff --git a/test/hotspot/jtreg/compiler/c2/TestReplaceEquivPhis.java b/test/hotspot/jtreg/compiler/c2/TestReplaceEquivPhis.java +new file mode 100644 +index 0000000..d4c93b3 +--- /dev/null ++++ b/test/hotspot/jtreg/compiler/c2/TestReplaceEquivPhis.java +@@ -0,0 +1,77 @@ ++/* ++ * Copyright (c) 2020, Huawei Technologies Co. Ltd. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++ ++/** ++ * @test ++ * @bug 8243670 ++ * @summary Unexpected test result caused by C2 MergeMemNode::Ideal ++ * ++ * @run main/othervm -Xcomp -XX:-SplitIfBlocks ++ * -XX:CompileOnly=compiler.c2.TestReplaceEquivPhis::test ++ * -XX:-BackgroundCompilation compiler.c2.TestReplaceEquivPhis ++ */ ++ ++package compiler.c2; ++ ++public class TestReplaceEquivPhis { ++ ++ public static final int N = 400; ++ public static volatile int instanceCount = 0; ++ public int iFld = 0; ++ public static int iArrFld[] = new int[N]; ++ ++ public int test() { ++ int v = 0; ++ boolean bArr[] = new boolean[N]; ++ ++ for (int i = 1; i < 344; i++) { ++ iFld = i; ++ for (int j = 2; j <177 ; j++) { ++ v = iFld; ++ iFld = TestReplaceEquivPhis.instanceCount; ++ TestReplaceEquivPhis.iArrFld[i] = 0; ++ iFld += TestReplaceEquivPhis.instanceCount; ++ TestReplaceEquivPhis.iArrFld[i] = 0; ++ bArr[j] = false; ++ TestReplaceEquivPhis.instanceCount = 1; ++ ++ for (int k = 1; k < 3; k++) { ++ // do nothing ++ } ++ } ++ } ++ return v; ++ } ++ ++ public static void main(String[] args) { ++ TestReplaceEquivPhis obj = new TestReplaceEquivPhis(); ++ for (int i = 0; i < 5; i++) { ++ int result = obj.test(); ++ if (result != 2) { ++ throw new RuntimeException("Test failed."); ++ } ++ } ++ System.out.println("Test passed."); ++ } ++ ++} diff --git a/ZGC-correct-free-heap-size-excluding-waste-in-rule_allocation_rate.patch b/ZGC-correct-free-heap-size-excluding-waste-in-rule_allocation_rate.patch new file mode 100644 index 0000000..9e71ac4 --- /dev/null +++ b/ZGC-correct-free-heap-size-excluding-waste-in-rule_allocation_rate.patch @@ -0,0 +1,30 @@ +From f3ec83e45b3c6f7f3183c2726701dccd11a00550 Mon Sep 17 00:00:00 2001 +Date: Wed, 3 Jun 2020 09:39:34 +0000 +Subject: [PATCH] ZGC: correct free heap size excluding waste in + rule_allocation_rate + +Summary: : +LLT: +Bug url: +--- + src/hotspot/share/gc/z/zDirector.cpp | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/src/hotspot/share/gc/z/zDirector.cpp b/src/hotspot/share/gc/z/zDirector.cpp +index 209bf0a..6f894c3 100644 +--- a/src/hotspot/share/gc/z/zDirector.cpp ++++ b/src/hotspot/share/gc/z/zDirector.cpp +@@ -120,7 +120,9 @@ bool ZDirector::rule_allocation_rate() const { + // the allocation rate variance, which means the probability is 1 in 1000 + // that a sample is outside of the confidence interval. + const double max_alloc_rate = (ZStatAllocRate::avg() * ZAllocationSpikeTolerance) + (ZStatAllocRate::avg_sd() * one_in_1000); +- const double time_until_oom = free / (max_alloc_rate + 1.0); // Plus 1.0B/s to avoid division by zero ++ ++ // Plus 1.0B/s to avoid division by zero. Small or medium page allow 12.5% waste, so 87.5% used. ++ const double time_until_oom = (0.875 * free) / (max_alloc_rate + 1.0); + + // Calculate max duration of a GC cycle. The duration of GC is a moving + // average, we add ~3.3 sigma to account for the GC duration variance. 
+-- +1.8.3.1 + diff --git a/fix-IfNode-s-bugs.patch b/fix-IfNode-s-bugs.patch new file mode 100644 index 0000000..dd15938 --- /dev/null +++ b/fix-IfNode-s-bugs.patch @@ -0,0 +1,29 @@ +From 466cb0f1a98c7b93e47e6056c460b6ec81864e3b Mon Sep 17 00:00:00 2001 +Date: Tue, 12 May 2020 10:08:51 +0000 +Subject: [PATCH] fix IfNode's bugs + +Summary: : fix wrong use of transform function +LLT: +Bug url: NA +--- + src/hotspot/share/opto/ifnode.cpp | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/src/hotspot/share/opto/ifnode.cpp b/src/hotspot/share/opto/ifnode.cpp +index 6f87d7c..a856200 100644 +--- a/src/hotspot/share/opto/ifnode.cpp ++++ b/src/hotspot/share/opto/ifnode.cpp +@@ -1398,7 +1398,10 @@ Node* IfNode::Ideal(PhaseGVN *phase, bool can_reshape) { + if (in(0) == NULL) return NULL; // Dead loop? + + PhaseIterGVN *igvn = phase->is_IterGVN(); ++ bool delay_state = igvn->delay_transform(); ++ igvn->set_delay_transform(true); + Node* result = fold_compares(igvn); ++ igvn->set_delay_transform(delay_state); + if (result != NULL) { + return result; + } +-- +1.8.3.1 + diff --git a/java-11-openjdk.spec b/java-11-openjdk.spec index 2022463..c94aed1 100644 --- a/java-11-openjdk.spec +++ b/java-11-openjdk.spec @@ -735,7 +735,7 @@ Provides: java-src%{?1} = %{epoch}:%{version}-%{release} Name: java-%{javaver}-%{origin} Version: %{newjavaver}.%{buildver} -Release: 6 +Release: 7 # java-1.5.0-ibm from jpackage.org set Epoch to 1 for unknown reasons # and this change was brought into RHEL-4. java-1.5.0-ibm packages # also included the epoch in their virtual provides. This created a @@ -825,6 +825,14 @@ Patch36: ZGC-in-c1-load-barrier-d0-and-d1-registers-miss-restoring.patch Patch37: fix-compile-error-without-disable-precompiled-headers.patch Patch38: fast-serializer-jdk11.patch Patch39: fix-jck-failure-on-FastSerializer.patch +Patch40: 8223667-ASAN-build-broken.patch +Patch41: 8229495-SIGILL-in-C2-generated-OSR-compilation.patch +Patch42: 8229496-SIGFPE-division-by-zero-in-C2-OSR-compiled-method.patch +Patch43: 8243670-Unexpected-test-result-caused-by-C2-MergeMem.patch +Patch44: fix-IfNode-s-bugs.patch +Patch45: leaf-optimize-in-ParallelScanvageGC.patch +Patch46: ZGC-correct-free-heap-size-excluding-waste-in-rule_allocation_rate.patch + + BuildRequires: autoconf BuildRequires: alsa-lib-devel @@ -1081,6 +1089,13 @@ pushd %{top_level_dir_name} %patch37 -p1 %patch38 -p1 %patch39 -p1 +%patch40 -p1 +%patch41 -p1 +%patch42 -p1 +%patch43 -p1 +%patch44 -p1 +%patch45 -p1 +%patch46 -p1 popd # openjdk %patch1000 @@ -1583,6 +1598,15 @@ require "copy_jdk_configs.lua" %changelog +* Fri Sep 11 2020 noah - 1:11.0.8.10-7 +- add 8223667-ASAN-build-broken.patch +- add 8229495-SIGILL-in-C2-generated-OSR-compilation.patch +- add 8229496-SIGFPE-division-by-zero-in-C2-OSR-compiled-method.patch +- add 8243670-Unexpected-test-result-caused-by-C2-MergeMem.patch +- add fix-IfNode-s-bugs.patch +- add leaf-optimize-in-ParallelScanvageGC.patch +- add ZGC-correct-free-heap-size-excluding-waste-in-rule_allocation_rate.patch + * Tue Sep 8 2020 noah - 1:11.0.8.10-6 - add fast-serializer-jdk11.patch - add fix-jck-failure-on-FastSerializer.patch diff --git a/leaf-optimize-in-ParallelScanvageGC.patch b/leaf-optimize-in-ParallelScanvageGC.patch new file mode 100644 index 0000000..1c2d357 --- /dev/null +++ b/leaf-optimize-in-ParallelScanvageGC.patch @@ -0,0 +1,118 @@ +From 6f43328c953e50be924d16b41e35255f694d8794 Mon Sep 17 00:00:00 2001 +Date: Mon, 6 Apr 2020 18:13:30 +0000 +Subject: [PATCH] leaf optimize in ParallelScanvageGC + +Summary:
: <1. Add an _is_gc_leaf field to Klass; if none of the klass's fields is a reference, then this klass's oops are leaves; this is set in the classfile parser. 2. ParallelScavenge copies the object before push_depth if the oop is a leaf. 3. Leaf oops don't need push_contents.> +LLT: +Bug url: +--- + src/hotspot/share/classfile/classFileParser.cpp | 7 ++++++- + .../share/gc/parallel/psPromotionManager.inline.hpp | 16 ++++++++++++---- + src/hotspot/share/oops/klass.cpp | 2 ++ + src/hotspot/share/oops/klass.hpp | 5 +++++ + 4 files changed, 25 insertions(+), 5 deletions(-) + +diff --git a/src/hotspot/share/classfile/classFileParser.cpp b/src/hotspot/share/classfile/classFileParser.cpp +index bf8ac6f3b..99f99309d 100644 +--- a/src/hotspot/share/classfile/classFileParser.cpp ++++ b/src/hotspot/share/classfile/classFileParser.cpp +@@ -4363,7 +4363,7 @@ void ClassFileParser::layout_fields(ConstantPool* cp, + info->has_nonstatic_fields = has_nonstatic_fields; + } + +-static void fill_oop_maps(const InstanceKlass* k, ++static void fill_oop_maps(InstanceKlass* k, + unsigned int nonstatic_oop_map_count, + const int* nonstatic_oop_offsets, + const unsigned int* nonstatic_oop_counts) { +@@ -4373,6 +4373,11 @@ static void fill_oop_maps(const InstanceKlass* k, + OopMapBlock* this_oop_map = k->start_of_nonstatic_oop_maps(); + const InstanceKlass* const super = k->superklass(); + const unsigned int super_count = super ? super->nonstatic_oop_map_count() : 0; ++ ++ const bool super_is_gc_leaf = super ? super->oop_is_gc_leaf() : true; ++ bool this_is_gc_leaf = super_is_gc_leaf && (nonstatic_oop_map_count == 0); ++ k->set_oop_is_gc_leaf(this_is_gc_leaf); ++ + if (super_count > 0) { + // Copy maps from superklass + OopMapBlock* super_oop_map = super->start_of_nonstatic_oop_maps(); +diff --git a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp +index 1ef900783..07f736cf2 100644 +--- a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp ++++ b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp +@@ -59,7 +59,12 @@ inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) { + } + RawAccess::oop_store(p, o); + } else { +- push_depth(p); ++ // leaf objects are copied in advance, reducing the cost of push and pop ++ if (!o->klass()->oop_is_gc_leaf()) { ++ push_depth(p); ++ } else { ++ copy_and_push_safe_barrier(p); ++ } + } + } + } +@@ -214,7 +219,7 @@ inline oop PSPromotionManager::copy_to_survivor_space(oop o) { + + // Now we have to CAS in the header. + // Make copy visible to threads reading the forwardee. +- if (o->cas_forward_to(new_obj, test_mark, memory_order_release)) { ++ if (o->cas_forward_to(new_obj, test_mark, o->klass()->oop_is_gc_leaf()? memory_order_relaxed : memory_order_release)) { + // We won any races, we "own" this object.
+ assert(new_obj == o->forwardee(), "Sanity"); + +@@ -238,8 +243,11 @@ inline oop PSPromotionManager::copy_to_survivor_space(oop o) { + push_depth(masked_o); + TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes); + } else { +- // we'll just push its contents +- push_contents(new_obj); ++ //leaf object don't have contents, never need push_contents ++ if (!o->klass()->oop_is_gc_leaf()) { ++ // we'll just push its contents ++ push_contents(new_obj); ++ } + } + } else { + // We lost, someone else "owns" this object +diff --git a/src/hotspot/share/oops/klass.cpp b/src/hotspot/share/oops/klass.cpp +index 01c5ed5bd..180ba8f41 100644 +--- a/src/hotspot/share/oops/klass.cpp ++++ b/src/hotspot/share/oops/klass.cpp +@@ -202,6 +202,8 @@ Klass::Klass(KlassID id) : _id(id), + CDS_JAVA_HEAP_ONLY(_archived_mirror = 0;) + _primary_supers[0] = this; + set_super_check_offset(in_bytes(primary_supers_offset())); ++ ++ set_oop_is_gc_leaf(false); + } + + jint Klass::array_layout_helper(BasicType etype) { +diff --git a/src/hotspot/share/oops/klass.hpp b/src/hotspot/share/oops/klass.hpp +index b77a19deb..a624586aa 100644 +--- a/src/hotspot/share/oops/klass.hpp ++++ b/src/hotspot/share/oops/klass.hpp +@@ -165,6 +165,8 @@ class Klass : public Metadata { + // vtable length + int _vtable_len; + ++ bool _is_gc_leaf; ++ + private: + // This is an index into FileMapHeader::_shared_path_table[], to + // associate this class with the JAR file where it's loaded from during +@@ -603,6 +605,9 @@ protected: + is_typeArray_klass_slow()); } + #undef assert_same_query + ++ void set_oop_is_gc_leaf(bool is_gc_leaf) { _is_gc_leaf = is_gc_leaf; } ++ inline bool oop_is_gc_leaf() const { return _is_gc_leaf; } ++ + // Access flags + AccessFlags access_flags() const { return _access_flags; } + void set_access_flags(AccessFlags flags) { _access_flags = flags; } +-- +1.8.3.1 -- Gitee