diff --git a/LoongArch64-support.patch b/LoongArch64-support.patch index a9a6ec6688f8f65e11799caf879c0581504c43da..b1718f98acecb9cd588c65a9093aa1669e2d3a80 100644 --- a/LoongArch64-support.patch +++ b/LoongArch64-support.patch @@ -1,5 +1,11435 @@ +commit 98016d6f75ec4b8b58ad30ae75609a47f93fc54a +Merge: 789ba0f24a3 c5c90ffa45d +Author: aoqi +Date: Sat Feb 8 17:20:42 2025 +0800 + + Merge + + +commit 789ba0f24a3fcaf64609eaa065845861c8de83d8 +Merge: 06f00bc81c7 bdc07d202a9 +Author: aoqi +Date: Sat Feb 8 17:18:33 2025 +0800 + + Merge + + +commit 06f00bc81c7d0dc3e83363fd2b00dd6b03f4d62a +Merge: 15f5afd71be eed263c8077 +Author: aoqi +Date: Thu Oct 17 10:41:06 2024 +0800 + + Merge + + +commit 15f5afd71be34dd61b9044dff02e004eb2bb34b6 +Author: loongson-jvm +Date: Thu Oct 17 10:27:02 2024 +0800 + + Update (2024.10.17) + + 11541: Prevent load reorder of VarHandle::getOpaque + 34943: 8338748: [17u,21u] Test Disconnect.java compile error: cannot find symbol after JDK-8299813 + +diff --git a/src/hotspot/cpu/loongarch/loongarch_64.ad b/src/hotspot/cpu/loongarch/loongarch_64.ad +index 0c6516fc1d5..7e01822e0e9 100644 +--- a/src/hotspot/cpu/loongarch/loongarch_64.ad ++++ b/src/hotspot/cpu/loongarch/loongarch_64.ad +@@ -6444,6 +6444,17 @@ instruct membar_storestore() %{ + ins_pipe(empty); + %} + ++instruct same_addr_load_fence() %{ ++ match(SameAddrLoadFence); ++ ins_cost(400); ++ ++ format %{ "MEMBAR @ same_addr_load_fence" %} ++ ins_encode %{ ++ __ dbar(0x700); ++ %} ++ ins_pipe(pipe_slow); ++%} ++ + //----------Move Instructions-------------------------------------------------- + instruct castX2P(mRegP dst, mRegL src) %{ + match(Set dst (CastX2P src)); +diff --git a/src/hotspot/share/adlc/formssel.cpp b/src/hotspot/share/adlc/formssel.cpp +index b45f432dac9..02bec17ddf3 100644 +--- a/src/hotspot/share/adlc/formssel.cpp ++++ b/src/hotspot/share/adlc/formssel.cpp +@@ -22,6 +22,12 @@ + * + */ + ++/* ++ * This file has been modified by Loongson Technology in 2023, These ++ * modifications are Copyright (c) 2023, Loongson Technology, and are made ++ * available on the same license terms set forth above. ++ */ ++ + // FORMS.CPP - Definitions for ADL Parser Forms Classes + #include "adlc.hpp" + +@@ -4124,6 +4130,7 @@ bool MatchRule::is_ideal_membar() const { + !strcmp(_opType,"MemBarVolatile") || + !strcmp(_opType,"MemBarCPUOrder") || + !strcmp(_opType,"MemBarStoreStore") || ++ !strcmp(_opType,"SameAddrLoadFence" ) || + !strcmp(_opType,"OnSpinWait"); + } + +diff --git a/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp b/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp +index 56fdd2abeb4..17b9bcafd47 100644 +--- a/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp ++++ b/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp +@@ -22,6 +22,12 @@ + * + */ + ++/* ++ * This file has been modified by Loongson Technology in 2023, These ++ * modifications are Copyright (c) 2023, Loongson Technology, and are made ++ * available on the same license terms set forth above. ++ */ ++ + #include "precompiled.hpp" + #include "gc/shared/tlab_globals.hpp" + #include "gc/shared/c2/barrierSetC2.hpp" +@@ -266,6 +272,8 @@ public: + + bool is_volatile = (decorators & MO_SEQ_CST) != 0; + bool is_acquire = (decorators & MO_ACQUIRE) != 0; ++ bool is_relaxed = (decorators & MO_RELAXED) != 0; ++ bool is_unsafe = (decorators & C2_UNSAFE_ACCESS) != 0; + + // If reference is volatile, prevent following volatiles ops from + // floating up before the volatile access. 
+@@ -299,6 +307,13 @@ public: + assert(_leading_membar == NULL || support_IRIW_for_not_multiple_copy_atomic_cpu, "no leading membar expected"); + Node* mb = kit->insert_mem_bar(Op_MemBarAcquire, n); + mb->as_MemBar()->set_trailing_load(); ++ } else if (is_relaxed && is_unsafe) { ++#ifdef LOONGARCH64 ++ assert(kit != nullptr, "unsupported at optimization time"); ++ Node* n = _access.raw_access(); ++ Node* mb = kit->insert_mem_bar(Op_SameAddrLoadFence, n); ++ mb->as_MemBar()->set_trailing_load(); ++#endif + } + } + } +diff --git a/src/hotspot/share/opto/classes.hpp b/src/hotspot/share/opto/classes.hpp +index 614d0b4e112..8d7764eb8a0 100644 +--- a/src/hotspot/share/opto/classes.hpp ++++ b/src/hotspot/share/opto/classes.hpp +@@ -22,6 +22,12 @@ + * + */ + ++/* ++ * This file has been modified by Loongson Technology in 2023, These ++ * modifications are Copyright (c) 2023, Loongson Technology, and are made ++ * available on the same license terms set forth above. ++ */ ++ + #include "utilities/macros.hpp" + + // The giant table of Node classes. +@@ -223,6 +229,7 @@ macro(StoreStoreFence) + macro(MemBarReleaseLock) + macro(MemBarVolatile) + macro(MemBarStoreStore) ++macro(SameAddrLoadFence) + macro(MergeMem) + macro(MinI) + macro(MinL) +diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp +index 4e8e39ffa74..6c7770dbf9c 100644 +--- a/src/hotspot/share/opto/compile.cpp ++++ b/src/hotspot/share/opto/compile.cpp +@@ -22,6 +22,12 @@ + * + */ + ++/* ++ * This file has been modified by Loongson Technology in 2023, These ++ * modifications are Copyright (c) 2023, Loongson Technology, and are made ++ * available on the same license terms set forth above. ++ */ ++ + #include "precompiled.hpp" + #include "jvm_io.h" + #include "asm/macroAssembler.hpp" +@@ -3522,6 +3528,7 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f + n->set_req(MemBarNode::Precedent, top()); + } + break; ++ case Op_SameAddrLoadFence: + case Op_MemBarAcquire: { + if (n->as_MemBar()->trailing_load() && n->req() > MemBarNode::Precedent) { + // At parse time, the trailing MemBarAcquire for a volatile load +diff --git a/src/hotspot/share/opto/memnode.cpp b/src/hotspot/share/opto/memnode.cpp +index 074b129b059..a1886f813c3 100644 +--- a/src/hotspot/share/opto/memnode.cpp ++++ b/src/hotspot/share/opto/memnode.cpp +@@ -22,6 +22,12 @@ + * + */ + ++/* ++ * This file has been modified by Loongson Technology in 2023, These ++ * modifications are Copyright (c) 2023, Loongson Technology, and are made ++ * available on the same license terms set forth above. 
++ */ ++ + #include "precompiled.hpp" + #include "classfile/javaClasses.hpp" + #include "compiler/compileLog.hpp" +@@ -3298,6 +3304,7 @@ MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) { + case Op_MemBarReleaseLock: return new MemBarReleaseLockNode(C, atp, pn); + case Op_MemBarVolatile: return new MemBarVolatileNode(C, atp, pn); + case Op_MemBarCPUOrder: return new MemBarCPUOrderNode(C, atp, pn); ++ case Op_SameAddrLoadFence: return new SameAddrLoadFenceNode(C, atp, pn); + case Op_OnSpinWait: return new OnSpinWaitNode(C, atp, pn); + case Op_Initialize: return new InitializeNode(C, atp, pn); + default: ShouldNotReachHere(); return nullptr; +diff --git a/src/hotspot/share/opto/memnode.hpp b/src/hotspot/share/opto/memnode.hpp +index 99a30486274..71bf997533f 100644 +--- a/src/hotspot/share/opto/memnode.hpp ++++ b/src/hotspot/share/opto/memnode.hpp +@@ -22,6 +22,12 @@ + * + */ + ++/* ++ * This file has been modified by Loongson Technology in 2023, These ++ * modifications are Copyright (c) 2023, Loongson Technology, and are made ++ * available on the same license terms set forth above. ++ */ ++ + #ifndef SHARE_OPTO_MEMNODE_HPP + #define SHARE_OPTO_MEMNODE_HPP + +@@ -1336,6 +1342,14 @@ public: + virtual uint ideal_reg() const { return 0; } // not matched in the AD file + }; + ++// Used to prevent LoadLoad reorder for same address. ++class SameAddrLoadFenceNode: public MemBarNode { ++public: ++ SameAddrLoadFenceNode(Compile* C, int alias_idx, Node* precedent) ++ : MemBarNode(C, alias_idx, precedent) {} ++ virtual int Opcode() const; ++}; ++ + class OnSpinWaitNode: public MemBarNode { + public: + OnSpinWaitNode(Compile* C, int alias_idx, Node* precedent) +diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp +index 33de84a68c1..7dc3d58f381 100644 +--- a/src/hotspot/share/runtime/vmStructs.cpp ++++ b/src/hotspot/share/runtime/vmStructs.cpp +@@ -22,6 +22,12 @@ + * + */ + ++/* ++ * This file has been modified by Loongson Technology in 2023, These ++ * modifications are Copyright (c) 2023, Loongson Technology, and are made ++ * available on the same license terms set forth above. 
++ */ ++ + #include "precompiled.hpp" + #include "cds/filemap.hpp" + #include "ci/ciField.hpp" +@@ -1597,6 +1603,7 @@ typedef HashtableEntry KlassHashtableEntry; + declare_c2_type(StoreFenceNode, MemBarNode) \ + declare_c2_type(MemBarVolatileNode, MemBarNode) \ + declare_c2_type(MemBarCPUOrderNode, MemBarNode) \ ++ declare_c2_type(SameAddrLoadFenceNode, MemBarNode) \ + declare_c2_type(OnSpinWaitNode, MemBarNode) \ + declare_c2_type(BlackholeNode, MultiNode) \ + declare_c2_type(InitializeNode, MemBarNode) \ +diff --git a/test/jdk/java/nio/channels/DatagramChannel/Disconnect.java b/test/jdk/java/nio/channels/DatagramChannel/Disconnect.java +index cdc5882fefd..16120f85168 100644 +--- a/test/jdk/java/nio/channels/DatagramChannel/Disconnect.java ++++ b/test/jdk/java/nio/channels/DatagramChannel/Disconnect.java +@@ -52,7 +52,7 @@ public class Disconnect { + if (IPSupport.hasIPv4()) { + // test with IPv4 only + try (DatagramChannel dc = DatagramChannel.open(StandardProtocolFamily.INET)) { +- InetAddress lo4 = InetAddress.ofLiteral("127.0.0.1"); ++ InetAddress lo4 = InetAddress.getByName("127.0.0.1"); + System.out.println("Testing with INET family and " + lo4); + test(dc, lo4); + test(dc, lo4); +@@ -62,7 +62,7 @@ public class Disconnect { + if (IPSupport.hasIPv6()) { + // test with IPv6 only + try (DatagramChannel dc = DatagramChannel.open(StandardProtocolFamily.INET6)) { +- InetAddress lo6 = InetAddress.ofLiteral("::1"); ++ InetAddress lo6 = InetAddress.getByName("::1"); + System.out.println("Testing with INET6 family and " + lo6); + test(dc, lo6); + test(dc, lo6); + +commit 76487ad345f72808f2dbed45bde1fbe52f91f7b0 +Merge: c6583c8c5cb 33cd4b41b38 +Author: aoqi +Date: Wed Oct 16 18:04:00 2024 +0800 + + Merge + + +commit c6583c8c5cbd49c33df3dc3e65eb9644b8755bfc +Author: loongson-jvm +Date: Wed Oct 16 18:02:45 2024 +0800 + + Update (2024.10.16) + + 34860: [MIPS] build with gtest failed after JDK-8300806 + +diff --git a/src/hotspot/cpu/mips/register_mips.hpp b/src/hotspot/cpu/mips/register_mips.hpp +index 4f74717c24f..c2124538a0f 100644 +--- a/src/hotspot/cpu/mips/register_mips.hpp ++++ b/src/hotspot/cpu/mips/register_mips.hpp +@@ -1,6 +1,6 @@ + /* + * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. +- * Copyright (c) 2015, 2022, Loongson Technology. All rights reserved. ++ * Copyright (c) 2015, 2024, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it +@@ -105,8 +105,9 @@ CONSTANT_REGISTER_DECLARATION(Register, i31, (31)); + #define NOREG ((Register)(noreg_RegisterEnumValue)) + + #define I0 ((Register)(i0_RegisterEnumValue)) +-#define I1 ((Register)(i1_RegisterEnumValue)) +-#define I2 ((Register)(i2_RegisterEnumValue)) ++// Conflict with I1 and I2 in googlemock/include/gmock/gmock-actions.h ++//#define I1 ((Register)(i1_RegisterEnumValue)) ++//#define I2 ((Register)(i2_RegisterEnumValue)) + #define I3 ((Register)(i3_RegisterEnumValue)) + #define I4 ((Register)(i4_RegisterEnumValue)) + #define I5 ((Register)(i5_RegisterEnumValue)) + +commit 6d0dc6b25f6441f3d37672ea377cc9c75e958693 +Merge: 0389c6ca1b0 03cbfaa733b +Author: aoqi +Date: Wed Oct 16 17:28:09 2024 +0800 + + Merge + + +commit 0389c6ca1b08f91f75c63ed72edbe16abe2210c0 +Merge: 00ca185ba7e 833f65ecb30 +Author: aoqi +Date: Fri Jul 26 14:48:30 2024 +0800 + + Merge + + +commit 00ca185ba7eaf8a3bf9412bad9305fe742c3378c +Merge: 4cc0b3e9764 c1c901179e8 +Author: aoqi +Date: Fri Jul 26 14:47:58 2024 +0800 + + Merge + + +commit 4cc0b3e97643ac46a8e64ea4f3c62510fe49e10a +Author: loongson-jvm +Date: Fri Jul 26 11:32:26 2024 +0800 + + Update (2024.07.26) + + 33980: Fix generate__kernel_rem_pio2 + +diff --git a/src/hotspot/cpu/loongarch/macroAssembler_loongarch_trig.cpp b/src/hotspot/cpu/loongarch/macroAssembler_loongarch_trig.cpp +index 63b5b0da7e7..9c74be2dbd3 100644 +--- a/src/hotspot/cpu/loongarch/macroAssembler_loongarch_trig.cpp ++++ b/src/hotspot/cpu/loongarch/macroAssembler_loongarch_trig.cpp +@@ -1,6 +1,6 @@ + /* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, Cavium. All rights reserved. (By BELLSOFT) +- * Copyright (c) 2022, Loongson Technology. All rights reserved. ++ * Copyright (c) 2022, 2024, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it +@@ -907,7 +907,7 @@ void MacroAssembler::generate__kernel_rem_pio2(address two_over_pi, address pio2 + b(Q_DONE); + bind(JX_IS_0); + if (UseLASX) { +- xvfmul_d(v28, v18, v6); // f[0,1] * x[0] ++ xvfmul_d(v28, v18, v6); // f[0,3] * x[0] + fmul_d(v30, v19, v6); // f[4] * x[0] + } else { + vfmul_d(v28, v18, v6); // f[0,1] * x[0] +@@ -1136,6 +1136,7 @@ void MacroAssembler::generate__kernel_rem_pio2(address two_over_pi, address pio2 + st_w(tmp2, SCR2, 0); + addi_w(SCR1, SCR1, 24); + addi_w(jz, jz, 1); ++ alsl_d(SCR2, jz, iqBase, 2 - 1); + st_w(tmp3, SCR2, 0); // iq[jz] = (int) fw + b(Z_ZERO_CHECK_DONE); + bind(Z_IS_LESS_THAN_TWO24B); + +commit c5070ec1c962df668c876c96554747da28c35520 +Merge: c53840e66c9 c9d83d392f3 +Author: aoqi +Date: Fri Jul 26 09:24:27 2024 +0800 + + Merge + + +commit c53840e66c908f283ba8cf2b167cb3f5d68008fd +Author: loongson-jvm +Date: Fri Apr 26 18:35:30 2024 +0800 + + Update (2024.04.26, 2nd) + + 34058: LA port of 8322122: Enhance generation of addresses + +diff --git a/src/hotspot/cpu/loongarch/c1_LIRGenerator_loongarch_64.cpp b/src/hotspot/cpu/loongarch/c1_LIRGenerator_loongarch_64.cpp +index fedcc547d48..28298dcc375 100644 +--- a/src/hotspot/cpu/loongarch/c1_LIRGenerator_loongarch_64.cpp ++++ b/src/hotspot/cpu/loongarch/c1_LIRGenerator_loongarch_64.cpp +@@ -1,6 +1,6 @@ + /* + * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved. +- * Copyright (c) 2021, 2022, Loongson Technology. All rights reserved. 
++ * Copyright (c) 2021, 2024, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it +@@ -157,8 +157,10 @@ LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index, + if (index->is_register()) { + // apply the shift and accumulate the displacement + if (shift > 0) { +- LIR_Opr tmp = new_pointer_register(); +- __ shift_left(index, shift, tmp); ++ // Use long register to avoid overflow when shifting large index values left. ++ LIR_Opr tmp = new_register(T_LONG); ++ __ convert(Bytecodes::_i2l, index, tmp); ++ __ shift_left(tmp, shift, tmp); + index = tmp; + } + if (large_disp != 0) { + +commit af76bcbea0b9c4b802af19ba09b062ef510e1d97 +Merge: 38b11e8e016 1c40f899c9c +Author: aoqi +Date: Fri Apr 26 18:25:16 2024 +0800 + + Merge + + +commit 38b11e8e016687f029cad4c183de4aaaf49020d0 +Author: loongson-jvm +Date: Fri Apr 26 18:24:13 2024 +0800 + + Update (2024.04.26) + + 33160: LA port of 8261837: SIGSEGV in ciVirtualCallTypeData::translate_from + +diff --git a/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch_64.cpp b/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch_64.cpp +index 8665c2d8881..cde86e3b3a1 100644 +--- a/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch_64.cpp ++++ b/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch_64.cpp +@@ -3013,7 +3013,10 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { + __ verify_oop(obj); + + if (tmp != obj) { ++ assert_different_registers(obj, tmp, SCR1, SCR2, mdo_addr.base(), mdo_addr.index()); + __ move(tmp, obj); ++ } else { ++ assert_different_registers(obj, SCR1, SCR2, mdo_addr.base(), mdo_addr.index()); + } + if (do_null) { + __ bnez(tmp, update); +@@ -3072,10 +3075,11 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { + __ beqz(SCR2, none); + __ li(SCR1, (u1)TypeEntries::null_seen); + __ beq(SCR2, SCR1, none); +- // There is a chance that the checks above (re-reading profiling +- // data from memory) fail if another thread has just set the ++ // There is a chance that the checks above ++ // fail if another thread has just set the + // profiling to this obj's klass + membar_acquire(); ++ __ XOR(tmp, tmp, SCR2); // get back original value before XOR + __ ld_ptr(SCR2, mdo_addr); + __ XOR(tmp, tmp, SCR2); + assert(TypeEntries::type_klass_mask == -4, "must be"); +@@ -3102,6 +3106,11 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { + __ bind(none); + // first time here. Set profile type. + __ st_ptr(tmp, mdo_addr); ++#ifdef ASSERT ++ assert(TypeEntries::type_mask == -2, "must be"); ++ __ bstrpick_d(tmp, tmp, 63, 1); ++ __ verify_klass_ptr(tmp); ++#endif + } + } else { + // There's a single possible klass at this profile point +@@ -3135,6 +3144,11 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { + #endif + // first time here. Set profile type. 
+ __ st_ptr(tmp, mdo_addr); ++#ifdef ASSERT ++ assert(TypeEntries::type_mask == -2, "must be"); ++ __ bstrpick_d(tmp, tmp, 63, 1); ++ __ verify_klass_ptr(tmp); ++#endif + } else { + assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && + ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent"); +diff --git a/src/hotspot/cpu/loongarch/interp_masm_loongarch_64.cpp b/src/hotspot/cpu/loongarch/interp_masm_loongarch_64.cpp +index 9f1bf88c605..cb8ad8a359c 100644 +--- a/src/hotspot/cpu/loongarch/interp_masm_loongarch_64.cpp ++++ b/src/hotspot/cpu/loongarch/interp_masm_loongarch_64.cpp +@@ -1642,6 +1642,7 @@ void InterpreterMacroAssembler::narrow(Register result) { + + + void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) { ++ assert_different_registers(obj, AT, T5, mdo_addr.base(), mdo_addr.index()); + Label update, next, none; + + verify_oop(obj); +@@ -1680,25 +1681,21 @@ void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& md + xorr(obj, obj, AT); + + assert(TypeEntries::type_klass_mask == -4, "must be"); +- bstrpick_d(AT, obj, 63, 2); +- beqz(AT, next); ++ bstrpick_d(T5, obj, 63, 2); ++ beqz(T5, next); + +- andi(AT, obj, TypeEntries::type_unknown); +- bnez(AT, next); ++ andi(T5, obj, TypeEntries::type_unknown); ++ bnez(T5, next); + +- if (mdo_addr.index() == noreg) { +- ld_d(AT, mdo_addr); +- } else { +- ld_d(AT, T0, mdo_addr.disp()); +- } + beqz(AT, none); + +- addi_d(AT, AT, -(TypeEntries::null_seen)); +- beqz(AT, none); ++ addi_d(T5, AT, -(TypeEntries::null_seen)); ++ beqz(T5, none); + +- // There is a chance that the checks above (re-reading profiling +- // data from memory) fail if another thread has just set the ++ // There is a chance that the checks above ++ // fail if another thread has just set the + // profiling to this obj's klass ++ xorr(obj, obj, AT); // get back original value before XOR + if (mdo_addr.index() == noreg) { + ld_d(AT, mdo_addr); + } else { +@@ -1730,6 +1727,11 @@ void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& md + } else { + st_d(obj, T0, mdo_addr.disp()); + } ++#ifdef ASSERT ++ assert(TypeEntries::type_mask == -2, "must be"); ++ bstrpick_d(obj, obj, 63, 1); ++ verify_klass_ptr(obj); ++#endif + + bind(next); + if (mdo_addr.index() != noreg) { + +commit a41749611bd86a21d5021378bc8b0cf6c9caa1f7 +Merge: 202b0ef58d9 39c9e9d2bf8 +Author: aoqi +Date: Fri Apr 26 18:07:56 2024 +0800 + + Merge + +diff --cc src/jdk.hotspot.agent/linux/native/libsaproc/ps_proc.c +index d991f29cbb1,1101b999961..301c9f9f26e +--- a/src/jdk.hotspot.agent/linux/native/libsaproc/ps_proc.c ++++ b/src/jdk.hotspot.agent/linux/native/libsaproc/ps_proc.c +@@@ -144,8 -134,17 +140,17 @@@ static bool process_get_lwp_regs(struc + #define PTRACE_GETREGS_REQ PT_GETREGS + #endif + +- #if defined(PTRACE_GETREGS_REQ) && !defined(loongarch64) +- if (ptrace_getregs(PTRACE_GETREGS_REQ, pid, user, NULL) < 0) { ++ #if defined(PTRACE_GETREGSET) ++ struct iovec iov; ++ iov.iov_base = user; ++ iov.iov_len = sizeof(*user); ++ if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, (void*) &iov) < 0) { ++ print_debug("ptrace(PTRACE_GETREGSET, ...) failed for lwp %d\n", pid); ++ return false; ++ } ++ return true; + -#elif defined(PTRACE_GETREGS_REQ) +++#elif defined(PTRACE_GETREGS_REQ) && !defined(loongarch64) ++ if (ptrace(PTRACE_GETREGS_REQ, pid, NULL, user) < 0) { + print_debug("ptrace(PTRACE_GETREGS, ...) 
failed for lwp(%d) errno(%d) \"%s\"\n", pid, + errno, strerror(errno)); + return false; +diff --cc test/jdk/sun/security/pkcs11/PKCS11Test.java +index 99295d779c5,9c61ffe47cf..fdcac49a58c +--- a/test/jdk/sun/security/pkcs11/PKCS11Test.java ++++ b/test/jdk/sun/security/pkcs11/PKCS11Test.java +@@@ -689,32 -602,27 +608,30 @@@ public abstract class PKCS11Test + } + + osMap = new HashMap<>(); +- osMap.put("Linux-i386-32", new String[] { ++ osMap.put("Linux-i386-32", new String[]{ + "/usr/lib/i386-linux-gnu/", + "/usr/lib32/", +- "/usr/lib/" }); +- osMap.put("Linux-amd64-64", new String[] { ++ "/usr/lib/"}); ++ osMap.put("Linux-amd64-64", new String[]{ + "/usr/lib/x86_64-linux-gnu/", + "/usr/lib/x86_64-linux-gnu/nss/", +- "/usr/lib64/" }); +- osMap.put("Linux-ppc64-64", new String[] { "/usr/lib64/" }); +- osMap.put("Linux-ppc64le-64", new String[] { "/usr/lib64/" }); ++ "/usr/lib64/"}); ++ osMap.put("Linux-ppc64-64", new String[]{"/usr/lib64/"}); + + osMap.put("Linux-mips64el-64", new String[]{"/usr/lib64/"}); + + osMap.put("Linux-loongarch64-64", new String[]{"/usr/lib/loongarch64-linux-gnu/", + + "/usr/lib64/" }); +- osMap.put("Linux-s390x-64", new String[] { "/usr/lib64/" }); +- osMap.put("Windows-x86-32", new String[] {}); +- osMap.put("Windows-amd64-64", new String[] {}); +- osMap.put("MacOSX-x86_64-64", new String[] {}); +- osMap.put("Linux-arm-32", new String[] { ++ osMap.put("Linux-ppc64le-64", new String[]{"/usr/lib64/"}); ++ osMap.put("Linux-s390x-64", new String[]{"/usr/lib64/"}); ++ osMap.put("Windows-x86-32", new String[]{}); ++ osMap.put("Windows-amd64-64", new String[]{}); ++ osMap.put("MacOSX-x86_64-64", new String[]{}); ++ osMap.put("Linux-arm-32", new String[]{ + "/usr/lib/arm-linux-gnueabi/nss/", +- "/usr/lib/arm-linux-gnueabihf/nss/" }); +- // Exclude linux-aarch64 at the moment until the following bug is fixed: +- // 8296631: NSS tests failing on OL9 linux-aarch64 hosts +- // osMap.put("Linux-aarch64-64", new String[] { +- // "/usr/lib/aarch64-linux-gnu/", +- // "/usr/lib/aarch64-linux-gnu/nss/", +- // "/usr/lib64/" }); ++ "/usr/lib/arm-linux-gnueabihf/nss/"}); ++ osMap.put("Linux-aarch64-64", new String[] { ++ "/usr/lib/aarch64-linux-gnu/", ++ "/usr/lib/aarch64-linux-gnu/nss/", ++ "/usr/lib64/" }); + return osMap; + } + + +commit 202b0ef58d9311f534f9bd10606e0e45accf92b7 +Merge: 5f77562ba03 ca760c86642 +Author: aoqi +Date: Fri Jan 26 16:40:44 2024 +0800 + + Merge + + +commit 5f77562ba03d1713d3615d84f8a3229a39258d41 +Author: loongson-jvm +Date: Fri Jan 26 16:39:46 2024 +0800 + + Update (2024.01.26, 2nd) + + 24527: Fix a typo for invokeinterface in #8604 + +diff --git a/src/hotspot/cpu/loongarch/templateTable_loongarch_64.cpp b/src/hotspot/cpu/loongarch/templateTable_loongarch_64.cpp +index c0d1daea305..2474f90c247 100644 +--- a/src/hotspot/cpu/loongarch/templateTable_loongarch_64.cpp ++++ b/src/hotspot/cpu/loongarch/templateTable_loongarch_64.cpp +@@ -3446,7 +3446,6 @@ void TemplateTable::invokeinterface(int byte_no) { + + __ bind(no_such_method); + // throw exception +- __ pop(Rmethod); // pop return address (pushed by prepare_invoke) + __ restore_bcp(); + __ restore_locals(); + // Pass arguments for generating a verbose error message. +@@ -3460,7 +3459,6 @@ void TemplateTable::invokeinterface(int byte_no) { + + __ bind(no_such_interface); + // throw exception +- __ pop(Rmethod); // pop return address (pushed by prepare_invoke) + __ restore_bcp(); + __ restore_locals(); + // Pass arguments for generating a verbose error message. 
+ +commit fe193bcbc1cb971738db7fa2d38a9d69456a2c6c +Author: loongson-jvm +Date: Fri Jan 26 16:29:44 2024 +0800 + + Update (2024.01.26) + + 29494: Fix assert(_succ != current) failed: invariant + 30985: Insert acqure membar for load-exclusive with acquire + 26386: Fix atomic ops with memory order relaxed + 28135: Add linux-loongarch64 to CheckedFeatures.notImplemented + 33199: GHA testing failed + +diff --git a/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch_64.cpp b/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch_64.cpp +index 2ddf19a6e5a..8665c2d8881 100644 +--- a/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch_64.cpp ++++ b/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch_64.cpp +@@ -1541,12 +1541,16 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { + + void LIR_Assembler::casw(Register addr, Register newval, Register cmpval, bool sign) { + __ cmpxchg32(Address(addr, 0), cmpval, newval, SCR1, sign, +- /* retold */ false, /* barrier */ true, /* weak */ false, /* exchage */ false); ++ /* retold */ false, /* acquire */ true, /* weak */ false, /* exchange */ false); ++ // LA SC equals store-conditional dbar, so no need AnyAny after CAS. ++ //__ membar(__ AnyAny); + } + + void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) { + __ cmpxchg(Address(addr, 0), cmpval, newval, SCR1, +- /* retold */ false, /* barrier */ true, /* weak */ false, /* exchage */ false); ++ /* retold */ false, /* acquire */ true, /* weak */ false, /* exchange */ false); ++ // LA SC equals store-conditional dbar, so no need AnyAny after CAS. ++ //__ membar(__ AnyAny); + } + + void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { +diff --git a/src/hotspot/cpu/loongarch/c1_MacroAssembler_loongarch_64.cpp b/src/hotspot/cpu/loongarch/c1_MacroAssembler_loongarch_64.cpp +index 56c6281d415..0221951342a 100644 +--- a/src/hotspot/cpu/loongarch/c1_MacroAssembler_loongarch_64.cpp ++++ b/src/hotspot/cpu/loongarch/c1_MacroAssembler_loongarch_64.cpp +@@ -86,7 +86,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr + // displaced header address in the object header - if it is not the same, get the + // object header instead + lea(SCR2, Address(obj, hdr_offset)); +- cmpxchg(Address(SCR2, 0), hdr, disp_hdr, SCR1, true, false, done); ++ cmpxchg(Address(SCR2, 0), hdr, disp_hdr, SCR1, true, true /* acquire */, done); + // if the object header was the same, we're done + // if the object header was not the same, it is now in the hdr register + // => test if it is a stack pointer into the same stack (recursive locking), i.e.: +@@ -146,9 +146,9 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_ + // we do unlocking via runtime call + if (hdr_offset) { + lea(SCR1, Address(obj, hdr_offset)); +- cmpxchg(Address(SCR1, 0), disp_hdr, hdr, SCR2, false, false, done, &slow_case); ++ cmpxchg(Address(SCR1, 0), disp_hdr, hdr, SCR2, false, true /* acquire */, done, &slow_case); + } else { +- cmpxchg(Address(obj, 0), disp_hdr, hdr, SCR2, false, false, done, &slow_case); ++ cmpxchg(Address(obj, 0), disp_hdr, hdr, SCR2, false, true /* acquire */, done, &slow_case); + } + // done + bind(done); +diff --git a/src/hotspot/cpu/loongarch/c2_MacroAssembler_loongarch.cpp b/src/hotspot/cpu/loongarch/c2_MacroAssembler_loongarch.cpp +index ef520a39ff3..0c91c74d63e 100644 +--- a/src/hotspot/cpu/loongarch/c2_MacroAssembler_loongarch.cpp ++++ b/src/hotspot/cpu/loongarch/c2_MacroAssembler_loongarch.cpp +@@ -195,7 +195,7 @@ void 
C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register res + + if (PrintBiasedLockingStatistics) { + Label SUCC, FAIL; +- cmpxchg(Address(objReg, 0), tmpReg, boxReg, scrReg, true, false, SUCC, &FAIL); // Updates tmpReg ++ cmpxchg(Address(objReg, 0), tmpReg, boxReg, scrReg, true, true /* acquire */, SUCC, &FAIL); // Updates tmpReg + bind(SUCC); + atomic_inc32((address)BiasedLocking::fast_path_entry_count_addr(), 1, AT, scrReg); + li(resReg, 1); +@@ -203,7 +203,7 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register res + bind(FAIL); + } else { + // If cmpxchg is succ, then scrReg = 1 +- cmpxchg(Address(objReg, 0), tmpReg, boxReg, scrReg, true, false, DONE_SET); // Updates tmpReg ++ cmpxchg(Address(objReg, 0), tmpReg, boxReg, scrReg, true, true /* acquire */, DONE_SET); // Updates tmpReg + } + + // Recursive locking +@@ -248,7 +248,7 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register res + #endif + // It's inflated and appears unlocked + addi_d(tmpReg, tmpReg, ObjectMonitor::owner_offset_in_bytes() - 2); +- cmpxchg(Address(tmpReg, 0), R0, TREG, scrReg, false, false); ++ cmpxchg(Address(tmpReg, 0), R0, TREG, scrReg, false, true /* acquire */); + // Intentional fall-through into DONE ... + + bind(DONE_SET); +@@ -374,7 +374,7 @@ void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register r + + bind(Stacked); + ld_d(tmpReg, Address(boxReg, 0)); +- cmpxchg(Address(objReg, 0), boxReg, tmpReg, AT, false, false); ++ cmpxchg(Address(objReg, 0), boxReg, tmpReg, AT, false, true /* acquire */); + + bind(DONE_SET); + move(resReg, AT); +diff --git a/src/hotspot/cpu/loongarch/gc/shenandoah/shenandoahBarrierSetAssembler_loongarch.cpp b/src/hotspot/cpu/loongarch/gc/shenandoah/shenandoahBarrierSetAssembler_loongarch.cpp +index 7cf552e283a..e90623fe989 100644 +--- a/src/hotspot/cpu/loongarch/gc/shenandoah/shenandoahBarrierSetAssembler_loongarch.cpp ++++ b/src/hotspot/cpu/loongarch/gc/shenandoah/shenandoahBarrierSetAssembler_loongarch.cpp +@@ -515,9 +515,9 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm, + + if (is_narrow) { + __ cmpxchg32(addr, expected, new_val, tmp2, false /* sign */, false /* retold */, +- acquire /* barrier */, false /* weak */, true /* exchange */); ++ acquire /* acquire */, false /* weak */, true /* exchange */); + } else { +- __ cmpxchg(addr, expected, new_val, tmp2, false /* retold */, acquire /* barrier */, ++ __ cmpxchg(addr, expected, new_val, tmp2, false /* retold */, acquire /* acquire */, + false /* weak */, true /* exchange */); + } + // tmp2 holds value fetched. +@@ -581,9 +581,9 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm, + // compares result with expected. + if (is_narrow) { + __ cmpxchg32(addr, tmp2, new_val, tmp1, false /* sign */, false /* retold */, +- acquire /* barrier */, false /* weak */, false /* exchange */); ++ acquire /* acquire */, false /* weak */, false /* exchange */); + } else { +- __ cmpxchg(addr, tmp2, new_val, tmp1, false /* retold */, acquire /* barrier */, ++ __ cmpxchg(addr, tmp2, new_val, tmp1, false /* retold */, acquire /* acquire */, + false /* weak */, false /* exchange */); + } + // tmp1 set iff success, tmp2 holds value fetched. 
+diff --git a/src/hotspot/cpu/loongarch/gc/z/z_loongarch_64.ad b/src/hotspot/cpu/loongarch/gc/z/z_loongarch_64.ad +index 59656e75376..262cfd50b65 100644 +--- a/src/hotspot/cpu/loongarch/gc/z/z_loongarch_64.ad ++++ b/src/hotspot/cpu/loongarch/gc/z/z_loongarch_64.ad +@@ -64,12 +64,12 @@ static void z_compare_and_swap(MacroAssembler& _masm, const MachNode* node, + #endif + Address addr(mem); + if (node->barrier_data() == ZLoadBarrierElided) { +- __ cmpxchg(addr, oldval, newval, tmp, false /* retold */, acquire /* barrier */, ++ __ cmpxchg(addr, oldval, newval, tmp, false /* retold */, acquire /* acquire */, + weak /* weak */, false /* exchange */); + __ move(res, tmp); + } else { + __ move(tmp, oldval); +- __ cmpxchg(addr, tmp, newval, AT, true /* retold */, acquire /* barrier */, ++ __ cmpxchg(addr, tmp, newval, AT, true /* retold */, acquire /* acquire */, + false /* weak */, false /* exchange */); + __ move(res, AT); + +@@ -78,7 +78,7 @@ static void z_compare_and_swap(MacroAssembler& _masm, const MachNode* node, + __ andr(AT, AT, tmp); + __ beqz(AT, good); + z_load_barrier_slow_path(_masm, node, addr, tmp, res /* used as tmp */); +- __ cmpxchg(addr, oldval, newval, tmp, false /* retold */, acquire /* barrier */, weak /* weak */, false /* exchange */); ++ __ cmpxchg(addr, oldval, newval, tmp, false /* retold */, acquire /* acquire */, weak /* weak */, false /* exchange */); + __ move(res, tmp); + __ bind(good); + } +diff --git a/src/hotspot/cpu/loongarch/interp_masm_loongarch_64.cpp b/src/hotspot/cpu/loongarch/interp_masm_loongarch_64.cpp +index fa65d10765c..9f1bf88c605 100644 +--- a/src/hotspot/cpu/loongarch/interp_masm_loongarch_64.cpp ++++ b/src/hotspot/cpu/loongarch/interp_masm_loongarch_64.cpp +@@ -879,13 +879,13 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) { + + if (PrintBiasedLockingStatistics) { + Label succ, fail; +- cmpxchg(Address(scr_reg, 0), tmp_reg, lock_reg, AT, true, false, succ, &fail); ++ cmpxchg(Address(scr_reg, 0), tmp_reg, lock_reg, AT, true, true /* acquire */, succ, &fail); + bind(succ); + atomic_inc32((address)BiasedLocking::fast_path_entry_count_addr(), 1, AT, scr_reg); + b(done); + bind(fail); + } else { +- cmpxchg(Address(scr_reg, 0), tmp_reg, lock_reg, AT, true, false, done); ++ cmpxchg(Address(scr_reg, 0), tmp_reg, lock_reg, AT, true, true /* acquire */, done); + } + + // Test if the oopMark is an obvious stack pointer, i.e., +@@ -959,7 +959,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) { + beqz(hdr_reg, done); + + // Atomic swap back the old header +- cmpxchg(Address(scr_reg, 0), tmp_reg, hdr_reg, AT, false, false, done); ++ cmpxchg(Address(scr_reg, 0), tmp_reg, hdr_reg, AT, false, true /* acquire */, done); + + // Call the runtime routine for slow case. 
+ st_d(scr_reg, lock_reg, BasicObjectLock::obj_offset_in_bytes()); // restore obj +diff --git a/src/hotspot/cpu/loongarch/loongarch_64.ad b/src/hotspot/cpu/loongarch/loongarch_64.ad +index f1bb1c2f6cb..0c6516fc1d5 100644 +--- a/src/hotspot/cpu/loongarch/loongarch_64.ad ++++ b/src/hotspot/cpu/loongarch/loongarch_64.ad +@@ -10585,9 +10585,9 @@ instruct compareAndSwapI(mRegI res, mRegP mem_ptr, mRegI oldval, mRegI newval) % + Address addr($mem_ptr$$Register, 0); + + if (res != addr.base() && res != oldval && res != newval) { +- __ cmpxchg32(addr, oldval, newval, res, true, false, true); ++ __ cmpxchg32(addr, oldval, newval, res, true, false, true /* acquire */); + } else { +- __ cmpxchg32(addr, oldval, newval, AT, true, false, true); ++ __ cmpxchg32(addr, oldval, newval, AT, true, false, true /* acquire */); + __ move(res, AT); + } + %} +@@ -10606,9 +10606,9 @@ instruct compareAndSwapL(mRegI res, mRegP mem_ptr, mRegL oldval, mRegL newval) % + Address addr($mem_ptr$$Register, 0); + + if (res != addr.base() && res != oldval && res != newval) { +- __ cmpxchg(addr, oldval, newval, res, false, true); ++ __ cmpxchg(addr, oldval, newval, res, false, true /* acquire */); + } else { +- __ cmpxchg(addr, oldval, newval, AT, false, true); ++ __ cmpxchg(addr, oldval, newval, AT, false, true /* acquire */); + __ move(res, AT); + } + %} +@@ -10627,9 +10627,9 @@ instruct compareAndSwapP(mRegI res, mRegP mem_ptr, mRegP oldval, mRegP newval) % + Address addr($mem_ptr$$Register, 0); + + if (res != addr.base() && res != oldval && res != newval) { +- __ cmpxchg(addr, oldval, newval, res, false, true); ++ __ cmpxchg(addr, oldval, newval, res, false, true /* acquire */); + } else { +- __ cmpxchg(addr, oldval, newval, AT, false, true); ++ __ cmpxchg(addr, oldval, newval, AT, false, true /* acquire */); + __ move(res, AT); + } + %} +@@ -10647,9 +10647,9 @@ instruct compareAndSwapN(mRegI res, mRegP mem_ptr, mRegN oldval, mRegN newval) % + Address addr($mem_ptr$$Register, 0); + + if (res != addr.base() && res != oldval && res != newval) { +- __ cmpxchg32(addr, oldval, newval, res, false, false, true); ++ __ cmpxchg32(addr, oldval, newval, res, false, false, true /* acquire */); + } else { +- __ cmpxchg32(addr, oldval, newval, AT, false, false, true); ++ __ cmpxchg32(addr, oldval, newval, AT, false, false, true /* acquire */); + __ move(res, AT); + } + %} +@@ -10788,14 +10788,14 @@ instruct compareAndExchangeI(mRegI res, indirect mem, mRegI oldval, mRegI newval + ins_cost(2 * MEMORY_REF_COST); + effect(TEMP_DEF res); + format %{ +- "cmpxchg32 $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @compareAndExchangeI" ++ "CMPXCHG $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @compareAndExchangeI" + %} + ins_encode %{ + Register newval = $newval$$Register; + Register oldval = $oldval$$Register; + Register res = $res$$Register; + Address addr(as_Register($mem$$base)); +- __ cmpxchg32(addr, oldval, newval, res, true /* sign */, false /* retold */, true /* barrier */, false /* weak */, true /* exchange */); ++ __ cmpxchg32(addr, oldval, newval, res, true /* sign */, false /* retold */, true /* acquire */, false /* weak */, true /* exchange */); + %} + ins_pipe(pipe_slow); + %} +@@ -10806,14 +10806,14 @@ instruct compareAndExchangeL(mRegL res, indirect mem, mRegL oldval, mRegL newval + ins_cost(2 * MEMORY_REF_COST); + effect(TEMP_DEF res); + format %{ +- "cmpxchg $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @compareAndExchangeL" ++ "CMPXCHG $res = 
$mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @compareAndExchangeL" + %} + ins_encode %{ + Register newval = $newval$$Register; + Register oldval = $oldval$$Register; + Register res = $res$$Register; + Address addr(as_Register($mem$$base)); +- __ cmpxchg(addr, oldval, newval, res, false /* retold */, true /* barrier */, false /* weak */, true /* exchange */); ++ __ cmpxchg(addr, oldval, newval, res, false /* retold */, true /* acquire */, false /* weak */, true /* exchange */); + %} + ins_pipe(pipe_slow); + %} +@@ -10824,7 +10824,7 @@ instruct compareAndExchangeP(mRegP res, indirect mem, mRegP oldval, mRegP newval + ins_cost(2 * MEMORY_REF_COST); + effect(TEMP_DEF res); + format %{ +- "cmpxchg $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @compareAndExchangeP" ++ "CMPXCHG $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @compareAndExchangeP" + %} + ins_encode %{ + Register newval = $newval$$Register; +@@ -10842,14 +10842,14 @@ instruct compareAndExchangeN(mRegN res, indirect mem, mRegN oldval, mRegN newval + ins_cost(2 * MEMORY_REF_COST); + effect(TEMP_DEF res); + format %{ +- "cmpxchg32 $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @compareAndExchangeN" ++ "CMPXCHG $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @compareAndExchangeN" + %} + ins_encode %{ + Register newval = $newval$$Register; + Register oldval = $oldval$$Register; + Register res = $res$$Register; + Address addr(as_Register($mem$$base)); +- __ cmpxchg32(addr, oldval, newval, res, false /* sign */, false /* retold */, true /* barrier */, false /* weak */, true /* exchange */); ++ __ cmpxchg32(addr, oldval, newval, res, false /* sign */, false /* retold */, true /* acquire */, false /* weak */, true /* exchange */); + %} + ins_pipe(pipe_slow); + %} +@@ -10867,9 +10867,9 @@ instruct weakCompareAndSwapI(mRegI res, indirect mem, mRegI oldval, mRegI newval + Register res = $res$$Register; + Address addr(as_Register($mem$$base)); + if (res != addr.base() && res != oldval && res != newval) { +- __ cmpxchg32(addr, oldval, newval, res, true /* sign */, false /* retold */, true /* barrier */, true /* weak */, false /* exchange */); ++ __ cmpxchg32(addr, oldval, newval, res, true /* sign */, false /* retold */, true /* acquire */, true /* weak */, false /* exchange */); + } else { +- __ cmpxchg32(addr, oldval, newval, AT, true /* sign */, false /* retold */, true /* barrier */, true /* weak */, false /* exchange */); ++ __ cmpxchg32(addr, oldval, newval, AT, true /* sign */, false /* retold */, true /* acquire */, true /* weak */, false /* exchange */); + __ move(res, AT); + } + %} +@@ -10881,7 +10881,7 @@ instruct weakCompareAndSwapL(mRegI res, indirect mem, mRegL oldval, mRegL newval + match(Set res (WeakCompareAndSwapL mem (Binary oldval newval))); + ins_cost(2 * MEMORY_REF_COST); + format %{ +- "cmpxchg $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @WeakCompareAndSwapL" ++ "CMPXCHG $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @WeakCompareAndSwapL" + %} + ins_encode %{ + Register newval = $newval$$Register; +@@ -10889,9 +10889,9 @@ instruct weakCompareAndSwapL(mRegI res, indirect mem, mRegL oldval, mRegL newval + Register res = $res$$Register; + Address addr(as_Register($mem$$base)); + if (res != addr.base() && res != oldval && res != newval) { +- __ cmpxchg(addr, oldval, newval, res, false /* retold */, true /* barrier */, true /* weak */, false /* 
exchange */); ++ __ cmpxchg(addr, oldval, newval, res, false /* retold */, true /* acquire */, true /* weak */, false /* exchange */); + } else { +- __ cmpxchg(addr, oldval, newval, AT, false /* retold */, true /* barrier */, true /* weak */, false /* exchange */); ++ __ cmpxchg(addr, oldval, newval, AT, false /* retold */, true /* acquire */, true /* weak */, false /* exchange */); + __ move(res, AT); + } + %} +@@ -10903,7 +10903,7 @@ instruct weakCompareAndSwapP(mRegI res, indirect mem, mRegP oldval, mRegP newval + match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); + ins_cost(MEMORY_REF_COST); + format %{ +- "cmpxchg $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @weakCompareAndSwapP" ++ "CMPXCHG $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @weakCompareAndSwapP" + %} + ins_encode %{ + Register newval = $newval$$Register; +@@ -10911,9 +10911,9 @@ instruct weakCompareAndSwapP(mRegI res, indirect mem, mRegP oldval, mRegP newval + Register res = $res$$Register; + Address addr(as_Register($mem$$base)); + if (res != addr.base() && res != oldval && res != newval) { +- __ cmpxchg(addr, oldval, newval, res, false /* retold */, false /* barrier */, true /* weak */, false /* exchange */); ++ __ cmpxchg(addr, oldval, newval, res, false /* retold */, false /* acquire */, true /* weak */, false /* exchange */); + } else { +- __ cmpxchg(addr, oldval, newval, AT, false /* retold */, false /* barrier */, true /* weak */, false /* exchange */); ++ __ cmpxchg(addr, oldval, newval, AT, false /* retold */, false /* acquire */, true /* weak */, false /* exchange */); + __ move(res, AT); + } + %} +@@ -10925,7 +10925,7 @@ instruct weakCompareAndSwapP_acq(mRegI res, indirect mem, mRegP oldval, mRegP ne + match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); + ins_cost(2 * MEMORY_REF_COST); + format %{ +- "cmpxchg $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @weakCompareAndSwapP" ++ "CMPXCHG $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @weakCompareAndSwapP" + %} + ins_encode %{ + Register newval = $newval$$Register; +@@ -10933,9 +10933,9 @@ instruct weakCompareAndSwapP_acq(mRegI res, indirect mem, mRegP oldval, mRegP ne + Register res = $res$$Register; + Address addr(as_Register($mem$$base)); + if (res != addr.base() && res != oldval && res != newval) { +- __ cmpxchg(addr, oldval, newval, res, false /* retold */, true /* barrier */, true /* weak */, false /* exchange */); ++ __ cmpxchg(addr, oldval, newval, res, false /* retold */, true /* acquire */, true /* weak */, false /* exchange */); + } else { +- __ cmpxchg(addr, oldval, newval, AT, false /* retold */, true /* barrier */, true /* weak */, false /* exchange */); ++ __ cmpxchg(addr, oldval, newval, AT, false /* retold */, true /* acquire */, true /* weak */, false /* exchange */); + __ move(res, AT); + } + %} +@@ -10947,7 +10947,7 @@ instruct weakCompareAndSwapN(mRegI res, indirect mem, mRegN oldval, mRegN newval + match(Set res (WeakCompareAndSwapN mem (Binary oldval newval))); + ins_cost(2 * MEMORY_REF_COST); + format %{ +- "cmpxchg32 $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @weakCompareAndSwapN" ++ "CMPXCHG $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @weakCompareAndSwapN" + %} + ins_encode %{ + Register newval = $newval$$Register; +@@ -10955,9 +10955,9 @@ instruct weakCompareAndSwapN(mRegI res, indirect mem, mRegN oldval, mRegN newval + Register res = 
$res$$Register; + Address addr(as_Register($mem$$base)); + if (res != addr.base() && res != oldval && res != newval) { +- __ cmpxchg32(addr, oldval, newval, res, false /* sign */, false /* retold */, true /* barrier */, true /* weak */, false /* exchange */); ++ __ cmpxchg32(addr, oldval, newval, res, false /* sign */, false /* retold */, true /* acquire */, true /* weak */, false /* exchange */); + } else { +- __ cmpxchg32(addr, oldval, newval, AT, false /* sign */, false /* retold */, true /* barrier */, true /* weak */, false /* exchange */); ++ __ cmpxchg32(addr, oldval, newval, AT, false /* sign */, false /* retold */, true /* acquire */, true /* weak */, false /* exchange */); + __ move(res, AT); + } + %} +diff --git a/src/hotspot/cpu/loongarch/macroAssembler_loongarch.cpp b/src/hotspot/cpu/loongarch/macroAssembler_loongarch.cpp +index a7062552f76..06fbc181583 100644 +--- a/src/hotspot/cpu/loongarch/macroAssembler_loongarch.cpp ++++ b/src/hotspot/cpu/loongarch/macroAssembler_loongarch.cpp +@@ -656,7 +656,7 @@ void MacroAssembler::biased_locking_enter(Register lock_reg, + #else + orr(tmp_reg, TREG, swap_reg); + #endif +- cmpxchg(Address(obj_reg, 0), swap_reg, tmp_reg, AT, false, false); ++ cmpxchg(Address(obj_reg, 0), swap_reg, tmp_reg, AT, false, true /* acquire */); + if (need_tmp_reg) { + pop(tmp_reg); + } +@@ -701,7 +701,7 @@ void MacroAssembler::biased_locking_enter(Register lock_reg, + #endif + ld_ptr(swap_reg, saved_mark_addr); + +- cmpxchg(Address(obj_reg, 0), swap_reg, tmp_reg, AT, false, false); ++ cmpxchg(Address(obj_reg, 0), swap_reg, tmp_reg, AT, false, true /* acquire */); + if (need_tmp_reg) { + pop(tmp_reg); + } +@@ -741,7 +741,7 @@ void MacroAssembler::biased_locking_enter(Register lock_reg, + push(tmp_reg); + } + load_prototype_header(tmp_reg, obj_reg); +- cmpxchg(Address(obj_reg, 0), swap_reg, tmp_reg, AT, false, false); ++ cmpxchg(Address(obj_reg, 0), swap_reg, tmp_reg, AT, false, true /* acquire */); + if (need_tmp_reg) { + pop(tmp_reg); + } +@@ -1822,7 +1822,7 @@ void MacroAssembler::bswap_w(Register dst, Register src) { + } + + void MacroAssembler::cmpxchg(Address addr, Register oldval, Register newval, +- Register resflag, bool retold, bool barrier, ++ Register resflag, bool retold, bool acquire, + bool weak, bool exchange) { + assert(oldval != resflag, "oldval != resflag"); + assert(newval != resflag, "newval != resflag"); +@@ -1845,8 +1845,11 @@ void MacroAssembler::cmpxchg(Address addr, Register oldval, Register newval, + b(succ); + + bind(fail); +- if (barrier) ++ if (acquire) { ++ membar(Assembler::Membar_mask_bits(LoadLoad|LoadStore)); ++ } else { + dbar(0x700); ++ } + if (retold && oldval != R0) + move(oldval, resflag); + if (!exchange) { +@@ -1856,7 +1859,7 @@ void MacroAssembler::cmpxchg(Address addr, Register oldval, Register newval, + } + + void MacroAssembler::cmpxchg(Address addr, Register oldval, Register newval, +- Register tmp, bool retold, bool barrier, Label& succ, Label* fail) { ++ Register tmp, bool retold, bool acquire, Label& succ, Label* fail) { + assert(oldval != tmp, "oldval != tmp"); + assert(newval != tmp, "newval != tmp"); + Label again, neq; +@@ -1870,8 +1873,11 @@ void MacroAssembler::cmpxchg(Address addr, Register oldval, Register newval, + b(succ); + + bind(neq); +- if (barrier) ++ if (acquire) { ++ membar(Assembler::Membar_mask_bits(LoadLoad|LoadStore)); ++ } else { + dbar(0x700); ++ } + if (retold && oldval != R0) + move(oldval, tmp); + if (fail) +@@ -1879,7 +1885,7 @@ void MacroAssembler::cmpxchg(Address addr, Register oldval, 
Register newval, + } + + void MacroAssembler::cmpxchg32(Address addr, Register oldval, Register newval, +- Register resflag, bool sign, bool retold, bool barrier, ++ Register resflag, bool sign, bool retold, bool acquire, + bool weak, bool exchange) { + assert(oldval != resflag, "oldval != resflag"); + assert(newval != resflag, "newval != resflag"); +@@ -1904,8 +1910,11 @@ void MacroAssembler::cmpxchg32(Address addr, Register oldval, Register newval, + b(succ); + + bind(fail); +- if (barrier) ++ if (acquire) { ++ membar(Assembler::Membar_mask_bits(LoadLoad|LoadStore)); ++ } else { + dbar(0x700); ++ } + if (retold && oldval != R0) + move(oldval, resflag); + if (!exchange) { +@@ -1915,7 +1924,7 @@ void MacroAssembler::cmpxchg32(Address addr, Register oldval, Register newval, + } + + void MacroAssembler::cmpxchg32(Address addr, Register oldval, Register newval, Register tmp, +- bool sign, bool retold, bool barrier, Label& succ, Label* fail) { ++ bool sign, bool retold, bool acquire, Label& succ, Label* fail) { + assert(oldval != tmp, "oldval != tmp"); + assert(newval != tmp, "newval != tmp"); + Label again, neq; +@@ -1931,8 +1940,11 @@ void MacroAssembler::cmpxchg32(Address addr, Register oldval, Register newval, R + b(succ); + + bind(neq); +- if (barrier) ++ if (acquire) { ++ membar(Assembler::Membar_mask_bits(LoadLoad|LoadStore)); ++ } else { + dbar(0x700); ++ } + if (retold && oldval != R0) + move(oldval, tmp); + if (fail) +diff --git a/src/hotspot/cpu/loongarch/macroAssembler_loongarch.hpp b/src/hotspot/cpu/loongarch/macroAssembler_loongarch.hpp +index c24d8a4712a..204ca1a1a07 100644 +--- a/src/hotspot/cpu/loongarch/macroAssembler_loongarch.hpp ++++ b/src/hotspot/cpu/loongarch/macroAssembler_loongarch.hpp +@@ -550,13 +550,13 @@ class MacroAssembler: public Assembler { + void bswap_w(Register dst, Register src); + + void cmpxchg(Address addr, Register oldval, Register newval, Register resflag, +- bool retold, bool barrier, bool weak = false, bool exchange = false); ++ bool retold, bool acquire, bool weak = false, bool exchange = false); + void cmpxchg(Address addr, Register oldval, Register newval, Register tmp, +- bool retold, bool barrier, Label& succ, Label* fail = nullptr); ++ bool retold, bool acquire, Label& succ, Label* fail = nullptr); + void cmpxchg32(Address addr, Register oldval, Register newval, Register resflag, +- bool sign, bool retold, bool barrier, bool weak = false, bool exchange = false); ++ bool sign, bool retold, bool acquire, bool weak = false, bool exchange = false); + void cmpxchg32(Address addr, Register oldval, Register newval, Register tmp, +- bool sign, bool retold, bool barrier, Label& succ, Label* fail = nullptr); ++ bool sign, bool retold, bool acquire, Label& succ, Label* fail = nullptr); + + void extend_sign(Register rh, Register rl) { /*stop("extend_sign");*/ guarantee(0, "LA not implemented yet");} + void neg(Register reg) { /*dsubu(reg, R0, reg);*/ guarantee(0, "LA not implemented yet");} +diff --git a/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp b/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp +index 0b3ea4c42f3..30c06f40493 100644 +--- a/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp ++++ b/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp +@@ -1928,7 +1928,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm, + __ orr(swap_reg, swap_reg, AT); + + __ st_d(swap_reg, lock_reg, mark_word_offset); +- __ cmpxchg(Address(obj_reg, 0), swap_reg, lock_reg, AT, true, false, lock_done); ++ __ 
cmpxchg(Address(obj_reg, 0), swap_reg, lock_reg, AT, true, true /* acquire */, lock_done); + // Test if the oopMark is an obvious stack pointer, i.e., + // 1) (mark & 3) == 0, and + // 2) sp <= mark < mark + os::pagesize() +@@ -2112,7 +2112,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm, + // get address of the stack lock + __ addi_d (c_rarg0, FP, lock_slot_fp_offset); + // Atomic swap old header if oop still contains the stack lock +- __ cmpxchg(Address(obj_reg, 0), c_rarg0, T8, AT, false, false, unlock_done, &slow_path_unlock); ++ __ cmpxchg(Address(obj_reg, 0), c_rarg0, T8, AT, false, true /* acquire */, unlock_done, &slow_path_unlock); + + // slow path re-enters here + __ bind(unlock_done); +diff --git a/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp +index 27e431c2c61..17fc8e5078e 100644 +--- a/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp ++++ b/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp +@@ -292,8 +292,7 @@ void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, R + // Code emitted by LIR node "LIR_OpZLoadBarrierTest" which in turn is emitted by ZBarrierSetC1::load_barrier. + // The actual compare and branch instructions are represented as stand-alone LIR nodes. + void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce, +- LIR_Opr ref, +- LIR_Opr res) const { ++ LIR_Opr ref) const { + __ block_comment("load_barrier_test (zgc) {"); + + __ ld(R0, in_bytes(ZThreadLocalData::address_bad_mask_offset()), R16_thread); +diff --git a/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.hpp b/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.hpp +index 4957e73ae22..e2ff1bf53ae 100644 +--- a/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.hpp ++++ b/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.hpp +@@ -67,8 +67,7 @@ public: + + #ifdef COMPILER1 + void generate_c1_load_barrier_test(LIR_Assembler* ce, +- LIR_Opr ref, +- LIR_Opr res) const; ++ LIR_Opr ref) const; + + void generate_c1_load_barrier_stub(LIR_Assembler* ce, + ZLoadBarrierStubC1* stub) const; +diff --git a/src/hotspot/os_cpu/linux_loongarch/atomic_linux_loongarch.hpp b/src/hotspot/os_cpu/linux_loongarch/atomic_linux_loongarch.hpp +index beb717b67ff..77413aba9f4 100644 +--- a/src/hotspot/os_cpu/linux_loongarch/atomic_linux_loongarch.hpp ++++ b/src/hotspot/os_cpu/linux_loongarch/atomic_linux_loongarch.hpp +@@ -167,13 +167,16 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest, + + switch (order) { + case memory_order_relaxed: ++ case memory_order_release: + asm volatile ( + "1: ll.w %[prev], %[dest] \n\t" + " bne %[prev], %[_old], 2f \n\t" + " move %[temp], %[_new] \n\t" + " sc.w %[temp], %[dest] \n\t" + " beqz %[temp], 1b \n\t" +- "2: \n\t" ++ " b 3f \n\t" ++ "2: dbar 0x700 \n\t" ++ "3: \n\t" + : [prev] "=&r" (prev), [temp] "=&r" (temp) + : [_old] "r" (compare_value), [_new] "r" (exchange_value), [dest] "ZC" (*dest) + : "memory"); +@@ -186,7 +189,7 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest, + " sc.w %[temp], %[dest] \n\t" + " beqz %[temp], 1b \n\t" + " b 3f \n\t" +- "2: dbar 0x700 \n\t" ++ "2: dbar 0x14 \n\t" + "3: \n\t" + : [prev] "=&r" (prev), [temp] "=&r" (temp) + : [_old] "r" (compare_value), [_new] "r" (exchange_value), [dest] "ZC" (*dest) +@@ -208,13 +211,16 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest, + + switch (order) { + case memory_order_relaxed: ++ case memory_order_release: + asm volatile ( + "1: ll.d 
%[prev], %[dest] \n\t" + " bne %[prev], %[_old], 2f \n\t" + " move %[temp], %[_new] \n\t" + " sc.d %[temp], %[dest] \n\t" + " beqz %[temp], 1b \n\t" +- "2: \n\t" ++ " b 3f \n\t" ++ "2: dbar 0x700 \n\t" ++ "3: \n\t" + : [prev] "=&r" (prev), [temp] "=&r" (temp) + : [_old] "r" (compare_value), [_new] "r" (exchange_value), [dest] "ZC" (*dest) + : "memory"); +@@ -227,7 +233,7 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest, + " sc.d %[temp], %[dest] \n\t" + " beqz %[temp], 1b \n\t" + " b 3f \n\t" +- "2: dbar 0x700 \n\t" ++ "2: dbar 0x14 \n\t" + "3: \n\t" + : [prev] "=&r" (prev), [temp] "=&r" (temp) + : [_old] "r" (compare_value), [_new] "r" (exchange_value), [dest] "ZC" (*dest) +diff --git a/src/hotspot/share/c1/c1_LIR.hpp b/src/hotspot/share/c1/c1_LIR.hpp +index 0fffd4aabfc..e9a981b20c2 100644 +--- a/src/hotspot/share/c1/c1_LIR.hpp ++++ b/src/hotspot/share/c1/c1_LIR.hpp +@@ -1619,11 +1619,7 @@ class LIR_Op2: public LIR_Op { + , _tmp4(LIR_OprFact::illegalOpr) + , _tmp5(LIR_OprFact::illegalOpr) + , _condition(condition) { +- assert(code == lir_cmp || code == lir_assert +-#if defined(RISCV) || defined(LOONGARCH) +- || code == lir_branch || code == lir_cond_float_branch +-#endif +- , "code check"); ++ assert(code == lir_cmp || code == lir_assert RISCV_ONLY(|| code == lir_branch || code == lir_cond_float_branch)LOONGARCH64_ONLY(|| code == lir_branch || code == lir_cond_float_branch), "code check"); + } + + LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) +@@ -1655,11 +1651,7 @@ class LIR_Op2: public LIR_Op { + , _tmp4(LIR_OprFact::illegalOpr) + , _tmp5(LIR_OprFact::illegalOpr) + , _condition(lir_cond_unknown) { +- assert(code != lir_cmp && +-#if defined(RISCV) || defined(LOONGARCH) +- code != lir_branch && code != lir_cond_float_branch && +-#endif +- is_in_range(code, begin_op2, end_op2), "code check"); ++ assert(code != lir_cmp && RISCV_ONLY(code != lir_branch && code != lir_cond_float_branch &&)LOONGARCH64_ONLY(code != lir_branch && code != lir_cond_float_branch &&) is_in_range(code, begin_op2, end_op2), "code check"); + } + + LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, LIR_Opr tmp1, LIR_Opr tmp2 = LIR_OprFact::illegalOpr, +@@ -1675,11 +1667,7 @@ class LIR_Op2: public LIR_Op { + , _tmp4(tmp4) + , _tmp5(tmp5) + , _condition(lir_cond_unknown) { +- assert(code != lir_cmp && +-#if defined(RISCV) || defined(LOONGARCH) +- code != lir_branch && code != lir_cond_float_branch && +-#endif +- is_in_range(code, begin_op2, end_op2), "code check"); ++ assert(code != lir_cmp && RISCV_ONLY(code != lir_branch && code != lir_cond_float_branch &&)LOONGARCH64_ONLY(code != lir_branch && code != lir_cond_float_branch &&) is_in_range(code, begin_op2, end_op2), "code check"); + } + + LIR_Opr in_opr1() const { return _opr1; } +diff --git a/src/hotspot/share/runtime/objectMonitor.cpp b/src/hotspot/share/runtime/objectMonitor.cpp +index 374816fd355..e7015344a18 100644 +--- a/src/hotspot/share/runtime/objectMonitor.cpp ++++ b/src/hotspot/share/runtime/objectMonitor.cpp +@@ -22,6 +22,12 @@ + * + */ + ++/* ++ * This file has been modified by Loongson Technology in 2023, These ++ * modifications are Copyright (c) 2023, Loongson Technology, and are made ++ * available on the same license terms set forth above. 
++ */
++
+ #include "precompiled.hpp"
+ #include "classfile/vmSymbols.hpp"
+ #include "gc/shared/oopStorage.hpp"
+@@ -362,6 +368,9 @@ bool ObjectMonitor::enter(JavaThread* current) {
+ }
+
+ assert(owner_raw() != current, "invariant");
++ // Do not let the load of _succ in the "_succ != current" assertion below be reordered
++ // before the store in the earlier "if (_succ == current) _succ = nullptr;" takes effect.
++ DEBUG_ONLY(LOONGARCH64_ONLY(__asm__ __volatile__ ("dbar 0x700\n");)MIPS64_ONLY(OrderAccess::loadload();))
+ assert(_succ != current, "invariant");
+ assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
+ assert(current->thread_state() != _thread_blocked, "invariant");
+@@ -723,6 +732,7 @@ void ObjectMonitor::EnterI(JavaThread* current) {
+ }
+
+ // The Spin failed -- Enqueue and park the thread ...
++ DEBUG_ONLY(LOONGARCH64_ONLY(__asm__ __volatile__ ("dbar 0x700\n");)MIPS64_ONLY(OrderAccess::loadload();))
+ assert(_succ != current, "invariant");
+ assert(owner_raw() != current, "invariant");
+ assert(_Responsible != current, "invariant");
+diff --git a/test/hotspot/jtreg/vmTestbase/nsk/share/jdi/ArgumentHandler.java b/test/hotspot/jtreg/vmTestbase/nsk/share/jdi/ArgumentHandler.java
+index 16863189500..e2f3a13473b 100644
+--- a/test/hotspot/jtreg/vmTestbase/nsk/share/jdi/ArgumentHandler.java
++++ b/test/hotspot/jtreg/vmTestbase/nsk/share/jdi/ArgumentHandler.java
+@@ -21,6 +21,12 @@
+ * questions.
+ */
+
++/*
++ * This file has been modified by Loongson Technology in 2022, These
++ * modifications are Copyright (c) 2022, Loongson Technology, and are made
++ * available on the same license terms set forth above.
++ */
++
+ package nsk.share.jdi;
+
+ import nsk.share.*;
+@@ -520,21 +526,22 @@ class CheckedFeatures {
+ * available only on the Microsoft Windows platform.
+ * " + */ +- {"linux-i586", "com.sun.jdi.SharedMemoryAttach"}, +- {"linux-ia64", "com.sun.jdi.SharedMemoryAttach"}, +- {"linux-amd64", "com.sun.jdi.SharedMemoryAttach"}, +- {"linux-x64", "com.sun.jdi.SharedMemoryAttach"}, +- {"linux-aarch64", "com.sun.jdi.SharedMemoryAttach"}, +- {"linux-arm", "com.sun.jdi.SharedMemoryAttach"}, +- {"linux-ppc64", "com.sun.jdi.SharedMemoryAttach"}, +- {"linux-ppc64le", "com.sun.jdi.SharedMemoryAttach"}, +- {"linux-s390x", "com.sun.jdi.SharedMemoryAttach"}, +- {"linux-riscv64", "com.sun.jdi.SharedMemoryAttach"}, +- {"macosx-amd64", "com.sun.jdi.SharedMemoryAttach"}, +- {"mac-x64", "com.sun.jdi.SharedMemoryAttach"}, +- {"macosx-aarch64", "com.sun.jdi.SharedMemoryAttach"}, +- {"mac-aarch64", "com.sun.jdi.SharedMemoryAttach"}, +- {"aix-ppc64", "com.sun.jdi.SharedMemoryAttach"}, ++ {"linux-i586", "com.sun.jdi.SharedMemoryAttach"}, ++ {"linux-ia64", "com.sun.jdi.SharedMemoryAttach"}, ++ {"linux-amd64", "com.sun.jdi.SharedMemoryAttach"}, ++ {"linux-x64", "com.sun.jdi.SharedMemoryAttach"}, ++ {"linux-aarch64", "com.sun.jdi.SharedMemoryAttach"}, ++ {"linux-arm", "com.sun.jdi.SharedMemoryAttach"}, ++ {"linux-ppc64", "com.sun.jdi.SharedMemoryAttach"}, ++ {"linux-ppc64le", "com.sun.jdi.SharedMemoryAttach"}, ++ {"linux-s390x", "com.sun.jdi.SharedMemoryAttach"}, ++ {"linux-riscv64", "com.sun.jdi.SharedMemoryAttach"}, ++ {"linux-loongarch64", "com.sun.jdi.SharedMemoryAttach"}, ++ {"macosx-amd64", "com.sun.jdi.SharedMemoryAttach"}, ++ {"mac-x64", "com.sun.jdi.SharedMemoryAttach"}, ++ {"macosx-aarch64", "com.sun.jdi.SharedMemoryAttach"}, ++ {"mac-aarch64", "com.sun.jdi.SharedMemoryAttach"}, ++ {"aix-ppc64", "com.sun.jdi.SharedMemoryAttach"}, + + // listening connectors + /* +@@ -546,21 +553,22 @@ class CheckedFeatures { + * It is available only on the Microsoft Windows platform. 
+ * " + */ +- {"linux-i586", "com.sun.jdi.SharedMemoryListen"}, +- {"linux-ia64", "com.sun.jdi.SharedMemoryListen"}, +- {"linux-amd64", "com.sun.jdi.SharedMemoryListen"}, +- {"linux-x64", "com.sun.jdi.SharedMemoryListen"}, +- {"linux-aarch64", "com.sun.jdi.SharedMemoryListen"}, +- {"linux-arm", "com.sun.jdi.SharedMemoryListen"}, +- {"linux-ppc64", "com.sun.jdi.SharedMemoryListen"}, +- {"linux-ppc64le", "com.sun.jdi.SharedMemoryListen"}, +- {"linux-s390x", "com.sun.jdi.SharedMemoryListen"}, +- {"linux-riscv64", "com.sun.jdi.SharedMemoryListen"}, +- {"macosx-amd64", "com.sun.jdi.SharedMemoryListen"}, +- {"mac-x64", "com.sun.jdi.SharedMemoryListen"}, +- {"macosx-aarch64", "com.sun.jdi.SharedMemoryListen"}, +- {"mac-aarch64", "com.sun.jdi.SharedMemoryListen"}, +- {"aix-ppc64", "com.sun.jdi.SharedMemoryListen"}, ++ {"linux-i586", "com.sun.jdi.SharedMemoryListen"}, ++ {"linux-ia64", "com.sun.jdi.SharedMemoryListen"}, ++ {"linux-amd64", "com.sun.jdi.SharedMemoryListen"}, ++ {"linux-x64", "com.sun.jdi.SharedMemoryListen"}, ++ {"linux-aarch64", "com.sun.jdi.SharedMemoryListen"}, ++ {"linux-arm", "com.sun.jdi.SharedMemoryListen"}, ++ {"linux-ppc64", "com.sun.jdi.SharedMemoryListen"}, ++ {"linux-ppc64le", "com.sun.jdi.SharedMemoryListen"}, ++ {"linux-s390x", "com.sun.jdi.SharedMemoryListen"}, ++ {"linux-riscv64", "com.sun.jdi.SharedMemoryListen"}, ++ {"linux-loongarch64", "com.sun.jdi.SharedMemoryListen"}, ++ {"macosx-amd64", "com.sun.jdi.SharedMemoryListen"}, ++ {"mac-x64", "com.sun.jdi.SharedMemoryListen"}, ++ {"macosx-aarch64", "com.sun.jdi.SharedMemoryListen"}, ++ {"mac-aarch64", "com.sun.jdi.SharedMemoryListen"}, ++ {"aix-ppc64", "com.sun.jdi.SharedMemoryListen"}, + + // launching connectors + /* +@@ -575,78 +583,82 @@ class CheckedFeatures { + * Windows, the shared memory transport is used. On Linux the socket transport is used. 
+ * " + */ +- {"linux-i586", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, +- {"linux-i586", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, ++ {"linux-i586", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, ++ {"linux-i586", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, ++ ++ {"linux-ia64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, ++ {"linux-ia64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, + +- {"linux-ia64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, +- {"linux-ia64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, ++ {"linux-amd64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, ++ {"linux-amd64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, + +- {"linux-amd64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, +- {"linux-amd64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, ++ {"linux-x64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, ++ {"linux-x64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, + +- {"linux-x64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, +- {"linux-x64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, ++ {"linux-aarch64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, ++ {"linux-aarch64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, + +- {"linux-aarch64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, +- {"linux-aarch64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, ++ {"linux-arm", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, ++ {"linux-arm", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, + +- {"linux-arm", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, +- {"linux-arm", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, ++ {"linux-ppc64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, ++ {"linux-ppc64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, + +- {"linux-ppc64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, +- {"linux-ppc64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, ++ {"linux-ppc64le", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, ++ {"linux-ppc64le", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, + +- {"linux-ppc64le", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, +- {"linux-ppc64le", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, ++ {"linux-s390x", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, ++ {"linux-s390x", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, + +- {"linux-s390x", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, +- {"linux-s390x", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, ++ {"linux-riscv64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, ++ {"linux-riscv64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, + +- {"linux-riscv64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, +- {"linux-riscv64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, ++ {"linux-loongarch64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, ++ {"linux-loongarch64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, + +- {"windows-i586", "com.sun.jdi.CommandLineLaunch", "dt_socket"}, +- {"windows-i586", "com.sun.jdi.RawCommandLineLaunch", "dt_socket"}, ++ {"windows-i586", "com.sun.jdi.CommandLineLaunch", "dt_socket"}, ++ {"windows-i586", "com.sun.jdi.RawCommandLineLaunch", "dt_socket"}, + +- {"windows-ia64", "com.sun.jdi.CommandLineLaunch", "dt_socket"}, +- {"windows-ia64", "com.sun.jdi.RawCommandLineLaunch", "dt_socket"}, ++ {"windows-ia64", "com.sun.jdi.CommandLineLaunch", "dt_socket"}, ++ {"windows-ia64", "com.sun.jdi.RawCommandLineLaunch", "dt_socket"}, + +- {"windows-amd64", "com.sun.jdi.CommandLineLaunch", "dt_socket"}, +- {"windows-amd64", "com.sun.jdi.RawCommandLineLaunch", "dt_socket"}, ++ {"windows-amd64", 
"com.sun.jdi.CommandLineLaunch", "dt_socket"}, ++ {"windows-amd64", "com.sun.jdi.RawCommandLineLaunch", "dt_socket"}, + +- {"windows-x64", "com.sun.jdi.CommandLineLaunch", "dt_socket"}, +- {"windows-x64", "com.sun.jdi.RawCommandLineLaunch", "dt_socket"}, ++ {"windows-x64", "com.sun.jdi.CommandLineLaunch", "dt_socket"}, ++ {"windows-x64", "com.sun.jdi.RawCommandLineLaunch", "dt_socket"}, + +- {"macosx-amd64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, +- {"macosx-amd64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, ++ {"macosx-amd64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, ++ {"macosx-amd64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, + +- {"mac-x64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, +- {"mac-x64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, ++ {"mac-x64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, ++ {"mac-x64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, + +- {"macosx-aarch64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, +- {"macosx-aarch64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, ++ {"macosx-aarch64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, ++ {"macosx-aarch64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, + +- {"mac-aarch64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, +- {"mac-aarch64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, ++ {"mac-aarch64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, ++ {"mac-aarch64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, + +- {"aix-ppc64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, +- {"aix-ppc64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, ++ {"aix-ppc64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, ++ {"aix-ppc64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, + + // shared memory transport is implemented only on windows platform +- {"linux-i586", "dt_shmem"}, +- {"linux-ia64", "dt_shmem"}, +- {"linux-amd64", "dt_shmem"}, +- {"linux-x64", "dt_shmem"}, +- {"linux-aarch64", "dt_shmem"}, +- {"linux-arm", "dt_shmem"}, +- {"linux-ppc64", "dt_shmem"}, +- {"linux-ppc64le", "dt_shmem"}, +- {"linux-s390x", "dt_shmem"}, +- {"linux-riscv64", "dt_shmem"}, +- {"macosx-amd64", "dt_shmem"}, +- {"mac-x64", "dt_shmem"}, +- {"macosx-aarch64", "dt_shmem"}, +- {"mac-aarch64", "dt_shmem"}, +- {"aix-ppc64", "dt_shmem"}, ++ {"linux-i586", "dt_shmem"}, ++ {"linux-ia64", "dt_shmem"}, ++ {"linux-amd64", "dt_shmem"}, ++ {"linux-x64", "dt_shmem"}, ++ {"linux-aarch64", "dt_shmem"}, ++ {"linux-arm", "dt_shmem"}, ++ {"linux-ppc64", "dt_shmem"}, ++ {"linux-ppc64le", "dt_shmem"}, ++ {"linux-s390x", "dt_shmem"}, ++ {"linux-riscv64", "dt_shmem"}, ++ {"linux-loongarch64", "dt_shmem"}, ++ {"macosx-amd64", "dt_shmem"}, ++ {"mac-x64", "dt_shmem"}, ++ {"macosx-aarch64", "dt_shmem"}, ++ {"mac-aarch64", "dt_shmem"}, ++ {"aix-ppc64", "dt_shmem"}, + }; + } + +commit a6ce2246d45bfb51c72d93e48dcf48cd6ad24917 +Merge: 84bd3bf8f10 b78a848cc7a +Author: aoqi +Date: Fri Jan 26 16:13:03 2024 +0800 + + Merge + + +commit 84bd3bf8f104c294b595be579fa4268c5c83ed82 +Merge: 99147f78245 9c16e89d275 +Author: aoqi +Date: Thu Dec 7 22:42:44 2023 +0800 + + Merge + + +commit 99147f78245703b07fbbf35b7198521eada4cf2c +Author: loongson-jvm +Date: Thu Dec 7 20:59:32 2023 +0800 + + Update (2023.12.07, 2nd) + + 32519: Fix for 31967 set default MaxGCPauseMillis + 31967: [G1GC] Set default MaxGCPauseMillis=150ms + 32564: MIPS port of 8284273: Early crashes in os::print_context on AArch64 + 32563: MIPS port of 8283326: Implement SafeFetch statically + 32186: LA port of 8314020: Print instruction blocks in byte units + 31295: 
Provide information when hitting a HaltNode + 28314: Misc crash dump improvements + 28096: LA port of 8293851: hs_err should print more stack in hex dump + 26263: LA port of 8284273: Early crashes in os::print_context on AArch64 + 32079: LA port of 8313796: AsyncGetCallTrace crash on unreadable interpreter method pointer + 30410: LA port of 8303154: Investigate and improve instruction cache flushing during compilation + 23671: MIPS/LA port of 8277417: C1 LIR instruction for load-klass + 25388: LA port of 8283326: Implement SafeFetch statically + +diff --git a/src/hotspot/cpu/loongarch/assembler_loongarch.hpp b/src/hotspot/cpu/loongarch/assembler_loongarch.hpp +index af65eb878e4..5eae8b9995c 100644 +--- a/src/hotspot/cpu/loongarch/assembler_loongarch.hpp ++++ b/src/hotspot/cpu/loongarch/assembler_loongarch.hpp +@@ -1196,6 +1196,7 @@ class Assembler : public AbstractAssembler { + stptr_w_op = 0b00100101, + ldptr_d_op = 0b00100110, + stptr_d_op = 0b00100111, ++ csr_op = 0b00000100, + + unknow_ops8 = 0b11111111 + }; +@@ -1864,6 +1865,8 @@ public: + void stptr_w (Register rd, Register rj, int si16) { assert(is_simm(si16, 16) && ((si16 & 0x3) == 0), "not a signed 16-bit int"); emit_int32(insn_I14RR(stptr_w_op, si16>>2, (int)rj->encoding(), (int)rd->encoding())); } + void ldptr_d (Register rd, Register rj, int si16) { assert(is_simm(si16, 16) && ((si16 & 0x3) == 0), "not a signed 16-bit int"); emit_int32(insn_I14RR(ldptr_d_op, si16>>2, (int)rj->encoding(), (int)rd->encoding())); } + void stptr_d (Register rd, Register rj, int si16) { assert(is_simm(si16, 16) && ((si16 & 0x3) == 0), "not a signed 16-bit int"); emit_int32(insn_I14RR(stptr_d_op, si16>>2, (int)rj->encoding(), (int)rd->encoding())); } ++ void csrrd (Register rd, int csr) { emit_int32(insn_I14RR(csr_op, csr, 0, (int)rd->encoding())); } ++ void csrwr (Register rd, int csr) { emit_int32(insn_I14RR(csr_op, csr, 1, (int)rd->encoding())); } + + void ld_b (Register rd, Register rj, int si12) { assert(is_simm(si12, 12), "not a signed 12-bit int"); emit_int32(insn_I12RR(ld_b_op, si12, (int)rj->encoding(), (int)rd->encoding())); } + void ld_h (Register rd, Register rj, int si12) { assert(is_simm(si12, 12), "not a signed 12-bit int"); emit_int32(insn_I12RR(ld_h_op, si12, (int)rj->encoding(), (int)rd->encoding())); } +diff --git a/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch_64.cpp b/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch_64.cpp +index aff47cb97e1..2ddf19a6e5a 100644 +--- a/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch_64.cpp ++++ b/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch_64.cpp +@@ -890,14 +890,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch + __ ld_ptr(dest->as_register(), as_Address(from_addr)); + break; + case T_ADDRESS: +- // FIXME: OMG this is a horrible kludge. Any offset from an +- // address that matches klass_offset_in_bytes() will be loaded +- // as a word, not a long. 
+- if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
+- __ ld_wu(dest->as_register(), as_Address(from_addr));
+- } else {
+- __ ld_ptr(dest->as_register(), as_Address(from_addr));
+- }
++ __ ld_ptr(dest->as_register(), as_Address(from_addr));
+ break;
+ case T_INT:
+ __ ld_w(dest->as_register(), as_Address(from_addr));
+@@ -930,10 +923,6 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
+ // Load barrier has not yet been applied, so ZGC can't verify the oop here
+ __ verify_oop(dest->as_register());
+ }
+- } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
+- if (UseCompressedClassPointers) {
+- __ decode_klass_not_null(dest->as_register());
+- }
+ }
+ }
+
+@@ -2876,6 +2865,23 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
+ __ bind(*op->stub()->continuation());
+ }
+
++void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
++ Register obj = op->obj()->as_pointer_register();
++ Register result = op->result_opr()->as_pointer_register();
++
++ CodeEmitInfo* info = op->info();
++ if (info != NULL) {
++ add_debug_info_for_null_check_here(info);
++ }
++
++ if (UseCompressedClassPointers) {
++ __ ld_wu(result, obj, oopDesc::klass_offset_in_bytes());
++ __ decode_klass_not_null(result);
++ } else {
++ __ ld_ptr(result, obj, oopDesc::klass_offset_in_bytes());
++ }
++}
++
+ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
+ ciMethod* method = op->profiled_method();
+ ciMethod* callee = op->profiled_callee();
+diff --git a/src/hotspot/cpu/loongarch/frame_loongarch.cpp b/src/hotspot/cpu/loongarch/frame_loongarch.cpp
+index 23a63a77d98..1aba8e4dd27 100644
+--- a/src/hotspot/cpu/loongarch/frame_loongarch.cpp
++++ b/src/hotspot/cpu/loongarch/frame_loongarch.cpp
+@@ -538,7 +538,7 @@ bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
+
+ // first the method
+
+- Method* m = *interpreter_frame_method_addr();
++ Method* m = safe_interpreter_frame_method();
+
+ // validate the method we'd find in this potential sender
+ if (!Method::is_valid_method(m)) return false;
+diff --git a/src/hotspot/cpu/loongarch/globals_loongarch.hpp b/src/hotspot/cpu/loongarch/globals_loongarch.hpp
+index e31a3d02555..2358ca31596 100644
+--- a/src/hotspot/cpu/loongarch/globals_loongarch.hpp
++++ b/src/hotspot/cpu/loongarch/globals_loongarch.hpp
+@@ -97,7 +97,9 @@ define_pd_global(intx, AllocatePrefetchDistance, -1);
+ "Use CRC32 instructions for CRC32 computation") \
+ \
+ product(bool, UseActiveCoresMP, false, \
+- "Eliminate barriers for single active cpu")
++ "Eliminate barriers for single active cpu") \
++ \
++ product(bool, TraceTraps, false, "Trace all traps the signal handler handles")
+
+ // end of ARCH_FLAGS
+
+diff --git a/src/hotspot/cpu/loongarch/loongarch_64.ad b/src/hotspot/cpu/loongarch/loongarch_64.ad
+index 43e32570a0f..f1bb1c2f6cb 100644
+--- a/src/hotspot/cpu/loongarch/loongarch_64.ad
++++ b/src/hotspot/cpu/loongarch/loongarch_64.ad
+@@ -10395,13 +10395,13 @@ instruct ShouldNotReachHere( )
+ ins_cost(300);
+
+ // Use the following format syntax
+- format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
++ format %{ "stop; #@ShouldNotReachHere" %}
+ ins_encode %{
+ if (is_reachable()) {
+- // Here we should emit illtrap!
+- __ stop("ShouldNotReachHere"); ++ __ stop(_halt_reason); + } + %} ++ + ins_pipe( pipe_jump ); + %} + +diff --git a/src/hotspot/cpu/loongarch/macroAssembler_loongarch.cpp b/src/hotspot/cpu/loongarch/macroAssembler_loongarch.cpp +index f1cf308b447..a7062552f76 100644 +--- a/src/hotspot/cpu/loongarch/macroAssembler_loongarch.cpp ++++ b/src/hotspot/cpu/loongarch/macroAssembler_loongarch.cpp +@@ -978,22 +978,11 @@ void MacroAssembler::debug(char* msg/*, RegistersForDebugging* regs*/) { + + + void MacroAssembler::stop(const char* msg) { +- li(A0, (long)msg); +- call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type); +- brk(17); +-} +- +-void MacroAssembler::warn(const char* msg) { +- push_call_clobbered_registers(); +- li(A0, (long)msg); +- push(S2); +- move(S2, SP); // use S2 as a sender SP holder +- assert(StackAlignmentInBytes == 16, "must be"); +- bstrins_d(SP, R0, 3, 0); // align stack as required by ABI +- call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type); +- move(SP, S2); // use S2 as a sender SP holder +- pop(S2); +- pop_call_clobbered_registers(); ++#ifndef PRODUCT ++ block_comment(msg); ++#endif ++ csrrd(R0, 0); ++ emit_int64((uintptr_t)msg); + } + + void MacroAssembler::increment(Register reg, int imm) { +diff --git a/src/hotspot/cpu/loongarch/macroAssembler_loongarch.hpp b/src/hotspot/cpu/loongarch/macroAssembler_loongarch.hpp +index 07c33b80151..c24d8a4712a 100644 +--- a/src/hotspot/cpu/loongarch/macroAssembler_loongarch.hpp ++++ b/src/hotspot/cpu/loongarch/macroAssembler_loongarch.hpp +@@ -387,9 +387,6 @@ class MacroAssembler: public Assembler { + // prints msg, dumps registers and stops execution + void stop(const char* msg); + +- // prints msg and continues +- void warn(const char* msg); +- + static void debug(char* msg/*, RegistersForDebugging* regs*/); + static void debug64(char* msg, int64_t pc, int64_t regs[]); + +diff --git a/src/hotspot/cpu/loongarch/nativeInst_loongarch.cpp b/src/hotspot/cpu/loongarch/nativeInst_loongarch.cpp +index 407f539e8d7..25ef0ecd224 100644 +--- a/src/hotspot/cpu/loongarch/nativeInst_loongarch.cpp ++++ b/src/hotspot/cpu/loongarch/nativeInst_loongarch.cpp +@@ -422,6 +422,10 @@ bool NativeInstruction::is_sigill_zombie_not_entrant() { + return uint_at(0) == NativeIllegalInstruction::instruction_code; + } + ++bool NativeInstruction::is_stop() { ++ return uint_at(0) == 0x04000000; // csrrd R0 0 ++} ++ + void NativeIllegalInstruction::insert(address code_pos) { + *(juint*)code_pos = instruction_code; + ICache::invalidate_range(code_pos, instruction_size); +diff --git a/src/hotspot/cpu/loongarch/nativeInst_loongarch.hpp b/src/hotspot/cpu/loongarch/nativeInst_loongarch.hpp +index e445ebeb8be..0ec8ebddf09 100644 +--- a/src/hotspot/cpu/loongarch/nativeInst_loongarch.hpp ++++ b/src/hotspot/cpu/loongarch/nativeInst_loongarch.hpp +@@ -80,6 +80,7 @@ class NativeInstruction { + inline bool is_NativeCallTrampolineStub_at(); + //We use an illegal instruction for marking a method as not_entrant or zombie. 
+ bool is_sigill_zombie_not_entrant(); ++ bool is_stop(); + + protected: + address addr_at(int offset) const { return address(this) + offset; } +diff --git a/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp b/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp +index 930b6240b4b..0b3ea4c42f3 100644 +--- a/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp ++++ b/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp +@@ -893,7 +893,6 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm + + gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup); + +- __ flush(); + return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry); + } + +diff --git a/src/hotspot/cpu/loongarch/stubGenerator_loongarch_64.cpp b/src/hotspot/cpu/loongarch/stubGenerator_loongarch_64.cpp +index 10242a3df4a..21bfc7d78cb 100644 +--- a/src/hotspot/cpu/loongarch/stubGenerator_loongarch_64.cpp ++++ b/src/hotspot/cpu/loongarch/stubGenerator_loongarch_64.cpp +@@ -4357,45 +4357,6 @@ class StubGenerator: public StubCodeGenerator { + return start; + } + +- // add a function to implement SafeFetch32 and SafeFetchN +- void generate_safefetch(const char* name, int size, address* entry, +- address* fault_pc, address* continuation_pc) { +- // safefetch signatures: +- // int SafeFetch32(int* adr, int errValue); +- // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue); +- // +- // arguments: +- // A0 = adr +- // A1 = errValue +- // +- // result: +- // PPC_RET = *adr or errValue +- StubCodeMark mark(this, "StubRoutines", name); +- +- // Entry point, pc or function descriptor. +- *entry = __ pc(); +- +- // Load *adr into A1, may fault. +- *fault_pc = __ pc(); +- switch (size) { +- case 4: +- // int32_t +- __ ld_w(A1, A0, 0); +- break; +- case 8: +- // int64_t +- __ ld_d(A1, A0, 0); +- break; +- default: +- ShouldNotReachHere(); +- } +- +- // return errValue or *adr +- *continuation_pc = __ pc(); +- __ add_d(V0, A1, R0); +- __ jr(RA); +- } +- + + #undef __ + #define __ masm-> +@@ -5149,14 +5110,6 @@ class StubGenerator: public StubCodeGenerator { + StubRoutines::_dcos = generate_dsin_dcos(/* isCos = */ true); + } + +- // Safefetch stubs. 
+- generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry, +- &StubRoutines::_safefetch32_fault_pc, +- &StubRoutines::_safefetch32_continuation_pc); +- generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry, +- &StubRoutines::_safefetchN_fault_pc, +- &StubRoutines::_safefetchN_continuation_pc); +- + #ifdef COMPILER2 + if (UseMulAddIntrinsic) { + StubRoutines::_mulAdd = generate_mulAdd(); +diff --git a/src/hotspot/cpu/loongarch/vm_version_loongarch.cpp b/src/hotspot/cpu/loongarch/vm_version_loongarch.cpp +index c9a19b379b7..1a1ac923117 100644 +--- a/src/hotspot/cpu/loongarch/vm_version_loongarch.cpp ++++ b/src/hotspot/cpu/loongarch/vm_version_loongarch.cpp +@@ -175,7 +175,7 @@ void VM_Version::get_processor_features() { + _supports_cx8 = true; + + if (UseG1GC && FLAG_IS_DEFAULT(MaxGCPauseMillis)) { +- FLAG_SET_CMDLINE(MaxGCPauseMillis, 650); ++ FLAG_SET_DEFAULT(MaxGCPauseMillis, 150); + } + + if (supports_lsx()) { +diff --git a/src/hotspot/cpu/mips/stubGenerator_mips_64.cpp b/src/hotspot/cpu/mips/stubGenerator_mips_64.cpp +index ad44d23c531..e894a302b50 100644 +--- a/src/hotspot/cpu/mips/stubGenerator_mips_64.cpp ++++ b/src/hotspot/cpu/mips/stubGenerator_mips_64.cpp +@@ -1,6 +1,6 @@ + /* + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. +- * Copyright (c) 2015, 2022, Loongson Technology. All rights reserved. ++ * Copyright (c) 2015, 2023, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it +@@ -1905,47 +1905,6 @@ class StubGenerator: public StubCodeGenerator { + StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); + } + +- // add a function to implement SafeFetch32 and SafeFetchN +- void generate_safefetch(const char* name, int size, address* entry, +- address* fault_pc, address* continuation_pc) { +- // safefetch signatures: +- // int SafeFetch32(int* adr, int errValue); +- // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue); +- // +- // arguments: +- // A0 = adr +- // A1 = errValue +- // +- // result: +- // PPC_RET = *adr or errValue +- +- StubCodeMark mark(this, "StubRoutines", name); +- +- // Entry point, pc or function descriptor. +- *entry = __ pc(); +- +- // Load *adr into A1, may fault. +- *fault_pc = __ pc(); +- switch (size) { +- case 4: +- // int32_t +- __ lw(A1, A0, 0); +- break; +- case 8: +- // int64_t +- __ ld(A1, A0, 0); +- break; +- default: +- ShouldNotReachHere(); +- } +- +- // return errValue or *adr +- *continuation_pc = __ pc(); +- __ addu(V0,A1,R0); +- __ jr(RA); +- __ delayed()->nop(); +- } +- + + #undef __ + #define __ masm-> +@@ -2721,14 +2680,6 @@ class StubGenerator: public StubCodeGenerator { + generate_arraycopy_stubs(); + #endif + +- // Safefetch stubs. 
+- generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry, +- &StubRoutines::_safefetch32_fault_pc, +- &StubRoutines::_safefetch32_continuation_pc); +- generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry, +- &StubRoutines::_safefetchN_fault_pc, +- &StubRoutines::_safefetchN_continuation_pc); +- + #ifdef COMPILER2 + if (UseMontgomeryMultiplyIntrinsic) { + if (UseLEXT1) { +diff --git a/src/hotspot/os_cpu/linux_loongarch/os_linux_loongarch.cpp b/src/hotspot/os_cpu/linux_loongarch/os_linux_loongarch.cpp +index a2e4fea109c..b32ffe9105e 100644 +--- a/src/hotspot/os_cpu/linux_loongarch/os_linux_loongarch.cpp ++++ b/src/hotspot/os_cpu/linux_loongarch/os_linux_loongarch.cpp +@@ -129,6 +129,12 @@ frame os::fetch_frame_from_context(const void* ucVoid) { + intptr_t* sp; + intptr_t* fp; + address epc = fetch_frame_from_context(ucVoid, &sp, &fp); ++ if (!is_readable_pointer(epc)) { ++ // Try to recover from calling into bad memory ++ // Assume new frame has not been set up, the same as ++ // compiled frame stack bang ++ return fetch_compiled_frame_from_context(ucVoid); ++ } + return frame(sp, fp, epc); + } + +@@ -276,6 +282,24 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, + #ifdef PRINT_SIGNAL_HANDLE + tty->print_cr("continuation_for_implicit_exception stub: %lx", stub); + #endif ++ } else if (sig == SIGILL && nativeInstruction_at(pc)->is_stop()) { ++ // Pull a pointer to the error message out of the instruction ++ // stream. ++ const uint64_t *detail_msg_ptr ++ = (uint64_t*)(pc + 4/*NativeInstruction::instruction_size*/); ++ const char *detail_msg = (const char *)*detail_msg_ptr; ++ const char *msg = "stop"; ++ if (TraceTraps) { ++ tty->print_cr("trap: %s: (SIGILL)", msg); ++ } ++ ++ // End life with a fatal error, message and detail message and the context. ++ // Note: no need to do any post-processing here (e.g. signal chaining) ++ va_list va_dummy; ++ VMError::report_and_die(thread, uc, nullptr, 0, msg, detail_msg, va_dummy); ++ va_end(va_dummy); ++ ++ ShouldNotReachHere(); + } + } else if ((thread->thread_state() == _thread_in_vm || + thread->thread_state() == _thread_in_native) && +@@ -423,6 +447,7 @@ void os::print_context(outputStream *st, const void *context) { + if (context == NULL) return; + + const ucontext_t *uc = (const ucontext_t*)context; ++ + st->print_cr("Registers:"); + st->print( "ZERO=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.__gregs[0]); + st->print(", RA=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.__gregs[1]); +@@ -465,19 +490,23 @@ void os::print_context(outputStream *st, const void *context) { + st->print(", S8=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.__gregs[31]); + st->cr(); + st->cr(); ++} ++ ++void os::print_tos_pc(outputStream *st, const void *context) { ++ if (context == NULL) return; + +- intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc); +- st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", p2i(sp)); +- print_hex_dump(st, (address)(sp - 32), (address)(sp + 32), sizeof(intptr_t)); ++ const ucontext_t* uc = (const ucontext_t*)context; ++ ++ address sp = (address)os::Linux::ucontext_get_sp(uc); ++ print_tos(st, sp); + st->cr(); + + // Note: it may be unsafe to inspect memory near pc. For example, pc may + // point to garbage if entry point in an nmethod is corrupted. Leave + // this at the end, and hope for the best. 
+- address pc = os::Posix::ucontext_get_pc(uc); +- st->print_cr("Instructions: (pc=" PTR_FORMAT ")", p2i(pc)); +- print_hex_dump(st, pc - 64, pc + 64, sizeof(char)); +- Disassembler::decode(pc - 80, pc + 80, st); ++ address pc = os::fetch_frame_from_context(uc).pc(); ++ print_instructions(st, pc); ++ st->cr(); + } + + void os::setup_fpu() { +diff --git a/src/hotspot/os_cpu/linux_loongarch/safefetch_linux_loongarch64.S b/src/hotspot/os_cpu/linux_loongarch/safefetch_linux_loongarch64.S +new file mode 100644 +index 00000000000..fdc6da358e5 +--- /dev/null ++++ b/src/hotspot/os_cpu/linux_loongarch/safefetch_linux_loongarch64.S +@@ -0,0 +1,56 @@ ++/* ++ * Copyright (c) 2022 SAP SE. All rights reserved. ++ * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2022, Loongson Technology. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++ .globl SafeFetchN_impl ++ .globl _SafeFetchN_fault ++ .globl _SafeFetchN_continuation ++ .globl SafeFetch32_impl ++ .globl _SafeFetch32_fault ++ .globl _SafeFetch32_continuation ++ ++ # Support for int SafeFetch32(int* address, int defaultval); ++ # ++ # a0 : address ++ # a1 : defaultval ++SafeFetch32_impl: ++_SafeFetch32_fault: ++ ld.w $r4, $r4, 0 ++ jr $r1 ++_SafeFetch32_continuation: ++ or $r4, $r5, $r0 ++ jr $r1 ++ ++ # Support for intptr_t SafeFetchN(intptr_t* address, intptr_t defaultval); ++ # ++ # a0 : address ++ # a1 : defaultval ++SafeFetchN_impl: ++_SafeFetchN_fault: ++ ld.d $r4, $r4, 0 ++ jr $r1 ++_SafeFetchN_continuation: ++ or $r4, $r5, $r0 ++ jr $r1 +diff --git a/src/hotspot/os_cpu/linux_mips/os_linux_mips.cpp b/src/hotspot/os_cpu/linux_mips/os_linux_mips.cpp +index 8344945ff79..ff1af7beb68 100644 +--- a/src/hotspot/os_cpu/linux_mips/os_linux_mips.cpp ++++ b/src/hotspot/os_cpu/linux_mips/os_linux_mips.cpp +@@ -720,6 +720,7 @@ void os::print_context(outputStream *st, const void *context) { + if (context == NULL) return; + + const ucontext_t *uc = (const ucontext_t*)context; ++ + st->print_cr("Registers:"); + st->print( "R0=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[0]); + st->print(", AT=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[1]); +@@ -762,6 +763,12 @@ void os::print_context(outputStream *st, const void *context) { + st->print(", RA=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[31]); + st->cr(); + st->cr(); ++} ++ ++void os::print_tos_pc(outputStream *st, const void *context) { ++ if (context == NULL) return; ++ ++ const ucontext_t* uc = (const ucontext_t*)context; + + intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc); + st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", p2i(sp)); +diff --git a/src/hotspot/os_cpu/linux_mips/safefetch_linux_mips64.S b/src/hotspot/os_cpu/linux_mips/safefetch_linux_mips64.S +new file mode 100644 +index 00000000000..fc6ee6eca65 +--- /dev/null ++++ b/src/hotspot/os_cpu/linux_mips/safefetch_linux_mips64.S +@@ -0,0 +1,60 @@ ++/* ++ * Copyright (c) 2022 SAP SE. All rights reserved. ++ * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2023, Loongson Technology. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
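These hand-written stubs replace the stub-generator SafeFetch removed above: a fault at _SafeFetch32_fault/_SafeFetchN_fault is redirected by the signal handler to the matching _continuation label, which returns the caller-supplied default. Typical use is probing memory that may be unmapped; a minimal sketch (the include path and the double-probe idiom are assumptions, not code from this patch):

    #include "runtime/safefetch.hpp"  // assumed SafeFetch32 location after JDK-8283326

    // True if *p is readable. Probing twice with different defaults avoids
    // mistaking a stored value that happens to equal one poison for a fault.
    static bool is_readable_int(int* p) {
      const int kPoison = 0xBADC0DE;
      return SafeFetch32(p, kPoison) != kPoison ||
             SafeFetch32(p, ~kPoison) != ~kPoison;
    }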
++ * ++ */ ++ ++ .globl SafeFetchN_impl ++ .globl _SafeFetchN_fault ++ .globl _SafeFetchN_continuation ++ .globl SafeFetch32_impl ++ .globl _SafeFetch32_fault ++ .globl _SafeFetch32_continuation ++ ++ # Support for int SafeFetch32(int* address, int defaultval); ++ # ++ # a0 : address ++ # a1 : defaultval ++SafeFetch32_impl: ++_SafeFetch32_fault: ++ lw $2, 0($4) ++ j $31 ++ nop ++_SafeFetch32_continuation: ++ or $2, $5, $0 ++ j $31 ++ nop ++ ++ # Support for intptr_t SafeFetchN(intptr_t* address, intptr_t defaultval); ++ # ++ # a0 : address ++ # a1 : defaultval ++SafeFetchN_impl: ++_SafeFetchN_fault: ++ ld $2, 0($4) ++ j $31 ++ nop ++_SafeFetchN_continuation: ++ or $2, $5, $0 ++ j $31 ++ nop + +commit 52397e8a10845dc42966971478c0c50a75330dff +Merge: e7efed5c455 dc00ae47d7a +Author: aoqi +Date: Thu Dec 7 18:25:10 2023 +0800 + + Merge + +diff --cc make/autoconf/jvm-features.m4 +index bfe31396e22,aa99b037b2b..d0c3a85757b +--- a/make/autoconf/jvm-features.m4 ++++ b/make/autoconf/jvm-features.m4 +@@@ -337,7 -312,7 +337,8 @@@ AC_DEFUN_ONCE([JVM_FEATURES_CHECK_SHENA + if test "x$OPENJDK_TARGET_CPU_ARCH" = "xx86" || \ + test "x$OPENJDK_TARGET_CPU" = "xaarch64" || \ + test "x$OPENJDK_TARGET_CPU" = "xppc64le" || \ + - test "x$OPENJDK_TARGET_CPU" = "xriscv64"; then +++ test "x$OPENJDK_TARGET_CPU" = "xriscv64" || \ + + test "x$OPENJDK_TARGET_CPU" = "xloongarch64"; then + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no, $OPENJDK_TARGET_CPU]) +diff --cc make/autoconf/platform.m4 +index d003aa87436,5752d3bd1a6..d38d650e0fa +--- a/make/autoconf/platform.m4 ++++ b/make/autoconf/platform.m4 +@@@ -582,12 -561,8 +582,14 @@@ AC_DEFUN([PLATFORM_SETUP_LEGACY_VARS_HE + HOTSPOT_$1_CPU_DEFINE=PPC64 + elif test "x$OPENJDK_$1_CPU" = xppc64le; then + HOTSPOT_$1_CPU_DEFINE=PPC64 ++ elif test "x$OPENJDK_$1_CPU" = xriscv64; then ++ HOTSPOT_$1_CPU_DEFINE=RISCV64 + + elif test "x$OPENJDK_$1_CPU" = xmips64; then + + HOTSPOT_$1_CPU_DEFINE=MIPS64 + + elif test "x$OPENJDK_$1_CPU" = xmips64el; then + + HOTSPOT_$1_CPU_DEFINE=MIPS64 + + elif test "x$OPENJDK_$1_CPU" = xloongarch64; then + + HOTSPOT_$1_CPU_DEFINE=LOONGARCH64 + + # The cpu defines below are for zero, we don't support them directly. + elif test "x$OPENJDK_$1_CPU" = xsparc; then +diff --cc src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch.hpp +index 486592903ce,00000000000..baadeebb243 +mode 100644,000000..100644 +--- a/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch.hpp ++++ b/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch.hpp +@@@ -1,82 -1,0 +1,84 @@@ + +/* + + * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + + * Copyright (c) 2021, Loongson Technology. All rights reserved. + + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + + * + + * This code is free software; you can redistribute it and/or modify it + + * under the terms of the GNU General Public License version 2 only, as + + * published by the Free Software Foundation. + + * + + * This code is distributed in the hope that it will be useful, but WITHOUT + + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + + * version 2 for more details (a copy is included in the LICENSE file that + + * accompanied this code). + + * + + * You should have received a copy of the GNU General Public License version + + * 2 along with this work; if not, write to the Free Software Foundation, + + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ + * + + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + + * or visit www.oracle.com if you need additional information or have any + + * questions. + + * + + */ + + + +#ifndef CPU_LOONGARCH_C1_LIRASSEMBLER_LOONGARCH_HPP + +#define CPU_LOONGARCH_C1_LIRASSEMBLER_LOONGARCH_HPP + + + +// ArrayCopyStub needs access to bailout + +friend class ArrayCopyStub; + + + + private: + + int array_element_size(BasicType type) const; + + + + void arith_fpu_implementation(LIR_Code code, int left_index, int right_index, + + int dest_index, bool pop_fpu_stack); + + + + // helper functions which checks for overflow and sets bailout if it + + // occurs. Always returns a valid embeddable pointer but in the + + // bailout case the pointer won't be to unique storage. + + address float_constant(float f); + + address double_constant(double d); + + + + address int_constant(jlong n); + + + + bool is_literal_address(LIR_Address* addr); + + + + // Ensure we have a valid Address (base+offset) to a stack-slot. + + Address stack_slot_address(int index, uint shift, int adjust = 0); + + + + // Record the type of the receiver in ReceiverTypeData + + void type_profile_helper(Register mdo, ciMethodData *md, ciProfileData *data, + + Register recv, Label* update_done); + + void add_debug_info_for_branch(address adr, CodeEmitInfo* info); + + + + void casw(Register addr, Register newval, Register cmpval, bool sign); + + void casl(Register addr, Register newval, Register cmpval); + + + + void poll_for_safepoint(relocInfo::relocType rtype, CodeEmitInfo* info = NULL); + + + + static const int max_tableswitches = 20; + + struct tableswitch switches[max_tableswitches]; + + int tableswitch_count; + + + + void init() { tableswitch_count = 0; } + + + + void deoptimize_trap(CodeEmitInfo *info); + + +++ void emit_cmp_branch(LIR_OpBranch* op); +++ + + enum { + + // call stub: CompiledStaticCall::to_interp_stub_size() + + + // CompiledStaticCall::to_trampoline_stub_size() + + _call_stub_size = 13 * NativeInstruction::nop_instruction_size, + + _exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175), + + _deopt_handler_size = 7 * NativeInstruction::nop_instruction_size + + }; + + + +public: + + void store_parameter(Register r, int offset_from_sp_in_words); + + void store_parameter(jint c, int offset_from_sp_in_words); + + void store_parameter(jobject c, int offset_from_sp_in_words); + + + +#endif // CPU_LOONGARCH_C1_LIRASSEMBLER_LOONGARCH_HPP +diff --cc src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch_64.cpp +index 2e4fbc1ecc0,00000000000..aff47cb97e1 +mode 100644,000000..100644 +--- a/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch_64.cpp ++++ b/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch_64.cpp +@@@ -1,3382 -1,0 +1,3378 @@@ + +/* + + * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + + * Copyright (c) 2021, Loongson Technology. All rights reserved. + + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + + * + + * This code is free software; you can redistribute it and/or modify it + + * under the terms of the GNU General Public License version 2 only, as + + * published by the Free Software Foundation. + + * + + * This code is distributed in the hope that it will be useful, but WITHOUT + + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + + * version 2 for more details (a copy is included in the LICENSE file that + + * accompanied this code). 
+ + * + + * You should have received a copy of the GNU General Public License version + + * 2 along with this work; if not, write to the Free Software Foundation, + + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + + * + + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + + * or visit www.oracle.com if you need additional information or have any + + * questions. + + * + + */ + + + +#include "precompiled.hpp" + +#include "asm/macroAssembler.inline.hpp" + +#include "asm/assembler.hpp" + +#include "c1/c1_CodeStubs.hpp" + +#include "c1/c1_Compilation.hpp" + +#include "c1/c1_LIRAssembler.hpp" + +#include "c1/c1_MacroAssembler.hpp" + +#include "c1/c1_Runtime1.hpp" + +#include "c1/c1_ValueStack.hpp" + +#include "ci/ciArrayKlass.hpp" + +#include "ci/ciInstance.hpp" + +#include "code/compiledIC.hpp" + +#include "gc/shared/collectedHeap.hpp" + +#include "gc/shared/gc_globals.hpp" + +#include "nativeInst_loongarch.hpp" + +#include "oops/objArrayKlass.hpp" + +#include "runtime/frame.inline.hpp" + +#include "runtime/sharedRuntime.hpp" + +#include "runtime/stubRoutines.hpp" + +#include "utilities/powerOfTwo.hpp" + +#include "vmreg_loongarch.inline.hpp" + + + +#define A0 RA0 + +#define A1 RA1 + +#define A2 RA2 + +#define A3 RA3 + +#define A4 RA4 + +#define A5 RA5 + +#define A6 RA6 + +#define A7 RA7 + +#define T0 RT0 + +#define T1 RT1 + +#define T2 RT2 + +#define T3 RT3 + +#define T5 RT5 + +#define T6 RT6 + +#define T7 RT7 + +#define T8 RT8 + + + +#ifndef PRODUCT + +#define COMMENT(x) do { __ block_comment(x); } while (0) + +#else + +#define COMMENT(x) + +#endif + + + +NEEDS_CLEANUP // remove this definitions? + + + +#define __ _masm-> + + + +static void select_different_registers(Register preserve, Register extra, + + Register &tmp1, Register &tmp2) { + + if (tmp1 == preserve) { + + assert_different_registers(tmp1, tmp2, extra); + + tmp1 = extra; + + } else if (tmp2 == preserve) { + + assert_different_registers(tmp1, tmp2, extra); + + tmp2 = extra; + + } + + assert_different_registers(preserve, tmp1, tmp2); + +} + + + +static void select_different_registers(Register preserve, Register extra, + + Register &tmp1, Register &tmp2, + + Register &tmp3) { + + if (tmp1 == preserve) { + + assert_different_registers(tmp1, tmp2, tmp3, extra); + + tmp1 = extra; + + } else if (tmp2 == preserve) { + + assert_different_registers(tmp1, tmp2, tmp3, extra); + + tmp2 = extra; + + } else if (tmp3 == preserve) { + + assert_different_registers(tmp1, tmp2, tmp3, extra); + + tmp3 = extra; + + } + + assert_different_registers(preserve, tmp1, tmp2, tmp3); + +} + + + +bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; } + + + +LIR_Opr LIR_Assembler::receiverOpr() { + + return FrameMap::receiver_opr; + +} + + + +LIR_Opr LIR_Assembler::osrBufferPointer() { + + return FrameMap::as_pointer_opr(receiverOpr()->as_register()); + +} + + + +//--------------fpu register translations----------------------- + + + +address LIR_Assembler::float_constant(float f) { + + address const_addr = __ float_constant(f); + + if (const_addr == NULL) { + + bailout("const section overflow"); + + return __ code()->consts()->start(); + + } else { + + return const_addr; + + } + +} + + + +address LIR_Assembler::double_constant(double d) { + + address const_addr = __ double_constant(d); + + if (const_addr == NULL) { + + bailout("const section overflow"); + + return __ code()->consts()->start(); + + } else { + + return const_addr; + + } + +} + + + +void LIR_Assembler::breakpoint() { 
Unimplemented(); } + + + +void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); } + + + +void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); } + + + +bool LIR_Assembler::is_literal_address(LIR_Address* addr) { Unimplemented(); return false; } + + + +static Register as_reg(LIR_Opr op) { + + return op->is_double_cpu() ? op->as_register_lo() : op->as_register(); + +} + + + +static jlong as_long(LIR_Opr data) { + + jlong result; + + switch (data->type()) { + + case T_INT: + + result = (data->as_jint()); + + break; + + case T_LONG: + + result = (data->as_jlong()); + + break; + + default: + + ShouldNotReachHere(); + + result = 0; // unreachable + + } + + return result; + +} + + + +Address LIR_Assembler::as_Address(LIR_Address* addr) { + + Register base = addr->base()->as_pointer_register(); + + LIR_Opr opr = addr->index(); + + if (opr->is_cpu_register()) { + + Register index; + + if (opr->is_single_cpu()) + + index = opr->as_register(); + + else + + index = opr->as_register_lo(); + + assert(addr->disp() == 0, "must be"); + + return Address(base, index, Address::ScaleFactor(addr->scale())); + + } else { + + assert(addr->scale() == 0, "must be"); + + return Address(base, addr->disp()); + + } + + return Address(); + +} + + + +Address LIR_Assembler::as_Address_hi(LIR_Address* addr) { + + ShouldNotReachHere(); + + return Address(); + +} + + + +Address LIR_Assembler::as_Address_lo(LIR_Address* addr) { + + return as_Address(addr); // Ouch + + // FIXME: This needs to be much more clever. See x86. + +} + + + +// Ensure a valid Address (base + offset) to a stack-slot. If stack access is + +// not encodable as a base + (immediate) offset, generate an explicit address + +// calculation to hold the address in a temporary register. + +Address LIR_Assembler::stack_slot_address(int index, uint size, int adjust) { + + precond(size == 4 || size == 8); + + Address addr = frame_map()->address_for_slot(index, adjust); + + precond(addr.index() == noreg); + + precond(addr.base() == SP); + + precond(addr.disp() > 0); + + uint mask = size - 1; + + assert((addr.disp() & mask) == 0, "scaled offsets only"); + + return addr; + +} + + + +void LIR_Assembler::osr_entry() { + + offsets()->set_value(CodeOffsets::OSR_Entry, code_offset()); + + BlockBegin* osr_entry = compilation()->hir()->osr_entry(); + + ValueStack* entry_state = osr_entry->state(); + + int number_of_locks = entry_state->locks_size(); + + + + // we jump here if osr happens with the interpreter + + // state set up to continue at the beginning of the + + // loop that triggered osr - in particular, we have + + // the following registers setup: + + // + + // A2: osr buffer + + // + + + + // build frame + + ciMethod* m = compilation()->method(); + + __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes()); + + + + // OSR buffer is + + // + + // locals[nlocals-1..0] + + // monitors[0..number_of_locks] + + // + + // locals is a direct copy of the interpreter frame so in the osr buffer + + // so first slot in the local array is the last local from the interpreter + + // and last slot is local[0] (receiver) from the interpreter + + // + + // Similarly with locks. The first lock slot in the osr buffer is the nth lock + + // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock + + // in the interpreter frame (the method lock if a sync method) + + + + // Initialize monitors in the compiled activation. 
+ + // A2: pointer to osr buffer + + // + + // All other registers are dead at this point and the locals will be + + // copied into place by code emitted in the IR. + + + + Register OSR_buf = osrBufferPointer()->as_pointer_register(); + + { + + assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below"); + + int monitor_offset = BytesPerWord * method()->max_locals() + (2 * BytesPerWord) * (number_of_locks - 1); + + // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in + + // the OSR buffer using 2 word entries: first the lock and then + + // the oop. + + for (int i = 0; i < number_of_locks; i++) { + + int slot_offset = monitor_offset - ((i * 2) * BytesPerWord); + +#ifdef ASSERT + + // verify the interpreter's monitor has a non-null object + + { + + Label L; + + __ ld_ptr(SCR1, Address(OSR_buf, slot_offset + 1 * BytesPerWord)); + + __ bnez(SCR1, L); + + __ stop("locked object is NULL"); + + __ bind(L); + + } + +#endif + + __ ld_ptr(S0, Address(OSR_buf, slot_offset + 0)); + + __ st_ptr(S0, frame_map()->address_for_monitor_lock(i)); + + __ ld_ptr(S0, Address(OSR_buf, slot_offset + 1*BytesPerWord)); + + __ st_ptr(S0, frame_map()->address_for_monitor_object(i)); + + } + + } + +} + + + +// inline cache check; done before the frame is built. + +int LIR_Assembler::check_icache() { + + Register receiver = FrameMap::receiver_opr->as_register(); + + Register ic_klass = IC_Klass; + + int start_offset = __ offset(); + + Label dont; + + + + __ verify_oop(receiver); + + + + // explicit NULL check not needed since load from [klass_offset] causes a trap + + // check against inline cache + + assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), + + "must add explicit null check"); + + + + __ load_klass(SCR2, receiver); + + __ beq(SCR2, ic_klass, dont); + + + + // if icache check fails, then jump to runtime routine + + // Note: RECEIVER must still contain the receiver! + + __ jmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type); + + + + // We align the verified entry point unless the method body + + // (including its inline cache check) will fit in a single 64-byte + + // icache line. + + if (!method()->is_accessor() || __ offset() - start_offset > 4 * 4) { + + // force alignment after the cache check. 
+ + __ align(CodeEntryAlignment); + + } + + + + __ bind(dont); + + return start_offset; + +} + + + +void LIR_Assembler::clinit_barrier(ciMethod* method) { + + assert(VM_Version::supports_fast_class_init_checks(), "sanity"); + + assert(!method->holder()->is_not_initialized(), "initialization should have been started"); + + Label L_skip_barrier; + + + + __ mov_metadata(SCR2, method->holder()->constant_encoding()); + + __ clinit_barrier(SCR2, SCR1, &L_skip_barrier /*L_fast_path*/); + + __ jmp(SharedRuntime::get_handle_wrong_method_stub(), relocInfo::runtime_call_type); + + __ bind(L_skip_barrier); + +} + + + +void LIR_Assembler::jobject2reg(jobject o, Register reg) { + + if (o == NULL) { + + __ move(reg, R0); + + } else { + + int oop_index = __ oop_recorder()->find_index(o); + + RelocationHolder rspec = oop_Relocation::spec(oop_index); + + __ relocate(rspec); + + __ patchable_li52(reg, (long)o); + + } + +} + + + +void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) { + + address target = NULL; + + + + switch (patching_id(info)) { + + case PatchingStub::access_field_id: + + target = Runtime1::entry_for(Runtime1::access_field_patching_id); + + break; + + case PatchingStub::load_klass_id: + + target = Runtime1::entry_for(Runtime1::load_klass_patching_id); + + break; + + case PatchingStub::load_mirror_id: + + target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); + + break; + + case PatchingStub::load_appendix_id: + + target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); + + break; + + default: ShouldNotReachHere(); + + } + + + + __ call(target, relocInfo::runtime_call_type); + + add_call_info_here(info); + +} + + + +void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) { + + deoptimize_trap(info); + +} + + + +// This specifies the rsp decrement needed to build the frame + +int LIR_Assembler::initial_frame_size_in_bytes() const { + + // if rounding, must let FrameMap know! + + return in_bytes(frame_map()->framesize_in_bytes()); + +} + + + +int LIR_Assembler::emit_exception_handler() { + + // if the last instruction is a call (typically to do a throw which + + // is coming at the end after block reordering) the return address + + // must still point into the code area in order to avoid assertion + + // failures when searching for the corresponding bci => add a nop + + // (was bug 5/14/1999 - gri) + + __ nop(); + + + + // generate code for exception handler + + address handler_base = __ start_a_stub(exception_handler_size()); + + if (handler_base == NULL) { + + // not enough space left for the handler + + bailout("exception handler overflow"); + + return -1; + + } + + + + int offset = code_offset(); + + + + // the exception oop and pc are in A0, and A1 + + // no other registers need to be preserved, so invalidate them + + __ invalidate_registers(false, true, true, true, true, true); + + + + // check that there is really an exception + + __ verify_not_null_oop(A0); + + + + // search an exception handler (A0: exception oop, A1: throwing pc) + + __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type); + + __ should_not_reach_here(); + + guarantee(code_offset() - offset <= exception_handler_size(), "overflow"); + + __ end_a_stub(); + + + + return offset; + +} + + + +// Emit the code to remove the frame from the stack in the exception unwind path. 
+ +int LIR_Assembler::emit_unwind_handler() { + +#ifndef PRODUCT + + if (CommentedAssembly) { + + _masm->block_comment("Unwind handler"); + + } + +#endif + + + + int offset = code_offset(); + + + + // Fetch the exception from TLS and clear out exception related thread state + + __ ld_ptr(A0, Address(TREG, JavaThread::exception_oop_offset())); + + __ st_ptr(R0, Address(TREG, JavaThread::exception_oop_offset())); + + __ st_ptr(R0, Address(TREG, JavaThread::exception_pc_offset())); + + + + __ bind(_unwind_handler_entry); + + __ verify_not_null_oop(V0); + + if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) { + + __ move(S0, V0); // Preserve the exception + + } + + + + // Perform needed unlocking + + MonitorExitStub* stub = NULL; + + if (method()->is_synchronized()) { + + monitor_address(0, FrameMap::a0_opr); + + stub = new MonitorExitStub(FrameMap::a0_opr, true, 0); + + __ unlock_object(A5, A4, A0, *stub->entry()); + + __ bind(*stub->continuation()); + + } + + + + if (compilation()->env()->dtrace_method_probes()) { + + __ mov_metadata(A1, method()->constant_encoding()); + + __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), TREG, A1); + + } + + + + if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) { + + __ move(A0, S0); // Restore the exception + + } + + + + // remove the activation and dispatch to the unwind handler + + __ block_comment("remove_frame and dispatch to the unwind handler"); + + __ remove_frame(initial_frame_size_in_bytes()); + + __ jmp(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type); + + + + // Emit the slow path assembly + + if (stub != NULL) { + + stub->emit_code(this); + + } + + + + return offset; + +} + + + +int LIR_Assembler::emit_deopt_handler() { + + // if the last instruction is a call (typically to do a throw which + + // is coming at the end after block reordering) the return address + + // must still point into the code area in order to avoid assertion + + // failures when searching for the corresponding bci => add a nop + + // (was bug 5/14/1999 - gri) + + __ nop(); + + + + // generate code for exception handler + + address handler_base = __ start_a_stub(deopt_handler_size()); + + if (handler_base == NULL) { + + // not enough space left for the handler + + bailout("deopt handler overflow"); + + return -1; + + } + + + + int offset = code_offset(); + + + + __ call(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type); + + guarantee(code_offset() - offset <= deopt_handler_size(), "overflow"); + + __ end_a_stub(); + + + + return offset; + +} + + + +void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) { + + _masm->code_section()->relocate(adr, relocInfo::poll_type); + + int pc_offset = code_offset(); + + flush_debug_info(pc_offset); + + info->record_debug_info(compilation()->debug_info_recorder(), pc_offset); + + if (info->exception_handlers() != NULL) { + + compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers()); + + } + +} + + + +void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) { + + assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == V0, + + "word returns are in V0,"); + + + + // Pop the stack before the safepoint code + + __ remove_frame(initial_frame_size_in_bytes()); + + + + if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) { + + __ reserved_stack_check(); + + } + + + + 
code_stub->set_safepoint_offset(__ offset()); + + __ relocate(relocInfo::poll_return_type); + + __ safepoint_poll(*code_stub->entry(), TREG, true /* at_return */, false /* acquire */, true /* in_nmethod */); + + + + __ jr(RA); + +} + + + +int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) { + + guarantee(info != NULL, "Shouldn't be NULL"); + + __ ld_ptr(SCR1, Address(TREG, JavaThread::polling_page_offset())); + + add_debug_info_for_branch(info); // This isn't just debug info: it's the oop map + + __ relocate(relocInfo::poll_type); + + __ ld_w(SCR1, SCR1, 0); + + return __ offset(); + +} + + + +void LIR_Assembler::move_regs(Register from_reg, Register to_reg) { + + __ move(to_reg, from_reg); + +} + + + +void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); } + + + +void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { + + assert(src->is_constant(), "should not call otherwise"); + + assert(dest->is_register(), "should not call otherwise"); + + LIR_Const* c = src->as_constant_ptr(); + + + + switch (c->type()) { + + case T_INT: + + assert(patch_code == lir_patch_none, "no patching handled here"); + + __ li(dest->as_register(), c->as_jint()); + + break; + + case T_ADDRESS: + + assert(patch_code == lir_patch_none, "no patching handled here"); + + __ li(dest->as_register(), c->as_jint()); + + break; + + case T_LONG: + + assert(patch_code == lir_patch_none, "no patching handled here"); + + __ li(dest->as_register_lo(), (intptr_t)c->as_jlong()); + + break; + + case T_OBJECT: + + if (patch_code == lir_patch_none) { + + jobject2reg(c->as_jobject(), dest->as_register()); + + } else { + + jobject2reg_with_patching(dest->as_register(), info); + + } + + break; + + case T_METADATA: + + if (patch_code != lir_patch_none) { + + klass2reg_with_patching(dest->as_register(), info); + + } else { + + __ mov_metadata(dest->as_register(), c->as_metadata()); + + } + + break; + + case T_FLOAT: + + __ lea(SCR1, InternalAddress(float_constant(c->as_jfloat()))); + + __ fld_s(dest->as_float_reg(), SCR1, 0); + + break; + + case T_DOUBLE: + + __ lea(SCR1, InternalAddress(double_constant(c->as_jdouble()))); + + __ fld_d(dest->as_double_reg(), SCR1, 0); + + break; + + default: + + ShouldNotReachHere(); + + } + +} + + + +void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) { + + LIR_Const* c = src->as_constant_ptr(); + + switch (c->type()) { + + case T_OBJECT: + + if (!c->as_jobject()) + + __ st_ptr(R0, frame_map()->address_for_slot(dest->single_stack_ix())); + + else { + + const2reg(src, FrameMap::scr1_opr, lir_patch_none, NULL); + + reg2stack(FrameMap::scr1_opr, dest, c->type(), false); + + } + + break; + + case T_ADDRESS: + + const2reg(src, FrameMap::scr1_opr, lir_patch_none, NULL); + + reg2stack(FrameMap::scr1_opr, dest, c->type(), false); + + break; + + case T_INT: + + case T_FLOAT: + + if (c->as_jint_bits() == 0) + + __ st_w(R0, frame_map()->address_for_slot(dest->single_stack_ix())); + + else { + + __ li(SCR2, c->as_jint_bits()); + + __ st_w(SCR2, frame_map()->address_for_slot(dest->single_stack_ix())); + + } + + break; + + case T_LONG: + + case T_DOUBLE: + + if (c->as_jlong_bits() == 0) + + __ st_ptr(R0, frame_map()->address_for_slot(dest->double_stack_ix(), + + lo_word_offset_in_bytes)); + + else { + + __ li(SCR2, (intptr_t)c->as_jlong_bits()); + + __ st_ptr(SCR2, frame_map()->address_for_slot(dest->double_stack_ix(), + + lo_word_offset_in_bytes)); + + } + + break; + + default: + + ShouldNotReachHere(); + + } + +} + + + +void 
LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, + + CodeEmitInfo* info, bool wide) { + + assert(src->is_constant(), "should not call otherwise"); + + LIR_Const* c = src->as_constant_ptr(); + + LIR_Address* to_addr = dest->as_address_ptr(); + + + + void (Assembler::* insn)(Register Rt, Address adr); + + + + switch (type) { + + case T_ADDRESS: + + assert(c->as_jint() == 0, "should be"); + + insn = &Assembler::st_d; + + break; + + case T_LONG: + + assert(c->as_jlong() == 0, "should be"); + + insn = &Assembler::st_d; + + break; + + case T_INT: + + assert(c->as_jint() == 0, "should be"); + + insn = &Assembler::st_w; + + break; + + case T_OBJECT: + + case T_ARRAY: + + assert(c->as_jobject() == 0, "should be"); + + if (UseCompressedOops && !wide) { + + insn = &Assembler::st_w; + + } else { + + insn = &Assembler::st_d; + + } + + break; + + case T_CHAR: + + case T_SHORT: + + assert(c->as_jint() == 0, "should be"); + + insn = &Assembler::st_h; + + break; + + case T_BOOLEAN: + + case T_BYTE: + + assert(c->as_jint() == 0, "should be"); + + insn = &Assembler::st_b; + + break; + + default: + + ShouldNotReachHere(); + + insn = &Assembler::st_d; // unreachable + + } + + + + if (info) add_debug_info_for_null_check_here(info); + + (_masm->*insn)(R0, as_Address(to_addr)); + +} + + + +void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) { + + assert(src->is_register(), "should not call otherwise"); + + assert(dest->is_register(), "should not call otherwise"); + + + + // move between cpu-registers + + if (dest->is_single_cpu()) { + + if (src->type() == T_LONG) { + + // Can do LONG -> OBJECT + + move_regs(src->as_register_lo(), dest->as_register()); + + return; + + } + + assert(src->is_single_cpu(), "must match"); + + if (src->type() == T_OBJECT) { + + __ verify_oop(src->as_register()); + + } + + move_regs(src->as_register(), dest->as_register()); + + } else if (dest->is_double_cpu()) { + + if (is_reference_type(src->type())) { + + // Surprising to me but we can see move of a long to t_object + + __ verify_oop(src->as_register()); + + move_regs(src->as_register(), dest->as_register_lo()); + + return; + + } + + assert(src->is_double_cpu(), "must match"); + + Register f_lo = src->as_register_lo(); + + Register f_hi = src->as_register_hi(); + + Register t_lo = dest->as_register_lo(); + + Register t_hi = dest->as_register_hi(); + + assert(f_hi == f_lo, "must be same"); + + assert(t_hi == t_lo, "must be same"); + + move_regs(f_lo, t_lo); + + } else if (dest->is_single_fpu()) { + + __ fmov_s(dest->as_float_reg(), src->as_float_reg()); + + } else if (dest->is_double_fpu()) { + + __ fmov_d(dest->as_double_reg(), src->as_double_reg()); + + } else { + + ShouldNotReachHere(); + + } + +} + + + +void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) { + + precond(src->is_register() && dest->is_stack()); + + + + uint const c_sz32 = sizeof(uint32_t); + + uint const c_sz64 = sizeof(uint64_t); + + + + if (src->is_single_cpu()) { + + int index = dest->single_stack_ix(); + + if (is_reference_type(type)) { + + __ st_ptr(src->as_register(), stack_slot_address(index, c_sz64)); + + __ verify_oop(src->as_register()); + + } else if (type == T_METADATA || type == T_DOUBLE || type == T_ADDRESS) { + + __ st_ptr(src->as_register(), stack_slot_address(index, c_sz64)); + + } else { + + __ st_w(src->as_register(), stack_slot_address(index, c_sz32)); + + } + + } else if (src->is_double_cpu()) { + + int index = dest->double_stack_ix(); + + Address dest_addr_LO = 
stack_slot_address(index, c_sz64, lo_word_offset_in_bytes); + + __ st_ptr(src->as_register_lo(), dest_addr_LO); + + } else if (src->is_single_fpu()) { + + int index = dest->single_stack_ix(); + + __ fst_s(src->as_float_reg(), stack_slot_address(index, c_sz32)); + + } else if (src->is_double_fpu()) { + + int index = dest->double_stack_ix(); + + __ fst_d(src->as_double_reg(), stack_slot_address(index, c_sz64)); + + } else { + + ShouldNotReachHere(); + + } + +} + + + +void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, + + CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) { + + LIR_Address* to_addr = dest->as_address_ptr(); + + PatchingStub* patch = NULL; + + Register compressed_src = SCR2; + + + + if (patch_code != lir_patch_none) { + + deoptimize_trap(info); + + return; + + } + + + + if (is_reference_type(type)) { + + __ verify_oop(src->as_register()); + + + + if (UseCompressedOops && !wide) { + + __ encode_heap_oop(compressed_src, src->as_register()); + + } else { + + compressed_src = src->as_register(); + + } + + } + + + + int null_check_here = code_offset(); + + switch (type) { + + case T_FLOAT: + + __ fst_s(src->as_float_reg(), as_Address(to_addr)); + + break; + + case T_DOUBLE: + + __ fst_d(src->as_double_reg(), as_Address(to_addr)); + + break; + + case T_ARRAY: // fall through + + case T_OBJECT: // fall through + + if (UseCompressedOops && !wide) { + + __ st_w(compressed_src, as_Address(to_addr)); + + } else { + + __ st_ptr(compressed_src, as_Address(to_addr)); + + } + + break; + + case T_METADATA: + + // We get here to store a method pointer to the stack to pass to + + // a dtrace runtime call. This can't work on 64 bit with + + // compressed klass ptrs: T_METADATA can be a compressed klass + + // ptr or a 64 bit method pointer. 
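+ + // The guard below makes the unsupported combination fail fast; the st_ptr + + // that follows it is intentionally dead code.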
+ + ShouldNotReachHere(); + + __ st_ptr(src->as_register(), as_Address(to_addr)); + + break; + + case T_ADDRESS: + + __ st_ptr(src->as_register(), as_Address(to_addr)); + + break; + + case T_INT: + + __ st_w(src->as_register(), as_Address(to_addr)); + + break; + + case T_LONG: + + __ st_ptr(src->as_register_lo(), as_Address_lo(to_addr)); + + break; + + case T_BYTE: // fall through + + case T_BOOLEAN: + + __ st_b(src->as_register(), as_Address(to_addr)); + + break; + + case T_CHAR: // fall through + + case T_SHORT: + + __ st_h(src->as_register(), as_Address(to_addr)); + + break; + + default: + + ShouldNotReachHere(); + + } + + if (info != NULL) { + + add_debug_info_for_null_check(null_check_here, info); + + } + +} + + + +void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) { + + precond(src->is_stack() && dest->is_register()); + + + + uint const c_sz32 = sizeof(uint32_t); + + uint const c_sz64 = sizeof(uint64_t); + + + + if (dest->is_single_cpu()) { + + int index = src->single_stack_ix(); + + if (is_reference_type(type)) { + + __ ld_ptr(dest->as_register(), stack_slot_address(index, c_sz64)); + + __ verify_oop(dest->as_register()); + + } else if (type == T_METADATA || type == T_ADDRESS) { + + __ ld_ptr(dest->as_register(), stack_slot_address(index, c_sz64)); + + } else { + + __ ld_w(dest->as_register(), stack_slot_address(index, c_sz32)); + + } + + } else if (dest->is_double_cpu()) { + + int index = src->double_stack_ix(); + + Address src_addr_LO = stack_slot_address(index, c_sz64, lo_word_offset_in_bytes); + + __ ld_ptr(dest->as_register_lo(), src_addr_LO); + + } else if (dest->is_single_fpu()) { + + int index = src->single_stack_ix(); + + __ fld_s(dest->as_float_reg(), stack_slot_address(index, c_sz32)); + + } else if (dest->is_double_fpu()) { + + int index = src->double_stack_ix(); + + __ fld_d(dest->as_double_reg(), stack_slot_address(index, c_sz64)); + + } else { + + ShouldNotReachHere(); + + } + +} + + + +void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) { + + address target = NULL; + + + + switch (patching_id(info)) { + + case PatchingStub::access_field_id: + + target = Runtime1::entry_for(Runtime1::access_field_patching_id); + + break; + + case PatchingStub::load_klass_id: + + target = Runtime1::entry_for(Runtime1::load_klass_patching_id); + + break; + + case PatchingStub::load_mirror_id: + + target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); + + break; + + case PatchingStub::load_appendix_id: + + target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); + + break; + + default: ShouldNotReachHere(); + + } + + + + __ call(target, relocInfo::runtime_call_type); + + add_call_info_here(info); + +} + + + +void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) { + + LIR_Opr temp; + + + + if (type == T_LONG || type == T_DOUBLE) + + temp = FrameMap::scr1_long_opr; + + else + + temp = FrameMap::scr1_opr; + + + + stack2reg(src, temp, src->type()); + + reg2stack(temp, dest, dest->type(), false); + +} + + + +void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, + + CodeEmitInfo* info, bool wide, bool /* unaligned */) { + + LIR_Address* addr = src->as_address_ptr(); + + LIR_Address* from_addr = src->as_address_ptr(); + + + + if (addr->base()->type() == T_OBJECT) { + + __ verify_oop(addr->base()->as_pointer_register()); + + } + + + + if (patch_code != lir_patch_none) { + + deoptimize_trap(info); + + return; + + } + + + + if (info != NULL) { + + 
add_debug_info_for_null_check_here(info); + + } + + int null_check_here = code_offset(); + + switch (type) { + + case T_FLOAT: + + __ fld_s(dest->as_float_reg(), as_Address(from_addr)); + + break; + + case T_DOUBLE: + + __ fld_d(dest->as_double_reg(), as_Address(from_addr)); + + break; + + case T_ARRAY: // fall through + + case T_OBJECT: // fall through + + if (UseCompressedOops && !wide) { + + __ ld_wu(dest->as_register(), as_Address(from_addr)); + + } else { + + __ ld_ptr(dest->as_register(), as_Address(from_addr)); + + } + + break; + + case T_METADATA: + + // We get here to store a method pointer to the stack to pass to + + // a dtrace runtime call. This can't work on 64 bit with + + // compressed klass ptrs: T_METADATA can be a compressed klass + + // ptr or a 64 bit method pointer. + + ShouldNotReachHere(); + + __ ld_ptr(dest->as_register(), as_Address(from_addr)); + + break; + + case T_ADDRESS: + + // FIXME: OMG this is a horrible kludge. Any offset from an + + // address that matches klass_offset_in_bytes() will be loaded + + // as a word, not a long. + + if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) { + + __ ld_wu(dest->as_register(), as_Address(from_addr)); + + } else { + + __ ld_ptr(dest->as_register(), as_Address(from_addr)); + + } + + break; + + case T_INT: + + __ ld_w(dest->as_register(), as_Address(from_addr)); + + break; + + case T_LONG: + + __ ld_ptr(dest->as_register_lo(), as_Address_lo(from_addr)); + + break; + + case T_BYTE: + + __ ld_b(dest->as_register(), as_Address(from_addr)); + + break; + + case T_BOOLEAN: + + __ ld_bu(dest->as_register(), as_Address(from_addr)); + + break; + + case T_CHAR: + + __ ld_hu(dest->as_register(), as_Address(from_addr)); + + break; + + case T_SHORT: + + __ ld_h(dest->as_register(), as_Address(from_addr)); + + break; + + default: + + ShouldNotReachHere(); + + } + + + + if (is_reference_type(type)) { + + if (UseCompressedOops && !wide) { + + __ decode_heap_oop(dest->as_register()); + + } + + + + if (!UseZGC) { + + // Load barrier has not yet been applied, so ZGC can't verify the oop here + + __ verify_oop(dest->as_register()); + + } + + } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) { + + if (UseCompressedClassPointers) { + + __ decode_klass_not_null(dest->as_register()); + + } + + } + +} + + + +int LIR_Assembler::array_element_size(BasicType type) const { + + int elem_size = type2aelembytes(type); + + return exact_log2(elem_size); + +} + + + +void LIR_Assembler::emit_op3(LIR_Op3* op) { + + switch (op->code()) { + + case lir_idiv: + + case lir_irem: + + arithmetic_idiv(op->code(), op->in_opr1(), op->in_opr2(), op->in_opr3(), + + op->result_opr(), op->info()); + + break; + + case lir_fmad: + + __ fmadd_d(op->result_opr()->as_double_reg(), op->in_opr1()->as_double_reg(), + + op->in_opr2()->as_double_reg(), op->in_opr3()->as_double_reg()); + + break; + + case lir_fmaf: + + __ fmadd_s(op->result_opr()->as_float_reg(), op->in_opr1()->as_float_reg(), + + op->in_opr2()->as_float_reg(), op->in_opr3()->as_float_reg()); + + break; + + default: + + ShouldNotReachHere(); + + break; + + } + +} + + + +void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) { + +#ifdef ASSERT + + assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label"); + + if (op->block() != NULL) _branch_target_blocks.append(op->block()); +- assert(op->cond() == lir_cond_always, "must be"); + +#endif + + +- if (op->info() != NULL) +- add_debug_info_for_branch(op->info()); +++ if (op->cond() == 
lir_cond_always) { +++ if (op->info() != NULL) +++ add_debug_info_for_branch(op->info()); + + +- __ b_far(*(op->label())); +++ __ b_far(*(op->label())); +++ } else { +++ emit_cmp_branch(op); +++ } + +} + + +- void LIR_Assembler::emit_opCmpBranch(LIR_OpCmpBranch* op) { +++void LIR_Assembler::emit_cmp_branch(LIR_OpBranch* op) { + +#ifdef ASSERT +- assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label"); +- if (op->block() != NULL) _branch_target_blocks.append(op->block()); + + if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock()); + +#endif + + + + if (op->info() != NULL) { + + assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(), + + "shouldn't be codeemitinfo for non-address operands"); + + add_debug_info_for_null_check_here(op->info()); // exception possible + + } + + + + Label& L = *(op->label()); + + Assembler::Condition acond; + + LIR_Opr opr1 = op->in_opr1(); + + LIR_Opr opr2 = op->in_opr2(); + + assert(op->condition() != lir_cond_always, "must be"); + + +- if (op->code() == lir_cmp_float_branch) { +++ if (op->code() == lir_cond_float_branch) { + + bool is_unordered = (op->ublock() == op->block()); + + if (opr1->is_single_fpu()) { + + FloatRegister reg1 = opr1->as_float_reg(); + + assert(opr2->is_single_fpu(), "expect single float register"); + + FloatRegister reg2 = opr2->as_float_reg(); + + switch(op->condition()) { + + case lir_cond_equal: + + if (is_unordered) + + __ fcmp_cueq_s(FCC0, reg1, reg2); + + else + + __ fcmp_ceq_s(FCC0, reg1, reg2); + + break; + + case lir_cond_notEqual: + + if (is_unordered) + + __ fcmp_cune_s(FCC0, reg1, reg2); + + else + + __ fcmp_cne_s(FCC0, reg1, reg2); + + break; + + case lir_cond_less: + + if (is_unordered) + + __ fcmp_cult_s(FCC0, reg1, reg2); + + else + + __ fcmp_clt_s(FCC0, reg1, reg2); + + break; + + case lir_cond_lessEqual: + + if (is_unordered) + + __ fcmp_cule_s(FCC0, reg1, reg2); + + else + + __ fcmp_cle_s(FCC0, reg1, reg2); + + break; + + case lir_cond_greaterEqual: + + if (is_unordered) + + __ fcmp_cule_s(FCC0, reg2, reg1); + + else + + __ fcmp_cle_s(FCC0, reg2, reg1); + + break; + + case lir_cond_greater: + + if (is_unordered) + + __ fcmp_cult_s(FCC0, reg2, reg1); + + else + + __ fcmp_clt_s(FCC0, reg2, reg1); + + break; + + default: + + ShouldNotReachHere(); + + } + + } else if (opr1->is_double_fpu()) { + + FloatRegister reg1 = opr1->as_double_reg(); + + assert(opr2->is_double_fpu(), "expect double float register"); + + FloatRegister reg2 = opr2->as_double_reg(); + + switch(op->condition()) { + + case lir_cond_equal: + + if (is_unordered) + + __ fcmp_cueq_d(FCC0, reg1, reg2); + + else + + __ fcmp_ceq_d(FCC0, reg1, reg2); + + break; + + case lir_cond_notEqual: + + if (is_unordered) + + __ fcmp_cune_d(FCC0, reg1, reg2); + + else + + __ fcmp_cne_d(FCC0, reg1, reg2); + + break; + + case lir_cond_less: + + if (is_unordered) + + __ fcmp_cult_d(FCC0, reg1, reg2); + + else + + __ fcmp_clt_d(FCC0, reg1, reg2); + + break; + + case lir_cond_lessEqual: + + if (is_unordered) + + __ fcmp_cule_d(FCC0, reg1, reg2); + + else + + __ fcmp_cle_d(FCC0, reg1, reg2); + + break; + + case lir_cond_greaterEqual: + + if (is_unordered) + + __ fcmp_cule_d(FCC0, reg2, reg1); + + else + + __ fcmp_cle_d(FCC0, reg2, reg1); + + break; + + case lir_cond_greater: + + if (is_unordered) + + __ fcmp_cult_d(FCC0, reg2, reg1); + + else + + __ fcmp_clt_d(FCC0, reg2, reg1); + + break; + + default: + + ShouldNotReachHere(); + + } + + } else { + + ShouldNotReachHere(); + + } + + __ bcnez(FCC0, L); + + } else { + + if 
(opr1->is_constant() && opr2->is_single_cpu()) { + + // tableswitch + + Unimplemented(); + + } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) { + + Register reg1 = as_reg(opr1); + + Register reg2 = noreg; + + jlong imm2 = 0; + + if (opr2->is_single_cpu()) { + + // cpu register - cpu register + + reg2 = opr2->as_register(); + + } else if (opr2->is_double_cpu()) { + + // cpu register - cpu register + + reg2 = opr2->as_register_lo(); + + } else if (opr2->is_constant()) { + + switch(opr2->type()) { + + case T_INT: + + case T_ADDRESS: + + imm2 = opr2->as_constant_ptr()->as_jint(); + + break; + + case T_LONG: + + imm2 = opr2->as_constant_ptr()->as_jlong(); + + break; + + case T_METADATA: + + imm2 = (intptr_t)opr2->as_constant_ptr()->as_metadata(); + + break; + + case T_OBJECT: + + case T_ARRAY: + + if (opr2->as_constant_ptr()->as_jobject() != NULL) { + + reg2 = SCR1; + + jobject2reg(opr2->as_constant_ptr()->as_jobject(), reg2); + + } else { + + reg2 = R0; + + } + + break; + + default: + + ShouldNotReachHere(); + + break; + + } + + } else { + + ShouldNotReachHere(); + + } + + if (reg2 == noreg) { + + if (imm2 == 0) { + + reg2 = R0; + + } else { + + reg2 = SCR1; + + __ li(reg2, imm2); + + } + + } + + switch (op->condition()) { + + case lir_cond_equal: + + __ beq_far(reg1, reg2, L); break; + + case lir_cond_notEqual: + + __ bne_far(reg1, reg2, L); break; + + case lir_cond_less: + + __ blt_far(reg1, reg2, L, true); break; + + case lir_cond_lessEqual: + + __ bge_far(reg2, reg1, L, true); break; + + case lir_cond_greaterEqual: + + __ bge_far(reg1, reg2, L, true); break; + + case lir_cond_greater: + + __ blt_far(reg2, reg1, L, true); break; + + case lir_cond_belowEqual: + + __ bge_far(reg2, reg1, L, false); break; + + case lir_cond_aboveEqual: + + __ bge_far(reg1, reg2, L, false); break; + + default: + + ShouldNotReachHere(); + + } + + } + + } + +} + + + +void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) { + + LIR_Opr src = op->in_opr(); + + LIR_Opr dest = op->result_opr(); + + LIR_Opr tmp = op->tmp(); + + + + switch (op->bytecode()) { + + case Bytecodes::_i2f: + + __ movgr2fr_w(dest->as_float_reg(), src->as_register()); + + __ ffint_s_w(dest->as_float_reg(), dest->as_float_reg()); + + break; + + case Bytecodes::_i2d: + + __ movgr2fr_w(dest->as_double_reg(), src->as_register()); + + __ ffint_d_w(dest->as_double_reg(), dest->as_double_reg()); + + break; + + case Bytecodes::_l2d: + + __ movgr2fr_d(dest->as_double_reg(), src->as_register_lo()); + + __ ffint_d_l(dest->as_double_reg(), dest->as_double_reg()); + + break; + + case Bytecodes::_l2f: + + __ movgr2fr_d(dest->as_float_reg(), src->as_register_lo()); + + __ ffint_s_l(dest->as_float_reg(), dest->as_float_reg()); + + break; + + case Bytecodes::_f2d: + + __ fcvt_d_s(dest->as_double_reg(), src->as_float_reg()); + + break; + + case Bytecodes::_d2f: + + __ fcvt_s_d(dest->as_float_reg(), src->as_double_reg()); + + break; + + case Bytecodes::_i2c: + + __ bstrpick_w(dest->as_register(), src->as_register(), 15, 0); + + break; + + case Bytecodes::_i2l: + + _masm->block_comment("FIXME: This could be a no-op"); + + __ slli_w(dest->as_register_lo(), src->as_register(), 0); + + break; + + case Bytecodes::_i2s: + + __ ext_w_h(dest->as_register(), src->as_register()); + + break; + + case Bytecodes::_i2b: + + __ ext_w_b(dest->as_register(), src->as_register()); + + break; + + case Bytecodes::_l2i: + + __ slli_w(dest->as_register(), src->as_register_lo(), 0); + + break; + + case Bytecodes::_d2l: + + __ ftintrz_l_d(tmp->as_double_reg(), 
src->as_double_reg()); + + __ movfr2gr_d(dest->as_register_lo(), tmp->as_double_reg()); + + break; + + case Bytecodes::_f2i: + + __ ftintrz_w_s(tmp->as_float_reg(), src->as_float_reg()); + + __ movfr2gr_s(dest->as_register(), tmp->as_float_reg()); + + break; + + case Bytecodes::_f2l: + + __ ftintrz_l_s(tmp->as_float_reg(), src->as_float_reg()); + + __ movfr2gr_d(dest->as_register_lo(), tmp->as_float_reg()); + + break; + + case Bytecodes::_d2i: + + __ ftintrz_w_d(tmp->as_double_reg(), src->as_double_reg()); + + __ movfr2gr_s(dest->as_register(), tmp->as_double_reg()); + + break; + + default: ShouldNotReachHere(); + + } + +} + + + +void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) { + + if (op->init_check()) { + + __ ld_bu(SCR1, Address(op->klass()->as_register(), InstanceKlass::init_state_offset())); + + __ li(SCR2, InstanceKlass::fully_initialized); + + add_debug_info_for_null_check_here(op->stub()->info()); + + __ bne_far(SCR1, SCR2, *op->stub()->entry()); + + } + + __ allocate_object(op->obj()->as_register(), op->tmp1()->as_register(), + + op->tmp2()->as_register(), op->header_size(), + + op->object_size(), op->klass()->as_register(), + + *op->stub()->entry()); + + __ bind(*op->stub()->continuation()); + +} + + + +void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { + + Register len = op->len()->as_register(); + + if (UseSlowPath || + + (!UseFastNewObjectArray && is_reference_type(op->type())) || + + (!UseFastNewTypeArray && !is_reference_type(op->type()))) { + + __ b(*op->stub()->entry()); + + } else { + + Register tmp1 = op->tmp1()->as_register(); + + Register tmp2 = op->tmp2()->as_register(); + + Register tmp3 = op->tmp3()->as_register(); + + if (len == tmp1) { + + tmp1 = tmp3; + + } else if (len == tmp2) { + + tmp2 = tmp3; + + } else if (len == tmp3) { + + // everything is ok + + } else { + + __ move(tmp3, len); + + } + + __ allocate_array(op->obj()->as_register(), len, tmp1, tmp2, + + arrayOopDesc::header_size(op->type()), + + array_element_size(op->type()), + + op->klass()->as_register(), + + *op->stub()->entry()); + + } + + __ bind(*op->stub()->continuation()); + +} + + + +void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md, ciProfileData *data, + + Register recv, Label* update_done) { + + for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) { + + Label next_test; + + // See if the receiver is receiver[n]. 
+ + __ lea(SCR2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)))); + + __ ld_ptr(SCR1, Address(SCR2)); + + __ bne(recv, SCR1, next_test); + + Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))); + + __ ld_ptr(SCR2, data_addr); + + __ addi_d(SCR2, SCR2, DataLayout::counter_increment); + + __ st_ptr(SCR2, data_addr); + + __ b(*update_done); + + __ bind(next_test); + + } + + + + // Didn't find receiver; find next empty slot and fill it in + + for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) { + + Label next_test; + + __ lea(SCR2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)))); + + Address recv_addr(SCR2); + + __ ld_ptr(SCR1, recv_addr); + + __ bnez(SCR1, next_test); + + __ st_ptr(recv, recv_addr); + + __ li(SCR1, DataLayout::counter_increment); + + __ lea(SCR2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)))); + + __ st_ptr(SCR1, Address(SCR2)); + + __ b(*update_done); + + __ bind(next_test); + + } + +} + + + +void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, + + Label* failure, Label* obj_is_null) { + + // we always need a stub for the failure case. + + CodeStub* stub = op->stub(); + + Register obj = op->object()->as_register(); + + Register k_RInfo = op->tmp1()->as_register(); + + Register klass_RInfo = op->tmp2()->as_register(); + + Register dst = op->result_opr()->as_register(); + + ciKlass* k = op->klass(); + + Register Rtmp1 = noreg; + + + + // check if it needs to be profiled + + ciMethodData* md; + + ciProfileData* data; + + + + const bool should_profile = op->should_profile(); + + + + if (should_profile) { + + ciMethod* method = op->profiled_method(); + + assert(method != NULL, "Should have method"); + + int bci = op->profiled_bci(); + + md = method->method_data_or_null(); + + assert(md != NULL, "Sanity"); + + data = md->bci_to_data(bci); + + assert(data != NULL, "need data for type check"); + + assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); + + } + + + + Label profile_cast_success, profile_cast_failure; + + Label *success_target = should_profile ? &profile_cast_success : success; + + Label *failure_target = should_profile ? 
&profile_cast_failure : failure; + + + + if (obj == k_RInfo) { + + k_RInfo = dst; + + } else if (obj == klass_RInfo) { + + klass_RInfo = dst; + + } + + if (k->is_loaded() && !UseCompressedClassPointers) { + + select_different_registers(obj, dst, k_RInfo, klass_RInfo); + + } else { + + Rtmp1 = op->tmp3()->as_register(); + + select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1); + + } + + + + assert_different_registers(obj, k_RInfo, klass_RInfo); + + + + if (should_profile) { + + Label not_null; + + __ bnez(obj, not_null); + + // Object is null; update MDO and exit + + Register mdo = klass_RInfo; + + __ mov_metadata(mdo, md->constant_encoding()); + + Address data_addr = Address(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset())); + + __ ld_bu(SCR2, data_addr); + + __ ori(SCR2, SCR2, BitData::null_seen_byte_constant()); + + __ st_b(SCR2, data_addr); + + __ b(*obj_is_null); + + __ bind(not_null); + + } else { + + __ beqz(obj, *obj_is_null); + + } + + + + if (!k->is_loaded()) { + + klass2reg_with_patching(k_RInfo, op->info_for_patch()); + + } else { + + __ mov_metadata(k_RInfo, k->constant_encoding()); + + } + + __ verify_oop(obj); + + + + if (op->fast_check()) { + + // get object class + + // not a safepoint as obj null check happens earlier + + __ load_klass(SCR2, obj); + + __ bne_far(SCR2, k_RInfo, *failure_target); + + // successful cast, fall through to profile or jump + + } else { + + // get object class + + // not a safepoint as obj null check happens earlier + + __ load_klass(klass_RInfo, obj); + + if (k->is_loaded()) { + + // See if we get an immediate positive hit + + __ ld_ptr(SCR1, Address(klass_RInfo, int64_t(k->super_check_offset()))); + + if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) { + + __ bne_far(k_RInfo, SCR1, *failure_target); + + // successful cast, fall through to profile or jump + + } else { + + // See if we get an immediate positive hit + + __ beq_far(k_RInfo, SCR1, *success_target); + + // check for self + + __ beq_far(klass_RInfo, k_RInfo, *success_target); + + + + __ addi_d(SP, SP, -2 * wordSize); + + __ st_ptr(k_RInfo, Address(SP, 0 * wordSize)); + + __ st_ptr(klass_RInfo, Address(SP, 1 * wordSize)); + + __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); + + __ ld_ptr(klass_RInfo, Address(SP, 0 * wordSize)); + + __ addi_d(SP, SP, 2 * wordSize); + + // result is a boolean + + __ beqz(klass_RInfo, *failure_target); + + // successful cast, fall through to profile or jump + + } + + } else { + + // perform the fast part of the checking logic + + __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL); + + // call out-of-line instance of __ check_klass_subtype_slow_path(...): + + __ addi_d(SP, SP, -2 * wordSize); + + __ st_ptr(k_RInfo, Address(SP, 0 * wordSize)); + + __ st_ptr(klass_RInfo, Address(SP, 1 * wordSize)); + + __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); + + __ ld_ptr(k_RInfo, Address(SP, 0 * wordSize)); + + __ ld_ptr(klass_RInfo, Address(SP, 1 * wordSize)); + + __ addi_d(SP, SP, 2 * wordSize); + + // result is a boolean + + __ beqz(k_RInfo, *failure_target); + + // successful cast, fall through to profile or jump + + } + + } + + if (should_profile) { + + Register mdo = klass_RInfo, recv = k_RInfo; + + __ bind(profile_cast_success); + + __ mov_metadata(mdo, md->constant_encoding()); + + __ load_klass(recv, obj); + + Label update_done; + + type_profile_helper(mdo, 
md, data, recv, success); + + __ b(*success); + + + + __ bind(profile_cast_failure); + + __ mov_metadata(mdo, md->constant_encoding()); + + Address counter_addr = Address(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); + + __ ld_ptr(SCR2, counter_addr); + + __ addi_d(SCR2, SCR2, -DataLayout::counter_increment); + + __ st_ptr(SCR2, counter_addr); + + __ b(*failure); + + } + + __ b(*success); + +} + + + +void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { + + const bool should_profile = op->should_profile(); + + + + LIR_Code code = op->code(); + + if (code == lir_store_check) { + + Register value = op->object()->as_register(); + + Register array = op->array()->as_register(); + + Register k_RInfo = op->tmp1()->as_register(); + + Register klass_RInfo = op->tmp2()->as_register(); + + Register Rtmp1 = op->tmp3()->as_register(); + + CodeStub* stub = op->stub(); + + + + // check if it needs to be profiled + + ciMethodData* md; + + ciProfileData* data; + + + + if (should_profile) { + + ciMethod* method = op->profiled_method(); + + assert(method != NULL, "Should have method"); + + int bci = op->profiled_bci(); + + md = method->method_data_or_null(); + + assert(md != NULL, "Sanity"); + + data = md->bci_to_data(bci); + + assert(data != NULL, "need data for type check"); + + assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); + + } + + Label profile_cast_success, profile_cast_failure, done; + + Label *success_target = should_profile ? &profile_cast_success : &done; + + Label *failure_target = should_profile ? &profile_cast_failure : stub->entry(); + + + + if (should_profile) { + + Label not_null; + + __ bnez(value, not_null); + + // Object is null; update MDO and exit + + Register mdo = klass_RInfo; + + __ mov_metadata(mdo, md->constant_encoding()); + + Address data_addr = Address(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset())); + + __ ld_bu(SCR2, data_addr); + + __ ori(SCR2, SCR2, BitData::null_seen_byte_constant()); + + __ st_b(SCR2, data_addr); + + __ b(done); + + __ bind(not_null); + + } else { + + __ beqz(value, done); + + } + + + + add_debug_info_for_null_check_here(op->info_for_exception()); + + __ load_klass(k_RInfo, array); + + __ load_klass(klass_RInfo, value); + + + + // get instance klass (it's already uncompressed) + + __ ld_ptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset())); + + // perform the fast part of the checking logic + + __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL); + + // call out-of-line instance of __ check_klass_subtype_slow_path(...): + + __ addi_d(SP, SP, -2 * wordSize); + + __ st_ptr(k_RInfo, Address(SP, 0 * wordSize)); + + __ st_ptr(klass_RInfo, Address(SP, 1 * wordSize)); + + __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); + + __ ld_ptr(k_RInfo, Address(SP, 0 * wordSize)); + + __ ld_ptr(klass_RInfo, Address(SP, 1 * wordSize)); + + __ addi_d(SP, SP, 2 * wordSize); + + // result is a boolean + + __ beqz(k_RInfo, *failure_target); + + // fall through to the success case + + + + if (should_profile) { + + Register mdo = klass_RInfo, recv = k_RInfo; + + __ bind(profile_cast_success); + + __ mov_metadata(mdo, md->constant_encoding()); + + __ load_klass(recv, value); + + Label update_done; + + type_profile_helper(mdo, md, data, recv, &done); + + __ b(done); + + + + __ bind(profile_cast_failure); + + __ mov_metadata(mdo, md->constant_encoding()); + + Address counter_addr(mdo, 
md->byte_offset_of_slot(data, CounterData::count_offset())); + + __ lea(SCR2, counter_addr); + + __ ld_ptr(SCR1, Address(SCR2)); + + __ addi_d(SCR1, SCR1, -DataLayout::counter_increment); + + __ st_ptr(SCR1, Address(SCR2)); + + __ b(*stub->entry()); + + } + + + + __ bind(done); + + } else if (code == lir_checkcast) { + + Register obj = op->object()->as_register(); + + Register dst = op->result_opr()->as_register(); + + Label success; + + emit_typecheck_helper(op, &success, op->stub()->entry(), &success); + + __ bind(success); + + if (dst != obj) { + + __ move(dst, obj); + + } + + } else if (code == lir_instanceof) { + + Register obj = op->object()->as_register(); + + Register dst = op->result_opr()->as_register(); + + Label success, failure, done; + + emit_typecheck_helper(op, &success, &failure, &failure); + + __ bind(failure); + + __ move(dst, R0); + + __ b(done); + + __ bind(success); + + __ li(dst, 1); + + __ bind(done); + + } else { + + ShouldNotReachHere(); + + } + +} + + + +void LIR_Assembler::casw(Register addr, Register newval, Register cmpval, bool sign) { + + __ cmpxchg32(Address(addr, 0), cmpval, newval, SCR1, sign, + + /* retold */ false, /* barrier */ true, /* weak */ false, /* exchange */ false); + +} + + + +void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) { + + __ cmpxchg(Address(addr, 0), cmpval, newval, SCR1, + + /* retold */ false, /* barrier */ true, /* weak */ false, /* exchange */ false); + +} + + + +void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { + + assert(VM_Version::supports_cx8(), "wrong machine"); + + Register addr; + + if (op->addr()->is_register()) { + + addr = as_reg(op->addr()); + + } else { + + assert(op->addr()->is_address(), "what else?"); + + LIR_Address* addr_ptr = op->addr()->as_address_ptr(); + + assert(addr_ptr->disp() == 0, "need 0 disp"); + + assert(addr_ptr->index() == LIR_OprDesc::illegalOpr(), "need 0 index"); + + addr = as_reg(addr_ptr->base()); + + } + + Register newval = as_reg(op->new_value()); + + Register cmpval = as_reg(op->cmp_value()); + + + + if (op->code() == lir_cas_obj) { + + if (UseCompressedOops) { + + Register t1 = op->tmp1()->as_register(); + + assert(op->tmp1()->is_valid(), "must be"); + + __ encode_heap_oop(t1, cmpval); + + cmpval = t1; + + __ encode_heap_oop(SCR2, newval); + + newval = SCR2; + + casw(addr, newval, cmpval, false); + + } else { + + casl(addr, newval, cmpval); + + } + + } else if (op->code() == lir_cas_int) { + + casw(addr, newval, cmpval, true); + + } else { + + casl(addr, newval, cmpval); + + } + +} + + +- void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, +- LIR_Opr result, BasicType type) { +- Unimplemented(); +- } +- +- void LIR_Assembler::cmp_cmove(LIR_Condition condition, LIR_Opr left, LIR_Opr right, +- LIR_Opr src1, LIR_Opr src2, LIR_Opr result, BasicType type) { +++void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr src1, LIR_Opr src2, LIR_Opr result, BasicType type, +++ LIR_Opr left, LIR_Opr right) { + + assert(result->is_single_cpu() || result->is_double_cpu(), "expect single register for result"); + + assert(left->is_single_cpu() || left->is_double_cpu(), "must be"); + + Register regd = (result->type() == T_LONG) ? 
result->as_register_lo() : result->as_register(); + + Register regl = as_reg(left); + + Register regr = noreg; + + Register reg1 = noreg; + + Register reg2 = noreg; + + jlong immr = 0; + + + + // comparison operands + + if (right->is_single_cpu()) { + + // cpu register - cpu register + + regr = right->as_register(); + + } else if (right->is_double_cpu()) { + + // cpu register - cpu register + + regr = right->as_register_lo(); + + } else if (right->is_constant()) { + + switch(right->type()) { + + case T_INT: + + case T_ADDRESS: + + immr = right->as_constant_ptr()->as_jint(); + + break; + + case T_LONG: + + immr = right->as_constant_ptr()->as_jlong(); + + break; + + case T_METADATA: + + immr = (intptr_t)right->as_constant_ptr()->as_metadata(); + + break; + + case T_OBJECT: + + case T_ARRAY: + + if (right->as_constant_ptr()->as_jobject() != NULL) { + + regr = SCR1; + + jobject2reg(right->as_constant_ptr()->as_jobject(), regr); + + } else { + + immr = 0; + + } + + break; + + default: + + ShouldNotReachHere(); + + break; + + } + + } else { + + ShouldNotReachHere(); + + } + + + + if (regr == noreg) { + + switch (condition) { + + case lir_cond_equal: + + case lir_cond_notEqual: + + if (!Assembler::is_simm(-immr, 12)) { + + regr = SCR1; + + __ li(regr, immr); + + } + + break; + + default: + + if (!Assembler::is_simm(immr, 12)) { + + regr = SCR1; + + __ li(regr, immr); + + } + + } + + } + + + + // special cases + + if (src1->is_constant() && src2->is_constant()) { + + jlong val1 = 0, val2 = 0; + + if (src1->type() == T_INT && src2->type() == T_INT) { + + val1 = src1->as_jint(); + + val2 = src2->as_jint(); + + } else if (src1->type() == T_LONG && src2->type() == T_LONG) { + + val1 = src1->as_jlong(); + + val2 = src2->as_jlong(); + + } + + if (val1 == 0 && val2 == 1) { + + if (regr == noreg) { + + switch (condition) { + + case lir_cond_equal: + + if (immr == 0) { + + __ sltu(regd, R0, regl); + + } else { + + __ addi_d(SCR1, regl, -immr); + + __ li(regd, 1); + + __ maskeqz(regd, regd, SCR1); + + } + + break; + + case lir_cond_notEqual: + + if (immr == 0) { + + __ sltu(regd, R0, regl); + + __ xori(regd, regd, 1); + + } else { + + __ addi_d(SCR1, regl, -immr); + + __ li(regd, 1); + + __ masknez(regd, regd, SCR1); + + } + + break; + + case lir_cond_less: + + __ slti(regd, regl, immr); + + __ xori(regd, regd, 1); + + break; + + case lir_cond_lessEqual: + + if (immr == 0) { + + __ slt(regd, R0, regl); + + } else { + + __ li(SCR1, immr); + + __ slt(regd, SCR1, regl); + + } + + break; + + case lir_cond_greater: + + if (immr == 0) { + + __ slt(regd, R0, regl); + + } else { + + __ li(SCR1, immr); + + __ slt(regd, SCR1, regl); + + } + + __ xori(regd, regd, 1); + + break; + + case lir_cond_greaterEqual: + + __ slti(regd, regl, immr); + + break; + + case lir_cond_belowEqual: + + if (immr == 0) { + + __ sltu(regd, R0, regl); + + } else { + + __ li(SCR1, immr); + + __ sltu(regd, SCR1, regl); + + } + + break; + + case lir_cond_aboveEqual: + + __ sltui(regd, regl, immr); + + break; + + default: + + ShouldNotReachHere(); + + } + + } else { + + switch (condition) { + + case lir_cond_equal: + + __ sub_d(SCR1, regl, regr); + + __ li(regd, 1); + + __ maskeqz(regd, regd, SCR1); + + break; + + case lir_cond_notEqual: + + __ sub_d(SCR1, regl, regr); + + __ li(regd, 1); + + __ masknez(regd, regd, SCR1); + + break; + + case lir_cond_less: + + __ slt(regd, regl, regr); + + __ xori(regd, regd, 1); + + break; + + case lir_cond_lessEqual: + + __ slt(regd, regr, regl); + + break; + + case lir_cond_greater: + + __ slt(regd, regr, 
regl); + + __ xori(regd, regd, 1); + + break; + + case lir_cond_greaterEqual: + + __ slt(regd, regl, regr); + + break; + + case lir_cond_belowEqual: + + __ sltu(regd, regr, regl); + + break; + + case lir_cond_aboveEqual: + + __ sltu(regd, regl, regr); + + break; + + default: + + ShouldNotReachHere(); + + } + + } + + return; + + } else if (val1 == 1 && val2 == 0) { + + if (regr == noreg) { + + switch (condition) { + + case lir_cond_equal: + + if (immr == 0) { + + __ sltu(regd, R0, regl); + + __ xori(regd, regd, 1); + + } else { + + __ addi_d(SCR1, regl, -immr); + + __ li(regd, 1); + + __ masknez(regd, regd, SCR1); + + } + + break; + + case lir_cond_notEqual: + + if (immr == 0) { + + __ sltu(regd, R0, regl); + + } else { + + __ addi_d(SCR1, regl, -immr); + + __ li(regd, 1); + + __ maskeqz(regd, regd, SCR1); + + } + + break; + + case lir_cond_less: + + __ slti(regd, regl, immr); + + break; + + case lir_cond_lessEqual: + + if (immr == 0) { + + __ slt(regd, R0, regl); + + } else { + + __ li(SCR1, immr); + + __ slt(regd, SCR1, regl); + + } + + __ xori(regd, regd, 1); + + break; + + case lir_cond_greater: + + if (immr == 0) { + + __ slt(regd, R0, regl); + + } else { + + __ li(SCR1, immr); + + __ slt(regd, SCR1, regl); + + } + + break; + + case lir_cond_greaterEqual: + + __ slti(regd, regl, immr); + + __ xori(regd, regd, 1); + + break; + + case lir_cond_belowEqual: + + if (immr == 0) { + + __ sltu(regd, R0, regl); + + } else { + + __ li(SCR1, immr); + + __ sltu(regd, SCR1, regl); + + } + + __ xori(regd, regd, 1); + + break; + + case lir_cond_aboveEqual: + + __ sltui(regd, regl, immr); + + __ xori(regd, regd, 1); + + break; + + default: + + ShouldNotReachHere(); + + } + + } else { + + switch (condition) { + + case lir_cond_equal: + + __ sub_d(SCR1, regl, regr); + + __ li(regd, 1); + + __ masknez(regd, regd, SCR1); + + break; + + case lir_cond_notEqual: + + __ sub_d(SCR1, regl, regr); + + __ li(regd, 1); + + __ maskeqz(regd, regd, SCR1); + + break; + + case lir_cond_less: + + __ slt(regd, regl, regr); + + break; + + case lir_cond_lessEqual: + + __ slt(regd, regr, regl); + + __ xori(regd, regd, 1); + + break; + + case lir_cond_greater: + + __ slt(regd, regr, regl); + + break; + + case lir_cond_greaterEqual: + + __ slt(regd, regl, regr); + + __ xori(regd, regd, 1); + + break; + + case lir_cond_belowEqual: + + __ sltu(regd, regr, regl); + + __ xori(regd, regd, 1); + + break; + + case lir_cond_aboveEqual: + + __ sltu(regd, regl, regr); + + __ xori(regd, regd, 1); + + break; + + default: + + ShouldNotReachHere(); + + } + + } + + return; + + } + + } + + + + // cmp + + if (regr == noreg) { + + switch (condition) { + + case lir_cond_equal: + + __ addi_d(SCR2, regl, -immr); + + break; + + case lir_cond_notEqual: + + __ addi_d(SCR2, regl, -immr); + + break; + + case lir_cond_less: + + __ slti(SCR2, regl, immr); + + break; + + case lir_cond_lessEqual: + + __ li(SCR1, immr); + + __ slt(SCR2, SCR1, regl); + + break; + + case lir_cond_greater: + + __ li(SCR1, immr); + + __ slt(SCR2, SCR1, regl); + + break; + + case lir_cond_greaterEqual: + + __ slti(SCR2, regl, immr); + + break; + + case lir_cond_belowEqual: + + __ li(SCR1, immr); + + __ sltu(SCR2, SCR1, regl); + + break; + + case lir_cond_aboveEqual: + + __ sltui(SCR2, regl, immr); + + break; + + default: + + ShouldNotReachHere(); + + } + + } else { + + switch (condition) { + + case lir_cond_equal: + + __ sub_d(SCR2, regl, regr); + + break; + + case lir_cond_notEqual: + + __ sub_d(SCR2, regl, regr); + + break; + + case lir_cond_less: + + __ slt(SCR2, regl, 
regr); + + break; + + case lir_cond_lessEqual: + + __ slt(SCR2, regr, regl); + + break; + + case lir_cond_greater: + + __ slt(SCR2, regr, regl); + + break; + + case lir_cond_greaterEqual: + + __ slt(SCR2, regl, regr); + + break; + + case lir_cond_belowEqual: + + __ sltu(SCR2, regr, regl); + + break; + + case lir_cond_aboveEqual: + + __ sltu(SCR2, regl, regr); + + break; + + default: + + ShouldNotReachHere(); + + } + + } + + + + // value operands + + if (src1->is_stack()) { + + stack2reg(src1, result, result->type()); + + reg1 = regd; + + } else if (src1->is_constant()) { + + const2reg(src1, result, lir_patch_none, NULL); + + reg1 = regd; + + } else { + + reg1 = (src1->type() == T_LONG) ? src1->as_register_lo() : src1->as_register(); + + } + + + + if (src2->is_stack()) { + + stack2reg(src2, FrameMap::scr1_opr, result->type()); + + reg2 = SCR1; + + } else if (src2->is_constant()) { + + LIR_Opr tmp = src2->type() == T_LONG ? FrameMap::scr1_long_opr : FrameMap::scr1_opr; + + const2reg(src2, tmp, lir_patch_none, NULL); + + reg2 = SCR1; + + } else { + + reg2 = (src2->type() == T_LONG) ? src2->as_register_lo() : src2->as_register(); + + } + + + + // cmove + + switch (condition) { + + case lir_cond_equal: + + __ masknez(regd, reg1, SCR2); + + __ maskeqz(SCR2, reg2, SCR2); + + break; + + case lir_cond_notEqual: + + __ maskeqz(regd, reg1, SCR2); + + __ masknez(SCR2, reg2, SCR2); + + break; + + case lir_cond_less: + + __ maskeqz(regd, reg1, SCR2); + + __ masknez(SCR2, reg2, SCR2); + + break; + + case lir_cond_lessEqual: + + __ masknez(regd, reg1, SCR2); + + __ maskeqz(SCR2, reg2, SCR2); + + break; + + case lir_cond_greater: + + __ maskeqz(regd, reg1, SCR2); + + __ masknez(SCR2, reg2, SCR2); + + break; + + case lir_cond_greaterEqual: + + __ masknez(regd, reg1, SCR2); + + __ maskeqz(SCR2, reg2, SCR2); + + break; + + case lir_cond_belowEqual: + + __ masknez(regd, reg1, SCR2); + + __ maskeqz(SCR2, reg2, SCR2); + + break; + + case lir_cond_aboveEqual: + + __ masknez(regd, reg1, SCR2); + + __ maskeqz(SCR2, reg2, SCR2); + + break; + + default: + + ShouldNotReachHere(); + + } + + + + __ OR(regd, regd, SCR2); + +} + + + +void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, + + CodeEmitInfo* info, bool pop_fpu_stack) { + + assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method"); + + + + if (left->is_single_cpu()) { + + Register lreg = left->as_register(); + + Register dreg = as_reg(dest); + + + + if (right->is_single_cpu()) { + + // cpu register - cpu register + + assert(left->type() == T_INT && right->type() == T_INT && dest->type() == T_INT, "should be"); + + Register rreg = right->as_register(); + + switch (code) { + + case lir_add: __ add_w (dest->as_register(), lreg, rreg); break; + + case lir_sub: __ sub_w (dest->as_register(), lreg, rreg); break; + + case lir_mul: __ mul_w (dest->as_register(), lreg, rreg); break; + + default: ShouldNotReachHere(); + + } + + } else if (right->is_double_cpu()) { + + Register rreg = right->as_register_lo(); + + // single_cpu + double_cpu: can happen with obj+long + + assert(code == lir_add || code == lir_sub, "mismatched arithmetic op"); + + switch (code) { + + case lir_add: __ add_d(dreg, lreg, rreg); break; + + case lir_sub: __ sub_d(dreg, lreg, rreg); break; + + default: ShouldNotReachHere(); + + } + + } else if (right->is_constant()) { + + // cpu register - constant + + jlong c; + + + + // FIXME: This is fugly: we really need to factor all this logic. 
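+ + // The constant is first widened to a jlong; lir_add/lir_sub then reduce to + + // addi_w/addi_d with the immediate (negated for subtraction), e.g. an int + + // "x - 7" becomes addi_w(dreg, lreg, -7), and an add or subtract of zero + + // back into the same register is elided as a nop.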
+ + switch(right->type()) { + + case T_LONG: + + c = right->as_constant_ptr()->as_jlong(); + + break; + + case T_INT: + + case T_ADDRESS: + + c = right->as_constant_ptr()->as_jint(); + + break; + + default: + + ShouldNotReachHere(); + + c = 0; // unreachable + + break; + + } + + + + assert(code == lir_add || code == lir_sub, "mismatched arithmetic op"); + + if (c == 0 && dreg == lreg) { + + COMMENT("effective nop elided"); + + return; + + } + + + + switch(left->type()) { + + case T_INT: + + switch (code) { + + case lir_add: __ addi_w(dreg, lreg, c); break; + + case lir_sub: __ addi_w(dreg, lreg, -c); break; + + default: ShouldNotReachHere(); + + } + + break; + + case T_OBJECT: + + case T_ADDRESS: + + switch (code) { + + case lir_add: __ addi_d(dreg, lreg, c); break; + + case lir_sub: __ addi_d(dreg, lreg, -c); break; + + default: ShouldNotReachHere(); + + } + + break; + + default: + + ShouldNotReachHere(); + + } + + } else { + + ShouldNotReachHere(); + + } + + } else if (left->is_double_cpu()) { + + Register lreg_lo = left->as_register_lo(); + + + + if (right->is_double_cpu()) { + + // cpu register - cpu register + + Register rreg_lo = right->as_register_lo(); + + switch (code) { + + case lir_add: __ add_d(dest->as_register_lo(), lreg_lo, rreg_lo); break; + + case lir_sub: __ sub_d(dest->as_register_lo(), lreg_lo, rreg_lo); break; + + case lir_mul: __ mul_d(dest->as_register_lo(), lreg_lo, rreg_lo); break; + + case lir_div: __ div_d(dest->as_register_lo(), lreg_lo, rreg_lo); break; + + case lir_rem: __ mod_d(dest->as_register_lo(), lreg_lo, rreg_lo); break; + + default: ShouldNotReachHere(); + + } + + + + } else if (right->is_constant()) { + + jlong c = right->as_constant_ptr()->as_jlong(); + + Register dreg = as_reg(dest); + + switch (code) { + + case lir_add: + + case lir_sub: + + if (c == 0 && dreg == lreg_lo) { + + COMMENT("effective nop elided"); + + return; + + } + + code == lir_add ? 
__ addi_d(dreg, lreg_lo, c) : __ addi_d(dreg, lreg_lo, -c); + + break; + + case lir_div: + + assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant"); + + if (c == 1) { + + // move lreg_lo to dreg if divisor is 1 + + __ move(dreg, lreg_lo); + + } else { + + unsigned int shift = log2i_exact(c); + + // use scr1 as intermediate result register + + __ srai_d(SCR1, lreg_lo, 63); + + __ srli_d(SCR1, SCR1, 64 - shift); + + __ add_d(SCR1, lreg_lo, SCR1); + + __ srai_d(dreg, SCR1, shift); + + } + + break; + + case lir_rem: + + assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant"); + + if (c == 1) { + + // move 0 to dreg if divisor is 1 + + __ move(dreg, R0); + + } else { + + // use scr1/2 as intermediate result register + + __ sub_d(SCR1, R0, lreg_lo); + + __ slt(SCR2, SCR1, R0); + + __ andi(dreg, lreg_lo, c - 1); + + __ andi(SCR1, SCR1, c - 1); + + __ sub_d(SCR1, R0, SCR1); + + __ maskeqz(dreg, dreg, SCR2); + + __ masknez(SCR1, SCR1, SCR2); + + __ OR(dreg, dreg, SCR1); + + } + + break; + + default: + + ShouldNotReachHere(); + + } + + } else { + + ShouldNotReachHere(); + + } + + } else if (left->is_single_fpu()) { + + assert(right->is_single_fpu(), "right hand side of float arithmetics needs to be float register"); + + switch (code) { + + case lir_add: __ fadd_s (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break; + + case lir_sub: __ fsub_s (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break; + + case lir_mul: __ fmul_s (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break; + + case lir_div: __ fdiv_s (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break; + + default: ShouldNotReachHere(); + + } + + } else if (left->is_double_fpu()) { + + if (right->is_double_fpu()) { + + // fpu register - fpu register + + switch (code) { + + case lir_add: __ fadd_d (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break; + + case lir_sub: __ fsub_d (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break; + + case lir_mul: __ fmul_d (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break; + + case lir_div: __ fdiv_d (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break; + + default: ShouldNotReachHere(); + + } + + } else { + + if (right->is_constant()) { + + ShouldNotReachHere(); + + } + + ShouldNotReachHere(); + + } + + } else if (left->is_single_stack() || left->is_address()) { + + assert(left == dest, "left and dest must be equal"); + + ShouldNotReachHere(); + + } else { + + ShouldNotReachHere(); + + } + +} + + + +void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, + + int dest_index, bool pop_fpu_stack) { + + Unimplemented(); + +} + + + +void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) { + + switch(code) { + + case lir_abs : __ fabs_d(dest->as_double_reg(), value->as_double_reg()); break; + + case lir_sqrt: __ fsqrt_d(dest->as_double_reg(), value->as_double_reg()); break; + + default : ShouldNotReachHere(); + + } + +} + + + +void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) { + + assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register"); + + Register Rleft = left->is_single_cpu() ? 
left->as_register() : left->as_register_lo(); + + + + if (dst->is_single_cpu()) { + + Register Rdst = dst->as_register(); + + if (right->is_constant()) { + + switch (code) { + + case lir_logic_and: + + if (Assembler::is_uimm(right->as_jint(), 12)) { + + __ andi(Rdst, Rleft, right->as_jint()); + + } else { + + __ li(AT, right->as_jint()); + + __ AND(Rdst, Rleft, AT); + + } + + break; + + case lir_logic_or: __ ori(Rdst, Rleft, right->as_jint()); break; + + case lir_logic_xor: __ xori(Rdst, Rleft, right->as_jint()); break; + + default: ShouldNotReachHere(); break; + + } + + } else { + + Register Rright = right->is_single_cpu() ? right->as_register() : right->as_register_lo(); + + switch (code) { + + case lir_logic_and: __ AND(Rdst, Rleft, Rright); break; + + case lir_logic_or: __ OR(Rdst, Rleft, Rright); break; + + case lir_logic_xor: __ XOR(Rdst, Rleft, Rright); break; + + default: ShouldNotReachHere(); break; + + } + + } + + } else { + + Register Rdst = dst->as_register_lo(); + + if (right->is_constant()) { + + switch (code) { + + case lir_logic_and: + + if (Assembler::is_uimm(right->as_jlong(), 12)) { + + __ andi(Rdst, Rleft, right->as_jlong()); + + } else { + + // We can guarantee that transform from HIR LogicOp is in range of + + // uimm(12), but the common code directly generates LIR LogicAnd, + + // and the right-operand is mask with all ones in the high bits. + + __ li(AT, right->as_jlong()); + + __ AND(Rdst, Rleft, AT); + + } + + break; + + case lir_logic_or: __ ori(Rdst, Rleft, right->as_jlong()); break; + + case lir_logic_xor: __ xori(Rdst, Rleft, right->as_jlong()); break; + + default: ShouldNotReachHere(); break; + + } + + } else { + + Register Rright = right->is_single_cpu() ? right->as_register() : right->as_register_lo(); + + switch (code) { + + case lir_logic_and: __ AND(Rdst, Rleft, Rright); break; + + case lir_logic_or: __ OR(Rdst, Rleft, Rright); break; + + case lir_logic_xor: __ XOR(Rdst, Rleft, Rright); break; + + default: ShouldNotReachHere(); break; + + } + + } + + } + +} + + + +void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, + + LIR_Opr illegal, LIR_Opr result, CodeEmitInfo* info) { + + // opcode check + + assert((code == lir_idiv) || (code == lir_irem), "opcode must be idiv or irem"); + + bool is_irem = (code == lir_irem); + + + + // operand check + + assert(left->is_single_cpu(), "left must be register"); + + assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant"); + + assert(result->is_single_cpu(), "result must be register"); + + Register lreg = left->as_register(); + + Register dreg = result->as_register(); + + + + // power-of-2 constant check and codegen + + if (right->is_constant()) { + + int c = right->as_constant_ptr()->as_jint(); + + assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant"); + + if (is_irem) { + + if (c == 1) { + + // move 0 to dreg if divisor is 1 + + __ move(dreg, R0); + + } else { + + // use scr1/2 as intermediate result register + + __ sub_w(SCR1, R0, lreg); + + __ slt(SCR2, SCR1, R0); + + __ andi(dreg, lreg, c - 1); + + __ andi(SCR1, SCR1, c - 1); + + __ sub_w(SCR1, R0, SCR1); + + __ maskeqz(dreg, dreg, SCR2); + + __ masknez(SCR1, SCR1, SCR2); + + __ OR(dreg, dreg, SCR1); + + } + + } else { + + if (c == 1) { + + // move lreg to dreg if divisor is 1 + + __ move(dreg, lreg); + + } else { + + unsigned int shift = exact_log2(c); + + // use scr1 as intermediate result register + + __ srai_w(SCR1, lreg, 31); + + __ srli_w(SCR1, SCR1, 32 - shift); + + __ 
add_w(SCR1, lreg, SCR1); + + __ srai_w(dreg, SCR1, shift); + + } + + } + + } else { + + Register rreg = right->as_register(); + + if (is_irem) + + __ mod_w(dreg, lreg, rreg); + + else + + __ div_w(dreg, lreg, rreg); + + } + +} + + + +void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) { + + Unimplemented(); + +} + + + +void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){ + + if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) { + + bool is_unordered_less = (code == lir_ucmp_fd2i); + + if (left->is_single_fpu()) { + + if (is_unordered_less) { + + __ fcmp_clt_s(FCC0, right->as_float_reg(), left->as_float_reg()); + + __ fcmp_cult_s(FCC1, left->as_float_reg(), right->as_float_reg()); + + } else { + + __ fcmp_cult_s(FCC0, right->as_float_reg(), left->as_float_reg()); + + __ fcmp_clt_s(FCC1, left->as_float_reg(), right->as_float_reg()); + + } + + } else if (left->is_double_fpu()) { + + if (is_unordered_less) { + + __ fcmp_clt_d(FCC0, right->as_double_reg(), left->as_double_reg()); + + __ fcmp_cult_d(FCC1, left->as_double_reg(), right->as_double_reg()); + + } else { + + __ fcmp_cult_d(FCC0, right->as_double_reg(), left->as_double_reg()); + + __ fcmp_clt_d(FCC1, left->as_double_reg(), right->as_double_reg()); + + } + + } else { + + ShouldNotReachHere(); + + } + + __ movcf2gr(dst->as_register(), FCC0); + + __ movcf2gr(SCR1, FCC1); + + __ sub_d(dst->as_register(), dst->as_register(), SCR1); + + } else if (code == lir_cmp_l2i) { + + __ slt(SCR1, left->as_register_lo(), right->as_register_lo()); + + __ slt(dst->as_register(), right->as_register_lo(), left->as_register_lo()); + + __ sub_d(dst->as_register(), dst->as_register(), SCR1); + + } else { + + ShouldNotReachHere(); + + } + +} + + + +void LIR_Assembler::align_call(LIR_Code code) {} + + + +void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) { + + address call = __ trampoline_call(AddressLiteral(op->addr(), rtype)); + + if (call == NULL) { + + bailout("trampoline stub overflow"); + + return; + + } + + add_call_info(code_offset(), op->info()); + +} + + + +void LIR_Assembler::ic_call(LIR_OpJavaCall* op) { + + address call = __ ic_call(op->addr()); + + if (call == NULL) { + + bailout("trampoline stub overflow"); + + return; + + } + + add_call_info(code_offset(), op->info()); + +} + + + +void LIR_Assembler::emit_static_call_stub() { + + address call_pc = __ pc(); + + address stub = __ start_a_stub(call_stub_size()); + + if (stub == NULL) { + + bailout("static call stub overflow"); + + return; + + } + + + + int start = __ offset(); + + + + __ relocate(static_stub_Relocation::spec(call_pc)); + + + + // Code stream for loading method may be changed. + + __ ibar(0); + + + + // Rmethod contains Method*, it should be relocated for GC + + // static stub relocation also tags the Method* in the code-stream. 
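+ +  // Both the metadata load and the jump below are emitted in patchable
+ +  // form: once the call is resolved, the runtime rewrites them with the
+ +  // callee's Method* and entry point (the patching itself is done by the
+ +  // shared compiledIC/nativeInst machinery).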
+ +  __ mov_metadata(Rmethod, NULL);
+ +  // This is recognized as unresolved by relocs/nativeInst/ic code
+ +  __ patchable_jump(__ pc());
+ +
+ +  assert(__ offset() - start + CompiledStaticCall::to_trampoline_stub_size() <= call_stub_size(),
+ +         "stub too big");
+ +  __ end_a_stub();
+ +}
+ +
+ +void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
+ +  assert(exceptionOop->as_register() == A0, "must match");
+ +  assert(exceptionPC->as_register() == A1, "must match");
+ +
+ +  // exception object is not added to oop map by LinearScan
+ +  // (LinearScan assumes that no oops are in fixed registers)
+ +  info->add_register_oop(exceptionOop);
+ +  Runtime1::StubID unwind_id;
+ +
+ +  // get current pc information
+ +  // pc is only needed if the method has an exception handler, the unwind code does not need it.
+ +  if (compilation()->debug_info_recorder()->last_pc_offset() == __ offset()) {
+ +    // As no instructions have been generated yet for this LIR node it's
+ +    // possible that an oop map already exists for the current offset.
+ +    // In that case insert a dummy NOP here to ensure all oop map PCs
+ +    // are unique. See JDK-8237483.
+ +    __ nop();
+ +  }
+ +  Label L;
+ +  int pc_for_athrow_offset = __ offset();
+ +  __ bind(L);
+ +  __ lipc(exceptionPC->as_register(), L);
+ +  add_call_info(pc_for_athrow_offset, info); // for exception handler
+ +
+ +  __ verify_not_null_oop(A0);
+ +  // search an exception handler (A0: exception oop, A1: throwing pc)
+ +  if (compilation()->has_fpu_code()) {
+ +    unwind_id = Runtime1::handle_exception_id;
+ +  } else {
+ +    unwind_id = Runtime1::handle_exception_nofpu_id;
+ +  }
+ +  __ call(Runtime1::entry_for(unwind_id), relocInfo::runtime_call_type);
+ +
+ +  // FIXME: enough room for two byte trap ????
+ +  __ nop();
+ +}
+ +
+ +void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
+ +  assert(exceptionOop->as_register() == A0, "must match");
+ +  __ b(_unwind_handler_entry);
+ +}
+ +
+ +void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
+ +  Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
+ +  Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
+ +
+ +  switch (left->type()) {
+ +    case T_INT: {
+ +      switch (code) {
+ +      case lir_shl:  __ sll_w(dreg, lreg, count->as_register()); break;
+ +      case lir_shr:  __ sra_w(dreg, lreg, count->as_register()); break;
+ +      case lir_ushr: __ srl_w(dreg, lreg, count->as_register()); break;
+ +      default: ShouldNotReachHere(); break;
+ +      }
+ +      break;
+ +    case T_LONG:
+ +    case T_ADDRESS:
+ +    case T_OBJECT:
+ +      switch (code) {
+ +      case lir_shl:  __ sll_d(dreg, lreg, count->as_register()); break;
+ +      case lir_shr:  __ sra_d(dreg, lreg, count->as_register()); break;
+ +      case lir_ushr: __ srl_d(dreg, lreg, count->as_register()); break;
+ +      default: ShouldNotReachHere(); break;
+ +      }
+ +      break;
+ +    default:
+ +      ShouldNotReachHere();
+ +      break;
+ +    }
+ +  }
+ +}
+ +
+ +void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
+ +  Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
+ +  Register lreg = left->is_single_cpu() ? 
left->as_register() : left->as_register_lo(); + + + + switch (left->type()) { + + case T_INT: { + + switch (code) { + + case lir_shl: __ slli_w(dreg, lreg, count); break; + + case lir_shr: __ srai_w(dreg, lreg, count); break; + + case lir_ushr: __ srli_w(dreg, lreg, count); break; + + default: ShouldNotReachHere(); break; + + } + + break; + + case T_LONG: + + case T_ADDRESS: + + case T_OBJECT: + + switch (code) { + + case lir_shl: __ slli_d(dreg, lreg, count); break; + + case lir_shr: __ srai_d(dreg, lreg, count); break; + + case lir_ushr: __ srli_d(dreg, lreg, count); break; + + default: ShouldNotReachHere(); break; + + } + + break; + + default: + + ShouldNotReachHere(); + + break; + + } + + } + +} + + + +void LIR_Assembler::store_parameter(Register r, int offset_from_sp_in_words) { + + assert(offset_from_sp_in_words >= 0, "invalid offset from sp"); + + int offset_from_sp_in_bytes = offset_from_sp_in_words * BytesPerWord; + + assert(offset_from_sp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); + + __ st_ptr(r, Address(SP, offset_from_sp_in_bytes)); + +} + + + +void LIR_Assembler::store_parameter(jint c, int offset_from_sp_in_words) { + + assert(offset_from_sp_in_words >= 0, "invalid offset from sp"); + + int offset_from_sp_in_bytes = offset_from_sp_in_words * BytesPerWord; + + assert(offset_from_sp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); + + __ li(SCR2, c); + + __ st_ptr(SCR2, Address(SP, offset_from_sp_in_bytes)); + +} + + + +void LIR_Assembler::store_parameter(jobject o, int offset_from_sp_in_words) { + + ShouldNotReachHere(); + +} + + + +// This code replaces a call to arraycopy; no exception may + +// be thrown in this code, they must be thrown in the System.arraycopy + +// activation frame; we could save some checks if this would not be the case + +void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { + + Register j_rarg0 = T0; + + Register j_rarg1 = A0; + + Register j_rarg2 = A1; + + Register j_rarg3 = A2; + + Register j_rarg4 = A3; + + + + ciArrayKlass* default_type = op->expected_type(); + + Register src = op->src()->as_register(); + + Register dst = op->dst()->as_register(); + + Register src_pos = op->src_pos()->as_register(); + + Register dst_pos = op->dst_pos()->as_register(); + + Register length = op->length()->as_register(); + + Register tmp = op->tmp()->as_register(); + + + + CodeStub* stub = op->stub(); + + int flags = op->flags(); + + BasicType basic_type = default_type != NULL ? 
default_type->element_type()->basic_type() : T_ILLEGAL; + + if (is_reference_type(basic_type)) + + basic_type = T_OBJECT; + + + + // if we don't know anything, just go through the generic arraycopy + + if (default_type == NULL) { + + Label done; + + assert(src == T0 && src_pos == A0, "mismatch in calling convention"); + + + + // Save the arguments in case the generic arraycopy fails and we + + // have to fall back to the JNI stub + + __ st_ptr(dst, Address(SP, 0 * BytesPerWord)); + + __ st_ptr(dst_pos, Address(SP, 1 * BytesPerWord)); + + __ st_ptr(length, Address(SP, 2 * BytesPerWord)); + + __ st_ptr(src_pos, Address(SP, 3 * BytesPerWord)); + + __ st_ptr(src, Address(SP, 4 * BytesPerWord)); + + + + address copyfunc_addr = StubRoutines::generic_arraycopy(); + + assert(copyfunc_addr != NULL, "generic arraycopy stub required"); + + + + // The arguments are in java calling convention so we shift them + + // to C convention + + assert_different_registers(A4, j_rarg0, j_rarg1, j_rarg2, j_rarg3); + + __ move(A4, j_rarg4); + + assert_different_registers(A3, j_rarg0, j_rarg1, j_rarg2); + + __ move(A3, j_rarg3); + + assert_different_registers(A2, j_rarg0, j_rarg1); + + __ move(A2, j_rarg2); + + assert_different_registers(A1, j_rarg0); + + __ move(A1, j_rarg1); + + __ move(A0, j_rarg0); + +#ifndef PRODUCT + + if (PrintC1Statistics) { + + __ li(SCR2, (address)&Runtime1::_generic_arraycopystub_cnt); + + __ increment(SCR2, 1); + + } + +#endif + + __ call(copyfunc_addr, relocInfo::runtime_call_type); + + + + __ beqz(A0, *stub->continuation()); + + __ move(tmp, A0); + + + + // Reload values from the stack so they are where the stub + + // expects them. + + __ ld_ptr(dst, Address(SP, 0 * BytesPerWord)); + + __ ld_ptr(dst_pos, Address(SP, 1 * BytesPerWord)); + + __ ld_ptr(length, Address(SP, 2 * BytesPerWord)); + + __ ld_ptr(src_pos, Address(SP, 3 * BytesPerWord)); + + __ ld_ptr(src, Address(SP, 4 * BytesPerWord)); + + + + // tmp is -1^K where K == partial copied count + + __ nor(SCR1, tmp, R0); + + // adjust length down and src/end pos up by partial copied count + + __ sub_w(length, length, SCR1); + + __ add_w(src_pos, src_pos, SCR1); + + __ add_w(dst_pos, dst_pos, SCR1); + + __ b(*stub->entry()); + + + + __ bind(*stub->continuation()); + + return; + + } + + + + assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), + + "must be true at this point"); + + + + int elem_size = type2aelembytes(basic_type); + + Address::ScaleFactor scale = Address::times(elem_size); + + + + Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes()); + + Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes()); + + Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes()); + + Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes()); + + + + // test for NULL + + if (flags & LIR_OpArrayCopy::src_null_check) { + + __ beqz(src, *stub->entry()); + + } + + if (flags & LIR_OpArrayCopy::dst_null_check) { + + __ beqz(dst, *stub->entry()); + + } + + + + // If the compiler was not able to prove that exact type of the source or the destination + + // of the arraycopy is an array type, check at runtime if the source or the destination is + + // an instance type. 
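+ +  // Klass::layout_helper() is negative for array klasses and non-negative
+ +  // for instance klasses, so a signed comparison against
+ +  // Klass::_lh_neutral_value routes any non-array operand to the slow path.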
+ +  if (flags & LIR_OpArrayCopy::type_check) {
+ +    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
+ +      __ load_klass(tmp, dst);
+ +      __ ld_w(SCR1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
+ +      __ li(SCR2, Klass::_lh_neutral_value);
+ +      __ bge_far(SCR1, SCR2, *stub->entry(), true);
+ +    }
+ +
+ +    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
+ +      __ load_klass(tmp, src);
+ +      __ ld_w(SCR1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
+ +      __ li(SCR2, Klass::_lh_neutral_value);
+ +      __ bge_far(SCR1, SCR2, *stub->entry(), true);
+ +    }
+ +  }
+ +
+ +  // check if negative
+ +  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
+ +    __ blt_far(src_pos, R0, *stub->entry(), true);
+ +  }
+ +  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
+ +    __ blt_far(dst_pos, R0, *stub->entry(), true);
+ +  }
+ +
+ +  if (flags & LIR_OpArrayCopy::length_positive_check) {
+ +    __ blt_far(length, R0, *stub->entry(), true);
+ +  }
+ +
+ +  if (flags & LIR_OpArrayCopy::src_range_check) {
+ +    __ add_w(tmp, src_pos, length);
+ +    __ ld_wu(SCR1, src_length_addr);
+ +    __ blt_far(SCR1, tmp, *stub->entry(), false);
+ +  }
+ +  if (flags & LIR_OpArrayCopy::dst_range_check) {
+ +    __ add_w(tmp, dst_pos, length);
+ +    __ ld_wu(SCR1, dst_length_addr);
+ +    __ blt_far(SCR1, tmp, *stub->entry(), false);
+ +  }
+ +
+ +  if (flags & LIR_OpArrayCopy::type_check) {
+ +    // We don't know the array types are compatible
+ +    if (basic_type != T_OBJECT) {
+ +      // Simple test for basic type arrays
+ +      if (UseCompressedClassPointers) {
+ +        __ ld_wu(tmp, src_klass_addr);
+ +        __ ld_wu(SCR1, dst_klass_addr);
+ +      } else {
+ +        __ ld_ptr(tmp, src_klass_addr);
+ +        __ ld_ptr(SCR1, dst_klass_addr);
+ +      }
+ +      __ bne_far(tmp, SCR1, *stub->entry());
+ +    } else {
+ +      // For object arrays, if src is a sub class of dst then we can
+ +      // safely do the copy.
+ +      Label cont, slow;
+ +
+ +      __ addi_d(SP, SP, -2 * wordSize);
+ +      __ st_ptr(dst, Address(SP, 0 * wordSize));
+ +      __ st_ptr(src, Address(SP, 1 * wordSize));
+ +
+ +      __ load_klass(src, src);
+ +      __ load_klass(dst, dst);
+ +
+ +      __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);
+ +
+ +      __ addi_d(SP, SP, -2 * wordSize);
+ +      __ st_ptr(dst, Address(SP, 0 * wordSize));
+ +      __ st_ptr(src, Address(SP, 1 * wordSize));
+ +      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
+ +      __ ld_ptr(dst, Address(SP, 0 * wordSize));
+ +      __ ld_ptr(src, Address(SP, 1 * wordSize));
+ +      __ addi_d(SP, SP, 2 * wordSize);
+ +
+ +      __ bnez(dst, cont);
+ +
+ +      __ bind(slow);
+ +      __ ld_ptr(dst, Address(SP, 0 * wordSize));
+ +      __ ld_ptr(src, Address(SP, 1 * wordSize));
+ +      __ addi_d(SP, SP, 2 * wordSize);
+ +
+ +      address copyfunc_addr = StubRoutines::checkcast_arraycopy();
+ +      if (copyfunc_addr != NULL) { // use stub if available
+ +        // src is not a sub class of dst so we have to do a
+ +        // per-element check.
+ +
+ +        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
+ +        if ((flags & mask) != mask) {
+ +          // Ensure both operands are object arrays: at least one is known
+ +          // statically (see the assert below); the other is checked at
+ +          // runtime via its layout helper.
+ + assert(flags & mask, "one of the two should be known to be an object array"); + + + + if (!(flags & LIR_OpArrayCopy::src_objarray)) { + + __ load_klass(tmp, src); + + } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) { + + __ load_klass(tmp, dst); + + } + + int lh_offset = in_bytes(Klass::layout_helper_offset()); + + Address klass_lh_addr(tmp, lh_offset); + + jint objArray_lh = Klass::array_layout_helper(T_OBJECT); + + __ ld_w(SCR1, klass_lh_addr); + + __ li(SCR2, objArray_lh); + + __ XOR(SCR1, SCR1, SCR2); + + __ bnez(SCR1, *stub->entry()); + + } + + + + // Spill because stubs can use any register they like and it's + + // easier to restore just those that we care about. + + __ st_ptr(dst, Address(SP, 0 * BytesPerWord)); + + __ st_ptr(dst_pos, Address(SP, 1 * BytesPerWord)); + + __ st_ptr(length, Address(SP, 2 * BytesPerWord)); + + __ st_ptr(src_pos, Address(SP, 3 * BytesPerWord)); + + __ st_ptr(src, Address(SP, 4 * BytesPerWord)); + + + + __ lea(A0, Address(src, src_pos, scale)); + + __ addi_d(A0, A0, arrayOopDesc::base_offset_in_bytes(basic_type)); + + assert_different_registers(A0, dst, dst_pos, length); + + __ load_klass(A4, dst); + + assert_different_registers(A4, dst, dst_pos, length); + + __ lea(A1, Address(dst, dst_pos, scale)); + + __ addi_d(A1, A1, arrayOopDesc::base_offset_in_bytes(basic_type)); + + assert_different_registers(A1, length); + + __ bstrpick_d(A2, length, 31, 0); + + __ ld_ptr(A4, Address(A4, ObjArrayKlass::element_klass_offset())); + + __ ld_w(A3, Address(A4, Klass::super_check_offset_offset())); + + __ call(copyfunc_addr, relocInfo::runtime_call_type); + + + +#ifndef PRODUCT + + if (PrintC1Statistics) { + + Label failed; + + __ bnez(A0, failed); + + __ li(SCR2, (address)&Runtime1::_arraycopy_checkcast_cnt); + + __ increment(SCR2, 1); + + __ bind(failed); + + } + +#endif + + + + __ beqz(A0, *stub->continuation()); + + + +#ifndef PRODUCT + + if (PrintC1Statistics) { + + __ li(SCR2, (address)&Runtime1::_arraycopy_checkcast_attempt_cnt); + + __ increment(SCR2, 1); + + } + +#endif + + assert_different_registers(dst, dst_pos, length, src_pos, src, tmp, SCR1); + + __ move(tmp, A0); + + + + // Restore previously spilled arguments + + __ ld_ptr(dst, Address(SP, 0 * BytesPerWord)); + + __ ld_ptr(dst_pos, Address(SP, 1 * BytesPerWord)); + + __ ld_ptr(length, Address(SP, 2 * BytesPerWord)); + + __ ld_ptr(src_pos, Address(SP, 3 * BytesPerWord)); + + __ ld_ptr(src, Address(SP, 4 * BytesPerWord)); + + + + // return value is -1^K where K is partial copied count + + __ nor(SCR1, tmp, R0); + + // adjust length down and src/end pos up by partial copied count + + __ sub_w(length, length, SCR1); + + __ add_w(src_pos, src_pos, SCR1); + + __ add_w(dst_pos, dst_pos, SCR1); + + } + + + + __ b(*stub->entry()); + + + + __ bind(cont); + + __ ld_ptr(dst, Address(SP, 0 * wordSize)); + + __ ld_ptr(src, Address(SP, 1 * wordSize)); + + __ addi_d(SP, SP, 2 * wordSize); + + } + + } + + + +#ifdef ASSERT + + if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) { + + // Sanity check the known type with the incoming class. For the + + // primitive case the types must match exactly with src.klass and + + // dst.klass each exactly matching the default type. For the + + // object array case, if no type check is needed then either the + + // dst type is exactly the expected type and the src type is a + + // subtype which we can't check or src is the same array as dst + + // but not necessarily exactly of type default_type. 
+ + Label known_ok, halt; + + __ mov_metadata(tmp, default_type->constant_encoding()); + + if (UseCompressedClassPointers) { + + __ encode_klass_not_null(tmp); + + } + + + + if (basic_type != T_OBJECT) { + + + + if (UseCompressedClassPointers) { + + __ ld_wu(SCR1, dst_klass_addr); + + } else { + + __ ld_ptr(SCR1, dst_klass_addr); + + } + + __ bne(tmp, SCR1, halt); + + if (UseCompressedClassPointers) { + + __ ld_wu(SCR1, src_klass_addr); + + } else { + + __ ld_ptr(SCR1, src_klass_addr); + + } + + __ beq(tmp, SCR1, known_ok); + + } else { + + if (UseCompressedClassPointers) { + + __ ld_wu(SCR1, dst_klass_addr); + + } else { + + __ ld_ptr(SCR1, dst_klass_addr); + + } + + __ beq(tmp, SCR1, known_ok); + + __ beq(src, dst, known_ok); + + } + + __ bind(halt); + + __ stop("incorrect type information in arraycopy"); + + __ bind(known_ok); + + } + +#endif + + + +#ifndef PRODUCT + + if (PrintC1Statistics) { + + __ li(SCR2, Runtime1::arraycopy_count_address(basic_type)); + + __ increment(SCR2, 1); + + } + +#endif + + + + __ lea(A0, Address(src, src_pos, scale)); + + __ addi_d(A0, A0, arrayOopDesc::base_offset_in_bytes(basic_type)); + + assert_different_registers(A0, dst, dst_pos, length); + + __ lea(A1, Address(dst, dst_pos, scale)); + + __ addi_d(A1, A1, arrayOopDesc::base_offset_in_bytes(basic_type)); + + assert_different_registers(A1, length); + + __ bstrpick_d(A2, length, 31, 0); + + + + bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0; + + bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0; + + const char *name; + + address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false); + + + + CodeBlob *cb = CodeCache::find_blob(entry); + + if (cb) { + + __ call(entry, relocInfo::runtime_call_type); + + } else { + + __ call_VM_leaf(entry, 3); + + } + + + + __ bind(*stub->continuation()); + +} + + + +void LIR_Assembler::emit_lock(LIR_OpLock* op) { + + Register obj = op->obj_opr()->as_register(); // may not be an oop + + Register hdr = op->hdr_opr()->as_register(); + + Register lock = op->lock_opr()->as_register(); + + if (!UseFastLocking) { + + __ b(*op->stub()->entry()); + + } else if (op->code() == lir_lock) { + + Register scratch = noreg; + + if (UseBiasedLocking) { + + scratch = op->scratch_opr()->as_register(); + + } + + assert(BasicLock::displaced_header_offset_in_bytes() == 0, + + "lock_reg must point to the displaced header"); + + // add debug info for NullPointerException only if one is possible + + int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry()); + + if (op->info() != NULL) { + + add_debug_info_for_null_check(null_check_offset, op->info()); + + } + + // done + + } else if (op->code() == lir_unlock) { + + assert(BasicLock::displaced_header_offset_in_bytes() == 0, + + "lock_reg must point to the displaced header"); + + __ unlock_object(hdr, obj, lock, *op->stub()->entry()); + + } else { + + Unimplemented(); + + } + + __ bind(*op->stub()->continuation()); + +} + + + +void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { + + ciMethod* method = op->profiled_method(); + + ciMethod* callee = op->profiled_callee(); + + int bci = op->profiled_bci(); + + + + // Update counter for all call types + + ciMethodData* md = method->method_data_or_null(); + + assert(md != NULL, "Sanity"); + + ciProfileData* data = md->bci_to_data(bci); + + assert(data != NULL && data->is_CounterData(), "need CounterData for calls"); + + assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); + + Register mdo = 
op->mdo()->as_register();
+ +  __ mov_metadata(mdo, md->constant_encoding());
+ +  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
+ +  // Perform additional virtual call profiling for invokevirtual and
+ +  // invokeinterface bytecodes
+ +  if (op->should_profile_receiver_type()) {
+ +    assert(op->recv()->is_single_cpu(), "recv must be allocated");
+ +    Register recv = op->recv()->as_register();
+ +    assert_different_registers(mdo, recv);
+ +    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
+ +    ciKlass* known_klass = op->known_holder();
+ +    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
+ +      // We know the type that will be seen at this call site; we can
+ +      // statically update the MethodData* rather than needing to do
+ +      // dynamic tests on the receiver type
+ +
+ +      // NOTE: we should probably put a lock around this search to
+ +      // avoid collisions by concurrent compilations
+ +      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
+ +      uint i;
+ +      for (i = 0; i < VirtualCallData::row_limit(); i++) {
+ +        ciKlass* receiver = vc_data->receiver(i);
+ +        if (known_klass->equals(receiver)) {
+ +          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
+ +          __ ld_ptr(SCR2, data_addr);
+ +          __ addi_d(SCR2, SCR2, DataLayout::counter_increment);
+ +          __ st_ptr(SCR2, data_addr);
+ +          return;
+ +        }
+ +      }
+ +
+ +      // Receiver type not found in profile data; select an empty slot
+ +
+ +      // Note that this is less efficient than it should be because it
+ +      // always does a write to the receiver part of the
+ +      // VirtualCallData rather than just the first time
+ +      for (i = 0; i < VirtualCallData::row_limit(); i++) {
+ +        ciKlass* receiver = vc_data->receiver(i);
+ +        if (receiver == NULL) {
+ +          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
+ +          __ mov_metadata(SCR2, known_klass->constant_encoding());
+ +          __ lea(SCR1, recv_addr);
+ +          __ st_ptr(SCR2, SCR1, 0);
+ +          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
+ +          __ ld_ptr(SCR2, data_addr);
+ +          __ addi_d(SCR2, SCR2, DataLayout::counter_increment);
+ +          __ st_ptr(SCR2, data_addr);
+ +          return;
+ +        }
+ +      }
+ +    } else {
+ +      __ load_klass(recv, recv);
+ +      Label update_done;
+ +      type_profile_helper(mdo, md, data, recv, &update_done);
+ +      // Receiver did not match any saved receiver and there is no empty row for it.
+ +      // Increment total counter to indicate polymorphic case.
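+ +      // counter_addr is the plain CounterData count slot, so the total
+ +      // call count stays correct even when all receiver rows are occupied.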
+ + __ ld_ptr(SCR2, counter_addr); + + __ addi_d(SCR2, SCR2, DataLayout::counter_increment); + + __ st_ptr(SCR2, counter_addr); + + + + __ bind(update_done); + + } + + } else { + + // Static call + + __ ld_ptr(SCR2, counter_addr); + + __ addi_d(SCR2, SCR2, DataLayout::counter_increment); + + __ st_ptr(SCR2, counter_addr); + + } + +} + + + +void LIR_Assembler::emit_delay(LIR_OpDelay*) { + + Unimplemented(); + +} + + + +void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) { + + __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no)); + +} + + + +void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { + + assert(op->crc()->is_single_cpu(), "crc must be register"); + + assert(op->val()->is_single_cpu(), "byte value must be register"); + + assert(op->result_opr()->is_single_cpu(), "result must be register"); + + Register crc = op->crc()->as_register(); + + Register val = op->val()->as_register(); + + Register res = op->result_opr()->as_register(); + + + + assert_different_registers(val, crc, res); + + __ li(res, StubRoutines::crc_table_addr()); + + __ nor(crc, crc, R0); // ~crc + + __ update_byte_crc32(crc, val, res); + + __ nor(res, crc, R0); // ~crc + +} + + + +void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { + + COMMENT("emit_profile_type {"); + + Register obj = op->obj()->as_register(); + + Register tmp = op->tmp()->as_pointer_register(); + + Address mdo_addr = as_Address(op->mdp()->as_address_ptr()); + + ciKlass* exact_klass = op->exact_klass(); + + intptr_t current_klass = op->current_klass(); + + bool not_null = op->not_null(); + + bool no_conflict = op->no_conflict(); + + + + Label update, next, none; + + + + bool do_null = !not_null; + + bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass; + + bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set; + + + + assert(do_null || do_update, "why are we here?"); + + assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?"); + + assert(mdo_addr.base() != SCR1, "wrong register"); + + + + __ verify_oop(obj); + + + + if (tmp != obj) { + + __ move(tmp, obj); + + } + + if (do_null) { + + __ bnez(tmp, update); + + if (!TypeEntries::was_null_seen(current_klass)) { + + __ ld_ptr(SCR2, mdo_addr); + + __ ori(SCR2, SCR2, TypeEntries::null_seen); + + __ st_ptr(SCR2, mdo_addr); + + } + + if (do_update) { + +#ifndef ASSERT + + __ b(next); + + } + +#else + + __ b(next); + + } + + } else { + + __ bnez(tmp, update); + + __ stop("unexpected null obj"); + +#endif + + } + + + + __ bind(update); + + + + if (do_update) { + +#ifdef ASSERT + + if (exact_klass != NULL) { + + Label ok; + + __ load_klass(tmp, tmp); + + __ mov_metadata(SCR1, exact_klass->constant_encoding()); + + __ XOR(SCR1, tmp, SCR1); + + __ beqz(SCR1, ok); + + __ stop("exact klass and actual klass differ"); + + __ bind(ok); + + } + +#endif + + if (!no_conflict) { + + if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) { + + if (exact_klass != NULL) { + + __ mov_metadata(tmp, exact_klass->constant_encoding()); + + } else { + + __ load_klass(tmp, tmp); + + } + + + + __ ld_ptr(SCR2, mdo_addr); + + __ XOR(tmp, tmp, SCR2); + + assert(TypeEntries::type_klass_mask == -4, "must be"); + + __ bstrpick_d(SCR1, tmp, 63, 2); + + // klass seen before, nothing to do. The unknown bit may have been + + // set already but no need to check. 
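+ +        // TypeEntries keeps the null_seen and type_unknown flags in the two
+ +        // low bits of the profile word (hence type_klass_mask == -4), so
+ +        // extracting bits 63..2 of the XOR with bstrpick_d compares only
+ +        // the klass bits and ignores the flag bits.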
+ + __ beqz(SCR1, next); + + + + __ andi(SCR1, tmp, TypeEntries::type_unknown); + + __ bnez(SCR1, next); // already unknown. Nothing to do anymore. + + + + if (TypeEntries::is_type_none(current_klass)) { + + __ beqz(SCR2, none); + + __ li(SCR1, (u1)TypeEntries::null_seen); + + __ beq(SCR2, SCR1, none); + + // There is a chance that the checks above (re-reading profiling + + // data from memory) fail if another thread has just set the + + // profiling to this obj's klass + + membar_acquire(); + + __ ld_ptr(SCR2, mdo_addr); + + __ XOR(tmp, tmp, SCR2); + + assert(TypeEntries::type_klass_mask == -4, "must be"); + + __ bstrpick_d(SCR1, tmp, 63, 2); + + __ beqz(SCR1, next); + + } + + } else { + + assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && + + ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only"); + + + + __ ld_ptr(tmp, mdo_addr); + + __ andi(SCR2, tmp, TypeEntries::type_unknown); + + __ bnez(SCR2, next); // already unknown. Nothing to do anymore. + + } + + + + // different than before. Cannot keep accurate profile. + + __ ld_ptr(SCR2, mdo_addr); + + __ ori(SCR2, SCR2, TypeEntries::type_unknown); + + __ st_ptr(SCR2, mdo_addr); + + + + if (TypeEntries::is_type_none(current_klass)) { + + __ b(next); + + + + __ bind(none); + + // first time here. Set profile type. + + __ st_ptr(tmp, mdo_addr); + + } + + } else { + + // There's a single possible klass at this profile point + + assert(exact_klass != NULL, "should be"); + + if (TypeEntries::is_type_none(current_klass)) { + + __ mov_metadata(tmp, exact_klass->constant_encoding()); + + __ ld_ptr(SCR2, mdo_addr); + + __ XOR(tmp, tmp, SCR2); + + assert(TypeEntries::type_klass_mask == -4, "must be"); + + __ bstrpick_d(SCR1, tmp, 63, 2); + + __ beqz(SCR1, next); + +#ifdef ASSERT + + { + + Label ok; + + __ ld_ptr(SCR1, mdo_addr); + + __ beqz(SCR1, ok); + + __ li(SCR2, (u1)TypeEntries::null_seen); + + __ beq(SCR1, SCR2, ok); + + // may have been set by another thread + + membar_acquire(); + + __ mov_metadata(SCR1, exact_klass->constant_encoding()); + + __ ld_ptr(SCR2, mdo_addr); + + __ XOR(SCR2, SCR1, SCR2); + + assert(TypeEntries::type_mask == -2, "must be"); + + __ bstrpick_d(SCR2, SCR2, 63, 1); + + __ beqz(SCR2, ok); + + + + __ stop("unexpected profiling mismatch"); + + __ bind(ok); + + } + +#endif + + // first time here. Set profile type. + + __ st_ptr(tmp, mdo_addr); + + } else { + + assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && + + ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent"); + + + + __ ld_ptr(tmp, mdo_addr); + + __ andi(SCR1, tmp, TypeEntries::type_unknown); + + __ bnez(SCR1, next); // already unknown. Nothing to do anymore. + + + + __ ori(tmp, tmp, TypeEntries::type_unknown); + + __ st_ptr(tmp, mdo_addr); + + // FIXME: Write barrier needed here? 
+ + } + + } + + + + __ bind(next); + + } + + COMMENT("} emit_profile_type"); + +} + + + +void LIR_Assembler::align_backward_branch_target() {} + + + +void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) { + + // tmp must be unused + + assert(tmp->is_illegal(), "wasting a register if tmp is allocated"); + + + + if (left->is_single_cpu()) { + + assert(dest->is_single_cpu(), "expect single result reg"); + + __ sub_w(dest->as_register(), R0, left->as_register()); + + } else if (left->is_double_cpu()) { + + assert(dest->is_double_cpu(), "expect double result reg"); + + __ sub_d(dest->as_register_lo(), R0, left->as_register_lo()); + + } else if (left->is_single_fpu()) { + + assert(dest->is_single_fpu(), "expect single float result reg"); + + __ fneg_s(dest->as_float_reg(), left->as_float_reg()); + + } else { + + assert(left->is_double_fpu(), "expect double float operand reg"); + + assert(dest->is_double_fpu(), "expect double float result reg"); + + __ fneg_d(dest->as_double_reg(), left->as_double_reg()); + + } + +} + + + +void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, + + CodeEmitInfo* info) { + + if (patch_code != lir_patch_none) { + + deoptimize_trap(info); + + return; + + } + + + + __ lea(dest->as_register_lo(), as_Address(addr->as_address_ptr())); + +} + + + +void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, + + LIR_Opr tmp, CodeEmitInfo* info) { + + assert(!tmp->is_valid(), "don't need temporary"); + + __ call(dest, relocInfo::runtime_call_type); + + if (info != NULL) { + + add_call_info_here(info); + + } + +} + + + +void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, + + CodeEmitInfo* info) { + + if (dest->is_address() || src->is_address()) { + + move_op(src, dest, type, lir_patch_none, info, + + /*pop_fpu_stack*/false, /*unaligned*/false, /*wide*/false); + + } else { + + ShouldNotReachHere(); + + } + +} + + + +#ifdef ASSERT + +// emit run-time assertion + +void LIR_Assembler::emit_assert(LIR_OpAssert* op) { + + assert(op->code() == lir_assert, "must be"); + + Label ok; + + + + if (op->in_opr1()->is_valid()) { + + assert(op->in_opr2()->is_valid(), "both operands must be valid"); + + assert(op->in_opr1()->is_cpu_register() || op->in_opr2()->is_cpu_register(), "must be"); + + Register reg1 = as_reg(op->in_opr1()); + + Register reg2 = as_reg(op->in_opr2()); + + switch (op->condition()) { + + case lir_cond_equal: __ beq(reg1, reg2, ok); break; + + case lir_cond_notEqual: __ bne(reg1, reg2, ok); break; + + case lir_cond_less: __ blt(reg1, reg2, ok); break; + + case lir_cond_lessEqual: __ bge(reg2, reg1, ok); break; + + case lir_cond_greaterEqual: __ bge(reg1, reg2, ok); break; + + case lir_cond_greater: __ blt(reg2, reg1, ok); break; + + case lir_cond_belowEqual: __ bgeu(reg2, reg1, ok); break; + + case lir_cond_aboveEqual: __ bgeu(reg1, reg2, ok); break; + + default: ShouldNotReachHere(); + + } + + } else { + + assert(op->in_opr2()->is_illegal(), "both operands must be illegal"); + + assert(op->condition() == lir_cond_always, "no other conditions allowed"); + + } + + if (op->halt()) { + + const char* str = __ code_string(op->msg()); + + __ stop(str); + + } else { + + breakpoint(); + + } + + __ bind(ok); + +} + +#endif + + + +#ifndef PRODUCT + +#define COMMENT(x) do { __ block_comment(x); } while (0) + +#else + +#define COMMENT(x) + +#endif + + + +void LIR_Assembler::membar() { + + COMMENT("membar"); + + __ membar(Assembler::AnyAny); + +} + + + +void LIR_Assembler::membar_acquire() { + + 
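// acquire: order the preceding load before all subsequent loads and stores
+ + 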
__ membar(Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore)); + +} + + + +void LIR_Assembler::membar_release() { + + __ membar(Assembler::Membar_mask_bits(Assembler::LoadStore|Assembler::StoreStore)); + +} + + + +void LIR_Assembler::membar_loadload() { + + __ membar(Assembler::LoadLoad); + +} + + + +void LIR_Assembler::membar_storestore() { + + __ membar(MacroAssembler::StoreStore); + +} + + + +void LIR_Assembler::membar_loadstore() { + + __ membar(MacroAssembler::LoadStore); + +} + + + +void LIR_Assembler::membar_storeload() { + + __ membar(MacroAssembler::StoreLoad); + +} + + + +void LIR_Assembler::on_spin_wait() { + + Unimplemented(); + +} + + + +void LIR_Assembler::get_thread(LIR_Opr result_reg) { + + __ move(result_reg->as_register(), TREG); + +} + + + +void LIR_Assembler::peephole(LIR_List *lir) { + +} + + + +void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, + + LIR_Opr dest, LIR_Opr tmp_op) { + + Address addr = as_Address(src->as_address_ptr()); + + BasicType type = src->type(); + + Register dst = as_reg(dest); + + Register tmp = as_reg(tmp_op); + + bool is_oop = is_reference_type(type); + + + + if (Assembler::is_simm(addr.disp(), 12)) { + + __ addi_d(tmp, addr.base(), addr.disp()); + + } else { + + __ li(tmp, addr.disp()); + + __ add_d(tmp, addr.base(), tmp); + + } + + if (addr.index() != noreg) { + + if (addr.scale() != Address::no_scale) + + __ alsl_d(tmp, addr.index(), tmp, addr.scale() - 1); + + else + + __ add_d(tmp, tmp, addr.index()); + + } + + + + switch(type) { + + case T_INT: + + break; + + case T_LONG: + + break; + + case T_OBJECT: + + case T_ARRAY: + + if (UseCompressedOops) { + + // unsigned int + + } else { + + // long + + } + + break; + + default: + + ShouldNotReachHere(); + + } + + + + if (code == lir_xadd) { + + Register inc = noreg; + + if (data->is_constant()) { + + inc = SCR1; + + __ li(inc, as_long(data)); + + } else { + + inc = as_reg(data); + + } + + switch(type) { + + case T_INT: + + __ amadd_db_w(dst, inc, tmp); + + break; + + case T_LONG: + + __ amadd_db_d(dst, inc, tmp); + + break; + + case T_OBJECT: + + case T_ARRAY: + + if (UseCompressedOops) { + + __ amadd_db_w(dst, inc, tmp); + + __ lu32i_d(dst, 0); + + } else { + + __ amadd_db_d(dst, inc, tmp); + + } + + break; + + default: + + ShouldNotReachHere(); + + } + + } else if (code == lir_xchg) { + + Register obj = as_reg(data); + + if (is_oop && UseCompressedOops) { + + __ encode_heap_oop(SCR2, obj); + + obj = SCR2; + + } + + switch(type) { + + case T_INT: + + __ amswap_db_w(dst, obj, tmp); + + break; + + case T_LONG: + + __ amswap_db_d(dst, obj, tmp); + + break; + + case T_OBJECT: + + case T_ARRAY: + + if (UseCompressedOops) { + + __ amswap_db_w(dst, obj, tmp); + + __ lu32i_d(dst, 0); + + } else { + + __ amswap_db_d(dst, obj, tmp); + + } + + break; + + default: + + ShouldNotReachHere(); + + } + + if (is_oop && UseCompressedOops) { + + __ decode_heap_oop(dst); + + } + + } else { + + ShouldNotReachHere(); + + } + +} + + + +#undef __ +diff --cc src/hotspot/cpu/loongarch/c1_LIRGenerator_loongarch_64.cpp +index a346700ed36,00000000000..fedcc547d48 +mode 100644,000000..100644 +--- a/src/hotspot/cpu/loongarch/c1_LIRGenerator_loongarch_64.cpp ++++ b/src/hotspot/cpu/loongarch/c1_LIRGenerator_loongarch_64.cpp +@@@ -1,1397 -1,0 +1,1384 @@@ + +/* + + * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved. + + * Copyright (c) 2021, 2022, Loongson Technology. All rights reserved. + + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ + * + + * This code is free software; you can redistribute it and/or modify it + + * under the terms of the GNU General Public License version 2 only, as + + * published by the Free Software Foundation. + + * + + * This code is distributed in the hope that it will be useful, but WITHOUT + + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + + * version 2 for more details (a copy is included in the LICENSE file that + + * accompanied this code). + + * + + * You should have received a copy of the GNU General Public License version + + * 2 along with this work; if not, write to the Free Software Foundation, + + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + + * + + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + + * or visit www.oracle.com if you need additional information or have any + + * questions. + + * + + */ + + + +#include "precompiled.hpp" + +#include "asm/macroAssembler.inline.hpp" + +#include "c1/c1_Compilation.hpp" + +#include "c1/c1_FrameMap.hpp" + +#include "c1/c1_Instruction.hpp" + +#include "c1/c1_LIRAssembler.hpp" + +#include "c1/c1_LIRGenerator.hpp" + +#include "c1/c1_Runtime1.hpp" + +#include "c1/c1_ValueStack.hpp" + +#include "ci/ciArray.hpp" + +#include "ci/ciObjArrayKlass.hpp" + +#include "ci/ciTypeArrayKlass.hpp" + +#include "runtime/sharedRuntime.hpp" + +#include "runtime/stubRoutines.hpp" + +#include "utilities/powerOfTwo.hpp" + +#include "vmreg_loongarch.inline.hpp" + + + +#ifdef ASSERT + +#define __ gen()->lir(__FILE__, __LINE__)-> + +#else + +#define __ gen()->lir()-> + +#endif + + + +// Item will be loaded into a byte register; Intel only + +void LIRItem::load_byte_item() { + + load_item(); + +} + + + +void LIRItem::load_nonconstant() { + + LIR_Opr r = value()->operand(); + + if (r->is_constant()) { + + _result = r; + + } else { + + load_item(); + + } + +} + + + +//-------------------------------------------------------------- + +// LIRGenerator + +//-------------------------------------------------------------- + + + +LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::a0_oop_opr; } + +LIR_Opr LIRGenerator::exceptionPcOpr() { return FrameMap::a1_opr; } + +LIR_Opr LIRGenerator::divInOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; } + +LIR_Opr LIRGenerator::divOutOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; } + +LIR_Opr LIRGenerator::remOutOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; } + +LIR_Opr LIRGenerator::shiftCountOpr() { Unimplemented(); return LIR_OprFact::illegalOpr; } + +LIR_Opr LIRGenerator::syncLockOpr() { return new_register(T_INT); } + +LIR_Opr LIRGenerator::syncTempOpr() { return FrameMap::a0_opr; } + +LIR_Opr LIRGenerator::getThreadTemp() { return LIR_OprFact::illegalOpr; } + + + +LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) { + + LIR_Opr opr; + + switch (type->tag()) { + + case intTag: opr = FrameMap::a0_opr; break; + + case objectTag: opr = FrameMap::a0_oop_opr; break; + + case longTag: opr = FrameMap::long0_opr; break; + + case floatTag: opr = FrameMap::fpu0_float_opr; break; + + case doubleTag: opr = FrameMap::fpu0_double_opr; break; + + case addressTag: + + default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr; + + } + + + + assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch"); + + return opr; + +} + + + +LIR_Opr LIRGenerator::rlock_byte(BasicType type) { + + LIR_Opr reg = new_register(T_INT); + + 
set_vreg_flag(reg, LIRGenerator::byte_reg); + + return reg; + +} + + + +//--------- loading items into registers -------------------------------- + + + +bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const { + + if (v->type()->as_IntConstant() != NULL) { + + return v->type()->as_IntConstant()->value() == 0L; + + } else if (v->type()->as_LongConstant() != NULL) { + + return v->type()->as_LongConstant()->value() == 0L; + + } else if (v->type()->as_ObjectConstant() != NULL) { + + return v->type()->as_ObjectConstant()->value()->is_null_object(); + + } else { + + return false; + + } + +} + + + +bool LIRGenerator::can_inline_as_constant(Value v) const { + + // FIXME: Just a guess + + if (v->type()->as_IntConstant() != NULL) { + + return Assembler::is_simm(v->type()->as_IntConstant()->value(), 12); + + } else if (v->type()->as_LongConstant() != NULL) { + + return v->type()->as_LongConstant()->value() == 0L; + + } else if (v->type()->as_ObjectConstant() != NULL) { + + return v->type()->as_ObjectConstant()->value()->is_null_object(); + + } else { + + return false; + + } + +} + + + +bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const { return false; } + + + +LIR_Opr LIRGenerator::safepoint_poll_register() { + + return LIR_OprFact::illegalOpr; + +} + + + +LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index, + + int shift, int disp, BasicType type) { + + assert(base->is_register(), "must be"); + + intx large_disp = disp; + + + + // accumulate fixed displacements + + if (index->is_constant()) { + + LIR_Const *constant = index->as_constant_ptr(); + + if (constant->type() == T_INT) { + + large_disp += index->as_jint() << shift; + + } else { + + assert(constant->type() == T_LONG, "should be"); + + jlong c = index->as_jlong() << shift; + + if ((jlong)((jint)c) == c) { + + large_disp += c; + + index = LIR_OprFact::illegalOpr; + + } else { + + LIR_Opr tmp = new_register(T_LONG); + + __ move(index, tmp); + + index = tmp; + + // apply shift and displacement below + + } + + } + + } + + + + if (index->is_register()) { + + // apply the shift and accumulate the displacement + + if (shift > 0) { + + LIR_Opr tmp = new_pointer_register(); + + __ shift_left(index, shift, tmp); + + index = tmp; + + } + + if (large_disp != 0) { + + LIR_Opr tmp = new_pointer_register(); + + if (Assembler::is_simm(large_disp, 12)) { + + __ add(index, LIR_OprFact::intptrConst(large_disp), tmp); + + index = tmp; + + } else { + + __ move(LIR_OprFact::intptrConst(large_disp), tmp); + + __ add(tmp, index, tmp); + + index = tmp; + + } + + large_disp = 0; + + } + + } else if (large_disp != 0 && !Assembler::is_simm(large_disp, 12)) { + + // index is illegal so replace it with the displacement loaded into a register + + index = new_pointer_register(); + + __ move(LIR_OprFact::intptrConst(large_disp), index); + + large_disp = 0; + + } + + + + // at this point we either have base + index or base + displacement + + if (large_disp == 0 && index->is_register()) { + + return new LIR_Address(base, index, type); + + } else { + + assert(Assembler::is_simm(large_disp, 12), "must be"); + + return new LIR_Address(base, large_disp, type); + + } + +} + + + +LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr, BasicType type) { + + int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type); + + int elem_size = type2aelembytes(type); + + int shift = exact_log2(elem_size); + + + + LIR_Address* addr; + + if (index_opr->is_constant()) { + + addr = new LIR_Address(array_opr, 
offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
+ +  } else {
+ +    if (offset_in_bytes) {
+ +      LIR_Opr tmp = new_pointer_register();
+ +      __ add(array_opr, LIR_OprFact::intConst(offset_in_bytes), tmp);
+ +      array_opr = tmp;
+ +      offset_in_bytes = 0;
+ +    }
+ +    addr = new LIR_Address(array_opr, index_opr, LIR_Address::scale(type), offset_in_bytes, type);
+ +  }
+ +  return addr;
+ +}
+ +
+ +LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
+ +  LIR_Opr r;
+ +  if (type == T_LONG) {
+ +    r = LIR_OprFact::longConst(x);
+ +    if (!Assembler::is_simm(x, 12)) {
+ +      LIR_Opr tmp = new_register(type);
+ +      __ move(r, tmp);
+ +      return tmp;
+ +    }
+ +  } else if (type == T_INT) {
+ +    r = LIR_OprFact::intConst(x);
+ +    if (!Assembler::is_simm(x, 12)) {
+ +      // This is all rather nasty. We don't know whether our constant
+ +      // is required for a logical or an arithmetic operation, so we
+ +      // don't know what the range of valid values is!
+ +      LIR_Opr tmp = new_register(type);
+ +      __ move(r, tmp);
+ +      return tmp;
+ +    }
+ +  } else {
+ +    ShouldNotReachHere();
+ +    r = NULL; // unreachable
+ +  }
+ +  return r;
+ +}
+ +
+ +void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
+ +  LIR_Opr pointer = new_pointer_register();
+ +  __ move(LIR_OprFact::intptrConst(counter), pointer);
+ +  LIR_Address* addr = new LIR_Address(pointer, type);
+ +  increment_counter(addr, step);
+ +}
+ +
+ +void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
+ +  LIR_Opr imm = NULL;
+ +  switch(addr->type()) {
+ +  case T_INT:
+ +    imm = LIR_OprFact::intConst(step);
+ +    break;
+ +  case T_LONG:
+ +    imm = LIR_OprFact::longConst(step);
+ +    break;
+ +  default:
+ +    ShouldNotReachHere();
+ +  }
+ +  LIR_Opr reg = new_register(addr->type());
+ +  __ load(addr, reg);
+ +  __ add(reg, imm, reg);
+ +  __ store(reg, addr);
+ +}
+ +
+- template <typename T>
+- void LIRGenerator::cmp_mem_int_branch(LIR_Condition condition, LIR_Opr base,
+-                                       int disp, int c, T tgt, CodeEmitInfo* info) {
+++void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
+ +  LIR_Opr reg = new_register(T_INT);
+ +  __ load(generate_address(base, disp, T_INT), reg, info);
+-   __ cmp_branch(condition, reg, LIR_OprFact::intConst(c), tgt);
+++  __ cmp(condition, reg, LIR_OprFact::intConst(c));
+ +}
+ +
+- // Explicit instantiation for all supported types.
+- template void LIRGenerator::cmp_mem_int_branch(LIR_Condition, LIR_Opr, int, int, Label*, CodeEmitInfo*);
+- template void LIRGenerator::cmp_mem_int_branch(LIR_Condition, LIR_Opr, int, int, BlockBegin*, CodeEmitInfo*);
+- template void LIRGenerator::cmp_mem_int_branch(LIR_Condition, LIR_Opr, int, int, CodeStub*, CodeEmitInfo*);
+-
+- template <typename T>
+- void LIRGenerator::cmp_reg_mem_branch(LIR_Condition condition, LIR_Opr reg, LIR_Opr base,
+-                                       int disp, BasicType type, T tgt, CodeEmitInfo* info) {
+++void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
+ +  LIR_Opr reg1 = new_register(T_INT);
+ +  __ load(generate_address(base, disp, type), reg1, info);
+-   __ cmp_branch(condition, reg, reg1, tgt);
+++  __ cmp(condition, reg, reg1);
+ +}
+ +
+- // Explicit instantiation for all supported types.
+- template void LIRGenerator::cmp_reg_mem_branch(LIR_Condition, LIR_Opr, LIR_Opr, int, BasicType, Label*, CodeEmitInfo*); +- template void LIRGenerator::cmp_reg_mem_branch(LIR_Condition, LIR_Opr, LIR_Opr, int, BasicType, BlockBegin*, CodeEmitInfo*); +- template void LIRGenerator::cmp_reg_mem_branch(LIR_Condition, LIR_Opr, LIR_Opr, int, BasicType, CodeStub*, CodeEmitInfo*); +- + +bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) { + + if (is_power_of_2(c - 1)) { + + __ shift_left(left, exact_log2(c - 1), tmp); + + __ add(tmp, left, result); + + return true; + + } else if (is_power_of_2(c + 1)) { + + __ shift_left(left, exact_log2(c + 1), tmp); + + __ sub(tmp, left, result); + + return true; + + } else { + + return false; + + } + +} + + + +void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) { + + BasicType type = item->type(); + + __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type)); + +} + + + +void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, + + ciMethod* profiled_method, int profiled_bci) { + + LIR_Opr tmp1 = new_register(objectType); + + LIR_Opr tmp2 = new_register(objectType); + + LIR_Opr tmp3 = new_register(objectType); + + __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci); + +} + + + +//---------------------------------------------------------------------- + +// visitor functions + +//---------------------------------------------------------------------- + + + +void LIRGenerator::do_MonitorEnter(MonitorEnter* x) { + + assert(x->is_pinned(),""); + + LIRItem obj(x->obj(), this); + + obj.load_item(); + + + + set_no_result(x); + + + + // "lock" stores the address of the monitor stack slot, so this is not an oop + + LIR_Opr lock = new_register(T_INT); + + // Need a scratch register for biased locking + + LIR_Opr scratch = LIR_OprFact::illegalOpr; + + if (UseBiasedLocking) { + + scratch = new_register(T_INT); + + } + + + + CodeEmitInfo* info_for_exception = NULL; + + if (x->needs_null_check()) { + + info_for_exception = state_for(x); + + } + + // this CodeEmitInfo must not have the xhandlers because here the + + // object is already locked (xhandlers expect object to be unlocked) + + CodeEmitInfo* info = state_for(x, x->state(), true); + + monitor_enter(obj.result(), lock, syncTempOpr(), scratch, + + x->monitor_no(), info_for_exception, info); + +} + + + +void LIRGenerator::do_MonitorExit(MonitorExit* x) { + + assert(x->is_pinned(),""); + + + + LIRItem obj(x->obj(), this); + + obj.dont_load_item(); + + + + LIR_Opr lock = new_register(T_INT); + + LIR_Opr obj_temp = new_register(T_INT); + + set_no_result(x); + + monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no()); + +} + + + +void LIRGenerator::do_NegateOp(NegateOp* x) { + + LIRItem from(x->x(), this); + + from.load_item(); + + LIR_Opr result = rlock_result(x); + + __ negate (from.result(), result); + +} + + + +// for _fadd, _fmul, _fsub, _fdiv, _frem + +// _dadd, _dmul, _dsub, _ddiv, _drem + +void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) { + + if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) { + + // float remainder is implemented as a direct call into the runtime + + LIRItem right(x->x(), this); + + LIRItem left(x->y(), this); + + + + BasicTypeList signature(2); + + if (x->op() == Bytecodes::_frem) { + + signature.append(T_FLOAT); + + signature.append(T_FLOAT); + + } else { + + 
signature.append(T_DOUBLE); + + signature.append(T_DOUBLE); + + } + + CallingConvention* cc = frame_map()->c_calling_convention(&signature); + + + + const LIR_Opr result_reg = result_register_for(x->type()); + + left.load_item_force(cc->at(1)); + + right.load_item(); + + + + __ move(right.result(), cc->at(0)); + + + + address entry; + + if (x->op() == Bytecodes::_frem) { + + entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem); + + } else { + + entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem); + + } + + + + LIR_Opr result = rlock_result(x); + + __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args()); + + __ move(result_reg, result); + + return; + + } + + + + LIRItem left(x->x(), this); + + LIRItem right(x->y(), this); + + LIRItem* left_arg = &left; + + LIRItem* right_arg = &right; + + + + // Always load right hand side. + + right.load_item(); + + + + if (!left.is_register()) + + left.load_item(); + + + + LIR_Opr reg = rlock(x); + + + + arithmetic_op_fpu(x->op(), reg, left.result(), right.result()); + + + + set_result(x, round_item(reg)); + +} + + + +// for _ladd, _lmul, _lsub, _ldiv, _lrem + +void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) { + + // missing test if instr is commutative and if we should swap + + LIRItem left(x->x(), this); + + LIRItem right(x->y(), this); + + + + if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) { + + left.load_item(); + + bool need_zero_check = true; + + if (right.is_constant()) { + + jlong c = right.get_jlong_constant(); + + // no need to do div-by-zero check if the divisor is a non-zero constant + + if (c != 0) need_zero_check = false; + + // do not load right if the divisor is a power-of-2 constant + + if (c > 0 && is_power_of_2(c) && Assembler::is_uimm(c - 1, 12)) { + + right.dont_load_item(); + + } else { + + right.load_item(); + + } + + } else { + + right.load_item(); + + } + + if (need_zero_check) { + + CodeEmitInfo* info = state_for(x); +- CodeStub* stub = new DivByZeroStub(info); +- __ cmp_branch(lir_cond_equal, right.result(), LIR_OprFact::longConst(0), stub); +++ __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0)); +++ __ branch(lir_cond_equal, new DivByZeroStub(info)); + + } + + + + rlock_result(x); + + switch (x->op()) { + + case Bytecodes::_lrem: + + __ rem (left.result(), right.result(), x->operand()); + + break; + + case Bytecodes::_ldiv: + + __ div (left.result(), right.result(), x->operand()); + + break; + + default: + + ShouldNotReachHere(); + + break; + + } + + } else { + + assert(x->op() == Bytecodes::_lmul || x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, + + "expect lmul, ladd or lsub"); + + // add, sub, mul + + left.load_item(); + + if (!right.is_register()) { + + if (x->op() == Bytecodes::_lmul || !right.is_constant() || + + (x->op() == Bytecodes::_ladd && !Assembler::is_simm(right.get_jlong_constant(), 12)) || + + (x->op() == Bytecodes::_lsub && !Assembler::is_simm(-right.get_jlong_constant(), 12))) { + + right.load_item(); + + } else { // add, sub + + assert(x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, "expect ladd or lsub"); + + // don't load constants to save register + + right.load_nonconstant(); + + } + + } + + rlock_result(x); + + arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL); + + } + +} + + + +// for: _iadd, _imul, _isub, _idiv, _irem + +void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) { + + // Test if instr is commutative and if we should swap + + LIRItem left(x->x(), this); + + LIRItem right(x->y(), 
this); + + LIRItem* left_arg = &left; + + LIRItem* right_arg = &right; + + if (x->is_commutative() && left.is_stack() && right.is_register()) { + + // swap them if left is real stack (or cached) and right is real register(not cached) + + left_arg = &right; + + right_arg = &left; + + } + + + + left_arg->load_item(); + + + + // do not need to load right, as we can handle stack and constants + + if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) { + + rlock_result(x); + + bool need_zero_check = true; + + if (right.is_constant()) { + + jint c = right.get_jint_constant(); + + // no need to do div-by-zero check if the divisor is a non-zero constant + + if (c != 0) need_zero_check = false; + + // do not load right if the divisor is a power-of-2 constant + + if (c > 0 && is_power_of_2(c) && Assembler::is_uimm(c - 1, 12)) { + + right_arg->dont_load_item(); + + } else { + + right_arg->load_item(); + + } + + } else { + + right_arg->load_item(); + + } + + if (need_zero_check) { + + CodeEmitInfo* info = state_for(x); +- CodeStub* stub = new DivByZeroStub(info); +- __ cmp_branch(lir_cond_equal, right_arg->result(), LIR_OprFact::longConst(0), stub); +++ __ cmp(lir_cond_equal, right_arg->result(), LIR_OprFact::longConst(0)); +++ __ branch(lir_cond_equal, new DivByZeroStub(info)); + + } + + + + LIR_Opr ill = LIR_OprFact::illegalOpr; + + if (x->op() == Bytecodes::_irem) { + + __ irem(left_arg->result(), right_arg->result(), x->operand(), ill, NULL); + + } else if (x->op() == Bytecodes::_idiv) { + + __ idiv(left_arg->result(), right_arg->result(), x->operand(), ill, NULL); + + } + + } else if (x->op() == Bytecodes::_iadd || x->op() == Bytecodes::_isub) { + + if (right.is_constant() && + + ((x->op() == Bytecodes::_iadd && Assembler::is_simm(right.get_jint_constant(), 12)) || + + (x->op() == Bytecodes::_isub && Assembler::is_simm(-right.get_jint_constant(), 12)))) { + + right.load_nonconstant(); + + } else { + + right.load_item(); + + } + + rlock_result(x); + + arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), LIR_OprFact::illegalOpr); + + } else { + + assert (x->op() == Bytecodes::_imul, "expect imul"); + + if (right.is_constant()) { + + jint c = right.get_jint_constant(); + + if (c > 0 && c < max_jint && (is_power_of_2(c) || is_power_of_2(c - 1) || is_power_of_2(c + 1))) { + + right_arg->dont_load_item(); + + } else { + + // Cannot use constant op. 
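+ +        // (neither c, c-1 nor c+1 is a power of two here, so the multiply
+ +        // cannot be strength-reduced to shifts plus add/sub)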
+ + right_arg->load_item(); + + } + + } else { + + right.load_item(); + + } + + rlock_result(x); + + arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), new_register(T_INT)); + + } + +} + + + +void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) { + + // when an operand with use count 1 is the left operand, then it is + + // likely that no move for 2-operand-LIR-form is necessary + + if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) { + + x->swap_operands(); + + } + + + + ValueTag tag = x->type()->tag(); + + assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters"); + + switch (tag) { + + case floatTag: + + case doubleTag: do_ArithmeticOp_FPU(x); return; + + case longTag: do_ArithmeticOp_Long(x); return; + + case intTag: do_ArithmeticOp_Int(x); return; + + default: ShouldNotReachHere(); return; + + } + +} + + + +// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr + +void LIRGenerator::do_ShiftOp(ShiftOp* x) { + + LIRItem left(x->x(), this); + + LIRItem right(x->y(), this); + + + + left.load_item(); + + + + rlock_result(x); + + if (right.is_constant()) { + + right.dont_load_item(); + + int c; + + switch (x->op()) { + + case Bytecodes::_ishl: + + c = right.get_jint_constant() & 0x1f; + + __ shift_left(left.result(), c, x->operand()); + + break; + + case Bytecodes::_ishr: + + c = right.get_jint_constant() & 0x1f; + + __ shift_right(left.result(), c, x->operand()); + + break; + + case Bytecodes::_iushr: + + c = right.get_jint_constant() & 0x1f; + + __ unsigned_shift_right(left.result(), c, x->operand()); + + break; + + case Bytecodes::_lshl: + + c = right.get_jint_constant() & 0x3f; + + __ shift_left(left.result(), c, x->operand()); + + break; + + case Bytecodes::_lshr: + + c = right.get_jint_constant() & 0x3f; + + __ shift_right(left.result(), c, x->operand()); + + break; + + case Bytecodes::_lushr: + + c = right.get_jint_constant() & 0x3f; + + __ unsigned_shift_right(left.result(), c, x->operand()); + + break; + + default: + + ShouldNotReachHere(); + + } + + } else { + + right.load_item(); + + LIR_Opr tmp = new_register(T_INT); + + switch (x->op()) { + + case Bytecodes::_ishl: + + __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp); + + __ shift_left(left.result(), tmp, x->operand(), tmp); + + break; + + case Bytecodes::_ishr: + + __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp); + + __ shift_right(left.result(), tmp, x->operand(), tmp); + + break; + + case Bytecodes::_iushr: + + __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp); + + __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp); + + break; + + case Bytecodes::_lshl: + + __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp); + + __ shift_left(left.result(), tmp, x->operand(), tmp); + + break; + + case Bytecodes::_lshr: + + __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp); + + __ shift_right(left.result(), tmp, x->operand(), tmp); + + break; + + case Bytecodes::_lushr: + + __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp); + + __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp); + + break; + + default: + + ShouldNotReachHere(); + + } + + } + +} + + + +// _iand, _land, _ior, _lor, _ixor, _lxor + +void LIRGenerator::do_LogicOp(LogicOp* x) { + + LIRItem left(x->x(), this); + + LIRItem right(x->y(), this); + + + + left.load_item(); + + + + rlock_result(x); + + if (right.is_constant() + + && ((right.type()->tag() == intTag + + && 
Assembler::is_uimm(right.get_jint_constant(), 12)) + + || (right.type()->tag() == longTag + + && Assembler::is_uimm(right.get_jlong_constant(), 12)))) { + + right.dont_load_item(); + + } else { + + right.load_item(); + + } + + switch (x->op()) { + + case Bytecodes::_iand: + + case Bytecodes::_land: + + __ logical_and(left.result(), right.result(), x->operand()); break; + + case Bytecodes::_ior: + + case Bytecodes::_lor: + + __ logical_or (left.result(), right.result(), x->operand()); break; + + case Bytecodes::_ixor: + + case Bytecodes::_lxor: + + __ logical_xor(left.result(), right.result(), x->operand()); break; + + default: Unimplemented(); + + } + +} + + + +// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg + +void LIRGenerator::do_CompareOp(CompareOp* x) { + + LIRItem left(x->x(), this); + + LIRItem right(x->y(), this); + + ValueTag tag = x->x()->type()->tag(); + + if (tag == longTag) { + + left.set_destroys_register(); + + } + + left.load_item(); + + right.load_item(); + + LIR_Opr reg = rlock_result(x); + + + + if (x->x()->type()->is_float_kind()) { + + Bytecodes::Code code = x->op(); + + __ fcmp2int(left.result(), right.result(), reg, + + (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl)); + + } else if (x->x()->type()->tag() == longTag) { + + __ lcmp2int(left.result(), right.result(), reg); + + } else { + + Unimplemented(); + + } + +} + + + +LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, + + LIRItem& cmp_value, LIRItem& new_value) { + + LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience + + new_value.load_item(); + + cmp_value.load_item(); + + LIR_Opr result = new_register(T_INT); + + if (is_reference_type(type)) { + + __ cas_obj(addr, cmp_value.result(), new_value.result(), + + new_register(T_INT), new_register(T_INT), result); + + } else if (type == T_INT) { + + __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), + + new_value.result(), ill, ill); + + } else if (type == T_LONG) { + + __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), + + new_value.result(), ill, ill); + + } else { + + ShouldNotReachHere(); + + Unimplemented(); + + } + + __ move(FrameMap::scr1_opr, result); + + return result; + +} + + + +LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) { + + bool is_oop = is_reference_type(type); + + LIR_Opr result = new_register(type); + + value.load_item(); + + assert(type == T_INT || is_oop || type == T_LONG, "unexpected type"); + + LIR_Opr tmp = new_register(T_INT); + + __ xchg(addr, value.result(), result, tmp); + + return result; + +} + + + +LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) { + + LIR_Opr result = new_register(type); + + value.load_item(); + + assert(type == T_INT || type == T_LONG, "unexpected type"); + + LIR_Opr tmp = new_register(T_INT); + + __ xadd(addr, value.result(), result, tmp); + + return result; + +} + + + +void LIRGenerator::do_MathIntrinsic(Intrinsic* x) { + + assert(x->number_of_arguments() == 1 || (x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow), + + "wrong type"); + + if (x->id() == vmIntrinsics::_dexp || x->id() == vmIntrinsics::_dlog || + + x->id() == vmIntrinsics::_dpow || x->id() == vmIntrinsics::_dcos || + + x->id() == vmIntrinsics::_dsin || x->id() == vmIntrinsics::_dtan || + + x->id() == vmIntrinsics::_dlog10) { + + do_LibmIntrinsic(x); + + return; + + } + + switch (x->id()) { + + case vmIntrinsics::_dabs: + + case vmIntrinsics::_dsqrt: { + + assert(x->number_of_arguments() == 1, "wrong type"); + + LIRItem 
value(x->argument_at(0), this); + + value.load_item(); + + LIR_Opr dst = rlock_result(x); + + + + switch (x->id()) { + + case vmIntrinsics::_dsqrt: + + __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr); + + break; + + case vmIntrinsics::_dabs: + + __ abs(value.result(), dst, LIR_OprFact::illegalOpr); + + break; + + default: + + ShouldNotReachHere(); + + } + + break; + + } + + default: + + ShouldNotReachHere(); + + } + +} + + + +void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) { + + LIRItem value(x->argument_at(0), this); + + value.set_destroys_register(); + + + + LIR_Opr calc_result = rlock_result(x); + + LIR_Opr result_reg = result_register_for(x->type()); + + + + CallingConvention* cc = NULL; + + + + if (x->id() == vmIntrinsics::_dpow) { + + LIRItem value1(x->argument_at(1), this); + + + + value1.set_destroys_register(); + + + + BasicTypeList signature(2); + + signature.append(T_DOUBLE); + + signature.append(T_DOUBLE); + + cc = frame_map()->c_calling_convention(&signature); + + value.load_item_force(cc->at(0)); + + value1.load_item_force(cc->at(1)); + + } else { + + BasicTypeList signature(1); + + signature.append(T_DOUBLE); + + cc = frame_map()->c_calling_convention(&signature); + + value.load_item_force(cc->at(0)); + + } + + + + switch (x->id()) { + + case vmIntrinsics::_dexp: + + if (StubRoutines::dexp() != NULL) { + + __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args()); + + } else { + + __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args()); + + } + + break; + + case vmIntrinsics::_dlog: + + if (StubRoutines::dlog() != NULL) { + + __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args()); + + } else { + + __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args()); + + } + + break; + + case vmIntrinsics::_dlog10: + + if (StubRoutines::dlog10() != NULL) { + + __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args()); + + } else { + + __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args()); + + } + + break; + + case vmIntrinsics::_dpow: + + if (StubRoutines::dpow() != NULL) { + + __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args()); + + } else { + + __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args()); + + } + + break; + + case vmIntrinsics::_dsin: + + if (StubRoutines::dsin() != NULL) { + + __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args()); + + } else { + + __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args()); + + } + + break; + + case vmIntrinsics::_dcos: + + if (StubRoutines::dcos() != NULL) { + + __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args()); + + } else { + + __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args()); + + } + + break; + + case vmIntrinsics::_dtan: + + if (StubRoutines::dtan() != NULL) { + + __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args()); + + } else { + + __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args()); + + } + + break; + + default: ShouldNotReachHere(); + + } + + __ move(result_reg, calc_result); + +} + + + +void LIRGenerator::do_ArrayCopy(Intrinsic* x) { + 
+ Register j_rarg0 = RT0; + + Register j_rarg1 = RA0; + + Register j_rarg2 = RA1; + + Register j_rarg3 = RA2; + + Register j_rarg4 = RA3; + + Register j_rarg5 = RA4; + + + + assert(x->number_of_arguments() == 5, "wrong type"); + + + + // Make all state_for calls early since they can emit code + + CodeEmitInfo* info = state_for(x, x->state()); + + + + LIRItem src(x->argument_at(0), this); + + LIRItem src_pos(x->argument_at(1), this); + + LIRItem dst(x->argument_at(2), this); + + LIRItem dst_pos(x->argument_at(3), this); + + LIRItem length(x->argument_at(4), this); + + + + // operands for arraycopy must use fixed registers, otherwise + + // LinearScan will fail allocation (because arraycopy always needs a + + // call) + + + + // The java calling convention will give us enough registers + + // so that on the stub side the args will be perfect already. + + // On the other slow/special case side we call C and the arg + + // positions are not similar enough to pick one as the best. + + // Also because the java calling convention is a "shifted" version + + // of the C convention we can process the java args trivially into C + + // args without worry of overwriting during the xfer + + + + src.load_item_force (FrameMap::as_oop_opr(j_rarg0)); + + src_pos.load_item_force (FrameMap::as_opr(j_rarg1)); + + dst.load_item_force (FrameMap::as_oop_opr(j_rarg2)); + + dst_pos.load_item_force (FrameMap::as_opr(j_rarg3)); + + length.load_item_force (FrameMap::as_opr(j_rarg4)); + + + + LIR_Opr tmp = FrameMap::as_opr(j_rarg5); + + + + set_no_result(x); + + + + int flags; + + ciArrayKlass* expected_type; + + arraycopy_helper(x, &flags, &expected_type); + + + + __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), + + length.result(), tmp, expected_type, flags, info); // does add_safepoint + +} + + + +void LIRGenerator::do_update_CRC32(Intrinsic* x) { + + assert(UseCRC32Intrinsics, "why are we here?"); + + // Make all state_for calls early since they can emit code + + LIR_Opr result = rlock_result(x); + + int flags = 0; + + switch (x->id()) { + + case vmIntrinsics::_updateCRC32: { + + LIRItem crc(x->argument_at(0), this); + + LIRItem val(x->argument_at(1), this); + + // val is destroyed by update_crc32 + + val.set_destroys_register(); + + crc.load_item(); + + val.load_item(); + + __ update_crc32(crc.result(), val.result(), result); + + break; + + } + + case vmIntrinsics::_updateBytesCRC32: + + case vmIntrinsics::_updateByteBufferCRC32: { + + bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32); + + + + LIRItem crc(x->argument_at(0), this); + + LIRItem buf(x->argument_at(1), this); + + LIRItem off(x->argument_at(2), this); + + LIRItem len(x->argument_at(3), this); + + buf.load_item(); + + off.load_nonconstant(); + + + + LIR_Opr index = off.result(); + + int offset = is_updateBytes ? 
arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0; + + if(off.result()->is_constant()) { + + index = LIR_OprFact::illegalOpr; + + offset += off.result()->as_jint(); + + } + + LIR_Opr base_op = buf.result(); + + + + if (index->is_valid()) { + + LIR_Opr tmp = new_register(T_LONG); + + __ convert(Bytecodes::_i2l, index, tmp); + + index = tmp; + + } + + + + if (offset) { + + LIR_Opr tmp = new_pointer_register(); + + __ add(base_op, LIR_OprFact::intConst(offset), tmp); + + base_op = tmp; + + offset = 0; + + } + + + + LIR_Address* a = new LIR_Address(base_op, index, offset, T_BYTE); + + BasicTypeList signature(3); + + signature.append(T_INT); + + signature.append(T_ADDRESS); + + signature.append(T_INT); + + CallingConvention* cc = frame_map()->c_calling_convention(&signature); + + const LIR_Opr result_reg = result_register_for(x->type()); + + + + LIR_Opr addr = new_pointer_register(); + + __ leal(LIR_OprFact::address(a), addr); + + + + crc.load_item_force(cc->at(0)); + + __ move(addr, cc->at(1)); + + len.load_item_force(cc->at(2)); + + + + __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args()); + + __ move(result_reg, result); + + + + break; + + } + + default: { + + ShouldNotReachHere(); + + } + + } + +} + + + +void LIRGenerator::do_update_CRC32C(Intrinsic* x) { + + assert(UseCRC32CIntrinsics, "why are we here?"); + + // Make all state_for calls early since they can emit code + + LIR_Opr result = rlock_result(x); + + int flags = 0; + + switch (x->id()) { + + case vmIntrinsics::_updateBytesCRC32C: + + case vmIntrinsics::_updateDirectByteBufferCRC32C: { + + bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C); + + int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0; + + + + LIRItem crc(x->argument_at(0), this); + + LIRItem buf(x->argument_at(1), this); + + LIRItem off(x->argument_at(2), this); + + LIRItem end(x->argument_at(3), this); + + + + buf.load_item(); + + off.load_nonconstant(); + + end.load_nonconstant(); + + + + // len = end - off + + LIR_Opr len = end.result(); + + LIR_Opr tmpA = new_register(T_INT); + + LIR_Opr tmpB = new_register(T_INT); + + __ move(end.result(), tmpA); + + __ move(off.result(), tmpB); + + __ sub(tmpA, tmpB, tmpA); + + len = tmpA; + + + + LIR_Opr index = off.result(); + + if(off.result()->is_constant()) { + + index = LIR_OprFact::illegalOpr; + + offset += off.result()->as_jint(); + + } + + LIR_Opr base_op = buf.result(); + + + + if (index->is_valid()) { + + LIR_Opr tmp = new_register(T_LONG); + + __ convert(Bytecodes::_i2l, index, tmp); + + index = tmp; + + } + + + + if (offset) { + + LIR_Opr tmp = new_pointer_register(); + + __ add(base_op, LIR_OprFact::intConst(offset), tmp); + + base_op = tmp; + + offset = 0; + + } + + + + LIR_Address* a = new LIR_Address(base_op, index, offset, T_BYTE); + + BasicTypeList signature(3); + + signature.append(T_INT); + + signature.append(T_ADDRESS); + + signature.append(T_INT); + + CallingConvention* cc = frame_map()->c_calling_convention(&signature); + + const LIR_Opr result_reg = result_register_for(x->type()); + + + + LIR_Opr addr = new_pointer_register(); + + __ leal(LIR_OprFact::address(a), addr); + + + + crc.load_item_force(cc->at(0)); + + __ move(addr, cc->at(1)); + + __ move(len, cc->at(2)); + + + + __ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), getThreadTemp(), result_reg, cc->args()); + + __ move(result_reg, result); + + + + break; + + } + + default: { + + ShouldNotReachHere(); + + } + + } + +} + + + +void 
LIRGenerator::do_FmaIntrinsic(Intrinsic* x) { + + assert(x->number_of_arguments() == 3, "wrong type"); + + assert(UseFMA, "Needs FMA instructions support."); + + LIRItem value(x->argument_at(0), this); + + LIRItem value1(x->argument_at(1), this); + + LIRItem value2(x->argument_at(2), this); + + + + value.load_item(); + + value1.load_item(); + + value2.load_item(); + + + + LIR_Opr calc_input = value.result(); + + LIR_Opr calc_input1 = value1.result(); + + LIR_Opr calc_input2 = value2.result(); + + LIR_Opr calc_result = rlock_result(x); + + + + switch (x->id()) { + + case vmIntrinsics::_fmaD: + + __ fmad(calc_input, calc_input1, calc_input2, calc_result); + + break; + + case vmIntrinsics::_fmaF: + + __ fmaf(calc_input, calc_input1, calc_input2, calc_result); + + break; + + default: + + ShouldNotReachHere(); + + } + +} + + + +void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) { + + fatal("vectorizedMismatch intrinsic is not implemented on this platform"); + +} + + + +// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f + +// _i2b, _i2c, _i2s + +void LIRGenerator::do_Convert(Convert* x) { + + LIRItem value(x->value(), this); + + value.load_item(); + + LIR_Opr input = value.result(); + + LIR_Opr result = rlock(x); + + + + // arguments of lir_convert + + LIR_Opr conv_input = input; + + LIR_Opr conv_result = result; + + + + switch (x->op()) { + + case Bytecodes::_f2i: + + case Bytecodes::_f2l: + + __ convert(x->op(), conv_input, conv_result, NULL, new_register(T_FLOAT)); + + break; + + case Bytecodes::_d2i: + + case Bytecodes::_d2l: + + __ convert(x->op(), conv_input, conv_result, NULL, new_register(T_DOUBLE)); + + break; + + default: + + __ convert(x->op(), conv_input, conv_result); + + break; + + } + + + + assert(result->is_virtual(), "result must be virtual register"); + + set_result(x, result); + +} + + + +void LIRGenerator::do_NewInstance(NewInstance* x) { + +#ifndef PRODUCT + + if (PrintNotLoaded && !x->klass()->is_loaded()) { + + tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci()); + + } + +#endif + + CodeEmitInfo* info = state_for(x, x->state()); + + LIR_Opr reg = result_register_for(x->type()); + + new_instance(reg, x->klass(), x->is_unresolved(), + + FrameMap::t0_oop_opr, + + FrameMap::t1_oop_opr, + + FrameMap::a4_oop_opr, + + LIR_OprFact::illegalOpr, + + FrameMap::a3_metadata_opr, info); + + LIR_Opr result = rlock_result(x); + + __ move(reg, result); + +} + + + +void LIRGenerator::do_NewTypeArray(NewTypeArray* x) { + + CodeEmitInfo* info = state_for(x, x->state()); + + + + LIRItem length(x->length(), this); + + length.load_item_force(FrameMap::s0_opr); + + + + LIR_Opr reg = result_register_for(x->type()); + + LIR_Opr tmp1 = FrameMap::t0_oop_opr; + + LIR_Opr tmp2 = FrameMap::t1_oop_opr; + + LIR_Opr tmp3 = FrameMap::a5_oop_opr; + + LIR_Opr tmp4 = reg; + + LIR_Opr klass_reg = FrameMap::a3_metadata_opr; + + LIR_Opr len = length.result(); + + BasicType elem_type = x->elt_type(); + + + + __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg); + + + + CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info); + + __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path); + + + + LIR_Opr result = rlock_result(x); + + __ move(reg, result); + +} + + + +void LIRGenerator::do_NewObjectArray(NewObjectArray* x) { + + LIRItem length(x->length(), this); + + // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction + + // and therefore provide the 
state before the parameters have been consumed + + CodeEmitInfo* patching_info = NULL; + + if (!x->klass()->is_loaded() || PatchALot) { + + patching_info = state_for(x, x->state_before()); + + } + + + + CodeEmitInfo* info = state_for(x, x->state()); + + + + LIR_Opr reg = result_register_for(x->type()); + + LIR_Opr tmp1 = FrameMap::t0_oop_opr; + + LIR_Opr tmp2 = FrameMap::t1_oop_opr; + + LIR_Opr tmp3 = FrameMap::a5_oop_opr; + + LIR_Opr tmp4 = reg; + + LIR_Opr klass_reg = FrameMap::a3_metadata_opr; + + + + length.load_item_force(FrameMap::s0_opr); + + LIR_Opr len = length.result(); + + + + CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info); + + ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass()); + + if (obj == ciEnv::unloaded_ciobjarrayklass()) { + + BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error"); + + } + + klass2reg_with_patching(klass_reg, obj, patching_info); + + __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path); + + + + LIR_Opr result = rlock_result(x); + + __ move(reg, result); + +} + + + +void LIRGenerator::do_NewMultiArray(NewMultiArray* x) { + + Values* dims = x->dims(); + + int i = dims->length(); + + LIRItemList* items = new LIRItemList(i, i, NULL); + + while (i-- > 0) { + + LIRItem* size = new LIRItem(dims->at(i), this); + + items->at_put(i, size); + + } + + + + // Evaluate state_for early since it may emit code. + + CodeEmitInfo* patching_info = NULL; + + if (!x->klass()->is_loaded() || PatchALot) { + + patching_info = state_for(x, x->state_before()); + + + + // Cannot re-use same xhandlers for multiple CodeEmitInfos, so + + // clone all handlers (NOTE: Usually this is handled transparently + + // by the CodeEmitInfo cloning logic in CodeStub constructors but + + // is done explicitly here because a stub isn't being used). + + x->set_exception_handlers(new XHandlers(x->exception_handlers())); + + } + + CodeEmitInfo* info = state_for(x, x->state()); + + + + i = dims->length(); + + while (i-- > 0) { + + LIRItem* size = items->at(i); + + size->load_item(); + + + + store_stack_parameter(size->result(), in_ByteSize(i*4)); + + } + + + + LIR_Opr klass_reg = FrameMap::a0_metadata_opr; + + klass2reg_with_patching(klass_reg, x->klass(), patching_info); + + + + LIR_Opr rank = FrameMap::s0_opr; + + __ move(LIR_OprFact::intConst(x->rank()), rank); + + LIR_Opr varargs = FrameMap::a2_opr; + + __ move(FrameMap::sp_opr, varargs); + + LIR_OprList* args = new LIR_OprList(3); + + args->append(klass_reg); + + args->append(rank); + + args->append(varargs); + + LIR_Opr reg = result_register_for(x->type()); + + __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id), + + LIR_OprFact::illegalOpr, + + reg, args, info); + + + + LIR_Opr result = rlock_result(x); + + __ move(reg, result); + +} + + + +void LIRGenerator::do_BlockBegin(BlockBegin* x) { + + // nothing to do for now + +} + + + +void LIRGenerator::do_CheckCast(CheckCast* x) { + + LIRItem obj(x->obj(), this); + + + + CodeEmitInfo* patching_info = NULL; + + if (!x->klass()->is_loaded() || + + (PatchALot && !x->is_incompatible_class_change_check() && + + !x->is_invokespecial_receiver_check())) { + + // must do this before locking the destination register as an oop register, + + // and before the obj is loaded (the latter is for deoptimization) + + patching_info = state_for(x, x->state_before()); + + } + + obj.load_item(); + + + + // info for exceptions + + CodeEmitInfo* info_for_exception = + + (x->needs_exception_state() ? 
state_for(x) : + + state_for(x, x->state_before(), true /*ignore_xhandler*/)); + + + + CodeStub* stub; + + if (x->is_incompatible_class_change_check()) { + + assert(patching_info == NULL, "can't patch this"); + + stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, + + LIR_OprFact::illegalOpr, info_for_exception); + + } else if (x->is_invokespecial_receiver_check()) { + + assert(patching_info == NULL, "can't patch this"); + + stub = new DeoptimizeStub(info_for_exception, + + Deoptimization::Reason_class_check, + + Deoptimization::Action_none); + + } else { + + stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, + + obj.result(), info_for_exception); + + } + + LIR_Opr reg = rlock_result(x); + + LIR_Opr tmp3 = LIR_OprFact::illegalOpr; + + if (!x->klass()->is_loaded() || UseCompressedClassPointers) { + + tmp3 = new_register(objectType); + + } + + __ checkcast(reg, obj.result(), x->klass(), + + new_register(objectType), new_register(objectType), tmp3, + + x->direct_compare(), info_for_exception, patching_info, stub, + + x->profiled_method(), x->profiled_bci()); + +} + + + +void LIRGenerator::do_InstanceOf(InstanceOf* x) { + + LIRItem obj(x->obj(), this); + + + + // result and test object may not be in same register + + LIR_Opr reg = rlock_result(x); + + CodeEmitInfo* patching_info = NULL; + + if ((!x->klass()->is_loaded() || PatchALot)) { + + // must do this before locking the destination register as an oop register + + patching_info = state_for(x, x->state_before()); + + } + + obj.load_item(); + + LIR_Opr tmp3 = LIR_OprFact::illegalOpr; + + if (!x->klass()->is_loaded() || UseCompressedClassPointers) { + + tmp3 = new_register(objectType); + + } + + __ instanceof(reg, obj.result(), x->klass(), + + new_register(objectType), new_register(objectType), tmp3, + + x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci()); + +} + + + +void LIRGenerator::do_If(If* x) { + + assert(x->number_of_sux() == 2, "inconsistency"); + + ValueTag tag = x->x()->type()->tag(); + + bool is_safepoint = x->is_safepoint(); + + + + If::Condition cond = x->cond(); + + + + LIRItem xitem(x->x(), this); + + LIRItem yitem(x->y(), this); + + LIRItem* xin = &xitem; + + LIRItem* yin = &yitem; + + + + if (tag == longTag) { + + // for longs, only conditions "eql", "neq", "lss", "geq" are valid; + + // mirror for other conditions + + if (cond == If::gtr || cond == If::leq) { + + cond = Instruction::mirror(cond); + + xin = &yitem; + + yin = &xitem; + + } + + xin->set_destroys_register(); + + } + + xin->load_item(); + + + + if (tag == longTag) { + + if (yin->is_constant() && yin->get_jlong_constant() == 0) { + + yin->dont_load_item(); + + } else { + + yin->load_item(); + + } + + } else if (tag == intTag) { + + if (yin->is_constant() && yin->get_jint_constant() == 0) { + + yin->dont_load_item(); + + } else { + + yin->load_item(); + + } + + } else { + + yin->load_item(); + + } + + + + set_no_result(x); + + + + LIR_Opr left = xin->result(); + + LIR_Opr right = yin->result(); + + + + // add safepoint before generating condition code so it can be recomputed + + if (x->is_safepoint()) { + + // increment backedge counter if needed + + increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()), + + x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci()); + + __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before())); + + } + + +++ __ cmp(lir_cond(cond), left, right); + + // Generate branch profiling. 
Profiling code doesn't kill flags. +- profile_branch(x, cond, left, right); +++ profile_branch(x, cond); + + move_to_phi(x->state()); + + if (x->x()->type()->is_float_kind()) { +- __ cmp_branch(lir_cond(cond), left, right, x->tsux(), x->usux()); +++ __ branch(lir_cond(cond), x->tsux(), x->usux()); + + } else { +- __ cmp_branch(lir_cond(cond), left, right, x->tsux()); +++ __ branch(lir_cond(cond), x->tsux()); + + } + + assert(x->default_sux() == x->fsux(), "wrong destination above"); + + __ jump(x->default_sux()); + +} + + + +LIR_Opr LIRGenerator::getThreadPointer() { + + return FrameMap::as_pointer_opr(TREG); + +} + + + +void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); } + + + +void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address, + + CodeEmitInfo* info) { + + __ volatile_store_mem_reg(value, address, info); + +} + + + +void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result, + + CodeEmitInfo* info) { + + // 8179954: We need to make sure that the code generated for + + // volatile accesses forms a sequentially-consistent set of + + // operations when combined with STLR and LDAR. Without a leading + + // membar it's possible for a simple Dekker test to fail if loads + + // use LD;DMB but stores use STLR. This can happen if C2 compiles + + // the stores in one method and C1 compiles the loads in another. + + if (!CompilerConfig::is_c1_only_no_jvmci()) { + + __ membar(); + + } + + __ volatile_load_mem_reg(address, result, info); + +} +diff --cc src/hotspot/cpu/loongarch/c1_LIR_loongarch_64.cpp +index 127be89865e,00000000000..01e8c9f270e +mode 100644,000000..100644 +--- a/src/hotspot/cpu/loongarch/c1_LIR_loongarch_64.cpp ++++ b/src/hotspot/cpu/loongarch/c1_LIR_loongarch_64.cpp +@@@ -1,75 -1,0 +1,57 @@@ + +/* + + * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. + + * Copyright (c) 2021, Loongson Technology. All rights reserved. + + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + + * + + * This code is free software; you can redistribute it and/or modify it + + * under the terms of the GNU General Public License version 2 only, as + + * published by the Free Software Foundation. + + * + + * This code is distributed in the hope that it will be useful, but WITHOUT + + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + + * version 2 for more details (a copy is included in the LICENSE file that + + * accompanied this code). + + * + + * You should have received a copy of the GNU General Public License version + + * 2 along with this work; if not, write to the Free Software Foundation, + + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + + * + + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + + * or visit www.oracle.com if you need additional information or have any + + * questions. + + * + + */ + + + +#include "precompiled.hpp" + +#include "asm/register.hpp" + +#include "c1/c1_LIR.hpp" + + + +FloatRegister LIR_OprDesc::as_float_reg() const { + + return as_FloatRegister(fpu_regnr()); + +} + + + +FloatRegister LIR_OprDesc::as_double_reg() const { + + return as_FloatRegister(fpu_regnrLo()); + +} + + + +// Reg2 unused. 
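+ +// A double occupies one 64-bit FP register on LoongArch64, so reg1 fills both register fields below.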
+ +LIR_Opr LIR_OprFact::double_fpu(int reg1, int reg2) { + + assert(as_FloatRegister(reg2) == fnoreg, "Not used on this platform"); + + return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) | + + (reg1 << LIR_OprDesc::reg2_shift) | + + LIR_OprDesc::double_type | + + LIR_OprDesc::fpu_register | + + LIR_OprDesc::double_size); + +} + + + +#ifndef PRODUCT + +void LIR_Address::verify() const { + + assert(base()->is_cpu_register(), "wrong base operand"); + + assert(index()->is_illegal() || index()->is_double_cpu() || + + index()->is_single_cpu(), "wrong index operand"); + + assert(base()->type() == T_ADDRESS || base()->type() == T_OBJECT || + + base()->type() == T_LONG || base()->type() == T_METADATA, + + "wrong type for addresses"); + +} + +#endif // PRODUCT +- +- template <typename T> +- void LIR_List::cmp_branch(LIR_Condition condition, LIR_Opr left, LIR_Opr right, T tgt, CodeEmitInfo* info) { +- append(new LIR_OpCmpBranch(condition, left, right, tgt, info)); +- } +- +- // Explicit instantiation for all supported types. +- template void LIR_List::cmp_branch(LIR_Condition, LIR_Opr, LIR_Opr, Label*, CodeEmitInfo*); +- template void LIR_List::cmp_branch(LIR_Condition, LIR_Opr, LIR_Opr, BlockBegin*, CodeEmitInfo*); +- template void LIR_List::cmp_branch(LIR_Condition, LIR_Opr, LIR_Opr, CodeStub*, CodeEmitInfo*); +- +- void LIR_List::cmp_branch(LIR_Condition condition, LIR_Opr left, LIR_Opr right, BlockBegin* block, BlockBegin* unordered) { +- append(new LIR_OpCmpBranch(condition, left, right, block, unordered)); +- } +- +- void LIR_List::cmp_cmove(LIR_Condition condition, LIR_Opr left, LIR_Opr right, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type) { +- append(new LIR_Op4(lir_cmp_cmove, condition, left, right, src1, src2, dst, type)); +- } +diff --cc src/hotspot/cpu/loongarch/gc/z/zBarrierSetAssembler_loongarch.cpp +index 3ef43daa725,00000000000..372d80cf11b +mode 100644,000000..100644 +--- a/src/hotspot/cpu/loongarch/gc/z/zBarrierSetAssembler_loongarch.cpp ++++ b/src/hotspot/cpu/loongarch/gc/z/zBarrierSetAssembler_loongarch.cpp +@@@ -1,466 -1,0 +1,462 @@@ + +/* + + * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. + + * Copyright (c) 2021, 2022, Loongson Technology. All rights reserved. + + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + + * + + * This code is free software; you can redistribute it and/or modify it + + * under the terms of the GNU General Public License version 2 only, as + + * published by the Free Software Foundation. + + * + + * This code is distributed in the hope that it will be useful, but WITHOUT + + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + + * version 2 for more details (a copy is included in the LICENSE file that + + * accompanied this code). + + * + + * You should have received a copy of the GNU General Public License version + + * 2 along with this work; if not, write to the Free Software Foundation, + + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + + * + + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + + * or visit www.oracle.com if you need additional information or have any + + * questions.
+ + */ + + + +#include "precompiled.hpp" + +#include "asm/macroAssembler.inline.hpp" + +#include "code/codeBlob.hpp" + +#include "code/vmreg.inline.hpp" + +#include "gc/z/zBarrier.inline.hpp" + +#include "gc/z/zBarrierSet.hpp" + +#include "gc/z/zBarrierSetAssembler.hpp" + +#include "gc/z/zBarrierSetRuntime.hpp" + +#include "gc/z/zThreadLocalData.hpp" + +#include "memory/resourceArea.hpp" + +#include "runtime/sharedRuntime.hpp" + +#include "utilities/macros.hpp" + +#ifdef COMPILER1 + +#include "c1/c1_LIRAssembler.hpp" + +#include "c1/c1_MacroAssembler.hpp" + +#include "gc/z/c1/zBarrierSetC1.hpp" + +#endif // COMPILER1 + +#ifdef COMPILER2 + +#include "gc/z/c2/zBarrierSetC2.hpp" + +#endif // COMPILER2 + + + +#ifdef PRODUCT + +#define BLOCK_COMMENT(str) /* nothing */ + +#else + +#define BLOCK_COMMENT(str) __ block_comment(str) + +#endif + + + +#undef __ + +#define __ masm-> + + + +#define A0 RA0 + +#define A1 RA1 + +#define T4 RT4 + + + +void ZBarrierSetAssembler::load_at(MacroAssembler* masm, + + DecoratorSet decorators, + + BasicType type, + + Register dst, + + Address src, + + Register tmp1, + + Register tmp_thread) { + + if (!ZBarrierSet::barrier_needed(decorators, type)) { + + // Barrier not needed + + BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread); + + return; + + } + + + + // Allocate scratch register + + Register scratch = tmp1; + + + + assert_different_registers(dst, scratch, SCR1); + + + + Label done; + + + + // + + // Fast Path + + // + + + + // Load address + + __ lea(scratch, src); + + + + // Load oop at address + + __ ld_ptr(dst, scratch, 0); + + + + // Test address bad mask + + __ ld_ptr(SCR1, address_bad_mask_from_thread(TREG)); + + __ andr(SCR1, dst, SCR1); + + __ beqz(SCR1, done); + + + + // + + // Slow path + + // + + __ enter(); + + + + if (dst != V0) { + + __ push(V0); + + } + + __ push_call_clobbered_registers_except(RegSet::of(V0)); + + + + if (dst != A0) { + + __ move(A0, dst); + + } + + __ move(A1, scratch); + + __ MacroAssembler::call_VM_leaf_base(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2); + + + + __ pop_call_clobbered_registers_except(RegSet::of(V0)); + + + + // Make sure dst has the return value. + + if (dst != V0) { + + __ move(dst, V0); + + __ pop(V0); + + } + + __ leave(); + + + + __ bind(done); + +} + + + +#ifdef ASSERT + + + +void ZBarrierSetAssembler::store_at(MacroAssembler* masm, + + DecoratorSet decorators, + + BasicType type, + + Address dst, + + Register val, + + Register tmp1, + + Register tmp2) { + + // Verify value + + if (is_reference_type(type)) { + + // Note that src could be noreg, which means we + + // are storing null and can skip verification. + + if (val != noreg) { + + Label done; + + + + // tmp1 and tmp2 are often set to noreg. 
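+ + // The bad-mask check below therefore relies only on the AT scratch register.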
+ + + + __ ld_ptr(AT, address_bad_mask_from_thread(TREG)); + + __ andr(AT, val, AT); + + __ beqz(AT, done); + + __ stop("Verify oop store failed"); + + __ should_not_reach_here(); + + __ bind(done); + + } + + } + + + + // Store value + + BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2); + +} + + + +#endif // ASSERT + + + +void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, + + DecoratorSet decorators, + + bool is_oop, + + Register src, + + Register dst, + + Register count, + + RegSet saved_regs) { + + if (!is_oop) { + + // Barrier not needed + + return; + + } + + + + BLOCK_COMMENT("ZBarrierSetAssembler::arraycopy_prologue {"); + + + + __ push(saved_regs); + + + + if (count == A0) { + + if (src == A1) { + + // exactly backwards!! + + __ move(AT, A0); + + __ move(A0, A1); + + __ move(A1, AT); + + } else { + + __ move(A1, count); + + __ move(A0, src); + + } + + } else { + + __ move(A0, src); + + __ move(A1, count); + + } + + + + __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_array_addr(), 2); + + + + __ pop(saved_regs); + + + + BLOCK_COMMENT("} ZBarrierSetAssembler::arraycopy_prologue"); + +} + + + +void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, + + Register jni_env, + + Register robj, + + Register tmp, + + Label& slowpath) { + + BLOCK_COMMENT("ZBarrierSetAssembler::try_resolve_jobject_in_native {"); + + + + assert_different_registers(jni_env, robj, tmp); + + + + // Resolve jobject + + BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, robj, tmp, slowpath); + + + + // The Address offset is too large to load directly - -784. Our range is +127, -128. + + __ li(tmp, (int64_t)(in_bytes(ZThreadLocalData::address_bad_mask_offset()) - + + in_bytes(JavaThread::jni_environment_offset()))); + + + + // Load address bad mask + + __ ldx_d(tmp, jni_env, tmp); + + + + // Check address bad mask + + __ andr(AT, robj, tmp); + + __ bnez(AT, slowpath); + + + + BLOCK_COMMENT("} ZBarrierSetAssembler::try_resolve_jobject_in_native"); + +} + + + +#ifdef COMPILER1 + + + +#undef __ + +#define __ ce->masm()-> + + + +void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce, +- LIR_Opr ref, +- LIR_Opr res) const { +- Register rscratch1 = AT; +- Register rthread = TREG; +- assert_different_registers(rscratch1, rthread, ref->as_register()); +- +- __ ld_d(rscratch1, address_bad_mask_from_thread(rthread)); +- __ andr(res->as_register(), ref->as_register(), rscratch1); +++ LIR_Opr ref) const { +++ assert_different_registers(SCR1, TREG, ref->as_register()); +++ __ ld_d(SCR1, address_bad_mask_from_thread(TREG)); +++ __ andr(SCR1, SCR1, ref->as_register()); + +} + + + +void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce, + + ZLoadBarrierStubC1* stub) const { + + // Stub entry + + __ bind(*stub->entry()); + + + + Register ref = stub->ref()->as_register(); + + Register ref_addr = noreg; + + Register tmp = noreg; + + + + if (stub->tmp()->is_valid()) { + + // Load address into tmp register + + ce->leal(stub->ref_addr(), stub->tmp()); + + ref_addr = tmp = stub->tmp()->as_pointer_register(); + + } else { + + // Address already in register + + ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register(); + + } + + + + assert_different_registers(ref, ref_addr, noreg); + + + + // Save V0 unless it is the result or tmp register + + // Set up SP to accommodate parameters and maybe V0.
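+ + // (the 32-byte frame leaves room for the two store_parameter slots, with V0 saved at SP+16 when it must be preserved)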
+ + if (ref != V0 && tmp != V0) { + + __ addi_d(SP, SP, -32); + + __ st_d(V0, SP, 16); + + } else { + + __ addi_d(SP, SP, -16); + + } + + + + // Setup arguments and call runtime stub + + ce->store_parameter(ref_addr, 1); + + ce->store_parameter(ref, 0); + + + + __ call(stub->runtime_stub(), relocInfo::runtime_call_type); + + + + // Verify result + + __ verify_oop(V0, "Bad oop"); + + + + // Move result into place + + if (ref != V0) { + + __ move(ref, V0); + + } + + + + // Restore V0 unless it is the result or tmp register + + if (ref != V0 && tmp != V0) { + + __ ld_d(V0, SP, 16); + + __ addi_d(SP, SP, 32); + + } else { + + __ addi_d(SP, SP, 16); + + } + + + + // Stub exit + + __ b(*stub->continuation()); + +} + + + +#undef __ + +#define __ sasm-> + + + +void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm, + + DecoratorSet decorators) const { + + __ prologue("zgc_load_barrier stub", false); + + + + __ push_call_clobbered_registers_except(RegSet::of(V0)); + + + + // Setup arguments + + __ load_parameter(0, A0); + + __ load_parameter(1, A1); + + + + __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2); + + + + __ pop_call_clobbered_registers_except(RegSet::of(V0)); + + + + __ epilogue(); + +} + +#endif // COMPILER1 + + + +#ifdef COMPILER2 + + + +OptoReg::Name ZBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) { + + if (!OptoReg::is_reg(opto_reg)) { + + return OptoReg::Bad; + + } + + + + const VMReg vm_reg = OptoReg::as_VMReg(opto_reg); + + if (vm_reg->is_FloatRegister()) { + + return opto_reg & ~1; + + } + + + + return opto_reg; + +} + + + +#undef __ + +#define __ _masm-> + + + +class ZSaveLiveRegisters { + +private: + + MacroAssembler* const _masm; + + RegSet _gp_regs; + + FloatRegSet _fp_regs; + + FloatRegSet _lsx_vp_regs; + + FloatRegSet _lasx_vp_regs; + + + +public: + + void initialize(ZLoadBarrierStubC2* stub) { + + // Record registers that need to be saved/restored + + RegMaskIterator rmi(stub->live()); + + while (rmi.has_next()) { + + const OptoReg::Name opto_reg = rmi.next(); + + if (OptoReg::is_reg(opto_reg)) { + + const VMReg vm_reg = OptoReg::as_VMReg(opto_reg); + + if (vm_reg->is_Register()) { + + _gp_regs += RegSet::of(vm_reg->as_Register()); + + } else if (vm_reg->is_FloatRegister()) { + + if (UseLASX && vm_reg->next(7)) + + _lasx_vp_regs += FloatRegSet::of(vm_reg->as_FloatRegister()); + + else if (UseLSX && vm_reg->next(3)) + + _lsx_vp_regs += FloatRegSet::of(vm_reg->as_FloatRegister()); + + else + + _fp_regs += FloatRegSet::of(vm_reg->as_FloatRegister()); + + } else { + + fatal("Unknown register type"); + + } + + } + + } + + + + // Remove C-ABI SOE registers, scratch regs and _ref register that will be updated + + _gp_regs -= RegSet::range(S0, S7) + RegSet::of(SP, SCR1, SCR2, stub->ref()); + + } + + + + ZSaveLiveRegisters(MacroAssembler* masm, ZLoadBarrierStubC2* stub) : + + _masm(masm), + + _gp_regs(), + + _fp_regs(), + + _lsx_vp_regs(), + + _lasx_vp_regs() { + + + + // Figure out what registers to save/restore + + initialize(stub); + + + + // Save registers + + __ push(_gp_regs); + + __ push_fpu(_fp_regs); + + __ push_vp(_lsx_vp_regs /* UseLSX */); + + __ push_vp(_lasx_vp_regs /* UseLASX */); + + } + + + + ~ZSaveLiveRegisters() { + + // Restore registers + + __ pop_vp(_lasx_vp_regs /* UseLASX */); + + __ pop_vp(_lsx_vp_regs /* UseLSX */); + + __ pop_fpu(_fp_regs); + + __ pop(_gp_regs); + + } + +}; + + + +#undef __ + +#define __ _masm-> + + + +class ZSetupArguments { + 
+private: + + MacroAssembler* const _masm; + + const Register _ref; + + const Address _ref_addr; + + + +public: + + ZSetupArguments(MacroAssembler* masm, ZLoadBarrierStubC2* stub) : + + _masm(masm), + + _ref(stub->ref()), + + _ref_addr(stub->ref_addr()) { + + + + // Setup arguments + + if (_ref_addr.base() == noreg) { + + // No self healing + + if (_ref != A0) { + + __ move(A0, _ref); + + } + + __ move(A1, 0); + + } else { + + // Self healing + + if (_ref == A0) { + + // _ref is already at correct place + + __ lea(A1, _ref_addr); + + } else if (_ref != A1) { + + // _ref is in wrong place, but not in A1, so fix it first + + __ lea(A1, _ref_addr); + + __ move(A0, _ref); + + } else if (_ref_addr.base() != A0 && _ref_addr.index() != A0) { + + assert(_ref == A1, "Mov ref first, vacating A0"); + + __ move(A0, _ref); + + __ lea(A1, _ref_addr); + + } else { + + assert(_ref == A1, "Need to vacate A1 and _ref_addr is using A0"); + + if (_ref_addr.base() == A0 || _ref_addr.index() == A0) { + + __ move(T4, A1); + + __ lea(A1, _ref_addr); + + __ move(A0, T4); + + } else { + + ShouldNotReachHere(); + + } + + } + + } + + } + + + + ~ZSetupArguments() { + + // Transfer result + + if (_ref != V0) { + + __ move(_ref, V0); + + } + + } + +}; + + + +#undef __ + +#define __ masm-> + + + +void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const { + + BLOCK_COMMENT("ZLoadBarrierStubC2"); + + + + // Stub entry + + __ bind(*stub->entry()); + + + + { + + ZSaveLiveRegisters save_live_registers(masm, stub); + + ZSetupArguments setup_arguments(masm, stub); + + __ call_VM_leaf(stub->slow_path(), 2); + + } + + // Stub exit + + __ b(*stub->continuation()); + +} + + + +#undef __ + + + +#endif // COMPILER2 +diff --cc src/hotspot/cpu/loongarch/gc/z/zBarrierSetAssembler_loongarch.hpp +index 8d032c34995,00000000000..6a96d6fdd60 +mode 100644,000000..100644 +--- a/src/hotspot/cpu/loongarch/gc/z/zBarrierSetAssembler_loongarch.hpp ++++ b/src/hotspot/cpu/loongarch/gc/z/zBarrierSetAssembler_loongarch.hpp +@@@ -1,102 -1,0 +1,101 @@@ + +/* + + * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. + + * Copyright (c) 2021, Loongson Technology. All rights reserved. + + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + + * + + * This code is free software; you can redistribute it and/or modify it + + * under the terms of the GNU General Public License version 2 only, as + + * published by the Free Software Foundation. + + * + + * This code is distributed in the hope that it will be useful, but WITHOUT + + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + + * version 2 for more details (a copy is included in the LICENSE file that + + * accompanied this code). + + * + + * You should have received a copy of the GNU General Public License version + + * 2 along with this work; if not, write to the Free Software Foundation, + + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + + * + + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + + * or visit www.oracle.com if you need additional information or have any + + * questions. 
+ + */ + + + +#ifndef CPU_LOONGARCH_GC_Z_ZBARRIERSETASSEMBLER_LOONGARCH_HPP + +#define CPU_LOONGARCH_GC_Z_ZBARRIERSETASSEMBLER_LOONGARCH_HPP + + + +#include "code/vmreg.hpp" + +#include "oops/accessDecorators.hpp" + +#ifdef COMPILER2 + +#include "opto/optoreg.hpp" + +#endif // COMPILER2 + + + +#ifdef COMPILER1 + +class LIR_Assembler; + +class LIR_OprDesc; + +typedef LIR_OprDesc* LIR_Opr; + +class StubAssembler; + +class ZLoadBarrierStubC1; + +#endif // COMPILER1 + + + +#ifdef COMPILER2 + +class Node; + +class ZLoadBarrierStubC2; + +#endif // COMPILER2 + + + +class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase { + +public: + + virtual void load_at(MacroAssembler* masm, + + DecoratorSet decorators, + + BasicType type, + + Register dst, + + Address src, + + Register tmp1, + + Register tmp_thread); + + + +#ifdef ASSERT + + virtual void store_at(MacroAssembler* masm, + + DecoratorSet decorators, + + BasicType type, + + Address dst, + + Register val, + + Register tmp1, + + Register tmp2); + +#endif // ASSERT + + + + virtual void arraycopy_prologue(MacroAssembler* masm, + + DecoratorSet decorators, + + bool is_oop, + + Register src, + + Register dst, + + Register count, + + RegSet saved_regs); + + + + virtual void try_resolve_jobject_in_native(MacroAssembler* masm, + + Register jni_env, + + Register robj, + + Register tmp, + + Label& slowpath); + + + +#ifdef COMPILER1 + + void generate_c1_load_barrier_test(LIR_Assembler* ce, +- LIR_Opr ref, +- LIR_Opr res) const; +++ LIR_Opr ref) const; + + + + void generate_c1_load_barrier_stub(LIR_Assembler* ce, + + ZLoadBarrierStubC1* stub) const; + + + + void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm, + + DecoratorSet decorators) const; + +#endif // COMPILER1 + + + +#ifdef COMPILER2 + + OptoReg::Name refine_register(const Node* node, + + OptoReg::Name opto_reg); + + + + void generate_c2_load_barrier_stub(MacroAssembler* masm, + + ZLoadBarrierStubC2* stub) const; + +#endif // COMPILER2 + +}; + + + +#endif // CPU_LOONGARCH_GC_Z_ZBARRIERSETASSEMBLER_LOONGARCH_HPP +diff --cc src/hotspot/cpu/loongarch/gc/z/zGlobals_loongarch.hpp +index 7d20899d949,00000000000..542fd267434 +mode 100644,000000..100644 +--- a/src/hotspot/cpu/loongarch/gc/z/zGlobals_loongarch.hpp ++++ b/src/hotspot/cpu/loongarch/gc/z/zGlobals_loongarch.hpp +@@@ -1,37 -1,0 +1,35 @@@ + +/* + + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + + * Copyright (c) 2021, Loongson Technology. All rights reserved. + + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + + * + + * This code is free software; you can redistribute it and/or modify it + + * under the terms of the GNU General Public License version 2 only, as + + * published by the Free Software Foundation. + + * + + * This code is distributed in the hope that it will be useful, but WITHOUT + + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + + * version 2 for more details (a copy is included in the LICENSE file that + + * accompanied this code). + + * + + * You should have received a copy of the GNU General Public License version + + * 2 along with this work; if not, write to the Free Software Foundation, + + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + + * + + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + + * or visit www.oracle.com if you need additional information or have any + + * questions. 
+ + */ + + + +#ifndef CPU_LOONGARCH_GC_Z_ZGLOBALS_LOONGARCH_HPP + +#define CPU_LOONGARCH_GC_Z_ZGLOBALS_LOONGARCH_HPP + + + +const size_t ZPlatformGranuleSizeShift = 21; // 2MB + +const size_t ZPlatformHeapViews = 3; + +const size_t ZPlatformCacheLineSize = 64; + + +- const bool ZPlatformLoadBarrierTestResultInRegister = true; +- + +size_t ZPlatformAddressOffsetBits(); + +size_t ZPlatformAddressMetadataShift(); + + + +#endif // CPU_LOONGARCH_GC_Z_ZGLOBALS_LOONGARCH_HPP +diff --cc src/hotspot/share/c1/c1_LIR.cpp +index f1af08d5df0,308f3a09c15..53a68cdb2fd +--- a/src/hotspot/share/c1/c1_LIR.cpp ++++ b/src/hotspot/share/c1/c1_LIR.cpp +@@@ -22,6 -22,6 +22,12 @@@ + * + */ + +++/* +++ * This file has been modified by Loongson Technology in 2023, These +++ * modifications are Copyright (c) 2022, 2023, Loongson Technology, and are made +++ * available on the same license terms set forth above. +++ */ +++ + #include "precompiled.hpp" + #include "c1/c1_CodeStubs.hpp" + #include "c1/c1_InstructionPrinter.hpp" +@@@ -188,6 -188,9 +194,11 @@@ void LIR_Op2::verify() const + #ifdef ASSERT + switch (code()) { + case lir_cmove: ++ #ifdef RISCV ++ assert(false, "lir_cmove is LIR_Op4 on RISCV"); +++#elif defined(LOONGARCH) +++ assert(false, "lir_cmove is LIR_Op4 on LoongArch"); ++ #endif + case lir_xchg: + break; + +@@@ -236,22 -239,14 +247,14 @@@ + #endif + } + +- void LIR_Op4::verify() const { +- #ifdef ASSERT +- switch (code()) { +- case lir_cmp_cmove: +- break; +- +- default: +- assert(!result_opr()->is_register() || !result_opr()->is_oop_register(), +- "can't produce oops from arith"); +- } +- #endif +- } + + LIR_OpBranch::LIR_OpBranch(LIR_Condition cond, BlockBegin* block) + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) ++ : LIR_Op2(lir_branch, cond, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, (CodeEmitInfo*)NULL) ++ #else + : LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*)NULL) + , _cond(cond) ++ #endif + , _label(block->label()) + , _block(block) + , _ublock(NULL) +@@@ -259,8 -254,12 +262,12 @@@ + } + + LIR_OpBranch::LIR_OpBranch(LIR_Condition cond, CodeStub* stub) : + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) ++ LIR_Op2(lir_branch, cond, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, (CodeEmitInfo*)NULL) ++ #else + LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*)NULL) + , _cond(cond) ++ #endif + , _label(stub->entry()) + , _block(NULL) + , _ublock(NULL) +@@@ -268,8 -267,12 +275,12 @@@ + } + + LIR_OpBranch::LIR_OpBranch(LIR_Condition cond, BlockBegin* block, BlockBegin* ublock) + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) ++ : LIR_Op2(lir_cond_float_branch, cond, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, (CodeEmitInfo*)NULL) ++ #else + : LIR_Op(lir_cond_float_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*)NULL) + , _cond(cond) ++ #endif + , _label(block->label()) + , _block(block) + , _ublock(ublock) +@@@ -559,7 -512,10 +520,11 @@@ void LIR_OpVisitState::visit(LIR_Op* op + assert(opConvert->_info == NULL, "must be"); + if (opConvert->_opr->is_valid()) do_input(opConvert->_opr); + if (opConvert->_result->is_valid()) do_output(opConvert->_result); + + if (opConvert->_tmp->is_valid()) do_temp(opConvert->_tmp); ++ #ifdef PPC32 ++ if (opConvert->_tmp1->is_valid()) do_temp(opConvert->_tmp1); ++ if (opConvert->_tmp2->is_valid()) do_temp(opConvert->_tmp2); ++ #endif + do_stub(opConvert->_stub); + + break; +@@@ -572,6 -528,15 +537,15 @@@ + assert(op->as_OpBranch() != NULL, "must be"); + LIR_OpBranch* opBranch = 
(LIR_OpBranch*)op; + + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) ++ assert(opBranch->_tmp1->is_illegal() && opBranch->_tmp2->is_illegal() && ++ opBranch->_tmp3->is_illegal() && opBranch->_tmp4->is_illegal() && ++ opBranch->_tmp5->is_illegal(), "not used"); ++ ++ if (opBranch->_opr1->is_valid()) do_input(opBranch->_opr1); ++ if (opBranch->_opr2->is_valid()) do_input(opBranch->_opr2); ++ #endif ++ + if (opBranch->_info != NULL) do_info(opBranch->_info); + assert(opBranch->_result->is_illegal(), "not used"); + if (opBranch->_stub != NULL) opBranch->stub()->visit(this); +@@@ -679,6 -625,21 +634,21 @@@ + // to the result operand, otherwise the backend fails + case lir_cmove: + { + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) ++ assert(op->as_Op4() != NULL, "must be"); ++ LIR_Op4* op4 = (LIR_Op4*)op; ++ ++ assert(op4->_info == NULL && op4->_tmp1->is_illegal() && op4->_tmp2->is_illegal() && ++ op4->_tmp3->is_illegal() && op4->_tmp4->is_illegal() && op4->_tmp5->is_illegal(), "not used"); ++ assert(op4->_opr1->is_valid() && op4->_opr2->is_valid() && op4->_result->is_valid(), "used"); ++ ++ do_input(op4->_opr1); ++ do_input(op4->_opr2); ++ if (op4->_opr3->is_valid()) do_input(op4->_opr3); ++ if (op4->_opr4->is_valid()) do_input(op4->_opr4); ++ do_temp(op4->_opr2); ++ do_output(op4->_result); ++ #else + assert(op->as_Op2() != NULL, "must be"); + LIR_Op2* op2 = (LIR_Op2*)op; + +@@@ -1150,6 -1095,7 +1104,7 @@@ void LIR_Op3::emit_code(LIR_Assembler* + masm->emit_op3(this); + } + + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) + void LIR_Op4::emit_code(LIR_Assembler* masm) { + masm->emit_op4(this); + } +@@@ -1190,6 -1141,10 +1150,10 @@@ LIR_List::LIR_List(Compilation* compila + , _file(NULL) + , _line(0) + #endif + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) ++ , _cmp_opr1(LIR_OprFact::illegalOpr) ++ , _cmp_opr2(LIR_OprFact::illegalOpr) ++ #endif + { } + + +@@@ -1207,6 -1162,38 +1171,38 @@@ void LIR_List::set_file_and_line(const + } + #endif + + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) ++ void LIR_List::set_cmp_oprs(LIR_Op* op) { ++ switch (op->code()) { ++ case lir_cmp: ++ _cmp_opr1 = op->as_Op2()->in_opr1(); ++ _cmp_opr2 = op->as_Op2()->in_opr2(); ++ break; ++ case lir_branch: // fall through ++ case lir_cond_float_branch: ++ assert(op->as_OpBranch()->cond() == lir_cond_always || ++ (_cmp_opr1 != LIR_OprFact::illegalOpr && _cmp_opr2 != LIR_OprFact::illegalOpr), ++ "conditional branches must have legal operands"); ++ if (op->as_OpBranch()->cond() != lir_cond_always) { ++ op->as_Op2()->set_in_opr1(_cmp_opr1); ++ op->as_Op2()->set_in_opr2(_cmp_opr2); ++ } ++ break; ++ case lir_cmove: ++ op->as_Op4()->set_in_opr3(_cmp_opr1); ++ op->as_Op4()->set_in_opr4(_cmp_opr2); ++ break; ++ #if INCLUDE_ZGC ++ case lir_zloadbarrier_test: + - _cmp_opr1 = FrameMap::as_opr(t1); +++ _cmp_opr1 = FrameMap::as_opr(RISCV_ONLY(t1) LOONGARCH64_ONLY(SCR1)); ++ _cmp_opr2 = LIR_OprFact::intConst(0); ++ break; ++ #endif ++ default: ++ break; ++ } ++ } ++ #endif + + void LIR_List::append(LIR_InsertionBuffer* buffer) { + assert(this == buffer->lir_list(), "wrong lir list"); +@@@ -1940,26 -1924,10 +1933,10 @@@ void LIR_Op1::print_patch_code(outputSt + // LIR_OpBranch + void LIR_OpBranch::print_instr(outputStream* out) const { + print_condition(out, cond()); out->print(" "); +- if (block() != NULL) { +- out->print("[B%d] ", block()->block_id()); +- } else if (stub() != NULL) { +- out->print("["); +- stub()->print_name(out); +- out->print(": " INTPTR_FORMAT "]", 
p2i(stub())); +- if (stub()->info() != NULL) out->print(" [bci:%d]", stub()->info()->stack()->bci()); +- } else { +- out->print("[label:" INTPTR_FORMAT "] ", p2i(label())); +- } +- if (ublock() != NULL) { +- out->print("unordered: [B%d] ", ublock()->block_id()); +- } +- } +- +- // LIR_OpCmpBranch +- void LIR_OpCmpBranch::print_instr(outputStream* out) const { +- print_condition(out, condition()); out->print(" "); +- in_opr1()->print(out); out->print(" "); +- in_opr2()->print(out); out->print(" "); + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) ++ in_opr1()->print(out); out->print(" "); ++ in_opr2()->print(out); out->print(" "); ++ #endif + if (block() != NULL) { + out->print("[B%d] ", block()->block_id()); + } else if (stub() != NULL) { +@@@ -1995,9 -1963,12 +1972,15 @@@ void LIR_OpConvert::print_instr(outputS + print_bytecode(out, bytecode()); + in_opr()->print(out); out->print(" "); + result_opr()->print(out); out->print(" "); + + if(tmp()->is_valid()) { + + tmp()->print(out); out->print(" "); + + } ++ #ifdef PPC32 ++ if(tmp1()->is_valid()) { ++ tmp1()->print(out); out->print(" "); ++ tmp2()->print(out); out->print(" "); ++ } ++ #endif + } + + void LIR_OpConvert::print_bytecode(outputStream* out, Bytecodes::Code code) { +@@@ -2043,7 -2014,11 +2026,11 @@@ void LIR_OpRoundFP::print_instr(outputS + + // LIR_Op2 + void LIR_Op2::print_instr(outputStream* out) const { + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) ++ if (code() == lir_cmp || code() == lir_branch || code() == lir_cond_float_branch) { ++ #else + if (code() == lir_cmove || code() == lir_cmp) { ++ #endif + print_condition(out, condition()); out->print(" "); + } + in_opr1()->print(out); out->print(" "); +@@@ -2094,19 -2069,17 +2081,17 @@@ void LIR_Op3::print_instr(outputStream + result_opr()->print(out); + } + +- + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) + // LIR_Op4 + void LIR_Op4::print_instr(outputStream* out) const { +- if (code() == lir_cmp_cmove) { +- print_condition(out, condition()); out->print(" "); +- } +- in_opr1()->print(out); out->print(" "); +- in_opr2()->print(out); out->print(" "); +- in_opr3()->print(out); out->print(" "); +- in_opr4()->print(out); out->print(" "); ++ print_condition(out, condition()); out->print(" "); ++ in_opr1()->print(out); out->print(" "); ++ in_opr2()->print(out); out->print(" "); ++ in_opr3()->print(out); out->print(" "); ++ in_opr4()->print(out); out->print(" "); + result_opr()->print(out); + } +- ++ #endif + + void LIR_OpLock::print_instr(outputStream* out) const { + hdr_opr()->print(out); out->print(" "); +diff --cc src/hotspot/share/c1/c1_LIR.hpp +index 00c3f938e11,717404e9726..0fffd4aabfc +--- a/src/hotspot/share/c1/c1_LIR.hpp ++++ b/src/hotspot/share/c1/c1_LIR.hpp +@@@ -22,6 -22,6 +22,12 @@@ + * + */ + +++/* +++ * This file has been modified by Loongson Technology in 2023, These +++ * modifications are Copyright (c) 2022, 2023, Loongson Technology, and are made +++ * available on the same license terms set forth above. 
+++ */ +++ + #ifndef SHARE_C1_C1_LIR_HPP + #define SHARE_C1_C1_LIR_HPP + +@@@ -869,8 -869,9 +875,9 @@@ class LIR_Op2 + class LIR_OpDelay; + class LIR_Op3; + class LIR_OpAllocArray; + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) + class LIR_Op4; ++ #endif + class LIR_OpCall; + class LIR_OpJavaCall; + class LIR_OpRTCall; +@@@ -915,8 -917,10 +923,10 @@@ enum LIR_Code + , lir_null_check + , lir_return + , lir_leal + -#ifndef RISCV +++#if !defined(RISCV) && !defined(LOONGARCH) + , lir_branch + , lir_cond_float_branch ++ #endif + , lir_move + , lir_convert + , lir_alloc_object +@@@ -924,15 -928,20 +934,20 @@@ + , lir_roundfp + , lir_safepoint + , lir_unwind ++ , lir_load_klass + , end_op1 + , begin_op2 + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) ++ , lir_branch ++ , lir_cond_float_branch ++ #endif + , lir_cmp + , lir_cmp_l2i + , lir_ucmp_fd2i + , lir_cmp_fd2i +- , lir_cmp_branch +- , lir_cmp_float_branch + -#ifndef RISCV +++#if !defined(RISCV) && !defined(LOONGARCH) + , lir_cmove ++ #endif + , lir_add + , lir_sub + , lir_mul +@@@ -960,9 -969,11 +975,11 @@@ + , lir_fmad + , lir_fmaf + , end_op3 + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) + , begin_op4 +- , lir_cmp_cmove ++ , lir_cmove + , end_op4 ++ #endif + , begin_opJavaCall + , lir_static_call + , lir_optvirtual_call +@@@ -999,6 -1010,11 +1016,11 @@@ + , begin_opAssert + , lir_assert + , end_opAssert + -#if defined(RISCV) && defined(INCLUDE_ZGC) +++#if (defined(RISCV) || defined(LOONGARCH)) && defined(INCLUDE_ZGC) ++ , begin_opZLoadBarrierTest ++ , lir_zloadbarrier_test ++ , end_opZLoadBarrierTest ++ #endif + }; + + +@@@ -1136,7 -1151,9 +1157,9 @@@ class LIR_Op: public CompilationResourc + virtual LIR_Op1* as_Op1() { return NULL; } + virtual LIR_Op2* as_Op2() { return NULL; } + virtual LIR_Op3* as_Op3() { return NULL; } + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) + virtual LIR_Op4* as_Op4() { return NULL; } ++ #endif + virtual LIR_OpArrayCopy* as_OpArrayCopy() { return NULL; } + virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32() { return NULL; } + virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; } +@@@ -1636,7 -1610,7 +1619,11 @@@ class LIR_Op2: public LIR_Op + , _tmp4(LIR_OprFact::illegalOpr) + , _tmp5(LIR_OprFact::illegalOpr) + , _condition(condition) { +- assert(code == lir_cmp || code == lir_cmp_branch || code == lir_cmp_float_branch || code == lir_assert, "code check"); + - assert(code == lir_cmp || code == lir_assert RISCV_ONLY(|| code == lir_branch || code == lir_cond_float_branch), "code check"); +++ assert(code == lir_cmp || code == lir_assert +++#if defined(RISCV) || defined(LOONGARCH) +++ || code == lir_branch || code == lir_cond_float_branch +++#endif +++ , "code check"); + } + + LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) +@@@ -1668,7 -1642,7 +1655,11 @@@ + , _tmp4(LIR_OprFact::illegalOpr) + , _tmp5(LIR_OprFact::illegalOpr) + , _condition(lir_cond_unknown) { +- assert((code != lir_cmp && code != lir_cmp_branch && code != lir_cmp_float_branch) && is_in_range(code, begin_op2, end_op2), "code check"); + - assert(code != lir_cmp && RISCV_ONLY(code != lir_branch && code != lir_cond_float_branch &&) is_in_range(code, begin_op2, end_op2), "code check"); +++ assert(code != lir_cmp && +++#if defined(RISCV) || defined(LOONGARCH) +++ code != lir_branch && code != lir_cond_float_branch && +++#endif +++ is_in_range(code, begin_op2, end_op2), "code check"); + } + + LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, 
LIR_Opr result, LIR_Opr tmp1, LIR_Opr tmp2 = LIR_OprFact::illegalOpr, +@@@ -1684,7 -1658,7 +1675,11 @@@ + , _tmp4(tmp4) + , _tmp5(tmp5) + , _condition(lir_cond_unknown) { +- assert((code != lir_cmp && code != lir_cmp_branch && code != lir_cmp_float_branch) && is_in_range(code, begin_op2, end_op2), "code check"); + - assert(code != lir_cmp && RISCV_ONLY(code != lir_branch && code != lir_cond_float_branch &&) is_in_range(code, begin_op2, end_op2), "code check"); +++ assert(code != lir_cmp && +++#if defined(RISCV) || defined(LOONGARCH) +++ code != lir_branch && code != lir_cond_float_branch && +++#endif +++ is_in_range(code, begin_op2, end_op2), "code check"); + } + + LIR_Opr in_opr1() const { return _opr1; } +@@@ -1696,12 -1670,18 +1691,18 @@@ + LIR_Opr tmp4_opr() const { return _tmp4; } + LIR_Opr tmp5_opr() const { return _tmp5; } + LIR_Condition condition() const { +- assert(code() == lir_cmp || code() == lir_cmp_branch || code() == lir_cmp_float_branch || code() == lir_cmove || code() == lir_assert, "only valid for cmp and cmove and assert"); +- return _condition; + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) ++ assert(code() == lir_cmp || code() == lir_branch || code() == lir_cond_float_branch || code() == lir_assert, "only valid for branch and assert"); return _condition; ++ #else ++ assert(code() == lir_cmp || code() == lir_cmove || code() == lir_assert, "only valid for cmp and cmove and assert"); return _condition; ++ #endif + } + void set_condition(LIR_Condition condition) { +- assert(code() == lir_cmp || code() == lir_cmp_branch || code() == lir_cmp_float_branch || code() == lir_cmove, "only valid for cmp and cmove"); +- _condition = condition; + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) ++ assert(code() == lir_cmp || code() == lir_branch || code() == lir_cond_float_branch, "only valid for branch"); _condition = condition; ++ #else ++ assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); _condition = condition; ++ #endif + } + + void set_fpu_stack_size(int size) { _fpu_stack_size = size; } +@@@ -1715,33 -1695,52 +1716,52 @@@ + virtual void print_instr(outputStream* out) const PRODUCT_RETURN; + }; + +- class LIR_OpCmpBranch: public LIR_Op2 { + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) ++ class LIR_OpBranch: public LIR_Op2 { ++ #else ++ class LIR_OpBranch: public LIR_Op { ++ #endif + friend class LIR_OpVisitState; + + private: + -#ifndef RISCV +++#if !defined(RISCV) && !defined(LOONGARCH) ++ LIR_Condition _cond; ++ #endif + Label* _label; + BlockBegin* _block; // if this is a branch to a block, this is the block +- BlockBegin* _ublock; // if this is a float-branch, this is the unorderd block ++ BlockBegin* _ublock; // if this is a float-branch, this is the unordered block + CodeStub* _stub; // if this is a branch to a stub, this is the stub + + public: +- LIR_OpCmpBranch(LIR_Condition cond, LIR_Opr left, LIR_Opr right, Label* lbl, CodeEmitInfo* info = NULL) +- : LIR_Op2(lir_cmp_branch, cond, left, right, info) ++ LIR_OpBranch(LIR_Condition cond, Label* lbl) + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) ++ : LIR_Op2(lir_branch, cond, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL) ++ #else ++ : LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL) ++ , _cond(cond) ++ #endif + , _label(lbl) + , _block(NULL) + , _ublock(NULL) + , _stub(NULL) { } + +- LIR_OpCmpBranch(LIR_Condition cond, LIR_Opr left, LIR_Opr right, CodeStub* stub, CodeEmitInfo* info = NULL); +- 
LIR_OpCmpBranch(LIR_Condition cond, LIR_Opr left, LIR_Opr right, BlockBegin* block, CodeEmitInfo* info = NULL); ++ LIR_OpBranch(LIR_Condition cond, BlockBegin* block); ++ LIR_OpBranch(LIR_Condition cond, CodeStub* stub); + + // for unordered comparisons +- LIR_OpCmpBranch(LIR_Condition cond, LIR_Opr left, LIR_Opr right, BlockBegin* block, BlockBegin* ublock, CodeEmitInfo* info = NULL); ++ LIR_OpBranch(LIR_Condition cond, BlockBegin* block, BlockBegin* ublock); + +- Label* label() const { return _label; } +- BlockBegin* block() const { return _block; } +- BlockBegin* ublock() const { return _ublock; } +- CodeStub* stub() const { return _stub; } + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) ++ LIR_Condition cond() const { return condition(); } ++ void set_cond(LIR_Condition cond) { set_condition(cond); } ++ #else ++ LIR_Condition cond() const { return _cond; } ++ void set_cond(LIR_Condition cond) { _cond = cond; } ++ #endif ++ Label* label() const { return _label; } ++ BlockBegin* block() const { return _block; } ++ BlockBegin* ublock() const { return _ublock; } ++ CodeStub* stub() const { return _stub; } + + void change_block(BlockBegin* b); + void change_ublock(BlockBegin* b); +@@@ -1815,22 -1814,25 +1835,25 @@@ class LIR_Op3: public LIR_Op + virtual void print_instr(outputStream* out) const PRODUCT_RETURN; + }; + +- + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) + class LIR_Op4: public LIR_Op { +- friend class LIR_OpVisitState; +- +- private: +- LIR_Opr _opr1; +- LIR_Opr _opr2; +- LIR_Opr _opr3; +- LIR_Opr _opr4; ++ friend class LIR_OpVisitState; ++ protected: ++ LIR_Opr _opr1; ++ LIR_Opr _opr2; ++ LIR_Opr _opr3; ++ LIR_Opr _opr4; + BasicType _type; ++ LIR_Opr _tmp1; ++ LIR_Opr _tmp2; ++ LIR_Opr _tmp3; ++ LIR_Opr _tmp4; ++ LIR_Opr _tmp5; + LIR_Condition _condition; + +- void verify() const; +- + public: +- LIR_Op4(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr opr3, LIR_Opr opr4, LIR_Opr result, BasicType type) ++ LIR_Op4(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr opr3, LIR_Opr opr4, ++ LIR_Opr result, BasicType type) + : LIR_Op(code, result, NULL) + , _opr1(opr1) + , _opr2(opr2) +@@@ -2079,6 -2112,10 +2133,10 @@@ class LIR_List: public CompilationResou + const char * _file; + int _line; + #endif + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) ++ LIR_Opr _cmp_opr1; ++ LIR_Opr _cmp_opr2; ++ #endif + + public: + void append(LIR_Op* op) { +@@@ -2091,6 -2128,12 +2149,12 @@@ + } + #endif // PRODUCT + + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) ++ set_cmp_oprs(op); ++ // lir_cmp set cmp oprs only on riscv ++ if (op->code() == lir_cmp) return; ++ #endif ++ + _operations.append(op); + + #ifdef ASSERT +@@@ -2107,6 -2150,10 +2171,10 @@@ + void set_file_and_line(const char * file, int line); + #endif + + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) ++ void set_cmp_oprs(LIR_Op* op); ++ #endif ++ + //---------- accessors --------------- + LIR_OpList* instructions_list() { return &_operations; } + int length() const { return _operations.length(); } +@@@ -2228,15 -2273,12 +2296,12 @@@ + void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info); + void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Address* addr, CodeEmitInfo* info); + + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) ++ void cmove(LIR_Condition condition, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type, ++ LIR_Opr cmp_opr1 = LIR_OprFact::illegalOpr, 
LIR_Opr cmp_opr2 = LIR_OprFact::illegalOpr) { ++ append(new LIR_Op4(lir_cmove, condition, src1, src2, cmp_opr1, cmp_opr2, dst, type)); ++ } ++ #else + void cmove(LIR_Condition condition, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type) { + append(new LIR_Op2(lir_cmove, condition, src1, src2, dst, type)); + } +diff --cc src/hotspot/share/c1/c1_LIRAssembler.cpp +index 09053d94b51,989a6f8ad25..e288de2ab8e +--- a/src/hotspot/share/c1/c1_LIRAssembler.cpp ++++ b/src/hotspot/share/c1/c1_LIRAssembler.cpp +@@@ -22,6 -22,6 +22,12 @@@ + * + */ + +++/* +++ * This file has been modified by Loongson Technology in 2023, These +++ * modifications are Copyright (c) 2022, 2023, Loongson Technology, and are made +++ * available on the same license terms set forth above. +++ */ +++ + #include "precompiled.hpp" + #include "asm/assembler.inline.hpp" + #include "c1/c1_Compilation.hpp" +@@@ -691,6 -691,7 +697,7 @@@ void LIR_Assembler::emit_op2(LIR_Op2* o + comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op); + break; + + -#ifndef RISCV +++#if !defined(RISCV) && !defined(LOONGARCH) + case lir_cmove: + cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type()); + break; +@@@ -756,11 -758,11 +764,11 @@@ + } + } + +- + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) + void LIR_Assembler::emit_op4(LIR_Op4* op) { +- switch (op->code()) { +- case lir_cmp_cmove: +- cmp_cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->in_opr3(), op->in_opr4(), op->result_opr(), op->type()); ++ switch(op->code()) { ++ case lir_cmove: ++ cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type(), op->in_opr3(), op->in_opr4()); + break; + + default: +diff --cc src/hotspot/share/c1/c1_LIRAssembler.hpp +index d0cceefdda1,c82baa15fe7..84c34db4985 +--- a/src/hotspot/share/c1/c1_LIRAssembler.hpp ++++ b/src/hotspot/share/c1/c1_LIRAssembler.hpp +@@@ -22,6 -22,6 +22,12 @@@ + * + */ + +++/* +++ * This file has been modified by Loongson Technology in 2023, These +++ * modifications are Copyright (c) 2022, 2023, Loongson Technology, and are made +++ * available on the same license terms set forth above. 
+++ */ +++ + #ifndef SHARE_C1_C1_LIRASSEMBLER_HPP + #define SHARE_C1_C1_LIRASSEMBLER_HPP + +@@@ -186,9 -186,10 +192,10 @@@ class LIR_Assembler: public Compilation + void emit_op1(LIR_Op1* op); + void emit_op2(LIR_Op2* op); + void emit_op3(LIR_Op3* op); + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) + void emit_op4(LIR_Op4* op); ++ #endif + void emit_opBranch(LIR_OpBranch* op); +- void emit_opCmpBranch(LIR_OpCmpBranch* op); + void emit_opLabel(LIR_OpLabel* op); + void emit_arraycopy(LIR_OpArrayCopy* op); + void emit_updatecrc32(LIR_OpUpdateCRC32* op); +@@@ -220,9 -222,12 +228,12 @@@ + void volatile_move_op(LIR_Opr src, LIR_Opr result, BasicType type, CodeEmitInfo* info); + void comp_mem_op(LIR_Opr src, LIR_Opr result, BasicType type, CodeEmitInfo* info); // info set for null exceptions + void comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr result, LIR_Op2* op); + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) ++ void cmove(LIR_Condition code, LIR_Opr left, LIR_Opr right, LIR_Opr result, BasicType type, ++ LIR_Opr cmp_opr1 = LIR_OprFact::illegalOpr, LIR_Opr cmp_opr2 = LIR_OprFact::illegalOpr); ++ #else + void cmove(LIR_Condition code, LIR_Opr left, LIR_Opr right, LIR_Opr result, BasicType type); +- void cmp_cmove(LIR_Condition code, LIR_Opr left, LIR_Opr right, LIR_Opr src1, LIR_Opr src2, LIR_Opr result, BasicType type); +- ++ #endif + void call( LIR_OpJavaCall* op, relocInfo::relocType rtype); + void ic_call( LIR_OpJavaCall* op); + void vtable_call( LIR_OpJavaCall* op); +diff --cc src/hotspot/share/c1/c1_LinearScan.cpp +index f81a440d237,d3d38d11a90..6947406b2e7 +--- a/src/hotspot/share/c1/c1_LinearScan.cpp ++++ b/src/hotspot/share/c1/c1_LinearScan.cpp +@@@ -35,12 -35,6 +35,12 @@@ + #include "runtime/timerTrace.hpp" + #include "utilities/bitMap.inline.hpp" + + +/* +- * This file has been modified by Loongson Technology in 2022, These +- * modifications are Copyright (c) 2022, Loongson Technology, and are made +++ * This file has been modified by Loongson Technology in 2023, These +++ * modifications are Copyright (c) 2022, 2023, Loongson Technology, and are made + + * available on the same license terms set forth above. + + */ + + + #ifndef PRODUCT + + static LinearScanStatistic _stat_before_alloc; +@@@ -1246,8 -1240,13 +1246,13 @@@ void LinearScan::add_register_hints(LIR + break; + } + case lir_cmove: { + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) ++ assert(op->as_Op4() != NULL, "lir_cmove must be LIR_Op4"); ++ LIR_Op4* cmove = (LIR_Op4*)op; ++ #else + assert(op->as_Op2() != NULL, "lir_cmove must be LIR_Op2"); + LIR_Op2* cmove = (LIR_Op2*)op; ++ #endif + + LIR_Opr move_from = cmove->in_opr1(); + LIR_Opr move_to = cmove->result_opr(); +@@@ -3161,6 -3151,9 +3157,9 @@@ void LinearScan::do_linear_scan() + } + } + + -#ifndef RISCV +++#if !defined(RISCV) && !defined(LOONGARCH) ++ // Disable these optimizations on riscv temporarily, because it does not ++ // work when the comparison operands are bound to branches or cmoves. + { TIME_LINEAR_SCAN(timer_optimize_lir); + + EdgeMoveOptimizer::optimize(ir()->code()); +@@@ -6403,14 -6385,23 +6391,23 @@@ void ControlFlowOptimizer::delete_unnec + // There might be a cmove inserted for profiling which depends on the same + // compare. If we change the condition of the respective compare, we have + // to take care of this cmove as well. 
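The comment above states the invariant the optimizer has to preserve: a profiling cmove that reuses the branch's compare must be re-conditioned whenever the branch condition is inverted. A minimal standalone sketch of that invariant, using hypothetical simplified types in place of LIR_Op2/LIR_Op4 (not HotSpot code):

    #include <cassert>

    enum Cond { LT, GE };                          // simplified LIR conditions
    inline Cond negate(Cond c) { return c == LT ? GE : LT; }

    struct Branch { Cond cond; };                  // stands in for LIR_OpBranch
    struct Cmove  { Cond cond; };                  // stands in for the cmove op

    int main() {
      Branch br{LT};
      Cmove  cm{LT};                               // profiling cmove on the same compare
      br.cond = negate(br.cond);                   // optimizer inverts the branch...
      cm.cond = negate(cm.cond);                   // ...so the cmove must follow suit
      assert(br.cond == cm.cond);                  // the invariant the real code asserts
    }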
+ -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) ++ LIR_Op4* prev_cmove = NULL; ++ #else + LIR_Op2* prev_cmove = NULL; ++ #endif + + for(int j = instructions->length() - 3; j >= 0 && prev_cmp == NULL; j--) { + prev_op = instructions->at(j); + // check for the cmove + if (prev_op->code() == lir_cmove) { + -#ifdef RISCV +++#if defined(RISCV) || defined(LOONGARCH) ++ assert(prev_op->as_Op4() != NULL, "cmove must be of type LIR_Op4"); ++ prev_cmove = (LIR_Op4*)prev_op; ++ #else + assert(prev_op->as_Op2() != NULL, "cmove must be of type LIR_Op2"); + prev_cmove = (LIR_Op2*)prev_op; ++ #endif + assert(prev_branch->cond() == prev_cmove->condition(), "should be the same"); + } + if (prev_op->code() == lir_cmp) { +diff --cc src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp +index c6717bb76af,7d31ff02e1a..07dac06aecf +--- a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp ++++ b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp +@@@ -41,7 -35,7 +41,7 @@@ + #include "utilities/defaultStream.hpp" + + void ShenandoahArguments::initialize() { +- #if !(defined AARCH64 || defined AMD64 || defined IA32 || defined PPC64 || defined LOONGARCH64) + -#if !(defined AARCH64 || defined AMD64 || defined IA32 || defined PPC64 || defined RISCV64) +++#if !(defined AARCH64 || defined AMD64 || defined IA32 || defined PPC64 || defined RISCV64 || defined LOONGARCH64) + vm_exit_during_initialization("Shenandoah GC is not supported on this platform."); + #endif + +diff --cc src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp +index b145a63363a,0e99bf107c1..d5541cf8966 +--- a/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp ++++ b/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp +@@@ -21,6 -21,6 +21,12 @@@ + * questions. + */ + +++/* +++ * This file has been modified by Loongson Technology in 2023, These +++ * modifications are Copyright (c) 2022, 2023, Loongson Technology, and are made +++ * available on the same license terms set forth above. 
+++ */
+++
+ #include "precompiled.hpp"
+ #include "c1/c1_LIR.hpp"
+ #include "c1/c1_LIRGenerator.hpp"
+@@@ -94,7 -94,11 +100,11 @@@ private
+
+ public:
+ LIR_OpZLoadBarrierTest(LIR_Opr opr) :
+ -#ifdef RISCV
+++#if defined(RISCV) || defined(LOONGARCH)
++ LIR_Op(lir_zloadbarrier_test, LIR_OprFact::illegalOpr, NULL),
++ #else
+ LIR_Op(),
++ #endif
+ _opr(opr) {}
+
+ virtual void visit(LIR_OpVisitState* state) {
+diff --cc src/hotspot/share/jfr/utilities/jfrBigEndian.hpp
+index c381aa11475,597ddb3800f..427a9503eaf
+--- a/src/hotspot/share/jfr/utilities/jfrBigEndian.hpp
++++ b/src/hotspot/share/jfr/utilities/jfrBigEndian.hpp
+@@@ -108,7 -102,7 +108,7 @@@ inline T JfrBigEndian::read_unaligned(c
+ inline bool JfrBigEndian::platform_supports_unaligned_reads(void) {
+ #if defined(IA32) || defined(AMD64) || defined(PPC) || defined(S390)
+ return true;
+- #elif defined(ARM) || defined(AARCH64) || defined(MIPS) || defined(LOONGARCH)
+ -#elif defined(ARM) || defined(AARCH64) || defined(RISCV)
+++#elif defined(ARM) || defined(AARCH64) || defined(RISCV) || defined(MIPS) || defined(LOONGARCH)
+ return false;
+ #else
+ #warning "Unconfigured platform"
+diff --cc src/hotspot/share/opto/output.cpp
+index 79c2b223588,8a1ed0d3160..596829c07ca
+--- a/src/hotspot/share/opto/output.cpp
++++ b/src/hotspot/share/opto/output.cpp
+@@@ -1016,28 -1010,7 +1016,28 @@@ void PhaseOutput::Process_OopMap_Node(M
+
+ // Add the safepoint in the DebugInfoRecorder
+ if( !mach->is_MachCall() ) {
+- mcall = NULL;
++ mcall = nullptr;
+ +#if defined(MIPS) || defined(LOONGARCH)
+ + // safepoint_pc_offset should point to the last instruction in safePoint.
+ + // On X86 and SPARC, a safePoint contains only one instruction.
+ + // However, on MIPS we must add the size of the safePoint to current_offset.
+ + // 0x2d6ff22c: lw s2, 0x14(s2)
+ + // last_pd->pc_offset()=308, pc_offset=304, bci=64
+ + // last_pd->pc_offset()=312, pc_offset=312, bci=64
+ + // src/hotspot/share/code/debugInfoRec.cpp:295, assert(last_pd->pc_offset() == pc_offset, "must be last pc")
+ + //
+ + // ;; Safepoint:
+ + // ---> pc_offset=304
+ + // 0x2d6ff230: lui at, 0x2b7a ; OopMap{s2=Oop s5=Oop t4=Oop off=308}
+ + // ;*goto
+ + // ; - java.util.Hashtable::get@64 (line 353)
+ + // ---> last_pd(308)
+ + // 0x2d6ff234: lw at, 0xffffc100(at) ;*goto
+ + // ; - java.util.Hashtable::get@64 (line 353)
+ + // ; {poll}
+ + // 0x2d6ff238: addiu s0, zero, 0x0
+ + safepoint_pc_offset += sfn->size(C->regalloc()) - 4;
+ +#endif
+ C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
+ } else {
+ mcall = mach->as_MachCall();
+diff --cc src/hotspot/share/runtime/thread.inline.hpp
+index 9b72ada86b6,d86fce3c8ac..71bfd4dfa19
+--- a/src/hotspot/share/runtime/thread.inline.hpp
++++ b/src/hotspot/share/runtime/thread.inline.hpp
+@@@ -138,7 -132,7 +138,7 @@@ inline void JavaThread::set_pending_asy
+ }
+
+ inline JavaThreadState JavaThread::thread_state() const {
+- #if defined(PPC64) || defined (AARCH64) || defined(LOONGARCH64)
+ -#if defined(PPC64) || defined (AARCH64) || defined(RISCV64)
+++#if defined(PPC64) || defined (AARCH64) || defined(RISCV64) || defined(LOONGARCH64)
+ // Use membars when accessing volatile _thread_state. See
+ // Threads::create_vm() for size checks.
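On the weakly ordered targets listed in the hunk above (PPC64, AArch64, RISCV64, LoongArch64), _thread_state is read with an acquire load and written with a release store rather than a plain volatile access. A stand-alone illustration of the same pairing with std::atomic, as an analogy only, not the HotSpot Atomic API:

    #include <atomic>

    static std::atomic<int> state{0};   // plays the role of _thread_state
    static int payload = 0;             // data published before the state change

    void writer() {
      payload = 42;                                    // ordinary store
      state.store(1, std::memory_order_release);       // like Atomic::release_store
    }

    int reader() {
      if (state.load(std::memory_order_acquire) == 1)  // like Atomic::load_acquire
        return payload;                                // guaranteed to observe 42
      return -1;
    }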
+ return (JavaThreadState) Atomic::load_acquire((volatile jint*)&_thread_state); +@@@ -150,7 -144,7 +150,7 @@@ + inline void JavaThread::set_thread_state(JavaThreadState s) { + assert(current_or_null() == NULL || current_or_null() == this, + "state change should only be called by the current thread"); +- #if defined(PPC64) || defined (AARCH64) || defined(LOONGARCH64) + -#if defined(PPC64) || defined (AARCH64) || defined(RISCV64) +++#if defined(PPC64) || defined (AARCH64) || defined(RISCV64) || defined(LOONGARCH64) + // Use membars when accessing volatile _thread_state. See + // Threads::create_vm() for size checks. + Atomic::release_store((volatile jint*)&_thread_state, (jint)s); +diff --cc src/jdk.hotspot.agent/linux/native/libsaproc/LinuxDebuggerLocal.cpp +index c2a144f49b8,9accba375a2..200bb1e82f3 +--- a/src/jdk.hotspot.agent/linux/native/libsaproc/LinuxDebuggerLocal.cpp ++++ b/src/jdk.hotspot.agent/linux/native/libsaproc/LinuxDebuggerLocal.cpp +@@@ -67,10 -60,10 +67,14 @@@ + #include "sun_jvm_hotspot_debugger_aarch64_AARCH64ThreadContext.h" + #endif + ++ #ifdef riscv64 ++ #include "sun_jvm_hotspot_debugger_riscv64_RISCV64ThreadContext.h" ++ #endif ++ + +#ifdef loongarch64 + +#include "sun_jvm_hotspot_debugger_loongarch64_LOONGARCH64ThreadContext.h" + +#endif + + + class AutoJavaString { + JNIEnv* m_env; + jstring m_str; +@@@ -419,7 -412,7 +423,7 @@@ JNIEXPORT jbyteArray JNICALL Java_sun_j + return (err == PS_OK)? array : 0; + } + +- #if defined(i586) || defined(amd64) || defined(ppc64) || defined(ppc64le) || defined(aarch64) || defined(loongarch64) + -#if defined(i586) || defined(amd64) || defined(ppc64) || defined(ppc64le) || defined(aarch64) || defined(riscv64) +++#if defined(i586) || defined(amd64) || defined(ppc64) || defined(ppc64le) || defined(aarch64) || defined(riscv64) || defined(loongarch64) + extern "C" + JNIEXPORT jlongArray JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLocal_getThreadIntegerRegisterSet0 + (JNIEnv *env, jobject this_obj, jint lwp_id) { +@@@ -451,9 -444,9 +455,12 @@@ + #ifdef aarch64 + #define NPRGREG sun_jvm_hotspot_debugger_aarch64_AARCH64ThreadContext_NPRGREG + #endif ++ #ifdef riscv64 ++ #define NPRGREG sun_jvm_hotspot_debugger_riscv64_RISCV64ThreadContext_NPRGREG ++ #endif + +#ifdef loongarch64 + +#define NPRGREG sun_jvm_hotspot_debugger_loongarch64_LOONGARCH64ThreadContext_NPRGREG + +#endif + #if defined(ppc64) || defined(ppc64le) + #define NPRGREG sun_jvm_hotspot_debugger_ppc64_PPC64ThreadContext_NPRGREG + #endif +@@@ -530,18 -523,44 +537,56 @@@ + } + #endif /* aarch64 */ + ++ #if defined(riscv64) ++ #define REG_INDEX(reg) sun_jvm_hotspot_debugger_riscv64_RISCV64ThreadContext_##reg ++ ++ regs[REG_INDEX(PC)] = gregs.pc; ++ regs[REG_INDEX(LR)] = gregs.ra; ++ regs[REG_INDEX(SP)] = gregs.sp; ++ regs[REG_INDEX(R3)] = gregs.gp; ++ regs[REG_INDEX(R4)] = gregs.tp; ++ regs[REG_INDEX(R5)] = gregs.t0; ++ regs[REG_INDEX(R6)] = gregs.t1; ++ regs[REG_INDEX(R7)] = gregs.t2; ++ regs[REG_INDEX(R8)] = gregs.s0; ++ regs[REG_INDEX(R9)] = gregs.s1; ++ regs[REG_INDEX(R10)] = gregs.a0; ++ regs[REG_INDEX(R11)] = gregs.a1; ++ regs[REG_INDEX(R12)] = gregs.a2; ++ regs[REG_INDEX(R13)] = gregs.a3; ++ regs[REG_INDEX(R14)] = gregs.a4; ++ regs[REG_INDEX(R15)] = gregs.a5; ++ regs[REG_INDEX(R16)] = gregs.a6; ++ regs[REG_INDEX(R17)] = gregs.a7; ++ regs[REG_INDEX(R18)] = gregs.s2; ++ regs[REG_INDEX(R19)] = gregs.s3; ++ regs[REG_INDEX(R20)] = gregs.s4; ++ regs[REG_INDEX(R21)] = gregs.s5; ++ regs[REG_INDEX(R22)] = gregs.s6; ++ regs[REG_INDEX(R23)] = gregs.s7; ++ 
regs[REG_INDEX(R24)] = gregs.s8; ++ regs[REG_INDEX(R25)] = gregs.s9; ++ regs[REG_INDEX(R26)] = gregs.s10; ++ regs[REG_INDEX(R27)] = gregs.s11; ++ regs[REG_INDEX(R28)] = gregs.t3; ++ regs[REG_INDEX(R29)] = gregs.t4; ++ regs[REG_INDEX(R30)] = gregs.t5; ++ regs[REG_INDEX(R31)] = gregs.t6; ++ ++ #endif /* riscv64 */ ++ + +#if defined(loongarch64) + + + +#define REG_INDEX(reg) sun_jvm_hotspot_debugger_loongarch64_LOONGARCH64ThreadContext_##reg + + + + { + + int i; + + for (i = 0; i < 31; i++) + + regs[i] = gregs.regs[i]; + + regs[REG_INDEX(PC)] = gregs.csr_era; + + } + +#endif /* loongarch64 */ + + + #if defined(ppc64) || defined(ppc64le) + #define REG_INDEX(reg) sun_jvm_hotspot_debugger_ppc64_PPC64ThreadContext_##reg + +diff --cc src/jdk.hotspot.agent/linux/native/libsaproc/libproc.h +index 17920fafec9,a69496e77a4..64312b4705d +--- a/src/jdk.hotspot.agent/linux/native/libsaproc/libproc.h ++++ b/src/jdk.hotspot.agent/linux/native/libsaproc/libproc.h +@@@ -50,11 -43,9 +50,13 @@@ + #elif defined(arm) + #include + #define user_regs_struct pt_regs ++ #elif defined(riscv64) ++ #include + #endif + +#if defined(mips) || defined(mipsel) || defined(mips64) || defined(mips64el) + +#include + +#define user_regs_struct pt_regs + +#endif + + // This C bool type must be int for compatibility with Linux calls and + // it would be a mistake to equivalence it to C++ bool on many platforms +diff --cc src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HotSpotAgent.java +index 021ba6f2820,e0e9b4b6727..9af1218ed46 +--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HotSpotAgent.java ++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HotSpotAgent.java +@@@ -42,9 -36,8 +42,10 @@@ import sun.jvm.hotspot.debugger.Machine + import sun.jvm.hotspot.debugger.MachineDescriptionAMD64; + import sun.jvm.hotspot.debugger.MachineDescriptionPPC64; + import sun.jvm.hotspot.debugger.MachineDescriptionAArch64; ++ import sun.jvm.hotspot.debugger.MachineDescriptionRISCV64; + import sun.jvm.hotspot.debugger.MachineDescriptionIntelX86; + +import sun.jvm.hotspot.debugger.MachineDescriptionMIPS64; + +import sun.jvm.hotspot.debugger.MachineDescriptionLOONGARCH64; + import sun.jvm.hotspot.debugger.NoSuchSymbolException; + import sun.jvm.hotspot.debugger.bsd.BsdDebuggerLocal; + import sun.jvm.hotspot.debugger.linux.LinuxDebuggerLocal; +@@@ -577,10 -570,8 +578,12 @@@ public class HotSpotAgent + machDesc = new MachineDescriptionPPC64(); + } else if (cpu.equals("aarch64")) { + machDesc = new MachineDescriptionAArch64(); ++ } else if (cpu.equals("riscv64")) { ++ machDesc = new MachineDescriptionRISCV64(); + + } else if (cpu.equals("mips64")) { + + machDesc = new MachineDescriptionMIPS64(); + + } else if (cpu.equals("loongarch64")) { + + machDesc = new MachineDescriptionLOONGARCH64(); + } else { + try { + machDesc = (MachineDescription) +diff --cc src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/LinuxCDebugger.java +index db3b1cc20ee,469bb6e0665..ea3a118de2a +--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/LinuxCDebugger.java ++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/LinuxCDebugger.java +@@@ -39,15 -33,13 +39,17 @@@ import sun.jvm.hotspot.debugger.cdbg.* + import sun.jvm.hotspot.debugger.x86.*; + import sun.jvm.hotspot.debugger.amd64.*; + import sun.jvm.hotspot.debugger.aarch64.*; ++ import sun.jvm.hotspot.debugger.riscv64.*; + +import sun.jvm.hotspot.debugger.mips64.*; + +import sun.jvm.hotspot.debugger.loongarch64.*; + import 
sun.jvm.hotspot.debugger.ppc64.*; + import sun.jvm.hotspot.debugger.linux.x86.*; + import sun.jvm.hotspot.debugger.linux.amd64.*; + import sun.jvm.hotspot.debugger.linux.ppc64.*; + import sun.jvm.hotspot.debugger.linux.aarch64.*; ++ import sun.jvm.hotspot.debugger.linux.riscv64.*; + +import sun.jvm.hotspot.debugger.linux.mips64.*; + +import sun.jvm.hotspot.debugger.linux.loongarch64.*; + import sun.jvm.hotspot.utilities.*; + + class LinuxCDebugger implements CDebugger { +diff --cc src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java +index 59e4a3aca46,d16ac8aae51..de1e70a7290 +--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java ++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java +@@@ -40,9 -34,8 +40,10 @@@ import sun.jvm.hotspot.runtime.win32_aa + import sun.jvm.hotspot.runtime.linux_x86.LinuxX86JavaThreadPDAccess; + import sun.jvm.hotspot.runtime.linux_amd64.LinuxAMD64JavaThreadPDAccess; + import sun.jvm.hotspot.runtime.linux_aarch64.LinuxAARCH64JavaThreadPDAccess; ++ import sun.jvm.hotspot.runtime.linux_riscv64.LinuxRISCV64JavaThreadPDAccess; + import sun.jvm.hotspot.runtime.linux_ppc64.LinuxPPC64JavaThreadPDAccess; + +import sun.jvm.hotspot.runtime.linux_mips64.LinuxMIPS64JavaThreadPDAccess; + +import sun.jvm.hotspot.runtime.linux_loongarch64.LinuxLOONGARCH64JavaThreadPDAccess; + import sun.jvm.hotspot.runtime.bsd_x86.BsdX86JavaThreadPDAccess; + import sun.jvm.hotspot.runtime.bsd_amd64.BsdAMD64JavaThreadPDAccess; + import sun.jvm.hotspot.runtime.bsd_aarch64.BsdAARCH64JavaThreadPDAccess; +@@@ -121,10 -114,8 +122,12 @@@ public class Threads + access = new LinuxPPC64JavaThreadPDAccess(); + } else if (cpu.equals("aarch64")) { + access = new LinuxAARCH64JavaThreadPDAccess(); ++ } else if (cpu.equals("riscv64")) { ++ access = new LinuxRISCV64JavaThreadPDAccess(); + + } else if (cpu.equals("mips64")) { + + access = new LinuxMIPS64JavaThreadPDAccess(); + + } else if (cpu.equals("loongarch64")) { + + access = new LinuxLOONGARCH64JavaThreadPDAccess(); + } else { + try { + access = (JavaThreadPDAccess) +diff --cc src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/PlatformInfo.java +index f894792f912,f4cd4873207..6901946e58a +--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/PlatformInfo.java ++++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/PlatformInfo.java +@@@ -57,7 -50,7 +57,7 @@@ public class PlatformInfo + + public static boolean knownCPU(String cpu) { + final String[] KNOWN = +- new String[] {"i386", "x86", "x86_64", "amd64", "ppc64", "ppc64le", "aarch64", "mips64", "mips64el", "loongarch64"}; + - new String[] {"i386", "x86", "x86_64", "amd64", "ppc64", "ppc64le", "aarch64", "riscv64"}; +++ new String[] {"i386", "x86", "x86_64", "amd64", "ppc64", "ppc64le", "aarch64", "riscv64", "mips64", "mips64el", "loongarch64"}; + + for(String s : KNOWN) { + if(s.equals(cpu)) +diff --cc test/hotspot/jtreg/compiler/cpuflags/TestAESIntrinsicsOnSupportedConfig.java +index 3c81fc96949,4c56daebfb8..92836130408 +--- a/test/hotspot/jtreg/compiler/cpuflags/TestAESIntrinsicsOnSupportedConfig.java ++++ b/test/hotspot/jtreg/compiler/cpuflags/TestAESIntrinsicsOnSupportedConfig.java +@@@ -32,9 -26,9 +32,9 @@@ + * @library /test/lib / + * @modules java.base/jdk.internal.misc + * java.management + - * @requires vm.cpu.features ~= ".*aes.*" & !vm.graal.enabled + + * @requires (vm.cpu.features ~= ".*aes.*" | os.arch == "loongarch64") & !vm.graal.enabled +- * @build sun.hotspot.WhiteBox +- * @run 
driver jdk.test.lib.helpers.ClassFileInstaller sun.hotspot.WhiteBox ++ * @build jdk.test.whitebox.WhiteBox ++ * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox + * @run main/othervm/timeout=600 -Xbootclasspath/a:. + * -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch +diff --cc test/hotspot/jtreg/compiler/cpuflags/TestAESIntrinsicsOnUnsupportedConfig.java +index 460a3dafe38,03016ea3dd6..62ce6c1a7a5 +--- a/test/hotspot/jtreg/compiler/cpuflags/TestAESIntrinsicsOnUnsupportedConfig.java ++++ b/test/hotspot/jtreg/compiler/cpuflags/TestAESIntrinsicsOnUnsupportedConfig.java +@@@ -33,10 -27,10 +33,10 @@@ + * @modules java.base/jdk.internal.misc + * java.management + * +- * @build sun.hotspot.WhiteBox ++ * @build jdk.test.whitebox.WhiteBox + - * @requires !(vm.cpu.features ~= ".*aes.*") + + * @requires !(vm.cpu.features ~= ".*aes.*" | os.arch == "loongarch64") + * @requires vm.compiler1.enabled | !vm.graal.enabled +- * @run driver jdk.test.lib.helpers.ClassFileInstaller sun.hotspot.WhiteBox ++ * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions + * -XX:+WhiteBoxAPI -Xbatch + * compiler.cpuflags.TestAESIntrinsicsOnUnsupportedConfig +diff --cc test/hotspot/jtreg/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForOtherCPU.java +index 0209ea644ef,468cd83d7a2..40d2b03e301 +--- a/test/hotspot/jtreg/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForOtherCPU.java ++++ b/test/hotspot/jtreg/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForOtherCPU.java +@@@ -38,7 -32,7 +38,7 @@@ import jdk.test.lib.cli.predicate.OrPre + + /** + * Generic test case for SHA-related options targeted to any CPU except +- * AArch64, PPC, S390x, LoongArch64, and X86. + - * AArch64, RISCV64, PPC, S390x, and X86. +++ * AArch64, RISCV64, PPC, S390x, LoongArch64, and X86. + */ + public class GenericTestCaseForOtherCPU extends + DigestOptionsBase.TestCase { +@@@ -50,14 -44,14 +50,15 @@@ + } + + public GenericTestCaseForOtherCPU(String optionName, boolean checkUseSHA) { +- // Execute the test case on any CPU except AArch64, PPC, S390x, LoongArch64, and X86. + - // Execute the test case on any CPU except AArch64, RISCV64, PPC, S390x, and X86. +++ // Execute the test case on any CPU except AArch64, RISCV64, PPC, S390x, LoongArch64, and X86. 
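The nested OrPredicate chain below builds a single "CPU is one of these" predicate and then negates it with NotPredicate. A rough C++ sketch of the same composition, with hypothetical stub platform checks standing in for jdk.test.lib's Platform methods:

    #include <functional>
    #include <vector>

    using Pred = std::function<bool()>;

    // Hypothetical stubs standing in for Platform::isAArch64() and friends.
    static bool isAArch64()     { return false; }
    static bool isRISCV64()     { return false; }
    static bool isLoongArch64() { return false; }

    static Pred anyOf(std::vector<Pred> ps) {          // mirrors nested OrPredicate
      return [ps] { for (const auto& p : ps) if (p()) return true; return false; };
    }
    static Pred notP(Pred p) { return [p] { return !p(); }; }  // mirrors NotPredicate

    // True exactly on CPUs outside the excluded set, like the test's predicate.
    static Pred otherCPU = notP(anyOf({isAArch64, isRISCV64, isLoongArch64}));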
+ super(optionName, new NotPredicate( + new OrPredicate(Platform::isAArch64, ++ new OrPredicate(Platform::isRISCV64, + new OrPredicate(Platform::isS390x, + new OrPredicate(Platform::isPPC, + + new OrPredicate(Platform::isLoongArch64, + new OrPredicate(Platform::isX64, +-- Platform::isX86))))))); +++ Platform::isX86)))))))); + + this.checkUseSHA = checkUseSHA; + } +diff --cc test/hotspot/jtreg/compiler/lib/ir_framework/IRNode.java +index 93ffcadf8bc,2f2395b77c6..58482edb32e +--- a/test/hotspot/jtreg/compiler/lib/ir_framework/IRNode.java ++++ b/test/hotspot/jtreg/compiler/lib/ir_framework/IRNode.java +@@@ -59,13 -53,13 +59,13 @@@ public class IRNode + private static final String STORE_OF_CLASS_POSTFIX = "(:|\\+)\\S* \\*" + END; + private static final String LOAD_OF_CLASS_POSTFIX = "(:|\\+)\\S* \\*" + END; + +- public static final String ALLOC = "(.*precise klass .*\\R((.*(?i:mov|xorl|nop|spill).*|\\s*|.*LGHI.*)\\R)*.*(?i:call,static).*wrapper for: _new_instance_Java" + END; +- public static final String ALLOC_OF = COMPOSITE_PREFIX + "(.*precise klass .*" + IS_REPLACED + ":.*\\R((.*(?i:mov|xorl|nop|spill).*|\\s*|.*LGHI.*)\\R)*.*(?i:call,static).*wrapper for: _new_instance_Java" + END; +- public static final String ALLOC_ARRAY = "(.*precise klass \\[L.*\\R((.*(?i:mov|xor|nop|spill).*|\\s*|.*LGHI.*)\\R)*.*(?i:call,static).*wrapper for: _new_array_Java" + END; +- public static final String ALLOC_ARRAY_OF = COMPOSITE_PREFIX + "(.*precise klass \\[L.*" + IS_REPLACED + ";:.*\\R((.*(?i:mov|xorl|nop|spill).*|\\s*|.*LGHI.*)\\R)*.*(?i:call,static).*wrapper for: _new_array_Java" + END; ++ public static final String ALLOC = "(.*precise klass .*\\R((.*(?i:mov|mv|xorl|nop|spill).*|\\s*|.*LGHI.*)\\R)*.*(?i:call,static).*wrapper for: _new_instance_Java" + END; ++ public static final String ALLOC_OF = COMPOSITE_PREFIX + "(.*precise klass .*" + IS_REPLACED + ":.*\\R((.*(?i:mov|mv|xorl|nop|spill).*|\\s*|.*LGHI.*)\\R)*.*(?i:call,static).*wrapper for: _new_instance_Java" + END; ++ public static final String ALLOC_ARRAY = "(.*precise klass \\[L.*\\R((.*(?i:mov|mv|xor|nop|spill).*|\\s*|.*LGHI.*)\\R)*.*(?i:call,static).*wrapper for: _new_array_Java" + END; ++ public static final String ALLOC_ARRAY_OF = COMPOSITE_PREFIX + "(.*precise klass \\[L.*" + IS_REPLACED + ";:.*\\R((.*(?i:mov|mv|xorl|nop|spill).*|\\s*|.*LGHI.*)\\R)*.*(?i:call,static).*wrapper for: _new_array_Java" + END; + +- public static final String CHECKCAST_ARRAY = "(((?i:cmp|CLFI|CLR).*precise klass \\[.*;:|.*(?i:mov|or|li).*precise klass \\[.*;:.*\\R.*(cmp|CMP|CLR))" + END; +- public static final String CHECKCAST_ARRAY_OF = COMPOSITE_PREFIX + "(((?i:cmp|CLFI|CLR).*precise klass \\[.*" + IS_REPLACED + ";:|.*(?i:mov|or|li).*precise klass \\[.*" + IS_REPLACED + ";:.*\\R.*(cmp|CMP|CLR))" + END; + - public static final String CHECKCAST_ARRAY = "(((?i:cmp|CLFI|CLR).*precise klass \\[.*;:|.*(?i:mov|mv|or).*precise klass \\[.*;:.*\\R.*(cmp|CMP|CLR))" + END; + - public static final String CHECKCAST_ARRAY_OF = COMPOSITE_PREFIX + "(((?i:cmp|CLFI|CLR).*precise klass \\[.*" + IS_REPLACED + ";:|.*(?i:mov|mv|or).*precise klass \\[.*" + IS_REPLACED + ";:.*\\R.*(cmp|CMP|CLR))" + END; +++ public static final String CHECKCAST_ARRAY = "(((?i:cmp|CLFI|CLR).*precise klass \\[.*;:|.*(?i:mov|mv|or|li).*precise klass \\[.*;:.*\\R.*(cmp|CMP|CLR))" + END; +++ public static final String CHECKCAST_ARRAY_OF = COMPOSITE_PREFIX + "(((?i:cmp|CLFI|CLR).*precise klass \\[.*" + IS_REPLACED + ";:|.*(?i:mov|mv|or|li).*precise klass \\[.*" + IS_REPLACED + ";:.*\\R.*(cmp|CMP|CLR))" + END; + 
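The alternations widened above exist so the IR framework's patterns also match RISC-V and LoongArch mnemonics such as mv and li alongside the x86-style mov/or spellings. A small check of the idea with std::regex; note that std::regex has no inline (?i:) group, so this sketch uses the icase flag and a deliberately shortened pattern:

    #include <cassert>
    #include <regex>
    #include <string>

    int main() {
      std::regex mnemonic("(mov|mv|or|li)", std::regex::icase);
      assert(std::regex_search(std::string("MOV rax, rbx"), mnemonic)); // x86
      assert(std::regex_search(std::string("mv a0, a1"),    mnemonic)); // riscv
      assert(std::regex_search(std::string("li t0, 0"),     mnemonic)); // la/riscv pseudo-op
    }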
// Does not work on s390 (a rule containing this regex will be skipped on s390). + public static final String CHECKCAST_ARRAYCOPY = "(.*((?i:call_leaf_nofp,runtime)|CALL,\\s?runtime leaf nofp|BCTRL.*.leaf call).*checkcast_arraycopy.*" + END; + +diff --cc test/hotspot/jtreg/compiler/runtime/TestConstantsInError.java +index eb9db789f91,85fd3fa938d..0655f2b0bd1 +--- a/test/hotspot/jtreg/compiler/runtime/TestConstantsInError.java ++++ b/test/hotspot/jtreg/compiler/runtime/TestConstantsInError.java +@@@ -136,7 -130,7 +136,7 @@@ public abstract class TestConstantsInEr + results.shouldMatch("Test_C1/.*::test \\(3 bytes\\)$") + .shouldMatch("Test_C2/.*::test \\(3 bytes\\)$"); + +- if (isC1 && (Platform.isAArch64() || Platform.isLoongArch64())) { // no code patching + - if (isC1 && (Platform.isAArch64() || Platform.isRISCV64())) { // no code patching +++ if (isC1 && (Platform.isAArch64() || Platform.isRISCV64() || Platform.isLoongArch64())) { // no code patching + results.shouldMatch("Test_C1/.*::test \\(3 bytes\\) made not entrant") + .shouldMatch("Test_C2/.*::test \\(3 bytes\\) made not entrant"); + } else { +@@@ -174,7 -168,7 +174,7 @@@ + .shouldMatch("Test_MH3/.*::test \\(3 bytes\\)$") + .shouldMatch("Test_MH4/.*::test \\(3 bytes\\)$"); + +- if (isC1 && (Platform.isAArch64() || Platform.isLoongArch64())) { // no code patching + - if (isC1 && (Platform.isAArch64() || Platform.isRISCV64())) { // no code patching +++ if (isC1 && (Platform.isAArch64() || Platform.isRISCV64() || Platform.isLoongArch64())) { // no code patching + results.shouldMatch("Test_MH1/.*::test \\(3 bytes\\) made not entrant") + .shouldMatch("Test_MH2/.*::test \\(3 bytes\\) made not entrant") + .shouldMatch("Test_MH3/.*::test \\(3 bytes\\) made not entrant") +@@@ -197,7 -191,7 +197,7 @@@ + results.shouldMatch("Test_MT1/.*::test \\(3 bytes\\)$") + .shouldMatch("Test_MT2/.*::test \\(3 bytes\\)$"); + +- if (isC1 && (Platform.isAArch64() || Platform.isLoongArch64())) { // no code patching + - if (isC1 && (Platform.isAArch64() || Platform.isRISCV64())) { // no code patching +++ if (isC1 && (Platform.isAArch64() || Platform.isRISCV64() || Platform.isLoongArch64())) { // no code patching + results.shouldMatch("Test_MT1/.*::test \\(3 bytes\\) made not entrant") + .shouldMatch("Test_MT2/.*::test \\(3 bytes\\) made not entrant"); + } else { +diff --cc test/hotspot/jtreg/compiler/runtime/criticalnatives/argumentcorruption/CheckLongArgs.java +index 119acb520da,10d87d51f0f..dbea76741d6 +--- a/test/hotspot/jtreg/compiler/runtime/criticalnatives/argumentcorruption/CheckLongArgs.java ++++ b/test/hotspot/jtreg/compiler/runtime/criticalnatives/argumentcorruption/CheckLongArgs.java +@@@ -30,8 -24,7 +30,8 @@@ + + /* @test + * @bug 8167409 +- * @requires (os.arch != "aarch64") & (os.arch != "arm") & (vm.flavor != "zero") ++ * @requires (os.arch != "aarch64") & (os.arch != "arm") & (os.arch != "riscv64") & (vm.flavor != "zero") + + * @requires (os.arch != "mips64el") & (os.arch != "loongarch64") & (vm.flavor != "zero") + * @run main/othervm/native -Xcomp -XX:+CriticalJNINatives compiler.runtime.criticalnatives.argumentcorruption.CheckLongArgs + */ + package compiler.runtime.criticalnatives.argumentcorruption; +diff --cc test/hotspot/jtreg/compiler/runtime/criticalnatives/lookup/LookUp.java +index d3e73158a25,23c1e6e6acb..2f402d567d9 +--- a/test/hotspot/jtreg/compiler/runtime/criticalnatives/lookup/LookUp.java ++++ b/test/hotspot/jtreg/compiler/runtime/criticalnatives/lookup/LookUp.java +@@@ -30,8 -24,7 +30,8 @@@ + + /* @test + * @bug 8167408 +- * 
@requires (os.arch != "aarch64") & (os.arch != "arm") & (vm.flavor != "zero") ++ * @requires (os.arch != "aarch64") & (os.arch != "arm") & (os.arch != "riscv64") & (vm.flavor != "zero") + + * @requires (os.arch != "mips64el") & (os.arch != "loongarch64") & (vm.flavor != "zero") + * @run main/othervm/native -Xcomp -XX:+CriticalJNINatives compiler.runtime.criticalnatives.lookup.LookUp + */ + package compiler.runtime.criticalnatives.lookup; +diff --cc test/hotspot/jtreg/compiler/testlibrary/sha/predicate/IntrinsicPredicates.java +index 8b859a92d8a,689c7c8cc2f..f734c1baa3f +--- a/test/hotspot/jtreg/compiler/testlibrary/sha/predicate/IntrinsicPredicates.java ++++ b/test/hotspot/jtreg/compiler/testlibrary/sha/predicate/IntrinsicPredicates.java +@@@ -75,13 -68,12 +75,14 @@@ public class IntrinsicPredicates + + public static final BooleanSupplier SHA1_INSTRUCTION_AVAILABLE + = new OrPredicate(new CPUSpecificPredicate("aarch64.*", new String[] { "sha1" }, null), ++ new OrPredicate(new CPUSpecificPredicate("riscv64.*", new String[] { "sha1" }, null), + new OrPredicate(new CPUSpecificPredicate("s390.*", new String[] { "sha1" }, null), + + // Basic instructions are used to implement SHA1 Intrinsics on LA, so "sha1" feature is not needed. + + new OrPredicate(new CPUSpecificPredicate("loongarch64.*", null, null), + // x86 variants + new OrPredicate(new CPUSpecificPredicate("amd64.*", new String[] { "sha" }, null), + new OrPredicate(new CPUSpecificPredicate("i386.*", new String[] { "sha" }, null), +-- new CPUSpecificPredicate("x86.*", new String[] { "sha" }, null)))))); +++ new CPUSpecificPredicate("x86.*", new String[] { "sha" }, null))))))); + + public static final BooleanSupplier SHA256_INSTRUCTION_AVAILABLE + = new OrPredicate(new CPUSpecificPredicate("aarch64.*", new String[] { "sha256" }, null), +@@@ -95,7 -86,7 +97,7 @@@ + new OrPredicate(new CPUSpecificPredicate("i386.*", new String[] { "sha" }, null), + new OrPredicate(new CPUSpecificPredicate("x86.*", new String[] { "sha" }, null), + new OrPredicate(new CPUSpecificPredicate("amd64.*", new String[] { "avx2", "bmi2" }, null), +-- new CPUSpecificPredicate("x86_64", new String[] { "avx2", "bmi2" }, null)))))))))); +++ new CPUSpecificPredicate("x86_64", new String[] { "avx2", "bmi2" }, null))))))))))); + + public static final BooleanSupplier SHA512_INSTRUCTION_AVAILABLE + = new OrPredicate(new CPUSpecificPredicate("aarch64.*", new String[] { "sha512" }, null), +diff --cc test/hotspot/jtreg/runtime/ReservedStack/ReservedStackTest.java +index bf933f04957,36f74d01b54..035b91b9d8e +--- a/test/hotspot/jtreg/runtime/ReservedStack/ReservedStackTest.java ++++ b/test/hotspot/jtreg/runtime/ReservedStack/ReservedStackTest.java +@@@ -246,7 -240,7 +246,8 @@@ public class ReservedStackTest + return Platform.isAix() || + (Platform.isLinux() && + (Platform.isPPC() || Platform.isS390x() || Platform.isX64() || +- Platform.isX86() || Platform.isAArch64() || Platform.isMIPS() || Platform.isLoongArch64())) || + - Platform.isX86() || Platform.isAArch64() || Platform.isRISCV64())) || +++ Platform.isX86() || Platform.isAArch64() || Platform.isRISCV64() || +++ Platform.isMIPS() || Platform.isLoongArch64())) || + Platform.isOSX(); + } + +diff --cc test/jdk/jdk/jfr/event/os/TestCPUInformation.java +index ae739a92f28,c5166580010..913136a1fd1 +--- a/test/jdk/jdk/jfr/event/os/TestCPUInformation.java ++++ b/test/jdk/jdk/jfr/event/os/TestCPUInformation.java +@@@ -58,8 -52,8 +58,8 @@@ public class TestCPUInformation + Events.assertField(event, "hwThreads").atLeast(1); + 
Events.assertField(event, "cores").atLeast(1); + Events.assertField(event, "sockets").atLeast(1); +- Events.assertField(event, "cpu").containsAny("Intel", "AMD", "Unknown x86", "ARM", "PPC", "PowerPC", "AArch64", "s390", "MIPS", "LoongArch"); +- Events.assertField(event, "description").containsAny("Intel", "AMD", "Unknown x86", "ARM", "PPC", "PowerPC", "AArch64", "s390", "MIPS", "LoongArch"); + - Events.assertField(event, "cpu").containsAny("Intel", "AMD", "Unknown x86", "ARM", "PPC", "PowerPC", "AArch64", "RISCV64", "s390"); + - Events.assertField(event, "description").containsAny("Intel", "AMD", "Unknown x86", "ARM", "PPC", "PowerPC", "AArch64", "RISCV64", "s390"); +++ Events.assertField(event, "cpu").containsAny("Intel", "AMD", "Unknown x86", "ARM", "PPC", "PowerPC", "AArch64", "RISCV64", "s390", "MIPS", "LoongArch"); +++ Events.assertField(event, "description").containsAny("Intel", "AMD", "Unknown x86", "ARM", "PPC", "PowerPC", "AArch64", "RISCV64", "s390", "MIPS", "LoongArch"); + } + } + } +diff --cc test/lib-test/jdk/test/lib/TestMutuallyExclusivePlatformPredicates.java +index 9084076f433,c71a6034748..427ebda770f +--- a/test/lib-test/jdk/test/lib/TestMutuallyExclusivePlatformPredicates.java ++++ b/test/lib-test/jdk/test/lib/TestMutuallyExclusivePlatformPredicates.java +@@@ -51,7 -45,7 +51,7 @@@ import java.util.Set + */ + public class TestMutuallyExclusivePlatformPredicates { + private static enum MethodGroup { +- ARCH("isAArch64", "isARM", "isPPC", "isS390x", "isX64", "isX86", "isMIPS", "isLoongArch64"), + - ARCH("isAArch64", "isARM", "isRISCV64", "isPPC", "isS390x", "isX64", "isX86"), +++ ARCH("isAArch64", "isARM", "isRISCV64", "isPPC", "isS390x", "isX64", "isX86", "isMIPS", "isLoongArch64"), + BITNESS("is32bit", "is64bit"), + OS("isAix", "isLinux", "isOSX", "isWindows"), + VM_TYPE("isClient", "isServer", "isMinimal", "isZero", "isEmbedded"), + +commit e7efed5c455e2af269d45f1761fb8a22a0834b78 +Author: loongson-jvm +Date: Thu Dec 7 18:05:56 2023 +0800 + + Update (2023.12.07) + + 30457: Use membars when accessing volatile _thread_state + 32163: The size of is_wide_vector should be greater than 8 bytes + +diff --git a/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp b/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp +index 44af7805b8a..930b6240b4b 100644 +--- a/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp ++++ b/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp +@@ -327,9 +327,9 @@ void RegisterSaver::restore_result_registers(MacroAssembler* masm) { + } + + // Is vector's size (in bytes) bigger than a size saved by default? +-// 16 bytes XMM registers are saved by default using fxsave/fxrstor instructions. ++// 8 bytes registers are saved by default using fld/fst instructions. + bool SharedRuntime::is_wide_vector(int size) { +- return size > 16; ++ return size > 8; + } + + // The java_calling_convention describes stack locations as ideal slots on +diff --git a/src/hotspot/share/runtime/thread.inline.hpp b/src/hotspot/share/runtime/thread.inline.hpp +index 63101e77855..9b72ada86b6 100644 +--- a/src/hotspot/share/runtime/thread.inline.hpp ++++ b/src/hotspot/share/runtime/thread.inline.hpp +@@ -23,6 +23,12 @@ + * + */ + ++/* ++ * This file has been modified by Loongson Technology in 2023, These ++ * modifications are Copyright (c) 2018, 2023, Loongson Technology, and are made ++ * available on the same license terms set forth above. 
++ */ ++ + #ifndef SHARE_RUNTIME_THREAD_INLINE_HPP + #define SHARE_RUNTIME_THREAD_INLINE_HPP + +@@ -132,7 +138,7 @@ inline void JavaThread::set_pending_async_exception(oop e) { + } + + inline JavaThreadState JavaThread::thread_state() const { +-#if defined(PPC64) || defined (AARCH64) ++#if defined(PPC64) || defined (AARCH64) || defined(LOONGARCH64) + // Use membars when accessing volatile _thread_state. See + // Threads::create_vm() for size checks. + return (JavaThreadState) Atomic::load_acquire((volatile jint*)&_thread_state); +@@ -144,7 +150,7 @@ inline JavaThreadState JavaThread::thread_state() const { + inline void JavaThread::set_thread_state(JavaThreadState s) { + assert(current_or_null() == NULL || current_or_null() == this, + "state change should only be called by the current thread"); +-#if defined(PPC64) || defined (AARCH64) ++#if defined(PPC64) || defined (AARCH64) || defined(LOONGARCH64) + // Use membars when accessing volatile _thread_state. See + // Threads::create_vm() for size checks. + Atomic::release_store((volatile jint*)&_thread_state, (jint)s); + +commit 983f139f47a778dcb225d3a514f5ca5f9f949e06 +Merge: 0ca05554a06 3197a9b0299 +Author: aoqi +Date: Mon Aug 14 19:28:14 2023 +0800 + + Merge + +diff --cc src/hotspot/share/memory/metaspace.cpp +index 2bfb3d71634,3f29a72a86d..0b3d26df59c +--- a/src/hotspot/share/memory/metaspace.cpp ++++ b/src/hotspot/share/memory/metaspace.cpp +@@@ -593,15 -587,12 +593,15 @@@ bool Metaspace::class_space_is_initiali + // On error, returns an unreserved space. + ReservedSpace Metaspace::reserve_address_space_for_compressed_classes(size_t size) { + + -#if defined(AARCH64) || defined(PPC64) + +#if defined(AARCH64) || defined(PPC64) || defined(MIPS64) || defined(LOONGARCH64) + const size_t alignment = Metaspace::reserve_alignment(); + +- // AArch64: Try to align metaspace so that we can decode a compressed +- // klass with a single MOVK instruction. We can do this iff the ++ // AArch64: Try to align metaspace class space so that we can decode a ++ // compressed klass with a single MOVK instruction. We can do this iff the + // compressed class base is a multiple of 4G. + + + + // MIPS: Cannot mmap for 1G space at 4G position, and prepare for future optimization. + + + // Additionally, above 32G, ensure the lower LogKlassAlignmentInBytes bits + // of the upper 32-bits of the address are zero so we can handle a shift + // when decoding. +@@@ -627,18 -622,34 +631,34 @@@ + address a = search_ranges[i].from; + assert(CompressedKlassPointers::is_valid_base(a), "Sanity"); + while (a < search_ranges[i].to) { +- ReservedSpace rs(size, Metaspace::reserve_alignment(), +- os::vm_page_size(), (char*)a); +- if (rs.is_reserved()) { +- assert(a == (address)rs.base(), "Sanity"); +- return rs; +- } ++ list.append(a); + a += search_ranges[i].increment; + } ++ } ++ ++ int len = list.length(); ++ int r = 0; ++ if (!DumpSharedSpaces) { ++ // Starting from a random position in the list. If the address cannot be reserved ++ // (the OS already assigned it for something else), go to the next position, wrapping ++ // around if necessary, until we exhaust all the items. 
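The comment block above describes the reservation strategy: outside of CDS dumping, pick a random starting index into the candidate address list, then walk the list with wrap-around until one address can actually be reserved. A minimal sketch of that loop shape, with a stubbed try_reserve in place of constructing a ReservedSpace and checking is_reserved():

    #include <cstdint>
    #include <vector>

    // Hypothetical stand-in for the reservation attempt; here every third
    // candidate "succeeds".
    static bool try_reserve(uintptr_t addr) { return addr % 3 == 0; }

    // r is the random start index (assumed non-negative for this sketch).
    static uintptr_t reserve_randomized(const std::vector<uintptr_t>& list, int r) {
      int len = static_cast<int>(list.size());
      for (int i = 0; i < len; i++) {          // visit every candidate exactly once
        uintptr_t a = list[(i + r) % len];     // wrap around from the random start
        if (try_reserve(a)) return a;          // first successful reservation wins
      }
      return 0;                                // exhausted; the caller gives up
    }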
++ os::init_random((int)os::javaTimeNanos()); ++ r = os::random(); ++ log_info(metaspace)("Randomizing compressed class space: start from %d out of %d locations", ++ r % len, len); ++ } ++ for (int i = 0; i < len; i++) { ++ address a = list.at((i + r) % len); ++ ReservedSpace rs(size, Metaspace::reserve_alignment(), ++ os::vm_page_size(), (char*)a); ++ if (rs.is_reserved()) { ++ assert(a == (address)rs.base(), "Sanity"); ++ return rs; ++ } + } + -#endif // defined(AARCH64) || defined(PPC64) + +#endif // defined(AARCH64) || defined(PPC64) || defined(MIPS64) || defined(LOONGARCH64) + + -#ifdef AARCH64 + +#if defined(AARCH64) || defined(MIPS64) || defined(LOONGARCH64) + // Note: on AARCH64, if the code above does not find any good placement, we + // have no recourse. We return an empty space and the VM will exit. + return ReservedSpace(); + +commit 0ca05554a0681d353bc743c741cec776f2e9bf5e +Author: loongson-jvm +Date: Mon Aug 14 19:24:05 2023 +0800 + + Update (2023.08.14, 2nd) + + 30943: LA port of 8303588: [JVMCI] make JVMCI source directories conform with standard layout + 22702: MIPS/LA port of 8252990: Intrinsify Unsafe.storeStoreFence + +diff --git a/src/hotspot/cpu/loongarch/loongarch_64.ad b/src/hotspot/cpu/loongarch/loongarch_64.ad +index daa1db594ce..43e32570a0f 100644 +--- a/src/hotspot/cpu/loongarch/loongarch_64.ad ++++ b/src/hotspot/cpu/loongarch/loongarch_64.ad +@@ -6434,6 +6434,7 @@ instruct membar_volatile() %{ + + instruct membar_storestore() %{ + match(MemBarStoreStore); ++ match(StoreStoreFence); + + ins_cost(400); + format %{ "MEMBAR-storestore @ membar_storestore" %} +diff --git a/src/hotspot/cpu/mips/mips_64.ad b/src/hotspot/cpu/mips/mips_64.ad +index ec85f64244c..882878f739a 100644 +--- a/src/hotspot/cpu/mips/mips_64.ad ++++ b/src/hotspot/cpu/mips/mips_64.ad +@@ -7127,6 +7127,7 @@ instruct unnecessary_membar_volatile() %{ + + instruct membar_storestore() %{ + match(MemBarStoreStore); ++ match(StoreStoreFence); + + ins_cost(400); + format %{ "MEMBAR-storestore @ membar_storestore" %} +diff --git a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.loongarch64/src/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotJVMCIBackendFactory.java b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotJVMCIBackendFactory.java +similarity index 98% +rename from src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.loongarch64/src/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotJVMCIBackendFactory.java +rename to src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotJVMCIBackendFactory.java +index e3a7daa1983..1f54e9f3c59 100644 +--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.loongarch64/src/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotJVMCIBackendFactory.java ++++ b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotJVMCIBackendFactory.java +@@ -1,6 +1,6 @@ + /* + * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. +- * Copyright (c) 2022, Loongson Technology. All rights reserved. ++ * Copyright (c) 2022, 2023, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it +diff --git a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.loongarch64/src/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotRegisterConfig.java b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotRegisterConfig.java +similarity index 99% +rename from src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.loongarch64/src/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotRegisterConfig.java +rename to src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotRegisterConfig.java +index 2ee6a4b8472..e1a007000d2 100644 +--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.loongarch64/src/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotRegisterConfig.java ++++ b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotRegisterConfig.java +@@ -1,6 +1,6 @@ + /* + * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. +- * Copyright (c) 2022, Loongson Technology. All rights reserved. ++ * Copyright (c) 2022, 2023, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it +diff --git a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.loongarch64/src/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotVMConfig.java b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotVMConfig.java +similarity index 98% +rename from src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.loongarch64/src/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotVMConfig.java +rename to src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotVMConfig.java +index c8605976a0d..0a2e857204c 100644 +--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.loongarch64/src/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotVMConfig.java ++++ b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotVMConfig.java +@@ -1,6 +1,6 @@ + /* + * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. +- * Copyright (c) 2022, Loongson Technology. All rights reserved. ++ * Copyright (c) 2022, 2023, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it +diff --git a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.loongarch64/src/jdk/vm/ci/hotspot/loongarch64/package-info.java b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/loongarch64/package-info.java +similarity index 94% +rename from src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.loongarch64/src/jdk/vm/ci/hotspot/loongarch64/package-info.java +rename to src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/loongarch64/package-info.java +index 1048ea9d64b..74c6ca9801f 100644 +--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.loongarch64/src/jdk/vm/ci/hotspot/loongarch64/package-info.java ++++ b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/loongarch64/package-info.java +@@ -1,6 +1,6 @@ + /* + * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. +- * Copyright (c) 2022, Loongson Technology. All rights reserved. ++ * Copyright (c) 2022, 2023, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it +diff --git a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.loongarch64/src/jdk/vm/ci/loongarch64/LoongArch64.java b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/loongarch64/LoongArch64.java +similarity index 99% +rename from src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.loongarch64/src/jdk/vm/ci/loongarch64/LoongArch64.java +rename to src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/loongarch64/LoongArch64.java +index 99201889b84..930b17e820a 100644 +--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.loongarch64/src/jdk/vm/ci/loongarch64/LoongArch64.java ++++ b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/loongarch64/LoongArch64.java +@@ -1,6 +1,6 @@ + /* + * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. +- * Copyright (c) 2022, Loongson Technology. All rights reserved. ++ * Copyright (c) 2022, 2023, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it +diff --git a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.loongarch64/src/jdk/vm/ci/loongarch64/LoongArch64Kind.java b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/loongarch64/LoongArch64Kind.java +similarity index 98% +rename from src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.loongarch64/src/jdk/vm/ci/loongarch64/LoongArch64Kind.java +rename to src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/loongarch64/LoongArch64Kind.java +index 84b7f2027f1..047a1dbbe36 100644 +--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.loongarch64/src/jdk/vm/ci/loongarch64/LoongArch64Kind.java ++++ b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/loongarch64/LoongArch64Kind.java +@@ -1,6 +1,6 @@ + /* + * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. +- * Copyright (c) 2022, Loongson Technology. All rights reserved. ++ * Copyright (c) 2022, 2023, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it +diff --git a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.loongarch64/src/jdk/vm/ci/loongarch64/package-info.java b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/loongarch64/package-info.java +similarity index 94% +rename from src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.loongarch64/src/jdk/vm/ci/loongarch64/package-info.java +rename to src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/loongarch64/package-info.java +index 9d020833eaf..6df1b7b3a92 100644 +--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.loongarch64/src/jdk/vm/ci/loongarch64/package-info.java ++++ b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/loongarch64/package-info.java +@@ -1,6 +1,6 @@ + /* + * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. +- * Copyright (c) 2022, Loongson Technology. All rights reserved. ++ * Copyright (c) 2022, 2023, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it +diff --git a/test/hotspot/jtreg/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/CodeInstallationTest.java b/test/hotspot/jtreg/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/CodeInstallationTest.java +index 4095202ea30..0d47a2f3037 100644 +--- a/test/hotspot/jtreg/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/CodeInstallationTest.java ++++ b/test/hotspot/jtreg/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/CodeInstallationTest.java +@@ -22,23 +22,23 @@ + */ + + /* +- * This file has been modified by Loongson Technology in 2022, These +- * modifications are Copyright (c) 2022, Loongson Technology, and are made ++ * This file has been modified by Loongson Technology in 2023, These ++ * modifications are Copyright (c) 2022, 2023, Loongson Technology, and are made + * available on the same license terms set forth above. + */ + + package jdk.vm.ci.code.test; + + import jdk.vm.ci.aarch64.AArch64; +-import jdk.vm.ci.loongarch64.LoongArch64; + import jdk.vm.ci.amd64.AMD64; ++import jdk.vm.ci.loongarch64.LoongArch64; + import jdk.vm.ci.code.Architecture; + import jdk.vm.ci.code.CodeCacheProvider; + import jdk.vm.ci.code.InstalledCode; + import jdk.vm.ci.code.TargetDescription; +-import jdk.vm.ci.code.test.loongarch64.LoongArch64TestAssembler; + import jdk.vm.ci.code.test.aarch64.AArch64TestAssembler; + import jdk.vm.ci.code.test.amd64.AMD64TestAssembler; ++import jdk.vm.ci.code.test.loongarch64.LoongArch64TestAssembler; + import jdk.vm.ci.hotspot.HotSpotCodeCacheProvider; + import jdk.vm.ci.hotspot.HotSpotCompiledCode; + import jdk.vm.ci.hotspot.HotSpotJVMCIRuntime; + +commit 1f64aa86dc7214e3587472b98886550f777c890f +Merge: d833eebe238 0f531dacb87 +Author: aoqi +Date: Mon Aug 14 17:52:34 2023 +0800 + + Merge + +diff --cc test/lib/jdk/test/lib/Platform.java +index ef98ee74a48,886f54be165..d15decbfa79 +--- a/test/lib/jdk/test/lib/Platform.java ++++ b/test/lib/jdk/test/lib/Platform.java +@@@ -21,16 -21,12 +21,18 @@@ + * questions. + */ + + +/* + + * This file has been modified by Loongson Technology in 2021, These + + * modifications are Copyright (c) 2019, 2021, Loongson Technology, and are made + + * available on the same license terms set forth above. 
+ + */ + + + package jdk.test.lib; + ++ import java.io.BufferedReader; + import java.io.FileNotFoundException; + import java.io.IOException; ++ import java.io.InputStreamReader; + import java.nio.file.Files; + import java.nio.file.Path; + import java.nio.file.Paths; + +commit d833eebe238495b3493dc760d56a5da4490118b7 +Author: loongson-jvm +Date: Mon Aug 14 17:49:18 2023 +0800 + + Update (2023.08.14) + + 30285: ArrayCopy: Align destination address + 27957: Mark stub code without alignment padding + +diff --git a/src/hotspot/cpu/loongarch/stubGenerator_loongarch_64.cpp b/src/hotspot/cpu/loongarch/stubGenerator_loongarch_64.cpp +index fbdc96f10f5..10242a3df4a 100644 +--- a/src/hotspot/cpu/loongarch/stubGenerator_loongarch_64.cpp ++++ b/src/hotspot/cpu/loongarch/stubGenerator_loongarch_64.cpp +@@ -728,8 +728,8 @@ class StubGenerator: public StubCodeGenerator { + + // disjoint large copy + void generate_disjoint_large_copy(Label &entry, const char *name) { +- StubCodeMark mark(this, "StubRoutines", name); + __ align(CodeEntryAlignment); ++ StubCodeMark mark(this, "StubRoutines", name); + + { + UnsafeCopyMemoryMark ucmm(this, true, true); +@@ -741,7 +741,7 @@ class StubGenerator: public StubCodeGenerator { + __ ld_d(A6, A0, 0); + __ ld_d(A7, A2, -8); + +- __ andi(T1, A0, 7); ++ __ andi(T1, A1, 7); + __ sub_d(T0, R0, T1); + __ addi_d(T0, T0, 8); + +@@ -813,8 +813,8 @@ class StubGenerator: public StubCodeGenerator { + + // disjoint large copy lsx + void generate_disjoint_large_copy_lsx(Label &entry, const char *name) { +- StubCodeMark mark(this, "StubRoutines", name); + __ align(CodeEntryAlignment); ++ StubCodeMark mark(this, "StubRoutines", name); + + { + UnsafeCopyMemoryMark ucmm(this, true, true); +@@ -826,7 +826,7 @@ class StubGenerator: public StubCodeGenerator { + __ vld(F0, A0, 0); + __ vld(F1, A2, -16); + +- __ andi(T1, A0, 15); ++ __ andi(T1, A1, 15); + __ sub_d(T0, R0, T1); + __ addi_d(T0, T0, 16); + +@@ -898,8 +898,8 @@ class StubGenerator: public StubCodeGenerator { + + // disjoint large copy lasx + void generate_disjoint_large_copy_lasx(Label &entry, const char *name) { +- StubCodeMark mark(this, "StubRoutines", name); + __ align(CodeEntryAlignment); ++ StubCodeMark mark(this, "StubRoutines", name); + + { + UnsafeCopyMemoryMark ucmm(this, true, true); +@@ -911,7 +911,7 @@ class StubGenerator: public StubCodeGenerator { + __ xvld(F0, A0, 0); + __ xvld(F1, A2, -32); + +- __ andi(T1, A0, 31); ++ __ andi(T1, A1, 31); + __ sub_d(T0, R0, T1); + __ addi_d(T0, T0, 32); + +@@ -983,8 +983,8 @@ class StubGenerator: public StubCodeGenerator { + + // conjoint large copy + void generate_conjoint_large_copy(Label &entry, const char *name) { +- StubCodeMark mark(this, "StubRoutines", name); + __ align(CodeEntryAlignment); ++ StubCodeMark mark(this, "StubRoutines", name); + + { + UnsafeCopyMemoryMark ucmm(this, true, true); +@@ -996,7 +996,7 @@ class StubGenerator: public StubCodeGenerator { + __ ld_d(A6, A0, 0); + __ ld_d(A7, A2, -8); + +- __ andi(T1, A2, 7); ++ __ andi(T1, A3, 7); + __ sub_d(A2, A2, T1); + __ sub_d(A5, A3, T1); + +@@ -1065,8 +1065,8 @@ class StubGenerator: public StubCodeGenerator { + + // conjoint large copy lsx + void generate_conjoint_large_copy_lsx(Label &entry, const char *name) { +- StubCodeMark mark(this, "StubRoutines", name); + __ align(CodeEntryAlignment); ++ StubCodeMark mark(this, "StubRoutines", name); + + { + UnsafeCopyMemoryMark ucmm(this, true, true); +@@ -1078,7 +1078,7 @@ class StubGenerator: public StubCodeGenerator { + __ vld(F0, A0, 0); + __ vld(F1, A2, -16); + 
+- __ andi(T1, A2, 15); ++ __ andi(T1, A3, 15); + __ sub_d(A2, A2, T1); + __ sub_d(A5, A3, T1); + +@@ -1147,8 +1147,8 @@ class StubGenerator: public StubCodeGenerator { + + // conjoint large copy lasx + void generate_conjoint_large_copy_lasx(Label &entry, const char *name) { +- StubCodeMark mark(this, "StubRoutines", name); + __ align(CodeEntryAlignment); ++ StubCodeMark mark(this, "StubRoutines", name); + + { + UnsafeCopyMemoryMark ucmm(this, true, true); +@@ -1160,7 +1160,7 @@ class StubGenerator: public StubCodeGenerator { + __ xvld(F0, A0, 0); + __ xvld(F1, A2, -32); + +- __ andi(T1, A2, 31); ++ __ andi(T1, A3, 31); + __ sub_d(A2, A2, T1); + __ sub_d(A5, A3, T1); + +@@ -1229,8 +1229,8 @@ class StubGenerator: public StubCodeGenerator { + + // Byte small copy: less than { int:9, lsx:17, lasx:33 } elements. + void generate_byte_small_copy(Label &entry, const char *name) { +- StubCodeMark mark(this, "StubRoutines", name); + __ align(CodeEntryAlignment); ++ StubCodeMark mark(this, "StubRoutines", name); + + Label L; + __ bind(entry); +@@ -1594,9 +1594,9 @@ class StubGenerator: public StubCodeGenerator { + // used by generate_conjoint_byte_copy(). + // + address generate_disjoint_byte_copy(bool aligned, Label &small, Label &large, +- Label &large_aligned, const char * name) { +- StubCodeMark mark(this, "StubRoutines", name); ++ const char * name) { + __ align(CodeEntryAlignment); ++ StubCodeMark mark(this, "StubRoutines", name); + address start = __ pc(); + + if (UseLASX) +@@ -1607,12 +1607,6 @@ class StubGenerator: public StubCodeGenerator { + __ sltui(T0, A2, 9); + __ bnez(T0, small); + +- if (large_aligned.is_bound()) { +- __ orr(T0, A0, A1); +- __ andi(T0, T0, 7); +- __ beqz(T0, large_aligned); +- } +- + __ b(large); + + return start; +@@ -1634,9 +1628,9 @@ class StubGenerator: public StubCodeGenerator { + // and stored atomically. + // + address generate_conjoint_byte_copy(bool aligned, Label &small, Label &large, +- Label &large_aligned, const char *name) { +- StubCodeMark mark(this, "StubRoutines", name); ++ const char *name) { + __ align(CodeEntryAlignment); ++ StubCodeMark mark(this, "StubRoutines", name); + address start = __ pc(); + + array_overlap_test(StubRoutines::jbyte_disjoint_arraycopy(), 0); +@@ -1649,12 +1643,6 @@ class StubGenerator: public StubCodeGenerator { + __ sltui(T0, A2, 9); + __ bnez(T0, small); + +- if (large_aligned.is_bound()) { +- __ orr(T0, A0, A1); +- __ andi(T0, T0, 7); +- __ beqz(T0, large_aligned); +- } +- + __ b(large); + + return start; +@@ -1662,8 +1650,8 @@ class StubGenerator: public StubCodeGenerator { + + // Short small copy: less than { int:9, lsx:9, lasx:17 } elements. + void generate_short_small_copy(Label &entry, const char *name) { +- StubCodeMark mark(this, "StubRoutines", name); + __ align(CodeEntryAlignment); ++ StubCodeMark mark(this, "StubRoutines", name); + + Label L; + __ bind(entry); +@@ -1873,9 +1861,9 @@ class StubGenerator: public StubCodeGenerator { + // used by generate_conjoint_short_copy(). 
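There is one pattern behind all the andi changes above: for the disjoint copies the alignment probe moves from the source pointer (A0) to the destination (A1), and for the conjoint copies from the source end (A2) to the destination end (A3). This is what the commit note "ArrayCopy: Align destination address" refers to; misaligned stores are generally costlier than misaligned loads, so the head fix-up is spent making the destination aligned. A minimal C++ sketch of the forward (disjoint) head step, assuming an 8-byte granule, disjoint buffers, and length >= 8:

    #include <cstdint>
    #include <cstring>

    // Copy one unaligned 8-byte granule, then advance so that every following
    // store lands on an 8-byte-aligned *destination* address.
    static void copy_head_align_dest(uint8_t*& dst, const uint8_t*& src, size_t& n) {
      uintptr_t skew = reinterpret_cast<uintptr_t>(dst) & 7;  // andi T1, A1, 7
      size_t head = 8 - skew;            // sub_d T0, R0, T1 ; addi_d T0, T0, 8
      std::memcpy(dst, src, 8);          // one unaligned granule (cheap with UAL);
      dst += head;                       // head == 8 when already aligned, matching
      src += head;                       // the stub, so the copy is never wasted
      n -= head;                         // dst is now 8-byte aligned
    }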
+ // + address generate_disjoint_short_copy(bool aligned, Label &small, Label &large, +- Label &large_aligned, const char * name) { +- StubCodeMark mark(this, "StubRoutines", name); ++ const char * name) { + __ align(CodeEntryAlignment); ++ StubCodeMark mark(this, "StubRoutines", name); + address start = __ pc(); + + if (UseLASX) +@@ -1886,12 +1874,6 @@ class StubGenerator: public StubCodeGenerator { + + __ slli_d(A2, A2, 1); + +- if (large_aligned.is_bound()) { +- __ orr(T0, A0, A1); +- __ andi(T0, T0, 7); +- __ beqz(T0, large_aligned); +- } +- + __ b(large); + + return start; +@@ -1913,9 +1895,9 @@ class StubGenerator: public StubCodeGenerator { + // and stored atomically. + // + address generate_conjoint_short_copy(bool aligned, Label &small, Label &large, +- Label &large_aligned, const char *name) { +- StubCodeMark mark(this, "StubRoutines", name); ++ const char *name) { + __ align(CodeEntryAlignment); ++ StubCodeMark mark(this, "StubRoutines", name); + address start = __ pc(); + + array_overlap_test(StubRoutines::jshort_disjoint_arraycopy(), 1); +@@ -1928,12 +1910,6 @@ class StubGenerator: public StubCodeGenerator { + + __ slli_d(A2, A2, 1); + +- if (large_aligned.is_bound()) { +- __ orr(T0, A0, A1); +- __ andi(T0, T0, 7); +- __ beqz(T0, large_aligned); +- } +- + __ b(large); + + return start; +@@ -1941,8 +1917,8 @@ class StubGenerator: public StubCodeGenerator { + + // Int small copy: less than { int:7, lsx:7, lasx:9 } elements. + void generate_int_small_copy(Label &entry, const char *name) { +- StubCodeMark mark(this, "StubRoutines", name); + __ align(CodeEntryAlignment); ++ StubCodeMark mark(this, "StubRoutines", name); + + Label L; + __ bind(entry); +@@ -2075,8 +2051,8 @@ class StubGenerator: public StubCodeGenerator { + + // Generate maybe oop copy + void gen_maybe_oop_copy(bool is_oop, bool disjoint, bool aligned, Label &small, +- Label &large, Label &large_aligned, const char *name, +- int small_limit, int log2_elem_size, bool dest_uninitialized = false) { ++ Label &large, const char *name, int small_limit, ++ int log2_elem_size, bool dest_uninitialized = false) { + Label post, _large; + DecoratorSet decorators = DECORATORS_NONE; + BarrierSetAssembler *bs = nullptr; +@@ -2122,20 +2098,6 @@ class StubGenerator: public StubCodeGenerator { + __ bind(_large); + __ slli_d(A2, A2, log2_elem_size); + +- if (large_aligned.is_bound()) { +- __ orr(T0, A0, A1); +- __ andi(T0, T0, (1 << (log2_elem_size + 1)) - 1); +- if (is_oop) { +- Label skip; +- __ bnez(T0, skip); +- __ bl(large_aligned); +- __ b(post); +- __ bind(skip); +- } else { +- __ beqz(T0, large_aligned); +- } +- } +- + if (is_oop) { + __ bl(large); + } else { +@@ -2176,14 +2138,14 @@ class StubGenerator: public StubCodeGenerator { + // used by generate_conjoint_int_oop_copy(). 
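The deletions above all remove the same thing: the optional branch to a large_aligned entry, guarded by an (src | dst) & mask test. Once the large-copy routines align the destination themselves, a single large entry suffices. A sketch of the resulting dispatch, using illustrative function-pointer names for what the stubs express as sltui/bnez/b sequences:

    #include <cstddef>

    using copy_fn = void (*)(void* dst, const void* src, size_t count);

    void dispatch_copy(void* dst, const void* src, size_t count,
                       size_t small_limit, copy_fn small, copy_fn large) {
      if (count < small_limit) {   // sltui T0, A2, small_limit ; bnez T0, small
        small(dst, src, count);
      } else {                     // the (src|dst) alignment probe is gone:
        large(dst, src, count);    // 'large' self-aligns on the destination
      }
    }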
+ // + address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, Label &small, +- Label &large, Label &large_aligned, const char *name, +- int small_limit, bool dest_uninitialized = false) { +- StubCodeMark mark(this, "StubRoutines", name); ++ Label &large, const char *name, int small_limit, ++ bool dest_uninitialized = false) { + __ align(CodeEntryAlignment); ++ StubCodeMark mark(this, "StubRoutines", name); + address start = __ pc(); + +- gen_maybe_oop_copy(is_oop, true, aligned, small, large, large_aligned, +- name, small_limit, 2, dest_uninitialized); ++ gen_maybe_oop_copy(is_oop, true, aligned, small, large, name, ++ small_limit, 2, dest_uninitialized); + + return start; + } +@@ -2204,10 +2166,10 @@ class StubGenerator: public StubCodeGenerator { + // cache line boundaries will still be loaded and stored atomicly. + // + address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, Label &small, +- Label &large, Label &large_aligned, const char *name, +- int small_limit, bool dest_uninitialized = false) { +- StubCodeMark mark(this, "StubRoutines", name); ++ Label &large, const char *name, int small_limit, ++ bool dest_uninitialized = false) { + __ align(CodeEntryAlignment); ++ StubCodeMark mark(this, "StubRoutines", name); + address start = __ pc(); + + if (is_oop) { +@@ -2216,16 +2178,16 @@ class StubGenerator: public StubCodeGenerator { + array_overlap_test(StubRoutines::jint_disjoint_arraycopy(), 2); + } + +- gen_maybe_oop_copy(is_oop, false, aligned, small, large, large_aligned, +- name, small_limit, 2, dest_uninitialized); ++ gen_maybe_oop_copy(is_oop, false, aligned, small, large, name, ++ small_limit, 2, dest_uninitialized); + + return start; + } + + // Long small copy: less than { int:4, lsx:4, lasx:5 } elements. + void generate_long_small_copy(Label &entry, const char *name) { +- StubCodeMark mark(this, "StubRoutines", name); + __ align(CodeEntryAlignment); ++ StubCodeMark mark(this, "StubRoutines", name); + + Label L; + __ bind(entry); +@@ -2328,14 +2290,14 @@ class StubGenerator: public StubCodeGenerator { + // used by generate_conjoint_int_oop_copy(). + // + address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, Label &small, +- Label &large, Label &large_aligned, const char *name, +- int small_limit, bool dest_uninitialized = false) { +- StubCodeMark mark(this, "StubRoutines", name); ++ Label &large, const char *name, int small_limit, ++ bool dest_uninitialized = false) { + __ align(CodeEntryAlignment); ++ StubCodeMark mark(this, "StubRoutines", name); + address start = __ pc(); + +- gen_maybe_oop_copy(is_oop, true, aligned, small, large, large_aligned, +- name, small_limit, 3, dest_uninitialized); ++ gen_maybe_oop_copy(is_oop, true, aligned, small, large, name, ++ small_limit, 3, dest_uninitialized); + + return start; + } +@@ -2356,10 +2318,10 @@ class StubGenerator: public StubCodeGenerator { + // cache line boundaries will still be loaded and stored atomicly. 
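Independently of the alignment work, every stub generator above swaps two prologue lines: __ align(CodeEntryAlignment) now runs before the StubCodeMark is constructed (commit note "Mark stub code without alignment padding"). The mark records the current code pointer as the stub's start, so padding emitted after it would be attributed to the stub. A toy model of why the order matters, with an invented two-field buffer standing in for the assembler:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct Buf { uint8_t* pc; uint8_t* base; };   // stand-in for MacroAssembler

    static void align_to(Buf& b, ptrdiff_t n) {   // __ align(n): emit nop padding
      while ((b.pc - b.base) % n) *b.pc++ = 0;
    }

    int main() {
      uint8_t code[64] = {0};
      Buf b{code + 3, code};        // pretend the previous stub ended unaligned
      align_to(b, 16);              // padding first...
      uint8_t* stub_begin = b.pc;   // ...then StubCodeMark samples __ pc()
      std::printf("stub begins at offset %td; %td padding bytes excluded\n",
                  stub_begin - code, stub_begin - (code + 3));
      return 0;
    }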
+ // + address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, Label &small, +- Label &large, Label &large_aligned, const char *name, +- int small_limit, bool dest_uninitialized = false) { +- StubCodeMark mark(this, "StubRoutines", name); ++ Label &large, const char *name, int small_limit, ++ bool dest_uninitialized = false) { + __ align(CodeEntryAlignment); ++ StubCodeMark mark(this, "StubRoutines", name); + address start = __ pc(); + + if (is_oop) { +@@ -2368,8 +2330,8 @@ class StubGenerator: public StubCodeGenerator { + array_overlap_test(StubRoutines::jlong_disjoint_arraycopy(), 3); + } + +- gen_maybe_oop_copy(is_oop, false, aligned, small, large, large_aligned, +- name, small_limit, 3, dest_uninitialized); ++ gen_maybe_oop_copy(is_oop, false, aligned, small, large, name, ++ small_limit, 3, dest_uninitialized); + + return start; + } +@@ -2889,20 +2851,24 @@ class StubGenerator: public StubCodeGenerator { + + void generate_arraycopy_stubs() { + Label disjoint_large_copy, conjoint_large_copy; +- Label disjoint_large_copy_lsx, conjoint_large_copy_lsx; +- Label disjoint_large_copy_lasx, conjoint_large_copy_lasx; + Label byte_small_copy, short_small_copy, int_small_copy, long_small_copy; +- Label none; ++ int int_oop_small_limit, long_oop_small_limit; + +- generate_disjoint_large_copy(disjoint_large_copy, "disjoint_large_copy"); +- generate_conjoint_large_copy(conjoint_large_copy, "conjoint_large_copy"); +- if (UseLSX) { +- generate_disjoint_large_copy_lsx(disjoint_large_copy_lsx, "disjoint_large_copy_lsx"); +- generate_conjoint_large_copy_lsx(conjoint_large_copy_lsx, "conjoint_large_copy_lsx"); +- } + if (UseLASX) { +- generate_disjoint_large_copy_lasx(disjoint_large_copy_lasx, "disjoint_large_copy_lasx"); +- generate_conjoint_large_copy_lasx(conjoint_large_copy_lasx, "conjoint_large_copy_lasx"); ++ int_oop_small_limit = 9; ++ long_oop_small_limit = 5; ++ generate_disjoint_large_copy_lasx(disjoint_large_copy, "disjoint_large_copy_lasx"); ++ generate_conjoint_large_copy_lasx(conjoint_large_copy, "conjoint_large_copy_lasx"); ++ } else if (UseLSX) { ++ int_oop_small_limit = 7; ++ long_oop_small_limit = 4; ++ generate_disjoint_large_copy_lsx(disjoint_large_copy, "disjoint_large_copy_lsx"); ++ generate_conjoint_large_copy_lsx(conjoint_large_copy, "conjoint_large_copy_lsx"); ++ } else { ++ int_oop_small_limit = 7; ++ long_oop_small_limit = 4; ++ generate_disjoint_large_copy(disjoint_large_copy, "disjoint_large_copy_int"); ++ generate_conjoint_large_copy(conjoint_large_copy, "conjoint_large_copy_int"); + } + generate_byte_small_copy(byte_small_copy, "jbyte_small_copy"); + generate_short_small_copy(short_small_copy, "jshort_small_copy"); +@@ -2910,78 +2876,39 @@ class StubGenerator: public StubCodeGenerator { + generate_long_small_copy(long_small_copy, "jlong_small_copy"); + + if (UseCompressedOops) { +- if (UseLSX) { +- StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, int_small_copy, disjoint_large_copy_lsx, disjoint_large_copy, "oop_disjoint_arraycopy", 7); +- StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, int_small_copy, disjoint_large_copy_lsx, disjoint_large_copy, "oop_disjoint_arraycopy_uninit", 7, true); +- } else { +- StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, int_small_copy, disjoint_large_copy, none, "oop_disjoint_arraycopy", 7); +- StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, int_small_copy, disjoint_large_copy, 
none, "oop_disjoint_arraycopy_uninit", 7, true); +- } +- if (UseLASX) { +- StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, int_small_copy, conjoint_large_copy_lasx, conjoint_large_copy, "oop_arraycopy", 9); +- StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, int_small_copy, conjoint_large_copy_lasx, conjoint_large_copy, "oop_arraycopy_uninit", 9, true); +- } else if (UseLSX) { +- StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, int_small_copy, conjoint_large_copy_lsx, conjoint_large_copy, "oop_arraycopy", 7); +- StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, int_small_copy, conjoint_large_copy_lsx, conjoint_large_copy, "oop_arraycopy_uninit", 7, true); +- } else { +- StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, int_small_copy, conjoint_large_copy, none, "oop_arraycopy", 7); +- StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, int_small_copy, conjoint_large_copy, none, "oop_arraycopy_uninit", 7, true); +- } ++ StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, int_small_copy, disjoint_large_copy, ++ "oop_disjoint_arraycopy", int_oop_small_limit); ++ StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, int_small_copy, disjoint_large_copy, ++ "oop_disjoint_arraycopy_uninit", int_oop_small_limit, true); ++ StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, int_small_copy, conjoint_large_copy, ++ "oop_arraycopy", int_oop_small_limit); ++ StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, int_small_copy, conjoint_large_copy, ++ "oop_arraycopy_uninit", int_oop_small_limit, true); + } else { +- if (UseLASX) { +- StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, long_small_copy, disjoint_large_copy, disjoint_large_copy_lasx, "oop_disjoint_arraycopy", 5); +- StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, long_small_copy, disjoint_large_copy, disjoint_large_copy_lasx, "oop_disjoint_arraycopy_uninit", 5, true); +- StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, long_small_copy, conjoint_large_copy, conjoint_large_copy_lasx, "oop_arraycopy", 5); +- StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, long_small_copy, conjoint_large_copy, conjoint_large_copy_lasx, "oop_arraycopy_uninit", 5, true); +- } else if (UseLSX) { +- StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, long_small_copy, disjoint_large_copy, disjoint_large_copy_lsx, "oop_disjoint_arraycopy", 4); +- StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, long_small_copy, disjoint_large_copy, disjoint_large_copy_lsx, "oop_disjoint_arraycopy_uninit", 4, true); +- StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, long_small_copy, conjoint_large_copy, conjoint_large_copy_lsx, "oop_arraycopy", 4); +- StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, long_small_copy, conjoint_large_copy, conjoint_large_copy_lsx, "oop_arraycopy_uninit", 4, true); +- } else { +- StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, long_small_copy, disjoint_large_copy, none, "oop_disjoint_arraycopy", 4); +- StubRoutines::_oop_disjoint_arraycopy_uninit = 
generate_disjoint_long_oop_copy(false, true, long_small_copy, disjoint_large_copy, none, "oop_disjoint_arraycopy_uninit", 4, true); +- StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, long_small_copy, conjoint_large_copy, none, "oop_arraycopy", 4); +- StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, long_small_copy, conjoint_large_copy, conjoint_large_copy_lsx, "oop_arraycopy_uninit", 4, true); +- } ++ StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, long_small_copy, disjoint_large_copy, ++ "oop_disjoint_arraycopy", long_oop_small_limit); ++ StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, long_small_copy, disjoint_large_copy, ++ "oop_disjoint_arraycopy_uninit", long_oop_small_limit, true); ++ StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, long_small_copy, conjoint_large_copy, ++ "oop_arraycopy", long_oop_small_limit); ++ StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, long_small_copy, conjoint_large_copy, ++ "oop_arraycopy_uninit", long_oop_small_limit, true); + } + +- if (UseLASX) { +- StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, byte_small_copy, disjoint_large_copy_lasx, disjoint_large_copy_lsx, "jbyte_disjoint_arraycopy"); +- StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, short_small_copy, disjoint_large_copy_lasx, disjoint_large_copy, "jshort_disjoint_arraycopy"); +- StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, int_small_copy, disjoint_large_copy_lasx, disjoint_large_copy, "jint_disjoint_arraycopy", 9); ++ StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, byte_small_copy, disjoint_large_copy, "jbyte_disjoint_arraycopy"); ++ StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, short_small_copy, disjoint_large_copy, "jshort_disjoint_arraycopy"); ++ StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, int_small_copy, disjoint_large_copy, ++ "jint_disjoint_arraycopy", int_oop_small_limit); + +- StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, byte_small_copy, conjoint_large_copy_lasx, conjoint_large_copy_lsx, "jbyte_arraycopy"); +- StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, short_small_copy, conjoint_large_copy_lasx, conjoint_large_copy, "jshort_arraycopy"); +- StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, int_small_copy, conjoint_large_copy_lasx, conjoint_large_copy, "jint_arraycopy", 9); +- } else if (UseLSX) { +- StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, byte_small_copy, disjoint_large_copy_lsx, none, "jbyte_disjoint_arraycopy"); +- StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, short_small_copy, disjoint_large_copy_lsx, disjoint_large_copy, "jshort_disjoint_arraycopy"); +- StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, int_small_copy, disjoint_large_copy_lsx, disjoint_large_copy, "jint_disjoint_arraycopy", 7); ++ StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, byte_small_copy, conjoint_large_copy, "jbyte_arraycopy"); ++ StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, short_small_copy, conjoint_large_copy, "jshort_arraycopy"); ++ StubRoutines::_jint_arraycopy = 
generate_conjoint_int_oop_copy(false, false, int_small_copy, conjoint_large_copy, ++ "jint_arraycopy", int_oop_small_limit); + +- StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, byte_small_copy, conjoint_large_copy_lsx, none, "jbyte_arraycopy"); +- StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, short_small_copy, conjoint_large_copy_lsx, conjoint_large_copy, "jshort_arraycopy"); +- StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, int_small_copy, conjoint_large_copy_lsx, conjoint_large_copy, "jint_arraycopy", 7); +- } else { +- StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, byte_small_copy, disjoint_large_copy, none, "jbyte_disjoint_arraycopy"); +- StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, short_small_copy, disjoint_large_copy, none, "jshort_disjoint_arraycopy"); +- StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, int_small_copy, disjoint_large_copy, none, "jint_disjoint_arraycopy", 7); +- +- StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, byte_small_copy, conjoint_large_copy, none, "jbyte_arraycopy"); +- StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, short_small_copy, conjoint_large_copy, none, "jshort_arraycopy"); +- StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, int_small_copy, conjoint_large_copy, none, "jint_arraycopy", 7); +- } +- +- if (UseLASX) { +- StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, long_small_copy, disjoint_large_copy, disjoint_large_copy_lasx, "jlong_disjoint_arraycopy", 5); +- StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, long_small_copy, conjoint_large_copy, conjoint_large_copy_lasx, "jlong_arraycopy", 5); +- } else if (UseLSX) { +- StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, long_small_copy, disjoint_large_copy, disjoint_large_copy_lsx, "jlong_disjoint_arraycopy", 4); +- StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, long_small_copy, conjoint_large_copy, conjoint_large_copy_lsx, "jlong_arraycopy", 4); +- } else { +- StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, long_small_copy, disjoint_large_copy, none, "jlong_disjoint_arraycopy", 4); +- StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, long_small_copy, conjoint_large_copy, none, "jlong_arraycopy", 4); +- } ++ StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, long_small_copy, disjoint_large_copy, ++ "jlong_disjoint_arraycopy", long_oop_small_limit); ++ StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, long_small_copy, conjoint_large_copy, ++ "jlong_arraycopy", long_oop_small_limit); + + // We don't generate specialized code for HeapWord-aligned source + // arrays, so just use the code we've already generated + +commit cece1917fcfc054ec2d8e7db859536b2d19b208d +Author: loongson-jvm +Date: Mon Jul 3 21:07:28 2023 +0800 + + Update (2023.07.03) + + 31006: Fix VectorInsert + 30358: Add support for ordering memory barriers + 9854: Implement cross modify fence + +diff --git a/src/hotspot/cpu/loongarch/assembler_loongarch.hpp b/src/hotspot/cpu/loongarch/assembler_loongarch.hpp +index 40a3aea36f8..af65eb878e4 100644 +--- a/src/hotspot/cpu/loongarch/assembler_loongarch.hpp ++++ 
b/src/hotspot/cpu/loongarch/assembler_loongarch.hpp +@@ -1292,6 +1292,15 @@ class Assembler : public AbstractAssembler { + static int high6 (int x) { return high(x, 6); } + + ++ static ALWAYSINLINE void patch(address a, int length, uint32_t val) { ++ guarantee(val < (1ULL << length), "Field too big for insn"); ++ guarantee(length > 0, "length > 0"); ++ unsigned target = *(unsigned *)a; ++ target = (target >> length) << length; ++ target |= val; ++ *(unsigned *)a = target; ++ } ++ + protected: + // help methods for instruction ejection + +@@ -2028,18 +2037,25 @@ public: + void bceqz(ConditionalFlagRegister cj, Label& L) { bceqz(cj, target(L)); } + void bcnez(ConditionalFlagRegister cj, Label& L) { bcnez(cj, target(L)); } + +- // Now Membar_mask_bits is 0,Need to fix it after LA6000 + typedef enum { +- StoreStore = 0, +- LoadStore = 0, +- StoreLoad = 0, +- LoadLoad = 0, +- AnyAny = 0 ++ // hint[4] ++ Completion = 0, ++ Ordering = (1 << 4), ++ ++ // The bitwise-not of the below constants is corresponding to the hint. This is convenient for OR operation. ++ // hint[3:2] and hint[1:0] ++ LoadLoad = ((1 << 3) | (1 << 1)), ++ LoadStore = ((1 << 3) | (1 << 0)), ++ StoreLoad = ((1 << 2) | (1 << 1)), ++ StoreStore = ((1 << 2) | (1 << 0)), ++ AnyAny = ((3 << 2) | (3 << 0)), + } Membar_mask_bits; + + // Serializes memory and blows flags + void membar(Membar_mask_bits hint) { +- dbar(hint); ++ assert((hint & (3 << 0)) != 0, "membar mask unsupported!"); ++ assert((hint & (3 << 2)) != 0, "membar mask unsupported!"); ++ dbar(Ordering | (~hint & 0xf)); + } + + // LSX and LASX +diff --git a/src/hotspot/cpu/loongarch/c2_MacroAssembler_loongarch.cpp b/src/hotspot/cpu/loongarch/c2_MacroAssembler_loongarch.cpp +index e3a01f1f25d..ef520a39ff3 100644 +--- a/src/hotspot/cpu/loongarch/c2_MacroAssembler_loongarch.cpp ++++ b/src/hotspot/cpu/loongarch/c2_MacroAssembler_loongarch.cpp +@@ -1,6 +1,6 @@ + /* + * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. +- * Copyright (c) 2021, 2022, Loongson Technology. All rights reserved. ++ * Copyright (c) 2021, 2023, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
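The rewritten Membar_mask_bits above is worth decoding. Each barrier kind is expressed by the bits that are *set*, but the LoongArch dbar ordering-hint form is active-low, hence dbar(Ordering | (~hint & 0xf)) in membar(). A small self-check reproduces the hint constants that appear later in this patch in orderAccess_linux_loongarch.hpp:

    #include <cassert>
    #include <cstdio>

    enum Mask {                       // mirrors the enum in assembler_loongarch.hpp
      Ordering   = 1 << 4,
      LoadLoad   = (1 << 3) | (1 << 1),
      LoadStore  = (1 << 3) | (1 << 0),
      StoreLoad  = (1 << 2) | (1 << 1),
      StoreStore = (1 << 2) | (1 << 0),
      AnyAny     = (3 << 2) | (3 << 0),
    };

    static int dbar_hint(int mask) { return Ordering | (~mask & 0xf); }

    int main() {
      assert(dbar_hint(LoadLoad)               == 0x15);  // OrderAccess::loadload()
      assert(dbar_hint(StoreStore)             == 0x1a);  // storestore()
      assert(dbar_hint(LoadStore)              == 0x16);  // loadstore()
      assert(dbar_hint(StoreLoad)              == 0x19);  // storeload()
      assert(dbar_hint(LoadLoad | LoadStore)   == 0x14);  // acquire()
      assert(dbar_hint(LoadStore | StoreStore) == 0x12);  // release()
      assert(dbar_hint(AnyAny)                 == 0x10);  // fence(): full barrier
      std::puts("dbar hint encodings check out");
      return 0;
    }

The added asserts in membar() reject masks that leave either the load half or the store half empty, presumably because such a mask would invert to an all-ones field and change the hint's meaning.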
+ * + * This code is free software; you can redistribute it and/or modify it +@@ -367,7 +367,7 @@ void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register r + move(AT, R0); + bnez(scrReg, DONE_SET); + +- dbar(0); ++ membar(Assembler::Membar_mask_bits(LoadStore|StoreStore)); + st_d(R0, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes() - 2)); + li(resReg, 1); + b(DONE); +diff --git a/src/hotspot/cpu/loongarch/loongarch_64.ad b/src/hotspot/cpu/loongarch/loongarch_64.ad +index 703aeafc855..daa1db594ce 100644 +--- a/src/hotspot/cpu/loongarch/loongarch_64.ad ++++ b/src/hotspot/cpu/loongarch/loongarch_64.ad +@@ -2114,7 +2114,7 @@ void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const { + if (C->stub_function() == NULL && BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) { + st->print("\n\t"); + st->print("ld_d T1, guard, 0\n\t"); +- st->print("dbar 0\n\t"); ++ st->print("membar LoadLoad\n\t"); + st->print("ld_d T2, TREG, thread_disarmed_offset\n\t"); + st->print("beq T1, T2, skip\n\t"); + st->print("\n\t"); +@@ -15195,13 +15195,19 @@ instruct insert32B(vecY dst, mRegI val, immIU5 idx) %{ + match(Set dst (VectorInsert (Binary dst val) idx)); + format %{ "xvinsert $dst, $val, $idx\t# @insert32B" %} + ins_encode %{ +- if ($idx$$constant < 16) { +- __ vinsgr2vr_b($dst$$FloatRegister, $val$$Register, $idx$$constant); +- } else { +- __ xvpermi_d($dst$$FloatRegister, $dst$$FloatRegister, 0b01001110); +- __ vinsgr2vr_b($dst$$FloatRegister, $val$$Register, $idx$$constant-16); +- __ xvpermi_d($dst$$FloatRegister, $dst$$FloatRegister, 0b01001110); ++ int idx = $idx$$constant; ++ int msbw, lsbw; ++ switch (idx % 4) { ++ case 0: msbw = 7, lsbw = 0; break; ++ case 1: msbw = 15, lsbw = 8; break; ++ case 2: msbw = 23, lsbw = 16; break; ++ case 3: msbw = 31, lsbw = 24; break; ++ default: ++ ShouldNotReachHere(); + } ++ __ xvpickve2gr_w(SCR1, $dst$$FloatRegister, idx >> 2); ++ __ bstrins_w(SCR1, $val$$Register, msbw, lsbw); ++ __ xvinsgr2vr_w($dst$$FloatRegister, SCR1, idx >> 2); + %} + ins_pipe( pipe_slow ); + %} +@@ -15211,13 +15217,12 @@ instruct insert16S(vecY dst, mRegI val, immIU4 idx) %{ + match(Set dst (VectorInsert (Binary dst val) idx)); + format %{ "xvinsert $dst, $val, $idx\t# @insert16S" %} + ins_encode %{ +- if ($idx$$constant < 8) { +- __ vinsgr2vr_h($dst$$FloatRegister, $val$$Register, $idx$$constant); +- } else { +- __ xvpermi_d($dst$$FloatRegister, $dst$$FloatRegister, 0b01001110); +- __ vinsgr2vr_h($dst$$FloatRegister, $val$$Register, $idx$$constant-8); +- __ xvpermi_d($dst$$FloatRegister, $dst$$FloatRegister, 0b01001110); +- } ++ int idx = $idx$$constant; ++ int msbw = (idx % 2) ? 31 : 15; ++ int lsbw = (idx % 2) ? 
16 : 0; ++ __ xvpickve2gr_w(SCR1, $dst$$FloatRegister, idx >> 1); ++ __ bstrins_w(SCR1, $val$$Register, msbw, lsbw); ++ __ xvinsgr2vr_w($dst$$FloatRegister, SCR1, idx >> 1); + %} + ins_pipe( pipe_slow ); + %} +diff --git a/src/hotspot/cpu/loongarch/macroAssembler_loongarch.cpp b/src/hotspot/cpu/loongarch/macroAssembler_loongarch.cpp +index 882c43bf592..f1cf308b447 100644 +--- a/src/hotspot/cpu/loongarch/macroAssembler_loongarch.cpp ++++ b/src/hotspot/cpu/loongarch/macroAssembler_loongarch.cpp +@@ -1857,7 +1857,7 @@ void MacroAssembler::cmpxchg(Address addr, Register oldval, Register newval, + + bind(fail); + if (barrier) +- membar(LoadLoad); ++ dbar(0x700); + if (retold && oldval != R0) + move(oldval, resflag); + if (!exchange) { +@@ -1882,7 +1882,7 @@ void MacroAssembler::cmpxchg(Address addr, Register oldval, Register newval, + + bind(neq); + if (barrier) +- membar(LoadLoad); ++ dbar(0x700); + if (retold && oldval != R0) + move(oldval, tmp); + if (fail) +@@ -1916,7 +1916,7 @@ void MacroAssembler::cmpxchg32(Address addr, Register oldval, Register newval, + + bind(fail); + if (barrier) +- membar(LoadLoad); ++ dbar(0x700); + if (retold && oldval != R0) + move(oldval, resflag); + if (!exchange) { +@@ -1943,7 +1943,7 @@ void MacroAssembler::cmpxchg32(Address addr, Register oldval, Register newval, R + + bind(neq); + if (barrier) +- membar(LoadLoad); ++ dbar(0x700); + if (retold && oldval != R0) + move(oldval, tmp); + if (fail) +@@ -3390,10 +3390,14 @@ void MacroAssembler::membar(Membar_mask_bits hint){ + address last = code()->last_insn(); + if (last != NULL && ((NativeInstruction*)last)->is_sync() && prev == last) { + code()->set_last_insn(NULL); ++ NativeMembar *membar = (NativeMembar*)prev; ++ // merged membar ++ // e.g. LoadLoad and LoadLoad|LoadStore to LoadLoad|LoadStore ++ membar->set_hint(membar->get_hint() & (~hint & 0xF)); + block_comment("merged membar"); + } else { + code()->set_last_insn(pc()); +- dbar(hint); ++ Assembler::membar(hint); + } + } + +diff --git a/src/hotspot/cpu/loongarch/nativeInst_loongarch.hpp b/src/hotspot/cpu/loongarch/nativeInst_loongarch.hpp +index 2f126991338..e445ebeb8be 100644 +--- a/src/hotspot/cpu/loongarch/nativeInst_loongarch.hpp ++++ b/src/hotspot/cpu/loongarch/nativeInst_loongarch.hpp +@@ -520,4 +520,11 @@ inline NativeCallTrampolineStub* nativeCallTrampolineStub_at(address addr) { + assert(ni->is_NativeCallTrampolineStub_at(), "no call trampoline found"); + return (NativeCallTrampolineStub*)addr; + } ++ ++class NativeMembar : public NativeInstruction { ++public: ++ unsigned int get_hint() { return Assembler::low(insn_word(), 4); } ++ void set_hint(int hint) { Assembler::patch(addr_at(0), 4, hint); } ++}; ++ + #endif // CPU_LOONGARCH_NATIVEINST_LOONGARCH_HPP +diff --git a/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp b/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp +index 0fe2f8f0d36..44af7805b8a 100644 +--- a/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp ++++ b/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp +@@ -1,6 +1,6 @@ + /* + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. +- * Copyright (c) 2015, 2021, Loongson Technology. All rights reserved. ++ * Copyright (c) 2015, 2023, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
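The "merged membar" path above also got real. Previously merging only worked because every barrier was the same dbar 0, so the second one could simply be skipped; now, since the stored 4-bit hint is active-low (a cleared bit means "enforce this ordering"), two adjacent barriers merge by AND-ing their stored hints, which yields the union of the orderings they enforce. NativeMembar::set_hint uses the new Assembler::patch helper to rewrite the low bits of the already-emitted instruction in place. A check of the example from the code comment:

    #include <cassert>

    static int stored(int mask) { return ~mask & 0xf; }  // low 4 dbar hint bits

    int main() {
      const int LoadLoad  = (1 << 3) | (1 << 1);
      const int LoadStore = (1 << 3) | (1 << 0);
      // "e.g. LoadLoad and LoadLoad|LoadStore to LoadLoad|LoadStore"
      int merged = stored(LoadLoad) & stored(LoadLoad | LoadStore);
      assert(merged == stored(LoadLoad | LoadStore));
      return 0;
    }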
+ * + * This code is free software; you can redistribute it and/or modify it +@@ -1964,8 +1964,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm, + + // Now set thread in native + __ addi_d(AT, R0, _thread_in_native); +- if(os::is_MP()) { +- __ dbar(0); // store release ++ if (os::is_MP()) { ++ __ membar(Assembler::Membar_mask_bits(__ LoadStore|__ StoreStore)); + } + __ st_w(AT, thread, in_bytes(JavaThread::thread_state_offset())); + } +@@ -2020,8 +2020,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm, + // Thread A is resumed to finish this native method, but doesn't block here since it + // didn't see any synchronization is progress, and escapes. + __ addi_d(AT, R0, _thread_in_native_trans); +- if(os::is_MP()) { +- __ dbar(0); // store release ++ if (os::is_MP()) { ++ __ membar(Assembler::Membar_mask_bits(__ LoadStore|__ StoreStore)); + } + __ st_w(AT, thread, in_bytes(JavaThread::thread_state_offset())); + +@@ -2068,8 +2068,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm, + + // change thread state + __ addi_d(AT, R0, _thread_in_Java); +- if(os::is_MP()) { +- __ dbar(0); // store release ++ if (os::is_MP()) { ++ __ membar(Assembler::Membar_mask_bits(__ LoadStore|__ StoreStore)); + } + __ st_w(AT, thread, in_bytes(JavaThread::thread_state_offset())); + __ bind(after_transition); +diff --git a/src/hotspot/cpu/loongarch/templateInterpreterGenerator_loongarch.cpp b/src/hotspot/cpu/loongarch/templateInterpreterGenerator_loongarch.cpp +index 88b938103bc..02af7c8ffa7 100644 +--- a/src/hotspot/cpu/loongarch/templateInterpreterGenerator_loongarch.cpp ++++ b/src/hotspot/cpu/loongarch/templateInterpreterGenerator_loongarch.cpp +@@ -1,6 +1,6 @@ + /* + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. +- * Copyright (c) 2015, 2022, Loongson Technology. All rights reserved. ++ * Copyright (c) 2015, 2023, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
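The dbar(0) "store release" replacements above (and the matching interpreter hunks below) tighten the thread-state transition from a full barrier to an actual release: membar(LoadStore|StoreStore) guarantees that all earlier loads and stores are visible before the st_w that publishes the new state, and nothing more. In portable terms this is the fence-plus-relaxed-store form of a release store; a sketch assuming a plain int state:

    #include <atomic>

    std::atomic<int> thread_state{0};

    void set_state_release(int s) {
      // membar(LoadStore|StoreStore): dbar 0x12 on LoongArch
      std::atomic_thread_fence(std::memory_order_release);
      // st_w AT, thread, thread_state_offset()
      thread_state.store(s, std::memory_order_relaxed);
    }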
+ * + * This code is free software; you can redistribute it and/or modify it +@@ -1378,8 +1378,8 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { + #endif + + __ li(t, _thread_in_native); +- if(os::is_MP()) { +- __ dbar(0); // store release ++ if (os::is_MP()) { ++ __ membar(Assembler::Membar_mask_bits(__ LoadStore|__ StoreStore)); + } + __ st_w(t, thread, in_bytes(JavaThread::thread_state_offset())); + +@@ -1403,8 +1403,8 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { + __ get_thread(thread); + #endif + __ li(t, _thread_in_native_trans); +- if(os::is_MP()) { +- __ dbar(0); // store release ++ if (os::is_MP()) { ++ __ membar(Assembler::Membar_mask_bits(__ LoadStore|__ StoreStore)); + } + __ st_w(t, thread, in_bytes(JavaThread::thread_state_offset())); + +@@ -1447,8 +1447,8 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) { + + // change thread state + __ li(t, _thread_in_Java); +- if(os::is_MP()) { +- __ dbar(0); // store release ++ if (os::is_MP()) { ++ __ membar(Assembler::Membar_mask_bits(__ LoadStore|__ StoreStore)); + } + __ st_w(t, thread, in_bytes(JavaThread::thread_state_offset())); + __ reset_last_Java_frame(thread, true); +diff --git a/src/hotspot/cpu/loongarch/templateTable_loongarch_64.cpp b/src/hotspot/cpu/loongarch/templateTable_loongarch_64.cpp +index 138bf701bf8..c0d1daea305 100644 +--- a/src/hotspot/cpu/loongarch/templateTable_loongarch_64.cpp ++++ b/src/hotspot/cpu/loongarch/templateTable_loongarch_64.cpp +@@ -2176,38 +2176,6 @@ void TemplateTable::_return(TosState state) { + __ jr(T4); + } + +-// ---------------------------------------------------------------------------- +-// Volatile variables demand their effects be made known to all CPU's +-// in order. Store buffers on most chips allow reads & writes to +-// reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode +-// without some kind of memory barrier (i.e., it's not sufficient that +-// the interpreter does not reorder volatile references, the hardware +-// also must not reorder them). +-// +-// According to the new Java Memory Model (JMM): +-// (1) All volatiles are serialized wrt to each other. ALSO reads & +-// writes act as aquire & release, so: +-// (2) A read cannot let unrelated NON-volatile memory refs that +-// happen after the read float up to before the read. It's OK for +-// non-volatile memory refs that happen before the volatile read to +-// float down below it. +-// (3) Similar a volatile write cannot let unrelated NON-volatile +-// memory refs that happen BEFORE the write float down to after the +-// write. It's OK for non-volatile memory refs that happen after the +-// volatile write to float up before it. +-// +-// We only put in barriers around volatile refs (they are expensive), +-// not _between_ memory refs (that would require us to track the +-// flavor of the previous memory refs). Requirements (2) and (3) +-// require some barriers before volatile stores and after volatile +-// loads. These nearly cover requirement (1) but miss the +-// volatile-store-volatile-load case. This final case is placed after +-// volatile-stores although it could just as well go before +-// volatile-loads. +-void TemplateTable::volatile_barrier() { +- if(os::is_MP()) __ membar(__ StoreLoad); +-} +- + // we dont shift left 2 bits in get_cache_and_index_at_bcp + // for we always need shift the index we use it. 
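With volatile_barrier() (a blanket StoreLoad) deleted, the hunks that follow give each volatile field access exactly the fences the deleted comment's JMM rules call for: AnyAny before a volatile read that may follow a volatile write, LoadLoad|LoadStore after a volatile read (acquire), StoreStore|LoadStore before a volatile write (release), and StoreLoad|StoreStore after it. A portable sketch of the read/write pair (the trailing write fence is approximated here with a seq_cst fence, which is slightly stronger):

    #include <atomic>

    std::atomic<int> field{0};

    int volatile_read() {
      int x = field.load(std::memory_order_relaxed);        // the access itself
      std::atomic_thread_fence(std::memory_order_acquire);  // LoadLoad|LoadStore
      return x;
    }

    void volatile_write(int x) {
      std::atomic_thread_fence(std::memory_order_release);  // StoreStore|LoadStore
      field.store(x, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_seq_cst);  // ~ StoreLoad|StoreStore
    }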
the ConstantPoolCacheEntry + // is 16-byte long, index is the index in +@@ -2412,7 +2380,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr + + Label notVolatile; + __ beq(scratch, R0, notVolatile); +- volatile_barrier(); ++ __ membar(MacroAssembler::AnyAny); + __ bind(notVolatile); + } + +@@ -2558,7 +2526,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr + { + Label notVolatile; + __ beq(scratch, R0, notVolatile); +- volatile_barrier(); ++ __ membar(Assembler::Membar_mask_bits(__ LoadLoad | __ LoadStore)); + __ bind(notVolatile); + } + } +@@ -2674,7 +2642,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr + + Label notVolatile; + __ beq(scratch, R0, notVolatile); +- volatile_barrier(); ++ __ membar(Assembler::Membar_mask_bits(__ StoreStore | __ LoadStore)); + __ bind(notVolatile); + } + +@@ -2846,7 +2814,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr + { + Label notVolatile; + __ beq(scratch, R0, notVolatile); +- volatile_barrier(); ++ __ membar(Assembler::Membar_mask_bits(__ StoreLoad | __ StoreStore)); + __ bind(notVolatile); + } + } +@@ -2956,7 +2924,7 @@ void TemplateTable::fast_storefield(TosState state) { + + Label notVolatile; + __ beq(scratch, R0, notVolatile); +- volatile_barrier(); ++ __ membar(Assembler::Membar_mask_bits(__ StoreStore | __ LoadStore)); + __ bind(notVolatile); + } + +@@ -3005,7 +2973,7 @@ void TemplateTable::fast_storefield(TosState state) { + { + Label notVolatile; + __ beq(scratch, R0, notVolatile); +- volatile_barrier(); ++ __ membar(Assembler::Membar_mask_bits(__ StoreLoad | __ StoreStore)); + __ bind(notVolatile); + } + } +@@ -3056,7 +3024,7 @@ void TemplateTable::fast_accessfield(TosState state) { + + Label notVolatile; + __ beq(scratch, R0, notVolatile); +- volatile_barrier(); ++ __ membar(MacroAssembler::AnyAny); + __ bind(notVolatile); + } + +@@ -3100,7 +3068,7 @@ void TemplateTable::fast_accessfield(TosState state) { + { + Label notVolatile; + __ beq(scratch, R0, notVolatile); +- volatile_barrier(); ++ __ membar(Assembler::Membar_mask_bits(__ LoadLoad | __ LoadStore)); + __ bind(notVolatile); + } + } +@@ -3130,7 +3098,7 @@ void TemplateTable::fast_xaccess(TosState state) { + + Label notVolatile; + __ beq(scratch, R0, notVolatile); +- volatile_barrier(); ++ __ membar(MacroAssembler::AnyAny); + __ bind(notVolatile); + } + +@@ -3155,7 +3123,7 @@ void TemplateTable::fast_xaccess(TosState state) { + { + Label notVolatile; + __ beq(scratch, R0, notVolatile); +- volatile_barrier(); ++ __ membar(Assembler::Membar_mask_bits(__ LoadLoad | __ LoadStore)); + __ bind(notVolatile); + } + } +diff --git a/src/hotspot/os_cpu/linux_loongarch/atomic_linux_loongarch.hpp b/src/hotspot/os_cpu/linux_loongarch/atomic_linux_loongarch.hpp +index 4fab36f92b4..beb717b67ff 100644 +--- a/src/hotspot/os_cpu/linux_loongarch/atomic_linux_loongarch.hpp ++++ b/src/hotspot/os_cpu/linux_loongarch/atomic_linux_loongarch.hpp +@@ -1,6 +1,6 @@ + /* + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. +- * Copyright (c) 2015, 2021, Loongson Technology. All rights reserved. ++ * Copyright (c) 2015, 2023, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
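Note the dbar 0x700 in the cmpxchg hunks just below, and in MacroAssembler::cmpxchg earlier: on the failure path of an LL/SC compare-and-swap the old code issued a full dbar 0 or a LoadLoad barrier, while 0x700 appears to be a Loongson-specific hint that only orders loads from the same address, which is all the failure path needs (the observed old value must not be reordered with a later reload). In portable terms the change keeps acquire semantics on failure while paying less; a sketch:

    #include <atomic>

    std::atomic<int> guard{0};

    bool cas(int expected, int desired) {
      return guard.compare_exchange_strong(
          expected, desired,
          std::memory_order_acq_rel,    // success ordering
          std::memory_order_acquire);   // failure ordering; cf. dbar 0x700
    }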
+ * + * This code is free software; you can redistribute it and/or modify it +@@ -186,7 +186,7 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest, + " sc.w %[temp], %[dest] \n\t" + " beqz %[temp], 1b \n\t" + " b 3f \n\t" +- "2: dbar 0 \n\t" ++ "2: dbar 0x700 \n\t" + "3: \n\t" + : [prev] "=&r" (prev), [temp] "=&r" (temp) + : [_old] "r" (compare_value), [_new] "r" (exchange_value), [dest] "ZC" (*dest) +@@ -227,7 +227,7 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest, + " sc.d %[temp], %[dest] \n\t" + " beqz %[temp], 1b \n\t" + " b 3f \n\t" +- "2: dbar 0 \n\t" ++ "2: dbar 0x700 \n\t" + "3: \n\t" + : [prev] "=&r" (prev), [temp] "=&r" (temp) + : [_old] "r" (compare_value), [_new] "r" (exchange_value), [dest] "ZC" (*dest) +diff --git a/src/hotspot/os_cpu/linux_loongarch/orderAccess_linux_loongarch.hpp b/src/hotspot/os_cpu/linux_loongarch/orderAccess_linux_loongarch.hpp +index 23a9d27b0f4..6236e741d05 100644 +--- a/src/hotspot/os_cpu/linux_loongarch/orderAccess_linux_loongarch.hpp ++++ b/src/hotspot/os_cpu/linux_loongarch/orderAccess_linux_loongarch.hpp +@@ -31,21 +31,21 @@ + // Included in orderAccess.hpp header file. + + // Implementation of class OrderAccess. +-#define inlasm_sync() if (os::is_ActiveCoresMP()) \ ++#define inlasm_sync(v) if (os::is_ActiveCoresMP()) \ + __asm__ __volatile__ ("nop" : : : "memory"); \ + else \ +- __asm__ __volatile__ ("dbar 0" : : : "memory"); +- +-inline void OrderAccess::loadload() { inlasm_sync(); } +-inline void OrderAccess::storestore() { inlasm_sync(); } +-inline void OrderAccess::loadstore() { inlasm_sync(); } +-inline void OrderAccess::storeload() { inlasm_sync(); } +- +-inline void OrderAccess::acquire() { inlasm_sync(); } +-inline void OrderAccess::release() { inlasm_sync(); } +-inline void OrderAccess::fence() { inlasm_sync(); } +-inline void OrderAccess::cross_modify_fence_impl() { inlasm_sync(); } +- ++ __asm__ __volatile__ ("dbar %0" : :"K"(v) : "memory"); ++#define inlasm_synci() __asm__ __volatile__ ("ibar 0" : : : "memory"); ++ ++inline void OrderAccess::loadload() { inlasm_sync(0x15); } ++inline void OrderAccess::storestore() { inlasm_sync(0x1a); } ++inline void OrderAccess::loadstore() { inlasm_sync(0x16); } ++inline void OrderAccess::storeload() { inlasm_sync(0x19); } ++ ++inline void OrderAccess::acquire() { inlasm_sync(0x14); } ++inline void OrderAccess::release() { inlasm_sync(0x12); } ++inline void OrderAccess::fence() { inlasm_sync(0x10); } ++inline void OrderAccess::cross_modify_fence_impl() { inlasm_synci(); } + + #undef inlasm_sync + +diff --git a/src/hotspot/os_cpu/linux_mips/orderAccess_linux_mips.hpp b/src/hotspot/os_cpu/linux_mips/orderAccess_linux_mips.hpp +index 460d118c869..a92bf43bdbb 100644 +--- a/src/hotspot/os_cpu/linux_mips/orderAccess_linux_mips.hpp ++++ b/src/hotspot/os_cpu/linux_mips/orderAccess_linux_mips.hpp +@@ -35,6 +35,7 @@ + __asm__ __volatile__ ("nop" : : : "memory"); \ + else \ + __asm__ __volatile__ ("sync" : : : "memory"); ++#define inlasm_synci() __asm__ __volatile__ ("synci 0($0)" : : : "memory"); + + inline void OrderAccess::loadload() { inlasm_sync(); } + inline void OrderAccess::storestore() { inlasm_sync(); } +@@ -44,8 +45,7 @@ inline void OrderAccess::storeload() { inlasm_sync(); } + inline void OrderAccess::acquire() { inlasm_sync(); } + inline void OrderAccess::release() { inlasm_sync(); } + inline void OrderAccess::fence() { inlasm_sync(); } +-inline void OrderAccess::cross_modify_fence_impl() { inlasm_sync(); } +- ++inline void 
OrderAccess::cross_modify_fence_impl() { inlasm_synci(); } + + #undef inlasm_sync + + +commit 2eceaca5eb843f2c040de99e82cd17a24b7167c1 +Merge: f3b0a23f659 22cdf79dce8 +Author: aoqi +Date: Wed May 17 14:31:52 2023 +0800 + + Merge + +diff --cc test/hotspot/jtreg/compiler/lib/ir_framework/IRNode.java +index ecac04f43b6,bbbec26406d..5989d3fd6c2 +--- a/test/hotspot/jtreg/compiler/lib/ir_framework/IRNode.java ++++ b/test/hotspot/jtreg/compiler/lib/ir_framework/IRNode.java +@@@ -21,15 -21,9 +21,15 @@@ + * questions. + */ + + +/* + + * This file has been modified by Loongson Technology in 2022, These + + * modifications are Copyright (c) 2022, Loongson Technology, and are made + + * available on the same license terms set forth above. + + */ + + + package compiler.lib.ir_framework; + +- import compiler.lib.ir_framework.driver.IRMatcher; ++ import compiler.lib.ir_framework.driver.irmatching.IRMatcher; + import compiler.lib.ir_framework.shared.*; + import jdk.test.lib.Platform; + import sun.hotspot.WhiteBox; + +commit f3b0a23f659eef01237010d69ea71d0aee9bf1a1 +Author: loongson-jvm +Date: Wed May 17 14:29:44 2023 +0800 + + Update (2023.05.17) + + 29453: Some cpu features use hwcap detect + +diff --git a/src/hotspot/cpu/loongarch/vm_version_loongarch.cpp b/src/hotspot/cpu/loongarch/vm_version_loongarch.cpp +index 4cc21e7dd08..c9a19b379b7 100644 +--- a/src/hotspot/cpu/loongarch/vm_version_loongarch.cpp ++++ b/src/hotspot/cpu/loongarch/vm_version_loongarch.cpp +@@ -145,27 +145,9 @@ uint32_t VM_Version::get_feature_flags_by_cpucfg() { + } else if (_cpuid_info.cpucfg_info_id1.bits.ARCH == 0b10 ) { + result |= CPU_LA64; + } +- if (_cpuid_info.cpucfg_info_id1.bits.UAL != 0) +- result |= CPU_UAL; + + if (_cpuid_info.cpucfg_info_id2.bits.FP_CFG != 0) + result |= CPU_FP; +- if (_cpuid_info.cpucfg_info_id2.bits.LSX != 0) +- result |= CPU_LSX; +- if (_cpuid_info.cpucfg_info_id2.bits.LASX != 0) +- result |= CPU_LASX; +- if (_cpuid_info.cpucfg_info_id2.bits.COMPLEX != 0) +- result |= CPU_COMPLEX; +- if (_cpuid_info.cpucfg_info_id2.bits.CRYPTO != 0) +- result |= CPU_CRYPTO; +- if (_cpuid_info.cpucfg_info_id2.bits.LBT_X86 != 0) +- result |= CPU_LBT_X86; +- if (_cpuid_info.cpucfg_info_id2.bits.LBT_ARM != 0) +- result |= CPU_LBT_ARM; +- if (_cpuid_info.cpucfg_info_id2.bits.LBT_MIPS != 0) +- result |= CPU_LBT_MIPS; +- if (_cpuid_info.cpucfg_info_id2.bits.LAM != 0) +- result |= CPU_LAM; + + if (_cpuid_info.cpucfg_info_id3.bits.CCDMA != 0) + result |= CPU_CCDMA; +@@ -185,8 +167,10 @@ void VM_Version::get_processor_features() { + + clean_cpuFeatures(); + ++ get_os_cpu_info(); ++ + get_cpu_info_stub(&_cpuid_info); +- _features = get_feature_flags_by_cpucfg(); ++ _features |= get_feature_flags_by_cpucfg(); + + _supports_cx8 = true; + +diff --git a/src/hotspot/cpu/loongarch/vm_version_loongarch.hpp b/src/hotspot/cpu/loongarch/vm_version_loongarch.hpp +index 16c12a30ee4..cae9f863c30 100644 +--- a/src/hotspot/cpu/loongarch/vm_version_loongarch.hpp ++++ b/src/hotspot/cpu/loongarch/vm_version_loongarch.hpp +@@ -1,6 +1,6 @@ + /* + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. +- * Copyright (c) 2015, 2021, Loongson Technology. All rights reserved. ++ * Copyright (c) 2015, 2023, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
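The commit above ("Some cpu features use hwcap detect") renumbers the CPU_* feature bits, in the table that follows, so that the first group coincides bit-for-bit with the kernel's HWCAP_LOONGARCH_* values. That lets the new get_os_cpu_info() in vm_version_linux_loongarch.cpp fill _features with a single getauxval(AT_HWCAP) and a mask, no translation table, and the static_asserts pin the correspondence at compile time. A minimal user-space sketch of the same idea, assuming Linux's <sys/auxv.h> (the two bare #include lines in the new file are presumably <sys/auxv.h> and <asm/hwcap.h>, lost in transit):

    #include <sys/auxv.h>
    #include <cstdint>
    #include <cstdio>

    // Feature bits chosen to equal the kernel's HWCAP bits, so detection is
    // one auxv lookup plus a mask.
    enum : uint64_t {
      CPU_LAM  = 1 << 1,  CPU_UAL  = 1 << 2,
      CPU_LSX  = 1 << 4,  CPU_LASX = 1 << 5,
    };

    int main() {
      uint64_t hw = getauxval(AT_HWCAP);
      uint64_t features = hw & (CPU_LAM | CPU_UAL | CPU_LSX | CPU_LASX);
      std::printf("LSX:%d LASX:%d\n",
                  !!(features & CPU_LSX), !!(features & CPU_LASX));
      return 0;
    }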
+ * + * This code is free software; you can redistribute it and/or modify it +@@ -196,25 +196,26 @@ public: + }; + + #define CPU_FEATURE_FLAGS(decl) \ +- decl(LA32, la32, 1) \ +- decl(LA64, la64, 2) \ +- decl(LLEXC, llexc, 3) \ +- decl(SCDLY, scdly, 4) \ +- decl(LLDBAR, lldbar, 5) \ +- decl(LBT_X86, lbt_x86, 6) \ +- decl(LBT_ARM, lbt_arm, 7) \ +- decl(LBT_MIPS, lbt_mips, 8) \ +- decl(CCDMA, ccdma, 9) \ +- decl(COMPLEX, complex, 10) \ +- decl(FP, fp, 11) \ +- decl(CRYPTO, crypto, 14) \ +- decl(LSX, lsx, 15) \ +- decl(LASX, lasx, 17) \ +- decl(LAM, lam, 21) \ +- decl(LLSYNC, llsync, 23) \ +- decl(TGTSYNC, tgtsync, 24) \ +- decl(ULSYNC, ulsync, 25) \ +- decl(UAL, ual, 26) ++ decl(LAM, lam, 1) \ ++ decl(UAL, ual, 2) \ ++ decl(LSX, lsx, 4) \ ++ decl(LASX, lasx, 5) \ ++ decl(COMPLEX, complex, 7) \ ++ decl(CRYPTO, crypto, 8) \ ++ decl(LBT_X86, lbt_x86, 10) \ ++ decl(LBT_ARM, lbt_arm, 11) \ ++ decl(LBT_MIPS, lbt_mips, 12) \ ++ /* flags above must follow Linux HWCAP */ \ ++ decl(LA32, la32, 13) \ ++ decl(LA64, la64, 14) \ ++ decl(FP, fp, 15) \ ++ decl(LLEXC, llexc, 16) \ ++ decl(SCDLY, scdly, 17) \ ++ decl(LLDBAR, lldbar, 18) \ ++ decl(CCDMA, ccdma, 19) \ ++ decl(LLSYNC, llsync, 20) \ ++ decl(TGTSYNC, tgtsync, 21) \ ++ decl(ULSYNC, ulsync, 22) \ + + enum Feature_Flag { + #define DECLARE_CPU_FEATURE_FLAG(id, name, bit) CPU_##id = (1 << bit), +@@ -245,8 +246,8 @@ protected: + static CpuidInfo _cpuid_info; + + static uint32_t get_feature_flags_by_cpucfg(); +- static int get_feature_flags_by_cpuinfo(int features); + static void get_processor_features(); ++ static void get_os_cpu_info(); + + public: + // Offsets for cpuid asm stub +diff --git a/src/hotspot/os_cpu/linux_loongarch/vm_version_linux_loongarch.cpp b/src/hotspot/os_cpu/linux_loongarch/vm_version_linux_loongarch.cpp +new file mode 100644 +index 00000000000..3711a7036a1 +--- /dev/null ++++ b/src/hotspot/os_cpu/linux_loongarch/vm_version_linux_loongarch.cpp +@@ -0,0 +1,95 @@ ++/* ++ * Copyright (c) 2006, 2021, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2015, 2023, Loongson Technology. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "asm/register.hpp" ++#include "runtime/os.hpp" ++#include "runtime/os.inline.hpp" ++#include "runtime/vm_version.hpp" ++ ++#include ++#include ++ ++#ifndef HWCAP_LOONGARCH_LAM ++#define HWCAP_LOONGARCH_LAM (1 << 1) ++#endif ++ ++#ifndef HWCAP_LOONGARCH_UAL ++#define HWCAP_LOONGARCH_UAL (1 << 2) ++#endif ++ ++#ifndef HWCAP_LOONGARCH_LSX ++#define HWCAP_LOONGARCH_LSX (1 << 4) ++#endif ++ ++#ifndef HWCAP_LOONGARCH_LASX ++#define HWCAP_LOONGARCH_LASX (1 << 5) ++#endif ++ ++#ifndef HWCAP_LOONGARCH_COMPLEX ++#define HWCAP_LOONGARCH_COMPLEX (1 << 7) ++#endif ++ ++#ifndef HWCAP_LOONGARCH_CRYPTO ++#define HWCAP_LOONGARCH_CRYPTO (1 << 8) ++#endif ++ ++#ifndef HWCAP_LOONGARCH_LBT_X86 ++#define HWCAP_LOONGARCH_LBT_X86 (1 << 10) ++#endif ++ ++#ifndef HWCAP_LOONGARCH_LBT_ARM ++#define HWCAP_LOONGARCH_LBT_ARM (1 << 11) ++#endif ++ ++#ifndef HWCAP_LOONGARCH_LBT_MIPS ++#define HWCAP_LOONGARCH_LBT_MIPS (1 << 12) ++#endif ++ ++void VM_Version::get_os_cpu_info() { ++ ++ uint64_t auxv = getauxval(AT_HWCAP); ++ ++ static_assert(CPU_LAM == HWCAP_LOONGARCH_LAM, "Flag CPU_LAM must follow Linux HWCAP"); ++ static_assert(CPU_UAL == HWCAP_LOONGARCH_UAL, "Flag CPU_UAL must follow Linux HWCAP"); ++ static_assert(CPU_LSX == HWCAP_LOONGARCH_LSX, "Flag CPU_LSX must follow Linux HWCAP"); ++ static_assert(CPU_LASX == HWCAP_LOONGARCH_LASX, "Flag CPU_LASX must follow Linux HWCAP"); ++ static_assert(CPU_COMPLEX == HWCAP_LOONGARCH_COMPLEX, "Flag CPU_COMPLEX must follow Linux HWCAP"); ++ static_assert(CPU_CRYPTO == HWCAP_LOONGARCH_CRYPTO, "Flag CPU_CRYPTO must follow Linux HWCAP"); ++ static_assert(CPU_LBT_X86 == HWCAP_LOONGARCH_LBT_X86, "Flag CPU_LBT_X86 must follow Linux HWCAP"); ++ static_assert(CPU_LBT_ARM == HWCAP_LOONGARCH_LBT_ARM, "Flag CPU_LBT_ARM must follow Linux HWCAP"); ++ static_assert(CPU_LBT_MIPS == HWCAP_LOONGARCH_LBT_MIPS, "Flag CPU_LBT_MIPS must follow Linux HWCAP"); ++ ++ _features = auxv & ( ++ HWCAP_LOONGARCH_LAM | ++ HWCAP_LOONGARCH_UAL | ++ HWCAP_LOONGARCH_LSX | ++ HWCAP_LOONGARCH_LASX | ++ HWCAP_LOONGARCH_COMPLEX | ++ HWCAP_LOONGARCH_CRYPTO | ++ HWCAP_LOONGARCH_LBT_X86 | ++ HWCAP_LOONGARCH_LBT_ARM | ++ HWCAP_LOONGARCH_LBT_MIPS); ++} + +commit 4d654249359c20b98708cb4c4c3866ea6d6657e7 +Author: loongson-jvm +Date: Fri Feb 24 09:17:04 2023 +0800 + + Update (2023.02.23) + + 29653: windows debug build failed + 29652: 8301397: [11u, 17u] Bump jtreg to fix issue with build JDK + 29655: ppc64le build failed + 23864: Fix caller saved fpu regset + 28680: [MIPS] Fix a typo in PosixSignals::pd_hotspot_signal_handler + 28678: Fix a typo in PosixSignals::pd_hotspot_signal_handler + +diff --git a/make/conf/github-actions.conf b/make/conf/github-actions.conf +index 06dfd477372..4a505ac5905 100644 +--- a/make/conf/github-actions.conf ++++ b/make/conf/github-actions.conf +@@ -26,7 +26,7 @@ + # Versions and download locations for dependencies used by GitHub Actions (GHA) + + GTEST_VERSION=1.8.1 +-JTREG_VERSION=6.1+2 ++JTREG_VERSION=6.1+3 + + LINUX_X64_BOOT_JDK_EXT=tar.gz + LINUX_X64_BOOT_JDK_URL=https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.2%2B8/OpenJDK17U-jdk_x64_linux_hotspot_17.0.2_8.tar.gz +diff --git a/src/hotspot/cpu/loongarch/gc/g1/g1BarrierSetAssembler_loongarch.cpp b/src/hotspot/cpu/loongarch/gc/g1/g1BarrierSetAssembler_loongarch.cpp +index 0afc25e6e95..e129264506b 100644 +--- a/src/hotspot/cpu/loongarch/gc/g1/g1BarrierSetAssembler_loongarch.cpp ++++ b/src/hotspot/cpu/loongarch/gc/g1/g1BarrierSetAssembler_loongarch.cpp +@@ -450,10 
+450,10 @@ void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* + __ b(done); + + __ bind(runtime); +- __ pushad(); ++ __ push_call_clobbered_registers(); + __ load_parameter(0, pre_val); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread); +- __ popad(); ++ __ pop_call_clobbered_registers(); + __ bind(done); + + __ epilogue(); +@@ -520,9 +520,9 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* + __ b(done); + + __ bind(runtime); +- __ pushad(); ++ __ push_call_clobbered_registers(); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread); +- __ popad(); ++ __ pop_call_clobbered_registers(); + __ bind(done); + __ epilogue(); + } +diff --git a/src/hotspot/cpu/loongarch/gc/shenandoah/shenandoahBarrierSetAssembler_loongarch.cpp b/src/hotspot/cpu/loongarch/gc/shenandoah/shenandoahBarrierSetAssembler_loongarch.cpp +index 37124c99615..7cf552e283a 100644 +--- a/src/hotspot/cpu/loongarch/gc/shenandoah/shenandoahBarrierSetAssembler_loongarch.cpp ++++ b/src/hotspot/cpu/loongarch/gc/shenandoah/shenandoahBarrierSetAssembler_loongarch.cpp +@@ -285,7 +285,7 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, + __ beqz(SCR2, not_cset); + } + +- __ pushad_except_v0(); ++ __ push_call_clobbered_registers_except(RegSet::of(V0)); + if (is_strong) { + if (is_narrow) { + __ li(RA, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow)); +@@ -304,7 +304,7 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, + __ li(RA, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom)); + } + __ jalr(RA); +- __ popad_except_v0(); ++ __ pop_call_clobbered_registers_except(RegSet::of(V0)); + + __ bind(not_cset); + +@@ -320,9 +320,9 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, + + void ShenandoahBarrierSetAssembler::iu_barrier(MacroAssembler* masm, Register dst, Register tmp) { + if (ShenandoahIUBarrier) { +- __ pushad(); ++ __ push_call_clobbered_registers(); + satb_write_barrier_pre(masm, noreg, dst, TREG, tmp, true, false); +- __ popad(); ++ __ pop_call_clobbered_registers(); + } + } + +@@ -374,7 +374,7 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet d + // 3: apply keep-alive barrier if needed + if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) { + __ enter(); +- __ pushad(); ++ __ push_call_clobbered_registers(); + satb_write_barrier_pre(masm /* masm */, + noreg /* obj */, + dst /* pre_val */, +@@ -382,7 +382,7 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet d + tmp1 /* tmp */, + true /* tosca_live */, + true /* expand_call */); +- __ popad(); ++ __ pop_call_clobbered_registers(); + __ leave(); + } + } +@@ -729,10 +729,10 @@ void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAss + __ b(done); + + __ bind(runtime); +- __ pushad(); ++ __ push_call_clobbered_registers(); + __ load_parameter(0, pre_val); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread); +- __ popad(); ++ __ pop_call_clobbered_registers(); + __ bind(done); + + __ epilogue(); +@@ -743,7 +743,7 @@ void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_s + __ bstrins_d(SP, R0, 3, 0); + // arg0 : object to be resolved + +- __ pushad_except_v0(); ++ 
__ push_call_clobbered_registers_except(RegSet::of(V0)); + __ load_parameter(0, A0); + __ load_parameter(1, A1); + +@@ -774,7 +774,7 @@ void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_s + __ li(RA, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom)); + } + __ jalr(RA); +- __ popad_except_v0(); ++ __ pop_call_clobbered_registers_except(RegSet::of(V0)); + + __ epilogue(); + } +diff --git a/src/hotspot/cpu/loongarch/gc/z/zBarrierSetAssembler_loongarch.cpp b/src/hotspot/cpu/loongarch/gc/z/zBarrierSetAssembler_loongarch.cpp +index f345008bb92..3ef43daa725 100644 +--- a/src/hotspot/cpu/loongarch/gc/z/zBarrierSetAssembler_loongarch.cpp ++++ b/src/hotspot/cpu/loongarch/gc/z/zBarrierSetAssembler_loongarch.cpp +@@ -99,7 +99,7 @@ void ZBarrierSetAssembler::load_at(MacroAssembler* masm, + if (dst != V0) { + __ push(V0); + } +- __ pushad_except_v0(); ++ __ push_call_clobbered_registers_except(RegSet::of(V0)); + + if (dst != A0) { + __ move(A0, dst); +@@ -107,7 +107,7 @@ void ZBarrierSetAssembler::load_at(MacroAssembler* masm, + __ move(A1, scratch); + __ MacroAssembler::call_VM_leaf_base(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2); + +- __ popad_except_v0(); ++ __ pop_call_clobbered_registers_except(RegSet::of(V0)); + + // Make sure dst has the return value. + if (dst != V0) { +@@ -294,7 +294,7 @@ void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* + DecoratorSet decorators) const { + __ prologue("zgc_load_barrier stub", false); + +- __ pushad_except_v0(); ++ __ push_call_clobbered_registers_except(RegSet::of(V0)); + + // Setup arguments + __ load_parameter(0, A0); +@@ -302,7 +302,7 @@ void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* + + __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2); + +- __ popad_except_v0(); ++ __ pop_call_clobbered_registers_except(RegSet::of(V0)); + + __ epilogue(); + } +diff --git a/src/hotspot/cpu/loongarch/macroAssembler_loongarch.cpp b/src/hotspot/cpu/loongarch/macroAssembler_loongarch.cpp +index be66c6c5e92..882c43bf592 100644 +--- a/src/hotspot/cpu/loongarch/macroAssembler_loongarch.cpp ++++ b/src/hotspot/cpu/loongarch/macroAssembler_loongarch.cpp +@@ -984,7 +984,7 @@ void MacroAssembler::stop(const char* msg) { + } + + void MacroAssembler::warn(const char* msg) { +- pushad(); ++ push_call_clobbered_registers(); + li(A0, (long)msg); + push(S2); + move(S2, SP); // use S2 as a sender SP holder +@@ -993,7 +993,7 @@ void MacroAssembler::warn(const char* msg) { + call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type); + move(SP, S2); // use S2 as a sender SP holder + pop(S2); +- popad(); ++ pop_call_clobbered_registers(); + } + + void MacroAssembler::increment(Register reg, int imm) { +@@ -1337,7 +1337,7 @@ void MacroAssembler::get_thread(Register thread) { + if (thread != V0) { + push(V0); + } +- pushad_except_v0(); ++ push_call_clobbered_registers_except(RegSet::of(V0)); + + push(S5); + move(S5, SP); +@@ -1348,7 +1348,7 @@ void MacroAssembler::get_thread(Register thread) { + move(SP, S5); + pop(S5); + +- popad_except_v0(); ++ pop_call_clobbered_registers_except(RegSet::of(V0)); + if (thread != V0) { + move(thread, V0); + pop(V0); +@@ -1777,9 +1777,9 @@ void MacroAssembler::verify_oop_subroutine() { + + // handle errors + bind(error); +- pushad(); ++ push_call_clobbered_registers(); + call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), 
relocInfo::runtime_call_type); +- popad(); ++ pop_call_clobbered_registers(); + jr(RA); + } + +@@ -1971,83 +1971,17 @@ void MacroAssembler::verify_FPU(int stack_depth, const char* s) { + //Unimplemented(); + } + +-Register caller_saved_registers[] = {T7, T5, T6, A0, A1, A2, A3, A4, A5, A6, A7, T0, T1, T2, T3, T8, T4, S8, RA, FP}; +-Register caller_saved_registers_except_v0[] = {T7, T5, T6, A1, A2, A3, A4, A5, A6, A7, T0, T1, T2, T3, T8, T4, S8, RA, FP}; ++static RegSet caller_saved_regset = RegSet::range(A0, A7) + RegSet::range(T0, T8) + RegSet::of(FP, RA) - RegSet::of(SCR1, SCR2); ++static FloatRegSet caller_saved_fpu_regset = FloatRegSet::range(F0, F23); + +- //TODO: LA +-//In LA, F0~23 are all caller-saved registers +-FloatRegister caller_saved_fpu_registers[] = {F0, F12, F13}; +- +-// We preserve all caller-saved register +-void MacroAssembler::pushad(){ +- int i; +- // Fixed-point registers +- int len = sizeof(caller_saved_registers) / sizeof(caller_saved_registers[0]); +- addi_d(SP, SP, -1 * len * wordSize); +- for (i = 0; i < len; i++) { +- st_d(caller_saved_registers[i], SP, (len - i - 1) * wordSize); +- } +- +- // Floating-point registers +- len = sizeof(caller_saved_fpu_registers) / sizeof(caller_saved_fpu_registers[0]); +- addi_d(SP, SP, -1 * len * wordSize); +- for (i = 0; i < len; i++) { +- fst_d(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize); +- } +-}; +- +-void MacroAssembler::popad(){ +- int i; +- // Floating-point registers +- int len = sizeof(caller_saved_fpu_registers) / sizeof(caller_saved_fpu_registers[0]); +- for (i = 0; i < len; i++) +- { +- fld_d(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize); +- } +- addi_d(SP, SP, len * wordSize); +- +- // Fixed-point registers +- len = sizeof(caller_saved_registers) / sizeof(caller_saved_registers[0]); +- for (i = 0; i < len; i++) +- { +- ld_d(caller_saved_registers[i], SP, (len - i - 1) * wordSize); +- } +- addi_d(SP, SP, len * wordSize); +-}; +- +-// We preserve all caller-saved register except V0 +-void MacroAssembler::pushad_except_v0() { +- int i; +- // Fixed-point registers +- int len = sizeof(caller_saved_registers_except_v0) / sizeof(caller_saved_registers_except_v0[0]); +- addi_d(SP, SP, -1 * len * wordSize); +- for (i = 0; i < len; i++) { +- st_d(caller_saved_registers_except_v0[i], SP, (len - i - 1) * wordSize); +- } +- +- // Floating-point registers +- len = sizeof(caller_saved_fpu_registers) / sizeof(caller_saved_fpu_registers[0]); +- addi_d(SP, SP, -1 * len * wordSize); +- for (i = 0; i < len; i++) { +- fst_d(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize); +- } ++void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude) { ++ push(caller_saved_regset - exclude); ++ push_fpu(caller_saved_fpu_regset); + } + +-void MacroAssembler::popad_except_v0() { +- int i; +- // Floating-point registers +- int len = sizeof(caller_saved_fpu_registers) / sizeof(caller_saved_fpu_registers[0]); +- for (i = 0; i < len; i++) { +- fld_d(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize); +- } +- addi_d(SP, SP, len * wordSize); +- +- // Fixed-point registers +- len = sizeof(caller_saved_registers_except_v0) / sizeof(caller_saved_registers_except_v0[0]); +- for (i = 0; i < len; i++) { +- ld_d(caller_saved_registers_except_v0[i], SP, (len - i - 1) * wordSize); +- } +- addi_d(SP, SP, len * wordSize); ++void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude) { ++ pop_fpu(caller_saved_fpu_regset); ++ pop(caller_saved_regset - exclude); + } + + void 
MacroAssembler::push2(Register reg1, Register reg2) { +diff --git a/src/hotspot/cpu/loongarch/macroAssembler_loongarch.hpp b/src/hotspot/cpu/loongarch/macroAssembler_loongarch.hpp +index 8e36a23afc1..07c33b80151 100644 +--- a/src/hotspot/cpu/loongarch/macroAssembler_loongarch.hpp ++++ b/src/hotspot/cpu/loongarch/macroAssembler_loongarch.hpp +@@ -571,11 +571,20 @@ class MacroAssembler: public Assembler { + void pop2 () { addi_d(SP, SP, 16); } + void push2(Register reg1, Register reg2); + void pop2 (Register reg1, Register reg2); +- //we need 2 fun to save and resotre general register +- void pushad(); +- void popad(); +- void pushad_except_v0(); +- void popad_except_v0(); ++ // Push and pop everything that might be clobbered by a native ++ // runtime call except SCR1 and SCR2. (They are always scratch, ++ // so we don't have to protect them.) Only save the lower 64 bits ++ // of each vector register. Additional registers can be excluded ++ // in a passed RegSet. ++ void push_call_clobbered_registers_except(RegSet exclude); ++ void pop_call_clobbered_registers_except(RegSet exclude); ++ ++ void push_call_clobbered_registers() { ++ push_call_clobbered_registers_except(RegSet()); ++ } ++ void pop_call_clobbered_registers() { ++ pop_call_clobbered_registers_except(RegSet()); ++ } + void push(RegSet regs) { if (regs.bits()) push(regs.bits()); } + void pop(RegSet regs) { if (regs.bits()) pop(regs.bits()); } + void push_fpu(FloatRegSet regs) { if (regs.bits()) push_fpu(regs.bits()); } +diff --git a/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp b/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp +index 0ca8968136b..0fe2f8f0d36 100644 +--- a/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp ++++ b/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp +@@ -459,7 +459,7 @@ static void patch_callers_callsite(MacroAssembler *masm) { + // T5 isn't live so capture return address while we easily can + __ move(T5, RA); + +- __ pushad(); ++ __ push_call_clobbered_registers(); + + // VM needs caller's callsite + // VM needs target method +@@ -474,7 +474,7 @@ static void patch_callers_callsite(MacroAssembler *masm) { + relocInfo::runtime_call_type); + + __ move(SP, TSR); +- __ popad(); ++ __ pop_call_clobbered_registers(); + __ bind(L); + } + +diff --git a/src/hotspot/cpu/loongarch/stubGenerator_loongarch_64.cpp b/src/hotspot/cpu/loongarch/stubGenerator_loongarch_64.cpp +index 667f6aa92fc..fbdc96f10f5 100644 +--- a/src/hotspot/cpu/loongarch/stubGenerator_loongarch_64.cpp ++++ b/src/hotspot/cpu/loongarch/stubGenerator_loongarch_64.cpp +@@ -3035,7 +3035,7 @@ class StubGenerator: public StubCodeGenerator { + __ addi_d(SP, SP, -4 * wordSize); // four words for the returned {SP, FP, RA, PC} + + __ push(V0); +- __ pushad_except_v0(); ++ __ push_call_clobbered_registers_except(RegSet::of(V0)); + + __ move(A0, T4); + __ call_VM_leaf +@@ -3044,7 +3044,7 @@ class StubGenerator: public StubCodeGenerator { + + __ reset_last_Java_frame(true); + +- __ popad_except_v0(); ++ __ pop_call_clobbered_registers_except(RegSet::of(V0)); + + __ bnez(V0, deoptimize_label); + +diff --git a/src/hotspot/cpu/loongarch/templateInterpreterGenerator_loongarch.cpp b/src/hotspot/cpu/loongarch/templateInterpreterGenerator_loongarch.cpp +index 18e19e87b2e..88b938103bc 100644 +--- a/src/hotspot/cpu/loongarch/templateInterpreterGenerator_loongarch.cpp ++++ b/src/hotspot/cpu/loongarch/templateInterpreterGenerator_loongarch.cpp +@@ -1481,13 +1481,13 @@ address TemplateInterpreterGenerator::generate_native_entry(bool 
synchronized) { + __ ld_w(t, thread, in_bytes(JavaThread::stack_guard_state_offset())); + __ li(AT, (u1)StackOverflow::stack_guard_yellow_reserved_disabled); + __ bne(t, AT, no_reguard); +- __ pushad(); ++ __ push_call_clobbered_registers(); + __ move(S5_heapbase, SP); + assert(StackAlignmentInBytes == 16, "must be"); + __ bstrins_d(SP, R0, 3, 0); + __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), relocInfo::runtime_call_type); + __ move(SP, S5_heapbase); +- __ popad(); ++ __ pop_call_clobbered_registers(); + //add for compressedoops + __ reinit_heapbase(); + __ bind(no_reguard); +diff --git a/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp +index 17fc8e5078e..27e431c2c61 100644 +--- a/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp ++++ b/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.cpp +@@ -292,7 +292,8 @@ void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, R + // Code emitted by LIR node "LIR_OpZLoadBarrierTest" which in turn is emitted by ZBarrierSetC1::load_barrier. + // The actual compare and branch instructions are represented as stand-alone LIR nodes. + void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce, +- LIR_Opr ref) const { ++ LIR_Opr ref, ++ LIR_Opr res) const { + __ block_comment("load_barrier_test (zgc) {"); + + __ ld(R0, in_bytes(ZThreadLocalData::address_bad_mask_offset()), R16_thread); +diff --git a/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.hpp b/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.hpp +index e2ff1bf53ae..4957e73ae22 100644 +--- a/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.hpp ++++ b/src/hotspot/cpu/ppc/gc/z/zBarrierSetAssembler_ppc.hpp +@@ -67,7 +67,8 @@ public: + + #ifdef COMPILER1 + void generate_c1_load_barrier_test(LIR_Assembler* ce, +- LIR_Opr ref) const; ++ LIR_Opr ref, ++ LIR_Opr res) const; + + void generate_c1_load_barrier_stub(LIR_Assembler* ce, + ZLoadBarrierStubC1* stub) const; +diff --git a/src/hotspot/cpu/ppc/gc/z/zGlobals_ppc.hpp b/src/hotspot/cpu/ppc/gc/z/zGlobals_ppc.hpp +index 3657b16fc1a..a2aab225743 100644 +--- a/src/hotspot/cpu/ppc/gc/z/zGlobals_ppc.hpp ++++ b/src/hotspot/cpu/ppc/gc/z/zGlobals_ppc.hpp +@@ -30,6 +30,8 @@ const size_t ZPlatformGranuleSizeShift = 21; // 2MB + const size_t ZPlatformHeapViews = 3; + const size_t ZPlatformCacheLineSize = DEFAULT_CACHE_LINE_SIZE; + ++const bool ZPlatformLoadBarrierTestResultInRegister = false; ++ + size_t ZPlatformAddressOffsetBits(); + size_t ZPlatformAddressMetadataShift(); + +diff --git a/src/hotspot/os_cpu/linux_loongarch/os_linux_loongarch.cpp b/src/hotspot/os_cpu/linux_loongarch/os_linux_loongarch.cpp +index 84519a31f56..a2e4fea109c 100644 +--- a/src/hotspot/os_cpu/linux_loongarch/os_linux_loongarch.cpp ++++ b/src/hotspot/os_cpu/linux_loongarch/os_linux_loongarch.cpp +@@ -217,7 +217,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, + #endif + + // Handle signal from NativeJump::patch_verified_entry(). 
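++  // Note: bitwise '&' does not short-circuit, so the old form probed
++  // nativeInstruction_at(pc) for every incoming signal; the logical '&&'
++  // below queries the instruction only when sig really is SIGILL.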
+- if (sig == SIGILL & nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) { ++ if (sig == SIGILL && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) { + #ifdef PRINT_SIGNAL_HANDLE + tty->print_cr("verified entry = %lx, sig=%d", nativeInstruction_at(pc), sig); + #endif +diff --git a/src/hotspot/os_cpu/linux_mips/os_linux_mips.cpp b/src/hotspot/os_cpu/linux_mips/os_linux_mips.cpp +index df17c3f8853..8344945ff79 100644 +--- a/src/hotspot/os_cpu/linux_mips/os_linux_mips.cpp ++++ b/src/hotspot/os_cpu/linux_mips/os_linux_mips.cpp +@@ -1,6 +1,6 @@ + /* + * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved. +- * Copyright (c) 2015, 2022, Loongson Technology. All rights reserved. ++ * Copyright (c) 2015, 2023, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it +@@ -248,7 +248,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, + #endif + + // Handle signal from NativeJump::patch_verified_entry(). +- if (sig == SIGILL & nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) { ++ if (sig == SIGILL && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) { + #ifdef PRINT_SIGNAL_HANDLE + tty->print_cr("verified entry = %lx, sig=%d", nativeInstruction_at(pc), sig); + #endif +diff --git a/src/hotspot/share/asm/codeBuffer.cpp b/src/hotspot/share/asm/codeBuffer.cpp +index 8dfa007e71a..e3660cab271 100644 +--- a/src/hotspot/share/asm/codeBuffer.cpp ++++ b/src/hotspot/share/asm/codeBuffer.cpp +@@ -22,6 +22,12 @@ + * + */ + ++/* ++ * This file has been modified by Loongson Technology in 2023. These ++ * modifications are Copyright (c) 2018, 2023, Loongson Technology, and are made ++ * available on the same license terms set forth above. ++ */ ++ + #include "precompiled.hpp" + #include "asm/codeBuffer.hpp" + #include "code/oopRecorder.inline.hpp" +@@ -330,9 +336,7 @@ void CodeSection::relocate(address at, RelocationHolder const& spec, int format) + assert(rtype == relocInfo::none || + rtype == relocInfo::runtime_call_type || + rtype == relocInfo::internal_word_type|| +-#ifdef MIPS +- rtype == relocInfo::internal_pc_type || +-#endif ++ NOT_ZERO(MIPS64_ONLY(rtype == relocInfo::internal_pc_type ||)) + rtype == relocInfo::section_word_type || + rtype == relocInfo::external_word_type, + "code needs relocation information"); + +commit 9f3079d058b332d7cec1c0b4568786aa8c84c363 +Merge: d4dcc0bf7b8 2fe42855c48 +Author: aoqi +Date: Tue Jan 31 19:07:27 2023 +0800 + + Merge + + +commit d4dcc0bf7b8bca2f2a0095278bc573879726eb0f +Author: loongson-jvm +Date: Tue Jan 31 16:17:39 2023 +0800 + + Initial commit by Loongson + + A LoongArch64 port (template interpreter, C1 JIT compiler and C2 JIT compiler) and a MIPS64 port (template interpreter and C2 JIT compiler). + + Co-authored-by: Loongson JVM team + diff --git a/make/autoconf/jvm-features.m4 b/make/autoconf/jvm-features.m4 -index aa99b037b2b..d0c3a85757b 100644 +index 906a2857877..20d35c2a9e2 100644 --- a/make/autoconf/jvm-features.m4 +++ b/make/autoconf/jvm-features.m4 @@ -23,6 +23,12 @@ @@ -39,7 +11469,7 @@ index aa99b037b2b..d0c3a85757b 100644 ############################################################################### # Check if the feature 'dtrace' is available on this platform. 
# -@@ -295,6 +318,8 @@ AC_DEFUN_ONCE([JVM_FEATURES_CHECK_JVMCI], +@@ -292,6 +315,8 @@ AC_DEFUN_ONCE([JVM_FEATURES_CHECK_JVMCI], AC_MSG_RESULT([yes]) elif test "x$OPENJDK_TARGET_CPU" = "xaarch64"; then AC_MSG_RESULT([yes]) @@ -48,17 +11478,17 @@ index aa99b037b2b..d0c3a85757b 100644 else AC_MSG_RESULT([no, $OPENJDK_TARGET_CPU]) AVAILABLE=false -@@ -312,7 +337,8 @@ AC_DEFUN_ONCE([JVM_FEATURES_CHECK_SHENANDOAHGC], +@@ -308,7 +333,8 @@ AC_DEFUN_ONCE([JVM_FEATURES_CHECK_SHENANDOAHGC], + AC_MSG_CHECKING([if platform is supported by Shenandoah]) if test "x$OPENJDK_TARGET_CPU_ARCH" = "xx86" || \ test "x$OPENJDK_TARGET_CPU" = "xaarch64" || \ - test "x$OPENJDK_TARGET_CPU" = "xppc64le" || \ -- test "x$OPENJDK_TARGET_CPU" = "xriscv64"; then -+ test "x$OPENJDK_TARGET_CPU" = "xriscv64" || \ +- test "x$OPENJDK_TARGET_CPU" = "xppc64le"; then ++ test "x$OPENJDK_TARGET_CPU" = "xppc64le" || \ + test "x$OPENJDK_TARGET_CPU" = "xloongarch64"; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no, $OPENJDK_TARGET_CPU]) -@@ -370,6 +396,13 @@ AC_DEFUN_ONCE([JVM_FEATURES_CHECK_ZGC], +@@ -365,6 +391,13 @@ AC_DEFUN_ONCE([JVM_FEATURES_CHECK_ZGC], AC_MSG_RESULT([no, $OPENJDK_TARGET_OS-$OPENJDK_TARGET_CPU]) AVAILABLE=false fi @@ -72,7 +11502,7 @@ index aa99b037b2b..d0c3a85757b 100644 else AC_MSG_RESULT([no, $OPENJDK_TARGET_OS-$OPENJDK_TARGET_CPU]) AVAILABLE=false -@@ -404,6 +437,7 @@ AC_DEFUN_ONCE([JVM_FEATURES_PREPARE_PLATFORM], +@@ -399,6 +432,7 @@ AC_DEFUN_ONCE([JVM_FEATURES_PREPARE_PLATFORM], # JVM_FEATURES_PLATFORM_UNAVAILABLE. JVM_FEATURES_CHECK_CDS @@ -81,7 +11511,7 @@ index aa99b037b2b..d0c3a85757b 100644 JVM_FEATURES_CHECK_JFR JVM_FEATURES_CHECK_JVMCI diff --git a/make/autoconf/platform.m4 b/make/autoconf/platform.m4 -index eb66266262b..931434c4e7b 100644 +index 9e9e9454f0e..9058b736007 100644 --- a/make/autoconf/platform.m4 +++ b/make/autoconf/platform.m4 @@ -23,6 +23,12 @@ @@ -124,10 +11554,10 @@ index eb66266262b..931434c4e7b 100644 AC_SUBST(HOTSPOT_$1_CPU_ARCH) # Setup HOTSPOT_$1_CPU_DEFINE -@@ -563,6 +584,12 @@ AC_DEFUN([PLATFORM_SETUP_LEGACY_VARS_HELPER], +@@ -561,6 +582,12 @@ AC_DEFUN([PLATFORM_SETUP_LEGACY_VARS_HELPER], + HOTSPOT_$1_CPU_DEFINE=PPC64 + elif test "x$OPENJDK_$1_CPU" = xppc64le; then HOTSPOT_$1_CPU_DEFINE=PPC64 - elif test "x$OPENJDK_$1_CPU" = xriscv64; then - HOTSPOT_$1_CPU_DEFINE=RISCV64 + elif test "x$OPENJDK_$1_CPU" = xmips64; then + HOTSPOT_$1_CPU_DEFINE=MIPS64 + elif test "x$OPENJDK_$1_CPU" = xmips64el; then @@ -137,6 +11567,231 @@ index eb66266262b..931434c4e7b 100644 # The cpu defines below are for zero, we don't support them directly. 
elif test "x$OPENJDK_$1_CPU" = xsparc; then +diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp +index f488e863a68..7e56d2666e5 100644 +--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp ++++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp +@@ -1121,7 +1121,9 @@ void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) { + } + } + +- ++void LIR_Assembler::emit_opCmpBranch(LIR_OpCmpBranch* op) { ++ ShouldNotReachHere(); ++} + + void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) { + LIR_Opr src = op->in_opr(); +@@ -1660,6 +1662,10 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L + __ csel(result->as_register(), opr1->as_register(), opr2->as_register(), acond); + } + ++void LIR_Assembler::cmp_cmove(LIR_Condition condition, LIR_Opr left, LIR_Opr right, LIR_Opr src1, LIR_Opr src2, LIR_Opr result, BasicType type) { ++ ShouldNotReachHere(); ++} ++ + void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) { + assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method"); + +diff --git a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp +index df1b877ee3f..5ecf84f111e 100644 +--- a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp ++++ b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp +@@ -278,18 +278,29 @@ void LIRGenerator::increment_counter(LIR_Address* addr, int step) { + __ store(reg, addr); + } + +-void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) { ++template ++void LIRGenerator::cmp_mem_int_branch(LIR_Condition condition, LIR_Opr base, int disp, int c, T tgt, CodeEmitInfo* info) { + LIR_Opr reg = new_register(T_INT); + __ load(generate_address(base, disp, T_INT), reg, info); +- __ cmp(condition, reg, LIR_OprFact::intConst(c)); ++ __ cmp_branch(condition, reg, LIR_OprFact::intConst(c), tgt); + } + +-void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) { ++// Explicit instantiation for all supported types. ++template void LIRGenerator::cmp_mem_int_branch(LIR_Condition, LIR_Opr, int, int, Label*, CodeEmitInfo*); ++template void LIRGenerator::cmp_mem_int_branch(LIR_Condition, LIR_Opr, int, int, BlockBegin*, CodeEmitInfo*); ++template void LIRGenerator::cmp_mem_int_branch(LIR_Condition, LIR_Opr, int, int, CodeStub*, CodeEmitInfo*); ++ ++template ++void LIRGenerator::cmp_reg_mem_branch(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, T tgt, CodeEmitInfo* info) { + LIR_Opr reg1 = new_register(T_INT); + __ load(generate_address(base, disp, type), reg1, info); +- __ cmp(condition, reg, reg1); ++ __ cmp_branch(condition, reg, reg1, tgt); + } + ++// Explicit instantiation for all supported types. 
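++// The out-of-line template definition above lives in this .cpp file rather
++// than a header, so every branch-target type used by shared C1 code (Label*,
++// BlockBegin*, CodeStub*) must be instantiated explicitly here; otherwise
++// callers in other translation units would compile but fail to link.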
++template void LIRGenerator::cmp_reg_mem_branch(LIR_Condition, LIR_Opr, LIR_Opr, int, BasicType, Label*, CodeEmitInfo*); ++template void LIRGenerator::cmp_reg_mem_branch(LIR_Condition, LIR_Opr, LIR_Opr, int, BasicType, BlockBegin*, CodeEmitInfo*); ++template void LIRGenerator::cmp_reg_mem_branch(LIR_Condition, LIR_Opr, LIR_Opr, int, BasicType, CodeStub*, CodeEmitInfo*); + + bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) { + +diff --git a/src/hotspot/cpu/aarch64/c1_LIR_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIR_aarch64.cpp +index 58e1cf5ae63..8f67e6f4f67 100644 +--- a/src/hotspot/cpu/aarch64/c1_LIR_aarch64.cpp ++++ b/src/hotspot/cpu/aarch64/c1_LIR_aarch64.cpp +@@ -52,3 +52,24 @@ void LIR_Address::verify() const { + "wrong type for addresses"); + } + #endif // PRODUCT ++ ++template ++void LIR_List::cmp_branch(LIR_Condition condition, LIR_Opr left, LIR_Opr right, T tgt, CodeEmitInfo* info) { ++ cmp(condition, left, right, info); ++ branch(condition, tgt); ++} ++ ++// Explicit instantiation for all supported types. ++template void LIR_List::cmp_branch(LIR_Condition, LIR_Opr, LIR_Opr, Label*, CodeEmitInfo*); ++template void LIR_List::cmp_branch(LIR_Condition, LIR_Opr, LIR_Opr, BlockBegin*, CodeEmitInfo*); ++template void LIR_List::cmp_branch(LIR_Condition, LIR_Opr, LIR_Opr, CodeStub*, CodeEmitInfo*); ++ ++void LIR_List::cmp_branch(LIR_Condition condition, LIR_Opr left, LIR_Opr right, BlockBegin* block, BlockBegin* unordered) { ++ cmp(condition, left, right); ++ branch(condition, block, unordered); ++} ++ ++void LIR_List::cmp_cmove(LIR_Condition condition, LIR_Opr left, LIR_Opr right, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type) { ++ cmp(condition, left, right); ++ cmove(condition, src1, src2, dst, type); ++} +diff --git a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp +index 6b42982ed90..e39e6e8ab48 100644 +--- a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp ++++ b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp +@@ -209,7 +209,8 @@ void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, + #define __ ce->masm()-> + + void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce, +- LIR_Opr ref) const { ++ LIR_Opr ref, ++ LIR_Opr res) const { + assert_different_registers(rscratch1, rthread, ref->as_register()); + + __ ldr(rscratch1, address_bad_mask_from_thread(rthread)); +diff --git a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp +index cca87382560..de7bda178d3 100644 +--- a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp ++++ b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp +@@ -79,7 +79,8 @@ public: + + #ifdef COMPILER1 + void generate_c1_load_barrier_test(LIR_Assembler* ce, +- LIR_Opr ref) const; ++ LIR_Opr ref, ++ LIR_Opr res) const; + + void generate_c1_load_barrier_stub(LIR_Assembler* ce, + ZLoadBarrierStubC1* stub) const; +diff --git a/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.hpp +index 3187808b65a..bf18d48814f 100644 +--- a/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.hpp ++++ b/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.hpp +@@ -28,6 +28,8 @@ const size_t ZPlatformGranuleSizeShift = 21; // 2MB + const size_t ZPlatformHeapViews = 3; + const size_t ZPlatformCacheLineSize = 64; + ++const bool 
ZPlatformLoadBarrierTestResultInRegister = false; ++ + size_t ZPlatformAddressOffsetBits(); + size_t ZPlatformAddressMetadataShift(); + +diff --git a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp +index 776f00977cb..71f3961828e 100644 +--- a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp ++++ b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp +@@ -899,6 +899,9 @@ void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) { + __ b(*(op->label()), acond); + } + ++void LIR_Assembler::emit_opCmpBranch(LIR_OpCmpBranch* op) { ++ ShouldNotReachHere(); ++} + + void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) { + LIR_Opr src = op->in_opr(); +@@ -1495,6 +1498,10 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L + } + } + ++void LIR_Assembler::cmp_cmove(LIR_Condition condition, LIR_Opr left, LIR_Opr right, LIR_Opr src1, LIR_Opr src2, LIR_Opr result, BasicType type) { ++ ShouldNotReachHere(); ++} ++ + #ifdef ASSERT + static int reg_size(LIR_Opr op) { + switch (op->type()) { +diff --git a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp +index b16986ee2a2..46904771218 100644 +--- a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp ++++ b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp +@@ -313,18 +313,27 @@ void LIRGenerator::increment_counter(LIR_Address* addr, int step) { + __ move(temp, addr); + } + +- +-void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) { ++template ++void LIRGenerator::cmp_mem_int_branch(LIR_Condition condition, LIR_Opr base, int disp, int c, T tgt, CodeEmitInfo* info) { + __ load(new LIR_Address(base, disp, T_INT), FrameMap::LR_opr, info); +- __ cmp(condition, FrameMap::LR_opr, c); ++ __ cmp_branch(condition, FrameMap::LR_opr, c, tgt); + } + ++// Explicit instantiation for all supported types. ++template void LIRGenerator::cmp_mem_int_branch(LIR_Condition, LIR_Opr, int, int, Label*, CodeEmitInfo*); ++template void LIRGenerator::cmp_mem_int_branch(LIR_Condition, LIR_Opr, int, int, BlockBegin*, CodeEmitInfo*); ++template void LIRGenerator::cmp_mem_int_branch(LIR_Condition, LIR_Opr, int, int, CodeStub*, CodeEmitInfo*); + +-void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) { ++template ++void LIRGenerator::cmp_reg_mem_branch(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, T tgt, CodeEmitInfo* info) { + __ load(new LIR_Address(base, disp, type), FrameMap::LR_opr, info); +- __ cmp(condition, reg, FrameMap::LR_opr); ++ __ cmp_branch(condition, reg, FrameMap::LR_opr, tgt); + } + ++// Explicit instantiation for all supported types. 
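++// As on AArch64: the out-of-line template definition needs one explicit
++// instantiation per branch-target type used by shared C1 code.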
++template void LIRGenerator::cmp_reg_mem_branch(LIR_Condition, LIR_Opr, LIR_Opr, int, BasicType, Label*, CodeEmitInfo*); ++template void LIRGenerator::cmp_reg_mem_branch(LIR_Condition, LIR_Opr, LIR_Opr, int, BasicType, BlockBegin*, CodeEmitInfo*); ++template void LIRGenerator::cmp_reg_mem_branch(LIR_Condition, LIR_Opr, LIR_Opr, int, BasicType, CodeStub*, CodeEmitInfo*); + + bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) { + assert(left != result, "should be different registers"); +diff --git a/src/hotspot/cpu/arm/c1_LIR_arm.cpp b/src/hotspot/cpu/arm/c1_LIR_arm.cpp +index 60bd5265bfb..11df12754cd 100644 +--- a/src/hotspot/cpu/arm/c1_LIR_arm.cpp ++++ b/src/hotspot/cpu/arm/c1_LIR_arm.cpp +@@ -62,3 +62,24 @@ void LIR_Address::verify() const { + #endif + } + #endif // PRODUCT ++ ++template ++void LIR_List::cmp_branch(LIR_Condition condition, LIR_Opr left, LIR_Opr right, T tgt, CodeEmitInfo* info) { ++ cmp(condition, left, right, info); ++ branch(condition, tgt); ++} ++ ++// Explicit instantiation for all supported types. ++template void LIR_List::cmp_branch(LIR_Condition, LIR_Opr, LIR_Opr, Label*, CodeEmitInfo*); ++template void LIR_List::cmp_branch(LIR_Condition, LIR_Opr, LIR_Opr, BlockBegin*, CodeEmitInfo*); ++template void LIR_List::cmp_branch(LIR_Condition, LIR_Opr, LIR_Opr, CodeStub*, CodeEmitInfo*); ++ ++void LIR_List::cmp_branch(LIR_Condition condition, LIR_Opr left, LIR_Opr right, BlockBegin* block, BlockBegin* unordered) { ++ cmp(condition, left, right); ++ branch(condition, block, unordered); ++} ++ ++void LIR_List::cmp_cmove(LIR_Condition condition, LIR_Opr left, LIR_Opr right, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type) { ++ cmp(condition, left, right); ++ cmove(condition, src1, src2, dst, type); ++} diff --git a/src/hotspot/cpu/loongarch/abstractInterpreter_loongarch.cpp b/src/hotspot/cpu/loongarch/abstractInterpreter_loongarch.cpp new file mode 100644 index 00000000000..fbcd4050b64 @@ -1133,10 +12788,10 @@ index 00000000000..e6e62cccad0 +} diff --git a/src/hotspot/cpu/loongarch/assembler_loongarch.hpp b/src/hotspot/cpu/loongarch/assembler_loongarch.hpp new file mode 100644 -index 00000000000..5eae8b9995c +index 00000000000..40a3aea36f8 --- /dev/null +++ b/src/hotspot/cpu/loongarch/assembler_loongarch.hpp -@@ -0,0 +1,2831 @@ +@@ -0,0 +1,2812 @@ +/* + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2021, Loongson Technology. All rights reserved. 
@@ -2335,7 +13990,6 @@ index 00000000000..5eae8b9995c + stptr_w_op = 0b00100101, + ldptr_d_op = 0b00100110, + stptr_d_op = 0b00100111, -+ csr_op = 0b00000100, + + unknow_ops8 = 0b11111111 + }; @@ -2432,15 +14086,6 @@ index 00000000000..5eae8b9995c + static int high6 (int x) { return high(x, 6); } + + -+ static ALWAYSINLINE void patch(address a, int length, uint32_t val) { -+ guarantee(val < (1ULL << length), "Field too big for insn"); -+ guarantee(length > 0, "length > 0"); -+ unsigned target = *(unsigned *)a; -+ target = (target >> length) << length; -+ target |= val; -+ *(unsigned *)a = target; -+ } -+ + protected: + // help methods for instruction ejection + @@ -3004,8 +14649,6 @@ index 00000000000..5eae8b9995c + void stptr_w (Register rd, Register rj, int si16) { assert(is_simm(si16, 16) && ((si16 & 0x3) == 0), "not a signed 16-bit int"); emit_int32(insn_I14RR(stptr_w_op, si16>>2, (int)rj->encoding(), (int)rd->encoding())); } + void ldptr_d (Register rd, Register rj, int si16) { assert(is_simm(si16, 16) && ((si16 & 0x3) == 0), "not a signed 16-bit int"); emit_int32(insn_I14RR(ldptr_d_op, si16>>2, (int)rj->encoding(), (int)rd->encoding())); } + void stptr_d (Register rd, Register rj, int si16) { assert(is_simm(si16, 16) && ((si16 & 0x3) == 0), "not a signed 16-bit int"); emit_int32(insn_I14RR(stptr_d_op, si16>>2, (int)rj->encoding(), (int)rd->encoding())); } -+ void csrrd (Register rd, int csr) { emit_int32(insn_I14RR(csr_op, csr, 0, (int)rd->encoding())); } -+ void csrwr (Register rd, int csr) { emit_int32(insn_I14RR(csr_op, csr, 1, (int)rd->encoding())); } + + void ld_b (Register rd, Register rj, int si12) { assert(is_simm(si12, 12), "not a signed 12-bit int"); emit_int32(insn_I12RR(ld_b_op, si12, (int)rj->encoding(), (int)rd->encoding())); } + void ld_h (Register rd, Register rj, int si12) { assert(is_simm(si12, 12), "not a signed 12-bit int"); emit_int32(insn_I12RR(ld_h_op, si12, (int)rj->encoding(), (int)rd->encoding())); } @@ -3179,25 +14822,18 @@ index 00000000000..5eae8b9995c + void bceqz(ConditionalFlagRegister cj, Label& L) { bceqz(cj, target(L)); } + void bcnez(ConditionalFlagRegister cj, Label& L) { bcnez(cj, target(L)); } + ++ // Now Membar_mask_bits is 0,Need to fix it after LA6000 + typedef enum { -+ // hint[4] -+ Completion = 0, -+ Ordering = (1 << 4), -+ -+ // The bitwise-not of the below constants is corresponding to the hint. This is convenient for OR operation. -+ // hint[3:2] and hint[1:0] -+ LoadLoad = ((1 << 3) | (1 << 1)), -+ LoadStore = ((1 << 3) | (1 << 0)), -+ StoreLoad = ((1 << 2) | (1 << 1)), -+ StoreStore = ((1 << 2) | (1 << 0)), -+ AnyAny = ((3 << 2) | (3 << 0)), ++ StoreStore = 0, ++ LoadStore = 0, ++ StoreLoad = 0, ++ LoadLoad = 0, ++ AnyAny = 0 + } Membar_mask_bits; + + // Serializes memory and blows flags + void membar(Membar_mask_bits hint) { -+ assert((hint & (3 << 0)) != 0, "membar mask unsupported!"); -+ assert((hint & (3 << 2)) != 0, "membar mask unsupported!"); -+ dbar(Ordering | (~hint & 0xf)); ++ dbar(hint); + } + + // LSX and LASX @@ -5131,10 +16767,10 @@ index 00000000000..8d439fda060 +} diff --git a/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch.hpp b/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch.hpp new file mode 100644 -index 00000000000..baadeebb243 +index 00000000000..486592903ce --- /dev/null +++ b/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch.hpp -@@ -0,0 +1,84 @@ +@@ -0,0 +1,82 @@ +/* + * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, Loongson Technology. 
All rights reserved. @@ -5203,8 +16839,6 @@ index 00000000000..baadeebb243 + + void deoptimize_trap(CodeEmitInfo *info); + -+ void emit_cmp_branch(LIR_OpBranch* op); -+ + enum { + // call stub: CompiledStaticCall::to_interp_stub_size() + + // CompiledStaticCall::to_trampoline_stub_size() @@ -5221,10 +16855,10 @@ index 00000000000..baadeebb243 +#endif // CPU_LOONGARCH_C1_LIRASSEMBLER_LOONGARCH_HPP diff --git a/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch_64.cpp b/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch_64.cpp new file mode 100644 -index 00000000000..cde86e3b3a1 +index 00000000000..2e4fbc1ecc0 --- /dev/null +++ b/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch_64.cpp -@@ -0,0 +1,3402 @@ +@@ -0,0 +1,3382 @@ +/* + * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, Loongson Technology. All rights reserved. @@ -6117,7 +17751,14 @@ index 00000000000..cde86e3b3a1 + __ ld_ptr(dest->as_register(), as_Address(from_addr)); + break; + case T_ADDRESS: -+ __ ld_ptr(dest->as_register(), as_Address(from_addr)); ++ // FIXME: OMG this is a horrible kludge. Any offset from an ++ // address that matches klass_offset_in_bytes() will be loaded ++ // as a word, not a long. ++ if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) { ++ __ ld_wu(dest->as_register(), as_Address(from_addr)); ++ } else { ++ __ ld_ptr(dest->as_register(), as_Address(from_addr)); ++ } + break; + case T_INT: + __ ld_w(dest->as_register(), as_Address(from_addr)); @@ -6150,6 +17791,10 @@ index 00000000000..cde86e3b3a1 + // Load barrier has not yet been applied, so ZGC can't verify the oop here + __ verify_oop(dest->as_register()); + } ++ } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) { ++ if (UseCompressedClassPointers) { ++ __ decode_klass_not_null(dest->as_register()); ++ } + } +} + @@ -6183,20 +17828,19 @@ index 00000000000..cde86e3b3a1 +#ifdef ASSERT + assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label"); + if (op->block() != NULL) _branch_target_blocks.append(op->block()); ++ assert(op->cond() == lir_cond_always, "must be"); +#endif + -+ if (op->cond() == lir_cond_always) { -+ if (op->info() != NULL) -+ add_debug_info_for_branch(op->info()); ++ if (op->info() != NULL) ++ add_debug_info_for_branch(op->info()); + -+ __ b_far(*(op->label())); -+ } else { -+ emit_cmp_branch(op); -+ } ++ __ b_far(*(op->label())); +} + -+void LIR_Assembler::emit_cmp_branch(LIR_OpBranch* op) { ++void LIR_Assembler::emit_opCmpBranch(LIR_OpCmpBranch* op) { +#ifdef ASSERT ++ assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label"); ++ if (op->block() != NULL) _branch_target_blocks.append(op->block()); + if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock()); +#endif + @@ -6212,7 +17856,7 @@ index 00000000000..cde86e3b3a1 + LIR_Opr opr2 = op->in_opr2(); + assert(op->condition() != lir_cond_always, "must be"); + -+ if (op->code() == lir_cond_float_branch) { ++ if (op->code() == lir_cmp_float_branch) { + bool is_unordered = (op->ublock() == op->block()); + if (opr1->is_single_fpu()) { + FloatRegister reg1 = opr1->as_float_reg(); @@ -6768,16 +18412,12 @@ index 00000000000..cde86e3b3a1 + +void LIR_Assembler::casw(Register addr, Register newval, Register cmpval, bool sign) { + __ cmpxchg32(Address(addr, 0), cmpval, newval, SCR1, sign, -+ /* retold */ false, /* acquire */ true, /* weak */ false, /* exchange */ false); -+ // LA SC equals store-conditional dbar, so 
no need AnyAny after CAS. -+ //__ membar(__ AnyAny); ++ /* retold */ false, /* barrier */ true, /* weak */ false, /* exchage */ false); +} + +void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) { + __ cmpxchg(Address(addr, 0), cmpval, newval, SCR1, -+ /* retold */ false, /* acquire */ true, /* weak */ false, /* exchange */ false); -+ // LA SC equals store-conditional dbar, so no need AnyAny after CAS. -+ //__ membar(__ AnyAny); ++ /* retold */ false, /* barrier */ true, /* weak */ false, /* exchage */ false); +} + +void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { @@ -6814,8 +18454,13 @@ index 00000000000..cde86e3b3a1 + } +} + -+void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr src1, LIR_Opr src2, LIR_Opr result, BasicType type, -+ LIR_Opr left, LIR_Opr right) { ++void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, ++ LIR_Opr result, BasicType type) { ++ Unimplemented(); ++} ++ ++void LIR_Assembler::cmp_cmove(LIR_Condition condition, LIR_Opr left, LIR_Opr right, ++ LIR_Opr src1, LIR_Opr src2, LIR_Opr result, BasicType type) { + assert(result->is_single_cpu() || result->is_double_cpu(), "expect single register for result"); + assert(left->is_single_cpu() || left->is_double_cpu(), "must be"); + Register regd = (result->type() == T_LONG) ? result->as_register_lo() : result->as_register(); @@ -8096,23 +19741,6 @@ index 00000000000..cde86e3b3a1 + __ bind(*op->stub()->continuation()); +} + -+void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) { -+ Register obj = op->obj()->as_pointer_register(); -+ Register result = op->result_opr()->as_pointer_register(); -+ -+ CodeEmitInfo* info = op->info(); -+ if (info != NULL) { -+ add_debug_info_for_null_check_here(info); -+ } -+ -+ if (UseCompressedClassPointers) { -+ __ ld_wu(result, obj, oopDesc::klass_offset_in_bytes()); -+ __ decode_klass_not_null(result); -+ } else { -+ __ ld_ptr(result, obj, oopDesc::klass_offset_in_bytes()); -+ } -+} -+ +void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { + ciMethod* method = op->profiled_method(); + ciMethod* callee = op->profiled_callee(); @@ -8240,10 +19868,7 @@ index 00000000000..cde86e3b3a1 + __ verify_oop(obj); + + if (tmp != obj) { -+ assert_different_registers(obj, tmp, SCR1, SCR2, mdo_addr.base(), mdo_addr.index()); + __ move(tmp, obj); -+ } else { -+ assert_different_registers(obj, SCR1, SCR2, mdo_addr.base(), mdo_addr.index()); + } + if (do_null) { + __ bnez(tmp, update); @@ -8302,11 +19927,10 @@ index 00000000000..cde86e3b3a1 + __ beqz(SCR2, none); + __ li(SCR1, (u1)TypeEntries::null_seen); + __ beq(SCR2, SCR1, none); -+ // There is a chance that the checks above -+ // fail if another thread has just set the ++ // There is a chance that the checks above (re-reading profiling ++ // data from memory) fail if another thread has just set the + // profiling to this obj's klass + membar_acquire(); -+ __ XOR(tmp, tmp, SCR2); // get back original value before XOR + __ ld_ptr(SCR2, mdo_addr); + __ XOR(tmp, tmp, SCR2); + assert(TypeEntries::type_klass_mask == -4, "must be"); @@ -8333,11 +19957,6 @@ index 00000000000..cde86e3b3a1 + __ bind(none); + // first time here. Set profile type. + __ st_ptr(tmp, mdo_addr); -+#ifdef ASSERT -+ assert(TypeEntries::type_mask == -2, "must be"); -+ __ bstrpick_d(tmp, tmp, 63, 1); -+ __ verify_klass_ptr(tmp); -+#endif + } + } else { + // There's a single possible klass at this profile point @@ -8371,11 +19990,6 @@ index 00000000000..cde86e3b3a1 +#endif + // first time here. 
Set profile type. + __ st_ptr(tmp, mdo_addr); -+#ifdef ASSERT -+ assert(TypeEntries::type_mask == -2, "must be"); -+ __ bstrpick_d(tmp, tmp, 63, 1); -+ __ verify_klass_ptr(tmp); -+#endif + } else { + assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && + ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent"); @@ -8629,13 +20243,13 @@ index 00000000000..cde86e3b3a1 +#undef __ diff --git a/src/hotspot/cpu/loongarch/c1_LIRGenerator_loongarch_64.cpp b/src/hotspot/cpu/loongarch/c1_LIRGenerator_loongarch_64.cpp new file mode 100644 -index 00000000000..28298dcc375 +index 00000000000..a346700ed36 --- /dev/null +++ b/src/hotspot/cpu/loongarch/c1_LIRGenerator_loongarch_64.cpp -@@ -0,0 +1,1386 @@ +@@ -0,0 +1,1397 @@ +/* + * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved. -+ * Copyright (c) 2021, 2024, Loongson Technology. All rights reserved. ++ * Copyright (c) 2021, 2022, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it @@ -8792,10 +20406,8 @@ index 00000000000..28298dcc375 + if (index->is_register()) { + // apply the shift and accumulate the displacement + if (shift > 0) { -+ // Use long register to avoid overflow when shifting large index values left. -+ LIR_Opr tmp = new_register(T_LONG); -+ __ convert(Bytecodes::_i2l, index, tmp); -+ __ shift_left(tmp, shift, tmp); ++ LIR_Opr tmp = new_pointer_register(); ++ __ shift_left(index, shift, tmp); + index = tmp; + } + if (large_disp != 0) { @@ -8897,18 +20509,32 @@ index 00000000000..28298dcc375 + __ store(reg, addr); +} + -+void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) { ++template ++void LIRGenerator::cmp_mem_int_branch(LIR_Condition condition, LIR_Opr base, ++ int disp, int c, T tgt, CodeEmitInfo* info) { + LIR_Opr reg = new_register(T_INT); + __ load(generate_address(base, disp, T_INT), reg, info); -+ __ cmp(condition, reg, LIR_OprFact::intConst(c)); ++ __ cmp_branch(condition, reg, LIR_OprFact::intConst(c), tgt); +} + -+void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) { ++// Explicit instantiation for all supported types. ++template void LIRGenerator::cmp_mem_int_branch(LIR_Condition, LIR_Opr, int, int, Label*, CodeEmitInfo*); ++template void LIRGenerator::cmp_mem_int_branch(LIR_Condition, LIR_Opr, int, int, BlockBegin*, CodeEmitInfo*); ++template void LIRGenerator::cmp_mem_int_branch(LIR_Condition, LIR_Opr, int, int, CodeStub*, CodeEmitInfo*); ++ ++template ++void LIRGenerator::cmp_reg_mem_branch(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, ++ int disp, BasicType type, T tgt, CodeEmitInfo* info) { + LIR_Opr reg1 = new_register(T_INT); + __ load(generate_address(base, disp, type), reg1, info); -+ __ cmp(condition, reg, reg1); ++ __ cmp_branch(condition, reg, reg1, tgt); +} + ++// Explicit instantiation for all supported types. 
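++// The three instantiations below cover every branch-target type the shared
++// C1 code passes in: a raw Label*, a BlockBegin* for CFG edges, and a
++// CodeStub* for slow-path targets.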
++template void LIRGenerator::cmp_reg_mem_branch(LIR_Condition, LIR_Opr, LIR_Opr, int, BasicType, Label*, CodeEmitInfo*); ++template void LIRGenerator::cmp_reg_mem_branch(LIR_Condition, LIR_Opr, LIR_Opr, int, BasicType, BlockBegin*, CodeEmitInfo*); ++template void LIRGenerator::cmp_reg_mem_branch(LIR_Condition, LIR_Opr, LIR_Opr, int, BasicType, CodeStub*, CodeEmitInfo*); ++ +bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) { + if (is_power_of_2(c - 1)) { + __ shift_left(left, exact_log2(c - 1), tmp); @@ -9064,8 +20690,8 @@ index 00000000000..28298dcc375 + } + if (need_zero_check) { + CodeEmitInfo* info = state_for(x); -+ __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0)); -+ __ branch(lir_cond_equal, new DivByZeroStub(info)); ++ CodeStub* stub = new DivByZeroStub(info); ++ __ cmp_branch(lir_cond_equal, right.result(), LIR_OprFact::longConst(0), stub); + } + + rlock_result(x); @@ -9135,8 +20761,8 @@ index 00000000000..28298dcc375 + } + if (need_zero_check) { + CodeEmitInfo* info = state_for(x); -+ __ cmp(lir_cond_equal, right_arg->result(), LIR_OprFact::longConst(0)); -+ __ branch(lir_cond_equal, new DivByZeroStub(info)); ++ CodeStub* stub = new DivByZeroStub(info); ++ __ cmp_branch(lir_cond_equal, right_arg->result(), LIR_OprFact::longConst(0), stub); + } + + LIR_Opr ill = LIR_OprFact::illegalOpr; @@ -9982,14 +21608,13 @@ index 00000000000..28298dcc375 + __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before())); + } + -+ __ cmp(lir_cond(cond), left, right); + // Generate branch profiling. Profiling code doesn't kill flags. -+ profile_branch(x, cond); ++ profile_branch(x, cond, left, right); + move_to_phi(x->state()); + if (x->x()->type()->is_float_kind()) { -+ __ branch(lir_cond(cond), x->tsux(), x->usux()); ++ __ cmp_branch(lir_cond(cond), left, right, x->tsux(), x->usux()); + } else { -+ __ branch(lir_cond(cond), x->tsux()); ++ __ cmp_branch(lir_cond(cond), left, right, x->tsux()); + } + assert(x->default_sux() == x->fsux(), "wrong destination above"); + __ jump(x->default_sux()); @@ -10021,10 +21646,10 @@ index 00000000000..28298dcc375 +} diff --git a/src/hotspot/cpu/loongarch/c1_LIR_loongarch_64.cpp b/src/hotspot/cpu/loongarch/c1_LIR_loongarch_64.cpp new file mode 100644 -index 00000000000..01e8c9f270e +index 00000000000..127be89865e --- /dev/null +++ b/src/hotspot/cpu/loongarch/c1_LIR_loongarch_64.cpp -@@ -0,0 +1,57 @@ +@@ -0,0 +1,75 @@ +/* + * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, Loongson Technology. All rights reserved. @@ -10082,6 +21707,24 @@ index 00000000000..01e8c9f270e + "wrong type for addresses"); +} +#endif // PRODUCT ++ ++template ++void LIR_List::cmp_branch(LIR_Condition condition, LIR_Opr left, LIR_Opr right, T tgt, CodeEmitInfo* info) { ++ append(new LIR_OpCmpBranch(condition, left, right, tgt, info)); ++} ++ ++// Explicit instantiation for all supported types. 
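++// Typical shared-code use of the fused op (see the division-by-zero check in
++// c1_LIRGenerator_loongarch_64.cpp):
++//   CodeStub* stub = new DivByZeroStub(info);
++//   __ cmp_branch(lir_cond_equal, right.result(), LIR_OprFact::longConst(0), stub);
++// which appends a single LIR_OpCmpBranch where other ports emit cmp() + branch().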
++template void LIR_List::cmp_branch(LIR_Condition, LIR_Opr, LIR_Opr, Label*, CodeEmitInfo*); ++template void LIR_List::cmp_branch(LIR_Condition, LIR_Opr, LIR_Opr, BlockBegin*, CodeEmitInfo*); ++template void LIR_List::cmp_branch(LIR_Condition, LIR_Opr, LIR_Opr, CodeStub*, CodeEmitInfo*); ++ ++void LIR_List::cmp_branch(LIR_Condition condition, LIR_Opr left, LIR_Opr right, BlockBegin* block, BlockBegin* unordered) { ++ append(new LIR_OpCmpBranch(condition, left, right, block, unordered)); ++} ++ ++void LIR_List::cmp_cmove(LIR_Condition condition, LIR_Opr left, LIR_Opr right, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type) { ++ append(new LIR_Op4(lir_cmp_cmove, condition, left, right, src1, src2, dst, type)); ++} diff --git a/src/hotspot/cpu/loongarch/c1_LinearScan_loongarch.hpp b/src/hotspot/cpu/loongarch/c1_LinearScan_loongarch.hpp new file mode 100644 index 00000000000..f15dacafeba @@ -10317,7 +21960,7 @@ index 00000000000..38ff4c58369 +#endif // CPU_LOONGARCH_C1_MACROASSEMBLER_LOONGARCH_HPP diff --git a/src/hotspot/cpu/loongarch/c1_MacroAssembler_loongarch_64.cpp b/src/hotspot/cpu/loongarch/c1_MacroAssembler_loongarch_64.cpp new file mode 100644 -index 00000000000..0221951342a +index 00000000000..56c6281d415 --- /dev/null +++ b/src/hotspot/cpu/loongarch/c1_MacroAssembler_loongarch_64.cpp @@ -0,0 +1,365 @@ @@ -10409,7 +22052,7 @@ index 00000000000..0221951342a + // displaced header address in the object header - if it is not the same, get the + // object header instead + lea(SCR2, Address(obj, hdr_offset)); -+ cmpxchg(Address(SCR2, 0), hdr, disp_hdr, SCR1, true, true /* acquire */, done); ++ cmpxchg(Address(SCR2, 0), hdr, disp_hdr, SCR1, true, false, done); + // if the object header was the same, we're done + // if the object header was not the same, it is now in the hdr register + // => test if it is a stack pointer into the same stack (recursive locking), i.e.: @@ -10469,9 +22112,9 @@ index 00000000000..0221951342a + // we do unlocking via runtime call + if (hdr_offset) { + lea(SCR1, Address(obj, hdr_offset)); -+ cmpxchg(Address(SCR1, 0), disp_hdr, hdr, SCR2, false, true /* acquire */, done, &slow_case); ++ cmpxchg(Address(SCR1, 0), disp_hdr, hdr, SCR2, false, false, done, &slow_case); + } else { -+ cmpxchg(Address(obj, 0), disp_hdr, hdr, SCR2, false, true /* acquire */, done, &slow_case); ++ cmpxchg(Address(obj, 0), disp_hdr, hdr, SCR2, false, false, done, &slow_case); + } + // done + bind(done); @@ -11913,13 +23556,13 @@ index 00000000000..ce84af28c9b +#endif // CPU_LOONGARCH_C1_GLOBALS_LOONGARCH_HPP diff --git a/src/hotspot/cpu/loongarch/c2_MacroAssembler_loongarch.cpp b/src/hotspot/cpu/loongarch/c2_MacroAssembler_loongarch.cpp new file mode 100644 -index 00000000000..0c91c74d63e +index 00000000000..e3a01f1f25d --- /dev/null +++ b/src/hotspot/cpu/loongarch/c2_MacroAssembler_loongarch.cpp @@ -0,0 +1,1872 @@ +/* + * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. -+ * Copyright (c) 2021, 2023, Loongson Technology. All rights reserved. ++ * Copyright (c) 2021, 2022, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it @@ -12114,7 +23757,7 @@ index 00000000000..0c91c74d63e + + if (PrintBiasedLockingStatistics) { + Label SUCC, FAIL; -+ cmpxchg(Address(objReg, 0), tmpReg, boxReg, scrReg, true, true /* acquire */, SUCC, &FAIL); // Updates tmpReg ++ cmpxchg(Address(objReg, 0), tmpReg, boxReg, scrReg, true, false, SUCC, &FAIL); // Updates tmpReg + bind(SUCC); + atomic_inc32((address)BiasedLocking::fast_path_entry_count_addr(), 1, AT, scrReg); + li(resReg, 1); @@ -12122,7 +23765,7 @@ index 00000000000..0c91c74d63e + bind(FAIL); + } else { + // If cmpxchg is succ, then scrReg = 1 -+ cmpxchg(Address(objReg, 0), tmpReg, boxReg, scrReg, true, true /* acquire */, DONE_SET); // Updates tmpReg ++ cmpxchg(Address(objReg, 0), tmpReg, boxReg, scrReg, true, false, DONE_SET); // Updates tmpReg + } + + // Recursive locking @@ -12167,7 +23810,7 @@ index 00000000000..0c91c74d63e +#endif + // It's inflated and appears unlocked + addi_d(tmpReg, tmpReg, ObjectMonitor::owner_offset_in_bytes() - 2); -+ cmpxchg(Address(tmpReg, 0), R0, TREG, scrReg, false, true /* acquire */); ++ cmpxchg(Address(tmpReg, 0), R0, TREG, scrReg, false, false); + // Intentional fall-through into DONE ... + + bind(DONE_SET); @@ -12286,14 +23929,14 @@ index 00000000000..0c91c74d63e + move(AT, R0); + bnez(scrReg, DONE_SET); + -+ membar(Assembler::Membar_mask_bits(LoadStore|StoreStore)); ++ dbar(0); + st_d(R0, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes() - 2)); + li(resReg, 1); + b(DONE); + + bind(Stacked); + ld_d(tmpReg, Address(boxReg, 0)); -+ cmpxchg(Address(objReg, 0), boxReg, tmpReg, AT, false, true /* acquire */); ++ cmpxchg(Address(objReg, 0), boxReg, tmpReg, AT, false, false); + + bind(DONE_SET); + move(resReg, AT); @@ -14558,7 +26201,7 @@ index 00000000000..680a27363ec +#endif // CPU_LOONGARCH_FOREIGN_GLOBALS_LOONGARCH_HPP diff --git a/src/hotspot/cpu/loongarch/frame_loongarch.cpp b/src/hotspot/cpu/loongarch/frame_loongarch.cpp new file mode 100644 -index 00000000000..1aba8e4dd27 +index 00000000000..23a63a77d98 --- /dev/null +++ b/src/hotspot/cpu/loongarch/frame_loongarch.cpp @@ -0,0 +1,668 @@ @@ -15102,7 +26745,7 @@ index 00000000000..1aba8e4dd27 + + // first the method + -+ Method* m = safe_interpreter_frame_method(); ++ Method* m = *interpreter_frame_method_addr(); + + // validate the method we'd find in this potential sender + if (!Method::is_valid_method(m)) return false; @@ -15651,7 +27294,7 @@ index 00000000000..8b0e0502701 +#endif // CPU_LOONGARCH_FRAME_LOONGARCH_INLINE_HPP diff --git a/src/hotspot/cpu/loongarch/gc/g1/g1BarrierSetAssembler_loongarch.cpp b/src/hotspot/cpu/loongarch/gc/g1/g1BarrierSetAssembler_loongarch.cpp new file mode 100644 -index 00000000000..e129264506b +index 00000000000..0afc25e6e95 --- /dev/null +++ b/src/hotspot/cpu/loongarch/gc/g1/g1BarrierSetAssembler_loongarch.cpp @@ -0,0 +1,532 @@ @@ -16107,10 +27750,10 @@ index 00000000000..e129264506b + __ b(done); + + __ bind(runtime); -+ __ push_call_clobbered_registers(); ++ __ pushad(); + __ load_parameter(0, pre_val); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread); -+ __ pop_call_clobbered_registers(); ++ __ popad(); + __ bind(done); + + __ epilogue(); @@ -16177,9 +27820,9 @@ index 00000000000..e129264506b + __ b(done); + + __ bind(runtime); -+ __ push_call_clobbered_registers(); ++ __ pushad(); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread); -+ __ 
pop_call_clobbered_registers(); ++ __ popad(); + __ bind(done); + __ epilogue(); +} @@ -17320,7 +28963,7 @@ index 00000000000..f82a2500d41 +} diff --git a/src/hotspot/cpu/loongarch/gc/shenandoah/shenandoahBarrierSetAssembler_loongarch.cpp b/src/hotspot/cpu/loongarch/gc/shenandoah/shenandoahBarrierSetAssembler_loongarch.cpp new file mode 100644 -index 00000000000..e90623fe989 +index 00000000000..37124c99615 --- /dev/null +++ b/src/hotspot/cpu/loongarch/gc/shenandoah/shenandoahBarrierSetAssembler_loongarch.cpp @@ -0,0 +1,784 @@ @@ -17611,7 +29254,7 @@ index 00000000000..e90623fe989 + __ beqz(SCR2, not_cset); + } + -+ __ push_call_clobbered_registers_except(RegSet::of(V0)); ++ __ pushad_except_v0(); + if (is_strong) { + if (is_narrow) { + __ li(RA, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow)); @@ -17630,7 +29273,7 @@ index 00000000000..e90623fe989 + __ li(RA, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom)); + } + __ jalr(RA); -+ __ pop_call_clobbered_registers_except(RegSet::of(V0)); ++ __ popad_except_v0(); + + __ bind(not_cset); + @@ -17646,9 +29289,9 @@ index 00000000000..e90623fe989 + +void ShenandoahBarrierSetAssembler::iu_barrier(MacroAssembler* masm, Register dst, Register tmp) { + if (ShenandoahIUBarrier) { -+ __ push_call_clobbered_registers(); ++ __ pushad(); + satb_write_barrier_pre(masm, noreg, dst, TREG, tmp, true, false); -+ __ pop_call_clobbered_registers(); ++ __ popad(); + } +} + @@ -17700,7 +29343,7 @@ index 00000000000..e90623fe989 + // 3: apply keep-alive barrier if needed + if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) { + __ enter(); -+ __ push_call_clobbered_registers(); ++ __ pushad(); + satb_write_barrier_pre(masm /* masm */, + noreg /* obj */, + dst /* pre_val */, @@ -17708,7 +29351,7 @@ index 00000000000..e90623fe989 + tmp1 /* tmp */, + true /* tosca_live */, + true /* expand_call */); -+ __ pop_call_clobbered_registers(); ++ __ popad(); + __ leave(); + } +} @@ -17841,9 +29484,9 @@ index 00000000000..e90623fe989 + + if (is_narrow) { + __ cmpxchg32(addr, expected, new_val, tmp2, false /* sign */, false /* retold */, -+ acquire /* acquire */, false /* weak */, true /* exchange */); ++ acquire /* barrier */, false /* weak */, true /* exchange */); + } else { -+ __ cmpxchg(addr, expected, new_val, tmp2, false /* retold */, acquire /* acquire */, ++ __ cmpxchg(addr, expected, new_val, tmp2, false /* retold */, acquire /* barrier */, + false /* weak */, true /* exchange */); + } + // tmp2 holds value fetched. @@ -17907,9 +29550,9 @@ index 00000000000..e90623fe989 + // compares result with expected. + if (is_narrow) { + __ cmpxchg32(addr, tmp2, new_val, tmp1, false /* sign */, false /* retold */, -+ acquire /* acquire */, false /* weak */, false /* exchange */); ++ acquire /* barrier */, false /* weak */, false /* exchange */); + } else { -+ __ cmpxchg(addr, tmp2, new_val, tmp1, false /* retold */, acquire /* acquire */, ++ __ cmpxchg(addr, tmp2, new_val, tmp1, false /* retold */, acquire /* barrier */, + false /* weak */, false /* exchange */); + } + // tmp1 set iff success, tmp2 holds value fetched. 
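
Note (illustration, not part of the patch): the acquire -> barrier comment
swaps in the Shenandoah hunks above track a parameter rename in the
cmpxchg/cmpxchg32 helpers themselves, which appear later in this patch in
macroAssembler_loongarch.cpp and whose core is an ll.d/sc.d retry loop. A
minimal, non-authoritative C++ sketch of that loop follows; ll_d()/sc_d()
are hypothetical stand-ins for the LoongArch ll.d/sc.d instructions, and
cas64 is a name invented here:

    #include <cstdint>

    extern int64_t ll_d(volatile int64_t* addr);              // load-linked
    extern bool    sc_d(volatile int64_t* addr, int64_t val); // store-conditional

    // Strong variant: true iff *addr was observed equal to `expected`
    // and atomically replaced by `desired`.
    bool cas64(volatile int64_t* addr, int64_t expected, int64_t desired) {
      for (;;) {
        int64_t observed = ll_d(addr);   // bind(again); ll_d(tmp, addr)
        if (observed != expected)
          return false;                  // bne(tmp, oldval, neq): fail path
        if (sc_d(addr, desired))
          return true;                   // store-conditional took: b(succ)
        // sc.d failed (the line was written concurrently): retry,
        // matching beqz(tmp, again) in the emitted code.
      }
    }

As I read those helpers, the `weak` flag skips the retry on a failed sc.d,
and the `exchange` flag returns the observed value instead of a flag.
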
@@ -18055,10 +29698,10 @@ index 00000000000..e90623fe989 + __ b(done); + + __ bind(runtime); -+ __ push_call_clobbered_registers(); ++ __ pushad(); + __ load_parameter(0, pre_val); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread); -+ __ pop_call_clobbered_registers(); ++ __ popad(); + __ bind(done); + + __ epilogue(); @@ -18069,7 +29712,7 @@ index 00000000000..e90623fe989 + __ bstrins_d(SP, R0, 3, 0); + // arg0 : object to be resolved + -+ __ push_call_clobbered_registers_except(RegSet::of(V0)); ++ __ pushad_except_v0(); + __ load_parameter(0, A0); + __ load_parameter(1, A1); + @@ -18100,7 +29743,7 @@ index 00000000000..e90623fe989 + __ li(RA, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom)); + } + __ jalr(RA); -+ __ pop_call_clobbered_registers_except(RegSet::of(V0)); ++ __ popad_except_v0(); + + __ epilogue(); +} @@ -18429,10 +30072,10 @@ index 00000000000..04f67d23157 +%} diff --git a/src/hotspot/cpu/loongarch/gc/z/zBarrierSetAssembler_loongarch.cpp b/src/hotspot/cpu/loongarch/gc/z/zBarrierSetAssembler_loongarch.cpp new file mode 100644 -index 00000000000..372d80cf11b +index 00000000000..f345008bb92 --- /dev/null +++ b/src/hotspot/cpu/loongarch/gc/z/zBarrierSetAssembler_loongarch.cpp -@@ -0,0 +1,462 @@ +@@ -0,0 +1,466 @@ +/* + * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2022, Loongson Technology. All rights reserved. @@ -18534,7 +30177,7 @@ index 00000000000..372d80cf11b + if (dst != V0) { + __ push(V0); + } -+ __ push_call_clobbered_registers_except(RegSet::of(V0)); ++ __ pushad_except_v0(); + + if (dst != A0) { + __ move(A0, dst); @@ -18542,7 +30185,7 @@ index 00000000000..372d80cf11b + __ move(A1, scratch); + __ MacroAssembler::call_VM_leaf_base(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2); + -+ __ pop_call_clobbered_registers_except(RegSet::of(V0)); ++ __ popad_except_v0(); + + // Make sure dst has the return value. 
+ if (dst != V0) { @@ -18657,10 +30300,14 @@ index 00000000000..372d80cf11b +#define __ ce->masm()-> + +void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce, -+ LIR_Opr ref) const { -+ assert_different_registers(SCR1, TREG, ref->as_register()); -+ __ ld_d(SCR1, address_bad_mask_from_thread(TREG)); -+ __ andr(SCR1, SCR1, ref->as_register()); ++ LIR_Opr ref, ++ LIR_Opr res) const { ++ Register rscratch1 = AT; ++ Register rthread = TREG; ++ assert_different_registers(rscratch1, rthread, ref->as_register()); ++ ++ __ ld_d(rscratch1, address_bad_mask_from_thread(rthread)); ++ __ andr(res->as_register(), ref->as_register(), rscratch1); +} + +void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce, @@ -18725,7 +30372,7 @@ index 00000000000..372d80cf11b + DecoratorSet decorators) const { + __ prologue("zgc_load_barrier stub", false); + -+ __ push_call_clobbered_registers_except(RegSet::of(V0)); ++ __ pushad_except_v0(); + + // Setup arguments + __ load_parameter(0, A0); @@ -18733,7 +30380,7 @@ index 00000000000..372d80cf11b + + __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2); + -+ __ pop_call_clobbered_registers_except(RegSet::of(V0)); ++ __ popad_except_v0(); + + __ epilogue(); +} @@ -18897,10 +30544,10 @@ index 00000000000..372d80cf11b +#endif // COMPILER2 diff --git a/src/hotspot/cpu/loongarch/gc/z/zBarrierSetAssembler_loongarch.hpp b/src/hotspot/cpu/loongarch/gc/z/zBarrierSetAssembler_loongarch.hpp new file mode 100644 -index 00000000000..6a96d6fdd60 +index 00000000000..8d032c34995 --- /dev/null +++ b/src/hotspot/cpu/loongarch/gc/z/zBarrierSetAssembler_loongarch.hpp -@@ -0,0 +1,101 @@ +@@ -0,0 +1,102 @@ +/* + * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, Loongson Technology. All rights reserved. @@ -18983,7 +30630,8 @@ index 00000000000..6a96d6fdd60 + +#ifdef COMPILER1 + void generate_c1_load_barrier_test(LIR_Assembler* ce, -+ LIR_Opr ref) const; ++ LIR_Opr ref, ++ LIR_Opr res) const; + + void generate_c1_load_barrier_stub(LIR_Assembler* ce, + ZLoadBarrierStubC1* stub) const; @@ -19221,10 +30869,10 @@ index 00000000000..85db1cf1854 +} diff --git a/src/hotspot/cpu/loongarch/gc/z/zGlobals_loongarch.hpp b/src/hotspot/cpu/loongarch/gc/z/zGlobals_loongarch.hpp new file mode 100644 -index 00000000000..542fd267434 +index 00000000000..7d20899d949 --- /dev/null +++ b/src/hotspot/cpu/loongarch/gc/z/zGlobals_loongarch.hpp -@@ -0,0 +1,35 @@ +@@ -0,0 +1,37 @@ +/* + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, Loongson Technology. All rights reserved. 
@@ -19256,13 +30904,15 @@ index 00000000000..542fd267434 +const size_t ZPlatformHeapViews = 3; +const size_t ZPlatformCacheLineSize = 64; + ++const bool ZPlatformLoadBarrierTestResultInRegister = true; ++ +size_t ZPlatformAddressOffsetBits(); +size_t ZPlatformAddressMetadataShift(); + +#endif // CPU_LOONGARCH_GC_Z_ZGLOBALS_LOONGARCH_HPP diff --git a/src/hotspot/cpu/loongarch/gc/z/z_loongarch_64.ad b/src/hotspot/cpu/loongarch/gc/z/z_loongarch_64.ad new file mode 100644 -index 00000000000..262cfd50b65 +index 00000000000..59656e75376 --- /dev/null +++ b/src/hotspot/cpu/loongarch/gc/z/z_loongarch_64.ad @@ -0,0 +1,273 @@ @@ -19332,12 +30982,12 @@ index 00000000000..262cfd50b65 +#endif + Address addr(mem); + if (node->barrier_data() == ZLoadBarrierElided) { -+ __ cmpxchg(addr, oldval, newval, tmp, false /* retold */, acquire /* acquire */, ++ __ cmpxchg(addr, oldval, newval, tmp, false /* retold */, acquire /* barrier */, + weak /* weak */, false /* exchange */); + __ move(res, tmp); + } else { + __ move(tmp, oldval); -+ __ cmpxchg(addr, tmp, newval, AT, true /* retold */, acquire /* acquire */, ++ __ cmpxchg(addr, tmp, newval, AT, true /* retold */, acquire /* barrier */, + false /* weak */, false /* exchange */); + __ move(res, AT); + @@ -19346,7 +30996,7 @@ index 00000000000..262cfd50b65 + __ andr(AT, AT, tmp); + __ beqz(AT, good); + z_load_barrier_slow_path(_masm, node, addr, tmp, res /* used as tmp */); -+ __ cmpxchg(addr, oldval, newval, tmp, false /* retold */, acquire /* acquire */, weak /* weak */, false /* exchange */); ++ __ cmpxchg(addr, oldval, newval, tmp, false /* retold */, acquire /* barrier */, weak /* weak */, false /* exchange */); + __ move(res, tmp); + __ bind(good); + } @@ -19602,10 +31252,10 @@ index 00000000000..363cd6e2092 +#endif // CPU_LOONGARCH_GLOBALDEFINITIONS_LOONGARCH_HPP diff --git a/src/hotspot/cpu/loongarch/globals_loongarch.hpp b/src/hotspot/cpu/loongarch/globals_loongarch.hpp new file mode 100644 -index 00000000000..2358ca31596 +index 00000000000..e31a3d02555 --- /dev/null +++ b/src/hotspot/cpu/loongarch/globals_loongarch.hpp -@@ -0,0 +1,106 @@ +@@ -0,0 +1,104 @@ +/* + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2022, Loongson Technology. All rights reserved. @@ -19705,9 +31355,7 @@ index 00000000000..2358ca31596 + "Use CRC32 instructions for CRC32 computation") \ + \ + product(bool, UseActiveCoresMP, false, \ -+ "Eliminate barriers for single active cpu") \ -+ \ -+ product(bool, TraceTraps, false, "Trace all traps the signal handler") ++ "Eliminate barriers for single active cpu") + +// end of ARCH_FLAGS + @@ -20191,10 +31839,10 @@ index 00000000000..7dea5deb183 +#endif // CPU_LOONGARCH_INTERP_MASM_LOONGARCH_64_HPP diff --git a/src/hotspot/cpu/loongarch/interp_masm_loongarch_64.cpp b/src/hotspot/cpu/loongarch/interp_masm_loongarch_64.cpp new file mode 100644 -index 00000000000..cb8ad8a359c +index 00000000000..fa65d10765c --- /dev/null +++ b/src/hotspot/cpu/loongarch/interp_masm_loongarch_64.cpp -@@ -0,0 +1,2042 @@ +@@ -0,0 +1,2040 @@ +/* + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2021, Loongson Technology. All rights reserved. 
@@ -21076,13 +32724,13 @@ index 00000000000..cb8ad8a359c + + if (PrintBiasedLockingStatistics) { + Label succ, fail; -+ cmpxchg(Address(scr_reg, 0), tmp_reg, lock_reg, AT, true, true /* acquire */, succ, &fail); ++ cmpxchg(Address(scr_reg, 0), tmp_reg, lock_reg, AT, true, false, succ, &fail); + bind(succ); + atomic_inc32((address)BiasedLocking::fast_path_entry_count_addr(), 1, AT, scr_reg); + b(done); + bind(fail); + } else { -+ cmpxchg(Address(scr_reg, 0), tmp_reg, lock_reg, AT, true, true /* acquire */, done); ++ cmpxchg(Address(scr_reg, 0), tmp_reg, lock_reg, AT, true, false, done); + } + + // Test if the oopMark is an obvious stack pointer, i.e., @@ -21156,7 +32804,7 @@ index 00000000000..cb8ad8a359c + beqz(hdr_reg, done); + + // Atomic swap back the old header -+ cmpxchg(Address(scr_reg, 0), tmp_reg, hdr_reg, AT, false, true /* acquire */, done); ++ cmpxchg(Address(scr_reg, 0), tmp_reg, hdr_reg, AT, false, false, done); + + // Call the runtime routine for slow case. + st_d(scr_reg, lock_reg, BasicObjectLock::obj_offset_in_bytes()); // restore obj @@ -21839,7 +33487,6 @@ index 00000000000..cb8ad8a359c + + +void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) { -+ assert_different_registers(obj, AT, T5, mdo_addr.base(), mdo_addr.index()); + Label update, next, none; + + verify_oop(obj); @@ -21878,21 +33525,25 @@ index 00000000000..cb8ad8a359c + xorr(obj, obj, AT); + + assert(TypeEntries::type_klass_mask == -4, "must be"); -+ bstrpick_d(T5, obj, 63, 2); -+ beqz(T5, next); ++ bstrpick_d(AT, obj, 63, 2); ++ beqz(AT, next); + -+ andi(T5, obj, TypeEntries::type_unknown); -+ bnez(T5, next); ++ andi(AT, obj, TypeEntries::type_unknown); ++ bnez(AT, next); + ++ if (mdo_addr.index() == noreg) { ++ ld_d(AT, mdo_addr); ++ } else { ++ ld_d(AT, T0, mdo_addr.disp()); ++ } + beqz(AT, none); + -+ addi_d(T5, AT, -(TypeEntries::null_seen)); -+ beqz(T5, none); ++ addi_d(AT, AT, -(TypeEntries::null_seen)); ++ beqz(AT, none); + -+ // There is a chance that the checks above -+ // fail if another thread has just set the ++ // There is a chance that the checks above (re-reading profiling ++ // data from memory) fail if another thread has just set the + // profiling to this obj's klass -+ xorr(obj, obj, AT); // get back original value before XOR + if (mdo_addr.index() == noreg) { + ld_d(AT, mdo_addr); + } else { @@ -21924,11 +33575,6 @@ index 00000000000..cb8ad8a359c + } else { + st_d(obj, T0, mdo_addr.disp()); + } -+#ifdef ASSERT -+ assert(TypeEntries::type_mask == -2, "must be"); -+ bstrpick_d(obj, obj, 63, 1); -+ verify_klass_ptr(obj); -+#endif + + bind(next); + if (mdo_addr.index() != noreg) { @@ -23279,10 +34925,10 @@ index 00000000000..80dff0c7626 + diff --git a/src/hotspot/cpu/loongarch/loongarch_64.ad b/src/hotspot/cpu/loongarch/loongarch_64.ad new file mode 100644 -index 00000000000..7e01822e0e9 +index 00000000000..703aeafc855 --- /dev/null +++ b/src/hotspot/cpu/loongarch/loongarch_64.ad -@@ -0,0 +1,15689 @@ +@@ -0,0 +1,15672 @@ +// +// Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. +// Copyright (c) 2015, 2022, Loongson Technology. All rights reserved. 
@@ -25399,7 +37045,7 @@ index 00000000000..7e01822e0e9 + if (C->stub_function() == NULL && BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) { + st->print("\n\t"); + st->print("ld_d T1, guard, 0\n\t"); -+ st->print("membar LoadLoad\n\t"); ++ st->print("dbar 0\n\t"); + st->print("ld_d T2, TREG, thread_disarmed_offset\n\t"); + st->print("beq T1, T2, skip\n\t"); + st->print("\n\t"); @@ -29719,7 +41365,6 @@ index 00000000000..7e01822e0e9 + +instruct membar_storestore() %{ + match(MemBarStoreStore); -+ match(StoreStoreFence); + + ins_cost(400); + format %{ "MEMBAR-storestore @ membar_storestore" %} @@ -29729,17 +41374,6 @@ index 00000000000..7e01822e0e9 + ins_pipe(empty); +%} + -+instruct same_addr_load_fence() %{ -+ match(SameAddrLoadFence); -+ ins_cost(400); -+ -+ format %{ "MEMBAR @ same_addr_load_fence" %} -+ ins_encode %{ -+ __ dbar(0x700); -+ %} -+ ins_pipe(pipe_slow); -+%} -+ +//----------Move Instructions-------------------------------------------------- +instruct castX2P(mRegP dst, mRegL src) %{ + match(Set dst (CastX2P src)); @@ -33691,13 +45325,13 @@ index 00000000000..7e01822e0e9 + ins_cost(300); + + // Use the following format syntax -+ format %{ "stop; #@ShouldNotReachHere" %} ++ format %{ "ILLTRAP ;#@ShouldNotReachHere" %} + ins_encode %{ + if (is_reachable()) { -+ __ stop(_halt_reason); ++ // Here we should emit illtrap! ++ __ stop("ShouldNotReachHere"); + } + %} -+ + ins_pipe( pipe_jump ); +%} + @@ -33881,9 +45515,9 @@ index 00000000000..7e01822e0e9 + Address addr($mem_ptr$$Register, 0); + + if (res != addr.base() && res != oldval && res != newval) { -+ __ cmpxchg32(addr, oldval, newval, res, true, false, true /* acquire */); ++ __ cmpxchg32(addr, oldval, newval, res, true, false, true); + } else { -+ __ cmpxchg32(addr, oldval, newval, AT, true, false, true /* acquire */); ++ __ cmpxchg32(addr, oldval, newval, AT, true, false, true); + __ move(res, AT); + } + %} @@ -33902,9 +45536,9 @@ index 00000000000..7e01822e0e9 + Address addr($mem_ptr$$Register, 0); + + if (res != addr.base() && res != oldval && res != newval) { -+ __ cmpxchg(addr, oldval, newval, res, false, true /* acquire */); ++ __ cmpxchg(addr, oldval, newval, res, false, true); + } else { -+ __ cmpxchg(addr, oldval, newval, AT, false, true /* acquire */); ++ __ cmpxchg(addr, oldval, newval, AT, false, true); + __ move(res, AT); + } + %} @@ -33923,9 +45557,9 @@ index 00000000000..7e01822e0e9 + Address addr($mem_ptr$$Register, 0); + + if (res != addr.base() && res != oldval && res != newval) { -+ __ cmpxchg(addr, oldval, newval, res, false, true /* acquire */); ++ __ cmpxchg(addr, oldval, newval, res, false, true); + } else { -+ __ cmpxchg(addr, oldval, newval, AT, false, true /* acquire */); ++ __ cmpxchg(addr, oldval, newval, AT, false, true); + __ move(res, AT); + } + %} @@ -33943,9 +45577,9 @@ index 00000000000..7e01822e0e9 + Address addr($mem_ptr$$Register, 0); + + if (res != addr.base() && res != oldval && res != newval) { -+ __ cmpxchg32(addr, oldval, newval, res, false, false, true /* acquire */); ++ __ cmpxchg32(addr, oldval, newval, res, false, false, true); + } else { -+ __ cmpxchg32(addr, oldval, newval, AT, false, false, true /* acquire */); ++ __ cmpxchg32(addr, oldval, newval, AT, false, false, true); + __ move(res, AT); + } + %} @@ -34084,14 +45718,14 @@ index 00000000000..7e01822e0e9 + ins_cost(2 * MEMORY_REF_COST); + effect(TEMP_DEF res); + format %{ -+ "CMPXCHG $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @compareAndExchangeI" ++ "cmpxchg32 $res = $mem, $oldval, 
$newval\t# if $mem == $oldval then $mem <-- $newval @compareAndExchangeI" + %} + ins_encode %{ + Register newval = $newval$$Register; + Register oldval = $oldval$$Register; + Register res = $res$$Register; + Address addr(as_Register($mem$$base)); -+ __ cmpxchg32(addr, oldval, newval, res, true /* sign */, false /* retold */, true /* acquire */, false /* weak */, true /* exchange */); ++ __ cmpxchg32(addr, oldval, newval, res, true /* sign */, false /* retold */, true /* barrier */, false /* weak */, true /* exchange */); + %} + ins_pipe(pipe_slow); +%} @@ -34102,14 +45736,14 @@ index 00000000000..7e01822e0e9 + ins_cost(2 * MEMORY_REF_COST); + effect(TEMP_DEF res); + format %{ -+ "CMPXCHG $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @compareAndExchangeL" ++ "cmpxchg $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @compareAndExchangeL" + %} + ins_encode %{ + Register newval = $newval$$Register; + Register oldval = $oldval$$Register; + Register res = $res$$Register; + Address addr(as_Register($mem$$base)); -+ __ cmpxchg(addr, oldval, newval, res, false /* retold */, true /* acquire */, false /* weak */, true /* exchange */); ++ __ cmpxchg(addr, oldval, newval, res, false /* retold */, true /* barrier */, false /* weak */, true /* exchange */); + %} + ins_pipe(pipe_slow); +%} @@ -34120,7 +45754,7 @@ index 00000000000..7e01822e0e9 + ins_cost(2 * MEMORY_REF_COST); + effect(TEMP_DEF res); + format %{ -+ "CMPXCHG $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @compareAndExchangeP" ++ "cmpxchg $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @compareAndExchangeP" + %} + ins_encode %{ + Register newval = $newval$$Register; @@ -34138,14 +45772,14 @@ index 00000000000..7e01822e0e9 + ins_cost(2 * MEMORY_REF_COST); + effect(TEMP_DEF res); + format %{ -+ "CMPXCHG $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @compareAndExchangeN" ++ "cmpxchg32 $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @compareAndExchangeN" + %} + ins_encode %{ + Register newval = $newval$$Register; + Register oldval = $oldval$$Register; + Register res = $res$$Register; + Address addr(as_Register($mem$$base)); -+ __ cmpxchg32(addr, oldval, newval, res, false /* sign */, false /* retold */, true /* acquire */, false /* weak */, true /* exchange */); ++ __ cmpxchg32(addr, oldval, newval, res, false /* sign */, false /* retold */, true /* barrier */, false /* weak */, true /* exchange */); + %} + ins_pipe(pipe_slow); +%} @@ -34163,9 +45797,9 @@ index 00000000000..7e01822e0e9 + Register res = $res$$Register; + Address addr(as_Register($mem$$base)); + if (res != addr.base() && res != oldval && res != newval) { -+ __ cmpxchg32(addr, oldval, newval, res, true /* sign */, false /* retold */, true /* acquire */, true /* weak */, false /* exchange */); ++ __ cmpxchg32(addr, oldval, newval, res, true /* sign */, false /* retold */, true /* barrier */, true /* weak */, false /* exchange */); + } else { -+ __ cmpxchg32(addr, oldval, newval, AT, true /* sign */, false /* retold */, true /* acquire */, true /* weak */, false /* exchange */); ++ __ cmpxchg32(addr, oldval, newval, AT, true /* sign */, false /* retold */, true /* barrier */, true /* weak */, false /* exchange */); + __ move(res, AT); + } + %} @@ -34177,7 +45811,7 @@ index 00000000000..7e01822e0e9 + match(Set res (WeakCompareAndSwapL mem (Binary oldval newval))); + ins_cost(2 * MEMORY_REF_COST); + format %{ -+ "CMPXCHG $res = 
$mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @WeakCompareAndSwapL" ++ "cmpxchg $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @WeakCompareAndSwapL" + %} + ins_encode %{ + Register newval = $newval$$Register; @@ -34185,9 +45819,9 @@ index 00000000000..7e01822e0e9 + Register res = $res$$Register; + Address addr(as_Register($mem$$base)); + if (res != addr.base() && res != oldval && res != newval) { -+ __ cmpxchg(addr, oldval, newval, res, false /* retold */, true /* acquire */, true /* weak */, false /* exchange */); ++ __ cmpxchg(addr, oldval, newval, res, false /* retold */, true /* barrier */, true /* weak */, false /* exchange */); + } else { -+ __ cmpxchg(addr, oldval, newval, AT, false /* retold */, true /* acquire */, true /* weak */, false /* exchange */); ++ __ cmpxchg(addr, oldval, newval, AT, false /* retold */, true /* barrier */, true /* weak */, false /* exchange */); + __ move(res, AT); + } + %} @@ -34199,7 +45833,7 @@ index 00000000000..7e01822e0e9 + match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); + ins_cost(MEMORY_REF_COST); + format %{ -+ "CMPXCHG $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @weakCompareAndSwapP" ++ "cmpxchg $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @weakCompareAndSwapP" + %} + ins_encode %{ + Register newval = $newval$$Register; @@ -34207,9 +45841,9 @@ index 00000000000..7e01822e0e9 + Register res = $res$$Register; + Address addr(as_Register($mem$$base)); + if (res != addr.base() && res != oldval && res != newval) { -+ __ cmpxchg(addr, oldval, newval, res, false /* retold */, false /* acquire */, true /* weak */, false /* exchange */); ++ __ cmpxchg(addr, oldval, newval, res, false /* retold */, false /* barrier */, true /* weak */, false /* exchange */); + } else { -+ __ cmpxchg(addr, oldval, newval, AT, false /* retold */, false /* acquire */, true /* weak */, false /* exchange */); ++ __ cmpxchg(addr, oldval, newval, AT, false /* retold */, false /* barrier */, true /* weak */, false /* exchange */); + __ move(res, AT); + } + %} @@ -34221,7 +45855,7 @@ index 00000000000..7e01822e0e9 + match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); + ins_cost(2 * MEMORY_REF_COST); + format %{ -+ "CMPXCHG $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @weakCompareAndSwapP" ++ "cmpxchg $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @weakCompareAndSwapP" + %} + ins_encode %{ + Register newval = $newval$$Register; @@ -34229,9 +45863,9 @@ index 00000000000..7e01822e0e9 + Register res = $res$$Register; + Address addr(as_Register($mem$$base)); + if (res != addr.base() && res != oldval && res != newval) { -+ __ cmpxchg(addr, oldval, newval, res, false /* retold */, true /* acquire */, true /* weak */, false /* exchange */); ++ __ cmpxchg(addr, oldval, newval, res, false /* retold */, true /* barrier */, true /* weak */, false /* exchange */); + } else { -+ __ cmpxchg(addr, oldval, newval, AT, false /* retold */, true /* acquire */, true /* weak */, false /* exchange */); ++ __ cmpxchg(addr, oldval, newval, AT, false /* retold */, true /* barrier */, true /* weak */, false /* exchange */); + __ move(res, AT); + } + %} @@ -34243,7 +45877,7 @@ index 00000000000..7e01822e0e9 + match(Set res (WeakCompareAndSwapN mem (Binary oldval newval))); + ins_cost(2 * MEMORY_REF_COST); + format %{ -+ "CMPXCHG $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval 
@weakCompareAndSwapN" ++ "cmpxchg32 $res = $mem, $oldval, $newval\t# if $mem == $oldval then $mem <-- $newval @weakCompareAndSwapN" + %} + ins_encode %{ + Register newval = $newval$$Register; @@ -34251,9 +45885,9 @@ index 00000000000..7e01822e0e9 + Register res = $res$$Register; + Address addr(as_Register($mem$$base)); + if (res != addr.base() && res != oldval && res != newval) { -+ __ cmpxchg32(addr, oldval, newval, res, false /* sign */, false /* retold */, true /* acquire */, true /* weak */, false /* exchange */); ++ __ cmpxchg32(addr, oldval, newval, res, false /* sign */, false /* retold */, true /* barrier */, true /* weak */, false /* exchange */); + } else { -+ __ cmpxchg32(addr, oldval, newval, AT, false /* sign */, false /* retold */, true /* acquire */, true /* weak */, false /* exchange */); ++ __ cmpxchg32(addr, oldval, newval, AT, false /* sign */, false /* retold */, true /* barrier */, true /* weak */, false /* exchange */); + __ move(res, AT); + } + %} @@ -38492,19 +50126,13 @@ index 00000000000..7e01822e0e9 + match(Set dst (VectorInsert (Binary dst val) idx)); + format %{ "xvinsert $dst, $val, $idx\t# @insert32B" %} + ins_encode %{ -+ int idx = $idx$$constant; -+ int msbw, lsbw; -+ switch (idx % 4) { -+ case 0: msbw = 7, lsbw = 0; break; -+ case 1: msbw = 15, lsbw = 8; break; -+ case 2: msbw = 23, lsbw = 16; break; -+ case 3: msbw = 31, lsbw = 24; break; -+ default: -+ ShouldNotReachHere(); ++ if ($idx$$constant < 16) { ++ __ vinsgr2vr_b($dst$$FloatRegister, $val$$Register, $idx$$constant); ++ } else { ++ __ xvpermi_d($dst$$FloatRegister, $dst$$FloatRegister, 0b01001110); ++ __ vinsgr2vr_b($dst$$FloatRegister, $val$$Register, $idx$$constant-16); ++ __ xvpermi_d($dst$$FloatRegister, $dst$$FloatRegister, 0b01001110); + } -+ __ xvpickve2gr_w(SCR1, $dst$$FloatRegister, idx >> 2); -+ __ bstrins_w(SCR1, $val$$Register, msbw, lsbw); -+ __ xvinsgr2vr_w($dst$$FloatRegister, SCR1, idx >> 2); + %} + ins_pipe( pipe_slow ); +%} @@ -38514,12 +50142,13 @@ index 00000000000..7e01822e0e9 + match(Set dst (VectorInsert (Binary dst val) idx)); + format %{ "xvinsert $dst, $val, $idx\t# @insert16S" %} + ins_encode %{ -+ int idx = $idx$$constant; -+ int msbw = (idx % 2) ? 31 : 15; -+ int lsbw = (idx % 2) ? 16 : 0; -+ __ xvpickve2gr_w(SCR1, $dst$$FloatRegister, idx >> 1); -+ __ bstrins_w(SCR1, $val$$Register, msbw, lsbw); -+ __ xvinsgr2vr_w($dst$$FloatRegister, SCR1, idx >> 1); ++ if ($idx$$constant < 8) { ++ __ vinsgr2vr_h($dst$$FloatRegister, $val$$Register, $idx$$constant); ++ } else { ++ __ xvpermi_d($dst$$FloatRegister, $dst$$FloatRegister, 0b01001110); ++ __ vinsgr2vr_h($dst$$FloatRegister, $val$$Register, $idx$$constant-8); ++ __ xvpermi_d($dst$$FloatRegister, $dst$$FloatRegister, 0b01001110); ++ } + %} + ins_pipe( pipe_slow ); +%} @@ -38974,10 +50603,10 @@ index 00000000000..7e01822e0e9 + diff --git a/src/hotspot/cpu/loongarch/macroAssembler_loongarch.cpp b/src/hotspot/cpu/loongarch/macroAssembler_loongarch.cpp new file mode 100644 -index 00000000000..06fbc181583 +index 00000000000..be66c6c5e92 --- /dev/null +++ b/src/hotspot/cpu/loongarch/macroAssembler_loongarch.cpp -@@ -0,0 +1,3839 @@ +@@ -0,0 +1,3900 @@ +/* + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2022, Loongson Technology. All rights reserved. 
@@ -39636,7 +51265,7 @@ index 00000000000..06fbc181583 +#else + orr(tmp_reg, TREG, swap_reg); +#endif -+ cmpxchg(Address(obj_reg, 0), swap_reg, tmp_reg, AT, false, true /* acquire */); ++ cmpxchg(Address(obj_reg, 0), swap_reg, tmp_reg, AT, false, false); + if (need_tmp_reg) { + pop(tmp_reg); + } @@ -39681,7 +51310,7 @@ index 00000000000..06fbc181583 +#endif + ld_ptr(swap_reg, saved_mark_addr); + -+ cmpxchg(Address(obj_reg, 0), swap_reg, tmp_reg, AT, false, true /* acquire */); ++ cmpxchg(Address(obj_reg, 0), swap_reg, tmp_reg, AT, false, false); + if (need_tmp_reg) { + pop(tmp_reg); + } @@ -39721,7 +51350,7 @@ index 00000000000..06fbc181583 + push(tmp_reg); + } + load_prototype_header(tmp_reg, obj_reg); -+ cmpxchg(Address(obj_reg, 0), swap_reg, tmp_reg, AT, false, true /* acquire */); ++ cmpxchg(Address(obj_reg, 0), swap_reg, tmp_reg, AT, false, false); + if (need_tmp_reg) { + pop(tmp_reg); + } @@ -39958,11 +51587,22 @@ index 00000000000..06fbc181583 + + +void MacroAssembler::stop(const char* msg) { -+#ifndef PRODUCT -+ block_comment(msg); -+#endif -+ csrrd(R0, 0); -+ emit_int64((uintptr_t)msg); ++ li(A0, (long)msg); ++ call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type); ++ brk(17); ++} ++ ++void MacroAssembler::warn(const char* msg) { ++ pushad(); ++ li(A0, (long)msg); ++ push(S2); ++ move(S2, SP); // use S2 as a sender SP holder ++ assert(StackAlignmentInBytes == 16, "must be"); ++ bstrins_d(SP, R0, 3, 0); // align stack as required by ABI ++ call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type); ++ move(SP, S2); // use S2 as a sender SP holder ++ pop(S2); ++ popad(); +} + +void MacroAssembler::increment(Register reg, int imm) { @@ -40306,7 +51946,7 @@ index 00000000000..06fbc181583 + if (thread != V0) { + push(V0); + } -+ push_call_clobbered_registers_except(RegSet::of(V0)); ++ pushad_except_v0(); + + push(S5); + move(S5, SP); @@ -40317,7 +51957,7 @@ index 00000000000..06fbc181583 + move(SP, S5); + pop(S5); + -+ pop_call_clobbered_registers_except(RegSet::of(V0)); ++ popad_except_v0(); + if (thread != V0) { + move(thread, V0); + pop(V0); @@ -40746,9 +52386,9 @@ index 00000000000..06fbc181583 + + // handle errors + bind(error); -+ push_call_clobbered_registers(); ++ pushad(); + call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type); -+ pop_call_clobbered_registers(); ++ popad(); + jr(RA); +} + @@ -40802,7 +52442,7 @@ index 00000000000..06fbc181583 +} + +void MacroAssembler::cmpxchg(Address addr, Register oldval, Register newval, -+ Register resflag, bool retold, bool acquire, ++ Register resflag, bool retold, bool barrier, + bool weak, bool exchange) { + assert(oldval != resflag, "oldval != resflag"); + assert(newval != resflag, "newval != resflag"); @@ -40825,11 +52465,8 @@ index 00000000000..06fbc181583 + b(succ); + + bind(fail); -+ if (acquire) { -+ membar(Assembler::Membar_mask_bits(LoadLoad|LoadStore)); -+ } else { -+ dbar(0x700); -+ } ++ if (barrier) ++ membar(LoadLoad); + if (retold && oldval != R0) + move(oldval, resflag); + if (!exchange) { @@ -40839,7 +52476,7 @@ index 00000000000..06fbc181583 +} + +void MacroAssembler::cmpxchg(Address addr, Register oldval, Register newval, -+ Register tmp, bool retold, bool acquire, Label& succ, Label* fail) { ++ Register tmp, bool retold, bool barrier, Label& succ, Label* fail) { + assert(oldval != tmp, "oldval != tmp"); + assert(newval != tmp, "newval != tmp"); + Label again, neq; @@ -40853,11 +52490,8 @@ index 00000000000..06fbc181583 + b(succ); 
+ + bind(neq); -+ if (acquire) { -+ membar(Assembler::Membar_mask_bits(LoadLoad|LoadStore)); -+ } else { -+ dbar(0x700); -+ } ++ if (barrier) ++ membar(LoadLoad); + if (retold && oldval != R0) + move(oldval, tmp); + if (fail) @@ -40865,7 +52499,7 @@ index 00000000000..06fbc181583 +} + +void MacroAssembler::cmpxchg32(Address addr, Register oldval, Register newval, -+ Register resflag, bool sign, bool retold, bool acquire, ++ Register resflag, bool sign, bool retold, bool barrier, + bool weak, bool exchange) { + assert(oldval != resflag, "oldval != resflag"); + assert(newval != resflag, "newval != resflag"); @@ -40890,11 +52524,8 @@ index 00000000000..06fbc181583 + b(succ); + + bind(fail); -+ if (acquire) { -+ membar(Assembler::Membar_mask_bits(LoadLoad|LoadStore)); -+ } else { -+ dbar(0x700); -+ } ++ if (barrier) ++ membar(LoadLoad); + if (retold && oldval != R0) + move(oldval, resflag); + if (!exchange) { @@ -40904,7 +52535,7 @@ index 00000000000..06fbc181583 +} + +void MacroAssembler::cmpxchg32(Address addr, Register oldval, Register newval, Register tmp, -+ bool sign, bool retold, bool acquire, Label& succ, Label* fail) { ++ bool sign, bool retold, bool barrier, Label& succ, Label* fail) { + assert(oldval != tmp, "oldval != tmp"); + assert(newval != tmp, "newval != tmp"); + Label again, neq; @@ -40920,11 +52551,8 @@ index 00000000000..06fbc181583 + b(succ); + + bind(neq); -+ if (acquire) { -+ membar(Assembler::Membar_mask_bits(LoadLoad|LoadStore)); -+ } else { -+ dbar(0x700); -+ } ++ if (barrier) ++ membar(LoadLoad); + if (retold && oldval != R0) + move(oldval, tmp); + if (fail) @@ -40952,17 +52580,83 @@ index 00000000000..06fbc181583 + //Unimplemented(); +} + -+static RegSet caller_saved_regset = RegSet::range(A0, A7) + RegSet::range(T0, T8) + RegSet::of(FP, RA) - RegSet::of(SCR1, SCR2); -+static FloatRegSet caller_saved_fpu_regset = FloatRegSet::range(F0, F23); ++Register caller_saved_registers[] = {T7, T5, T6, A0, A1, A2, A3, A4, A5, A6, A7, T0, T1, T2, T3, T8, T4, S8, RA, FP}; ++Register caller_saved_registers_except_v0[] = {T7, T5, T6, A1, A2, A3, A4, A5, A6, A7, T0, T1, T2, T3, T8, T4, S8, RA, FP}; + -+void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude) { -+ push(caller_saved_regset - exclude); -+ push_fpu(caller_saved_fpu_regset); ++ //TODO: LA ++//In LA, F0~23 are all caller-saved registers ++FloatRegister caller_saved_fpu_registers[] = {F0, F12, F13}; ++ ++// We preserve all caller-saved register ++void MacroAssembler::pushad(){ ++ int i; ++ // Fixed-point registers ++ int len = sizeof(caller_saved_registers) / sizeof(caller_saved_registers[0]); ++ addi_d(SP, SP, -1 * len * wordSize); ++ for (i = 0; i < len; i++) { ++ st_d(caller_saved_registers[i], SP, (len - i - 1) * wordSize); ++ } ++ ++ // Floating-point registers ++ len = sizeof(caller_saved_fpu_registers) / sizeof(caller_saved_fpu_registers[0]); ++ addi_d(SP, SP, -1 * len * wordSize); ++ for (i = 0; i < len; i++) { ++ fst_d(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize); ++ } ++}; ++ ++void MacroAssembler::popad(){ ++ int i; ++ // Floating-point registers ++ int len = sizeof(caller_saved_fpu_registers) / sizeof(caller_saved_fpu_registers[0]); ++ for (i = 0; i < len; i++) ++ { ++ fld_d(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize); ++ } ++ addi_d(SP, SP, len * wordSize); ++ ++ // Fixed-point registers ++ len = sizeof(caller_saved_registers) / sizeof(caller_saved_registers[0]); ++ for (i = 0; i < len; i++) ++ { ++ ld_d(caller_saved_registers[i], SP, (len - i - 1) * 
wordSize); ++ } ++ addi_d(SP, SP, len * wordSize); ++}; ++ ++// We preserve all caller-saved register except V0 ++void MacroAssembler::pushad_except_v0() { ++ int i; ++ // Fixed-point registers ++ int len = sizeof(caller_saved_registers_except_v0) / sizeof(caller_saved_registers_except_v0[0]); ++ addi_d(SP, SP, -1 * len * wordSize); ++ for (i = 0; i < len; i++) { ++ st_d(caller_saved_registers_except_v0[i], SP, (len - i - 1) * wordSize); ++ } ++ ++ // Floating-point registers ++ len = sizeof(caller_saved_fpu_registers) / sizeof(caller_saved_fpu_registers[0]); ++ addi_d(SP, SP, -1 * len * wordSize); ++ for (i = 0; i < len; i++) { ++ fst_d(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize); ++ } +} + -+void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude) { -+ pop_fpu(caller_saved_fpu_regset); -+ pop(caller_saved_regset - exclude); ++void MacroAssembler::popad_except_v0() { ++ int i; ++ // Floating-point registers ++ int len = sizeof(caller_saved_fpu_registers) / sizeof(caller_saved_fpu_registers[0]); ++ for (i = 0; i < len; i++) { ++ fld_d(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize); ++ } ++ addi_d(SP, SP, len * wordSize); ++ ++ // Fixed-point registers ++ len = sizeof(caller_saved_registers_except_v0) / sizeof(caller_saved_registers_except_v0[0]); ++ for (i = 0; i < len; i++) { ++ ld_d(caller_saved_registers_except_v0[i], SP, (len - i - 1) * wordSize); ++ } ++ addi_d(SP, SP, len * wordSize); +} + +void MacroAssembler::push2(Register reg1, Register reg2) { @@ -42371,14 +54065,10 @@ index 00000000000..06fbc181583 + address last = code()->last_insn(); + if (last != NULL && ((NativeInstruction*)last)->is_sync() && prev == last) { + code()->set_last_insn(NULL); -+ NativeMembar *membar = (NativeMembar*)prev; -+ // merged membar -+ // e.g. LoadLoad and LoadLoad|LoadStore to LoadLoad|LoadStore -+ membar->set_hint(membar->get_hint() & (~hint & 0xF)); + block_comment("merged membar"); + } else { + code()->set_last_insn(pc()); -+ Assembler::membar(hint); ++ dbar(hint); + } +} + @@ -42819,10 +54509,10 @@ index 00000000000..06fbc181583 +#endif diff --git a/src/hotspot/cpu/loongarch/macroAssembler_loongarch.hpp b/src/hotspot/cpu/loongarch/macroAssembler_loongarch.hpp new file mode 100644 -index 00000000000..204ca1a1a07 +index 00000000000..8e36a23afc1 --- /dev/null +++ b/src/hotspot/cpu/loongarch/macroAssembler_loongarch.hpp -@@ -0,0 +1,754 @@ +@@ -0,0 +1,748 @@ +/* + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2022, Loongson Technology. All rights reserved. 
@@ -43212,6 +54902,9 @@ index 00000000000..204ca1a1a07 + // prints msg, dumps registers and stops execution + void stop(const char* msg); + ++ // prints msg and continues ++ void warn(const char* msg); ++ + static void debug(char* msg/*, RegistersForDebugging* regs*/); + static void debug64(char* msg, int64_t pc, int64_t regs[]); + @@ -43375,13 +55068,13 @@ index 00000000000..204ca1a1a07 + void bswap_w(Register dst, Register src); + + void cmpxchg(Address addr, Register oldval, Register newval, Register resflag, -+ bool retold, bool acquire, bool weak = false, bool exchange = false); ++ bool retold, bool barrier, bool weak = false, bool exchange = false); + void cmpxchg(Address addr, Register oldval, Register newval, Register tmp, -+ bool retold, bool acquire, Label& succ, Label* fail = nullptr); ++ bool retold, bool barrier, Label& succ, Label* fail = nullptr); + void cmpxchg32(Address addr, Register oldval, Register newval, Register resflag, -+ bool sign, bool retold, bool acquire, bool weak = false, bool exchange = false); ++ bool sign, bool retold, bool barrier, bool weak = false, bool exchange = false); + void cmpxchg32(Address addr, Register oldval, Register newval, Register tmp, -+ bool sign, bool retold, bool acquire, Label& succ, Label* fail = nullptr); ++ bool sign, bool retold, bool barrier, Label& succ, Label* fail = nullptr); + + void extend_sign(Register rh, Register rl) { /*stop("extend_sign");*/ guarantee(0, "LA not implemented yet");} + void neg(Register reg) { /*dsubu(reg, R0, reg);*/ guarantee(0, "LA not implemented yet");} @@ -43393,20 +55086,11 @@ index 00000000000..204ca1a1a07 + void pop2 () { addi_d(SP, SP, 16); } + void push2(Register reg1, Register reg2); + void pop2 (Register reg1, Register reg2); -+ // Push and pop everything that might be clobbered by a native -+ // runtime call except SCR1 and SCR2. (They are always scratch, -+ // so we don't have to protect them.) Only save the lower 64 bits -+ // of each vector register. Additional registers can be excluded -+ // in a passed RegSet. -+ void push_call_clobbered_registers_except(RegSet exclude); -+ void pop_call_clobbered_registers_except(RegSet exclude); -+ -+ void push_call_clobbered_registers() { -+ push_call_clobbered_registers_except(RegSet()); -+ } -+ void pop_call_clobbered_registers() { -+ pop_call_clobbered_registers_except(RegSet()); -+ } ++ //we need 2 fun to save and resotre general register ++ void pushad(); ++ void popad(); ++ void pushad_except_v0(); ++ void popad_except_v0(); + void push(RegSet regs) { if (regs.bits()) push(regs.bits()); } + void pop(RegSet regs) { if (regs.bits()) pop(regs.bits()); } + void push_fpu(FloatRegSet regs) { if (regs.bits()) push_fpu(regs.bits()); } @@ -43619,13 +55303,13 @@ index 00000000000..49302590c37 +#endif // CPU_LOONGARCH_MACROASSEMBLER_LOONGARCH_INLINE_HPP diff --git a/src/hotspot/cpu/loongarch/macroAssembler_loongarch_trig.cpp b/src/hotspot/cpu/loongarch/macroAssembler_loongarch_trig.cpp new file mode 100644 -index 00000000000..9c74be2dbd3 +index 00000000000..63b5b0da7e7 --- /dev/null +++ b/src/hotspot/cpu/loongarch/macroAssembler_loongarch_trig.cpp -@@ -0,0 +1,1634 @@ +@@ -0,0 +1,1633 @@ +/* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, Cavium. All rights reserved. (By BELLSOFT) -+ * Copyright (c) 2022, 2024, Loongson Technology. All rights reserved. ++ * Copyright (c) 2022, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it @@ -44532,7 +56216,7 @@ index 00000000000..9c74be2dbd3 + b(Q_DONE); + bind(JX_IS_0); + if (UseLASX) { -+ xvfmul_d(v28, v18, v6); // f[0,3] * x[0] ++ xvfmul_d(v28, v18, v6); // f[0,1] * x[0] + fmul_d(v30, v19, v6); // f[4] * x[0] + } else { + vfmul_d(v28, v18, v6); // f[0,1] * x[0] @@ -44761,7 +56445,6 @@ index 00000000000..9c74be2dbd3 + st_w(tmp2, SCR2, 0); + addi_w(SCR1, SCR1, 24); + addi_w(jz, jz, 1); -+ alsl_d(SCR2, jz, iqBase, 2 - 1); + st_w(tmp3, SCR2, 0); // iq[jz] = (int) fw + b(Z_ZERO_CHECK_DONE); + bind(Z_IS_LESS_THAN_TWO24B); @@ -46069,10 +57752,10 @@ index 00000000000..a97520ea768 + } diff --git a/src/hotspot/cpu/loongarch/nativeInst_loongarch.cpp b/src/hotspot/cpu/loongarch/nativeInst_loongarch.cpp new file mode 100644 -index 00000000000..25ef0ecd224 +index 00000000000..407f539e8d7 --- /dev/null +++ b/src/hotspot/cpu/loongarch/nativeInst_loongarch.cpp -@@ -0,0 +1,529 @@ +@@ -0,0 +1,525 @@ +/* + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2021, Loongson Technology. All rights reserved. @@ -46497,10 +58180,6 @@ index 00000000000..25ef0ecd224 + return uint_at(0) == NativeIllegalInstruction::instruction_code; +} + -+bool NativeInstruction::is_stop() { -+ return uint_at(0) == 0x04000000; // csrrd R0 0 -+} -+ +void NativeIllegalInstruction::insert(address code_pos) { + *(juint*)code_pos = instruction_code; + ICache::invalidate_range(code_pos, instruction_size); @@ -46604,10 +58283,10 @@ index 00000000000..25ef0ecd224 +} diff --git a/src/hotspot/cpu/loongarch/nativeInst_loongarch.hpp b/src/hotspot/cpu/loongarch/nativeInst_loongarch.hpp new file mode 100644 -index 00000000000..0ec8ebddf09 +index 00000000000..2f126991338 --- /dev/null +++ b/src/hotspot/cpu/loongarch/nativeInst_loongarch.hpp -@@ -0,0 +1,531 @@ +@@ -0,0 +1,523 @@ +/* + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2021, Loongson Technology. All rights reserved. @@ -46690,7 +58369,6 @@ index 00000000000..0ec8ebddf09 + inline bool is_NativeCallTrampolineStub_at(); + //We use an illegal instruction for marking a method as not_entrant or zombie. + bool is_sigill_zombie_not_entrant(); -+ bool is_stop(); + + protected: + address addr_at(int offset) const { return address(this) + offset; } @@ -47131,13 +58809,6 @@ index 00000000000..0ec8ebddf09 + assert(ni->is_NativeCallTrampolineStub_at(), "no call trampoline found"); + return (NativeCallTrampolineStub*)addr; +} -+ -+class NativeMembar : public NativeInstruction { -+public: -+ unsigned int get_hint() { return Assembler::low(insn_word(), 4); } -+ void set_hint(int hint) { Assembler::patch(addr_at(0), 4, hint); } -+}; -+ +#endif // CPU_LOONGARCH_NATIVEINST_LOONGARCH_HPP diff --git a/src/hotspot/cpu/loongarch/registerMap_loongarch.hpp b/src/hotspot/cpu/loongarch/registerMap_loongarch.hpp new file mode 100644 @@ -48280,13 +59951,13 @@ index 00000000000..fae11f47e62 +} diff --git a/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp b/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp new file mode 100644 -index 00000000000..30c06f40493 +index 00000000000..0ca8968136b --- /dev/null +++ b/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp -@@ -0,0 +1,3113 @@ +@@ -0,0 +1,3114 @@ +/* + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. -+ * Copyright (c) 2015, 2023, Loongson Technology. All rights reserved. 
++ * Copyright (c) 2015, 2021, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it @@ -48613,9 +60284,9 @@ index 00000000000..30c06f40493 +} + +// Is vector's size (in bytes) bigger than a size saved by default? -+// 8 bytes registers are saved by default using fld/fst instructions. ++// 16 bytes XMM registers are saved by default using fxsave/fxrstor instructions. +bool SharedRuntime::is_wide_vector(int size) { -+ return size > 8; ++ return size > 16; +} + +// The java_calling_convention describes stack locations as ideal slots on @@ -48745,7 +60416,7 @@ index 00000000000..30c06f40493 + // T5 isn't live so capture return address while we easily can + __ move(T5, RA); + -+ __ push_call_clobbered_registers(); ++ __ pushad(); + + // VM needs caller's callsite + // VM needs target method @@ -48760,7 +60431,7 @@ index 00000000000..30c06f40493 + relocInfo::runtime_call_type); + + __ move(SP, TSR); -+ __ pop_call_clobbered_registers(); ++ __ popad(); + __ bind(L); +} + @@ -49179,6 +60850,7 @@ index 00000000000..30c06f40493 + + gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup); + ++ __ flush(); + return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry); +} + @@ -50214,7 +61886,7 @@ index 00000000000..30c06f40493 + __ orr(swap_reg, swap_reg, AT); + + __ st_d(swap_reg, lock_reg, mark_word_offset); -+ __ cmpxchg(Address(obj_reg, 0), swap_reg, lock_reg, AT, true, true /* acquire */, lock_done); ++ __ cmpxchg(Address(obj_reg, 0), swap_reg, lock_reg, AT, true, false, lock_done); + // Test if the oopMark is an obvious stack pointer, i.e., + // 1) (mark & 3) == 0, and + // 2) sp <= mark < mark + os::pagesize() @@ -50249,8 +61921,8 @@ index 00000000000..30c06f40493 + + // Now set thread in native + __ addi_d(AT, R0, _thread_in_native); -+ if (os::is_MP()) { -+ __ membar(Assembler::Membar_mask_bits(__ LoadStore|__ StoreStore)); ++ if(os::is_MP()) { ++ __ dbar(0); // store release + } + __ st_w(AT, thread, in_bytes(JavaThread::thread_state_offset())); + } @@ -50305,8 +61977,8 @@ index 00000000000..30c06f40493 + // Thread A is resumed to finish this native method, but doesn't block here since it + // didn't see any synchronization is progress, and escapes. 
+ __ addi_d(AT, R0, _thread_in_native_trans); -+ if (os::is_MP()) { -+ __ membar(Assembler::Membar_mask_bits(__ LoadStore|__ StoreStore)); ++ if(os::is_MP()) { ++ __ dbar(0); // store release + } + __ st_w(AT, thread, in_bytes(JavaThread::thread_state_offset())); + @@ -50353,8 +62025,8 @@ index 00000000000..30c06f40493 + + // change thread state + __ addi_d(AT, R0, _thread_in_Java); -+ if (os::is_MP()) { -+ __ membar(Assembler::Membar_mask_bits(__ LoadStore|__ StoreStore)); ++ if(os::is_MP()) { ++ __ dbar(0); // store release + } + __ st_w(AT, thread, in_bytes(JavaThread::thread_state_offset())); + __ bind(after_transition); @@ -50398,7 +62070,7 @@ index 00000000000..30c06f40493 + // get address of the stack lock + __ addi_d (c_rarg0, FP, lock_slot_fp_offset); + // Atomic swap old header if oop still contains the stack lock -+ __ cmpxchg(Address(obj_reg, 0), c_rarg0, T8, AT, false, true /* acquire */, unlock_done, &slow_path_unlock); ++ __ cmpxchg(Address(obj_reg, 0), c_rarg0, T8, AT, false, false, unlock_done, &slow_path_unlock); + + // slow path re-enters here + __ bind(unlock_done); @@ -51399,10 +63071,10 @@ index 00000000000..30c06f40493 +#endif diff --git a/src/hotspot/cpu/loongarch/stubGenerator_loongarch_64.cpp b/src/hotspot/cpu/loongarch/stubGenerator_loongarch_64.cpp new file mode 100644 -index 00000000000..21bfc7d78cb +index 00000000000..667f6aa92fc --- /dev/null +++ b/src/hotspot/cpu/loongarch/stubGenerator_loongarch_64.cpp -@@ -0,0 +1,5176 @@ +@@ -0,0 +1,5296 @@ +/* + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2022, Loongson Technology. All rights reserved. @@ -52133,8 +63805,8 @@ index 00000000000..21bfc7d78cb + + // disjoint large copy + void generate_disjoint_large_copy(Label &entry, const char *name) { -+ __ align(CodeEntryAlignment); + StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); + + { + UnsafeCopyMemoryMark ucmm(this, true, true); @@ -52146,7 +63818,7 @@ index 00000000000..21bfc7d78cb + __ ld_d(A6, A0, 0); + __ ld_d(A7, A2, -8); + -+ __ andi(T1, A1, 7); ++ __ andi(T1, A0, 7); + __ sub_d(T0, R0, T1); + __ addi_d(T0, T0, 8); + @@ -52218,8 +63890,8 @@ index 00000000000..21bfc7d78cb + + // disjoint large copy lsx + void generate_disjoint_large_copy_lsx(Label &entry, const char *name) { -+ __ align(CodeEntryAlignment); + StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); + + { + UnsafeCopyMemoryMark ucmm(this, true, true); @@ -52231,7 +63903,7 @@ index 00000000000..21bfc7d78cb + __ vld(F0, A0, 0); + __ vld(F1, A2, -16); + -+ __ andi(T1, A1, 15); ++ __ andi(T1, A0, 15); + __ sub_d(T0, R0, T1); + __ addi_d(T0, T0, 16); + @@ -52303,8 +63975,8 @@ index 00000000000..21bfc7d78cb + + // disjoint large copy lasx + void generate_disjoint_large_copy_lasx(Label &entry, const char *name) { -+ __ align(CodeEntryAlignment); + StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); + + { + UnsafeCopyMemoryMark ucmm(this, true, true); @@ -52316,7 +63988,7 @@ index 00000000000..21bfc7d78cb + __ xvld(F0, A0, 0); + __ xvld(F1, A2, -32); + -+ __ andi(T1, A1, 31); ++ __ andi(T1, A0, 31); + __ sub_d(T0, R0, T1); + __ addi_d(T0, T0, 32); + @@ -52388,8 +64060,8 @@ index 00000000000..21bfc7d78cb + + // conjoint large copy + void generate_conjoint_large_copy(Label &entry, const char *name) { -+ __ align(CodeEntryAlignment); + StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); + + { + UnsafeCopyMemoryMark ucmm(this, true, true); 
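
Note (illustration, not part of the patch): the three disjoint large-copy
stubs above share one head-alignment idiom before their main loops --
andi / sub_d / addi_d -- with only the granule differing (8 bytes plain,
16 for LSX, 32 for LASX). A hedged C++ restatement of what those three
instructions compute; head_bytes is a name invented here:

    #include <cstddef>
    #include <cstdint>

    // andi   T1, A0, granule-1   -> mis = p & (granule - 1)
    // sub_d  T0, R0, T1          -> T0 = -mis
    // addi_d T0, T0, granule     -> T0 = granule - mis
    static inline size_t head_bytes(const void* p,
                                    size_t granule /* 8, 16 or 32 */) {
      size_t mis = reinterpret_cast<uintptr_t>(p) & (granule - 1);
      return granule - mis;   // == granule, not 0, for an aligned p
    }

Returning a full granule for an already-aligned pointer looks intentional
here: each stub loads the first granule unconditionally just above, so the
head copy is never short. The conjoint variants in the hunks that follow
apply the same idea from the tail (A2/A3) downward.
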
@@ -52401,7 +64073,7 @@ index 00000000000..21bfc7d78cb + __ ld_d(A6, A0, 0); + __ ld_d(A7, A2, -8); + -+ __ andi(T1, A3, 7); ++ __ andi(T1, A2, 7); + __ sub_d(A2, A2, T1); + __ sub_d(A5, A3, T1); + @@ -52470,8 +64142,8 @@ index 00000000000..21bfc7d78cb + + // conjoint large copy lsx + void generate_conjoint_large_copy_lsx(Label &entry, const char *name) { -+ __ align(CodeEntryAlignment); + StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); + + { + UnsafeCopyMemoryMark ucmm(this, true, true); @@ -52483,7 +64155,7 @@ index 00000000000..21bfc7d78cb + __ vld(F0, A0, 0); + __ vld(F1, A2, -16); + -+ __ andi(T1, A3, 15); ++ __ andi(T1, A2, 15); + __ sub_d(A2, A2, T1); + __ sub_d(A5, A3, T1); + @@ -52552,8 +64224,8 @@ index 00000000000..21bfc7d78cb + + // conjoint large copy lasx + void generate_conjoint_large_copy_lasx(Label &entry, const char *name) { -+ __ align(CodeEntryAlignment); + StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); + + { + UnsafeCopyMemoryMark ucmm(this, true, true); @@ -52565,7 +64237,7 @@ index 00000000000..21bfc7d78cb + __ xvld(F0, A0, 0); + __ xvld(F1, A2, -32); + -+ __ andi(T1, A3, 31); ++ __ andi(T1, A2, 31); + __ sub_d(A2, A2, T1); + __ sub_d(A5, A3, T1); + @@ -52634,8 +64306,8 @@ index 00000000000..21bfc7d78cb + + // Byte small copy: less than { int:9, lsx:17, lasx:33 } elements. + void generate_byte_small_copy(Label &entry, const char *name) { -+ __ align(CodeEntryAlignment); + StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); + + Label L; + __ bind(entry); @@ -52999,9 +64671,9 @@ index 00000000000..21bfc7d78cb + // used by generate_conjoint_byte_copy(). + // + address generate_disjoint_byte_copy(bool aligned, Label &small, Label &large, -+ const char * name) { -+ __ align(CodeEntryAlignment); ++ Label &large_aligned, const char * name) { + StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); + address start = __ pc(); + + if (UseLASX) @@ -53012,6 +64684,12 @@ index 00000000000..21bfc7d78cb + __ sltui(T0, A2, 9); + __ bnez(T0, small); + ++ if (large_aligned.is_bound()) { ++ __ orr(T0, A0, A1); ++ __ andi(T0, T0, 7); ++ __ beqz(T0, large_aligned); ++ } ++ + __ b(large); + + return start; @@ -53033,9 +64711,9 @@ index 00000000000..21bfc7d78cb + // and stored atomically. + // + address generate_conjoint_byte_copy(bool aligned, Label &small, Label &large, -+ const char *name) { -+ __ align(CodeEntryAlignment); ++ Label &large_aligned, const char *name) { + StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); + address start = __ pc(); + + array_overlap_test(StubRoutines::jbyte_disjoint_arraycopy(), 0); @@ -53048,6 +64726,12 @@ index 00000000000..21bfc7d78cb + __ sltui(T0, A2, 9); + __ bnez(T0, small); + ++ if (large_aligned.is_bound()) { ++ __ orr(T0, A0, A1); ++ __ andi(T0, T0, 7); ++ __ beqz(T0, large_aligned); ++ } ++ + __ b(large); + + return start; @@ -53055,8 +64739,8 @@ index 00000000000..21bfc7d78cb + + // Short small copy: less than { int:9, lsx:9, lasx:17 } elements. + void generate_short_small_copy(Label &entry, const char *name) { -+ __ align(CodeEntryAlignment); + StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); + + Label L; + __ bind(entry); @@ -53266,9 +64950,9 @@ index 00000000000..21bfc7d78cb + // used by generate_conjoint_short_copy(). 
+ // + address generate_disjoint_short_copy(bool aligned, Label &small, Label &large, -+ const char * name) { -+ __ align(CodeEntryAlignment); ++ Label &large_aligned, const char * name) { + StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); + address start = __ pc(); + + if (UseLASX) @@ -53279,6 +64963,12 @@ index 00000000000..21bfc7d78cb + + __ slli_d(A2, A2, 1); + ++ if (large_aligned.is_bound()) { ++ __ orr(T0, A0, A1); ++ __ andi(T0, T0, 7); ++ __ beqz(T0, large_aligned); ++ } ++ + __ b(large); + + return start; @@ -53300,9 +64990,9 @@ index 00000000000..21bfc7d78cb + // and stored atomically. + // + address generate_conjoint_short_copy(bool aligned, Label &small, Label &large, -+ const char *name) { -+ __ align(CodeEntryAlignment); ++ Label &large_aligned, const char *name) { + StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); + address start = __ pc(); + + array_overlap_test(StubRoutines::jshort_disjoint_arraycopy(), 1); @@ -53315,6 +65005,12 @@ index 00000000000..21bfc7d78cb + + __ slli_d(A2, A2, 1); + ++ if (large_aligned.is_bound()) { ++ __ orr(T0, A0, A1); ++ __ andi(T0, T0, 7); ++ __ beqz(T0, large_aligned); ++ } ++ + __ b(large); + + return start; @@ -53322,8 +65018,8 @@ index 00000000000..21bfc7d78cb + + // Int small copy: less than { int:7, lsx:7, lasx:9 } elements. + void generate_int_small_copy(Label &entry, const char *name) { -+ __ align(CodeEntryAlignment); + StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); + + Label L; + __ bind(entry); @@ -53456,8 +65152,8 @@ index 00000000000..21bfc7d78cb + + // Generate maybe oop copy + void gen_maybe_oop_copy(bool is_oop, bool disjoint, bool aligned, Label &small, -+ Label &large, const char *name, int small_limit, -+ int log2_elem_size, bool dest_uninitialized = false) { ++ Label &large, Label &large_aligned, const char *name, ++ int small_limit, int log2_elem_size, bool dest_uninitialized = false) { + Label post, _large; + DecoratorSet decorators = DECORATORS_NONE; + BarrierSetAssembler *bs = nullptr; @@ -53503,6 +65199,20 @@ index 00000000000..21bfc7d78cb + __ bind(_large); + __ slli_d(A2, A2, log2_elem_size); + ++ if (large_aligned.is_bound()) { ++ __ orr(T0, A0, A1); ++ __ andi(T0, T0, (1 << (log2_elem_size + 1)) - 1); ++ if (is_oop) { ++ Label skip; ++ __ bnez(T0, skip); ++ __ bl(large_aligned); ++ __ b(post); ++ __ bind(skip); ++ } else { ++ __ beqz(T0, large_aligned); ++ } ++ } ++ + if (is_oop) { + __ bl(large); + } else { @@ -53543,14 +65253,14 @@ index 00000000000..21bfc7d78cb + // used by generate_conjoint_int_oop_copy(). + // + address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, Label &small, -+ Label &large, const char *name, int small_limit, -+ bool dest_uninitialized = false) { -+ __ align(CodeEntryAlignment); ++ Label &large, Label &large_aligned, const char *name, ++ int small_limit, bool dest_uninitialized = false) { + StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); + address start = __ pc(); + -+ gen_maybe_oop_copy(is_oop, true, aligned, small, large, name, -+ small_limit, 2, dest_uninitialized); ++ gen_maybe_oop_copy(is_oop, true, aligned, small, large, large_aligned, ++ name, small_limit, 2, dest_uninitialized); + + return start; + } @@ -53571,10 +65281,10 @@ index 00000000000..21bfc7d78cb + // cache line boundaries will still be loaded and stored atomicly. 
+ // + address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, Label &small, -+ Label &large, const char *name, int small_limit, -+ bool dest_uninitialized = false) { -+ __ align(CodeEntryAlignment); ++ Label &large, Label &large_aligned, const char *name, ++ int small_limit, bool dest_uninitialized = false) { + StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); + address start = __ pc(); + + if (is_oop) { @@ -53583,16 +65293,16 @@ index 00000000000..21bfc7d78cb + array_overlap_test(StubRoutines::jint_disjoint_arraycopy(), 2); + } + -+ gen_maybe_oop_copy(is_oop, false, aligned, small, large, name, -+ small_limit, 2, dest_uninitialized); ++ gen_maybe_oop_copy(is_oop, false, aligned, small, large, large_aligned, ++ name, small_limit, 2, dest_uninitialized); + + return start; + } + + // Long small copy: less than { int:4, lsx:4, lasx:5 } elements. + void generate_long_small_copy(Label &entry, const char *name) { -+ __ align(CodeEntryAlignment); + StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); + + Label L; + __ bind(entry); @@ -53695,14 +65405,14 @@ index 00000000000..21bfc7d78cb + // used by generate_conjoint_int_oop_copy(). + // + address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, Label &small, -+ Label &large, const char *name, int small_limit, -+ bool dest_uninitialized = false) { -+ __ align(CodeEntryAlignment); ++ Label &large, Label &large_aligned, const char *name, ++ int small_limit, bool dest_uninitialized = false) { + StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); + address start = __ pc(); + -+ gen_maybe_oop_copy(is_oop, true, aligned, small, large, name, -+ small_limit, 3, dest_uninitialized); ++ gen_maybe_oop_copy(is_oop, true, aligned, small, large, large_aligned, ++ name, small_limit, 3, dest_uninitialized); + + return start; + } @@ -53723,10 +65433,10 @@ index 00000000000..21bfc7d78cb + // cache line boundaries will still be loaded and stored atomicly. 
+ // + address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, Label &small, -+ Label &large, const char *name, int small_limit, -+ bool dest_uninitialized = false) { -+ __ align(CodeEntryAlignment); ++ Label &large, Label &large_aligned, const char *name, ++ int small_limit, bool dest_uninitialized = false) { + StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); + address start = __ pc(); + + if (is_oop) { @@ -53735,8 +65445,8 @@ index 00000000000..21bfc7d78cb + array_overlap_test(StubRoutines::jlong_disjoint_arraycopy(), 3); + } + -+ gen_maybe_oop_copy(is_oop, false, aligned, small, large, name, -+ small_limit, 3, dest_uninitialized); ++ gen_maybe_oop_copy(is_oop, false, aligned, small, large, large_aligned, ++ name, small_limit, 3, dest_uninitialized); + + return start; + } @@ -54256,24 +65966,20 @@ index 00000000000..21bfc7d78cb + + void generate_arraycopy_stubs() { + Label disjoint_large_copy, conjoint_large_copy; ++ Label disjoint_large_copy_lsx, conjoint_large_copy_lsx; ++ Label disjoint_large_copy_lasx, conjoint_large_copy_lasx; + Label byte_small_copy, short_small_copy, int_small_copy, long_small_copy; -+ int int_oop_small_limit, long_oop_small_limit; ++ Label none; + ++ generate_disjoint_large_copy(disjoint_large_copy, "disjoint_large_copy"); ++ generate_conjoint_large_copy(conjoint_large_copy, "conjoint_large_copy"); ++ if (UseLSX) { ++ generate_disjoint_large_copy_lsx(disjoint_large_copy_lsx, "disjoint_large_copy_lsx"); ++ generate_conjoint_large_copy_lsx(conjoint_large_copy_lsx, "conjoint_large_copy_lsx"); ++ } + if (UseLASX) { -+ int_oop_small_limit = 9; -+ long_oop_small_limit = 5; -+ generate_disjoint_large_copy_lasx(disjoint_large_copy, "disjoint_large_copy_lasx"); -+ generate_conjoint_large_copy_lasx(conjoint_large_copy, "conjoint_large_copy_lasx"); -+ } else if (UseLSX) { -+ int_oop_small_limit = 7; -+ long_oop_small_limit = 4; -+ generate_disjoint_large_copy_lsx(disjoint_large_copy, "disjoint_large_copy_lsx"); -+ generate_conjoint_large_copy_lsx(conjoint_large_copy, "conjoint_large_copy_lsx"); -+ } else { -+ int_oop_small_limit = 7; -+ long_oop_small_limit = 4; -+ generate_disjoint_large_copy(disjoint_large_copy, "disjoint_large_copy_int"); -+ generate_conjoint_large_copy(conjoint_large_copy, "conjoint_large_copy_int"); ++ generate_disjoint_large_copy_lasx(disjoint_large_copy_lasx, "disjoint_large_copy_lasx"); ++ generate_conjoint_large_copy_lasx(conjoint_large_copy_lasx, "conjoint_large_copy_lasx"); + } + generate_byte_small_copy(byte_small_copy, "jbyte_small_copy"); + generate_short_small_copy(short_small_copy, "jshort_small_copy"); @@ -54281,39 +65987,78 @@ index 00000000000..21bfc7d78cb + generate_long_small_copy(long_small_copy, "jlong_small_copy"); + + if (UseCompressedOops) { -+ StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, int_small_copy, disjoint_large_copy, -+ "oop_disjoint_arraycopy", int_oop_small_limit); -+ StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, int_small_copy, disjoint_large_copy, -+ "oop_disjoint_arraycopy_uninit", int_oop_small_limit, true); -+ StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, int_small_copy, conjoint_large_copy, -+ "oop_arraycopy", int_oop_small_limit); -+ StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, int_small_copy, conjoint_large_copy, -+ "oop_arraycopy_uninit", int_oop_small_limit, true); ++ if (UseLSX) { ++ StubRoutines::_oop_disjoint_arraycopy = 
generate_disjoint_int_oop_copy(false, true, int_small_copy, disjoint_large_copy_lsx, disjoint_large_copy, "oop_disjoint_arraycopy", 7); ++ StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, int_small_copy, disjoint_large_copy_lsx, disjoint_large_copy, "oop_disjoint_arraycopy_uninit", 7, true); ++ } else { ++ StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, int_small_copy, disjoint_large_copy, none, "oop_disjoint_arraycopy", 7); ++ StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, int_small_copy, disjoint_large_copy, none, "oop_disjoint_arraycopy_uninit", 7, true); ++ } ++ if (UseLASX) { ++ StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, int_small_copy, conjoint_large_copy_lasx, conjoint_large_copy, "oop_arraycopy", 9); ++ StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, int_small_copy, conjoint_large_copy_lasx, conjoint_large_copy, "oop_arraycopy_uninit", 9, true); ++ } else if (UseLSX) { ++ StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, int_small_copy, conjoint_large_copy_lsx, conjoint_large_copy, "oop_arraycopy", 7); ++ StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, int_small_copy, conjoint_large_copy_lsx, conjoint_large_copy, "oop_arraycopy_uninit", 7, true); ++ } else { ++ StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, int_small_copy, conjoint_large_copy, none, "oop_arraycopy", 7); ++ StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, int_small_copy, conjoint_large_copy, none, "oop_arraycopy_uninit", 7, true); ++ } + } else { -+ StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, long_small_copy, disjoint_large_copy, -+ "oop_disjoint_arraycopy", long_oop_small_limit); -+ StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, long_small_copy, disjoint_large_copy, -+ "oop_disjoint_arraycopy_uninit", long_oop_small_limit, true); -+ StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, long_small_copy, conjoint_large_copy, -+ "oop_arraycopy", long_oop_small_limit); -+ StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, long_small_copy, conjoint_large_copy, -+ "oop_arraycopy_uninit", long_oop_small_limit, true); ++ if (UseLASX) { ++ StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, long_small_copy, disjoint_large_copy, disjoint_large_copy_lasx, "oop_disjoint_arraycopy", 5); ++ StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, long_small_copy, disjoint_large_copy, disjoint_large_copy_lasx, "oop_disjoint_arraycopy_uninit", 5, true); ++ StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, long_small_copy, conjoint_large_copy, conjoint_large_copy_lasx, "oop_arraycopy", 5); ++ StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, long_small_copy, conjoint_large_copy, conjoint_large_copy_lasx, "oop_arraycopy_uninit", 5, true); ++ } else if (UseLSX) { ++ StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, long_small_copy, disjoint_large_copy, disjoint_large_copy_lsx, "oop_disjoint_arraycopy", 4); ++ StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, long_small_copy, 
disjoint_large_copy, disjoint_large_copy_lsx, "oop_disjoint_arraycopy_uninit", 4, true);
++ StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, long_small_copy, conjoint_large_copy, conjoint_large_copy_lsx, "oop_arraycopy", 4);
++ StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, long_small_copy, conjoint_large_copy, conjoint_large_copy_lsx, "oop_arraycopy_uninit", 4, true);
++ } else {
++ StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, long_small_copy, disjoint_large_copy, none, "oop_disjoint_arraycopy", 4);
++ StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, long_small_copy, disjoint_large_copy, none, "oop_disjoint_arraycopy_uninit", 4, true);
++ StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, long_small_copy, conjoint_large_copy, none, "oop_arraycopy", 4);
++ StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, long_small_copy, conjoint_large_copy, none, "oop_arraycopy_uninit", 4, true);
++ }
+ }
+
-+ StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, byte_small_copy, disjoint_large_copy, "jbyte_disjoint_arraycopy");
-+ StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, short_small_copy, disjoint_large_copy, "jshort_disjoint_arraycopy");
-+ StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, int_small_copy, disjoint_large_copy,
-+ "jint_disjoint_arraycopy", int_oop_small_limit);
+
-+ StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, byte_small_copy, conjoint_large_copy, "jbyte_arraycopy");
-+ StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, short_small_copy, conjoint_large_copy, "jshort_arraycopy");
-+ StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, int_small_copy, conjoint_large_copy,
-+ "jint_arraycopy", int_oop_small_limit);
+
++ if (UseLASX) {
++ StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, byte_small_copy, disjoint_large_copy_lasx, disjoint_large_copy_lsx, "jbyte_disjoint_arraycopy");
++ StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, short_small_copy, disjoint_large_copy_lasx, disjoint_large_copy, "jshort_disjoint_arraycopy");
++ StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, int_small_copy, disjoint_large_copy_lasx, disjoint_large_copy, "jint_disjoint_arraycopy", 9);
+
++ StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, byte_small_copy, conjoint_large_copy_lasx, conjoint_large_copy_lsx, "jbyte_arraycopy");
++ StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, short_small_copy, conjoint_large_copy_lasx, conjoint_large_copy, "jshort_arraycopy");
++ StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, int_small_copy, conjoint_large_copy_lasx, conjoint_large_copy, "jint_arraycopy", 9);
++ } else if (UseLSX) {
++ StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, byte_small_copy, disjoint_large_copy_lsx, none, "jbyte_disjoint_arraycopy");
++ StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, short_small_copy, disjoint_large_copy_lsx, disjoint_large_copy, "jshort_disjoint_arraycopy");
++ StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, int_small_copy, disjoint_large_copy_lsx,
disjoint_large_copy, "jint_disjoint_arraycopy", 7); + -+ StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, long_small_copy, disjoint_large_copy, -+ "jlong_disjoint_arraycopy", long_oop_small_limit); -+ StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, long_small_copy, conjoint_large_copy, -+ "jlong_arraycopy", long_oop_small_limit); ++ StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, byte_small_copy, conjoint_large_copy_lsx, none, "jbyte_arraycopy"); ++ StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, short_small_copy, conjoint_large_copy_lsx, conjoint_large_copy, "jshort_arraycopy"); ++ StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, int_small_copy, conjoint_large_copy_lsx, conjoint_large_copy, "jint_arraycopy", 7); ++ } else { ++ StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, byte_small_copy, disjoint_large_copy, none, "jbyte_disjoint_arraycopy"); ++ StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, short_small_copy, disjoint_large_copy, none, "jshort_disjoint_arraycopy"); ++ StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, int_small_copy, disjoint_large_copy, none, "jint_disjoint_arraycopy", 7); ++ ++ StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, byte_small_copy, conjoint_large_copy, none, "jbyte_arraycopy"); ++ StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, short_small_copy, conjoint_large_copy, none, "jshort_arraycopy"); ++ StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, int_small_copy, conjoint_large_copy, none, "jint_arraycopy", 7); ++ } ++ ++ if (UseLASX) { ++ StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, long_small_copy, disjoint_large_copy, disjoint_large_copy_lasx, "jlong_disjoint_arraycopy", 5); ++ StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, long_small_copy, conjoint_large_copy, conjoint_large_copy_lasx, "jlong_arraycopy", 5); ++ } else if (UseLSX) { ++ StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, long_small_copy, disjoint_large_copy, disjoint_large_copy_lsx, "jlong_disjoint_arraycopy", 4); ++ StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, long_small_copy, conjoint_large_copy, conjoint_large_copy_lsx, "jlong_arraycopy", 4); ++ } else { ++ StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, long_small_copy, disjoint_large_copy, none, "jlong_disjoint_arraycopy", 4); ++ StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, long_small_copy, conjoint_large_copy, none, "jlong_arraycopy", 4); ++ } + + // We don't generate specialized code for HeapWord-aligned source + // arrays, so just use the code we've already generated @@ -54367,7 +66112,7 @@ index 00000000000..21bfc7d78cb + __ addi_d(SP, SP, -4 * wordSize); // four words for the returned {SP, FP, RA, PC} + + __ push(V0); -+ __ push_call_clobbered_registers_except(RegSet::of(V0)); ++ __ pushad_except_v0(); + + __ move(A0, T4); + __ call_VM_leaf @@ -54376,7 +66121,7 @@ index 00000000000..21bfc7d78cb + + __ reset_last_Java_frame(true); + -+ __ pop_call_clobbered_registers_except(RegSet::of(V0)); ++ __ popad_except_v0(); + + __ bnez(V0, deoptimize_label); + @@ -55762,6 +67507,45 @@ index 00000000000..21bfc7d78cb + return 
start; + } + ++ // add a function to implement SafeFetch32 and SafeFetchN ++ void generate_safefetch(const char* name, int size, address* entry, ++ address* fault_pc, address* continuation_pc) { ++ // safefetch signatures: ++ // int SafeFetch32(int* adr, int errValue); ++ // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue); ++ // ++ // arguments: ++ // A0 = adr ++ // A1 = errValue ++ // ++ // result: ++ // PPC_RET = *adr or errValue ++ StubCodeMark mark(this, "StubRoutines", name); ++ ++ // Entry point, pc or function descriptor. ++ *entry = __ pc(); ++ ++ // Load *adr into A1, may fault. ++ *fault_pc = __ pc(); ++ switch (size) { ++ case 4: ++ // int32_t ++ __ ld_w(A1, A0, 0); ++ break; ++ case 8: ++ // int64_t ++ __ ld_d(A1, A0, 0); ++ break; ++ default: ++ ShouldNotReachHere(); ++ } ++ ++ // return errValue or *adr ++ *continuation_pc = __ pc(); ++ __ add_d(V0, A1, R0); ++ __ jr(RA); ++ } ++ + +#undef __ +#define __ masm-> @@ -56515,6 +68299,14 @@ index 00000000000..21bfc7d78cb + StubRoutines::_dcos = generate_dsin_dcos(/* isCos = */ true); + } + ++ // Safefetch stubs. ++ generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry, ++ &StubRoutines::_safefetch32_fault_pc, ++ &StubRoutines::_safefetch32_continuation_pc); ++ generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry, ++ &StubRoutines::_safefetchN_fault_pc, ++ &StubRoutines::_safefetchN_continuation_pc); ++ +#ifdef COMPILER2 + if (UseMulAddIntrinsic) { + StubRoutines::_mulAdd = generate_mulAdd(); @@ -56865,13 +68657,13 @@ index 00000000000..53ded54ae6c +}; diff --git a/src/hotspot/cpu/loongarch/templateInterpreterGenerator_loongarch.cpp b/src/hotspot/cpu/loongarch/templateInterpreterGenerator_loongarch.cpp new file mode 100644 -index 00000000000..02af7c8ffa7 +index 00000000000..18e19e87b2e --- /dev/null +++ b/src/hotspot/cpu/loongarch/templateInterpreterGenerator_loongarch.cpp @@ -0,0 +1,2197 @@ +/* + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. -+ * Copyright (c) 2015, 2023, Loongson Technology. All rights reserved. ++ * Copyright (c) 2015, 2022, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it @@ -58249,8 +70041,8 @@ index 00000000000..02af7c8ffa7 +#endif + + __ li(t, _thread_in_native); -+ if (os::is_MP()) { -+ __ membar(Assembler::Membar_mask_bits(__ LoadStore|__ StoreStore)); ++ if(os::is_MP()) { ++ __ dbar(0); // store release + } + __ st_w(t, thread, in_bytes(JavaThread::thread_state_offset())); + @@ -58274,8 +70066,8 @@ index 00000000000..02af7c8ffa7 + __ get_thread(thread); +#endif + __ li(t, _thread_in_native_trans); -+ if (os::is_MP()) { -+ __ membar(Assembler::Membar_mask_bits(__ LoadStore|__ StoreStore)); ++ if(os::is_MP()) { ++ __ dbar(0); // store release + } + __ st_w(t, thread, in_bytes(JavaThread::thread_state_offset())); + @@ -58318,8 +70110,8 @@ index 00000000000..02af7c8ffa7 + + // change thread state + __ li(t, _thread_in_Java); -+ if (os::is_MP()) { -+ __ membar(Assembler::Membar_mask_bits(__ LoadStore|__ StoreStore)); ++ if(os::is_MP()) { ++ __ dbar(0); // store release + } + __ st_w(t, thread, in_bytes(JavaThread::thread_state_offset())); + __ reset_last_Java_frame(thread, true); @@ -58352,13 +70144,13 @@ index 00000000000..02af7c8ffa7 + __ ld_w(t, thread, in_bytes(JavaThread::stack_guard_state_offset())); + __ li(AT, (u1)StackOverflow::stack_guard_yellow_reserved_disabled); + __ bne(t, AT, no_reguard); -+ __ push_call_clobbered_registers(); ++ __ pushad(); + __ move(S5_heapbase, SP); + assert(StackAlignmentInBytes == 16, "must be"); + __ bstrins_d(SP, R0, 3, 0); + __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), relocInfo::runtime_call_type); + __ move(SP, S5_heapbase); -+ __ pop_call_clobbered_registers(); ++ __ popad(); + //add for compressedoops + __ reinit_heapbase(); + __ bind(no_reguard); @@ -59117,10 +70909,10 @@ index 00000000000..ddb38faf446 +#endif // CPU_LOONGARCH_TEMPLATETABLE_LOONGARCH_64_HPP diff --git a/src/hotspot/cpu/loongarch/templateTable_loongarch_64.cpp b/src/hotspot/cpu/loongarch/templateTable_loongarch_64.cpp new file mode 100644 -index 00000000000..2474f90c247 +index 00000000000..138bf701bf8 --- /dev/null +++ b/src/hotspot/cpu/loongarch/templateTable_loongarch_64.cpp -@@ -0,0 +1,4043 @@ +@@ -0,0 +1,4077 @@ +/* + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2021, Loongson Technology. All rights reserved. @@ -61299,6 +73091,38 @@ index 00000000000..2474f90c247 + __ jr(T4); +} + ++// ---------------------------------------------------------------------------- ++// Volatile variables demand their effects be made known to all CPU's ++// in order. Store buffers on most chips allow reads & writes to ++// reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode ++// without some kind of memory barrier (i.e., it's not sufficient that ++// the interpreter does not reorder volatile references, the hardware ++// also must not reorder them). ++// ++// According to the new Java Memory Model (JMM): ++// (1) All volatiles are serialized wrt to each other. ALSO reads & ++// writes act as aquire & release, so: ++// (2) A read cannot let unrelated NON-volatile memory refs that ++// happen after the read float up to before the read. It's OK for ++// non-volatile memory refs that happen before the volatile read to ++// float down below it. ++// (3) Similar a volatile write cannot let unrelated NON-volatile ++// memory refs that happen BEFORE the write float down to after the ++// write. It's OK for non-volatile memory refs that happen after the ++// volatile write to float up before it. 
++// ++// We only put in barriers around volatile refs (they are expensive), ++// not _between_ memory refs (that would require us to track the ++// flavor of the previous memory refs). Requirements (2) and (3) ++// require some barriers before volatile stores and after volatile ++// loads. These nearly cover requirement (1) but miss the ++// volatile-store-volatile-load case. This final case is placed after ++// volatile-stores although it could just as well go before ++// volatile-loads. ++void TemplateTable::volatile_barrier() { ++ if(os::is_MP()) __ membar(__ StoreLoad); ++} ++ +// we dont shift left 2 bits in get_cache_and_index_at_bcp +// for we always need shift the index we use it. the ConstantPoolCacheEntry +// is 16-byte long, index is the index in @@ -61503,7 +73327,7 @@ index 00000000000..2474f90c247 + + Label notVolatile; + __ beq(scratch, R0, notVolatile); -+ __ membar(MacroAssembler::AnyAny); ++ volatile_barrier(); + __ bind(notVolatile); + } + @@ -61649,7 +73473,7 @@ index 00000000000..2474f90c247 + { + Label notVolatile; + __ beq(scratch, R0, notVolatile); -+ __ membar(Assembler::Membar_mask_bits(__ LoadLoad | __ LoadStore)); ++ volatile_barrier(); + __ bind(notVolatile); + } +} @@ -61765,7 +73589,7 @@ index 00000000000..2474f90c247 + + Label notVolatile; + __ beq(scratch, R0, notVolatile); -+ __ membar(Assembler::Membar_mask_bits(__ StoreStore | __ LoadStore)); ++ volatile_barrier(); + __ bind(notVolatile); + } + @@ -61937,7 +73761,7 @@ index 00000000000..2474f90c247 + { + Label notVolatile; + __ beq(scratch, R0, notVolatile); -+ __ membar(Assembler::Membar_mask_bits(__ StoreLoad | __ StoreStore)); ++ volatile_barrier(); + __ bind(notVolatile); + } +} @@ -62047,7 +73871,7 @@ index 00000000000..2474f90c247 + + Label notVolatile; + __ beq(scratch, R0, notVolatile); -+ __ membar(Assembler::Membar_mask_bits(__ StoreStore | __ LoadStore)); ++ volatile_barrier(); + __ bind(notVolatile); + } + @@ -62096,7 +73920,7 @@ index 00000000000..2474f90c247 + { + Label notVolatile; + __ beq(scratch, R0, notVolatile); -+ __ membar(Assembler::Membar_mask_bits(__ StoreLoad | __ StoreStore)); ++ volatile_barrier(); + __ bind(notVolatile); + } +} @@ -62147,7 +73971,7 @@ index 00000000000..2474f90c247 + + Label notVolatile; + __ beq(scratch, R0, notVolatile); -+ __ membar(MacroAssembler::AnyAny); ++ volatile_barrier(); + __ bind(notVolatile); + } + @@ -62191,7 +74015,7 @@ index 00000000000..2474f90c247 + { + Label notVolatile; + __ beq(scratch, R0, notVolatile); -+ __ membar(Assembler::Membar_mask_bits(__ LoadLoad | __ LoadStore)); ++ volatile_barrier(); + __ bind(notVolatile); + } +} @@ -62221,7 +74045,7 @@ index 00000000000..2474f90c247 + + Label notVolatile; + __ beq(scratch, R0, notVolatile); -+ __ membar(MacroAssembler::AnyAny); ++ volatile_barrier(); + __ bind(notVolatile); + } + @@ -62246,7 +74070,7 @@ index 00000000000..2474f90c247 + { + Label notVolatile; + __ beq(scratch, R0, notVolatile); -+ __ membar(Assembler::Membar_mask_bits(__ LoadLoad | __ LoadStore)); ++ volatile_barrier(); + __ bind(notVolatile); + } +} @@ -62569,6 +74393,7 @@ index 00000000000..2474f90c247 + + __ bind(no_such_method); + // throw exception ++ __ pop(Rmethod); // pop return address (pushed by prepare_invoke) + __ restore_bcp(); + __ restore_locals(); + // Pass arguments for generating a verbose error message. 
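// Illustration (not part of the patch): volatile_barrier() above is the
// conservative form of the fence discipline described in the comment that
// precedes it. A portable C++ sketch of the same placement rules, using a
// relaxed atomic as a stand-in for a Java volatile field (illustrative
// only; the interpreter emits membar/dbar sequences, not std::atomic):
#include <atomic>
static std::atomic<int> g_field;
static int volatile_get_sketch() {
  int v = g_field.load(std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_acquire);  // rule (2): LoadLoad|LoadStore after the read
  return v;
}
static void volatile_put_sketch(int v) {
  std::atomic_thread_fence(std::memory_order_release);  // rule (3): LoadStore|StoreStore before the write
  g_field.store(v, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);  // covers the volatile-store/volatile-load case (StoreLoad)
}
// (End of illustration.)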
@@ -62582,6 +74407,7 @@ index 00000000000..2474f90c247 + + __ bind(no_such_interface); + // throw exception ++ __ pop(Rmethod); // pop return address (pushed by prepare_invoke) + __ restore_bcp(); + __ restore_locals(); + // Pass arguments for generating a verbose error message. @@ -63469,10 +75295,10 @@ index 00000000000..1a93123134c +#endif // CPU_LOONGARCH_VM_VERSION_EXT_LOONGARCH_HPP diff --git a/src/hotspot/cpu/loongarch/vm_version_loongarch.cpp b/src/hotspot/cpu/loongarch/vm_version_loongarch.cpp new file mode 100644 -index 00000000000..1a1ac923117 +index 00000000000..4cc21e7dd08 --- /dev/null +++ b/src/hotspot/cpu/loongarch/vm_version_loongarch.cpp -@@ -0,0 +1,432 @@ +@@ -0,0 +1,448 @@ +/* + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2021, Loongson Technology. All rights reserved. @@ -63620,9 +75446,27 @@ index 00000000000..1a1ac923117 + } else if (_cpuid_info.cpucfg_info_id1.bits.ARCH == 0b10 ) { + result |= CPU_LA64; + } ++ if (_cpuid_info.cpucfg_info_id1.bits.UAL != 0) ++ result |= CPU_UAL; + + if (_cpuid_info.cpucfg_info_id2.bits.FP_CFG != 0) + result |= CPU_FP; ++ if (_cpuid_info.cpucfg_info_id2.bits.LSX != 0) ++ result |= CPU_LSX; ++ if (_cpuid_info.cpucfg_info_id2.bits.LASX != 0) ++ result |= CPU_LASX; ++ if (_cpuid_info.cpucfg_info_id2.bits.COMPLEX != 0) ++ result |= CPU_COMPLEX; ++ if (_cpuid_info.cpucfg_info_id2.bits.CRYPTO != 0) ++ result |= CPU_CRYPTO; ++ if (_cpuid_info.cpucfg_info_id2.bits.LBT_X86 != 0) ++ result |= CPU_LBT_X86; ++ if (_cpuid_info.cpucfg_info_id2.bits.LBT_ARM != 0) ++ result |= CPU_LBT_ARM; ++ if (_cpuid_info.cpucfg_info_id2.bits.LBT_MIPS != 0) ++ result |= CPU_LBT_MIPS; ++ if (_cpuid_info.cpucfg_info_id2.bits.LAM != 0) ++ result |= CPU_LAM; + + if (_cpuid_info.cpucfg_info_id3.bits.CCDMA != 0) + result |= CPU_CCDMA; @@ -63642,15 +75486,13 @@ index 00000000000..1a1ac923117 + + clean_cpuFeatures(); + -+ get_os_cpu_info(); -+ + get_cpu_info_stub(&_cpuid_info); -+ _features |= get_feature_flags_by_cpucfg(); ++ _features = get_feature_flags_by_cpucfg(); + + _supports_cx8 = true; + + if (UseG1GC && FLAG_IS_DEFAULT(MaxGCPauseMillis)) { -+ FLAG_SET_DEFAULT(MaxGCPauseMillis, 150); ++ FLAG_SET_CMDLINE(MaxGCPauseMillis, 650); + } + + if (supports_lsx()) { @@ -63907,13 +75749,13 @@ index 00000000000..1a1ac923117 +} diff --git a/src/hotspot/cpu/loongarch/vm_version_loongarch.hpp b/src/hotspot/cpu/loongarch/vm_version_loongarch.hpp new file mode 100644 -index 00000000000..cae9f863c30 +index 00000000000..16c12a30ee4 --- /dev/null +++ b/src/hotspot/cpu/loongarch/vm_version_loongarch.hpp -@@ -0,0 +1,295 @@ +@@ -0,0 +1,294 @@ +/* + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. -+ * Copyright (c) 2015, 2023, Loongson Technology. All rights reserved. ++ * Copyright (c) 2015, 2021, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it @@ -64109,26 +75951,25 @@ index 00000000000..cae9f863c30 + }; + +#define CPU_FEATURE_FLAGS(decl) \ -+ decl(LAM, lam, 1) \ -+ decl(UAL, ual, 2) \ -+ decl(LSX, lsx, 4) \ -+ decl(LASX, lasx, 5) \ -+ decl(COMPLEX, complex, 7) \ -+ decl(CRYPTO, crypto, 8) \ -+ decl(LBT_X86, lbt_x86, 10) \ -+ decl(LBT_ARM, lbt_arm, 11) \ -+ decl(LBT_MIPS, lbt_mips, 12) \ -+ /* flags above must follow Linux HWCAP */ \ -+ decl(LA32, la32, 13) \ -+ decl(LA64, la64, 14) \ -+ decl(FP, fp, 15) \ -+ decl(LLEXC, llexc, 16) \ -+ decl(SCDLY, scdly, 17) \ -+ decl(LLDBAR, lldbar, 18) \ -+ decl(CCDMA, ccdma, 19) \ -+ decl(LLSYNC, llsync, 20) \ -+ decl(TGTSYNC, tgtsync, 21) \ -+ decl(ULSYNC, ulsync, 22) \ ++ decl(LA32, la32, 1) \ ++ decl(LA64, la64, 2) \ ++ decl(LLEXC, llexc, 3) \ ++ decl(SCDLY, scdly, 4) \ ++ decl(LLDBAR, lldbar, 5) \ ++ decl(LBT_X86, lbt_x86, 6) \ ++ decl(LBT_ARM, lbt_arm, 7) \ ++ decl(LBT_MIPS, lbt_mips, 8) \ ++ decl(CCDMA, ccdma, 9) \ ++ decl(COMPLEX, complex, 10) \ ++ decl(FP, fp, 11) \ ++ decl(CRYPTO, crypto, 14) \ ++ decl(LSX, lsx, 15) \ ++ decl(LASX, lasx, 17) \ ++ decl(LAM, lam, 21) \ ++ decl(LLSYNC, llsync, 23) \ ++ decl(TGTSYNC, tgtsync, 24) \ ++ decl(ULSYNC, ulsync, 25) \ ++ decl(UAL, ual, 26) + + enum Feature_Flag { +#define DECLARE_CPU_FEATURE_FLAG(id, name, bit) CPU_##id = (1 << bit), @@ -64159,8 +76000,8 @@ index 00000000000..cae9f863c30 + static CpuidInfo _cpuid_info; + + static uint32_t get_feature_flags_by_cpucfg(); ++ static int get_feature_flags_by_cpuinfo(int features); + static void get_processor_features(); -+ static void get_os_cpu_info(); + +public: + // Offsets for cpuid asm stub @@ -80224,10 +92065,10 @@ index 00000000000..3563bbe0e59 + diff --git a/src/hotspot/cpu/mips/mips_64.ad b/src/hotspot/cpu/mips/mips_64.ad new file mode 100644 -index 00000000000..882878f739a +index 00000000000..ec85f64244c --- /dev/null +++ b/src/hotspot/cpu/mips/mips_64.ad -@@ -0,0 +1,12317 @@ +@@ -0,0 +1,12316 @@ +// +// Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. +// Copyright (c) 2015, 2022, Loongson Technology. All rights reserved. @@ -87357,7 +99198,6 @@ index 00000000000..882878f739a + +instruct membar_storestore() %{ + match(MemBarStoreStore); -+ match(StoreStoreFence); + + ins_cost(400); + format %{ "MEMBAR-storestore @ membar_storestore" %} @@ -95341,13 +107181,13 @@ index 00000000000..4a9b22bfef2 + diff --git a/src/hotspot/cpu/mips/register_mips.hpp b/src/hotspot/cpu/mips/register_mips.hpp new file mode 100644 -index 00000000000..c2124538a0f +index 00000000000..4f74717c24f --- /dev/null +++ b/src/hotspot/cpu/mips/register_mips.hpp -@@ -0,0 +1,345 @@ +@@ -0,0 +1,344 @@ +/* + * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. -+ * Copyright (c) 2015, 2024, Loongson Technology. All rights reserved. ++ * Copyright (c) 2015, 2022, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it @@ -95452,9 +107292,8 @@ index 00000000000..c2124538a0f +#define NOREG ((Register)(noreg_RegisterEnumValue)) + +#define I0 ((Register)(i0_RegisterEnumValue)) -+// Conflict with I1 and I2 in googlemock/include/gmock/gmock-actions.h -+//#define I1 ((Register)(i1_RegisterEnumValue)) -+//#define I2 ((Register)(i2_RegisterEnumValue)) ++#define I1 ((Register)(i1_RegisterEnumValue)) ++#define I2 ((Register)(i2_RegisterEnumValue)) +#define I3 ((Register)(i3_RegisterEnumValue)) +#define I4 ((Register)(i4_RegisterEnumValue)) +#define I5 ((Register)(i5_RegisterEnumValue)) @@ -99510,13 +111349,13 @@ index 00000000000..48cc424a54e +#endif diff --git a/src/hotspot/cpu/mips/stubGenerator_mips_64.cpp b/src/hotspot/cpu/mips/stubGenerator_mips_64.cpp new file mode 100644 -index 00000000000..e894a302b50 +index 00000000000..ad44d23c531 --- /dev/null +++ b/src/hotspot/cpu/mips/stubGenerator_mips_64.cpp -@@ -0,0 +1,2725 @@ +@@ -0,0 +1,2774 @@ +/* + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. -+ * Copyright (c) 2015, 2023, Loongson Technology. All rights reserved. ++ * Copyright (c) 2015, 2022, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it @@ -101421,6 +113260,47 @@ index 00000000000..e894a302b50 + StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); + } + ++ // add a function to implement SafeFetch32 and SafeFetchN ++ void generate_safefetch(const char* name, int size, address* entry, ++ address* fault_pc, address* continuation_pc) { ++ // safefetch signatures: ++ // int SafeFetch32(int* adr, int errValue); ++ // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue); ++ // ++ // arguments: ++ // A0 = adr ++ // A1 = errValue ++ // ++ // result: ++ // PPC_RET = *adr or errValue ++ ++ StubCodeMark mark(this, "StubRoutines", name); ++ ++ // Entry point, pc or function descriptor. ++ *entry = __ pc(); ++ ++ // Load *adr into A1, may fault. ++ *fault_pc = __ pc(); ++ switch (size) { ++ case 4: ++ // int32_t ++ __ lw(A1, A0, 0); ++ break; ++ case 8: ++ // int64_t ++ __ ld(A1, A0, 0); ++ break; ++ default: ++ ShouldNotReachHere(); ++ } ++ ++ // return errValue or *adr ++ *continuation_pc = __ pc(); ++ __ addu(V0,A1,R0); ++ __ jr(RA); ++ __ delayed()->nop(); ++ } ++ + +#undef __ +#define __ masm-> @@ -102196,6 +114076,14 @@ index 00000000000..e894a302b50 + generate_arraycopy_stubs(); +#endif + ++ // Safefetch stubs. 
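// Illustration (not part of the patch): SafeFetch32/SafeFetchN perform a
// load that is allowed to fault. When the access traps, the platform signal
// handler recognizes the faulting pc as *fault_pc and resumes execution at
// *continuation_pc, where the caller-supplied error value takes the place
// of the loaded word, so callers can probe memory without crashing:
//
//   int v = SafeFetch32(possibly_unmapped_addr, 0xBADBAD);
//   if (v == 0xBADBAD) { /* unreadable — or the word genuinely held 0xBADBAD */ }
//
// (The "PPC_RET" wording in the stub comment above is a leftover from the
// PPC port this helper was adapted from; on this port the result is
// returned in V0.)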
++ generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry, ++ &StubRoutines::_safefetch32_fault_pc, ++ &StubRoutines::_safefetch32_continuation_pc); ++ generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry, ++ &StubRoutines::_safefetchN_fault_pc, ++ &StubRoutines::_safefetchN_continuation_pc); ++ +#ifdef COMPILER2 + if (UseMontgomeryMultiplyIntrinsic) { + if (UseLEXT1) { @@ -110703,13 +122591,313 @@ index 00000000000..f373aac45c2 + const unsigned int icache_line_size = wordSize; + return icache_line_size; +} -diff --git a/src/hotspot/cpu/ppc/gc/z/zGlobals_ppc.hpp b/src/hotspot/cpu/ppc/gc/z/zGlobals_ppc.hpp -index 3657b16fc1a..a2aab225743 100644 ---- a/src/hotspot/cpu/ppc/gc/z/zGlobals_ppc.hpp -+++ b/src/hotspot/cpu/ppc/gc/z/zGlobals_ppc.hpp -@@ -30,6 +30,8 @@ const size_t ZPlatformGranuleSizeShift = 21; // 2MB +diff --git a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp +index af9a856ae08..dc2acbe8b15 100644 +--- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp ++++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp +@@ -506,6 +506,9 @@ void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) { + } + } + ++void LIR_Assembler::emit_opCmpBranch(LIR_OpCmpBranch* op) { ++ ShouldNotReachHere(); ++} + + void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) { + Bytecodes::Code code = op->bytecode(); +@@ -1609,6 +1612,10 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L + __ bind(skip); + } + ++void LIR_Assembler::cmp_cmove(LIR_Condition condition, LIR_Opr left, LIR_Opr right, LIR_Opr src1, LIR_Opr src2, LIR_Opr result, BasicType type) { ++ ShouldNotReachHere(); ++} ++ + + void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, + CodeEmitInfo* info, bool pop_fpu_stack) { +diff --git a/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp b/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp +index 2c685920367..c0def81ce44 100644 +--- a/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp ++++ b/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp +@@ -275,21 +275,29 @@ void LIRGenerator::increment_counter(LIR_Address* addr, int step) { + __ move(temp, addr); + } + +- +-void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) { ++template ++void LIRGenerator::cmp_mem_int_branch(LIR_Condition condition, LIR_Opr base, int disp, int c, T tgt, CodeEmitInfo* info) { + LIR_Opr tmp = FrameMap::R0_opr; + __ load(new LIR_Address(base, disp, T_INT), tmp, info); +- __ cmp(condition, tmp, c); ++ __ cmp_branch(condition, tmp, c, tgt); + } + ++// Explicit instantiation for all supported types. ++template void LIRGenerator::cmp_mem_int_branch(LIR_Condition, LIR_Opr, int, int, Label*, CodeEmitInfo*); ++template void LIRGenerator::cmp_mem_int_branch(LIR_Condition, LIR_Opr, int, int, BlockBegin*, CodeEmitInfo*); ++template void LIRGenerator::cmp_mem_int_branch(LIR_Condition, LIR_Opr, int, int, CodeStub*, CodeEmitInfo*); + +-void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, +- int disp, BasicType type, CodeEmitInfo* info) { ++template ++void LIRGenerator::cmp_reg_mem_branch(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, T tgt, CodeEmitInfo* info) { + LIR_Opr tmp = FrameMap::R0_opr; + __ load(new LIR_Address(base, disp, type), tmp, info); +- __ cmp(condition, reg, tmp); ++ __ cmp_branch(condition, reg, tmp, tgt); + } + ++// Explicit instantiation for all supported types. 
++template void LIRGenerator::cmp_reg_mem_branch(LIR_Condition, LIR_Opr, LIR_Opr, int, BasicType, Label*, CodeEmitInfo*); ++template void LIRGenerator::cmp_reg_mem_branch(LIR_Condition, LIR_Opr, LIR_Opr, int, BasicType, BlockBegin*, CodeEmitInfo*); ++template void LIRGenerator::cmp_reg_mem_branch(LIR_Condition, LIR_Opr, LIR_Opr, int, BasicType, CodeStub*, CodeEmitInfo*); + + bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) { + assert(left != result, "should be different registers"); +diff --git a/src/hotspot/cpu/ppc/c1_LIR_ppc.cpp b/src/hotspot/cpu/ppc/c1_LIR_ppc.cpp +index fb234e82985..8f77a103edb 100644 +--- a/src/hotspot/cpu/ppc/c1_LIR_ppc.cpp ++++ b/src/hotspot/cpu/ppc/c1_LIR_ppc.cpp +@@ -62,3 +62,24 @@ void LIR_Address::verify() const { + #endif + } + #endif // PRODUCT ++ ++template ++void LIR_List::cmp_branch(LIR_Condition condition, LIR_Opr left, LIR_Opr right, T tgt, CodeEmitInfo* info) { ++ cmp(condition, left, right, info); ++ branch(condition, tgt); ++} ++ ++// Explicit instantiation for all supported types. ++template void LIR_List::cmp_branch(LIR_Condition, LIR_Opr, LIR_Opr, Label*, CodeEmitInfo*); ++template void LIR_List::cmp_branch(LIR_Condition, LIR_Opr, LIR_Opr, BlockBegin*, CodeEmitInfo*); ++template void LIR_List::cmp_branch(LIR_Condition, LIR_Opr, LIR_Opr, CodeStub*, CodeEmitInfo*); ++ ++void LIR_List::cmp_branch(LIR_Condition condition, LIR_Opr left, LIR_Opr right, BlockBegin* block, BlockBegin* unordered) { ++ cmp(condition, left, right); ++ branch(condition, block, unordered); ++} ++ ++void LIR_List::cmp_cmove(LIR_Condition condition, LIR_Opr left, LIR_Opr right, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type) { ++ cmp(condition, left, right); ++ cmove(condition, src1, src2, dst, type); ++} +diff --git a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp +index a7d5a4a1c42..d9a3c4ae5ee 100644 +--- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp ++++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp +@@ -395,6 +395,9 @@ void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) { + } + } + ++void LIR_Assembler::emit_opCmpBranch(LIR_OpCmpBranch* op) { ++ ShouldNotReachHere(); ++} + + void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) { + LIR_Opr src = op->in_opr(); +@@ -1501,6 +1504,10 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L + } + } + ++void LIR_Assembler::cmp_cmove(LIR_Condition condition, LIR_Opr left, LIR_Opr right, LIR_Opr src1, LIR_Opr src2, LIR_Opr result, BasicType type) { ++ ShouldNotReachHere(); ++} ++ + void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, + CodeEmitInfo* info, bool pop_fpu_stack) { + assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method"); +diff --git a/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp b/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp +index c6d5085079f..755b3738b3f 100644 +--- a/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp ++++ b/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp +@@ -214,16 +214,29 @@ void LIRGenerator::increment_counter(LIR_Address* addr, int step) { + __ add((LIR_Opr)addr, LIR_OprFact::intConst(step), (LIR_Opr)addr); + } + +-void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) { ++template ++void LIRGenerator::cmp_mem_int_branch(LIR_Condition condition, LIR_Opr base, int disp, int c, T tgt, CodeEmitInfo* info) { + LIR_Opr scratch = FrameMap::Z_R1_opr; + 
__ load(new LIR_Address(base, disp, T_INT), scratch, info); +- __ cmp(condition, scratch, c); ++ __ cmp_branch(condition, scratch, c, tgt); + } + +-void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) { ++// Explicit instantiation for all supported types. ++template void LIRGenerator::cmp_mem_int_branch(LIR_Condition, LIR_Opr, int, int, Label*, CodeEmitInfo*); ++template void LIRGenerator::cmp_mem_int_branch(LIR_Condition, LIR_Opr, int, int, BlockBegin*, CodeEmitInfo*); ++template void LIRGenerator::cmp_mem_int_branch(LIR_Condition, LIR_Opr, int, int, CodeStub*, CodeEmitInfo*); ++ ++template ++void LIRGenerator::cmp_reg_mem_branch(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, T tgt, CodeEmitInfo* info) { + __ cmp_reg_mem(condition, reg, new LIR_Address(base, disp, type), info); ++ __ branch(condition, tgt); + } + ++// Explicit instantiation for all supported types. ++template void LIRGenerator::cmp_reg_mem_branch(LIR_Condition, LIR_Opr, LIR_Opr, int, BasicType, Label*, CodeEmitInfo*); ++template void LIRGenerator::cmp_reg_mem_branch(LIR_Condition, LIR_Opr, LIR_Opr, int, BasicType, BlockBegin*, CodeEmitInfo*); ++template void LIRGenerator::cmp_reg_mem_branch(LIR_Condition, LIR_Opr, LIR_Opr, int, BasicType, CodeStub*, CodeEmitInfo*); ++ + bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) { + if (tmp->is_valid()) { + if (is_power_of_2(c + 1)) { +diff --git a/src/hotspot/cpu/s390/c1_LIR_s390.cpp b/src/hotspot/cpu/s390/c1_LIR_s390.cpp +index 3c46915e475..9fcc1a8aedb 100644 +--- a/src/hotspot/cpu/s390/c1_LIR_s390.cpp ++++ b/src/hotspot/cpu/s390/c1_LIR_s390.cpp +@@ -56,3 +56,23 @@ void LIR_Address::verify() const { + } + #endif // PRODUCT + ++template ++void LIR_List::cmp_branch(LIR_Condition condition, LIR_Opr left, LIR_Opr right, T tgt, CodeEmitInfo* info) { ++ cmp(condition, left, right, info); ++ branch(condition, tgt); ++} ++ ++// Explicit instantiation for all supported types. 
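// Illustration (not part of the patch): cmp_cmove fuses "compare, then
// conditional move" just as cmp_branch fuses "compare, then branch". In C
// terms it computes
//
//   dst = compare(cond, left, right) ? src1 : src2;
//
// and the definition further below expands it back into the legacy
// cmp + cmove pair on this port, while a flag-less port can select on
// register operands directly.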
++template void LIR_List::cmp_branch(LIR_Condition, LIR_Opr, LIR_Opr, Label*, CodeEmitInfo*); ++template void LIR_List::cmp_branch(LIR_Condition, LIR_Opr, LIR_Opr, BlockBegin*, CodeEmitInfo*); ++template void LIR_List::cmp_branch(LIR_Condition, LIR_Opr, LIR_Opr, CodeStub*, CodeEmitInfo*); ++ ++void LIR_List::cmp_branch(LIR_Condition condition, LIR_Opr left, LIR_Opr right, BlockBegin* block, BlockBegin* unordered) { ++ cmp(condition, left, right); ++ branch(condition, block, unordered); ++} ++ ++void LIR_List::cmp_cmove(LIR_Condition condition, LIR_Opr left, LIR_Opr right, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type) { ++ cmp(condition, left, right); ++ cmove(condition, src1, src2, dst, type); ++} +diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp +index e2454f32481..ee89adfd72e 100644 +--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp ++++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp +@@ -1459,6 +1459,10 @@ void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) { + } + } + ++void LIR_Assembler::emit_opCmpBranch(LIR_OpCmpBranch* op) { ++ ShouldNotReachHere(); ++} ++ + void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) { + LIR_Opr src = op->in_opr(); + LIR_Opr dest = op->result_opr(); +@@ -2077,6 +2081,9 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L + } + } + ++void LIR_Assembler::cmp_cmove(LIR_Condition condition, LIR_Opr left, LIR_Opr right, LIR_Opr src1, LIR_Opr src2, LIR_Opr result, BasicType type) { ++ ShouldNotReachHere(); ++} + + void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) { + assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method"); +diff --git a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp +index b99f16fea05..08d28f4f368 100644 +--- a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp ++++ b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp +@@ -259,15 +259,27 @@ void LIRGenerator::increment_counter(LIR_Address* addr, int step) { + __ add((LIR_Opr)addr, LIR_OprFact::intConst(step), (LIR_Opr)addr); + } + +-void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) { ++template ++void LIRGenerator::cmp_mem_int_branch(LIR_Condition condition, LIR_Opr base, int disp, int c, T tgt, CodeEmitInfo* info) { + __ cmp_mem_int(condition, base, disp, c, info); ++ __ branch(condition, tgt); + } + ++// Explicit instantiation for all supported types. ++template void LIRGenerator::cmp_mem_int_branch(LIR_Condition, LIR_Opr, int, int, Label*, CodeEmitInfo*); ++template void LIRGenerator::cmp_mem_int_branch(LIR_Condition, LIR_Opr, int, int, BlockBegin*, CodeEmitInfo*); ++template void LIRGenerator::cmp_mem_int_branch(LIR_Condition, LIR_Opr, int, int, CodeStub*, CodeEmitInfo*); + +-void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) { ++template ++void LIRGenerator::cmp_reg_mem_branch(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, T tgt, CodeEmitInfo* info) { + __ cmp_reg_mem(condition, reg, new LIR_Address(base, disp, type), info); ++ __ branch(condition, tgt); + } + ++// Explicit instantiation for all supported types. 
++template void LIRGenerator::cmp_reg_mem_branch(LIR_Condition, LIR_Opr, LIR_Opr, int, BasicType, Label*, CodeEmitInfo*); ++template void LIRGenerator::cmp_reg_mem_branch(LIR_Condition, LIR_Opr, LIR_Opr, int, BasicType, BlockBegin*, CodeEmitInfo*); ++template void LIRGenerator::cmp_reg_mem_branch(LIR_Condition, LIR_Opr, LIR_Opr, int, BasicType, CodeStub*, CodeEmitInfo*); + + bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) { + if (tmp->is_valid() && c > 0 && c < max_jint) { +diff --git a/src/hotspot/cpu/x86/c1_LIR_x86.cpp b/src/hotspot/cpu/x86/c1_LIR_x86.cpp +index f7e3392d2e5..31cf9da42a0 100644 +--- a/src/hotspot/cpu/x86/c1_LIR_x86.cpp ++++ b/src/hotspot/cpu/x86/c1_LIR_x86.cpp +@@ -72,3 +72,24 @@ void LIR_Address::verify() const { + #endif + } + #endif // PRODUCT ++ ++template ++void LIR_List::cmp_branch(LIR_Condition condition, LIR_Opr left, LIR_Opr right, T tgt, CodeEmitInfo* info) { ++ cmp(condition, left, right, info); ++ branch(condition, tgt); ++} ++ ++// Explicit instantiation for all supported types. ++template void LIR_List::cmp_branch(LIR_Condition, LIR_Opr, LIR_Opr, Label*, CodeEmitInfo*); ++template void LIR_List::cmp_branch(LIR_Condition, LIR_Opr, LIR_Opr, BlockBegin*, CodeEmitInfo*); ++template void LIR_List::cmp_branch(LIR_Condition, LIR_Opr, LIR_Opr, CodeStub*, CodeEmitInfo*); ++ ++void LIR_List::cmp_branch(LIR_Condition condition, LIR_Opr left, LIR_Opr right, BlockBegin* block, BlockBegin* unordered) { ++ cmp(condition, left, right); ++ branch(condition, block, unordered); ++} ++ ++void LIR_List::cmp_cmove(LIR_Condition condition, LIR_Opr left, LIR_Opr right, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type) { ++ cmp(condition, left, right); ++ cmove(condition, src1, src2, dst, type); ++} +diff --git a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp +index 6fc8833f7f1..4e9d6aad41e 100644 +--- a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp ++++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp +@@ -266,7 +266,8 @@ void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, + #define __ ce->masm()-> + + void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce, +- LIR_Opr ref) const { ++ LIR_Opr ref, ++ LIR_Opr res) const { + __ testptr(ref->as_register(), address_bad_mask_from_thread(r15_thread)); + } + +diff --git a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp +index c83783d3e3f..1fa546cfaf4 100644 +--- a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp ++++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp +@@ -80,7 +80,8 @@ public: + + #ifdef COMPILER1 + void generate_c1_load_barrier_test(LIR_Assembler* ce, +- LIR_Opr ref) const; ++ LIR_Opr ref, ++ LIR_Opr res) const; + + void generate_c1_load_barrier_stub(LIR_Assembler* ce, + ZLoadBarrierStubC1* stub) const; +diff --git a/src/hotspot/cpu/x86/gc/z/zGlobals_x86.hpp b/src/hotspot/cpu/x86/gc/z/zGlobals_x86.hpp +index db558d8cb2a..94f27399309 100644 +--- a/src/hotspot/cpu/x86/gc/z/zGlobals_x86.hpp ++++ b/src/hotspot/cpu/x86/gc/z/zGlobals_x86.hpp +@@ -28,6 +28,8 @@ const size_t ZPlatformGranuleSizeShift = 21; // 2MB const size_t ZPlatformHeapViews = 3; - const size_t ZPlatformCacheLineSize = DEFAULT_CACHE_LINE_SIZE; + const size_t ZPlatformCacheLineSize = 64; +const bool ZPlatformLoadBarrierTestResultInRegister = false; + @@ -110717,7 +122905,7 @@ index 3657b16fc1a..a2aab225743 100644 
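// Illustration (not part of the patch): ZGC's C1 fast path tests the loaded
// reference against the thread-local bad mask, in essence
//
//   bool needs_slow_path = (ref & bad_mask) != 0;
//
// On x86 that test leaves its result in the condition flags (the testptr
// above), so ZPlatformLoadBarrierTestResultInRegister stays false and the
// new `res` operand goes unused; a flag-less port is presumably expected to
// set the constant to true and branch on the result materialized in `res`
// (an assumption inferred from the signature change, not stated in the patch).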
size_t ZPlatformAddressMetadataShift(); diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp -index 7c951cee51c..5747d4d3f55 100644 +index 8d2d5d5657e..dd8790a9299 100644 --- a/src/hotspot/os/linux/os_linux.cpp +++ b/src/hotspot/os/linux/os_linux.cpp @@ -23,6 +23,12 @@ @@ -110733,7 +122921,7 @@ index 7c951cee51c..5747d4d3f55 100644 // no precompiled headers #include "jvm.h" #include "classfile/vmSymbols.hpp" -@@ -2457,7 +2463,7 @@ void os::print_memory_info(outputStream* st) { +@@ -2416,7 +2422,7 @@ void os::print_memory_info(outputStream* st) { // before "flags" so if we find a second "model name", then the // "flags" field is considered missing. static bool print_model_name_and_flags(outputStream* st, char* buf, size_t buflen) { @@ -110742,7 +122930,7 @@ index 7c951cee51c..5747d4d3f55 100644 // Other platforms have less repetitive cpuinfo files FILE *fp = fopen("/proc/cpuinfo", "r"); if (fp) { -@@ -2547,7 +2553,7 @@ void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) { +@@ -2506,7 +2512,7 @@ void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) { print_sys_devices_cpu_info(st, buf, buflen); } @@ -110783,13 +122971,13 @@ index 00000000000..30719a0340b + */ diff --git a/src/hotspot/os_cpu/linux_loongarch/atomic_linux_loongarch.hpp b/src/hotspot/os_cpu/linux_loongarch/atomic_linux_loongarch.hpp new file mode 100644 -index 00000000000..77413aba9f4 +index 00000000000..4fab36f92b4 --- /dev/null +++ b/src/hotspot/os_cpu/linux_loongarch/atomic_linux_loongarch.hpp -@@ -0,0 +1,275 @@ +@@ -0,0 +1,269 @@ +/* + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. -+ * Copyright (c) 2015, 2023, Loongson Technology. All rights reserved. ++ * Copyright (c) 2015, 2021, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it @@ -110956,16 +123144,13 @@ index 00000000000..77413aba9f4 + + switch (order) { + case memory_order_relaxed: -+ case memory_order_release: + asm volatile ( + "1: ll.w %[prev], %[dest] \n\t" + " bne %[prev], %[_old], 2f \n\t" + " move %[temp], %[_new] \n\t" + " sc.w %[temp], %[dest] \n\t" + " beqz %[temp], 1b \n\t" -+ " b 3f \n\t" -+ "2: dbar 0x700 \n\t" -+ "3: \n\t" ++ "2: \n\t" + : [prev] "=&r" (prev), [temp] "=&r" (temp) + : [_old] "r" (compare_value), [_new] "r" (exchange_value), [dest] "ZC" (*dest) + : "memory"); @@ -110978,7 +123163,7 @@ index 00000000000..77413aba9f4 + " sc.w %[temp], %[dest] \n\t" + " beqz %[temp], 1b \n\t" + " b 3f \n\t" -+ "2: dbar 0x14 \n\t" ++ "2: dbar 0 \n\t" + "3: \n\t" + : [prev] "=&r" (prev), [temp] "=&r" (temp) + : [_old] "r" (compare_value), [_new] "r" (exchange_value), [dest] "ZC" (*dest) @@ -111000,16 +123185,13 @@ index 00000000000..77413aba9f4 + + switch (order) { + case memory_order_relaxed: -+ case memory_order_release: + asm volatile ( + "1: ll.d %[prev], %[dest] \n\t" + " bne %[prev], %[_old], 2f \n\t" + " move %[temp], %[_new] \n\t" + " sc.d %[temp], %[dest] \n\t" + " beqz %[temp], 1b \n\t" -+ " b 3f \n\t" -+ "2: dbar 0x700 \n\t" -+ "3: \n\t" ++ "2: \n\t" + : [prev] "=&r" (prev), [temp] "=&r" (temp) + : [_old] "r" (compare_value), [_new] "r" (exchange_value), [dest] "ZC" (*dest) + : "memory"); @@ -111022,7 +123204,7 @@ index 00000000000..77413aba9f4 + " sc.d %[temp], %[dest] \n\t" + " beqz %[temp], 1b \n\t" + " b 3f \n\t" -+ "2: dbar 0x14 \n\t" ++ "2: dbar 0 \n\t" + "3: \n\t" + : [prev] "=&r" (prev), [temp] "=&r" (temp) + : [_old] "r" (compare_value), [_new] "r" (exchange_value), [dest] "ZC" (*dest) @@ -111365,7 +123547,7 @@ index 00000000000..ebd73af0c53 + diff --git a/src/hotspot/os_cpu/linux_loongarch/orderAccess_linux_loongarch.hpp b/src/hotspot/os_cpu/linux_loongarch/orderAccess_linux_loongarch.hpp new file mode 100644 -index 00000000000..6236e741d05 +index 00000000000..23a9d27b0f4 --- /dev/null +++ b/src/hotspot/os_cpu/linux_loongarch/orderAccess_linux_loongarch.hpp @@ -0,0 +1,52 @@ @@ -111402,31 +123584,31 @@ index 00000000000..6236e741d05 +// Included in orderAccess.hpp header file. + +// Implementation of class OrderAccess. 
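+
+// Background for the fence macro below: DBAR is the LoongArch memory-barrier
+// instruction, and hint 0 is a full fence that orders all earlier loads and
+// stores against all later ones. When os::is_ActiveCoresMP() reports that
+// only a single coherent core domain is active, no hardware ordering is
+// needed, so the macro degrades to a compiler-only barrier: a nop carrying a
+// "memory" clobber, which merely prevents compiler reordering.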
-+#define inlasm_sync(v) if (os::is_ActiveCoresMP()) \ ++#define inlasm_sync() if (os::is_ActiveCoresMP()) \ + __asm__ __volatile__ ("nop" : : : "memory"); \ + else \ -+ __asm__ __volatile__ ("dbar %0" : :"K"(v) : "memory"); -+#define inlasm_synci() __asm__ __volatile__ ("ibar 0" : : : "memory"); ++ __asm__ __volatile__ ("dbar 0" : : : "memory"); + -+inline void OrderAccess::loadload() { inlasm_sync(0x15); } -+inline void OrderAccess::storestore() { inlasm_sync(0x1a); } -+inline void OrderAccess::loadstore() { inlasm_sync(0x16); } -+inline void OrderAccess::storeload() { inlasm_sync(0x19); } ++inline void OrderAccess::loadload() { inlasm_sync(); } ++inline void OrderAccess::storestore() { inlasm_sync(); } ++inline void OrderAccess::loadstore() { inlasm_sync(); } ++inline void OrderAccess::storeload() { inlasm_sync(); } ++ ++inline void OrderAccess::acquire() { inlasm_sync(); } ++inline void OrderAccess::release() { inlasm_sync(); } ++inline void OrderAccess::fence() { inlasm_sync(); } ++inline void OrderAccess::cross_modify_fence_impl() { inlasm_sync(); } + -+inline void OrderAccess::acquire() { inlasm_sync(0x14); } -+inline void OrderAccess::release() { inlasm_sync(0x12); } -+inline void OrderAccess::fence() { inlasm_sync(0x10); } -+inline void OrderAccess::cross_modify_fence_impl() { inlasm_synci(); } + +#undef inlasm_sync + +#endif // OS_CPU_LINUX_LOONGARCH_ORDERACCESS_LINUX_LOONGARCH_HPP diff --git a/src/hotspot/os_cpu/linux_loongarch/os_linux_loongarch.cpp b/src/hotspot/os_cpu/linux_loongarch/os_linux_loongarch.cpp new file mode 100644 -index 00000000000..b32ffe9105e +index 00000000000..84519a31f56 --- /dev/null +++ b/src/hotspot/os_cpu/linux_loongarch/os_linux_loongarch.cpp -@@ -0,0 +1,529 @@ +@@ -0,0 +1,500 @@ +/* + * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2021, Loongson Technology. All rights reserved. @@ -111558,12 +123740,6 @@ index 00000000000..b32ffe9105e + intptr_t* sp; + intptr_t* fp; + address epc = fetch_frame_from_context(ucVoid, &sp, &fp); -+ if (!is_readable_pointer(epc)) { -+ // Try to recover from calling into bad memory -+ // Assume new frame has not been set up, the same as -+ // compiled frame stack bang -+ return fetch_compiled_frame_from_context(ucVoid); -+ } + return frame(sp, fp, epc); +} + @@ -111652,7 +123828,7 @@ index 00000000000..b32ffe9105e +#endif + + // Handle signal from NativeJump::patch_verified_entry(). -+ if (sig == SIGILL && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) { ++ if (sig == SIGILL & nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) { +#ifdef PRINT_SIGNAL_HANDLE + tty->print_cr("verified entry = %lx, sig=%d", nativeInstruction_at(pc), sig); +#endif @@ -111711,24 +123887,6 @@ index 00000000000..b32ffe9105e +#ifdef PRINT_SIGNAL_HANDLE + tty->print_cr("continuation_for_implicit_exception stub: %lx", stub); +#endif -+ } else if (sig == SIGILL && nativeInstruction_at(pc)->is_stop()) { -+ // Pull a pointer to the error message out of the instruction -+ // stream. -+ const uint64_t *detail_msg_ptr -+ = (uint64_t*)(pc + 4/*NativeInstruction::instruction_size*/); -+ const char *detail_msg = (const char *)*detail_msg_ptr; -+ const char *msg = "stop"; -+ if (TraceTraps) { -+ tty->print_cr("trap: %s: (SIGILL)", msg); -+ } -+ -+ // End life with a fatal error, message and detail message and the context. -+ // Note: no need to do any post-processing here (e.g. 
signal chaining) -+ va_list va_dummy; -+ VMError::report_and_die(thread, uc, nullptr, 0, msg, detail_msg, va_dummy); -+ va_end(va_dummy); -+ -+ ShouldNotReachHere(); + } + } else if ((thread->thread_state() == _thread_in_vm || + thread->thread_state() == _thread_in_native) && @@ -111876,7 +124034,6 @@ index 00000000000..b32ffe9105e + if (context == NULL) return; + + const ucontext_t *uc = (const ucontext_t*)context; -+ + st->print_cr("Registers:"); + st->print( "ZERO=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.__gregs[0]); + st->print(", RA=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.__gregs[1]); @@ -111919,23 +124076,19 @@ index 00000000000..b32ffe9105e + st->print(", S8=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.__gregs[31]); + st->cr(); + st->cr(); -+} + -+void os::print_tos_pc(outputStream *st, const void *context) { -+ if (context == NULL) return; -+ -+ const ucontext_t* uc = (const ucontext_t*)context; -+ -+ address sp = (address)os::Linux::ucontext_get_sp(uc); -+ print_tos(st, sp); ++ intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc); ++ st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", p2i(sp)); ++ print_hex_dump(st, (address)(sp - 32), (address)(sp + 32), sizeof(intptr_t)); + st->cr(); + + // Note: it may be unsafe to inspect memory near pc. For example, pc may + // point to garbage if entry point in an nmethod is corrupted. Leave + // this at the end, and hope for the best. -+ address pc = os::fetch_frame_from_context(uc).pc(); -+ print_instructions(st, pc); -+ st->cr(); ++ address pc = os::Posix::ucontext_get_pc(uc); ++ st->print_cr("Instructions: (pc=" PTR_FORMAT ")", p2i(pc)); ++ print_hex_dump(st, pc - 64, pc + 64, sizeof(char)); ++ Disassembler::decode(pc - 80, pc + 80, st); +} + +void os::setup_fpu() { @@ -112062,68 +124215,6 @@ index 00000000000..cf3a596387c +} + +#endif // OS_CPU_LINUX_LOONGARCH_PREFETCH_LINUX_LOONGARCH_INLINE_HPP -diff --git a/src/hotspot/os_cpu/linux_loongarch/safefetch_linux_loongarch64.S b/src/hotspot/os_cpu/linux_loongarch/safefetch_linux_loongarch64.S -new file mode 100644 -index 00000000000..fdc6da358e5 ---- /dev/null -+++ b/src/hotspot/os_cpu/linux_loongarch/safefetch_linux_loongarch64.S -@@ -0,0 +1,56 @@ -+/* -+ * Copyright (c) 2022 SAP SE. All rights reserved. -+ * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. -+ * Copyright (c) 2022, Loongson Technology. All rights reserved. -+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -+ * -+ * This code is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 only, as -+ * published by the Free Software Foundation. -+ * -+ * This code is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -+ * version 2 for more details (a copy is included in the LICENSE file that -+ * accompanied this code). -+ * -+ * You should have received a copy of the GNU General Public License version -+ * 2 along with this work; if not, write to the Free Software Foundation, -+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -+ * or visit www.oracle.com if you need additional information or have any -+ * questions. 
-+ * -+ */ -+ -+ .globl SafeFetchN_impl -+ .globl _SafeFetchN_fault -+ .globl _SafeFetchN_continuation -+ .globl SafeFetch32_impl -+ .globl _SafeFetch32_fault -+ .globl _SafeFetch32_continuation -+ -+ # Support for int SafeFetch32(int* address, int defaultval); -+ # -+ # a0 : address -+ # a1 : defaultval -+SafeFetch32_impl: -+_SafeFetch32_fault: -+ ld.w $r4, $r4, 0 -+ jr $r1 -+_SafeFetch32_continuation: -+ or $r4, $r5, $r0 -+ jr $r1 -+ -+ # Support for intptr_t SafeFetchN(intptr_t* address, intptr_t defaultval); -+ # -+ # a0 : address -+ # a1 : defaultval -+SafeFetchN_impl: -+_SafeFetchN_fault: -+ ld.d $r4, $r4, 0 -+ jr $r1 -+_SafeFetchN_continuation: -+ or $r4, $r5, $r0 -+ jr $r1 diff --git a/src/hotspot/os_cpu/linux_loongarch/thread_linux_loongarch.cpp b/src/hotspot/os_cpu/linux_loongarch/thread_linux_loongarch.cpp new file mode 100644 index 00000000000..9204302bca8 @@ -112368,107 +124459,6 @@ index 00000000000..a39cb79bb1e +#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) + +#endif // OS_CPU_LINUX_LOONGARCH_VMSTRUCTS_LINUX_LOONGARCH_HPP -diff --git a/src/hotspot/os_cpu/linux_loongarch/vm_version_linux_loongarch.cpp b/src/hotspot/os_cpu/linux_loongarch/vm_version_linux_loongarch.cpp -new file mode 100644 -index 00000000000..3711a7036a1 ---- /dev/null -+++ b/src/hotspot/os_cpu/linux_loongarch/vm_version_linux_loongarch.cpp -@@ -0,0 +1,95 @@ -+/* -+ * Copyright (c) 2006, 2021, Oracle and/or its affiliates. All rights reserved. -+ * Copyright (c) 2015, 2023, Loongson Technology. All rights reserved. -+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -+ * -+ * This code is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 only, as -+ * published by the Free Software Foundation. -+ * -+ * This code is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -+ * version 2 for more details (a copy is included in the LICENSE file that -+ * accompanied this code). -+ * -+ * You should have received a copy of the GNU General Public License version -+ * 2 along with this work; if not, write to the Free Software Foundation, -+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -+ * or visit www.oracle.com if you need additional information or have any -+ * questions. 
-+ * -+ */ -+ -+#include "precompiled.hpp" -+#include "asm/register.hpp" -+#include "runtime/os.hpp" -+#include "runtime/os.inline.hpp" -+#include "runtime/vm_version.hpp" -+ -+#include -+#include -+ -+#ifndef HWCAP_LOONGARCH_LAM -+#define HWCAP_LOONGARCH_LAM (1 << 1) -+#endif -+ -+#ifndef HWCAP_LOONGARCH_UAL -+#define HWCAP_LOONGARCH_UAL (1 << 2) -+#endif -+ -+#ifndef HWCAP_LOONGARCH_LSX -+#define HWCAP_LOONGARCH_LSX (1 << 4) -+#endif -+ -+#ifndef HWCAP_LOONGARCH_LASX -+#define HWCAP_LOONGARCH_LASX (1 << 5) -+#endif -+ -+#ifndef HWCAP_LOONGARCH_COMPLEX -+#define HWCAP_LOONGARCH_COMPLEX (1 << 7) -+#endif -+ -+#ifndef HWCAP_LOONGARCH_CRYPTO -+#define HWCAP_LOONGARCH_CRYPTO (1 << 8) -+#endif -+ -+#ifndef HWCAP_LOONGARCH_LBT_X86 -+#define HWCAP_LOONGARCH_LBT_X86 (1 << 10) -+#endif -+ -+#ifndef HWCAP_LOONGARCH_LBT_ARM -+#define HWCAP_LOONGARCH_LBT_ARM (1 << 11) -+#endif -+ -+#ifndef HWCAP_LOONGARCH_LBT_MIPS -+#define HWCAP_LOONGARCH_LBT_MIPS (1 << 12) -+#endif -+ -+void VM_Version::get_os_cpu_info() { -+ -+ uint64_t auxv = getauxval(AT_HWCAP); -+ -+ static_assert(CPU_LAM == HWCAP_LOONGARCH_LAM, "Flag CPU_LAM must follow Linux HWCAP"); -+ static_assert(CPU_UAL == HWCAP_LOONGARCH_UAL, "Flag CPU_UAL must follow Linux HWCAP"); -+ static_assert(CPU_LSX == HWCAP_LOONGARCH_LSX, "Flag CPU_LSX must follow Linux HWCAP"); -+ static_assert(CPU_LASX == HWCAP_LOONGARCH_LASX, "Flag CPU_LASX must follow Linux HWCAP"); -+ static_assert(CPU_COMPLEX == HWCAP_LOONGARCH_COMPLEX, "Flag CPU_COMPLEX must follow Linux HWCAP"); -+ static_assert(CPU_CRYPTO == HWCAP_LOONGARCH_CRYPTO, "Flag CPU_CRYPTO must follow Linux HWCAP"); -+ static_assert(CPU_LBT_X86 == HWCAP_LOONGARCH_LBT_X86, "Flag CPU_LBT_X86 must follow Linux HWCAP"); -+ static_assert(CPU_LBT_ARM == HWCAP_LOONGARCH_LBT_ARM, "Flag CPU_LBT_ARM must follow Linux HWCAP"); -+ static_assert(CPU_LBT_MIPS == HWCAP_LOONGARCH_LBT_MIPS, "Flag CPU_LBT_MIPS must follow Linux HWCAP"); -+ -+ _features = auxv & ( -+ HWCAP_LOONGARCH_LAM | -+ HWCAP_LOONGARCH_UAL | -+ HWCAP_LOONGARCH_LSX | -+ HWCAP_LOONGARCH_LASX | -+ HWCAP_LOONGARCH_COMPLEX | -+ HWCAP_LOONGARCH_CRYPTO | -+ HWCAP_LOONGARCH_LBT_X86 | -+ HWCAP_LOONGARCH_LBT_ARM | -+ HWCAP_LOONGARCH_LBT_MIPS); -+} diff --git a/src/hotspot/os_cpu/linux_mips/assembler_linux_mips.cpp b/src/hotspot/os_cpu/linux_mips/assembler_linux_mips.cpp new file mode 100644 index 00000000000..30719a0340b @@ -112963,7 +124953,7 @@ index 00000000000..36c8d810c3c + diff --git a/src/hotspot/os_cpu/linux_mips/orderAccess_linux_mips.hpp b/src/hotspot/os_cpu/linux_mips/orderAccess_linux_mips.hpp new file mode 100644 -index 00000000000..a92bf43bdbb +index 00000000000..460d118c869 --- /dev/null +++ b/src/hotspot/os_cpu/linux_mips/orderAccess_linux_mips.hpp @@ -0,0 +1,52 @@ @@ -113004,7 +124994,6 @@ index 00000000000..a92bf43bdbb + __asm__ __volatile__ ("nop" : : : "memory"); \ + else \ + __asm__ __volatile__ ("sync" : : : "memory"); -+#define inlasm_synci() __asm__ __volatile__ ("synci 0($0)" : : : "memory"); + +inline void OrderAccess::loadload() { inlasm_sync(); } +inline void OrderAccess::storestore() { inlasm_sync(); } @@ -113014,20 +125003,21 @@ index 00000000000..a92bf43bdbb +inline void OrderAccess::acquire() { inlasm_sync(); } +inline void OrderAccess::release() { inlasm_sync(); } +inline void OrderAccess::fence() { inlasm_sync(); } -+inline void OrderAccess::cross_modify_fence_impl() { inlasm_synci(); } ++inline void OrderAccess::cross_modify_fence_impl() { inlasm_sync(); } ++ + +#undef inlasm_sync + +#endif // 
OS_CPU_LINUX_MIPS_VM_ORDERACCESS_LINUX_MIPS_HPP diff --git a/src/hotspot/os_cpu/linux_mips/os_linux_mips.cpp b/src/hotspot/os_cpu/linux_mips/os_linux_mips.cpp new file mode 100644 -index 00000000000..ff1af7beb68 +index 00000000000..df17c3f8853 --- /dev/null +++ b/src/hotspot/os_cpu/linux_mips/os_linux_mips.cpp -@@ -0,0 +1,817 @@ +@@ -0,0 +1,810 @@ +/* + * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved. -+ * Copyright (c) 2015, 2023, Loongson Technology. All rights reserved. ++ * Copyright (c) 2015, 2022, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it @@ -113275,7 +125265,7 @@ index 00000000000..ff1af7beb68 +#endif + + // Handle signal from NativeJump::patch_verified_entry(). -+ if (sig == SIGILL && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) { ++ if (sig == SIGILL & nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) { +#ifdef PRINT_SIGNAL_HANDLE + tty->print_cr("verified entry = %lx, sig=%d", nativeInstruction_at(pc), sig); +#endif @@ -113747,7 +125737,6 @@ index 00000000000..ff1af7beb68 + if (context == NULL) return; + + const ucontext_t *uc = (const ucontext_t*)context; -+ + st->print_cr("Registers:"); + st->print( "R0=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[0]); + st->print(", AT=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[1]); @@ -113790,12 +125779,6 @@ index 00000000000..ff1af7beb68 + st->print(", RA=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[31]); + st->cr(); + st->cr(); -+} -+ -+void os::print_tos_pc(outputStream *st, const void *context) { -+ if (context == NULL) return; -+ -+ const ucontext_t* uc = (const ucontext_t*)context; + + intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc); + st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", p2i(sp)); @@ -113951,72 +125934,6 @@ index 00000000000..93490345f0b +} + +#endif // OS_CPU_LINUX_MIPS_VM_PREFETCH_LINUX_MIPS_INLINE_HPP -diff --git a/src/hotspot/os_cpu/linux_mips/safefetch_linux_mips64.S b/src/hotspot/os_cpu/linux_mips/safefetch_linux_mips64.S -new file mode 100644 -index 00000000000..fc6ee6eca65 ---- /dev/null -+++ b/src/hotspot/os_cpu/linux_mips/safefetch_linux_mips64.S -@@ -0,0 +1,60 @@ -+/* -+ * Copyright (c) 2022 SAP SE. All rights reserved. -+ * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. -+ * Copyright (c) 2023, Loongson Technology. All rights reserved. -+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -+ * -+ * This code is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 only, as -+ * published by the Free Software Foundation. -+ * -+ * This code is distributed in the hope that it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -+ * version 2 for more details (a copy is included in the LICENSE file that -+ * accompanied this code). -+ * -+ * You should have received a copy of the GNU General Public License version -+ * 2 along with this work; if not, write to the Free Software Foundation, -+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -+ * or visit www.oracle.com if you need additional information or have any -+ * questions. 
-+ * -+ */ -+ -+ .globl SafeFetchN_impl -+ .globl _SafeFetchN_fault -+ .globl _SafeFetchN_continuation -+ .globl SafeFetch32_impl -+ .globl _SafeFetch32_fault -+ .globl _SafeFetch32_continuation -+ -+ # Support for int SafeFetch32(int* address, int defaultval); -+ # -+ # a0 : address -+ # a1 : defaultval -+SafeFetch32_impl: -+_SafeFetch32_fault: -+ lw $2, 0($4) -+ j $31 -+ nop -+_SafeFetch32_continuation: -+ or $2, $5, $0 -+ j $31 -+ nop -+ -+ # Support for intptr_t SafeFetchN(intptr_t* address, intptr_t defaultval); -+ # -+ # a0 : address -+ # a1 : defaultval -+SafeFetchN_impl: -+_SafeFetchN_fault: -+ ld $2, 0($4) -+ j $31 -+ nop -+_SafeFetchN_continuation: -+ or $2, $5, $0 -+ j $31 -+ nop diff --git a/src/hotspot/os_cpu/linux_mips/thread_linux_mips.cpp b/src/hotspot/os_cpu/linux_mips/thread_linux_mips.cpp new file mode 100644 index 00000000000..4372eb41e9c @@ -114298,58 +126215,22 @@ index 00000000000..93e4bea04c6 +#include "precompiled.hpp" +#include "runtime/os.hpp" +#include "runtime/vm_version.hpp" -diff --git a/src/hotspot/share/adlc/formssel.cpp b/src/hotspot/share/adlc/formssel.cpp -index b45f432dac9..02bec17ddf3 100644 ---- a/src/hotspot/share/adlc/formssel.cpp -+++ b/src/hotspot/share/adlc/formssel.cpp -@@ -22,6 +22,12 @@ - * - */ - -+/* -+ * This file has been modified by Loongson Technology in 2023, These -+ * modifications are Copyright (c) 2023, Loongson Technology, and are made -+ * available on the same license terms set forth above. -+ */ -+ - // FORMS.CPP - Definitions for ADL Parser Forms Classes - #include "adlc.hpp" - -@@ -4124,6 +4130,7 @@ bool MatchRule::is_ideal_membar() const { - !strcmp(_opType,"MemBarVolatile") || - !strcmp(_opType,"MemBarCPUOrder") || - !strcmp(_opType,"MemBarStoreStore") || -+ !strcmp(_opType,"SameAddrLoadFence" ) || - !strcmp(_opType,"OnSpinWait"); - } - diff --git a/src/hotspot/share/asm/codeBuffer.cpp b/src/hotspot/share/asm/codeBuffer.cpp -index 0012152d48d..e3660cab271 100644 +index 0012152d48d..8dfa007e71a 100644 --- a/src/hotspot/share/asm/codeBuffer.cpp +++ b/src/hotspot/share/asm/codeBuffer.cpp -@@ -22,6 +22,12 @@ - * - */ - -+/* -+ * This file has been modified by Loongson Technology in 2023. These -+ * modifications are Copyright (c) 2018, 2023, Loongson Technology, and are made -+ * available on the same license terms set forth above. 
-+ */ -+ - #include "precompiled.hpp" - #include "asm/codeBuffer.hpp" - #include "code/oopRecorder.inline.hpp" -@@ -330,6 +336,7 @@ void CodeSection::relocate(address at, RelocationHolder const& spec, int format) +@@ -330,6 +330,9 @@ void CodeSection::relocate(address at, RelocationHolder const& spec, int format) assert(rtype == relocInfo::none || rtype == relocInfo::runtime_call_type || rtype == relocInfo::internal_word_type|| -+ NOT_ZERO(MIPS64_ONLY(rtype == relocInfo::internal_pc_type ||)) ++#ifdef MIPS ++ rtype == relocInfo::internal_pc_type || ++#endif rtype == relocInfo::section_word_type || rtype == relocInfo::external_word_type, "code needs relocation information"); diff --git a/src/hotspot/share/c1/c1_Compiler.cpp b/src/hotspot/share/c1/c1_Compiler.cpp -index de173c64af1..df93c01d893 100644 +index 3795b4e0192..9830cd9c27c 100644 --- a/src/hotspot/share/c1/c1_Compiler.cpp +++ b/src/hotspot/share/c1/c1_Compiler.cpp @@ -43,6 +43,12 @@ @@ -114365,7 +126246,7 @@ index de173c64af1..df93c01d893 100644 Compiler::Compiler() : AbstractCompiler(compiler_c1) { } -@@ -212,7 +218,7 @@ bool Compiler::is_intrinsic_supported(const methodHandle& method) { +@@ -211,7 +217,7 @@ bool Compiler::is_intrinsic_supported(const methodHandle& method) { case vmIntrinsics::_updateCRC32: case vmIntrinsics::_updateBytesCRC32: case vmIntrinsics::_updateByteBufferCRC32: @@ -114375,238 +126256,320 @@ index de173c64af1..df93c01d893 100644 case vmIntrinsics::_updateDirectByteBufferCRC32C: #endif diff --git a/src/hotspot/share/c1/c1_LIR.cpp b/src/hotspot/share/c1/c1_LIR.cpp -index 308f3a09c15..53a68cdb2fd 100644 +index 62cff4c7505..f1af08d5df0 100644 --- a/src/hotspot/share/c1/c1_LIR.cpp +++ b/src/hotspot/share/c1/c1_LIR.cpp -@@ -22,6 +22,12 @@ - * - */ - -+/* -+ * This file has been modified by Loongson Technology in 2023, These -+ * modifications are Copyright (c) 2022, 2023, Loongson Technology, and are made -+ * available on the same license terms set forth above. 
-+ */ -+ - #include "precompiled.hpp" - #include "c1/c1_CodeStubs.hpp" - #include "c1/c1_InstructionPrinter.hpp" -@@ -190,6 +196,8 @@ void LIR_Op2::verify() const { - case lir_cmove: - #ifdef RISCV - assert(false, "lir_cmove is LIR_Op4 on RISCV"); -+#elif defined(LOONGARCH) -+ assert(false, "lir_cmove is LIR_Op4 on LoongArch"); +@@ -236,6 +236,18 @@ void LIR_Op2::verify() const { #endif - case lir_xchg: - break; -@@ -241,7 +249,7 @@ void LIR_Op2::verify() const { + } ++void LIR_Op4::verify() const { ++#ifdef ASSERT ++ switch (code()) { ++ case lir_cmp_cmove: ++ break; ++ ++ default: ++ assert(!result_opr()->is_register() || !result_opr()->is_oop_register(), ++ "can't produce oops from arith"); ++ } ++#endif ++} LIR_OpBranch::LIR_OpBranch(LIR_Condition cond, BlockBegin* block) --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - : LIR_Op2(lir_branch, cond, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, (CodeEmitInfo*)NULL) - #else : LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*)NULL) -@@ -254,7 +262,7 @@ LIR_OpBranch::LIR_OpBranch(LIR_Condition cond, BlockBegin* block) +@@ -291,6 +303,56 @@ void LIR_OpBranch::negate_cond() { } - LIR_OpBranch::LIR_OpBranch(LIR_Condition cond, CodeStub* stub) : --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - LIR_Op2(lir_branch, cond, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, (CodeEmitInfo*)NULL) - #else - LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*)NULL) -@@ -267,7 +275,7 @@ LIR_OpBranch::LIR_OpBranch(LIR_Condition cond, CodeStub* stub) : - } - LIR_OpBranch::LIR_OpBranch(LIR_Condition cond, BlockBegin* block, BlockBegin* ublock) --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - : LIR_Op2(lir_cond_float_branch, cond, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, (CodeEmitInfo*)NULL) - #else - : LIR_Op(lir_cond_float_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*)NULL) -@@ -512,6 +520,7 @@ void LIR_OpVisitState::visit(LIR_Op* op) { ++LIR_OpCmpBranch::LIR_OpCmpBranch(LIR_Condition cond, LIR_Opr left, LIR_Opr right, CodeStub* stub, CodeEmitInfo* info) ++ : LIR_Op2(lir_cmp_branch, cond, left, right, info) ++ , _label(stub->entry()) ++ , _block(NULL) ++ , _ublock(NULL) ++ , _stub(stub) { ++} ++ ++LIR_OpCmpBranch::LIR_OpCmpBranch(LIR_Condition cond, LIR_Opr left, LIR_Opr right, BlockBegin* block, CodeEmitInfo* info) ++ : LIR_Op2(lir_cmp_branch, cond, left, right, info) ++ , _label(block->label()) ++ , _block(block) ++ , _ublock(NULL) ++ , _stub(NULL) { ++} ++ ++LIR_OpCmpBranch::LIR_OpCmpBranch(LIR_Condition cond, LIR_Opr left, LIR_Opr right, BlockBegin* block, BlockBegin* ublock, CodeEmitInfo* info) ++ : LIR_Op2(lir_cmp_float_branch, cond, left, right, info) ++ , _label(block->label()) ++ , _block(block) ++ , _ublock(ublock) ++ , _stub(NULL) { ++} ++ ++void LIR_OpCmpBranch::change_block(BlockBegin* b) { ++ assert(_block != NULL, "must have old block"); ++ assert(_block->label() == label(), "must be equal"); ++ ++ _block = b; ++ _label = b->label(); ++} ++ ++void LIR_OpCmpBranch::change_ublock(BlockBegin* b) { ++ assert(_ublock != NULL, "must have old block"); ++ ++ _ublock = b; ++} ++ ++void LIR_OpCmpBranch::negate_cond() { ++ switch (condition()) { ++ case lir_cond_equal: set_condition(lir_cond_notEqual); break; ++ case lir_cond_notEqual: set_condition(lir_cond_equal); break; ++ case lir_cond_less: set_condition(lir_cond_greaterEqual); break; ++ case lir_cond_lessEqual: set_condition(lir_cond_greater); break; ++ case lir_cond_greaterEqual: set_condition(lir_cond_less); break; ++ case 
lir_cond_greater: set_condition(lir_cond_lessEqual); break; ++ default: ShouldNotReachHere(); ++ } ++} ++ + LIR_OpTypeCheck::LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass, + LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, + bool fast_check, CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, +@@ -497,10 +559,7 @@ void LIR_OpVisitState::visit(LIR_Op* op) { assert(opConvert->_info == NULL, "must be"); if (opConvert->_opr->is_valid()) do_input(opConvert->_opr); if (opConvert->_result->is_valid()) do_output(opConvert->_result); +-#ifdef PPC32 +- if (opConvert->_tmp1->is_valid()) do_temp(opConvert->_tmp1); +- if (opConvert->_tmp2->is_valid()) do_temp(opConvert->_tmp2); +-#endif + if (opConvert->_tmp->is_valid()) do_temp(opConvert->_tmp); - #ifdef PPC32 - if (opConvert->_tmp1->is_valid()) do_temp(opConvert->_tmp1); - if (opConvert->_tmp2->is_valid()) do_temp(opConvert->_tmp2); -@@ -528,7 +537,7 @@ void LIR_OpVisitState::visit(LIR_Op* op) { - assert(op->as_OpBranch() != NULL, "must be"); - LIR_OpBranch* opBranch = (LIR_OpBranch*)op; - --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - assert(opBranch->_tmp1->is_illegal() && opBranch->_tmp2->is_illegal() && - opBranch->_tmp3->is_illegal() && opBranch->_tmp4->is_illegal() && - opBranch->_tmp5->is_illegal(), "not used"); -@@ -625,7 +634,7 @@ void LIR_OpVisitState::visit(LIR_Op* op) { + do_stub(opConvert->_stub); + + break; +@@ -597,6 +656,25 @@ void LIR_OpVisitState::visit(LIR_Op* op) { + break; + } + ++// LIR_OpCmpBranch; ++ case lir_cmp_branch: // may have info, input and result register always invalid ++ case lir_cmp_float_branch: // may have info, input and result register always invalid ++ { ++ assert(op->as_OpCmpBranch() != NULL, "must be"); ++ LIR_OpCmpBranch* opCmpBranch = (LIR_OpCmpBranch*)op; ++ assert(opCmpBranch->_tmp2->is_illegal() && opCmpBranch->_tmp3->is_illegal() && ++ opCmpBranch->_tmp4->is_illegal() && opCmpBranch->_tmp5->is_illegal(), "not used"); ++ ++ if (opCmpBranch->_info) do_info(opCmpBranch->_info); ++ if (opCmpBranch->_opr1->is_valid()) do_input(opCmpBranch->_opr1); ++ if (opCmpBranch->_opr2->is_valid()) do_input(opCmpBranch->_opr2); ++ if (opCmpBranch->_tmp1->is_valid()) do_temp(opCmpBranch->_tmp1); ++ if (opCmpBranch->_stub != NULL) opCmpBranch->stub()->visit(this); ++ assert(opCmpBranch->_result->is_illegal(), "not used"); ++ ++ break; ++ } ++ + // special handling for cmove: right input operand must not be equal // to the result operand, otherwise the backend fails case lir_cmove: - { --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - assert(op->as_Op4() != NULL, "must be"); - LIR_Op4* op4 = (LIR_Op4*)op; +@@ -697,6 +775,29 @@ void LIR_OpVisitState::visit(LIR_Op* op) { + break; + } -@@ -1095,7 +1104,7 @@ void LIR_Op3::emit_code(LIR_Assembler* masm) { - masm->emit_op3(this); ++// LIR_Op4 ++ // special handling for cmp cmove: src2(opr4) operand must not be equal ++ // to the result operand, otherwise the backend fails ++ case lir_cmp_cmove: ++ { ++ assert(op->as_Op4() != NULL, "must be"); ++ LIR_Op4* op4 = (LIR_Op4*)op; ++ ++ assert(op4->_info == NULL, "not used"); ++ assert(op4->_opr1->is_valid() && op4->_opr2->is_valid() && ++ op4->_opr3->is_valid() && op4->_opr4->is_valid() && ++ op4->_result->is_valid(), "used"); ++ ++ do_input(op4->_opr1); ++ do_input(op4->_opr2); ++ do_input(op4->_opr3); ++ do_input(op4->_opr4); ++ do_temp(op4->_opr4); ++ do_output(op4->_result); ++ ++ break; ++ } ++ + // LIR_OpJavaCall + case lir_static_call: + case lir_optvirtual_call: +@@ 
-1022,6 +1123,13 @@ void LIR_Op2::emit_code(LIR_Assembler* masm) { + masm->emit_op2(this); } --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - void LIR_Op4::emit_code(LIR_Assembler* masm) { - masm->emit_op4(this); ++void LIR_OpCmpBranch::emit_code(LIR_Assembler* masm) { ++ masm->emit_opCmpBranch(this); ++ if (stub()) { ++ masm->append_code_stub(stub()); ++ } ++} ++ + void LIR_OpAllocArray::emit_code(LIR_Assembler* masm) { + masm->emit_alloc_array(this); + masm->append_code_stub(stub()); +@@ -1042,6 +1150,10 @@ void LIR_Op3::emit_code(LIR_Assembler* masm) { + masm->emit_op3(this); } -@@ -1141,7 +1150,7 @@ LIR_List::LIR_List(Compilation* compilation, BlockBegin* block) - , _file(NULL) - , _line(0) - #endif --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - , _cmp_opr1(LIR_OprFact::illegalOpr) - , _cmp_opr2(LIR_OprFact::illegalOpr) - #endif -@@ -1162,7 +1171,7 @@ void LIR_List::set_file_and_line(const char * file, int line) { + ++void LIR_Op4::emit_code(LIR_Assembler* masm) { ++ masm->emit_op4(this); ++} ++ + void LIR_OpLock::emit_code(LIR_Assembler* masm) { + masm->emit_lock(this); + if (stub()) { +@@ -1418,8 +1530,7 @@ void LIR_List::null_check(LIR_Opr opr, CodeEmitInfo* info, bool deoptimize_on_nu + if (deoptimize_on_null) { + // Emit an explicit null check and deoptimize if opr is null + CodeStub* deopt = new DeoptimizeStub(info, Deoptimization::Reason_null_check, Deoptimization::Action_none); +- cmp(lir_cond_equal, opr, LIR_OprFact::oopConst(NULL)); +- branch(lir_cond_equal, deopt); ++ cmp_branch(lir_cond_equal, opr, LIR_OprFact::oopConst(NULL), deopt); + } else { + // Emit an implicit null check + append(new LIR_Op1(lir_null_check, opr, info)); +@@ -1667,6 +1778,8 @@ const char * LIR_Op::name() const { + case lir_cmp_l2i: s = "cmp_l2i"; break; + case lir_ucmp_fd2i: s = "ucomp_fd2i"; break; + case lir_cmp_fd2i: s = "comp_fd2i"; break; ++ case lir_cmp_branch: s = "cmp_branch"; break; ++ case lir_cmp_float_branch: s = "cmp_fbranch"; break; + case lir_cmove: s = "cmove"; break; + case lir_add: s = "add"; break; + case lir_sub: s = "sub"; break; +@@ -1690,6 +1803,8 @@ const char * LIR_Op::name() const { + case lir_irem: s = "irem"; break; + case lir_fmad: s = "fmad"; break; + case lir_fmaf: s = "fmaf"; break; ++ // LIR_Op4 ++ case lir_cmp_cmove: s = "cmp_cmove"; break; + // LIR_OpJavaCall + case lir_static_call: s = "static"; break; + case lir_optvirtual_call: s = "optvirtual"; break; +@@ -1840,6 +1955,26 @@ void LIR_OpBranch::print_instr(outputStream* out) const { + } } - #endif --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - void LIR_List::set_cmp_oprs(LIR_Op* op) { - switch (op->code()) { - case lir_cmp: -@@ -1185,7 +1194,7 @@ void LIR_List::set_cmp_oprs(LIR_Op* op) { - break; - #if INCLUDE_ZGC - case lir_zloadbarrier_test: -- _cmp_opr1 = FrameMap::as_opr(t1); -+ _cmp_opr1 = FrameMap::as_opr(RISCV_ONLY(t1) LOONGARCH64_ONLY(SCR1)); - _cmp_opr2 = LIR_OprFact::intConst(0); - break; - #endif -@@ -1924,7 +1933,7 @@ void LIR_Op1::print_patch_code(outputStream* out, LIR_PatchCode code) { - // LIR_OpBranch - void LIR_OpBranch::print_instr(outputStream* out) const { - print_condition(out, cond()); out->print(" "); --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - in_opr1()->print(out); out->print(" "); - in_opr2()->print(out); out->print(" "); - #endif -@@ -1963,6 +1972,9 @@ void LIR_OpConvert::print_instr(outputStream* out) const { ++// LIR_OpCmpBranch ++void LIR_OpCmpBranch::print_instr(outputStream* out) const { ++ print_condition(out, condition()); 
out->print(" "); ++ in_opr1()->print(out); out->print(" "); ++ in_opr2()->print(out); out->print(" "); ++ if (block() != NULL) { ++ out->print("[B%d] ", block()->block_id()); ++ } else if (stub() != NULL) { ++ out->print("["); ++ stub()->print_name(out); ++ out->print(": " INTPTR_FORMAT "]", p2i(stub())); ++ if (stub()->info() != NULL) out->print(" [bci:%d]", stub()->info()->stack()->bci()); ++ } else { ++ out->print("[label:" INTPTR_FORMAT "] ", p2i(label())); ++ } ++ if (ublock() != NULL) { ++ out->print("unordered: [B%d] ", ublock()->block_id()); ++ } ++} ++ + void LIR_Op::print_condition(outputStream* out, LIR_Condition cond) { + switch(cond) { + case lir_cond_equal: out->print("[EQ]"); break; +@@ -1860,12 +1995,9 @@ void LIR_OpConvert::print_instr(outputStream* out) const { print_bytecode(out, bytecode()); in_opr()->print(out); out->print(" "); result_opr()->print(out); out->print(" "); +-#ifdef PPC32 +- if(tmp1()->is_valid()) { +- tmp1()->print(out); out->print(" "); +- tmp2()->print(out); out->print(" "); + if(tmp()->is_valid()) { + tmp()->print(out); out->print(" "); -+ } - #ifdef PPC32 - if(tmp1()->is_valid()) { - tmp1()->print(out); out->print(" "); -@@ -2014,7 +2026,7 @@ void LIR_OpRoundFP::print_instr(outputStream* out) const { + } +-#endif + } - // LIR_Op2 - void LIR_Op2::print_instr(outputStream* out) const { --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - if (code() == lir_cmp || code() == lir_branch || code() == lir_cond_float_branch) { - #else - if (code() == lir_cmove || code() == lir_cmp) { -@@ -2069,7 +2081,7 @@ void LIR_Op3::print_instr(outputStream* out) const { - result_opr()->print(out); + void LIR_OpConvert::print_bytecode(outputStream* out, Bytecodes::Code code) { +@@ -1963,6 +2095,19 @@ void LIR_Op3::print_instr(outputStream* out) const { } --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - // LIR_Op4 - void LIR_Op4::print_instr(outputStream* out) const { - print_condition(out, condition()); out->print(" "); + ++// LIR_Op4 ++void LIR_Op4::print_instr(outputStream* out) const { ++ if (code() == lir_cmp_cmove) { ++ print_condition(out, condition()); out->print(" "); ++ } ++ in_opr1()->print(out); out->print(" "); ++ in_opr2()->print(out); out->print(" "); ++ in_opr3()->print(out); out->print(" "); ++ in_opr4()->print(out); out->print(" "); ++ result_opr()->print(out); ++} ++ ++ + void LIR_OpLock::print_instr(outputStream* out) const { + hdr_opr()->print(out); out->print(" "); + obj_opr()->print(out); out->print(" "); diff --git a/src/hotspot/share/c1/c1_LIR.hpp b/src/hotspot/share/c1/c1_LIR.hpp -index 717404e9726..e9a981b20c2 100644 +index 2342e6117eb..00c3f938e11 100644 --- a/src/hotspot/share/c1/c1_LIR.hpp +++ b/src/hotspot/share/c1/c1_LIR.hpp -@@ -22,6 +22,12 @@ - * - */ - -+/* -+ * This file has been modified by Loongson Technology in 2023, These -+ * modifications are Copyright (c) 2022, 2023, Loongson Technology, and are made -+ * available on the same license terms set forth above. 
-+ */ -+ - #ifndef SHARE_C1_C1_LIR_HPP - #define SHARE_C1_C1_LIR_HPP - -@@ -869,7 +875,7 @@ class LIR_Op2; +@@ -866,9 +866,11 @@ class LIR_OpAllocObj; + class LIR_OpReturn; + class LIR_OpRoundFP; + class LIR_Op2; ++class LIR_OpCmpBranch; class LIR_OpDelay; class LIR_Op3; class LIR_OpAllocArray; --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - class LIR_Op4; - #endif ++class LIR_Op4; class LIR_OpCall; -@@ -917,7 +923,7 @@ enum LIR_Code { - , lir_null_check - , lir_return - , lir_leal --#ifndef RISCV -+#if !defined(RISCV) && !defined(LOONGARCH) - , lir_branch - , lir_cond_float_branch - #endif -@@ -931,7 +937,7 @@ enum LIR_Code { - , lir_load_klass - , end_op1 - , begin_op2 --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - , lir_branch - , lir_cond_float_branch - #endif -@@ -939,7 +945,7 @@ enum LIR_Code { + class LIR_OpJavaCall; + class LIR_OpRTCall; +@@ -928,6 +930,8 @@ enum LIR_Code { , lir_cmp_l2i , lir_ucmp_fd2i , lir_cmp_fd2i --#ifndef RISCV -+#if !defined(RISCV) && !defined(LOONGARCH) ++ , lir_cmp_branch ++ , lir_cmp_float_branch , lir_cmove - #endif , lir_add -@@ -969,7 +975,7 @@ enum LIR_Code { + , lir_sub +@@ -956,6 +960,9 @@ enum LIR_Code { , lir_fmad , lir_fmaf , end_op3 --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - , begin_op4 - , lir_cmove - , end_op4 -@@ -1010,7 +1016,7 @@ enum LIR_Code { - , begin_opAssert - , lir_assert - , end_opAssert --#if defined(RISCV) && defined(INCLUDE_ZGC) -+#if (defined(RISCV) || defined(LOONGARCH)) && defined(INCLUDE_ZGC) - , begin_opZLoadBarrierTest - , lir_zloadbarrier_test - , end_opZLoadBarrierTest -@@ -1151,7 +1157,7 @@ class LIR_Op: public CompilationResourceObj { ++ , begin_op4 ++ , lir_cmp_cmove ++ , end_op4 + , begin_opJavaCall + , lir_static_call + , lir_optvirtual_call +@@ -1121,6 +1128,7 @@ class LIR_Op: public CompilationResourceObj { + virtual LIR_OpAllocObj* as_OpAllocObj() { return NULL; } + virtual LIR_OpRoundFP* as_OpRoundFP() { return NULL; } + virtual LIR_OpBranch* as_OpBranch() { return NULL; } ++ virtual LIR_OpCmpBranch* as_OpCmpBranch() { return NULL; } + virtual LIR_OpReturn* as_OpReturn() { return NULL; } + virtual LIR_OpRTCall* as_OpRTCall() { return NULL; } + virtual LIR_OpConvert* as_OpConvert() { return NULL; } +@@ -1128,6 +1136,7 @@ class LIR_Op: public CompilationResourceObj { virtual LIR_Op1* as_Op1() { return NULL; } virtual LIR_Op2* as_Op2() { return NULL; } virtual LIR_Op3* as_Op3() { return NULL; } --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - virtual LIR_Op4* as_Op4() { return NULL; } - #endif ++ virtual LIR_Op4* as_Op4() { return NULL; } virtual LIR_OpArrayCopy* as_OpArrayCopy() { return NULL; } -@@ -1447,15 +1453,18 @@ class LIR_OpConvert: public LIR_Op1 { + virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32() { return NULL; } + virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; } +@@ -1461,15 +1470,18 @@ class LIR_OpConvert: public LIR_Op1 { private: Bytecodes::Code _bytecode; ConversionStub* _stub; @@ -114627,123 +126590,142 @@ index 717404e9726..e9a981b20c2 100644 virtual void emit_code(LIR_Assembler* masm); virtual LIR_OpConvert* as_OpConvert() { return this; } -@@ -1610,7 +1619,7 @@ class LIR_Op2: public LIR_Op { +@@ -1624,7 +1636,7 @@ class LIR_Op2: public LIR_Op { , _tmp4(LIR_OprFact::illegalOpr) , _tmp5(LIR_OprFact::illegalOpr) , _condition(condition) { -- assert(code == lir_cmp || code == lir_assert RISCV_ONLY(|| code == lir_branch || code == lir_cond_float_branch), "code check"); -+ assert(code == lir_cmp || code == lir_assert RISCV_ONLY(|| code == 
lir_branch || code == lir_cond_float_branch)LOONGARCH64_ONLY(|| code == lir_branch || code == lir_cond_float_branch), "code check"); +- assert(code == lir_cmp || code == lir_assert, "code check"); ++ assert(code == lir_cmp || code == lir_cmp_branch || code == lir_cmp_float_branch || code == lir_assert, "code check"); } LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) -@@ -1642,7 +1651,7 @@ class LIR_Op2: public LIR_Op { +@@ -1656,7 +1668,7 @@ class LIR_Op2: public LIR_Op { , _tmp4(LIR_OprFact::illegalOpr) , _tmp5(LIR_OprFact::illegalOpr) , _condition(lir_cond_unknown) { -- assert(code != lir_cmp && RISCV_ONLY(code != lir_branch && code != lir_cond_float_branch &&) is_in_range(code, begin_op2, end_op2), "code check"); -+ assert(code != lir_cmp && RISCV_ONLY(code != lir_branch && code != lir_cond_float_branch &&)LOONGARCH64_ONLY(code != lir_branch && code != lir_cond_float_branch &&) is_in_range(code, begin_op2, end_op2), "code check"); +- assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check"); ++ assert((code != lir_cmp && code != lir_cmp_branch && code != lir_cmp_float_branch) && is_in_range(code, begin_op2, end_op2), "code check"); } LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, LIR_Opr tmp1, LIR_Opr tmp2 = LIR_OprFact::illegalOpr, -@@ -1658,7 +1667,7 @@ class LIR_Op2: public LIR_Op { +@@ -1672,7 +1684,7 @@ class LIR_Op2: public LIR_Op { , _tmp4(tmp4) , _tmp5(tmp5) , _condition(lir_cond_unknown) { -- assert(code != lir_cmp && RISCV_ONLY(code != lir_branch && code != lir_cond_float_branch &&) is_in_range(code, begin_op2, end_op2), "code check"); -+ assert(code != lir_cmp && RISCV_ONLY(code != lir_branch && code != lir_cond_float_branch &&)LOONGARCH64_ONLY(code != lir_branch && code != lir_cond_float_branch &&) is_in_range(code, begin_op2, end_op2), "code check"); +- assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check"); ++ assert((code != lir_cmp && code != lir_cmp_branch && code != lir_cmp_float_branch) && is_in_range(code, begin_op2, end_op2), "code check"); } LIR_Opr in_opr1() const { return _opr1; } -@@ -1670,14 +1679,14 @@ class LIR_Op2: public LIR_Op { +@@ -1684,10 +1696,12 @@ class LIR_Op2: public LIR_Op { LIR_Opr tmp4_opr() const { return _tmp4; } LIR_Opr tmp5_opr() const { return _tmp5; } LIR_Condition condition() const { --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - assert(code() == lir_cmp || code() == lir_branch || code() == lir_cond_float_branch || code() == lir_assert, "only valid for branch and assert"); return _condition; - #else - assert(code() == lir_cmp || code() == lir_cmove || code() == lir_assert, "only valid for cmp and cmove and assert"); return _condition; - #endif +- assert(code() == lir_cmp || code() == lir_cmove || code() == lir_assert, "only valid for cmp and cmove and assert"); return _condition; ++ assert(code() == lir_cmp || code() == lir_cmp_branch || code() == lir_cmp_float_branch || code() == lir_cmove || code() == lir_assert, "only valid for cmp and cmove and assert"); ++ return _condition; } void set_condition(LIR_Condition condition) { --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - assert(code() == lir_cmp || code() == lir_branch || code() == lir_cond_float_branch, "only valid for branch"); _condition = condition; - #else - assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); _condition = condition; -@@ -1695,7 +1704,7 @@ class LIR_Op2: public LIR_Op { +- assert(code() 
== lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); _condition = condition; ++ assert(code() == lir_cmp || code() == lir_cmp_branch || code() == lir_cmp_float_branch || code() == lir_cmove, "only valid for cmp and cmove"); ++ _condition = condition; + } + + void set_fpu_stack_size(int size) { _fpu_stack_size = size; } +@@ -1701,6 +1715,43 @@ class LIR_Op2: public LIR_Op { virtual void print_instr(outputStream* out) const PRODUCT_RETURN; }; --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - class LIR_OpBranch: public LIR_Op2 { - #else - class LIR_OpBranch: public LIR_Op { -@@ -1703,7 +1712,7 @@ class LIR_OpBranch: public LIR_Op { ++class LIR_OpCmpBranch: public LIR_Op2 { ++ friend class LIR_OpVisitState; ++ ++ private: ++ Label* _label; ++ BlockBegin* _block; // if this is a branch to a block, this is the block ++ BlockBegin* _ublock; // if this is a float-branch, this is the unorderd block ++ CodeStub* _stub; // if this is a branch to a stub, this is the stub ++ ++ public: ++ LIR_OpCmpBranch(LIR_Condition cond, LIR_Opr left, LIR_Opr right, Label* lbl, CodeEmitInfo* info = NULL) ++ : LIR_Op2(lir_cmp_branch, cond, left, right, info) ++ , _label(lbl) ++ , _block(NULL) ++ , _ublock(NULL) ++ , _stub(NULL) { } ++ ++ LIR_OpCmpBranch(LIR_Condition cond, LIR_Opr left, LIR_Opr right, CodeStub* stub, CodeEmitInfo* info = NULL); ++ LIR_OpCmpBranch(LIR_Condition cond, LIR_Opr left, LIR_Opr right, BlockBegin* block, CodeEmitInfo* info = NULL); ++ ++ // for unordered comparisons ++ LIR_OpCmpBranch(LIR_Condition cond, LIR_Opr left, LIR_Opr right, BlockBegin* block, BlockBegin* ublock, CodeEmitInfo* info = NULL); ++ ++ Label* label() const { return _label; } ++ BlockBegin* block() const { return _block; } ++ BlockBegin* ublock() const { return _ublock; } ++ CodeStub* stub() const { return _stub; } ++ ++ void change_block(BlockBegin* b); ++ void change_ublock(BlockBegin* b); ++ void negate_cond(); ++ ++ virtual void emit_code(LIR_Assembler* masm); ++ virtual LIR_OpCmpBranch* as_OpCmpBranch() { return this; } ++ virtual void print_instr(outputStream* out) const PRODUCT_RETURN; ++}; ++ + class LIR_OpAllocArray : public LIR_Op { friend class LIR_OpVisitState; - private: --#ifndef RISCV -+#if !defined(RISCV) && !defined(LOONGARCH) - LIR_Condition _cond; - #endif - Label* _label; -@@ -1713,7 +1722,7 @@ class LIR_OpBranch: public LIR_Op { - - public: - LIR_OpBranch(LIR_Condition cond, Label* lbl) --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - : LIR_Op2(lir_branch, cond, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL) - #else - : LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL) -@@ -1730,7 +1739,7 @@ class LIR_OpBranch: public LIR_Op { - // for unordered comparisons - LIR_OpBranch(LIR_Condition cond, BlockBegin* block, BlockBegin* ublock); - --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - LIR_Condition cond() const { return condition(); } - void set_cond(LIR_Condition cond) { set_condition(cond); } - #else -@@ -1814,7 +1823,7 @@ class LIR_Op3: public LIR_Op { - virtual void print_instr(outputStream* out) const PRODUCT_RETURN; +@@ -1765,6 +1816,48 @@ class LIR_Op3: public LIR_Op { }; --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - class LIR_Op4: public LIR_Op { - friend class LIR_OpVisitState; - protected: -@@ -2112,7 +2121,7 @@ class LIR_List: public CompilationResourceObj { - const char * _file; - int _line; - #endif --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - LIR_Opr _cmp_opr1; - LIR_Opr 
_cmp_opr2; - #endif -@@ -2128,7 +2137,7 @@ class LIR_List: public CompilationResourceObj { - } - #endif // PRODUCT - --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - set_cmp_oprs(op); - // lir_cmp set cmp oprs only on riscv - if (op->code() == lir_cmp) return; -@@ -2150,7 +2159,7 @@ class LIR_List: public CompilationResourceObj { - void set_file_and_line(const char * file, int line); - #endif - --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - void set_cmp_oprs(LIR_Op* op); - #endif -@@ -2246,7 +2255,9 @@ class LIR_List: public CompilationResourceObj { ++class LIR_Op4: public LIR_Op { ++ friend class LIR_OpVisitState; ++ ++ private: ++ LIR_Opr _opr1; ++ LIR_Opr _opr2; ++ LIR_Opr _opr3; ++ LIR_Opr _opr4; ++ BasicType _type; ++ LIR_Condition _condition; ++ ++ void verify() const; ++ ++ public: ++ LIR_Op4(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr opr3, LIR_Opr opr4, LIR_Opr result, BasicType type) ++ : LIR_Op(code, result, NULL) ++ , _opr1(opr1) ++ , _opr2(opr2) ++ , _opr3(opr3) ++ , _opr4(opr4) ++ , _type(type) ++ , _condition(condition) { ++ assert(is_in_range(code, begin_op4, end_op4), "code check"); ++ assert(type != T_ILLEGAL, "cmove should have type"); ++ } ++ LIR_Opr in_opr1() const { return _opr1; } ++ LIR_Opr in_opr2() const { return _opr2; } ++ LIR_Opr in_opr3() const { return _opr3; } ++ LIR_Opr in_opr4() const { return _opr4; } ++ BasicType type() const { return _type; } ++ LIR_Condition condition() const { ++ assert(code() == lir_cmp_cmove, "only valid for cmp cmove"); return _condition; ++ } ++ void set_condition(LIR_Condition condition) { ++ assert(code() == lir_cmp_cmove, "only valid for cmp cmove"); _condition = condition; ++ } ++ ++ virtual void emit_code(LIR_Assembler* masm); ++ virtual LIR_Op4* as_Op4() { return this; } ++ virtual void print_instr(outputStream* out) const PRODUCT_RETURN; ++}; ++ + //-------------------------------- + class LabelObj: public CompilationResourceObj { + private: +@@ -2106,7 +2199,9 @@ class LIR_List: public CompilationResourceObj { void safepoint(LIR_Opr tmp, CodeEmitInfo* info) { append(new LIR_Op1(lir_safepoint, tmp, info)); } void return_op(LIR_Opr result) { append(new LIR_OpReturn(result)); } @@ -114754,87 +126736,444 @@ index 717404e9726..e9a981b20c2 100644 void logical_and (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_and, left, right, dst)); } void logical_or (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_or, left, right, dst)); } -@@ -2273,7 +2284,7 @@ class LIR_List: public CompilationResourceObj { +@@ -2130,6 +2225,15 @@ class LIR_List: public CompilationResourceObj { + cmp(condition, left, LIR_OprFact::intConst(right), info); + } + ++ // machine dependent ++ template ++ void cmp_branch(LIR_Condition condition, LIR_Opr left, LIR_Opr right, T tgt, CodeEmitInfo* info = NULL); ++ template ++ void cmp_branch(LIR_Condition condition, LIR_Opr left, int right, T tgt, CodeEmitInfo* info = NULL) { ++ cmp_branch(condition, left, LIR_OprFact::intConst(right), tgt, info); ++ } ++ void cmp_branch(LIR_Condition condition, LIR_Opr left, LIR_Opr right, BlockBegin* block, BlockBegin* unordered); ++ void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info); void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Address* addr, CodeEmitInfo* info); --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - void cmove(LIR_Condition condition, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type, - 
LIR_Opr cmp_opr1 = LIR_OprFact::illegalOpr, LIR_Opr cmp_opr2 = LIR_OprFact::illegalOpr) { - append(new LIR_Op4(lir_cmove, condition, src1, src2, cmp_opr1, cmp_opr2, dst, type)); +@@ -2137,6 +2241,9 @@ class LIR_List: public CompilationResourceObj { + append(new LIR_Op2(lir_cmove, condition, src1, src2, dst, type)); + } + ++ // machine dependent ++ void cmp_cmove(LIR_Condition condition, LIR_Opr left, LIR_Opr right, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type); ++ + void cas_long(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, + LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr); + void cas_obj(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, diff --git a/src/hotspot/share/c1/c1_LIRAssembler.cpp b/src/hotspot/share/c1/c1_LIRAssembler.cpp -index 989a6f8ad25..e288de2ab8e 100644 +index 37ce476253d..09053d94b51 100644 --- a/src/hotspot/share/c1/c1_LIRAssembler.cpp +++ b/src/hotspot/share/c1/c1_LIRAssembler.cpp -@@ -22,6 +22,12 @@ - * - */ +@@ -757,6 +757,18 @@ void LIR_Assembler::emit_op2(LIR_Op2* op) { + } -+/* -+ * This file has been modified by Loongson Technology in 2023, These -+ * modifications are Copyright (c) 2022, 2023, Loongson Technology, and are made -+ * available on the same license terms set forth above. -+ */ -+ - #include "precompiled.hpp" - #include "asm/assembler.inline.hpp" - #include "c1/c1_Compilation.hpp" -@@ -691,7 +697,7 @@ void LIR_Assembler::emit_op2(LIR_Op2* op) { - comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op); - break; --#ifndef RISCV -+#if !defined(RISCV) && !defined(LOONGARCH) - case lir_cmove: - cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type()); - break; -@@ -758,7 +764,7 @@ void LIR_Assembler::emit_op2(LIR_Op2* op) { - } ++void LIR_Assembler::emit_op4(LIR_Op4* op) { ++ switch (op->code()) { ++ case lir_cmp_cmove: ++ cmp_cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->in_opr3(), op->in_opr4(), op->result_opr(), op->type()); ++ break; ++ ++ default: ++ Unimplemented(); ++ break; ++ } ++} ++ + void LIR_Assembler::build_frame() { + _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes()); } - --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - void LIR_Assembler::emit_op4(LIR_Op4* op) { - switch(op->code()) { - case lir_cmove: diff --git a/src/hotspot/share/c1/c1_LIRAssembler.hpp b/src/hotspot/share/c1/c1_LIRAssembler.hpp -index c82baa15fe7..84c34db4985 100644 +index 02c79160d04..d0cceefdda1 100644 --- a/src/hotspot/share/c1/c1_LIRAssembler.hpp +++ b/src/hotspot/share/c1/c1_LIRAssembler.hpp -@@ -22,6 +22,12 @@ - * - */ - -+/* -+ * This file has been modified by Loongson Technology in 2023, These -+ * modifications are Copyright (c) 2022, 2023, Loongson Technology, and are made -+ * available on the same license terms set forth above. 
-+ */ -+ - #ifndef SHARE_C1_C1_LIRASSEMBLER_HPP - #define SHARE_C1_C1_LIRASSEMBLER_HPP - -@@ -186,7 +192,7 @@ class LIR_Assembler: public CompilationResourceObj { +@@ -186,7 +186,9 @@ class LIR_Assembler: public CompilationResourceObj { void emit_op1(LIR_Op1* op); void emit_op2(LIR_Op2* op); void emit_op3(LIR_Op3* op); --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - void emit_op4(LIR_Op4* op); - #endif ++ void emit_op4(LIR_Op4* op); void emit_opBranch(LIR_OpBranch* op); -@@ -222,7 +228,7 @@ class LIR_Assembler: public CompilationResourceObj { - void volatile_move_op(LIR_Opr src, LIR_Opr result, BasicType type, CodeEmitInfo* info); ++ void emit_opCmpBranch(LIR_OpCmpBranch* op); + void emit_opLabel(LIR_OpLabel* op); + void emit_arraycopy(LIR_OpArrayCopy* op); + void emit_updatecrc32(LIR_OpUpdateCRC32* op); +@@ -219,6 +221,7 @@ class LIR_Assembler: public CompilationResourceObj { void comp_mem_op(LIR_Opr src, LIR_Opr result, BasicType type, CodeEmitInfo* info); // info set for null exceptions void comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr result, LIR_Op2* op); --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - void cmove(LIR_Condition code, LIR_Opr left, LIR_Opr right, LIR_Opr result, BasicType type, - LIR_Opr cmp_opr1 = LIR_OprFact::illegalOpr, LIR_Opr cmp_opr2 = LIR_OprFact::illegalOpr); - #else + void cmove(LIR_Condition code, LIR_Opr left, LIR_Opr right, LIR_Opr result, BasicType type); ++ void cmp_cmove(LIR_Condition code, LIR_Opr left, LIR_Opr right, LIR_Opr src1, LIR_Opr src2, LIR_Opr result, BasicType type); + + void call( LIR_OpJavaCall* op, relocInfo::relocType rtype); + void ic_call( LIR_OpJavaCall* op); +diff --git a/src/hotspot/share/c1/c1_LIRGenerator.cpp b/src/hotspot/share/c1/c1_LIRGenerator.cpp +index e5e71fab8c1..227ea093a68 100644 +--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp ++++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp +@@ -475,13 +475,11 @@ void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index, + CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) { + CodeStub* stub = new RangeCheckStub(range_check_info, index, array); + if (index->is_constant()) { +- cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(), +- index->as_jint(), null_check_info); +- __ branch(lir_cond_belowEqual, stub); // forward branch ++ cmp_mem_int_branch(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(), ++ index->as_jint(), stub, null_check_info); // forward branch + } else { +- cmp_reg_mem(lir_cond_aboveEqual, index, array, +- arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info); +- __ branch(lir_cond_aboveEqual, stub); // forward branch ++ cmp_reg_mem_branch(lir_cond_aboveEqual, index, array, arrayOopDesc::length_offset_in_bytes(), ++ T_INT, stub, null_check_info); // forward branch + } + } + +@@ -489,12 +487,11 @@ void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index, + void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) { + CodeStub* stub = new RangeCheckStub(info, index); + if (index->is_constant()) { +- cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info); +- __ branch(lir_cond_belowEqual, stub); // forward branch ++ cmp_mem_int_branch(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), ++ index->as_jint(), stub, info); // forward branch + } else { +- cmp_reg_mem(lir_cond_aboveEqual, index, buffer, +- java_nio_Buffer::limit_offset(), T_INT, info); +- __ 
branch(lir_cond_aboveEqual, stub); // forward branch ++ cmp_reg_mem_branch(lir_cond_aboveEqual, index, buffer, ++ java_nio_Buffer::limit_offset(), T_INT, stub, info); // forward branch + } + __ move(index, result); + } +@@ -920,7 +917,7 @@ LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) { + return tmp; + } + +-void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) { ++void LIRGenerator::profile_branch(If* if_instr, If::Condition cond, LIR_Opr left, LIR_Opr right) { + if (if_instr->should_profile()) { + ciMethod* method = if_instr->profiled_method(); + assert(method != NULL, "method should be set if branch is profiled"); +@@ -941,10 +938,17 @@ void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) { + __ metadata2reg(md->constant_encoding(), md_reg); + + LIR_Opr data_offset_reg = new_pointer_register(); +- __ cmove(lir_cond(cond), +- LIR_OprFact::intptrConst(taken_count_offset), +- LIR_OprFact::intptrConst(not_taken_count_offset), +- data_offset_reg, as_BasicType(if_instr->x()->type())); ++ if (left == LIR_OprFact::illegalOpr && right == LIR_OprFact::illegalOpr) { ++ __ cmove(lir_cond(cond), ++ LIR_OprFact::intptrConst(taken_count_offset), ++ LIR_OprFact::intptrConst(not_taken_count_offset), ++ data_offset_reg, as_BasicType(if_instr->x()->type())); ++ } else { ++ __ cmp_cmove(lir_cond(cond), left, right, ++ LIR_OprFact::intptrConst(taken_count_offset), ++ LIR_OprFact::intptrConst(not_taken_count_offset), ++ data_offset_reg, as_BasicType(if_instr->x()->type())); ++ } + + // MDO cells are intptr_t, so the data_reg width is arch-dependent. + LIR_Opr data_reg = new_pointer_register(); +@@ -1294,8 +1298,8 @@ void LIRGenerator::do_isPrimitive(Intrinsic* x) { + } + + __ move(new LIR_Address(rcvr.result(), java_lang_Class::klass_offset(), T_ADDRESS), temp, info); +- __ cmp(lir_cond_notEqual, temp, LIR_OprFact::metadataConst(0)); +- __ cmove(lir_cond_notEqual, LIR_OprFact::intConst(0), LIR_OprFact::intConst(1), result, T_BOOLEAN); ++ __ cmp_cmove(lir_cond_notEqual, temp, LIR_OprFact::metadataConst(0), ++ LIR_OprFact::intConst(0), LIR_OprFact::intConst(1), result, T_BOOLEAN); + } + + // Example: Foo.class.getModifiers() +@@ -1327,8 +1331,8 @@ void LIRGenerator::do_getModifiers(Intrinsic* x) { + + // Check if this is a Java mirror of primitive type, and select the appropriate klass. + LIR_Opr klass = new_register(T_METADATA); +- __ cmp(lir_cond_equal, recv_klass, LIR_OprFact::metadataConst(0)); +- __ cmove(lir_cond_equal, prim_klass, recv_klass, klass, T_ADDRESS); ++ __ cmp_cmove(lir_cond_equal, recv_klass, LIR_OprFact::metadataConst(0), ++ prim_klass, recv_klass, klass, T_ADDRESS); + + // Get the answer. + __ move(new LIR_Address(klass, in_bytes(Klass::modifier_flags_offset()), T_INT), result); +@@ -1360,8 +1364,7 @@ void LIRGenerator::do_getObjectSize(Intrinsic* x) { + LabelObj* L_done = new LabelObj(); + LabelObj* L_array = new LabelObj(); + +- __ cmp(lir_cond_lessEqual, layout, 0); +- __ branch(lir_cond_lessEqual, L_array->label()); ++ __ cmp_branch(lir_cond_lessEqual, layout, 0, L_array->label()); + + // Instance case: the layout helper gives us instance size almost directly, + // but we need to mask out the _lh_instance_slow_path_bit. 
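Note on the pattern in the hunks above: they replace C1's two-step sequence, `__ cmp(cond, a, b)` followed by `__ cmove(cond, src1, src2, dst)` or `__ branch(cond, target)`, with single fused `cmp_cmove`/`cmp_branch` LIR ops. The likely motivation, judging from the LoongArch/MIPS code elsewhere in this patch, is that these ISAs have no condition-flags register, so a compare result cannot ride implicitly from one LIR op to the next; the fused op keeps both compare operands explicit alongside the data operands. A minimal standalone sketch of the fused-select semantics (plain C++, invented names, not HotSpot code):

    #include <cstdio>

    enum Cond { EQ, NE, LT, LE };

    // One fused op carries the compare inputs (left, right) and both data
    // inputs (src1, src2); no implicit state survives between LIR ops.
    static long cmp_cmove(Cond c, long left, long right, long src1, long src2) {
      bool take;
      switch (c) {
        case EQ: take = (left == right); break;
        case NE: take = (left != right); break;
        case LT: take = (left <  right); break;
        default: take = (left <= right); break;
      }
      return take ? src1 : src2;  // src1 when the condition holds
    }

    int main() {
      // Mirrors the do_isPrimitive rewrite above:
      // result = (klass_word != 0) ? 0 : 1
      long klass_word = 42;
      printf("%ld\n", cmp_cmove(NE, klass_word, 0, 0, 1));  // prints 0
      return 0;
    }

With this shape the backend is free to lower the op as a flag-free instruction sequence, for example a compare into a general register followed by a masked select.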
+@@ -1414,8 +1417,7 @@ void LIRGenerator::do_getObjectSize(Intrinsic* x) { + LabelObj* L_shift_exit = new LabelObj(); + + __ branch_destination(L_shift_loop->label()); +- __ cmp(lir_cond_equal, layout, 0); +- __ branch(lir_cond_equal, L_shift_exit->label()); ++ __ cmp_branch(lir_cond_equal, layout, 0, L_shift_exit->label()); + + #ifdef _LP64 + __ shift_left(length, 1, length); +@@ -1714,8 +1716,8 @@ void LIRGenerator::do_StoreIndexed(StoreIndexed* x) { + + if (GenerateRangeChecks && needs_range_check) { + if (use_length) { +- __ cmp(lir_cond_belowEqual, length.result(), index.result()); +- __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result())); ++ CodeStub* stub = new RangeCheckStub(range_check_info, index.result(), array.result()); ++ __ cmp_branch(lir_cond_belowEqual, length.result(), index.result(), stub); + } else { + array_range_check(array.result(), index.result(), null_check_info, range_check_info); + // range_check also does the null check +@@ -1893,12 +1895,11 @@ void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) { + CodeEmitInfo* info = state_for(x); + CodeStub* stub = new RangeCheckStub(info, index.result()); + if (index.result()->is_constant()) { +- cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info); +- __ branch(lir_cond_belowEqual, stub); ++ cmp_mem_int_branch(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), ++ index.result()->as_jint(), stub, info); + } else { +- cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(), +- java_nio_Buffer::limit_offset(), T_INT, info); +- __ branch(lir_cond_aboveEqual, stub); ++ cmp_reg_mem_branch(lir_cond_aboveEqual, index.result(), buf.result(), ++ java_nio_Buffer::limit_offset(), T_INT, stub, info); + } + __ move(index.result(), result); + } else { +@@ -1976,8 +1977,8 @@ void LIRGenerator::do_LoadIndexed(LoadIndexed* x) { + } else if (use_length) { + // TODO: use a (modified) version of array_range_check that does not require a + // constant length to be loaded to a register +- __ cmp(lir_cond_belowEqual, length.result(), index.result()); +- __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result())); ++ CodeStub* stub = new RangeCheckStub(range_check_info, index.result(), array.result()); ++ __ cmp_branch(lir_cond_belowEqual, length.result(), index.result(), stub); + } else { + array_range_check(array.result(), index.result(), null_check_info, range_check_info); + // The range check performs the null check, so clear it out for the load +@@ -2352,19 +2353,14 @@ void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegi + int high_key = one_range->high_key(); + BlockBegin* dest = one_range->sux(); + if (low_key == high_key) { +- __ cmp(lir_cond_equal, value, low_key); +- __ branch(lir_cond_equal, dest); ++ __ cmp_branch(lir_cond_equal, value, low_key, dest); + } else if (high_key - low_key == 1) { +- __ cmp(lir_cond_equal, value, low_key); +- __ branch(lir_cond_equal, dest); +- __ cmp(lir_cond_equal, value, high_key); +- __ branch(lir_cond_equal, dest); ++ __ cmp_branch(lir_cond_equal, value, low_key, dest); ++ __ cmp_branch(lir_cond_equal, value, high_key, dest); + } else { + LabelObj* L = new LabelObj(); +- __ cmp(lir_cond_less, value, low_key); +- __ branch(lir_cond_less, L->label()); +- __ cmp(lir_cond_lessEqual, value, high_key); +- __ branch(lir_cond_lessEqual, dest); ++ __ cmp_branch(lir_cond_less, value, low_key, L->label()); ++ __ 
cmp_branch(lir_cond_lessEqual, value, high_key, dest); + __ branch_destination(L->label()); + } + } +@@ -2464,12 +2460,11 @@ void LIRGenerator::do_TableSwitch(TableSwitch* x) { + __ move(LIR_OprFact::intptrConst(default_count_offset), data_offset_reg); + for (int i = 0; i < len; i++) { + int count_offset = md->byte_offset_of_slot(data, MultiBranchData::case_count_offset(i)); +- __ cmp(lir_cond_equal, value, i + lo_key); + __ move(data_offset_reg, tmp_reg); +- __ cmove(lir_cond_equal, +- LIR_OprFact::intptrConst(count_offset), +- tmp_reg, +- data_offset_reg, T_INT); ++ __ cmp_cmove(lir_cond_equal, value, LIR_OprFact::intConst(i + lo_key), ++ LIR_OprFact::intptrConst(count_offset), ++ tmp_reg, ++ data_offset_reg, T_INT); + } + + LIR_Opr data_reg = new_pointer_register(); +@@ -2483,8 +2478,7 @@ void LIRGenerator::do_TableSwitch(TableSwitch* x) { + do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux()); + } else { + for (int i = 0; i < len; i++) { +- __ cmp(lir_cond_equal, value, i + lo_key); +- __ branch(lir_cond_equal, x->sux_at(i)); ++ __ cmp_branch(lir_cond_equal, value, i + lo_key, x->sux_at(i)); + } + __ jump(x->default_sux()); + } +@@ -2522,12 +2516,11 @@ void LIRGenerator::do_LookupSwitch(LookupSwitch* x) { + __ move(LIR_OprFact::intptrConst(default_count_offset), data_offset_reg); + for (int i = 0; i < len; i++) { + int count_offset = md->byte_offset_of_slot(data, MultiBranchData::case_count_offset(i)); +- __ cmp(lir_cond_equal, value, x->key_at(i)); + __ move(data_offset_reg, tmp_reg); +- __ cmove(lir_cond_equal, +- LIR_OprFact::intptrConst(count_offset), +- tmp_reg, +- data_offset_reg, T_INT); ++ __ cmp_cmove(lir_cond_equal, value, LIR_OprFact::intConst(x->key_at(i)), ++ LIR_OprFact::intptrConst(count_offset), ++ tmp_reg, ++ data_offset_reg, T_INT); + } + + LIR_Opr data_reg = new_pointer_register(); +@@ -2542,8 +2535,7 @@ void LIRGenerator::do_LookupSwitch(LookupSwitch* x) { + } else { + int len = x->length(); + for (int i = 0; i < len; i++) { +- __ cmp(lir_cond_equal, value, x->key_at(i)); +- __ branch(lir_cond_equal, x->sux_at(i)); ++ __ cmp_branch(lir_cond_equal, value, x->key_at(i), x->sux_at(i)); + } + __ jump(x->default_sux()); + } +@@ -3038,8 +3030,8 @@ void LIRGenerator::do_IfOp(IfOp* x) { + f_val.dont_load_item(); + LIR_Opr reg = rlock_result(x); + +- __ cmp(lir_cond(x->cond()), left.result(), right.result()); +- __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type())); ++ __ cmp_cmove(lir_cond(x->cond()), left.result(), right.result(), ++ t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type())); + } + + #ifdef JFR_HAVE_INTRINSICS +@@ -3056,8 +3048,7 @@ void LIRGenerator::do_getEventWriter(Intrinsic* x) { + __ move(LIR_OprFact::oopConst(NULL), result); + LIR_Opr jobj = new_register(T_METADATA); + __ move_wide(jobj_addr, jobj); +- __ cmp(lir_cond_equal, jobj, LIR_OprFact::metadataConst(0)); +- __ branch(lir_cond_equal, L_end->label()); ++ __ cmp_branch(lir_cond_equal, jobj, LIR_OprFact::metadataConst(0), L_end->label()); + + access_load(IN_NATIVE, T_OBJECT, LIR_OprFact::address(new LIR_Address(jobj, T_OBJECT)), result); + +@@ -3365,21 +3356,24 @@ void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) { + + void LIRGenerator::increment_backedge_counter_conditionally(LIR_Condition cond, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info, int left_bci, int right_bci, int bci) { + if (compilation()->count_backedges()) { ++ LIR_Opr step = new_register(T_INT); ++ LIR_Opr plus_one = 
LIR_OprFact::intConst(InvocationCounter::count_increment); ++ LIR_Opr zero = LIR_OprFact::intConst(0); + #if defined(X86) && !defined(_LP64) + // BEWARE! On 32-bit x86 cmp clobbers its left argument so we need a temp copy. + LIR_Opr left_copy = new_register(left->type()); + __ move(left, left_copy); + __ cmp(cond, left_copy, right); +-#else +- __ cmp(cond, left, right); +-#endif +- LIR_Opr step = new_register(T_INT); +- LIR_Opr plus_one = LIR_OprFact::intConst(InvocationCounter::count_increment); +- LIR_Opr zero = LIR_OprFact::intConst(0); + __ cmove(cond, + (left_bci < bci) ? plus_one : zero, + (right_bci < bci) ? plus_one : zero, + step, left->type()); ++#else ++ __ cmp_cmove(cond, left, right, ++ (left_bci < bci) ? plus_one : zero, ++ (right_bci < bci) ? plus_one : zero, ++ step, left->type()); ++#endif + increment_backedge_counter(info, step, bci); + } + } +@@ -3418,8 +3412,7 @@ void LIRGenerator::decrement_age(CodeEmitInfo* info) { + // DeoptimizeStub will reexecute from the current state in code info. + CodeStub* deopt = new DeoptimizeStub(info, Deoptimization::Reason_tenured, + Deoptimization::Action_make_not_entrant); +- __ cmp(lir_cond_lessEqual, result, LIR_OprFact::intConst(0)); +- __ branch(lir_cond_lessEqual, deopt); ++ __ cmp_branch(lir_cond_lessEqual, result, LIR_OprFact::intConst(0), deopt); + } + } + +@@ -3465,8 +3458,7 @@ void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info, + int freq = frequency << InvocationCounter::count_shift; + if (freq == 0) { + if (!step->is_constant()) { +- __ cmp(lir_cond_notEqual, step, LIR_OprFact::intConst(0)); +- __ branch(lir_cond_notEqual, overflow); ++ __ cmp_branch(lir_cond_notEqual, step, LIR_OprFact::intConst(0), overflow); + } else { + __ branch(lir_cond_always, overflow); + } +@@ -3474,12 +3466,11 @@ void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info, + LIR_Opr mask = load_immediate(freq, T_INT); + if (!step->is_constant()) { + // If step is 0, make sure the overflow check below always fails +- __ cmp(lir_cond_notEqual, step, LIR_OprFact::intConst(0)); +- __ cmove(lir_cond_notEqual, result, LIR_OprFact::intConst(InvocationCounter::count_increment), result, T_INT); ++ __ cmp_cmove(lir_cond_notEqual, step, LIR_OprFact::intConst(0), ++ result, LIR_OprFact::intConst(InvocationCounter::count_increment), result, T_INT); + } + __ logical_and(result, mask, result); +- __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0)); +- __ branch(lir_cond_equal, overflow); ++ __ cmp_branch(lir_cond_equal, result, LIR_OprFact::intConst(0), overflow); + } + __ branch_destination(overflow->continuation()); + } +@@ -3592,8 +3583,7 @@ void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) { + CodeEmitInfo *info = state_for(x, x->state()); + CodeStub* stub = new PredicateFailedStub(info); + +- __ cmp(lir_cond(cond), left, right); +- __ branch(lir_cond(cond), stub); ++ __ cmp_branch(lir_cond(cond), left, right, stub); + } + } + +@@ -3747,8 +3737,8 @@ LIR_Opr LIRGenerator::mask_boolean(LIR_Opr array, LIR_Opr value, CodeEmitInfo*& + __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout); + int diffbit = Klass::layout_helper_boolean_diffbit(); + __ logical_and(layout, LIR_OprFact::intConst(diffbit), layout); +- __ cmp(lir_cond_notEqual, layout, LIR_OprFact::intConst(0)); +- __ cmove(lir_cond_notEqual, value_fixed, value, value_fixed, T_BYTE); ++ __ cmp_cmove(lir_cond_notEqual, layout, LIR_OprFact::intConst(0), ++ value_fixed, value, value_fixed, T_BYTE); + value = value_fixed; + return 
value; + } +diff --git a/src/hotspot/share/c1/c1_LIRGenerator.hpp b/src/hotspot/share/c1/c1_LIRGenerator.hpp +index 67c986cb4e7..cdd97fc386d 100644 +--- a/src/hotspot/share/c1/c1_LIRGenerator.hpp ++++ b/src/hotspot/share/c1/c1_LIRGenerator.hpp +@@ -365,8 +365,10 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure { + void new_instance (LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info); + + // machine dependent +- void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info); +- void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info); ++ template ++ void cmp_mem_int_branch(LIR_Condition condition, LIR_Opr base, int disp, int c, T tgt, CodeEmitInfo* info); ++ template ++ void cmp_reg_mem_branch(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, T tgt, CodeEmitInfo* info); + + void arraycopy_helper(Intrinsic* x, int* flags, ciArrayKlass** expected_type); + +@@ -393,7 +395,7 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure { + + LIR_Opr safepoint_poll_register(); + +- void profile_branch(If* if_instr, If::Condition cond); ++ void profile_branch(If* if_instr, If::Condition cond, LIR_Opr left = LIR_OprFact::illegalOpr, LIR_Opr right = LIR_OprFact::illegalOpr); + void increment_event_counter_impl(CodeEmitInfo* info, + ciMethod *method, LIR_Opr step, int frequency, + int bci, bool backedge, bool notify); diff --git a/src/hotspot/share/c1/c1_LinearScan.cpp b/src/hotspot/share/c1/c1_LinearScan.cpp -index d3d38d11a90..6947406b2e7 100644 +index 84846e8e43d..f81a440d237 100644 --- a/src/hotspot/share/c1/c1_LinearScan.cpp +++ b/src/hotspot/share/c1/c1_LinearScan.cpp @@ -35,6 +35,12 @@ @@ -114842,52 +127181,128 @@ index d3d38d11a90..6947406b2e7 100644 #include "utilities/bitMap.inline.hpp" +/* -+ * This file has been modified by Loongson Technology in 2023, These -+ * modifications are Copyright (c) 2022, 2023, Loongson Technology, and are made ++ * This file has been modified by Loongson Technology in 2022, These ++ * modifications are Copyright (c) 2022, Loongson Technology, and are made + * available on the same license terms set forth above. 
+ */ + #ifndef PRODUCT static LinearScanStatistic _stat_before_alloc; -@@ -1240,7 +1246,7 @@ void LinearScan::add_register_hints(LIR_Op* op) { +@@ -1256,6 +1262,23 @@ void LinearScan::add_register_hints(LIR_Op* op) { + } break; } - case lir_cmove: { --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - assert(op->as_Op4() != NULL, "lir_cmove must be LIR_Op4"); - LIR_Op4* cmove = (LIR_Op4*)op; - #else -@@ -3151,7 +3157,7 @@ void LinearScan::do_linear_scan() { - } ++ case lir_cmp_cmove: { ++ assert(op->as_Op4() != NULL, "lir_cmp_cmove must be LIR_Op4"); ++ LIR_Op4* cmove = (LIR_Op4*)op; ++ ++ LIR_Opr move_from = cmove->in_opr3(); ++ LIR_Opr move_to = cmove->result_opr(); ++ ++ if (move_to->is_register() && move_from->is_register()) { ++ Interval* from = interval_at(reg_num(move_from)); ++ Interval* to = interval_at(reg_num(move_to)); ++ if (from != NULL && to != NULL) { ++ to->set_register_hint(from); ++ TRACE_LINEAR_SCAN(4, tty->print_cr("operation at op_id %d: added hint from interval %d to %d", cmove->id(), from->reg_num(), to->reg_num())); ++ } ++ } ++ break; ++ } + default: + break; } +@@ -3386,7 +3409,9 @@ void LinearScan::verify_no_oops_in_fixed_intervals() { + check_live = (move->patch_code() == lir_patch_none); + } + LIR_OpBranch* branch = op->as_OpBranch(); +- if (branch != NULL && branch->stub() != NULL && branch->stub()->is_exception_throw_stub()) { ++ LIR_OpCmpBranch* cmp_branch = op->as_OpCmpBranch(); ++ if ((branch != NULL && branch->stub() != NULL && branch->stub()->is_exception_throw_stub()) || ++ (cmp_branch != NULL && cmp_branch->stub() != NULL && cmp_branch->stub()->is_exception_throw_stub())) { + // Don't bother checking the stub in this case since the + // exception stub will never return to normal control flow. + check_live = false; +@@ -6276,6 +6301,16 @@ void ControlFlowOptimizer::substitute_branch_target(BlockBegin* block, BlockBegi + assert(op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch"); + LIR_OpBranch* branch = (LIR_OpBranch*)op; --#ifndef RISCV -+#if !defined(RISCV) && !defined(LOONGARCH) - // Disable these optimizations on riscv temporarily, because it does not - // work when the comparison operands are bound to branches or cmoves. - { TIME_LINEAR_SCAN(timer_optimize_lir); -@@ -6385,7 +6391,7 @@ void ControlFlowOptimizer::delete_unnecessary_jumps(BlockList* code) { - // There might be a cmove inserted for profiling which depends on the same - // compare. If we change the condition of the respective compare, we have - // to take care of this cmove as well. 
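Note on the `lir_cmp_cmove` register-hint case above: hinting the result interval toward the interval of `in_opr3` (the value selected when the condition holds) lets the linear-scan allocator place both in the same physical register, so one arm of the conditional move degenerates into a no-op copy. A self-contained toy model of that decision (invented names; the real allocator also checks that the interval lifetimes permit honoring the hint):

    #include <cstdio>

    struct Interval {
      int virt;                       // virtual register number
      int phys = -1;                  // physical register, -1 until allocated
      const Interval* hint = nullptr; // set by set_register_hint(from)
    };

    // Toy policy: take the hinted physical register when the hint's owner
    // already has one; otherwise fall back to the next free register.
    static void assign(Interval& iv, int next_free) {
      if (iv.hint != nullptr && iv.hint->phys >= 0) {
        iv.phys = iv.hint->phys;
      } else {
        iv.phys = next_free;
      }
    }

    int main() {
      Interval from{10};
      from.phys = 3;                  // src1 of the cmp_cmove lives in r3
      Interval to{11};
      to.hint = &from;                // to->set_register_hint(from)
      assign(to, 5);
      printf("result in r%d, src1 in r%d -> move %s\n", to.phys, from.phys,
             to.phys == from.phys ? "elided" : "kept");
      return 0;
    }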
--#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - LIR_Op4* prev_cmove = NULL; - #else - LIR_Op2* prev_cmove = NULL; -@@ -6395,7 +6401,7 @@ void ControlFlowOptimizer::delete_unnecessary_jumps(BlockList* code) { - prev_op = instructions->at(j); - // check for the cmove - if (prev_op->code() == lir_cmove) { --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - assert(prev_op->as_Op4() != NULL, "cmove must be of type LIR_Op4"); - prev_cmove = (LIR_Op4*)prev_op; - #else ++ if (branch->block() == target_from) { ++ branch->change_block(target_to); ++ } ++ if (branch->ublock() == target_from) { ++ branch->change_ublock(target_to); ++ } ++ } else if (op->code() == lir_cmp_branch || op->code() == lir_cmp_float_branch) { ++ assert(op->as_OpCmpBranch() != NULL, "branch must be of type LIR_OpCmpBranch"); ++ LIR_OpCmpBranch* branch = (LIR_OpCmpBranch*)op; ++ + if (branch->block() == target_from) { + branch->change_block(target_to); + } +@@ -6404,6 +6439,20 @@ void ControlFlowOptimizer::delete_unnecessary_jumps(BlockList* code) { + } + } + } ++ } else if (prev_op->code() == lir_cmp_branch || prev_op->code() == lir_cmp_float_branch) { ++ assert(prev_op->as_OpCmpBranch() != NULL, "branch must be of type LIR_OpCmpBranch"); ++ LIR_OpCmpBranch* prev_branch = (LIR_OpCmpBranch*)prev_op; ++ ++ if (prev_branch->stub() == NULL) { ++ if (prev_branch->block() == code->at(i + 1) && prev_branch->info() == NULL) { ++ TRACE_LINEAR_SCAN(3, tty->print_cr("Negating conditional branch and deleting unconditional branch at end of block B%d", block->block_id())); ++ ++ // eliminate a conditional branch to the immediate successor ++ prev_branch->change_block(last_branch->block()); ++ prev_branch->negate_cond(); ++ instructions->trunc_to(instructions->length() - 1); ++ } ++ } + } + } + } +@@ -6479,6 +6528,13 @@ void ControlFlowOptimizer::verify(BlockList* code) { + assert(op_branch->block() == NULL || code->find(op_branch->block()) != -1, "branch target not valid"); + assert(op_branch->ublock() == NULL || code->find(op_branch->ublock()) != -1, "branch target not valid"); + } ++ ++ LIR_OpCmpBranch* op_cmp_branch = instructions->at(j)->as_OpCmpBranch(); ++ ++ if (op_cmp_branch != NULL) { ++ assert(op_cmp_branch->block() == NULL || code->find(op_cmp_branch->block()) != -1, "branch target not valid"); ++ assert(op_cmp_branch->ublock() == NULL || code->find(op_cmp_branch->ublock()) != -1, "branch target not valid"); ++ } + } + + for (j = 0; j < block->number_of_sux() - 1; j++) { +@@ -6722,6 +6778,24 @@ void LinearScanStatistic::collect(LinearScan* allocator) { + break; + } + ++ case lir_cmp_branch: ++ case lir_cmp_float_branch: { ++ LIR_OpCmpBranch* branch = op->as_OpCmpBranch(); ++ if (branch->block() == NULL) { ++ inc_counter(counter_stub_branch); ++ } else { ++ inc_counter(counter_cond_branch); ++ } ++ inc_counter(counter_cmp); ++ break; ++ } ++ ++ case lir_cmp_cmove: { ++ inc_counter(counter_misc_inst); ++ inc_counter(counter_cmp); ++ break; ++ } ++ + case lir_neg: + case lir_add: + case lir_sub: diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp -index f5861d1f3ed..e1b78dcf381 100644 +index e2b27e5f4f0..3ae63dbc5d5 100644 --- a/src/hotspot/share/code/nmethod.cpp +++ b/src/hotspot/share/code/nmethod.cpp @@ -22,6 +22,12 @@ @@ -114903,7 +127318,7 @@ index f5861d1f3ed..e1b78dcf381 100644 #include "precompiled.hpp" #include "jvm.h" #include "asm/assembler.inline.hpp" -@@ -2541,7 +2547,8 @@ void nmethod::verify_scopes() { +@@ -2480,7 +2486,8 @@ void nmethod::verify_scopes() { 
//verify_interrupt_point(iter.addr()); break; case relocInfo::runtime_call_type: @@ -114913,7 +127328,7 @@ index f5861d1f3ed..e1b78dcf381 100644 address destination = iter.reloc()->value(); // Right now there is no way to find out which entries support // an interrupt point. It would be nice if we had this -@@ -3106,7 +3113,8 @@ const char* nmethod::reloc_string_for(u_char* begin, u_char* end) { +@@ -3042,7 +3049,8 @@ const char* nmethod::reloc_string_for(u_char* begin, u_char* end) { return st.as_string(); } case relocInfo::runtime_call_type: @@ -115010,7 +127425,7 @@ index 55d4ac7c62d..b1c34733021 100644 // Trampoline Relocations. // A trampoline allows to encode a small branch in the code, even if there diff --git a/src/hotspot/share/code/vtableStubs.cpp b/src/hotspot/share/code/vtableStubs.cpp -index 7ea2d19fcd3..7306230e8fb 100644 +index d490adb3eef..0b1d7dc0a27 100644 --- a/src/hotspot/share/code/vtableStubs.cpp +++ b/src/hotspot/share/code/vtableStubs.cpp @@ -22,6 +22,12 @@ @@ -115046,8 +127461,41 @@ index 7ea2d19fcd3..7306230e8fb 100644 // ppc (linux, BE): 404 288 // ppc (linux, LE): 356 276 // ppc (AIX): 416 296 +diff --git a/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.cpp b/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.cpp +index 60a213aec77..4970790b873 100644 +--- a/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.cpp ++++ b/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.cpp +@@ -74,7 +74,6 @@ void G1BarrierSetC1::pre_barrier(LIRAccess& access, LIR_Opr addr_opr, + // Read the marking-in-progress flag. + LIR_Opr flag_val = gen->new_register(T_INT); + __ load(mark_active_flag_addr, flag_val); +- __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0)); + + LIR_PatchCode pre_val_patch_code = lir_patch_none; + +@@ -103,7 +102,7 @@ void G1BarrierSetC1::pre_barrier(LIRAccess& access, LIR_Opr addr_opr, + slow = new G1PreBarrierStub(pre_val); + } + +- __ branch(lir_cond_notEqual, slow); ++ __ cmp_branch(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0), slow); + __ branch_destination(slow->continuation()); + } + +@@ -168,10 +167,8 @@ void G1BarrierSetC1::post_barrier(LIRAccess& access, LIR_OprDesc* addr, LIR_OprD + } + assert(new_val->is_register(), "must be a register at this point"); + +- __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD)); +- + CodeStub* slow = new G1PostBarrierStub(addr, new_val); +- __ branch(lir_cond_notEqual, slow); ++ __ cmp_branch(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD), slow); + __ branch_destination(slow->continuation()); + } + diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp -index f0944108810..a8c1f97a80e 100644 +index db117c50611..40d9b6e7211 100644 --- a/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp +++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp @@ -22,6 +22,12 @@ @@ -115073,48 +127521,96 @@ index f0944108810..a8c1f97a80e 100644 assert(_task_queue->taskqueue_empty(), "invariant"); } -diff --git a/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp b/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp -index 56fdd2abeb4..17b9bcafd47 100644 ---- a/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp -+++ b/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp -@@ -22,6 +22,12 @@ - * - */ +diff --git a/src/hotspot/share/gc/shared/c1/barrierSetC1.cpp b/src/hotspot/share/gc/shared/c1/barrierSetC1.cpp +index 663ff91372b..7447fd5b1dd 100644 +--- a/src/hotspot/share/gc/shared/c1/barrierSetC1.cpp ++++ 
b/src/hotspot/share/gc/shared/c1/barrierSetC1.cpp +@@ -193,8 +193,7 @@ void BarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) { + /* Normalize boolean value returned by unsafe operation, i.e., value != 0 ? value = true : value false. */ + if (mask_boolean) { + LabelObj* equalZeroLabel = new LabelObj(); +- __ cmp(lir_cond_equal, result, 0); +- __ branch(lir_cond_equal, equalZeroLabel->label()); ++ __ cmp_branch(lir_cond_equal, result, 0, equalZeroLabel->label()); + __ move(LIR_OprFact::intConst(1), result); + __ branch_destination(equalZeroLabel->label()); + } +@@ -321,14 +320,12 @@ void BarrierSetC1::generate_referent_check(LIRAccess& access, LabelObj* cont) { + referent_off = gen->new_register(T_LONG); + __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset()), referent_off); + } +- __ cmp(lir_cond_notEqual, offset, referent_off); +- __ branch(lir_cond_notEqual, cont->label()); ++ __ cmp_branch(lir_cond_notEqual, offset, referent_off, cont->label()); + } + if (gen_source_check) { + // offset is a const and equals referent offset + // if (source == null) -> continue +- __ cmp(lir_cond_equal, base_reg, LIR_OprFact::oopConst(NULL)); +- __ branch(lir_cond_equal, cont->label()); ++ __ cmp_branch(lir_cond_equal, base_reg, LIR_OprFact::oopConst(NULL), cont->label()); + } + LIR_Opr src_klass = gen->new_register(T_METADATA); + if (gen_type_check) { +@@ -338,8 +335,7 @@ void BarrierSetC1::generate_referent_check(LIRAccess& access, LabelObj* cont) { + LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(InstanceKlass::reference_type_offset()), T_BYTE); + LIR_Opr reference_type = gen->new_register(T_INT); + __ move(reference_type_addr, reference_type); +- __ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE)); +- __ branch(lir_cond_equal, cont->label()); ++ __ cmp_branch(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE), cont->label()); + } + } + } +diff --git a/src/hotspot/share/gc/shared/c1/cardTableBarrierSetC1.cpp b/src/hotspot/share/gc/shared/c1/cardTableBarrierSetC1.cpp +index a89b57d8e45..f89cb3b2dcc 100644 +--- a/src/hotspot/share/gc/shared/c1/cardTableBarrierSetC1.cpp ++++ b/src/hotspot/share/gc/shared/c1/cardTableBarrierSetC1.cpp +@@ -86,8 +86,7 @@ void CardTableBarrierSetC1::post_barrier(LIRAccess& access, LIR_OprDesc* addr, L + __ move(card_addr, cur_value); -+/* -+ * This file has been modified by Loongson Technology in 2023, These -+ * modifications are Copyright (c) 2023, Loongson Technology, and are made -+ * available on the same license terms set forth above. -+ */ -+ - #include "precompiled.hpp" - #include "gc/shared/tlab_globals.hpp" - #include "gc/shared/c2/barrierSetC2.hpp" -@@ -266,6 +272,8 @@ public: + LabelObj* L_already_dirty = new LabelObj(); +- __ cmp(lir_cond_equal, cur_value, dirty); +- __ branch(lir_cond_equal, L_already_dirty->label()); ++ __ cmp_branch(lir_cond_equal, cur_value, dirty, L_already_dirty->label()); + __ move(dirty, card_addr); + __ branch_destination(L_already_dirty->label()); + } else { +diff --git a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp +index 97041ee1743..efdb92bbaaa 100644 +--- a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp ++++ b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp +@@ -78,7 +78,6 @@ void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, + // Read the marking-in-progress flag. 
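Note on the barrier rewrites above: each one folds a `cmp` against a constant plus a `branch` to an out-of-line CodeStub (or label) into a single `cmp_branch(cond, value, constant, target)`, leaving the fall-through as the fast path. A plain C++ model of that control shape, using the card-table post-barrier as the example (invented names, GC details elided; the value 0 for a dirty card is an assumption of this sketch):

    #include <cstdio>

    enum : unsigned char { dirty_card = 0, clean_card = 1 };

    static unsigned char card = clean_card;

    static void post_barrier() {
      // cmp_branch(lir_cond_equal, cur_value, dirty, L_already_dirty->label())
      unsigned char cur_value = card;
      if (cur_value != dirty_card) { // branch taken -> store skipped
        card = dirty_card;           // fast-path store that dirties the card
      }
      // branch_destination(L_already_dirty->label())
    }

    int main() {
      post_barrier();
      printf("card = %d\n", (int)card);  // prints 0: the card is now dirty
      return 0;
    }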
+ LIR_Opr flag_val = gen->new_register(T_INT); + __ load(mark_active_flag_addr, flag_val); +- __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0)); - bool is_volatile = (decorators & MO_SEQ_CST) != 0; - bool is_acquire = (decorators & MO_ACQUIRE) != 0; -+ bool is_relaxed = (decorators & MO_RELAXED) != 0; -+ bool is_unsafe = (decorators & C2_UNSAFE_ACCESS) != 0; + LIR_PatchCode pre_val_patch_code = lir_patch_none; - // If reference is volatile, prevent following volatiles ops from - // floating up before the volatile access. -@@ -299,6 +307,13 @@ public: - assert(_leading_membar == NULL || support_IRIW_for_not_multiple_copy_atomic_cpu, "no leading membar expected"); - Node* mb = kit->insert_mem_bar(Op_MemBarAcquire, n); - mb->as_MemBar()->set_trailing_load(); -+ } else if (is_relaxed && is_unsafe) { -+#ifdef LOONGARCH64 -+ assert(kit != nullptr, "unsupported at optimization time"); -+ Node* n = _access.raw_access(); -+ Node* mb = kit->insert_mem_bar(Op_SameAddrLoadFence, n); -+ mb->as_MemBar()->set_trailing_load(); -+#endif - } - } +@@ -106,7 +105,7 @@ void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, + slow = new ShenandoahPreBarrierStub(pre_val); } + +- __ branch(lir_cond_notEqual, slow); ++ __ cmp_branch(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0), slow); + __ branch_destination(slow->continuation()); + } + +@@ -153,10 +152,9 @@ LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, L + __ logical_and(flag_val, mask_reg, masked_flag); + flag_val = masked_flag; + } +- __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0)); + + CodeStub* slow = new ShenandoahLoadReferenceBarrierStub(obj, addr, result, tmp1, tmp2, decorators); +- __ branch(lir_cond_notEqual, slow); ++ __ cmp_branch(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0), slow); + __ branch_destination(slow->continuation()); + + return result; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp -index 7d31ff02e1a..07dac06aecf 100644 +index c7e0c9b0cd9..c6717bb76af 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp @@ -22,6 +22,12 @@ @@ -115134,37 +127630,61 @@ index 7d31ff02e1a..07dac06aecf 100644 #include "utilities/defaultStream.hpp" void ShenandoahArguments::initialize() { --#if !(defined AARCH64 || defined AMD64 || defined IA32 || defined PPC64 || defined RISCV64) -+#if !(defined AARCH64 || defined AMD64 || defined IA32 || defined PPC64 || defined RISCV64 || defined LOONGARCH64) +-#if !(defined AARCH64 || defined AMD64 || defined IA32 || defined PPC64) ++#if !(defined AARCH64 || defined AMD64 || defined IA32 || defined PPC64 || defined LOONGARCH64) vm_exit_during_initialization("Shenandoah GC is not supported on this platform."); #endif diff --git a/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp b/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp -index 0e99bf107c1..d5541cf8966 100644 +index 4f2e36a8304..b145a63363a 100644 --- a/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp +++ b/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp -@@ -21,6 +21,12 @@ - * questions. - */ +@@ -99,15 +99,20 @@ public: -+/* -+ * This file has been modified by Loongson Technology in 2023, These -+ * modifications are Copyright (c) 2022, 2023, Loongson Technology, and are made -+ * available on the same license terms set forth above. 
-+ */ + virtual void visit(LIR_OpVisitState* state) { + state->do_input(_opr); ++ if (_result->is_valid()) { ++ state->do_temp(_opr); ++ state->do_output(_result); ++ } + } + + virtual void emit_code(LIR_Assembler* ce) { +- ZBarrierSet::assembler()->generate_c1_load_barrier_test(ce, _opr); ++ ZBarrierSet::assembler()->generate_c1_load_barrier_test(ce, _opr, result_opr()); + } + + virtual void print_instr(outputStream* out) const { + _opr->print(out); + out->print(" "); ++ result_opr()->print(out); + } + + #ifndef PRODUCT +@@ -143,13 +148,21 @@ address ZBarrierSetC1::load_barrier_on_oop_field_preloaded_runtime_stub(Decorato + #endif + + void ZBarrierSetC1::load_barrier(LIRAccess& access, LIR_Opr result) const { ++ LIR_Op* op = new LIR_OpZLoadBarrierTest(result); + - #include "precompiled.hpp" - #include "c1/c1_LIR.hpp" - #include "c1/c1_LIRGenerator.hpp" -@@ -94,7 +100,7 @@ private: + // Fast path +- __ append(new LIR_OpZLoadBarrierTest(result)); ++ __ append(op); + + // Slow path + const address runtime_stub = load_barrier_on_oop_field_preloaded_runtime_stub(access.decorators()); + CodeStub* const stub = new ZLoadBarrierStubC1(access, result, runtime_stub); +- __ branch(lir_cond_notEqual, stub); ++ if (ZPlatformLoadBarrierTestResultInRegister) { ++ LIR_Opr res = access.gen()->new_register(result->type()); ++ op->set_result_opr(res); ++ __ cmp_branch(lir_cond_notEqual, res, LIR_OprFact::intptrConst(NULL_WORD), stub); ++ } else { ++ __ branch(lir_cond_notEqual, stub); ++ } + __ branch_destination(stub->continuation()); + } - public: - LIR_OpZLoadBarrierTest(LIR_Opr opr) : --#ifdef RISCV -+#if defined(RISCV) || defined(LOONGARCH) - LIR_Op(lir_zloadbarrier_test, LIR_OprFact::illegalOpr, NULL), - #else - LIR_Op(), diff --git a/src/hotspot/share/interpreter/interpreterRuntime.cpp b/src/hotspot/share/interpreter/interpreterRuntime.cpp index d66ed24d862..b682bb9d62a 100644 --- a/src/hotspot/share/interpreter/interpreterRuntime.cpp @@ -115247,7 +127767,7 @@ index 4e167ff451a..9441bae96c1 100644 #ifdef PPC void lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded=false); diff --git a/src/hotspot/share/jfr/utilities/jfrBigEndian.hpp b/src/hotspot/share/jfr/utilities/jfrBigEndian.hpp -index 597ddb3800f..427a9503eaf 100644 +index db2e7ebaad2..c381aa11475 100644 --- a/src/hotspot/share/jfr/utilities/jfrBigEndian.hpp +++ b/src/hotspot/share/jfr/utilities/jfrBigEndian.hpp @@ -22,6 +22,12 @@ @@ -115267,8 +127787,8 @@ index 597ddb3800f..427a9503eaf 100644 inline bool JfrBigEndian::platform_supports_unaligned_reads(void) { #if defined(IA32) || defined(AMD64) || defined(PPC) || defined(S390) return true; --#elif defined(ARM) || defined(AARCH64) || defined(RISCV) -+#elif defined(ARM) || defined(AARCH64) || defined(RISCV) || defined(MIPS) || defined(LOONGARCH) +-#elif defined(ARM) || defined(AARCH64) ++#elif defined(ARM) || defined(AARCH64) || defined(MIPS) || defined(LOONGARCH) return false; #else #warning "Unconfigured platform" @@ -115308,7 +127828,7 @@ index 3f57d487bae..3b49daaf96e 100644 #define VM_STRUCTS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \ diff --git a/src/hotspot/share/memory/metaspace.cpp b/src/hotspot/share/memory/metaspace.cpp -index d1882c70e2c..7e62537658a 100644 +index 2c42c013560..2bfb3d71634 100644 --- a/src/hotspot/share/memory/metaspace.cpp +++ b/src/hotspot/share/memory/metaspace.cpp @@ -23,6 +23,12 @@ 
@@ -115324,7 +127844,7 @@ index d1882c70e2c..7e62537658a 100644 #include "precompiled.hpp" #include "cds/metaspaceShared.hpp" #include "classfile/classLoaderData.hpp" -@@ -581,12 +587,15 @@ bool Metaspace::class_space_is_initialized() { +@@ -587,12 +593,15 @@ bool Metaspace::class_space_is_initialized() { // On error, returns an unreserved space. ReservedSpace Metaspace::reserve_address_space_for_compressed_classes(size_t size) { @@ -115332,8 +127852,8 @@ index d1882c70e2c..7e62537658a 100644 +#if defined(AARCH64) || defined(PPC64) || defined(MIPS64) || defined(LOONGARCH64) const size_t alignment = Metaspace::reserve_alignment(); - // AArch64: Try to align metaspace class space so that we can decode a - // compressed klass with a single MOVK instruction. We can do this iff the + // AArch64: Try to align metaspace so that we can decode a compressed + // klass with a single MOVK instruction. We can do this iff the // compressed class base is a multiple of 4G. + + // MIPS: Cannot mmap for 1G space at 4G position, and prepare for future optimization. @@ -115341,8 +127861,8 @@ index d1882c70e2c..7e62537658a 100644 // Additionally, above 32G, ensure the lower LogKlassAlignmentInBytes bits // of the upper 32-bits of the address are zero so we can handle a shift // when decoding. -@@ -643,16 +652,16 @@ ReservedSpace Metaspace::reserve_address_space_for_compressed_classes(size_t siz - return rs; +@@ -627,16 +636,16 @@ ReservedSpace Metaspace::reserve_address_space_for_compressed_classes(size_t siz + a += search_ranges[i].increment; } } -#endif // defined(AARCH64) || defined(PPC64) @@ -115361,115 +127881,8 @@ index d1882c70e2c..7e62537658a 100644 } #endif // _LP64 -diff --git a/src/hotspot/share/opto/classes.hpp b/src/hotspot/share/opto/classes.hpp -index 614d0b4e112..8d7764eb8a0 100644 ---- a/src/hotspot/share/opto/classes.hpp -+++ b/src/hotspot/share/opto/classes.hpp -@@ -22,6 +22,12 @@ - * - */ - -+/* -+ * This file has been modified by Loongson Technology in 2023, These -+ * modifications are Copyright (c) 2023, Loongson Technology, and are made -+ * available on the same license terms set forth above. -+ */ -+ - #include "utilities/macros.hpp" - - // The giant table of Node classes. -@@ -223,6 +229,7 @@ macro(StoreStoreFence) - macro(MemBarReleaseLock) - macro(MemBarVolatile) - macro(MemBarStoreStore) -+macro(SameAddrLoadFence) - macro(MergeMem) - macro(MinI) - macro(MinL) -diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp -index 4e8e39ffa74..6c7770dbf9c 100644 ---- a/src/hotspot/share/opto/compile.cpp -+++ b/src/hotspot/share/opto/compile.cpp -@@ -22,6 +22,12 @@ - * - */ - -+/* -+ * This file has been modified by Loongson Technology in 2023, These -+ * modifications are Copyright (c) 2023, Loongson Technology, and are made -+ * available on the same license terms set forth above. 
-+ */ -+ - #include "precompiled.hpp" - #include "jvm_io.h" - #include "asm/macroAssembler.hpp" -@@ -3522,6 +3528,7 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f - n->set_req(MemBarNode::Precedent, top()); - } - break; -+ case Op_SameAddrLoadFence: - case Op_MemBarAcquire: { - if (n->as_MemBar()->trailing_load() && n->req() > MemBarNode::Precedent) { - // At parse time, the trailing MemBarAcquire for a volatile load -diff --git a/src/hotspot/share/opto/memnode.cpp b/src/hotspot/share/opto/memnode.cpp -index 074b129b059..a1886f813c3 100644 ---- a/src/hotspot/share/opto/memnode.cpp -+++ b/src/hotspot/share/opto/memnode.cpp -@@ -22,6 +22,12 @@ - * - */ - -+/* -+ * This file has been modified by Loongson Technology in 2023, These -+ * modifications are Copyright (c) 2023, Loongson Technology, and are made -+ * available on the same license terms set forth above. -+ */ -+ - #include "precompiled.hpp" - #include "classfile/javaClasses.hpp" - #include "compiler/compileLog.hpp" -@@ -3298,6 +3304,7 @@ MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) { - case Op_MemBarReleaseLock: return new MemBarReleaseLockNode(C, atp, pn); - case Op_MemBarVolatile: return new MemBarVolatileNode(C, atp, pn); - case Op_MemBarCPUOrder: return new MemBarCPUOrderNode(C, atp, pn); -+ case Op_SameAddrLoadFence: return new SameAddrLoadFenceNode(C, atp, pn); - case Op_OnSpinWait: return new OnSpinWaitNode(C, atp, pn); - case Op_Initialize: return new InitializeNode(C, atp, pn); - default: ShouldNotReachHere(); return nullptr; -diff --git a/src/hotspot/share/opto/memnode.hpp b/src/hotspot/share/opto/memnode.hpp -index 99a30486274..71bf997533f 100644 ---- a/src/hotspot/share/opto/memnode.hpp -+++ b/src/hotspot/share/opto/memnode.hpp -@@ -22,6 +22,12 @@ - * - */ - -+/* -+ * This file has been modified by Loongson Technology in 2023, These -+ * modifications are Copyright (c) 2023, Loongson Technology, and are made -+ * available on the same license terms set forth above. -+ */ -+ - #ifndef SHARE_OPTO_MEMNODE_HPP - #define SHARE_OPTO_MEMNODE_HPP - -@@ -1336,6 +1342,14 @@ public: - virtual uint ideal_reg() const { return 0; } // not matched in the AD file - }; - -+// Used to prevent LoadLoad reorder for same address. -+class SameAddrLoadFenceNode: public MemBarNode { -+public: -+ SameAddrLoadFenceNode(Compile* C, int alias_idx, Node* precedent) -+ : MemBarNode(C, alias_idx, precedent) {} -+ virtual int Opcode() const; -+}; -+ - class OnSpinWaitNode: public MemBarNode { - public: - OnSpinWaitNode(Compile* C, int alias_idx, Node* precedent) diff --git a/src/hotspot/share/opto/output.cpp b/src/hotspot/share/opto/output.cpp -index 43c46a0eb8f..af7a37a1bf2 100644 +index 57d2fe05481..79c2b223588 100644 --- a/src/hotspot/share/opto/output.cpp +++ b/src/hotspot/share/opto/output.cpp @@ -22,6 +22,12 @@ @@ -115485,10 +127898,10 @@ index 43c46a0eb8f..af7a37a1bf2 100644 #include "precompiled.hpp" #include "asm/assembler.inline.hpp" #include "asm/macroAssembler.inline.hpp" -@@ -1016,6 +1022,27 @@ void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) { +@@ -1011,6 +1017,27 @@ void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) { // Add the safepoint in the DebugInfoRecorder if( !mach->is_MachCall() ) { - mcall = nullptr; + mcall = NULL; +#if defined(MIPS) || defined(LOONGARCH) + // safepoint_pc_offset should point to tha last instruction in safePoint. + // In X86 and sparc, their safePoints only contain one instruction. 
@@ -115513,7 +127926,7 @@ index 43c46a0eb8f..af7a37a1bf2 100644 C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map); } else { mcall = mach->as_MachCall(); -@@ -1691,6 +1718,22 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) { +@@ -1686,6 +1713,22 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) { DEBUG_ONLY(uint instr_offset = cb->insts_size()); n->emit(*cb, C->regalloc()); current_offset = cb->insts_size(); @@ -115537,7 +127950,7 @@ index 43c46a0eb8f..af7a37a1bf2 100644 // Above we only verified that there is enough space in the instruction section. // However, the instruction may emit stubs that cause code buffer expansion. diff --git a/src/hotspot/share/opto/type.cpp b/src/hotspot/share/opto/type.cpp -index f13ffbc12af..f78be3e0871 100644 +index 4c6fe91aee9..9a204facf42 100644 --- a/src/hotspot/share/opto/type.cpp +++ b/src/hotspot/share/opto/type.cpp @@ -22,6 +22,12 @@ @@ -115568,43 +127981,8 @@ index f13ffbc12af..f78be3e0871 100644 #else // all other { Bad, T_ILLEGAL, "vectormask:", false, Op_RegVectMask, relocInfo::none }, // VectorMask. { Bad, T_ILLEGAL, "vectora:", false, Op_VecA, relocInfo::none }, // VectorA. -diff --git a/src/hotspot/share/runtime/objectMonitor.cpp b/src/hotspot/share/runtime/objectMonitor.cpp -index 374816fd355..e7015344a18 100644 ---- a/src/hotspot/share/runtime/objectMonitor.cpp -+++ b/src/hotspot/share/runtime/objectMonitor.cpp -@@ -22,6 +22,12 @@ - * - */ - -+/* -+ * This file has been modified by Loongson Technology in 2023, These -+ * modifications are Copyright (c) 2023, Loongson Technology, and are made -+ * available on the same license terms set forth above. -+ */ -+ - #include "precompiled.hpp" - #include "classfile/vmSymbols.hpp" - #include "gc/shared/oopStorage.hpp" -@@ -362,6 +368,9 @@ bool ObjectMonitor::enter(JavaThread* current) { - } - - assert(owner_raw() != current, "invariant"); -+ // Thread _succ != current assertion load reording before Thread if (_succ == current) _succ = nullptr. -+ // But expect order is firstly if (_succ == current) _succ = nullptr then _succ != current assertion. -+ DEBUG_ONLY(LOONGARCH64_ONLY(__asm__ __volatile__ ("dbar 0x700\n");)MIPS64_ONLY(OrderAccess::loadload();)) - assert(_succ != current, "invariant"); - assert(!SafepointSynchronize::is_at_safepoint(), "invariant"); - assert(current->thread_state() != _thread_blocked, "invariant"); -@@ -723,6 +732,7 @@ void ObjectMonitor::EnterI(JavaThread* current) { - } - - // The Spin failed -- Enqueue and park the thread ... 
-+ DEBUG_ONLY(LOONGARCH64_ONLY(__asm__ __volatile__ ("dbar 0x700\n");)MIPS64_ONLY(OrderAccess::loadload();)) - assert(_succ != current, "invariant"); - assert(owner_raw() != current, "invariant"); - assert(_Responsible != current, "invariant"); diff --git a/src/hotspot/share/runtime/os.cpp b/src/hotspot/share/runtime/os.cpp -index e786b057262..6f34a7d5ea5 100644 +index bb6cea80efc..a8847dcf92d 100644 --- a/src/hotspot/share/runtime/os.cpp +++ b/src/hotspot/share/runtime/os.cpp @@ -22,6 +22,12 @@ @@ -115620,7 +127998,7 @@ index e786b057262..6f34a7d5ea5 100644 #include "precompiled.hpp" #include "jvm.h" #include "classfile/javaClasses.hpp" -@@ -1238,7 +1244,8 @@ bool os::is_first_C_frame(frame* fr) { +@@ -1312,7 +1318,8 @@ bool os::is_first_C_frame(frame* fr) { if ((uintptr_t)fr->sender_sp() == (uintptr_t)-1 || is_pointer_bad(fr->sender_sp())) return true; uintptr_t old_fp = (uintptr_t)fr->link_or_null(); @@ -115631,7 +128009,7 @@ index e786b057262..6f34a7d5ea5 100644 // stack grows downwards; if old_fp is below current fp or if the stack diff --git a/src/hotspot/share/runtime/sharedRuntime.cpp b/src/hotspot/share/runtime/sharedRuntime.cpp -index 9af4b513a99..1a3e9fd0ad5 100644 +index dfebed9c146..767532c826d 100644 --- a/src/hotspot/share/runtime/sharedRuntime.cpp +++ b/src/hotspot/share/runtime/sharedRuntime.cpp @@ -22,6 +22,12 @@ @@ -115647,7 +128025,7 @@ index 9af4b513a99..1a3e9fd0ad5 100644 #include "precompiled.hpp" #include "classfile/javaClasses.hpp" #include "jvm.h" -@@ -3054,7 +3060,7 @@ void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) { +@@ -3041,7 +3047,7 @@ void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) { CodeBuffer buffer(buf); struct { double data[20]; } locs_buf; buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo)); @@ -115689,68 +128067,8 @@ index 6e3aa30b0b9..8f1d486f5cb 100644 static const double S1 = -1.66666666666666324348e-01, /* 0xBFC55555, 0x55555549 */ S2 = 8.33333333332248946124e-03, /* 0x3F811111, 0x1110F8A6 */ -diff --git a/src/hotspot/share/runtime/thread.inline.hpp b/src/hotspot/share/runtime/thread.inline.hpp -index d86fce3c8ac..71bfd4dfa19 100644 ---- a/src/hotspot/share/runtime/thread.inline.hpp -+++ b/src/hotspot/share/runtime/thread.inline.hpp -@@ -23,6 +23,12 @@ - * - */ - -+/* -+ * This file has been modified by Loongson Technology in 2023, These -+ * modifications are Copyright (c) 2018, 2023, Loongson Technology, and are made -+ * available on the same license terms set forth above. -+ */ -+ - #ifndef SHARE_RUNTIME_THREAD_INLINE_HPP - #define SHARE_RUNTIME_THREAD_INLINE_HPP - -@@ -132,7 +138,7 @@ inline void JavaThread::set_pending_async_exception(oop e) { - } - - inline JavaThreadState JavaThread::thread_state() const { --#if defined(PPC64) || defined (AARCH64) || defined(RISCV64) -+#if defined(PPC64) || defined (AARCH64) || defined(RISCV64) || defined(LOONGARCH64) - // Use membars when accessing volatile _thread_state. See - // Threads::create_vm() for size checks. 
- return (JavaThreadState) Atomic::load_acquire((volatile jint*)&_thread_state); -@@ -144,7 +150,7 @@ inline JavaThreadState JavaThread::thread_state() const { - inline void JavaThread::set_thread_state(JavaThreadState s) { - assert(current_or_null() == NULL || current_or_null() == this, - "state change should only be called by the current thread"); --#if defined(PPC64) || defined (AARCH64) || defined(RISCV64) -+#if defined(PPC64) || defined (AARCH64) || defined(RISCV64) || defined(LOONGARCH64) - // Use membars when accessing volatile _thread_state. See - // Threads::create_vm() for size checks. - Atomic::release_store((volatile jint*)&_thread_state, (jint)s); -diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp -index 33de84a68c1..7dc3d58f381 100644 ---- a/src/hotspot/share/runtime/vmStructs.cpp -+++ b/src/hotspot/share/runtime/vmStructs.cpp -@@ -22,6 +22,12 @@ - * - */ - -+/* -+ * This file has been modified by Loongson Technology in 2023, These -+ * modifications are Copyright (c) 2023, Loongson Technology, and are made -+ * available on the same license terms set forth above. -+ */ -+ - #include "precompiled.hpp" - #include "cds/filemap.hpp" - #include "ci/ciField.hpp" -@@ -1597,6 +1603,7 @@ typedef HashtableEntry KlassHashtableEntry; - declare_c2_type(StoreFenceNode, MemBarNode) \ - declare_c2_type(MemBarVolatileNode, MemBarNode) \ - declare_c2_type(MemBarCPUOrderNode, MemBarNode) \ -+ declare_c2_type(SameAddrLoadFenceNode, MemBarNode) \ - declare_c2_type(OnSpinWaitNode, MemBarNode) \ - declare_c2_type(BlackholeNode, MultiNode) \ - declare_c2_type(InitializeNode, MemBarNode) \ diff --git a/src/hotspot/share/utilities/macros.hpp b/src/hotspot/share/utilities/macros.hpp -index 33ecfe089f8..4d024b35735 100644 +index 501ba0dbabc..8783e6dca2f 100644 --- a/src/hotspot/share/utilities/macros.hpp +++ b/src/hotspot/share/utilities/macros.hpp @@ -22,6 +22,12 @@ @@ -115805,7 +128123,7 @@ index 33ecfe089f8..4d024b35735 100644 #if defined(PPC32) || defined(PPC64) #ifndef PPC #define PPC -@@ -605,16 +643,34 @@ +@@ -579,16 +617,34 @@ // OS_CPU_HEADER(vmStructs) --> vmStructs_linux_x86.hpp // // basename.hpp / basename.inline.hpp @@ -115841,7 +128159,7 @@ index 33ecfe089f8..4d024b35735 100644 #define COMPILER_HEADER(basename) XSTR(COMPILER_HEADER_STEM(basename).hpp) #define COMPILER_HEADER_INLINE(basename) XSTR(COMPILER_HEADER_STEM(basename).inline.hpp) diff --git a/src/jdk.hotspot.agent/linux/native/libsaproc/LinuxDebuggerLocal.cpp b/src/jdk.hotspot.agent/linux/native/libsaproc/LinuxDebuggerLocal.cpp -index 9accba375a2..200bb1e82f3 100644 +index b7a2c6dde8f..c2a144f49b8 100644 --- a/src/jdk.hotspot.agent/linux/native/libsaproc/LinuxDebuggerLocal.cpp +++ b/src/jdk.hotspot.agent/linux/native/libsaproc/LinuxDebuggerLocal.cpp @@ -23,6 +23,13 @@ @@ -115858,8 +128176,8 @@ index 9accba375a2..200bb1e82f3 100644 #include #include "libproc.h" #include "proc_service.h" -@@ -64,6 +71,10 @@ - #include "sun_jvm_hotspot_debugger_riscv64_RISCV64ThreadContext.h" +@@ -60,6 +67,10 @@ + #include "sun_jvm_hotspot_debugger_aarch64_AARCH64ThreadContext.h" #endif +#ifdef loongarch64 @@ -115869,18 +128187,18 @@ index 9accba375a2..200bb1e82f3 100644 class AutoJavaString { JNIEnv* m_env; jstring m_str; -@@ -412,7 +423,7 @@ JNIEXPORT jbyteArray JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLo +@@ -408,7 +419,7 @@ JNIEXPORT jbyteArray JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLo return (err == PS_OK)? 
array : 0; } --#if defined(i586) || defined(amd64) || defined(ppc64) || defined(ppc64le) || defined(aarch64) || defined(riscv64) -+#if defined(i586) || defined(amd64) || defined(ppc64) || defined(ppc64le) || defined(aarch64) || defined(riscv64) || defined(loongarch64) +-#if defined(i586) || defined(amd64) || defined(ppc64) || defined(ppc64le) || defined(aarch64) ++#if defined(i586) || defined(amd64) || defined(ppc64) || defined(ppc64le) || defined(aarch64) || defined(loongarch64) extern "C" JNIEXPORT jlongArray JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLocal_getThreadIntegerRegisterSet0 (JNIEnv *env, jobject this_obj, jint lwp_id) { -@@ -447,6 +458,9 @@ JNIEXPORT jlongArray JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLo - #ifdef riscv64 - #define NPRGREG sun_jvm_hotspot_debugger_riscv64_RISCV64ThreadContext_NPRGREG +@@ -440,6 +451,9 @@ JNIEXPORT jlongArray JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLo + #ifdef aarch64 + #define NPRGREG sun_jvm_hotspot_debugger_aarch64_AARCH64ThreadContext_NPRGREG #endif +#ifdef loongarch64 +#define NPRGREG sun_jvm_hotspot_debugger_loongarch64_LOONGARCH64ThreadContext_NPRGREG @@ -115888,9 +128206,9 @@ index 9accba375a2..200bb1e82f3 100644 #if defined(ppc64) || defined(ppc64le) #define NPRGREG sun_jvm_hotspot_debugger_ppc64_PPC64ThreadContext_NPRGREG #endif -@@ -561,6 +575,18 @@ JNIEXPORT jlongArray JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLo - - #endif /* riscv64 */ +@@ -516,6 +530,18 @@ JNIEXPORT jlongArray JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLo + } + #endif /* aarch64 */ +#if defined(loongarch64) + @@ -115908,7 +128226,7 @@ index 9accba375a2..200bb1e82f3 100644 #define REG_INDEX(reg) sun_jvm_hotspot_debugger_ppc64_PPC64ThreadContext_##reg diff --git a/src/jdk.hotspot.agent/linux/native/libsaproc/libproc.h b/src/jdk.hotspot.agent/linux/native/libsaproc/libproc.h -index a69496e77a4..64312b4705d 100644 +index b0fcfb1e4d5..17920fafec9 100644 --- a/src/jdk.hotspot.agent/linux/native/libsaproc/libproc.h +++ b/src/jdk.hotspot.agent/linux/native/libsaproc/libproc.h @@ -22,6 +22,13 @@ @@ -115925,7 +128243,7 @@ index a69496e77a4..64312b4705d 100644 #ifndef _LIBPROC_H_ #define _LIBPROC_H_ -@@ -37,7 +44,7 @@ +@@ -37,13 +44,17 @@ #include #define user_regs_struct pt_regs #endif @@ -115934,9 +128252,8 @@ index a69496e77a4..64312b4705d 100644 #include #define user_regs_struct user_pt_regs #elif defined(arm) -@@ -46,6 +53,10 @@ - #elif defined(riscv64) #include + #define user_regs_struct pt_regs #endif +#if defined(mips) || defined(mipsel) || defined(mips64) || defined(mips64el) +#include @@ -115946,7 +128263,7 @@ index a69496e77a4..64312b4705d 100644 // This C bool type must be int for compatibility with Linux calls and // it would be a mistake to equivalence it to C++ bool on many platforms diff --git a/src/jdk.hotspot.agent/linux/native/libsaproc/ps_proc.c b/src/jdk.hotspot.agent/linux/native/libsaproc/ps_proc.c -index 3068f475626..d35cc73221f 100644 +index b5fec835a98..d991f29cbb1 100644 --- a/src/jdk.hotspot.agent/linux/native/libsaproc/ps_proc.c +++ b/src/jdk.hotspot.agent/linux/native/libsaproc/ps_proc.c @@ -22,6 +22,12 @@ @@ -115962,17 +128279,17 @@ index 3068f475626..d35cc73221f 100644 #include #include #include -@@ -143,7 +149,7 @@ static bool process_get_lwp_regs(struct ps_prochandle* ph, pid_t pid, struct use - return false; - } - return true; --#elif defined(PTRACE_GETREGS_REQ) -+#elif defined(PTRACE_GETREGS_REQ) && !defined(loongarch64) - if (ptrace(PTRACE_GETREGS_REQ, pid, NULL, 
user) < 0) { +@@ -138,7 +144,7 @@ static bool process_get_lwp_regs(struct ps_prochandle* ph, pid_t pid, struct use + #define PTRACE_GETREGS_REQ PT_GETREGS + #endif + +-#ifdef PTRACE_GETREGS_REQ ++#if defined(PTRACE_GETREGS_REQ) && !defined(loongarch64) + if (ptrace_getregs(PTRACE_GETREGS_REQ, pid, user, NULL) < 0) { print_debug("ptrace(PTRACE_GETREGS, ...) failed for lwp(%d) errno(%d) \"%s\"\n", pid, errno, strerror(errno)); diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HotSpotAgent.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HotSpotAgent.java -index e0e9b4b6727..9af1218ed46 100644 +index aa021510567..021ba6f2820 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HotSpotAgent.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/HotSpotAgent.java @@ -23,6 +23,12 @@ @@ -115988,19 +128305,19 @@ index e0e9b4b6727..9af1218ed46 100644 package sun.jvm.hotspot; import java.rmi.RemoteException; -@@ -38,6 +44,8 @@ import sun.jvm.hotspot.debugger.MachineDescriptionPPC64; +@@ -37,6 +43,8 @@ import sun.jvm.hotspot.debugger.MachineDescriptionAMD64; + import sun.jvm.hotspot.debugger.MachineDescriptionPPC64; import sun.jvm.hotspot.debugger.MachineDescriptionAArch64; - import sun.jvm.hotspot.debugger.MachineDescriptionRISCV64; import sun.jvm.hotspot.debugger.MachineDescriptionIntelX86; +import sun.jvm.hotspot.debugger.MachineDescriptionMIPS64; +import sun.jvm.hotspot.debugger.MachineDescriptionLOONGARCH64; import sun.jvm.hotspot.debugger.NoSuchSymbolException; import sun.jvm.hotspot.debugger.bsd.BsdDebuggerLocal; import sun.jvm.hotspot.debugger.linux.LinuxDebuggerLocal; -@@ -572,6 +580,10 @@ public class HotSpotAgent { +@@ -569,6 +577,10 @@ public class HotSpotAgent { + machDesc = new MachineDescriptionPPC64(); + } else if (cpu.equals("aarch64")) { machDesc = new MachineDescriptionAArch64(); - } else if (cpu.equals("riscv64")) { - machDesc = new MachineDescriptionRISCV64(); + } else if (cpu.equals("mips64")) { + machDesc = new MachineDescriptionMIPS64(); + } else if (cpu.equals("loongarch64")) { @@ -116103,7 +128420,7 @@ index 00000000000..1b49efd2017 + } +} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/LinuxCDebugger.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/LinuxCDebugger.java -index 469bb6e0665..ea3a118de2a 100644 +index 491e3d5dc2c..db3b1cc20ee 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/LinuxCDebugger.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/linux/LinuxCDebugger.java @@ -23,6 +23,12 @@ @@ -116119,10 +128436,10 @@ index 469bb6e0665..ea3a118de2a 100644 package sun.jvm.hotspot.debugger.linux; import java.io.*; -@@ -34,12 +40,16 @@ import sun.jvm.hotspot.debugger.x86.*; +@@ -33,11 +39,15 @@ import sun.jvm.hotspot.debugger.cdbg.*; + import sun.jvm.hotspot.debugger.x86.*; import sun.jvm.hotspot.debugger.amd64.*; import sun.jvm.hotspot.debugger.aarch64.*; - import sun.jvm.hotspot.debugger.riscv64.*; +import sun.jvm.hotspot.debugger.mips64.*; +import sun.jvm.hotspot.debugger.loongarch64.*; import sun.jvm.hotspot.debugger.ppc64.*; @@ -116130,13 +128447,12 @@ index 469bb6e0665..ea3a118de2a 100644 import sun.jvm.hotspot.debugger.linux.amd64.*; import sun.jvm.hotspot.debugger.linux.ppc64.*; import sun.jvm.hotspot.debugger.linux.aarch64.*; - import sun.jvm.hotspot.debugger.linux.riscv64.*; +import sun.jvm.hotspot.debugger.linux.mips64.*; +import sun.jvm.hotspot.debugger.linux.loongarch64.*; import sun.jvm.hotspot.utilities.*; 
class LinuxCDebugger implements CDebugger { -@@ -93,7 +103,21 @@ class LinuxCDebugger implements CDebugger { +@@ -91,7 +101,21 @@ class LinuxCDebugger implements CDebugger { Address pc = context.getRegisterAsAddress(AMD64ThreadContext.RIP); if (pc == null) return null; return LinuxAMD64CFrame.getTopFrame(dbg, pc, context); @@ -117566,7 +129882,7 @@ index 00000000000..b39b0144901 + } +} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java -index d16ac8aae51..de1e70a7290 100644 +index 4a2fa691d3f..59e4a3aca46 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java @@ -22,6 +22,12 @@ @@ -117582,19 +129898,19 @@ index d16ac8aae51..de1e70a7290 100644 package sun.jvm.hotspot.runtime; import java.util.*; -@@ -36,6 +42,8 @@ import sun.jvm.hotspot.runtime.linux_amd64.LinuxAMD64JavaThreadPDAccess; +@@ -35,6 +41,8 @@ import sun.jvm.hotspot.runtime.linux_x86.LinuxX86JavaThreadPDAccess; + import sun.jvm.hotspot.runtime.linux_amd64.LinuxAMD64JavaThreadPDAccess; import sun.jvm.hotspot.runtime.linux_aarch64.LinuxAARCH64JavaThreadPDAccess; - import sun.jvm.hotspot.runtime.linux_riscv64.LinuxRISCV64JavaThreadPDAccess; import sun.jvm.hotspot.runtime.linux_ppc64.LinuxPPC64JavaThreadPDAccess; +import sun.jvm.hotspot.runtime.linux_mips64.LinuxMIPS64JavaThreadPDAccess; +import sun.jvm.hotspot.runtime.linux_loongarch64.LinuxLOONGARCH64JavaThreadPDAccess; import sun.jvm.hotspot.runtime.bsd_x86.BsdX86JavaThreadPDAccess; import sun.jvm.hotspot.runtime.bsd_amd64.BsdAMD64JavaThreadPDAccess; import sun.jvm.hotspot.runtime.bsd_aarch64.BsdAARCH64JavaThreadPDAccess; -@@ -116,6 +124,10 @@ public class Threads { +@@ -113,6 +121,10 @@ public class Threads { + access = new LinuxPPC64JavaThreadPDAccess(); + } else if (cpu.equals("aarch64")) { access = new LinuxAARCH64JavaThreadPDAccess(); - } else if (cpu.equals("riscv64")) { - access = new LinuxRISCV64JavaThreadPDAccess(); + } else if (cpu.equals("mips64")) { + access = new LinuxMIPS64JavaThreadPDAccess(); + } else if (cpu.equals("loongarch64")) { @@ -119680,7 +131996,7 @@ index 00000000000..f2da760af4a + protected Address getLocationPD(VMReg reg) { return null; } +} diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/PlatformInfo.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/PlatformInfo.java -index f4cd4873207..6901946e58a 100644 +index 3e66e24700b..f894792f912 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/PlatformInfo.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/PlatformInfo.java @@ -22,6 +22,13 @@ @@ -119701,8 +132017,8 @@ index f4cd4873207..6901946e58a 100644 public static boolean knownCPU(String cpu) { final String[] KNOWN = -- new String[] {"i386", "x86", "x86_64", "amd64", "ppc64", "ppc64le", "aarch64", "riscv64"}; -+ new String[] {"i386", "x86", "x86_64", "amd64", "ppc64", "ppc64le", "aarch64", "riscv64", "mips64", "mips64el", "loongarch64"}; +- new String[] {"i386", "x86", "x86_64", "amd64", "ppc64", "ppc64le", "aarch64"}; ++ new String[] {"i386", "x86", "x86_64", "amd64", "ppc64", "ppc64le", "aarch64", "mips64", "mips64el", "loongarch64"}; for(String s : KNOWN) { if(s.equals(cpu)) @@ -119719,15 +132035,15 @@ index f4cd4873207..6901946e58a 100644 return cpu; } -diff --git 
a/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotJVMCIBackendFactory.java b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotJVMCIBackendFactory.java +diff --git a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.loongarch64/src/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotJVMCIBackendFactory.java b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.loongarch64/src/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotJVMCIBackendFactory.java new file mode 100644 -index 00000000000..1f54e9f3c59 +index 00000000000..e3a7daa1983 --- /dev/null -+++ b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotJVMCIBackendFactory.java ++++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.loongarch64/src/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotJVMCIBackendFactory.java @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. -+ * Copyright (c) 2022, 2023, Loongson Technology. All rights reserved. ++ * Copyright (c) 2022, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it @@ -119867,15 +132183,15 @@ index 00000000000..1f54e9f3c59 + return new JVMCIBackend(metaAccess, codeCache, constantReflection, stackIntrospection); + } +} -diff --git a/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotRegisterConfig.java b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotRegisterConfig.java +diff --git a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.loongarch64/src/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotRegisterConfig.java b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.loongarch64/src/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotRegisterConfig.java new file mode 100644 -index 00000000000..e1a007000d2 +index 00000000000..2ee6a4b8472 --- /dev/null -+++ b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotRegisterConfig.java ++++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.loongarch64/src/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotRegisterConfig.java @@ -0,0 +1,297 @@ +/* + * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. -+ * Copyright (c) 2022, 2023, Loongson Technology. All rights reserved. ++ * Copyright (c) 2022, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it @@ -120170,15 +132486,15 @@ index 00000000000..e1a007000d2 + return String.format("Allocatable: " + getAllocatableRegisters() + "%n" + "CallerSave: " + getCallerSaveRegisters() + "%n"); + } +} -diff --git a/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotVMConfig.java b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotVMConfig.java +diff --git a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.loongarch64/src/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotVMConfig.java b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.loongarch64/src/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotVMConfig.java new file mode 100644 -index 00000000000..0a2e857204c +index 00000000000..c8605976a0d --- /dev/null -+++ b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotVMConfig.java ++++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.loongarch64/src/jdk/vm/ci/hotspot/loongarch64/LoongArch64HotSpotVMConfig.java @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. -+ * Copyright (c) 2022, 2023, Loongson Technology. All rights reserved. ++ * Copyright (c) 2022, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it @@ -120253,15 +132569,15 @@ index 00000000000..0a2e857204c + final long loongarch64UAL = getConstant("VM_Version::CPU_UAL", Long.class); + // Checkstyle: resume +} -diff --git a/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/loongarch64/package-info.java b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/loongarch64/package-info.java +diff --git a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.loongarch64/src/jdk/vm/ci/hotspot/loongarch64/package-info.java b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.loongarch64/src/jdk/vm/ci/hotspot/loongarch64/package-info.java new file mode 100644 -index 00000000000..74c6ca9801f +index 00000000000..1048ea9d64b --- /dev/null -+++ b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/loongarch64/package-info.java ++++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.loongarch64/src/jdk/vm/ci/hotspot/loongarch64/package-info.java @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. -+ * Copyright (c) 2022, 2023, Loongson Technology. All rights reserved. ++ * Copyright (c) 2022, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it @@ -120287,15 +132603,15 @@ index 00000000000..74c6ca9801f + * The LoongArch64 HotSpot specific portions of the JVMCI API. 
+ */ +package jdk.vm.ci.hotspot.loongarch64; -diff --git a/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/loongarch64/LoongArch64.java b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/loongarch64/LoongArch64.java +diff --git a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.loongarch64/src/jdk/vm/ci/loongarch64/LoongArch64.java b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.loongarch64/src/jdk/vm/ci/loongarch64/LoongArch64.java new file mode 100644 -index 00000000000..930b17e820a +index 00000000000..99201889b84 --- /dev/null -+++ b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/loongarch64/LoongArch64.java ++++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.loongarch64/src/jdk/vm/ci/loongarch64/LoongArch64.java @@ -0,0 +1,249 @@ +/* + * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. -+ * Copyright (c) 2022, 2023, Loongson Technology. All rights reserved. ++ * Copyright (c) 2022, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it @@ -120542,15 +132858,15 @@ index 00000000000..930b17e820a + } + } +} -diff --git a/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/loongarch64/LoongArch64Kind.java b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/loongarch64/LoongArch64Kind.java +diff --git a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.loongarch64/src/jdk/vm/ci/loongarch64/LoongArch64Kind.java b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.loongarch64/src/jdk/vm/ci/loongarch64/LoongArch64Kind.java new file mode 100644 -index 00000000000..047a1dbbe36 +index 00000000000..84b7f2027f1 --- /dev/null -+++ b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/loongarch64/LoongArch64Kind.java ++++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.loongarch64/src/jdk/vm/ci/loongarch64/LoongArch64Kind.java @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. -+ * Copyright (c) 2022, 2023, Loongson Technology. All rights reserved. ++ * Copyright (c) 2022, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it @@ -120711,15 +133027,15 @@ index 00000000000..047a1dbbe36 + } + } +} -diff --git a/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/loongarch64/package-info.java b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/loongarch64/package-info.java +diff --git a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.loongarch64/src/jdk/vm/ci/loongarch64/package-info.java b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.loongarch64/src/jdk/vm/ci/loongarch64/package-info.java new file mode 100644 -index 00000000000..6df1b7b3a92 +index 00000000000..9d020833eaf --- /dev/null -+++ b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/loongarch64/package-info.java ++++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.loongarch64/src/jdk/vm/ci/loongarch64/package-info.java @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. -+ * Copyright (c) 2022, 2023, Loongson Technology. All rights reserved. ++ * Copyright (c) 2022, Loongson Technology. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it @@ -120784,7 +133100,7 @@ index 66dac7130bd..dd2ccd340f2 100644 OUTFLAGS += -o $@ else diff --git a/test/hotspot/jtreg/compiler/cpuflags/TestAESIntrinsicsOnSupportedConfig.java b/test/hotspot/jtreg/compiler/cpuflags/TestAESIntrinsicsOnSupportedConfig.java -index 4c56daebfb8..92836130408 100644 +index 273da8ce7f2..3c81fc96949 100644 --- a/test/hotspot/jtreg/compiler/cpuflags/TestAESIntrinsicsOnSupportedConfig.java +++ b/test/hotspot/jtreg/compiler/cpuflags/TestAESIntrinsicsOnSupportedConfig.java @@ -21,12 +21,18 @@ @@ -120804,11 +133120,11 @@ index 4c56daebfb8..92836130408 100644 * java.management - * @requires vm.cpu.features ~= ".*aes.*" & !vm.graal.enabled + * @requires (vm.cpu.features ~= ".*aes.*" | os.arch == "loongarch64") & !vm.graal.enabled - * @build jdk.test.whitebox.WhiteBox - * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox + * @build sun.hotspot.WhiteBox + * @run driver jdk.test.lib.helpers.ClassFileInstaller sun.hotspot.WhiteBox * @run main/othervm/timeout=600 -Xbootclasspath/a:. diff --git a/test/hotspot/jtreg/compiler/cpuflags/TestAESIntrinsicsOnUnsupportedConfig.java b/test/hotspot/jtreg/compiler/cpuflags/TestAESIntrinsicsOnUnsupportedConfig.java -index 03016ea3dd6..62ce6c1a7a5 100644 +index f13c5dd2b89..460a3dafe38 100644 --- a/test/hotspot/jtreg/compiler/cpuflags/TestAESIntrinsicsOnUnsupportedConfig.java +++ b/test/hotspot/jtreg/compiler/cpuflags/TestAESIntrinsicsOnUnsupportedConfig.java @@ -21,6 +21,12 @@ @@ -120827,14 +133143,14 @@ index 03016ea3dd6..62ce6c1a7a5 100644 @@ -28,7 +34,7 @@ * java.management * - * @build jdk.test.whitebox.WhiteBox + * @build sun.hotspot.WhiteBox - * @requires !(vm.cpu.features ~= ".*aes.*") + * @requires !(vm.cpu.features ~= ".*aes.*" | os.arch == "loongarch64") * @requires vm.compiler1.enabled | !vm.graal.enabled - * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox + * @run driver jdk.test.lib.helpers.ClassFileInstaller sun.hotspot.WhiteBox * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions diff --git a/test/hotspot/jtreg/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForOtherCPU.java b/test/hotspot/jtreg/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForOtherCPU.java -index 468cd83d7a2..40d2b03e301 100644 +index 2b8d143dd68..0209ea644ef 100644 --- a/test/hotspot/jtreg/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForOtherCPU.java +++ b/test/hotspot/jtreg/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForOtherCPU.java @@ -21,6 +21,12 @@ @@ -120854,59 +133170,56 @@ index 468cd83d7a2..40d2b03e301 100644 /** * Generic test case for SHA-related options targeted to any CPU except -- * AArch64, RISCV64, PPC, S390x, and X86. -+ * AArch64, RISCV64, PPC, S390x, LoongArch64, and X86. +- * AArch64, PPC, S390x, and X86. ++ * AArch64, PPC, S390x, LoongArch64, and X86. */ public class GenericTestCaseForOtherCPU extends DigestOptionsBase.TestCase { -@@ -44,14 +50,15 @@ public class GenericTestCaseForOtherCPU extends +@@ -44,13 +50,14 @@ public class GenericTestCaseForOtherCPU extends } public GenericTestCaseForOtherCPU(String optionName, boolean checkUseSHA) { -- // Execute the test case on any CPU except AArch64, RISCV64, PPC, S390x, and X86. -+ // Execute the test case on any CPU except AArch64, RISCV64, PPC, S390x, LoongArch64, and X86. +- // Execute the test case on any CPU except AArch64, PPC, S390x, and X86. 
++ // Execute the test case on any CPU except AArch64, PPC, S390x, LoongArch64, and X86. super(optionName, new NotPredicate( new OrPredicate(Platform::isAArch64, - new OrPredicate(Platform::isRISCV64, new OrPredicate(Platform::isS390x, new OrPredicate(Platform::isPPC, + new OrPredicate(Platform::isLoongArch64, new OrPredicate(Platform::isX64, -- Platform::isX86))))))); -+ Platform::isX86)))))))); +- Platform::isX86)))))); ++ Platform::isX86))))))); this.checkUseSHA = checkUseSHA; } diff --git a/test/hotspot/jtreg/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/CodeInstallationTest.java b/test/hotspot/jtreg/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/CodeInstallationTest.java -index d7ecc7c04ef..0d47a2f3037 100644 +index d7ecc7c04ef..4095202ea30 100644 --- a/test/hotspot/jtreg/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/CodeInstallationTest.java +++ b/test/hotspot/jtreg/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/CodeInstallationTest.java -@@ -20,16 +20,25 @@ +@@ -20,14 +20,23 @@ * or visit www.oracle.com if you need additional information or have any * questions. */ + +/* -+ * This file has been modified by Loongson Technology in 2023, These -+ * modifications are Copyright (c) 2022, 2023, Loongson Technology, and are made ++ * This file has been modified by Loongson Technology in 2022, These ++ * modifications are Copyright (c) 2022, Loongson Technology, and are made + * available on the same license terms set forth above. + */ + package jdk.vm.ci.code.test; import jdk.vm.ci.aarch64.AArch64; - import jdk.vm.ci.amd64.AMD64; +import jdk.vm.ci.loongarch64.LoongArch64; + import jdk.vm.ci.amd64.AMD64; import jdk.vm.ci.code.Architecture; import jdk.vm.ci.code.CodeCacheProvider; import jdk.vm.ci.code.InstalledCode; import jdk.vm.ci.code.TargetDescription; ++import jdk.vm.ci.code.test.loongarch64.LoongArch64TestAssembler; import jdk.vm.ci.code.test.aarch64.AArch64TestAssembler; import jdk.vm.ci.code.test.amd64.AMD64TestAssembler; -+import jdk.vm.ci.code.test.loongarch64.LoongArch64TestAssembler; import jdk.vm.ci.hotspot.HotSpotCodeCacheProvider; - import jdk.vm.ci.hotspot.HotSpotCompiledCode; - import jdk.vm.ci.hotspot.HotSpotJVMCIRuntime; @@ -75,6 +84,8 @@ public class CodeInstallationTest { return new AMD64TestAssembler(codeCache, config); } else if (arch instanceof AArch64) { @@ -121016,7 +133329,7 @@ index f473d089a54..6ca7b76f1e7 100644 */ diff --git a/test/hotspot/jtreg/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/NativeCallTest.java b/test/hotspot/jtreg/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/NativeCallTest.java -index 0f5c8be3f2d..3b6b464b1c1 100644 +index dce107095d5..d8c855dfb3a 100644 --- a/test/hotspot/jtreg/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/NativeCallTest.java +++ b/test/hotspot/jtreg/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/NativeCallTest.java @@ -21,10 +21,16 @@ @@ -121722,7 +134035,7 @@ index 00000000000..4c76868453a + +} diff --git a/test/hotspot/jtreg/compiler/lib/ir_framework/IRNode.java b/test/hotspot/jtreg/compiler/lib/ir_framework/IRNode.java -index 2f2395b77c6..58482edb32e 100644 +index da8496a078f..dd17751a40e 100644 --- a/test/hotspot/jtreg/compiler/lib/ir_framework/IRNode.java +++ b/test/hotspot/jtreg/compiler/lib/ir_framework/IRNode.java @@ -21,6 +21,12 @@ @@ -121737,20 +134050,20 @@ index 2f2395b77c6..58482edb32e 100644 + package compiler.lib.ir_framework; - import compiler.lib.ir_framework.driver.irmatching.IRMatcher; + import 
compiler.lib.ir_framework.driver.IRMatcher; @@ -58,8 +64,8 @@ public class IRNode { - public static final String ALLOC_ARRAY = "(.*precise klass \\[L.*\\R((.*(?i:mov|mv|xor|nop|spill).*|\\s*|.*LGHI.*)\\R)*.*(?i:call,static).*wrapper for: _new_array_Java" + END; - public static final String ALLOC_ARRAY_OF = COMPOSITE_PREFIX + "(.*precise klass \\[L.*" + IS_REPLACED + ";:.*\\R((.*(?i:mov|mv|xorl|nop|spill).*|\\s*|.*LGHI.*)\\R)*.*(?i:call,static).*wrapper for: _new_array_Java" + END; + public static final String ALLOC_ARRAY = "(.*precise klass \\[L.*\\R((.*(?i:mov|xor|nop|spill).*|\\s*|.*LGHI.*)\\R)*.*(?i:call,static).*wrapper for: _new_array_Java" + END; + public static final String ALLOC_ARRAY_OF = COMPOSITE_PREFIX + "(.*precise klass \\[L.*" + IS_REPLACED + ";:.*\\R((.*(?i:mov|xorl|nop|spill).*|\\s*|.*LGHI.*)\\R)*.*(?i:call,static).*wrapper for: _new_array_Java" + END; -- public static final String CHECKCAST_ARRAY = "(((?i:cmp|CLFI|CLR).*precise klass \\[.*;:|.*(?i:mov|mv|or).*precise klass \\[.*;:.*\\R.*(cmp|CMP|CLR))" + END; -- public static final String CHECKCAST_ARRAY_OF = COMPOSITE_PREFIX + "(((?i:cmp|CLFI|CLR).*precise klass \\[.*" + IS_REPLACED + ";:|.*(?i:mov|mv|or).*precise klass \\[.*" + IS_REPLACED + ";:.*\\R.*(cmp|CMP|CLR))" + END; -+ public static final String CHECKCAST_ARRAY = "(((?i:cmp|CLFI|CLR).*precise klass \\[.*;:|.*(?i:mov|mv|or|li).*precise klass \\[.*;:.*\\R.*(cmp|CMP|CLR))" + END; -+ public static final String CHECKCAST_ARRAY_OF = COMPOSITE_PREFIX + "(((?i:cmp|CLFI|CLR).*precise klass \\[.*" + IS_REPLACED + ";:|.*(?i:mov|mv|or|li).*precise klass \\[.*" + IS_REPLACED + ";:.*\\R.*(cmp|CMP|CLR))" + END; +- public static final String CHECKCAST_ARRAY = "(((?i:cmp|CLFI|CLR).*precise klass \\[.*;:|.*(?i:mov|or).*precise klass \\[.*;:.*\\R.*(cmp|CMP|CLR))" + END; +- public static final String CHECKCAST_ARRAY_OF = COMPOSITE_PREFIX + "(((?i:cmp|CLFI|CLR).*precise klass \\[.*" + IS_REPLACED + ";:|.*(?i:mov|or).*precise klass \\[.*" + IS_REPLACED + ";:.*\\R.*(cmp|CMP|CLR))" + END; ++ public static final String CHECKCAST_ARRAY = "(((?i:cmp|CLFI|CLR).*precise klass \\[.*;:|.*(?i:mov|or|li).*precise klass \\[.*;:.*\\R.*(cmp|CMP|CLR))" + END; ++ public static final String CHECKCAST_ARRAY_OF = COMPOSITE_PREFIX + "(((?i:cmp|CLFI|CLR).*precise klass \\[.*" + IS_REPLACED + ";:|.*(?i:mov|or|li).*precise klass \\[.*" + IS_REPLACED + ";:.*\\R.*(cmp|CMP|CLR))" + END; // Does not work on s390 (a rule containing this regex will be skipped on s390). 
public static final String CHECKCAST_ARRAYCOPY = "(.*((?i:call_leaf_nofp,runtime)|CALL,\\s?runtime leaf nofp|BCTRL.*.leaf call).*checkcast_arraycopy.*" + END; diff --git a/test/hotspot/jtreg/compiler/runtime/TestConstantsInError.java b/test/hotspot/jtreg/compiler/runtime/TestConstantsInError.java -index 85fd3fa938d..0655f2b0bd1 100644 +index 2f9189cd9bc..eb9db789f91 100644 --- a/test/hotspot/jtreg/compiler/runtime/TestConstantsInError.java +++ b/test/hotspot/jtreg/compiler/runtime/TestConstantsInError.java @@ -21,6 +21,12 @@ @@ -121770,8 +134083,8 @@ index 85fd3fa938d..0655f2b0bd1 100644 results.shouldMatch("Test_C1/.*::test \\(3 bytes\\)$") .shouldMatch("Test_C2/.*::test \\(3 bytes\\)$"); -- if (isC1 && (Platform.isAArch64() || Platform.isRISCV64())) { // no code patching -+ if (isC1 && (Platform.isAArch64() || Platform.isRISCV64() || Platform.isLoongArch64())) { // no code patching +- if (isC1 && Platform.isAArch64()) { // no code patching ++ if (isC1 && (Platform.isAArch64() || Platform.isLoongArch64())) { // no code patching results.shouldMatch("Test_C1/.*::test \\(3 bytes\\) made not entrant") .shouldMatch("Test_C2/.*::test \\(3 bytes\\) made not entrant"); } else { @@ -121779,8 +134092,8 @@ index 85fd3fa938d..0655f2b0bd1 100644 .shouldMatch("Test_MH3/.*::test \\(3 bytes\\)$") .shouldMatch("Test_MH4/.*::test \\(3 bytes\\)$"); -- if (isC1 && (Platform.isAArch64() || Platform.isRISCV64())) { // no code patching -+ if (isC1 && (Platform.isAArch64() || Platform.isRISCV64() || Platform.isLoongArch64())) { // no code patching +- if (isC1 && Platform.isAArch64()) { // no code patching ++ if (isC1 && (Platform.isAArch64() || Platform.isLoongArch64())) { // no code patching results.shouldMatch("Test_MH1/.*::test \\(3 bytes\\) made not entrant") .shouldMatch("Test_MH2/.*::test \\(3 bytes\\) made not entrant") .shouldMatch("Test_MH3/.*::test \\(3 bytes\\) made not entrant") @@ -121788,13 +134101,13 @@ index 85fd3fa938d..0655f2b0bd1 100644 results.shouldMatch("Test_MT1/.*::test \\(3 bytes\\)$") .shouldMatch("Test_MT2/.*::test \\(3 bytes\\)$"); -- if (isC1 && (Platform.isAArch64() || Platform.isRISCV64())) { // no code patching -+ if (isC1 && (Platform.isAArch64() || Platform.isRISCV64() || Platform.isLoongArch64())) { // no code patching +- if (isC1 && Platform.isAArch64()) { // no code patching ++ if (isC1 && (Platform.isAArch64() || Platform.isLoongArch64())) { // no code patching results.shouldMatch("Test_MT1/.*::test \\(3 bytes\\) made not entrant") .shouldMatch("Test_MT2/.*::test \\(3 bytes\\) made not entrant"); } else { diff --git a/test/hotspot/jtreg/compiler/runtime/criticalnatives/argumentcorruption/CheckLongArgs.java b/test/hotspot/jtreg/compiler/runtime/criticalnatives/argumentcorruption/CheckLongArgs.java -index 10d87d51f0f..dbea76741d6 100644 +index d38d76443f0..119acb520da 100644 --- a/test/hotspot/jtreg/compiler/runtime/criticalnatives/argumentcorruption/CheckLongArgs.java +++ b/test/hotspot/jtreg/compiler/runtime/criticalnatives/argumentcorruption/CheckLongArgs.java @@ -21,10 +21,17 @@ @@ -121810,13 +134123,13 @@ index 10d87d51f0f..dbea76741d6 100644 /* @test * @bug 8167409 - * @requires (os.arch != "aarch64") & (os.arch != "arm") & (os.arch != "riscv64") & (vm.flavor != "zero") + * @requires (os.arch != "aarch64") & (os.arch != "arm") & (vm.flavor != "zero") + * @requires (os.arch != "mips64el") & (os.arch != "loongarch64") & (vm.flavor != "zero") * @run main/othervm/native -Xcomp -XX:+CriticalJNINatives compiler.runtime.criticalnatives.argumentcorruption.CheckLongArgs */ 
package compiler.runtime.criticalnatives.argumentcorruption; diff --git a/test/hotspot/jtreg/compiler/runtime/criticalnatives/lookup/LookUp.java b/test/hotspot/jtreg/compiler/runtime/criticalnatives/lookup/LookUp.java -index 23c1e6e6acb..2f402d567d9 100644 +index 37c42c9c336..d3e73158a25 100644 --- a/test/hotspot/jtreg/compiler/runtime/criticalnatives/lookup/LookUp.java +++ b/test/hotspot/jtreg/compiler/runtime/criticalnatives/lookup/LookUp.java @@ -21,10 +21,17 @@ @@ -121832,13 +134145,13 @@ index 23c1e6e6acb..2f402d567d9 100644 /* @test * @bug 8167408 - * @requires (os.arch != "aarch64") & (os.arch != "arm") & (os.arch != "riscv64") & (vm.flavor != "zero") + * @requires (os.arch != "aarch64") & (os.arch != "arm") & (vm.flavor != "zero") + * @requires (os.arch != "mips64el") & (os.arch != "loongarch64") & (vm.flavor != "zero") * @run main/othervm/native -Xcomp -XX:+CriticalJNINatives compiler.runtime.criticalnatives.lookup.LookUp */ package compiler.runtime.criticalnatives.lookup; diff --git a/test/hotspot/jtreg/compiler/testlibrary/sha/predicate/IntrinsicPredicates.java b/test/hotspot/jtreg/compiler/testlibrary/sha/predicate/IntrinsicPredicates.java -index 689c7c8cc2f..f734c1baa3f 100644 +index 782ab6b9b46..8b859a92d8a 100644 --- a/test/hotspot/jtreg/compiler/testlibrary/sha/predicate/IntrinsicPredicates.java +++ b/test/hotspot/jtreg/compiler/testlibrary/sha/predicate/IntrinsicPredicates.java @@ -21,6 +21,12 @@ @@ -121854,7 +134167,7 @@ index 689c7c8cc2f..f734c1baa3f 100644 package compiler.testlibrary.sha.predicate; import jdk.test.lib.Platform; -@@ -61,19 +67,22 @@ public class IntrinsicPredicates { +@@ -61,30 +67,35 @@ public class IntrinsicPredicates { public static final BooleanSupplier MD5_INSTRUCTION_AVAILABLE = new OrPredicate(new CPUSpecificPredicate("aarch64.*", null, null), @@ -121867,19 +134180,17 @@ index 689c7c8cc2f..f734c1baa3f 100644 public static final BooleanSupplier SHA1_INSTRUCTION_AVAILABLE = new OrPredicate(new CPUSpecificPredicate("aarch64.*", new String[] { "sha1" }, null), - new OrPredicate(new CPUSpecificPredicate("riscv64.*", new String[] { "sha1" }, null), new OrPredicate(new CPUSpecificPredicate("s390.*", new String[] { "sha1" }, null), + // Basic instructions are used to implement SHA1 Intrinsics on LA, so "sha1" feature is not needed. 
+ new OrPredicate(new CPUSpecificPredicate("loongarch64.*", null, null), // x86 variants new OrPredicate(new CPUSpecificPredicate("amd64.*", new String[] { "sha" }, null), new OrPredicate(new CPUSpecificPredicate("i386.*", new String[] { "sha" }, null), -- new CPUSpecificPredicate("x86.*", new String[] { "sha" }, null)))))); -+ new CPUSpecificPredicate("x86.*", new String[] { "sha" }, null))))))); +- new CPUSpecificPredicate("x86.*", new String[] { "sha" }, null))))); ++ new CPUSpecificPredicate("x86.*", new String[] { "sha" }, null)))))); public static final BooleanSupplier SHA256_INSTRUCTION_AVAILABLE = new OrPredicate(new CPUSpecificPredicate("aarch64.*", new String[] { "sha256" }, null), -@@ -81,12 +90,14 @@ public class IntrinsicPredicates { new OrPredicate(new CPUSpecificPredicate("s390.*", new String[] { "sha256" }, null), new OrPredicate(new CPUSpecificPredicate("ppc64.*", new String[] { "sha" }, null), new OrPredicate(new CPUSpecificPredicate("ppc64le.*", new String[] { "sha" }, null), @@ -121890,13 +134201,13 @@ index 689c7c8cc2f..f734c1baa3f 100644 new OrPredicate(new CPUSpecificPredicate("i386.*", new String[] { "sha" }, null), new OrPredicate(new CPUSpecificPredicate("x86.*", new String[] { "sha" }, null), new OrPredicate(new CPUSpecificPredicate("amd64.*", new String[] { "avx2", "bmi2" }, null), -- new CPUSpecificPredicate("x86_64", new String[] { "avx2", "bmi2" }, null)))))))))); -+ new CPUSpecificPredicate("x86_64", new String[] { "avx2", "bmi2" }, null))))))))))); +- new CPUSpecificPredicate("x86_64", new String[] { "avx2", "bmi2" }, null))))))))); ++ new CPUSpecificPredicate("x86_64", new String[] { "avx2", "bmi2" }, null)))))))))); public static final BooleanSupplier SHA512_INSTRUCTION_AVAILABLE = new OrPredicate(new CPUSpecificPredicate("aarch64.*", new String[] { "sha512" }, null), diff --git a/test/hotspot/jtreg/runtime/ReservedStack/ReservedStackTest.java b/test/hotspot/jtreg/runtime/ReservedStack/ReservedStackTest.java -index 36f74d01b54..035b91b9d8e 100644 +index c11472a0918..bf933f04957 100644 --- a/test/hotspot/jtreg/runtime/ReservedStack/ReservedStackTest.java +++ b/test/hotspot/jtreg/runtime/ReservedStack/ReservedStackTest.java @@ -21,6 +21,12 @@ @@ -121912,18 +134223,17 @@ index 36f74d01b54..035b91b9d8e 100644 /* * @test ReservedStackTest * -@@ -240,7 +246,8 @@ public class ReservedStackTest { +@@ -240,7 +246,7 @@ public class ReservedStackTest { return Platform.isAix() || (Platform.isLinux() && (Platform.isPPC() || Platform.isS390x() || Platform.isX64() || -- Platform.isX86() || Platform.isAArch64() || Platform.isRISCV64())) || -+ Platform.isX86() || Platform.isAArch64() || Platform.isRISCV64() || -+ Platform.isMIPS() || Platform.isLoongArch64())) || +- Platform.isX86() || Platform.isAArch64())) || ++ Platform.isX86() || Platform.isAArch64() || Platform.isMIPS() || Platform.isLoongArch64())) || Platform.isOSX(); } diff --git a/test/hotspot/jtreg/testlibrary_tests/ir_framework/tests/TestIRMatching.java b/test/hotspot/jtreg/testlibrary_tests/ir_framework/tests/TestIRMatching.java -index 26dd3514e8e..2818343ec3d 100644 +index c48962e47a1..c9d6c86834e 100644 --- a/test/hotspot/jtreg/testlibrary_tests/ir_framework/tests/TestIRMatching.java +++ b/test/hotspot/jtreg/testlibrary_tests/ir_framework/tests/TestIRMatching.java @@ -21,6 +21,12 @@ @@ -121939,7 +134249,7 @@ index 26dd3514e8e..2818343ec3d 100644 package ir_framework.tests; import compiler.lib.ir_framework.*; -@@ -215,7 +221,7 @@ public class TestIRMatching { +@@ -207,7 +213,7 @@ public class TestIRMatching { 
runCheck(BadFailOnConstraint.create(Membar.class, "membar()", 1, "MemBar")); String cmp; @@ -121948,259 +134258,74 @@ index 26dd3514e8e..2818343ec3d 100644 cmp = "CMP"; } else if (Platform.isS390x()){ cmp = "CLFI"; -diff --git a/test/hotspot/jtreg/vmTestbase/nsk/share/jdi/ArgumentHandler.java b/test/hotspot/jtreg/vmTestbase/nsk/share/jdi/ArgumentHandler.java -index 16863189500..e2f3a13473b 100644 ---- a/test/hotspot/jtreg/vmTestbase/nsk/share/jdi/ArgumentHandler.java -+++ b/test/hotspot/jtreg/vmTestbase/nsk/share/jdi/ArgumentHandler.java -@@ -21,6 +21,12 @@ - * questions. - */ - -+/* -+ * This file has been modified by Loongson Technology in 2022, These -+ * modifications are Copyright (c) 2022, Loongson Technology, and are made -+ * available on the same license terms set forth above. -+ */ -+ - package nsk.share.jdi; - - import nsk.share.*; -@@ -520,21 +526,22 @@ class CheckedFeatures { - * available only on the Microsoft Windows platform. - * " - */ -- {"linux-i586", "com.sun.jdi.SharedMemoryAttach"}, -- {"linux-ia64", "com.sun.jdi.SharedMemoryAttach"}, -- {"linux-amd64", "com.sun.jdi.SharedMemoryAttach"}, -- {"linux-x64", "com.sun.jdi.SharedMemoryAttach"}, -- {"linux-aarch64", "com.sun.jdi.SharedMemoryAttach"}, -- {"linux-arm", "com.sun.jdi.SharedMemoryAttach"}, -- {"linux-ppc64", "com.sun.jdi.SharedMemoryAttach"}, -- {"linux-ppc64le", "com.sun.jdi.SharedMemoryAttach"}, -- {"linux-s390x", "com.sun.jdi.SharedMemoryAttach"}, -- {"linux-riscv64", "com.sun.jdi.SharedMemoryAttach"}, -- {"macosx-amd64", "com.sun.jdi.SharedMemoryAttach"}, -- {"mac-x64", "com.sun.jdi.SharedMemoryAttach"}, -- {"macosx-aarch64", "com.sun.jdi.SharedMemoryAttach"}, -- {"mac-aarch64", "com.sun.jdi.SharedMemoryAttach"}, -- {"aix-ppc64", "com.sun.jdi.SharedMemoryAttach"}, -+ {"linux-i586", "com.sun.jdi.SharedMemoryAttach"}, -+ {"linux-ia64", "com.sun.jdi.SharedMemoryAttach"}, -+ {"linux-amd64", "com.sun.jdi.SharedMemoryAttach"}, -+ {"linux-x64", "com.sun.jdi.SharedMemoryAttach"}, -+ {"linux-aarch64", "com.sun.jdi.SharedMemoryAttach"}, -+ {"linux-arm", "com.sun.jdi.SharedMemoryAttach"}, -+ {"linux-ppc64", "com.sun.jdi.SharedMemoryAttach"}, -+ {"linux-ppc64le", "com.sun.jdi.SharedMemoryAttach"}, -+ {"linux-s390x", "com.sun.jdi.SharedMemoryAttach"}, -+ {"linux-riscv64", "com.sun.jdi.SharedMemoryAttach"}, -+ {"linux-loongarch64", "com.sun.jdi.SharedMemoryAttach"}, -+ {"macosx-amd64", "com.sun.jdi.SharedMemoryAttach"}, -+ {"mac-x64", "com.sun.jdi.SharedMemoryAttach"}, -+ {"macosx-aarch64", "com.sun.jdi.SharedMemoryAttach"}, -+ {"mac-aarch64", "com.sun.jdi.SharedMemoryAttach"}, -+ {"aix-ppc64", "com.sun.jdi.SharedMemoryAttach"}, - - // listening connectors - /* -@@ -546,21 +553,22 @@ class CheckedFeatures { - * It is available only on the Microsoft Windows platform. 
- * " - */ -- {"linux-i586", "com.sun.jdi.SharedMemoryListen"}, -- {"linux-ia64", "com.sun.jdi.SharedMemoryListen"}, -- {"linux-amd64", "com.sun.jdi.SharedMemoryListen"}, -- {"linux-x64", "com.sun.jdi.SharedMemoryListen"}, -- {"linux-aarch64", "com.sun.jdi.SharedMemoryListen"}, -- {"linux-arm", "com.sun.jdi.SharedMemoryListen"}, -- {"linux-ppc64", "com.sun.jdi.SharedMemoryListen"}, -- {"linux-ppc64le", "com.sun.jdi.SharedMemoryListen"}, -- {"linux-s390x", "com.sun.jdi.SharedMemoryListen"}, -- {"linux-riscv64", "com.sun.jdi.SharedMemoryListen"}, -- {"macosx-amd64", "com.sun.jdi.SharedMemoryListen"}, -- {"mac-x64", "com.sun.jdi.SharedMemoryListen"}, -- {"macosx-aarch64", "com.sun.jdi.SharedMemoryListen"}, -- {"mac-aarch64", "com.sun.jdi.SharedMemoryListen"}, -- {"aix-ppc64", "com.sun.jdi.SharedMemoryListen"}, -+ {"linux-i586", "com.sun.jdi.SharedMemoryListen"}, -+ {"linux-ia64", "com.sun.jdi.SharedMemoryListen"}, -+ {"linux-amd64", "com.sun.jdi.SharedMemoryListen"}, -+ {"linux-x64", "com.sun.jdi.SharedMemoryListen"}, -+ {"linux-aarch64", "com.sun.jdi.SharedMemoryListen"}, -+ {"linux-arm", "com.sun.jdi.SharedMemoryListen"}, -+ {"linux-ppc64", "com.sun.jdi.SharedMemoryListen"}, -+ {"linux-ppc64le", "com.sun.jdi.SharedMemoryListen"}, -+ {"linux-s390x", "com.sun.jdi.SharedMemoryListen"}, -+ {"linux-riscv64", "com.sun.jdi.SharedMemoryListen"}, -+ {"linux-loongarch64", "com.sun.jdi.SharedMemoryListen"}, -+ {"macosx-amd64", "com.sun.jdi.SharedMemoryListen"}, -+ {"mac-x64", "com.sun.jdi.SharedMemoryListen"}, -+ {"macosx-aarch64", "com.sun.jdi.SharedMemoryListen"}, -+ {"mac-aarch64", "com.sun.jdi.SharedMemoryListen"}, -+ {"aix-ppc64", "com.sun.jdi.SharedMemoryListen"}, - - // launching connectors - /* -@@ -575,78 +583,82 @@ class CheckedFeatures { - * Windows, the shared memory transport is used. On Linux the socket transport is used. - * " - */ -- {"linux-i586", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -- {"linux-i586", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, -+ {"linux-i586", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -+ {"linux-i586", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, +diff --git a/test/jdk/java/security/cert/CertPathBuilder/targetConstraints/BuildEEBasicConstraints.java b/test/jdk/java/security/cert/CertPathBuilder/targetConstraints/BuildEEBasicConstraints.java +index c7cc90f958f..fd4193207c0 100644 +--- a/test/jdk/java/security/cert/CertPathBuilder/targetConstraints/BuildEEBasicConstraints.java ++++ b/test/jdk/java/security/cert/CertPathBuilder/targetConstraints/BuildEEBasicConstraints.java +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it +@@ -45,9 +45,12 @@ import java.security.cert.PKIXCertPathBuilderResult; + import java.security.cert.TrustAnchor; + import java.security.cert.X509Certificate; + import java.security.cert.X509CertSelector; ++import java.text.DateFormat; + import java.util.ArrayList; + import java.util.Collections; + import java.util.List; ++import java.util.Locale; + -+ {"linux-ia64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -+ {"linux-ia64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, - -- {"linux-ia64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -- {"linux-ia64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, -+ {"linux-amd64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -+ {"linux-amd64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, - -- {"linux-amd64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -- {"linux-amd64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, -+ {"linux-x64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -+ {"linux-x64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, - -- {"linux-x64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -- {"linux-x64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, -+ {"linux-aarch64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -+ {"linux-aarch64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, - -- {"linux-aarch64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -- {"linux-aarch64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, -+ {"linux-arm", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -+ {"linux-arm", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, - -- {"linux-arm", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -- {"linux-arm", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, -+ {"linux-ppc64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -+ {"linux-ppc64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, - -- {"linux-ppc64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -- {"linux-ppc64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, -+ {"linux-ppc64le", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -+ {"linux-ppc64le", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, - -- {"linux-ppc64le", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -- {"linux-ppc64le", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, -+ {"linux-s390x", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -+ {"linux-s390x", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, - -- {"linux-s390x", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -- {"linux-s390x", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, -+ {"linux-riscv64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -+ {"linux-riscv64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, - -- {"linux-riscv64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -- {"linux-riscv64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, -+ {"linux-loongarch64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -+ {"linux-loongarch64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, - -- {"windows-i586", "com.sun.jdi.CommandLineLaunch", "dt_socket"}, -- {"windows-i586", "com.sun.jdi.RawCommandLineLaunch", "dt_socket"}, -+ {"windows-i586", "com.sun.jdi.CommandLineLaunch", "dt_socket"}, -+ {"windows-i586", "com.sun.jdi.RawCommandLineLaunch", "dt_socket"}, - -- {"windows-ia64", "com.sun.jdi.CommandLineLaunch", "dt_socket"}, -- {"windows-ia64", "com.sun.jdi.RawCommandLineLaunch", "dt_socket"}, -+ {"windows-ia64", "com.sun.jdi.CommandLineLaunch", "dt_socket"}, -+ {"windows-ia64", "com.sun.jdi.RawCommandLineLaunch", "dt_socket"}, - -- 
{"windows-amd64", "com.sun.jdi.CommandLineLaunch", "dt_socket"}, -- {"windows-amd64", "com.sun.jdi.RawCommandLineLaunch", "dt_socket"}, -+ {"windows-amd64", "com.sun.jdi.CommandLineLaunch", "dt_socket"}, -+ {"windows-amd64", "com.sun.jdi.RawCommandLineLaunch", "dt_socket"}, - -- {"windows-x64", "com.sun.jdi.CommandLineLaunch", "dt_socket"}, -- {"windows-x64", "com.sun.jdi.RawCommandLineLaunch", "dt_socket"}, -+ {"windows-x64", "com.sun.jdi.CommandLineLaunch", "dt_socket"}, -+ {"windows-x64", "com.sun.jdi.RawCommandLineLaunch", "dt_socket"}, + import jdk.test.lib.security.CertUtils; -- {"macosx-amd64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -- {"macosx-amd64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, -+ {"macosx-amd64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -+ {"macosx-amd64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, - -- {"mac-x64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -- {"mac-x64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, -+ {"mac-x64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -+ {"mac-x64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, + public final class BuildEEBasicConstraints { +@@ -65,6 +68,11 @@ public final class BuildEEBasicConstraints { + PKIXBuilderParameters params = new PKIXBuilderParameters + (Collections.singleton(anchor), sel); + params.setRevocationEnabled(false); ++ ++ // Certs expired on 7th Nov 2022 ++ params.setDate(DateFormat.getDateInstance(DateFormat.MEDIUM, ++ Locale.US).parse("June 01, 2022")); ++ + X509Certificate eeCert = CertUtils.getCertFromFile("ee.cer"); + X509Certificate caCert = CertUtils.getCertFromFile("ca.cer"); + ArrayList certs = new ArrayList(); +diff --git a/test/jdk/java/security/cert/pkix/policyChanges/TestPolicy.java b/test/jdk/java/security/cert/pkix/policyChanges/TestPolicy.java +index a92eee2c5e0..de2f94d27b5 100644 +--- a/test/jdk/java/security/cert/pkix/policyChanges/TestPolicy.java ++++ b/test/jdk/java/security/cert/pkix/policyChanges/TestPolicy.java +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it +@@ -34,6 +34,7 @@ + */ -- {"macosx-aarch64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -- {"macosx-aarch64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, -+ {"macosx-aarch64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -+ {"macosx-aarch64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, + import java.io.*; ++import java.text.DateFormat; + import java.util.*; -- {"mac-aarch64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -- {"mac-aarch64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, -+ {"mac-aarch64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -+ {"mac-aarch64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, + import java.security.Security; +@@ -97,6 +98,10 @@ public class TestPolicy { + params.setRevocationEnabled(false); + params.setInitialPolicies(testCase.initialPolicies); -- {"aix-ppc64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -- {"aix-ppc64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, -+ {"aix-ppc64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, -+ {"aix-ppc64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, ++ // Certs expired on 7th Nov 2022 ++ params.setDate(DateFormat.getDateInstance(DateFormat.MEDIUM, ++ Locale.US).parse("June 01, 2022")); ++ + CertPath path = factory.generateCertPath(Arrays.asList(new X509Certificate[] {ee, ca})); - // shared memory transport is implemented only on windows platform -- {"linux-i586", "dt_shmem"}, -- {"linux-ia64", "dt_shmem"}, -- {"linux-amd64", "dt_shmem"}, -- {"linux-x64", "dt_shmem"}, -- {"linux-aarch64", "dt_shmem"}, -- {"linux-arm", "dt_shmem"}, -- {"linux-ppc64", "dt_shmem"}, -- {"linux-ppc64le", "dt_shmem"}, -- {"linux-s390x", "dt_shmem"}, -- {"linux-riscv64", "dt_shmem"}, -- {"macosx-amd64", "dt_shmem"}, -- {"mac-x64", "dt_shmem"}, -- {"macosx-aarch64", "dt_shmem"}, -- {"mac-aarch64", "dt_shmem"}, -- {"aix-ppc64", "dt_shmem"}, -+ {"linux-i586", "dt_shmem"}, -+ {"linux-ia64", "dt_shmem"}, -+ {"linux-amd64", "dt_shmem"}, -+ {"linux-x64", "dt_shmem"}, -+ {"linux-aarch64", "dt_shmem"}, -+ {"linux-arm", "dt_shmem"}, -+ {"linux-ppc64", "dt_shmem"}, -+ {"linux-ppc64le", "dt_shmem"}, -+ {"linux-s390x", "dt_shmem"}, -+ {"linux-riscv64", "dt_shmem"}, -+ {"linux-loongarch64", "dt_shmem"}, -+ {"macosx-amd64", "dt_shmem"}, -+ {"mac-x64", "dt_shmem"}, -+ {"macosx-aarch64", "dt_shmem"}, -+ {"mac-aarch64", "dt_shmem"}, -+ {"aix-ppc64", "dt_shmem"}, - }; - } -diff --git a/test/jdk/java/nio/channels/DatagramChannel/Disconnect.java b/test/jdk/java/nio/channels/DatagramChannel/Disconnect.java -index cdc5882fefd..16120f85168 100644 ---- a/test/jdk/java/nio/channels/DatagramChannel/Disconnect.java -+++ b/test/jdk/java/nio/channels/DatagramChannel/Disconnect.java -@@ -52,7 +52,7 @@ public class Disconnect { - if (IPSupport.hasIPv4()) { - // test with IPv4 only - try (DatagramChannel dc = DatagramChannel.open(StandardProtocolFamily.INET)) { -- InetAddress lo4 = InetAddress.ofLiteral("127.0.0.1"); -+ InetAddress lo4 = InetAddress.getByName("127.0.0.1"); - System.out.println("Testing with INET family and " + lo4); - test(dc, lo4); - test(dc, lo4); -@@ -62,7 +62,7 @@ public class Disconnect { - if (IPSupport.hasIPv6()) { - // test with IPv6 only - try (DatagramChannel dc = DatagramChannel.open(StandardProtocolFamily.INET6)) { -- InetAddress lo6 = InetAddress.ofLiteral("::1"); -+ InetAddress lo6 = InetAddress.getByName("::1"); - System.out.println("Testing with INET6 family and " + lo6); - test(dc, lo6); - test(dc, lo6); + 
PKIXCertPathValidatorResult result = (PKIXCertPathValidatorResult)validator.validate(path, params); diff --git a/test/jdk/jdk/jfr/event/os/TestCPUInformation.java b/test/jdk/jdk/jfr/event/os/TestCPUInformation.java -index c5166580010..913136a1fd1 100644 +index a4b5169ca4a..ae739a92f28 100644 --- a/test/jdk/jdk/jfr/event/os/TestCPUInformation.java +++ b/test/jdk/jdk/jfr/event/os/TestCPUInformation.java @@ -21,6 +21,12 @@ @@ -122220,15 +134345,74 @@ index c5166580010..913136a1fd1 100644 Events.assertField(event, "hwThreads").atLeast(1); Events.assertField(event, "cores").atLeast(1); Events.assertField(event, "sockets").atLeast(1); -- Events.assertField(event, "cpu").containsAny("Intel", "AMD", "Unknown x86", "ARM", "PPC", "PowerPC", "AArch64", "RISCV64", "s390"); -- Events.assertField(event, "description").containsAny("Intel", "AMD", "Unknown x86", "ARM", "PPC", "PowerPC", "AArch64", "RISCV64", "s390"); -+ Events.assertField(event, "cpu").containsAny("Intel", "AMD", "Unknown x86", "ARM", "PPC", "PowerPC", "AArch64", "RISCV64", "s390", "MIPS", "LoongArch"); -+ Events.assertField(event, "description").containsAny("Intel", "AMD", "Unknown x86", "ARM", "PPC", "PowerPC", "AArch64", "RISCV64", "s390", "MIPS", "LoongArch"); +- Events.assertField(event, "cpu").containsAny("Intel", "AMD", "Unknown x86", "ARM", "PPC", "PowerPC", "AArch64", "s390"); +- Events.assertField(event, "description").containsAny("Intel", "AMD", "Unknown x86", "ARM", "PPC", "PowerPC", "AArch64", "s390"); ++ Events.assertField(event, "cpu").containsAny("Intel", "AMD", "Unknown x86", "ARM", "PPC", "PowerPC", "AArch64", "s390", "MIPS", "LoongArch"); ++ Events.assertField(event, "description").containsAny("Intel", "AMD", "Unknown x86", "ARM", "PPC", "PowerPC", "AArch64", "s390", "MIPS", "LoongArch"); } } } +diff --git a/test/jdk/sun/security/pkcs11/PKCS11Test.java b/test/jdk/sun/security/pkcs11/PKCS11Test.java +index 3f746c0d9f6..514aadbe716 100644 +--- a/test/jdk/sun/security/pkcs11/PKCS11Test.java ++++ b/test/jdk/sun/security/pkcs11/PKCS11Test.java +@@ -21,6 +21,12 @@ + * questions. + */ + ++/* ++ * This file has been modified by Loongson Technology in 2022, These ++ * modifications are Copyright (c) 2021, 2022, Loongson Technology, and are made ++ * available on the same license terms set forth above. ++ */ ++ + // common infrastructure for SunPKCS11 tests + + import java.io.ByteArrayOutputStream; +@@ -693,6 +699,9 @@ public abstract class PKCS11Test { + "/usr/lib64/" }); + osMap.put("Linux-ppc64-64", new String[] { "/usr/lib64/" }); + osMap.put("Linux-ppc64le-64", new String[] { "/usr/lib64/" }); ++ osMap.put("Linux-mips64el-64", new String[]{"/usr/lib64/"}); ++ osMap.put("Linux-loongarch64-64", new String[]{"/usr/lib/loongarch64-linux-gnu/", ++ "/usr/lib64/" }); + osMap.put("Linux-s390x-64", new String[] { "/usr/lib64/" }); + osMap.put("Windows-x86-32", new String[] {}); + osMap.put("Windows-amd64-64", new String[] {}); +diff --git a/test/langtools/jdk/javadoc/doclet/testJavaFX/TestJavaFxMode.java b/test/langtools/jdk/javadoc/doclet/testJavaFX/TestJavaFxMode.java +index ae3155823d8..ae3d8de1491 100644 +--- a/test/langtools/jdk/javadoc/doclet/testJavaFX/TestJavaFxMode.java ++++ b/test/langtools/jdk/javadoc/doclet/testJavaFX/TestJavaFxMode.java +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it +@@ -77,13 +77,16 @@ public class TestJavaFxMode extends JavadocTester { + checkExit(Exit.OK); + checkOrder("pkg/A.html", + "Property Summary", +- "javafx.beans.property.Property", "prop", ++ """ ++ prop""", + "Field Summary", +- "javafx.beans.property.Property", "prop", ++ """ ++ prop""", + "Method Summary", +- "getProp", "Gets the value of the property prop.", + """ +- propProperty""", "Sets the value of the property prop."); ++ getProp""", ++ """ ++ propProperty"""); + } + + void createTestClass(Path src) throws Exception { diff --git a/test/lib-test/jdk/test/lib/TestMutuallyExclusivePlatformPredicates.java b/test/lib-test/jdk/test/lib/TestMutuallyExclusivePlatformPredicates.java -index e78e200ac24..2b9fcc0ff47 100644 +index 4be69a6566c..d5b5b3adfa4 100644 --- a/test/lib-test/jdk/test/lib/TestMutuallyExclusivePlatformPredicates.java +++ b/test/lib-test/jdk/test/lib/TestMutuallyExclusivePlatformPredicates.java @@ -33,6 +33,12 @@ import java.util.HashSet; @@ -122248,13 +134432,13 @@ index e78e200ac24..2b9fcc0ff47 100644 */ public class TestMutuallyExclusivePlatformPredicates { private static enum MethodGroup { -- ARCH("isAArch64", "isARM", "isRISCV64", "isPPC", "isS390x", "isX64", "isX86"), -+ ARCH("isAArch64", "isARM", "isRISCV64", "isPPC", "isS390x", "isX64", "isX86", "isMIPS", "isLoongArch64"), +- ARCH("isAArch64", "isARM", "isPPC", "isS390x", "isX64", "isX86"), ++ ARCH("isAArch64", "isARM", "isPPC", "isS390x", "isX64", "isX86", "isMIPS", "isLoongArch64"), BITNESS("is32bit", "is64bit"), OS("isAix", "isLinux", "isOSX", "isWindows"), VM_TYPE("isClient", "isServer", "isMinimal", "isZero", "isEmbedded"), diff --git a/test/lib/jdk/test/lib/Platform.java b/test/lib/jdk/test/lib/Platform.java -index 2e2e16e6593..64b42446e84 100644 +index 952c02270e5..ef98ee74a48 100644 --- a/test/lib/jdk/test/lib/Platform.java +++ b/test/lib/jdk/test/lib/Platform.java @@ -21,6 +21,12 @@ @@ -122269,8 +134453,8 @@ index 2e2e16e6593..64b42446e84 100644 + package jdk.test.lib; - import java.io.BufferedReader; -@@ -234,6 +240,14 @@ public class Platform { + import java.io.FileNotFoundException; +@@ -214,6 +220,14 @@ public class Platform { return isArch("(i386)|(x86(?!_64))"); } diff --git a/openjdk-17.spec b/openjdk-17.spec index 4c69127f93e3f7a77b5005b9e06862c75b5ed6a1..c4231f799630411f998697cc0f005dca14eed1d1 100644 --- a/openjdk-17.spec +++ b/openjdk-17.spec @@ -914,7 +914,7 @@ Provides: java-src%{?1} = %{epoch}:%{version}-%{release} Name: java-%{javaver}-%{origin} Version: %{newjavaver}.%{buildver} -Release: 0 +Release: 1 # java-1.5.0-ibm from jpackage.org set Epoch to 1 for unknown reasons # and this change was brought into RHEL-4. java-1.5.0-ibm packages @@ -1963,6 +1963,9 @@ cjc.mainProgram(args) -- the returns from copy_jdk_configs.lua should not affect %changelog +* Tue Feb 11 2025 Pan Xuefeng - 1:17.0.14.7-1 +- upgrade LoongArch64 port to 17.0.14 + * Thu Jan 23 2025 Benshuai5D - 1:17.0.14.7-0 - update to 17.0.14 - modify 8264805-Backport-Ahead-of-Time-Compiler.patch