diff --git a/src/mapleall/maple_be/BUILD.gn b/src/mapleall/maple_be/BUILD.gn index cf3799fdfb08fc16299d2a2de90d734784e0d1e8..8d3dd026f390575dd844e11c0d0faf2664650aa1 100644 --- a/src/mapleall/maple_be/BUILD.gn +++ b/src/mapleall/maple_be/BUILD.gn @@ -109,6 +109,7 @@ src_libcgaarch64 = [ "src/cg/aarch64/aarch64_ra_opt.cpp", "src/cg/aarch64/aarch64_alignment.cpp", "src/cg/aarch64/aarch64_regsaves.cpp", + "src/cg/aarch64/aarch64_utils.cpp", ] src_libcgriscv64 = [ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_ebo.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_ebo.h index de8e5c02739162c439e3a1375e4199fba8d979d8..638f09ffde66f495a5129295d48abdf29f64c699 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_ebo.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_ebo.h @@ -20,7 +20,6 @@ #include "aarch64_cgfunc.h" namespace maplebe { -using namespace maple; class AArch64Ebo : public Ebo { public: @@ -30,18 +29,9 @@ class AArch64Ebo : public Ebo { a64CGFunc = static_cast(cgFunc); } - ~AArch64Ebo() override = default; + enum ExtOpTable : uint8; - enum ExtOpTable { - AND = 0, - SXTB, - SXTH, - SXTW, - ZXTB, - ZXTH, - ZXTW, - ExtTableSize - }; + ~AArch64Ebo() override = default; protected: MapleVector callerSaveRegTable; diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_utils.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..cb8300a2e77492da7372e8fae83c09d453e3b114 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_utils.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_UTILS_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_UTILS_H + +#include "aarch64_cg.h" +#include "aarch64_operand.h" +#include "aarch64_cgfunc.h" + +namespace maplebe { + +/** + * Get or create new memory operand for load instruction loadIns for which + * machine opcode will be replaced with newLoadMop. + * + * @param loadIns load instruction + * @param newLoadMop new opcode for load instruction + * @return memory operand for new load machine opcode + * or nullptr if memory operand can't be obtained + */ +AArch64MemOperand *GetOrCreateMemOperandForNewMOP(CGFunc &cgFunc, + const Insn &loadIns, + MOperator newLoadMop); +} // namespace maplebe + +#endif // MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_UTILS_H diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_ebo.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_ebo.cpp index f6165fc0247499b0c6b1699211fe75bc583d9bd4..744fd8104bf0bc0f15b4a1af6d47e7ff058e827b 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_ebo.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_ebo.cpp @@ -15,22 +15,30 @@ #include "aarch64_ebo.h" #include "aarch64_cg.h" #include "mpl_logging.h" +#include "aarch64_utils.h" + namespace maplebe { using namespace maple; #define EBO_DUMP CG_DEBUG_FUNC(*cgFunc) -uint8 extIndexTable[AArch64Ebo::ExtTableSize][2] = { - /* extInsnPairTable row index, valid columns */ - {0, 1}, /* AND */ - {1, 2}, /* SXTB */ - {2, 4}, /* SXTH */ - {3, 5}, /* SXTW */ - {4, 2}, /* ZXTB */ - {5, 3}, /* ZXTH */ - {6, 3}, /* ZXTW */ +enum AArch64Ebo::ExtOpTable : uint8 { + AND, + SXTB, + SXTH, + SXTW, + ZXTB, + ZXTH, + ZXTW, + 
ExtTableSize }; -MOperator extInsnPairTable[AArch64Ebo::ExtTableSize][5][2] = { +namespace { + +using PairMOperator = MOperator[2]; + +constexpr uint8 insPairsNum = 5; + +PairMOperator extInsnPairTable[ExtTableSize][insPairsNum] = { /* {origMop, newMop} */ {{MOP_wldrb, MOP_wldrb}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}}, /* AND */ @@ -48,6 +56,8 @@ MOperator extInsnPairTable[AArch64Ebo::ExtTableSize][5][2] = { {MOP_undef, MOP_undef}} /* ZXTW */ }; +} // anonymous namespace + MOperator AArch64Ebo::ExtLoadSwitchBitSize(MOperator lowMop) const { switch (lowMop) { case MOP_wldrsb : @@ -817,42 +827,73 @@ bool AArch64Ebo::CombineExtensionAndLoad(Insn *insn, const MapleVectorinsn; - if (prevInsn != nullptr) { - uint32 rowIndex = extIndexTable[idx][0]; - uint32 numColumns = extIndexTable[idx][1]; - MOperator prevMop = prevInsn->GetMachineOpcode(); - for (uint32 i = 0; i < numColumns; ++i) { - if (prevMop == extInsnPairTable[rowIndex][i][0]) { - auto &res = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); - OpndInfo *prevOpndInfo = GetOpndInfo(res, -1); - MOperator newPreMop = extInsnPairTable[rowIndex][i][1]; - if (!ValidPatternForCombineExtAndLoad(prevOpndInfo, insn, newPreMop, prevMop, res)) { - return false; - } - if (is64bits && idx <= SXTW && idx >= SXTB) { - newPreMop = ExtLoadSwitchBitSize(newPreMop); - prevInsn->GetOperand(kInsnFirstOpnd).SetSize(k64BitSize); - } - prevInsn->SetMOP(newPreMop); - MOperator movOp = is64bits ? 
MOP_xmovrr : MOP_wmovrr; - if (insn->GetMachineOpcode() == MOP_wandrri12 || insn->GetMachineOpcode() == MOP_xandrri13) { - Insn &newInsn = cgFunc->GetCG()->BuildInstruction<AArch64Insn>(movOp, insn->GetOperand(kInsnFirstOpnd), - insn->GetOperand(kInsnSecondOpnd)); - insn->GetBB()->ReplaceInsn(*insn, newInsn); - } else { - insn->SetMOP(movOp); - } - return true; - } - } + if (prevInsn == nullptr) { + return false; } - return false; + + MOperator prevMop = prevInsn->GetMachineOpcode(); + ASSERT(prevMop != MOP_undef, "Invalid opcode of instruction!"); + + PairMOperator *begin = &extInsnPairTable[idx][0]; + PairMOperator *end = &extInsnPairTable[idx][insPairsNum]; + + auto pairIt = std::find_if(begin, end, [prevMop](PairMOperator insPair) { + return prevMop == insPair[0]; + }); + + if (pairIt == end) { + return false; + } + + auto &res = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnFirstOpnd)); + OpndInfo *prevOpndInfo = GetOpndInfo(res, -1); + + MOperator newPreMop = (*pairIt)[1]; + ASSERT(newPreMop != MOP_undef, "Invalid opcode of instruction!"); + + if (!ValidPatternForCombineExtAndLoad(prevOpndInfo, insn, newPreMop, prevMop, + res)) { + return false; + } + + auto *newMemOp = + GetOrCreateMemOperandForNewMOP(*cgFunc, *prevInsn, newPreMop); + + if (newMemOp == nullptr) { + return false; + } + + prevInsn->SetMemOpnd(newMemOp); + + if (is64bits && idx <= SXTW && idx >= SXTB) { + newPreMop = ExtLoadSwitchBitSize(newPreMop); + prevInsn->GetOperand(kInsnFirstOpnd).SetSize(k64BitSize); + } + + prevInsn->SetMOP(newPreMop); + + MOperator movOp = is64bits ? 
MOP_xmovrr : MOP_wmovrr; + + if (insn->GetMachineOpcode() == MOP_wandrri12 || + insn->GetMachineOpcode() == MOP_xandrri13) { + Insn &newInsn = cgFunc->GetCG()->BuildInstruction<AArch64Insn>( + movOp, insn->GetOperand(kInsnFirstOpnd), + insn->GetOperand(kInsnSecondOpnd)); + insn->GetBB()->ReplaceInsn(*insn, newInsn); + } else { + insn->SetMOP(movOp); + } + + return true; } bool AArch64Ebo::CombineMultiplyAdd(Insn *insn, const Insn *prevInsn, InsnInfo *insnInfo, Operand *addOpnd, diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp index 88d897a8e060885ac510f70d76793a6f3d646078..6c2bc6f22201751a4901ed48109e23b9e1aa9c62 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp @@ -17,6 +17,7 @@ #include "mpl_logging.h" #include "common_utils.h" #include "cg_option.h" +#include "aarch64_utils.h" namespace maplebe { #define JAVALANG (cgFunc.GetMirModule().IsJavaModule()) @@ -1660,6 +1661,14 @@ void ElimSpecificExtensionPattern::ElimExtensionAfterLoad(Insn &insn) { if (prevDstOpnd.GetSize() != currDstOpnd.GetSize()) { return; } + + auto *newMemOp = + GetOrCreateMemOperandForNewMOP(*cgFunc, *prevInsn, prevNewMop); + + if (newMemOp == nullptr) { + return; + } + + auto *aarCGSSAInfo = static_cast<AArch64CGSSAInfo*>(ssaInfo); if (CG_PEEP_DUMP) { LogInfo::MapleLogger() << ">>>>>>> In " << GetPatternName() << " : <<<<<<<\n"; @@ -1669,12 +1678,16 @@ void ElimSpecificExtensionPattern::ElimExtensionAfterLoad(Insn &insn) { aarCGSSAInfo->DumpInsnInSSAForm(*prevInsn); } } + + prevInsn->SetMemOpnd(newMemOp); prevInsn->SetMOP(prevNewMop); + if ((prevOrigMop != prevNewMop) && CG_PEEP_DUMP) { LogInfo::MapleLogger() << "======= NewPrevInsn : \n"; prevInsn->Dump(); aarCGSSAInfo->DumpInsnInSSAForm(*prevInsn); } + MOperator movMop = is64Bits ? 
MOP_xmovrr : MOP_wmovrr; Insn &newMovInsn = cgFunc->GetCG()->BuildInstruction<AArch64Insn>(movMop, insn.GetOperand(kInsnFirstOpnd), prevInsn->GetOperand(kInsnFirstOpnd)); diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_utils.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_utils.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6378799da1f1259f61186078cbc0cc375e21b654 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_utils.cpp @@ -0,0 +1,68 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
 */ +#include "aarch64_utils.h" +#include "cg_option.h" + +namespace maplebe { + +AArch64MemOperand *GetOrCreateMemOperandForNewMOP(CGFunc &cgFunc, + const Insn &loadIns, + MOperator newLoadMop) { + MemPool &memPool = *cgFunc.GetMemoryPool(); + auto *memOp = static_cast<AArch64MemOperand *>(loadIns.GetMemOpnd()); + MOperator loadMop = loadIns.GetMachineOpcode(); + + ASSERT(loadIns.IsLoad() && AArch64CG::kMd[newLoadMop].IsLoad(), + "ins and Mop must be load"); + + AArch64MemOperand *newMemOp = memOp; + + uint32 memSize = AArch64CG::kMd[loadMop].GetOperandSize(); + uint32 newMemSize = AArch64CG::kMd[newLoadMop].GetOperandSize(); + + if (newMemSize == memSize) { + // if sizes are the same just return old memory operand + return newMemOp; + } + + newMemOp = static_cast<AArch64MemOperand *>(memOp->Clone(memPool)); + newMemOp->SetSize(newMemSize); + + if (!CGOptions::IsBigEndian()) { + return newMemOp; + } + + // for big-endian it's necessary to adjust offset if it's present + if (memOp->GetAddrMode() != AArch64MemOperand::kAddrModeBOi || + newMemSize > memSize) { + // currently, it's possible to adjust an offset only for immediate offset + // operand if new size is less than the original one + return nullptr; + } + + auto *newOffOp = static_cast<AArch64OfstOperand *>( + memOp->GetOffsetImmediate()->Clone(memPool)); + + newOffOp->AdjustOffset((memSize - newMemSize) >> kLog2BitsPerByte); + newMemOp->SetOffsetImmediate(*newOffOp); + + ASSERT(memOp->IsOffsetMisaligned(memSize) || + !newMemOp->IsOffsetMisaligned(newMemSize), + "New offset value is misaligned!"); + + return newMemOp; +} + +} // namespace maplebe