diff --git a/src/bin/jbc2mpl b/src/bin/jbc2mpl index e4edc0088c3761b77eff352ab2cf727f45936434..836a8e20f61807b83e641cf01e9b83eb2a6b7da0 100755 Binary files a/src/bin/jbc2mpl and b/src/bin/jbc2mpl differ diff --git a/src/bin/maple b/src/bin/maple index 199020769a739f36037ac65f38798197fde6f8aa..9738820d68b7627dd797530c339b76d8e4d6c732 100755 Binary files a/src/bin/maple and b/src/bin/maple differ diff --git a/src/deplibs/libmplphase.a b/src/deplibs/libmplphase.a index fc6c265a96a5f519d8e02dc21fc81ef454f401ec..62b849b1a308b35f7930bfce3dc445bc56caeb21 100644 Binary files a/src/deplibs/libmplphase.a and b/src/deplibs/libmplphase.a differ diff --git a/src/deplibs/libmplutil.a b/src/deplibs/libmplutil.a index 4970e2465e66340d6ad134f541b625e261e30212..2431c2f66129f99ceb0fef9f12c460ca520a7209 100644 Binary files a/src/deplibs/libmplutil.a and b/src/deplibs/libmplutil.a differ diff --git a/src/maple_be/include/be/common_utils.h b/src/maple_be/include/be/common_utils.h index 05c5db1897c356ecb8b68f9d0cc58b860fcce521..775795da8c91c3a52c61505f1768156c803ec7eb 100644 --- a/src/maple_be/include/be/common_utils.h +++ b/src/maple_be/include/be/common_utils.h @@ -86,12 +86,12 @@ constexpr int32 kEARetTempNameSize = 10; * Aarch64 data processing instructions have 12 bits of space for values in their instuction word * This is arranged as a four-bit rotate value and an eight-bit immediate value: */ -constexpr uint32 kMaxAarch64ImmVal12Bits = 12; - -constexpr uint32 kMaxAarch64ImmVal13Bits = 13; +constexpr uint32 kMaxImmVal8Bits = 8; +constexpr uint32 kMaxImmVal12Bits = 12; +constexpr uint32 kMaxImmVal13Bits = 13; /* aarch64 assembly takes up to 24-bits */ -constexpr uint32 kMaxAarch64ImmVal24Bits = 24; +constexpr uint32 kMaxImmVal24Bits = 24; constexpr double kMicroSecPerMilliSec = 1000.0; diff --git a/src/maple_be/include/cg/aarch64/aarch64_ebo.h b/src/maple_be/include/cg/aarch64/aarch64_ebo.h index 79fa21e7ed264b4819ae2af45d0bb5d2a141fa21..ad12b65fb8aefdb9cda8ebe2516b7e0b88f96afc 100644 --- a/src/maple_be/include/cg/aarch64/aarch64_ebo.h +++ b/src/maple_be/include/cg/aarch64/aarch64_ebo.h @@ -57,6 +57,7 @@ class AArch64Ebo : public Ebo { bool IsClinitCheck(const Insn &insn) const override; bool IsLastAndBranch(BB &bb, Insn &insn) const override; bool ResIsNotDefAndUse(Insn &insn) const override; + bool LiveOutOfBB(const Operand &opnd, const BB &bb) const override; private: /* The number of elements in callerSaveRegTable must less then 45. 
*/ diff --git a/src/maple_be/include/cg/aarch64/aarch64_operand.h b/src/maple_be/include/cg/aarch64/aarch64_operand.h index de7930a63292aa1c950356cbb3345347f6a2dc67..26296b1ade891aed7082dcf5d39887290f729110 100644 --- a/src/maple_be/include/cg/aarch64/aarch64_operand.h +++ b/src/maple_be/include/cg/aarch64/aarch64_operand.h @@ -149,6 +149,14 @@ class AArch64ImmOperand : public ImmOperand { return memPool.Clone(*this); } + bool IsInBitSize(uint8 size, uint8 nLowerZeroBits) const override { + /* mask1 is a 64-bit number with all 1s shifted left by size bits */ + const uint64 mask1 = 0xffffffffffffffffUL << size; + /* mask2 is a 64-bit number whose low nLowerZeroBits bits are all 1 and whose higher bits are all 0 */ + uint64 mask2 = (static_cast<uint64>(1) << static_cast<uint64>(nLowerZeroBits)) - 1UL; + return (mask2 & value) == 0UL && (mask1 & ((static_cast<uint64>(value)) >> nLowerZeroBits)) == 0UL; + } + bool IsBitmaskImmediate() const { ASSERT(!IsZero(), " 0 is reserved for bitmask immediate"); ASSERT(!IsAllOnes(), " -1 is reserved for bitmask immediate"); @@ -240,6 +248,14 @@ class AArch64OfstOperand : public OfstOperand { return memPool.Clone(*this); } + bool IsInBitSize(uint8 size, uint8 nLowerZeroBits) const override { + /* mask1 is a 64-bit number with all 1s shifted left by size bits */ + const uint64 mask1 = 0xffffffffffffffffUL << size; + /* mask2 is a 64-bit number whose low nLowerZeroBits bits are all 1 and whose higher bits are all 0 */ + uint64 mask2 = (static_cast<uint64>(1) << static_cast<uint64>(nLowerZeroBits)) - 1UL; + return (mask2 & value) == 0UL && (mask1 & ((static_cast<uint64>(value)) >> nLowerZeroBits)) == 0UL; + } + bool IsSymOffset() const { return offsetType == kSymbolOffset; } diff --git a/src/maple_be/include/cg/ebo.h b/src/maple_be/include/cg/ebo.h index ed3572ef60c67a6454435b07021828def2eb2ab0..8485973d1556286fb30dd914e92e716aee8e020e 100644 --- a/src/maple_be/include/cg/ebo.h +++ b/src/maple_be/include/cg/ebo.h @@ -50,6 +50,9 @@ struct OpndInfo { InsnInfo *insnInfo = nullptr; bool redefinedInBB = false; /* A following definition exisit in bb. */ bool redefined = false; /* A following definition exisit. */ +#if TARGARM32 + bool mayReDef = false; +#endif OpndInfo *same = nullptr; /* Other definitions of the same operand.
*/ OpndInfo *prev = nullptr; OpndInfo *next = nullptr; @@ -141,7 +144,10 @@ class Ebo { bool IsPhysicalReg(const Operand &opnd) const; bool HasAssignedReg(const Operand &opnd) const; bool IsOfSameClass(const Operand &op0, const Operand &op1) const; - bool OpndAvailableInBB(const BB &bb, OpndInfo &info); + bool OpndAvailableInBB(const BB &bb, OpndInfo *info); + bool ForwardPropCheck(const Operand *opndReplace, OpndInfo &opndInfo, const Operand &opnd, Insn &insn); + bool RegForwardCheck(Insn &insn, const Operand &opnd, const Operand *opndReplace, Operand &oldOpnd, + const OpndInfo *tmpInfo); bool IsNotVisited(const BB &bb) { return !visitedBBs.at(bb.GetId()); }; @@ -160,7 +166,6 @@ class Ebo { int32 ComputeOpndHash(const Operand &opnd) const; uint32 ComputeHashVal(const Insn &insn, const MapleVector &opndInfo) const; void MarkOpndLiveIntoBB(const Operand &opnd, BB &intoBB, BB &outOfBB) const; - bool LiveOutOfBB(const Operand &opnd, const BB &bb) const; void RemoveInsn(InsnInfo &insnInfo); void RemoveUses(uint32 opndNum, const MapleVector &origInfo); void HashInsn(Insn &insn, const MapleVector &origInfo, const MapleVector &opndInfo); @@ -204,6 +209,7 @@ class Ebo { virtual bool IsClinitCheck(const Insn &insn) const = 0; virtual bool IsLastAndBranch(BB &bb, Insn &insn) const = 0; virtual bool ResIsNotDefAndUse(Insn &insn) const = 0; + virtual bool LiveOutOfBB(const Operand &opnd, const BB &bb) const = 0; OpndInfo *BuildMemOpndInfo(BB &bb, Insn &insn, Operand &opnd, int32 opndIndex); OpndInfo *BuildOperandInfo(BB &bb, Insn &insn, Operand &opnd, uint32 opndIndex, MapleVector &origInfos); bool ForwardPropagateOpnd(Insn &insn, Operand *&opnd, uint32 opndIndex, OpndInfo *&opndInfo, diff --git a/src/maple_be/include/cg/operand.h b/src/maple_be/include/cg/operand.h index d029d14d874e1203f976cb5846c50cc0660ba756..f640baa6a4697b7ff264e6b22b00dc1bf0e46a5c 100644 --- a/src/maple_be/include/cg/operand.h +++ b/src/maple_be/include/cg/operand.h @@ -328,6 +328,7 @@ class ImmOperand : public Operand { ~ImmOperand() override = default; virtual bool IsSingleInstructionMovable() const = 0; + virtual bool IsInBitSize(uint8 size, uint8 nLowerZeroBits) const = 0; int64 GetValue() const { return value; @@ -357,14 +358,6 @@ class ImmOperand : public Operand { return isSigned; } - bool IsInBitSize(uint8 size, uint8 nLowerZeroBits = 0) const { - /* mask1 is a 64bits number that is all 1 shifts left size bits */ - const uint64 mask1 = 0xffffffffffffffffUL << size; - /* mask2 is a 64 bits number that nlowerZeroBits are all 1, higher bits aro all 0 */ - uint64 mask2 = (static_cast(1) << static_cast(nLowerZeroBits)) - 1UL; - return (mask2 & value) == 0UL && (mask1 & ((static_cast(value)) >> nLowerZeroBits)) == 0UL; - } - bool IsInBitSizeRot(uint8 size) const { return IsInBitSizeRot(size, value); } diff --git a/src/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp b/src/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp index cf1465544304ecd5319897b948d8c488d028a832..8c73570d2d8a0850769a312bf4efe5374e8fac3d 100644 --- a/src/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp +++ b/src/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp @@ -1786,8 +1786,8 @@ void AArch64CGFunc::SelectCondGoto(LabelOperand &targetOpnd, Opcode jmpOp, Opcod * either cmp or cmp with shift 12 encoding */ ImmOperand *immOpnd = static_cast(opnd1); - if (immOpnd->IsInBitSize(kMaxAarch64ImmVal12Bits) || - immOpnd->IsInBitSize(kMaxAarch64ImmVal12Bits, kMaxAarch64ImmVal12Bits)) { + if (immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || + immOpnd->IsInBitSize(kMaxImmVal12Bits, 
kMaxImmVal12Bits)) { mOp = is64Bits ? MOP_xcmpri : MOP_wcmpri; } else { opnd1 = &SelectCopy(*opnd1, primType, primType); @@ -1958,7 +1958,7 @@ void AArch64CGFunc::SelectAdd(Operand &resOpnd, Operand &opnd0, Operand &opnd1, SelectSub(resOpnd, opnd0, *immOpnd, primType); return; } - if (immOpnd->IsInBitSize(kMaxAarch64ImmVal24Bits)) { + if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0)) { /* * ADD Wd|WSP, Wn|WSP, #imm{, shift} ; 32-bit general registers * ADD Xd|SP, Xn|SP, #imm{, shift} ; 64-bit general registers @@ -1967,15 +1967,15 @@ void AArch64CGFunc::SelectAdd(Operand &resOpnd, Operand &opnd0, Operand &opnd1, */ MOperator mOpCode = MOP_undef; Operand *newOpnd0 = &opnd0; - if (!(immOpnd->IsInBitSize(kMaxAarch64ImmVal12Bits) || - immOpnd->IsInBitSize(kMaxAarch64ImmVal12Bits, kMaxAarch64ImmVal12Bits))) { + if (!(immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || + immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) { /* process higher 12 bits */ - ImmOperand &immOpnd2 = CreateImmOperand(static_cast(immOpnd->GetValue()) >> kMaxAarch64ImmVal12Bits, + ImmOperand &immOpnd2 = CreateImmOperand(static_cast(immOpnd->GetValue()) >> kMaxImmVal12Bits, immOpnd->GetSize(), immOpnd->IsSignedValue()); mOpCode = is64Bits ? MOP_xaddrri24 : MOP_waddrri24; Insn &newInsn = GetCG()->BuildInstruction(mOpCode, resOpnd, opnd0, immOpnd2, addSubLslOperand); GetCurBB()->AppendInsn(newInsn); - immOpnd->ModuloByPow2(kMaxAarch64ImmVal12Bits); + immOpnd->ModuloByPow2(kMaxImmVal12Bits); newOpnd0 = &resOpnd; } /* process lower 12 bits */ @@ -2060,7 +2060,7 @@ void AArch64CGFunc::SelectSub(Operand &resOpnd, Operand &opnd0, Operand &opnd1, return; } - if (immOpnd->IsInBitSize(kMaxAarch64ImmVal24Bits)) { + if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0)) { /* * SUB Wd|WSP, Wn|WSP, #imm{, shift} ; 32-bit general registers * SUB Xd|SP, Xn|SP, #imm{, shift} ; 64-bit general registers @@ -2068,15 +2068,15 @@ void AArch64CGFunc::SelectSub(Operand &resOpnd, Operand &opnd0, Operand &opnd1, * aarch64 assembly takes up to 24-bits, if the lower 12 bits is all 0 */ MOperator mOpCode = MOP_undef; - if (!(immOpnd->IsInBitSize(kMaxAarch64ImmVal12Bits) || - immOpnd->IsInBitSize(kMaxAarch64ImmVal12Bits, kMaxAarch64ImmVal12Bits))) { + if (!(immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || + immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) { /* process higher 12 bits */ - ImmOperand &immOpnd2 = CreateImmOperand(static_cast(immOpnd->GetValue()) >> kMaxAarch64ImmVal12Bits, + ImmOperand &immOpnd2 = CreateImmOperand(static_cast(immOpnd->GetValue()) >> kMaxImmVal12Bits, immOpnd->GetSize(), immOpnd->IsSignedValue()); mOpCode = is64Bits ? MOP_xsubrri24 : MOP_wsubrri24; Insn &newInsn = GetCG()->BuildInstruction(mOpCode, resOpnd, *opnd0Bak, immOpnd2, addSubLslOperand); GetCurBB()->AppendInsn(newInsn); - immOpnd->ModuloByPow2(kMaxAarch64ImmVal12Bits); + immOpnd->ModuloByPow2(kMaxImmVal12Bits); opnd0Bak = &resOpnd; } /* process lower 12 bits */ @@ -2598,7 +2598,7 @@ void AArch64CGFunc::SelectAArch64Cmp(Operand &o0, Operand &o1, bool isIntType, u * imm : 0 ~ 4095, shift: none, LSL #0, or LSL #12 * aarch64 assembly takes up to 24-bits, if the lower 12 bits is all 0 */ - if (immOpnd->IsInBitSize(12) || immOpnd->IsInBitSize(12, 12)) { + if (immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits)) { mOpCode = (dsize == k64BitSize) ? 
MOP_xcmpri : MOP_wcmpri; } else { /* load into register */ @@ -3006,7 +3006,7 @@ Operand *AArch64CGFunc::SelectExtractbits(ExtractbitsNode &node, Operand &srcOpn uint8 bitOffset = node.GetBitsOffset(); uint8 bitSize = node.GetBitsSize(); bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize); - uint32 immWidth = is64Bits ? kMaxAarch64ImmVal13Bits : kMaxAarch64ImmVal12Bits; + uint32 immWidth = is64Bits ? kMaxImmVal13Bits : kMaxImmVal12Bits; Operand &opnd0 = LoadIntoRegister(srcOpnd, dtype); if ((bitOffset == 0) && !isSigned && (bitSize < immWidth)) { SelectBand(resOpnd, opnd0, CreateImmOperand((static_cast(1) << bitSize) - 1, immWidth, false), dtype); @@ -5303,7 +5303,7 @@ AArch64MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(AArch64MemOperand::AArch64A /* offset: base offset from FP or SP */ MemOperand &AArch64CGFunc::CreateMemOpnd(RegOperand &baseOpnd, int32 offset, uint32 size) { AArch64OfstOperand &offsetOpnd = CreateOfstOpnd(offset, k32BitSize); - if (!ImmOperand::IsInBitSizeRot(kMaxAarch64ImmVal12Bits, offset)) { + if (!ImmOperand::IsInBitSizeRot(kMaxImmVal12Bits, offset)) { Operand *resImmOpnd = &SelectCopy(CreateImmOperand(offset, k32BitSize, true), PTY_i32, PTY_i32); return *memPool->New(AArch64MemOperand::kAddrModeBOi, size, baseOpnd, static_cast(resImmOpnd), nullptr, nullptr); @@ -5317,7 +5317,7 @@ MemOperand &AArch64CGFunc::CreateMemOpnd(RegOperand &baseOpnd, int32 offset, uin /* offset: base offset + #:lo12:Label+immediate */ MemOperand &AArch64CGFunc::CreateMemOpnd(RegOperand &baseOpnd, int32 offset, uint32 size, const MIRSymbol &sym) { AArch64OfstOperand &offsetOpnd = CreateOfstOpnd(offset, k32BitSize); - ASSERT(ImmOperand::IsInBitSizeRot(kMaxAarch64ImmVal12Bits, offset), ""); + ASSERT(ImmOperand::IsInBitSizeRot(kMaxImmVal12Bits, offset), ""); return *memPool->New(AArch64MemOperand::kAddrModeBOi, size, baseOpnd, nullptr, &offsetOpnd, &sym); } @@ -5553,13 +5553,13 @@ void AArch64CGFunc::SelectAddAfterInsn(Operand &resOpnd, Operand &opnd0, Operand MOperator mOpCode = MOP_undef; /* lower 24 bits has 1, higher bits are all 0 */ - if (immOpnd->IsInBitSize(kMaxAarch64ImmVal24Bits)) { + if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0)) { /* lower 12 bits and higher 12 bits both has 1 */ Operand *newOpnd0 = &opnd0; - if (!(immOpnd->IsInBitSize(kMaxAarch64ImmVal12Bits) || - immOpnd->IsInBitSize(kMaxAarch64ImmVal12Bits, kMaxAarch64ImmVal12Bits))) { + if (!(immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || + immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) { /* process higher 12 bits */ - ImmOperand &immOpnd2 = CreateImmOperand(static_cast(immOpnd->GetValue()) >> kMaxAarch64ImmVal12Bits, + ImmOperand &immOpnd2 = CreateImmOperand(static_cast(immOpnd->GetValue()) >> kMaxImmVal12Bits, immOpnd->GetSize(), immOpnd->IsSignedValue()); mOpCode = is64Bits ? 
MOP_xaddrri24 : MOP_waddrri24; Insn &newInsn = GetCG()->BuildInstruction(mOpCode, resOpnd, opnd0, immOpnd2, addSubLslOperand); @@ -5569,7 +5569,7 @@ void AArch64CGFunc::SelectAddAfterInsn(Operand &resOpnd, Operand &opnd0, Operand insn.GetBB()->InsertInsnBefore(insn, newInsn); } /* get lower 12 bits value */ - immOpnd->ModuloByPow2(kMaxAarch64ImmVal12Bits); + immOpnd->ModuloByPow2(kMaxImmVal12Bits); newOpnd0 = &resOpnd; } /* process lower 12 bits value */ diff --git a/src/maple_be/src/cg/aarch64/aarch64_ebo.cpp b/src/maple_be/src/cg/aarch64/aarch64_ebo.cpp index 5eef9ee2cd17be8398ec736f979a72e2b4c863a0..1fec18814d533c98eb12edcf10d4e2eee6f2d8ca 100644 --- a/src/maple_be/src/cg/aarch64/aarch64_ebo.cpp +++ b/src/maple_be/src/cg/aarch64/aarch64_ebo.cpp @@ -75,6 +75,20 @@ bool AArch64Ebo::ResIsNotDefAndUse(Insn &insn) const { return true; } +/* Return true if opnd live out of bb. */ +bool AArch64Ebo::LiveOutOfBB(const Operand &opnd, const BB &bb) const { + CHECK_FATAL(opnd.IsRegister(), "expect register here."); + /* when optimize_level < 2, there is need to anlyze live range. */ + if (live == nullptr) { + return false; + } + bool isLiveOut = false; + if (bb.GetLiveOut()->TestBit(static_cast(&opnd)->GetRegisterNumber())) { + isLiveOut = true; + } + return isLiveOut; +} + bool AArch64Ebo::IsLastAndBranch(BB &bb, Insn &insn) const { return (bb.GetLastInsn() == &insn) && insn.IsBranch(); } @@ -299,9 +313,8 @@ bool AArch64Ebo::DoConstProp(Insn &insn, uint32 idx, Operand &opnd) { case MOP_waddrrr: case MOP_xsubrrr: case MOP_wsubrrr: { - if ((idx != kInsnThirdOpnd) || !src->IsInBitSize(kMaxAarch64ImmVal24Bits) || - !(src->IsInBitSize(kMaxAarch64ImmVal12Bits) || - src->IsInBitSize(kMaxAarch64ImmVal12Bits, kMaxAarch64ImmVal12Bits))) { + if ((idx != kInsnThirdOpnd) || !src->IsInBitSize(kMaxImmVal24Bits, 0) || + !(src->IsInBitSize(kMaxImmVal12Bits, 0) || src->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) { return false; } Operand &result = insn.GetOperand(0); @@ -490,15 +503,14 @@ bool AArch64Ebo::SimplifyConstOperand(Insn &insn, const MapleVector &o } if ((insn.GetMachineOpcode() == MOP_xaddrrr) || (insn.GetMachineOpcode() == MOP_waddrrr)) { - if (immOpnd->IsInBitSize(kMaxAarch64ImmVal24Bits)) { + if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0)) { /* * ADD Wd|WSP, Wn|WSP, #imm{, shift} ; 32-bit general registers * ADD Xd|SP, Xn|SP, #imm{, shift} ; 64-bit general registers * imm : 0 ~ 4095, shift: none, LSL #0, or LSL #12 * aarch64 assembly takes up to 24-bits, if the lower 12 bits is all 0 */ - if ((immOpnd->IsInBitSize(kMaxAarch64ImmVal12Bits) || - immOpnd->IsInBitSize(kMaxAarch64ImmVal12Bits, kMaxAarch64ImmVal12Bits))) { + if (immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits)) { MOperator mOp = opndSize == k64BitSize ? 
MOP_xaddrri12 : MOP_waddrri12; Insn &newInsn = cgFunc->GetCG()->BuildInstruction(mOp, *res, *op, *immOpnd); bb->ReplaceInsn(insn, newInsn); @@ -520,8 +532,8 @@ bool AArch64Ebo::SimplifyConstOperand(Insn &insn, const MapleVector &o AArch64ImmOperand &imm0 = static_cast(prev->GetOperand(kInsnThirdOpnd)); int64_t val = imm0.GetValue() + immOpnd->GetValue(); AArch64ImmOperand &imm1 = a64CGFunc->CreateImmOperand(val, opndSize, imm0.IsSignedValue()); - if (imm1.IsInBitSize(kMaxAarch64ImmVal24Bits) && (imm1.IsInBitSize(kMaxAarch64ImmVal12Bits) || - imm1.IsInBitSize(kMaxAarch64ImmVal12Bits, kMaxAarch64ImmVal12Bits))) { + if (imm1.IsInBitSize(kMaxImmVal24Bits, 0) && (imm1.IsInBitSize(kMaxImmVal12Bits, 0) || + imm1.IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) { MOperator mOp = (opndSize == k64BitSize ? MOP_xaddrri12 : MOP_waddrri12); bb->ReplaceInsn(insn, cgFunc->GetCG()->BuildInstruction(mOp, *res, prevOpnd0, imm1)); result = true; @@ -724,8 +736,7 @@ bool AArch64Ebo::SpecialSequence(Insn &insn, const MapleVector &origI auto &res1 = static_cast(insn1->GetOperand(kInsnFirstOpnd)); if (RegistersIdentical(res1, *op1) && RegistersIdentical(res1, res2) && (GetOpndInfo(base2, -1) != nullptr) && !GetOpndInfo(base2, -1)->redefined) { - immVal = - imm0Val + imm1.GetValue() + (static_cast(immOpnd2.GetValue()) << kMaxAarch64ImmVal12Bits); + immVal = imm0Val + imm1.GetValue() + (static_cast(immOpnd2.GetValue()) << kMaxImmVal12Bits); op1 = &base2; } else { return false; diff --git a/src/maple_be/src/cg/aarch64/aarch64_peep.cpp b/src/maple_be/src/cg/aarch64/aarch64_peep.cpp index bd71d6e116c52b2b1bac53144fc63b75524c5f45..637ca7bc0cd4d8eeab71e8734729da390d66ae15 100644 --- a/src/maple_be/src/cg/aarch64/aarch64_peep.cpp +++ b/src/maple_be/src/cg/aarch64/aarch64_peep.cpp @@ -2462,13 +2462,11 @@ void ComputationTreeAArch64::Run(BB &bb, Insn &insn) { if (lsl.GetShiftAmount() == lslShiftAmountCaseA) { sxtw = &aarch64CGFunc->CreateExtendShiftOperand(ExtendShiftOperand::kSXTW, lslShiftAmountCaseA + 1, lslBitLenth); - imm = &aarch64CGFunc->CreateImmOperand(oriAddEnd + (1ULL << lslShiftAmountCaseA), - kMaxAarch64ImmVal12Bits, true); + imm = &aarch64CGFunc->CreateImmOperand(oriAddEnd + (1ULL << lslShiftAmountCaseA), kMaxImmVal12Bits, true); } else if (lsl.GetShiftAmount() == lslShiftAmountCaseB) { sxtw = &aarch64CGFunc->CreateExtendShiftOperand(ExtendShiftOperand::kSXTW, lslShiftAmountCaseB + 1, lslBitLenth); - imm = &aarch64CGFunc->CreateImmOperand(oriAddEnd + (1ULL << lslShiftAmountCaseB), - kMaxAarch64ImmVal12Bits, true); + imm = &aarch64CGFunc->CreateImmOperand(oriAddEnd + (1ULL << lslShiftAmountCaseB), kMaxImmVal12Bits, true); } Insn &newInsn = cgFunc.GetCG()->BuildInstruction(MOP_xxwaddrrre, sxtwInsn->GetOperand(kInsnFirstOpnd), diff --git a/src/maple_be/src/cg/ebo.cpp b/src/maple_be/src/cg/ebo.cpp index 8099276077e87dbc358a2c1075299aa9ef095db1..76616c39801c609f7cd9e002788db281b14da3ad 100644 --- a/src/maple_be/src/cg/ebo.cpp +++ b/src/maple_be/src/cg/ebo.cpp @@ -119,12 +119,15 @@ bool Ebo::IsOfSameClass(const Operand &op0, const Operand &op1) const { } /* return true if opnd of bb is available. 
*/ -bool Ebo::OpndAvailableInBB(const BB &bb, OpndInfo &info) { - if (info.opnd == nullptr) { +bool Ebo::OpndAvailableInBB(const BB &bb, OpndInfo *info) { + if (info == nullptr) { + return false; + } + if (info->opnd == nullptr) { return false; } - Operand *op = info.opnd; + Operand *op = info->opnd; if (op->IsConstant()) { return true; } @@ -133,13 +136,13 @@ bool Ebo::OpndAvailableInBB(const BB &bb, OpndInfo &info) { if (op->IsRegShift() || op->IsRegister()) { hashVal = -1; } else { - hashVal = info.hashVal; + hashVal = info->hashVal; } - if (GetOpndInfo(*op, hashVal) != &info) { + if (GetOpndInfo(*op, hashVal) != info) { return false; } /* global operands aren't supported at low levels of optimization. */ - if ((Globals::GetInstance()->GetOptimLevel() < CGOptions::kLevel2) && (&bb != info.bb)) { + if ((Globals::GetInstance()->GetOptimLevel() < CGOptions::kLevel2) && (&bb != info->bb)) { return false; } if (beforeRegAlloc && IsPhysicalReg(*op)) { @@ -148,6 +151,54 @@ bool Ebo::OpndAvailableInBB(const BB &bb, OpndInfo &info) { return true; } +bool Ebo::ForwardPropCheck(const Operand *opndReplace, OpndInfo &opndInfo, const Operand &opnd, Insn &insn) { + if (opndReplace == nullptr) { + return false; + } + if ((opndInfo.replacementInfo != nullptr) && opndInfo.replacementInfo->redefined) { + return false; + } +#if TARGARM32 + /* for arm32, disable forwardProp in strd insn. */ + if (insn.GetMachineOpcode() == MOP_strd) { + return false; + } + if (opndInfo.mayReDef) { + return false; + } +#endif + if (!(opndReplace->IsConstant() || + ((OpndAvailableInBB(*insn.GetBB(), opndInfo.replacementInfo) || RegistersIdentical(opnd, *opndReplace)) && + (HasAssignedReg(opnd) == HasAssignedReg(*opndReplace))))) { + return false; + } + /* if beforeRA, replace op should not be PhysicalRe */ + return !beforeRegAlloc || !IsPhysicalReg(*opndReplace); +} + +bool Ebo::RegForwardCheck(Insn &insn, const Operand &opnd, const Operand *opndReplace, Operand &oldOpnd, + const OpndInfo *tmpInfo) { + if (opnd.IsConstant()) { + return false; + } + if (!(!beforeRegAlloc || (HasAssignedReg(oldOpnd) == HasAssignedReg(*opndReplace)) || opnd.IsConstReg() || + !insn.IsMove())) { + return false; + } + if (!((insn.GetResultNum() == 0) || + (((insn.GetResult(0) != nullptr) && !RegistersIdentical(opnd, *(insn.GetResult(0)))) || !beforeRegAlloc))) { + return false; + } + if (!(beforeRegAlloc || !IsFrameReg(oldOpnd))) { + return false; + } + if (insn.IsDestRegAlsoSrcReg()) { + return false; + } + return ((IsOfSameClass(oldOpnd, *opndReplace) && (oldOpnd.GetSize() <= opndReplace->GetSize())) || + ((tmpInfo != nullptr) && IsMovToSIMDVmov(insn, *tmpInfo->insn))); +} + /* For Memory Operand, its info was stored in a hash table, this function is to compute its hash value. 
*/ int32 Ebo::ComputeOpndHash(const Operand &opnd) const { uint64 hashIdx = reinterpret_cast(&opnd) >> k4ByteSize; @@ -489,85 +540,73 @@ bool Ebo::ForwardPropagateOpnd(Insn &insn, Operand *&opnd, uint32 opndIndex, } /* forward propagation of constants */ - ASSERT(opndIndex < origInfos.size(), "SetOpndInfo hashval outof range!"); - if ((opndReplace != nullptr) && !((opndInfo->replacementInfo != nullptr) && opndInfo->replacementInfo->redefined) && - (opndReplace->IsConstant() || - ((((opndInfo->replacementInfo != nullptr) && OpndAvailableInBB(*insn.GetBB(), *opndInfo->replacementInfo)) || - RegistersIdentical(*opnd, *opndReplace)) && - (HasAssignedReg(*opnd) == HasAssignedReg(*opndReplace)))) && - (!beforeRegAlloc || (!IsPhysicalReg(*opndReplace)))) { - Operand *oldOpnd = opnd; - opnd = opndInfo->replacementOpnd; - opndInfo = opndInfo->replacementInfo; - - /* constant prop. */ - if (opnd->IsIntImmediate() && oldOpnd->IsRegister()) { - if (DoConstProp(insn, opndIndex, *opnd)) { - DecRef(*origInfos.at(opndIndex)); - /* Update the actual expression info. */ - origInfos.at(opndIndex) = opndInfo; - } - } - /* move reg, wzr, store vreg, mem ==> store wzr, mem */ -#if TARGAARCH64 - if (opnd->IsZeroRegister() && opndIndex == 0 && - (insn.GetMachineOpcode() == MOP_wstr || insn.GetMachineOpcode() == MOP_xstr)) { - if (EBO_DUMP) { - LogInfo::MapleLogger() << "===replace operand " << opndIndex << " of insn: \n"; - insn.Dump(); - LogInfo::MapleLogger() << "the new insn is:\n"; - } - insn.SetOperand(opndIndex, *opnd); + CHECK_FATAL(opndIndex < origInfos.size(), "SetOpndInfo hashval outof range!"); + if (!ForwardPropCheck(opndReplace, *opndInfo, *opnd, insn)) { + return false; + } + Operand *oldOpnd = opnd; + opnd = opndInfo->replacementOpnd; + opndInfo = opndInfo->replacementInfo; + + /* constant prop. */ + if (opnd->IsIntImmediate() && oldOpnd->IsRegister()) { + if (DoConstProp(insn, opndIndex, *opnd)) { DecRef(*origInfos.at(opndIndex)); /* Update the actual expression info. */ origInfos.at(opndIndex) = opndInfo; - if (EBO_DUMP) { - insn.Dump(); - } } + } + /* move reg, wzr, store vreg, mem ==> store wzr, mem */ +#if TARGAARCH64 + if (opnd->IsZeroRegister() && opndIndex == 0 && + (insn.GetMachineOpcode() == MOP_wstr || insn.GetMachineOpcode() == MOP_xstr)) { + if (EBO_DUMP) { + LogInfo::MapleLogger() << "===replace operand " << opndIndex << " of insn: \n"; + insn.Dump(); + LogInfo::MapleLogger() << "the new insn is:\n"; + } + insn.SetOperand(opndIndex, *opnd); + DecRef(*origInfos.at(opndIndex)); + /* Update the actual expression info. */ + origInfos.at(opndIndex) = opndInfo; + if (EBO_DUMP) { + insn.Dump(); + } + } #endif - /* forward prop for registers. */ - if (!opnd->IsConstant() && - (!beforeRegAlloc || (HasAssignedReg(*oldOpnd) == HasAssignedReg(*opndReplace)) || opnd->IsConstReg() || - !insn.IsMove()) && - (opndInfo != nullptr) && - ((insn.GetResultNum() == 0) || - (((insn.GetResult(0) != nullptr) && !RegistersIdentical(*opnd, *(insn.GetResult(0)))) || !beforeRegAlloc)) && - (beforeRegAlloc || !IsFrameReg(*oldOpnd)) && !insn.IsDestRegAlsoSrcReg() && - ((IsOfSameClass(*oldOpnd, *opndReplace) && (oldOpnd->GetSize() <= opndReplace->GetSize())) || - IsMovToSIMDVmov(insn, *origInfos.at(opndIndex)->insn))) { - /* Copies to and from the same register are not needed. 
*/ - if (!beforeRegAlloc && insn.IsEffectiveCopy() && (insn.CopyOperands() == opndIndex) && - RegistersIdentical(*opnd, *(insn.GetResult(0)))) { - if (EBO_DUMP) { - LogInfo::MapleLogger() << "===replace operand " << opndIndex << " of insn: \n"; - insn.Dump(); - LogInfo::MapleLogger() << "===Remove the new insn because Copies to and from the same register. \n"; - } - return true; - } - - if (EBO_DUMP) { - LogInfo::MapleLogger() << "===replace operand " << opndIndex << " of insn: \n"; - insn.Dump(); - LogInfo::MapleLogger() << "the new insn is:\n"; - } - DecRef(*origInfos.at(opndIndex)); - insn.SetOperand(opndIndex, *opnd); - - if (EBO_DUMP) { - insn.Dump(); - } - IncRef(*opndInfo); - /* Update the actual expression info. */ - origInfos.at(opndIndex) = opndInfo; - /* extend the live range of the replacement operand. */ - if ((opndInfo->bb != insn.GetBB()) && opnd->IsRegister()) { - MarkOpndLiveIntoBB(*opnd, *insn.GetBB(), *opndInfo->bb); - } + /* forward prop for registers. */ + if (!RegForwardCheck(insn, *opnd, opndReplace, *oldOpnd, origInfos.at(opndIndex))) { + return false; + } + /* Copies to and from the same register are not needed. */ + if (!beforeRegAlloc && insn.IsEffectiveCopy() && (insn.CopyOperands() == opndIndex) && + RegistersIdentical(*opnd, *(insn.GetResult(0)))) { + if (EBO_DUMP) { + LogInfo::MapleLogger() << "===replace operand " << opndIndex << " of insn: \n"; + insn.Dump(); + LogInfo::MapleLogger() << "===Remove the new insn because Copies to and from the same register. \n"; } + return true; } + if (EBO_DUMP) { + LogInfo::MapleLogger() << "===replace operand " << opndIndex << " of insn: \n"; + insn.Dump(); + LogInfo::MapleLogger() << "the new insn is:\n"; + } + DecRef(*origInfos.at(opndIndex)); + insn.SetOperand(opndIndex, *opnd); + + if (EBO_DUMP) { + insn.Dump(); + } + IncRef(*opndInfo); + /* Update the actual expression info. */ + origInfos.at(opndIndex) = opndInfo; + /* extend the live range of the replacement operand. */ + if ((opndInfo->bb != insn.GetBB()) && opnd->IsRegister()) { + MarkOpndLiveIntoBB(*opnd, *insn.GetBB(), *opndInfo->bb); + } return false; } @@ -836,19 +875,6 @@ InsnInfo *Ebo::LocateInsnInfo(const OpndInfo &info) { return nullptr; } -/* Return true if opnd live out of bb. */ -bool Ebo::LiveOutOfBB(const Operand &opnd, const BB &bb) const { - CHECK_FATAL(opnd.IsRegister(), "expect register here."); - /* when optimize_level < 2, there is need to anlyze live range. */ - if (live == nullptr) { - return false; - } - if (bb.GetLiveOut()->TestBit(static_cast(&opnd)->GetRegisterNumber())) { - return true; - } - return false; -} - /* redundant insns elimination */ void Ebo::RemoveUnusedInsns(BB &bb, bool normal) { OpndInfo *opndInfo = nullptr; @@ -1270,7 +1296,7 @@ AnalysisResult *CgDoEbo::Run(CGFunc *cgFunc, CgFuncResultMgr *cgFuncResultMgr) { ebo = eboMp->New(*cgFunc, *eboMp, live, true, PhaseName()); #endif #if TARGARM32 - ebo = eboMp->New(*cgFunc, *eboMp, live, false, PhaseName()); + ebo = eboMp->New(*cgFunc, *eboMp, live, true, PhaseName()); #endif ebo->Run(); /* the live range info may changed, so invalid the info. */ @@ -1296,7 +1322,7 @@ AnalysisResult *CgDoEbo1::Run(CGFunc *cgFunc, CgFuncResultMgr *cgFuncResultMgr) ebo = eboMp->New(*cgFunc, *eboMp, live, true, PhaseName()); #endif #if TARGARM32 - ebo = eboMp->New(*cgFunc, *eboMp, live, false, PhaseName()); + ebo = eboMp->New(*cgFunc, *eboMp, live, true, PhaseName()); #endif ebo->Run(); /* the live range info may changed, so invalid the info. 
*/ diff --git a/src/maple_be/src/cg/emit.cpp b/src/maple_be/src/cg/emit.cpp index cf0e2a19b9c4e15acbc9f933f3fc6e405fdaed3e..698f697d82a5a84514ab828009c17a355af5944b 100644 --- a/src/maple_be/src/cg/emit.cpp +++ b/src/maple_be/src/cg/emit.cpp @@ -670,7 +670,10 @@ void Emitter::EmitAddrofSymbolConst(const MIRSymbol &mirSymbol, MIRConst &elemCo return; } - if ((idx == static_cast(FieldPropertyCompact::kPOffset)) && mirSymbol.IsReflectionFieldsInfoCompact()) { + if (((idx == static_cast(FieldPropertyCompact::kPOffset)) && mirSymbol.IsReflectionFieldsInfoCompact()) || + ((idx == static_cast(MethodProperty::kSigName)) && mirSymbol.IsReflectionMethodsInfo()) || + ((idx == static_cast(MethodSignatureProperty::kParameterTypes)) && + mirSymbol.IsReflectionMethodSignature())) { Emit("\t.long\t"); Emit(symAddrName + " - .\n"); return; @@ -1027,6 +1030,8 @@ void Emitter::EmitIntConst(const MIRSymbol &mirSymbol, MIRAggConst &aggConst, ui bool isFieldsInfo = (idx == static_cast(FieldProperty::kTypeName) || idx == static_cast(FieldProperty::kName) || idx == static_cast(FieldProperty::kAnnotation)) && mirSymbol.IsReflectionFieldsInfo(); + bool isMethodSignature = (idx == static_cast(MethodSignatureProperty::kSignatureOffset)) && + mirSymbol.IsReflectionMethodSignature(); /* RegisterTable has been Int Array, visit element instead of field. */ bool isInOffsetTab = (idx == 1 || idx == methodTypeIdx) && (StringUtils::StartsWith(stName, kVtabOffsetTabStr) || @@ -1039,7 +1044,7 @@ void Emitter::EmitIntConst(const MIRSymbol &mirSymbol, MIRAggConst &aggConst, ui StringUtils::StartsWith(stName, ITAB_CONFLICT_PREFIX_STR); bool isArrayClassCacheName = mirSymbol.IsArrayClassCacheName(); if (isClassInfo || isMethodsInfo || isFieldsInfo || mirSymbol.IsRegJNITab() || isInOffsetTab || - isStaticStr || isConflictPerfix || isArrayClassCacheName) { + isStaticStr || isConflictPerfix || isArrayClassCacheName || isMethodSignature) { /* compare with all 1s */ uint32 index = static_cast((safe_cast(elemConst))->GetValue()) & 0xFFFFFFFF; bool isHotReflectStr = (index & 0x00000003) != 0; /* use the last two bits of index in this expression */ @@ -1072,7 +1077,7 @@ void Emitter::EmitIntConst(const MIRSymbol &mirSymbol, MIRAggConst &aggConst, ui #endif /* USE_32BIT_REF */ Emit("+" + strTabName); if (mirSymbol.IsRegJNITab() || mirSymbol.IsReflectionMethodsInfo() || mirSymbol.IsReflectionFieldsInfo() || - mirSymbol.IsArrayClassCacheName()) { + mirSymbol.IsArrayClassCacheName() || mirSymbol.IsReflectionMethodSignature()) { Emit("-."); } if (StringUtils::StartsWith(stName, kDecoupleStaticKeyStr)) { @@ -1719,6 +1724,7 @@ void Emitter::EmitGlobalVariable() { std::vector muidVec = { nullptr }; std::vector fieldOffsetDatas; std::vector methodAddrDatas; + std::vector methodSignatureDatas; std::vector staticDecoupleKeyVec; std::vector staticDecoupleValueVec; std::vector superClassStVec; @@ -1801,6 +1807,9 @@ void Emitter::EmitGlobalVariable() { } else if (mirSymbol->IsReflectionSuperclassInfo()) { superClassStVec.emplace_back(mirSymbol); continue; + } else if (mirSymbol->IsReflectionMethodSignature()) { + methodSignatureDatas.push_back(mirSymbol); + continue; } if (mirSymbol->IsReflectionInfo()) { @@ -2071,6 +2080,10 @@ void Emitter::EmitGlobalVariable() { EmitMetaDataSymbolWithMarkFlag(fieldOffsetDatas, strIdx2Type, kFieldOffsetDataPrefixStr, sectionNameIsEmpty, false); /* method address rw */ EmitMetaDataSymbolWithMarkFlag(methodAddrDatas, strIdx2Type, kMethodAddrDataPrefixStr, sectionNameIsEmpty, false); + /* method address ro */ + 
std::string methodSignatureSectionName("romethodsignature"); + EmitMetaDataSymbolWithMarkFlag(methodSignatureDatas, strIdx2Type, kMethodSignaturePrefixStr, + methodSignatureSectionName, false); /* array class cache table */ EmitMuidTable(arrayClassCacheVec, strIdx2Type, kArrayClassCacheTable); diff --git a/src/maple_driver/src/driver_option_common.cpp b/src/maple_driver/src/driver_option_common.cpp index 15c1154b2787659d9294c1316537347cf2e8b5f3..927421b84b0517605909403c567dda4fa0c1f853 100644 --- a/src/maple_driver/src/driver_option_common.cpp +++ b/src/maple_driver/src/driver_option_common.cpp @@ -239,4 +239,4 @@ DriverOptionCommon &DriverOptionCommon::GetInstance() { DriverOptionCommon::DriverOptionCommon() { CreateUsages(usages); } -} // namespace maple \ No newline at end of file +} // namespace maple diff --git a/src/maple_ir/include/mir_symbol.h b/src/maple_ir/include/mir_symbol.h index 0dbb88a2b2d4071e43fba99780e55c65a84f1677..ccd5e6f627d93f5cdc70e18e852f8b3e54fa35aa 100644 --- a/src/maple_ir/include/mir_symbol.h +++ b/src/maple_ir/include/mir_symbol.h @@ -318,6 +318,7 @@ class MIRSymbol { bool IsReflectionSuperclassInfo() const; bool IsReflectionFieldOffsetData() const; bool IsReflectionMethodAddrData() const; + bool IsReflectionMethodSignature() const; bool IsReflectionClassInfo() const; bool IsReflectionArrayClassInfo() const; bool IsReflectionClassInfoPtr() const; diff --git a/src/maple_ir/src/mir_symbol.cpp b/src/maple_ir/src/mir_symbol.cpp index e315514157c0c40d4cff586caadc4581e132fd2c..2abad231a53afe8e1cc092f86eb9780c01d74a99 100644 --- a/src/maple_ir/src/mir_symbol.cpp +++ b/src/maple_ir/src/mir_symbol.cpp @@ -200,6 +200,10 @@ bool MIRSymbol::IsReflectionMethodAddrData() const { return (GetName().find(kMethodAddrDataPrefixStr) == 0); } +bool MIRSymbol::IsReflectionMethodSignature() const { + return (GetName().find(kMethodSignaturePrefixStr) == 0); +} + bool MIRSymbol::IsReflectionClassInfo() const { return StringUtils::StartsWith(GetName(), CLASSINFO_PREFIX_STR); } diff --git a/src/maple_me/include/bb.h b/src/maple_me/include/bb.h index ba588c4daf970a67791b1440f2d3fe6d32fd242c..62e53b47eb9eec8af04fd0e16d9b3402019ef17e 100755 --- a/src/maple_me/include/bb.h +++ b/src/maple_me/include/bb.h @@ -255,11 +255,15 @@ class BB { } void RemoveAllPred() { - pred.clear(); + while (!pred.empty()) { + RemovePred(*pred.back()); + } } void RemoveAllSucc() { - succ.clear(); + while (!succ.empty()) { + RemoveSucc(*succ.back()); + } succFreq.clear(); } diff --git a/src/maple_me/include/me_cfg.h b/src/maple_me/include/me_cfg.h index 57308d3f86a59df40ecf87bb9aa68c28cbcd2093..cc7c61c594de81589add13071e60a653e75355dc 100755 --- a/src/maple_me/include/me_cfg.h +++ b/src/maple_me/include/me_cfg.h @@ -51,6 +51,8 @@ class MeCFG { } private: + void ReplaceSwitchContainsOneCaseBranchWithBrtrue(BB &bb, MapleVector &exitBlocks); + void AddCatchHandlerForTryBB(BB &bb, MapleVector &exitBlocks); std::string ConstructFileNameToDump(const std::string &prefix) const; void DumpToFileInStrs(std::ofstream &cfgFile) const; void ConvertPhiList2IdentityAssigns(BB &meBB) const; diff --git a/src/maple_me/include/me_function.h b/src/maple_me/include/me_function.h index edd1b1c1601ffe2b24e77f6bede859b31194b18d..df1d9e551aa28c830cba48208c1682ad0f7f2fd7 100644 --- a/src/maple_me/include/me_function.h +++ b/src/maple_me/include/me_function.h @@ -387,6 +387,10 @@ class MeFunction : public FuncEmit { irmap = currIRMap; } + void SetBBTryNodeMap(BB &bb, StmtNode &tryStmt) { + bbTryNodeMap[&bb] = &tryStmt; + } + const 
MapleUnorderedMap &GetBBTryNodeMap() const { return bbTryNodeMap; } diff --git a/src/maple_me/src/me_analyzector.cpp b/src/maple_me/src/me_analyzector.cpp index 75d5ebf4a10ca5456efb75af441d02f1157f22f0..7b70dc5210824ba2b000361b256862722773b8c1 100644 --- a/src/maple_me/src/me_analyzector.cpp +++ b/src/maple_me/src/me_analyzector.cpp @@ -54,6 +54,10 @@ void AnalyzeCtor::ProcessFunc() { // collect field ids which are assigned inside the stmt and mark sideeffect // flag for non-ctor calls void AnalyzeCtor::ProcessStmt(MeStmt &stmt) { + if (kOpcodeInfo.IsCall(stmt.GetOp())) { + hasSideEffect = true; + return; + } switch (stmt.GetOp()) { case OP_iassign: { auto &iassign = static_cast(stmt); @@ -71,17 +75,6 @@ void AnalyzeCtor::ProcessStmt(MeStmt &stmt) { fieldSet.insert(ivarMeExpr.GetFieldID()); break; } - case OP_callassigned: - case OP_call: - case OP_icall: - case OP_intrinsiccall: - case OP_xintrinsiccall: - case OP_virtualcall: - case OP_superclasscall: - case OP_interfacecall: { - hasSideEffect = true; - break; - } default: break; } diff --git a/src/maple_me/src/me_bb_layout.cpp b/src/maple_me/src/me_bb_layout.cpp index deb37255e9fb001cf24ce8b9f459d8bc643761cc..aecbae47e606c1e5aaf5c65d39c8ac69029e127d 100644 --- a/src/maple_me/src/me_bb_layout.cpp +++ b/src/maple_me/src/me_bb_layout.cpp @@ -450,7 +450,8 @@ void BBLayout::FixTryBB(BB &startTryBB, BB &nextBB) { nextBB.GetPred(i)->ReplaceSucc(&nextBB, &startTryBB); } nextBB.RemoveAllPred(); - startTryBB.ReplaceSucc(startTryBB.GetSucc(0), &nextBB); + ASSERT(startTryBB.GetSucc().empty(), "succ of try should have been removed"); + startTryBB.AddSucc(nextBB); } void BBLayout::DealWithStartTryBB() { @@ -486,7 +487,8 @@ void BBLayout::RemoveUnreachable(BB &bb) { if (bb.GetAttributes(kBBAttrIsEntry)) { return; } - while (bb.GetSucc().size() > 0) { + + while (!bb.GetSucc().empty()) { BB *succ = bb.GetSucc(0); succ->RemovePred(bb, false); if (succ->GetPred().empty()) { diff --git a/src/maple_me/src/me_cfg.cpp b/src/maple_me/src/me_cfg.cpp index f5b0052ecf55348e175c16d82951111b9281a6f6..d1a5498ca5ab9d77c908a2d463b3de456397c8bc 100755 --- a/src/maple_me/src/me_cfg.cpp +++ b/src/maple_me/src/me_cfg.cpp @@ -22,12 +22,117 @@ namespace { constexpr int kFuncNameLenLimit = 80; +static bool CaseValOfSwitchIsSuccInt(const maple::CaseVector &switchTable) { + ASSERT(!switchTable.empty(), "switch table is empty"); + size_t caseNum = switchTable.size(); + int val = switchTable[0].first; + for (size_t id = 1; id < caseNum; id++) { + val++; + if (val != switchTable[id].first) { + return false; + } + } + return true; +} } namespace maple { + +void MeCFG::ReplaceSwitchContainsOneCaseBranchWithBrtrue(maple::BB &bb, MapleVector &exitBlocks) { + StmtNode &lastStmt = bb.GetStmtNodes().back(); + ASSERT(lastStmt.GetOpCode() == OP_switch, "runtime check error"); + auto &switchStmt = static_cast(lastStmt); + auto &swithcTable = switchStmt.GetSwitchTable(); + if (!CaseValOfSwitchIsSuccInt(swithcTable)) { + return; + } + LabelIdx defaultLabelIdx = switchStmt.GetDefaultLabel(); + int32 minCaseVal = swithcTable.front().first; + int32 maxCaseVal = swithcTable.back().first; + auto *baseNode = switchStmt.Opnd(0); + + auto &mirBuilder = func.GetMIRModule().GetMIRBuilder(); + auto *minCaseNode = mirBuilder->CreateIntConst(minCaseVal, PTY_i32); + auto *ltNode = mirBuilder->CreateExprCompare(OP_lt, GetTypeFromTyIdx(TyIdx(PTY_u1)), + GetTypeFromTyIdx(TyIdx(PTY_i32)), baseNode, minCaseNode); + auto *condGoto = mirBuilder->CreateStmtCondGoto(ltNode, OP_brtrue, defaultLabelIdx); + 
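A minimal sketch of the control flow the rewrite above produces, in plain C++ with illustrative names (not code from the patch): a switch whose consecutive case values all branch to one block is lowered to two brtrue range checks against the default label.
// Before: switch (val) with consecutive cases minCaseVal..maxCaseVal that all
// jump to the same case block, plus a default label.
// After: two conditional branches replace the switch.
static bool BranchesToDefault(int32_t val, int32_t minCaseVal, int32_t maxCaseVal) {
  if (val < minCaseVal) {   // first brtrue, kept in the original BB
    return true;            // go to the default label
  }
  if (val > maxCaseVal) {   // second brtrue, emitted into the new fall-through BB
    return true;            // go to the default label
  }
  return false;             // fall through to the single case block
}
The original BB keeps the lt/brtrue check and gets the new BB and the default BB as successors; the new BB holds the gt/brtrue check and branches to the case BB or the default BB, matching the AddSucc calls that follow.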
bb.ReplaceStmt(&switchStmt, condGoto); + bb.SetKind(kBBCondGoto); + + auto *newBB = func.NewBasicBlock(); + auto *maxCaseNode = mirBuilder->CreateIntConst(maxCaseVal, PTY_i32); + auto *gtNode = mirBuilder->CreateExprCompare(OP_gt, GetTypeFromTyIdx(TyIdx(PTY_u1)), + GetTypeFromTyIdx(TyIdx(PTY_i32)), baseNode, maxCaseNode); + condGoto = mirBuilder->CreateStmtCondGoto(gtNode, OP_brtrue, defaultLabelIdx); + newBB->GetStmtNodes().push_back(condGoto); + newBB->SetKind(kBBCondGoto); + + BB *defaultBB = func.GetLabelBBAt(defaultLabelIdx); + ASSERT(defaultBB != nullptr, "null ptr check"); + while (!bb.GetSucc().empty()) { + bb.RemoveSucc(*bb.GetSucc(0)); + } + bb.AddSucc(*newBB); + bb.AddSucc(*defaultBB); + + BB *caseBB = func.GetLabelBBAt(switchStmt.GetSwitchTable().front().second); + ASSERT(caseBB != nullptr, "null ptr check"); + newBB->AddSucc(*caseBB); + newBB->AddSucc(*defaultBB); + + if (bb.GetAttributes(kBBAttrIsTry)) { + newBB->SetAttributes(kBBAttrIsTry); + func.SetBBTryNodeMap(*newBB, *func.GetBBTryNodeMap().at(&bb)); + AddCatchHandlerForTryBB(bb, exitBlocks); + AddCatchHandlerForTryBB(*newBB, exitBlocks); + } +} + +void MeCFG::AddCatchHandlerForTryBB(BB &bb, MapleVector &exitBlocks) { + if (!bb.GetAttributes(kBBAttrIsTry)) { + return; + } + auto it = func.GetBBTryNodeMap().find(&bb); + CHECK_FATAL(it != func.GetBBTryNodeMap().end(), "try bb without try"); + StmtNode *currTry = it->second; + const auto *tryNode = static_cast(currTry); + bool hasFinallyHandler = false; + // add exception handler bb + for (size_t j = 0; j < tryNode->GetOffsetsCount(); ++j) { + LabelIdx labelIdx = tryNode->GetOffset(j); + ASSERT(func.GetLabelBBIdMap().find(labelIdx) != func.GetLabelBBIdMap().end(), "runtime check error"); + BB *meBB = func.GetLabelBBAt(labelIdx); + CHECK_FATAL(meBB != nullptr, "null ptr check"); + ASSERT(meBB->GetAttributes(kBBAttrIsCatch), "runtime check error"); + if (meBB->GetAttributes(kBBAttrIsJSFinally) || meBB->GetAttributes(kBBAttrIsCatch)) { + hasFinallyHandler = true; + } + // avoid redundant succ + if (!meBB->IsSuccBB(bb)) { + bb.AddSucc(*meBB); + } + } + // if try block don't have finally catch handler, add common_exit_bb as its succ + if (!hasFinallyHandler) { + if (!bb.GetAttributes(kBBAttrIsExit)) { + bb.SetAttributes(kBBAttrIsExit); // may exit + exitBlocks.push_back(&bb); + } + } else if ((func.GetMIRModule().GetSrcLang() == kSrcLangJava) && bb.GetAttributes(kBBAttrIsExit)) { + // deal with throw bb, if throw bb in a tryblock and has finallyhandler + auto &stmtNodes = bb.GetStmtNodes(); + if (!stmtNodes.empty() && stmtNodes.back().GetOpCode() == OP_throw) { + bb.ClearAttributes(kBBAttrIsExit); + ASSERT(&bb == exitBlocks.back(), "runtime check error"); + exitBlocks.pop_back(); + } + } +} + void MeCFG::BuildMirCFG() { MapleVector entryBlocks(func.GetAlloc().Adapter()); MapleVector exitBlocks(func.GetAlloc().Adapter()); + std::vector switchBBsWithOneCaseBranch; auto eIt = func.valid_end(); for (auto bIt = func.valid_begin(); bIt != eIt; ++bIt) { if (bIt == func.common_entry() || bIt == func.common_exit()) { @@ -86,15 +191,23 @@ void MeCFG::BuildMirCFG() { LabelIdx lblIdx = switchStmt.GetDefaultLabel(); BB *mirBB = func.GetLabelBBAt(lblIdx); bb->AddSucc(*mirBB); + std::set caseLabels; for (size_t j = 0; j < switchStmt.GetSwitchTable().size(); ++j) { lblIdx = switchStmt.GetCasePair(j).second; BB *meBB = func.GetLabelBBAt(lblIdx); + (void)caseLabels.insert(lblIdx); // Avoid duplicate succs. 
auto it = std::find(bb->GetSucc().begin(), bb->GetSucc().end(), meBB); if (it == bb->GetSucc().end()) { bb->AddSucc(*meBB); } } + if (bb->GetSucc().size() == 1) { + bb->RemoveLastStmt(); + bb->SetKind(kBBFallthru); + } else if (caseLabels.size() == 1) { + switchBBsWithOneCaseBranch.push_back(bb); + } break; } case kBBReturn: @@ -109,45 +222,15 @@ void MeCFG::BuildMirCFG() { break; } } - // deal try blocks, add catch handler to try's succ + // deal try blocks, add catch handler to try's succ. SwitchBB dealed individually if (bb->GetAttributes(kBBAttrIsTry)) { - auto it = func.GetBBTryNodeMap().find(bb); - CHECK_FATAL(it != func.GetBBTryNodeMap().end(), "try bb without try"); - StmtNode *currTry = it->second; - const auto *tryNode = static_cast(currTry); - bool hasFinallyHandler = false; - // add exception handler bb - for (size_t j = 0; j < tryNode->GetOffsetsCount(); ++j) { - LabelIdx labelIdx = tryNode->GetOffset(j); - ASSERT(func.GetLabelBBIdMap().find(labelIdx) != func.GetLabelBBIdMap().end(), "runtime check error"); - BB *meBB = func.GetLabelBBAt(labelIdx); - CHECK_FATAL(meBB != nullptr, "null ptr check"); - ASSERT(meBB->GetAttributes(kBBAttrIsCatch), "runtime check error"); - if (meBB->GetAttributes(kBBAttrIsJSFinally) || meBB->GetAttributes(kBBAttrIsCatch)) { - hasFinallyHandler = true; - } - // avoid redundant succ - if (!meBB->IsSuccBB(*bb)) { - bb->AddSucc(*meBB); - } - } - // if try block don't have finally catch handler, add common_exit_bb as its succ - if (!hasFinallyHandler) { - if (!bb->GetAttributes(kBBAttrIsExit)) { - bb->SetAttributes(kBBAttrIsExit); // may exit - exitBlocks.push_back(bb); - } - } else if ((func.GetMIRModule().GetSrcLang() == kSrcLangJava) && bb->GetAttributes(kBBAttrIsExit)) { - // deal with throw bb, if throw bb in a tryblock and has finallyhandler - auto &stmtNodes = bb->GetStmtNodes(); - if (!stmtNodes.empty() && stmtNodes.back().GetOpCode() == OP_throw) { - bb->ClearAttributes(kBBAttrIsExit); - ASSERT(bb == exitBlocks.back(), "runtime check error"); - exitBlocks.pop_back(); - } - } + AddCatchHandlerForTryBB(*bb, exitBlocks); } } + + for (BB *switchBB : switchBBsWithOneCaseBranch) { + ReplaceSwitchContainsOneCaseBranchWithBrtrue(*switchBB, exitBlocks); + } // merge all blocks in entryBlocks for (BB *bb : entryBlocks) { func.GetCommonEntryBB()->AddEntry(*bb); @@ -445,8 +528,9 @@ void MeCFG::FixTryBB(maple::BB &startBB, maple::BB &nextBB) { for (size_t i = 0; i < nextBB.GetPred().size(); ++i) { nextBB.GetPred(i)->ReplaceSucc(&nextBB, &startBB); } - nextBB.RemoveAllPred(); - startBB.ReplaceSucc(startBB.GetSucc(0), &nextBB); + ASSERT(nextBB.GetPred().empty(), "pred of nextBB should be empty"); + startBB.RemoveAllSucc(); + startBB.AddSucc(nextBB); } // analyse the CFG to find the BBs that are not reachable from function entries diff --git a/src/maple_me/src/me_hdse.cpp b/src/maple_me/src/me_hdse.cpp index cdb7ea48835b5c8a5736e07437be44938fb1048a..03a444e53678a61532ef3145409a8ef232d7ae7a 100755 --- a/src/maple_me/src/me_hdse.cpp +++ b/src/maple_me/src/me_hdse.cpp @@ -38,7 +38,8 @@ void MeDoHDSE::MakeEmptyTrysUnreachable(MeFunction &func) { endTry->GetAttributes(kBBAttrIsTryEnd) && endTry->IsMeStmtEmpty()) { // we found a try BB followed by an empty endtry BB BB *targetBB = endTry->GetSucc(0); - for (auto *tryPred : tryBB->GetPred()) { + while (!tryBB->GetPred().empty()) { + auto *tryPred = tryBB->GetPred(0); // update targetbb's predecessors if (!tryPred->IsPredBB(*targetBB)) { ASSERT(endTry->IsPredBB(*targetBB), "MakeEmptyTrysUnreachable: processing 
error"); diff --git a/src/maple_util/include/namemangler.h b/src/maple_util/include/namemangler.h index ef54c2283b7d713de58f7c45d3b3194657263ca5..f827bbfcbd484ccf126617db4ac4cb72ca11d5f6 100644 --- a/src/maple_util/include/namemangler.h +++ b/src/maple_util/include/namemangler.h @@ -90,6 +90,7 @@ static constexpr const char kVtableKeyOffsetTabStr[] = "__vtable_offset_key_tabl static constexpr const char kVtableOffsetTabKeyStr[] = "__vtable_offset_key_table"; static constexpr const char kFieldKeyOffsetTabStr[] = "__field_offset_table"; static constexpr const char kOffsetTabStr[] = "__offset_value_table"; +static constexpr const char kInlineCacheTabStr[] = "__inline_cache_table"; static constexpr const char kLocalClassInfoStr[] = "__local_classinfo_table"; static constexpr const char kMethodsInfoPrefixStr[] = "__methods_info__"; static constexpr const char kMethodsInfoCompactPrefixStr[] = "__methods_infocompact__"; @@ -97,6 +98,8 @@ static constexpr const char kFieldsInfoPrefixStr[] = "__fields_info__"; static constexpr const char kFieldsInfoCompactPrefixStr[] = "__fields_infocompact__"; static constexpr const char kFieldOffsetDataPrefixStr[] = "__fieldOffsetData__"; static constexpr const char kMethodAddrDataPrefixStr[] = "__methodAddrData__"; +static constexpr const char kMethodSignaturePrefixStr[] = "__methodSignature__"; +static constexpr const char kParameterTypesPrefixStr[] = "__parameterTypes__"; static constexpr const char kRegJNITabPrefixStr[] = "__reg_jni_tab"; static constexpr const char kRegJNIFuncTabPrefixStr[] = "__reg_jni_func_tab"; static constexpr const char kReflectionStrtabPrefixStr[] = "__reflection_strtab"; diff --git a/src/maple_util/include/profile.h b/src/maple_util/include/profile.h index 912a4ae69f4ddb116049cf85ab0ff7aa0d4271a0..fce07def5bb5b4599405d4bbddf0309a55cc16fc 100644 --- a/src/maple_util/include/profile.h +++ b/src/maple_util/include/profile.h @@ -56,6 +56,7 @@ class Profile { void InitTestData(); bool CheckFuncHot(const std::string &className) const; bool CheckMethodHot(const std::string &className) const; + bool CheckMethodSigHot(const std::string &methodSigStr) const; bool CheckFieldHot(const std::string &className) const; bool CheckClassHot(const std::string &className) const; bool CheckLiteralHot(const std::string &literal) const; @@ -93,6 +94,7 @@ class Profile { std::unordered_set classMeta; std::unordered_set methodMeta; std::unordered_set fieldMeta; + std::unordered_set methodSigMeta; std::unordered_set literal; std::unordered_map reflectionStrData; std::unordered_map funcProfData; @@ -106,6 +108,8 @@ class Profile { bool CheckProfileHeader(const Header &header) const; std::string GetProfileNameByType(uint8 type) const; std::string GetFunctionName(uint32 classIdx, uint32 methodIdx, uint32 sigIdx) const; + std::string GetMethodSigStr(uint32 methodIdx, uint32 sigIdx) const; + void ParseMethodSignature(const char *data, int fileNum, std::unordered_set &metaData) const; void ParseMeta(const char *data, int fileNum, std::unordered_set &metaData) const; void ParseReflectionStr(const char *data, int fileNum); void ParseFunc(const char *data, int fileNum); diff --git a/src/maple_util/include/profile_type.h b/src/maple_util/include/profile_type.h index a7eae2d5d6805a33fb9fd4607b6fb4dfd7f6f3c5..770e1845d619ef0de8feb2e81ac67f89f071a03e 100644 --- a/src/maple_util/include/profile_type.h +++ b/src/maple_util/include/profile_type.h @@ -33,6 +33,7 @@ enum ProfileType : uint8_t { kBBInfo = 0x06, kIRCounter = 0x07, kAll = 0x08, + kMethodSig = 0x09, kFileDesc = 0xFF 
}; @@ -78,6 +79,12 @@ struct MetaItem { MetaItem(uint32_t idx) : idx(idx) {} }; +struct MethodSignatureItem { + uint32_t methodIdx; + uint32_t sigIdx; + MethodSignatureItem(uint32_t methodIdx, uint32_t sigIdx) : methodIdx(methodIdx), sigIdx(sigIdx) {} +}; + struct ReflectionStrItem { uint8_t type; uint32_t idx; diff --git a/src/maple_util/src/profile.cpp b/src/maple_util/src/profile.cpp index cc717c71b5a1a3d554fd160ef43dda6a109db68b..4d172e72d68c4883c3c127a9fa53dfd5e51f32db 100644 --- a/src/maple_util/src/profile.cpp +++ b/src/maple_util/src/profile.cpp @@ -107,6 +107,13 @@ std::string Profile::GetFunctionName(uint32 classIdx, uint32 methodIdx, uint32 s return funcName; } +std::string Profile::GetMethodSigStr(uint32 methodIdx, uint32 sigIdx) const { + const std::string methodName = namemangler::EncodeName(strMap.at(methodIdx)); + const std::string sigName = namemangler::EncodeName(strMap.at(sigIdx)); + const std::string methodSigStr = methodName + "_7C" + sigName; + return methodSigStr; +} + void Profile::ParseFunc(const char *data, int32 fileNum) { const MapleFileProf *funcProf = nullptr; const FunctionItem *funcItem = nullptr; @@ -135,6 +142,28 @@ void Profile::ParseFunc(const char *data, int32 fileNum) { } } +void Profile::ParseMethodSignature(const char *data, int fileNum, std::unordered_set &metaData) const { + const MapleFileProf *methodSigProf = nullptr; + const MethodSignatureItem *methodSigItem = nullptr; + uint32_t offset = 0; + for (int32 mapleFileIdx = 0; mapleFileIdx < fileNum; ++mapleFileIdx) { + methodSigProf = reinterpret_cast(data + offset); + if (CheckDexValid(methodSigProf->idx)) { + if (debug) { + LogInfo::MapleLogger() << "MethodSignatureProfile" << ":" + << strMap.at(methodSigProf->idx) << ":" << methodSigProf->num << "\n"; + } + methodSigItem = reinterpret_cast(data + offset + sizeof(MapleFileProf)); + for (uint32 item = 0; item < methodSigProf->num; ++item, ++methodSigItem) { + std::string methodSigStr = GetMethodSigStr(methodSigItem->methodIdx, methodSigItem->sigIdx); + metaData.insert(methodSigStr); + } + // new maple file's profile + } + offset += sizeof(MapleFileProf) + methodSigProf->size; + } +} + void Profile::ParseIRFuncDesc(const char *data, int32 fileNum) { const MapleFileProf *funcProf = nullptr; const FunctionIRProfItem *funcItem = nullptr; @@ -334,6 +363,9 @@ bool Profile::DeCompress(const std::string &path, const std::string &dexNameInne case kIRCounter: ParseCounterTab(proFileData, profileDataInfo->mapleFileNum); break; + case kMethodSig: + ParseMethodSignature(proFileData, profileDataInfo->mapleFileNum, methodSigMeta); + break; case kFileDesc: { uint32_t appPackageNameIdx = *reinterpret_cast(proFileData); this->appPackageName = strMap.at(appPackageNameIdx); @@ -399,6 +431,19 @@ bool Profile::CheckMethodHot(const std::string &className) const { return false; } +bool Profile::CheckMethodSigHot(const std::string &methodSigStr) const { + if (methodSigMeta.empty()) { + return false; + } + if (valid) { + if (methodSigMeta.find(methodSigStr) == methodSigMeta.end()) { + return false; + } + return true; + } + return false; +} + bool Profile::CheckFieldHot(const std::string &className) const { if (fieldMeta.empty()) { return true; diff --git a/src/mpl2mpl/include/annotation_analysis.h b/src/mpl2mpl/include/annotation_analysis.h index 71dc24c2a116f81e6eff8eee30fa387e973f4ad7..429bf5f306749ec615a4e0578642b12571d3ffc1 100644 --- a/src/mpl2mpl/include/annotation_analysis.h +++ b/src/mpl2mpl/include/annotation_analysis.h @@ -240,12 +240,12 @@ class AnnotationParser 
{ std::string curStrToken; }; -class AnnotationAnalysis : AnalysisResult { +class AnnotationAnalysis : public AnalysisResult { public: static char annoDeclare; static char annoSemiColon; AnnotationAnalysis(MIRModule *mod, MemPool *tmpMp, MemPool *pragmaMp, KlassHierarchy *kh) - : AnalysisResult(tmpMp), + : AnalysisResult(pragmaMp), mirModule(mod), tmpAllocator(tmpMp), pragmaMemPool(pragmaMp), @@ -260,7 +260,6 @@ class AnnotationAnalysis : AnalysisResult { void Run(); private: void AnalysisAnnotation(); - void AnnotationCleanUp(); void AnalysisAnnotationForClass(MIRPragma &classPragma); void AnalysisAnnotationForVar(const MIRPragma &varPragma, MIRStructType &structType); void AnalysisAnnotationForFunc(MIRPragma &funcPragma, MIRStructType &structType); @@ -276,8 +275,8 @@ class AnnotationAnalysis : AnalysisResult { MemPool *pragmaMemPool; MapleAllocator pragmaAllocator; KlassHierarchy *klassH; - std::set analysised; - std::set analysisedFunc; + MapleSet analysised{tmpAllocator.Adapter()}; + MapleSet analysisedFunc{tmpAllocator.Adapter()}; AnnotationType *genericMatch; GenericType *dummyObj; }; diff --git a/src/mpl2mpl/include/gen_check_cast.h b/src/mpl2mpl/include/gen_check_cast.h index fca8bd193d23160ca324bf425278173a91f1adc8..eb8e8c381fe2d83de275997b0a2bce30b948cb94 100644 --- a/src/mpl2mpl/include/gen_check_cast.h +++ b/src/mpl2mpl/include/gen_check_cast.h @@ -58,8 +58,7 @@ class DoCheckCastGeneration : public ModulePhase { AnalysisResult *Run(MIRModule *mod, ModuleResultMgr *mrm) override { OPT_TEMPLATE(CheckCastGenerator); - memPoolCtrler.DeleteMemPool(mod->GetPragmaMemPool()); - mod->ChangePragmaMemPool(nullptr); + mrm->InvalidAnalysisResult(MoPhase_ANNOTATIONANALYSIS, mod); return nullptr; } }; diff --git a/src/mpl2mpl/include/muid_replacement.h b/src/mpl2mpl/include/muid_replacement.h index 06f721b6ae45b64d87b276ebea052774ccebf47a..fbb61d44939e2f5693ba990da66bef80e8f6cda8 100644 --- a/src/mpl2mpl/include/muid_replacement.h +++ b/src/mpl2mpl/include/muid_replacement.h @@ -29,6 +29,8 @@ constexpr uint32 kFuncDefSizeIndex = 0; constexpr uint32 kFuncDefNameIndex = 1; constexpr uint32 kRangeBeginIndex = 0; constexpr int32_t kDecoupleAndLazy = 3; +constexpr uint32_t kShiftBit16 = 16; +constexpr uint32_t kShiftBit15 = 15; enum RangeIdx { // 0,1 entry is reserved for a stamp @@ -57,6 +59,19 @@ enum RangeIdx { kNewMaxNum = 24 // New num }; +struct SourceFileMethod { + uint32 sourceFileIndex; + uint32 sourceClassIndex; + uint32 sourceMethodIndex; + bool isVirtual; +}; + +struct SourceFileField { + uint32 sourceFileIndex; + uint32 sourceClassIndex; + uint32 sourceFieldIndex; +}; + class MUIDReplacement : public FuncOptimizeImpl { public: MUIDReplacement(MIRModule &mod, KlassHierarchy *kh, bool dump); @@ -78,6 +93,7 @@ class MUIDReplacement : public FuncOptimizeImpl { private: using SymIdxPair = std::pair; + using SourceIndexPair = std::pair; enum LazyBindingOption : uint32 { kNoLazyBinding = 0, kConservativeLazyBinding = 1, @@ -118,12 +134,14 @@ class MUIDReplacement : public FuncOptimizeImpl { void GenerateCompilerVersionNum(); int64 GetDefOrUndefOffsetWithMask(uint64, bool isDef, bool muidIndex32Mod = false) const; void CollectSuperClassArraySymbolData(); + void GenerateSourceInfo(); static MIRSymbol *GetSymbolFromName(const std::string &name); ConstvalNode* GetConstvalNode(int64 index); void InsertArrayClassSet(const MIRType &type); MIRType *GetIntrinsicConstArrayClass(StmtNode &stmt); void CollectArrayClass(); void GenArrayClassCache(); + void ReleasePragmaMemPool(); std::unordered_set 
arrayClassSet; // The following sets are for internal uses. Sorting order does not matter here. std::unordered_set funcDefSet; @@ -178,6 +196,9 @@ class MUIDReplacement : public FuncOptimizeImpl { std::map funcUndefMap; std::map dataUndefMap; std::map defMuidIdxMap; + std::map sourceIndexMap; + std::map sourceFileMethodMap; + std::map sourceFileFieldMap; static MUID mplMuid; std::string mplMuidStr; }; diff --git a/src/mpl2mpl/include/reflection_analysis.h b/src/mpl2mpl/include/reflection_analysis.h index e0ffe77a468b1fbbd74665cf1e25da57e1a33976..beea562d19718845b2cda9420cee606458edf592 100644 --- a/src/mpl2mpl/include/reflection_analysis.h +++ b/src/mpl2mpl/include/reflection_analysis.h @@ -101,6 +101,11 @@ enum class FieldPropertyCompact : uint32 { kAnnotation }; +enum class MethodSignatureProperty : uint32 { + kSignatureOffset, + kParameterTypes +}; + class ReflectionAnalysis : public AnalysisResult { public: ReflectionAnalysis(MIRModule *mod, MemPool *memPool, KlassHierarchy *kh, MIRBuilder &builder) @@ -180,6 +185,8 @@ class ReflectionAnalysis : public AnalysisResult { std::string GetAnnotationValue(const MapleVector &subElemVector, GStrIdx typeStrIdx); MIRSymbol *GenSuperClassMetaData(std::list superClassList); MIRSymbol *GenFieldOffsetData(const Klass &klass, std::pair &fieldInfo); + MIRSymbol *GetMethodSignatureSymbol(std::string signature); + MIRSymbol *GetParameterTypesSymbol(uint32 size, uint32 index); MIRSymbol *GenFieldsMetaData(const Klass &klass); MIRSymbol *GenMethodsMetaData(const Klass &klass); MIRSymbol *GenFieldsMeta(const Klass &klass, std::vector> &fieldsVector, @@ -244,6 +251,7 @@ class ReflectionAnalysis : public AnalysisResult { KlassHierarchy *klassH; MIRBuilder &mirBuilder; MapleVector classTab; + std::mapmapMethodSignature; bool isLibcore; bool isLazyBindingOrDecouple; std::string reflectionMuidStr; @@ -257,6 +265,7 @@ class ReflectionAnalysis : public AnalysisResult { static TyIdx superclassMetadataTyIdx; static TyIdx fieldOffsetDataTyIdx; static TyIdx methodAddrDataTyIdx; + static TyIdx methodSignatureTyIdx; static std::string strTab; static std::unordered_map str2IdxMap; static std::string strTabStartHot; diff --git a/src/mpl2mpl/include/vtable_impl.h b/src/mpl2mpl/include/vtable_impl.h index 601372dd4a7d3baa1cc80a94e6e5be4121a1fc0d..8cbb1c292a2d07016aa1c31b081653ab7a474008 100644 --- a/src/mpl2mpl/include/vtable_impl.h +++ b/src/mpl2mpl/include/vtable_impl.h @@ -37,6 +37,8 @@ class VtableImpl : public FuncOptimizeImpl { private: void ReplaceResolveInterface(StmtNode &stmt, const ResolveFuncNode &resolveNode); + void ItabProcess(StmtNode &stmt, const ResolveFuncNode &resolveNode, const std::string &signature, + PregIdx &pregFuncPtr, const MIRType &compactPtrType, const PrimType &compactPtrPrim); bool Intrinsify(MIRFunction &func, CallNode &cnode); MIRModule *mirModule; MIRFunction *mccItabFunc; diff --git a/src/mpl2mpl/src/annotation_analysis.cpp b/src/mpl2mpl/src/annotation_analysis.cpp index af5c658fcfac3b0b739777ec7d72a9a20640d06d..456005d8b74c80825afc7732742c830c6546082c 100644 --- a/src/mpl2mpl/src/annotation_analysis.cpp +++ b/src/mpl2mpl/src/annotation_analysis.cpp @@ -216,16 +216,6 @@ void AnnotationParser::InitClassGenericDeclare(MemPool &pragmaMemPool, MIRStruct } } -void AnnotationAnalysis::AnnotationCleanUp() { - const MapleVector &klasses = klassH->GetTopoSortedKlasses(); - for (Klass *klass : klasses) { - MIRStructType *mirStruct = klass->GetMIRStructType(); - mirStruct->GetPragmaVec().clear(); - mirStruct->GetPragmaVec().shrink_to_fit(); - } - 
memPoolCtrler.DeleteMemPool(mirModule->GetPragmaMemPool()); -} - void AnnotationAnalysis::ByPassFollowingInfo(AnnotationParser &aParser, MIRStructType *sType) { ATokenKind t = aParser.GetNextToken(); if (t == kTemplateStart) { @@ -555,7 +545,6 @@ void AnnotationAnalysis::AnalysisAnnotation() { void AnnotationAnalysis::Run() { AnalysisAnnotation(); - AnnotationCleanUp(); } AnalysisResult *DoAnnotationAnalysis::Run(MIRModule *module, ModuleResultMgr *moduleResultMgr) { @@ -563,10 +552,9 @@ AnalysisResult *DoAnnotationAnalysis::Run(MIRModule *module, ModuleResultMgr *mo MemPool *pragmaMemPool = memPoolCtrler.NewMemPool("New Pragma mempool"); auto *kh = static_cast(moduleResultMgr->GetAnalysisResult(MoPhase_CHA, module)); ASSERT_NOT_NULL(kh); - AnnotationAnalysis AA(module, memPool, pragmaMemPool, kh); - AA.Run(); + AnnotationAnalysis *aa = pragmaMemPool->New(module, memPool, pragmaMemPool, kh); + aa->Run(); memPoolCtrler.DeleteMemPool(memPool); - module->ChangePragmaMemPool(pragmaMemPool); - return nullptr; + return aa; } } diff --git a/src/mpl2mpl/src/muid_replacement.cpp b/src/mpl2mpl/src/muid_replacement.cpp index cc53fa78f52ad6c15684001a97fa2364c92246ca..c552df28dda97af89e955026e8d12987a755410b 100644 --- a/src/mpl2mpl/src/muid_replacement.cpp +++ b/src/mpl2mpl/src/muid_replacement.cpp @@ -732,8 +732,21 @@ void MUIDReplacement::GenerateUnifiedUndefTable() { // to be filled by runtime builder->AddIntFieldConst(*unifiedUndefTabEntryType, *entryConst, fieldID++, 0); funcUndefTabConst->PushBack(entryConst); - builder->AddIntFieldConst(*unifiedUndefMuidTabEntryType, *muidEntryConst, muidFieldID++, muid.data.words[0]); - builder->AddIntFieldConst(*unifiedUndefMuidTabEntryType, *muidEntryConst, muidFieldID++, muid.data.words[1]); + if (sourceFileMethodMap.find(muid) != sourceFileMethodMap.end()) { + uint32 fileIndex = sourceFileMethodMap[muid].sourceFileIndex; + uint32 classIndex = sourceFileMethodMap[muid].sourceClassIndex; + uint32 methodIndex = sourceFileMethodMap[muid].sourceMethodIndex << 1; + if (sourceFileMethodMap[muid].isVirtual) { + methodIndex |= 0x1; + } + builder->AddIntFieldConst(*unifiedUndefMuidTabEntryType, *muidEntryConst, muidFieldID++, + ((fileIndex << kShiftBit16) | classIndex)); + builder->AddIntFieldConst(*unifiedUndefMuidTabEntryType, *muidEntryConst, muidFieldID++, + (methodIndex << kShiftBit15) | 0x7FFF); + } else { + builder->AddIntFieldConst(*unifiedUndefMuidTabEntryType, *muidEntryConst, muidFieldID++, muid.data.words[0]); + builder->AddIntFieldConst(*unifiedUndefMuidTabEntryType, *muidEntryConst, muidFieldID++, muid.data.words[1]); + } funcUndefMuidTabConst->PushBack(muidEntryConst); } if (!funcUndefTabConst->GetConstVec().empty()) { @@ -768,8 +781,23 @@ void MUIDReplacement::GenerateUnifiedUndefTable() { // Will be emitted as 0 and filled by runtime builder->AddAddrofFieldConst(*unifiedUndefTabEntryType, *entryConst, fieldID++, *mirSymbol); dataUndefTabConst->PushBack(entryConst); - builder->AddIntFieldConst(*unifiedUndefMuidTabEntryType, *muidEntryConst, muidFieldID++, muid.data.words[0]); - builder->AddIntFieldConst(*unifiedUndefMuidTabEntryType, *muidEntryConst, muidFieldID++, muid.data.words[1]); + if (sourceIndexMap.find(muid) != sourceIndexMap.end()) { + SourceIndexPair pairIndex = sourceIndexMap[muid]; + uint32 value = (pairIndex.first << kShiftBit16) + pairIndex.second; + builder->AddIntFieldConst(*unifiedUndefMuidTabEntryType, *muidEntryConst, muidFieldID++, value); + builder->AddIntFieldConst(*unifiedUndefMuidTabEntryType, *muidEntryConst, muidFieldID++, 
0xFFFFFFFF); + } else if (sourceFileFieldMap.find(muid) != sourceFileFieldMap.end()) { + uint32 sourceFileIndex = sourceFileFieldMap[muid].sourceFileIndex; + uint32 sourceClassIndex = sourceFileFieldMap[muid].sourceClassIndex; + uint32 sourceFieldIndex = sourceFileFieldMap[muid].sourceFieldIndex; + builder->AddIntFieldConst(*unifiedUndefMuidTabEntryType, *muidEntryConst, muidFieldID++, + (sourceFileIndex << kShiftBit16) | sourceClassIndex); + builder->AddIntFieldConst(*unifiedUndefMuidTabEntryType, *muidEntryConst, muidFieldID++, + (sourceFieldIndex << kShiftBit16) | 0xFFFF); + } else { + builder->AddIntFieldConst(*unifiedUndefMuidTabEntryType, *muidEntryConst, muidFieldID++, muid.data.words[0]); + builder->AddIntFieldConst(*unifiedUndefMuidTabEntryType, *muidEntryConst, muidFieldID++, muid.data.words[1]); + } dataUndefMuidTabConst->PushBack(muidEntryConst); mplMuidStr += muid.ToStr(); if (trace) { @@ -1496,11 +1524,65 @@ void MUIDReplacement::GenerateCompilerVersionNum() { } +void MUIDReplacement::GenerateSourceInfo() { + for (Klass *klass : klassHierarchy->GetTopoSortedKlasses()) { + if (klass->IsClassIncomplete() || klass->IsInterfaceIncomplete()) { + continue; + } + MIRStructType *structType = klass->GetMIRStructType(); + ASSERT(structType != nullptr, "null ptr check!"); + if (klass->GetMIRStructType()->IsLocal()) { + continue; + } + for (const MIRPragma *prag : structType->GetPragmaVec()) { + const MapleVector &elemVector = prag->GetElementVector(); + GStrIdx typeStrIdx = GlobalTables::GetTypeTable().GetTypeFromTyIdx(prag->GetTyIdx())->GetNameStrIdx(); + std::string typeName = GlobalTables::GetStrTable().GetStringFromStrIdx(typeStrIdx); + if (typeName == "Lharmonyos_2Fannotation_2FInterpreter_3B") { + if (prag->GetKind() == kPragmaClass) { + int64 firstVal = elemVector[0]->GetI64Val(); + int64 secondVal = elemVector[1]->GetI64Val(); + std::string symbolName = CLASSINFO_PREFIX_STR + klass->GetKlassName(); + MUID muid = GetMUID(symbolName); + sourceIndexMap[muid] = SourceIndexPair(firstVal, secondVal); + } else if (prag->GetKind() == kPragmaFunc) { + std::string funcName = GlobalTables::GetStrTable().GetStringFromStrIdx(prag->GetStrIdx()); + MUID muid = GetMUID(funcName); + uint32 sourceFileIndex = elemVector[0]->GetI64Val(); + uint32 sourceClassIndex = elemVector[1]->GetI64Val(); + uint32 sourceMethodIndex = elemVector[2]->GetI64Val(); + bool isVirtual = elemVector[3]->GetI64Val() == 1 ? 
true : false; + SourceFileMethod methodInf = {sourceFileIndex, sourceClassIndex, sourceMethodIndex, isVirtual}; + sourceFileMethodMap.insert(std::pair(muid, methodInf)); + } else if (prag->GetKind() == kPragmaField) { + std::string fieldName = GlobalTables::GetStrTable().GetStringFromStrIdx(prag->GetStrIdx()); + MUID muid = GetMUID(fieldName); + uint32 sourceFileIndex = elemVector[0]->GetI64Val(); + uint32 sourceClassIndex = elemVector[1]->GetI64Val(); + uint32 sourceFieldIndex = elemVector[2]->GetI64Val(); + SourceFileField fieldInf = {sourceFileIndex, sourceClassIndex, sourceFieldIndex}; + sourceFileFieldMap.insert(std::pair(muid, fieldInf)); + } + } + } + } +} + +void MUIDReplacement::ReleasePragmaMemPool() { + for (Klass *klass : klassHierarchy->GetTopoSortedKlasses()) { + MIRStructType *mirStruct = klass->GetMIRStructType(); + mirStruct->GetPragmaVec().clear(); + mirStruct->GetPragmaVec().shrink_to_fit(); + } + memPoolCtrler.DeleteMemPool(GetMIRModule().GetPragmaMemPool()); +} + void MUIDReplacement::GenerateTables() { GenerateGlobalRootList(); CollectFuncAndDataFromKlasses(); CollectFuncAndDataFromGlobalTab(); CollectFuncAndDataFromFuncList(); + ReleasePragmaMemPool(); CollectSuperClassArraySymbolData(); CollectArrayClass(); GenArrayClassCache(); diff --git a/src/mpl2mpl/src/reflection_analysis.cpp b/src/mpl2mpl/src/reflection_analysis.cpp index 41233776485a1d534d17edeb425bb27362cb5f95..66a5429872d2b1acd510fab5caeb00cf6b24888f 100755 --- a/src/mpl2mpl/src/reflection_analysis.cpp +++ b/src/mpl2mpl/src/reflection_analysis.cpp @@ -33,6 +33,7 @@ using namespace maple; // If needed, we can make field type in two bits. constexpr uint64 kMethodNotVirtual = 0x00000001; constexpr uint64 kMethodFinalize = 0x00000002; +constexpr uint64 kMethodSignature = 0x00000008; constexpr uint64 kMethodAbstract = 0x00000010; constexpr uint64 kFieldOffsetIspOffset = 0x00000001; @@ -127,6 +128,11 @@ constexpr char kMethodInfoCompactTypeName[] = "__method_info_compact__"; constexpr char kSuperclassOrComponentclassStr[] = "superclass_or_componentclass"; constexpr char kReflectionReferencePrefixStr[] = "Ljava_2Flang_2Fref_2FReference_3B"; constexpr char kJavaLangAnnotationRetentionStr[] = "Ljava_2Flang_2Fannotation_2FRetention_3B"; +constexpr char kMethodSignatureOffsetName[] = "signatureOffset"; +constexpr char kMethodSignatureParameterName[] = "signatureParameter"; +constexpr char kParameterTypeItemName[] = "parameterTypeItem"; +constexpr char kParameterTypesName[] = "parameterTypes"; +constexpr char kMethodSignatureTypeName[] = "__methodSignatureType__"; constexpr int kAnonymousClassIndex = 5; constexpr char kAnonymousClassSuffix[] = "30"; constexpr char kInnerClassStr[] = "Lark/annotation/InnerClass;"; @@ -281,6 +287,7 @@ TyIdx ReflectionAnalysis::fieldsInfoCompactTyIdx = TyIdx(0); TyIdx ReflectionAnalysis::superclassMetadataTyIdx = TyIdx(0); TyIdx ReflectionAnalysis::fieldOffsetDataTyIdx = TyIdx(0); TyIdx ReflectionAnalysis::methodAddrDataTyIdx = TyIdx(0); +TyIdx ReflectionAnalysis::methodSignatureTyIdx = TyIdx(0); TyIdx ReflectionAnalysis::invalidIdx = TyIdx(-1); uint32 ReflectionAnalysis::GetMethodModifier(const Klass &klass, const MIRFunction &func) const { @@ -756,6 +763,11 @@ uint32 ReflectionAnalysis::GetMethodFlag(const MIRFunction &func) const { } uint16 hash = func.GetHashCode(); flag |= (hash << kNoHashBits); // hash 10 bit + + bool isProfHotMethod = (*mirModule).GetProfile().CheckMethodSigHot(func.GetBaseFuncNameWithType()); + if (isProfHotMethod) { + flag |= kMethodSignature; // check profile + 
} return flag; } @@ -795,16 +807,24 @@ void ReflectionAnalysis::GenMethodMeta(const Klass &klass, MIRStructType &method std::string fullname = fullNameMp[func.GetBaseFuncNameWithTypeStrIdx()]; std::string signature = GetSignatureFromFullName(fullname); ConvertMethodSig(signature); - std::vector<std::string> typeNames; - GetSignatureTypeNames(signature, typeNames); - uint32 signatureIdx = FindOrInsertReflectString(signature); - mirBuilder.AddIntFieldConst(methodsInfoType, newConst, fieldID++, signatureIdx); + + uint32 flag = GetMethodFlag(func); + // If MethodSignature is enabled, generate a MethodSignatureSymbol carrying the signature and a parameter-types cache; + // otherwise emit the signature offset directly. + bool isEnableMethodSignature = (flag & kMethodSignature) == kMethodSignature; + if (isEnableMethodSignature) { + MIRSymbol *methodSignatureSymbol = GetMethodSignatureSymbol(signature); + mirBuilder.AddAddrofFieldConst(methodsInfoType, newConst, fieldID++, *methodSignatureSymbol); + } else { + uint32 signatureIdx = FindOrInsertReflectString(signature); + mirBuilder.AddIntFieldConst(methodsInfoType, newConst, fieldID++, signatureIdx); + } + + // @annotation MIRStructType *classType = klass.GetMIRStructType(); int annotationIdx = SolveAnnotation(*classType, func); mirBuilder.AddIntFieldConst(methodsInfoType, newConst, fieldID++, annotationIdx); // @flag - uint32 flag = GetMethodFlag(func); mirBuilder.AddIntFieldConst(methodsInfoType, newConst, fieldID++, flag); // @argsize: Number of arguments. size_t argsSize = func.GetParamSize(); @@ -861,6 +881,58 @@ MIRSymbol *ReflectionAnalysis::GenMethodAddrData(const MIRSymbol &funcSym) { return methodAddrSt; } +MIRSymbol *ReflectionAnalysis::GetParameterTypesSymbol(uint32 size, uint32 index) { + MIRModule &module = *mirModule; + MIRStructType parameterTypesType(kTypeStruct); +#ifndef USE_32BIT_REF + MIRType *type = GlobalTables::GetTypeTable().GetUInt64(); +#else + MIRType *type = GlobalTables::GetTypeTable().GetUInt32(); +#endif + for (uint32 i = 0; i < size; i++) { + GlobalTables::GetTypeTable().AddFieldToStructType(parameterTypesType, kParameterTypeItemName, *type); + } + + TyIdx parameterTypesTyIdx = GenMetaStructType(module, parameterTypesType, kParameterTypesName); + MIRStructType &parameterTypes = + static_cast<MIRStructType&>(*GlobalTables::GetTypeTable().GetTypeFromTyIdx(parameterTypesTyIdx)); + MIRSymbol *parameterTypesSt = + GetOrCreateSymbol(namemangler::kParameterTypesPrefixStr + std::to_string(index), + parameterTypes.GetTypeIndex(), true); + parameterTypesSt->SetStorageClass(kScFstatic); + return parameterTypesSt; +} + +MIRSymbol *ReflectionAnalysis::GetMethodSignatureSymbol(std::string signature) { + if (mapMethodSignature.find(signature) != mapMethodSignature.end()) { + return mapMethodSignature[signature]; + } + + std::vector<std::string> typeNames; + GetSignatureTypeNames(signature, typeNames); + MIRModule &module = *mirModule; + MIRStructType &methodSignatureType = + static_cast<MIRStructType&>(*GlobalTables::GetTypeTable().GetTypeFromTyIdx(methodSignatureTyIdx)); + MIRArrayType &methodSignatureArrayType = *GlobalTables::GetTypeTable().GetOrCreateArrayType(methodSignatureType, 1); + MIRAggConst *aggConst = module.GetMemPool()->New<MIRAggConst>(module, methodSignatureArrayType); + MIRAggConst *newConst = module.GetMemPool()->New<MIRAggConst>(module, methodSignatureType); + + uint32 fieldID = 1; + uint32 signatureIdx = FindOrInsertReflectString(signature); + mirBuilder.AddIntFieldConst(methodSignatureType, *newConst, fieldID++, signatureIdx); + MIRSymbol *parameterTypesSymbol = GetParameterTypesSymbol(typeNames.size(), 
mapMethodSignature.size()); + mirBuilder.AddAddrofFieldConst(methodSignatureType, *newConst, fieldID++, *parameterTypesSymbol); + aggConst->PushBack(newConst); + + MIRSymbol *methodSignatureSt = + GetOrCreateSymbol(namemangler::kMethodSignaturePrefixStr + std::to_string(mapMethodSignature.size()), + methodSignatureType.GetTypeIndex(), true); + methodSignatureSt->SetStorageClass(kScFstatic); + methodSignatureSt->SetKonst(aggConst); + mapMethodSignature[signature] = methodSignatureSt; + return methodSignatureSt; +} + MIRSymbol *ReflectionAnalysis::GenMethodsMetaData(const Klass &klass) { MIRStructType *classType = klass.GetMIRStructType(); if (classType == nullptr || classType->GetMethods().empty()) { @@ -1792,6 +1864,12 @@ void ReflectionAnalysis::GenMetadataType(MIRModule &mirModule) { MIRStructType methodAddrDataType(kTypeStruct); GlobalTables::GetTypeTable().AddFieldToStructType(methodAddrDataType, kMethodAddrDataStr, *typeVoidPtr); methodAddrDataTyIdx = GenMetaStructType(mirModule, methodAddrDataType, kMethodAddrDataTypeName); + + // MethodSignature + MIRStructType methodSignatureType(kTypeStruct); + GlobalTables::GetTypeTable().AddFieldToStructType(methodSignatureType, kMethodSignatureOffsetName, *typeI32); + GlobalTables::GetTypeTable().AddFieldToStructType(methodSignatureType, kMethodSignatureParameterName, *typeVoidPtr); + methodSignatureTyIdx = GenMetaStructType(mirModule, methodSignatureType, kMethodSignatureTypeName); } void ReflectionAnalysis::GenClassHashMetaData() { diff --git a/src/mpl2mpl/src/vtable_analysis.cpp b/src/mpl2mpl/src/vtable_analysis.cpp index 11ab1e13e46cc8fe45188b4765e66ec94c24fe0a..2d2dc5a8db18e7be05cf710ff8e788f73ceeffcc 100644 --- a/src/mpl2mpl/src/vtable_analysis.cpp +++ b/src/mpl2mpl/src/vtable_analysis.cpp @@ -519,7 +519,7 @@ void VtableAnalysis::ReplaceInterfaceInvoke(CallNode &stmt) { ASSERT_NOT_NULL(currentFuncMp); auto *resolveNode = currentFuncMp->New( OP_resolveinterfacefunc, GlobalTables::GetTypeTable().GetCompactPtr()->GetPrimType(), stmt.GetPUIdx(), - tabBaseAddress, builder->GetConstUInt32(0)); + tabBaseAddress, tabBaseAddress->Opnd(0)); stmt.SetOpCode(OP_interfaceicallassigned); stmt.GetNopnd().insert(stmt.GetNopnd().begin(), resolveNode); stmt.SetNumOpnds(stmt.GetNumOpnds() + 1); diff --git a/src/mpl2mpl/src/vtable_impl.cpp b/src/mpl2mpl/src/vtable_impl.cpp index 82603807026aa0ad4834cac341ee4cdc353928fe..df3e65239eab94f9dc4a4fcc7c9d3fa720612084 100644 --- a/src/mpl2mpl/src/vtable_impl.cpp +++ b/src/mpl2mpl/src/vtable_impl.cpp @@ -155,24 +155,40 @@ void VtableImpl::ProcessFunc(MIRFunction *func) { void VtableImpl::ReplaceResolveInterface(StmtNode &stmt, const ResolveFuncNode &resolveNode) { MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(resolveNode.GetPuIdx()); std::string signature = VtableAnalysis::DecodeBaseNameWithType(*func); + MIRType *compactPtrType = GlobalTables::GetTypeTable().GetCompactPtr(); + PrimType compactPtrPrim = compactPtrType->GetPrimType(); + PregIdx pregFuncPtr = currFunc->GetPregTab()->CreatePreg(compactPtrPrim); + + ItabProcess(stmt, resolveNode, signature, pregFuncPtr, *compactPtrType, compactPtrPrim); + + if (stmt.GetOpCode() == OP_regassign) { + auto *regAssign = static_cast(&stmt); + regAssign->SetOpnd(builder->CreateExprRegread(compactPtrPrim, pregFuncPtr), 0); + } else { + auto *icall = static_cast(&stmt); + const size_t nopndSize = icall->GetNopndSize(); + CHECK_FATAL(nopndSize > 0, "container check"); + icall->SetNOpndAt(0, builder->CreateExprRegread(compactPtrPrim, pregFuncPtr)); + } 
+} + +void VtableImpl::ItabProcess(StmtNode &stmt, const ResolveFuncNode &resolveNode, const std::string &signature, + PregIdx &pregFuncPtr, const MIRType &compactPtrType, const PrimType &compactPtrPrim) { int64 hashCode = GetHashIndex(signature.c_str()); + uint64 secondHashCode = GetSecondHashIndex(signature.c_str()); PregIdx pregItabAddress = currFunc->GetPregTab()->CreatePreg(PTY_ptr); RegassignNode *itabAddressAssign = builder->CreateStmtRegassign(PTY_ptr, pregItabAddress, resolveNode.GetTabBaseAddr()); currFunc->GetBody()->InsertBefore(&stmt, itabAddressAssign); // read funcvalue - MIRType *compactPtrType = GlobalTables::GetTypeTable().GetCompactPtr(); - PrimType compactPtrPrim = compactPtrType->GetPrimType(); BaseNode *offsetNode = builder->CreateIntConst(hashCode * kTabEntrySize, PTY_u32); BaseNode *addrNode = builder->CreateExprBinary(OP_add, *GlobalTables::GetTypeTable().GetPtr(), builder->CreateExprRegread(PTY_ptr, pregItabAddress), offsetNode); BaseNode *readFuncPtr = builder->CreateExprIread( - *compactPtrType, *GlobalTables::GetTypeTable().GetOrCreatePointerType(*compactPtrType), 0, addrNode); - PregIdx pregFuncPtr = currFunc->GetPregTab()->CreatePreg(compactPtrPrim); + compactPtrType, *GlobalTables::GetTypeTable().GetOrCreatePointerType(compactPtrType), 0, addrNode); RegassignNode *funcPtrAssign = builder->CreateStmtRegassign(compactPtrPrim, pregFuncPtr, readFuncPtr); currFunc->GetBody()->InsertBefore(&stmt, funcPtrAssign); // In case not found in the fast path, fall to the slow path - uint64 secondHashCode = GetSecondHashIndex(signature.c_str()); MapleAllocator *currentFuncMpAllocator = builder->GetCurrentFuncCodeMpAllocator(); CHECK_FATAL(currentFuncMpAllocator != nullptr, "null ptr check"); MapleVector opnds(currentFuncMpAllocator->Adapter()); @@ -186,20 +202,12 @@ void VtableImpl::ReplaceResolveInterface(StmtNode &stmt, const ResolveFuncNode & opnds.push_back(signatureNode); StmtNode *mccCallStmt = builder->CreateStmtCallRegassigned(mccItabFunc->GetPuidx(), opnds, pregFuncPtr, OP_callassigned); - BaseNode *checkExpr = builder->CreateExprCompare(OP_eq, *GlobalTables::GetTypeTable().GetUInt1(), *compactPtrType, + BaseNode *checkExpr = builder->CreateExprCompare(OP_eq, *GlobalTables::GetTypeTable().GetUInt1(), compactPtrType, builder->CreateExprRegread(compactPtrPrim, pregFuncPtr), builder->CreateIntConst(0, compactPtrPrim)); - auto *ifStmt = static_cast(builder->CreateStmtIf(checkExpr)); + IfStmtNode *ifStmt = static_cast(builder->CreateStmtIf(checkExpr)); ifStmt->GetThenPart()->AddStatement(mccCallStmt); currFunc->GetBody()->InsertBefore(&stmt, ifStmt); - if (stmt.GetOpCode() == OP_regassign) { - auto *regAssign = static_cast(&stmt); - regAssign->SetOpnd(builder->CreateExprRegread(compactPtrPrim, pregFuncPtr), 0); - } else { - auto *icall = static_cast(&stmt); - const size_t nopndSize = icall->GetNopndSize(); - CHECK_FATAL(nopndSize > 0, "container check"); - icall->SetNOpndAt(0, builder->CreateExprRegread(compactPtrPrim, pregFuncPtr)); - } } + } // namespace maple
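Profile::ParseMethodSignature above walks one flat byte region per maple file: a MapleFileProf-style header followed by num fixed-size MethodSignatureItem records, with the cursor advanced by the header size plus the header's size field. The standalone sketch below reproduces that walk; FileSectionHeader and MethodSignatureRecord are illustrative stand-ins whose layouts are assumed here, not the real profile record definitions.

#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

struct FileSectionHeader {      // stand-in for MapleFileProf
  uint32_t idx;                 // string-table index of the maple file name
  uint32_t num;                 // number of records that follow
  uint32_t size;                // byte size of those records
};
struct MethodSignatureRecord {  // stand-in for MethodSignatureItem
  uint32_t methodIdx;
  uint32_t sigIdx;
};

// Walk a buffer of [header][records...][header][records...] the same way
// ParseMethodSignature advances its offset from one file's section to the next.
void ParseSection(const char *data, int fileNum) {
  uint32_t offset = 0;
  for (int i = 0; i < fileNum; ++i) {
    auto *hdr = reinterpret_cast<const FileSectionHeader*>(data + offset);
    auto *rec = reinterpret_cast<const MethodSignatureRecord*>(data + offset + sizeof(FileSectionHeader));
    for (uint32_t k = 0; k < hdr->num; ++k, ++rec) {
      std::cout << "method " << rec->methodIdx << " sig " << rec->sigIdx << "\n";
    }
    offset += sizeof(FileSectionHeader) + hdr->size;  // jump to the next file's section
  }
}

int main() {
  // Build one section with two records in memory, then parse it back.
  FileSectionHeader hdr{0, 2, 2 * sizeof(MethodSignatureRecord)};
  MethodSignatureRecord recs[2] = {{10, 11}, {12, 13}};
  std::vector<char> buf(sizeof(hdr) + sizeof(recs));
  std::memcpy(buf.data(), &hdr, sizeof(hdr));
  std::memcpy(buf.data() + sizeof(hdr), recs, sizeof(recs));
  ParseSection(buf.data(), 1);
  return 0;
}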
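Profile::GetMethodSigStr builds the lookup key by joining the mangled method name and the mangled signature with "_7C", the name-mangling escape for '|' (0x7C), and Profile::CheckMethodSigHot is then a membership test against the methodSigMeta set. A minimal sketch of that key construction and lookup, with namemangler::EncodeName stubbed out and the profile's valid flag omitted:

#include <iostream>
#include <string>
#include <unordered_set>

// Placeholder for namemangler::EncodeName; the real encoder rewrites characters
// such as '/' and ';' into _2F / _3B escapes.
std::string EncodeName(const std::string &raw) { return raw; }

// Key construction mirroring GetMethodSigStr: method name, "_7C", signature.
std::string BuildMethodSigKey(const std::string &method, const std::string &sig) {
  return EncodeName(method) + "_7C" + EncodeName(sig);
}

// Set lookup mirroring CheckMethodSigHot (an empty set means "not hot").
bool IsHotMethodSig(const std::unordered_set<std::string> &hotSigs, const std::string &key) {
  return !hotSigs.empty() && hotSigs.find(key) != hotSigs.end();
}

int main() {
  std::unordered_set<std::string> hotSigs = {BuildMethodSigKey("foo", "(I)V")};
  std::cout << std::boolalpha << IsHotMethodSig(hotSigs, BuildMethodSigKey("foo", "(I)V")) << "\n";  // true
  return 0;
}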
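GenerateSourceInfo collects file, class, and member indices from the Lharmonyos_2Fannotation_2FInterpreter_3B pragmas, and GenerateUnifiedUndefTable then packs those indices into the two 32-bit MUID words of an undef-table entry instead of the raw MUID: (fileIndex << 16) | classIndex in the first word, and for methods the method index with the virtual flag in bit 0 shifted over a 0x7FFF marker, for fields the field index over a 0xFFFF marker. The helper functions below are an illustrative restatement of that packing, reusing the kShiftBit16 and kShiftBit15 constants from the diff.

#include <cstdint>
#include <cstdio>

constexpr uint32_t kShiftBit16 = 16;
constexpr uint32_t kShiftBit15 = 15;

struct PackedWords { uint32_t word0; uint32_t word1; };

// Method entry: word0 = file/class indices, word1 = method index (virtual flag
// in bit 0) shifted above a 0x7FFF marker.
PackedWords PackMethod(uint32_t fileIdx, uint32_t classIdx, uint32_t methodIdx, bool isVirtual) {
  uint32_t method = (methodIdx << 1) | (isVirtual ? 0x1 : 0x0);
  return {(fileIdx << kShiftBit16) | classIdx, (method << kShiftBit15) | 0x7FFF};
}

// Field entry: word0 = file/class indices, word1 = field index over a 0xFFFF marker.
PackedWords PackField(uint32_t fileIdx, uint32_t classIdx, uint32_t fieldIdx) {
  return {(fileIdx << kShiftBit16) | classIdx, (fieldIdx << kShiftBit16) | 0xFFFF};
}

int main() {
  PackedWords m = PackMethod(3, 42, 7, true);
  PackedWords f = PackField(3, 42, 9);
  std::printf("%08x %08x\n", m.word0, m.word1);  // 0003002a 0007ffff
  std::printf("%08x %08x\n", f.word0, f.word1);  // 0003002a 0009ffff
  return 0;
}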
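GetMethodFlag now sets an extra bit, kMethodSignature (0x8), when CheckMethodSigHot reports the method's signature as hot, and GenMethodMeta tests that bit to decide between emitting a bare signature string offset and the address of a __methodSignatureType__ record. The flag arithmetic in isolation, with the profile query reduced to a boolean:

#include <cstdint>
#include <iostream>

constexpr uint64_t kMethodNotVirtual = 0x1;
constexpr uint64_t kMethodSignature = 0x8;

int main() {
  uint64_t flag = kMethodNotVirtual;
  bool hotInProfile = true;            // stand-in for CheckMethodSigHot(...)
  if (hotInProfile) {
    flag |= kMethodSignature;          // mark the method as carrying a signature record
  }
  bool useSignatureSymbol = (flag & kMethodSignature) == kMethodSignature;
  std::cout << std::boolalpha << useSignatureSymbol << "\n";  // true
  return 0;
}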
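GetMethodSignatureSymbol keeps a map from signature string to generated symbol (mapMethodSignature) so that methods sharing a signature share one MethodSignature record and one parameter-types cache, with the map size doubling as the suffix counter for the emitted symbol names. The sketch below shows the same get-or-create pattern over a plain struct; MethodSignatureMeta and the index counter are illustrative stand-ins for the MIRSymbol machinery.

#include <cstdint>
#include <map>
#include <string>

struct MethodSignatureMeta {
  uint32_t signatureStrIdx;   // offset into a reflection string table
  uint32_t parameterSlots;    // number of cache slots for parameter types
};

class SignatureCache {
 public:
  const MethodSignatureMeta &GetOrCreate(const std::string &signature, uint32_t paramCount) {
    auto it = cache_.find(signature);
    if (it != cache_.end()) {
      return it->second;                       // already emitted: reuse the record
    }
    MethodSignatureMeta meta{nextStrIdx_++, paramCount};
    return cache_.emplace(signature, meta).first->second;
  }
 private:
  std::map<std::string, MethodSignatureMeta> cache_;
  uint32_t nextStrIdx_ = 0;
};

int main() {
  SignatureCache cache;
  const MethodSignatureMeta &a = cache.GetOrCreate("(ILjava/lang/String;)V", 2);
  const MethodSignatureMeta &b = cache.GetOrCreate("(ILjava/lang/String;)V", 2);
  return &a == &b ? 0 : 1;  // the same record is reused for the same signature
}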
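The ItabProcess helper factored out above emits IR for a two-level interface dispatch: load the itab slot derived from a primary hash of the signature, and if that slot is zero fall back to the mccItabFunc runtime helper with the secondary hash and the full signature string. Written as direct C++ rather than emitted IR, the control flow amounts to the following sketch; the table, the modulo indexing (the real code computes a byte offset of hashCode * kTabEntrySize), and the slow-path callback are all illustrative.

#include <cstdint>
#include <functional>
#include <string>
#include <vector>

using FuncPtr = uintptr_t;

FuncPtr ResolveInterfaceCall(const std::vector<FuncPtr> &itab,
                             size_t primaryHash,
                             uint64_t secondaryHash,
                             const std::string &signature,
                             const std::function<FuncPtr(uint64_t, const std::string&)> &slowPath) {
  FuncPtr func = itab[primaryHash % itab.size()];  // fast path: one indexed load
  if (func == 0) {                                 // empty or conflicting slot
    func = slowPath(secondaryHash, signature);     // slow path: runtime lookup by hash + signature
  }
  return func;
}

int main() {
  std::vector<FuncPtr> itab(23, 0);
  itab[5] = 0x1000;  // pretend this slot was filled at class-initialization time
  auto slow = [](uint64_t, const std::string &) -> FuncPtr { return 0x2000; };
  FuncPtr fast = ResolveInterfaceCall(itab, 5, 0, "foo|(I)V", slow);
  FuncPtr fallback = ResolveInterfaceCall(itab, 6, 0, "bar|(I)V", slow);
  return (fast == 0x1000 && fallback == 0x2000) ? 0 : 1;
}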