diff --git a/src/bin/jbc2mpl b/src/bin/jbc2mpl index aac408ff7ecf4de0ff77e32cb792e568edcd1673..e4edc0088c3761b77eff352ab2cf727f45936434 100755 Binary files a/src/bin/jbc2mpl and b/src/bin/jbc2mpl differ diff --git a/src/bin/maple b/src/bin/maple index 26fe2babac389b4a15621bb7c1b39855cc7da818..199020769a739f36037ac65f38798197fde6f8aa 100755 Binary files a/src/bin/maple and b/src/bin/maple differ diff --git a/src/deplibs/libmempool.a b/src/deplibs/libmempool.a index 2847e970562f53316f65c05287a72fcde0461694..8271a86613661f7b3cf7257914616ef640d3b67e 100644 Binary files a/src/deplibs/libmempool.a and b/src/deplibs/libmempool.a differ diff --git a/src/deplibs/libmplphase.a b/src/deplibs/libmplphase.a index 98c795f9840f6f4f3bbe4c6e6644ef4f369693ae..fc6c265a96a5f519d8e02dc21fc81ef454f401ec 100644 Binary files a/src/deplibs/libmplphase.a and b/src/deplibs/libmplphase.a differ diff --git a/src/deplibs/libmplutil.a b/src/deplibs/libmplutil.a index d56161938120e50ebbbec83989e610dad0dd1894..4970e2465e66340d6ad134f541b625e261e30212 100644 Binary files a/src/deplibs/libmplutil.a and b/src/deplibs/libmplutil.a differ diff --git a/src/maple_be/include/ad/mad.h b/src/maple_be/include/ad/mad.h index 42393e58b268d2e027c90f5195e5cb6cd48915ff..40dd62faf11ab394e3787cbb3858a77a0f0bc8ff 100644 --- a/src/maple_be/include/ad/mad.h +++ b/src/maple_be/include/ad/mad.h @@ -190,7 +190,7 @@ class MAD { } static void AddUnit(Unit &u) { - allUnits.push_back(&u); + allUnits.emplace_back(&u); } static maple::uint32 GetAllUnitsSize() { @@ -198,7 +198,7 @@ class MAD { } static void AddReservation(Reservation &rev) { - allReservations.push_back(&rev); + allReservations.emplace_back(&rev); } static void AddBypass(Bypass &bp) { diff --git a/src/maple_be/include/be/bbt.h b/src/maple_be/include/be/bbt.h index 733e2e96e83fe9f652367881705fdef20cb87558..f69ee03abc7ae2fdccebd480c90932583d17d8f1 100644 --- a/src/maple_be/include/be/bbt.h +++ b/src/maple_be/include/be/bbt.h @@ -82,7 +82,7 @@ class BBT { } void AddSuccs(BBT *bb) { - succs.push_back(bb); + succs.emplace_back(bb); } void SetCondJumpBranch(BBT *bb) { diff --git a/src/maple_be/include/be/becommon.h b/src/maple_be/include/be/becommon.h index fd069d0d135827ca61bfa3ba60d6d4fd4235632c..16e4fa1053cdebe3cf9f892c5a8464463a538c25 100644 --- a/src/maple_be/include/be/becommon.h +++ b/src/maple_be/include/be/becommon.h @@ -160,13 +160,13 @@ class BECommon { typeSizeTable.at(idx) = value; } void AddTypeSize(uint64 value) { - typeSizeTable.push_back(value); + typeSizeTable.emplace_back(value); } void AddTypeSizeAndAlign(TyIdx tyIdx, uint64 value) { if (typeSizeTable.size() == tyIdx) { - typeSizeTable.push_back(value); - tableAlignTable.push_back(value); + typeSizeTable.emplace_back(value); + tableAlignTable.emplace_back(value); } else { CHECK_FATAL(typeSizeTable.size() > tyIdx, "there are some types haven't set type size and align, %d"); } @@ -185,7 +185,7 @@ class BECommon { tableAlignTable.at(idx) = value; } void AddTypeAlign(uint8 value) { - tableAlignTable.push_back(value); + tableAlignTable.emplace_back(value); } FieldID GetStructFieldCount(uint32 idx) const { diff --git a/src/maple_be/include/be/try_catch.h b/src/maple_be/include/be/try_catch.h index 77e511c40d81be9fb6309673f955e82f78c505e2..e99963946cc2dbd08370e54fcb45933f88db80f4 100644 --- a/src/maple_be/include/be/try_catch.h +++ b/src/maple_be/include/be/try_catch.h @@ -83,7 +83,7 @@ class TryEndTryBlock { } void PushToEnclosedBBs(BBT &bb) { - enclosedBBs.push_back(&bb); + enclosedBBs.emplace_back(&bb); } MapleVector 
&GetLabeledBBsInTry() { @@ -130,8 +130,8 @@ class TryCatchBlocksLower { bool bodyEndWithEndTry = false; bool generateEHCode = false; MapleVector bbList; - MapleMap prevBBOfTry; - MapleMap firstStmtToBBMap; + MapleUnorderedMap prevBBOfTry; + MapleUnorderedMap firstStmtToBBMap; MapleVector catchesSeenSoFar; void ProcessEnclosedBBBetweenTryEndTry(); diff --git a/src/maple_be/include/cg/aarch64/aarch64_cgfunc.h b/src/maple_be/include/cg/aarch64/aarch64_cgfunc.h index 7a44ed8e291242cf09a564cfe1ccecc013c5bdd7..51b8010dfb4190d2393b963f1367264a9638861d 100644 --- a/src/maple_be/include/cg/aarch64/aarch64_cgfunc.h +++ b/src/maple_be/include/cg/aarch64/aarch64_cgfunc.h @@ -32,13 +32,13 @@ class AArch64CGFunc : public CGFunc { calleeSavedRegs(mallocator.Adapter()), formalRegList(mallocator.Adapter()), phyRegOperandTable(std::less(), mallocator.Adapter()), - hashLabelOpndTable(std::less(), mallocator.Adapter()), + hashLabelOpndTable(mallocator.Adapter()), hashOfstOpndTable(std::less(), mallocator.Adapter()), hashMemOpndTable(std::less(), mallocator.Adapter()), memOpndsRequiringOffsetAdjustment(std::less(), mallocator.Adapter()), memOpndsForStkPassedArguments(std::less(), mallocator.Adapter()), - immOpndsRequiringOffsetAdjustment(std::less(), mallocator.Adapter()), - immOpndsRequiringOffsetAdjustmentForRefloc(std::less(), mallocator.Adapter()) { + immOpndsRequiringOffsetAdjustment(mallocator.Adapter()), + immOpndsRequiringOffsetAdjustmentForRefloc(mallocator.Adapter()) { uCatch.regNOCatch = 0; CGFunc::SetMemlayout(*memPool.New(b, f, mallocator)); CGFunc::GetMemlayout()->SetCurrFunction(*this); @@ -51,7 +51,7 @@ class AArch64CGFunc : public CGFunc { } void PushElemIntoFormalRegList(AArch64reg reg) { - formalRegList.push_back(reg); + formalRegList.emplace_back(reg); } uint32 GetRefCount() const { @@ -333,7 +333,7 @@ class AArch64CGFunc : public CGFunc { if (find(calleeSavedRegs.begin(), calleeSavedRegs.end(), reg) != calleeSavedRegs.end()) { return; } - calleeSavedRegs.push_back(reg); + calleeSavedRegs.emplace_back(reg); ASSERT((AArch64isa::IsGPRegister(reg) || AArch64isa::IsFPSIMDRegister(reg)), "Int or FP registers are expected"); if (AArch64isa::IsGPRegister(reg)) { ++numIntregToCalleeSave; @@ -499,7 +499,7 @@ class AArch64CGFunc : public CGFunc { IntrinsiccallNode *cleanEANode = nullptr; MapleMap phyRegOperandTable; /* machine register operand table */ - MapleMap hashLabelOpndTable; + MapleUnorderedMap hashLabelOpndTable; MapleMap hashOfstOpndTable; MapleMap hashMemOpndTable; /* @@ -508,8 +508,8 @@ class AArch64CGFunc : public CGFunc { */ MapleMap memOpndsRequiringOffsetAdjustment; MapleMap memOpndsForStkPassedArguments; - MapleMap immOpndsRequiringOffsetAdjustment; - MapleMap immOpndsRequiringOffsetAdjustmentForRefloc; + MapleUnorderedMap immOpndsRequiringOffsetAdjustment; + MapleUnorderedMap immOpndsRequiringOffsetAdjustmentForRefloc; union { regno_t regNOCatch; /* For O2. */ Operand *opndCatch; /* For O0-O1. 
*/ diff --git a/src/maple_be/include/cg/aarch64/aarch64_color_ra.h b/src/maple_be/include/cg/aarch64/aarch64_color_ra.h index 849971d08c364f00406b480eaaee1d3ceff55f1b..95812d8bbf197e6f3bb882fc0d6a43f7cafd8c8c 100644 --- a/src/maple_be/include/cg/aarch64/aarch64_color_ra.h +++ b/src/maple_be/include/cg/aarch64/aarch64_color_ra.h @@ -44,11 +44,21 @@ inline bool FindNotIn(const std::set &set, const T &item) { return set.find(item) == set.end(); } +template > +inline bool FindNotIn(const std::unordered_set &set, const T &item) { + return set.find(item) == set.end(); +} + template inline bool FindNotIn(const MapleSet &set, const T &item) { return set.find(item) == set.end(); } +template +inline bool FindNotIn(const MapleUnorderedSet &set, const T &item) { + return set.find(item) == set.end(); +} + template inline bool FindNotIn(const MapleList &list, const T &item) { return std::find(list.begin(), list.end(), item) == list.end(); @@ -59,11 +69,21 @@ inline bool FindIn(const std::set &set, const T &item) { return set.find(item) != set.end(); } +template > +inline bool FindIn(const std::unordered_set &set, const T &item) { + return set.find(item) != set.end(); +} + template inline bool FindIn(const MapleSet &set, const T &item) { return set.find(item) != set.end(); } +template +inline bool FindIn(const MapleUnorderedSet &set, const T &item) { + return set.find(item) != set.end(); +} + template inline bool FindIn(const MapleList &list, const T &item) { return std::find(list.begin(), list.end(), item) != list.end(); @@ -571,8 +591,7 @@ class LocalRaInfo { public: explicit LocalRaInfo(MapleAllocator &allocator) : defCnt(allocator.Adapter()), - useCnt(allocator.Adapter()), - globalPreg(allocator.Adapter()) {} + useCnt(allocator.Adapter()) {} ~LocalRaInfo() = default; @@ -600,32 +619,9 @@ class LocalRaInfo { useCnt[key] = value; } - void InsertElemToGlobalPreg(regno_t regNO) { - globalPreg.insert(regNO); - } - - uint64 GetGlobalPregMask() const { - return globalPregMask; - } - - void SetGlobalPregMask(uint64 mask) { - globalPregMask = mask; - } - - uint64 GetLocalPregMask() const { - return localPregMask; - } - - void SetLocalPregMask(uint64 mask) { - localPregMask = mask; - } - private: MapleMap defCnt; MapleMap useCnt; - MapleSet globalPreg; - uint64 globalPregMask = 0; /* global phys reg used in bb */ - uint64 localPregMask = 0; /* local phys reg used in bb */ }; /* For each bb, record info pertain to allocation */ @@ -722,13 +718,13 @@ class FinalizeRegisterInfo { } void SetDefOperand(Operand &opnd, const int32 idx) { - defOperands.push_back(&opnd); - defIdx.push_back(idx); + defOperands.emplace_back(&opnd); + defIdx.emplace_back(idx); } void SetUseOperand(Operand &opnd, const int32 idx) { - useOperands.push_back(&opnd); - useIdx.push_back(idx); + useOperands.emplace_back(&opnd); + useIdx.emplace_back(idx); } int32 GetMemOperandIdx() const { @@ -1153,7 +1149,7 @@ class GraphColorRegAllocator : public AArch64RegAllocator { void SetupLiveRangeByRegNO(regno_t liveOut, BB &bb, uint32 currPoint); bool UpdateInsnCntAndSkipUseless(Insn &insn, uint32 &currPoint); void UpdateCallInfo(uint32 bbId); - void ClassifyOperand(std::set &pregs, std::set &vregs, const Operand &opnd); + void ClassifyOperand(std::unordered_set &pregs, std::unordered_set &vregs, const Operand &opnd); void SetOpndConflict(const Insn &insn, bool onlyDef); void UpdateOpndConflict(const Insn &insn, bool multiDef); void ComputeLiveRangesForEachDefOperand(Insn &insn, bool &multiDef); @@ -1199,8 +1195,8 @@ class GraphColorRegAllocator : 
public AArch64RegAllocator { RegType regType); MemOperand *GetReuseMem(uint32 vregNO, uint32 size, RegType regType); MemOperand *GetSpillMem(uint32 vregNO, bool isDest, Insn &insn, AArch64reg regNO, bool &isOutOfRange); - bool SetAvailableSpillReg(std::set &cannotUseReg, LiveRange &lr, uint64 &usedRegMask); - void CollectCannotUseReg(std::set &cannotUseReg, LiveRange &lr, Insn &insn); + bool SetAvailableSpillReg(std::unordered_set &cannotUseReg, LiveRange &lr, uint64 &usedRegMask); + void CollectCannotUseReg(std::unordered_set &cannotUseReg, LiveRange &lr, Insn &insn); regno_t PickRegForSpill(uint64 &usedRegMask, RegType regType, uint32 spillIdx, bool &needSpillLr); bool SetRegForSpill(LiveRange &lr, Insn &insn, uint32 spillIdx, uint64 &usedRegMask, bool isDef); bool GetSpillReg(Insn &insn, LiveRange &lr, uint32 &spillIdx, uint64 &usedRegMask, bool isDef); @@ -1230,26 +1226,26 @@ class GraphColorRegAllocator : public AArch64RegAllocator { void ComputeBBForNewSplit(LiveRange &newLr, LiveRange &oldLr); void ClearLrBBFlags(const std::set &member); void ComputeBBForOldSplit(LiveRange &newLr, LiveRange &oldLr); - bool LrCanBeColored(LiveRange &lr, BB &bbAdded, std::set &conflictRegs); + bool LrCanBeColored(LiveRange &lr, BB &bbAdded, std::unordered_set &conflictRegs); void MoveLrBBInfo(LiveRange &oldLr, LiveRange &newLr, BB &bb); bool ContainsLoop(const CGFuncLoops &loop, const std::set &loops) const; void GetAllLrMemberLoops(LiveRange &lr, std::set &loop); bool SplitLrShouldSplit(LiveRange &lr); - bool SplitLrFindCandidateLr(LiveRange &lr, LiveRange &newLr, std::set &conflictRegs); + bool SplitLrFindCandidateLr(LiveRange &lr, LiveRange &newLr, std::unordered_set &conflictRegs); void SplitLrHandleLoops(LiveRange &lr, LiveRange &newLr, const std::set &oldLoops, const std::set &newLoops); void SplitLrFixNewLrCallsAndRlod(LiveRange &newLr, const std::set &origLoops); void SplitLrFixOrigLrCalls(LiveRange &lr); void SplitLrUpdateInterference(LiveRange &lr); - void SplitLrUpdateRegInfo(LiveRange &origLr, LiveRange &newLr, std::set &conflictRegs); + void SplitLrUpdateRegInfo(LiveRange &origLr, LiveRange &newLr, std::unordered_set &conflictRegs); void SplitLrErrorCheckAndDebug(LiveRange &origLr); void SplitLr(LiveRange &lr); static constexpr uint16 kMaxUint16 = 0x7fff; MapleVector bbVec; - MapleSet vregLive; - MapleSet pregLive; + MapleUnorderedSet vregLive; + MapleUnorderedSet pregLive; MapleVector lrVec; MapleVector localRegVec; /* local reg info for each bb, no local reg if null */ MapleVector bbRegInfo; /* register assignment info for each bb */ diff --git a/src/maple_be/include/cg/aarch64/aarch64_dependence.h b/src/maple_be/include/cg/aarch64/aarch64_dependence.h index c1f805c60bbec34fb86d2dfcddeb155fbd8f065c..fad53e7d79b7bcaca844edbf12ae208898c95916 100644 --- a/src/maple_be/include/cg/aarch64/aarch64_dependence.h +++ b/src/maple_be/include/cg/aarch64/aarch64_dependence.h @@ -74,13 +74,14 @@ class AArch64DepAnalysis : public DepAnalysis { DepNode *GenerateDepNode(Insn &insn, MapleVector &nodes, int32 nodeSum, const MapleVector &comments); void BuildAmbiInsnDependency(Insn &insn); void BuildMayThrowInsnDependency(Insn &insn); - void UpdateRegUseAndDef(Insn &insn, DepNode &depNode); + void UpdateRegUseAndDef(Insn &insn, DepNode &depNode, MapleVector &nodes); void UpdateStackAndHeapDependency(DepNode &depNode, Insn &insn, const Insn &locInsn); AArch64MemOperand *BuildNextMemOperandByByteSize(AArch64MemOperand &aarchMemOpnd, uint32 byteSize) const; void AddDependence4InsnInVectorByType(MapleVector 
&insns, Insn &insn, const DepType &type); void AddDependence4InsnInVectorByTypeAndCmp(MapleVector &insns, Insn &insn, const DepType &type); void ReplaceDepNodeWithNewInsn(DepNode &firstNode, DepNode &secondNode, Insn& newInsn, bool isFromClinit) const; void ClearDepNodeInfo(DepNode &depNode) const; + void AddEndSeparatorNode(MapleVector &nodes); Insn **regDefs = nullptr; RegList **regUses = nullptr; @@ -89,8 +90,6 @@ class AArch64DepAnalysis : public DepAnalysis { Insn *lastCallInsn = nullptr; uint32 separatorIndex = 0; Insn *lastFrameDef = nullptr; - MapleVector useRegnos; - MapleVector defRegnos; MapleVector stackUses; MapleVector stackDefs; MapleVector heapUses; @@ -100,7 +99,9 @@ class AArch64DepAnalysis : public DepAnalysis { MapleVector ambiInsns; /* register number that catch bb and cleanup bb uses. */ MapleSet ehInRegs; + /* the bb to be scheduling currently */ + BB *curBB = nullptr; }; } -#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_DEPENDENCE_H */ \ No newline at end of file +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_DEPENDENCE_H */ diff --git a/src/maple_be/include/cg/aarch64/aarch64_insn.h b/src/maple_be/include/cg/aarch64/aarch64_insn.h index 71b22f475018b7b8ff5bfc8c76062b6bee3b7966..03e6f9547e5f66657167ec660eda5f4a0e7433e4 100644 --- a/src/maple_be/include/cg/aarch64/aarch64_insn.h +++ b/src/maple_be/include/cg/aarch64/aarch64_insn.h @@ -155,7 +155,7 @@ class AArch64Insn : public Insn { uint32 GetLatencyType() const override; - bool CheckRefField(int32 opndIndex) const; + bool CheckRefField(int32 opndIndex, bool isEmit) const; private: void CheckOpnd(Operand &opnd, OpndProp &mopd) const; diff --git a/src/maple_be/include/cg/aarch64/aarch64_schedule.h b/src/maple_be/include/cg/aarch64/aarch64_schedule.h index 0b4dddc828e727f03c54b865e67a2a4621daae97..bf621e0c6f83d571ca7a70b5f65390af773bc07c 100644 --- a/src/maple_be/include/cg/aarch64/aarch64_schedule.h +++ b/src/maple_be/include/cg/aarch64/aarch64_schedule.h @@ -69,7 +69,7 @@ class ScheduleProcessInfo { } void PushElemIntoAvailableReadyList(DepNode *node) { - availableReadyList.push_back(node); + availableReadyList.emplace_back(node); } size_t SizeOfAvailableReadyList() const { @@ -96,7 +96,7 @@ class ScheduleProcessInfo { node->SetState(kScheduled); node->SetSchedCycle(currCycle); node->OccupyUnits(); - scheduledNodes.push_back(node); + scheduledNodes.emplace_back(node); } bool IsFirstSeparator() const { diff --git a/src/maple_be/include/cg/cgbb.h b/src/maple_be/include/cg/cgbb.h index d5803ee43571ce75867f4422cb69dae835b2222c..2fa93935384353ab02aad66f35f6b4f56ea80867 100644 --- a/src/maple_be/include/cg/cgbb.h +++ b/src/maple_be/include/cg/cgbb.h @@ -542,7 +542,7 @@ class BB { return rangeGotoLabelVec; } void PushBackRangeGotoLabel(LabelIdx labelIdx) { - rangeGotoLabelVec.push_back(labelIdx); + rangeGotoLabelVec.emplace_back(labelIdx); } const Insn *GetFirstLoc() const { return firstLoc; diff --git a/src/maple_be/include/cg/cgfunc.h b/src/maple_be/include/cg/cgfunc.h index 52afe3a1b28900836f268842d934d67e50a24aeb..17d304c512d48983ceb61c4cf74487291f290122 100644 --- a/src/maple_be/include/cg/cgfunc.h +++ b/src/maple_be/include/cg/cgfunc.h @@ -565,7 +565,7 @@ class CGFunc { } void PushBackExitBBsVec(BB &bb) { - exitBBVec.push_back(&bb); + exitBBVec.emplace_back(&bb); } void ClearExitBBsVec() { @@ -638,7 +638,7 @@ class CGFunc { } void AddEmitSt(MIRSymbol &symbol) { - emitStVec.push_back(&symbol); + emitStVec.emplace_back(&symbol); } MapleVector &GetLoops() { @@ -650,7 +650,7 @@ class CGFunc { } void 
PushBackLoops(CGFuncLoops &loop) { - loops.push_back(&loop); + loops.emplace_back(&loop); } #if TARGARM32 @@ -787,11 +787,10 @@ class CGFunc { uint32 vRegCount; /* for assigning a number for each CG virtual register */ uint32 maxRegCount; /* for the current virtual register number limit */ MapleVector vRegTable; /* table of CG's virtual registers indexed by v_reg no */ - MapleMap vRegOperandTable; - MapleMap pRegSpillMemOperands; - MapleMap spillRegMemOperands; - MapleSet spillRegMemOperandsAdj; - MapleMap reuseSpillLocMem; + MapleUnorderedMap vRegOperandTable; + MapleUnorderedMap pRegSpillMemOperands; + MapleUnorderedMap spillRegMemOperands; + MapleUnorderedMap reuseSpillLocMem; LabelIdx firstCGGenLabelIdx; MapleMap labelMap; #if DEBUG @@ -868,7 +867,7 @@ class CGFunc { BB *dummyBB; /* use this bb for add some instructions to bb that is no curBB. */ Insn *volReleaseInsn = nullptr; /* use to record the release insn for volatile strore */ MapleVector exitBBVec; - MapleMap lab2BBMap; + MapleUnorderedMap lab2BBMap; BECommon &beCommon; MemLayout *memLayout = nullptr; MapleAllocator *funcScopeAllocator; diff --git a/src/maple_be/include/cg/datainfo.h b/src/maple_be/include/cg/datainfo.h index 92b75d224dda6851416d2fa610ae9cf4b777e9b6..bb374b9995e8f60ae3b8c148a3ed3dc77d012506 100644 --- a/src/maple_be/include/cg/datainfo.h +++ b/src/maple_be/include/cg/datainfo.h @@ -26,7 +26,7 @@ class DataInfo { : allocator(&mp), info(allocator.Adapter()) { for (uint64 i = 0;i < (bitNum / kWordSize + 1); ++i) { - info.push_back(0); + info.emplace_back(0); } } @@ -132,7 +132,7 @@ class DataInfo { /* add one more size for each enlarge action */ auto sizeToEnlarge = static_cast((bitNO / kWordSize + 1) - info.size()); for (int32 i = 0; i < sizeToEnlarge; i++) { - info.push_back(0ULL); + info.emplace_back(0ULL); } } diff --git a/src/maple_be/include/cg/deps.h b/src/maple_be/include/cg/deps.h index 47d501f9d48aa3070f8cde3b55d47d39c6e40367..b322389000d90d87e282384629412406fc673316 100644 --- a/src/maple_be/include/cg/deps.h +++ b/src/maple_be/include/cg/deps.h @@ -102,14 +102,17 @@ class DepNode { eStart(0), lStart(0), visit(0), type(kNodeTypeNormal), state(kNormal), index(0), simulateCycle(0), schedCycle(0), bruteForceSchedCycle(0), validPredsSize(0), validSuccsSize(0), preds(alloc.Adapter()), succs(alloc.Adapter()), comments(alloc.Adapter()), - cfiInsns(alloc.Adapter()), clinitInsns(alloc.Adapter()), locInsn(nullptr), regPressure(nullptr) {} + cfiInsns(alloc.Adapter()), clinitInsns(alloc.Adapter()), locInsn(nullptr), useRegnos(alloc.Adapter()), + defRegnos(alloc.Adapter()), regPressure(nullptr) {} DepNode(Insn &insn, MapleAllocator &alloc, Unit * const *unit, uint32 num, Reservation &rev) : insn(&insn), units(unit), reservation(&rev), unitNum(num), eStart(0), lStart(0), visit(0), type(kNodeTypeNormal), state(kNormal), index(0), simulateCycle(0), schedCycle(0), bruteForceSchedCycle(0), validPredsSize(0), validSuccsSize(0), preds(alloc.Adapter()), succs(alloc.Adapter()), comments(alloc.Adapter()), cfiInsns(alloc.Adapter()), - clinitInsns(alloc.Adapter()), locInsn(nullptr), regPressure(nullptr) {} + clinitInsns(alloc.Adapter()), locInsn(nullptr), useRegnos(alloc.Adapter()), defRegnos(alloc.Adapter()), + regPressure(nullptr) {} + virtual ~DepNode() = default; Insn *GetInsn() const { @@ -215,8 +218,11 @@ class DepNode { const MapleVector &GetPreds() const { return preds; } + void ReservePreds(size_t size) { + preds.reserve(size); + } void AddPred(DepLink &depLink) { - preds.push_back(&depLink); + 
preds.emplace_back(&depLink); } void RemovePred() { preds.pop_back(); @@ -224,8 +230,11 @@ class DepNode { const MapleVector &GetSuccs() const{ return succs; } + void ReserveSuccs(size_t size) { + succs.reserve(size); + } void AddSucc(DepLink &depLink) { - succs.push_back(&depLink); + succs.emplace_back(&depLink); } void RemoveSucc() { succs.pop_back(); @@ -237,7 +246,7 @@ class DepNode { comments = com; } void AddComments(Insn &insn) { - comments.push_back(&insn); + comments.emplace_back(&insn); } void ClearComments() { comments.clear(); @@ -249,7 +258,7 @@ class DepNode { cfiInsns = insns; } void AddCfiInsn(Insn &insn) { - cfiInsns.push_back(&insn); + cfiInsns.emplace_back(&insn); } void ClearCfiInsns() { cfiInsns.clear(); @@ -261,7 +270,7 @@ class DepNode { clinitInsns = insns; } void AddClinitInsn(Insn &insn) { - clinitInsns.push_back(&insn); + clinitInsns.emplace_back(&insn); } const RegPressure *GetRegPressure() const { return regPressure; @@ -295,11 +304,11 @@ class DepNode { regPressure->IncDeadDefByIndex(idx); } - void SetRegUses(regno_t regNO, RegList ®List) { - regPressure->SetRegUses(regNO, ®List); + void SetRegUses(RegList ®List) { + regPressure->SetRegUses(®List); } - void SetRegDefs(regno_t regNO, RegList *regList) { - regPressure->SetRegDefs(regNO, regList); + void SetRegDefs(size_t idx, RegList *regList) { + regPressure->SetRegDefs(idx, regList); } int32 GetIncPressure() const { @@ -326,11 +335,33 @@ class DepNode { void SetPriority(int32 value) { regPressure->SetPriority(value); } - const MapleUnorderedMap &GetRegUses() const { - return regPressure->GetRegUses(); + RegList *GetRegUses(size_t idx) const { + return regPressure->GetRegUses(idx); + } + void InitRegUsesSize(size_t size) { + regPressure->InitRegUsesSize(size); } - const MapleUnorderedMap &GetRegDefs() const { - return regPressure->GetRegDefs(); + RegList *GetRegDefs(size_t idx) const { + return regPressure->GetRegDefs(idx); + } + void InitRegDefsSize(size_t size) { + regPressure->InitRegDefsSize(size); + } + + void SetNumCall(int32 value) { + regPressure->SetNumCall(value); + } + + int32 GetNumCall() const { + return regPressure->GetNumCall(); + } + + void SetHasNativeCallRegister(bool value) const { + regPressure->SetHasNativeCallRegister(value); + } + + bool GetHasNativeCallRegister() const { + return regPressure->GetHasNativeCallRegister(); } const Insn *GetLocInsn() const { @@ -350,6 +381,42 @@ class DepNode { PRINT_STR_VAL("validPredsSize: ", validPredsSize); PRINT_STR_VAL("validSuccsSize: ", validSuccsSize); LogInfo::MapleLogger() << '\n'; + + constexpr int32 width = 12; + LogInfo::MapleLogger() << std::left << std::setw(width) << "usereg: "; + for (const auto &useReg : useRegnos) { + LogInfo::MapleLogger() << "R" << useReg << " "; + } + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << std::left << std::setw(width) << "defreg: "; + for (const auto &defReg : defRegnos) { + LogInfo::MapleLogger() << "R" << defReg << " "; + } + LogInfo::MapleLogger() << "\n"; + } + + void SetHasPreg(bool value) { + regPressure->SetHasPreg(value); + } + + bool GetHasPreg() const { + return regPressure->GetHasPreg(); + } + + void AddUseReg(regno_t reg) { + useRegnos.emplace_back(reg); + } + + const MapleVector &GetUseRegnos() const { + return useRegnos; + } + + void AddDefReg(regno_t reg) { + defRegnos.emplace_back(reg); + } + + const MapleVector &GetDefRegnos() const { + return defRegnos; } private: @@ -387,6 +454,9 @@ class DepNode { /* loc insn which indicate insn location in source file */ const Insn *locInsn; + 
MapleVector useRegnos; + MapleVector defRegnos; + /* For register pressure analysis */ RegPressure *regPressure; }; diff --git a/src/maple_be/include/cg/ebo.h b/src/maple_be/include/cg/ebo.h index c3151581b05f7fc16969218e1421ab1b7518af25..ed3572ef60c67a6454435b07021828def2eb2ab0 100644 --- a/src/maple_be/include/cg/ebo.h +++ b/src/maple_be/include/cg/ebo.h @@ -113,7 +113,7 @@ class Ebo { eboMp(&memPool), eboAllocator(&memPool), visitedBBs(eboAllocator.Adapter()), - vRegInfo(std::less(), eboAllocator.Adapter()), + vRegInfo(eboAllocator.Adapter()), exprInfoTable(eboAllocator.Adapter()), insnInfoTable(eboAllocator.Adapter()) {} @@ -225,7 +225,7 @@ class Ebo { OpndInfo *lastOpndInfo = nullptr; InsnInfo *firstInsnInfo = nullptr; InsnInfo *lastInsnInfo = nullptr; - MapleMap vRegInfo; + MapleUnorderedMap vRegInfo; MapleVector exprInfoTable; MapleVector insnInfoTable; }; diff --git a/src/maple_be/include/cg/eh_func.h b/src/maple_be/include/cg/eh_func.h index b81f558524da62f3084cae921ac77d9e0175cf7a..7532d3a7593d4ff38c4fe46e4c3f4012cf4d3651 100644 --- a/src/maple_be/include/cg/eh_func.h +++ b/src/maple_be/include/cg/eh_func.h @@ -52,7 +52,7 @@ class EHTry { } void PushBackCatchVec(CatchNode &catchNode) { - catchVec.push_back(&catchNode); + catchVec.emplace_back(&catchNode); } CatchNode *GetCatchNodeAt(size_t pos) const { @@ -143,7 +143,7 @@ class EHFunc { } void AddTry(EHTry &ehTry) { - tryVec.push_back(&ehTry); + tryVec.emplace_back(&ehTry); } size_t GetEHTyTableSize() const { @@ -172,7 +172,7 @@ class EHFunc { } void AddRethrow(EHThrow &rethrow) { - rethrowVec.push_back(&rethrow); + rethrowVec.emplace_back(&rethrow); } private: diff --git a/src/maple_be/include/cg/emit.h b/src/maple_be/include/cg/emit.h index dc71c415496f9cd7793ab745642c3d5df6bd04a7..450ef5167f29d59736a2bb37318936ff5bbe39ba 100644 --- a/src/maple_be/include/cg/emit.h +++ b/src/maple_be/include/cg/emit.h @@ -105,7 +105,7 @@ class Emitter { public: Emitter(CG &cg, const std::string &asmFileName) : cg(&cg), - rangeIdx2PrefixStr(std::less(), cg.GetMIRModule()->GetMPAllocator().Adapter()), + rangeIdx2PrefixStr(cg.GetMIRModule()->GetMPAllocator().Adapter()), hugeSoTargets(cg.GetMIRModule()->GetMPAllocator().Adapter()) { outStream.open(asmFileName, std::ios::trunc); MIRModule &mirModule = *cg.GetMIRModule(); @@ -259,7 +259,7 @@ class Emitter { CG *cg; MOperator currentMop = UINT_MAX; - MapleMap rangeIdx2PrefixStr; + MapleUnorderedMap rangeIdx2PrefixStr; const AsmInfo *asmInfo; std::ofstream outStream; MemPool *memPool; diff --git a/src/maple_be/include/cg/insn.h b/src/maple_be/include/cg/insn.h index 28a5aa5548a7cac6a5ae2c2b2f6e5982fbaa6890..7c6fa5b66c0cd400539f81b338f4ce171acbceda 100644 --- a/src/maple_be/include/cg/insn.h +++ b/src/maple_be/include/cg/insn.h @@ -52,30 +52,30 @@ class Insn { Check(); #endif } - Insn(MemPool &memPool, MOperator opc, Operand &opnd0) : Insn(memPool, opc) { opnds.push_back(&opnd0); } + Insn(MemPool &memPool, MOperator opc, Operand &opnd0) : Insn(memPool, opc) { opnds.emplace_back(&opnd0); } Insn(MemPool &memPool, MOperator opc, Operand &opnd0, Operand &opnd1) : Insn(memPool, opc) { - opnds.push_back(&opnd0); - opnds.push_back(&opnd1); + opnds.emplace_back(&opnd0); + opnds.emplace_back(&opnd1); } Insn(MemPool &memPool, MOperator opc, Operand &opnd0, Operand &opnd1, Operand &opnd2) : Insn(memPool, opc) { - opnds.push_back(&opnd0); - opnds.push_back(&opnd1); - opnds.push_back(&opnd2); + opnds.emplace_back(&opnd0); + opnds.emplace_back(&opnd1); + opnds.emplace_back(&opnd2); } Insn(MemPool &memPool, MOperator 
opc, Operand &opnd0, Operand &opnd1, Operand &opnd2, Operand &opnd3) : Insn(memPool, opc) { - opnds.push_back(&opnd0); - opnds.push_back(&opnd1); - opnds.push_back(&opnd2); - opnds.push_back(&opnd3); + opnds.emplace_back(&opnd0); + opnds.emplace_back(&opnd1); + opnds.emplace_back(&opnd2); + opnds.emplace_back(&opnd3); } Insn(MemPool &memPool, MOperator opc, Operand &opnd0, Operand &opnd1, Operand &opnd2, Operand &opnd3, Operand &opnd4) : Insn(memPool, opc) { - opnds.push_back(&opnd0); - opnds.push_back(&opnd1); - opnds.push_back(&opnd2); - opnds.push_back(&opnd3); - opnds.push_back(&opnd4); + opnds.emplace_back(&opnd0); + opnds.emplace_back(&opnd1); + opnds.emplace_back(&opnd2); + opnds.emplace_back(&opnd3); + opnds.emplace_back(&opnd4); } virtual ~Insn() = default; @@ -88,7 +88,7 @@ class Insn { } void AddOperand(Operand &opnd) { - opnds.push_back(&opnd); + opnds.emplace_back(&opnd); } void PopBackOperand() { opnds.pop_back(); @@ -575,7 +575,7 @@ class Insn { mOp = originalInsn.mOp; uint32 opndNum = originalInsn.GetOperandSize(); for (uint32 i = 0; i < opndNum; i++) { - opnds.push_back(originalInsn.opnds[i]->Clone(memPool)); + opnds.emplace_back(originalInsn.opnds[i]->Clone(memPool)); } } diff --git a/src/maple_be/include/cg/loop.h b/src/maple_be/include/cg/loop.h index 2109c808d359a51e047a60cec03359a0b0ec861f..03ad0a5c392100575cc8ed748a7abb15ccfab8c2 100644 --- a/src/maple_be/include/cg/loop.h +++ b/src/maple_be/include/cg/loop.h @@ -167,13 +167,13 @@ class CGFuncLoops { } void AddLoopMembers(BB &bb) { - loopMembers.push_back(&bb); + loopMembers.emplace_back(&bb); } void AddBackedge(BB &bb) { - backedge.push_back(&bb); + backedge.emplace_back(&bb); } void AddInnerLoops(CGFuncLoops &loop) { - innerLoops.push_back(&loop); + innerLoops.emplace_back(&loop); } void SetHeader(BB &bb) { header = &bb; diff --git a/src/maple_be/include/cg/lsda.h b/src/maple_be/include/cg/lsda.h index f2913ff1c4c170c19201452636c47803e2788155..8eebd31628cbae2a31b1ac56061493a61a0235fb 100644 --- a/src/maple_be/include/cg/lsda.h +++ b/src/maple_be/include/cg/lsda.h @@ -152,7 +152,7 @@ class LSDACallSiteTable { } void PushBack(LSDACallSite &lsdaCallSite) { - callSiteTable.push_back(&lsdaCallSite); + callSiteTable.emplace_back(&lsdaCallSite); } const LabelPair &GetCSTable() const { @@ -234,7 +234,7 @@ class LSDAActionTable { } void PushBack(LSDAAction &lsdaAction) { - actionTable.push_back(&lsdaAction); + actionTable.emplace_back(&lsdaAction); } size_t Size() const { diff --git a/src/maple_be/include/cg/memlayout.h b/src/maple_be/include/cg/memlayout.h index 90c5cfd06fca9fa604271ecedf0618a200c65a7c..afdb744b0ae256a57ee7e735835cffa60704a92e 100644 --- a/src/maple_be/include/cg/memlayout.h +++ b/src/maple_be/include/cg/memlayout.h @@ -127,7 +127,7 @@ class MemLayout { segArgsToStkPass(kMsArgsToStkPass), symAllocTable(mallocator.Adapter()), spillLocTable(mallocator.Adapter()), - spillRegLocMap(std::less(), mallocator.Adapter()), + spillRegLocMap(mallocator.Adapter()), localRefLocMap(std::less(), mallocator.Adapter()), memAllocator(&mallocator) { symAllocTable.resize(mirFunc.GetSymTab()->GetSymbolTableSize()); @@ -246,7 +246,7 @@ class MemLayout { MemSegment segArgsToStkPass; MapleVector symAllocTable; /* index is stindex from StIdx */ MapleVector spillLocTable; /* index is preg idx */ - MapleMap spillRegLocMap; + MapleUnorderedMap spillRegLocMap; MapleMap localRefLocMap; /* localrefvar formals. real address passed in stack. 
*/ MapleAllocator *memAllocator; CGFunc *cgFunc = nullptr; diff --git a/src/maple_be/include/cg/pressure.h b/src/maple_be/include/cg/pressure.h index 8b100284dbfef45cc540ba168dbd10053df0deb8..7ccce1ffcf459fd06be7a1231528f6f19fc095a2 100644 --- a/src/maple_be/include/cg/pressure.h +++ b/src/maple_be/include/cg/pressure.h @@ -37,16 +37,15 @@ class RegPressure { void DumpRegPressure() const; - void SetRegUses(regno_t regNO, RegList *regList) { - regUses.insert(std::make_pair(regNO, regList)); + void SetRegUses(RegList *regList) { + regUses.emplace_back(regList); } - void SetRegDefs(regno_t regNO, RegList *regList) { - auto it = regDefs.find(regNO); - if (it == regDefs.end()) { - regDefs.insert(std::make_pair(regNO, regList)); + void SetRegDefs(size_t idx, RegList *regList) { + if (idx < regDefs.size()) { + regDefs[idx] = regList; } else { - it->second = regList; + regDefs.emplace_back(regList); } } @@ -115,19 +114,52 @@ class RegPressure { ++deadDefNum[index]; } - const MapleUnorderedMap &GetRegUses() const { - return regUses; + RegList *GetRegUses(size_t idx) const { + return regUses[idx]; } - const MapleUnorderedMap &GetRegDefs() const { - return regDefs; + void InitRegUsesSize(size_t size) { + regUses.reserve(size); + } + + RegList *GetRegDefs(size_t idx) const { + return regDefs[idx]; + } + + void InitRegDefsSize(size_t size) { + regDefs.reserve(size); + } + + void SetHasPreg(bool value) { + hasPreg = value; + } + + bool GetHasPreg() const { + return hasPreg; + } + + void SetNumCall(int32 value) { + callNum = value; + } + + int32 GetNumCall() const { + return callNum; + } + + void SetHasNativeCallRegister(bool value) { + hasNativeCallRegister = value; + } + + bool GetHasNativeCallRegister() const { + return hasNativeCallRegister; } private: /* save reglist of every uses'register */ - MapleUnorderedMap regUses; + MapleVector regUses; /* save reglist of every defs'register */ - MapleUnorderedMap regDefs; + MapleVector regDefs; + /* the number of the node needs registers */ MapleVector pressure; /* the count of dead define registers */ @@ -137,8 +169,14 @@ class RegPressure { int32 priority = 0; int32 maxDepth = 0; int32 near = 0; + /* the number of successor call */ + int32 callNum = 0; /* if a type register increase then set incPressure as true. 
*/ bool incPressure = false; + /* if define physical register, set hasPreg as true */ + bool hasPreg = false; + /* it is call native special register */ + bool hasNativeCallRegister = false; }; } /* namespace maplebe */ diff --git a/src/maple_be/mdgen/include/mdrecord.h b/src/maple_be/mdgen/include/mdrecord.h index 7185712fdc6a093b4197b8693beec12f31dc5472..29ce022bbe4630fbc34656aadd175b4cfbf74765 100644 --- a/src/maple_be/mdgen/include/mdrecord.h +++ b/src/maple_be/mdgen/include/mdrecord.h @@ -164,7 +164,7 @@ class VecElement : public MDElement { ~VecElement() override = default; void appendElement(MDElement *curElement) { - vecData.push_back(curElement); + vecData.emplace_back(curElement); } const maple::MapleVector GetVecData() const { @@ -195,7 +195,7 @@ class MDObject { const MDElement *GetOneMDElement(size_t index) const; void AddMDElements(MDElement* curElement) { - mdElements.push_back(curElement); + mdElements.emplace_back(curElement); } unsigned int GetIdx() const { diff --git a/src/maple_be/mdgen/src/mdrecord.cpp b/src/maple_be/mdgen/src/mdrecord.cpp index d98966e5de0e10037d84becbe375e819aaa9a524..88b0602b3cb3967fed816516deb503c565e7fdaa 100644 --- a/src/maple_be/mdgen/src/mdrecord.cpp +++ b/src/maple_be/mdgen/src/mdrecord.cpp @@ -44,7 +44,7 @@ const MDObject &MDClass::GetOneMDObject(size_t index) const { } void MDClass::AddClassMember(MDObject inputObj) { - mdObjects.push_back(inputObj); + mdObjects.emplace_back(inputObj); childObjNames.insert(inputObj.GetIdx()); } @@ -53,7 +53,7 @@ bool MDClass::IsClassMember(unsigned int curIdx) const { } void MDClass::BuildFormalTypes(unsigned int memberIdx, bool isVec) { - formalTypes.push_back(std::make_pair(memberIdx, isVec)); + formalTypes.emplace_back(std::make_pair(memberIdx, isVec)); } bool MDClass::IsValidStructEle(RecordType curTy) const { @@ -66,7 +66,7 @@ unsigned int MDClassRange::CreateStrInTable(const std::string &inStr, RecordType auto ret = stringHashTable.insert(std::make_pair(inStr, curInfo)); if (ret.second) { unsigned int temp = totalStr; - stringTable.push_back(inStr); + stringTable.emplace_back(inStr); ++totalStr; return temp; } diff --git a/src/maple_be/src/ad/mad.cpp b/src/maple_be/src/ad/mad.cpp index e6e40b4d0d784fa9c33ec91c112e6dda7bbb3e6c..7e03ddd40f12953d06e6548e6a4b58d4198b232a 100644 --- a/src/maple_be/src/ad/mad.cpp +++ b/src/maple_be/src/ad/mad.cpp @@ -36,7 +36,7 @@ Unit::Unit(enum UnitType theUnitType, enum UnitId theUnitId, int numOfUnits, ... va_start(ap, numOfUnits); for (int i = 0; i < numOfUnits; ++i) { - compositeUnits.push_back(static_cast(va_arg(ap, Unit*))); + compositeUnits.emplace_back(static_cast(va_arg(ap, Unit*))); } va_end(ap); diff --git a/src/maple_be/src/be/becommon.cpp b/src/maple_be/src/be/becommon.cpp index f7d9e2dcd9a4fb3933d811e5d180794ffe5b6651..67d77c17c0c8e280666a37b4dc4cf286626794f4 100644 --- a/src/maple_be/src/be/becommon.cpp +++ b/src/maple_be/src/be/becommon.cpp @@ -180,7 +180,7 @@ void BECommon::ComputeClassTypeSizesAligns(MIRType &ty, const TyIdx &tyIdx, uint JClassLayout *layout = mirModule.GetMemPool()->New(mirModule.GetMPAllocator().Adapter()); /* add parent's record to the front */ - layout->push_back(JClassFieldInfo(false, false, false, allocedSize)); + layout->emplace_back(JClassFieldInfo(false, false, false, allocedSize)); /* copy parent's layout plan into my plan */ if (HasJClassLayout(*parentType)) { /* parent may have incomplete type definition. 
*/ const JClassLayout &parentLayout = GetJClassLayout(*parentType); @@ -587,14 +587,14 @@ bool BECommon::TyIsInSizeAlignTable(const MIRType &ty) const { void BECommon::AddAndComputeSizeAlign(MIRType &ty) { CHECK_FATAL(ty.GetTypeIndex() == typeSizeTable.size(), "make sure the ty idx is exactly the table size"); - tableAlignTable.push_back(mirModule.IsCModule()); - typeSizeTable.push_back(0); + tableAlignTable.emplace_back(mirModule.IsCModule()); + typeSizeTable.emplace_back(0); ComputeTypeSizesAligns(ty); } void BECommon::AddElementToJClassLayout(MIRClassType &klass, JClassFieldInfo info) { JClassLayout &layout = *(jClassLayoutTable.at(&klass)); - layout.push_back(info); + layout.emplace_back(info); } MIRType *BECommon::BeGetOrCreatePointerType(const MIRType &pointedType) { diff --git a/src/maple_be/src/be/lower.cpp b/src/maple_be/src/be/lower.cpp index 19381b9f415797c9f532a906a2d9ec055266c750..056ef9bb52e0738ce85ae4829cd5922bda1dc830 100644 --- a/src/maple_be/src/be/lower.cpp +++ b/src/maple_be/src/be/lower.cpp @@ -132,13 +132,13 @@ void CGLowerer::RegisterExternalLibraryFunctions() { argSt->SetStorageClass(kScFormal); argSt->SetSKind(kStVar); func->GetSymTab()->AddToStringSymbolMap(*argSt); - formals.push_back(argSt); + formals.emplace_back(argSt); } func->UpdateFuncTypeAndFormalsAndReturnType(formals, retTy->GetTypeIndex(), false); auto *funcType = func->GetMIRFuncType(); ASSERT(funcType != nullptr, "null ptr check"); beCommon.AddTypeSizeAndAlign(funcType->GetTypeIndex(), GetPrimTypeSize(funcType->GetPrimType())); - extFuncs.push_back(std::pair(id, func->GetPuidx())); + extFuncs.emplace_back(std::pair(id, func->GetPuidx())); } } @@ -320,8 +320,8 @@ BaseNode *CGLowerer::LowerArrayForLazyBiding(BaseNode &baseNode, BaseNode &offse CGOptions::IsLazyBinding())) { /* for decouple static or lazybinding def/undef tables, replace it with intrinsic */ MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); - args.push_back(&baseNode); - args.push_back(&offsetNode); + args.emplace_back(&baseNode); + args.emplace_back(&offsetNode); return mirBuilder->CreateExprIntrinsicop(INTRN_MPL_READ_STATIC_OFFSET_TAB, OP_intrinsicop, *GlobalTables::GetTypeTable().GetPrimType(parent.GetPrimType()), args); } @@ -714,8 +714,8 @@ BaseNode *CGLowerer::LowerRem(BaseNode &expr, BlockNode &blk) { MIRSymbol *ret = CreateNewRetVar(*GlobalTables::GetTypeTable().GetPrimType(remExpr.GetPrimType()), kIntrnRetValPrefix); MapleVector args(mirModule.GetMIRBuilder()->GetCurrentFuncCodeMpAllocator()->Adapter()); - args.push_back(remExpr.Opnd(0)); - args.push_back(remExpr.Opnd(1)); + args.emplace_back(remExpr.Opnd(0)); + args.emplace_back(remExpr.Opnd(1)); CallNode *callStmt = mirModule.GetMIRBuilder()->CreateStmtCallAssigned(extFuncs[i].second, args, ret); blk.AppendStatementsFromBlock(*LowerCallAssignedStmt(*callStmt)); MIRType *type = GlobalTables::GetTypeTable().GetPrimType(extFnDescrs[fmodFunc].retType); @@ -1154,8 +1154,8 @@ StmtNode *CGLowerer::LowerCall(CallNode &callNode, StmtNode *&nextStmt, BlockNod if (needCheckStore) { MIRFunction *fn = mirModule.GetMIRBuilder()->GetOrCreateFunction("MCC_Reflect_Check_Arraystore", TyIdx(PTY_void)); MapleVector args(mirModule.GetMIRBuilder()->GetCurrentFuncCodeMpAllocator()->Adapter()); - args.push_back(callNode.Opnd(0)); - args.push_back(callNode.Opnd(kNodeThirdOpnd)); + args.emplace_back(callNode.Opnd(0)); + args.emplace_back(callNode.Opnd(kNodeThirdOpnd)); StmtNode *checkStoreStmt = mirModule.GetMIRBuilder()->CreateStmtCall(fn->GetPuidx(), args); 
newBlk.AddStatement(checkStoreStmt); } @@ -1207,9 +1207,9 @@ StmtNode *CGLowerer::LowerCall(CallNode &callNode, StmtNode *&nextStmt, BlockNod addrofNode->SetPrimType(LOWERED_PTR_TYPE); addrofNode->SetStIdx(dsgnSt->GetStIdx()); addrofNode->SetFieldID(0); - newNopnd.push_back(addrofNode); + newNopnd.emplace_back(addrofNode); for (auto *opnd : callNode.GetNopnd()) { - newNopnd.push_back(opnd); + newNopnd.emplace_back(opnd); } callNode.SetNOpnd(newNopnd); callNode.SetNumOpnds(static_cast(newNopnd.size())); @@ -1231,10 +1231,10 @@ void CGLowerer::LowerEntry(MIRFunction &func) { retSt->SetTyIdx(pointType->GetTypeIndex()); std::vector formals; - formals.push_back(retSt); + formals.emplace_back(retSt); for (uint32 i = 0; i < func.GetFormalCount(); ++i) { auto formal = func.GetFormal(i); - formals.push_back(formal); + formals.emplace_back(formal); } func.UpdateFuncTypeAndFormalsAndReturnType(formals, TyIdx(PTY_void), true); @@ -1424,7 +1424,7 @@ MIRFunction *CGLowerer::RegisterFunctionVoidStarToVoid(BuiltinFunctionID id, con argSt->SetSKind(kStVar); func->GetSymTab()->AddToStringSymbolMap(*argSt); std::vector formals; - formals.push_back(argSt); + formals.emplace_back(argSt); if ((name == "MCC_SyncEnterFast0") || (name == "MCC_SyncEnterFast1") || (name == "MCC_SyncEnterFast2") || (name == "MCC_SyncEnterFast3") || (name == "MCC_SyncExitFast")) { @@ -1434,7 +1434,7 @@ MIRFunction *CGLowerer::RegisterFunctionVoidStarToVoid(BuiltinFunctionID id, con argStMatch->SetStorageClass(kScFormal); argStMatch->SetSKind(kStVar); func->GetSymTab()->AddToStringSymbolMap(*argStMatch); - formals.push_back(argStMatch); + formals.emplace_back(argStMatch); } func->UpdateFuncTypeAndFormalsAndReturnType(formals, GlobalTables::GetTypeTable().GetVoid()->GetTypeIndex(), false); @@ -1442,7 +1442,7 @@ MIRFunction *CGLowerer::RegisterFunctionVoidStarToVoid(BuiltinFunctionID id, con ASSERT(funcType != nullptr, "null ptr check"); beCommon.AddTypeSizeAndAlign(funcType->GetTypeIndex(), GetPrimTypeSize(funcType->GetPrimType())); - builtinFuncIDs.push_back(std::pair(id, func->GetPuidx())); + builtinFuncIDs.emplace_back(std::pair(id, func->GetPuidx())); return func; } @@ -1480,14 +1480,14 @@ void CGLowerer::RegisterBuiltIns() { argSt->SetStorageClass(kScFormal); argSt->SetSKind(kStVar); func->GetSymTab()->AddToStringSymbolMap(*argSt); - formals.push_back(argSt); + formals.emplace_back(argSt); } func->UpdateFuncTypeAndFormalsAndReturnType(formals, retTy->GetTypeIndex(), false); auto *funcType = func->GetMIRFuncType(); ASSERT(funcType != nullptr, "null ptr check"); beCommon.AddTypeSizeAndAlign(funcType->GetTypeIndex(), GetPrimTypeSize(funcType->GetPrimType())); - builtinFuncIDs.push_back(std::pair(id, func->GetPuidx())); + builtinFuncIDs.emplace_back(std::pair(id, func->GetPuidx())); } /* register __builtin_sync_enter */ @@ -1611,8 +1611,8 @@ void CGLowerer::ProcessArrayExpr(BaseNode &expr, BlockNode &blkNode) { CondGotoNode *brFalseNode = mirBuilder->CreateStmtCondGoto(cond, OP_brfalse, labIdx); MIRFunction *fn = mirBuilder->GetOrCreateFunction("MCC_Array_Boundary_Check", TyIdx(PTY_void)); MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); - args.push_back(arrayNode.GetNopndAt(0)); - args.push_back(arrayNode.GetNopndAt(1)); + args.emplace_back(arrayNode.GetNopndAt(0)); + args.emplace_back(arrayNode.GetNopndAt(1)); boundaryCheckStmt = mirBuilder->CreateStmtCall(fn->GetPuidx(), args); blkNode.InsertAfter(blkNode.GetLast(), lenRegassignNode); blkNode.InsertAfter(blkNode.GetLast(), brFalseNode); @@ -1814,8 +1814,8 @@ 
StmtNode *CGLowerer::LowerIntrinsicopDassign(const DassignNode &dsNode, CHECK_FATAL(intrinDesc->IsJsOp(), "intrinDesc should be JsOp"); /* setup parameters */ for (uint32 i = 0; i < nOpnds.size(); ++i) { - fnTyVec.push_back(GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_a32)->GetTypeIndex()); - fnTaVec.push_back(TypeAttrs()); + fnTyVec.emplace_back(GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_a32)->GetTypeIndex()); + fnTaVec.emplace_back(TypeAttrs()); BaseNode *addrNode = beCommon.GetAddressOfNode(*nOpnds[i]); CHECK_FATAL(addrNode != nullptr, "addrNode should not be nullptr"); nOpnds[i] = addrNode; @@ -1830,7 +1830,7 @@ StmtNode *CGLowerer::LowerIntrinsicopDassign(const DassignNode &dsNode, CHECK_FATAL(dsNode.GetFieldID() == 0, "dsNode's filedId should equal"); AddrofNode *addrofNode = mirBuilder->CreateAddrof(*dst, PTY_a32); MapleVector newOpnd(mirModule.CurFuncCodeMemPoolAllocator()->Adapter()); - newOpnd.push_back(addrofNode); + newOpnd.emplace_back(addrofNode); newOpnd.insert(newOpnd.end(), nOpnds.begin(), nOpnds.end()); CallNode *callStmt = mirModule.CurFuncCodeMemPool()->New(mirModule, OP_call); callStmt->SetPUIdx(st->GetFunction()->GetPuidx()); @@ -1869,8 +1869,8 @@ BaseNode *CGLowerer::LowerJavascriptIntrinsicop(IntrinsicopNode &intrinNode, con CHECK_FATAL(desc.IsJsOp(), "desc should be jsOp"); /* setup parameters */ for (uint32 i = 0; i < nOpnds.size(); ++i) { - fnTyVec.push_back(GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_a32)->GetTypeIndex()); - fnTaVec.push_back(TypeAttrs()); + fnTyVec.emplace_back(GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_a32)->GetTypeIndex()); + fnTaVec.emplace_back(TypeAttrs()); BaseNode *addrNode = beCommon.GetAddressOfNode(*nOpnds[i]); CHECK_FATAL(addrNode != nullptr, "can not get address"); nOpnds[i] = addrNode; @@ -1889,7 +1889,7 @@ BaseNode *CGLowerer::LowerJavascriptIntrinsicop(IntrinsicopNode &intrinNode, con fn->SetMIRFuncType(static_cast(fnType)); AddrofNode *addrofNode = mirBuilder->CreateAddrof(*tmpSt, PTY_a32); MapleVector newOpnd(mirModule.CurFuncCodeMemPoolAllocator()->Adapter()); - newOpnd.push_back(addrofNode); + newOpnd.emplace_back(addrofNode); newOpnd.insert(newOpnd.end(), nOpnds.begin(), nOpnds.end()); CallNode *callStmt = mirModule.CurFuncCodeMemPool()->New(mirModule, OP_call); callStmt->SetPUIdx(st->GetFunction()->GetPuidx()); @@ -1923,10 +1923,10 @@ StmtNode *CGLowerer::CreateStmtCallWithReturnValue(const IntrinsicopNode &intrin PUIdx bFunc, BaseNode *extraInfo) { MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); for (size_t i = 0; i < intrinNode.NumOpnds(); ++i) { - args.push_back(intrinNode.Opnd(i)); + args.emplace_back(intrinNode.Opnd(i)); } if (extraInfo != nullptr) { - args.push_back(extraInfo); + args.emplace_back(extraInfo); } return mirBuilder->CreateStmtCallAssigned(bFunc, args, &ret, OP_callassigned); } @@ -1935,10 +1935,10 @@ StmtNode *CGLowerer::CreateStmtCallWithReturnValue(const IntrinsicopNode &intrin BaseNode *extraInfo) { MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); for (size_t i = 0; i < intrinNode.NumOpnds(); ++i) { - args.push_back(intrinNode.Opnd(i)); + args.emplace_back(intrinNode.Opnd(i)); } if (extraInfo != nullptr) { - args.push_back(extraInfo); + args.emplace_back(extraInfo); } return mirBuilder->CreateStmtCallRegassigned(bFunc, args, retpIdx, OP_callassigned); } @@ -2196,8 +2196,8 @@ BaseNode *CGLowerer::GetClassInfoExprFromRuntime(const std::string &classInfo) { arg1->SetPrimType(PTY_ptr); MapleVector 
args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); - args.push_back(arg0); - args.push_back(arg1); + args.emplace_back(arg0); + args.emplace_back(arg1); StmtNode *getClassCall = mirBuilder->CreateStmtCallAssigned(getClassFunc, args, ret0, OP_callassigned); currentBlock->AppendStatementsFromBlock(*LowerCallAssignedStmt(*getClassCall)); classInfoExpr = mirBuilder->CreateExprDread(*voidPtrType, 0, *ret0); @@ -2226,8 +2226,8 @@ BaseNode *CGLowerer::GetClassInfoExprFromArrayClassCache(const std::string &clas ConstvalNode *offsetExpr = mirBuilder->CreateIntConst(offset, PTY_u32); AddrofNode *baseExpr = mirBuilder->CreateExprAddrof(0, *arrayClassSt, mirModule.GetMemPool()); MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); - args.push_back(baseExpr); - args.push_back(offsetExpr); + args.emplace_back(baseExpr); + args.emplace_back(offsetExpr); return mirBuilder->CreateExprIntrinsicop(INTRN_MPL_READ_ARRAYCLASS_CACHE_ENTRY, OP_intrinsicop, *GlobalTables::GetTypeTable().GetPrimType(PTY_ref), args); } @@ -2407,7 +2407,7 @@ StmtNode *CGLowerer::LowerIntrinsicRCCall(IntrinsiccallNode &intrincall) { CallNode *callStmt = mirModule.CurFuncCodeMemPool()->New(mirModule, OP_call); callStmt->SetPUIdx(intrinFuncIDs.at(intrinDesc)); for (size_t i = 0; i < intrincall.GetNopndSize(); ++i) { - callStmt->GetNopnd().push_back(intrincall.GetNopndAt(i)); + callStmt->GetNopnd().emplace_back(intrincall.GetNopndAt(i)); callStmt->SetNumOpnds(callStmt->GetNumOpnds() + 1); } return callStmt; @@ -2429,8 +2429,8 @@ void CGLowerer::LowerArrayStore(IntrinsiccallNode &intrincall, BlockNode &newBlk if (needCheckStore) { MIRFunction *fn = mirBuilder->GetOrCreateFunction("MCC_Reflect_Check_Arraystore", TyIdx(PTY_void)); MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); - args.push_back(intrincall.Opnd(0)); - args.push_back(intrincall.Opnd(kNodeThirdOpnd)); + args.emplace_back(intrincall.Opnd(0)); + args.emplace_back(intrincall.Opnd(kNodeThirdOpnd)); StmtNode *checkStoreStmt = mirBuilder->CreateStmtCall(fn->GetPuidx(), args); newBlk.AddStatement(checkStoreStmt); } @@ -2445,22 +2445,22 @@ StmtNode *CGLowerer::LowerDefaultIntrinsicCall(IntrinsiccallNode &intrincall, MI MIRType *retTy = intrinDesc->GetReturnType(); CHECK_FATAL(retTy != nullptr, "retTy should not be nullptr"); if (retTy->GetKind() == kTypeStruct) { - funcTyVec.push_back(beCommon.BeGetOrCreatePointerType(*retTy)->GetTypeIndex()); - fnTaVec.push_back(TypeAttrs()); + funcTyVec.emplace_back(beCommon.BeGetOrCreatePointerType(*retTy)->GetTypeIndex()); + fnTaVec.emplace_back(TypeAttrs()); fn.SetReturnStruct(); } for (uint32 i = 0; i < nOpnds.size(); ++i) { MIRType *argTy = intrinDesc->GetArgType(i); CHECK_FATAL(argTy != nullptr, "argTy should not be nullptr"); if (argTy->GetKind() == kTypeStruct) { - funcTyVec.push_back(GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_a32)->GetTypeIndex()); - fnTaVec.push_back(TypeAttrs()); + funcTyVec.emplace_back(GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_a32)->GetTypeIndex()); + fnTaVec.emplace_back(TypeAttrs()); BaseNode *addrNode = beCommon.GetAddressOfNode(*nOpnds[i]); CHECK_FATAL(addrNode != nullptr, "can not get address"); nOpnds[i] = addrNode; } else { - funcTyVec.push_back(argTy->GetTypeIndex()); - fnTaVec.push_back(TypeAttrs()); + funcTyVec.emplace_back(argTy->GetTypeIndex()); + fnTaVec.emplace_back(TypeAttrs()); } } MIRType *funcType = beCommon.BeGetOrCreateFunctionType(retTy->GetTypeIndex(), funcTyVec, fnTaVec); @@ -2528,7 +2528,7 @@ StmtNode 
*CGLowerer::LowerSyncEnterSyncExit(StmtNode &stmt) { ConstvalNode *exprConst = mirModule.GetMemPool()->New(); exprConst->SetPrimType(PTY_i32); exprConst->SetConstVal(intConst); - nStmt.GetNopnd().push_back(exprConst); + nStmt.GetNopnd().emplace_back(exprConst); nStmt.SetNumOpnds(nStmt.GetNopndSize()); } CHECK_FATAL(nStmt.NumOpnds() == kOperandNumBinary, "wrong args for syncenter"); @@ -2560,7 +2560,7 @@ StmtNode *CGLowerer::LowerSyncEnterSyncExit(StmtNode &stmt) { CHECK_FATAL(bFunc != kFuncNotFound, "bFunc should be found"); MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); - args.push_back(nStmt.Opnd(0)); + args.emplace_back(nStmt.Opnd(0)); return mirBuilder->CreateStmtCall(bFunc, args); } @@ -2601,7 +2601,7 @@ void CGLowerer::LowerGCMalloc(const BaseNode &node, const GCMallocNode &gcmalloc auto &dsNode = static_cast(node); MIRSymbol *ret = curFunc->GetLocalOrGlobalSymbol(dsNode.GetStIdx()); MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); - args.push_back(arg); + args.emplace_back(arg); callAssign = mirBuilder->CreateStmtCallAssigned(funcSecond->GetPuidx(), args, ret, OP_callassigned); } else { CHECK_FATAL(node.GetOpCode() == OP_regassign, "regassign expected"); @@ -2616,7 +2616,7 @@ void CGLowerer::LowerGCMalloc(const BaseNode &node, const GCMallocNode &gcmalloc if (node.GetOpCode() == OP_dassign) { MIRSymbol *ret = curFunc->GetLocalOrGlobalSymbol(static_cast(node).GetStIdx()); MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); - args.push_back(arg); + args.emplace_back(arg); callAssign = mirBuilder->CreateStmtCallAssigned(func->GetPuidx(), args, ret, OP_callassigned); } else { CHECK_FATAL(node.GetOpCode() == OP_regassign, "regassign expected"); @@ -2675,7 +2675,7 @@ void CGLowerer::LowerJarrayMalloc(const StmtNode &stmt, const JarrayMallocNode & auto *curFunc = mirModule.CurFunction(); if (isPredefinedArrayClass || (arrayCacheNode != nullptr)) { funcName = GetNewArrayFuncName(elemSize, perm); - args.push_back(node.Opnd(0)); /* n_elems */ + args.emplace_back(node.Opnd(0)); /* n_elems */ if (isPredefinedArrayClass) { GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(arrayClassInfoName); MIRSymbol *arrayClassSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( @@ -2693,23 +2693,23 @@ void CGLowerer::LowerJarrayMalloc(const StmtNode &stmt, const JarrayMallocNode & GlobalTables::GetGsymTable().AddToStringSymbolMap(*arrayClassSym); arrayClassSym->SetTyIdx((TyIdx)PTY_ptr); } - args.push_back(mirBuilder->CreateExprAddrof(0, *arrayClassSym)); + args.emplace_back(mirBuilder->CreateExprAddrof(0, *arrayClassSym)); } else { - args.push_back(arrayCacheNode); + args.emplace_back(arrayCacheNode); } } else { funcName = perm ? 
"MCC_NewPermanentArray" : "MCC_NewObj_flexible_cname"; - args.push_back(mirBuilder->CreateIntConst(elemSize, PTY_u32)); /* elem_size */ - args.push_back(node.Opnd(0)); /* n_elems */ + args.emplace_back(mirBuilder->CreateIntConst(elemSize, PTY_u32)); /* elem_size */ + args.emplace_back(node.Opnd(0)); /* n_elems */ std::string klassJavaDescriptor; namemangler::DecodeMapleNameToJavaDescriptor(klassName, klassJavaDescriptor); UStrIdx classNameStrIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(klassJavaDescriptor); ConststrNode *classNameExpr = mirModule.GetMemPool()->New(classNameStrIdx); classNameExpr->SetPrimType(PTY_ptr); - args.push_back(classNameExpr); /* class_name */ - args.push_back(GetBaseNodeFromCurFunc(*curFunc, true)); + args.emplace_back(classNameExpr); /* class_name */ + args.emplace_back(GetBaseNodeFromCurFunc(*curFunc, true)); /* set class flag 0 */ - args.push_back(mirBuilder->CreateIntConst(0, PTY_u32)); + args.emplace_back(mirBuilder->CreateIntConst(0, PTY_u32)); } MIRFunction *func = mirBuilder->GetOrCreateFunction(funcName, (TyIdx)(LOWERED_PTR_TYPE)); CallNode *callAssign = nullptr; diff --git a/src/maple_be/src/be/switch_lowerer.cpp b/src/maple_be/src/be/switch_lowerer.cpp index 5dcfb813a5ddd20d1d7d0e5c877d620aa105ce68..a29029cff0af137babad8466411e28372d8dc5b4 100644 --- a/src/maple_be/src/be/switch_lowerer.cpp +++ b/src/maple_be/src/be/switch_lowerer.cpp @@ -63,7 +63,7 @@ void SwitchLowerer::FindClusters(MapleVector &clusters) { if (((j - i) >= kClusterSwitchCutoff) && (tmp2 < kMaxRangeGotoTableSize) && ((tmp1 / tmp2) >= kClusterSwitchDensity)) { - clusters.push_back(Cluster(i, j)); + clusters.emplace_back(Cluster(i, j)); i = j; break; } @@ -75,21 +75,21 @@ void SwitchLowerer::FindClusters(MapleVector &clusters) { void SwitchLowerer::InitSwitchItems(MapleVector &clusters) { if (clusters.empty()) { for (int32 i = 0; i < static_cast(stmt->GetSwitchTable().size()); ++i) { - switchItems.push_back(SwitchItem(i, 0)); + switchItems.emplace_back(SwitchItem(i, 0)); } } else { int32 j = 0; Cluster front = clusters[j]; for (int32 i = 0; i < static_cast(stmt->GetSwitchTable().size()); ++i) { if (i == front.first) { - switchItems.push_back(SwitchItem(i, front.second)); + switchItems.emplace_back(SwitchItem(i, front.second)); i = front.second; ++j; if (static_cast(clusters.size()) > j) { front = clusters[j]; } } else { - switchItems.push_back(SwitchItem(i, 0)); + switchItems.emplace_back(SwitchItem(i, 0)); } } } diff --git a/src/maple_be/src/be/trycatchblockslower.cpp b/src/maple_be/src/be/trycatchblockslower.cpp index 989455e6d2a56ea35c17b31cced7180eadbe2f4a..46cdbba295df29fc16f617d11e24829585a2b7bb 100644 --- a/src/maple_be/src/be/trycatchblockslower.cpp +++ b/src/maple_be/src/be/trycatchblockslower.cpp @@ -16,7 +16,7 @@ namespace maplebe { BBT *TryCatchBlocksLower::CreateNewBB(StmtNode *first, StmtNode *last) { BBT *newBB = memPool.New(first, last, &memPool); - bbList.push_back(newBB); + bbList.emplace_back(newBB); return newBB; } @@ -165,17 +165,17 @@ void TryCatchBlocksLower::RecoverBasicBlock() { break; case OP_label: { LabelNode *labelStmt = static_cast(stmt); - labeledBBs.push_back(curBB); + labeledBBs.emplace_back(curBB); curBB->SetLabelIdx((LabelIdx)labelStmt->GetLabelIdx()); } break; case OP_brtrue: case OP_brfalse: - condbrBBs.push_back(curBB); + condbrBBs.emplace_back(curBB); lastBB = curBB; curBB = nullptr; break; case OP_switch: - switchBBs.push_back(curBB); + switchBBs.emplace_back(curBB); lastBB = curBB; curBB = nullptr; break; @@ -199,7 +199,7 @@ void 
TryCatchBlocksLower::RecoverBasicBlock() { openTry = curBB; prevBBOfTry[openTry] = lastBB; } else { - tryBBs.push_back(BBTPair(openTry, curBB)); + tryBBs.emplace_back(BBTPair(openTry, curBB)); openTry = nullptr; curBB->SetType(BBT::kBBEndTry, *stmt); lastBB = curBB; @@ -219,7 +219,7 @@ void TryCatchBlocksLower::RecoverBasicBlock() { CHECK_FATAL(tb != curBB, "tb should not equal curBB"); } #endif - catchBBs.push_back(curBB); + catchBBs.emplace_back(curBB); curBB->SetType(BBT::kBBCatch, *stmt); break; } @@ -272,7 +272,7 @@ bool TryCatchBlocksLower::CheckAndProcessCatchNodeInCurrTryBlock(BBT &origLowerB std::vector currBBThread; BBT *lowerBB = &origLowerBB; /* append it to the list of blocks placed after the end try block */ - currBBThread.push_back(lowerBB); + currBBThread.emplace_back(lowerBB); while (lowerBB->GetFallthruBranch() != nullptr) { lowerBB = lowerBB->GetFallthruBranch(); CHECK_FATAL(!lowerBB->IsTry(), "ebb must not be tryBB"); @@ -286,12 +286,12 @@ bool TryCatchBlocksLower::CheckAndProcessCatchNodeInCurrTryBlock(BBT &origLowerB break; } } - currBBThread.push_back(lowerBB); + currBBThread.emplace_back(lowerBB); } if (!lowerBB->IsEndTry()) { for (auto &e : currBBThread) { - bbsToRelocate.push_back(e); + bbsToRelocate.emplace_back(e); } } else { /* @@ -364,7 +364,7 @@ BBT *TryCatchBlocksLower::CollectCatchAndFallthruUntilNextCatchBB(BBT *&lowerBB, nextBBThreadHead = lowerBB; break; } - currBBThread.push_back(lowerBB); + currBBThread.emplace_back(lowerBB); } if (nextBBThreadHead == nullptr && lowerBB->GetFallthruBranch() == nullptr && lowerBB != endTryBB && @@ -381,7 +381,7 @@ BBT *TryCatchBlocksLower::CollectCatchAndFallthruUntilNextCatchBB(BBT *&lowerBB, nextBBThreadHead = lowerBB; break; } - currBBThread.push_back(lowerBB); + currBBThread.emplace_back(lowerBB); } while (nextEnclosedIdx < enclosedBBs.size()); } @@ -448,7 +448,7 @@ void TryCatchBlocksLower::WrapCatchWithTryEndTryBlock(std::vector &currBBT nextBBThreadHead = ebbSecond; break; } - currBBThread.push_back(ebbSecond); + currBBThread.emplace_back(ebbSecond); } /* normal bb. */ StmtNode *stmt = threadHead->GetFirstStmt(); @@ -526,7 +526,7 @@ void TryCatchBlocksLower::ProcessEnclosedBBBetweenTryEndTry() { if (!lowerBB->IsLabeled()) { continue; } - labeledBBsInTry.push_back(lowerBB); + labeledBBsInTry.emplace_back(lowerBB); /* * It seems the way a finally is associated with its try is to put the catch block inside @@ -547,7 +547,7 @@ void TryCatchBlocksLower::ProcessEnclosedBBBetweenTryEndTry() { } nextBBThreadHead = nullptr; currBBThread.clear(); - currBBThread.push_back(lowerBB); + currBBThread.emplace_back(lowerBB); nextBBThreadHead = CollectCatchAndFallthruUntilNextCatchBB(lowerBB, nextEnclosedIdx, currBBThread); WrapCatchWithTryEndTryBlock(currBBThread, nextBBThreadHead, nextEnclosedIdx, hasMoveEndTry); if (isFirstTime) { @@ -797,7 +797,7 @@ void TryCatchBlocksLower::TraverseBBList() { for (auto &bb : bbList) { if (bb->IsCatch() && tryEndTryBlock.GetStartTryBB() == nullptr) { /* Add to the list of catch blocks seen so far. 
*/ - catchesSeenSoFar.push_back(bb); + catchesSeenSoFar.emplace_back(bb); } bodyEndWithEndTry = false; diff --git a/src/maple_be/src/cg/aarch64/aarch64_args.cpp b/src/maple_be/src/cg/aarch64/aarch64_args.cpp index 9f1ea5517e098590d976fe0750c46184876c5715..042d0ab8ef18c27c8edc6c8f31311654fdca8eb5 100644 --- a/src/maple_be/src/cg/aarch64/aarch64_args.cpp +++ b/src/maple_be/src/cg/aarch64/aarch64_args.cpp @@ -41,7 +41,7 @@ void AArch64MoveRegArgs::CollectRegisterArgs(std::map &argsL continue; } argsList[i] = ploc.reg0; - indexList.push_back(i); + indexList.emplace_back(i); } } diff --git a/src/maple_be/src/cg/aarch64/aarch64_cg.cpp b/src/maple_be/src/cg/aarch64/aarch64_cg.cpp index 6d6f6ae18ba113f991192ee49a8de67822efa32f..c6b451f6ded3678c00e9618f6d0fca89b28737b9 100644 --- a/src/maple_be/src/cg/aarch64/aarch64_cg.cpp +++ b/src/maple_be/src/cg/aarch64/aarch64_cg.cpp @@ -127,9 +127,9 @@ namespace wordsMap { CHECK_FATAL((curOffset > prevOffset) || (prevOffset == 0), "not ascending offset"); uint32 wordIndex = curOffset >> kMapWordIndexShift; if (wordIndex > curBitmapIndex) { - bitmapWords.push_back(curBitmap); + bitmapWords.emplace_back(curBitmap); for (uint32 i = curBitmapIndex + 1; i < wordIndex; i++) { - bitmapWords.push_back(0); + bitmapWords.emplace_back(0); } curBitmap = 0; curBitmapIndex = wordIndex; @@ -150,7 +150,7 @@ namespace wordsMap { } } if (curBitmap != 0) { - bitmapWords.push_back(curBitmap); + bitmapWords.emplace_back(curBitmap); } } else if (stType.GetKind() != kTypeInterface) { /* interface doesn't have reference fields */ diff --git a/src/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp b/src/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp index 1105b40926c75c3f9eabf246aeddadadea167a2c..cf1465544304ecd5319897b948d8c488d028a832 100644 --- a/src/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp +++ b/src/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp @@ -3545,7 +3545,7 @@ void AArch64CGFunc::SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOp * in the table. 
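The push_back -> emplace_back rewrites above forward the call arguments to the element constructor. For the pointer and integer vectors touched here (bbList, indexList, bitmapWords) the two calls generate the same code; the difference only shows up for small value types such as Cluster, SwitchItem or BBTPair, and only when the constructor arguments are passed directly rather than as a pre-built temporary. A minimal sketch, with Cluster as a stand-in and not the real class:

#include <vector>

struct Cluster {                         /* stand-in for switch_lowerer's cluster record */
  int first;
  int second;
  Cluster(int f, int s) : first(f), second(s) {}
};

void Sketch() {
  std::vector<Cluster> clusters;
  clusters.push_back(Cluster(1, 4));     /* builds a temporary, then moves it into the vector */
  clusters.emplace_back(Cluster(1, 4));  /* same cost: the temporary is still built, as in the patch */
  clusters.emplace_back(1, 4);           /* constructs the element in place, no temporary */
}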
Refer to AArch64Emit::Emit() in aarch64emit.cpp */ std::vector sizeArray; - sizeArray.push_back(switchTable.size()); + sizeArray.emplace_back(switchTable.size()); MIRArrayType *arrayType = memPool->New(etype->GetTypeIndex(), sizeArray); MIRAggConst *arrayConst = memPool->New(mirModule, *arrayType); for (const auto &itPair : switchTable) { @@ -3652,12 +3652,12 @@ Operand *AArch64CGFunc::SelectMalloc(UnaryNode &node, Operand &opnd0) { std::vector opndVec; RegOperand &resOpnd = CreateRegisterOperandOfType(retType); - opndVec.push_back(&resOpnd); - opndVec.push_back(&opnd0); + opndVec.emplace_back(&resOpnd); + opndVec.emplace_back(&opnd0); /* Use calloc to make sure allocated memory is zero-initialized */ const std::string &funcName = "calloc"; Operand &opnd1 = CreateImmOperand(1, PTY_u32, false); - opndVec.push_back(&opnd1); + opndVec.emplace_back(&opnd1); SelectLibCall(funcName, opndVec, PTY_u32, retType); return &resOpnd; } @@ -4010,7 +4010,7 @@ void AArch64CGFunc::MergeReturn() { onlyExitBB->SetKind(BB::kBBFallthru); GetExitBBsVec().pop_back(); - GetExitBBsVec().push_back(retBB); + GetExitBBsVec().emplace_back(retBB); return; } } @@ -4028,7 +4028,7 @@ void AArch64CGFunc::MergeReturn() { GetCleanupBB()->PrependBB(*retBB); GetExitBBsVec().clear(); - GetExitBBsVec().push_back(retBB); + GetExitBBsVec().emplace_back(retBB); } void AArch64CGFunc::HandleRetCleanup(NaryStmtNode &retNode) { @@ -4156,7 +4156,7 @@ bool AArch64CGFunc::GenRetCleanup(const IntrinsiccallNode *cleanupNode, bool for AArch64SymbolAlloc *symLoc = static_cast(memLayout->GetSymAllocInfo(refSymbol->GetStIndex())); int32 tempOffset = GetBaseOffset(*symLoc); - offsets.push_back(tempOffset); + offsets.emplace_back(tempOffset); #ifdef CC_DEBUG_INFO LogInfo::MapleLogger() << "refsym " << refSymbol->GetName() << " offset " << tempOffset << '\n'; #endif @@ -4170,7 +4170,7 @@ bool AArch64CGFunc::GenRetCleanup(const IntrinsiccallNode *cleanupNode, bool for AArch64SymbolAlloc *symLoc = static_cast(memLayout->GetSymAllocInfo(skipSym->GetStIndex())); CHECK_FATAL(GetBaseOffset(*symLoc) < std::numeric_limits::max(), "out of range"); skipOffset = GetBaseOffset(*symLoc); - offsets.push_back(skipOffset); + offsets.emplace_back(skipOffset); #ifdef CC_DEBUG_INFO LogInfo::MapleLogger() << "skip " << skipSym->GetName() << " offset " << skipOffset << '\n'; @@ -4655,7 +4655,7 @@ void AArch64CGFunc::SelectClearStackCallParmList(const StmtNode &naryNode, AArch auto *expr = static_cast(argExpr); int64 offsetValue = 0; Operand *opnd = SelectClearStackCallParam(*expr, offsetValue); - stackPostion.push_back(offsetValue); + stackPostion.emplace_back(offsetValue); auto *expRegOpnd = static_cast(opnd); parmLocator.LocateNextParm(*ty, ploc); CHECK_FATAL(ploc.reg0 != 0, "the parameter of ClearStackCall must be passed by register"); @@ -4689,14 +4689,14 @@ void AArch64CGFunc::IntrinsifyGetAndAddInt(AArch64ListOperand &srcOpnds, PrimTyp RegOperand &tempOpnd2 = CreateRegisterOperandOfType(PTY_i32); MOperator mOp = (pty == PTY_i64) ? 
MOP_get_and_addL : MOP_get_and_addI; std::vector intrnOpnds; - intrnOpnds.push_back(&retVal); - intrnOpnds.push_back(&tempOpnd0); - intrnOpnds.push_back(&tempOpnd1); - intrnOpnds.push_back(&tempOpnd2); - intrnOpnds.push_back(objOpnd); - intrnOpnds.push_back(offOpnd); - intrnOpnds.push_back(deltaOpnd); - intrnOpnds.push_back(&targetOpnd); + intrnOpnds.emplace_back(&retVal); + intrnOpnds.emplace_back(&tempOpnd0); + intrnOpnds.emplace_back(&tempOpnd1); + intrnOpnds.emplace_back(&tempOpnd2); + intrnOpnds.emplace_back(objOpnd); + intrnOpnds.emplace_back(offOpnd); + intrnOpnds.emplace_back(deltaOpnd); + intrnOpnds.emplace_back(&targetOpnd); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(mOp, intrnOpnds)); } @@ -4720,13 +4720,13 @@ void AArch64CGFunc::IntrinsifyGetAndSetInt(AArch64ListOperand &srcOpnds, PrimTyp MOperator mOp = (pty == PTY_i64) ? MOP_get_and_setL : MOP_get_and_setI; std::vector intrnOpnds; - intrnOpnds.push_back(&retVal); - intrnOpnds.push_back(&tempOpnd0); - intrnOpnds.push_back(&tempOpnd1); - intrnOpnds.push_back(objOpnd); - intrnOpnds.push_back(offOpnd); - intrnOpnds.push_back(newValueOpnd); - intrnOpnds.push_back(&targetOpnd); + intrnOpnds.emplace_back(&retVal); + intrnOpnds.emplace_back(&tempOpnd0); + intrnOpnds.emplace_back(&tempOpnd1); + intrnOpnds.emplace_back(objOpnd); + intrnOpnds.emplace_back(offOpnd); + intrnOpnds.emplace_back(newValueOpnd); + intrnOpnds.emplace_back(&targetOpnd); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(mOp, intrnOpnds)); } @@ -4752,15 +4752,15 @@ void AArch64CGFunc::IntrinsifyCompareAndSwapInt(AArch64ListOperand &srcOpnds, Pr LabelOperand &label2Opnd = GetOrCreateLabelOperand(labIdx2); MOperator mOp = (pty == PTY_i32) ? MOP_compare_and_swapI : MOP_compare_and_swapL; std::vector intrnOpnds; - intrnOpnds.push_back(&retVal); - intrnOpnds.push_back(&tempOpnd0); - intrnOpnds.push_back(&tempOpnd1); - intrnOpnds.push_back(objOpnd); - intrnOpnds.push_back(offOpnd); - intrnOpnds.push_back(expectedValueOpnd); - intrnOpnds.push_back(newValueOpnd); - intrnOpnds.push_back(&label1Opnd); - intrnOpnds.push_back(&label2Opnd); + intrnOpnds.emplace_back(&retVal); + intrnOpnds.emplace_back(&tempOpnd0); + intrnOpnds.emplace_back(&tempOpnd1); + intrnOpnds.emplace_back(objOpnd); + intrnOpnds.emplace_back(offOpnd); + intrnOpnds.emplace_back(expectedValueOpnd); + intrnOpnds.emplace_back(newValueOpnd); + intrnOpnds.emplace_back(&label1Opnd); + intrnOpnds.emplace_back(&label2Opnd); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(mOp, intrnOpnds)); } @@ -4832,22 +4832,22 @@ void AArch64CGFunc::GenerateIntrnInsnForStrIndexOf(BB &bb, RegOperand &srcString immStringBaseOffset)); auto &retVal = static_cast(GetTargetRetOperand(PTY_i32)); std::vector intrnOpnds; - intrnOpnds.push_back(&retVal); - intrnOpnds.push_back(&srcStringBaseOpnd); - intrnOpnds.push_back(&srcLengthOpnd); - intrnOpnds.push_back(&patternStringBaseOpnd); - intrnOpnds.push_back(&patternLengthOpnd); + intrnOpnds.emplace_back(&retVal); + intrnOpnds.emplace_back(&srcStringBaseOpnd); + intrnOpnds.emplace_back(&srcLengthOpnd); + intrnOpnds.emplace_back(&patternStringBaseOpnd); + intrnOpnds.emplace_back(&patternLengthOpnd); const uint32 tmpRegOperandNum = 6; for (uint32 i = 0; i < tmpRegOperandNum - 1; ++i) { RegOperand &tmpOpnd = CreateRegisterOperandOfType(PTY_i64); - intrnOpnds.push_back(&tmpOpnd); + intrnOpnds.emplace_back(&tmpOpnd); } - intrnOpnds.push_back(&CreateRegisterOperandOfType(PTY_i32)); + intrnOpnds.emplace_back(&CreateRegisterOperandOfType(PTY_i32)); const uint32 labelNum = 7; for (uint32 i = 
0; i < labelNum; ++i) { LabelIdx labIdx = CreateLabel(); LabelOperand &labelOpnd = GetOrCreateLabelOperand(labIdx); - intrnOpnds.push_back(&labelOpnd); + intrnOpnds.emplace_back(&labelOpnd); } bb.AppendInsn(GetCG()->BuildInstruction(MOP_string_indexof, intrnOpnds)); bb.AppendInsn(GetCG()->BuildInstruction(MOP_xuncond, GetOrCreateLabelOperand(jumpLabIdx))); @@ -5086,7 +5086,7 @@ void AArch64CGFunc::SelectReturn(Operand *opnd0) { } else if (opnd0 != nullptr) { /* pass in memory */ CHECK_FATAL(false, "SelectReturn: return in memory NYI"); } - GetExitBBsVec().push_back(GetCurBB()); + GetExitBBsVec().emplace_back(GetCurBB()); } RegOperand &AArch64CGFunc::GetOrCreateSpecialRegisterOperand(PregIdx sregIdx, PrimType primType) { @@ -5141,7 +5141,7 @@ AArch64RegOperand &AArch64CGFunc::GetOrCreatePhysicalRegisterOperand(AArch64reg } const LabelOperand *AArch64CGFunc::GetLabelOperand(LabelIdx labIdx) const { - const MapleMap::const_iterator it = hashLabelOpndTable.find(labIdx); + const MapleUnorderedMap::const_iterator it = hashLabelOpndTable.find(labIdx); if (it != hashLabelOpndTable.end()) { return it->second; } @@ -5149,7 +5149,7 @@ const LabelOperand *AArch64CGFunc::GetLabelOperand(LabelIdx labIdx) const { } LabelOperand &AArch64CGFunc::GetOrCreateLabelOperand(LabelIdx labIdx) { - MapleMap::iterator it = hashLabelOpndTable.find(labIdx); + MapleUnorderedMap::iterator it = hashLabelOpndTable.find(labIdx); if (it != hashLabelOpndTable.end()) { return *(it->second); } @@ -5427,8 +5427,8 @@ void AArch64CGFunc::SelectLibCall(const std::string &funcName, std::vector vec; std::vector vecAt; for (size_t i = 1; i < opndVec.size(); ++i) { - vec.push_back(GlobalTables::GetTypeTable().GetTypeTable()[static_cast(primType)]->GetTypeIndex()); - vecAt.push_back(TypeAttrs()); + vec.emplace_back(GlobalTables::GetTypeTable().GetTypeTable()[static_cast(primType)]->GetTypeIndex()); + vecAt.emplace_back(TypeAttrs()); } MIRType *retType = GlobalTables::GetTypeTable().GetTypeTable().at(static_cast(primType)); @@ -5634,7 +5634,7 @@ void AArch64CGFunc::FreeSpillRegMem(regno_t vrNum) { } uint32 size = memOpnd->GetSize(); - MapleMap::iterator iter; + MapleUnorderedMap::iterator iter; if ((iter = reuseSpillLocMem.find(size)) != reuseSpillLocMem.end()) { iter->second->Add(*memOpnd); } else { @@ -5673,7 +5673,6 @@ MemOperand *AArch64CGFunc::GetOrCreatSpillMem(regno_t vrNum) { MemOperand *memOpnd = memPool->New(AArch64MemOperand::kAddrModeBOi, dataSize, baseOpnd, nullptr, offsetOpnd, nullptr); spillRegMemOperands.insert(std::pair(vrNum, memOpnd)); - spillRegMemOperandsAdj.insert(memOpnd); return memOpnd; } else { return p->second; @@ -6057,7 +6056,7 @@ void AArch64CGFunc::SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) { for (size_t i = 0; i < intrinsiccallNode.NumOpnds(); i++) { BaseNode *argExpr = intrinsiccallNode.Opnd(i); Operand *opnd = HandleExpr(intrinsiccallNode, *argExpr); - operands.push_back(opnd); + operands.emplace_back(opnd); if (!opnd->IsRegister()) { opnd = &LoadIntoRegister(*opnd, argExpr->GetPrimType()); } diff --git a/src/maple_be/src/cg/aarch64/aarch64_color_ra.cpp b/src/maple_be/src/cg/aarch64/aarch64_color_ra.cpp index 860330a22f4729dd506f98cbe50985c7066e9c5d..714683e0f205832845fa75f143f6fbf59b2e00ae 100644 --- a/src/maple_be/src/cg/aarch64/aarch64_color_ra.cpp +++ b/src/maple_be/src/cg/aarch64/aarch64_color_ra.cpp @@ -440,39 +440,26 @@ LiveRange *GraphColorRegAllocator::NewLiveRange() { /* Create local info for LR. return true if reg is not local. 
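The hashLabelOpndTable and reuseSpillLocMem lookups above now go through MapleUnorderedMap. These tables are keyed by small integer ids (LabelIdx, spill size) and are only queried by key, so a hash map's average O(1) find can replace the tree map's O(log n) walk without changing behaviour. A simplified get-or-create sketch with stand-in types (the real code allocates operands from the function's MemPool):

#include <cstdint>
#include <unordered_map>

using LabelIdx = uint32_t;

struct LabelOperand {                    /* simplified stand-in for the CG label operand */
  explicit LabelOperand(LabelIdx idx) : labIdx(idx) {}
  LabelIdx labIdx;
};

LabelOperand &GetOrCreateLabelOpnd(std::unordered_map<LabelIdx, LabelOperand*> &table, LabelIdx labIdx) {
  auto it = table.find(labIdx);          /* average O(1) hash lookup instead of a tree walk */
  if (it != table.end()) {
    return *it->second;
  }
  auto *opnd = new LabelOperand(labIdx); /* sketch only: the real code uses memPool->New */
  table.emplace(labIdx, opnd);
  return *opnd;
}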
*/ bool GraphColorRegAllocator::CreateLiveRangeHandleLocal(regno_t regNO, BB &bb, bool isDef) { - if (FindNotIn(bb.GetLiveInRegNO(), regNO) && FindNotIn(bb.GetLiveOutRegNO(), regNO)) { - /* - * register not in globals for the bb, so it is local. - * Compute local RA info. - */ - LocalRaInfo *lraInfo = localRegVec[bb.GetId()]; - if (lraInfo == nullptr) { - lraInfo = cgFunc->GetMemoryPool()->New(alloc); - localRegVec[bb.GetId()] = lraInfo; - } - if (isDef) { - /* movk is handled by different id for use/def in the same insn. */ - lraInfo->SetDefCntElem(regNO, lraInfo->GetDefCntElem(regNO) + 1); - lraInfo->SetLocalPregMask(lraInfo->GetLocalPregMask() | (1ULL << regNO)); - } else { - lraInfo->SetUseCntElem(regNO, lraInfo->GetUseCntElem(regNO) + 1); - lraInfo->SetLocalPregMask(lraInfo->GetLocalPregMask() | (1ULL << regNO)); - } - /* lr info is useful for lra, so continue lr info */ - return false; + if (FindIn(bb.GetLiveInRegNO(), regNO) || FindIn(bb.GetLiveOutRegNO(), regNO)) { + return true; } - if (regNO < kNArmRegisters) { - /* This is a cross bb physical reg */ - LocalRaInfo *lraInfo = localRegVec[bb.GetId()]; - if (lraInfo == nullptr) { - lraInfo = cgFunc->GetMemoryPool()->New(alloc); - localRegVec[bb.GetId()] = lraInfo; - } - lraInfo->InsertElemToGlobalPreg(regNO); - lraInfo->SetGlobalPregMask(lraInfo->GetGlobalPregMask() | (1ULL << regNO)); - return false; + /* + * register not in globals for the bb, so it is local. + * Compute local RA info. + */ + LocalRaInfo *lraInfo = localRegVec[bb.GetId()]; + if (lraInfo == nullptr) { + lraInfo = cgFunc->GetMemoryPool()->New(alloc); + localRegVec[bb.GetId()] = lraInfo; } - return true; + if (isDef) { + /* movk is handled by different id for use/def in the same insn. */ + lraInfo->SetDefCntElem(regNO, lraInfo->GetDefCntElem(regNO) + 1); + } else { + lraInfo->SetUseCntElem(regNO, lraInfo->GetUseCntElem(regNO) + 1); + } + /* lr info is useful for lra, so continue lr info */ + return false; } LiveRange *GraphColorRegAllocator::CreateLiveRangeAllocateAndUpdate(regno_t regNO, const BB &bb, bool isDef, @@ -505,9 +492,6 @@ LiveRange *GraphColorRegAllocator::CreateLiveRangeAllocateAndUpdate(regno_t regN if (lu->GetBegin() > currId) { lu->SetBegin(currId); } - if (lu->GetEnd() < currId) { - lu->SetEnd(currId); - } } return lr; @@ -656,22 +640,23 @@ void GraphColorRegAllocator::SetupLiveRangeByRegNO(regno_t liveOut, BB &bb, uint for (const auto &vregNO : vregLive) { LiveRange *lr = lrVec[vregNO]; lr->InsertElemToPregveto(liveOut); + } - /* See if phys reg is livein also. Then assume it span the entire bb. */ - if (!FindIn(bb.GetLiveInRegNO(), liveOut)) { - continue; - } - LocalRaInfo *lraInfo = localRegVec[bb.GetId()]; - if (lraInfo == nullptr) { - lraInfo = cgFunc->GetMemoryPool()->New(alloc); - localRegVec[bb.GetId()] = lraInfo; - } - /* Make it a large enough so no locals can be allocated. */ - lraInfo->SetUseCntElem(liveOut, kMaxUint16); + /* See if phys reg is livein also. Then assume it span the entire bb. */ + if (!FindIn(bb.GetLiveInRegNO(), liveOut)) { + return; + } + LocalRaInfo *lraInfo = localRegVec[bb.GetId()]; + if (lraInfo == nullptr) { + lraInfo = cgFunc->GetMemoryPool()->New(alloc); + localRegVec[bb.GetId()] = lraInfo; } + /* Make it a large enough so no locals can be allocated. 
*/ + lraInfo->SetUseCntElem(liveOut, kMaxUint16); } -void GraphColorRegAllocator::ClassifyOperand(std::set &pregs, std::set &vregs, const Operand &opnd) { +void GraphColorRegAllocator::ClassifyOperand(std::unordered_set &pregs, std::unordered_set &vregs, + const Operand &opnd) { if (!opnd.IsRegister()) { return; } @@ -690,8 +675,8 @@ void GraphColorRegAllocator::SetOpndConflict(const Insn &insn, bool onlyDef) { return; } const AArch64MD *md = &AArch64CG::kMd[static_cast(insn).GetMachineOpcode()]; - std::set pregs; - std::set vregs; + std::unordered_set pregs; + std::unordered_set vregs; for (uint32 i = 0; i < opndNum; ++i) { Operand &opnd = insn.GetOperand(i); @@ -843,35 +828,6 @@ void GraphColorRegAllocator::ComputeLiveRangesUpdateLiveUnitInsnRange(BB &bb, ui } } -void GraphColorRegAllocator::UpdateRegLive(BB &bb, BB &succBB) { - if (FindIn(bb.GetLoopSuccs(), &succBB)) { - return; - } - for (auto regNO : succBB.GetLiveInRegNO()) { - if (IsUnconcernedReg(regNO)) { - continue; - } - if (regNO < kNArmRegisters) { - pregLive.insert(regNO); - } else { - vregLive.insert(regNO); - } - } -} - -/* find all preg and vreg of bb's succ and ehSucc */ -void GraphColorRegAllocator::ComputeLiveOut(BB &bb) { - vregLive.clear(); - pregLive.clear(); - /* No loop backedge */ - for (auto *succ : bb.GetSuccs()) { - UpdateRegLive(bb, *succ); - } - for (auto *succ : bb.GetEhSuccs()) { - UpdateRegLive(bb, *succ); - } -} - bool GraphColorRegAllocator::UpdateInsnCntAndSkipUseless(Insn &insn, uint32 &currPoint) { insn.SetId(currPoint); if (insn.IsImmaterialInsn() || !insn.IsMachineInstruction()) { @@ -913,7 +869,8 @@ void GraphColorRegAllocator::ComputeLiveRanges() { bbVec[bb->GetId()] = bb; bb->SetLevel(bbIdx - 1); - ComputeLiveOut(*bb); + pregLive.clear(); + vregLive.clear(); for (auto liveOut : bb->GetLiveOutRegNO()) { SetupLiveRangeByRegNO(liveOut, *bb, currPoint); } @@ -1051,9 +1008,9 @@ void GraphColorRegAllocator::BuildInterferenceGraphSeparateIntFp(std::vectorGetRegType() == kRegTyInt) { - intLrVec.push_back(lr); + intLrVec.emplace_back(lr); } else if (lr->GetRegType() == kRegTyFloat) { - fpLrVec.push_back(lr); + fpLrVec.emplace_back(lr); } else { ASSERT(false, "Illegal regType in BuildInterferenceGraph"); LogInfo::MapleLogger() << "error: Illegal regType in BuildInterferenceGraph\n"; @@ -1136,17 +1093,17 @@ void GraphColorRegAllocator::Separate() { if (((lr->GetNumDefs() <= 1) && (lr->GetNumUses() <= 1) && (lr->GetNumCall() > 0)) && (lr->GetFrequency() <= (cgFunc->GetFirstBB()->GetFrequency() << 1))) { if (lr->GetRegType() == kRegTyInt) { - intDelayed.push_back(lr); + intDelayed.emplace_back(lr); } else { - fpDelayed.push_back(lr); + fpDelayed.emplace_back(lr); } continue; } #endif /* OPTIMIZE_FOR_PROLOG */ if (HaveAvailableColor(*lr, lr->GetNumBBConflicts() + lr->GetPregvetoSize() + lr->GetForbiddenSize())) { - unconstrained.push_back(lr); + unconstrained.emplace_back(lr); } else { - constrained.push_back(lr); + constrained.emplace_back(lr); } } if (GCRA_DUMP) { @@ -1588,10 +1545,10 @@ void GraphColorRegAllocator::ComputeBBForOldSplit(LiveRange &newLr, LiveRange &o * Side effect : Adding the new forbidden regs from bbAdded into * conflictRegs if the LR can still be colored. 
*/ -bool GraphColorRegAllocator::LrCanBeColored(LiveRange &lr, BB &bbAdded, std::set &conflictRegs) { +bool GraphColorRegAllocator::LrCanBeColored(LiveRange &lr, BB &bbAdded, std::unordered_set &conflictRegs) { RegType type = lr.GetRegType(); - std::set newConflict; + std::unordered_set newConflict; auto updateConflictFunc = [&bbAdded, &conflictRegs, &newConflict, &lr, this](regno_t regNO) { /* check the real conflict in current bb */ LiveRange *conflictLr = lrVec[regNO]; @@ -1725,7 +1682,8 @@ bool GraphColorRegAllocator::SplitLrShouldSplit(LiveRange &lr) { * Initially newLr is empty, then add bb if can be colored. * Return true if there is a split. */ -bool GraphColorRegAllocator::SplitLrFindCandidateLr(LiveRange &lr, LiveRange &newLr, std::set &conflictRegs) { +bool GraphColorRegAllocator::SplitLrFindCandidateLr(LiveRange &lr, LiveRange &newLr, + std::unordered_set &conflictRegs) { if (GCRA_DUMP) { LogInfo::MapleLogger() << "start split lr for vreg " << lr.GetRegNO() << "\n"; } @@ -1848,7 +1806,7 @@ void GraphColorRegAllocator::SplitLrUpdateInterference(LiveRange &lr) { } void GraphColorRegAllocator::SplitLrUpdateRegInfo(LiveRange &origLr, LiveRange &newLr, - std::set &conflictRegs) { + std::unordered_set &conflictRegs) { for (regno_t regNO = kInvalidRegNO; regNO < kMaxRegNum; ++regNO) { if (origLr.GetPregveto(regNO)) { newLr.InsertElemToPregveto(regNO); @@ -1882,7 +1840,7 @@ void GraphColorRegAllocator::SplitLr(LiveRange &lr) { * is using to the conflict register set indicating that these * registers cannot be used for the new LR's color. */ - std::set conflictRegs; + std::unordered_set conflictRegs; if (!SplitLrFindCandidateLr(lr, *newLr, conflictRegs)) { return; } @@ -2643,7 +2601,8 @@ Insn *GraphColorRegAllocator::SpillOperand(Insn &insn, const Operand &opnd, bool } /* Try to find available reg for spill. */ -bool GraphColorRegAllocator::SetAvailableSpillReg(std::set &cannotUseReg, LiveRange &lr, uint64 &usedRegMask) { +bool GraphColorRegAllocator::SetAvailableSpillReg(std::unordered_set &cannotUseReg, LiveRange &lr, + uint64 &usedRegMask) { bool isInt = (lr.GetRegType() == kRegTyInt); regno_t base = isInt ? R0 : V0; uint32 pregInterval = isInt ? 0 : (V0 - R30); @@ -2669,7 +2628,7 @@ bool GraphColorRegAllocator::SetAvailableSpillReg(std::set &cannotUseRe return false; } -void GraphColorRegAllocator::CollectCannotUseReg(std::set &cannotUseReg, LiveRange &lr, Insn &insn) { +void GraphColorRegAllocator::CollectCannotUseReg(std::unordered_set &cannotUseReg, LiveRange &lr, Insn &insn) { /* Find the bb in the conflict LR that actually conflicts with the current bb. 
*/ for (regno_t regNO = kRinvalid; regNO < kMaxRegNum; ++regNO) { if (lr.GetPregveto(regNO)) { @@ -2747,7 +2706,7 @@ regno_t GraphColorRegAllocator::PickRegForSpill(uint64 &usedRegMask, RegType reg /* return true if need extra spill */ bool GraphColorRegAllocator::SetRegForSpill(LiveRange &lr, Insn &insn, uint32 spillIdx, uint64 &usedRegMask, bool isDef) { - std::set cannotUseReg; + std::unordered_set cannotUseReg; /* SPILL COALESCE */ if (!isDef && (insn.GetMachineOpcode() == MOP_xmovrr || insn.GetMachineOpcode() == MOP_wmovrr)) { auto &ropnd = static_cast(insn.GetOperand(0)); diff --git a/src/maple_be/src/cg/aarch64/aarch64_dependence.cpp b/src/maple_be/src/cg/aarch64/aarch64_dependence.cpp index d26474c2d7b544c4d8de126eb30b8d8bcb1cfd8d..1ad74e5b745ce22ea3ba76fa940591f9e336213f 100644 --- a/src/maple_be/src/cg/aarch64/aarch64_dependence.cpp +++ b/src/maple_be/src/cg/aarch64/aarch64_dependence.cpp @@ -21,8 +21,7 @@ namespace maplebe { /* constructor */ AArch64DepAnalysis::AArch64DepAnalysis(CGFunc &func, MemPool &mp, MAD &mad, bool beforeRA) - : DepAnalysis(func, mp, mad, beforeRA), useRegnos(alloc.Adapter()), - defRegnos(alloc.Adapter()), stackUses(alloc.Adapter()), + : DepAnalysis(func, mp, mad, beforeRA), stackUses(alloc.Adapter()), stackDefs(alloc.Adapter()), heapUses(alloc.Adapter()), heapDefs(alloc.Adapter()), mayThrows(alloc.Adapter()), ambiInsns(alloc.Adapter()), ehInRegs(alloc.Adapter()) { @@ -77,6 +76,14 @@ void AArch64DepAnalysis::AppendRegUseList(Insn &insn, regno_t regNO) { regList->next = nullptr; if (regUses[regNO] == nullptr) { regUses[regNO] = regList; + if (beforeRA) { + Insn *defInsn = regDefs[regNO]; + if (defInsn == nullptr) { + return; + } + DepNode *defNode = defInsn->GetDepNode(); + defNode->SetRegDefs(regNO, regList); + } return; } RegList *lastRegList = regUses[regNO]; @@ -144,7 +151,8 @@ void AArch64DepAnalysis::RemoveSelfDeps(Insn &insn) { /* Build dependences of source register operand. */ void AArch64DepAnalysis::BuildDepsUseReg(Insn &insn, regno_t regNO) { - useRegnos.push_back(regNO); + DepNode *node = insn.GetDepNode(); + node->AddUseReg(regNO); if (regDefs[regNO] != nullptr) { /* Build true dependences. */ AddDependence(*regDefs[regNO]->GetDepNode(), *insn.GetDepNode(), kDependenceTypeTrue); @@ -153,17 +161,18 @@ void AArch64DepAnalysis::BuildDepsUseReg(Insn &insn, regno_t regNO) { /* Build dependences of destination register operand. */ void AArch64DepAnalysis::BuildDepsDefReg(Insn &insn, regno_t regNO) { - defRegnos.push_back(regNO); + DepNode *node = insn.GetDepNode(); + node->AddDefReg(regNO); /* Build anti dependences. */ RegList *regList = regUses[regNO]; while (regList != nullptr) { CHECK_NULL_FATAL(regList->insn); - AddDependence(*regList->insn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeAnti); + AddDependence(*regList->insn->GetDepNode(), *node, kDependenceTypeAnti); regList = regList->next; } /* Build output depnedence. */ if (regDefs[regNO] != nullptr) { - AddDependence(*regDefs[regNO]->GetDepNode(), *insn.GetDepNode(), kDependenceTypeOutput); + AddDependence(*regDefs[regNO]->GetDepNode(), *node, kDependenceTypeOutput); } } @@ -317,7 +326,7 @@ void AArch64DepAnalysis::CombineDependence(DepNode &firstNode, DepNode &secondNo */ void AArch64DepAnalysis::BuildDepsAmbiInsn(Insn &insn) { AddDependence4InsnInVectorByType(mayThrows, insn, kDependenceTypeThrow); - ambiInsns.push_back(&insn); + ambiInsns.emplace_back(&insn); } /* Build dependences of may throw instructions. 
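Likewise, conflictRegs and cannotUseReg above become std::unordered_set. The surrounding hunks only test membership on these sets, so iteration order does not matter and hashing is sufficient. A hedged sketch of the spill-register pick, with simplified names and a made-up sentinel value:

#include <cstdint>
#include <unordered_set>

using regno_t = uint32_t;

regno_t PickSpillReg(const std::unordered_set<regno_t> &cannotUse, regno_t base, regno_t limit) {
  for (regno_t reg = base; reg < limit; ++reg) {
    if (cannotUse.find(reg) == cannotUse.end()) {
      return reg;                        /* first register not vetoed for this spill */
    }
  }
  return 0;                              /* assumption: 0 stands for "no register available" */
}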
*/ @@ -382,11 +391,11 @@ void AArch64DepAnalysis::BuildDepsAccessStImmMem(Insn &insn, bool isDest) { AddDependence4InsnInVectorByType(heapUses, insn, kDependenceTypeAnti); /* Build output depnedence. */ AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeOutput); - heapDefs.push_back(&insn); + heapDefs.emplace_back(&insn); } else { /* Heap memory */ AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeTrue); - heapUses.push_back(&insn); + heapUses.emplace_back(&insn); } if (memBarInsn != nullptr) { AddDependence(*memBarInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeMembar); @@ -406,11 +415,11 @@ void AArch64DepAnalysis::BuildDepsUseMem(Insn &insn, MemOperand &memOpnd) { continue; } } - stackUses.push_back(&insn); + stackUses.emplace_back(&insn); } else { /* Heap memory */ AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeTrue); - heapUses.push_back(&insn); + heapUses.emplace_back(&insn); } if (memBarInsn != nullptr) { AddDependence(*memBarInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeMembar); @@ -482,7 +491,7 @@ void AArch64DepAnalysis::BuildDepsDefMem(Insn &insn, MemOperand &memOpnd) { AddDependence(*lastCallInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeControl); } } - stackDefs.push_back(&insn); + stackDefs.emplace_back(&insn); } else { /* Heap memory * Build anti dependences. @@ -490,7 +499,7 @@ void AArch64DepAnalysis::BuildDepsDefMem(Insn &insn, MemOperand &memOpnd) { AddDependence4InsnInVectorByType(heapUses, insn, kDependenceTypeAnti); /* Build output depnedence. */ AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeOutput); - heapDefs.push_back(&insn); + heapDefs.emplace_back(&insn); } if (memBarInsn != nullptr) { AddDependence(*memBarInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeMembar); @@ -510,9 +519,10 @@ void AArch64DepAnalysis::BuildDepsMemBar(Insn &insn) { /* A pseudo separator node depends all the other nodes. */ void AArch64DepAnalysis::BuildDepsSeparator(DepNode &newSepNode, MapleVector &nodes) { - uint32 nextSepIndex = (separatorIndex + kMaxDependenceNum) < nodes.size() - ? (separatorIndex + kMaxDependenceNum) - : nodes.size() - 1; + uint32 nextSepIndex = (separatorIndex + kMaxDependenceNum) < nodes.size() ? (separatorIndex + kMaxDependenceNum) + : nodes.size() - 1; + newSepNode.ReservePreds(nextSepIndex - separatorIndex); + newSepNode.ReserveSuccs(nextSepIndex - separatorIndex); for (uint32 i = separatorIndex; i < nextSepIndex; ++i) { AddDependence(*nodes[i], newSepNode, kDependenceTypeSeparator); } @@ -597,7 +607,7 @@ void AArch64DepAnalysis::BuildDepsDirtyStack(Insn &insn) { AddDependence4InsnInVectorByType(stackUses, insn, kDependenceTypeAnti); /* Build output depnedence. */ AddDependence4InsnInVectorByType(stackDefs, insn, kDependenceTypeOutput); - stackDefs.push_back(&insn); + stackDefs.emplace_back(&insn); } /* Some call insns may use all stack memory, such as "bl MCC_CleanupLocalStackRef_NaiveRCFast". */ @@ -615,7 +625,7 @@ void AArch64DepAnalysis::BuildDepsDirtyHeap(Insn &insn) { if (memBarInsn != nullptr) { AddDependence(*memBarInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeMembar); } - heapDefs.push_back(&insn); + heapDefs.emplace_back(&insn); } /* Build a pseudo node to seperate dependence graph. 
*/ @@ -623,23 +633,37 @@ DepNode *AArch64DepAnalysis::BuildSeparatorNode() { Insn &pseudoSepInsn = cgFunc.GetCG()->BuildInstruction(MOP_pseudo_dependence_seperator); DepNode *separatorNode = memPool.New(pseudoSepInsn, alloc); separatorNode->SetType(kNodeTypeSeparator); + pseudoSepInsn.SetDepNode(*separatorNode); if (beforeRA) { RegPressure *regPressure = memPool.New(alloc); separatorNode->SetRegPressure(*regPressure); + separatorNode->InitPressure(); } return separatorNode; } /* Init depAnalysis data struction */ void AArch64DepAnalysis::Init(BB &bb, MapleVector &nodes) { + curBB = &bb; ClearAllDepData(); lastComments.clear(); /* Analysis live-in registers in catch BB. */ AnalysisAmbiInsns(bb); /* Clear all dependence nodes and push the first separator node. */ nodes.clear(); - nodes.push_back(BuildSeparatorNode()); + DepNode *pseudoSepNode = BuildSeparatorNode(); + nodes.emplace_back(pseudoSepNode); separatorIndex = 0; + + if (beforeRA) { + /* assump first pseudo_dependence_seperator insn of current bb define live-in's registers */ + Insn *pseudoSepInsn = pseudoSepNode->GetInsn(); + for (auto ®NO : bb.GetLiveInRegNO()) { + regDefs[regNO] = pseudoSepInsn; + pseudoSepNode->AddDefReg(regNO); + pseudoSepNode->SetRegDefs(pseudoSepNode->GetDefRegnos().size(), nullptr); + } + } } /* When a separator build, it is the same as a new basic block. */ @@ -850,8 +874,19 @@ void AArch64DepAnalysis::SeperateDependenceGraph(MapleVector &nodes, u /* Add a pseudo node to seperate dependence graph. */ DepNode *separatorNode = BuildSeparatorNode(); separatorNode->SetIndex(nodeSum); - nodes.push_back(separatorNode); + nodes.emplace_back(separatorNode); BuildDepsSeparator(*separatorNode, nodes); + + if (beforeRA) { + /* for all live-out register of current bb */ + for (auto ®NO : curBB->GetLiveOutRegNO()) { + if (regDefs[regNO] != nullptr) { + AppendRegUseList(*(separatorNode->GetInsn()), regNO); + separatorNode->AddUseReg(regNO); + separatorNode->SetRegUses(*regUses[regNO]); + } + } + } ClearAllDepData(); separatorIndex = nodeSum++; } @@ -873,11 +908,16 @@ DepNode *AArch64DepAnalysis::GenerateDepNode(Insn &insn, MapleVector & if (beforeRA) { RegPressure *regPressure = memPool.New(alloc); depNode->SetRegPressure(*regPressure); + depNode->InitPressure(); } depNode->SetIndex(nodeSum); - nodes.push_back(depNode); + nodes.emplace_back(depNode); insn.SetDepNode(*depNode); + constexpr size_t vectorSize = 5; + depNode->ReservePreds(vectorSize); + depNode->ReserveSuccs(vectorSize); + if (!comments.empty()) { depNode->SetComments(comments); } @@ -885,6 +925,7 @@ DepNode *AArch64DepAnalysis::GenerateDepNode(Insn &insn, MapleVector & } void AArch64DepAnalysis::BuildAmbiInsnDependency(Insn &insn) { + const auto &defRegnos = insn.GetDepNode()->GetDefRegnos(); for (const auto ®NO : defRegnos) { if (IfInAmbiRegs(regNO)) { BuildDepsAmbiInsn(insn); @@ -903,35 +944,74 @@ void AArch64DepAnalysis::BuildMayThrowInsnDependency(Insn &insn) { } } -void AArch64DepAnalysis::UpdateRegUseAndDef(Insn &insn, DepNode &depNode) { +void AArch64DepAnalysis::UpdateRegUseAndDef(Insn &insn, DepNode &depNode, MapleVector &nodes) { + const auto &useRegnos = depNode.GetUseRegnos(); + if (beforeRA) { + depNode.InitRegUsesSize(useRegnos.size()); + } for (auto regNO : useRegnos) { AppendRegUseList(insn, regNO); if (beforeRA) { - depNode.SetRegUses(regNO, *regUses[regNO]); + depNode.SetRegUses(*regUses[regNO]); + if (regDefs[regNO] == nullptr) { + regDefs[regNO] = nodes[separatorIndex]->GetInsn(); + nodes[separatorIndex]->AddDefReg(regNO); + 
nodes[separatorIndex]->SetRegDefs(nodes[separatorIndex]->GetDefRegnos().size(), regUses[regNO]); + } } } + const auto &defRegnos = depNode.GetDefRegnos(); + size_t i = 0; + if (beforeRA) { + depNode.InitRegDefsSize(defRegnos.size()); + } for (const auto regNO : defRegnos) { regDefs[regNO] = &insn; regUses[regNO] = nullptr; if (beforeRA) { - depNode.SetRegDefs(regNO, nullptr); + depNode.SetRegDefs(i, nullptr); + if (regNO >= R0 && regNO <= R3) { + depNode.SetHasPreg(true); + } else if (regNO == R8) { + depNode.SetHasNativeCallRegister(true); + } } + ++i; } } - /* Update stack and heap dependency */ void AArch64DepAnalysis::UpdateStackAndHeapDependency(DepNode &depNode, Insn &insn, const Insn &locInsn) { if (!insn.MayThrow()) { return; } depNode.SetLocInsn(locInsn); - mayThrows.push_back(&insn); + mayThrows.emplace_back(&insn); AddDependence4InsnInVectorByType(stackDefs, insn, kDependenceTypeThrow); AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeThrow); } +/* Add a separatorNode to the end of a nodes + * * before RA: add all live-out registers to this separatorNode'Uses + * */ +void AArch64DepAnalysis::AddEndSeparatorNode(MapleVector &nodes) { + DepNode *separatorNode = BuildSeparatorNode(); + nodes.emplace_back(separatorNode); + BuildDepsSeparator(*separatorNode, nodes); + + if (beforeRA) { + /* for all live-out register of current bb */ + for (auto ®NO : curBB->GetLiveOutRegNO()) { + if (regDefs[regNO] != nullptr) { + AppendRegUseList(*(separatorNode->GetInsn()), regNO); + separatorNode->AddUseReg(regNO); + separatorNode->SetRegUses(*regUses[regNO]); + } + } + } +} + /* * Build dependence graph. * 1: Build dependence nodes. @@ -953,7 +1033,7 @@ void AArch64DepAnalysis::Run(BB &bb, MapleVector &nodes) { if (!insn->IsComment()) { locInsn = insn; } else { - comments.push_back(insn); + comments.emplace_back(insn); } } else if (insn->IsCfiInsn()) { if (!nodes.empty()) { @@ -970,8 +1050,6 @@ void AArch64DepAnalysis::Run(BB &bb, MapleVector &nodes) { comments.clear(); /* Build Dependency for maythrow insn; */ BuildMayThrowInsnDependency(*insn); - useRegnos.clear(); - defRegnos.clear(); /* Build Dependency for each Operand of insn */ BuildOpndDependency(*insn); /* Build Dependency for special insn */ @@ -986,12 +1064,11 @@ void AArch64DepAnalysis::Run(BB &bb, MapleVector &nodes) { /* Seperator exists. 
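The dependence-analysis changes above make the pseudo separator nodes carry block-boundary register information before RA: the entry separator is treated as defining every live-in register and the end separator as using every live-out register, so the register-pressure model sees values that live across the block even when no instruction inside it touches them. A toy model of that bookkeeping, with the DepNode API reduced to two vectors:

#include <cstdint>
#include <set>
#include <vector>

using regno_t = uint32_t;

struct ToyDepNode {                      /* heavily simplified stand-in for DepNode */
  std::vector<regno_t> defRegNOs;
  std::vector<regno_t> useRegNOs;
};

void MarkBlockBoundary(ToyDepNode &entrySep, ToyDepNode &exitSep,
                       const std::set<regno_t> &liveIn, const std::set<regno_t> &liveOut) {
  for (regno_t regNO : liveIn) {
    entrySep.defRegNOs.emplace_back(regNO);   /* live-in value: defined "before" the block */
  }
  for (regno_t regNO : liveOut) {
    exitSep.useRegNOs.emplace_back(regNO);    /* live-out value: used "after" the block */
  }
}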
*/ AddDependence(*nodes[separatorIndex], *insn->GetDepNode(), kDependenceTypeSeparator); /* Update register use and register def */ - UpdateRegUseAndDef(*insn, *depNode); + UpdateRegUseAndDef(*insn, *depNode, nodes); } - DepNode *separatorNode = BuildSeparatorNode(); - nodes.push_back(separatorNode); - BuildDepsSeparator(*separatorNode, nodes); + AddEndSeparatorNode(nodes); + if (!comments.empty()) { lastComments = comments; } diff --git a/src/maple_be/src/cg/aarch64/aarch64_ebo.cpp b/src/maple_be/src/cg/aarch64/aarch64_ebo.cpp index 1b30b61b59ccc5d5e6754cc64839753292bf217e..5eef9ee2cd17be8398ec736f979a72e2b4c863a0 100644 --- a/src/maple_be/src/cg/aarch64/aarch64_ebo.cpp +++ b/src/maple_be/src/cg/aarch64/aarch64_ebo.cpp @@ -131,22 +131,22 @@ void AArch64Ebo::BuildCallerSaveRegisters() { callerSaveRegTable.clear(); RegOperand &phyOpndR0 = a64CGFunc->GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt); RegOperand &phyOpndV0 = a64CGFunc->GetOrCreatePhysicalRegisterOperand(V0, k64BitSize, kRegTyFloat); - callerSaveRegTable.push_back(&phyOpndR0); - callerSaveRegTable.push_back(&phyOpndV0); + callerSaveRegTable.emplace_back(&phyOpndR0); + callerSaveRegTable.emplace_back(&phyOpndV0); for (uint32 i = R1; i <= R18; i++) { RegOperand &phyOpnd = a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(i), k64BitSize, kRegTyInt); - callerSaveRegTable.push_back(&phyOpnd); + callerSaveRegTable.emplace_back(&phyOpnd); } for (uint32 i = V1; i <= V7; i++) { RegOperand &phyOpnd = a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(i), k64BitSize, kRegTyFloat); - callerSaveRegTable.push_back(&phyOpnd); + callerSaveRegTable.emplace_back(&phyOpnd); } for (uint32 i = V16; i <= V31; i++) { RegOperand &phyOpnd = a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(i), k64BitSize, kRegTyFloat); - callerSaveRegTable.push_back(&phyOpnd); + callerSaveRegTable.emplace_back(&phyOpnd); } CHECK_FATAL(callerSaveRegTable.size() < kMaxCallerSaveReg, "number of elements in callerSaveRegTable must less then 45!"); @@ -737,7 +737,7 @@ bool AArch64Ebo::SpecialSequence(Insn &insn, const MapleVector &origI /* multiple of 4 and 8 */ const int multiOfFour = 4; const int multiOfEight = 8; - is64bits = is64bits && (!static_cast(insn).CheckRefField(kInsnFirstOpnd)); + is64bits = is64bits && (!static_cast(insn).CheckRefField(kInsnFirstOpnd, false)); if ((!is64bits && (immVal < kStrLdrImm32UpperBound) && (immVal % multiOfFour == 0)) || (is64bits && (immVal < kStrLdrImm64UpperBound) && (immVal % multiOfEight == 0))) { /* Reserved physicalReg beforeRA */ diff --git a/src/maple_be/src/cg/aarch64/aarch64_ico.cpp b/src/maple_be/src/cg/aarch64/aarch64_ico.cpp index 85a3c36873d470be5ed123acf2607f16db5a0925..52fdbf5e53a79549266499292437592dd010dc5a 100644 --- a/src/maple_be/src/cg/aarch64/aarch64_ico.cpp +++ b/src/maple_be/src/cg/aarch64/aarch64_ico.cpp @@ -27,7 +27,7 @@ #define ICO_DUMP CG_DEBUG_FUNC(cgFunc) namespace maplebe { void AArch64IfConversionOptimizer::InitOptimizePatterns() { - singlePassPatterns.push_back(memPool->New(*cgFunc)); + singlePassPatterns.emplace_back(memPool->New(*cgFunc)); } Insn *AArch64ICOPattern::BuildCmpInsn(const Insn &condBr) { @@ -121,26 +121,26 @@ void AArch64ICOPattern::GenerateInsnForImm(const Insn &branchInsn, Operand &ifDe if (inverse || (imm2.IsZero() && imm1.IsOne())) { Insn *csetInsn = BuildCondSet(branchInsn, destReg, inverse); ASSERT(csetInsn != nullptr, "build a insn failed"); - generateInsn.push_back(csetInsn); + generateInsn.emplace_back(csetInsn); } else if (imm1.GetValue() == 
imm2.GetValue()) { MOperator mOp = (destReg.GetSize() == k64BitSize ? MOP_xmovri64 : MOP_xmovri32); Insn &tempInsn = cgFunc->GetTheCFG()->GetInsnModifier()->GetCGFunc()->GetCG()->BuildInstruction(mOp, destReg, imm1); - generateInsn.push_back(&tempInsn); + generateInsn.emplace_back(&tempInsn); } else { MOperator mOp = (destReg.GetSize() == k64BitSize ? MOP_xmovri64 : MOP_xmovri32); RegOperand *tempTarIf = cgFunc->GetTheCFG()->CreateVregFromReg(destReg); Insn &tempInsnIf = cgFunc->GetTheCFG()->GetInsnModifier()->GetCGFunc()->GetCG()->BuildInstruction(mOp, *tempTarIf, imm1); - generateInsn.push_back(&tempInsnIf); + generateInsn.emplace_back(&tempInsnIf); RegOperand *tempTarElse = cgFunc->GetTheCFG()->CreateVregFromReg(destReg); Insn &tempInsnElse = cgFunc->GetTheCFG()->GetInsnModifier()->GetCGFunc()->GetCG()->BuildInstruction(mOp, *tempTarElse, imm2); - generateInsn.push_back(&tempInsnElse); + generateInsn.emplace_back(&tempInsnElse); uint32 dSize = destReg.GetSize(); bool isIntTy = destReg.IsOfIntClass(); @@ -149,7 +149,7 @@ void AArch64ICOPattern::GenerateInsnForImm(const Insn &branchInsn, Operand &ifDe MOP_scselrrrc : MOP_hcselrrrc)); Insn *cselInsn = BuildCondSel(branchInsn, mOpCode, destReg, *tempTarIf, *tempTarElse); CHECK_FATAL(cselInsn != nullptr, "build a csel insn failed"); - generateInsn.push_back(cselInsn); + generateInsn.emplace_back(cselInsn); } } @@ -163,7 +163,7 @@ RegOperand *AArch64ICOPattern::GenerateRegAndTempInsn(Operand &dest, const RegOp Insn &tempInsn = cgFunc->GetTheCFG()->GetInsnModifier()->GetCGFunc()->GetCG()->BuildInstruction(mOp, *reg, tempSrcElse); - generateInsn.push_back(&tempInsn); + generateInsn.emplace_back(&tempInsn); return reg; } else { return (static_cast(&dest)); @@ -189,7 +189,7 @@ void AArch64ICOPattern::GenerateInsnForReg(const Insn &branchInsn, Operand &ifDe Insn &tempInsnIf = cgFunc->GetTheCFG()->GetInsnModifier()->GetCGFunc()->GetCG()->BuildInstruction(mOp, destReg, *tReg); - generateInsn.push_back(&tempInsnIf); + generateInsn.emplace_back(&tempInsnIf); } else { uint32 dSize = destReg.GetSize(); bool isIntTy = destReg.IsOfIntClass(); @@ -198,7 +198,7 @@ void AArch64ICOPattern::GenerateInsnForReg(const Insn &branchInsn, Operand &ifDe MOP_scselrrrc : MOP_hcselrrrc)); Insn *cselInsn = BuildCondSel(branchInsn, mOpCode, destReg, *tReg, *eReg); CHECK_FATAL(cselInsn != nullptr, "build a csel insn failed"); - generateInsn.push_back(cselInsn); + generateInsn.emplace_back(cselInsn); } } @@ -353,7 +353,7 @@ bool AArch64ICOPattern::CheckCondMoveBB(BB *bb, std::map &de } (void)destSrcMap.insert(std::make_pair(dest, src)); - destRegs.push_back(dest); + destRegs.emplace_back(dest); } return true; } diff --git a/src/maple_be/src/cg/aarch64/aarch64_insn.cpp b/src/maple_be/src/cg/aarch64/aarch64_insn.cpp index be94f537e781772a8381de70ad79c85c649a0235..7b2fbcc9d02ba9583f63e1f464e8c09f009395f4 100644 --- a/src/maple_be/src/cg/aarch64/aarch64_insn.cpp +++ b/src/maple_be/src/cg/aarch64/aarch64_insn.cpp @@ -1019,7 +1019,7 @@ void AArch64Insn::Emit(const CG &cg, Emitter &emitter) const { } } - bool isRefField = (opndSize == 0) ? false : CheckRefField(seq[0]); + bool isRefField = (opndSize == 0) ? 
false : CheckRefField(seq[0], true); if (mOp != MOP_comment) { emitter.IncreaseJavaInsnCount(); } @@ -1083,11 +1083,13 @@ void AArch64Insn::Emit(const CG &cg, Emitter &emitter) const { } /* set opnd0 ref-field flag, so we can emit the right register */ -bool AArch64Insn::CheckRefField(int32 opndIndex) const { +bool AArch64Insn::CheckRefField(int32 opndIndex, bool isEmit) const { if (IsAccessRefField() && AccessMem()) { Operand *opnd0 = opnds[opndIndex]; if (opnd0->IsRegister()) { - static_cast(opnd0)->SetRefField(true); + if (isEmit) { + static_cast(opnd0)->SetRefField(true); + } return true; } } diff --git a/src/maple_be/src/cg/aarch64/aarch64_memlayout.cpp b/src/maple_be/src/cg/aarch64/aarch64_memlayout.cpp index c9fb92303a01d1848405e219e0aaade6dfc61863..10f30d0c8baeb401c01372a9fd66008a29a6a2b2 100644 --- a/src/maple_be/src/cg/aarch64/aarch64_memlayout.cpp +++ b/src/maple_be/src/cg/aarch64/aarch64_memlayout.cpp @@ -162,7 +162,7 @@ void AArch64MemLayout::LayoutLocalVariales(std::vector &tempVar, std if (sym->IsRefType()) { if (mirFunction->GetRetRefSym().find(sym) != mirFunction->GetRetRefSym().end()) { /* try to put ret_ref at the end of segRefLocals */ - returnDelays.push_back(sym); + returnDelays.emplace_back(sym); continue; } symLoc->SetMemSegment(segRefLocals); @@ -172,7 +172,7 @@ void AArch64MemLayout::LayoutLocalVariales(std::vector &tempVar, std } else { if (sym->GetName() == "__EARetTemp__" || sym->GetName().substr(0, kEARetTempNameSize) == "__EATemp__") { - tempVar.push_back(sym); + tempVar.emplace_back(sym); continue; } symLoc->SetMemSegment(segLocals); diff --git a/src/maple_be/src/cg/aarch64/aarch64_peep.cpp b/src/maple_be/src/cg/aarch64/aarch64_peep.cpp index 9088c785de6f7172eae7dc9dcf23e0c77df0f006..bd71d6e116c52b2b1bac53144fc63b75524c5f45 100644 --- a/src/maple_be/src/cg/aarch64/aarch64_peep.cpp +++ b/src/maple_be/src/cg/aarch64/aarch64_peep.cpp @@ -1686,7 +1686,7 @@ bool LoadFloatPointAArch64::FindLoadFloatPoint(std::vector &optInsn, Insn if (mOp != MOP_xmovzri16) { return false; } - optInsn.push_back(&insn); + optInsn.emplace_back(&insn); Insn *insnMov2 = insn.GetNextMachineInsn(); if (insnMov2 == nullptr) { @@ -1695,7 +1695,7 @@ bool LoadFloatPointAArch64::FindLoadFloatPoint(std::vector &optInsn, Insn if (insnMov2->GetMachineOpcode() != MOP_xmovkri16) { return false; } - optInsn.push_back(insnMov2); + optInsn.emplace_back(insnMov2); Insn *insnMov3 = insnMov2->GetNextMachineInsn(); if (insnMov3 == nullptr) { @@ -1704,7 +1704,7 @@ bool LoadFloatPointAArch64::FindLoadFloatPoint(std::vector &optInsn, Insn if (insnMov3->GetMachineOpcode() != MOP_xmovkri16) { return false; } - optInsn.push_back(insnMov3); + optInsn.emplace_back(insnMov3); Insn *insnMov4 = insnMov3->GetNextMachineInsn(); if (insnMov4 == nullptr) { @@ -1713,7 +1713,7 @@ bool LoadFloatPointAArch64::FindLoadFloatPoint(std::vector &optInsn, Insn if (insnMov4->GetMachineOpcode() != MOP_xmovkri16) { return false; } - optInsn.push_back(insnMov4); + optInsn.emplace_back(insnMov4); return true; } @@ -1910,7 +1910,7 @@ bool LongIntCompareWithZAArch64::FindLondIntCmpWithZ(std::vector &optInsn if (thisMop != MOP_xcmpri) { return false; } - optInsn.push_back(&insn); + optInsn.emplace_back(&insn); /* second */ Insn *nextInsn1 = insn.GetNextMachineInsn(); @@ -1921,7 +1921,7 @@ bool LongIntCompareWithZAArch64::FindLondIntCmpWithZ(std::vector &optInsn if (nextMop1 != MOP_wcsinvrrrc) { return false; } - optInsn.push_back(nextInsn1); + optInsn.emplace_back(nextInsn1); /* third */ Insn *nextInsn2 = 
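CheckRefField above gains an isEmit flag so the same query can be used without its side effect: the emitter still sets the operand's ref-field marker, while analysis passes such as EBO only ask whether a reference field is accessed. A compact sketch of that pattern, with the signature simplified to plain bools:

struct RegOperand {
  void SetRefField(bool v) { refField = v; }
  bool refField = false;
};

bool CheckRefField(RegOperand *opnd, bool accessesRefField, bool isEmit) {
  if (accessesRefField && opnd != nullptr) {
    if (isEmit) {
      opnd->SetRefField(true);           /* mutate only on the emit path */
    }
    return true;                         /* query result is the same on both paths */
  }
  return false;
}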
nextInsn1->GetNextMachineInsn(); @@ -1932,7 +1932,7 @@ bool LongIntCompareWithZAArch64::FindLondIntCmpWithZ(std::vector &optInsn if (nextMop2 != MOP_wcsincrrrc) { return false; } - optInsn.push_back(nextInsn2); + optInsn.emplace_back(nextInsn2); /* forth */ Insn *nextInsn3 = nextInsn2->GetNextMachineInsn(); @@ -1943,7 +1943,7 @@ bool LongIntCompareWithZAArch64::FindLondIntCmpWithZ(std::vector &optInsn if (nextMop3 != MOP_wcmpri) { return false; } - optInsn.push_back(nextInsn3); + optInsn.emplace_back(nextInsn3); return true; } @@ -2202,7 +2202,7 @@ bool WriteFieldCallAArch64::WriteFieldCallOptPatternMatch(const Insn &writeField if (fieldValueDefInsnDestReg.GetRegisterNumber() != R2) { return false; } - paramDefInsns.push_back(fieldValueDefInsn); + paramDefInsns.emplace_back(fieldValueDefInsn); param.fieldValue = &(fieldValueDefInsn->GetOperand(kInsnSecondOpnd)); Insn *fieldParamDefInsn = fieldValueDefInsn->GetPreviousMachineInsn(); if (fieldParamDefInsn == nullptr || fieldParamDefInsn->GetMachineOpcode() != MOP_xmovrr) { @@ -2213,7 +2213,7 @@ bool WriteFieldCallAArch64::WriteFieldCallOptPatternMatch(const Insn &writeField if (fieldParamDestReg.GetRegisterNumber() != R1) { return false; } - paramDefInsns.push_back(fieldParamDefInsn); + paramDefInsns.emplace_back(fieldParamDefInsn); Insn *fieldDesignateInsn = fieldParamDefInsn->GetPreviousMachineInsn(); if (fieldDesignateInsn == nullptr || fieldDesignateInsn->GetMachineOpcode() != MOP_xaddrri12) { return false; @@ -2227,7 +2227,7 @@ bool WriteFieldCallAArch64::WriteFieldCallOptPatternMatch(const Insn &writeField param.fieldBaseOpnd = &(static_cast(fieldDesignateBaseOpnd)); auto &immOpnd = static_cast(fieldDesignateInsn->GetOperand(kInsnThirdOpnd)); param.fieldOffset = immOpnd.GetValue(); - paramDefInsns.push_back(fieldDesignateInsn); + paramDefInsns.emplace_back(fieldDesignateInsn); Insn *objDesignateInsn = fieldDesignateInsn->GetPreviousMachineInsn(); if (objDesignateInsn == nullptr || objDesignateInsn->GetMachineOpcode() != MOP_xmovrr) { return false; @@ -2243,7 +2243,7 @@ bool WriteFieldCallAArch64::WriteFieldCallOptPatternMatch(const Insn &writeField return false; } param.objOpnd = &(objDesignateInsn->GetOperand(kInsnSecondOpnd)); - paramDefInsns.push_back(objDesignateInsn); + paramDefInsns.emplace_back(objDesignateInsn); return true; } @@ -2357,7 +2357,7 @@ bool ComputationTreeAArch64::FindComputationTree(std::vector &optInsn, In if (thisMop != MOP_xaddrri12) { return false; } - optInsn.push_back(&insn); + optInsn.emplace_back(&insn); /* second */ Insn *nextInsn1 = insn.GetNextMachineInsn(); if (nextInsn1 == nullptr) { @@ -2367,7 +2367,7 @@ bool ComputationTreeAArch64::FindComputationTree(std::vector &optInsn, In if (nextMop1 != MOP_waddrrr) { return false; } - optInsn.push_back(nextInsn1); + optInsn.emplace_back(nextInsn1); /* third */ Insn *nextInsn2 = nextInsn1->GetNextMachineInsn(); if (nextInsn2 == nullptr) { @@ -2377,7 +2377,7 @@ bool ComputationTreeAArch64::FindComputationTree(std::vector &optInsn, In if (nextMop2 != MOP_waddrri12) { return false; } - optInsn.push_back(nextInsn2); + optInsn.emplace_back(nextInsn2); /* forth */ Insn *nextInsn3 = nextInsn2->GetNextMachineInsn(); if (nextInsn3 == nullptr) { @@ -2387,7 +2387,7 @@ bool ComputationTreeAArch64::FindComputationTree(std::vector &optInsn, In if (nextMop3 != MOP_xsxtw64) { return false; } - optInsn.push_back(nextInsn3); + optInsn.emplace_back(nextInsn3); /* fifth */ Insn *nextInsn4 = nextInsn3->GetNextMachineInsn(); if (nextInsn4 == nullptr) { @@ -2397,7 +2397,7 @@ bool 
ComputationTreeAArch64::FindComputationTree(std::vector &optInsn, In if (nextMop4 != MOP_xaddrrrs) { return false; } - optInsn.push_back(nextInsn4); + optInsn.emplace_back(nextInsn4); return true; } diff --git a/src/maple_be/src/cg/aarch64/aarch64_reaching.cpp b/src/maple_be/src/cg/aarch64/aarch64_reaching.cpp index 188254489c64cf7190678760c60762d98885d127..48f906e0dcca49127195f8d1851b22749baacf67 100644 --- a/src/maple_be/src/cg/aarch64/aarch64_reaching.cpp +++ b/src/maple_be/src/cg/aarch64/aarch64_reaching.cpp @@ -61,7 +61,7 @@ void AArch64ReachingDefinition::InitStartGen() { Insn &pseudoInsn = cgFunc->GetCG()->BuildInstruction(mOp, regOpnd); bb->InsertInsnBegin(pseudoInsn); - pseudoInsns.push_back(&pseudoInsn); + pseudoInsns.emplace_back(&pseudoInsn); { /* @@ -89,7 +89,7 @@ void AArch64ReachingDefinition::InitStartGen() { MOperator mopTemp = firstStackSize <= k4ByteSize ? MOP_pseudo_param_store_w : MOP_pseudo_param_store_x; Insn &pseudoInsnTemp = cgFunc->GetCG()->BuildInstruction(mopTemp, *memOpnd); bb->InsertInsnBegin(pseudoInsnTemp); - pseudoInsns.push_back(&pseudoInsnTemp); + pseudoInsns.emplace_back(&pseudoInsnTemp); } } } @@ -103,7 +103,7 @@ void AArch64ReachingDefinition::InitStartGen() { Insn &pseudoInsn = cgFunc->GetCG()->BuildInstruction(MOP_pseudo_ref_init_x, *memOpnd); bb->InsertInsnBegin(pseudoInsn); - pseudoInsns.push_back(&pseudoInsn); + pseudoInsns.emplace_back(&pseudoInsn); } } @@ -115,13 +115,13 @@ void AArch64ReachingDefinition::InitEhDefine(BB &bb) { RegOperand ®Opnd1 = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(R1, k64BitSize, kRegTyInt); Insn &pseudoInsn = cgFunc->GetCG()->BuildInstruction(MOP_pseudo_eh_def_x, regOpnd1); bb.InsertInsnBegin(pseudoInsn); - pseudoInsns.push_back(&pseudoInsn); + pseudoInsns.emplace_back(&pseudoInsn); /* insert MOP_pseudo_eh_def_x R0. */ RegOperand ®Opnd2 = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt); Insn &newPseudoInsn = cgFunc->GetCG()->BuildInstruction(MOP_pseudo_eh_def_x, regOpnd2); bb.InsertInsnBegin(newPseudoInsn); - pseudoInsns.push_back(&newPseudoInsn); + pseudoInsns.emplace_back(&newPseudoInsn); } /* insert pseudoInsns for return value R0/V0 */ @@ -136,14 +136,14 @@ void AArch64ReachingDefinition::AddRetPseudoInsn(BB &bb) { static_cast(cgFunc)->GetOrCreatePhysicalRegisterOperand(regNO, k64BitSize, kRegTyInt); Insn &retInsn = cgFunc->GetCG()->BuildInstruction(MOP_pseudo_ret_int, regOpnd); bb.AppendInsn(retInsn); - pseudoInsns.push_back(&retInsn); + pseudoInsns.emplace_back(&retInsn); } else { ASSERT(regNO == V0, "CG internal error. 
Return value should be R0 or V0."); RegOperand ®Opnd = static_cast(cgFunc)->GetOrCreatePhysicalRegisterOperand(regNO, k64BitSize, kRegTyFloat); Insn &retInsn = cgFunc->GetCG()->BuildInstruction(MOP_pseudo_ret_float, regOpnd); bb.AppendInsn(retInsn); - pseudoInsns.push_back(&retInsn); + pseudoInsns.emplace_back(&retInsn); } } @@ -196,7 +196,7 @@ std::vector AArch64ReachingDefinition::FindRegDefBetweenInsn(uint32 regNO const AArch64MD *md = &AArch64CG::kMd[static_cast(insn)->GetMachineOpcode()]; if (insn->IsCall() && IsCallerSavedReg(regNO)) { - defInsnVec.push_back(insn); + defInsnVec.emplace_back(insn); return defInsnVec; } uint32 opndNum = insn->GetOperandSize(); @@ -218,13 +218,13 @@ std::vector AArch64ReachingDefinition::FindRegDefBetweenInsn(uint32 regNO if (memOpnd.GetAddrMode() == AArch64MemOperand::kAddrModeBOi && (memOpnd.IsPostIndexed() || memOpnd.IsPreIndexed()) && base->GetRegisterNumber() == regNO) { - defInsnVec.push_back(insn); + defInsnVec.emplace_back(insn); return defInsnVec; } } } else if ((opnd.IsConditionCode() || opnd.IsRegister()) && (static_cast(opnd).GetRegisterNumber() == regNO)) { - defInsnVec.push_back(insn); + defInsnVec.emplace_back(insn); return defInsnVec; } } @@ -313,7 +313,7 @@ std::vector AArch64ReachingDefinition::FindMemDefBetweenInsn(uint32 offse if (insn->IsCall()) { if (CallInsnClearDesignateStackRef(*insn, offset)) { - defInsnVec.push_back(insn); + defInsnVec.emplace_back(insn); return defInsnVec; } continue; @@ -340,7 +340,7 @@ std::vector AArch64ReachingDefinition::FindMemDefBetweenInsn(uint32 offse int64 memOffset = memOpnd.GetOffsetImmediate()->GetOffsetValue(); if ((offset == memOffset) || (insn->IsStorePair() && offset == memOffset + GetEachMemSizeOfPair(insn->GetMachineOpcode()))) { - defInsnVec.push_back(insn); + defInsnVec.emplace_back(insn); return defInsnVec; } } diff --git a/src/maple_be/src/cg/aarch64/aarch64_schedule.cpp b/src/maple_be/src/cg/aarch64/aarch64_schedule.cpp index d4050bca6b6ff7e75bb06171012624f018f2c568..9f5ac140529edef422400176c561df4a0c57a8f3 100644 --- a/src/maple_be/src/cg/aarch64/aarch64_schedule.cpp +++ b/src/maple_be/src/cg/aarch64/aarch64_schedule.cpp @@ -56,7 +56,7 @@ void AArch64Schedule::Init() { } } - readyList.push_back(node); + readyList.emplace_back(node); node->SetState(kReady); /* Init validPredsSize and validSuccsSize. */ @@ -155,9 +155,9 @@ void AArch64Schedule::MemoryAccessPairOpt() { succNode.SetState(kReady); ASSERT(succNode.GetInsn() != nullptr, "insn can't be nullptr!"); if (CanCombine(*succNode.GetInsn())) { - memList.push_back(&succNode); + memList.emplace_back(&succNode); } else { - readyList.push_back(&succNode); + readyList.emplace_back(&succNode); } } } @@ -252,7 +252,7 @@ void AArch64Schedule::RegPressureScheduling(BB &bb, MapleVector &nodes RegPressureSchedule *regSchedule = memPool.New(cgFunc, alloc); /* * Get physical register amount currently - * undef, Int Reg, Floag Reg, Flag Reg + * undef, Int Reg, Float Reg, Flag Reg */ const std::vector kRegNumVec = { 0, V0, kMaxRegNum - V0 + 1, 1 }; regSchedule->InitBBInfo(bb, memPool, nodes); @@ -316,7 +316,7 @@ uint32 AArch64Schedule::ComputeEstart(uint32 cycle) { maxEstart = (maxEstart < succNode.GetEStart() ? 
succNode.GetEStart() : maxEstart); succNode.IncreaseVisit(); if ((succNode.GetVisit() >= succNode.GetValidPredsSize()) && (succNode.GetType() != kNodeTypeSeparator)) { - readyNodes.push_back(&succNode); + readyNodes.emplace_back(&succNode); } ASSERT(succNode.GetVisit() <= succNode.GetValidPredsSize(), "CG internal error."); } @@ -340,7 +340,7 @@ void AArch64Schedule::ComputeLstart(uint32 maxEstart) { node->SetVisit(0); } - readyNodes.push_back(nodes[maxIndex]); + readyNodes.emplace_back(nodes[maxIndex]); while (!readyNodes.empty()) { DepNode *node = readyNodes.front(); readyNodes.erase(readyNodes.begin()); @@ -355,7 +355,7 @@ void AArch64Schedule::ComputeLstart(uint32 maxEstart) { } predNode.IncreaseVisit(); if ((predNode.GetVisit() >= predNode.GetValidSuccsSize()) && (predNode.GetType() != kNodeTypeSeparator)) { - readyNodes.push_back(&predNode); + readyNodes.emplace_back(&predNode); } ASSERT(predNode.GetVisit() <= predNode.GetValidSuccsSize(), "CG internal error."); @@ -504,7 +504,7 @@ void AArch64Schedule::RandomTest() { DepNode *currNode = readyList.back(); currNode->SetState(kScheduled); readyList.pop_back(); - nodes.push_back(currNode); + nodes.emplace_back(currNode); for (auto succLink : currNode->GetSuccs()) { DepNode &succNode = succLink->GetTo(); @@ -519,7 +519,7 @@ void AArch64Schedule::RandomTest() { if (ready) { ASSERT(succNode.GetState() == kNormal, "succNode must be kNormal"); - readyList.push_back(&succNode); + readyList.emplace_back(&succNode); succNode.SetState(kReady); } } @@ -857,7 +857,7 @@ void AArch64Schedule::IterateBruteForce(DepNode &targetNode, MapleVector tempList = readyList; EraseNodeFromNodeList(targetNode, tempList); @@ -905,7 +905,7 @@ void AArch64Schedule::IterateBruteForce(DepNode &targetNode, MapleVectorGetEStart() <= currCycle) { - tempAvailableList.push_back(node); + tempAvailableList.emplace_back(node); } } @@ -918,7 +918,7 @@ void AArch64Schedule::IterateBruteForce(DepNode &targetNode, MapleVectorCanBeScheduled()) { - availableReadyList.push_back(node); + availableReadyList.emplace_back(node); } } @@ -975,7 +975,7 @@ uint32 AArch64Schedule::DoBruteForceSchedule() { DepNode *targetNode = readyList.front(); targetNode->SetState(kScheduled); targetNode->SetSchedCycle(currCycle); - scheduledNodes.push_back(targetNode); + scheduledNodes.emplace_back(targetNode); readyList.clear(); /* Update readyList. */ @@ -1002,7 +1002,7 @@ void AArch64Schedule::UpdateReadyList(DepNode &targetNode, MapleVector DepNode &succNode = succLink->GetTo(); succNode.DescreaseValidPredsSize(); if (succNode.GetValidPredsSize() == 0) { - readyList.push_back(&succNode); + readyList.emplace_back(&succNode); succNode.SetState(kReady); /* Set eStart. 
*/ @@ -1168,21 +1168,23 @@ void AArch64Schedule::ListScheduling(bool beforeRA) { InitIDAndLoc(); mad = Globals::GetInstance()->GetMAD(); + if (beforeRA) { + RegPressure::SetMaxRegClassNum(kRegisterLast); + } depAnalysis = memPool.New(cgFunc, memPool, *mad, beforeRA); FOR_ALL_BB(bb, &cgFunc) { depAnalysis->Run(*bb, nodes); - ClinitPairOpt(); - MemoryAccessPairOpt(); if (LIST_SCHED_DUMP_REF) { GenerateDot(*bb, nodes); DumpDepGraph(nodes); } if (beforeRA) { - RegPressure::SetMaxRegClassNum(kRegisterLast); RegPressureScheduling(*bb, nodes); } else { + ClinitPairOpt(); + MemoryAccessPairOpt(); if (CGOptions::IsDruteForceSched()) { BruteForceScheduling(*bb); } else if (CGOptions::IsSimulateSched()) { diff --git a/src/maple_be/src/cg/cfgo.cpp b/src/maple_be/src/cg/cfgo.cpp index d59e6074ead5be76f4616fd60437740fee498525..5d80f84f93b96b63afb0f0fb7096452f2bbb2833 100644 --- a/src/maple_be/src/cg/cfgo.cpp +++ b/src/maple_be/src/cg/cfgo.cpp @@ -33,12 +33,12 @@ using namespace maple; void CFGOptimizer::InitOptimizePatterns() { /* Initialize cfg optimization patterns */ - diffPassPatterns.push_back(memPool->New(*cgFunc)); - diffPassPatterns.push_back(memPool->New(*cgFunc)); - diffPassPatterns.push_back(memPool->New(*cgFunc)); - diffPassPatterns.push_back(memPool->New(*cgFunc)); - diffPassPatterns.push_back(memPool->New(*cgFunc)); - diffPassPatterns.push_back(memPool->New(*cgFunc)); + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); } /* return true if to is put after from and there is no real insns between from and to, */ @@ -690,7 +690,7 @@ bool DuplicateBBPattern::Optimize(BB &curBB) { std::vector candidates; for (BB *bb : curBB.GetPreds()) { if (bb->GetKind() == BB::kBBGoto && bb->GetNext() != &curBB && bb != &curBB && !bb->IsEmpty()) { - candidates.push_back(bb); + candidates.emplace_back(bb); } } if (candidates.empty()) { diff --git a/src/maple_be/src/cg/cg.cpp b/src/maple_be/src/cg/cg.cpp index 64cc3a34411434c6532a020c77f082535abf407a..26f42af98453ea5a47aa86f46af14f3afb0d110c 100644 --- a/src/maple_be/src/cg/cg.cpp +++ b/src/maple_be/src/cg/cg.cpp @@ -60,7 +60,7 @@ void CG::GenExtraTypeMetadata(const std::string &classListFileName, const std::s } visited.insert(name); - classesToGenerate.push_back(classType); + classesToGenerate.emplace_back(classType); } } else { /* Visit listed classes. 
*/ @@ -77,7 +77,7 @@ void CG::GenExtraTypeMetadata(const std::string &classListFileName, const std::s return; } - classesToGenerate.push_back(classType); + classesToGenerate.emplace_back(classType); } } @@ -223,7 +223,7 @@ static void AppendReferenceOffsets64(const BECommon &beCommon, MIRStructType &cu if (!CGOptions::IsQuiet()) { LogInfo::MapleLogger() << " ** Is a pointer field.\n"; } - result.push_back(myOffset); + result.emplace_back(myOffset); } if ((fieldTypeKind == kTypeArray) || (fieldTypeKind == kTypeStruct) || (fieldTypeKind == kTypeClass) || @@ -246,7 +246,7 @@ std::vector CG::GetReferenceOffsets64(const BECommon &beCommon, MIRStruct if (structType.GetKind() == kTypeClass) { for (auto fieldInfo : beCommon.GetJClassLayout(static_cast(structType))) { if (fieldInfo.IsRef()) { - result.push_back(static_cast(fieldInfo.GetOffset())); + result.emplace_back(static_cast(fieldInfo.GetOffset())); } } } else if (structType.GetKind() != kTypeInterface) { /* interface doesn't have reference fields */ diff --git a/src/maple_be/src/cg/cgfunc.cpp b/src/maple_be/src/cg/cgfunc.cpp index d7ce782f4ebb048877dc53e06c8a0f45651e23b8..7523dab408c0faadab261aad51ea2bf0afa071e2 100644 --- a/src/maple_be/src/cg/cgfunc.cpp +++ b/src/maple_be/src/cg/cgfunc.cpp @@ -663,18 +663,17 @@ void InitHandleStmtFactory() { CGFunc::CGFunc(MIRModule &mod, CG &cg, MIRFunction &mirFunc, BECommon &beCommon, MemPool &memPool, MapleAllocator &allocator, uint32 funcId) : vRegTable(allocator.Adapter()), - vRegOperandTable(std::less(), allocator.Adapter()), - pRegSpillMemOperands(std::less(), allocator.Adapter()), - spillRegMemOperands(std::less(), allocator.Adapter()), - spillRegMemOperandsAdj(allocator.Adapter()), - reuseSpillLocMem(std::less(), allocator.Adapter()), + vRegOperandTable(allocator.Adapter()), + pRegSpillMemOperands(allocator.Adapter()), + spillRegMemOperands(allocator.Adapter()), + reuseSpillLocMem(allocator.Adapter()), labelMap(std::less(), allocator.Adapter()), cg(&cg), mirModule(mod), memPool(&memPool), func(mirFunc), exitBBVec(allocator.Adapter()), - lab2BBMap(std::less(), allocator.Adapter()), + lab2BBMap(allocator.Adapter()), beCommon(beCommon), funcScopeAllocator(&allocator), emitStVec(allocator.Adapter()), @@ -1051,7 +1050,7 @@ void CGFunc::ProcessExitBBVec() { LabelIdx newLabelIdx = CreateLabel(); BB *retBB = CreateNewBB(newLabelIdx, cleanupBB->IsUnreachable(), BB::kBBReturn, cleanupBB->GetFrequency()); cleanupBB->PrependBB(*retBB); - exitBBVec.push_back(retBB); + exitBBVec.emplace_back(retBB); return; } /* split an empty exitBB */ diff --git a/src/maple_be/src/cg/ebo.cpp b/src/maple_be/src/cg/ebo.cpp index cbb604b3f53da6026274312e03a97547fdec324c..8099276077e87dbc358a2c1075299aa9ef095db1 100644 --- a/src/maple_be/src/cg/ebo.cpp +++ b/src/maple_be/src/cg/ebo.cpp @@ -361,8 +361,8 @@ void Ebo::HashInsn(Insn &insn, const MapleVector &origInfo, const Map uint32 opndNum = insn.GetOperandSize(); for (uint32 i = 0; i < opndNum; ++i) { /* Copy all the opndInfo entries for the operands. */ - insnInfo->origOpnd.push_back(origInfo.at(i)); - insnInfo->optimalOpnd.push_back(opndInfos.at(i)); + insnInfo->origOpnd.emplace_back(origInfo.at(i)); + insnInfo->optimalOpnd.emplace_back(opndInfos.at(i)); /* Keep the result info. 
*/ if (insn.OpndIsDef(i)) { OpndInfo *opndInfo = nullptr; @@ -373,7 +373,7 @@ void Ebo::HashInsn(Insn &insn, const MapleVector &origInfo, const Map opndInfo = OperandInfoDef(*insn.GetBB(), insn, op); opndInfo->insnInfo = insnInfo; } - insnInfo->result.push_back(opndInfo); + insnInfo->result.emplace_back(opndInfo); } } SetInsnInfo(hashVal, *insnInfo); @@ -699,15 +699,15 @@ void Ebo::BuildAllInfo(BB &bb) { /* start : Process all the operands. */ for (uint32 i = 0; i < opndNum; ++i) { if (!insn->OpndIsUse(i)) { - opnds.push_back(nullptr); - opndInfos.push_back(nullptr); - origInfos.push_back(nullptr); + opnds.emplace_back(nullptr); + opndInfos.emplace_back(nullptr); + origInfos.emplace_back(nullptr); continue; } Operand *opnd = &(insn->GetOperand(i)); - opnds.push_back(opnd); - opndInfos.push_back(nullptr); - origInfos.push_back(nullptr); + opnds.emplace_back(opnd); + opndInfos.emplace_back(nullptr); + origInfos.emplace_back(nullptr); if (opnd->IsConstant()) { continue; } diff --git a/src/maple_be/src/cg/eh_func.cpp b/src/maple_be/src/cg/eh_func.cpp index 64efb310c09ed6e4064cb6c27e7fff3477cb440f..a7e2008855ca0da79b713cc4917fecbc55b604b1 100644 --- a/src/maple_be/src/cg/eh_func.cpp +++ b/src/maple_be/src/cg/eh_func.cpp @@ -59,7 +59,7 @@ void EHFunc::CollectEHInformation(std::vector> & CatchNode *catchNode = static_cast(stmt); ASSERT(stmt->GetPrev()->GetOpCode() == OP_label, "catch's previous node is not a label"); LabelNode *labelStmt = static_cast(stmt->GetPrev()); - catchVec.push_back(std::pair(labelStmt->GetLabelIdx(), catchNode)); + catchVec.emplace_back(std::pair(labelStmt->GetLabelIdx(), catchNode)); /* rename the type of <*void> to <*Throwable> */ for (uint32 i = 0; i < catchNode->Size(); i++) { MIRType *ehType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(catchNode->GetExceptionTyIdxVecElement(i)); @@ -119,7 +119,7 @@ void EHThrow::ConvertThrowToRuntime(CGFunc &cgFunc, BaseNode &arg) { MIRFunction *calleeFunc = mirModule->GetMIRBuilder()->GetOrCreateFunction("MCC_ThrowException", (TyIdx)(PTY_void)); calleeFunc->SetNoReturn(); MapleVector args(mirModule->GetMIRBuilder()->GetCurrentFuncCodeMpAllocator()->Adapter()); - args.push_back(&arg); + args.emplace_back(&arg); CallNode *callAssign = mirModule->GetMIRBuilder()->CreateStmtCall(calleeFunc->GetPuidx(), args); mirFunc.GetBody()->ReplaceStmt1WithStmt2(rethrow, callAssign); } @@ -131,7 +131,7 @@ void EHThrow::ConvertThrowToRethrow(CGFunc &cgFunc) { MIRFunction *unFunc = mirBuilder->GetOrCreateFunction("MCC_RethrowException", (TyIdx)PTY_void); unFunc->SetNoReturn(); MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); - args.push_back(rethrow->Opnd(0)); + args.emplace_back(rethrow->Opnd(0)); CallNode *callNode = mirBuilder->CreateStmtCall(unFunc->GetPuidx(), args); mirFunc.GetBody()->ReplaceStmt1WithStmt2(rethrow, callNode); } @@ -420,7 +420,7 @@ void EHFunc::BuildEHTypeTable(const std::vector> if (!catchVec.empty()) { /* the first one assume to be <*void> */ TyIdx voidTyIdx(PTY_void); - ehTyTable.push_back(voidTyIdx); + ehTyTable.emplace_back(voidTyIdx); ty2IndexTable[voidTyIdx] = 0; /* create void pointer and update becommon's size table */ cgFunc->GetBecommon().UpdateTypeTable(*GlobalTables::GetTypeTable().GetVoidPtr()); @@ -441,7 +441,7 @@ void EHFunc::BuildEHTypeTable(const std::vector> } ty2IndexTable[ehTyIdx] = ehTyTable.size(); - ehTyTable.push_back(ehTyIdx); + ehTyTable.emplace_back(ehTyIdx); MIRClassType *catchType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(ehTyIdx)); MIRClassType *rootType 
= catchType->GetExceptionRootType(); if (rootType == nullptr) { @@ -595,7 +595,7 @@ void EHFunc::InsertCxaAfterEachCatch(const std::vectorSetRegIdx(-kSregRetval0); retRegRead0->SetPrimType(LOWERED_PTR_TYPE); MapleVector args(mirModule.GetMIRBuilder()->GetCurrentFuncCodeMpAllocator()->Adapter()); - args.push_back(retRegRead0); + args.emplace_back(retRegRead0); CallNode *callAssign = mirModule.GetMIRBuilder()->CreateStmtCall(calleeFunc->GetPuidx(), args); funcBody->InsertAfter(jCatchNode, callAssign); } diff --git a/src/maple_be/src/cg/emit.cpp b/src/maple_be/src/cg/emit.cpp index e3004df44c91e8e977f48b2ab1d4fddc29cc8912..cf0e2a19b9c4e15acbc9f933f3fc6e405fdaed3e 100644 --- a/src/maple_be/src/cg/emit.cpp +++ b/src/maple_be/src/cg/emit.cpp @@ -1561,9 +1561,9 @@ void Emitter::GetHotAndColdMetaSymbolInfo(const std::vector &mirSymb std::string klassJavaDescriptor; namemangler::DecodeMapleNameToJavaDescriptor(name, klassJavaDescriptor); if (isHot && !forceCold) { - hotFieldInfoSymbolVec.push_back(mirSymbol); + hotFieldInfoSymbolVec.emplace_back(mirSymbol); } else { - coldFieldInfoSymbolVec.push_back(mirSymbol); + coldFieldInfoSymbolVec.emplace_back(mirSymbol); } } } @@ -1731,45 +1731,45 @@ void Emitter::EmitGlobalVariable() { continue; } if (mirSymbol->GetName().find(VTAB_PREFIX_STR) == 0) { - vtabVec.push_back(mirSymbol); + vtabVec.emplace_back(mirSymbol); continue; } else if (mirSymbol->GetName().find(ITAB_PREFIX_STR) == 0) { - itabVec.push_back(mirSymbol); + itabVec.emplace_back(mirSymbol); continue; } else if (mirSymbol->GetName().find(ITAB_CONFLICT_PREFIX_STR) == 0) { - itabConflictVec.push_back(mirSymbol); + itabConflictVec.emplace_back(mirSymbol); continue; } else if (mirSymbol->GetName().find(kVtabOffsetTabStr) == 0) { - vtabOffsetVec.push_back(mirSymbol); + vtabOffsetVec.emplace_back(mirSymbol); continue; } else if (mirSymbol->GetName().find(kFieldOffsetTabStr) == 0) { - fieldOffsetVec.push_back(mirSymbol); + fieldOffsetVec.emplace_back(mirSymbol); continue; } else if (mirSymbol->GetName().find(kOffsetTabStr) == 0) { - valueOffsetVec.push_back(mirSymbol); + valueOffsetVec.emplace_back(mirSymbol); continue; } else if (mirSymbol->IsArrayClassCache()) { - arrayClassCacheVec.push_back(mirSymbol); + arrayClassCacheVec.emplace_back(mirSymbol); continue; } else if (mirSymbol->IsArrayClassCacheName()) { - arrayClassCacheNameVec.push_back(mirSymbol); + arrayClassCacheNameVec.emplace_back(mirSymbol); continue; } else if (mirSymbol->GetName().find(kLocalClassInfoStr) == 0) { - localClassInfoVec.push_back(mirSymbol); + localClassInfoVec.emplace_back(mirSymbol); continue; } else if (StringUtils::StartsWith(mirSymbol->GetName(), namemangler::kDecoupleStaticKeyStr)) { - staticDecoupleKeyVec.push_back(mirSymbol); + staticDecoupleKeyVec.emplace_back(mirSymbol); continue; } else if (StringUtils::StartsWith(mirSymbol->GetName(), namemangler::kDecoupleStaticValueStr)) { - staticDecoupleValueVec.push_back(mirSymbol); + staticDecoupleValueVec.emplace_back(mirSymbol); continue; } else if (mirSymbol->IsLiteral()) { - literalVec.push_back(std::make_pair(mirSymbol, false)); + literalVec.emplace_back(std::make_pair(mirSymbol, false)); continue; } else if (mirSymbol->IsConstString() || mirSymbol->IsLiteralPtr()) { MIRConst *mirConst = mirSymbol->GetKonst(); if (mirConst != nullptr && mirConst->GetKind() == kConstAddrof) { - constStrVec.push_back(mirSymbol); + constStrVec.emplace_back(mirSymbol); continue; } } else if (mirSymbol->IsReflectionClassInfoPtr()) { @@ -1783,7 +1783,7 @@ void Emitter::EmitGlobalVariable() { 
EmitFuncLayoutInfo(*mirSymbol); continue; } else if (mirSymbol->GetName().find(kStaticFieldNamePrefixStr) == 0) { - staticFieldsVec.push_back(mirSymbol); + staticFieldsVec.emplace_back(mirSymbol); continue; } else if (mirSymbol->GetName().find(kGcRootList) == 0) { EmitGlobalRootList(*mirSymbol); @@ -1793,19 +1793,19 @@ void Emitter::EmitGlobalVariable() { EmitMuidTable(muidVec, strIdx2Type, kFunctionProfileTabPrefixStr); continue; } else if (mirSymbol->IsReflectionFieldOffsetData()) { - fieldOffsetDatas.push_back(mirSymbol); + fieldOffsetDatas.emplace_back(mirSymbol); continue; } else if (mirSymbol->IsReflectionMethodAddrData()) { - methodAddrDatas.push_back(mirSymbol); + methodAddrDatas.emplace_back(mirSymbol); continue; } else if (mirSymbol->IsReflectionSuperclassInfo()) { - superClassStVec.push_back(mirSymbol); + superClassStVec.emplace_back(mirSymbol); continue; } if (mirSymbol->IsReflectionInfo()) { if (mirSymbol->IsReflectionClassInfo()) { - classInfoVec.push_back(mirSymbol); + classInfoVec.emplace_back(mirSymbol); } continue; } @@ -1815,11 +1815,11 @@ void Emitter::EmitGlobalVariable() { continue; } if (mirSymbol->GetStorageClass() == kScTypeInfo) { - typeStVec.push_back(mirSymbol); + typeStVec.emplace_back(mirSymbol); continue; } if (mirSymbol->GetStorageClass() == kScTypeInfoName) { - typeNameStVec.push_back(mirSymbol); + typeNameStVec.emplace_back(mirSymbol); continue; } if (mirSymbol->GetStorageClass() == kScTypeCxxAbi) { @@ -1845,7 +1845,7 @@ void Emitter::EmitGlobalVariable() { EmitAsmLabel(*mirSymbol, kAsmType); EmitAsmLabel(*mirSymbol, kAsmComm); } else { - globalVarVec.push_back(std::make_pair(mirSymbol, false)); + globalVarVec.emplace_back(std::make_pair(mirSymbol, false)); } continue; } @@ -1982,16 +1982,16 @@ void Emitter::EmitGlobalVariable() { GlobalTables::GetStrTable().GetStrIdxFromName(kMethodsInfoCompactPrefixStr + className)); if (fieldSt != nullptr) { - fieldInfoStVec.push_back(fieldSt); + fieldInfoStVec.emplace_back(fieldSt); } if (fieldStCompact != nullptr) { - fieldInfoStCompactVec.push_back(fieldStCompact); + fieldInfoStCompactVec.emplace_back(fieldStCompact); } if (methodSt != nullptr) { - methodInfoStVec.push_back(methodSt); + methodInfoStVec.emplace_back(methodSt); } if (methodStCompact != nullptr) { - methodInfoStCompactVec.push_back(methodStCompact); + methodInfoStCompactVec.emplace_back(methodStCompact); } } } @@ -2042,10 +2042,10 @@ void Emitter::EmitGlobalVariable() { * And itab to vtab section */ for (auto sym : hotItabStVec) { - hotVtabStVec.push_back(sym); + hotVtabStVec.emplace_back(sym); } for (auto sym : coldItabStVec) { - coldVtabStVec.push_back(sym); + coldVtabStVec.emplace_back(sym); } MarkVtabOrItabEndFlag(coldVtabStVec); EmitMuidTable(hotVtabStVec, strIdx2Type, kMuidVtabAndItabPrefixStr); @@ -2095,8 +2095,8 @@ void Emitter::EmitGlobalRootList(const MIRSymbol &gcrootsSt) { Emit("\t.section .maple.gcrootsmap").Emit(",\"aw\",%progbits\n"); std::vector nameVec; std::string name = gcrootsSt.GetName(); - nameVec.push_back(name); - nameVec.push_back(name + "Size"); + nameVec.emplace_back(name); + nameVec.emplace_back(name + "Size"); bool gcrootsFlag = true; uint64 vecSize = 0; for (const auto &gcrootsName : nameVec) { diff --git a/src/maple_be/src/cg/pressure.cpp b/src/maple_be/src/cg/pressure.cpp index 16e06e3ccf3e1cee8193309a0fb61e456a4702f0..b3fceddc0b6ea978aebc6d8d82f7cc272d416de5 100644 --- a/src/maple_be/src/cg/pressure.cpp +++ b/src/maple_be/src/cg/pressure.cpp @@ -22,20 +22,11 @@ int32 RegPressure::maxRegClassNum = 0; /* print regpressure 
information */ void RegPressure::DumpRegPressure() const { - constexpr int32 width = 12; PRINT_STR_VAL("Priority: ", priority); PRINT_STR_VAL("maxDepth: ", maxDepth); PRINT_STR_VAL("near: ", near); - LogInfo::MapleLogger() << "\n"; - LogInfo::MapleLogger() << std::left << std::setw(width) << "usereg: "; - for (const auto &useReg : regUses) { - LogInfo::MapleLogger() << "R" << useReg.first << " "; - } - LogInfo::MapleLogger() << "\n"; - LogInfo::MapleLogger() << std::left << std::setw(width) << "defreg: "; - for (const auto &defReg : regDefs) { - LogInfo::MapleLogger() << "R" << defReg.first << " "; - } + PRINT_STR_VAL("callNum: ", callNum); + LogInfo::MapleLogger() << "\n"; } } /* namespace maplebe */ diff --git a/src/maple_be/src/cg/schedule.cpp b/src/maple_be/src/cg/schedule.cpp index f0da826ea4df7ec1db3266f00cf06d041f1c7cec..e23cb5fcee2181bf6c60385a9b0d8498ef9fc2be 100644 --- a/src/maple_be/src/cg/schedule.cpp +++ b/src/maple_be/src/cg/schedule.cpp @@ -61,32 +61,32 @@ void RegPressureSchedule::Init(const MapleVector &nodes) { } for (auto *node : nodes) { - /* initialize */ - node->InitPressure(); - /* calculate the node uses'register pressure */ - for (auto &useReg : node->GetRegUses()) { - CalculatePressure(*node, useReg.first, false); + for (auto &useReg : node->GetUseRegnos()) { + CalculatePressure(*node, useReg, false); } /* calculate the node defs'register pressure */ - for (auto &defReg : node->GetRegDefs()) { - CalculatePressure(*node, defReg.first, true); - - regno_t reg = defReg.first; - RegType regType = GetRegisterType(reg); + size_t i = 0; + for (auto &defReg : node->GetDefRegnos()) { + CalculatePressure(*node, defReg, true); + RegType regType = GetRegisterType(defReg); /* if no use list, a register is only defined, not be used */ - if (defReg.second == nullptr) { + if (node->GetRegDefs(i) == nullptr) { node->IncDeadDefByIndex(regType); } + ++i; } node->SetValidPredsSize(node->GetPreds().size()); } DepNode *firstNode = nodes.front(); - readyList.push_back(firstNode); + readyList.emplace_back(firstNode); firstNode->SetState(kReady); + scheduledNode.reserve(nodes.size()); + constexpr size_t readyListSize = 10; + readyList.reserve(readyListSize); } void RegPressureSchedule::SortReadyList() { @@ -97,23 +97,47 @@ void RegPressureSchedule::SortReadyList() { bool RegPressureSchedule::DepNodePriorityCmp(const DepNode *node1, const DepNode *node2) { CHECK_NULL_FATAL(node1); CHECK_NULL_FATAL(node2); - if (node1->GetPriority() != node2->GetPriority()) { - return node1->GetPriority() > node2->GetPriority(); + int32 priority1 = node1->GetPriority(); + int32 priority2 = node2->GetPriority(); + if (priority1 != priority2) { + return priority1 > priority2; + } + + int32 numCall1 = node1->GetNumCall(); + int32 numCall2 = node2->GetNumCall(); + if (node1->GetIncPressure() == true && node2->GetIncPressure() == true) { + if (numCall1 != numCall2) { + return numCall1 > numCall2; + } } - int32 depthS1 = node1->GetMaxDepth() + node1->GetNear(); - int32 depthS2 = node2->GetMaxDepth() + node2->GetNear(); + int32 near1 = node1->GetNear(); + int32 near2 = node2->GetNear(); + int32 depthS1 = node1->GetMaxDepth() + near1; + int32 depthS2 = node2->GetMaxDepth() + near2; if (depthS1 != depthS2) { return depthS1 > depthS2; } - int32 near1 = node1->GetNear(); - int32 near2 = node2->GetNear(); if (near1 != near2) { - return node1->GetNear() > node2->GetNear(); + return near1 > near2; + } + + if (numCall1 != numCall2) { + return numCall1 > numCall2; } - return node1->GetSuccs().size() <
node2->GetSuccs().size(); + size_t succsSize1 = node1->GetSuccs().size(); + size_t succsSize2 = node2->GetSuccs().size(); + if (succsSize1 != succsSize2) { + return succsSize1 < succsSize2; + } + + if (node1->GetHasPreg() != node2->GetHasPreg()) { + return node1->GetHasPreg(); + } + + return node1->GetInsn()->GetId() < node2->GetInsn()->GetId(); } /* set a node's incPressure is true, when a class register inscrease */ @@ -127,12 +151,22 @@ void RegPressureSchedule::ReCalculateDepNodePressure(DepNode &node) { void RegPressureSchedule::CalculateMaxDepth(const MapleVector &nodes) { /* from the last node to first node. */ for (auto it = nodes.rbegin(); it != nodes.rend(); ++it) { + /* init call count */ + if ((*it)->GetInsn()->IsCall()) { + (*it)->SetNumCall(1); + } /* traversing each successor of it. */ for (auto succ : (*it)->GetSuccs()) { DepNode &to = succ->GetTo(); if ((*it)->GetMaxDepth() < (to.GetMaxDepth() + 1)) { (*it)->SetMaxDepth(to.GetMaxDepth() + 1); } + + if (to.GetInsn()->IsCall() && ((*it)->GetNumCall() < to.GetNumCall() + 1)) { + (*it)->SetNumCall(to.GetNumCall() + 1); + } else if ((*it)->GetNumCall() < to.GetNumCall()) { + (*it)->SetNumCall(to.GetNumCall()); + } } } } @@ -149,10 +183,14 @@ void RegPressureSchedule::CalculateNear(const DepNode &node) { /* return true if it is last time using the regNO. */ bool RegPressureSchedule::IsLastUse(const DepNode &node, regno_t regNO) const { - auto it = node.GetRegUses().find(regNO); - ASSERT(it->second != nullptr, "valid iterator check"); - ASSERT(it != node.GetRegUses().end(), "not find reg!"); - RegList *regList = it->second; + size_t i = 0; + for (auto reg : node.GetUseRegnos()) { + if (reg == regNO) { + break; + } + ++i; + } + RegList *regList = node.GetRegUses(i); /* * except the node, if there are insn that has no scheduled in regNO'sregList, @@ -190,9 +228,14 @@ void RegPressureSchedule::UpdateLiveReg(const DepNode &node, regno_t reg, bool d liveReg.insert(reg); } /* if no use list, a register is only defined, not be used */ - auto it = node.GetRegDefs().find(reg); - ASSERT(it != node.GetRegDefs().end(), "not find reg!"); - if (it->second == nullptr) { + size_t i = 0; + for (auto defReg : node.GetDefRegnos()) { + if (defReg == reg) { + break; + } + ++i; + } + if (node.GetRegDefs(i) == nullptr) { liveReg.erase(reg); } } else { @@ -206,21 +249,18 @@ void RegPressureSchedule::UpdateLiveReg(const DepNode &node, regno_t reg, bool d /* update register pressure information.
*/ void RegPressureSchedule::UpdateBBPressure(const DepNode &node) { - for (auto &useReg : node.GetRegUses()) { - auto reg = useReg.first; - + size_t idx = 0; + for (auto &reg : node.GetUseRegnos()) { #ifdef PRESCHED_DEBUG UpdateLiveReg(node, reg, false); if (liveReg.find(reg) == liveReg.end()) { + ++idx; continue; } #endif /* find all insn that use the reg, if a insn use the reg lastly, insn'pressure - 1 */ - auto it = node.GetRegUses().find(reg); - ASSERT(it->second != nullptr, "valid iterator check"); - ASSERT(it != node.GetRegUses().end(), "not find reg!"); - RegList *regList = it->second; + RegList *regList = node.GetRegUses(idx); while (regList != nullptr) { CHECK_NULL_FATAL(regList->insn); @@ -236,11 +276,12 @@ void RegPressureSchedule::UpdateBBPressure(const DepNode &node) { } break; } + ++idx; } #ifdef PRESCHED_DEBUG - for (auto &defReg : node.GetRegDefs()) { - UpdateLiveReg(node, defReg.first, true); + for (auto &defReg : node.GetDefRegnos()) { + UpdateLiveReg(node, defReg, true); } #endif @@ -249,7 +290,7 @@ void RegPressureSchedule::UpdateBBPressure(const DepNode &node) { #ifdef PRESCHED_DEBUG LogInfo::MapleLogger() << "node's pressure: "; for (auto pressure : pressures) { - LogInfo::MapleLogger() << pressure[i] << " "; + LogInfo::MapleLogger() << pressure << " "; } LogInfo::MapleLogger() << "\n"; #endif @@ -266,16 +307,16 @@ void RegPressureSchedule::UpdateBBPressure(const DepNode &node) { /* update node priority and try to update the priority of all node's ancestor. */ void RegPressureSchedule::UpdatePriority(DepNode &node) { std::vector workQueue; - workQueue.push_back(&node); + workQueue.emplace_back(&node); node.SetPriority(maxPriority++); do { DepNode *nowNode = workQueue.front(); workQueue.erase(workQueue.begin()); for (auto pred : nowNode->GetPreds()) { DepNode &from = pred->GetFrom(); - if (from.GetState() != kScheduled && from.GetPriority() != maxPriority) { + if (from.GetState() != kScheduled && from.GetPriority() < maxPriority) { from.SetPriority(maxPriority); - workQueue.push_back(&from); + workQueue.emplace_back(&from); } } } while (!workQueue.empty()); @@ -287,17 +328,26 @@ bool RegPressureSchedule::CanSchedule(const DepNode &node) const { } /* + * delete node from readylist and * add the successor of node to readyList when * 1. successor has no been scheduled; * 2. successor's has been scheduled or the dependence between node and successor is true-dependence.
*/ void RegPressureSchedule::UpdateReadyList(const DepNode &node) { + /* delete node from readylist */ + for (auto it = readyList.begin(); it != readyList.end(); ++it) { + if (*it == &node) { + readyList.erase(it); + break; + } + } + for (auto *succ : node.GetSuccs()) { DepNode &succNode = succ->GetTo(); succNode.DescreaseValidPredsSize(); if (((succ->GetDepType() == kDependenceTypeTrue) || CanSchedule(succNode)) && (succNode.GetState() == kNormal)) { - readyList.push_back(&succNode); + readyList.emplace_back(&succNode); succNode.SetState(kReady); } } @@ -307,7 +357,7 @@ void RegPressureSchedule::UpdateReadyList(const DepNode &node) { DepNode *RegPressureSchedule::ChooseNode() { DepNode *node = nullptr; for (auto *it : readyList) { - if (!it->GetIncPressure()) { + if (!it->GetIncPressure() && !it->GetHasNativeCallRegister()) { if (CanSchedule(*it)) { return it; } else if (node == nullptr) { @@ -391,6 +441,9 @@ void RegPressureSchedule::DoScheduling(MapleVector &nodes) { for (DepNode *it : readyList) { ReCalculateDepNodePressure(*it); } + if (readyList.size() > 1) { + SortReadyList(); + } /* choose a node can be scheduled currently. */ DepNode *node = ChooseNode(); @@ -404,22 +457,18 @@ void RegPressureSchedule::DoScheduling(MapleVector &nodes) { UpdatePriority(*node); SortReadyList(); node = readyList.front(); +#ifdef PRESCHED_DEBUG + LogInfo::MapleLogger() << "update ready list: " << "\n"; + DumpReadyList(); +#endif } - scheduledNode.push_back(node); + scheduledNode.emplace_back(node); /* mark node has scheduled */ node->SetState(kScheduled); UpdateBBPressure(*node); CalculateNear(*node); - /* delete node from readylist */ - for (auto it = readyList.begin(); it != readyList.end(); ++it) { - if (*it == node) { - readyList.erase(it); - break; - } - } UpdateReadyList(*node); - SortReadyList(); #ifdef PRESCHED_DEBUG DumpSelectInfo(*node); #endif @@ -431,7 +480,7 @@ void RegPressureSchedule::DoScheduling(MapleVector &nodes) { /* update nodes according to scheduledNode. 
*/ nodes.clear(); for (auto node : scheduledNode) { - nodes.push_back(node); + nodes.emplace_back(node); } } diff --git a/src/maple_driver/defs/phases.def b/src/maple_driver/defs/phases.def index 66aa557bf7f73b80b2c3621dc612fca20cc0048e..289690c15471d0616cd976a906e1e324620f4af2 100644 --- a/src/maple_driver/defs/phases.def +++ b/src/maple_driver/defs/phases.def @@ -21,7 +21,6 @@ ADD_PHASE("vtableanalysis", true) ADD_PHASE("reflectionanalysis", JAVALANG) ADD_PHASE("gencheckcast", JAVALANG) ADD_PHASE("javaintrnlowering", JAVALANG) -ADD_PHASE("analyzector", true) // mephase begin ADD_PHASE("bypatheh", MeOption::optLevel == 2) ADD_PHASE("loopcanon", MeOption::optLevel == 2) @@ -30,6 +29,7 @@ ADD_PHASE("ssatab", MeOption::optLevel == 2 || JAVALANG) ADD_PHASE("aliasclass", MeOption::optLevel == 2 || JAVALANG) ADD_PHASE("ssa", MeOption::optLevel == 2 || JAVALANG) ADD_PHASE("dse", MeOption::optLevel == 2) +ADD_PHASE("analyzector", JAVALANG) ADD_PHASE("abcopt", MeOption::optLevel == 2) ADD_PHASE("ssadevirt", MeOption::optLevel == 2) ADD_PHASE("hprop", MeOption::optLevel == 2) diff --git a/src/maple_driver/include/option_descriptor.h b/src/maple_driver/include/option_descriptor.h index a9893e196dda82a2f4016a25946b3e7ef2de2d72..9a1a4f9c18912b94842205dc9fbf432257738a59 100644 --- a/src/maple_driver/include/option_descriptor.h +++ b/src/maple_driver/include/option_descriptor.h @@ -36,11 +36,11 @@ enum ArgCheckPolicy { kArgCheckPolicyBool }; -constexpr unsigned int kMaxExtraOptions = 10; +constexpr size_t kMaxExtraOptions = 10; struct Descriptor { // Unique option index - const unsigned int index; + const size_t index; // Unique option type const int type; @@ -80,22 +80,22 @@ struct Descriptor { default: // should never reach return true; - } } + } }; class Option { public: - Option(Descriptor desc, const std::string &optionKey, const std::string &args) + Option(const Descriptor &desc, const std::string &optionKey, const std::string &args) : descriptor(desc), optionKey(optionKey), args(args) {} ~Option() = default; - unsigned int Index() const { + size_t Index() const { return descriptor.index; } - unsigned int Type() const { + int Type() const { return descriptor.type; } @@ -113,7 +113,7 @@ class Option { std::vector GetExtras() const { auto ret = std::vector(); - unsigned int index = 0; + size_t index = 0; while (index < kMaxExtraOptions && index < descriptor.extras.size()) { ret.push_back(descriptor.extras[index++]); } diff --git a/src/maple_driver/include/option_parser.h b/src/maple_driver/include/option_parser.h index eee758260b769c292b6407d0c9b3e95327529235..b02ebb4983650c2f3ce7a91d91f38701661c5b73 100644 --- a/src/maple_driver/include/option_parser.h +++ b/src/maple_driver/include/option_parser.h @@ -53,7 +53,7 @@ class OptionParser { bool SetOption(const std::string &key, const std::string &value, const std::string &exeName, std::vector &exeOption); - void PrintUsage(const std::string &helpType, const unsigned int helpLevel = kBuildTypeDefault) const; + void PrintUsage(const std::string &helpType, const uint32_t helpLevel = kBuildTypeDefault) const; private: bool HandleKeyValue(const std::string &key, const std::string &value, diff --git a/src/maple_driver/src/option_parser.cpp b/src/maple_driver/src/option_parser.cpp index da376de2033af5059363a9bb4442eb90776f21e6..e95dd8673a8707f5985fb5c5b8256064a7f9b9d5 100644 --- a/src/maple_driver/src/option_parser.cpp +++ b/src/maple_driver/src/option_parser.cpp @@ -117,7 +117,7 @@ void OptionParser::RegisteUsages(const Descriptor usage[]) { } } -void 
OptionParser::PrintUsage(const std::string &helpType, const unsigned int helpLevel) const { +void OptionParser::PrintUsage(const std::string &helpType, const uint32_t helpLevel) const { for (size_t i = 0; i < rawUsages.size(); ++i) { if (rawUsages[i].help != "" && rawUsages[i].IsEnabledForCurrentBuild() && rawUsages[i].exeName == helpType) { if (helpLevel != kBuildTypeDefault && diff --git a/src/maple_ipa/include/call_graph.h b/src/maple_ipa/include/call_graph.h index 57559c9a402851a58ea2dd1a7437013bf75c4003..30db4948eeb308bc58c3a76522f8caa5d9cfdf1f 100644 --- a/src/maple_ipa/include/call_graph.h +++ b/src/maple_ipa/include/call_graph.h @@ -123,7 +123,7 @@ class CGNode { mustNotBeInlined(false), vcallCands(alloc->Adapter()) {} - ~CGNode() {} + ~CGNode() = default; void Dump(std::ofstream &fout) const; void DumpDetail() const; @@ -359,7 +359,7 @@ class SCCNode { class CallGraph : public AnalysisResult { public: CallGraph(MIRModule &m, MemPool &memPool, KlassHierarchy &kh, const std::string &fn); - ~CallGraph() {} + ~CallGraph() = default; void InitCallExternal() { callExternal = cgAlloc.GetMemPool()->New(static_cast(nullptr), cgAlloc, numOfNodes++); @@ -392,7 +392,7 @@ class CallGraph : public AnalysisResult { void HandleBody(MIRFunction&, BlockNode&, CGNode&, uint32); void AddCallGraphNode(MIRFunction&); - void DumpToFile(bool dumpAll = true); + void DumpToFile(bool dumpAll = true) const; void Dump() const; CGNode *GetCGNode(MIRFunction *func) const; CGNode *GetCGNode(PUIdx puIdx) const; @@ -429,7 +429,7 @@ class CallGraph : public AnalysisResult { void DelNode(CGNode &node); void BuildSCC(); void VerifySCC() const; - void BuildSCCDFS(CGNode &caller, unsigned int &visitIndex, std::vector &sccNodes, + void BuildSCCDFS(CGNode &caller, uint32 &visitIndex, std::vector &sccNodes, std::vector &cgNodes, std::vector &visitedOrder); void SetDebugFlag(bool flag) { @@ -478,7 +478,7 @@ class DoCallGraph : public ModulePhase { return "callgraph"; } - virtual ~DoCallGraph() {}; + ~DoCallGraph() = default; }; class IPODevirtulize { @@ -510,7 +510,7 @@ class DoIPODevirtulize : public ModulePhase { return "ipodevirtulize"; } - virtual ~DoIPODevirtulize() {}; + ~DoIPODevirtulize() = default; }; } // namespace maple #endif // MAPLE_IPA_INCLUDE_CALLGRAPH_H diff --git a/src/maple_ipa/include/clone.h b/src/maple_ipa/include/clone.h index 2ae866a71729d42aa07459cebcb2166d2273fa24..0a37ef3288980848b6aafd8d60b5efd829a5f66c 100644 --- a/src/maple_ipa/include/clone.h +++ b/src/maple_ipa/include/clone.h @@ -31,7 +31,7 @@ namespace maple { class ReplaceRetIgnored { public: explicit ReplaceRetIgnored(MemPool *memPool); - virtual ~ReplaceRetIgnored() = default; + ~ReplaceRetIgnored() = default; bool ShouldReplaceWithVoidFunc(const CallMeStmt &stmt, const MIRFunction &calleeFunc) const; std::string GenerateNewBaseName(const MIRFunction &originalFunc) const; @@ -91,7 +91,7 @@ class DoClone : public ModulePhase { public: explicit DoClone(ModulePhaseID id) : ModulePhase(id) {} - virtual ~DoClone() = default; + ~DoClone() = default; AnalysisResult *Run(MIRModule *module, ModuleResultMgr *mgr) override; std::string PhaseName() const override { diff --git a/src/maple_ipa/include/module_phases.def b/src/maple_ipa/include/module_phases.def index ddecb1390f61501fb551a9b9b42a563afcad1af1..e9b4df485702010b34128261d35ff3e06cf43b62 100644 --- a/src/maple_ipa/include/module_phases.def +++ b/src/maple_ipa/include/module_phases.def @@ -26,7 +26,6 @@ MODTPHASE(MoPhase_JAVAINTRNLOWERING, DoJavaIntrnLowering) MODTPHASE(MoPhase_JAVAEHLOWER, 
JavaEHLowererPhase) MODTPHASE(MoPhase_MUIDREPLACEMENT, DoMUIDReplacement) MODTPHASE(MoPhase_CHECKCASTGENERATION, DoCheckCastGeneration) -MODTPHASE(MoPhase_ANALYZECTOR, DoAnalyzeCtor) MODTPHASE(MoPhase_CodeReLayout, DoCodeReLayout) #endif MODTPHASE(MoPhase_CONSTANTFOLD, DoConstantFold) diff --git a/src/maple_ipa/include/retype.h b/src/maple_ipa/include/retype.h index 670a5ab713a934673a98f263a45cf6fd630cd34a..6b27bc49ab25b6ef558e37f2230443665f85b8ae 100644 --- a/src/maple_ipa/include/retype.h +++ b/src/maple_ipa/include/retype.h @@ -23,7 +23,7 @@ class Retype { public: Retype(MIRModule *mod, MemPool *memPool) : mirModule(mod), allocator(memPool) {} - virtual ~Retype() {} + ~Retype() = default; void ReplaceRetypeExpr(const BaseNode &opnd); void RetypeStmt(MIRFunction &func); diff --git a/src/maple_ipa/src/call_graph.cpp b/src/maple_ipa/src/call_graph.cpp index 5db89f442139bb37b7878f25fb4c6f569c25c466..fea788965e529a89db7688fa24b8e0d7c028d09a 100644 --- a/src/maple_ipa/src/call_graph.cpp +++ b/src/maple_ipa/src/call_graph.cpp @@ -200,7 +200,7 @@ void CallGraph::DelNode(CGNode &node) { if (func->GetClassTyIdx() != 0u) { MIRType *classType = GlobalTables::GetTypeTable().GetTypeTable().at(func->GetClassTyIdx()); auto *mirStructType = static_cast(classType); - uint32 j = 0; + size_t j = 0; for (; j < mirStructType->GetMethods().size(); ++j) { if (mirStructType->GetMethodsElement(j).first == func->GetStIdx()) { mirStructType->GetMethods().erase(mirStructType->GetMethods().begin() + j); @@ -225,7 +225,7 @@ void CallGraph::DelNode(CGNode &node) { } nodesMap.erase(func); // Update Klass info as it has been built - if (klassh->GetKlassFromFunc(func)) { + if (klassh->GetKlassFromFunc(func) != nullptr) { klassh->GetKlassFromFunc(func)->DelMethod(*func); } } @@ -429,8 +429,8 @@ void CallGraph::HandleBody(MIRFunction &func, BlockNode &body, CGNode &node, uin DoloopNode *doloopNode = static_cast(stmt); HandleBody(func, *doloopNode->GetDoBody(), node, loopDepth + 1); } else if (op == OP_dowhile || op == OP_while) { - WhileStmtNode *n = static_cast(stmt); - HandleBody(func, *n->GetBody(), node, loopDepth + 1); + WhileStmtNode *whileStmt = static_cast(stmt); + HandleBody(func, *whileStmt->GetBody(), node, loopDepth + 1); } else if (op == OP_if) { IfStmtNode *n = static_cast(stmt); HandleBody(func, *n->GetThenPart(), node, loopDepth); @@ -472,7 +472,7 @@ void CallGraph::HandleBody(MIRFunction &func, BlockNode &body, CGNode &node, uin std::string funcName = klass->GetKlassName(); funcName.append((namemangler::kNameSplitterStr)); funcName.append(calleeFuncT->GetBaseFuncNameWithType()); - MIRFunction *methodT = mirBuilder->GetOrCreateFunction(funcName, (TyIdx) (PTY_void)); + MIRFunction *methodT = mirBuilder->GetOrCreateFunction(funcName, static_cast(PTY_void)); methodT->SetBaseClassNameStrIdx(klass->GetKlassNameStrIdx()); methodT->SetBaseFuncNameWithTypeStrIdx(calleeFuncStrIdx); calleePUIdx = methodT->GetPuidx(); @@ -593,39 +593,39 @@ void CallGraph::AddCallGraphNode(MIRFunction &func) { } static void ResetInferredType(std::vector &inferredSymbols) { - for (unsigned int i = 0; i < inferredSymbols.size(); ++i) { + for (size_t i = 0; i < inferredSymbols.size(); ++i) { inferredSymbols[i]->SetInferredTyIdx(TyIdx()); } inferredSymbols.clear(); } -static void ResetInferredType(std::vector &inferredSymbols, MIRSymbol *s) { - if (s == nullptr) { +static void ResetInferredType(std::vector &inferredSymbols, MIRSymbol *symbol) { + if (symbol == nullptr) { return; } - if (s->GetInferredTyIdx() == kInitTyIdx || 
s->GetInferredTyIdx() == kNoneTyIdx) { + if (symbol->GetInferredTyIdx() == kInitTyIdx || symbol->GetInferredTyIdx() == kNoneTyIdx) { return; } - unsigned int i = 0; + size_t i = 0; for (; i < inferredSymbols.size(); ++i) { - if (inferredSymbols[i] == s) { - s->SetInferredTyIdx(TyIdx()); + if (inferredSymbols[i] == symbol) { + symbol->SetInferredTyIdx(TyIdx()); inferredSymbols.erase(inferredSymbols.begin() + i); break; } } } -static void SetInferredType(std::vector &inferredSymbols, MIRSymbol &s, TyIdx idx) { - s.SetInferredTyIdx(idx); - unsigned int i = 0; +static void SetInferredType(std::vector &inferredSymbols, MIRSymbol &symbol, TyIdx idx) { + symbol.SetInferredTyIdx(idx); + size_t i = 0; for (; i < inferredSymbols.size(); ++i) { - if (inferredSymbols[i] == &s) { + if (inferredSymbols[i] == &symbol) { break; } } if (i == inferredSymbols.size()) { - inferredSymbols.push_back(&s); + inferredSymbols.push_back(&symbol); } } @@ -661,7 +661,7 @@ void IPODevirtulize::SearchDefInClinit(const Klass &klass) { case OP_dassign: { DassignNode *dassignNode = static_cast(stmt); MIRSymbol *leftSymbol = func->GetLocalOrGlobalSymbol(dassignNode->GetStIdx()); - unsigned i = 0; + size_t i = 0; for (; i < staticFinalPrivateSymbols.size(); ++i) { if (staticFinalPrivateSymbols[i] == leftSymbol) { break; @@ -709,7 +709,7 @@ void IPODevirtulize::SearchDefInClinit(const Klass &klass) { // ignore all side effect of initizlizor continue; } - for (unsigned int i = 0; i < callNode->GetReturnVec().size(); ++i) { + for (size_t i = 0; i < callNode->GetReturnVec().size(); ++i) { StIdx stIdx = callNode->GetReturnPair(i).first; MIRSymbol *tmpSymbol = func->GetLocalOrGlobalSymbol(stIdx); ResetInferredType(gcmallocSymbols, tmpSymbol); @@ -744,7 +744,7 @@ void IPODevirtulize::SearchDefInMemberMethods(const Klass &klass) { SearchDefInClinit(klass); MIRClassType *classType = static_cast(klass.GetMIRStructType()); std::vector finalPrivateFieldID; - for (uint32 i = 0; i < classType->GetFieldsSize(); ++i) { + for (size_t i = 0; i < classType->GetFieldsSize(); ++i) { FieldAttrs attribute = classType->GetFieldsElemt(i).second.second; if (attribute.GetAttr(FLDATTR_final)) { FieldID id = mirBuilder->GetStructFieldIDFromFieldNameParentFirst( @@ -755,8 +755,8 @@ void IPODevirtulize::SearchDefInMemberMethods(const Klass &klass) { std::vector initMethods; std::string typeName = klass.GetKlassName(); typeName.append(namemangler::kCinitStr); - for (MIRFunction *const &method : klass.GetMethods()) { - if (!strncmp(method->GetName().c_str(), typeName.c_str(), typeName.length())) { + for (MIRFunction * const &method : klass.GetMethods()) { + if (strncmp(method->GetName().c_str(), typeName.c_str(), typeName.length()) == 0) { initMethods.push_back(method); } } @@ -765,7 +765,7 @@ void IPODevirtulize::SearchDefInMemberMethods(const Klass &klass) { } ASSERT(!initMethods.empty(), "Must have initializor"); StmtNode *stmtNext = nullptr; - for (unsigned int i = 0; i < initMethods.size(); ++i) { + for (size_t i = 0; i < initMethods.size(); ++i) { MIRFunction *func = initMethods[i]; if (func->GetBody() == nullptr) { continue; @@ -837,7 +837,7 @@ void IPODevirtulize::SearchDefInMemberMethods(const Klass &klass) { if (pointedType->GetPointedTyIdx() == classType->GetTypeIndex()) { // set field of current class FieldID fieldID = iassignNode->GetFieldID(); - unsigned j = 0; + size_t j = 0; for (; j < finalPrivateFieldID.size(); ++j) { if (finalPrivateFieldID[j] == fieldID) { break; @@ -935,9 +935,9 @@ void DoDevirtual(const Klass &klass, const 
KlassHierarchy &klassh) { std::vector klassVector; klassVector.push_back(currKlass); bool hasDevirtualed = false; - for (unsigned int index = 0; index < klassVector.size(); ++index) { + for (size_t index = 0; index < klassVector.size(); ++index) { Klass *tmpKlass = klassVector[index]; - for (MIRFunction *const &method : tmpKlass->GetMethods()) { + for (MIRFunction * const &method : tmpKlass->GetMethods()) { if (calleeFunc->GetBaseFuncNameWithTypeStrIdx() == method->GetBaseFuncNameWithTypeStrIdx()) { calleeNode->SetPUIdx(method->GetPuidx()); if (op == OP_virtualcall || op == OP_interfacecall) { @@ -979,7 +979,7 @@ void DoDevirtual(const Klass &klass, const KlassHierarchy &klassh) { } if (op == OP_interfacecallassigned || op == OP_virtualcallassigned) { CallNode *callNode = static_cast(stmt); - for (unsigned int i = 0; i < callNode->GetReturnVec().size(); ++i) { + for (size_t i = 0; i < callNode->GetReturnVec().size(); ++i) { StIdx stIdx = callNode->GetReturnPair(i).first; MIRSymbol *tmpSymbol = func->GetLocalOrGlobalSymbol(stIdx); ResetInferredType(inferredSymbols, tmpSymbol); @@ -991,7 +991,7 @@ void DoDevirtual(const Klass &klass, const KlassHierarchy &klassh) { Klass *tmpInterface = nullptr; MIRFunction *tmpMethod = nullptr; for (Klass *iKlass : currKlass->GetImplInterfaces()) { - for (MIRFunction *const &method : iKlass->GetMethods()) { + for (MIRFunction * const &method : iKlass->GetMethods()) { if (calleeFunc->GetBaseFuncNameWithTypeStrIdx() == method->GetBaseFuncNameWithTypeStrIdx() && !method->GetFuncAttrs().GetAttr(FUNCATTR_abstract)) { if (tmpInterface == nullptr || klassh.IsSuperKlassForInterface(tmpInterface, iKlass)) { @@ -1010,14 +1010,14 @@ void DoDevirtual(const Klass &klass, const KlassHierarchy &klassh) { if (parentKlass->GetKlassName() == currKlass->GetKlassName()) { flag = true; } else { - for (Klass *const &superclass : currKlass->GetSuperKlasses()) { + for (Klass * const &superclass : currKlass->GetSuperKlasses()) { if (parentKlass->GetKlassName() == superclass->GetKlassName()) { flag = true; break; } } if (!flag && parentKlass->IsInterface()) { - for (Klass *const &implClass : currKlass->GetImplKlasses()) { + for (Klass * const &implClass : currKlass->GetImplKlasses()) { if (parentKlass->GetKlassName() == implClass->GetKlassName()) { flag = true; break; @@ -1050,7 +1050,7 @@ void DoDevirtual(const Klass &klass, const KlassHierarchy &klassh) { Klass *curRetKlass = nullptr; bool isCurrVtabScalar = false; bool isFindMethod = false; - for (MIRFunction *const &method : currKlass->GetMethods()) { + for (MIRFunction * const &method : currKlass->GetMethods()) { if (calleeFunc->GetBaseFuncSigStrIdx() == method->GetBaseFuncSigStrIdx()) { Klass *tmpKlass = nullptr; MIRType *tmpType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(method->GetReturnTyIdx()); @@ -1186,16 +1186,16 @@ void IPODevirtulize::DevirtualFinal() { classType->SetElemInferredTyIdx(i, kInitTyIdx); } SearchDefInMemberMethods(*klass); - for (unsigned int i = 0; i < classType->GetFieldInferredTyIdx().size(); ++i) { + for (size_t i = 0; i < classType->GetFieldInferredTyIdx().size(); ++i) { if (classType->GetElemInferredTyIdx(i) != kInitTyIdx && classType->GetElemInferredTyIdx(i) != kNoneTyIdx && debugFlag) { - FieldID tmpID = i; + FieldID tmpID = static_cast(i); FieldPair pair = classType->TraverseToFieldRef(tmpID); LogInfo::MapleLogger() << "Inferred Final Private None-Static Variable:" + klass->GetKlassName() + ":" + GlobalTables::GetStrTable().GetStringFromStrIdx(pair.first) << '\n'; } } - for (uint32 i = 0; i < 
classType->GetStaticFields().size(); ++i) { + for (size_t i = 0; i < classType->GetStaticFields().size(); ++i) { FieldAttrs attribute = classType->GetStaticFieldsPair(i).second.second; if (GlobalTables::GetGsymTable().GetSymbolFromStrIdx(classType->GetStaticFieldsGStrIdx(i)) == nullptr) { continue; @@ -1252,8 +1252,8 @@ void CallGraph::GenCallGraph() { if (cands == nullptr || cands->empty()) { continue; // Fix CI } - MIRFunction *actualMirfunc = cands->at(0); - CGNode *tempNode = GetOrGenCGNode(actualMirfunc->GetPuidx()); + MIRFunction *actualMirFunc = cands->at(0); + CGNode *tempNode = GetOrGenCGNode(actualMirFunc->GetPuidx()); ASSERT(tempNode != nullptr, "calleenode is null in CallGraph::HandleBody"); node->AddCallsite(*info, tempNode); } @@ -1306,7 +1306,7 @@ void CallGraph::Dump() const { } } -void CallGraph::DumpToFile(bool dumpAll) { +void CallGraph::DumpToFile(bool dumpAll) const { if (Options::noDot) { return; } @@ -1377,7 +1377,7 @@ void CallGraph::SetCompilationFunclist() const { for (auto const kIt : sccNode->GetCGNodes()) { CGNode *node = kIt; MIRFunction *func = node->GetMIRFunction(); - if ((func != nullptr && func->GetBody() && !IsInIPA()) || (func != nullptr && !func->IsNative())) { + if ((func != nullptr && func->GetBody() != nullptr && !IsInIPA()) || (func != nullptr && !func->IsNative())) { mirModule->GetCompilationList().push_back(func); mirModule->GetFunctionList().push_back(func); } @@ -1434,7 +1434,7 @@ void SCCNode::Dump() const { LogInfo::MapleLogger() << "SCC " << id << " contains\n"; for (auto const kIt : cgNodes) { CGNode *node = kIt; - if (node->GetMIRFunction()) { + if (node->GetMIRFunction() != nullptr) { LogInfo::MapleLogger() << " function(" << node->GetMIRFunction()->GetPuidx() << "): " << node->GetMIRFunction()->GetName() << "\n"; } else { @@ -1454,7 +1454,7 @@ void SCCNode::DumpCycle() const { for (auto &cgIt : *callSite.second) { CGNode *calleeNode = cgIt; if (calleeNode->GetSCCNode() == this) { - unsigned int j = 0; + size_t j = 0; for (; j < invalidNodes.size(); ++j) { if (invalidNodes[j] == calleeNode) { break; @@ -1496,7 +1496,7 @@ void SCCNode::Verify() const { if (cgNodes.size() <= 0) { CHECK_FATAL(false, "the size of cgNodes less than zero"); } - for (CGNode *const &node : cgNodes) { + for (CGNode * const &node : cgNodes) { if (node->GetSCCNode() != this) { CHECK_FATAL(false, "must equal this"); } @@ -1504,7 +1504,7 @@ void SCCNode::Verify() const { } void SCCNode::Setup() { - for (CGNode *const &node : cgNodes) { + for (CGNode * const &node : cgNodes) { for (auto &callSite : node->GetCallee()) { for (auto &cgIt : *callSite.second) { CGNode *calleeNode = cgIt; @@ -1543,7 +1543,7 @@ void CallGraph::BuildSCCDFS(CGNode &caller, uint32 &visitIndex, std::vectorGetID(); - if (!visitedOrder.at(calleeId)) { + if (visitedOrder.at(calleeId) == 0) { // callee has not been processed yet BuildSCCDFS(*calleeNode, visitIndex, sccNodes, cgNodes, visitedOrder); if (lowestOrder.at(calleeId) < lowestOrder.at(id)) { @@ -1589,7 +1589,7 @@ void CallGraph::BuildSCC() { std::vector sccNodes; uint32 visitIndex = 1; // Starting from roots is a good strategy for DSF - for (CGNode *const &root : rootNodes) { + for (CGNode * const &root : rootNodes) { BuildSCCDFS(*root, visitIndex, sccNodes, cgNodes, visitedOrder); } // However, not all SCC can be reached from roots. 
@@ -1600,7 +1600,7 @@ void CallGraph::BuildSCC() { BuildSCCDFS(*node, visitIndex, sccNodes, cgNodes, visitedOrder); } } - for (SCCNode *const &scc : sccNodes) { + for (SCCNode * const &scc : sccNodes) { scc->Verify(); scc->Setup(); // fix caller and callee info. if (debugScc && scc->HasRecursion()) { @@ -1615,14 +1615,14 @@ void CallGraph::BuildSCC() { void CallGraph::SCCTopologicalSort(const std::vector &sccNodes) { std::set> inQueue; // Local variable, no need to use MapleSet - for (SCCNode *const &node : sccNodes) { + for (SCCNode * const &node : sccNodes) { if (!node->HasCaller()) { sccTopologicalVec.push_back(node); inQueue.insert(node); } } // Top-down iterates all nodes - for (unsigned i = 0; i < sccTopologicalVec.size(); ++i) { + for (size_t i = 0; i < sccTopologicalVec.size(); ++i) { SCCNode *sccNode = sccTopologicalVec[i]; for (SCCNode *callee : sccNode->GetCalleeScc()) { if (inQueue.find(callee) == inQueue.end()) { @@ -1667,7 +1667,7 @@ MIRFunction *CGNode::HasOneCandidate() const { cand = mirFunc; } // scan candidates - for (uint32 i = 0; i < vcallCands.size(); ++i) { + for (size_t i = 0; i < vcallCands.size(); ++i) { if (vcallCands[i] == nullptr) { CHECK_FATAL(false, "must not be nullptr"); } diff --git a/src/maple_ipa/src/clone.cpp b/src/maple_ipa/src/clone.cpp index 972726165b7c9e1643385415ea24df657567c451..60e2b352db93373b4491d245dab35d5d476175ad 100644 --- a/src/maple_ipa/src/clone.cpp +++ b/src/maple_ipa/src/clone.cpp @@ -83,7 +83,7 @@ void Clone::CloneLabels(MIRFunction &newFunc, const MIRFunction &oldFunc) { for (size_t i = 1; i < labelTabSize; ++i) { const std::string &labelName = oldFunc.GetLabelTabItem(i); GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(labelName); - newFunc.GetLabelTab()->AddLabel(strIdx); + (void)newFunc.GetLabelTab()->AddLabel(strIdx); } } diff --git a/src/maple_ipa/src/module_phase_manager.cpp b/src/maple_ipa/src/module_phase_manager.cpp index bf1517186443257112468833e02d9d21c0e160df..daeb1d019e832ca0659c862f192f686886895f06 100644 --- a/src/maple_ipa/src/module_phase_manager.cpp +++ b/src/maple_ipa/src/module_phase_manager.cpp @@ -31,7 +31,6 @@ #include "native_stub_func.h" #include "muid_replacement.h" #include "gen_check_cast.h" -#include "analyzector.h" #include "coderelayout.h" #include "constantfold.h" #endif // ~MIR_JAVA diff --git a/src/maple_ir/include/mir_module.h b/src/maple_ir/include/mir_module.h index 2098bcf1cd1cf6adc3e3ddaf91ed4bc1655c1e9a..56137a8fbab8d2d2b035870cf908e4c4a97dea63 100644 --- a/src/maple_ir/include/mir_module.h +++ b/src/maple_ir/include/mir_module.h @@ -24,6 +24,7 @@ #if MIR_FEATURE_FULL #include #include +#include #include "mempool.h" #include "mempool_allocator.h" #include "maple_string.h" @@ -294,9 +295,11 @@ class MIRModule { } const MapleMap*> &GetPuIdxFieldInitializedMap() const { + std::shared_lock lock(fieldMapMutex); return puIdxFieldInitializedMap; } void SetPuIdxFieldSet(PUIdx puIdx, MapleSet *fieldIDSet) { + std::unique_lock lock(fieldMapMutex); puIdxFieldInitializedMap[puIdx] = fieldIDSet; } const auto &GetRealCaller() const { @@ -308,6 +311,7 @@ class MIRModule { } const MapleSet *GetPUIdxFieldInitializedMapItem(PUIdx key) const { + std::shared_lock lock(fieldMapMutex); auto it = puIdxFieldInitializedMap.find(key); if (it != puIdxFieldInitializedMap.end()) { return it->second; @@ -562,6 +566,7 @@ class MIRModule { // if puIdx appears in the map, and the value of first corresponding MapleSet is 0, the puIdx appears in this module // and writes to all field id otherwise, it 
writes the field ids in MapleSet MapleMap*> puIdxFieldInitializedMap; + mutable std::shared_timed_mutex fieldMapMutex; std::map, GStrIdx> realCaller; }; #endif // MIR_FEATURE_FULL diff --git a/src/maple_me/BUILD.gn b/src/maple_me/BUILD.gn index cba7020bb685f51b4e9bfe7f42dd49b257172d39..01e63614ffc6db844e1aa1f57288d294fe0fdba3 100644 --- a/src/maple_me/BUILD.gn +++ b/src/maple_me/BUILD.gn @@ -44,6 +44,7 @@ src_libmplme = [ "src/me_function.cpp", "src/me_loop_analysis.cpp", "src/me_irmap.cpp", + "src/me_analyzector.cpp", "src/me_loop_canon.cpp", "src/me_option.cpp", "src/me_phase_manager.cpp", @@ -70,6 +71,7 @@ src_libmplme = [ "src/occur.cpp", "src/me_inequality_graph.cpp", "src/me_abco.cpp", + "src/me_ssi.cpp", ] src_libmplmewpo = [ diff --git a/src/maple_me/include/me_abco.h b/src/maple_me/include/me_abco.h index a1f40f7ade83995c78127cef8306e12928eb1957..004d89542f781156dcf2e7f9401bb6d7685d0f7c 100755 --- a/src/maple_me/include/me_abco.h +++ b/src/maple_me/include/me_abco.h @@ -21,18 +21,9 @@ #include "me_cfg.h" #include "mir_module.h" #include "mir_builder.h" +#include "me_ssi.h" namespace maple { -struct StmtComparator { - bool operator()(const std::pair &lhs, const std::pair &rhs) const { - if (lhs.first != rhs.first) { - return lhs.first < rhs.first; - } else { - return lhs.second < rhs.second; - } - } -}; - class CarePoint { public: enum CareKind { @@ -86,116 +77,6 @@ class CarePoint { CareStmt value; }; -class DefPoint { - public: - enum DefineKind { - kDefByPi, - kDefByPhi - }; - - explicit DefPoint(DefineKind dk) : defKind(dk) {} - ~DefPoint() = default; - - void SetDefPi(PiassignMeStmt &s) { - CHECK_FATAL(defKind == kDefByPi, "must be"); - value.pi = &s; - } - - PiassignMeStmt *GetPiStmt() { - CHECK_FATAL(defKind == kDefByPi, "must be"); - return value.pi; - } - - const PiassignMeStmt *GetPiStmt() const { - CHECK_FATAL(defKind == kDefByPi, "must be"); - return value.pi; - } - - void SetDefPhi(MePhiNode &s) { - CHECK_FATAL(defKind == kDefByPhi, "must be"); - value.phi = &s; - } - - BB *GetBB() const { - if (defKind == kDefByPi) { - return value.pi->GetBB(); - } else { - return value.phi->GetDefBB(); - } - } - - BB *GetGeneratedByBB() const { - CHECK_FATAL(defKind == kDefByPi, "must be"); - return value.pi->GetGeneratedBy()->GetBB(); - } - - VarMeExpr *GetRHS() const { - if (defKind == kDefByPi) { - return value.pi->GetRHS(); - } else { - return static_cast(value.phi->GetOpnd(0)); - } - } - - VarMeExpr *GetLHS() const { - if (defKind == kDefByPi) { - return value.pi->GetLHS(); - } else { - return static_cast(value.phi->GetLHS()); - } - } - - const OStIdx &GetOStIdx() const { - if (defKind == kDefByPi) { - return value.pi->GetRHS()->GetOStIdx(); - } else { - return value.phi->GetOpnd(0)->GetOStIdx(); - } - } - - bool IsPiStmt() const { - return defKind == kDefByPi; - } - - bool IsGeneratedByBr() const { - CHECK_FATAL(defKind == kDefByPi, "must be"); - MeStmt *stmt = value.pi->GetGeneratedBy(); - if (stmt->GetOp() == OP_brtrue || stmt->GetOp() == OP_brfalse) { - return true; - } - return false; - } - - void RemoveFromBB() { - if (defKind == kDefByPi) { - if (IsGeneratedByBr()) { - GetBB()->GetPiList().clear(); - } else { - GetBB()->RemoveMeStmt(value.pi); - } - } else { - GetBB()->GetMePhiList().erase(GetOStIdx()); - } - } - - void Dump(const IRMap &irMap) { - LogInfo::MapleLogger() << "New Def : " << '\n'; - if (defKind == kDefByPi) { - value.pi->Dump(&irMap); - } else { - value.phi->Dump(&irMap); - } - LogInfo::MapleLogger() << '\n'; - } - - private: - DefineKind defKind; - union 
DefStmt { - PiassignMeStmt *pi; - MePhiNode *phi; - }; - DefStmt value; -}; class MeABC { public: @@ -208,43 +89,27 @@ class MeABC { allocator(&pool), inequalityGraph(nullptr), prove(nullptr), - currentCheck(nullptr) {} + currentCheck(nullptr) { + ssi = std::make_unique(meFunction, dom, map, pool, &arrayChecks, &containsBB); + ssi->SetSSIType(kArrayBoundsCheckOpt); + } ~MeABC() = default; void ExecuteABCO(); private: bool CollectABC(); - void RemoveExtraNodes(); - void InsertPiNodes(); - bool ExistedPhiNode(BB &bb, VarMeExpr &rhs); - void InsertPhiNodes(); - void Rename(); - void RenameStartPiBr(DefPoint &newDefPoint); - void RenameStartPiArray(DefPoint &newDefPoint); - void RenameStartPhi(DefPoint &newDefPoint); - void ReplacePiPhiInSuccs(BB &bb, VarMeExpr &newVar); - bool ReplaceStmt(MeStmt &meStmt, VarMeExpr &newVar, VarMeExpr &oldVar); - void ReplaceBB(BB &bb, BB &parentBB, DefPoint &newDefPoint); - bool IsLegal(MeStmt &meStmt); void ABCCollectArrayExpr(MeStmt &meStmt, MeExpr &meExpr, bool isUpdate = false); void CollectCareInsns(); - bool ExistedPiNode(BB &bb, BB &parentBB, const VarMeExpr &rhs); - void CreatePhi(VarMeExpr &rhs, BB &dfBB); - VarMeExpr *CreateNewPiExpr(const MeExpr &opnd); - void CreateNewPiStmt(VarMeExpr *lhs, MeExpr &rhs, BB &bb, MeStmt &generatedBy, bool isToken); - void CreateNewPiStmt(VarMeExpr *lhs, MeExpr &rhs, MeStmt &generatedBy); - MeExpr *ReplaceMeExprExpr(MeExpr &origExpr, MeExpr &oldVar, MeExpr &repExpr); - MeExpr *NewMeExpr(MeExpr &meExpr); - bool ReplaceMeExprStmtOpnd(uint32 opndID, MeStmt &meStmt, MeExpr &oldVar, MeExpr &newVar, bool update); - bool ReplaceStmtWithNewVar(MeStmt &meStmt, MeExpr &oldVar, MeExpr &newVar, bool update); bool IsVirtualVar(const VarMeExpr &var, const SSATab &ssaTab) const; ESSABaseNode *GetOrCreateRHSNode(MeExpr &expr); void BuildPhiInGraph(MePhiNode &phi); void BuildSoloPiInGraph(const PiassignMeStmt &piMeStmt); bool PiExecuteBeforeCurrentCheck(const PiassignMeStmt &piMeStmt); + void AddEdgePair(ESSABaseNode &from, ESSABaseNode &to, int64 value, EdgeType type); bool BuildArrayCheckInGraph(MeStmt &meStmt); bool BuildBrMeStmtInGraph(MeStmt &meStmt); bool BuildAssignInGraph(MeStmt &meStmt); + MeExpr *TryToResolveVar(MeExpr &expr, bool isConst); MeExpr *TryToResolveVar(MeExpr &expr, std::set &visitedPhi, MeExpr &dummyExpr, bool isConst); bool BuildStmtInGraph(MeStmt &meStmt); void AddUseDef(MeExpr &meExpr); @@ -285,18 +150,15 @@ class MeABC { std::unique_ptr inequalityGraph; std::unique_ptr prove; MeStmt *currentCheck; + std::unique_ptr ssi; std::map arrayChecks; std::map arrayNewChecks; std::set careMeStmts; std::set careMePhis; std::map> containsBB; - std::vector newDefPoints; std::vector carePoints; - std::map newDef2Old; - std::map, MeExpr*, StmtComparator> modifiedStmt; - std::map> modifiedPhi; - std::set visitedBBs; std::set targetMeStmt; + std::set visited; // map, c>; a = b + c b is relative with length, c is var weight std::map, MeExpr*> unresolveEdge; }; diff --git a/src/mpl2mpl/include/analyzector.h b/src/maple_me/include/me_analyzector.h similarity index 47% rename from src/mpl2mpl/include/analyzector.h rename to src/maple_me/include/me_analyzector.h index 78add925a4ae399d844fa5e1bb9047d8aea6e29b..d1be98c91b1dd8953ed054f685d601338688a2ad 100644 --- a/src/mpl2mpl/include/analyzector.h +++ b/src/maple_me/include/me_analyzector.h @@ -12,44 +12,47 @@ * FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v1 for more details. 
*/ -#ifndef MPL2MPL_INCLUDE_ANALYZECTOR_H -#define MPL2MPL_INCLUDE_ANALYZECTOR_H -#include "module_phase.h" -#include "phase_impl.h" +#ifndef MAPLEME_INCLUDE_ANALYZECTOR_H +#define MAPLEME_INCLUDE_ANALYZECTOR_H + +#include "me_function.h" +#include "me_phase.h" +#include "me_irmap.h" +#include "annotation_analysis.h" namespace maple { -class AnalyzeCtor : public FuncOptimizeImpl { - public: - AnalyzeCtor(MIRModule &mod, KlassHierarchy *kh, bool trace) : FuncOptimizeImpl(mod, kh, trace) {} - ~AnalyzeCtor() = default; +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wunused-private-field" - FuncOptimizeImpl *Clone() override { - return new AnalyzeCtor(*this); - } +class AnalyzeCtor { + public: + AnalyzeCtor(MeFunction &func, Dominance &dom, KlassHierarchy &kh) : func(&func), dominance(&dom), klassh(&kh) {} + virtual ~AnalyzeCtor() = default; - void ProcessFunc(MIRFunction *func) override; - void Finish() override; + virtual void ProcessFunc(); + void ProcessStmt(MeStmt &stmt); private: - void ProcessStmt(StmtNode&) override; bool hasSideEffect = false; std::unordered_set fieldSet; + MeFunction *func; + Dominance *dominance; + KlassHierarchy *klassh; + }; +#pragma clang diagnostic pop -class DoAnalyzeCtor : public ModulePhase { +class MeDoAnalyzeCtor : public MeFuncPhase { public: - explicit DoAnalyzeCtor(ModulePhaseID id) : ModulePhase(id) {} + explicit MeDoAnalyzeCtor(MePhaseID id) : MeFuncPhase(id) {} + + virtual ~MeDoAnalyzeCtor() = default; - ~DoAnalyzeCtor() = default; + AnalysisResult *Run(MeFunction*, MeFuncResultMgr*, ModuleResultMgr*) override; std::string PhaseName() const override { return "analyzector"; } - - AnalysisResult *Run(MIRModule *mod, ModuleResultMgr *mrm) override { - OPT_TEMPLATE(AnalyzeCtor); - return nullptr; - } }; } // namespace maple -#endif // MPL2MPL_INCLUDE_ANALYZECTOR_H +#endif // MAPLEME_INCLUDE_ANALYZECTOR_H diff --git a/src/maple_me/include/me_ir.h b/src/maple_me/include/me_ir.h index 018334f4521fb76b61b39fa296310859c24a6459..e01ceaceeef6ec99312f66d5dd39bbf2152e3983 100755 --- a/src/maple_me/include/me_ir.h +++ b/src/maple_me/include/me_ir.h @@ -1087,7 +1087,7 @@ class MeStmt { } bool IsAssign() const { - return op == OP_dassign || op == OP_maydassign || op == OP_iassign || op == OP_regassign; + return op == OP_dassign || op == OP_maydassign || op == OP_iassign || op == OP_regassign || op == OP_piassign; } virtual MIRType *GetReturnType() const { diff --git a/src/maple_me/include/me_phases.def b/src/maple_me/include/me_phases.def index 8491bde2daeafa68a010279843bac8eccc35587b..8562b00214c600e1ad6010327110913a0cabb807 100644 --- a/src/maple_me/include/me_phases.def +++ b/src/maple_me/include/me_phases.def @@ -20,6 +20,7 @@ FUNCAPHASE(MeFuncPhase_IRMAP, MeDoIRMap) FUNCAPHASE(MeFuncPhase_BBLAYOUT, MeDoBBLayout) FUNCAPHASE(MeFuncPhase_MELOOP, MeDoMeLoop) FUNCTPHASE(MeFuncPhase_BYPATHEH, MeDoBypathEH) +FUNCAPHASE(MeFuncPhase_MEANALYZECTOR, MeDoAnalyzeCtor) FUNCAPHASE(MeFuncPhase_MEABCOPT, MeDoABCOpt) FUNCAPHASE(MeFuncPhase_CONDBASEDNPC, MeDoCondBasedNPC) FUNCTPHASE(MeFuncPhase_MAY2DASSIGN, MeDoMay2Dassign) diff --git a/src/maple_me/include/me_ssi.h b/src/maple_me/include/me_ssi.h new file mode 100644 index 0000000000000000000000000000000000000000..c2a61039fc4b7c15450230210bbd2f95cfec578e --- /dev/null +++ b/src/maple_me/include/me_ssi.h @@ -0,0 +1,255 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. 
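
The single new FUNCAPHASE line in me_phases.def is all the registration the MeDoAnalyzeCtor phase needs, because the .def file is consumed as an X-macro list: each includer defines FUNCAPHASE/FUNCTPHASE to expand into whatever it needs (enum entries, factory registrations) and then includes the file. The self-contained illustration below shows the general technique only; the PHASE_LIST contents and names are invented for the example and are not Maple's actual definitions.

    #include <iostream>

    // phases.def would normally live in its own file; inlined here as a macro so
    // the example stays self-contained.
    #define PHASE_LIST \
      PHASE(kDominance, "dominance") \
      PHASE(kIrMap, "irmap") \
      PHASE(kAnalyzeCtor, "analyzector")

    // First expansion: build the enum of phase ids.
    enum PhaseId {
    #define PHASE(id, name) id,
      PHASE_LIST
    #undef PHASE
      kPhaseCount
    };

    // Second expansion: build a parallel table of phase names.
    static const char *kPhaseNames[] = {
    #define PHASE(id, name) name,
      PHASE_LIST
    #undef PHASE
    };

    int main() {
      for (int i = 0; i < kPhaseCount; ++i) {
        std::cout << i << ": " << kPhaseNames[i] << '\n';
      }
      return 0;
    }
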
+ * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +#ifndef MAPLE_ME_INCLUDE_MESSI_H +#define MAPLE_ME_INCLUDE_MESSI_H +#include "me_function.h" +#include "me_irmap.h" +#include "me_ir.h" + +namespace maple { +struct StmtComparator { + bool operator()(const std::pair &lhs, const std::pair &rhs) const { + if (lhs.first != rhs.first) { + return lhs.first < rhs.first; + } else { + return lhs.second < rhs.second; + } + } +}; + +class DefPoint { + public: + enum DefineKind { + kDefByPi, + kDefByPhi + }; + + explicit DefPoint(DefineKind dk) : defKind(dk) {} + ~DefPoint() = default; + + void SetDefPi(PiassignMeStmt &s) { + CHECK_FATAL(defKind == kDefByPi, "must be"); + value.pi = &s; + } + + PiassignMeStmt *GetPiStmt() { + CHECK_FATAL(defKind == kDefByPi, "must be"); + return value.pi; + } + + const PiassignMeStmt *GetPiStmt() const { + CHECK_FATAL(defKind == kDefByPi, "must be"); + return value.pi; + } + + void SetDefPhi(MePhiNode &s) { + CHECK_FATAL(defKind == kDefByPhi, "must be"); + value.phi = &s; + } + + BB *GetBB() const { + if (defKind == kDefByPi) { + return value.pi->GetBB(); + } else { + return value.phi->GetDefBB(); + } + } + + BB *GetGeneratedByBB() const { + CHECK_FATAL(defKind == kDefByPi, "must be"); + return value.pi->GetGeneratedBy()->GetBB(); + } + + VarMeExpr *GetRHS() const { + if (defKind == kDefByPi) { + return value.pi->GetRHS(); + } else { + return static_cast(value.phi->GetOpnd(0)); + } + } + + VarMeExpr *GetLHS() const { + if (defKind == kDefByPi) { + return value.pi->GetLHS(); + } else { + return static_cast(value.phi->GetLHS()); + } + } + + const OStIdx &GetOStIdx() const { + if (defKind == kDefByPi) { + return value.pi->GetRHS()->GetOStIdx(); + } else { + return value.phi->GetOpnd(0)->GetOStIdx(); + } + } + + bool IsPiStmt() const { + return defKind == kDefByPi; + } + + bool IsGeneratedByBr() const { + CHECK_FATAL(defKind == kDefByPi, "must be"); + MeStmt *stmt = value.pi->GetGeneratedBy(); + if (stmt->GetOp() == OP_brtrue || stmt->GetOp() == OP_brfalse) { + return true; + } + return false; + } + + void RemoveFromBB() { + if (defKind == kDefByPi) { + if (IsGeneratedByBr()) { + GetBB()->GetPiList().clear(); + } else { + GetBB()->RemoveMeStmt(value.pi); + } + } else { + GetBB()->GetMePhiList().erase(GetOStIdx()); + } + } + + void Dump(const IRMap &irMap) { + LogInfo::MapleLogger() << "New Def : " << '\n'; + if (defKind == kDefByPi) { + value.pi->Dump(&irMap); + } else { + value.phi->Dump(&irMap); + } + LogInfo::MapleLogger() << '\n'; + } + + private: + DefineKind defKind; + union DefStmt { + PiassignMeStmt *pi; + MePhiNode *phi; + }; + DefStmt value; +}; + +enum SSIOptType : uint8_t { + kNullPointerCheckOpt = 0x1, + kCheckCastOpt = 0x2, + kCondBaseRCOpt = 0x4, + kArrayBoundsCheckOpt = 0x8, + kCondBasePropOpt = 0x10, + kBranchResolve = 0x20, +}; + +class SSIType { + public: + SSIType() = default; + ~SSIType() = default; + + void SetOptType(uint8_t t) { + optType = t; + } + + uint8_t GetOptType() { + return optType; + } + + bool GetOptKindType(SSIOptType x) const { + return (optType & x) != 0; + } + + void DumpOptType() const; + + private: + uint8_t optType = 0; +}; + +class MeSSI { 
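
Within the new me_ssi.h above, DefPoint records every definition the conversion introduces as a small tagged union, either a pi assignment or a phi, and each accessor asserts the tag before touching the union so the later rename and removal passes can handle both kinds uniformly. A stripped-down standalone analogue of that tag-plus-union shape is sketched below; PiStmt and PhiNode are empty placeholder types, not Maple's statement classes.

    #include <cassert>
    #include <iostream>

    // Placeholder statement types standing in for PiassignMeStmt / MePhiNode.
    struct PiStmt  { int block; };
    struct PhiNode { int block; };

    class DefPoint {
     public:
      enum Kind { kByPi, kByPhi };

      explicit DefPoint(PiStmt &s)  : kind(kByPi)  { u.pi = &s; }
      explicit DefPoint(PhiNode &s) : kind(kByPhi) { u.phi = &s; }

      bool IsPi() const { return kind == kByPi; }

      // Accessors check the tag first, mirroring the CHECK_FATAL guards in the
      // real class.
      PiStmt &Pi() const { assert(kind == kByPi); return *u.pi; }
      PhiNode &Phi() const { assert(kind == kByPhi); return *u.phi; }

      // Uniform query that dispatches on the tag, like DefPoint::GetBB().
      int Block() const { return kind == kByPi ? u.pi->block : u.phi->block; }

     private:
      Kind kind;
      union { PiStmt *pi; PhiNode *phi; } u;
    };

    int main() {
      PiStmt pi{3};
      PhiNode phi{7};
      DefPoint a(pi), b(phi);
      std::cout << a.Block() << ' ' << b.Block() << '\n';  // 3 7
      return 0;
    }
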
+ public: + static bool isDebug; + MeSSI(MeFunction &meFunction, Dominance &dom, MeIRMap &map, MemPool &pool, + std::map* acs = nullptr, + std::map>* cBB = nullptr) + : meFunc(&meFunction), + dom(&dom), + irMap(&map), + memPool(&pool), + allocator(&pool), + arrayChecks(acs), + containsBB(cBB) {} + ~MeSSI() = default; + void ConvertToSSI(); + void ConvertToSSA(); + void SetSSIType(uint8_t opt) { + ssiType.SetOptType(opt); + } + MIRType *GetInferredType(MeExpr *expr); + private: + NaryMeExpr *GetInstanceOfType(MeExpr &e); + void AddPiForABCOpt(BB &bb); + void AddNullPointerInfoForVar(); + uint8_t AnalysisBranch(MeStmt &meStmt); + void RemoveExtraNodes(); + void InsertPiNodes(); + bool ExistedPhiNode(BB &bb, VarMeExpr &rhs); + void InsertPhiNodes(); + void Rename(); + void RenameStartPiBr(DefPoint &newDefPoint); + void RenameStartPiArray(DefPoint &newDefPoint); + void RenameStartPhi(DefPoint &newDefPoint); + void ReplacePiPhiInSuccs(BB &bb, VarMeExpr &newVar); + bool ReplaceStmt(MeStmt &meStmt, VarMeExpr &newVar, VarMeExpr &oldVar); + void ReplaceBB(BB &bb, BB &parentBB, DefPoint &newDefPoint); + bool ExistedPiNode(BB &bb, BB &parentBB, const VarMeExpr &rhs); + void CreatePhi(VarMeExpr &rhs, BB &dfBB); + VarMeExpr *CreateNewPiExpr(const MeExpr &opnd); + void CreateNewPiStmt(VarMeExpr *lhs, MeExpr &rhs, BB &bb, MeStmt &generatedBy, bool isToken); + void CreateNewPiStmt(VarMeExpr *lhs, MeExpr &rhs, MeStmt &generatedBy); + MeExpr *ReplaceMeExprExpr(MeExpr &origExpr, MeExpr &oldVar, MeExpr &repExpr); + MeExpr *NewMeExpr(MeExpr &meExpr); + bool ReplaceMeExprStmtOpnd(uint32 opndID, MeStmt &meStmt, MeExpr &oldVar, MeExpr &newVar, bool update); + bool ReplaceStmtWithNewVar(MeStmt &meStmt, MeExpr &oldVar, MeExpr &newVar, bool update); + + Dominance *GetDominace() { + return dom; + } + + MemPool *GetMemPool() { + return memPool; + } + + MapleAllocator &GetAllocator() { + return allocator; + } + + const MeIRMap *GetIRMap() const { + return irMap; + } + + MeFunction *meFunc; + Dominance *dom; + MeIRMap *irMap; + MemPool *memPool; + MapleAllocator allocator; + SSIType ssiType{}; + std::vector newDefPoints; + std::map newDef2Old; + std::map, MeExpr*, StmtComparator> modifiedStmt; + std::map> modifiedPhi; + std::set visitedBBs; + // used for ABC opt + std::map* arrayChecks; + std::map>* containsBB; + // used for check cast opt + std::map inferredType; +}; +} +#endif diff --git a/src/maple_me/src/me_abco.cpp b/src/maple_me/src/me_abco.cpp index 9025343e0fbb8303345d1cd03f4f1a284c22f10b..dcad696bf9710766ac826acd17b83560ace981d4 100755 --- a/src/maple_me/src/me_abco.cpp +++ b/src/maple_me/src/me_abco.cpp @@ -20,9 +20,15 @@ namespace maple { bool MeABC::isDebug = false; constexpr int kNumOpnds = 2; -constexpr int kPiStmtUpperBound = 2; constexpr int kPiListSize = 2; +void MeABC::AddEdgePair(ESSABaseNode &from, ESSABaseNode &to, int64 value, EdgeType type) { + InequalEdge *pairEdge1 = inequalityGraph->AddEdge(from, to, value, type); + InequalEdge *pairEdge2 = inequalityGraph->AddEdge(to, from, -value, type); + pairEdge1->SetPairEdge(*pairEdge2); + pairEdge2->SetPairEdge(*pairEdge1); +} + void MeABC::ABCCollectArrayExpr(MeStmt &meStmt, MeExpr &meExpr, bool isUpdate) { if (meExpr.GetOp() == OP_array) { auto *nMeExpr = static_cast(&meExpr); @@ -48,128 +54,6 @@ void MeABC::ABCCollectArrayExpr(MeStmt &meStmt, MeExpr &meExpr, bool isUpdate) { } } -bool MeABC::IsLegal(MeStmt &meStmt) { - CHECK_FATAL(meStmt.IsCondBr(), "must be"); - auto *brMeStmt = static_cast(&meStmt); - MeExpr *meCmp = brMeStmt->GetOpnd(); - // may be 
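
The new MeABC::AddEdgePair above factors out the sequence that used to be pasted at every call site: add an edge with weight w, add the reverse edge with weight -w, and cross-link the two as a pair. A minimal standalone version over a toy graph is sketched below; Edge and Graph are invented stand-ins for InequalEdge and the inequality graph, and only the pairing logic mirrors the patch.

    #include <cstdint>
    #include <deque>
    #include <iostream>

    struct Edge {
      int from;
      int to;
      int64_t weight;
      Edge *pair = nullptr;  // the matching edge in the opposite direction
    };

    class Graph {
     public:
      Edge *AddEdge(int from, int to, int64_t weight) {
        edges.push_back({from, to, weight});
        return &edges.back();
      }
      // Adds u->v with weight w and v->u with weight -w and links the two,
      // the same invariant AddEdgePair establishes in the patch.
      void AddEdgePair(int u, int v, int64_t w) {
        Edge *e1 = AddEdge(u, v, w);
        Edge *e2 = AddEdge(v, u, -w);
        e1->pair = e2;
        e2->pair = e1;
      }
      const std::deque<Edge> &Edges() const { return edges; }
     private:
      std::deque<Edge> edges;  // deque keeps edge pointers stable as edges are added
    };

    int main() {
      Graph g;
      g.AddEdgePair(/*u=*/0, /*v=*/1, /*w=*/4);
      for (const Edge &e : g.Edges()) {
        std::cout << e.from << " -> " << e.to << " (" << e.weight << ")\n";
      }
      return 0;
    }
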
we need consider ne, eq, and only one opnd in branch insn - if (meCmp->GetMeOp() != kMeOpOp) { - return false; - } - auto *opMeExpr = static_cast(meCmp); - CHECK_FATAL(opMeExpr->GetNumOpnds() == kNumOpnds, "must be"); - if (opMeExpr->GetOp() != OP_ge && opMeExpr->GetOp() != OP_le && - opMeExpr->GetOp() != OP_lt && opMeExpr->GetOp() != OP_gt && - opMeExpr->GetOp() != OP_ne && opMeExpr->GetOp() != OP_eq) { - return false; - } - MeExpr *opnd1 = opMeExpr->GetOpnd(0); - MeExpr *opnd2 = opMeExpr->GetOpnd(1); - if ((opnd1->GetMeOp() != kMeOpVar && opnd1->GetMeOp() != kMeOpConst) || - (opnd2->GetMeOp() != kMeOpVar && opnd2->GetMeOp() != kMeOpConst)) { - return false; - } - if (!IsPrimitivePureScalar(opnd1->GetPrimType()) || !IsPrimitivePureScalar(opnd2->GetPrimType())) { - return false; - } - return true; -} - -VarMeExpr *MeABC::CreateNewPiExpr(const MeExpr &opnd) { - if (opnd.GetMeOp() == kMeOpConst) { - return nullptr; - } - CHECK_FATAL(opnd.GetMeOp() == kMeOpVar, "must be"); - SSATab &ssaTab = irMap->GetSSATab(); - OriginalSt *ost = ssaTab.GetOriginalStFromID(static_cast(&opnd)->GetOStIdx()); - CHECK_NULL_FATAL(ost); - CHECK_FATAL(!ost->IsVolatile(), "must be"); - VarMeExpr *var = irMap->NewInPool(irMap->GetExprID(), ost->GetIndex(), - irMap->GetVerst2MeExprTable().size()); - irMap->SetExprID(irMap->GetExprID() + 1); - irMap->PushBackVerst2MeExprTable(var); - ost->PushbackVersionIndex(var->GetVstIdx()); - var->InitBase(opnd.GetOp(), opnd.GetPrimType(), 0); - return var; -} - -void MeABC::CreateNewPiStmt(VarMeExpr *lhs, MeExpr &rhs, BB &bb, MeStmt &generatedBy, bool isToken) { - if (lhs == nullptr) { - return; - } - PiassignMeStmt *meStmt = GetMemPool()->New(&GetAllocator()); - meStmt->SetGeneratedBy(generatedBy); - meStmt->SetRHS(*(static_cast(&rhs))); - meStmt->SetLHS(*lhs); - meStmt->SetBB(&bb); - meStmt->SetIsToken(isToken); - lhs->SetDefBy(kDefByStmt); - lhs->SetDefStmt(meStmt); - bb.InsertPi(*(generatedBy.GetBB()), *meStmt); - DefPoint *newDef = GetMemPool()->New(DefPoint::DefineKind::kDefByPi); - newDef->SetDefPi(*meStmt); - newDefPoints.push_back(newDef); - newDef2Old[newDef] = static_cast(&rhs); -} - -void MeABC::CreateNewPiStmt(VarMeExpr *lhs, MeExpr &rhs, MeStmt &generatedBy) { - if (lhs == nullptr) { - return; - } - PiassignMeStmt *meStmt = GetMemPool()->New(&GetAllocator()); - meStmt->SetGeneratedBy(generatedBy); - meStmt->SetRHS(*(static_cast(&rhs))); - meStmt->SetLHS(*lhs); - meStmt->SetBB(generatedBy.GetBB()); - lhs->SetDefBy(kDefByStmt); - lhs->SetDefStmt(meStmt); - generatedBy.GetBB()->InsertMeStmtAfter(&generatedBy, meStmt); - DefPoint *newDef = GetMemPool()->New(DefPoint::DefineKind::kDefByPi); - newDef->SetDefPi(*meStmt); - newDefPoints.push_back(newDef); - newDef2Old[newDef] = static_cast(&rhs); -} - -void MeABC::InsertPiNodes() { - for (auto bIt = meFunc->valid_begin(), eIt = meFunc->valid_end(); bIt != eIt; ++bIt) { - BB *bb = *bIt; - if (bb->GetKind() == kBBCondGoto && IsLegal(*(bb->GetLastMe()))) { - auto *brMeStmt = static_cast(bb->GetLastMe()); - BB *brTarget = bb->GetSucc(1); - BB *brFallThru = bb->GetSucc(0); - CHECK_FATAL(brMeStmt->GetOffset() == brTarget->GetBBLabel(), "must be"); - auto *opMeExpr = static_cast(brMeStmt->GetOpnd()); - MeExpr *opnd1 = opMeExpr->GetOpnd(0); - MeExpr *opnd2 = opMeExpr->GetOpnd(1); - VarMeExpr *brTargetNewOpnd1 = CreateNewPiExpr(*opnd1); - VarMeExpr *brTargetNewOpnd2 = CreateNewPiExpr(*opnd2); - CreateNewPiStmt(brTargetNewOpnd1, *opnd1, *brTarget, *brMeStmt, true); - CreateNewPiStmt(brTargetNewOpnd2, *opnd2, *brTarget, *brMeStmt, 
true); - VarMeExpr *brFallThruNewOpnd1 = CreateNewPiExpr(*opnd1); - VarMeExpr *brFallThruNewOpnd2 = CreateNewPiExpr(*opnd2); - CreateNewPiStmt(brFallThruNewOpnd1, *opnd1, *brFallThru, *brMeStmt, false); - CreateNewPiStmt(brFallThruNewOpnd2, *opnd2, *brFallThru, *brMeStmt, false); - } - auto it = containsBB.find(bb); - if (it == containsBB.end()) { - continue; - } - std::vector &arryChk = it->second; - for (MeStmt *meStmt : arryChk) { - NaryMeExpr *naryMeExpr = arrayChecks[meStmt]; - CHECK_FATAL(naryMeExpr->GetOpnds().size() == kNumOpnds, "must be"); - MeExpr *opnd1 = naryMeExpr->GetOpnd(0); - MeExpr *opnd2 = naryMeExpr->GetOpnd(1); - // consider whether we should create pi if opnd2 is const - CHECK_FATAL(opnd1->GetMeOp() == kMeOpVar, "must be"); - CHECK_FATAL(opnd1->GetPrimType() == PTY_ref, "must be"); - CHECK_FATAL(opnd2->GetMeOp() == kMeOpVar || opnd2->GetMeOp() == kMeOpConst, "must be"); - VarMeExpr *arrayCheckOpnd2 = CreateNewPiExpr(*opnd2); - CreateNewPiStmt(arrayCheckOpnd2, *opnd2, *meStmt); - } - } -} - bool MeABC::CollectABC() { auto eIt = meFunc->valid_end(); for (auto bIt = meFunc->valid_begin(); bIt != eIt; ++bIt) { @@ -182,422 +66,6 @@ bool MeABC::CollectABC() { return !arrayChecks.empty(); } -bool MeABC::ExistedPhiNode(BB &bb, VarMeExpr &rhs) { - return bb.GetMePhiList().find(rhs.GetOStIdx()) != bb.GetMePhiList().end(); -} - -bool MeABC::ExistedPiNode(BB &bb, BB &parentBB, const VarMeExpr &rhs) { - MapleMap> &piList = bb.GetPiList(); - auto it = piList.find(&parentBB); - if (it == piList.end()) { - return false; - } - std::vector &piStmts = it->second; - CHECK_FATAL(!piStmts.empty(), "should not be empty"); - CHECK_FATAL(piStmts.size() <= kPiStmtUpperBound, "must be"); - PiassignMeStmt *pi1 = piStmts.at(0); - if (pi1->GetLHS()->GetOStIdx() == rhs.GetOStIdx()) { - return true; - } - if (piStmts.size() == kPiStmtUpperBound) { - PiassignMeStmt *pi2 = piStmts.at(1); - if (pi2->GetLHS()->GetOStIdx() == rhs.GetOStIdx()) { - return true; - } - } - return false; -} - -void MeABC::CreatePhi(VarMeExpr &rhs, BB &dfBB) { - VarMeExpr *phiNewLHS = CreateNewPiExpr(rhs); - ASSERT_NOT_NULL(phiNewLHS); - MePhiNode *newPhi = GetMemPool()->New(phiNewLHS, &GetAllocator()); - newPhi->SetDefBB(&dfBB); - newPhi->GetOpnds().resize(dfBB.GetPred().size(), &rhs); - newPhi->SetPiAdded(); - dfBB.GetMePhiList().insert(std::make_pair(phiNewLHS->GetOStIdx(), newPhi)); - DefPoint *newDef = GetMemPool()->New(DefPoint::DefineKind::kDefByPhi); - newDef->SetDefPhi(*newPhi); - newDefPoints.push_back(newDef); - newDef2Old[newDef] = &rhs; -} - -void MeABC::InsertPhiNodes() { - for (size_t i = 0; i < newDefPoints.size(); ++i) { - DefPoint *newDefStmt = newDefPoints[i]; - BB *newDefBB = newDefStmt->GetBB(); - CHECK_NULL_FATAL(newDefBB); - VarMeExpr *rhs = newDefStmt->GetRHS(); - if (newDefStmt->IsPiStmt()) { - BB *genByBB = newDefStmt->GetGeneratedByBB(); - if (!dom->Dominate(*genByBB, *newDefBB)) { - if (!ExistedPhiNode(*newDefBB, *rhs)) { - CreatePhi(*rhs, *newDefBB); - } - continue; - } - } - BB *oldDefBB = rhs->DefByBB(); - if (oldDefBB == nullptr) { - oldDefBB = meFunc->GetCommonEntryBB(); - CHECK_FATAL(rhs->IsZeroVersion(irMap->GetSSATab()), "must be"); - } - CHECK_NULL_FATAL(oldDefBB); - MapleSet &dfs = dom->GetDomFrontier(newDefBB->GetBBId()); - for (auto bbID : dfs) { - BB *dfBB = meFunc->GetBBFromID(bbID); - if (!dom->Dominate(*oldDefBB, *dfBB)) { - MapleSet &dfsTmp = dom->GetDomFrontier(oldDefBB->GetBBId()); - CHECK_FATAL(dfsTmp.find(bbID) != dfsTmp.end(), "must be"); - continue; - } - if 
(ExistedPhiNode(*dfBB, *rhs)) { - continue; - } - CreatePhi(*rhs, *dfBB); - } - } -} - -void MeABC::RenameStartPiBr(DefPoint &newDefPoint) { - const OStIdx &ostIdx = newDefPoint.GetOStIdx(); - BB *newDefBB = newDefPoint.GetBB(); - if (!ExistedPhiNode(*(newDefPoint.GetBB()), *(newDefPoint.GetRHS()))) { - RenameStartPhi(newDefPoint); - return; - } - MePhiNode* phi = newDefBB->GetMePhiList()[ostIdx]; - BB *genByBB = newDefPoint.GetGeneratedByBB(); - size_t index = 0; - while (index < newDefBB->GetPred().size()) { - if (newDefBB->GetPred(index) == genByBB) { - break; - } - ++index; - } - CHECK_FATAL(index < newDefBB->GetPred().size(), "must be"); - ScalarMeExpr*oldVar = phi->GetOpnd(index); - phi->SetOpnd(index, newDefPoint.GetLHS()); - if (!phi->IsPiAdded()) { - if (modifiedPhi.find(phi) == modifiedPhi.end()) { - modifiedPhi[phi] = std::vector(phi->GetOpnds().size(), nullptr); - } - if (modifiedPhi[phi][index] == nullptr) { - modifiedPhi[phi][index] = oldVar; - } - } -} - -void MeABC::RenameStartPiArray(DefPoint &newDefPoint) { - BB *newDefBB = newDefPoint.GetBB(); - MeStmt *piStmt = newDefPoint.GetPiStmt(); - if (piStmt != newDefBB->GetLastMe()) { - for (MeStmt *meStmt = piStmt->GetNext(); meStmt != nullptr; meStmt = meStmt->GetNext()) { - if (ReplaceStmt(*meStmt, *(newDefPoint.GetLHS()), *(newDef2Old[&newDefPoint]))) { - return; - } - } - } - ReplacePiPhiInSuccs(*newDefBB, *(newDefPoint.GetLHS())); - const MapleSet &children = dom->GetDomChildren(newDefBB->GetBBId()); - for (const BBId &child : children) { - ReplaceBB(*(meFunc->GetBBFromID(child)), *newDefBB, newDefPoint); - } -} - -void MeABC::RenameStartPhi(DefPoint &newDefPoint) { - BB *newDefBB = newDefPoint.GetBB(); - for (MeStmt &meStmt : newDefBB->GetMeStmts()) { - if (ReplaceStmt(meStmt, *newDefPoint.GetLHS(), *newDef2Old[&newDefPoint])) { - return; - } - } - ReplacePiPhiInSuccs(*newDefBB, *(newDefPoint.GetLHS())); - const MapleSet &children = dom->GetDomChildren(newDefBB->GetBBId()); - for (const BBId &child : children) { - ReplaceBB(*(meFunc->GetBBFromID(child)), *newDefBB, newDefPoint); - } -} - -void MeABC::ReplacePiPhiInSuccs(BB &bb, VarMeExpr &newVar) { - for (BB *succBB : bb.GetSucc()) { - MapleMap> &piList = succBB->GetPiList(); - auto it1 = piList.find(&bb); - if (it1 != piList.end()) { - std::vector &piStmts = it1->second; - // the size of pi statements must be 1 or 2 - CHECK_FATAL(!piStmts.empty(), "should not be empty"); - CHECK_FATAL(piStmts.size() <= 2, "must be"); - PiassignMeStmt *pi1 = piStmts.at(0); - if (pi1->GetLHS()->GetOStIdx() == newVar.GetOStIdx()) { - pi1->SetRHS(newVar); - continue; - } - if (piStmts.size() == kPiStmtUpperBound) { - PiassignMeStmt *pi2 = piStmts.at(1); - if (pi2->GetLHS()->GetOStIdx() == newVar.GetOStIdx()) { - pi2->SetRHS(newVar); - continue; - } - } - } - size_t index = 0; - while (index < succBB->GetPred().size()) { - if (succBB->GetPred(index) == &bb) { - break; - } - ++index; - } - CHECK_FATAL(index < succBB->GetPred().size(), "must be"); - MapleMap &phiList = succBB->GetMePhiList(); - auto it2 = phiList.find(newVar.GetOStIdx()); - if (it2 != phiList.end()) { - MePhiNode *phi = it2->second; - ScalarMeExpr *oldVar = phi->GetOpnd(index); - phi->SetOpnd(index, &newVar); - if (!phi->IsPiAdded()) { - if (modifiedPhi.find(phi) == modifiedPhi.end()) { - modifiedPhi[phi] = std::vector(phi->GetOpnds().size(), nullptr); - } - if (modifiedPhi[phi][index] == nullptr) { - modifiedPhi[phi][index] = oldVar; - } - } - } - } -} - -MeExpr *MeABC::NewMeExpr(MeExpr &meExpr) { - switch (meExpr.GetMeOp()) { 
- case kMeOpIvar: { - auto &ivarMeExpr = static_cast(meExpr); - IvarMeExpr *newIvarExpr = GetMemPool()->New(irMap->GetExprID(), ivarMeExpr); - irMap->SetExprID(irMap->GetExprID() + 1); - return newIvarExpr; - } - case kMeOpOp: { - auto &opMeExpr = static_cast(meExpr); - OpMeExpr *newOpMeExpr = GetMemPool()->New(opMeExpr, irMap->GetExprID()); - irMap->SetExprID(irMap->GetExprID() + 1); - return newOpMeExpr; - } - case kMeOpNary: { - auto &naryMeExpr = static_cast(meExpr); - NaryMeExpr *newNaryMeExpr = GetMemPool()->New(&GetAllocator(), irMap->GetExprID(), naryMeExpr); - irMap->SetExprID(irMap->GetExprID() + 1); - newNaryMeExpr->InitBase(meExpr.GetOp(), meExpr.GetPrimType(), meExpr.GetNumOpnds()); - return newNaryMeExpr; - } - default: - CHECK_FATAL(false, "impossible"); - } -} - -MeExpr *MeABC::ReplaceMeExprExpr(MeExpr &origExpr, MeExpr &meExpr, MeExpr &repExpr) { - if (origExpr.IsLeaf()) { - return &origExpr; - } - switch (origExpr.GetMeOp()) { - case kMeOpOp: { - auto &opMeExpr = static_cast(origExpr); - OpMeExpr newMeExpr(opMeExpr, kInvalidExprID); - bool needRehash = false; - for (uint32 i = 0; i < kOperandNumTernary; ++i) { - if (opMeExpr.GetOpnd(i) == nullptr) { - continue; - } - if (opMeExpr.GetOpnd(i) == &meExpr) { - needRehash = true; - newMeExpr.SetOpnd(i, &repExpr); - } else if (!opMeExpr.GetOpnd(i)->IsLeaf()) { - newMeExpr.SetOpnd(i, ReplaceMeExprExpr(*newMeExpr.GetOpnd(i), meExpr, repExpr)); - if (newMeExpr.GetOpnd(i) != opMeExpr.GetOpnd(i)) { - needRehash = true; - } - } - } - return needRehash ? NewMeExpr(newMeExpr) : &origExpr; - } - case kMeOpNary: { - auto &naryMeExpr = static_cast(origExpr); - NaryMeExpr newMeExpr(&GetAllocator(), kInvalidExprID, naryMeExpr); - const MapleVector &opnds = naryMeExpr.GetOpnds(); - bool needRehash = false; - for (size_t i = 0; i < opnds.size(); ++i) { - MeExpr *opnd = opnds[i]; - if (opnd == &meExpr) { - newMeExpr.SetOpnd(i, &repExpr); - needRehash = true; - } else if (!opnd->IsLeaf()) { - newMeExpr.SetOpnd(i, ReplaceMeExprExpr(*newMeExpr.GetOpnd(i), meExpr, repExpr)); - if (newMeExpr.GetOpnd(i) != opnd) { - needRehash = true; - } - } - } - return needRehash ? NewMeExpr(newMeExpr) : &origExpr; - } - case kMeOpIvar: { - auto &ivarExpr = static_cast(origExpr); - IvarMeExpr newMeExpr(kInvalidExprID, ivarExpr); - bool needRehash = false; - if (ivarExpr.GetBase() == &meExpr) { - newMeExpr.SetBase(&repExpr); - needRehash = true; - } else if (!ivarExpr.GetBase()->IsLeaf()) { - newMeExpr.SetBase(ReplaceMeExprExpr(*newMeExpr.GetBase(), meExpr, repExpr)); - if (newMeExpr.GetBase() != ivarExpr.GetBase()) { - needRehash = true; - } - } - return needRehash ? 
NewMeExpr(newMeExpr) : &origExpr; - } - default: - CHECK_FATAL(false, "NYI"); - } -} - -bool MeABC::ReplaceMeExprStmtOpnd(uint32 opndID, MeStmt &meStmt, MeExpr &oldVar, MeExpr &newVar, bool update) { - MeExpr *opnd = meStmt.GetOpnd(opndID); - bool isFromIassign = (meStmt.GetOp() == OP_iassign) && (opndID == 0); - if (isFromIassign) { - opnd = static_cast(&meStmt)->GetLHSVal(); - } - bool replaced = false; - MeExpr *newExpr = nullptr; - if (opnd == &oldVar) { - if (isFromIassign) { - static_cast(&meStmt)->SetLHSVal(static_cast(&newVar)); - } else { - meStmt.SetOpnd(opndID, &newVar); - } - replaced = true; - } else if (!opnd->IsLeaf()) { - newExpr = ReplaceMeExprExpr(*opnd, oldVar, newVar); - replaced = (newExpr != opnd); - if (isFromIassign) { - static_cast(&meStmt)->SetLHSVal(static_cast(newExpr)); - } else { - meStmt.SetOpnd(opndID, newExpr); - } - } - if (replaced && update) { - if (modifiedStmt.find(std::make_pair(&meStmt, opndID)) == modifiedStmt.end()) { - modifiedStmt[std::make_pair(&meStmt, opndID)] = opnd; - } - } - return replaced; -} - -bool MeABC::ReplaceStmtWithNewVar(MeStmt &meStmt, MeExpr &oldVar, MeExpr &newVar, bool update) { - switch (meStmt.GetOp()) { - case OP_dassign: - case OP_maydassign: - case OP_brtrue: - case OP_brfalse: { - return ReplaceMeExprStmtOpnd(0, meStmt, oldVar, newVar, update); - } - case OP_iassign: { - bool baseIsReplaced = ReplaceMeExprStmtOpnd(0, meStmt, oldVar, newVar, update); - bool rhsReplaced = ReplaceMeExprStmtOpnd(1, meStmt, oldVar, newVar, update); - return baseIsReplaced || rhsReplaced; - } - case OP_regassign: { - CHECK_FATAL(false, "should not happen"); - } - default: { - break; - } - } - return false; -} - -bool MeABC::ReplaceStmt(MeStmt &meStmt, VarMeExpr &newVar, VarMeExpr &oldVar) { - if (meStmt.GetOp() == OP_piassign) { - auto *pi = static_cast(&meStmt); - if (pi->GetRHS() == &oldVar) { - pi->SetRHS(newVar); - } - } else { - (void)ReplaceStmtWithNewVar(meStmt, oldVar, newVar, true); - } - const OStIdx &ostIdx = newVar.GetOStIdx(); - MapleMap *chiList = meStmt.GetChiList(); - if (chiList != nullptr && chiList->find(ostIdx) != chiList->end()) { - return true; - } - MeExpr *lhs = meStmt.GetAssignedLHS(); - if (lhs != nullptr && lhs->GetMeOp() == kMeOpVar && static_cast(lhs)->GetOStIdx() == ostIdx) { - return true; - } - lhs = meStmt.GetLHS(); - return (lhs != nullptr && lhs->GetMeOp() == kMeOpVar && static_cast(lhs)->GetOStIdx() == ostIdx); -} - -void MeABC::Rename() { - for (size_t i = 0; i < newDefPoints.size(); ++i) { - DefPoint *newDefStmt = newDefPoints[i]; - visitedBBs.clear(); - if (newDefStmt->IsPiStmt()) { - if (newDefStmt->IsGeneratedByBr()) { - RenameStartPiBr(*newDefStmt); - } else { - RenameStartPiArray(*newDefStmt); - } - } else { - RenameStartPhi(*newDefStmt); - } - } -} - -void MeABC::ReplaceBB(BB &bb, BB &parentBB, DefPoint &newDefPoint) { - if (visitedBBs.find(&bb) != visitedBBs.end()) { - return; - } - visitedBBs.insert(&bb); - if (ExistedPhiNode(bb, *(newDefPoint.GetLHS())) || ExistedPiNode(bb, parentBB, *(newDefPoint.GetLHS()))) { - return; - } - for (MeStmt &meStmt : bb.GetMeStmts()) { - if (ReplaceStmt(meStmt, *(newDefPoint.GetLHS()), *(newDef2Old[&newDefPoint]))) { - return; - } - } - ReplacePiPhiInSuccs(bb, *(newDefPoint.GetLHS())); - const MapleSet &children = dom->GetDomChildren(bb.GetBBId()); - for (const BBId &child : children) { - ReplaceBB(*(meFunc->GetBBFromID(child)), bb, newDefPoint); - } -} - -void MeABC::RemoveExtraNodes() { - for (DefPoint *defP : newDefPoints) { - defP->RemoveFromBB(); - } - for 
(auto pair : modifiedStmt) { - MeStmt *meStmt = pair.first.first; - MeExpr *newVar = nullptr; - if ((meStmt->GetOp() == OP_iassign) && (pair.first.second == 0)) { - newVar = static_cast(meStmt)->GetLHSVal(); - } else { - newVar = meStmt->GetOpnd(pair.first.second); - } - MeExpr *oldVar = pair.second; - bool replaced = ReplaceStmtWithNewVar(*meStmt, *newVar, *oldVar, false); - CHECK_FATAL(replaced, "must be"); - } - for (auto pair : modifiedPhi) { - MePhiNode *phi = pair.first; - for (size_t i = 0; i < pair.second.size(); ++i) { - size_t index = i; - ScalarMeExpr*oldVar = pair.second[i]; - if (oldVar != nullptr) { - phi->SetOpnd(index, oldVar); - } - } - } -} - bool MeABC::IsVirtualVar(const VarMeExpr &var, const SSATab &ssaTab) const { const OriginalSt *ost = ssaTab.GetOriginalStFromID(var.GetOStIdx()); return ost->GetIndirectLev() > 0; @@ -618,16 +86,11 @@ ESSABaseNode *MeABC::GetOrCreateRHSNode(MeExpr &expr) { void MeABC::BuildPhiInGraph(MePhiNode &phi) { if (!IsPrimitivePureScalar(phi.GetLHS()->GetPrimType())) { MeExpr *varExpr = phi.GetLHS(); - std::set visitedPhi; - ConstMeExpr dummyExpr(kInvalidExprID, nullptr); - varExpr = TryToResolveVar(*varExpr, visitedPhi, dummyExpr, false); - if (varExpr != nullptr && varExpr != &dummyExpr) { + varExpr = TryToResolveVar(*varExpr, false); + if (varExpr != nullptr) { ESSAArrayNode *arrayNode = inequalityGraph->GetOrCreateArrayNode(*(phi.GetLHS())); ESSAVarNode *varNode = inequalityGraph->GetOrCreateVarNode(*varExpr); - InequalEdge *pairEdge1 = inequalityGraph->AddEdge(*arrayNode, *varNode, 0, EdgeType::kUpper); - InequalEdge *pairEdge2 = inequalityGraph->AddEdge(*varNode, *arrayNode, 0, EdgeType::kUpper); - pairEdge1->SetPairEdge(*pairEdge2); - pairEdge2->SetPairEdge(*pairEdge1); + AddEdgePair(*arrayNode, *varNode, 0, EdgeType::kUpper); } return; } @@ -785,25 +248,13 @@ bool MeABC::BuildBrMeStmtInGraph(MeStmt &meStmt) { break; } case OP_ne: { - InequalEdge *pairEdge1 = inequalityGraph->AddEdge(*brFallThruOpnd1, *brFallThruOpnd2, 0, EdgeType::kUpper); - InequalEdge *pairEdge2 = inequalityGraph->AddEdge(*brFallThruOpnd2, *brFallThruOpnd1, 0, EdgeType::kUpper); - pairEdge1->SetPairEdge(*pairEdge2); - pairEdge2->SetPairEdge(*pairEdge1); - pairEdge1 = inequalityGraph->AddEdge(*brFallThruOpnd2, *brFallThruOpnd1, 0, EdgeType::kLower); - pairEdge2 = inequalityGraph->AddEdge(*brFallThruOpnd1, *brFallThruOpnd2, 0, EdgeType::kLower); - pairEdge1->SetPairEdge(*pairEdge2); - pairEdge2->SetPairEdge(*pairEdge1); + AddEdgePair(*brFallThruOpnd1, *brFallThruOpnd2, 0, EdgeType::kUpper); + AddEdgePair(*brFallThruOpnd2, *brFallThruOpnd1, 0, EdgeType::kLower); break; } case OP_eq: { - InequalEdge *pairEdge1 = inequalityGraph->AddEdge(*brTargetOpnd1, *brTargetOpnd2, 0, EdgeType::kUpper); - InequalEdge *pairEdge2 = inequalityGraph->AddEdge(*brTargetOpnd2, *brTargetOpnd1, 0, EdgeType::kUpper); - pairEdge1->SetPairEdge(*pairEdge2); - pairEdge2->SetPairEdge(*pairEdge1); - pairEdge1 = inequalityGraph->AddEdge(*brTargetOpnd2, *brTargetOpnd1, 0, EdgeType::kLower); - pairEdge2 = inequalityGraph->AddEdge(*brTargetOpnd1, *brTargetOpnd2, 0, EdgeType::kLower); - pairEdge1->SetPairEdge(*pairEdge2); - pairEdge2->SetPairEdge(*pairEdge1); + AddEdgePair(*brTargetOpnd1, *brTargetOpnd2, 0, EdgeType::kUpper); + AddEdgePair(*brTargetOpnd2, *brTargetOpnd1, 0, EdgeType::kLower); break; } default: @@ -812,6 +263,16 @@ bool MeABC::BuildBrMeStmtInGraph(MeStmt &meStmt) { return true; } +MeExpr *MeABC::TryToResolveVar(MeExpr &expr, bool isConst) { + std::set visitedPhi; + ConstMeExpr 
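
The RemoveExtraNodes body deleted here (it reappears as MeSSI::RemoveExtraNodes in the new me_ssi.cpp later in this patch) is the undo pass of the conversion: during renaming, every operand swapped to a new SSI version is remembered in modifiedStmt/modifiedPhi, and this pass plays those records back so the function returns to plain SSA. A compact standalone sketch of that record-then-restore idea follows, over a toy statement type; Stmt and RewriteLog are invented names.

    #include <cstddef>
    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    // Toy statement: an opcode name plus its operand names.
    struct Stmt {
      std::string name;
      std::vector<std::string> opnds;
    };

    // Remembers the original operand for every (statement, operand index) that
    // gets rewritten, so all rewrites can be undone afterwards.
    class RewriteLog {
     public:
      void Replace(Stmt &s, std::size_t idx, const std::string &newOpnd) {
        auto key = std::make_pair(&s, idx);
        if (log.find(key) == log.end()) {
          log[key] = s.opnds[idx];  // keep only the oldest value, like modifiedStmt
        }
        s.opnds[idx] = newOpnd;
      }
      void RestoreAll() {
        for (const auto &entry : log) {
          entry.first.first->opnds[entry.first.second] = entry.second;
        }
        log.clear();
      }
     private:
      std::map<std::pair<Stmt*, std::size_t>, std::string> log;
    };

    int main() {
      Stmt s{"iassign", {"i", "a"}};
      RewriteLog log;
      log.Replace(s, 0, "i_pi1");
      log.Replace(s, 0, "i_pi2");
      std::cout << s.opnds[0] << '\n';  // i_pi2
      log.RestoreAll();
      std::cout << s.opnds[0] << '\n';  // i
      return 0;
    }
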
dummyExpr(kInvalidExprID, nullptr); + MeExpr *tmp = TryToResolveVar(expr, visitedPhi, dummyExpr, isConst); + if (tmp != nullptr && tmp != &dummyExpr) { + return tmp; + } + return nullptr; +} + MeExpr *MeABC::TryToResolveVar(MeExpr &expr, std::set &visitedPhi, MeExpr &dummyExpr, bool isConst) { CHECK_FATAL(expr.GetMeOp() == kMeOpVar, "must be"); auto *var = static_cast(&expr); @@ -888,24 +349,19 @@ bool MeABC::BuildAssignInGraph(MeStmt &meStmt) { AddUseDef(*(opMeExpr->GetOpnd(0))); // Try to resolve Var is assigned from Const MeExpr *varExpr = opMeExpr->GetOpnd(0); - std::set visitedPhi; - ConstMeExpr dummyExpr(kInvalidExprID, nullptr); - varExpr = TryToResolveVar(*varExpr, visitedPhi, dummyExpr, true); - if (varExpr != nullptr && varExpr != &dummyExpr) { + varExpr = TryToResolveVar(*varExpr, true); + if (varExpr != nullptr) { CHECK_FATAL(varExpr->GetMeOp() == kMeOpConst, "must be"); ESSAConstNode *constNode = inequalityGraph->GetOrCreateConstNode( static_cast(varExpr)->GetIntValue()); - (void)inequalityGraph->AddEdge(*arrLength, *constNode, 0, EdgeType::kNone); + AddEdgePair(*arrLength, *constNode, 0, EdgeType::kNone); } } else { CHECK_FATAL(opMeExpr->GetOpnd(0)->GetMeOp() == kMeOpConst, "must be"); rhsNode = inequalityGraph->GetOrCreateConstNode( static_cast(opMeExpr->GetOpnd(0))->GetIntValue()); + AddEdgePair(*arrLength, *rhsNode, 0, EdgeType::kNone); } - InequalEdge *pairEdge1 = inequalityGraph->AddEdge(*arrLength, *rhsNode, 0, EdgeType::kNone); - InequalEdge *pairEdge2 = inequalityGraph->AddEdge(*rhsNode, *arrLength, 0, EdgeType::kNone); - pairEdge1->SetPairEdge(*pairEdge2); - pairEdge2->SetPairEdge(*pairEdge1); return true; } case OP_sub: { @@ -923,23 +379,16 @@ bool MeABC::BuildAssignInGraph(MeStmt &meStmt) { if (opnd1->GetMeOp() == kMeOpConst && opnd2->GetMeOp() == kMeOpConst) { CHECK_FATAL(false, "consider this pattern"); } else if (opnd2->GetMeOp() == kMeOpConst) { + visited.clear(); if (!HasRelativeWithLength(*opnd1)) { return false; } ESSABaseNode *rhsNode = GetOrCreateRHSNode(*opnd1); AddUseDef(*opnd1); - InequalEdge *pairEdge1 = inequalityGraph->AddEdge(*rhsNode, *lhsNode, + AddEdgePair(*rhsNode, *lhsNode, -static_cast(opnd2)->GetIntValue(), EdgeType::kUpper); - InequalEdge *pairEdge2 = inequalityGraph->AddEdge(*lhsNode, *rhsNode, - static_cast(opnd2)->GetIntValue(), EdgeType::kUpper); - pairEdge1->SetPairEdge(*pairEdge2); - pairEdge2->SetPairEdge(*pairEdge1); - pairEdge1 = inequalityGraph->AddEdge(*lhsNode, *rhsNode, + AddEdgePair(*lhsNode, *rhsNode, static_cast(opnd2)->GetIntValue(), EdgeType::kLower); - pairEdge2 = inequalityGraph->AddEdge(*rhsNode, *lhsNode, - -static_cast(opnd2)->GetIntValue(), EdgeType::kLower); - pairEdge1->SetPairEdge(*pairEdge2); - pairEdge2->SetPairEdge(*pairEdge1); return true; }else { // support this pattern later @@ -947,30 +396,21 @@ bool MeABC::BuildAssignInGraph(MeStmt &meStmt) { } } else if (opnd1->GetMeOp() == kMeOpVar && opnd2->GetMeOp() == kMeOpVar) { // Try to resolve Var is assigned from Const - std::set visitedPhi; - ConstMeExpr dummyExpr(kInvalidExprID, nullptr); - opnd2 = TryToResolveVar(*opnd2, visitedPhi, dummyExpr, true); - if (opnd2 == nullptr || opnd2 == &dummyExpr) { + opnd2 = TryToResolveVar(*opnd2, true); + if (opnd2 == nullptr) { return false; } CHECK_FATAL(opnd2->GetMeOp() == kMeOpConst, "must be"); + visited.clear(); if (!HasRelativeWithLength(*opnd1)) { return false; } ESSABaseNode *rhsNode = GetOrCreateRHSNode(*opnd1); AddUseDef(*opnd1); - InequalEdge *pairEdge1 = inequalityGraph->AddEdge(*rhsNode, *lhsNode, + 
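
The two-argument TryToResolveVar added above is a convenience wrapper: it owns the visitedPhi set and the dummy sentinel expression that the recursive overload threads through, and it maps "came back as the sentinel" to a plain nullptr so callers no longer repeat that boilerplate. Below is a self-contained sketch of the same wrapper-over-recursion shape using a toy definition chain instead of MeExpr and phi nodes; Node, Resolve and TryResolve are illustrative only.

    #include <iostream>
    #include <set>
    #include <vector>

    // Toy IR node: either a constant or a "phi" whose operands are other nodes.
    struct Node {
      bool isConst = false;
      int value = 0;
      std::vector<const Node*> phiOpnds;
    };

    // Recursive worker: returns the constant node everything resolves to, or the
    // sentinel on failure; `visited` cuts cycles through phi definitions.
    const Node *Resolve(const Node &n, std::set<const Node*> &visited, const Node &sentinel) {
      if (n.isConst) {
        return &n;
      }
      if (!visited.insert(&n).second) {
        return &sentinel;  // already on the current resolution path
      }
      const Node *result = nullptr;
      for (const Node *opnd : n.phiOpnds) {
        const Node *r = Resolve(*opnd, visited, sentinel);
        if (r == &sentinel || (result != nullptr && r->value != result->value)) {
          return &sentinel;  // unresolved or conflicting operands
        }
        result = r;
      }
      return result != nullptr ? result : &sentinel;
    }

    // Convenience wrapper, analogous to the new two-argument TryToResolveVar:
    // it owns the visited set and the sentinel and maps "sentinel" to nullptr.
    const Node *TryResolve(const Node &n) {
      std::set<const Node*> visited;
      static const Node sentinel{};
      const Node *r = Resolve(n, visited, sentinel);
      return (r != nullptr && r != &sentinel) ? r : nullptr;
    }

    int main() {
      Node c{true, 7, {}};
      Node phi{false, 0, {&c, &c}};
      const Node *r = TryResolve(phi);
      std::cout << (r != nullptr ? r->value : -1) << '\n';  // 7
      return 0;
    }
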
AddEdgePair(*rhsNode, *lhsNode, -static_cast(opnd2)->GetIntValue(), EdgeType::kUpper); - InequalEdge *pairEdge2 = inequalityGraph->AddEdge(*lhsNode, *rhsNode, - static_cast(opnd2)->GetIntValue(), EdgeType::kUpper); - pairEdge1->SetPairEdge(*pairEdge2); - pairEdge2->SetPairEdge(*pairEdge1); - pairEdge1 = inequalityGraph->AddEdge(*lhsNode, *rhsNode, + AddEdgePair(*lhsNode, *rhsNode, static_cast(opnd2)->GetIntValue(), EdgeType::kLower); - pairEdge2 = inequalityGraph->AddEdge(*rhsNode, *lhsNode, - -static_cast(opnd2)->GetIntValue(), EdgeType::kLower); - pairEdge1->SetPairEdge(*pairEdge2); - pairEdge2->SetPairEdge(*pairEdge1); return true; } else { // support this pattern later @@ -993,35 +433,28 @@ bool MeABC::BuildAssignInGraph(MeStmt &meStmt) { CHECK_FATAL(opnd1->GetMeOp() == kMeOpVar, "must be"); CHECK_FATAL(opnd2->GetMeOp() == kMeOpVar, "must be"); MeExpr *tmpVar = opnd2; - std::set visitedPhi; - ConstMeExpr dummyExpr(kInvalidExprID, nullptr); - tmpVar = TryToResolveVar(*tmpVar, visitedPhi, dummyExpr, true); - if (tmpVar != nullptr && tmpVar != &dummyExpr) { + tmpVar = TryToResolveVar(*tmpVar, true); + if (tmpVar != nullptr) { CHECK_FATAL(tmpVar->GetMeOp() == kMeOpConst, "must be"); + visited.clear(); if (!HasRelativeWithLength(*opnd1)) { return false; } AddUseDef(*opnd1); ESSABaseNode *rhsNode = GetOrCreateRHSNode(*opnd1); - InequalEdge *pairEdge1 = inequalityGraph->AddEdge(*rhsNode, *lhsNode, + AddEdgePair(*rhsNode, *lhsNode, static_cast(tmpVar)->GetIntValue(), EdgeType::kUpper); - InequalEdge *pairEdge2 = inequalityGraph->AddEdge(*lhsNode, *rhsNode, - -static_cast(tmpVar)->GetIntValue(), EdgeType::kUpper); - pairEdge1->SetPairEdge(*pairEdge2); - pairEdge2->SetPairEdge(*pairEdge1); - pairEdge1 = inequalityGraph->AddEdge(*lhsNode, *rhsNode, + AddEdgePair(*lhsNode, *rhsNode, -static_cast(tmpVar)->GetIntValue(), EdgeType::kLower); - pairEdge2 = inequalityGraph->AddEdge(*rhsNode, *lhsNode, - static_cast(tmpVar)->GetIntValue(), EdgeType::kLower); - pairEdge1->SetPairEdge(*pairEdge2); - pairEdge2->SetPairEdge(*pairEdge1); return true; } + visited.clear(); if (HasRelativeWithLength(*opnd1)) { unresolveEdge[std::make_pair(lhs, opnd1)] = opnd2; AddUseDef(*opnd1); AddUseDef(*opnd2); } + visited.clear(); if (HasRelativeWithLength(*opnd2)) { unresolveEdge[std::make_pair(lhs, opnd2)] = opnd1; AddUseDef(*opnd1); @@ -1032,42 +465,28 @@ bool MeABC::BuildAssignInGraph(MeStmt &meStmt) { if (opnd1->GetMeOp() == kMeOpConst && opnd2->GetMeOp() == kMeOpConst) { CHECK_FATAL(false, "consider this pattern"); } else if (opnd2->GetMeOp() == kMeOpConst) { + visited.clear(); if (!HasRelativeWithLength(*opnd1)) { return false; } ESSABaseNode *rhsNode = GetOrCreateRHSNode(*opnd1); AddUseDef(*opnd1); - InequalEdge *pairEdge1 = inequalityGraph->AddEdge(*rhsNode, *lhsNode, + AddEdgePair(*rhsNode, *lhsNode, static_cast(opnd2)->GetIntValue(), EdgeType::kUpper); - InequalEdge *pairEdge2 = inequalityGraph->AddEdge(*lhsNode, *rhsNode, - -static_cast(opnd2)->GetIntValue(), EdgeType::kUpper); - pairEdge1->SetPairEdge(*pairEdge2); - pairEdge2->SetPairEdge(*pairEdge1); - pairEdge1 = inequalityGraph->AddEdge(*lhsNode, *rhsNode, + AddEdgePair(*lhsNode, *rhsNode, -static_cast(opnd2)->GetIntValue(), EdgeType::kLower); - pairEdge2 = inequalityGraph->AddEdge(*rhsNode, *lhsNode, - static_cast(opnd2)->GetIntValue(), EdgeType::kLower); - pairEdge1->SetPairEdge(*pairEdge2); - pairEdge2->SetPairEdge(*pairEdge1); return true; } else { + visited.clear(); if (!HasRelativeWithLength(*opnd2)) { return false; } ESSABaseNode *rhsNode = 
GetOrCreateRHSNode(*opnd2); AddUseDef(*opnd2); - InequalEdge *pairEdge1 = inequalityGraph->AddEdge(*rhsNode, *lhsNode, + AddEdgePair(*rhsNode, *lhsNode, static_cast(opnd1)->GetIntValue(), EdgeType::kUpper); - InequalEdge *pairEdge2 = inequalityGraph->AddEdge(*lhsNode, *rhsNode, - -static_cast(opnd1)->GetIntValue(), EdgeType::kUpper); - pairEdge1->SetPairEdge(*pairEdge2); - pairEdge2->SetPairEdge(*pairEdge1); - pairEdge1 = inequalityGraph->AddEdge(*lhsNode, *rhsNode, + AddEdgePair(*lhsNode, *rhsNode, -static_cast(opnd1)->GetIntValue(), EdgeType::kLower); - pairEdge2 = inequalityGraph->AddEdge(*rhsNode, *lhsNode, - static_cast(opnd1)->GetIntValue(), EdgeType::kLower); - pairEdge1->SetPairEdge(*pairEdge2); - pairEdge2->SetPairEdge(*pairEdge1); return true; } CHECK_FATAL(false, "impossible"); @@ -1118,6 +537,7 @@ bool MeABC::BuildAssignInGraph(MeStmt &meStmt) { } if (opnd2->GetMeOp() == kMeOpConst) { if (static_cast(opnd2)->GetIntValue() > 0) { + visited.clear(); if (HasRelativeWithLength(*opnd1)) { unresolveEdge[std::make_pair(lhs, nullptr)] = opnd2; AddUseDef(*opnd1); @@ -1126,12 +546,11 @@ bool MeABC::BuildAssignInGraph(MeStmt &meStmt) { return false; } MeExpr *varExpr = opnd2; - std::set visitedPhi; - ConstMeExpr dummyExpr(kInvalidExprID, nullptr); - varExpr = TryToResolveVar(*varExpr, visitedPhi, dummyExpr, true); - if (varExpr != nullptr && varExpr != &dummyExpr) { + varExpr = TryToResolveVar(*varExpr, true); + if (varExpr != nullptr) { CHECK_FATAL(varExpr->GetMeOp() == kMeOpConst, "must be"); if (static_cast(varExpr)->GetIntValue() > 0) { + visited.clear(); if (HasRelativeWithLength(*opnd1)) { unresolveEdge[std::make_pair(lhs, nullptr)] = opnd2; AddUseDef(*opnd1); @@ -1166,10 +585,7 @@ bool MeABC::BuildAssignInGraph(MeStmt &meStmt) { } ESSAVarNode *ivarNode = inequalityGraph->GetOrCreateVarNode(*rhs); ESSAArrayNode *arrayNode = inequalityGraph->GetOrCreateArrayNode(*lhs); - InequalEdge *pairEdge1 = inequalityGraph->AddEdge(*ivarNode, *arrayNode, 0, EdgeType::kUpper); - InequalEdge *pairEdge2 = inequalityGraph->AddEdge(*arrayNode, *ivarNode, 0, EdgeType::kUpper); - pairEdge1->SetPairEdge(*pairEdge2); - pairEdge2->SetPairEdge(*pairEdge1); + AddEdgePair(*ivarNode, *arrayNode, 0, EdgeType::kUpper); if (rhs->GetMeOp() == kMeOpVar) { AddUseDef(*rhs); } @@ -1180,24 +596,12 @@ bool MeABC::BuildAssignInGraph(MeStmt &meStmt) { if (rhs->GetMeOp() == kMeOpVar) { AddUseDef(*rhs); ESSABaseNode *rhsNode = GetOrCreateRHSNode(*rhs); - InequalEdge *pairEdge1 = inequalityGraph->AddEdge(*rhsNode, *lhsNode, 0, EdgeType::kUpper); - InequalEdge *pairEdge2 = inequalityGraph->AddEdge(*lhsNode, *rhsNode, 0, EdgeType::kUpper); - pairEdge1->SetPairEdge(*pairEdge2); - pairEdge2->SetPairEdge(*pairEdge1); - pairEdge1 = inequalityGraph->AddEdge(*lhsNode, *rhsNode, 0, EdgeType::kLower); - pairEdge2 = inequalityGraph->AddEdge(*rhsNode, *lhsNode, 0, EdgeType::kLower); - pairEdge1->SetPairEdge(*pairEdge2); - pairEdge2->SetPairEdge(*pairEdge1); + AddEdgePair(*rhsNode, *lhsNode, 0, EdgeType::kUpper); + AddEdgePair(*lhsNode, *rhsNode, 0, EdgeType::kLower); } else { ESSAConstNode *rhsNode = inequalityGraph->GetOrCreateConstNode(static_cast(rhs)->GetIntValue()); - InequalEdge *pairEdge1 = inequalityGraph->AddEdge(*rhsNode, *lhsNode, 0, EdgeType::kUpper); - InequalEdge *pairEdge2 = inequalityGraph->AddEdge(*lhsNode, *rhsNode, 0, EdgeType::kUpper); - pairEdge1->SetPairEdge(*pairEdge2); - pairEdge2->SetPairEdge(*pairEdge1); - pairEdge1 = inequalityGraph->AddEdge(*lhsNode, *rhsNode, 0, EdgeType::kLower); - pairEdge2 = 
inequalityGraph->AddEdge(*rhsNode, *lhsNode, 0, EdgeType::kLower); - pairEdge1->SetPairEdge(*pairEdge2); - pairEdge2->SetPairEdge(*pairEdge1); + AddEdgePair(*rhsNode, *lhsNode, 0, EdgeType::kUpper); + AddEdgePair(*lhsNode, *rhsNode, 0, EdgeType::kLower); } return true; } @@ -1228,7 +632,14 @@ bool MeABC::HasRelativeWithLength(MeExpr &meExpr) { } } else if (meStmt->GetOp() == OP_piassign) { PiassignMeStmt *piMeStmt = static_cast(meStmt); - return HasRelativeWithLength(*piMeStmt->GetRHS()); + if (piMeStmt->GetGeneratedBy()->IsCondBr()) { + auto *opMeExpr = static_cast(piMeStmt->GetGeneratedBy()->GetOpnd(0)); + MeExpr *opOpnd1 = opMeExpr->GetOpnd(0); + MeExpr *opOpnd2 = opMeExpr->GetOpnd(1); + return HasRelativeWithLength(*opOpnd1) || HasRelativeWithLength(*opOpnd2); + } else { + return HasRelativeWithLength(*piMeStmt->GetRHS()); + } } break; } @@ -1244,7 +655,19 @@ bool MeABC::HasRelativeWithLength(MeExpr &meExpr) { } return true; } - case kDefByPhi: + case kDefByPhi: { + MePhiNode &defPhi = varOpnd1->GetDefPhi(); + if (visited.find(&defPhi) != visited.end()) { + return false; + } + visited.insert(&defPhi); + for (MeExpr *expr : defPhi.GetOpnds()) { + if (HasRelativeWithLength(*expr)) { + return true; + } + } + break; + } case kDefByNo: case kDefByMustDef: break; @@ -1502,6 +925,9 @@ void MeABC::DeleteABC() { bool replaced = CleanABCInStmt(*meStmt, *naryMeExpr); CHECK_FATAL(replaced, "must be"); } + inequalityGraph = nullptr; + prove = nullptr; + ssi = nullptr; } void MeABC::InitNewStartPoint(MeStmt &meStmt, MeExpr &opnd1, MeExpr &opnd2, bool clearGraph) { @@ -1617,13 +1043,7 @@ void MeABC::ProcessCallParameters(CallMeStmt &callNode) { void MeABC::ExecuteABCO() { MeABC::isDebug = false; if (CollectABC()) { - if (MeABC::isDebug) { - LogInfo::MapleLogger() << meFunc->GetName() << "\n"; - irMap->Dump(); - } - InsertPiNodes(); - InsertPhiNodes(); - Rename(); + ssi->ConvertToSSI(); CollectCareInsns(); for (auto pair : arrayNewChecks) { InitNewStartPoint(*(pair.first), *((static_cast(pair.second))->GetOpnd(0)), @@ -1636,10 +1056,8 @@ void MeABC::ExecuteABCO() { } FindRedundantABC(*(pair.first), *(static_cast(pair.second))); } - RemoveExtraNodes(); + ssi->ConvertToSSA(); DeleteABC(); - inequalityGraph = nullptr; - prove = nullptr; } } diff --git a/src/mpl2mpl/src/analyzector.cpp b/src/maple_me/src/me_analyzector.cpp similarity index 47% rename from src/mpl2mpl/src/analyzector.cpp rename to src/maple_me/src/me_analyzector.cpp index 333a5ebbb2c10c90edee22785377686a1941c5de..75d5ebf4a10ca5456efb75af441d02f1157f22f0 100644 --- a/src/mpl2mpl/src/analyzector.cpp +++ b/src/maple_me/src/me_analyzector.cpp @@ -12,61 +12,63 @@ * FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v1 for more details. 
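
Two related changes land in HasRelativeWithLength above: a pi generated by a conditional branch is now traced through both operands of the branch's compare, and a kDefByPhi definition is followed through its phi operands under a visited set (cleared by callers before each query) so cycles in the phi web cannot recurse forever. The sketch below isolates just that cycle-safe recursion; Var, Phi and ReachesLength are invented stand-ins.

    #include <iostream>
    #include <set>
    #include <vector>

    struct Phi;

    // A variable either is itself an array length, or is defined by a phi.
    struct Var {
      bool isArrayLength = false;
      const Phi *defPhi = nullptr;
    };

    struct Phi {
      std::vector<const Var*> opnds;
    };

    // True if some definition reachable through phis is an array length.
    // `visited` cuts cycles such as loop-carried phis that feed themselves.
    bool ReachesLength(const Var &v, std::set<const Phi*> &visited) {
      if (v.isArrayLength) {
        return true;
      }
      if (v.defPhi == nullptr || !visited.insert(v.defPhi).second) {
        return false;
      }
      for (const Var *opnd : v.defPhi->opnds) {
        if (ReachesLength(*opnd, visited)) {
          return true;
        }
      }
      return false;
    }

    int main() {
      Var len{true, nullptr};
      Phi loopPhi;
      Var i{false, &loopPhi};
      loopPhi.opnds = {&len, &i};  // i = phi(len, i): a loop-carried cycle
      std::set<const Phi*> visited;
      std::cout << std::boolalpha << ReachesLength(i, visited) << '\n';  // true
      return 0;
    }
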
*/ -#include "analyzector.h" +#include "me_analyzector.h" #include "utils.h" // AnalyzeCtor analyzes which fields are assigned inside of each constructor namespace maple { -void AnalyzeCtor::ProcessFunc(MIRFunction *func) { - if (!func->IsConstructor() || func->IsEmpty() || func->GetParamSize() == 0) { + +void AnalyzeCtor::ProcessFunc() { + MIRFunction *curFunc = func->GetMirFunc(); + if (!curFunc->IsConstructor() || curFunc->IsEmpty() || curFunc->GetParamSize() == 0) { return; } - SetCurrentFunction(*func); hasSideEffect = false; - fieldSet.clear(); - if (func->GetBody() != nullptr) { - ProcessBlock(*func->GetBody()); + if (curFunc->GetBody() != nullptr) { + for (BB *bb : func->GetAllBBs()) { + if (bb == nullptr) + continue; + for (auto &meStmt : bb->GetMeStmts()) { + ProcessStmt(meStmt); + } + } } - PUIdx puIdx = func->GetPuidx(); - const MapleMap*> &puIdxFieldMap = GetMIRModule().GetPuIdxFieldInitializedMap(); - CHECK_FATAL(puIdxFieldMap.find(puIdx) == puIdxFieldMap.end(), - "%s has been processed before", func->GetName().c_str()); + PUIdx puIdx = curFunc->GetPuidx(); // if the function has calls with sideeffect, conservatively // we assume all fields are modified in ctor if (hasSideEffect) { MapleSet *fieldSubSet = - GetMIRModule().GetMemPool()->New>(std::less(), - GetMIRModule().GetMPAllocator().Adapter()); + func->GetMIRModule().GetMemPool()->New>(std::less(), + func->GetMIRModule().GetMPAllocator().Adapter()); fieldSubSet->insert(0); // write to all - GetMIRModule().SetPuIdxFieldSet(puIdx, fieldSubSet); + func->GetMIRModule().SetPuIdxFieldSet(puIdx, fieldSubSet); } else if (!fieldSet.empty()) { MapleSet *fieldSubSet = - GetMIRModule().GetMemPool()->New>(std::less(), - GetMIRModule().GetMPAllocator().Adapter()); + func->GetMIRModule().GetMemPool()->New>(std::less(), + func->GetMIRModule().GetMPAllocator().Adapter()); std::copy(fieldSet.begin(), fieldSet.end(), std::inserter(*fieldSubSet, fieldSubSet->begin())); - GetMIRModule().SetPuIdxFieldSet(puIdx, fieldSubSet); - } else { - // no fields are assigned in constructor - GetMIRModule().SetPuIdxFieldSet(puIdx, nullptr); + func->GetMIRModule().SetPuIdxFieldSet(puIdx, fieldSubSet); } } // collect field ids which are assigned inside the stmt and mark sideeffect // flag for non-ctor calls -void AnalyzeCtor::ProcessStmt(StmtNode &stmt) { - switch (stmt.GetOpCode()) { +void AnalyzeCtor::ProcessStmt(MeStmt &stmt) { + switch (stmt.GetOp()) { case OP_iassign: { - auto &iassign = static_cast(stmt); - MIRType *baseType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iassign.GetTyIdx()); - MIRType *pointedType = utils::ToRef(safe_cast(baseType)).GetPointedType(); + auto &iassign = static_cast(stmt); + auto &ivarMeExpr = static_cast(*iassign.GetLHSVal()); + MIRType *baseType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ivarMeExpr.GetTyIdx()); + ASSERT(baseType != nullptr, "null ptr check"); + auto *pointedType = static_cast(baseType)->GetPointedType(); auto structType = safe_cast(pointedType); if (structType != nullptr) { - MIRType *fieldType = structType->GetFieldType(iassign.GetFieldID()); + MIRType *fieldType = structType->GetFieldType(ivarMeExpr.GetFieldID()); if (fieldType->GetPrimType() != PTY_ref) { break; } } - fieldSet.insert(iassign.GetFieldID()); + fieldSet.insert(ivarMeExpr.GetFieldID()); break; } case OP_callassigned: @@ -85,22 +87,13 @@ void AnalyzeCtor::ProcessStmt(StmtNode &stmt) { } } -void AnalyzeCtor::Finish() { - if (!trace) { - return; - } - for (auto &pit : GetMIRModule().GetPuIdxFieldInitializedMap()) { - 
GlobalTables::GetFunctionTable().GetFunctionFromPuidx(pit.first)->Dump(true); - LogInfo::MapleLogger() << "field:"; - MapleSet *fieldIDSet = pit.second; - if (fieldIDSet == nullptr) { - LogInfo::MapleLogger() << "write nothing\n"; - continue; - } - for (FieldID fid : *fieldIDSet) { - LogInfo::MapleLogger() << fid << " "; - } - LogInfo::MapleLogger() << '\n'; - } +AnalysisResult *MeDoAnalyzeCtor::Run(MeFunction *func, MeFuncResultMgr *m, ModuleResultMgr *moduleResultMgr) { + auto *dom = static_cast(m->GetAnalysisResult(MeFuncPhase_DOMINANCE, func)); + auto *kh = static_cast(moduleResultMgr->GetAnalysisResult(MoPhase_CHA, &func->GetMIRModule())); + ASSERT_NOT_NULL(dom); + ASSERT_NOT_NULL(m->GetAnalysisResult(MeFuncPhase_IRMAP, func)); + AnalyzeCtor analyzeCtor(*func, *dom, *kh); + analyzeCtor.ProcessFunc(); + return nullptr; } } // namespace maple diff --git a/src/maple_me/src/me_bb_layout.cpp b/src/maple_me/src/me_bb_layout.cpp index 7d0a8b2627d2ca802131c75af45ead3182859e08..deb37255e9fb001cf24ce8b9f459d8bc643761cc 100644 --- a/src/maple_me/src/me_bb_layout.cpp +++ b/src/maple_me/src/me_bb_layout.cpp @@ -431,7 +431,7 @@ void BBLayout::ResolveUnconditionalFallThru(BB &bb, BB &nextBB) { } void BBLayout::FixEndTryBB(BB &bb) { - BBId prevID = bb.GetBBId() - 1; + BBId prevID = bb.GetBBId() - 1UL; for (BBId id = prevID; id != 0; --id) { auto prevBB = func.GetBBFromID(id); if (prevBB != nullptr) { diff --git a/src/maple_me/src/me_phase_manager.cpp b/src/maple_me/src/me_phase_manager.cpp index 1849bbf4ea363c8fe6d6bbf8e4c709b5d0cc668b..abd5c6da4d2bbaf206b6116a41ddc57b88d899c9 100644 --- a/src/maple_me/src/me_phase_manager.cpp +++ b/src/maple_me/src/me_phase_manager.cpp @@ -25,6 +25,7 @@ #include "me_profile_gen.h" #include "me_profile_use.h" #include "me_loop_canon.h" +#include "me_analyzector.h" #include "me_abco.h" #include "me_dse.h" #include "me_hdse.h" diff --git a/src/maple_me/src/me_ssi.cpp b/src/maple_me/src/me_ssi.cpp new file mode 100644 index 0000000000000000000000000000000000000000..82d99c1df04aa1314a7d8f37005510fcfeb9ef52 --- /dev/null +++ b/src/maple_me/src/me_ssi.cpp @@ -0,0 +1,659 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
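
Rewritten over the ME IR above, AnalyzeCtor walks every basic block's statements, collects the field IDs written by iassign into a set, and conservatively records "writes everything" (field 0) when a call with unknown side effects appears. A rough standalone model of that summary computation follows; Stmt, CtorSummary and SummarizeCtor are invented for illustration and only the decision logic echoes the patch.

    #include <cstdint>
    #include <iostream>
    #include <set>
    #include <vector>

    using FieldId = int32_t;

    enum class StmtKind { kFieldAssign, kCall, kOther };

    struct Stmt {
      StmtKind kind;
      FieldId field = 0;                 // used by kFieldAssign
      bool calleeHasSideEffect = false;  // used by kCall
    };

    // Summary of the fields a constructor may write; writesAll mirrors the
    // patch's conservative "insert field 0" case.
    struct CtorSummary {
      bool writesAll = false;
      std::set<FieldId> fields;
    };

    CtorSummary SummarizeCtor(const std::vector<Stmt> &body) {
      CtorSummary summary;
      bool hasSideEffect = false;
      for (const Stmt &s : body) {
        switch (s.kind) {
          case StmtKind::kFieldAssign:
            summary.fields.insert(s.field);  // field written directly
            break;
          case StmtKind::kCall:
            if (s.calleeHasSideEffect) {
              hasSideEffect = true;          // unknown effects: be conservative
            }
            break;
          case StmtKind::kOther:
            break;
        }
      }
      if (hasSideEffect) {
        summary.writesAll = true;            // assume every field is modified
        summary.fields.clear();
      }
      return summary;
    }

    int main() {
      std::vector<Stmt> plainCtor = {{StmtKind::kFieldAssign, 2}, {StmtKind::kFieldAssign, 5}};
      std::vector<Stmt> callingCtor = {{StmtKind::kFieldAssign, 2}, {StmtKind::kCall, 0, true}};
      std::cout << SummarizeCtor(plainCtor).fields.size() << ' '                    // 2
                << std::boolalpha << SummarizeCtor(callingCtor).writesAll << '\n';  // true
      return 0;
    }
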
+ */ +#include "me_ssi.h" + +namespace maple { +bool MeSSI::isDebug = false; +constexpr int kNumOpnds = 2; +constexpr int kPiStmtUpperBound = 2; + +VarMeExpr *MeSSI::CreateNewPiExpr(const MeExpr &opnd) { + if (opnd.GetMeOp() == kMeOpConst) { + return nullptr; + } + CHECK_FATAL(opnd.GetMeOp() == kMeOpVar, "must be"); + SSATab &ssaTab = irMap->GetSSATab(); + OriginalSt *ost = ssaTab.GetOriginalStFromID(static_cast(&opnd)->GetOStIdx()); + CHECK_NULL_FATAL(ost); + CHECK_FATAL(!ost->IsVolatile(), "must be"); + VarMeExpr *var = irMap->NewInPool(irMap->GetExprID(), ost->GetIndex(), + irMap->GetVerst2MeExprTable().size()); + irMap->SetExprID(irMap->GetExprID() + 1); + irMap->PushBackVerst2MeExprTable(var); + ost->PushbackVersionIndex(var->GetVstIdx()); + var->InitBase(opnd.GetOp(), opnd.GetPrimType(), 0); + return var; +} + +void MeSSI::CreateNewPiStmt(VarMeExpr *lhs, MeExpr &rhs, BB &bb, MeStmt &generatedBy, bool isToken) { + if (lhs == nullptr) { + return; + } + PiassignMeStmt *meStmt = GetMemPool()->New(&GetAllocator()); + meStmt->SetGeneratedBy(generatedBy); + meStmt->SetRHS(*(static_cast(&rhs))); + meStmt->SetLHS(*lhs); + meStmt->SetBB(&bb); + meStmt->SetIsToken(isToken); + lhs->SetDefBy(kDefByStmt); + lhs->SetDefStmt(meStmt); + bb.InsertPi(*(generatedBy.GetBB()), *meStmt); + DefPoint *newDef = GetMemPool()->New(DefPoint::DefineKind::kDefByPi); + newDef->SetDefPi(*meStmt); + newDefPoints.push_back(newDef); + newDef2Old[newDef] = static_cast(&rhs); +} + +void MeSSI::CreateNewPiStmt(VarMeExpr *lhs, MeExpr &rhs, MeStmt &generatedBy) { + if (lhs == nullptr) { + return; + } + PiassignMeStmt *meStmt = GetMemPool()->New(&GetAllocator()); + meStmt->SetGeneratedBy(generatedBy); + meStmt->SetRHS(*(static_cast(&rhs))); + meStmt->SetLHS(*lhs); + meStmt->SetBB(generatedBy.GetBB()); + lhs->SetDefBy(kDefByStmt); + lhs->SetDefStmt(meStmt); + generatedBy.GetBB()->InsertMeStmtAfter(&generatedBy, meStmt); + DefPoint *newDef = GetMemPool()->New(DefPoint::DefineKind::kDefByPi); + newDef->SetDefPi(*meStmt); + newDefPoints.push_back(newDef); + newDef2Old[newDef] = static_cast(&rhs); +} + +void MeSSI::AddPiForABCOpt(BB &bb) { + CHECK_FATAL(containsBB != nullptr && arrayChecks != nullptr, "must be"); + auto it = containsBB->find(&bb); + if (it == containsBB->end()) { + return; + } + std::vector &arryChk = it->second; + for (MeStmt *meStmt : arryChk) { + NaryMeExpr *naryMeExpr = (*arrayChecks)[meStmt]; + CHECK_FATAL(naryMeExpr->GetOpnds().size() == kNumOpnds, "must be"); + MeExpr *opnd1 = naryMeExpr->GetOpnd(0); + MeExpr *opnd2 = naryMeExpr->GetOpnd(1); + CHECK_FATAL(opnd1->GetMeOp() == kMeOpVar, "must be"); + CHECK_FATAL(opnd1->GetPrimType() == PTY_ref, "must be"); + CHECK_FATAL(opnd2->GetMeOp() == kMeOpVar || opnd2->GetMeOp() == kMeOpConst, "must be"); + VarMeExpr *arrayCheckOpnd2 = CreateNewPiExpr(*opnd2); + CreateNewPiStmt(arrayCheckOpnd2, *opnd2, *meStmt); + } +} + +NaryMeExpr *MeSSI::GetInstanceOfType(MeExpr &e) { + CHECK_FATAL(e.GetMeOp() == kMeOpVar, "must b"); + VarMeExpr *var = static_cast(&e); + if (var->GetPrimType() != PTY_u1 || var->GetDefBy() != kDefByStmt) { + return nullptr; + } + MeStmt *defStmt = var->GetDefStmt(); + if (defStmt == nullptr || defStmt->GetOp() != OP_dassign) { + return nullptr; + } + MeExpr *expr = defStmt->GetRHS(); + if (expr == nullptr || expr->GetOp() != OP_intrinsicopwithtype) { + return nullptr; + } + auto *callNode = safe_cast(expr); + if (callNode == nullptr || callNode->GetIntrinsic() != INTRN_JAVA_INSTANCE_OF) { + return nullptr; + } + if (callNode->GetOpnd(0)->GetMeOp() != 
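
CreateNewPiExpr above manufactures a brand-new SSA version of the variable it is handed: it takes a fresh expression id, appends the expression to the version-to-expression table, and records the new version index on the variable's original symbol. A tiny standalone model of that bookkeeping is below; OriginalSymbol and VersionTable are invented names that only echo OriginalSt and the verst table.

    #include <cstddef>
    #include <deque>
    #include <iostream>
    #include <string>
    #include <vector>

    // One source-level variable; it remembers every SSA version made for it.
    struct OriginalSymbol {
      std::string name;
      std::vector<std::size_t> versionIndices;
    };

    // One SSA version of some symbol.
    struct SsaVersion {
      OriginalSymbol *symbol;
      std::size_t versionIndex;
    };

    class VersionTable {
     public:
      // Mirrors CreateNewPiExpr's bookkeeping: the new version's index is the
      // current table size, the version is appended to the table, and the
      // original symbol records the index it was given.
      SsaVersion *MakeNewVersion(OriginalSymbol &sym) {
        versions.push_back({&sym, versions.size()});
        sym.versionIndices.push_back(versions.back().versionIndex);
        return &versions.back();  // deque keeps this pointer stable
      }
     private:
      std::deque<SsaVersion> versions;
    };

    int main() {
      OriginalSymbol i{"i", {}};
      VersionTable table;
      SsaVersion *v0 = table.MakeNewVersion(i);
      SsaVersion *v1 = table.MakeNewVersion(i);
      std::cout << i.name << " has versions " << v0->versionIndex
                << " and " << v1->versionIndex << '\n';  // 0 and 1
      return 0;
    }
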
kMeOpVar && callNode->GetOpnd(0)->GetMeOp() != kMeOpIvar) { + return nullptr; + } + return callNode; +} + +uint8_t MeSSI::AnalysisBranch(MeStmt &meStmt) { + CHECK_FATAL(meStmt.IsCondBr(), "must be"); + auto *brMeStmt = static_cast(&meStmt); + MeExpr *meCmp = brMeStmt->GetOpnd(); + uint8_t result = 0; + if (meCmp->GetMeOp() == kMeOpVar) { + NaryMeExpr *instanceofType = GetInstanceOfType(*meCmp); + if (instanceofType != nullptr) { + result = result | kCheckCastOpt; + } + } else if (meCmp->GetMeOp() == kMeOpOp) { + auto *opMeExpr = static_cast(meCmp); + if (opMeExpr->GetNumOpnds() != kNumOpnds) { + return result; + } + if (opMeExpr->GetOp() != OP_ge && opMeExpr->GetOp() != OP_le && + opMeExpr->GetOp() != OP_lt && opMeExpr->GetOp() != OP_gt && + opMeExpr->GetOp() != OP_ne && opMeExpr->GetOp() != OP_eq) { + return result; + } + MeExpr *opnd1 = opMeExpr->GetOpnd(0); + MeExpr *opnd2 = opMeExpr->GetOpnd(1); + if ((opnd1->GetMeOp() != kMeOpVar && opnd1->GetMeOp() != kMeOpConst) || + (opnd2->GetMeOp() != kMeOpVar && opnd2->GetMeOp() != kMeOpConst)) { + return result; + } + if (IsPrimitivePureScalar(opnd1->GetPrimType()) && IsPrimitivePureScalar(opnd2->GetPrimType())) { + return result | kArrayBoundsCheckOpt; + } + if (opMeExpr->GetOp() == OP_ne || opMeExpr->GetOp() == OP_eq) { + CHECK_FATAL(IsAddress(opnd1->GetPrimType()) == IsPrimitivePoint(opnd1->GetPrimType()), "MUST BE"); + CHECK_FATAL(IsAddress(opnd2->GetPrimType()) == 1, "MUST BE"); + return result | kNullPointerCheckOpt | kCondBaseRCOpt; + } + } + return result; +} + +void MeSSI::AddNullPointerInfoForVar() { + CHECK_FATAL(false, "NYI"); +} + +void MeSSI::InsertPiNodes() { + for (auto bIt = meFunc->valid_begin(), eIt = meFunc->valid_end(); bIt != eIt; ++bIt) { + BB *bb = *bIt; + if (bb->GetKind() == kBBCondGoto) { + uint8_t careOpt = AnalysisBranch(*(bb->GetLastMe())); + SSIType careBranch{}; + careBranch.SetOptType(careOpt & ssiType.GetOptType()); + auto *brMeStmt = static_cast(bb->GetLastMe()); + BB *brTarget = bb->GetSucc(1); + BB *brFallThru = bb->GetSucc(0); + CHECK_FATAL(brMeStmt->GetOffset() == brTarget->GetBBLabel(), "must be"); + if (careBranch.GetOptKindType(kCheckCastOpt)) { + NaryMeExpr *instanceofType = GetInstanceOfType(*brMeStmt->GetOpnd()); + MIRType *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(instanceofType->GetTyIdx()); + MeExpr *opnd = instanceofType->GetOpnd(0); + if (opnd->GetMeOp() == kMeOpIvar) { + opnd = static_cast(opnd)->GetMu(); + } + VarMeExpr *brTargetNewOpnd = CreateNewPiExpr(*opnd); + bool isToken = (brMeStmt->GetOp() == OP_brtrue); + CreateNewPiStmt(brTargetNewOpnd, *opnd, *brTarget, *brMeStmt, true); + VarMeExpr *brFallThruNewOpnd = CreateNewPiExpr(*opnd); + CreateNewPiStmt(brFallThruNewOpnd, *opnd, *brFallThru, *brMeStmt, false); + // Add Type Info For Var + if (isToken) { + inferredType[brTargetNewOpnd] = mirType; + } else { + inferredType[brFallThruNewOpnd] = mirType; + } + } else if (careBranch.GetOptType() != 0) { + CHECK_FATAL(brMeStmt->GetOpnd()->GetMeOp() == kMeOpOp, "must be"); + auto *opMeExpr = static_cast(brMeStmt->GetOpnd()); + MeExpr *opnd1 = opMeExpr->GetOpnd(0); + MeExpr *opnd2 = opMeExpr->GetOpnd(1); + VarMeExpr *brTargetNewOpnd1 = CreateNewPiExpr(*opnd1); + VarMeExpr *brTargetNewOpnd2 = CreateNewPiExpr(*opnd2); + CreateNewPiStmt(brTargetNewOpnd1, *opnd1, *brTarget, *brMeStmt, true); + CreateNewPiStmt(brTargetNewOpnd2, *opnd2, *brTarget, *brMeStmt, true); + VarMeExpr *brFallThruNewOpnd1 = CreateNewPiExpr(*opnd1); + VarMeExpr *brFallThruNewOpnd2 = CreateNewPiExpr(*opnd2); + 
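
AnalysisBranch above inspects a conditional branch and reports, as a bitmask, which client optimizations could profit from pi nodes on its outgoing edges: a compare over pure scalars feeds bounds-check removal, an eq/ne over addresses feeds null-check and conditional RC elimination, and a condition produced by JAVA_INSTANCE_OF feeds check-cast elimination. A condensed standalone version of that classification is sketched below; the Cond struct and ClassifyBranch collapse Maple's expression inspection into a few booleans and are illustrative only.

    #include <cstdint>
    #include <iostream>

    enum OptBit : uint8_t {
      kNullCheck   = 0x1,
      kCheckCast   = 0x2,
      kCondRC      = 0x4,
      kBoundsCheck = 0x8,
    };

    // Boiled-down view of a branch condition; the real code inspects MeExprs.
    struct Cond {
      bool definedByInstanceOf = false;   // condition comes from JAVA_INSTANCE_OF
      bool isCompare = false;             // ge/le/lt/gt/ne/eq with two operands
      bool operandsAreScalars = false;    // both operands are pure scalar values
      bool operandsAreAddresses = false;  // both operands are references
      bool isEqOrNe = false;
    };

    uint8_t ClassifyBranch(const Cond &c) {
      uint8_t result = 0;
      if (c.definedByInstanceOf) {
        result |= kCheckCast;
      } else if (c.isCompare) {
        if (c.operandsAreScalars) {
          result |= kBoundsCheck;
        } else if (c.isEqOrNe && c.operandsAreAddresses) {
          result |= kNullCheck | kCondRC;
        }
      }
      return result;
    }

    int main() {
      Cond scalarCmp{false, true, true, false, false};
      Cond refEq{false, true, false, true, true};
      std::cout << int(ClassifyBranch(scalarCmp)) << ' '
                << int(ClassifyBranch(refEq)) << '\n';  // 8 5
      return 0;
    }
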
CreateNewPiStmt(brFallThruNewOpnd1, *opnd1, *brFallThru, *brMeStmt, false); + CreateNewPiStmt(brFallThruNewOpnd2, *opnd2, *brFallThru, *brMeStmt, false); + if (careBranch.GetOptKindType(kNullPointerCheckOpt) || careBranch.GetOptKindType(kCondBaseRCOpt)) { + AddNullPointerInfoForVar(); + } + } + } + if (ssiType.GetOptKindType(kArrayBoundsCheckOpt)) { + AddPiForABCOpt(*bb); + } + } +} + +bool MeSSI::ExistedPhiNode(BB &bb, VarMeExpr &rhs) { + return bb.GetMePhiList().find(rhs.GetOStIdx()) != bb.GetMePhiList().end(); +} + +bool MeSSI::ExistedPiNode(BB &bb, BB &parentBB, const VarMeExpr &rhs) { + MapleMap> &piList = bb.GetPiList(); + auto it = piList.find(&parentBB); + if (it == piList.end()) { + return false; + } + std::vector &piStmts = it->second; + CHECK_FATAL(!piStmts.empty(), "should not be empty"); + CHECK_FATAL(piStmts.size() <= kPiStmtUpperBound, "must be"); + PiassignMeStmt *pi1 = piStmts.at(0); + if (pi1->GetLHS()->GetOStIdx() == rhs.GetOStIdx()) { + return true; + } + if (piStmts.size() == kPiStmtUpperBound) { + PiassignMeStmt *pi2 = piStmts.at(1); + if (pi2->GetLHS()->GetOStIdx() == rhs.GetOStIdx()) { + return true; + } + } + return false; +} + +void MeSSI::CreatePhi(VarMeExpr &rhs, BB &dfBB) { + VarMeExpr *phiNewLHS = CreateNewPiExpr(rhs); + ASSERT_NOT_NULL(phiNewLHS); + MePhiNode *newPhi = GetMemPool()->New(phiNewLHS, &GetAllocator()); + newPhi->SetDefBB(&dfBB); + newPhi->GetOpnds().resize(dfBB.GetPred().size(), &rhs); + newPhi->SetPiAdded(); + dfBB.GetMePhiList().insert(std::make_pair(phiNewLHS->GetOStIdx(), newPhi)); + DefPoint *newDef = GetMemPool()->New(DefPoint::DefineKind::kDefByPhi); + newDef->SetDefPhi(*newPhi); + newDefPoints.push_back(newDef); + newDef2Old[newDef] = &rhs; +} + +void MeSSI::InsertPhiNodes() { + for (size_t i = 0; i < newDefPoints.size(); ++i) { + DefPoint *newDefStmt = newDefPoints[i]; + BB *newDefBB = newDefStmt->GetBB(); + CHECK_NULL_FATAL(newDefBB); + VarMeExpr *rhs = newDefStmt->GetRHS(); + if (newDefStmt->IsPiStmt()) { + BB *genByBB = newDefStmt->GetGeneratedByBB(); + if (!dom->Dominate(*genByBB, *newDefBB)) { + if (!ExistedPhiNode(*newDefBB, *rhs)) { + CreatePhi(*rhs, *newDefBB); + } + continue; + } + } + BB *oldDefBB = rhs->DefByBB(); + if (oldDefBB == nullptr) { + oldDefBB = meFunc->GetCommonEntryBB(); + CHECK_FATAL(rhs->IsZeroVersion(irMap->GetSSATab()), "must be"); + } + CHECK_NULL_FATAL(oldDefBB); + MapleSet &dfs = dom->GetDomFrontier(newDefBB->GetBBId()); + for (auto bbID : dfs) { + BB *dfBB = meFunc->GetBBFromID(bbID); + if (!dom->Dominate(*oldDefBB, *dfBB)) { + MapleSet &dfsTmp = dom->GetDomFrontier(oldDefBB->GetBBId()); + CHECK_FATAL(dfsTmp.find(bbID) != dfsTmp.end(), "must be"); + continue; + } + if (ExistedPhiNode(*dfBB, *rhs)) { + continue; + } + CreatePhi(*rhs, *dfBB); + } + } +} + +void MeSSI::RenameStartPiBr(DefPoint &newDefPoint) { + const OStIdx &ostIdx = newDefPoint.GetOStIdx(); + BB *newDefBB = newDefPoint.GetBB(); + if (!ExistedPhiNode(*(newDefPoint.GetBB()), *(newDefPoint.GetRHS()))) { + RenameStartPhi(newDefPoint); + return; + } + MePhiNode* phi = newDefBB->GetMePhiList()[ostIdx]; + BB *genByBB = newDefPoint.GetGeneratedByBB(); + size_t index = 0; + while (index < newDefBB->GetPred().size()) { + if (newDefBB->GetPred(index) == genByBB) { + break; + } + ++index; + } + CHECK_FATAL(index < newDefBB->GetPred().size(), "must be"); + ScalarMeExpr*oldVar = phi->GetOpnd(index); + phi->SetOpnd(index, newDefPoint.GetLHS()); + if (!phi->IsPiAdded()) { + if (modifiedPhi.find(phi) == modifiedPhi.end()) { + modifiedPhi[phi] = 
std::vector(phi->GetOpnds().size(), nullptr); + } + if (modifiedPhi[phi][index] == nullptr) { + modifiedPhi[phi][index] = oldVar; + } + } +} + +void MeSSI::RenameStartPiArray(DefPoint &newDefPoint) { + BB *newDefBB = newDefPoint.GetBB(); + MeStmt *piStmt = newDefPoint.GetPiStmt(); + if (piStmt != newDefBB->GetLastMe()) { + for (MeStmt *meStmt = piStmt->GetNext(); meStmt != nullptr; meStmt = meStmt->GetNext()) { + if (ReplaceStmt(*meStmt, *(newDefPoint.GetLHS()), *(newDef2Old[&newDefPoint]))) { + return; + } + } + } + ReplacePiPhiInSuccs(*newDefBB, *(newDefPoint.GetLHS())); + const MapleSet &children = dom->GetDomChildren(newDefBB->GetBBId()); + for (const BBId &child : children) { + ReplaceBB(*(meFunc->GetBBFromID(child)), *newDefBB, newDefPoint); + } +} + +void MeSSI::RenameStartPhi(DefPoint &newDefPoint) { + BB *newDefBB = newDefPoint.GetBB(); + for (MeStmt &meStmt : newDefBB->GetMeStmts()) { + if (ReplaceStmt(meStmt, *newDefPoint.GetLHS(), *newDef2Old[&newDefPoint])) { + return; + } + } + ReplacePiPhiInSuccs(*newDefBB, *(newDefPoint.GetLHS())); + const MapleSet &children = dom->GetDomChildren(newDefBB->GetBBId()); + for (const BBId &child : children) { + ReplaceBB(*(meFunc->GetBBFromID(child)), *newDefBB, newDefPoint); + } +} + +void MeSSI::ReplacePiPhiInSuccs(BB &bb, VarMeExpr &newVar) { + for (BB *succBB : bb.GetSucc()) { + MapleMap> &piList = succBB->GetPiList(); + auto it1 = piList.find(&bb); + if (it1 != piList.end()) { + std::vector &piStmts = it1->second; + // the size of pi statements must be 1 or 2 + CHECK_FATAL(!piStmts.empty(), "should not be empty"); + CHECK_FATAL(piStmts.size() <= kPiStmtUpperBound, "must be"); + PiassignMeStmt *pi1 = piStmts.at(0); + if (pi1->GetLHS()->GetOStIdx() == newVar.GetOStIdx()) { + pi1->SetRHS(newVar); + continue; + } + if (piStmts.size() == kPiStmtUpperBound) { + PiassignMeStmt *pi2 = piStmts.at(1); + if (pi2->GetLHS()->GetOStIdx() == newVar.GetOStIdx()) { + pi2->SetRHS(newVar); + continue; + } + } + } + size_t index = 0; + while (index < succBB->GetPred().size()) { + if (succBB->GetPred(index) == &bb) { + break; + } + ++index; + } + CHECK_FATAL(index < succBB->GetPred().size(), "must be"); + MapleMap &phiList = succBB->GetMePhiList(); + auto it2 = phiList.find(newVar.GetOStIdx()); + if (it2 != phiList.end()) { + MePhiNode *phi = it2->second; + ScalarMeExpr *oldVar = phi->GetOpnd(index); + phi->SetOpnd(index, &newVar); + if (!phi->IsPiAdded()) { + if (modifiedPhi.find(phi) == modifiedPhi.end()) { + modifiedPhi[phi] = std::vector(phi->GetOpnds().size(), nullptr); + } + if (modifiedPhi[phi][index] == nullptr) { + modifiedPhi[phi][index] = oldVar; + } + } + } + } +} + +MeExpr *MeSSI::NewMeExpr(MeExpr &meExpr) { + switch (meExpr.GetMeOp()) { + case kMeOpIvar: { + auto &ivarMeExpr = static_cast(meExpr); + IvarMeExpr *newIvarExpr = GetMemPool()->New(irMap->GetExprID(), ivarMeExpr); + irMap->SetExprID(irMap->GetExprID() + 1); + return newIvarExpr; + } + case kMeOpOp: { + auto &opMeExpr = static_cast(meExpr); + OpMeExpr *newOpMeExpr = GetMemPool()->New(opMeExpr, irMap->GetExprID()); + irMap->SetExprID(irMap->GetExprID() + 1); + return newOpMeExpr; + } + case kMeOpNary: { + auto &naryMeExpr = static_cast(meExpr); + NaryMeExpr *newNaryMeExpr = GetMemPool()->New(&GetAllocator(), irMap->GetExprID(), naryMeExpr); + irMap->SetExprID(irMap->GetExprID() + 1); + newNaryMeExpr->InitBase(meExpr.GetOp(), meExpr.GetPrimType(), meExpr.GetNumOpnds()); + return newNaryMeExpr; + } + default: + CHECK_FATAL(false, "impossible"); + } +} + +MeExpr 
*MeSSI::ReplaceMeExprExpr(MeExpr &origExpr, MeExpr &meExpr, MeExpr &repExpr) { + if (origExpr.IsLeaf()) { + return &origExpr; + } + switch (origExpr.GetMeOp()) { + case kMeOpOp: { + auto &opMeExpr = static_cast(origExpr); + OpMeExpr newMeExpr(opMeExpr, kInvalidExprID); + bool needRehash = false; + for (uint32 i = 0; i < kOperandNumTernary; ++i) { + if (opMeExpr.GetOpnd(i) == nullptr) { + continue; + } + if (opMeExpr.GetOpnd(i) == &meExpr) { + needRehash = true; + newMeExpr.SetOpnd(i, &repExpr); + } else if (!opMeExpr.GetOpnd(i)->IsLeaf()) { + newMeExpr.SetOpnd(i, ReplaceMeExprExpr(*newMeExpr.GetOpnd(i), meExpr, repExpr)); + if (newMeExpr.GetOpnd(i) != opMeExpr.GetOpnd(i)) { + needRehash = true; + } + } + } + return needRehash ? NewMeExpr(newMeExpr) : &origExpr; + } + case kMeOpNary: { + auto &naryMeExpr = static_cast(origExpr); + NaryMeExpr newMeExpr(&GetAllocator(), kInvalidExprID, naryMeExpr); + const MapleVector &opnds = naryMeExpr.GetOpnds(); + bool needRehash = false; + for (size_t i = 0; i < opnds.size(); ++i) { + MeExpr *opnd = opnds[i]; + if (opnd == &meExpr) { + newMeExpr.SetOpnd(i, &repExpr); + needRehash = true; + } else if (!opnd->IsLeaf()) { + newMeExpr.SetOpnd(i, ReplaceMeExprExpr(*newMeExpr.GetOpnd(i), meExpr, repExpr)); + if (newMeExpr.GetOpnd(i) != opnd) { + needRehash = true; + } + } + } + return needRehash ? NewMeExpr(newMeExpr) : &origExpr; + } + case kMeOpIvar: { + auto &ivarExpr = static_cast(origExpr); + IvarMeExpr newMeExpr(kInvalidExprID, ivarExpr); + bool needRehash = false; + if (ivarExpr.GetBase() == &meExpr) { + newMeExpr.SetBase(&repExpr); + needRehash = true; + } else if (!ivarExpr.GetBase()->IsLeaf()) { + newMeExpr.SetBase(ReplaceMeExprExpr(*newMeExpr.GetBase(), meExpr, repExpr)); + if (newMeExpr.GetBase() != ivarExpr.GetBase()) { + needRehash = true; + } + } + if (ivarExpr.GetMu() == &meExpr) { + CHECK_FATAL(repExpr.GetMeOp() == kMeOpVar, "must be"); + newMeExpr.SetMuVal(static_cast(&repExpr)); + needRehash = true; + } + return needRehash ? 
NewMeExpr(newMeExpr) : &origExpr; + } + default: + CHECK_FATAL(false, "NYI"); + } +} + +bool MeSSI::ReplaceMeExprStmtOpnd(uint32 opndID, MeStmt &meStmt, MeExpr &oldVar, MeExpr &newVar, bool update) { + MeExpr *opnd = meStmt.GetOpnd(opndID); + bool isFromIassign = (meStmt.GetOp() == OP_iassign) && (opndID == 0); + if (isFromIassign) { + opnd = static_cast(&meStmt)->GetLHSVal(); + } + bool replaced = false; + MeExpr *newExpr = nullptr; + if (opnd == &oldVar) { + if (isFromIassign) { + static_cast(&meStmt)->SetLHSVal(static_cast(&newVar)); + } else { + meStmt.SetOpnd(opndID, &newVar); + } + replaced = true; + } else if (!opnd->IsLeaf()) { + newExpr = ReplaceMeExprExpr(*opnd, oldVar, newVar); + replaced = (newExpr != opnd); + if (isFromIassign) { + static_cast(&meStmt)->SetLHSVal(static_cast(newExpr)); + } else { + meStmt.SetOpnd(opndID, newExpr); + } + } + if (replaced && update) { + if (modifiedStmt.find(std::make_pair(&meStmt, opndID)) == modifiedStmt.end()) { + modifiedStmt[std::make_pair(&meStmt, opndID)] = opnd; + } + } + return replaced; +} + +bool MeSSI::ReplaceStmtWithNewVar(MeStmt &meStmt, MeExpr &oldVar, MeExpr &newVar, bool update) { + switch (meStmt.GetOp()) { + case OP_dassign: + case OP_maydassign: + case OP_brtrue: + case OP_brfalse: { + return ReplaceMeExprStmtOpnd(0, meStmt, oldVar, newVar, update); + } + case OP_iassign: { + bool baseIsReplaced = ReplaceMeExprStmtOpnd(0, meStmt, oldVar, newVar, update); + bool rhsReplaced = ReplaceMeExprStmtOpnd(1, meStmt, oldVar, newVar, update); + return baseIsReplaced || rhsReplaced; + } + case OP_intrinsiccallwithtype: { + auto &callNode = static_cast(meStmt); + if (callNode.GetIntrinsic() != INTRN_JAVA_CHECK_CAST) { + break; + } + return ReplaceMeExprStmtOpnd(0, meStmt, oldVar, newVar, update); + } + case OP_regassign: { + CHECK_FATAL(false, "should not happen"); + } + default: { + break; + } + } + return false; +} + +bool MeSSI::ReplaceStmt(MeStmt &meStmt, VarMeExpr &newVar, VarMeExpr &oldVar) { + if (meStmt.GetOp() == OP_piassign) { + auto *pi = static_cast(&meStmt); + if (pi->GetRHS() == &oldVar) { + pi->SetRHS(newVar); + } + } else { + (void)ReplaceStmtWithNewVar(meStmt, oldVar, newVar, true); + } + const OStIdx &ostIdx = newVar.GetOStIdx(); + MapleMap *chiList = meStmt.GetChiList(); + if (chiList != nullptr && chiList->find(ostIdx) != chiList->end()) { + return true; + } + MeExpr *lhs = meStmt.GetAssignedLHS(); + if (lhs != nullptr && lhs->GetMeOp() == kMeOpVar && static_cast(lhs)->GetOStIdx() == ostIdx) { + return true; + } + lhs = meStmt.GetLHS(); + return (lhs != nullptr && lhs->GetMeOp() == kMeOpVar && static_cast(lhs)->GetOStIdx() == ostIdx); +} + +void MeSSI::Rename() { + for (size_t i = 0; i < newDefPoints.size(); ++i) { + DefPoint *newDefStmt = newDefPoints[i]; + visitedBBs.clear(); + if (newDefStmt->IsPiStmt()) { + if (newDefStmt->IsGeneratedByBr()) { + RenameStartPiBr(*newDefStmt); + } else { + RenameStartPiArray(*newDefStmt); + } + } else { + RenameStartPhi(*newDefStmt); + } + } +} + +void MeSSI::ReplaceBB(BB &bb, BB &parentBB, DefPoint &newDefPoint) { + if (visitedBBs.find(&bb) != visitedBBs.end()) { + return; + } + visitedBBs.insert(&bb); + if (ExistedPhiNode(bb, *(newDefPoint.GetLHS())) || ExistedPiNode(bb, parentBB, *(newDefPoint.GetLHS()))) { + return; + } + for (MeStmt &meStmt : bb.GetMeStmts()) { + if (ReplaceStmt(meStmt, *(newDefPoint.GetLHS()), *(newDef2Old[&newDefPoint]))) { + return; + } + } + ReplacePiPhiInSuccs(bb, *(newDefPoint.GetLHS())); + const MapleSet &children = dom->GetDomChildren(bb.GetBBId()); + 
for (const BBId &child : children) { + ReplaceBB(*(meFunc->GetBBFromID(child)), bb, newDefPoint); + } +} + +void MeSSI::RemoveExtraNodes() { + for (DefPoint *defP : newDefPoints) { + defP->RemoveFromBB(); + } + for (auto pair : modifiedStmt) { + MeStmt *meStmt = pair.first.first; + MeExpr *newVar = nullptr; + if ((meStmt->GetOp() == OP_iassign) && (pair.first.second == 0)) { + newVar = static_cast(meStmt)->GetLHSVal(); + } else { + newVar = meStmt->GetOpnd(pair.first.second); + } + MeExpr *oldVar = pair.second; + bool replaced = ReplaceStmtWithNewVar(*meStmt, *newVar, *oldVar, false); + CHECK_FATAL(replaced, "must be"); + } + for (auto pair : modifiedPhi) { + MePhiNode *phi = pair.first; + for (size_t i = 0; i < pair.second.size(); ++i) { + size_t index = i; + ScalarMeExpr*oldVar = pair.second[i]; + if (oldVar != nullptr) { + phi->SetOpnd(index, oldVar); + } + } + } +} + +MIRType *MeSSI::GetInferredType(MeExpr *expr) { + if (inferredType.find(expr) == inferredType.end()) { + return nullptr; + } + return inferredType[expr]; +} + +void MeSSI::ConvertToSSI() { + InsertPiNodes(); + InsertPhiNodes(); + Rename(); +} +void MeSSI::ConvertToSSA() { + RemoveExtraNodes(); +} +} diff --git a/src/maple_phase/include/phase.h b/src/maple_phase/include/phase.h index 8c01283cb5b92364e9ecf39df2ff955ce5ad12fd..6315c510bf173801779cd488dfcace61be0d384f 100644 --- a/src/maple_phase/include/phase.h +++ b/src/maple_phase/include/phase.h @@ -94,7 +94,7 @@ class Phase { } private: - unsigned int memPoolCount = 0; + uint32 memPoolCount = 0; std::set memPools; MemPoolCtrler *mpCtrler = &memPoolCtrler; }; @@ -155,9 +155,8 @@ class AnalysisResultManager { } void InvalidIRbaseAnalysisResult(UnitIR &ir) { - PhaseIDT id; for (auto it = analysisPhases.begin(); it != analysisPhases.end(); ++it) { - id = it->first; + PhaseIDT id = it->first; InvalidAnalysisResult(id, &ir); } } diff --git a/src/maple_util/include/mpl_number.h b/src/maple_util/include/mpl_number.h index 69b0b118f3053d4ef06de9399b5f831f79afd270..24ed9b49cc698444d4ff510b27eb7592d51a7e3b 100644 --- a/src/maple_util/include/mpl_number.h +++ b/src/maple_util/include/mpl_number.h @@ -284,7 +284,7 @@ using Index = Number; namespace std { template inline string to_string(maple::utils::Number val) { - return std::to_string (val.get()); + return std::to_string(val.get()); } } #endif //MAPLE_UTIL_INCLUDE_MPL_NUMBER_H diff --git a/src/maple_util/include/mpl_scheduler.h b/src/maple_util/include/mpl_scheduler.h index a38af23f6dba0f8b9a3c9732755503b738811152..bad99f003d0246953cad3c99161f0ae77e196544 100644 --- a/src/maple_util/include/mpl_scheduler.h +++ b/src/maple_util/include/mpl_scheduler.h @@ -28,21 +28,17 @@ #include "types_def.h" namespace maple { -#define MP_SYNC \ - (stmt) GlobalLock(); \ - stmt GlobalUnlock(); - class MplTaskParam { public: MplTaskParam() = default; - virtual ~MplTaskParam() = default; + ~MplTaskParam() = default; }; class MplTask { public: MplTask() : taskId(0) {} - virtual ~MplTask() {} + virtual ~MplTask() = default; void SetTaskId(uint32 id) { taskId = id; @@ -75,13 +71,13 @@ class MplTask { class MplSchedulerParam { public: MplSchedulerParam() = default; - virtual ~MplSchedulerParam() = default; + ~MplSchedulerParam() = default; }; class MplScheduler { public: explicit MplScheduler(const std::string &name); - virtual ~MplScheduler() {} + virtual ~MplScheduler() = default; void Init(); virtual void AddTask(MplTask *task); @@ -134,7 +130,7 @@ class MplScheduler { ThreadStatus statusFinish; virtual int FinishTask(const MplTask &task); virtual 
MplTask *GetTaskToRun(); - virtual uint32 GetTaskIdsFinishSize(); + virtual size_t GetTaskIdsFinishSize(); virtual MplTask *GetTaskFinishFirst(); virtual void RemoveTaskFinish(uint32 id); virtual void TaskIdFinish(uint32 id); @@ -151,11 +147,11 @@ class MplScheduler { virtual void CallbackThreadFinishEnd() {} - virtual MplTaskParam *CallbackGetTaskRunParam() { + virtual MplTaskParam *CallbackGetTaskRunParam() const { return nullptr; } - virtual MplTaskParam *CallbackGetTaskFinishParam() { + virtual MplTaskParam *CallbackGetTaskFinishParam() const { return nullptr; } }; diff --git a/src/maple_util/include/profile.h b/src/maple_util/include/profile.h index dd2515748da286dd0586f20c12617764d0fa787b..912a4ae69f4ddb116049cf85ab0ff7aa0d4271a0 100644 --- a/src/maple_util/include/profile.h +++ b/src/maple_util/include/profile.h @@ -30,6 +30,7 @@ struct IRProfileDesc { uint64 funcHash = 0; IRProfileDesc() = default; IRProfileDesc(uint64 hash, uint32 start, uint32 end) : counterStart(start), counterEnd(end), funcHash(hash) {} + ~IRProfileDesc() = default; }; class Profile { @@ -48,6 +49,7 @@ class Profile { : funcHash(hash), totalCounter(num), counter(counter) {} BBInfo(uint64 hash, uint32 num, const std::initializer_list &iList) : funcHash(hash), totalCounter(num), counter(iList) {} + ~BBInfo() = default; }; static const uint8 stringEnd; @@ -99,6 +101,8 @@ class Profile { std::unordered_map funcBBProfUseInfo; std::unordered_map funcDesc; std::vector counterTab; + static const std::string preClassHot[]; + static const std::string preMethodHot[]; bool CheckProfileHeader(const Header &header) const; std::string GetProfileNameByType(uint8 type) const; std::string GetFunctionName(uint32 classIdx, uint32 methodIdx, uint32 sigIdx) const; diff --git a/src/maple_util/include/safe_cast.h b/src/maple_util/include/safe_cast.h index 5b42bd9eb1439ea49a8fdc909f9686e52d1a092a..058beaade44ce2d1358ea0024b649faa8a6cd95c 100644 --- a/src/maple_util/include/safe_cast.h +++ b/src/maple_util/include/safe_cast.h @@ -18,11 +18,11 @@ namespace maple { template -struct SafeCastCondition : std::false_type {}; +struct SafeCastCondition : public std::false_type {}; #define REGISTER_SAFE_CAST(type, condition) \ template<> \ -struct SafeCastCondition : std::true_type { \ +struct SafeCastCondition : public std::true_type { \ template \ static inline bool DoIt(const FromT &from) { \ return (condition); \ @@ -46,7 +46,7 @@ struct InstanceOfImpl -struct EnabledSafeCast : utils::meta_or, SafeCastCondition>::type {}; +struct EnabledSafeCast : public utils::meta_or, SafeCastCondition>::type {}; } template &Profile::GetMeta(uint8 type) { return methodMeta; default: CHECK_FATAL(false, "type not found"); - return classMeta; } } diff --git a/src/mempool/include/maple_string.h b/src/mempool/include/maple_string.h index fd97636d9b6b01aeae62c3c040bc2c4837083d3a..dc89a230b3583cd86ff6ce6b88ce060c9e39a105 100644 --- a/src/mempool/include/maple_string.h +++ b/src/mempool/include/maple_string.h @@ -28,12 +28,12 @@ class MapleString { MapleString(const MapleString &str); MapleString(const char *str, MemPool *memPool); MapleString(const char *str, size_t size, MemPool *memPool); // copyin - MapleString(unsigned int size, MemPool *memPool); + MapleString(size_t size, MemPool *memPool); MapleString(const MapleString &str, MemPool *memPool); MapleString(const std::string &str, MemPool *memPool); ~MapleString() = default; - unsigned int length() const { + size_t length() const { return dataLength; } @@ -69,11 +69,11 @@ class MapleString { if (str == 
nullptr) { return *this; } - unsigned int size = static_cast(strlen(str)); + size_t size = strlen(str); CHECK_FATAL(size <= UINT_MAX - 1, "str too large"); // if data is null, old_size = 0, else +1 - unsigned int oldSize = (data == nullptr) ? 0 : (dataLength + 1); + size_t oldSize = ((data == nullptr) ? 0 : (dataLength + 1)); if (oldSize < (1 + size)) { data = static_cast(memPool->Realloc(data, oldSize * sizeof(char), (1 + size) * sizeof(char))); } @@ -91,10 +91,10 @@ class MapleString { } MapleString &operator=(const std::string &str) { - unsigned int size = str.length(); + size_t size = str.length(); CHECK_FATAL(size <= UINT_MAX - 1, "str too large"); - unsigned int oldSize = (data == nullptr) ? 0 : (dataLength + 1); + size_t oldSize = (data == nullptr) ? 0 : (dataLength + 1); if (oldSize < (1 + size)) { data = static_cast(memPool->Realloc(data, oldSize * sizeof(char), (1 + size) * sizeof(char))); } @@ -114,10 +114,10 @@ class MapleString { if (&str == this) { return *this; } - unsigned int size = str.dataLength; + size_t size = str.dataLength; CHECK_FATAL(size <= UINT_MAX - 1, "str too large"); - unsigned int oldSize = (data == nullptr) ? 0 : (dataLength + 1); + size_t oldSize = (data == nullptr) ? 0 : (dataLength + 1); data = static_cast(memPool->Realloc(data, oldSize * sizeof(char), (1 + size) * sizeof(char))); CHECK_FATAL(data != nullptr, "null ptr check"); if (size == 0) { @@ -132,7 +132,7 @@ class MapleString { } MapleString &operator+=(const char c) { - unsigned int oldSize = (data == nullptr) ? 0 : (dataLength + 1); + size_t oldSize = ((data == nullptr) ? 0 : (dataLength + 1)); CHECK_FATAL(oldSize <= UINT_MAX - 1, "str too large"); data = static_cast(memPool->Realloc(data, oldSize * sizeof(char), (dataLength + 1 + 1) * sizeof(char))); @@ -146,8 +146,8 @@ class MapleString { if (str == nullptr) { return *this; } - unsigned int size = static_cast(strlen(str)); - unsigned int oldSize = (data == nullptr) ? 0 : (dataLength + 1); + size_t size = strlen(str); + size_t oldSize = ((data == nullptr) ? 0 : (dataLength + 1)); CHECK_FATAL(size <= UINT_MAX - oldSize, "str too large"); data = static_cast(memPool->Realloc(data, oldSize * sizeof(char), (dataLength + size + 1) * sizeof(char))); @@ -160,7 +160,7 @@ class MapleString { } MapleString &operator+=(const MapleString &str) { - unsigned int oldSize = (data == nullptr) ? 0 : (dataLength + 1); + size_t oldSize = ((data == nullptr) ? 0 : (dataLength + 1)); CHECK_FATAL(str.dataLength <= UINT_MAX - oldSize, "str too large"); data = static_cast( @@ -173,8 +173,8 @@ class MapleString { } MapleString &operator+=(const std::string &str) { - unsigned int size = str.length(); - unsigned int oldSize = (data == nullptr) ? 0 : (dataLength + 1); + size_t size = str.length(); + size_t oldSize = ((data == nullptr) ? 
0 : (dataLength + 1)); CHECK_FATAL(size <= UINT_MAX - oldSize, "str too large"); data = static_cast(memPool->Realloc(data, oldSize * sizeof(char), (dataLength + size + 1) * sizeof(char))); @@ -197,27 +197,27 @@ class MapleString { MapleString &push_back(const char c); MapleString &append(const MapleString &str); - MapleString &append(const MapleString &str, unsigned int subPos, unsigned int subLen); + MapleString &append(const MapleString &str, size_t subPos, size_t subLen); MapleString &append(const char *s); - MapleString &append(const char *s, unsigned int n); - MapleString &append(unsigned int n, char c); + MapleString &append(const char *s, size_t n); + MapleString &append(size_t n, char c); MapleString &append(const std::string &str); - size_t find(const MapleString &str, unsigned int pos = 0) const; - size_t find(const char *s, unsigned int pos = 0) const; - size_t find(const char *s, unsigned int pos, unsigned int n) const; - size_t find(char c, unsigned int pos = 0) const; - size_t find_last_of(const char*, unsigned int pos = 0) const; - MapleString substr(unsigned int pos, unsigned int len) const; - MapleString &insert(unsigned int pos, const MapleString &str); - MapleString &insert(unsigned int pos, const MapleString &str, unsigned int subPos, unsigned int subLen); - MapleString &insert(unsigned int pos, const char *s); - MapleString &insert(unsigned int pos, const char *s, unsigned int n); - MapleString &insert(unsigned int pos, unsigned int n, char c); + size_t find(const MapleString &str, size_t pos = 0) const; + size_t find(const char *s, size_t pos = 0) const; + size_t find(const char *s, size_t pos, size_t n) const; + size_t find(char c, size_t pos = 0) const; + size_t find_last_of(const char*, size_t pos = 0) const; + MapleString substr(size_t pos, size_t len) const; + MapleString &insert(size_t pos, const MapleString &str); + MapleString &insert(size_t pos, const MapleString &str, size_t subPos, size_t subLen); + MapleString &insert(size_t pos, const char *s); + MapleString &insert(size_t pos, const char *s, size_t n); + MapleString &insert(size_t pos, size_t n, char c); MapleString &assign(const MapleString &str); - MapleString &assign(const MapleString &str, unsigned int subPos, unsigned int subLen); + MapleString &assign(const MapleString &str, size_t subPos, size_t subLen); MapleString &assign(const char *s); - MapleString &assign(const char *s, unsigned int n); - MapleString &assign(unsigned int n, char c); + MapleString &assign(const char *s, size_t n); + MapleString &assign(size_t n, char c); private: inline static size_t StrLen(const char *s) { @@ -236,7 +236,7 @@ class MapleString { char *data = nullptr; MemPool *memPool = nullptr; - unsigned int dataLength = 0; + size_t dataLength = 0; }; template diff --git a/src/mempool/include/mempool.h b/src/mempool/include/mempool.h index 870f91c53516c6937951e2bf3f8b29f3da6b7d20..a94da888b87aaef0aab34e1cb29d632ed567e135 100644 --- a/src/mempool/include/mempool.h +++ b/src/mempool/include/mempool.h @@ -50,8 +50,8 @@ class MemPoolCtrler { private: // Methods struct MemBlock { - unsigned int available; // Available memory size - unsigned int origSize; // original size + size_t available; // Available memory size + size_t origSize; // original size void *ptr; // Current pointer to the first available position }; @@ -67,7 +67,7 @@ class MemPoolCtrler { // Free small/large size memory block list std::list freeMemBlocks; - std::map> largeFreeMemBlocks; + std::map> largeFreeMemBlocks; std::set memPools; // set of mempools managed 
by it }; @@ -115,7 +115,7 @@ class MemPool { // New Array template template - T *NewArray(unsigned int num) { + T *NewArray(size_t num) { void *p = Malloc(sizeof(T) * num); CHECK_FATAL(p != nullptr, "ERROR: NewArray error"); p = new (p) T[num]; diff --git a/src/mpl2mpl/BUILD.gn b/src/mpl2mpl/BUILD.gn index 1d050a34f1590bfd8b442e863c12efbf11ab611a..9303c664e08ea4979ab75c27be2fc580a5c37a09 100644 --- a/src/mpl2mpl/BUILD.gn +++ b/src/mpl2mpl/BUILD.gn @@ -36,7 +36,6 @@ src_libmpl2mpl = [ "src/vtable_impl.cpp", "src/class_hierarchy.cpp", "src/constantfold.cpp", - "src/analyzector.cpp", "src/coderelayout.cpp", "src/annotation_analysis.cpp", ]
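For readers unfamiliar with SSI form, the new me_ssi.cpp added above hinges on one idea: at a conditional branch, each successor edge receives its own pi-definition of the tested variable, so later passes (array-bounds-check removal, checkcast and null-pointer-check optimization) can attach edge-specific facts to a distinct version. The standalone sketch below models only that single step; every type and function name in it is an illustrative assumption and none of it is MapleCompiler API.

#include <initializer_list>
#include <iostream>
#include <string>

// One SSA/SSI version of a variable, plus the fact known to hold for it.
struct Version {
  std::string var;
  int id;
  std::string constraint;  // e.g. "i < n" on the taken edge
};

// lhs = pi(rhs), conceptually inserted at the top of one successor block.
struct PiStmt {
  Version lhs;
  const Version *rhs;
};

class PiInserter {
 public:
  // Create a fresh version of `rhs` for one successor edge of a branch on
  // `cond`; `taken` selects the condition or its negation as the attached fact.
  PiStmt MakePi(const Version &rhs, const std::string &cond, bool taken) {
    Version lhs{rhs.var, nextId++, taken ? cond : "!(" + cond + ")"};
    return PiStmt{lhs, &rhs};
  }

 private:
  int nextId = 1;
};

int main() {
  Version i0{"i", 0, ""};  // the version reaching the branch
  PiInserter inserter;

  // Branch "if (i < n)": one pi per successor, mirroring how the patch's
  // MeSSI::InsertPiNodes handles both the branch target and the fall-through.
  PiStmt takenPi = inserter.MakePi(i0, "i < n", true);
  PiStmt fallThruPi = inserter.MakePi(i0, "i < n", false);

  for (const PiStmt &pi : {takenPi, fallThruPi}) {
    std::cout << pi.lhs.var << pi.lhs.id << " = pi(" << pi.rhs->var << pi.rhs->id
              << ")  // holds on this edge: " << pi.lhs.constraint << '\n';
  }
  return 0;
}

A full implementation, as in the MeSSI pass above, must additionally insert phi nodes at dominance frontiers of the new definitions and rename later uses (and be able to undo all of it when converting back to SSA); the sketch deliberately omits those steps.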