From 97ae7fd89d0199ea41a2ffa12864dd0ba07bedd7 Mon Sep 17 00:00:00 2001 From: William Chen Date: Fri, 28 Oct 2022 14:00:10 -0700 Subject: [PATCH] arm64 standardize for abstract mop version 1 -O0 running ctorture --- src/mapleall/maple_be/BUILD.gn | 2 + .../include/cg/aarch64/aarch64_MPISel.h | 121 +++ .../maple_be/include/cg/aarch64/aarch64_cg.h | 8 + .../include/cg/aarch64/aarch64_cgfunc.h | 28 +- .../include/cg/aarch64/aarch64_md.def | 4 +- .../include/cg/aarch64/aarch64_phases.def | 8 +- .../include/cg/aarch64/aarch64_standardize.h | 63 ++ .../maple_be/include/cg/abstract_mmir.def | 18 +- .../maple_be/include/cg/cg_irbuilder.h | 11 +- src/mapleall/maple_be/include/cg/cg_option.h | 18 + src/mapleall/maple_be/include/cg/cg_options.h | 1 + src/mapleall/maple_be/include/cg/cgbb.h | 3 + src/mapleall/maple_be/include/cg/cgfunc.h | 110 ++- src/mapleall/maple_be/include/cg/immvalid.def | 30 + src/mapleall/maple_be/include/cg/isa.h | 5 + src/mapleall/maple_be/include/cg/isel.h | 68 +- src/mapleall/maple_be/include/cg/reg_info.h | 112 +++ .../maple_be/include/cg/standardize.h | 11 +- .../maple_be/include/cg/x86_64/x64_MPISel.h | 44 +- .../include/cg/x86_64/x64_standardize.h | 9 +- .../src/cg/aarch64/aarch64_MPISel.cpp | 706 +++++++++++++++++- .../src/cg/aarch64/aarch64_cgfunc.cpp | 65 +- .../src/cg/aarch64/aarch64_offset_adjust.cpp | 27 +- .../src/cg/aarch64/aarch64_reaching.cpp | 3 + .../src/cg/aarch64/aarch64_standardize.cpp | 234 ++++++ src/mapleall/maple_be/src/cg/cg.cpp | 4 + src/mapleall/maple_be/src/cg/cg_cfg.cpp | 2 + src/mapleall/maple_be/src/cg/cg_irbuilder.cpp | 9 +- src/mapleall/maple_be/src/cg/cg_option.cpp | 5 + src/mapleall/maple_be/src/cg/cg_options.cpp | 6 + src/mapleall/maple_be/src/cg/cg_ssa.cpp | 2 +- src/mapleall/maple_be/src/cg/cgbb.cpp | 28 + src/mapleall/maple_be/src/cg/cgfunc.cpp | 28 +- src/mapleall/maple_be/src/cg/insn.cpp | 2 +- src/mapleall/maple_be/src/cg/isel.cpp | 240 +++--- src/mapleall/maple_be/src/cg/standardize.cpp | 40 +- 
.../maple_be/src/cg/x86_64/x64_MPIsel.cpp | 184 ++++- .../src/cg/x86_64/x64_standardize.cpp | 40 +- 38 files changed, 2010 insertions(+), 289 deletions(-) create mode 100644 src/mapleall/maple_be/include/cg/aarch64/aarch64_standardize.h create mode 100644 src/mapleall/maple_be/src/cg/aarch64/aarch64_standardize.cpp diff --git a/src/mapleall/maple_be/BUILD.gn b/src/mapleall/maple_be/BUILD.gn index 0abb44fdd1..824abfe19d 100644 --- a/src/mapleall/maple_be/BUILD.gn +++ b/src/mapleall/maple_be/BUILD.gn @@ -132,6 +132,8 @@ src_libcgaarch64 = [ "src/cg/aarch64/aarch64_cfgo.cpp", "src/cg/aarch64/aarch64_isolate_fastpath.cpp", "src/cg/aarch64/aarch64_rematerialize.cpp", + "src/cg/aarch64/aarch64_MPISel.cpp", + "src/cg/aarch64/aarch64_standardize.cpp", ] src_libcgx86phases = [ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_MPISel.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_MPISel.h index aac9a9bd87..ba08c2ab6b 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_MPISel.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_MPISel.h @@ -12,3 +12,124 @@ * FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. 
*/ + +#ifndef MAPLEBE_INCLUDE_AARCH64_MPISEL_H +#define MAPLEBE_INCLUDE_AARCH64_MPISEL_H + +#include "isel.h" +#include "aarch64_isa.h" +#include "aarch64_call_conv.h" + +namespace maplebe { +class AArch64MPIsel : public MPISel { + public: + AArch64MPIsel(MemPool &mp, CGFunc &f) : MPISel(mp, f) {} + ~AArch64MPIsel() override = default; + + void HandleFuncExit() const override; + void SelectReturn(NaryStmtNode &retNode) override; + void SelectReturn(bool noOpnd) override; + void SelectCall(CallNode &callNode) override; + void SelectIcall(IcallNode &icallNode, Operand &opnd0) override; + Operand &ProcessReturnReg(PrimType primType, int32 sReg) override; + Operand &GetTargetRetOperand(PrimType primType, int32 sReg) override; + Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent) override; + Operand *SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) override; + Operand *SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) override; + Operand *SelectFloatConst(MIRFloatConst &doubleConst, PrimType primType, const BaseNode &parent) const override; + Operand *SelectDoubleConst(MIRDoubleConst &doubleConst, PrimType primType, const BaseNode &parent) const override; + void SelectGoto(GotoNode &stmt) override; + void SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) override; + void SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd, Operand &opndRhs) override; + void SelectAggDassign(maplebe::MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &rOpnd, DassignNode &s) override; + void SelectAggCopy(MemOperand &lhs, MemOperand &rhs, uint32 copySize) override; + void SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) override; + void SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode) override; + void SelectIgoto(Operand &opnd0) override; + Operand *SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const 
BaseNode &parent) override; + Operand *SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectSelect(TernaryNode &expr, Operand &cond, Operand &trueOpnd, Operand &falseOpnd, + const BaseNode &parent) override; + Operand *SelectStrLiteral(ConststrNode &constStr) override; + Operand *SelectExtractbits(const BaseNode &parent, ExtractbitsNode &node, Operand &opnd0) override; + void SelectIntAggCopyReturn(MemOperand &symbolMem, uint64 aggSize) override; + /* Create the operand interface directly */ + MemOperand &CreateMemOpndOrNull(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr, int64 offset = 0); + Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCtz32(IntrinsicopNode &node) override; + Operand *SelectClz32(IntrinsicopNode &node) override; + Operand *SelectSin(IntrinsicopNode &node) override; + Operand *SelectSinh(IntrinsicopNode &node) override; + Operand *SelectAsin(IntrinsicopNode &node) override; + Operand *SelectCos(IntrinsicopNode &node) override; + Operand *SelectCosh(IntrinsicopNode &node) override; + Operand *SelectAcos(IntrinsicopNode &node) override; + Operand *SelectAtan(IntrinsicopNode &node) override; + Operand *SelectExp(IntrinsicopNode &node) override; + Operand *SelectLog(IntrinsicopNode &node) override; + Operand *SelectLog10(IntrinsicopNode &node) override; + Operand *SelectSinf(IntrinsicopNode &node) override; + Operand *SelectSinhf(IntrinsicopNode &node) override; + Operand *SelectAsinf(IntrinsicopNode &node) override; + Operand *SelectCosf(IntrinsicopNode &node) override; + Operand *SelectCoshf(IntrinsicopNode &node) override; + Operand *SelectAcosf(IntrinsicopNode &node) override; + Operand *SelectAtanf(IntrinsicopNode &node) override; + Operand *SelectExpf(IntrinsicopNode &node) override; + Operand 
*SelectLogf(IntrinsicopNode &node) override; + Operand *SelectLog10f(IntrinsicopNode &node) override; + Operand *SelectFfs(IntrinsicopNode &node) override; + Operand *SelectMemcmp(IntrinsicopNode &node) override; + Operand *SelectStrlen(IntrinsicopNode &node) override; + Operand *SelectStrcmp(IntrinsicopNode &node) override; + Operand *SelectStrncmp(IntrinsicopNode &node) override; + Operand *SelectStrchr(IntrinsicopNode &node) override; + Operand *SelectStrrchr(IntrinsicopNode &node) override; + Operand *SelectAbs(UnaryNode &node, Operand &opnd0) override; + void SelectAsm(AsmNode &node) override; + private: + MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId = 0, RegOperand *baseReg = nullptr) override; + MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uint32 opndSize, int64 offset) const override; + void SelectCvtFloat2Float(Operand &resOpnd, Operand &srcOpnd, PrimType fromType, PrimType toType) override; + void SelectCvtFloat2Int(Operand &resOpnd, Operand &srcOpnd, PrimType itype, PrimType ftype) override; + Insn &AppendCall(AArch64MOP_t mOp, Operand &targetOpnd, + ListOperand ¶mOpnds, ListOperand &retOpnds); + void SelectCalleeReturn(MIRType *retType, ListOperand &retOpnds); + + /* Inline function implementation of va_start */ + void GenCVaStartIntrin(RegOperand &opnd, uint32 stkSize); + + /* Subclass private instruction selector function */ + void SelectCVaStart(const IntrinsiccallNode &intrnNode); + void SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds); + void SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); + void SelectCmp(Operand &opnd0, Operand &opnd1, PrimType primType); + void SelectCmpResult(RegOperand &resOpnd, Opcode opCode, PrimType primType, PrimType primOpndType); + Operand *SelectDivRem(RegOperand &opnd0, RegOperand &opnd1, PrimType primType, Opcode opcode); + void SelectSelect(Operand &resOpnd, Operand &trueOpnd, Operand &falseOpnd, PrimType primType, + 
Opcode cmpOpcode, PrimType cmpPrimType); + RegOperand &GetTargetStackPointer(PrimType primType) override; + RegOperand &GetTargetBasicPointer(PrimType primType) override; + std::tuple GetMemOpndInfoFromAggregateNode(BaseNode &argExpr); + void SelectParmListForAggregate(BaseNode &argExpr, AArch64CallConvImpl &parmLocator, bool isArgUnused); + void CreateCallStructParamPassByReg(MemOperand &memOpnd, regno_t regNo, uint32 parmNum); + void CreateCallStructParamPassByStack(MemOperand &addrOpnd, int32 symSize, int32 baseOffset); + void SelectAggCopyReturn(const MIRSymbol &symbol, MIRType &symbolType, uint64 symbolSize); + uint32 GetAggCopySize(uint32 offset1, uint32 offset2, uint32 alignment) const; + bool IsParamStructCopy(const MIRSymbol &symbol); + bool IsSymbolRequireIndirection(const MIRSymbol &symbol) override; + void SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override; + void SelectLibCallNoReturn(const std::string &funcName, std::vector &opndVec, PrimType primType); + void SelectLibCallNArg(const std::string &funcName, std::vector &opndVec, + std::vector pt); + RegOperand *PrepareMemcpyParm(MemOperand &memOperand, MOperator mOp); + RegOperand *PrepareMemcpyParm(uint64 copySize); + + /* save param pass by reg */ + std::vector> paramPassByReg; +}; +} + +#endif /* MAPLEBE_INCLUDE_AARCH64_MPISEL_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h index 651a40670c..87ee30fd45 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h @@ -32,6 +32,8 @@ #include "aarch64_cfgo.h" #include "aarch64_rematerialize.h" #include "aarch64_pgo_gen.h" +#include "aarch64_MPISel.h" +#include "aarch64_standardize.h" namespace maplebe { constexpr int64 kShortBRDistance = (8 * 1024); @@ -204,6 +206,12 @@ class AArch64CG : public CG { Rematerializer *CreateRematerializer(MemPool &mp) const 
override { return mp.New(); } + MPISel *CreateMPIsel(MemPool &mp, CGFunc &f) const override { + return mp.New(mp, f); + } + Standardize *CreateStandardize(MemPool &mp, CGFunc &f) const override { + return mp.New(f); + } /* Return the copy operand id of reg1 if it is an insn who just do copy from reg1 to reg2. * i. mov reg2, reg1 * ii. add/sub reg2, reg1, 0/zero register diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h index 36d7157c48..673b7c502b 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h @@ -75,6 +75,8 @@ class AArch64CGFunc : public CGFunc { return refCount; } + void Link2ISel(MPISel *p) override; + int32 GetBeginOffset() const { return beginOffset; } @@ -83,13 +85,13 @@ class AArch64CGFunc : public CGFunc { MOperator PickMovInsn(const RegOperand &lhs, const RegOperand &rhs) const; regno_t NewVRflag() override { - ASSERT(maxRegCount > kRFLAG, "CG internal error."); + ASSERT(GetMaxRegNum() > kRFLAG, "CG internal error."); constexpr uint8 size = 4; - if (maxRegCount <= kRFLAG) { - maxRegCount += (kRFLAG + kVRegisterNumber); - vRegTable.resize(maxRegCount); + if (GetMaxRegNum() <= kRFLAG) { + IncMaxRegNum(kRFLAG + kVRegisterNumber); + vReg.VRegTableResize(GetMaxRegNum()); } - new (&vRegTable[kRFLAG]) VirtualRegNode(kRegTyCc, size); + vReg.VRegTableValuesSet(kRFLAG, kRegTyCc, size); return kRFLAG; } @@ -97,6 +99,8 @@ class AArch64CGFunc : public CGFunc { RegOperand &GetOrCreateResOperand(const BaseNode &parent, PrimType primType); MIRStructType *GetLmbcStructArgType(BaseNode &stmt, size_t argNo); + void HandleFuncCfg(CGCFG *cfg) override; + void IntrinsifyGetAndAddInt(ListOperand &srcOpnds, PrimType pty); void IntrinsifyGetAndSetInt(ListOperand &srcOpnds, PrimType pty); void IntrinsifyCompareAndSwapInt(ListOperand &srcOpnds, PrimType pty); @@ -108,6 +112,7 @@ class AArch64CGFunc : public CGFunc { 
void HandleRetCleanup(NaryStmtNode &retNode) override; void MergeReturn() override; RegOperand *ExtractNewMemBase(const MemOperand &memOpnd); + Operand *HandleExpr(const BaseNode &parent, BaseNode &expr); void SelectDassign(DassignNode &stmt, Operand &opnd0) override; void SelectDassignoff(DassignoffNode &stmt, Operand &opnd0) override; void SelectRegassign(RegassignNode &stmt, Operand &opnd0) override; @@ -134,6 +139,7 @@ class AArch64CGFunc : public CGFunc { void SelectReturnSendOfStructInRegs(BaseNode *x) override; void SelectReturn(Operand *opnd0) override; void SelectIgoto(Operand *opnd0) override; + void SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, bool isCallNative = false); void SelectCondGoto(CondGotoNode &stmt, Operand &opnd0, Operand &opnd1) override; void SelectCondGoto(LabelOperand &targetOpnd, Opcode jmpOp, Opcode cmpOp, Operand &origOpnd0, Operand &origOpnd1, PrimType primType, bool signedCond); @@ -309,7 +315,6 @@ class AArch64CGFunc : public CGFunc { LabelOperand &GetOrCreateLabelOperand(LabelIdx labIdx) override; LabelOperand &GetOrCreateLabelOperand(BB &bb) override; uint32 GetAggCopySize(uint32 offset1, uint32 offset2, uint32 alignment) const; - RegOperand *SelectVectorAddLong(PrimType rType, Operand *o1, Operand *o2, PrimType otyp, bool isLow) override; RegOperand *SelectVectorAddWiden(Operand *o1, PrimType otyp1, Operand *o2, PrimType otyp2, bool isLow) override; RegOperand *SelectVectorAbs(PrimType rType, Operand *o1) override; @@ -352,10 +357,15 @@ class AArch64CGFunc : public CGFunc { RegOperand *SelectVectorWiden(PrimType rType, Operand *o1, PrimType otyp, bool isLow) override; RegOperand *SelectVectorMovNarrow(PrimType rType, Operand *opnd, PrimType oType) override; + void SelectCvtFloat2Float(Operand &resOpnd, Operand &srcOpnd, PrimType fromType, PrimType toType); + void SelectCvtFloat2Int(Operand &resOpnd, Operand &srcOpnd, PrimType itype, PrimType ftype); + void SelectCvtInt2Float(Operand &resOpnd, Operand &origOpnd0, 
PrimType toType, PrimType fromType); void SelectVectorCvt(Operand *res, PrimType rType, Operand *o1, PrimType oType); void SelectVectorZip(PrimType rType, Operand *o1, Operand *o2); void SelectStackSave(); void SelectStackRestore(const IntrinsiccallNode &intrnNode); + void SelectCVaStart(const IntrinsiccallNode &intrnNode); + void SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); void PrepareVectorOperands(Operand **o1, PrimType &oty1, Operand **o2, PrimType &oty2); RegOperand *AdjustOneElementVectorOperand(PrimType oType, RegOperand *opnd); @@ -884,15 +894,11 @@ class AArch64CGFunc : public CGFunc { void GenLargeStructCopyForIreadoff(BaseNode &parent, BaseNode &argExpr, int32 &structCopyOffset, size_t argNo); void SelectParmListPreprocessLargeStruct(BaseNode &parent, BaseNode &argExpr, int32 &structCopyOffset, size_t argNo); void SelectParmListPreprocess(StmtNode &naryNode, size_t start, std::set &specialArgs); - void SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, bool isCallNative = false); Operand *SelectClearStackCallParam(const AddrofNode &expr, int64 &offsetValue); void SelectClearStackCallParmList(const StmtNode &naryNode, ListOperand &srcOpnds, std::vector &stackPostion); void SelectRem(Operand &resOpnd, Operand &lhsOpnd, Operand &rhsOpnd, PrimType primType, bool isSigned, bool is64Bits); void SelectCvtInt2Int(const BaseNode *parent, Operand *&resOpnd, Operand *opnd0, PrimType fromType, PrimType toType); - void SelectCvtFloat2Float(Operand &resOpnd, Operand &srcOpnd, PrimType fromType, PrimType toType); - void SelectCvtFloat2Int(Operand &resOpnd, Operand &srcOpnd, PrimType itype, PrimType ftype); - void SelectCvtInt2Float(Operand &resOpnd, Operand &origOpnd0, PrimType toType, PrimType fromType); Operand *SelectRelationOperator(RelationOperator operatorCode, const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); void SelectRelationOperator(RelationOperator operatorCode, 
Operand &resOpnd, Operand &opnd0, Operand &opnd1, @@ -900,7 +906,6 @@ class AArch64CGFunc : public CGFunc { MOperator SelectRelationMop(RelationOperator operatorCode, RelationOperatorOpndPattern opndPattern, bool is64Bits, bool isBitmaskImmediate, bool isBitNumLessThan16) const; Operand *SelectMinOrMax(bool isMin, const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); - void SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); Operand *SelectRoundLibCall(RoundType roundType, const TypeCvtNode &node, Operand &opnd0); Operand *SelectRoundOperator(RoundType roundType, const TypeCvtNode &node, Operand &opnd0, const BaseNode &parent); Operand *SelectAArch64ffs(Operand &argOpnd, PrimType argType); @@ -915,7 +920,6 @@ class AArch64CGFunc : public CGFunc { bool GenerateCompareWithZeroInstruction(Opcode jmpOp, Opcode cmpOp, bool is64Bits, PrimType primType, LabelOperand &targetOpnd, Operand &opnd0); void GenCVaStartIntrin(RegOperand &opnd, uint32 stkSize); - void SelectCVaStart(const IntrinsiccallNode &intrnNode); void SelectCAtomicStoreN(const IntrinsiccallNode &intrinsiccallNode); void SelectCAtomicStore(const IntrinsiccallNode &intrinsiccall); void SelectCAtomicLoad(const IntrinsiccallNode &intrinsiccall); diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def b/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def index 3784d8dceb..385a305d6b 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def @@ -22,9 +22,9 @@ DEFINE_MOP(MOP_xmovrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISMOVE,kLtAlu,"mo /* MOP_wmovrr */ DEFINE_MOP(MOP_wmovrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISMOVE,kLtAlu,"mov","0,1",1) /* MOP_wmovri32 */ -DEFINE_MOP(MOP_wmovri32, {&OpndDesc::Reg32ID,&OpndDesc::Imm32},ISMOVE,kLtAlu,"mov","0,1",1) +DEFINE_MOP(MOP_wmovri32, 
{&OpndDesc::Reg32ID,&OpndDesc::Imm32},ISMOVE,kLtAlu,"mov","0,1",1,IsSingleInstructionMovable32) /* MOP_xmovri64 */ -DEFINE_MOP(MOP_xmovri64, {&OpndDesc::Reg64ID,&OpndDesc::Imm64},ISMOVE,kLtAlu,"mov","0,1",1) +DEFINE_MOP(MOP_xmovri64, {&OpndDesc::Reg64ID,&OpndDesc::Imm64},ISMOVE,kLtAlu,"mov","0,1",1,IsSingleInstructionMovable64) /* MOP_xmovrr_uxtw -- Remove Redundant uxtw -- used in globalopt:UxtwMovPattern */ DEFINE_MOP(MOP_xmovrr_uxtw, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISMOVE,kLtAlu,"mov","0,1",1) diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def b/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def index 48e835dbb3..8217fa6712 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def @@ -15,7 +15,13 @@ ADDTARGETPHASE("layoutstackframe", true); ADDTARGETPHASE("createstartendlabel", true); ADDTARGETPHASE("buildehfunc", !GetMIRModule()->IsCModule()); - ADDTARGETPHASE("handlefunction", true); + + ADDTARGETPHASE("handlefunction", !CGOptions::UseNewCg()); + ADDTARGETPHASE("instructionselector", CGOptions::UseNewCg()); + ADDTARGETPHASE("instructionstandardize", CGOptions::UseNewCg()); + ADDTARGETPHASE("handlecfg", CGOptions::UseNewCg()); + ADDTARGETPHASE("patchlongbranch", CGOptions::UseNewCg() && CGOptions::DoFixLongBranch()); + ADDTARGETPHASE("moveargs", true); /* SSA PHASES */ ADDTARGETPHASE("cgssaconstruct", CGOptions::DoCGSSA()); diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_standardize.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_standardize.h new file mode 100644 index 0000000000..71e6ca6bcf --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_standardize.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) [2022] Futurewei Technologies, Inc. All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef MAPLEBE_INCLUDE_AARCH64_STANDARDIZE_H +#define MAPLEBE_INCLUDE_AARCH64_STANDARDIZE_H + +#include "standardize.h" + +namespace maplebe { + +enum TargetOperandAction : uint8 { + kAbtractReg, + kAbtractMem, + kAbtractImm, + kAbtractNone, +}; + +struct TargetMopGen { + AArch64MOP_t targetMop; + std::vector targetOpndAction; + std::vector mappingOrder; +}; + +class AbstractIR2Target { + public: + abstract::AbstractMOP_t abstractMop; + std::vector targetMap; +}; + +class AArch64Standardize : public Standardize { + public: + explicit AArch64Standardize(CGFunc &f) : Standardize(f) { + SetAddressMapping(true); + } + + ~AArch64Standardize() override = default; + + private: + void StdzMov(Insn &insn) override; + void StdzStrLdr(Insn &insn) override; + void StdzBasicOp(Insn &insn) override; + void StdzUnaryOp(Insn &insn) override; + void StdzCvtOp(Insn &insn) override; + void StdzShiftOp(Insn &insn) override; + void StdzCommentOp(Insn &insn) override; + + Operand *GetInsnResult(Insn *insn); + void SelectTargetInsn(Insn *insn); +}; +} +#endif /* MAPLEBE_INCLUDE_AARCH64_STANDARDIZE_H */ diff --git a/src/mapleall/maple_be/include/cg/abstract_mmir.def b/src/mapleall/maple_be/include/cg/abstract_mmir.def index c2d595600a..cce1418bad 100644 --- a/src/mapleall/maple_be/include/cg/abstract_mmir.def +++ b/src/mapleall/maple_be/include/cg/abstract_mmir.def @@ -108,14 +108,14 @@ DEFINE_MOP(MOP_and_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISBASICOP,0,"and_64","",1) /* Support three address basic operations (Floating point) */ - DEFINE_MOP(MOP_add_f_8, {&OpndDesc::Reg8FD,&OpndDesc::Reg8FS, 
&OpndDesc::Reg8FS},ISABSTRACT|ISBASICOP,0,"add_8","",1) - DEFINE_MOP(MOP_add_f_16, {&OpndDesc::Reg16FD,&OpndDesc::Reg16FS, &OpndDesc::Reg16FS},ISABSTRACT|ISBASICOP,0,"add_16","",1) - DEFINE_MOP(MOP_add_f_32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS, &OpndDesc::Reg32FS},ISABSTRACT|ISBASICOP,0,"add_32","",1) - DEFINE_MOP(MOP_add_f_64, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS, &OpndDesc::Reg64FS},ISABSTRACT|ISBASICOP,0,"add_64","",1) - DEFINE_MOP(MOP_sub_f_8, {&OpndDesc::Reg8FD,&OpndDesc::Reg8FS, &OpndDesc::Reg8FS},ISABSTRACT|ISBASICOP,0,"sub_8","",1) - DEFINE_MOP(MOP_sub_f_16, {&OpndDesc::Reg16FD,&OpndDesc::Reg16FS, &OpndDesc::Reg16FS},ISABSTRACT|ISBASICOP,0,"sub_16","",1) - DEFINE_MOP(MOP_sub_f_32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS, &OpndDesc::Reg32FS},ISABSTRACT|ISBASICOP,0,"sub_32","",1) - DEFINE_MOP(MOP_sub_f_64, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS, &OpndDesc::Reg64FS},ISABSTRACT|ISBASICOP,0,"sub_64","",1) + DEFINE_MOP(MOP_add_f_8, {&OpndDesc::Reg8FD,&OpndDesc::Reg8FS, &OpndDesc::Reg8FS},ISABSTRACT|ISBASICOP,0,"add_f_8","",1) + DEFINE_MOP(MOP_add_f_16, {&OpndDesc::Reg16FD,&OpndDesc::Reg16FS, &OpndDesc::Reg16FS},ISABSTRACT|ISBASICOP,0,"add_f_16","",1) + DEFINE_MOP(MOP_add_f_32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS, &OpndDesc::Reg32FS},ISABSTRACT|ISBASICOP,0,"add_f_32","",1) + DEFINE_MOP(MOP_add_f_64, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS, &OpndDesc::Reg64FS},ISABSTRACT|ISBASICOP,0,"add_f_64","",1) + DEFINE_MOP(MOP_sub_f_8, {&OpndDesc::Reg8FD,&OpndDesc::Reg8FS, &OpndDesc::Reg8FS},ISABSTRACT|ISBASICOP,0,"sub_f_8","",1) + DEFINE_MOP(MOP_sub_f_16, {&OpndDesc::Reg16FD,&OpndDesc::Reg16FS, &OpndDesc::Reg16FS},ISABSTRACT|ISBASICOP,0,"sub_f_16","",1) + DEFINE_MOP(MOP_sub_f_32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS, &OpndDesc::Reg32FS},ISABSTRACT|ISBASICOP,0,"sub_f_32","",1) + DEFINE_MOP(MOP_sub_f_64, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS, &OpndDesc::Reg64FS},ISABSTRACT|ISBASICOP,0,"sub_f_64","",1) /* shift -- shl/ashr/lshr */ DEFINE_MOP(MOP_shl_8, 
{&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISSHIFT,0,"shl_8","",1) @@ -146,4 +146,4 @@ DEFINE_MOP(MOP_not_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISABSTRACT|ISUNARYOP,0,"not_64","",1) /* MOP_comment */ - DEFINE_MOP(MOP_comment, {&OpndDesc::String0S},ISABSTRACT,0,"//","0", 0) \ No newline at end of file + DEFINE_MOP(MOP_comment, {&OpndDesc::String0S},ISABSTRACT|ISCOMMENT,0,"//","0", 0) diff --git a/src/mapleall/maple_be/include/cg/cg_irbuilder.h b/src/mapleall/maple_be/include/cg/cg_irbuilder.h index 3ada2f5c29..b4efc49226 100644 --- a/src/mapleall/maple_be/include/cg/cg_irbuilder.h +++ b/src/mapleall/maple_be/include/cg/cg_irbuilder.h @@ -16,6 +16,7 @@ #ifndef MAPLEBE_INCLUDE_CG_IRBUILDER_H #define MAPLEBE_INCLUDE_CG_IRBUILDER_H +#include "reg_info.h" #include "insn.h" #include "operand.h" @@ -55,15 +56,17 @@ class InsnBuilder { uint32 createdInsnNum = 0; }; -constexpr uint32 kBaseVirtualRegNO = 200; /* avoid conflicts between virtual and physical */ class OperandBuilder { public: explicit OperandBuilder(MemPool &mp, uint32 mirPregNum = 0) - : alloc(&mp), virtualRegNum(mirPregNum) {} + : alloc(&mp) { + virtualReg.SetCount(mirPregNum); + } /* create an operand in cgfunc when no mempool is supplied */ ImmOperand &CreateImm(uint32 size, int64 value, MemPool *mp = nullptr); ImmOperand &CreateImm(const MIRSymbol &symbol, int64 offset, int32 relocs, MemPool *mp = nullptr); + OfstOperand &CreateOfst(int64 offset, uint32 size, MemPool *mp = nullptr); MemOperand &CreateMem(uint32 size, MemPool *mp = nullptr); MemOperand &CreateMem(RegOperand &baseOpnd, int64 offset, uint32 size); RegOperand &CreateVReg(uint32 size, RegType type, MemPool *mp = nullptr); @@ -76,14 +79,14 @@ class OperandBuilder { CommentOperand &CreateComment(const MapleString &s, MemPool *mp = nullptr); uint32 GetCurrentVRegNum() const { - return virtualRegNum; + return virtualReg.GetCount(); } protected: MapleAllocator alloc; private: - uint32 virtualRegNum = 0; + VregInfo 
virtualReg; /* reg bank for multiple use */ }; } diff --git a/src/mapleall/maple_be/include/cg/cg_option.h b/src/mapleall/maple_be/include/cg/cg_option.h index 3ddc69b636..084febacc5 100644 --- a/src/mapleall/maple_be/include/cg/cg_option.h +++ b/src/mapleall/maple_be/include/cg/cg_option.h @@ -636,6 +636,22 @@ class CGOptions { return doCFGO; } + static void EnableNewCg() { + useNewCg = true; + } + + static void DisableNewCg() { + useNewCg = false; + } + + static bool UseNewCg() { + return useNewCg; + } + + static bool DoFixLongBranch() { + return CGOptions::GetInstance().GetOptimizeLevel() == kLevel0; + } + static void EnableRegSavesOpt() { doRegSavesOpt = true; } @@ -659,6 +675,7 @@ class CGOptions { static bool UseSsaPreSave() { return useSsaPreSave; } + static void EnableSsuPreRestore() { useSsuPreRestore = true; } @@ -1363,6 +1380,7 @@ class CGOptions { static bool doRegSavesOpt; static bool useSsaPreSave; static bool useSsuPreRestore; + static bool useNewCg; static bool dumpOptimizeCommonLog; static bool checkArrayStore; static bool exclusiveEH; diff --git a/src/mapleall/maple_be/include/cg/cg_options.h b/src/mapleall/maple_be/include/cg/cg_options.h index 9b37bf93bd..20573d214c 100644 --- a/src/mapleall/maple_be/include/cg/cg_options.h +++ b/src/mapleall/maple_be/include/cg/cg_options.h @@ -43,6 +43,7 @@ extern maplecl::Option lsraOptcallee; extern maplecl::Option calleeregsPlacement; extern maplecl::Option ssapreSave; extern maplecl::Option ssupreRestore; +extern maplecl::Option newCg; extern maplecl::Option prepeep; extern maplecl::Option peep; extern maplecl::Option preschedule; diff --git a/src/mapleall/maple_be/include/cg/cgbb.h b/src/mapleall/maple_be/include/cg/cgbb.h index 91ac332c64..e0e55990a5 100644 --- a/src/mapleall/maple_be/include/cg/cgbb.h +++ b/src/mapleall/maple_be/include/cg/cgbb.h @@ -202,6 +202,9 @@ class BB { void RemoveInsnSequence(Insn &insn, const Insn &nextInsn); + /* prepend all insns from bb before insn */ + void 
InsertBeforeInsn(BB &fromBB, Insn &beforeInsn); + /* append all insns from bb into this bb */ void AppendBBInsns(BB &bb); diff --git a/src/mapleall/maple_be/include/cg/cgfunc.h b/src/mapleall/maple_be/include/cg/cgfunc.h index aba84963ec..2899ccd110 100644 --- a/src/mapleall/maple_be/include/cg/cgfunc.h +++ b/src/mapleall/maple_be/include/cg/cgfunc.h @@ -32,7 +32,7 @@ #include "mir_function.h" #include "debug_info.h" #include "maple_phase_manager.h" - +#include "isel.h" /* Maple MP header */ #include "mempool_allocator.h" @@ -50,33 +50,6 @@ struct MemOpndCmp { } }; -class VirtualRegNode { - public: - VirtualRegNode() = default; - - VirtualRegNode(RegType type, uint32 size) - : regType(type), size(size), regNO(kInvalidRegNO) {} - - virtual ~VirtualRegNode() = default; - - void AssignPhysicalRegister(regno_t phyRegNO) { - regNO = phyRegNO; - } - - RegType GetType() const { - return regType; - } - - uint32 GetSize() const { - return size; - } - - private: - RegType regType = kRegTyUndef; - uint32 size = 0; /* size in bytes */ - regno_t regNO = kInvalidRegNO; /* physical register assigned by register allocation */ -}; - class SpillMemOperandSet { public: explicit SpillMemOperandSet(MapleAllocator &mallocator) : reuseSpillLocMem(mallocator.Adapter()) {} @@ -104,6 +77,8 @@ class SpillMemOperandSet { MapleSet reuseSpillLocMem; }; +class MPISel; + #if TARGARM32 class LiveRange; #endif /* TARGARM32 */ @@ -121,6 +96,8 @@ class CGFunc { StackMemPool &stackMp, MapleAllocator &allocator, uint32 funcId); virtual ~CGFunc(); + void InitFactory(); + const std::string &GetName() const { return func.GetName(); } @@ -209,8 +186,14 @@ class CGFunc { void SetCleanupLabel(BB &cleanupEntry); bool ExitbbNotInCleanupArea(const BB &bb) const; uint32 GetMaxRegNum() const { - return maxRegCount; + return vReg.GetMaxRegCount(); }; + void SetMaxRegNum(uint32 num) { + vReg.SetMaxRegCount(num); + } + void IncMaxRegNum(uint32 num) { + vReg.IncMaxRegCount(num); + } void DumpCFG() const; void 
DumpBBInfo(const BB *bb) const; void DumpCGIR() const; @@ -414,6 +397,8 @@ class CGFunc { virtual RegOperand *SelectVectorWiden(PrimType rType, Operand *o1, PrimType otyp, bool isLow) = 0; virtual RegOperand *SelectVectorMovNarrow(PrimType rType, Operand *opnd, PrimType oType) = 0; + virtual void HandleFuncCfg(CGCFG *cfg) { AddCommonExitBB(); } + /* For ebo issue. */ virtual Operand *GetTrueOpnd() { return nullptr; @@ -447,27 +432,7 @@ class CGFunc { if (CGOptions::UseGeneralRegOnly()) { CHECK_FATAL(regType != kRegTyFloat, "cannot use float | SIMD register with --general-reg-only"); } - /* when vRegCount reach to maxRegCount, maxRegCount limit adds 80 every time */ - /* and vRegTable increases 80 elements. */ - if (vRegCount >= maxRegCount) { - ASSERT(vRegCount < maxRegCount + 1, "MAINTIAN FAILED"); - maxRegCount += kRegIncrStepLen; - vRegTable.resize(maxRegCount); - } -#if TARGAARCH64 || TARGX86_64 || TARGRISCV64 - if (size < k4ByteSize) { - size = k4ByteSize; - } -#if TARGAARCH64 - /* cannot handle 128 size register */ - if (regType == kRegTyInt && size > k8ByteSize) { - size = k8ByteSize; - } -#endif - ASSERT(size == k4ByteSize || size == k8ByteSize || size == k16ByteSize, "check size"); -#endif - new (&vRegTable[vRegCount]) VirtualRegNode(regType, size); - return vRegCount++; + return vReg.GetNextVregNO(regType, size); } virtual regno_t NewVRflag() { @@ -520,17 +485,17 @@ class CGFunc { /* return Register Type */ virtual RegType GetRegisterType(regno_t rNum) const { - CHECK(rNum < vRegTable.size(), "index out of range in GetVRegSize"); - return vRegTable[rNum].GetType(); + CHECK(rNum < vReg.VRegTableSize(), "index out of range in GetVRegSize"); + return vReg.VRegTableGetType(rNum); } #if TARGX86_64 uint32 GetMaxVReg() const { - return vRegCount + opndBuilder->GetCurrentVRegNum(); + return vReg.GetCount() + opndBuilder->GetCurrentVRegNum(); } #else uint32 GetMaxVReg() const { - return vRegCount; + return vReg.GetCount(); } #endif @@ -543,7 +508,7 @@ class CGFunc 
{ } uint32 GetVRegSize(regno_t vregNum) { - CHECK(vregNum < vRegTable.size(), "index out of range in GetVRegSize"); + CHECK(vregNum < vReg.VRegTableSize(), "index out of range in GetVRegSize"); return GetOrCreateVirtualRegisterOperand(vregNum).GetSize() / kBitsPerByte; } @@ -1096,7 +1061,7 @@ class CGFunc { } regno_t GetVirtualRegNOFromPseudoRegIdx(PregIdx idx) const { - return regno_t(idx + firstMapleIrVRegNO); + return regno_t(idx + kBaseVirtualRegNO); } bool GetHasProEpilogue() const { @@ -1247,10 +1212,6 @@ class CGFunc { vregsToPregsMap[vRegNum] = pidx; } - uint32 GetFirstMapleIrVRegNO() const { - return firstMapleIrVRegNO; - } - void SetHasAsm() { hasAsm = true; } @@ -1271,6 +1232,18 @@ class CGFunc { return needStackProtect; } + virtual void Link2ISel(MPISel *p) { + (void)p; + } + + void SetISel(MPISel *p) { + isel = p; + } + + MPISel *GetISel() { + return isel; + } + MIRPreg *GetPseudoRegFromVirtualRegNO(const regno_t vRegNO, bool afterSSA = false) const { PregIdx pri = afterSSA ? 
VRegNOToPRegIdx(vRegNO) : GetPseudoRegIdxFromVirtualRegNO(vRegNO); if (pri == -1) { @@ -1293,13 +1266,10 @@ class CGFunc { return insn; } protected: - uint32 firstMapleIrVRegNO = 200; /* positioned after physical regs */ uint32 firstNonPregVRegNO; - uint32 vRegCount; /* for assigning a number for each CG virtual register */ + VregInfo vReg; /* for assigning a number for each CG virtual register */ uint32 ssaVRegCount = 0; /* vreg count in ssa */ - uint32 maxRegCount; /* for the current virtual register number limit */ size_t lSymSize; /* size of local symbol table imported */ - MapleVector vRegTable; /* table of CG's virtual registers indexed by v_reg no */ MapleVector bbVec; MapleUnorderedMap vRegOperandTable; MapleUnorderedMap pRegSpillMemOperands; @@ -1314,7 +1284,6 @@ class CGFunc { uint32 totalInsns = 0; int32 structCopySize = 0; int32 maxParamStackSize = 0; - static constexpr int kRegIncrStepLen = 80; /* reg number increate step length */ bool hasVLAOrAlloca = false; bool hasAlloca = false; @@ -1339,7 +1308,7 @@ class CGFunc { PregIdx GetPseudoRegIdxFromVirtualRegNO(const regno_t vRegNO) const { if (IsVRegNOForPseudoRegister(vRegNO)) { - return PregIdx(vRegNO - firstMapleIrVRegNO); + return PregIdx(vRegNO - kBaseVirtualRegNO); } return VRegNOToPRegIdx(vRegNO); } @@ -1347,7 +1316,7 @@ class CGFunc { bool IsVRegNOForPseudoRegister(regno_t vRegNum) const { /* 0 is not allowed for preg index */ uint32 n = static_cast(vRegNum); - return (firstMapleIrVRegNO < n && n < firstNonPregVRegNO); + return (kBaseVirtualRegNO < n && n < firstNonPregVRegNO); } PregIdx VRegNOToPRegIdx(regno_t vRegNum) const { @@ -1359,7 +1328,7 @@ class CGFunc { } VirtualRegNode &GetVirtualRegNodeFromPseudoRegIdx(PregIdx idx) { - return vRegTable.at(GetVirtualRegNOFromPseudoRegIdx(idx)); + return vReg.VRegTableElementGet(GetVirtualRegNOFromPseudoRegIdx(idx)); } PrimType GetTypeFromPseudoRegIdx(PregIdx idx) { @@ -1454,12 +1423,17 @@ class CGFunc { /* save stack protect kinds which can trigger 
stack protect */ uint8 stackProtectInfo = 0; bool needStackProtect = false; + + /* cross reference isel class pointer */ + MPISel *isel = nullptr; }; /* class CGFunc */ MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgLayoutFrame, maplebe::CGFunc) MAPLE_FUNC_PHASE_DECLARE_END MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgHandleFunction, maplebe::CGFunc) MAPLE_FUNC_PHASE_DECLARE_END +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPatchLongBranch, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgFixCFLocOsft, maplebe::CGFunc) MAPLE_FUNC_PHASE_DECLARE_END MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgGenCfi, maplebe::CGFunc) diff --git a/src/mapleall/maple_be/include/cg/immvalid.def b/src/mapleall/maple_be/include/cg/immvalid.def index 5b38d448c2..272b97dacc 100644 --- a/src/mapleall/maple_be/include/cg/immvalid.def +++ b/src/mapleall/maple_be/include/cg/immvalid.def @@ -14,6 +14,24 @@ bool IsBitSizeImmediate(uint64 val, uint32 bitLen, uint32 nLowerZeroBits) { return (mask2 & val) == 0UL && (mask1 & ((static_cast(val)) >> nLowerZeroBits)) == 0UL; }; +/* This is a copy from "operand.cpp", temporary fix for me_slp.cpp usage of this file */ +/* was IsMoveWidableImmediate */ +bool IsMoveWidableImmediateCopy(uint64 val, uint32 bitLen) { + if (bitLen == k64BitSize) { + /* 0xHHHH000000000000 or 0x0000HHHH00000000, return true */ + if (((val & ((static_cast(0xffff)) << k48BitSize)) == val) || + ((val & ((static_cast(0xffff)) << k32BitSize)) == val)) { + return true; + } + } else { + /* get lower 32 bits */ + val &= static_cast(0xffffffff); + } + /* 0x00000000HHHH0000 or 0x000000000000HHHH, return true */ + return ((val & ((static_cast(0xffff)) << k16BitSize)) == val || + (val & ((static_cast(0xffff)) << 0)) == val); +} + bool IsBitmaskImmediate(uint64 val, uint32 bitLen) { ASSERT(val != 0, "IsBitmaskImmediate() don's accept 0 or -1"); ASSERT(static_cast(val) != -1, "IsBitmaskImmediate() don's accept 0 or -1"); @@ -77,6 +95,18 @@ bool IsBitmaskImmediate(uint64 val, uint32 bitLen) { #endif } +bool 
IsSingleInstructionMovable32(int64 value) { + return (IsMoveWidableImmediateCopy(static_cast(value), 32) || + IsMoveWidableImmediateCopy(~static_cast(value), 32) || + IsBitmaskImmediate(static_cast(value), 32)); +} + +bool IsSingleInstructionMovable64(int64 value) { + return (IsMoveWidableImmediateCopy(static_cast(value), 64) || + IsMoveWidableImmediateCopy(~static_cast(value), 64) || + IsBitmaskImmediate(static_cast(value), 64)); +} + bool Imm12BitValid(int64 value) { bool result = IsBitSizeImmediate(static_cast(value), kMaxImmVal12Bits, 0); // for target linux-aarch64-gnu diff --git a/src/mapleall/maple_be/include/cg/isa.h b/src/mapleall/maple_be/include/cg/isa.h index 5948612f81..6f13582c3f 100644 --- a/src/mapleall/maple_be/include/cg/isa.h +++ b/src/mapleall/maple_be/include/cg/isa.h @@ -51,6 +51,7 @@ enum MopProperty : maple::uint8 { kInsnInlineAsm, kInsnSpecialIntrisic, kInsnIsNop, + kInsnIsComment, }; using regno_t = uint32_t; #define ISABSTRACT 1ULL @@ -83,6 +84,7 @@ using regno_t = uint32_t; #define INLINEASM (1ULL << kInsnInlineAsm) #define SPINTRINSIC (1ULL << kInsnSpecialIntrisic) #define ISNOP (1ULL << kInsnIsNop) +#define ISCOMMENT (1ULL << kInsnIsComment) constexpr maplebe::regno_t kInvalidRegNO = 0; /* @@ -214,6 +216,9 @@ struct InsnDesc { bool IsSpecialIntrinsic() const { return properties & SPINTRINSIC; } + bool IsComment() const { + return properties & ISCOMMENT; + } MOperator GetOpc() const { return opc; } diff --git a/src/mapleall/maple_be/include/cg/isel.h b/src/mapleall/maple_be/include/cg/isel.h index fe28a9c9bb..fb365cea12 100644 --- a/src/mapleall/maple_be/include/cg/isel.h +++ b/src/mapleall/maple_be/include/cg/isel.h @@ -54,13 +54,13 @@ class MPISel { Operand* SelectSub(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); Operand* SelectNeg(const UnaryNode &node, Operand &opnd0, const BaseNode &parent); Operand* SelectCvt(const BaseNode &parent, const TypeCvtNode &node, Operand &opnd0); - Operand* 
SelectExtractbits(const BaseNode &parent, const ExtractbitsNode &node, Operand &opnd0); Operand *SelectDepositBits(const DepositbitsNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); - Operand *SelectAbs(UnaryNode &node, Operand &opnd0); + virtual Operand* SelectExtractbits(const BaseNode &parent, ExtractbitsNode &node, Operand &opnd0); + virtual Operand *SelectAbs(UnaryNode &node, Operand &opnd0) = 0; Operand *SelectAlloca(UnaryNode &node, Operand &opnd0); Operand *SelectCGArrayElemAdd(BinaryNode &node, const BaseNode &parent); ImmOperand *SelectIntConst(MIRIntConst &intConst, PrimType primType) const; - Operand *SelectLiteral(MIRDoubleConst &c, MIRFunction &func, uint32 labelIdx) const; + template Operand *SelectLiteral(T &c, MIRFunction &func, uint32 labelIdx) const; void SelectCallCommon(StmtNode &stmt, const MPISel &iSel); void SelectAdd(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); void SelectSub(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); @@ -68,8 +68,8 @@ class MPISel { void SelectShift(Operand &resOpnd, Operand &opnd0, Operand &opnd1, Opcode shiftDirect, PrimType opnd0Type, PrimType opnd1Type); void SelectBand(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); - virtual void SelectReturn(NaryStmtNode &retNode, Operand &opnd) = 0; - virtual void SelectReturn() = 0; + virtual void SelectReturn(NaryStmtNode &retNode) = 0; + virtual void SelectReturn(bool noOpnd) = 0; virtual void SelectIntAggCopyReturn(MemOperand &symbolMem, uint64 aggSize) = 0; virtual void SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd, Operand &opndRhs) = 0; virtual void SelectAggCopy(MemOperand &lhs, MemOperand &rhs, uint32 copySize) = 0; @@ -79,12 +79,13 @@ class MPISel { virtual void SelectCall(CallNode &callNode) = 0; virtual void SelectIcall(IcallNode &icallNode, Operand &opnd0) = 0; virtual void SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) = 0; - virtual Operand 
*SelectDoubleConst(MIRDoubleConst &intConst, PrimType primType) const = 0; + virtual Operand *SelectFloatConst(MIRFloatConst &floatConst, PrimType primType, const BaseNode &parent) const = 0; + virtual Operand *SelectDoubleConst(MIRDoubleConst &doubleConst, PrimType primType, const BaseNode &parent) const = 0; virtual Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent) = 0; virtual Operand *SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) = 0; virtual Operand *SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) = 0; virtual Operand &ProcessReturnReg(PrimType primType, int32 sReg) = 0 ; - virtual void SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode, Operand &opnd0) = 0; + virtual void SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode) = 0; Operand *SelectBior(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); Operand *SelectBxor(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); Operand *SelectIread(const BaseNode &parent, const IreadNode &expr, int extraOffset = 0); @@ -97,19 +98,47 @@ class MPISel { const BaseNode &parent) = 0; virtual Operand *SelectStrLiteral(ConststrNode &constStr) = 0; virtual Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCtz32(IntrinsicopNode &node) = 0; + virtual Operand *SelectClz32(IntrinsicopNode &node) = 0; + virtual Operand *SelectSin(IntrinsicopNode &node) = 0; + virtual Operand *SelectSinh(IntrinsicopNode &node) = 0; + virtual Operand *SelectAsin(IntrinsicopNode &node) = 0; + virtual Operand *SelectCos(IntrinsicopNode &node) = 0; + virtual Operand *SelectCosh(IntrinsicopNode &node) = 0; + virtual Operand *SelectAcos(IntrinsicopNode &node) = 0; + virtual Operand *SelectAtan(IntrinsicopNode &node) = 0; + virtual Operand *SelectExp(IntrinsicopNode &node) = 0; + virtual Operand *SelectLog(IntrinsicopNode &node) = 0; + virtual Operand *SelectLog10(IntrinsicopNode 
&node) = 0; + virtual Operand *SelectSinf(IntrinsicopNode &node) = 0; + virtual Operand *SelectSinhf(IntrinsicopNode &node) = 0; + virtual Operand *SelectAsinf(IntrinsicopNode &node) = 0; + virtual Operand *SelectCosf(IntrinsicopNode &node) = 0; + virtual Operand *SelectCoshf(IntrinsicopNode &node) = 0; + virtual Operand *SelectAcosf(IntrinsicopNode &node) = 0; + virtual Operand *SelectAtanf(IntrinsicopNode &node) = 0; + virtual Operand *SelectExpf(IntrinsicopNode &node) = 0; + virtual Operand *SelectLogf(IntrinsicopNode &node) = 0; + virtual Operand *SelectLog10f(IntrinsicopNode &node) = 0; + virtual Operand *SelectFfs(IntrinsicopNode &node) = 0; + virtual Operand *SelectMemcmp(IntrinsicopNode &node) = 0; + virtual Operand *SelectStrlen(IntrinsicopNode &node) = 0; + virtual Operand *SelectStrcmp(IntrinsicopNode &node) = 0; + virtual Operand *SelectStrncmp(IntrinsicopNode &node) = 0; + virtual Operand *SelectStrchr(IntrinsicopNode &node) = 0; + virtual Operand *SelectStrrchr(IntrinsicopNode &node) = 0; virtual void SelectAsm(AsmNode &node) = 0; - virtual void SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRhs) = 0; + virtual void SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &rOpnd, DassignNode &s) = 0; Operand *SelectBnot(const UnaryNode &node, Operand &opnd0, const BaseNode &parent); Operand *SelectMin(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); Operand *SelectMax(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); Operand *SelectRetype(TypeCvtNode &node, Operand &opnd0); + void SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); protected: MemPool *isMp; CGFunc *cgFunc; - void SelectCopy(Operand &dest, Operand &src, PrimType toType, PrimType fromType); - void SelectCopy(Operand &dest, Operand &src, PrimType toType); - RegOperand &SelectCopy2Reg(Operand &src, PrimType toType, PrimType fromType); - RegOperand 
&SelectCopy2Reg(Operand &src, PrimType toType); + void SelectCopy(Operand &dest, Operand &src, PrimType toType, PrimType fromType = PTY_unknown); + RegOperand &SelectCopy2Reg(Operand &src, PrimType toType, PrimType fromType = PTY_unknown); void SelectIntCvt(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType); void SelectCvtInt2Float(RegOperand &resOpnd, Operand &origOpnd0, PrimType toType, PrimType fromType); void SelectFloatCvt(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType); @@ -119,12 +148,22 @@ class MPISel { MirTypeInfo GetMirTypeInfoFormFieldIdAndMirType(FieldID fieldId, MIRType *mirType); MirTypeInfo GetMirTypeInfoFromMirNode(const BaseNode &node); MemOperand *GetOrCreateMemOpndFromIreadNode(const IreadNode &expr, PrimType primType, int offset); + + virtual void SelectCvtFloat2Float(Operand &resOpnd, Operand &srcOpnd, PrimType fromType, PrimType toType) { + CHECK_FATAL(false, "NYI"); + } + virtual void SelectCvtFloat2Int(Operand &resOpnd, Operand &srcOpnd, PrimType itype, PrimType ftype) { + CHECK_FATAL(false, "NYI"); + } + virtual bool IsSymbolRequireIndirection(const MIRSymbol &symbol) { + return false; + } private: StmtNode *HandleFuncEntry(); - void HandleFuncExit() const; void SelectDassign(StIdx stIdx, FieldID fieldId, PrimType rhsPType, Operand &opndRhs); void SelectDassignStruct(MIRSymbol &symbol, MemOperand &symbolMem, Operand &opndRhs); - virtual MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId = 0) const = 0; + virtual void HandleFuncExit() const = 0; + virtual MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId = 0, RegOperand *baseReg = nullptr) = 0; virtual MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uint32 opndSize, int64 offset) const = 0; virtual Operand &GetTargetRetOperand(PrimType primType, int32 sReg) = 0; void SelectBasicOp(Operand &resOpnd, Operand &opnd0, Operand &opnd1, MOperator mOp, PrimType primType); 
@@ -138,7 +177,6 @@ class MPISel { void SelectBnot(Operand &resOpnd, Operand &opnd0, PrimType primType); void SelectBior(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); void SelectExtractbits(RegOperand &resOpnd, RegOperand &opnd0, uint8 bitOffset, uint8 bitSize, PrimType primType); - void SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); virtual RegOperand &GetTargetBasicPointer(PrimType primType) = 0; virtual RegOperand &GetTargetStackPointer(PrimType primType) = 0; void SelectMin(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); diff --git a/src/mapleall/maple_be/include/cg/reg_info.h b/src/mapleall/maple_be/include/cg/reg_info.h index 611e0ead09..cb60372c21 100644 --- a/src/mapleall/maple_be/include/cg/reg_info.h +++ b/src/mapleall/maple_be/include/cg/reg_info.h @@ -20,6 +20,118 @@ namespace maplebe { +constexpr uint32 kBaseVirtualRegNO = 200; /* avoid conflicts between virtual and physical */ +constexpr uint32 kRegIncrStepLen = 80; /* reg number increate step length */ + +class VirtualRegNode { + public: + VirtualRegNode() = default; + + VirtualRegNode(RegType type, uint32 size) + : regType(type), size(size), regNO(kInvalidRegNO) {} + + virtual ~VirtualRegNode() = default; + + void AssignPhysicalRegister(regno_t phyRegNO) { + regNO = phyRegNO; + } + + RegType GetType() const { + return regType; + } + + uint32 GetSize() const { + return size; + } + + private: + RegType regType = kRegTyUndef; + uint32 size = 0; /* size in bytes */ + regno_t regNO = kInvalidRegNO; /* physical register assigned by register allocation */ +}; + +class VregInfo { + public: + /* Only one place to allocate vreg within cg. + 'static' can be removed and initialized here if only allocation is from only one source. 
*/ + static uint32 virtualRegCount; + static uint32 maxRegCount; + static std::vector vRegTable; + + uint32 GetNextVregNO(RegType type, uint32 size) { + /* when vReg reach to maxRegCount, maxRegCount limit adds 80 every time */ + /* and vRegTable increases 80 elements. */ + if (virtualRegCount >= maxRegCount) { + ASSERT(virtualRegCount < maxRegCount + 1, "MAINTAIN FAILED"); + maxRegCount += kRegIncrStepLen; + VRegTableResize(maxRegCount); + } +#if TARGAARCH64 || TARGX86_64 || TARGRISCV64 + if (size < k4ByteSize) { + size = k4ByteSize; + } +#if TARGAARCH64 + /* cannot handle 128 size register */ + if (type == kRegTyInt && size > k8ByteSize) { + size = k8ByteSize; + } +#endif + ASSERT(size == k4ByteSize || size == k8ByteSize || size == k16ByteSize, "check size"); +#endif + VRegTableValuesSet(virtualRegCount, type, size); + + uint32 temp = virtualRegCount; + ++virtualRegCount; + return temp; + } + void Inc(uint32 v) { + virtualRegCount += v; + } + uint32 GetCount() const { + return virtualRegCount; + } + void SetCount(uint32 v) { + /* Vreg number can only increase. 
*/ + if (virtualRegCount < v) { + virtualRegCount = v; + } + } + + /* maxRegCount related stuff */ + uint32 GetMaxRegCount() const { + return maxRegCount; + } + void SetMaxRegCount(uint32 num) { + maxRegCount = num; + } + void IncMaxRegCount(uint32 num) { + maxRegCount += num; + } + + /* vRegTable related stuff */ + void VRegTableResize(uint32 sz) { + vRegTable.resize(sz); + } + uint32 VRegTableSize() const { + return vRegTable.size(); + } + uint32 VRegTableGetSize(uint32 idx) const { + return vRegTable[idx].GetSize(); + } + RegType VRegTableGetType(uint32 idx) const { + return vRegTable[idx].GetType(); + } + VirtualRegNode &VRegTableElementGet(uint32 idx) { + return vRegTable[idx]; + } + void VRegTableElementSet(uint32 idx, VirtualRegNode *node) { + vRegTable[idx] = *node; + } + void VRegTableValuesSet(uint32 idx, RegType rt, uint32 sz) { + new (&vRegTable[idx]) VirtualRegNode(rt, sz); + } +}; + class RegisterInfo { public: explicit RegisterInfo(MapleAllocator &mallocator) diff --git a/src/mapleall/maple_be/include/cg/standardize.h b/src/mapleall/maple_be/include/cg/standardize.h index dac0e28702..573432e940 100644 --- a/src/mapleall/maple_be/include/cg/standardize.h +++ b/src/mapleall/maple_be/include/cg/standardize.h @@ -41,6 +41,10 @@ class Standardize { void DoStandardize(); + CGFunc *GetCgFunc() { + return cgFunc; + } + protected: void SetAddressMapping(bool needMapping) { needAddrMapping = needMapping; @@ -55,9 +59,10 @@ class Standardize { virtual void StdzMov(Insn &insn) = 0; virtual void StdzStrLdr(Insn &insn) = 0; virtual void StdzBasicOp(Insn &insn) = 0; - virtual void StdzUnaryOp(Insn &insn, CGFunc &cgFunc) = 0; - virtual void StdzCvtOp(Insn &insn, CGFunc &cgFunc) = 0; - virtual void StdzShiftOp(Insn &insn, CGFunc &cgFunc) = 0; + virtual void StdzUnaryOp(Insn &insn) = 0; + virtual void StdzCvtOp(Insn &insn) = 0; + virtual void StdzShiftOp(Insn &insn) = 0; + virtual void StdzCommentOp(Insn &insn) = 0; CGFunc *cgFunc; bool needAddrMapping = false; }; 
diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_MPISel.h b/src/mapleall/maple_be/include/cg/x86_64/x64_MPISel.h index 6a26100ccb..b0d400d038 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_MPISel.h +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_MPISel.h @@ -24,8 +24,9 @@ class X64MPIsel : public MPISel { public: X64MPIsel(MemPool &mp, CGFunc &f) : MPISel(mp, f) {} ~X64MPIsel() override = default; - void SelectReturn(NaryStmtNode &retNode, Operand &opnd) override; - void SelectReturn() override; + void HandleFuncExit() const override; + void SelectReturn(NaryStmtNode &retNode) override; + void SelectReturn(bool noOpnd) override; void SelectCall(CallNode &callNode) override; void SelectIcall(IcallNode &icallNode, Operand &opnd0) override; Operand &ProcessReturnReg(PrimType primType, int32 sReg) override; @@ -33,14 +34,15 @@ class X64MPIsel : public MPISel { Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent) override; Operand *SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) override; Operand *SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) override; - Operand *SelectDoubleConst(MIRDoubleConst &intConst, PrimType primType) const override; + Operand *SelectFloatConst(MIRFloatConst &doubleConst, PrimType primType, const BaseNode &parent) const override; + Operand *SelectDoubleConst(MIRDoubleConst &doubleConst, PrimType primType, const BaseNode &parent) const override; void SelectGoto(GotoNode &stmt) override; void SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) override; void SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd, Operand &opndRhs) override; - void SelectAggDassign(maplebe::MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRhs) override; + void SelectAggDassign(maplebe::MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &rOpnd, DassignNode &s) override; void SelectAggCopy(MemOperand &lhs, MemOperand &rhs, uint32 copySize) override; void SelectRangeGoto(RangeGotoNode 
&rangeGotoNode, Operand &srcOpnd) override; - void SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode, Operand &opnd0) override; + void SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode) override; void SelectIgoto(Operand &opnd0) override; Operand *SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; Operand *SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; @@ -55,7 +57,7 @@ class X64MPIsel : public MPISel { Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; void SelectAsm(AsmNode &node) override; private: - MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId = 0) const override; + MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId = 0, RegOperand *baseReg = nullptr) override; MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uint32 opndSize, int64 offset) const override; Insn &AppendCall(x64::X64MOP_t mOp, Operand &targetOpnd, ListOperand ¶mOpnds, ListOperand &retOpnds); @@ -89,6 +91,36 @@ class X64MPIsel : public MPISel { void SelectPseduoForReturn(std::vector &retRegs); RegOperand *PrepareMemcpyParm(MemOperand &memOperand, MOperator mOp); RegOperand *PrepareMemcpyParm(uint64 copySize); + Operand *SelectCtz32(IntrinsicopNode &node) override; + Operand *SelectClz32(IntrinsicopNode &node) override; + Operand *SelectSin(IntrinsicopNode &node) override; + Operand *SelectSinh(IntrinsicopNode &node) override; + Operand *SelectAsin(IntrinsicopNode &node) override; + Operand *SelectCos(IntrinsicopNode &node) override; + Operand *SelectCosh(IntrinsicopNode &node) override; + Operand *SelectAcos(IntrinsicopNode &node) override; + Operand *SelectAtan(IntrinsicopNode &node) override; + Operand *SelectExp(IntrinsicopNode &node) override; + Operand *SelectLog(IntrinsicopNode &node) override; + Operand *SelectLog10(IntrinsicopNode &node) override; + Operand 
*SelectSinf(IntrinsicopNode &node) override; + Operand *SelectSinhf(IntrinsicopNode &node) override; + Operand *SelectAsinf(IntrinsicopNode &node) override; + Operand *SelectCosf(IntrinsicopNode &node) override; + Operand *SelectCoshf(IntrinsicopNode &node) override; + Operand *SelectAcosf(IntrinsicopNode &node) override; + Operand *SelectAtanf(IntrinsicopNode &node) override; + Operand *SelectExpf(IntrinsicopNode &node) override; + Operand *SelectLogf(IntrinsicopNode &node) override; + Operand *SelectLog10f(IntrinsicopNode &node) override; + Operand *SelectFfs(IntrinsicopNode &node) override; + Operand *SelectMemcmp(IntrinsicopNode &node) override; + Operand *SelectStrlen(IntrinsicopNode &node) override; + Operand *SelectStrcmp(IntrinsicopNode &node) override; + Operand *SelectStrncmp(IntrinsicopNode &node) override; + Operand *SelectStrchr(IntrinsicopNode &node) override; + Operand *SelectStrrchr(IntrinsicopNode &node) override; + Operand *SelectAbs(UnaryNode &node, Operand &opnd0) override; /* save param pass by reg */ std::vector> paramPassByReg; diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_standardize.h b/src/mapleall/maple_be/include/cg/x86_64/x64_standardize.h index 46353bc7e0..dd84920da6 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_standardize.h +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_standardize.h @@ -31,10 +31,11 @@ class X64Standardize : public Standardize { void StdzMov(Insn &insn) override; void StdzStrLdr(Insn &insn) override; void StdzBasicOp(Insn &insn) override; - void StdzUnaryOp(Insn &insn, CGFunc &cgFunc) override; - void StdzCvtOp(Insn &insn, CGFunc &cgFunc) override; - void StdzShiftOp(Insn &insn, CGFunc &cgFunc) override; - void StdzFloatingNeg(Insn &insn, CGFunc &cgFunc); + void StdzUnaryOp(Insn &insn) override; + void StdzCvtOp(Insn &insn) override; + void StdzShiftOp(Insn &insn) override; + void StdzFloatingNeg(Insn &insn); + void StdzCommentOp(Insn &insn) override; }; } #endif /* 
MAPLEBE_INCLUDEX_64_STANDARDIZE_H */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_MPISel.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_MPISel.cpp index 72ba534f1b..b3f2cec6ef 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_MPISel.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_MPISel.cpp @@ -11,4 +11,708 @@ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR * FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. - */ \ No newline at end of file + */ + +#include "aarch64_memlayout.h" +#include "aarch64_cgfunc.h" +//#include "x64_isa_tbl.h" +#include "aarch64_cg.h" +#include "isel.h" +#include "aarch64_MPISel.h" + +namespace maplebe { +/* local Handle functions in isel, do not delete or move */ +void HandleGoto(StmtNode &stmt, MPISel &iSel); +void HandleLabel(StmtNode &stmt, const MPISel &iSel); + +void AArch64MPIsel::HandleFuncExit() const { + BlockNode *block = cgFunc->GetFunction().GetBody(); + ASSERT(block != nullptr, "get func body block failed in CGFunc::GenerateInstruction"); + cgFunc->GetCurBB()->SetLastStmt(*block->GetLast()); + /* Set lastbb's frequency */ + cgFunc->SetLastBB(*cgFunc->GetCurBB()); +} + +/* Field-ID 0 is assigned to the top level structure. (Field-ID also defaults to 0 if it is not a structure.) 
*/ +MemOperand &AArch64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId, RegOperand *baseReg) { + PrimType symType; + int32 fieldOffset = 0; + bool isCopy = IsParamStructCopy(symbol); + if (fieldId == 0) { + symType = symbol.GetType()->GetPrimType(); + } else { + MIRType *mirType = symbol.GetType(); + ASSERT((mirType->IsMIRStructType() || mirType->IsMIRUnionType()), "non-structure"); + MIRStructType *structType = static_cast(mirType); + symType = structType->GetFieldType(fieldId)->GetPrimType(); + if (baseReg || !isCopy) { + fieldOffset = static_cast(cgFunc->GetBecommon().GetFieldOffset(*structType, fieldId).first); + } + } + uint32 opndSz = (symType == PTY_agg) ? k64BitSize : GetPrimTypeBitSize(symType); + if (isCopy) { + opndSz = (baseReg) ? opndSz : k64BitSize; + } + if (baseReg) { + AArch64CGFunc *a64func = static_cast(cgFunc); + OfstOperand *ofstOpnd = &a64func->GetOrCreateOfstOpnd(static_cast(fieldOffset), k32BitSize); + return a64func->GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, opndSz, baseReg, nullptr, ofstOpnd, nullptr); + } else { + return GetOrCreateMemOpndFromSymbol(symbol, opndSz, fieldOffset); + } +} +MemOperand &AArch64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uint32 opndSize, int64 offset) const { + return static_cast(cgFunc)->GetOrCreateMemOpnd(symbol, offset, opndSize); +} + +Operand *AArch64MPIsel::SelectFloatConst(MIRFloatConst &floatConst, PrimType primType, const BaseNode &parent) const { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + return aarchCGFunc->HandleFmovImm(primType, floatConst.GetIntValue(), floatConst, parent); +} + +Operand *AArch64MPIsel::SelectDoubleConst(MIRDoubleConst &dblConst, PrimType primType, const BaseNode &parent) const { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + return aarchCGFunc->HandleFmovImm(primType, dblConst.GetIntValue(), dblConst, parent); +} + +void AArch64MPIsel::SelectReturn(NaryStmtNode &retNode) { + ASSERT(retNode.NumOpnds() <= 1, "NYI 
return nodes number > 1"); + Operand *opnd = nullptr; + if (retNode.NumOpnds() != 0) { + if (!cgFunc->GetFunction().StructReturnedInRegs()) { + opnd = cgFunc->HandleExpr(retNode, *retNode.Opnd(0)); + } else { + cgFunc->SelectReturnSendOfStructInRegs(retNode.Opnd(0)); + } + } + cgFunc->SelectReturn(opnd); +} + +void AArch64MPIsel::SelectReturn(bool noOpnd) { + /* if return operand exist, cgFunc->SelectReturn will generate it */ + if (noOpnd) { + MOperator mOp = MOP_xuncond; + LabelOperand &targetOpnd = cgFunc->GetOrCreateLabelOperand(cgFunc->GetReturnLabel()->GetLabelIdx()); + Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, AArch64CG::kMd[mOp]); + jmpInsn.AddOpndChain(targetOpnd); + cgFunc->GetCurBB()->AppendInsn(jmpInsn); + } +} + +void AArch64MPIsel::CreateCallStructParamPassByStack(MemOperand &memOpnd, int32 symSize, int32 baseOffset) { + int32 copyTime = RoundUp(symSize, GetPointerSize()) / GetPointerSize(); + for (int32 i = 0; i < copyTime; ++i) { + MemOperand &addrMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize); + addrMemOpnd.SetBaseRegister(*memOpnd.GetBaseRegister()); + ImmOperand &newImmOpnd = static_cast(*memOpnd.GetOffsetOperand()->Clone(*cgFunc->GetMemoryPool())); + newImmOpnd.SetValue(newImmOpnd.GetValue() + i * GetPointerSize()); + addrMemOpnd.SetOffsetOperand(newImmOpnd); + RegOperand &spOpnd = cgFunc->GetOpndBuilder()->CreatePReg(RSP, k64BitSize, kRegTyInt); + Operand &stMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(spOpnd, + (baseOffset + i * GetPointerSize()), k64BitSize); + SelectCopy(stMemOpnd, addrMemOpnd, PTY_u64); + } +} + +void AArch64MPIsel::CreateCallStructParamPassByReg(MemOperand &memOpnd, regno_t regNo, uint32 parmNum) { + RegOperand &parmOpnd = cgFunc->GetOpndBuilder()->CreatePReg(regNo, k64BitSize, kRegTyInt); + MemOperand &addrMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize); + addrMemOpnd.SetBaseRegister(*memOpnd.GetBaseRegister()); + ImmOperand &newImmOpnd = 
static_cast(*memOpnd.GetOffsetOperand()->Clone(*cgFunc->GetMemoryPool())); + newImmOpnd.SetValue(newImmOpnd.GetValue() + parmNum * GetPointerSize()); + addrMemOpnd.SetOffsetOperand(newImmOpnd); + paramPassByReg.push_back({&parmOpnd, &addrMemOpnd, PTY_a64}); +} + +std::tuple AArch64MPIsel::GetMemOpndInfoFromAggregateNode(BaseNode &argExpr) { + /* get mirType info */ + auto [fieldId, mirType] = GetFieldIdAndMirTypeFromMirNode(argExpr); + MirTypeInfo symInfo = GetMirTypeInfoFormFieldIdAndMirType(fieldId, mirType); + /* get symbol memOpnd info */ + MemOperand *symMemOpnd = nullptr; + if (argExpr.GetOpCode() == OP_dread) { + AddrofNode &dread = static_cast(argExpr); + MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(dread.GetStIdx()); + symMemOpnd = &GetOrCreateMemOpndFromSymbol(*symbol, dread.GetFieldID()); + } else if (argExpr.GetOpCode() == OP_iread) { + IreadNode &iread = static_cast(argExpr); + symMemOpnd = GetOrCreateMemOpndFromIreadNode(iread, symInfo.primType, symInfo.offset); + } else { + CHECK_FATAL(false, "unsupported opcode"); + } + return {symMemOpnd, symInfo.size, mirType}; +} + +void AArch64MPIsel::SelectParmListForAggregate(BaseNode &argExpr, AArch64CallConvImpl &parmLocator, bool isArgUnused) { + auto [argOpnd, argSize, mirType] = GetMemOpndInfoFromAggregateNode(argExpr); + ASSERT(argOpnd->IsMemoryAccessOperand(), "wrong opnd"); + MemOperand &memOpnd = static_cast(*argOpnd); + + CCLocInfo ploc; + parmLocator.LocateNextParm(*mirType, ploc); + if (isArgUnused) { + return; + } + + /* create call struct param pass */ + if (argSize > k16ByteSize || ploc.reg0 == kRinvalid) { + CreateCallStructParamPassByStack(memOpnd, argSize, ploc.memOffset); + } else { + CHECK_FATAL(ploc.fpSize == 0, "Unknown call parameter state"); + CreateCallStructParamPassByReg(memOpnd, ploc.reg0, 0); + if (ploc.reg1 != kRinvalid) { + CreateCallStructParamPassByReg(memOpnd, ploc.reg1, 1); + } + if (ploc.reg2 != kRinvalid) { + CreateCallStructParamPassByReg(memOpnd, 
ploc.reg2, 2); + } + if (ploc.reg3 != kRinvalid) { + CreateCallStructParamPassByReg(memOpnd, ploc.reg3, 3); + } + } +} + +/* + * SelectParmList generates an instrunction for each of the parameters + * to load the parameter value into the corresponding register. + * We return a list of registers to the call instruction because + * they may be needed in the register allocation phase. + */ +void AArch64MPIsel::SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds) { + AArch64CGFunc *aarch64CGFunc = static_cast(cgFunc); + aarch64CGFunc->SelectParmList(naryNode, srcOpnds); +} + +bool AArch64MPIsel::IsParamStructCopy(const MIRSymbol &symbol) { + if (symbol.GetStorageClass() == kScFormal && + cgFunc->GetBecommon().GetTypeSize(symbol.GetTyIdx().GetIdx()) > k16ByteSize) { + return true; + } + return false; +} + +bool AArch64MPIsel::IsSymbolRequireIndirection(const MIRSymbol &symbol) { + return IsParamStructCopy(symbol); +} + +void AArch64MPIsel::SelectIntAggCopyReturn(MemOperand &symbolMem, uint64 aggSize) { +} + +void AArch64MPIsel::SelectAggCopy(MemOperand &lhs, MemOperand &rhs, uint32 copySize) { + CHECK_FATAL(false, "Invalid MPISel function"); +} + +void AArch64MPIsel::SelectLibCallNoReturn(const std::string &funcName, std::vector &opndVec, PrimType primType) { + /* generate libcall withou return value */ + std::vector pt(opndVec.size(), primType); + SelectLibCallNArg(funcName, opndVec, pt); + return; +} + +void AArch64MPIsel::SelectLibCallNArg(const std::string &funcName, std::vector &opndVec, + std::vector pt) { + std::string newName = funcName; + MIRSymbol *st = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + st->SetNameStrIdx(newName); + st->SetStorageClass(kScExtern); + st->SetSKind(kStFunc); + + /* setup the type of the callee function */ + std::vector vec; + std::vector vecAt; + for (size_t i = 1; i < opndVec.size(); ++i) { + vec.emplace_back(GlobalTables::GetTypeTable().GetTypeTable()[static_cast(pt[i])]->GetTypeIndex()); + 
vecAt.emplace_back(TypeAttrs()); + } + + /* only support no return function */ + MIRType *retType = GlobalTables::GetTypeTable().GetTypeTable().at(static_cast(PTY_void)); + st->SetTyIdx(cgFunc->GetBecommon().BeGetOrCreateFunctionType(retType->GetTypeIndex(), vec, vecAt)->GetTypeIndex()); + + /* setup actual parameters */ + ListOperand ¶mOpnds = cgFunc->GetOpndBuilder()->CreateList(); + + AArch64CallConvImpl parmLocator(cgFunc->GetBecommon()); + CCLocInfo ploc; + for (size_t i = 0; i < opndVec.size(); ++i) { + ASSERT(pt[i] != PTY_void, "primType check"); + MIRType *ty; + ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast(pt[i])]; + Operand *stOpnd = opndVec[i]; + ASSERT(stOpnd->IsRegister(), "exp result should be reg"); + RegOperand *expRegOpnd = static_cast(stOpnd); + parmLocator.LocateNextParm(*ty, ploc); + if (ploc.reg0 != 0) { /* load to the register */ + RegOperand &parmRegOpnd = cgFunc->GetOpndBuilder()->CreatePReg(ploc.reg0, + expRegOpnd->GetSize(), cgFunc->GetRegTyFromPrimTy(pt[i])); + SelectCopy(parmRegOpnd, *expRegOpnd, pt[i]); + paramOpnds.PushOpnd(parmRegOpnd); + } + ASSERT(ploc.reg1 == 0, "SelectCall NYI"); + } + + MIRSymbol *sym = cgFunc->GetFunction().GetLocalOrGlobalSymbol(st->GetStIdx(), false); + Operand &targetOpnd = cgFunc->GetOpndBuilder()->CreateFuncNameOpnd(*sym); + ListOperand &retOpnds = cgFunc->GetOpndBuilder()->CreateList(); + Insn &callInsn = AppendCall(MOP_xbl, targetOpnd, paramOpnds, retOpnds); + + callInsn.SetRetType(Insn::kRegInt); + if (retType != nullptr) { + callInsn.SetRetSize(static_cast(retType->GetSize())); + callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType())); + } + return; +} + +RegOperand *AArch64MPIsel::PrepareMemcpyParm(MemOperand &memOperand, MOperator mOp) { + RegOperand ®Result = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + Insn &addrInsn = (cgFunc->GetInsnBuilder()->BuildInsn(mOp, AArch64CG::kMd[mOp])); + addrInsn.AddOpndChain(memOperand).AddOpndChain(regResult); + 
cgFunc->GetCurBB()->AppendInsn(addrInsn); + return ®Result; +} + +RegOperand *AArch64MPIsel::PrepareMemcpyParm(uint64 copySize) { + RegOperand ®Result = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + ImmOperand &sizeOpnd = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, copySize); + SelectCopy(regResult, sizeOpnd, PTY_i64); + return ®Result; +} + +void AArch64MPIsel::SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRh, DassignNode &stmt) { + (void)lhsInfo; + (void)symbolMem; + (void)opndRh; + cgFunc->SelectAggDassign(stmt); +} + +void AArch64MPIsel::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd, Operand &opndRhs) { + (void)opndRhs; + cgFunc->SelectAggIassign(stmt, AddrOpnd); +} + +Insn &AArch64MPIsel::AppendCall(AArch64MOP_t mOp, Operand &targetOpnd, + ListOperand ¶mOpnds, ListOperand &retOpnds) { + Insn &callInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, AArch64CG::kMd[mOp]); + callInsn.AddOpndChain(targetOpnd).AddOpndChain(paramOpnds).AddOpndChain(retOpnds); + cgFunc->GetCurBB()->AppendInsn(callInsn); + cgFunc->GetCurBB()->SetHasCall(); + cgFunc->GetFunction().SetHasCall(); + return callInsn; +} + +void AArch64MPIsel::SelectCalleeReturn(MIRType *retType, ListOperand &retOpnds) { + if (retType == nullptr) { + return; + } + auto retSize = retType->GetSize() * kBitsPerByte; + if (retType->GetPrimType() != PTY_agg || retSize <= k128BitSize) { + if (retSize > k0BitSize) { + retOpnds.PushOpnd(cgFunc->GetOpndBuilder()->CreatePReg(R0, k64BitSize, kRegTyInt)); + } + if (retSize > k64BitSize) { + retOpnds.PushOpnd(cgFunc->GetOpndBuilder()->CreatePReg(R1, k64BitSize, kRegTyInt)); + } + } +} + +void AArch64MPIsel::SelectCall(CallNode &callNode) { + cgFunc->SelectCall(callNode); +} + +void AArch64MPIsel::SelectIcall(IcallNode &iCallNode, Operand &opnd0) { + cgFunc->SelectIcall(iCallNode, opnd0); +} + +Operand &AArch64MPIsel::ProcessReturnReg(PrimType primType, int32 sReg) { + return GetTargetRetOperand(primType, sReg); +} + 
+void AArch64MPIsel::SelectGoto(GotoNode &stmt) { + MOperator mOp = MOP_xuncond; + auto funcName = ".L." + std::to_string(cgFunc->GetUniqueID()) + "__" + std::to_string(stmt.GetOffset()); + LabelOperand &targetOpnd = cgFunc->GetOpndBuilder()->CreateLabel(funcName.c_str(), stmt.GetOffset()); + Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, AArch64CG::kMd[mOp]); + cgFunc->GetCurBB()->AppendInsn(jmpInsn); + jmpInsn.AddOpndChain(targetOpnd); + cgFunc->SetCurBBKind(BB::kBBGoto); + return; +} + +void AArch64MPIsel::SelectIgoto(Operand &opnd0) { + CHECK_FATAL(opnd0.IsRegister(), "only register implemented!"); + MOperator mOp = MOP_xbr; + Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, AArch64CG::kMd[mOp]); + jmpInsn.AddOpndChain(opnd0); + cgFunc->GetCurBB()->AppendInsn(jmpInsn); + return; +} + +/* The second parameter in function va_start does not need to be concerned here, + * it is mainly used in proepilog */ +void AArch64MPIsel::SelectCVaStart(const IntrinsiccallNode &intrnNode) { + AArch64CGFunc *a64func = static_cast(cgFunc); + a64func->SelectCVaStart(intrnNode); +} + +void AArch64MPIsel::SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) { + MIRIntrinsicID intrinsic = intrinsiccallNode.GetIntrinsic(); + + if (intrinsic == INTRN_C_va_start) { + SelectCVaStart(intrinsiccallNode); + return; + } + if (intrinsic == INTRN_C_stack_save || intrinsic == INTRN_C_stack_restore) { + return; + } + + CHECK_FATAL(false, "Intrinsic %d: %s not implemented by AArch64 isel CG.", intrinsic, GetIntrinsicName(intrinsic)); +} + +void AArch64MPIsel::SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) { + MIRType *etype = GlobalTables::GetTypeTable().GetTypeFromTyIdx((TyIdx)PTY_a64); + std::vector sizeArray; + const SmallCaseVector &switchTable = rangeGotoNode.GetRangeGotoTable(); + sizeArray.emplace_back(switchTable.size()); + MemPool *memPool = cgFunc->GetMemoryPool(); + MIRArrayType *arrayType = memPool->New(etype->GetTypeIndex(), sizeArray); + 
MIRAggConst *arrayConst = memPool->New(cgFunc->GetMirModule(), *arrayType); + for (const auto &itPair : switchTable) { + LabelIdx labelIdx = itPair.second; + cgFunc->GetCurBB()->PushBackRangeGotoLabel(labelIdx); + MIRConst *mirConst = memPool->New(labelIdx, cgFunc->GetFunction().GetPuidx(), *etype); + arrayConst->AddItem(mirConst, 0); + } + MIRSymbol *lblSt = cgFunc->GetFunction().GetSymTab()->CreateSymbol(kScopeLocal); + lblSt->SetStorageClass(kScFstatic); + lblSt->SetSKind(kStConst); + lblSt->SetTyIdx(arrayType->GetTypeIndex()); + lblSt->SetKonst(arrayConst); + std::string lblStr(".L_"); + uint32 labelIdxTmp = cgFunc->GetLabelIdx(); + lblStr.append(std::to_string(cgFunc->GetUniqueID())).append("_LOCAL_CONST.").append(std::to_string(labelIdxTmp++)); + cgFunc->SetLabelIdx(labelIdxTmp); + lblSt->SetNameStrIdx(lblStr); + cgFunc->AddEmitSt(cgFunc->GetCurBB()->GetId(), *lblSt); + + ImmOperand &stOpnd = cgFunc->GetOpndBuilder()->CreateImm(*lblSt, 0, 0); + /* get index */ + PrimType srcType = rangeGotoNode.Opnd(0)->GetPrimType(); + RegOperand &opnd0 = SelectCopy2Reg(srcOpnd, srcType); + int32 minIdx = switchTable[0].first; + ImmOperand &opnd1 = cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(srcType), + -minIdx - rangeGotoNode.GetTagOffset()); + RegOperand *indexOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(srcType), kRegTyInt); + SelectAdd(*indexOpnd, opnd0, opnd1, srcType); + if (indexOpnd->GetSize() != GetPrimTypeBitSize(PTY_u64)) { + indexOpnd = static_cast(&cgFunc->SelectCopy(*indexOpnd, PTY_u64, PTY_u64)); + } + + /* load the address of the switch table */ + RegOperand &baseOpnd = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + cgFunc->GetCurBB()->AppendInsn(cgFunc->GetInsnBuilder()->BuildInsn(MOP_xadrp, baseOpnd, stOpnd)); + cgFunc->GetCurBB()->AppendInsn(cgFunc->GetInsnBuilder()->BuildInsn(MOP_xadrpl12, baseOpnd, baseOpnd, stOpnd)); + + /* load the displacement into a register by accessing memory at base + index*8 */ + 
Operand *disp = static_cast(cgFunc)->CreateMemOperand(MemOperand::kAddrModeBOrX, k64BitSize, baseOpnd, *indexOpnd, k8BitShift); + RegOperand &tgt = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + SelectAdd(tgt, baseOpnd, *disp, PTY_u64); + Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_xbr, AArch64CG::kMd[MOP_xbr]); + jmpInsn.AddOpndChain(tgt); + cgFunc->GetCurBB()->AppendInsn(jmpInsn); +} + +Operand *AArch64MPIsel::SelectAddrof(AddrofNode &expr, const BaseNode &parent) { + return cgFunc->SelectAddrof(expr, parent, false); +} + +Operand *AArch64MPIsel::SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) { + return &cgFunc->SelectAddrofFunc(expr, parent); +} + +Operand *AArch64MPIsel::SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) { + /* adrp reg, label-id */ + uint32 instrSize = static_cast(expr.SizeOfInstr()); + PrimType primType = (instrSize == k8ByteSize) ? PTY_u64 : + (instrSize == k4ByteSize) ? PTY_u32 : + (instrSize == k2ByteSize) ? 
PTY_u16 : PTY_u8; + Operand &dst = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + ImmOperand &immOpnd = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, expr.GetOffset()); + cgFunc->GetCurBB()->AppendInsn(cgFunc->GetInsnBuilder()->BuildInsn(MOP_adrp_label, dst, immOpnd)); + return &dst; +} + +/* + * handle brfalse/brtrue op, opnd0 can be a compare node or non-compare node + * such as a dread for example + */ +void AArch64MPIsel::SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode) { + auto &condGotoNode = static_cast(stmt); + Operand *opnd0 = nullptr; + Operand *opnd1 = nullptr; + if (!kOpcodeInfo.IsCompare(condNode.GetOpCode())) { + Opcode condOp = condGotoNode.GetOpCode(); + if (condNode.GetOpCode() == OP_constval) { + auto &constValNode = static_cast(condNode); + if ((constValNode.GetConstVal()->IsZero() && (OP_brfalse == condOp)) || + (!constValNode.GetConstVal()->IsZero() && (OP_brtrue == condOp))) { + auto *gotoStmt = cgFunc->GetMemoryPool()->New(OP_goto); + gotoStmt->SetOffset(condGotoNode.GetOffset()); + HandleGoto(*gotoStmt, *this); // isel's + auto *labelStmt = cgFunc->GetMemoryPool()->New(); + labelStmt->SetLabelIdx(cgFunc->CreateLabel()); + HandleLabel(*labelStmt, *this); + } + return; + } + /* 1 operand condNode, cmp it with zero */ + opnd0 = HandleExpr(stmt, condNode); // isel's + opnd1 = &cgFunc->CreateImmOperand(condNode.GetPrimType(), 0); + } else { + /* 2 operands condNode */ + opnd0 = HandleExpr(stmt, *condNode.Opnd(0)); // isel's + opnd1 = HandleExpr(stmt, *condNode.Opnd(1)); // isel's + } + cgFunc->SelectCondGoto(stmt, *opnd0, *opnd1); + cgFunc->SetCurBBKind(BB::kBBIf); +} + +Operand *AArch64MPIsel::SelectStrLiteral(ConststrNode &constStr) { + return cgFunc->SelectStrConst(*cgFunc->GetMemoryPool()->New( + constStr.GetStrIdx(), *GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a64)))); +} + +Operand &AArch64MPIsel::GetTargetRetOperand(PrimType primType, int32 sReg) { + regno_t retReg = 
0; + switch (sReg) { + case kSregRetval0: + if (IsPrimitiveFloat(primType)) { + retReg = V0; + } else { + retReg = R0; + } + break; + case kSregRetval1: + if (IsPrimitiveFloat(primType)) { + retReg = V1; + } else { + retReg = R1; + } + break; + default: + CHECK_FATAL(false, "GetTargetRetOperand: NIY"); + break; + } + uint32 bitSize = GetPrimTypeBitSize(primType); + RegOperand &parmRegOpnd = cgFunc->GetOpndBuilder()->CreatePReg(retReg, bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + return parmRegOpnd; +} + +Operand *AArch64MPIsel::SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(dtype)) { + resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), + cgFunc->GetRegTyFromPrimTy(dtype)); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, dtype, node.Opnd(0)->GetPrimType()); + RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, dtype, node.Opnd(1)->GetPrimType()); + SelectMpy(*resOpnd, regOpnd0, regOpnd1, dtype); + } else { + /* vector operand */ + CHECK_FATAL(false, "NIY"); + } + + return resOpnd; +} + +void AArch64MPIsel::SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + cgFunc->SelectMpy(resOpnd, opnd0, opnd1, primType); +} + +Operand *AArch64MPIsel::SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + return cgFunc->SelectDiv(node, opnd0, opnd1, parent); +} + +Operand *AArch64MPIsel::SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + return cgFunc->SelectRem(node, opnd0, opnd1, parent); +} + +Operand *AArch64MPIsel::SelectDivRem(RegOperand &opnd0, RegOperand &opnd1, PrimType primType, Opcode opcode) { + CHECK_FATAL(false, "Invalid MPISel function"); + return nullptr; +} + +Operand *AArch64MPIsel::SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + return cgFunc->SelectCmpOp(node, opnd0, 
opnd1, parent); +} + +void AArch64MPIsel::SelectCmp(Operand &opnd0, Operand &opnd1, PrimType primType) { + CHECK_FATAL(false, "Invalid MPISel function"); +} + +Operand *AArch64MPIsel::SelectSelect(TernaryNode &expr, Operand &cond, Operand &trueOpnd, Operand &falseOpnd, + const BaseNode &parent) { + return cgFunc->SelectSelect(expr, cond, trueOpnd, falseOpnd, parent); +} + +void AArch64MPIsel::SelectSelect(Operand &resOpnd, Operand &trueOpnd, Operand &falseOpnd, PrimType primType, + Opcode cmpOpcode, PrimType cmpPrimType) { +} + +Operand *AArch64MPIsel::SelectExtractbits(const BaseNode &parent, ExtractbitsNode &node, Operand &opnd0) { + return cgFunc->SelectExtractbits(node, opnd0, parent); +} + +void AArch64MPIsel::SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + AArch64CGFunc *a64func = static_cast(cgFunc); + a64func->SelectMinOrMax(isMin, resOpnd, opnd0, opnd1, primType); +} + +Operand *AArch64MPIsel::SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return cgFunc->SelectBswap(node, opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCtz32(IntrinsicopNode &node) { + return cgFunc->SelectCctz(node); +} + +Operand *AArch64MPIsel::SelectClz32(IntrinsicopNode &node) { + return cgFunc->SelectCclz(node); +} + +Operand *AArch64MPIsel::SelectSin(IntrinsicopNode &node) { + return cgFunc->SelectIntrinsicOpWithOneParam(node, "sin"); +} + +Operand *AArch64MPIsel::SelectSinh(IntrinsicopNode &node) { + return cgFunc->SelectIntrinsicOpWithOneParam(node, "sinh"); +} + +Operand *AArch64MPIsel::SelectAsin(IntrinsicopNode &node) { + return cgFunc->SelectIntrinsicOpWithOneParam(node, "asin"); +} + +Operand *AArch64MPIsel::SelectCos(IntrinsicopNode &node) { + return cgFunc->SelectIntrinsicOpWithOneParam(node, "cos"); +} + +Operand *AArch64MPIsel::SelectCosh(IntrinsicopNode &node) { + return cgFunc->SelectIntrinsicOpWithOneParam(node, "cosh"); +} + +Operand *AArch64MPIsel::SelectAcos(IntrinsicopNode &node) 
{ + return cgFunc->SelectIntrinsicOpWithOneParam(node, "acos"); +} + +Operand *AArch64MPIsel::SelectAtan(IntrinsicopNode &node) { + return cgFunc->SelectIntrinsicOpWithOneParam(node, "atan"); +} + +Operand *AArch64MPIsel::SelectExp(IntrinsicopNode &node) { + return cgFunc->SelectIntrinsicOpWithOneParam(node, "exp"); +} + +Operand *AArch64MPIsel::SelectLog(IntrinsicopNode &node) { + return cgFunc->SelectIntrinsicOpWithOneParam(node, "log"); +} + +Operand *AArch64MPIsel::SelectLog10(IntrinsicopNode &node) { + return cgFunc->SelectIntrinsicOpWithOneParam(node, "log10"); +} + +Operand *AArch64MPIsel::SelectSinf(IntrinsicopNode &node) { + return cgFunc->SelectIntrinsicOpWithOneParam(node, "sinf"); +} + +Operand *AArch64MPIsel::SelectSinhf(IntrinsicopNode &node) { + return cgFunc->SelectIntrinsicOpWithOneParam(node, "sinhf"); +} + +Operand *AArch64MPIsel::SelectAsinf(IntrinsicopNode &node) { + return cgFunc->SelectIntrinsicOpWithOneParam(node, "asinf"); +} + +Operand *AArch64MPIsel::SelectCosf(IntrinsicopNode &node) { + return cgFunc->SelectIntrinsicOpWithOneParam(node, "cosf"); +} + +Operand *AArch64MPIsel::SelectCoshf(IntrinsicopNode &node) { + return cgFunc->SelectIntrinsicOpWithOneParam(node, "coshf"); +} + +Operand *AArch64MPIsel::SelectAcosf(IntrinsicopNode &node) { + return cgFunc->SelectIntrinsicOpWithOneParam(node, "acosf"); +} + +Operand *AArch64MPIsel::SelectAtanf(IntrinsicopNode &node) { + return cgFunc->SelectIntrinsicOpWithOneParam(node, "atanf"); +} + +Operand *AArch64MPIsel::SelectExpf(IntrinsicopNode &node) { + return cgFunc->SelectIntrinsicOpWithOneParam(node, "expf"); +} + +Operand *AArch64MPIsel::SelectLogf(IntrinsicopNode &node) { + return cgFunc->SelectIntrinsicOpWithOneParam(node, "logf"); +} + +Operand *AArch64MPIsel::SelectLog10f(IntrinsicopNode &node) { + return cgFunc->SelectIntrinsicOpWithOneParam(node, "log10f"); +} + +Operand *AArch64MPIsel::SelectFfs(IntrinsicopNode &node) { + return cgFunc->SelectIntrinsicOpWithOneParam(node, "ffs"); +} + 
+Operand *AArch64MPIsel::SelectMemcmp(IntrinsicopNode &node) { + return cgFunc->SelectIntrinsicOpWithOneParam(node, "memcmp"); +} + +Operand *AArch64MPIsel::SelectStrlen(IntrinsicopNode &node) { + return cgFunc->SelectIntrinsicOpWithOneParam(node, "strlen"); +} + +Operand *AArch64MPIsel::SelectStrcmp(IntrinsicopNode &node) { + return cgFunc->SelectIntrinsicOpWithOneParam(node, "strcmp"); +} + +Operand *AArch64MPIsel::SelectStrncmp(IntrinsicopNode &node) { + return cgFunc->SelectIntrinsicOpWithOneParam(node, "strncmp"); +} + +Operand *AArch64MPIsel::SelectStrchr(IntrinsicopNode &node) { + return cgFunc->SelectIntrinsicOpWithOneParam(node, "strchr"); +} + +Operand *AArch64MPIsel::SelectStrrchr(IntrinsicopNode &node) { + return cgFunc->SelectIntrinsicOpWithOneParam(node, "strrchr"); +} + +Operand *AArch64MPIsel::SelectAbs(UnaryNode &node, Operand &opnd0) { + return cgFunc->SelectAbs(node, opnd0); +} + +void AArch64MPIsel::SelectCvtFloat2Float(Operand &resOpnd, Operand &srcOpnd, PrimType fromType, PrimType toType) { + static_cast(cgFunc)->SelectCvtFloat2Float(resOpnd, srcOpnd, fromType, toType); +} + +void AArch64MPIsel::SelectCvtFloat2Int(Operand &resOpnd, Operand &srcOpnd, PrimType itype, PrimType ftype) { + static_cast(cgFunc)->SelectCvtFloat2Int(resOpnd, srcOpnd, itype, ftype); +} + +RegOperand &AArch64MPIsel::GetTargetStackPointer(PrimType primType) { + return cgFunc->GetOpndBuilder()->CreatePReg(RSP, GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); +} + +RegOperand &AArch64MPIsel::GetTargetBasicPointer(PrimType primType) { + return cgFunc->GetOpndBuilder()->CreatePReg(RFP, GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); +} + +void AArch64MPIsel::SelectAsm(AsmNode &node) { + cgFunc->SelectAsm(node); +} +} diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp index e193dc7456..6f2aff3a2d 100644 --- 
a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp @@ -51,6 +51,20 @@ CondOperand AArch64CGFunc::ccOperands[kCcLast] = { CondOperand(CC_AL), }; +Operand *AArch64CGFunc::HandleExpr(const BaseNode &parent, BaseNode &expr) { +#if 0 + Operand *opnd; + if (CGOptions::UseNewCg()) { + MPISel *isel = GetISel(); + opnd = isel->HandleExpr(parent, expr); + } else { + opnd = CGFunc::HandleExpr(parent, expr); + } + return opnd; +#endif + return CGFunc::HandleExpr(parent, expr); +} + namespace { constexpr int32 kSignedDimension = 2; /* signed and unsigned */ constexpr int32 kIntByteSizeDimension = 4; /* 1 byte, 2 byte, 4 bytes, 8 bytes */ @@ -7227,9 +7241,9 @@ RegOperand *AArch64CGFunc::CreateVirtualRegisterOperand(regno_t vRegNO, uint32 s RegOperand &AArch64CGFunc::CreateVirtualRegisterOperand(regno_t vRegNO) { ASSERT((vRegOperandTable.find(vRegNO) == vRegOperandTable.end()), "already exist"); - ASSERT(vRegNO < vRegTable.size(), "index out of range"); - uint8 bitSize = static_cast((static_cast(vRegTable[vRegNO].GetSize())) * kBitsPerByte); - RegOperand *res = CreateVirtualRegisterOperand(vRegNO, bitSize, vRegTable.at(vRegNO).GetType()); + ASSERT(vRegNO < vReg.VRegTableSize(), "index out of range"); + uint8 bitSize = static_cast((static_cast(vReg.VRegTableGetSize(vRegNO))) * kBitsPerByte); + RegOperand *res = CreateVirtualRegisterOperand(vRegNO, bitSize, vReg.VRegTableGetType(vRegNO)); vRegOperandTable[vRegNO] = res; return *res; } @@ -7251,14 +7265,14 @@ RegOperand &AArch64CGFunc::GetOrCreateVirtualRegisterOperand(RegOperand ®Opnd } else { auto *newRegOpnd = static_cast(regOpnd.Clone(*memPool)); regno_t newRegNO = newRegOpnd->GetRegisterNumber(); - if (newRegNO >= maxRegCount) { - maxRegCount = newRegNO + kRegIncrStepLen; - vRegTable.resize(maxRegCount); + if (newRegNO >= GetMaxRegNum()) { + SetMaxRegNum(newRegNO + kRegIncrStepLen); + vReg.VRegTableResize(GetMaxRegNum()); } vRegOperandTable[newRegNO] = 
newRegOpnd; VirtualRegNode *vregNode = memPool->New(newRegOpnd->GetRegisterType(), newRegOpnd->GetSize()); - vRegTable[newRegNO] = *vregNode; - vRegCount = maxRegCount; + vReg.VRegTableElementSet(newRegNO, vregNode); + vReg.SetCount(GetMaxRegNum()); return *newRegOpnd; } } @@ -8992,7 +9006,6 @@ void AArch64CGFunc::SelectReturn(Operand *opnd0) { CHECK_FATAL(false, "nyi"); } } - LabelOperand &targetOpnd = GetOrCreateLabelOperand(GetReturnLabel()->GetLabelIdx()); GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xuncond, targetOpnd)); } @@ -9891,7 +9904,7 @@ void AArch64CGFunc::SelectAddAfterInsn(Operand &resOpnd, Operand &opnd0, Operand MemOperand *AArch64CGFunc::AdjustMemOperandIfOffsetOutOfRange( MemOperand *memOpnd, regno_t vrNum, bool isDest, Insn &insn, AArch64reg regNum, bool &isOutOfRange) { - if (vrNum >= vRegTable.size()) { + if (vrNum >= vReg.VRegTableSize()) { CHECK_FATAL(false, "index out of range in AArch64CGFunc::AdjustMemOperandIfOffsetOutOfRange"); } uint32 dataSize = GetOrCreateVirtualRegisterOperand(vrNum).GetSize(); @@ -9947,7 +9960,7 @@ MemOperand *AArch64CGFunc::GetOrCreatSpillMem(regno_t vrNum) { auto p = spillRegMemOperands.find(vrNum); if (p == spillRegMemOperands.end()) { - if (vrNum >= vRegTable.size()) { + if (vrNum >= vReg.VRegTableSize()) { CHECK_FATAL(false, "index out of range in AArch64CGFunc::FreeSpillRegMem"); } uint32 memBitSize = k64BitSize; @@ -12413,4 +12426,34 @@ bool AArch64CGFunc::DistanceCheck(const BB &bb, LabelIdx targLabIdx, uint32 targ } CHECK_FATAL(false, "CFG error"); } + +void AArch64CGFunc::Link2ISel(MPISel *p) { + SetISel(p); + CGFunc::InitFactory(); +} + +void AArch64CGFunc::HandleFuncCfg(CGCFG *cfg) { + RemoveUnreachableBB(); + AddCommonExitBB(); + if (GetMirModule().GetSrcLang() != kSrcLangC) { + MarkCatchBBs(); + } + MarkCleanupBB(); + DetermineReturnTypeofCall(); + cfg->UnreachCodeAnalysis(); + if (GetMirModule().GetSrcLang() != kSrcLangC) { + cfg->WontExitAnalysis(); + } + CG *cg = GetCG(); + if 
(cg->GetCGOptions().IsLazyBinding() && cg->IsLibcore()) { + ProcessLazyBinding(); + } + if (cg->DoPatchLongBranch()) { + PatchLongBranch(); + } + if (cg->GetCGOptions().DoEnableHotColdSplit()) { + cfg->CheckCFGFreq(); + } + NeedStackProtect(); +} } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp index 01be7f0a07..ef33447b6e 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp @@ -39,27 +39,28 @@ void AArch64FPLROffsetAdjustment::AdjustmentOffsetForOpnd(Insn &insn, AArch64CGF stackBaseOpnd = true; } } else if (opnd.IsMemoryAccessOperand()) { - auto &memOpnd = static_cast(opnd); - if (((memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi) || - (memOpnd.GetAddrMode() == MemOperand::kAddrModeBOrX)) && - memOpnd.GetBaseRegister() != nullptr) { - if (memOpnd.GetBaseRegister()->IsOfVary()) { - memOpnd.SetBaseRegister(static_cast(aarchCGFunc.GetOrCreateStackBaseRegOperand())); + auto *memOpnd = &static_cast(opnd); + if (((memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi) || + (memOpnd->GetAddrMode() == MemOperand::kAddrModeBOrX)) && + memOpnd->GetBaseRegister() != nullptr) { + if (memOpnd->GetBaseRegister()->IsOfVary()) { + memOpnd->SetBaseRegister(static_cast(aarchCGFunc.GetOrCreateStackBaseRegOperand())); } - RegOperand *memBaseReg = memOpnd.GetBaseRegister(); + RegOperand *memBaseReg = memOpnd->GetBaseRegister(); if (memBaseReg->GetRegisterNumber() == RFP) { RegOperand &newBaseOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(stackBaseReg, k64BitSize, kRegTyInt); MemOperand &newMemOpnd = aarchCGFunc.GetOrCreateMemOpnd( - memOpnd.GetAddrMode(), memOpnd.GetSize(), &newBaseOpnd, memOpnd.GetIndexRegister(), - memOpnd.GetOffsetImmediate(), memOpnd.GetSymbol()); + memOpnd->GetAddrMode(), memOpnd->GetSize(), &newBaseOpnd, memOpnd->GetIndexRegister(), + 
memOpnd->GetOffsetImmediate(), memOpnd->GetSymbol()); insn.SetOperand(i, newMemOpnd); + memOpnd = &newMemOpnd; stackBaseOpnd = true; } } - if ((memOpnd.GetAddrMode() != MemOperand::kAddrModeBOi) || !memOpnd.IsIntactIndexed()) { + if ((memOpnd->GetAddrMode() != MemOperand::kAddrModeBOi) || !memOpnd->IsIntactIndexed()) { continue; } - OfstOperand *ofstOpnd = memOpnd.GetOffsetImmediate(); + OfstOperand *ofstOpnd = memOpnd->GetOffsetImmediate(); if (ofstOpnd == nullptr) { continue; } @@ -69,10 +70,10 @@ void AArch64FPLROffsetAdjustment::AdjustmentOffsetForOpnd(Insn &insn, AArch64CGF ofstOpnd->SetVary(kAdjustVary); } if (ofstOpnd->GetVary() == kAdjustVary || ofstOpnd->GetVary() == kNotVary) { - bool condition = aarchCGFunc.IsOperandImmValid(insn.GetMachineOpcode(), &memOpnd, i); + bool condition = aarchCGFunc.IsOperandImmValid(insn.GetMachineOpcode(), memOpnd, i); if (!condition) { MemOperand &newMemOpnd = aarchCGFunc.SplitOffsetWithAddInstruction( - memOpnd, memOpnd.GetSize(), static_cast(R16), false, &insn, insn.IsLoadStorePair()); + *memOpnd, memOpnd->GetSize(), static_cast(R16), false, &insn, insn.IsLoadStorePair()); insn.SetOperand(i, newMemOpnd); } } diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp index 797eca00fd..4b730d792c 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp @@ -154,6 +154,9 @@ void AArch64ReachingDefinition::AddRetPseudoInsn(BB &bb) { if (regNO == kInvalidRegNO) { return; } + if (bb.GetKind() == BB::kBBGoto) { + return; /* a goto block should not have unreachable instr */ + } if (regNO == R0) { RegOperand ®Opnd = diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_standardize.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_standardize.cpp new file mode 100644 index 0000000000..cb7eb59be6 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_standardize.cpp @@ 
-0,0 +1,234 @@ +/* + * Copyright (c) [2022] Futurewei Technologies, Inc. All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "aarch64_standardize.h" +#include "aarch64_isa.h" +#include "aarch64_cg.h" +#include "insn.h" + +namespace maplebe { + +using namespace abstract; +AbstractIR2Target abstract2TargetTable[kMopLast] { + {MOP_undef, {{MOP_pseudo_none, {}, {}}}}, + + {MOP_copy_ri_8, {{MOP_wmovri32, {kAbtractReg, kAbtractImm}, {0, 1}}}}, + {MOP_copy_rr_8, {{MOP_wmovrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_ri_16, {{MOP_wmovri32, {kAbtractReg, kAbtractImm}, {0, 1}}}}, + {MOP_copy_rr_16, {{MOP_wmovrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_ri_32, {{MOP_wmovri32, {kAbtractReg, kAbtractImm}, {0, 1}}}}, + {MOP_copy_rr_32, {{MOP_wmovrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_ri_64, {{MOP_xmovri64, {kAbtractReg, kAbtractImm}, {0, 1}}}}, + {MOP_copy_rr_64, {{MOP_xmovrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {MOP_copy_fi_8, {{MOP_xvmovsr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_ff_8, {{MOP_xvmovs, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_fi_16, {{MOP_xvmovsr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_ff_16, {{MOP_xvmovs, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_fi_32, {{MOP_xvmovsr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_ff_32, {{MOP_xvmovs, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_fi_64, {{MOP_xvmovdr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_ff_64, {{MOP_xvmovd, {kAbtractReg, 
kAbtractReg}, {0, 1}}}}, + + {MOP_zext_rr_16_8, {{MOP_xuxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_16_8, {{MOP_xsxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_zext_rr_32_8, {{MOP_xuxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_32_8, {{MOP_xsxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_zext_rr_32_16, {{MOP_xuxth32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_32_16, {{MOP_xsxth32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {MOP_zext_rr_64_8, {{MOP_xuxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_64_8, {{MOP_xsxtb64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_zext_rr_64_16, {{MOP_xuxth32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_64_16, {{MOP_xsxth64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_zext_rr_64_32, {{MOP_xuxtw64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_64_32, {{MOP_xsxtw64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {MOP_cvt_fr_u32, {{MOP_vcvtufr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_fr_u64, {{MOP_xvcvtudr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_fr_i32, {{MOP_vcvtfr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_fr_i64, {{MOP_xvcvtdr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {MOP_cvt_rf_u32, {{MOP_vcvturf, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_rf_u64, {{MOP_xvcvturd, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_rf_i32, {{MOP_vcvtrf, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_rf_i64, {{MOP_xvcvtrd, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {MOP_cvt_ff_64_32, {{MOP_xvcvtdf, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_ff_32_64, {{MOP_xvcvtfd, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {MOP_str_8, {{MOP_wstrb, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_str_16, {{MOP_wstrh, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_str_32, {{MOP_wstr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_str_64, {{MOP_xstr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_load_8, {{MOP_wldrb, {kAbtractReg, kAbtractMem}, 
{0, 1}}}}, + {MOP_load_16, {{MOP_wldrh, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_load_32, {{MOP_wldr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_load_64, {{MOP_xldr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_str_f_8, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_str_f_16, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_str_f_32, {{MOP_sstr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_str_f_64, {{MOP_dstr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_load_f_8, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_load_f_16, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_load_f_32, {{MOP_sldr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_load_f_64, {{MOP_dldr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + + {MOP_add_8, {{MOP_waddrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_add_16, {{MOP_waddrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_add_32, {{MOP_waddrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_add_64, {{MOP_xaddrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_sub_8, {{MOP_wsubrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_sub_16, {{MOP_wsubrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_sub_32, {{MOP_wsubrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_sub_64, {{MOP_xsubrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_or_8, {{MOP_wiorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_or_16, {{MOP_wiorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_or_32, {{MOP_wiorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_or_64, {{MOP_xiorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_xor_8, {{MOP_weorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_xor_16, {{MOP_weorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_xor_32, {{MOP_weorrrr, {kAbtractReg, kAbtractReg, 
kAbtractReg}, {0, 1, 2}}}}, + {MOP_xor_64, {{MOP_xeorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_and_8, {{MOP_wandrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_and_16, {{MOP_wandrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_and_32, {{MOP_wandrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_and_64, {{MOP_xandrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + + {MOP_add_f_8, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_add_f_16, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_add_f_32, {{MOP_sadd, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_add_f_64, {{MOP_dadd, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_sub_f_8, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_sub_f_16, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_sub_f_32, {{MOP_ssub, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_sub_f_64, {{MOP_dsub, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + + {MOP_shl_8, {{MOP_wlslrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_shl_16, {{MOP_wlslrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_shl_32, {{MOP_wlslrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_shl_64, {{MOP_xlslrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_ashr_8, {{MOP_wasrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_ashr_16, {{MOP_wasrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_ashr_32, {{MOP_wasrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_ashr_64, {{MOP_xasrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_lshr_8, {{MOP_wlsrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_lshr_16, {{MOP_wlsrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_lshr_32, {{MOP_wlsrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + 
{MOP_lshr_64, {{MOP_xlsrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + + {MOP_neg_8, {{MOP_winegrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_neg_16, {{MOP_winegrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_neg_32, {{MOP_winegrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_neg_64, {{MOP_xinegrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_neg_f_8, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_neg_f_16, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_neg_f_32, {{MOP_wfnegrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_neg_f_64, {{MOP_xfnegrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_not_8, {{MOP_wnotrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_not_16, {{MOP_wnotrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_not_32, {{MOP_wnotrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_not_64, {{MOP_xnotrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {MOP_comment, {{MOP_nop, {kAbtractNone}, {}}}}, +}; + +Operand *AArch64Standardize::GetInsnResult(Insn *insn) { + for (uint32 i = 0; i < insn->GetOperandSize(); ++i) { + if (insn->OpndIsDef(i)) { + return &(insn->GetOperand(i)); + } + } + return nullptr; +} + +void AArch64Standardize::SelectTargetInsn(Insn *insn) { + MOperator abstractMop = insn->GetMachineOpcode(); + CHECK_FATAL(abstractMop < kMopLast, "SelectTargetInsn: abstract instruction opcode out-of-bound"); + AbstractIR2Target &entry = abstract2TargetTable[abstractMop]; + CHECK_FATAL(entry.abstractMop == abstractMop, "SelectTargetInsn: Invalid abstract instruction"); + + uint32 numTargetInsn = entry.targetMap.size(); + for (uint32 j = 0; j < numTargetInsn; ++j) { + TargetMopGen &targetMopGen = entry.targetMap[j]; + uint32 numMapOrder = targetMopGen.mappingOrder.size(); + MOperator targetMop = targetMopGen.targetMop; + Insn *newInsn = &GetCgFunc()->GetInsnBuilder()->BuildInsn(targetMop, AArch64CG::kMd[targetMop]); + newInsn->ResizeOpnds(numMapOrder); + for (uint32 i = 0; i < numMapOrder; ++i) { + uint8 order = 
targetMopGen.mappingOrder[i];
+      switch (targetMopGen.targetOpndAction[i]) {
+        case kAbtractReg:
+        case kAbtractMem:
+          newInsn->SetOperand(order, insn->GetOperand(i));
+          break;
+        case kAbtractImm: {
+          const InsnDesc *md = &AArch64CG::kMd[targetMop];
+          ImmOperand &immOpnd = static_cast<ImmOperand&>(insn->GetOperand(i));
+          if (md->IsValidImmOpnd(immOpnd.GetValue())) {
+            newInsn->SetOperand(order, immOpnd);
+          } else {
+            Operand *resOpnd = GetInsnResult(insn);
+            CHECK_FATAL(resOpnd, "SelectTargetInsn: No result operand");
+            AArch64CGFunc *a64func = static_cast<AArch64CGFunc*>(GetCgFunc());
+            BB &saveCurBB = *GetCgFunc()->GetCurBB();
+            a64func->GetDummyBB()->ClearInsns();
+            GetCgFunc()->SetCurBB(*a64func->GetDummyBB());
+            a64func->SelectCopyImm(*resOpnd, immOpnd, (resOpnd->GetSize() == k32BitSize) ? PTY_i32 : PTY_i64);
+            insn->GetBB()->InsertBeforeInsn(*a64func->GetDummyBB(), *insn);
+            GetCgFunc()->SetCurBB(saveCurBB);
+            newInsn = nullptr;
+          }
+          break;
+        }
+        case kAbtractNone:
+          break;
+      }
+    }
+    if (newInsn) {
+      insn->GetBB()->InsertInsnBefore(*insn, *newInsn);
+    }
+  }
+  insn->GetBB()->RemoveInsn(*insn);
+}
+
+void AArch64Standardize::StdzMov(maplebe::Insn &insn) {
+  SelectTargetInsn(&insn);
+}
+
+void AArch64Standardize::StdzStrLdr(Insn &insn) {
+  SelectTargetInsn(&insn);
+}
+
+void AArch64Standardize::StdzBasicOp(Insn &insn) {
+  SelectTargetInsn(&insn);
+}
+
+void AArch64Standardize::StdzUnaryOp(Insn &insn) {
+  SelectTargetInsn(&insn);
+}
+
+void AArch64Standardize::StdzCvtOp(Insn &insn) {
+  SelectTargetInsn(&insn);
+}
+
+void AArch64Standardize::StdzShiftOp(Insn &insn) {
+  SelectTargetInsn(&insn);
+}
+void AArch64Standardize::StdzCommentOp(Insn &insn) {
+  SelectTargetInsn(&insn);
+}
+
+}
diff --git a/src/mapleall/maple_be/src/cg/cg.cpp b/src/mapleall/maple_be/src/cg/cg.cpp
index 9fe0fc7afb..f50c3c26f9 100644
--- a/src/mapleall/maple_be/src/cg/cg.cpp
+++ b/src/mapleall/maple_be/src/cg/cg.cpp
@@ -20,6 +20,10 @@ using namespace maple;
 #define JAVALANG (mirModule->IsJavaModule())
+uint32
VregInfo::virtualRegCount = kBaseVirtualRegNO;
+uint32 VregInfo::maxRegCount = 0;
+std::vector<VirtualRegNode> VregInfo::vRegTable;
+
 void Globals::SetTarget(CG &target) {
   cg = &target;
 }
diff --git a/src/mapleall/maple_be/src/cg/cg_cfg.cpp b/src/mapleall/maple_be/src/cg/cg_cfg.cpp
index 4572d2501e..9ba5e48366 100644
--- a/src/mapleall/maple_be/src/cg/cg_cfg.cpp
+++ b/src/mapleall/maple_be/src/cg/cg_cfg.cpp
@@ -977,8 +977,10 @@ void CGCFG::ReverseCriticalEdge(BB &cbb) {
 bool CgHandleCFG::PhaseRun(maplebe::CGFunc &f) {
   CGCFG *cfg = f.GetMemoryPool()->New<CGCFG>(f);
   f.SetTheCFG(cfg);
+  cfg->MarkLabelTakenBB();
   /* build control flow graph */
   f.GetTheCFG()->BuildCFG();
+  f.HandleFuncCfg(cfg);
   return false;
 }
 MAPLE_TRANSFORM_PHASE_REGISTER(CgHandleCFG, handlecfg)
diff --git a/src/mapleall/maple_be/src/cg/cg_irbuilder.cpp b/src/mapleall/maple_be/src/cg/cg_irbuilder.cpp
index 8ce0e5803f..fd34792c96 100644
--- a/src/mapleall/maple_be/src/cg/cg_irbuilder.cpp
+++ b/src/mapleall/maple_be/src/cg/cg_irbuilder.cpp
@@ -87,6 +87,10 @@ ImmOperand &OperandBuilder::CreateImm(const MIRSymbol &symbol, int64 offset, int
   *alloc.New<ImmOperand>(symbol, offset, relocs, false);
 }
+OfstOperand &OperandBuilder::CreateOfst(int64 offset, uint32 size, MemPool *mp) {
+  return mp ? *mp->New<OfstOperand>(offset, size) : *alloc.New<OfstOperand>(offset, size);
+}
+
 MemOperand &OperandBuilder::CreateMem(uint32 size, MemPool *mp) {
   return mp ?
*mp->New<MemOperand>(size) : *alloc.New<MemOperand>(size);
 }
@@ -94,13 +98,12 @@
 MemOperand &OperandBuilder::CreateMem(RegOperand &baseOpnd, int64 offset, uint32 size) {
   MemOperand *memOprand = &CreateMem(size);
   memOprand->SetBaseRegister(baseOpnd);
-  memOprand->SetOffsetOperand(CreateImm(baseOpnd.GetSize(), offset));
+  memOprand->SetOffsetOperand(CreateOfst(offset, baseOpnd.GetSize()));
   return *memOprand;
 }
 RegOperand &OperandBuilder::CreateVReg(uint32 size, RegType type, MemPool *mp) {
-  virtualRegNum++;
-  regno_t vRegNO = kBaseVirtualRegNO + virtualRegNum;
+  regno_t vRegNO = virtualReg.GetNextVregNO(type, size / k8BitSize);
   return mp ? *mp->New<RegOperand>(vRegNO, size, type) : *alloc.New<RegOperand>(vRegNO, size, type);
 }
diff --git a/src/mapleall/maple_be/src/cg/cg_option.cpp b/src/mapleall/maple_be/src/cg/cg_option.cpp
index f5de2bf9bb..a3a72d488b 100644
--- a/src/mapleall/maple_be/src/cg/cg_option.cpp
+++ b/src/mapleall/maple_be/src/cg/cg_option.cpp
@@ -118,6 +118,7 @@ bool CGOptions::doCalleeToSpill = false;
 bool CGOptions::doRegSavesOpt = false;
 bool CGOptions::useSsaPreSave = false;
 bool CGOptions::useSsuPreRestore = false;
+bool CGOptions::useNewCg = false;
 bool CGOptions::replaceASM = false;
 bool CGOptions::generalRegOnly = false;
 bool CGOptions::fastMath = false;
@@ -504,6 +505,10 @@ bool CGOptions::SolveOptions(bool isDebug) {
     opts::cg::ssupreRestore ? EnableSsuPreRestore() : DisableSsuPreRestore();
   }
+
+  if (opts::cg::newCg.IsEnabledByUser()) {
+    opts::cg::newCg ?
EnableNewCg() : DisableNewCg();
+  }
+
   if (opts::cg::lsraBb.IsEnabledByUser()) {
     SetLSRABBOptSize(opts::cg::lsraBb);
   }
diff --git a/src/mapleall/maple_be/src/cg/cg_options.cpp b/src/mapleall/maple_be/src/cg/cg_options.cpp
index 635bb79df8..878f7d0a2b 100644
--- a/src/mapleall/maple_be/src/cg/cg_options.cpp
+++ b/src/mapleall/maple_be/src/cg/cg_options.cpp
@@ -160,6 +160,12 @@ maplecl::Option<bool> ssupreRestore({"--ssupre-restore"},
     {cgCategory},
     maplecl::DisableWith("--no-ssupre-restore"));
+maplecl::Option<bool> newCg({"--newcg"},
+    " --newcg \tUse new CG infrastructure\n"
+    " --no-newcg\n",
+    {cgCategory},
+    maplecl::DisableWith("--no-newcg"));
+
 maplecl::Option<bool> prepeep({"--prepeep"},
     " --prepeep \tPerform peephole optimization before RA\n"
     " --no-prepeep\n",
diff --git a/src/mapleall/maple_be/src/cg/cg_ssa.cpp b/src/mapleall/maple_be/src/cg/cg_ssa.cpp
index 4b4aa8f09e..f28a528e2a 100644
--- a/src/mapleall/maple_be/src/cg/cg_ssa.cpp
+++ b/src/mapleall/maple_be/src/cg/cg_ssa.cpp
@@ -123,7 +123,7 @@ void CGSSAInfo::RenameBB(BB &bb) {
   }
   AddRenamedBB(bb.GetId());
   /* record version stack size */
-  size_t tempSize = vRegStk.empty() ? allSSAOperands.size() + cgFunc->GetFirstMapleIrVRegNO() + 1 :
+  size_t tempSize = vRegStk.empty() ?
allSSAOperands.size() + kBaseVirtualRegNO + 1 : vRegStk.rbegin()->first + 1; std::vector oriStackSize(tempSize, -1); for (auto it : vRegStk) { diff --git a/src/mapleall/maple_be/src/cg/cgbb.cpp b/src/mapleall/maple_be/src/cg/cgbb.cpp index 1a4b3f7216..45d9e1ae0f 100644 --- a/src/mapleall/maple_be/src/cg/cgbb.cpp +++ b/src/mapleall/maple_be/src/cg/cgbb.cpp @@ -25,6 +25,7 @@ const std::string BB::bbNames[BB::kBBLast] = { "BB_goto", "BB_igoto", "BB_ret", + "BB_noret", "BB_intrinsic", "BB_rangegoto", "BB_throw" @@ -180,6 +181,33 @@ void BB::InsertAtBeginning(BB &bb) { bb.firstInsn = bb.lastInsn = nullptr; } +void BB::InsertBeforeInsn(BB &fromBB, Insn &beforeInsn) { + if (fromBB.firstInsn == nullptr) { /* nothing to add */ + return; + } + + BB *toBB = beforeInsn.GetBB(); + FOR_BB_INSNS(insn, &fromBB) { + insn->SetBB(toBB); + } + + if (toBB->GetFirstInsn() == nullptr) { + toBB->SetFirstInsn(fromBB.GetFirstInsn()); + toBB->SetLastInsn(fromBB.GetLastInsn()); + } else { + if (beforeInsn.GetPrev()) { + beforeInsn.GetPrev()->SetNext(fromBB.GetFirstInsn()); + } else { + toBB->SetFirstInsn(fromBB.GetFirstInsn()); + } + fromBB.GetFirstInsn()->SetPrev(beforeInsn.GetPrev()); + beforeInsn.SetPrev(fromBB.GetLastInsn()); + fromBB.GetLastInsn()->SetNext(&beforeInsn); + } + fromBB.SetFirstInsn(nullptr); + fromBB.SetLastInsn(nullptr); +} + /* append all insns from bb into this bb */ void BB::InsertAtEnd(BB &bb) { if (bb.firstInsn == nullptr) { /* nothing to add */ diff --git a/src/mapleall/maple_be/src/cg/cgfunc.cpp b/src/mapleall/maple_be/src/cg/cgfunc.cpp index 73cfc6ece6..eb16792100 100644 --- a/src/mapleall/maple_be/src/cg/cgfunc.cpp +++ b/src/mapleall/maple_be/src/cg/cgfunc.cpp @@ -1418,10 +1418,14 @@ void InitHandleStmtFactory() { RegisterFactoryFunction(OP_asm, HandleAsm); } +/* member of CGFunc */ +void CGFunc::InitFactory() { + InitHandleExprFactory(); +} + CGFunc::CGFunc(MIRModule &mod, CG &cg, MIRFunction &mirFunc, BECommon &beCommon, MemPool &memPool, StackMemPool &stackMp, 
MapleAllocator &allocator, uint32 funcId) - : vRegTable(allocator.Adapter()), - bbVec(allocator.Adapter()), + : bbVec(allocator.Adapter()), vRegOperandTable(allocator.Adapter()), pRegSpillMemOperands(allocator.Adapter()), spillRegMemOperands(allocator.Adapter()), @@ -1454,10 +1458,10 @@ CGFunc::CGFunc(MIRModule &mod, CG &cg, MIRFunction &mirFunc, BECommon &beCommon, shortFuncName(cg.ExtractFuncName(mirFunc.GetName()) + "." + std::to_string(funcId), &memPool) { mirModule.SetCurFunction(&func); dummyBB = CreateNewBB(); - vRegCount = firstMapleIrVRegNO + func.GetPregTab()->Size(); - firstNonPregVRegNO = vRegCount; + vReg.SetCount(kBaseVirtualRegNO + func.GetPregTab()->Size()); + firstNonPregVRegNO = vReg.GetCount(); /* maximum register count initial be increased by 1024 */ - maxRegCount = vRegCount + 1024; + SetMaxRegNum(vReg.GetCount() + 1024); if (func.GetMayWriteToAddrofStack()) { SetStackProtectInfo(kAddrofStack); } @@ -1465,7 +1469,7 @@ CGFunc::CGFunc(MIRModule &mod, CG &cg, MIRFunction &mirFunc, BECommon &beCommon, insnBuilder = memPool.New(memPool); opndBuilder = memPool.New(memPool, func.GetPregTab()->Size()); - vRegTable.resize(maxRegCount); + vReg.VRegTableResize(GetMaxRegNum()); /* func.GetPregTab()->_preg_table[0] is nullptr, so skip it */ ASSERT(func.GetPregTab()->PregFromPregIdx(0) == nullptr, "PregFromPregIdx(0) must be nullptr"); for (size_t i = 1; i < func.GetPregTab()->Size(); ++i) { @@ -1772,6 +1776,7 @@ void CGFunc::CreateLmbcFormalParamInfo() { AssignLmbcFormalParams(); } + void CGFunc::GenerateInstruction() { InitHandleExprFactory(); InitHandleStmtFactory(); @@ -2091,6 +2096,7 @@ void CGFunc::HandleFunction() { GenSaveMethodInfoCode(*firstBB); /* build control flow graph */ theCFG = memPool->New(*this); + theCFG->MarkLabelTakenBB(); theCFG->BuildCFG(); RemoveUnreachableBB(); AddCommonExitBB(); @@ -2099,7 +2105,6 @@ void CGFunc::HandleFunction() { } MarkCleanupBB(); DetermineReturnTypeofCall(); - theCFG->MarkLabelTakenBB(); 
theCFG->UnreachCodeAnalysis(); if (mirModule.GetSrcLang() == kSrcLangC) { theCFG->WontExitAnalysis(); @@ -2333,6 +2338,15 @@ bool CgHandleFunction::PhaseRun(maplebe::CGFunc &f) { } MAPLE_TRANSFORM_PHASE_REGISTER(CgHandleFunction, handlefunction) +bool CgPatchLongBranch::PhaseRun(maplebe::CGFunc &f) { + f.PatchLongBranch(); + if (!f.GetCG()->GetCGOptions().DoEmitCode() || f.GetCG()->GetCGOptions().DoDumpCFG()) { + f.DumpCFG(); + } + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgPatchLongBranch, patchlongbranch) + bool CgFixCFLocOsft::PhaseRun(maplebe::CGFunc &f) { if (f.GetCG()->GetCGOptions().WithDwarf()) { f.DBGFixCallFrameLocationOffsets(); diff --git a/src/mapleall/maple_be/src/cg/insn.cpp b/src/mapleall/maple_be/src/cg/insn.cpp index a74c72254a..895da9b717 100644 --- a/src/mapleall/maple_be/src/cg/insn.cpp +++ b/src/mapleall/maple_be/src/cg/insn.cpp @@ -301,7 +301,7 @@ void Insn::SetMOP(const InsnDesc &idesc) { } void Insn::Dump() const { -ASSERT(md != nullptr, "md should not be nullptr"); + ASSERT(md != nullptr, "md should not be nullptr"); LogInfo::MapleLogger() << "< " << GetId() << " > "; LogInfo::MapleLogger() << md->name << "(" << mOp << ")"; diff --git a/src/mapleall/maple_be/src/cg/isel.cpp b/src/mapleall/maple_be/src/cg/isel.cpp index 45a030c8f9..a21878ed30 100644 --- a/src/mapleall/maple_be/src/cg/isel.cpp +++ b/src/mapleall/maple_be/src/cg/isel.cpp @@ -18,6 +18,7 @@ #include #include "factory.h" #include "cg.h" +#include "cgfunc.h" namespace maplebe { /* register, imm , memory, cond */ @@ -190,6 +191,7 @@ void HandleLabel(StmtNode &stmt, const MPISel &iSel) { ASSERT(stmt.GetOpCode() == OP_label, "error"); auto &label = static_cast(stmt); BB *newBB = cgFunc->StartNewBBImpl(false, label); + newBB->SetKind(BB::kBBFallthru); newBB->AddLabel(label.GetLabelIdx()); cgFunc->SetLab2BBMap(static_cast(newBB->GetLabIdx()), *newBB); cgFunc->SetCurBB(*newBB); @@ -245,10 +247,9 @@ void HandleReturn(StmtNode &stmt, MPISel &iSel) { auto &retNode = 
static_cast(stmt); ASSERT(retNode.NumOpnds() <= 1, "NYI return nodes number > 1"); if (retNode.NumOpnds() != 0) { - Operand *opnd = iSel.HandleExpr(retNode, *retNode.Opnd(0)); - iSel.SelectReturn(retNode, *opnd); + iSel.SelectReturn(retNode); } - iSel.SelectReturn(); + iSel.SelectReturn(retNode.NumOpnds() == 0); /* return stmt will jump to the ret BB, so curBB is gotoBB */ cgFunc->SetCurBBKind(BB::kBBGoto); cgFunc->SetCurBB(*cgFunc->StartNewBB(retNode)); @@ -280,8 +281,7 @@ void HandleCondbr(StmtNode &stmt, MPISel &iSel) { ASSERT(condNode != nullptr, "expect first operand of cond br"); /* select cmpOp Insn and get the result "opnd0". However, the opnd0 is not used * in most backend architectures */ - Operand *opnd0 = iSel.HandleExpr(stmt, *condNode); - iSel.SelectCondGoto(condGotoNode, *condNode, *opnd0); + iSel.SelectCondGoto(condGotoNode, *condNode); cgFunc->SetCurBB(*cgFunc->StartNewBB(condGotoNode)); } @@ -378,9 +378,12 @@ Operand *HandleConstVal(const BaseNode &parent [[maybe_unused]], BaseNode &expr, if (mirConst->GetKind() == kConstInt) { auto *mirIntConst = safe_cast(mirConst); return iSel.SelectIntConst(*mirIntConst, constValNode.GetPrimType()); + } else if (mirConst->GetKind() == kConstFloatConst) { + auto *mirFloatConst = safe_cast(mirConst); + return iSel.SelectFloatConst(*mirFloatConst, constValNode.GetPrimType(), parent); } else if (mirConst->GetKind() == kConstDoubleConst) { auto *mirDoubleConst = safe_cast(mirConst); - return iSel.SelectDoubleConst(*mirDoubleConst, constValNode.GetPrimType()); + return iSel.SelectDoubleConst(*mirDoubleConst, constValNode.GetPrimType(), parent); } else { CHECK_FATAL(false, "NIY"); } @@ -477,13 +480,76 @@ Operand *HandleRetype(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { Operand *HandleIntrinOp(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { auto &intrinsicopNode = static_cast(expr); switch (intrinsicopNode.GetIntrinsic()) { + case INTRN_C_sin: + return iSel.SelectSin(intrinsicopNode); + case 
INTRN_C_sinh: + return iSel.SelectSinh(intrinsicopNode); + case INTRN_C_asin: + return iSel.SelectAsin(intrinsicopNode); + case INTRN_C_cos: + return iSel.SelectCos(intrinsicopNode); + case INTRN_C_cosh: + return iSel.SelectCosh(intrinsicopNode); + case INTRN_C_acos: + return iSel.SelectAcos(intrinsicopNode); + case INTRN_C_atan: + return iSel.SelectAtan(intrinsicopNode); + case INTRN_C_exp: + return iSel.SelectExp(intrinsicopNode); + case INTRN_C_log: + return iSel.SelectLog(intrinsicopNode); + case INTRN_C_log10: + return iSel.SelectLog10(intrinsicopNode); + + case INTRN_C_sinf: + return iSel.SelectSinf(intrinsicopNode); + case INTRN_C_sinhf: + return iSel.SelectSinhf(intrinsicopNode); + case INTRN_C_asinf: + return iSel.SelectAsinf(intrinsicopNode); + case INTRN_C_cosf: + return iSel.SelectCosf(intrinsicopNode); + case INTRN_C_coshf: + return iSel.SelectCoshf(intrinsicopNode); + case INTRN_C_acosf: + return iSel.SelectAcosf(intrinsicopNode); + case INTRN_C_atanf: + return iSel.SelectAtanf(intrinsicopNode); + case INTRN_C_expf: + return iSel.SelectExpf(intrinsicopNode); + case INTRN_C_logf: + return iSel.SelectLogf(intrinsicopNode); + case INTRN_C_log10f: + return iSel.SelectLog10f(intrinsicopNode); + + case INTRN_C_ffs: + return iSel.SelectFfs(intrinsicopNode); + case INTRN_C_memcmp: + return iSel.SelectMemcmp(intrinsicopNode); + case INTRN_C_strlen: + return iSel.SelectStrlen(intrinsicopNode); + case INTRN_C_strcmp: + return iSel.SelectStrcmp(intrinsicopNode); + case INTRN_C_strncmp: + return iSel.SelectStrncmp(intrinsicopNode); + case INTRN_C_strchr: + return iSel.SelectStrchr(intrinsicopNode); + case INTRN_C_strrchr: + return iSel.SelectStrrchr(intrinsicopNode); + case INTRN_C_rev16_2: case INTRN_C_rev_4: case INTRN_C_rev_8: return iSel.SelectBswap(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_clz32: + case INTRN_C_clz64: + return iSel.SelectClz32(intrinsicopNode); + case INTRN_C_ctz32: + case INTRN_C_ctz64: + return 
iSel.SelectCtz32(intrinsicopNode); default: - ASSERT(false, "NIY, unsupported intrinsicop."); + CHECK_FATAL(false, "NIY, unsupported intrinsicop."); return nullptr; } } @@ -567,8 +633,14 @@ Operand *MPISel::HandleExpr(const BaseNode &parent, BaseNode &expr) { void MPISel::DoMPIS() { isel::InitHandleStmtFactory(); isel::InitHandleExprFactory(); + GetCurFunc()->Link2ISel(this); + SrcPosition lastLocPos = SrcPosition(); + SrcPosition lastMplPos = SrcPosition(); StmtNode *secondStmt = HandleFuncEntry(); for (StmtNode *stmt = secondStmt; stmt != nullptr; stmt = stmt->GetNext()) { + /* insert Insn for .loc before cg for the stmt */ + GetCurFunc()->GenerateLoc(stmt, lastLocPos, lastMplPos); + auto function = CreateProductFunction(stmt->GetOpCode()); CHECK_FATAL(function != nullptr, "unsupported opCode or has been lowered before"); function(*stmt, *this); @@ -689,14 +761,22 @@ void MPISel::SelectDassign(const DassignNode &stmt, Operand &opndRhs) { /* Generate Insn */ if (rhsType == PTY_agg) { /* Agg Type */ - SelectAggDassign(symbolInfo, symbolMem, opndRhs); + DassignNode &s = const_cast(stmt); + SelectAggDassign(symbolInfo, symbolMem, opndRhs, s); return; } PrimType memType = symbolInfo.primType; if (memType == PTY_agg) { memType = PTY_a64; } - SelectCopy(symbolMem, opndRhs, memType, rhsType); + if (IsSymbolRequireIndirection(*symbol)) { + RegOperand ®Opnd = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + SelectCopy(regOpnd, symbolMem, PTY_a64, PTY_a64); + MemOperand &stMem = GetOrCreateMemOpndFromSymbol(*symbol, stmt.GetFieldID(), ®Opnd); + SelectCopy(stMem, opndRhs, memType, rhsType); + } else { + SelectCopy(symbolMem, opndRhs, memType, rhsType); + } return; } @@ -746,6 +826,15 @@ ImmOperand *MPISel::SelectIntConst(MIRIntConst &intConst, PrimType primType) con return &cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(primType), intConst.GetExtValue()); } +Operand *MPISel::SelectFloatConst(MIRFloatConst &floatConst, PrimType primType, + const BaseNode 
&parent [[maybe_unused]]) const { + return &cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(primType), floatConst.GetIntValue()); +} + +Operand *MPISel::SelectDoubleConst(MIRDoubleConst &doubleConst, PrimType primType, const BaseNode &parent) const { + return &cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(primType), doubleConst.GetIntValue()); +} + Operand *MPISel::SelectShift(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent [[maybe_unused]]) { PrimType primType = node.GetPrimType(); @@ -839,12 +928,22 @@ Operand *MPISel::SelectDread(const BaseNode &parent [[maybe_unused]], const Addr CHECK_FATAL(primType == maple::PTY_agg, "NIY"); return &symbolMem; } - /* for BasicType, load symbolVal to register. */ - RegOperand ®Opnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), - cgFunc->GetRegTyFromPrimTy(primType)); /* Generate Insn */ - SelectCopy(regOpnd, symbolMem, primType, symbolType); - return ®Opnd; + if (IsSymbolRequireIndirection(*symbol)) { + RegOperand ®Opnd = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + SelectCopy(regOpnd, symbolMem, PTY_a64, PTY_a64); + RegOperand ®Opnd1 = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + MemOperand &ldMem = GetOrCreateMemOpndFromSymbol(*symbol, expr.GetFieldID(), ®Opnd); + SelectCopy(regOpnd1, ldMem, primType, symbolType); + return ®Opnd1; + } else { + /* for BasicType, load symbolVal to register. 
*/ + RegOperand ®Opnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + SelectCopy(regOpnd, symbolMem, primType, symbolType); + return ®Opnd; + } } Operand *MPISel::SelectAdd(const BinaryNode &node, Operand &opnd0, @@ -909,7 +1008,7 @@ void MPISel::SelectExtractbits(RegOperand &resOpnd, RegOperand &opnd0, uint8 bit } Operand *MPISel::SelectExtractbits(const BaseNode &parent [[maybe_unused]], - const ExtractbitsNode &node, Operand &opnd0) { + ExtractbitsNode &node, Operand &opnd0) { PrimType fromType = node.Opnd(0)->GetPrimType(); PrimType toType = node.GetPrimType(); uint8 bitSize = node.GetBitsSize(); @@ -1227,36 +1326,6 @@ Operand *MPISel::SelectDepositBits(const DepositbitsNode &node, Operand &opnd0, return &resOpnd; } -Operand *MPISel::SelectAbs(UnaryNode &node, Operand &opnd0) { - PrimType primType = node.GetPrimType(); - if (IsPrimitiveVector(primType)) { - CHECK_FATAL(false, "NIY"); - } else if (IsPrimitiveFloat(primType)) { - CHECK_FATAL(false, "NIY"); - } else if (IsUnsignedInteger(primType)) { - return &opnd0; - } else { - /* - * abs(x) = (x XOR y) - y - * y = x >>> (bitSize - 1) - */ - uint32 bitSize = GetPrimTypeBitSize(primType); - CHECK_FATAL(bitSize == k64BitSize || bitSize == k32BitSize, "only support 32-bits or 64-bits"); - RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType); - ImmOperand &immOpnd = cgFunc->GetOpndBuilder()->CreateImm(bitSize, bitSize - 1); - RegOperand ®Opndy = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, - cgFunc->GetRegTyFromPrimTy(primType)); - SelectShift(regOpndy, regOpnd0, immOpnd, OP_ashr, primType, primType); - RegOperand &tmpOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, - cgFunc->GetRegTyFromPrimTy(primType)); - SelectBxor(tmpOpnd, regOpnd0, regOpndy, primType); - RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, - cgFunc->GetRegTyFromPrimTy(primType)); - SelectSub(resOpnd, tmpOpnd, regOpndy, primType); - return &resOpnd; - } -} - 
Operand *MPISel::SelectAlloca(UnaryNode &node, Operand &opnd0) { ASSERT(node.GetPrimType() == PTY_a64, "wrong type"); PrimType srcType = node.Opnd(0)->GetPrimType(); @@ -1350,59 +1419,43 @@ StmtNode *MPISel::HandleFuncEntry() { RegOperand &MPISel::SelectCopy2Reg(Operand &src, PrimType toType, PrimType fromType) { uint32 fromSize = GetPrimTypeBitSize(fromType); uint32 toSize = GetPrimTypeBitSize(toType); - if (src.IsRegister() && fromSize == toSize) { + bool isReg = src.IsRegister(); + uint32 srcRegSize = isReg ? src.GetSize() : 0; + if ((isReg && fromSize == toSize) || (fromType == PTY_unknown && isReg && srcRegSize == toSize)) { return static_cast(src); } - RegOperand &dest = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), - cgFunc->GetRegTyFromPrimTy(toType)); - if (fromSize != toSize) { - SelectCopy(dest, src, toType, fromType); - } else { + RegOperand &dest = cgFunc->GetOpndBuilder()->CreateVReg(toSize, cgFunc->GetRegTyFromPrimTy(toType)); + if (fromType == PTY_unknown || fromSize == toSize) { SelectCopy(dest, src, toType); + } else if (fromSize != toSize) { + SelectCopy(dest, src, toType, fromType); } return dest; } -/* Pretty sure that implicit type conversions will not occur. */ -RegOperand &MPISel::SelectCopy2Reg(Operand &src, PrimType dtype) { - ASSERT(src.GetSize() == GetPrimTypeBitSize(dtype), "NIY"); - if (src.IsRegister()) { - return static_cast(src); - } - RegOperand &dest = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), - cgFunc->GetRegTyFromPrimTy(dtype)); - SelectCopy(dest, src, dtype); - return dest; -} /* This function copy/load/store src to a dest, Once the src and dest types * are different, implicit conversion is executed here. 
*/ void MPISel::SelectCopy(Operand &dest, Operand &src, PrimType toType, PrimType fromType) { - if (GetPrimTypeBitSize(fromType) != GetPrimTypeBitSize(toType)) { + uint32 fromSize = GetPrimTypeBitSize(fromType); + uint32 toSize = GetPrimTypeBitSize(toType); + if (fromType != PTY_unknown && fromSize != toSize) { RegOperand &srcRegOpnd = SelectCopy2Reg(src, fromType); - RegOperand &dstRegOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), - cgFunc->GetRegTyFromPrimTy(toType)); + RegOperand &dstRegOpnd = cgFunc->GetOpndBuilder()->CreateVReg(toSize, cgFunc->GetRegTyFromPrimTy(toType)); SelectIntCvt(dstRegOpnd, srcRegOpnd, toType, fromType); SelectCopy(dest, dstRegOpnd, toType); } else { - SelectCopy(dest, src, toType); - } -} - -/* Pretty sure that implicit type conversions will not occur. */ -void MPISel::SelectCopy(Operand &dest, Operand &src, PrimType type) { - ASSERT(dest.GetSize() == src.GetSize(), "NIY"); - if (dest.GetKind() == Operand::kOpdRegister) { - SelectCopyInsn(dest, src, type); - } else if (dest.GetKind() == Operand::kOpdMem) { - if (src.GetKind() != Operand::kOpdRegister) { - RegOperand &tempReg = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(type), - cgFunc->GetRegTyFromPrimTy(type)); - SelectCopyInsn(tempReg, src, type); - SelectCopyInsn(dest, tempReg, type); - } else { - SelectCopyInsn(dest, src, type); + if (dest.GetKind() == Operand::kOpdMem || src.GetKind() == Operand::kOpdMem) { + if (src.GetKind() != Operand::kOpdRegister) { + RegOperand &tempReg = cgFunc->GetOpndBuilder()->CreateVReg(toSize, cgFunc->GetRegTyFromPrimTy(toType)); + SelectCopyInsn(tempReg, src, toType); + SelectCopyInsn(dest, tempReg, toType); + } else { + SelectCopyInsn(dest, src, toType); + } + } else if (dest.GetKind() == Operand::kOpdRegister) { + SelectCopyInsn(dest, src, toType); + }else { + CHECK_FATAL(false, "NIY, CPU supports more than memory and registers"); } - }else { - CHECK_FATAL(false, "NIY, CPU supports more than memory and 
registers"); } } @@ -1455,7 +1508,8 @@ void MPISel::SelectMin(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimTyp SelectMinOrMax(true, resOpnd, opnd0, opnd1, primType); } -Operand *MPISel::SelectLiteral(MIRDoubleConst &c, MIRFunction &func, uint32 labelIdx) const { +template +Operand *MPISel::SelectLiteral(T &c, MIRFunction &func, uint32 labelIdx) const { MIRSymbol *st = func.GetSymTab()->CreateSymbol(kScopeLocal); std::string lblStr(".LB_"); MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(func.GetStIdx().Idx()); @@ -1475,6 +1529,10 @@ Operand *MPISel::SelectLiteral(MIRDoubleConst &c, MIRFunction &func, uint32 labe return nullptr; } +template Operand *MPISel::SelectLiteral(MIRFloatConst &c, MIRFunction &func, uint32 labelIdx) const; + +template Operand *MPISel::SelectLiteral(MIRDoubleConst &c, MIRFunction &func, uint32 labelIdx) const; + Operand *MPISel::SelectMax(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { PrimType primType = node.GetPrimType(); RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), @@ -1504,18 +1562,6 @@ Operand *MPISel::SelectRetype(TypeCvtNode &node, Operand &opnd0) { return nullptr; } -void MPISel::HandleFuncExit() const { - BlockNode *block = cgFunc->GetFunction().GetBody(); - ASSERT(block != nullptr, "get func body block failed in CGFunc::GenerateInstruction"); - cgFunc->GetCurBB()->SetLastStmt(*block->GetLast()); - /* Set lastbb's frequency */ - cgFunc->SetLastBB(*cgFunc->GetCurBB()); - /* the last BB is return BB */ - cgFunc->GetLastBB()->SetKind(BB::kBBReturn); - - cgFunc->AddCommonExitBB(); -} - bool InstructionSelector::PhaseRun(maplebe::CGFunc &f) { MPISel *mpIS = f.GetCG()->CreateMPIsel(*GetPhaseMemPool(), f); mpIS->DoMPIS(); diff --git a/src/mapleall/maple_be/src/cg/standardize.cpp b/src/mapleall/maple_be/src/cg/standardize.cpp index cf14b253f5..da8ca84a37 100644 --- a/src/mapleall/maple_be/src/cg/standardize.cpp +++ 
b/src/mapleall/maple_be/src/cg/standardize.cpp @@ -22,6 +22,9 @@ void Standardize::DoStandardize() { /* two address mapping first */ FOR_ALL_BB(bb, cgFunc) { FOR_BB_INSNS(insn, bb) { + if (insn->IsDbgInsn()) { + continue; + } if (insn->IsMachineInstruction()) { continue; } @@ -34,6 +37,9 @@ void Standardize::DoStandardize() { /* standardize for each op */ FOR_ALL_BB(bb, cgFunc) { FOR_BB_INSNS(insn, bb) { + if (insn->IsDbgInsn()) { + continue; + } if (insn->IsMachineInstruction()) { continue; } @@ -44,11 +50,13 @@ void Standardize::DoStandardize() { } else if (insn->IsBasicOp()) { StdzBasicOp(*insn); } else if (insn->IsUnaryOp()) { - StdzUnaryOp(*insn, *cgFunc); + StdzUnaryOp(*insn); } else if (insn->IsConversion()) { - StdzCvtOp(*insn, *cgFunc); + StdzCvtOp(*insn); } else if (insn->IsShift()) { - StdzShiftOp(*insn, *cgFunc); + StdzShiftOp(*insn); + } else if (insn->IsComment()) { + StdzCommentOp(*insn); } else { LogInfo::MapleLogger() << "Need STDZ function for " << insn->GetDesc()->GetName() << "\n"; CHECK_FATAL(false, "NIY"); @@ -61,19 +69,37 @@ void Standardize::AddressMapping(Insn &insn) const { Operand &dest = insn.GetOperand(kInsnFirstOpnd); Operand &src1 = insn.GetOperand(kInsnSecondOpnd); uint32 destSize = dest.GetSize(); + CHECK_FATAL(dest.IsRegister(), "AddressMapping: not reg operand"); + bool isInt = static_cast(dest).GetRegisterType() == kRegTyInt ? 
true : false; MOperator mOp = abstract::MOP_undef; switch (destSize) { case k8BitSize: - mOp = abstract::MOP_copy_rr_8; + if (isInt) { + mOp = abstract::MOP_copy_rr_8; + } else { + mOp = abstract::MOP_copy_ff_8; + } break; case k16BitSize: - mOp = abstract::MOP_copy_rr_16; + if (isInt) { + mOp = abstract::MOP_copy_rr_16; + } else { + mOp = abstract::MOP_copy_ff_16; + } break; case k32BitSize: - mOp = abstract::MOP_copy_rr_32; + if (isInt) { + mOp = abstract::MOP_copy_rr_32; + } else { + mOp = abstract::MOP_copy_ff_32; + } break; case k64BitSize: - mOp = abstract::MOP_copy_rr_64; + if (isInt) { + mOp = abstract::MOP_copy_rr_64; + } else { + mOp = abstract::MOP_copy_ff_64; + } break; default: break; diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp index a0ab915ee2..797ef73824 100644 --- a/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp @@ -21,8 +21,20 @@ #include "isel.h" namespace maplebe { + +void X64MPIsel::HandleFuncExit() const { + BlockNode *block = cgFunc->GetFunction().GetBody(); + ASSERT(block != nullptr, "get func body block failed in CGFunc::GenerateInstruction"); + cgFunc->GetCurBB()->SetLastStmt(*block->GetLast()); + /* Set lastbb's frequency */ + cgFunc->SetLastBB(*cgFunc->GetCurBB()); + /* the last BB is return BB */ + cgFunc->GetLastBB()->SetKind(BB::kBBReturn); + cgFunc->PushBackExitBBsVec(*cgFunc->GetLastBB()); +} + /* Field-ID 0 is assigned to the top level structure. (Field-ID also defaults to 0 if it is not a structure.) 
*/ -MemOperand &X64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId) const { +MemOperand &X64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId, RegOperand *baseReg) { PrimType symType; int32 fieldOffset = 0; if (fieldId == 0) { @@ -69,7 +81,8 @@ MemOperand &X64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uin return *result; } -void X64MPIsel::SelectReturn(NaryStmtNode &retNode, Operand &opnd) { +void X64MPIsel::SelectReturn(NaryStmtNode &retNode) { + Operand &opnd = *HandleExpr(retNode, *retNode.Opnd(0)); MIRType *retType = cgFunc->GetFunction().GetReturnType(); X64CallConvImpl retLocator(cgFunc->GetBecommon()); CCLocInfo retMech; @@ -131,7 +144,7 @@ void X64MPIsel::SelectPseduoForReturn(std::vector &retRegs) { } } -void X64MPIsel::SelectReturn() { +void X64MPIsel::SelectReturn(bool noOpnd [[maybe_unused]]) { /* jump to epilogue */ MOperator mOp = x64::MOP_jmpq_l; LabelNode *endLabel = cgFunc->GetEndLabel(); @@ -448,7 +461,15 @@ void X64MPIsel::SelectLibCallNArg(const std::string &funcName, std::vectorGetLabelIdx(); + Operand *result = SelectLiteral(floatConst, cgFunc->GetFunction(), labelIdxTmp++); + cgFunc->SetLabelIdx(labelIdxTmp); + return result; +} + +Operand *X64MPIsel::SelectDoubleConst(MIRDoubleConst &doubleConst, PrimType primType, const BaseNode &parent) const { uint32 labelIdxTmp = cgFunc->GetLabelIdx(); Operand *result = SelectLiteral(doubleConst, cgFunc->GetFunction(), labelIdxTmp++); cgFunc->SetLabelIdx(labelIdxTmp); @@ -470,7 +491,8 @@ RegOperand *X64MPIsel::PrepareMemcpyParm(uint64 copySize) { return ®Result; } -void X64MPIsel::SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRhs) { +void X64MPIsel::SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRhs, DassignNode &stmt) { + (void)stmt; /* rhs is Func Return, it must be from Regread */ if (opndRhs.IsRegister()) { SelectIntAggCopyReturn(symbolMem, lhsInfo.size); @@ -612,6 
+634,7 @@ void X64MPIsel::SelectIgoto(Operand &opnd0) { Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); jmpInsn.AddOpndChain(opnd0); cgFunc->GetCurBB()->AppendInsn(jmpInsn); + cgFunc->SetCurBBKind(BB::kBBGoto); return; } @@ -793,6 +816,7 @@ void X64MPIsel::SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); jmpInsn.AddOpndChain(dstMemOpnd); cgFunc->GetCurBB()->AppendInsn(jmpInsn); + cgFunc->SetCurBBKind(BB::kBBIgoto); } Operand *X64MPIsel::SelectAddrof(AddrofNode &expr, const BaseNode &parent) { @@ -907,7 +931,8 @@ static X64MOP_t PickJmpInsn(Opcode brOp, Opcode cmpOp, bool isFloat, bool isSign * handle brfalse/brtrue op, opnd0 can be a compare node or non-compare node * such as a dread for example */ -void X64MPIsel::SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode, Operand &opnd0) { +void X64MPIsel::SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode) { + Operand &opnd0 = *HandleExpr(stmt, condNode); Opcode opcode = stmt.GetOpCode(); X64MOP_t jmpOperator = x64::MOP_begin; if (opnd0.IsImmediate()) { @@ -1275,4 +1300,151 @@ void X64MPIsel::SelectAsm(AsmNode &node) { cgFunc->SetHasAsm(); CHECK_FATAL(false, "NIY"); } + +Operand *X64MPIsel::SelectAbs(UnaryNode &node, Operand &opnd0) { + PrimType primType = node.GetPrimType(); + if (IsPrimitiveVector(primType)) { + CHECK_FATAL(false, "NIY"); + } else if (IsPrimitiveFloat(primType)) { + CHECK_FATAL(false, "NIY"); + } else if (IsUnsignedInteger(primType)) { + return &opnd0; + } else { + /* + * abs(x) = (x XOR y) - y + * y = x >>> (bitSize - 1) + */ + uint32 bitSize = GetPrimTypeBitSize(primType); + CHECK_FATAL(bitSize == k64BitSize || bitSize == k32BitSize, "only support 32-bits or 64-bits"); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType); + ImmOperand &immOpnd = cgFunc->GetOpndBuilder()->CreateImm(bitSize, bitSize - 1); + RegOperand ®Opndy = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, + 
cgFunc->GetRegTyFromPrimTy(primType)); + SelectShift(regOpndy, regOpnd0, immOpnd, OP_ashr, primType, primType); + RegOperand &tmpOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + SelectBxor(tmpOpnd, regOpnd0, regOpndy, primType); + RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + SelectSub(resOpnd, tmpOpnd, regOpndy, primType); + return &resOpnd; + } +} + +Operand *X64MPIsel::SelectCtz32(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectClz32(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectSin(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectSinh(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectAsin(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCos(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCosh(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectAcos(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectAtan(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectExp(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectLog(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectLog10(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectSinf(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectSinhf(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + 
+Operand *X64MPIsel::SelectAsinf(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCosf(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCoshf(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectAcosf(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectAtanf(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectExpf(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectLogf(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectLog10f(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectFfs(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectMemcmp(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectStrlen(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectStrcmp(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectStrncmp(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectStrchr(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectStrrchr(IntrinsicopNode &node [[maybe_unused]] ) { + CHECK_FATAL(false, "NIY"); +} + } diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp index a506e0e28f..3a162bf2e6 100644 --- a/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp @@ -56,10 +56,10 @@ void 
X64Standardize::StdzBasicOp(Insn &insn) { insn.AddOpndChain(src2).AddOpndChain(dest); } -void X64Standardize::StdzUnaryOp(Insn &insn, CGFunc &cgFunc) { +void X64Standardize::StdzUnaryOp(Insn &insn) { MOperator mOp = insn.GetMachineOpcode(); if (mOp == abstract::MOP_neg_f_32 || mOp == abstract::MOP_neg_f_64) { - StdzFloatingNeg(insn, cgFunc); + StdzFloatingNeg(insn); return; } X64MOP_t directlyMappingMop = GetMopFromAbstraceIRMop(insn.GetMachineOpcode()); @@ -69,7 +69,7 @@ void X64Standardize::StdzUnaryOp(Insn &insn, CGFunc &cgFunc) { insn.AddOpndChain(dest); } -void X64Standardize::StdzCvtOp(Insn &insn, CGFunc &cgFunc) { +void X64Standardize::StdzCvtOp(Insn &insn) { uint32 OpndDesSize = insn.GetDesc()->GetOpndDes(kInsnFirstOpnd)->GetSize(); uint32 destSize = OpndDesSize; uint32 OpndSrcSize = insn.GetDesc()->GetOpndDes(kInsnSecondOpnd)->GetSize(); @@ -95,13 +95,13 @@ void X64Standardize::StdzCvtOp(Insn &insn, CGFunc &cgFunc) { Operand *opnd0 = &insn.GetOperand(kInsnSecondOpnd); RegOperand *src = static_cast(opnd0); if (srcSize != OpndSrcSize) { - src = &cgFunc.GetOpndBuilder()->CreateVReg(src->GetRegisterNumber(), + src = &GetCgFunc()->GetOpndBuilder()->CreateVReg(src->GetRegisterNumber(), srcSize, src->GetRegisterType()); } Operand *opnd1 = &insn.GetOperand(kInsnFirstOpnd); RegOperand *dest = static_cast(opnd1); if (destSize != OpndDesSize) { - dest = &cgFunc.GetOpndBuilder()->CreateVReg(dest->GetRegisterNumber(), + dest = &GetCgFunc()->GetOpndBuilder()->CreateVReg(dest->GetRegisterNumber(), destSize, dest->GetRegisterType()); } insn.CleanAllOperand(); @@ -120,14 +120,14 @@ void X64Standardize::StdzCvtOp(Insn &insn, CGFunc &cgFunc) { * 32: xorl 0x80000000 R1 * movd R1 xmm0 */ -void X64Standardize::StdzFloatingNeg(Insn &insn, CGFunc &cgFunc) { +void X64Standardize::StdzFloatingNeg(Insn &insn) { MOperator mOp = insn.GetMachineOpcode(); uint32 bitSize = mOp == abstract::MOP_neg_f_32 ? 
k32BitSize : k64BitSize; // mov dest -> tmpOperand0 MOperator movOp = mOp == abstract::MOP_neg_f_32 ? x64::MOP_movd_fr_r : x64::MOP_movq_fr_r; - RegOperand *tmpOperand0 = &cgFunc.GetOpndBuilder()->CreateVReg(bitSize, kRegTyInt); - Insn &movInsn0 = cgFunc.GetInsnBuilder()->BuildInsn(movOp, X64CG::kMd[movOp]); + RegOperand *tmpOperand0 = &GetCgFunc()->GetOpndBuilder()->CreateVReg(bitSize, kRegTyInt); + Insn &movInsn0 = GetCgFunc()->GetInsnBuilder()->BuildInsn(movOp, X64CG::kMd[movOp]); Operand &dest = insn.GetOperand(kInsnFirstOpnd); movInsn0.AddOpndChain(dest).AddOpndChain(*tmpOperand0); insn.GetBB()->InsertInsnBefore(insn, movInsn0); @@ -135,26 +135,26 @@ void X64Standardize::StdzFloatingNeg(Insn &insn, CGFunc &cgFunc) { // 32 : xorl 0x80000000 tmpOperand0 // 64 : movabs 0x8000000000000000 tmpOperand1 // xorq tmpOperand1 tmpOperand0 - ImmOperand &imm = cgFunc.GetOpndBuilder()->CreateImm(bitSize, (static_cast(1) << (bitSize - 1))); + ImmOperand &imm = GetCgFunc()->GetOpndBuilder()->CreateImm(bitSize, (static_cast(1) << (bitSize - 1))); if (mOp == abstract::MOP_neg_f_64) { - Operand *tmpOperand1 = &cgFunc.GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); - Insn &movabs = cgFunc.GetInsnBuilder()->BuildInsn(x64::MOP_movabs_i_r, X64CG::kMd[x64::MOP_movabs_i_r]); + Operand *tmpOperand1 = &GetCgFunc()->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + Insn &movabs = GetCgFunc()->GetInsnBuilder()->BuildInsn(x64::MOP_movabs_i_r, X64CG::kMd[x64::MOP_movabs_i_r]); movabs.AddOpndChain(imm).AddOpndChain(*tmpOperand1); insn.GetBB()->InsertInsnBefore(insn, movabs); MOperator xorOp = x64::MOP_xorq_r_r; - Insn &xorq = cgFunc.GetInsnBuilder()->BuildInsn(xorOp, X64CG::kMd[xorOp]); + Insn &xorq = GetCgFunc()->GetInsnBuilder()->BuildInsn(xorOp, X64CG::kMd[xorOp]); xorq.AddOpndChain(*tmpOperand1).AddOpndChain(*tmpOperand0); insn.GetBB()->InsertInsnBefore(insn, xorq); } else { MOperator xorOp = x64::MOP_xorl_i_r; - Insn &xorq = cgFunc.GetInsnBuilder()->BuildInsn(xorOp, 
X64CG::kMd[xorOp]); + Insn &xorq = GetCgFunc()->GetInsnBuilder()->BuildInsn(xorOp, X64CG::kMd[xorOp]); xorq.AddOpndChain(imm).AddOpndChain(*tmpOperand0); insn.GetBB()->InsertInsnBefore(insn, xorq); } // mov tmpOperand0 -> dest - Insn &movq = cgFunc.GetInsnBuilder()->BuildInsn(movOp, X64CG::kMd[movOp]); + Insn &movq = GetCgFunc()->GetInsnBuilder()->BuildInsn(movOp, X64CG::kMd[movOp]); movq.AddOpndChain(*tmpOperand0).AddOpndChain(dest); insn.GetBB()->InsertInsnBefore(insn, movq); @@ -162,17 +162,17 @@ void X64Standardize::StdzFloatingNeg(Insn &insn, CGFunc &cgFunc) { return; } -void X64Standardize::StdzShiftOp(Insn &insn, CGFunc &cgFunc) { +void X64Standardize::StdzShiftOp(Insn &insn) { RegOperand *countOpnd = static_cast(&insn.GetOperand(kInsnThirdOpnd)); /* count operand cvt -> PTY_u8 */ if (countOpnd->GetSize() != GetPrimTypeBitSize(PTY_u8)) { - countOpnd = &cgFunc.GetOpndBuilder()->CreateVReg(countOpnd->GetRegisterNumber(), + countOpnd = &GetCgFunc()->GetOpndBuilder()->CreateVReg(countOpnd->GetRegisterNumber(), GetPrimTypeBitSize(PTY_u8), countOpnd->GetRegisterType()); } /* copy count operand to cl(rcx) register */ - RegOperand &clOpnd = cgFunc.GetOpndBuilder()->CreatePReg(x64::RCX, GetPrimTypeBitSize(PTY_u8), kRegTyInt); + RegOperand &clOpnd = GetCgFunc()->GetOpndBuilder()->CreatePReg(x64::RCX, GetPrimTypeBitSize(PTY_u8), kRegTyInt); X64MOP_t copyMop = x64::MOP_movb_r_r; - Insn ©Insn = cgFunc.GetInsnBuilder()->BuildInsn(copyMop, X64CG::kMd[copyMop]); + Insn ©Insn = GetCgFunc()->GetInsnBuilder()->BuildInsn(copyMop, X64CG::kMd[copyMop]); copyInsn.AddOpndChain(*countOpnd).AddOpndChain(clOpnd); insn.GetBB()->InsertInsnBefore(insn, copyInsn); /* shift OP */ @@ -183,4 +183,8 @@ void X64Standardize::StdzShiftOp(Insn &insn, CGFunc &cgFunc) { insn.AddOpndChain(clOpnd).AddOpndChain(destOpnd); } +void X64Standardize::StdzCommentOp(Insn &insn) { + insn.GetBB()->RemoveInsn(insn); +} + } -- Gitee