From bf4fe22a1dfab8e3576b73d7266d3f5bd5102124 Mon Sep 17 00:00:00 2001 From: William Chen Date: Mon, 19 Dec 2022 10:19:14 -0800 Subject: [PATCH 01/25] arm64 standardize for abstract mop version 1 -O0 running ctorture --- src/mapleall/maple_be/BUILD.gn | 2 + src/mapleall/maple_be/CMakeLists.txt | 2 + .../include/cg/aarch64/aarch64_MPISel.h | 121 +++ .../maple_be/include/cg/aarch64/aarch64_cg.h | 8 + .../include/cg/aarch64/aarch64_cgfunc.h | 31 +- .../include/cg/aarch64/aarch64_md.def | 4 +- .../include/cg/aarch64/aarch64_peep.h | 23 + .../include/cg/aarch64/aarch64_phases.def | 6 +- .../include/cg/aarch64/aarch64_standardize.h | 67 ++ .../maple_be/include/cg/abstract_mmir.def | 41 +- .../maple_be/include/cg/cg_irbuilder.h | 13 +- src/mapleall/maple_be/include/cg/cg_option.h | 18 + src/mapleall/maple_be/include/cg/cg_options.h | 1 + src/mapleall/maple_be/include/cg/cgbb.h | 3 + src/mapleall/maple_be/include/cg/cgfunc.h | 113 +-- src/mapleall/maple_be/include/cg/immvalid.def | 30 + src/mapleall/maple_be/include/cg/isa.h | 5 + src/mapleall/maple_be/include/cg/isel.h | 65 +- src/mapleall/maple_be/include/cg/reg_info.h | 115 +++ .../maple_be/include/cg/standardize.h | 15 +- .../maple_be/include/cg/x86_64/x64_MPISel.h | 45 +- .../cg/x86_64/x64_abstract_mapping.def | 33 +- .../maple_be/include/cg/x86_64/x64_cgfunc.h | 2 +- .../include/cg/x86_64/x64_standardize.h | 9 +- src/mapleall/maple_be/src/be/lower.cpp | 2 +- .../src/cg/aarch64/aarch64_MPISel.cpp | 748 +++++++++++++++++- .../src/cg/aarch64/aarch64_cgfunc.cpp | 81 +- .../maple_be/src/cg/aarch64/aarch64_peep.cpp | 50 ++ .../src/cg/aarch64/aarch64_reaching.cpp | 3 + .../src/cg/aarch64/aarch64_standardize.cpp | 335 ++++++++ src/mapleall/maple_be/src/cg/cg.cpp | 5 + src/mapleall/maple_be/src/cg/cg_cfg.cpp | 2 + src/mapleall/maple_be/src/cg/cg_irbuilder.cpp | 17 +- src/mapleall/maple_be/src/cg/cg_option.cpp | 5 + src/mapleall/maple_be/src/cg/cg_options.cpp | 6 + src/mapleall/maple_be/src/cg/cg_ssa.cpp | 2 +- 
src/mapleall/maple_be/src/cg/cgbb.cpp | 28 + src/mapleall/maple_be/src/cg/cgfunc.cpp | 30 +- src/mapleall/maple_be/src/cg/insn.cpp | 2 +- src/mapleall/maple_be/src/cg/isel.cpp | 292 +++++-- src/mapleall/maple_be/src/cg/standardize.cpp | 42 +- .../maple_be/src/cg/x86_64/x64_MPIsel.cpp | 174 +++- .../maple_be/src/cg/x86_64/x64_cgfunc.cpp | 2 +- .../src/cg/x86_64/x64_standardize.cpp | 62 +- 44 files changed, 2356 insertions(+), 304 deletions(-) create mode 100644 src/mapleall/maple_be/include/cg/aarch64/aarch64_standardize.h create mode 100644 src/mapleall/maple_be/src/cg/aarch64/aarch64_standardize.cpp diff --git a/src/mapleall/maple_be/BUILD.gn b/src/mapleall/maple_be/BUILD.gn index 461e7eefdf..b949fc9ac4 100644 --- a/src/mapleall/maple_be/BUILD.gn +++ b/src/mapleall/maple_be/BUILD.gn @@ -130,6 +130,8 @@ src_libcgaarch64 = [ "src/cg/aarch64/aarch64_cfgo.cpp", "src/cg/aarch64/aarch64_isolate_fastpath.cpp", "src/cg/aarch64/aarch64_rematerialize.cpp", + "src/cg/aarch64/aarch64_MPISel.cpp", + "src/cg/aarch64/aarch64_standardize.cpp", ] src_libcgx86phases = [ diff --git a/src/mapleall/maple_be/CMakeLists.txt b/src/mapleall/maple_be/CMakeLists.txt index 836dbfd396..13f5809920 100755 --- a/src/mapleall/maple_be/CMakeLists.txt +++ b/src/mapleall/maple_be/CMakeLists.txt @@ -106,6 +106,8 @@ if(${TARGET} STREQUAL "aarch64" OR ${TARGET} STREQUAL "aarch64_ilp32") src/cg/aarch64/aarch64_pgo_gen.cpp src/cg/aarch64/aarch64_isolate_fastpath.cpp src/cg/aarch64/aarch64_rematerialize.cpp + src/cg/aarch64/aarch64_MPISel.cpp + src/cg/aarch64/aarch64_standardize.cpp src/cg/cfi_generator.cpp src/cg/cfgo.cpp src/cg/local_opt.cpp diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_MPISel.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_MPISel.h index aac9a9bd87..1b6ea1a2bd 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_MPISel.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_MPISel.h @@ -12,3 +12,124 @@ * FIT FOR A PARTICULAR PURPOSE. 
* See the Mulan PSL v2 for more details. */ + +#ifndef MAPLEBE_INCLUDE_AARCH64_MPISEL_H +#define MAPLEBE_INCLUDE_AARCH64_MPISEL_H + +#include "isel.h" +#include "aarch64_isa.h" +#include "aarch64_call_conv.h" + +namespace maplebe { +class AArch64MPIsel : public MPISel { + public: + AArch64MPIsel(MemPool &mp, AbstractIRBuilder &aIRBuilder, CGFunc &f) : MPISel(mp, aIRBuilder, f) {} + ~AArch64MPIsel() override = default; + + void HandleFuncExit() const override; + void SelectReturn(NaryStmtNode &retNode) override; + void SelectReturn(bool noOpnd) override; + void SelectCall(CallNode &callNode) override; + void SelectIcall(IcallNode &icallNode, Operand &opnd0) override; + Operand &ProcessReturnReg(PrimType primType, int32 sReg) override; + Operand &GetTargetRetOperand(PrimType primType, int32 sReg) override; + Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent) override; + Operand *SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) override; + Operand *SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) override; + Operand *SelectFloatingConst(MIRConst &floatingConst, PrimType primType, const BaseNode &parent) const override; + void SelectGoto(GotoNode &stmt) override; + void SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) override; + void SelectAggIassign(IassignNode &stmt, Operand &addrOpnd, Operand &opndRhs) override; + void SelectAggDassign(maplebe::MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &rOpnd, const DassignNode &s) override; + void SelectAggCopy(MemOperand &lhs, MemOperand &rhs, uint32 copySize) override; + void SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) override; + void SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode) override; + void SelectIgoto(Operand &opnd0) override; + Operand *SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; 
+ Operand *SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectSelect(TernaryNode &expr, Operand &cond, Operand &trueOpnd, Operand &falseOpnd, + const BaseNode &parent) override; + Operand *SelectStrLiteral(ConststrNode &constStr) override; + Operand *SelectExtractbits(const BaseNode &parent, ExtractbitsNode &node, Operand &opnd0) override; + void SelectIntAggCopyReturn(MemOperand &symbolMem, uint64 aggSize) override; + /* Create the operand interface directly */ + MemOperand &CreateMemOpndOrNull(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr, int64 offset = 0); + Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCctz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCclz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCsin(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCsinh(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCasin(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCcos(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCcosh(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCacos(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCatan(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCexp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectClog(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectClog10(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + 
Operand *SelectCsinf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCsinhf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCasinf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCcosf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCcoshf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCacosf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCatanf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCexpf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectClogf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectClog10f(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCffs(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCmemcmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrlen(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrcmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrncmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrchr(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrrchr(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectAbs(UnaryNode &node, Operand &opnd0, const BaseNode &parent) override; + void SelectAsm(AsmNode &node) override; + private: + MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId = 0, + RegOperand *baseReg = nullptr) override; + MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uint32 
opndSize, int64 offset) const override; + void SelectCvtFloat2Float(Operand &resOpnd, Operand &srcOpnd, PrimType fromType, PrimType toType) override; + void SelectCvtFloat2Int(Operand &resOpnd, Operand &srcOpnd, PrimType itype, PrimType ftype) override; + Insn &AppendCall(AArch64MOP_t mOp, Operand &targetOpnd, + ListOperand ¶mOpnds, ListOperand &retOpnds); + void SelectCalleeReturn(MIRType *retType, ListOperand &retOpnds); + + /* Inline function implementation of va_start */ + void GenCVaStartIntrin(RegOperand &opnd, uint32 stkSize); + + /* Subclass private instruction selector function */ + void SelectCVaStart(const IntrinsiccallNode &intrnNode); + void SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds); + void SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); + void SelectCmp(Operand &opnd0, Operand &opnd1, PrimType primType); + void SelectCmpResult(RegOperand &resOpnd, Opcode opCode, PrimType primType, PrimType primOpndType); + Operand *SelectDivRem(RegOperand &opnd0, RegOperand &opnd1, PrimType primType, Opcode opcode); + RegOperand &GetTargetStackPointer(PrimType primType) override; + RegOperand &GetTargetBasicPointer(PrimType primType) override; + std::tuple GetMemOpndInfoFromAggregateNode(BaseNode &argExpr); + void SelectParmListForAggregate(BaseNode &argExpr, AArch64CallConvImpl &parmLocator, bool isArgUnused); + void CreateCallStructParamPassByReg(const MemOperand &memOpnd, regno_t regNo, uint32 parmNum); + void CreateCallStructParamPassByStack(const MemOperand &addrOpnd, uint32 symSize, int32 baseOffset); + void SelectAggCopyReturn(const MIRSymbol &symbol, MIRType &symbolType, uint64 symbolSize); + uint32 GetAggCopySize(uint32 offset1, uint32 offset2, uint32 alignment) const; + bool IsParamStructCopy(const MIRSymbol &symbol); + bool IsSymbolRequireIndirection(const MIRSymbol &symbol) override; + void SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override; + Operand 
*SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrnNode, std::string name, Operand &opnd0, + const BaseNode &parent); + void SelectLibCallNoReturn(const std::string &funcName, std::vector &opndVec, PrimType primType); + void SelectLibCallNArg(const std::string &funcName, std::vector &opndVec, + std::vector pt); + RegOperand *PrepareMemcpyParm(MemOperand &memOperand, MOperator mOp); + RegOperand *PrepareMemcpyParm(uint64 copySize); + + /* save param pass by reg */ + std::vector> paramPassByReg; +}; +} + +#endif /* MAPLEBE_INCLUDE_AARCH64_MPISEL_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h index 6555183b50..3da19ed9dc 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h @@ -32,6 +32,8 @@ #include "aarch64_cfgo.h" #include "aarch64_rematerialize.h" #include "aarch64_pgo_gen.h" +#include "aarch64_MPISel.h" +#include "aarch64_standardize.h" namespace maplebe { constexpr int64 kShortBRDistance = (8 * 1024); @@ -213,6 +215,12 @@ class AArch64CG : public CG { Rematerializer *CreateRematerializer(MemPool &mp) const override { return mp.New(); } + MPISel *CreateMPIsel(MemPool &mp, AbstractIRBuilder &aIRBuilder, CGFunc &f) const override { + return mp.New(mp, aIRBuilder, f); + } + Standardize *CreateStandardize(MemPool &mp, CGFunc &f) const override { + return mp.New(f); + } /* Return the copy operand id of reg1 if it is an insn who just do copy from reg1 to reg2. * i. mov reg2, reg1 * ii. 
add/sub reg2, reg1, 0/zero register diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h index 6a1eaed390..08d1714b07 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h @@ -67,6 +67,8 @@ class AArch64CGFunc : public CGFunc { return refCount; } + void Link2ISel(MPISel *p) override; + int32 GetBeginOffset() const { return beginOffset; } @@ -75,13 +77,13 @@ class AArch64CGFunc : public CGFunc { MOperator PickMovInsn(const RegOperand &lhs, const RegOperand &rhs) const; regno_t NewVRflag() override { - ASSERT(maxRegCount > kRFLAG, "CG internal error."); + ASSERT(GetMaxRegNum() > kRFLAG, "CG internal error."); constexpr uint8 size = 4; - if (maxRegCount <= kRFLAG) { - maxRegCount += (kRFLAG + kVRegisterNumber); - vRegTable.resize(maxRegCount); + if (GetMaxRegNum() <= kRFLAG) { + IncMaxRegNum(kRFLAG + kVRegisterNumber); + vReg.VRegTableResize(GetMaxRegNum()); } - new (&vRegTable[kRFLAG]) VirtualRegNode(kRegTyCc, size); + vReg.VRegTableValuesSet(kRFLAG, kRegTyCc, size); return kRFLAG; } @@ -89,6 +91,7 @@ class AArch64CGFunc : public CGFunc { RegOperand &GetOrCreateResOperand(const BaseNode &parent, PrimType primType); MIRStructType *GetLmbcStructArgType(BaseNode &stmt, size_t argNo) const; + void HandleFuncCfg(CGCFG *cfg) override; void IntrinsifyGetAndAddInt(ListOperand &srcOpnds, PrimType pty); void IntrinsifyGetAndSetInt(ListOperand &srcOpnds, PrimType pty); void IntrinsifyCompareAndSwapInt(ListOperand &srcOpnds, PrimType pty); @@ -100,6 +103,7 @@ class AArch64CGFunc : public CGFunc { void HandleRetCleanup(NaryStmtNode &retNode) override; void MergeReturn() override; RegOperand *ExtractNewMemBase(const MemOperand &memOpnd); + Operand *HandleExpr(const BaseNode &parent, BaseNode &expr); void SelectDassign(DassignNode &stmt, Operand &opnd0) override; void SelectDassignoff(DassignoffNode &stmt, Operand &opnd0) 
override; void SelectRegassign(RegassignNode &stmt, Operand &opnd0) override; @@ -116,7 +120,7 @@ class AArch64CGFunc : public CGFunc { bool LmbcSmallAggForCall(BlkassignoffNode &bNode, const Operand *src, std::vector **parmList); bool GetNumReturnRegsForIassignfpoff(MIRType *rType, PrimType &primType, uint32 &numRegs); void GenIassignfpoffStore(Operand &srcOpnd, int32 offset, uint32 byteSize, PrimType primType); - void SelectAggDassign(DassignNode &stmt) override; + void SelectAggDassign(const DassignNode &stmt) override; void SelectIassign(IassignNode &stmt) override; void SelectIassignoff(IassignoffNode &stmt) override; void SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) override; @@ -126,6 +130,7 @@ class AArch64CGFunc : public CGFunc { void SelectReturnSendOfStructInRegs(BaseNode *x) override; void SelectReturn(Operand *opnd0) override; void SelectIgoto(Operand *opnd0) override; + void SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, bool isCallNative = false); void SelectCondGoto(CondGotoNode &stmt, Operand &opnd0, Operand &opnd1) override; void SelectCondGoto(LabelOperand &targetOpnd, Opcode jmpOp, Opcode cmpOp, Operand &origOpnd0, Operand &origOpnd1, PrimType primType, bool signedCond); @@ -135,6 +140,7 @@ class AArch64CGFunc : public CGFunc { void SelectCall(CallNode &callNode) override; void SelectIcall(IcallNode &icallNode, Operand &srcOpnd) override; void SelectIntrinCall(IntrinsiccallNode &intrinsicCallNode) override; + Operand *SelectAArch64ffs(Operand &argOpnd, PrimType argType); Operand *SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrnNode, std::string name) override; Operand *SelectIntrinsicOpWithNParams(IntrinsicopNode &intrnNode, PrimType retType, const std::string &name) override; Operand *SelectCclz(IntrinsicopNode &intrnNode) override; @@ -302,7 +308,6 @@ class AArch64CGFunc : public CGFunc { LabelOperand &GetOrCreateLabelOperand(LabelIdx labIdx) override; LabelOperand &GetOrCreateLabelOperand(BB &bb) override; uint32 
GetAggCopySize(uint32 offset1, uint32 offset2, uint32 alignment) const; - RegOperand *SelectVectorAddLong(PrimType rType, Operand *o1, Operand *o2, PrimType otyp, bool isLow) override; RegOperand *SelectVectorAddWiden(Operand *o1, PrimType otyp1, Operand *o2, PrimType otyp2, bool isLow) override; RegOperand *SelectVectorAbs(PrimType rType, Operand *o1) override; @@ -345,10 +350,15 @@ class AArch64CGFunc : public CGFunc { RegOperand *SelectVectorWiden(PrimType rType, Operand *o1, PrimType otyp, bool isLow) override; RegOperand *SelectVectorMovNarrow(PrimType rType, Operand *opnd, PrimType oType) override; + void SelectCvtFloat2Float(Operand &resOpnd, Operand &srcOpnd, PrimType fromType, PrimType toType); + void SelectCvtFloat2Int(Operand &resOpnd, Operand &srcOpnd, PrimType itype, PrimType ftype); + void SelectCvtInt2Float(Operand &resOpnd, Operand &origOpnd0, PrimType toType, PrimType fromType); void SelectVectorCvt(Operand *res, PrimType rType, Operand *o1, PrimType oType); void SelectVectorZip(PrimType rType, Operand *o1, Operand *o2); void SelectStackSave(); void SelectStackRestore(const IntrinsiccallNode &intrnNode); + void SelectCVaStart(const IntrinsiccallNode &intrnNode); + void SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); void PrepareVectorOperands(Operand **o1, PrimType &oty1, Operand **o2, PrimType &oty2); RegOperand *AdjustOneElementVectorOperand(PrimType oType, RegOperand *opnd); @@ -867,15 +877,11 @@ class AArch64CGFunc : public CGFunc { void GenLargeStructCopyForIreadoff(BaseNode &parent, BaseNode &argExpr, int32 &structCopyOffset, size_t argNo); void SelectParmListPreprocessLargeStruct(BaseNode &parent, BaseNode &argExpr, int32 &structCopyOffset, size_t argNo); void SelectParmListPreprocess(StmtNode &naryNode, size_t start, std::set &specialArgs); - void SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, bool isCallNative = false); Operand *SelectClearStackCallParam(const AddrofNode &expr, 
int64 &offsetValue); void SelectClearStackCallParmList(const StmtNode &naryNode, ListOperand &srcOpnds, std::vector &stackPostion); void SelectRem(Operand &resOpnd, Operand &lhsOpnd, Operand &rhsOpnd, PrimType primType, bool isSigned, bool is64Bits); void SelectCvtInt2Int(const BaseNode *parent, Operand *&resOpnd, Operand *opnd0, PrimType fromType, PrimType toType); - void SelectCvtFloat2Float(Operand &resOpnd, Operand &srcOpnd, PrimType fromType, PrimType toType); - void SelectCvtFloat2Int(Operand &resOpnd, Operand &srcOpnd, PrimType itype, PrimType ftype); - void SelectCvtInt2Float(Operand &resOpnd, Operand &origOpnd0, PrimType toType, PrimType fromType); Operand *SelectRelationOperator(RelationOperator operatorCode, const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); void SelectRelationOperator(RelationOperator operatorCode, Operand &resOpnd, Operand &opnd0, Operand &opnd1, @@ -883,10 +889,8 @@ class AArch64CGFunc : public CGFunc { MOperator SelectRelationMop(RelationOperator operatorCode, RelationOperatorOpndPattern opndPattern, bool is64Bits, bool isBitmaskImmediate, bool isBitNumLessThan16) const; Operand *SelectMinOrMax(bool isMin, const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); - void SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); Operand *SelectRoundLibCall(RoundType roundType, const TypeCvtNode &node, Operand &opnd0); Operand *SelectRoundOperator(RoundType roundType, const TypeCvtNode &node, Operand &opnd0, const BaseNode &parent); - Operand *SelectAArch64ffs(Operand &argOpnd, PrimType argType); Operand *SelectAArch64align(const IntrinsicopNode &intrnNode, bool isUp /* false for align down */); int64 GetOrCreatSpillRegLocation(regno_t vrNum) { AArch64SymbolAlloc *symLoc = static_cast(GetMemlayout()->GetLocOfSpillRegister(vrNum)); @@ -898,7 +902,6 @@ class AArch64CGFunc : public CGFunc { bool GenerateCompareWithZeroInstruction(Opcode jmpOp, 
Opcode cmpOp, bool is64Bits, PrimType primType, LabelOperand &targetOpnd, Operand &opnd0); void GenCVaStartIntrin(RegOperand &opnd, uint32 stkSize); - void SelectCVaStart(const IntrinsiccallNode &intrnNode); void SelectCAtomicStoreN(const IntrinsiccallNode &intrinsiccallNode); void SelectCAtomicStore(const IntrinsiccallNode &intrinsiccall); void SelectCAtomicLoad(const IntrinsiccallNode &intrinsiccall); diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def b/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def index 4e84c64354..8cbf5aabb6 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def @@ -22,9 +22,9 @@ DEFINE_MOP(MOP_xmovrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISMOVE,kLtAlu,"mo /* MOP_wmovrr */ DEFINE_MOP(MOP_wmovrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISMOVE,kLtAlu,"mov","0,1",1) /* MOP_wmovri32 */ -DEFINE_MOP(MOP_wmovri32, {&OpndDesc::Reg32ID,&OpndDesc::Imm32},ISMOVE,kLtAlu,"mov","0,1",1) +DEFINE_MOP(MOP_wmovri32, {&OpndDesc::Reg32ID,&OpndDesc::Imm32},ISMOVE,kLtAlu,"mov","0,1",1,IsSingleInstructionMovable32) /* MOP_xmovri64 */ -DEFINE_MOP(MOP_xmovri64, {&OpndDesc::Reg64ID,&OpndDesc::Imm64},ISMOVE,kLtAlu,"mov","0,1",1) +DEFINE_MOP(MOP_xmovri64, {&OpndDesc::Reg64ID,&OpndDesc::Imm64},ISMOVE,kLtAlu,"mov","0,1",1,IsSingleInstructionMovable64) /* MOP_xmovrr_uxtw -- Remove Redundant uxtw -- used in globalopt:UxtwMovPattern */ DEFINE_MOP(MOP_xmovrr_uxtw, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISMOVE,kLtAlu,"mov","0,1",1) diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h index d83d65fcdd..d183afed72 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h @@ -963,6 +963,29 @@ class RemoveMovingtoSameRegPattern : public CGPeepPattern { } }; +/* + * mov dest1, imm + * mul dest2, reg1, dest1 + * ===> if imm is 2^n + * 
mov dest1, imm + * lsl dest2, reg1, n + */ +class MulImmToShiftPattern : public CGPeepPattern { + public: + MulImmToShiftPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) + : CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~MulImmToShiftPattern() override = default; + std::string GetPatternName() override { + return "MulImmToShiftPattern"; + } + bool CheckCondition(Insn &insn) override; + void Run(BB &bb, Insn &insn) override; + private: + Insn *movInsn = nullptr; + uint32 shiftVal = 0; + MOperator newMop = MOP_undef; +}; + /* * Combining 2 STRs into 1 stp or 2 LDRs into 1 ldp, when they are * back to back and the [MEM] they access is conjointed. diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def b/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def index 7ca39b865f..1727a4fd31 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def @@ -15,9 +15,13 @@ ADDTARGETPHASE("layoutstackframe", true); ADDTARGETPHASE("createstartendlabel", true); ADDTARGETPHASE("buildehfunc", !GetMIRModule()->IsCModule()); - ADDTARGETPHASE("handlefunction", true); + ADDTARGETPHASE("handlefunction", !CGOptions::UseNewCg()); + ADDTARGETPHASE("instructionselector", CGOptions::UseNewCg()); + ADDTARGETPHASE("handlecfg", CGOptions::UseNewCg()); + ADDTARGETPHASE("patchlongbranch", CGOptions::UseNewCg() && CGOptions::DoFixLongBranch()); ADDTARGETPHASE("cgprofuse", Options::profileUse); ADDTARGETPHASE("moveargs", true); + ADDTARGETPHASE("instructionstandardize", CGOptions::UseNewCg()); /* SSA PHASES */ ADDTARGETPHASE("cgssaconstruct", CGOptions::DoCGSSA()); ADDTARGETPHASE("cgcopyprop", CGOptions::DoCGSSA()); diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_standardize.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_standardize.h new file mode 100644 index 0000000000..866bce5f43 --- /dev/null +++ 
b/src/mapleall/maple_be/include/cg/aarch64/aarch64_standardize.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) [2022] Futurewei Technologies, Inc. All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef MAPLEBE_INCLUDE_AARCH64_STANDARDIZE_H +#define MAPLEBE_INCLUDE_AARCH64_STANDARDIZE_H + +#include "standardize.h" + +namespace maplebe { + +enum TargetOperandAction : uint8 { + kAbtractReg, + kAbtractMem, + kAbtractImm, + kAbtractNone, +}; + +struct TargetMopGen { + AArch64MOP_t targetMop; + std::vector targetOpndAction; + std::vector mappingOrder; +}; + +class AbstractIR2Target { + public: + abstract::AbstractMOP_t abstractMop; + std::vector targetMap; +}; + +class AArch64Standardize : public Standardize { + public: + explicit AArch64Standardize(CGFunc &f) : Standardize(f) { + SetAddressMapping(false); + } + + ~AArch64Standardize() override = default; + + private: + void Legalize() override; + void StdzMov(Insn &insn) override; + void StdzStrLdr(Insn &insn) override; + void StdzBasicOp(Insn &insn) override; + void StdzUnaryOp(Insn &insn) override; + void StdzCvtOp(Insn &insn) override; + void StdzShiftOp(Insn &insn) override; + void StdzCommentOp(Insn &insn) override; + + Operand *UpdateRegister(Operand &opnd, std::map ®Map, bool allocate); + void TraverseOperands(Insn *insn, std::map ®Map, bool allocate); + Operand *GetInsnResult(Insn *insn); + Insn *HandleTargetImm(Insn *insn, Insn *newInsn, uint32 idx, MOperator targetMop, uint8 order); + void SelectTargetInsn(Insn *insn); +}; +} +#endif /* 
MAPLEBE_INCLUDE_AARCH64_STANDARDIZE_H */ diff --git a/src/mapleall/maple_be/include/cg/abstract_mmir.def b/src/mapleall/maple_be/include/cg/abstract_mmir.def index 83d5658589..acc419fec2 100644 --- a/src/mapleall/maple_be/include/cg/abstract_mmir.def +++ b/src/mapleall/maple_be/include/cg/abstract_mmir.def @@ -51,17 +51,40 @@ DEFINE_MOP(MOP_zext_rr_64_32, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"zext_r64_r32","",1) DEFINE_MOP(MOP_sext_rr_64_32, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"sext_r64_r32","",1) + /* register truncate */ + DEFINE_MOP(MOP_zext_rr_8_16, {&OpndDesc::Reg8ID,&OpndDesc::Reg16IS},ISABSTRACT|ISCONVERSION,0,"zext_r8_r16","",1) + DEFINE_MOP(MOP_sext_rr_8_16, {&OpndDesc::Reg8ID,&OpndDesc::Reg16IS},ISABSTRACT|ISCONVERSION,0,"sext_r8_r16","",1) + DEFINE_MOP(MOP_zext_rr_8_32, {&OpndDesc::Reg8ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"zext_r8_r32","",1) + DEFINE_MOP(MOP_sext_rr_8_32, {&OpndDesc::Reg8ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"sext_r8_r32","",1) + DEFINE_MOP(MOP_zext_rr_16_32, {&OpndDesc::Reg16ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"zext_r16_r32","",1) + DEFINE_MOP(MOP_sext_rr_16_32, {&OpndDesc::Reg16ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"sext_r16_r32","",1) + + DEFINE_MOP(MOP_zext_rr_8_64, {&OpndDesc::Reg8ID,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"zext_r8_r64","",1) + DEFINE_MOP(MOP_sext_rr_8_64, {&OpndDesc::Reg8ID,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"sext_r8_r64","",1) + DEFINE_MOP(MOP_zext_rr_16_64, {&OpndDesc::Reg16ID,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"zext_r16_r64","",1) + DEFINE_MOP(MOP_sext_rr_16_64, {&OpndDesc::Reg16ID,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"sext_r16_r64","",1) + DEFINE_MOP(MOP_zext_rr_32_64, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"zext_r32_r64","",1) + DEFINE_MOP(MOP_sext_rr_32_64, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"sext_r32_r64","",1) 
+ /* int2float conversion */ - DEFINE_MOP(MOP_cvt_fr_u32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"cvt_fr_u32","",1) - DEFINE_MOP(MOP_cvt_fr_u64, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"cvt_fr_u64","",1) - DEFINE_MOP(MOP_cvt_fr_i32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"cvt_fr_i32","",1) - DEFINE_MOP(MOP_cvt_fr_i64, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"cvt_fr_i64","",1) + DEFINE_MOP(MOP_cvt_f32_u32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"cvt_f32_u32","",1) + DEFINE_MOP(MOP_cvt_f64_u32, {&OpndDesc::Reg64FD,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"cvt_f64_u32","",1) + DEFINE_MOP(MOP_cvt_f32_u64, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"cvt_f32_u64","",1) + DEFINE_MOP(MOP_cvt_f64_u64, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"cvt_f64_u64","",1) + DEFINE_MOP(MOP_cvt_f32_i32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"cvt_f32_i32","",1) + DEFINE_MOP(MOP_cvt_f64_i32, {&OpndDesc::Reg64FD,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"cvt_f64_i32","",1) + DEFINE_MOP(MOP_cvt_f32_i64, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"cvt_f32_i64","",1) + DEFINE_MOP(MOP_cvt_f64_i64, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"cvt_f64_i64","",1) /* float2int conversion */ - DEFINE_MOP(MOP_cvt_rf_u32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_rf_u32","",1) - DEFINE_MOP(MOP_cvt_rf_u64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISABSTRACT|ISCONVERSION,0,"cvt_rf_u64","",1) - DEFINE_MOP(MOP_cvt_rf_i32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_rf_i32","",1) - DEFINE_MOP(MOP_cvt_rf_i64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISABSTRACT|ISCONVERSION,0,"cvt_rf_i64","",1) + DEFINE_MOP(MOP_cvt_u32_f32, 
{&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_u32_f32","",1) + DEFINE_MOP(MOP_cvt_u64_f32, {&OpndDesc::Reg64ID,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_u64_f32","",1) + DEFINE_MOP(MOP_cvt_u32_f64, {&OpndDesc::Reg32ID,&OpndDesc::Reg64FS},ISABSTRACT|ISCONVERSION,0,"cvt_u32_f64","",1) + DEFINE_MOP(MOP_cvt_u64_f64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISABSTRACT|ISCONVERSION,0,"cvt_u64_f64","",1) + DEFINE_MOP(MOP_cvt_i32_f32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_i32_f32","",1) + DEFINE_MOP(MOP_cvt_i64_f32, {&OpndDesc::Reg64ID,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_i64_f32","",1) + DEFINE_MOP(MOP_cvt_i32_f64, {&OpndDesc::Reg32ID,&OpndDesc::Reg64FS},ISABSTRACT|ISCONVERSION,0,"cvt_i32_f64","",1) + DEFINE_MOP(MOP_cvt_i64_f64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISABSTRACT|ISCONVERSION,0,"cvt_i64_f64","",1) /* float conversion */ DEFINE_MOP(MOP_cvt_ff_64_32, {&OpndDesc::Reg64FD,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_ff_64_32","",1) @@ -150,4 +173,4 @@ DEFINE_MOP(MOP_not_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISABSTRACT|ISUNARYOP,0,"not_64","",1) /* MOP_comment */ - DEFINE_MOP(MOP_comment, {&OpndDesc::String0S},ISABSTRACT,0,"//","0", 0) \ No newline at end of file + DEFINE_MOP(MOP_comment, {&OpndDesc::String0S},ISABSTRACT|ISCOMMENT,0,"//","0", 0) diff --git a/src/mapleall/maple_be/include/cg/cg_irbuilder.h b/src/mapleall/maple_be/include/cg/cg_irbuilder.h index 794f7868c9..d18f242855 100644 --- a/src/mapleall/maple_be/include/cg/cg_irbuilder.h +++ b/src/mapleall/maple_be/include/cg/cg_irbuilder.h @@ -16,6 +16,7 @@ #ifndef MAPLEBE_INCLUDE_CG_IRBUILDER_H #define MAPLEBE_INCLUDE_CG_IRBUILDER_H +#include "reg_info.h" #include "insn.h" #include "operand.h" @@ -56,14 +57,18 @@ class InsnBuilder { uint32 createdInsnNum = 0; }; -constexpr uint32 kBaseVirtualRegNO = 200; /* avoid conflicts between virtual and physical */ class OperandBuilder { public: - explicit 
OperandBuilder(MemPool &mp, uint32 mirPregNum = 0) : alloc(&mp), virtualRegNum(mirPregNum) {} + explicit OperandBuilder(MemPool &mp, uint32 mirPregNum = 0) + : alloc(&mp) { + virtualReg.SetCount(mirPregNum); + } + /* create an operand in cgfunc when no mempool is supplied */ ImmOperand &CreateImm(uint32 size, int64 value, MemPool *mp = nullptr); ImmOperand &CreateImm(const MIRSymbol &symbol, int64 offset, int32 relocs, MemPool *mp = nullptr); + OfstOperand &CreateOfst(int64 offset, uint32 size, MemPool *mp = nullptr); MemOperand &CreateMem(uint32 size, MemPool *mp = nullptr); MemOperand &CreateMem(RegOperand &baseOpnd, int64 offset, uint32 size, MemPool *mp = nullptr); MemOperand &CreateMem(uint32 size, RegOperand &baseOpnd, ImmOperand &offImm, MemPool *mp = nullptr); @@ -79,14 +84,14 @@ class OperandBuilder { CommentOperand &CreateComment(const MapleString &s, MemPool *mp = nullptr); uint32 GetCurrentVRegNum() const { - return virtualRegNum; + return virtualReg.GetCount(); } protected: MapleAllocator alloc; private: - uint32 virtualRegNum = 0; + VregInfo virtualReg; /* reg bank for multiple use */ }; diff --git a/src/mapleall/maple_be/include/cg/cg_option.h b/src/mapleall/maple_be/include/cg/cg_option.h index 29d0c77fcb..f737972767 100644 --- a/src/mapleall/maple_be/include/cg/cg_option.h +++ b/src/mapleall/maple_be/include/cg/cg_option.h @@ -640,6 +640,22 @@ class CGOptions { return doCFGO; } + static void EnableNewCg() { + useNewCg = true; + } + + static void DisableNewCg() { + useNewCg = false; + } + + static bool UseNewCg() { + return useNewCg; + } + + static bool DoFixLongBranch() { + return CGOptions::GetInstance().GetOptimizeLevel() == kLevel0; + } + static void EnableRegSavesOpt() { doRegSavesOpt = true; } @@ -663,6 +679,7 @@ class CGOptions { static bool UseSsaPreSave() { return useSsaPreSave; } + static void EnableSsuPreRestore() { useSsuPreRestore = true; } @@ -1375,6 +1392,7 @@ class CGOptions { static bool doRegSavesOpt; static bool useSsaPreSave; 
static bool useSsuPreRestore; + static bool useNewCg; static bool dumpOptimizeCommonLog; static bool checkArrayStore; static bool exclusiveEH; diff --git a/src/mapleall/maple_be/include/cg/cg_options.h b/src/mapleall/maple_be/include/cg/cg_options.h index 17dd1e174d..3d2d8c922b 100644 --- a/src/mapleall/maple_be/include/cg/cg_options.h +++ b/src/mapleall/maple_be/include/cg/cg_options.h @@ -44,6 +44,7 @@ extern maplecl::Option lsraOptcallee; extern maplecl::Option calleeregsPlacement; extern maplecl::Option ssapreSave; extern maplecl::Option ssupreRestore; +extern maplecl::Option newCg; extern maplecl::Option prepeep; extern maplecl::Option peep; extern maplecl::Option preschedule; diff --git a/src/mapleall/maple_be/include/cg/cgbb.h b/src/mapleall/maple_be/include/cg/cgbb.h index 38476f804e..1db79af742 100644 --- a/src/mapleall/maple_be/include/cg/cgbb.h +++ b/src/mapleall/maple_be/include/cg/cgbb.h @@ -203,6 +203,9 @@ class BB { void RemoveInsnSequence(Insn &insn, const Insn &nextInsn); + /* prepend all insns from bb before insn */ + void InsertBeforeInsn(BB &fromBB, Insn &beforeInsn); + /* append all insns from bb into this bb */ void AppendBBInsns(BB &bb); diff --git a/src/mapleall/maple_be/include/cg/cgfunc.h b/src/mapleall/maple_be/include/cg/cgfunc.h index 3162fb6ea0..3f40e73f43 100644 --- a/src/mapleall/maple_be/include/cg/cgfunc.h +++ b/src/mapleall/maple_be/include/cg/cgfunc.h @@ -32,7 +32,6 @@ #include "mir_function.h" #include "debug_info.h" #include "maple_phase_manager.h" - /* Maple MP header */ #include "mempool_allocator.h" @@ -50,33 +49,6 @@ struct MemOpndCmp { } }; -class VirtualRegNode { - public: - VirtualRegNode() = default; - - VirtualRegNode(RegType type, uint32 size) - : regType(type), size(size), regNO(kInvalidRegNO) {} - - virtual ~VirtualRegNode() = default; - - void AssignPhysicalRegister(regno_t phyRegNO) { - regNO = phyRegNO; - } - - RegType GetType() const { - return regType; - } - - uint32 GetSize() const { - return size; - } - - 
private: - RegType regType = kRegTyUndef; - uint32 size = 0; /* size in bytes */ - regno_t regNO = kInvalidRegNO; /* physical register assigned by register allocation */ -}; - class SpillMemOperandSet { public: explicit SpillMemOperandSet(MapleAllocator &mallocator) : reuseSpillLocMem(mallocator.Adapter()) {} @@ -104,6 +76,8 @@ class SpillMemOperandSet { MapleSet reuseSpillLocMem; }; +class MPISel; + #if defined(TARGARM32) && TARGARM32 class LiveRange; #endif /* TARGARM32 */ @@ -121,6 +95,8 @@ class CGFunc { StackMemPool &stackMp, MapleAllocator &allocator, uint32 funcId); virtual ~CGFunc(); + void InitFactory(); + const std::string &GetName() const { return func.GetName(); } @@ -211,8 +187,14 @@ class CGFunc { void SetCleanupLabel(BB &cleanupEntry); bool ExitbbNotInCleanupArea(const BB &bb) const; uint32 GetMaxRegNum() const { - return maxRegCount; + return vReg.GetMaxRegCount(); }; + void SetMaxRegNum(uint32 num) { + vReg.SetMaxRegCount(num); + } + void IncMaxRegNum(uint32 num) { + vReg.IncMaxRegCount(num); + } void DumpCFG() const; void DumpBBInfo(const BB *bb) const; void DumpCGIR() const; @@ -230,7 +212,7 @@ class CGFunc { virtual void SelectAbort() = 0; virtual void SelectAssertNull(UnaryStmtNode &stmt) = 0; virtual void SelectAsm(AsmNode &node) = 0; - virtual void SelectAggDassign(DassignNode &stmt) = 0; + virtual void SelectAggDassign(const DassignNode &stmt) = 0; virtual void SelectIassign(IassignNode &stmt) = 0; virtual void SelectIassignoff(IassignoffNode &stmt) = 0; virtual void SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) = 0; @@ -417,6 +399,8 @@ class CGFunc { virtual RegOperand *SelectVectorWiden(PrimType rType, Operand *o1, PrimType otyp, bool isLow) = 0; virtual RegOperand *SelectVectorMovNarrow(PrimType rType, Operand *opnd, PrimType oType) = 0; + virtual void HandleFuncCfg(CGCFG *cfg) { AddCommonExitBB(); } + /* For ebo issue. 
*/ virtual Operand *GetTrueOpnd() { return nullptr; @@ -430,6 +414,7 @@ class CGFunc { LabelIdx CreateLabel(); RegOperand *GetVirtualRegisterOperand(regno_t vRegNO) { + std::unordered_map &vRegOperandTable = vReg.vRegOperandTable; auto it = vRegOperandTable.find(vRegNO); return it == vRegOperandTable.end() ? nullptr : it->second; } @@ -450,27 +435,7 @@ class CGFunc { if (CGOptions::UseGeneralRegOnly()) { CHECK_FATAL(regType != kRegTyFloat, "cannot use float | SIMD register with --general-reg-only"); } - /* when vRegCount reach to maxRegCount, maxRegCount limit adds 80 every time */ - /* and vRegTable increases 80 elements. */ - if (vRegCount >= maxRegCount) { - ASSERT(vRegCount < maxRegCount + 1, "MAINTIAN FAILED"); - maxRegCount += kRegIncrStepLen; - vRegTable.resize(maxRegCount); - } -#if TARGAARCH64 || TARGX86_64 || TARGRISCV64 - if (size < k4ByteSize) { - size = k4ByteSize; - } -#if TARGAARCH64 - /* cannot handle 128 size register */ - if (regType == kRegTyInt && size > k8ByteSize) { - size = k8ByteSize; - } -#endif - ASSERT(size == k4ByteSize || size == k8ByteSize || size == k16ByteSize, "check size"); -#endif - new (&vRegTable[vRegCount]) VirtualRegNode(regType, size); - return vRegCount++; + return vReg.GetNextVregNO(regType, size); } virtual regno_t NewVRflag() { @@ -524,17 +489,17 @@ class CGFunc { /* return Register Type */ virtual RegType GetRegisterType(regno_t rNum) const { - CHECK(rNum < vRegTable.size(), "index out of range in GetVRegSize"); - return vRegTable[rNum].GetType(); + CHECK(rNum < vReg.VRegTableSize(), "index out of range in GetVRegSize"); + return vReg.VRegTableGetType(rNum); } #if defined(TARGX86_64) && TARGX86_64 uint32 GetMaxVReg() const { - return vRegCount + opndBuilder->GetCurrentVRegNum(); + return vReg.GetCount() + opndBuilder->GetCurrentVRegNum(); } #else uint32 GetMaxVReg() const { - return vRegCount; + return vReg.GetCount(); } #endif @@ -547,7 +512,7 @@ class CGFunc { } uint32 GetVRegSize(regno_t vregNum) { - CHECK(vregNum < 
vRegTable.size(), "index out of range in GetVRegSize"); + CHECK(vregNum < vReg.VRegTableSize(), "index out of range in GetVRegSize"); return GetOrCreateVirtualRegisterOperand(vregNum).GetSize() / kBitsPerByte; } @@ -1100,7 +1065,7 @@ class CGFunc { } regno_t GetVirtualRegNOFromPseudoRegIdx(PregIdx idx) const { - return regno_t(idx + firstMapleIrVRegNO); + return regno_t(idx + kBaseVirtualRegNO); } bool GetHasProEpilogue() const { @@ -1251,10 +1216,6 @@ class CGFunc { vregsToPregsMap[vRegNum] = pidx; } - uint32 GetFirstMapleIrVRegNO() const { - return firstMapleIrVRegNO; - } - void SetHasAsm() { hasAsm = true; } @@ -1275,6 +1236,18 @@ class CGFunc { return needStackProtect; } + virtual void Link2ISel(MPISel *p) { + (void)p; + } + + void SetISel(MPISel *p) { + isel = p; + } + + MPISel *GetISel() { + return isel; + } + MIRPreg *GetPseudoRegFromVirtualRegNO(const regno_t vRegNO, bool afterSSA = false) const { PregIdx pri = afterSSA ? VRegNOToPRegIdx(vRegNO) : GetPseudoRegIdxFromVirtualRegNO(vRegNO); if (pri == -1) { @@ -1292,15 +1265,11 @@ class CGFunc { } protected: - uint32 firstMapleIrVRegNO = 200; /* positioned after physical regs */ uint32 firstNonPregVRegNO; - uint32 vRegCount; /* for assigning a number for each CG virtual register */ + VregInfo vReg; /* for assigning a number for each CG virtual register */ uint32 ssaVRegCount = 0; /* vreg count in ssa */ - uint32 maxRegCount; /* for the current virtual register number limit */ size_t lSymSize; /* size of local symbol table imported */ - MapleVector vRegTable; /* table of CG's virtual registers indexed by v_reg no */ MapleVector bbVec; - MapleUnorderedMap vRegOperandTable; MapleUnorderedMap pRegSpillMemOperands; MapleUnorderedMap spillRegMemOperands; MapleUnorderedMap reuseSpillLocMem; @@ -1313,7 +1282,6 @@ class CGFunc { uint32 totalInsns = 0; int32 structCopySize = 0; int32 maxParamStackSize = 0; - static constexpr int kRegIncrStepLen = 80; /* reg number increate step length */ bool hasVLAOrAlloca = false; 
bool hasAlloca = false; @@ -1338,7 +1306,7 @@ class CGFunc { PregIdx GetPseudoRegIdxFromVirtualRegNO(const regno_t vRegNO) const { if (IsVRegNOForPseudoRegister(vRegNO)) { - return PregIdx(vRegNO - firstMapleIrVRegNO); + return PregIdx(vRegNO - kBaseVirtualRegNO); } return VRegNOToPRegIdx(vRegNO); } @@ -1346,7 +1314,7 @@ class CGFunc { bool IsVRegNOForPseudoRegister(regno_t vRegNum) const { /* 0 is not allowed for preg index */ uint32 n = static_cast(vRegNum); - return (firstMapleIrVRegNO < n && n < firstNonPregVRegNO); + return (kBaseVirtualRegNO < n && n < firstNonPregVRegNO); } PregIdx VRegNOToPRegIdx(regno_t vRegNum) const { @@ -1358,7 +1326,7 @@ class CGFunc { } VirtualRegNode &GetVirtualRegNodeFromPseudoRegIdx(PregIdx idx) { - return vRegTable.at(GetVirtualRegNOFromPseudoRegIdx(idx)); + return vReg.VRegTableElementGet(GetVirtualRegNOFromPseudoRegIdx(idx)); } PrimType GetTypeFromPseudoRegIdx(PregIdx idx) { @@ -1478,12 +1446,17 @@ class CGFunc { uint8 stackProtectInfo = 0; bool needStackProtect = false; uint32 priority = 0; + + /* cross reference isel class pointer */ + MPISel *isel = nullptr; }; /* class CGFunc */ MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgLayoutFrame, maplebe::CGFunc) MAPLE_FUNC_PHASE_DECLARE_END MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgHandleFunction, maplebe::CGFunc) MAPLE_FUNC_PHASE_DECLARE_END +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPatchLongBranch, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgFixCFLocOsft, maplebe::CGFunc) MAPLE_FUNC_PHASE_DECLARE_END MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgGenCfi, maplebe::CGFunc) diff --git a/src/mapleall/maple_be/include/cg/immvalid.def b/src/mapleall/maple_be/include/cg/immvalid.def index 3524d92bce..4a6df76761 100644 --- a/src/mapleall/maple_be/include/cg/immvalid.def +++ b/src/mapleall/maple_be/include/cg/immvalid.def @@ -14,6 +14,24 @@ bool IsBitSizeImmediate(uint64 val, uint32 bitLen, uint32 nLowerZeroBits) { return (mask2 & val) == 0UL && (mask1 & ((static_cast(val)) >> nLowerZeroBits)) == 
0UL; }; +/* This is a copy from "operand.cpp", temporary fix for me_slp.cpp usage of this file */ +/* was IsMoveWidableImmediate */ +bool IsMoveWidableImmediateCopy(uint64 val, uint32 bitLen) { + if (bitLen == k64BitSize) { + /* 0xHHHH000000000000 or 0x0000HHHH00000000, return true */ + if (((val & ((static_cast(0xffff)) << k48BitSize)) == val) || + ((val & ((static_cast(0xffff)) << k32BitSize)) == val)) { + return true; + } + } else { + /* get lower 32 bits */ + val &= static_cast(0xffffffff); + } + /* 0x00000000HHHH0000 or 0x000000000000HHHH, return true */ + return ((val & ((static_cast(0xffff)) << k16BitSize)) == val || + (val & ((static_cast(0xffff)) << 0)) == val); +} + bool IsBitmaskImmediate(uint64 val, uint32 bitLen) { ASSERT(val != 0, "IsBitmaskImmediate() don's accept 0 or -1"); ASSERT(static_cast(val) != -1, "IsBitmaskImmediate() don's accept 0 or -1"); @@ -77,6 +95,18 @@ bool IsBitmaskImmediate(uint64 val, uint32 bitLen) { #endif } +bool IsSingleInstructionMovable32(int64 value) { + return (IsMoveWidableImmediateCopy(static_cast(value), 32) || + IsMoveWidableImmediateCopy(~static_cast(value), 32) || + IsBitmaskImmediate(static_cast(value), 32)); +} + +bool IsSingleInstructionMovable64(int64 value) { + return (IsMoveWidableImmediateCopy(static_cast(value), 64) || + IsMoveWidableImmediateCopy(~static_cast(value), 64) || + IsBitmaskImmediate(static_cast(value), 64)); +} + bool Imm12BitValid(int64 value) { bool result = IsBitSizeImmediate(static_cast(value), kMaxImmVal12Bits, 0); // for target linux-aarch64-gnu diff --git a/src/mapleall/maple_be/include/cg/isa.h b/src/mapleall/maple_be/include/cg/isa.h index f4d69c919c..53ef48af09 100644 --- a/src/mapleall/maple_be/include/cg/isa.h +++ b/src/mapleall/maple_be/include/cg/isa.h @@ -51,6 +51,7 @@ enum MopProperty : maple::uint8 { kInsnInlineAsm, kInsnSpecialIntrisic, kInsnIsNop, + kInsnIsComment, }; using regno_t = uint32_t; #define ISABSTRACT 1ULL @@ -83,6 +84,7 @@ using regno_t = uint32_t; #define INLINEASM 
(1ULL << kInsnInlineAsm) #define SPINTRINSIC (1ULL << kInsnSpecialIntrisic) #define ISNOP (1ULL << kInsnIsNop) +#define ISCOMMENT (1ULL << kInsnIsComment) constexpr maplebe::regno_t kInvalidRegNO = 0; /* @@ -254,6 +256,9 @@ struct InsnDesc { bool IsSpecialIntrinsic() const { return (properties & SPINTRINSIC) != 0; } + bool IsComment() const { + return properties & ISCOMMENT; + } MOperator GetOpc() const { return opc; } diff --git a/src/mapleall/maple_be/include/cg/isel.h b/src/mapleall/maple_be/include/cg/isel.h index 5490ec3462..9e83f4b38d 100644 --- a/src/mapleall/maple_be/include/cg/isel.h +++ b/src/mapleall/maple_be/include/cg/isel.h @@ -54,9 +54,9 @@ class MPISel { Operand* SelectSub(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); Operand* SelectNeg(const UnaryNode &node, Operand &opnd0, const BaseNode &parent); Operand* SelectCvt(const BaseNode &parent, const TypeCvtNode &node, Operand &opnd0); - Operand* SelectExtractbits(const BaseNode &parent, const ExtractbitsNode &node, Operand &opnd0); Operand *SelectDepositBits(const DepositbitsNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); - virtual Operand *SelectAbs(UnaryNode &node, Operand &opnd0); + virtual Operand* SelectExtractbits(const BaseNode &parent, ExtractbitsNode &node, Operand &opnd0); + virtual Operand *SelectAbs(UnaryNode &node, Operand &opnd0, const BaseNode &parent) = 0; Operand *SelectAlloca(UnaryNode &node, Operand &opnd0); Operand *SelectCGArrayElemAdd(BinaryNode &node, const BaseNode &parent); ImmOperand *SelectIntConst(MIRIntConst &intConst, PrimType primType) const; @@ -67,8 +67,8 @@ class MPISel { void SelectShift(Operand &resOpnd, Operand &opnd0, Operand &opnd1, Opcode shiftDirect, PrimType opnd0Type, PrimType opnd1Type); void SelectBand(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); - virtual void SelectReturn(NaryStmtNode &retNode, Operand &opnd) = 0; - virtual void SelectReturn() = 0; + virtual void 
SelectReturn(NaryStmtNode &retNode) = 0; + virtual void SelectReturn(bool noOpnd) = 0; virtual void SelectIntAggCopyReturn(MemOperand &symbolMem, uint64 aggSize) = 0; virtual void SelectAggIassign(IassignNode &stmt, Operand &addrOpnd, Operand &opndRhs) = 0; virtual void SelectAggCopy(MemOperand &lhs, MemOperand &rhs, uint32 copySize) = 0; @@ -78,13 +78,13 @@ class MPISel { virtual void SelectCall(CallNode &callNode) = 0; virtual void SelectIcall(IcallNode &icallNode, Operand &opnd0) = 0; virtual void SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) = 0; - virtual Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; - virtual Operand *SelectFloatingConst(MIRConst &floatingConst, PrimType primType) const = 0; + virtual Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd1, const BaseNode &parent) = 0; + virtual Operand *SelectFloatingConst(MIRConst &floatingConst, PrimType primType, const BaseNode &parent) const = 0; virtual Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent) = 0; virtual Operand *SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) = 0; virtual Operand *SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) = 0; virtual Operand &ProcessReturnReg(PrimType primType, int32 sReg) = 0 ; - virtual void SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode, Operand &opnd0) = 0; + virtual void SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode) = 0; Operand *SelectBior(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); Operand *SelectBxor(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); Operand *SelectIread(const BaseNode &parent, const IreadNode &expr, int extraOffset = 0); @@ -98,13 +98,40 @@ class MPISel { virtual Operand *SelectStrLiteral(ConststrNode &constStr) = 0; virtual Operand *SelectCclz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; virtual Operand *SelectCctz(IntrinsicopNode &node, 
Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCsin(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCsinh(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCasin(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCcos(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCcosh(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCacos(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCatan(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; virtual Operand *SelectCexp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectClog(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectClog10(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCsinf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCsinhf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCasinf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCcosf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCcoshf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCacosf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCatanf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCexpf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectClogf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectClog10f(IntrinsicopNode &node, Operand &opnd0, 
const BaseNode &parent) = 0; + virtual Operand *SelectCffs(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCmemcmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCstrlen(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCstrcmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCstrncmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCstrchr(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCstrrchr(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; virtual void SelectAsm(AsmNode &node) = 0; - virtual void SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRhs) = 0; + virtual void SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &rOpnd, const DassignNode &s) = 0; Operand *SelectBnot(const UnaryNode &node, Operand &opnd0, const BaseNode &parent); Operand *SelectMin(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); Operand *SelectMax(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); Operand *SelectRetype(TypeCvtNode &node, Operand &opnd0); + void SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); template Operand *SelectLiteral(T &c, MIRFunction &func, uint32 labelIdx) const { @@ -130,10 +157,8 @@ class MPISel { protected: MemPool *isMp; CGFunc *cgFunc; - void SelectCopy(Operand &dest, Operand &src, PrimType toType, PrimType fromType); - void SelectCopy(Operand &dest, Operand &src, PrimType toType); - RegOperand &SelectCopy2Reg(Operand &src, PrimType toType, PrimType fromType); - RegOperand &SelectCopy2Reg(Operand &src, PrimType toType); + void SelectCopy(Operand &dest, Operand &src, PrimType toType, PrimType fromType = PTY_unknown); + RegOperand 
&SelectCopy2Reg(Operand &src, PrimType toType, PrimType fromType = PTY_unknown); void SelectIntCvt(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType); void SelectCvtInt2Float(RegOperand &resOpnd, Operand &origOpnd0, PrimType toType, PrimType fromType); void SelectFloatCvt(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType); @@ -143,12 +168,23 @@ class MPISel { MirTypeInfo GetMirTypeInfoFormFieldIdAndMirType(FieldID fieldId, MIRType *mirType); MirTypeInfo GetMirTypeInfoFromMirNode(const BaseNode &node); MemOperand *GetOrCreateMemOpndFromIreadNode(const IreadNode &expr, PrimType primType, int offset); + + virtual void SelectCvtFloat2Float(Operand &resOpnd, Operand &srcOpnd, PrimType fromType, PrimType toType) { + CHECK_FATAL(false, "NYI"); + } + virtual void SelectCvtFloat2Int(Operand &resOpnd, Operand &srcOpnd, PrimType itype, PrimType ftype) { + CHECK_FATAL(false, "NYI"); + } + virtual bool IsSymbolRequireIndirection(const MIRSymbol &symbol) { + return false; + } private: StmtNode *HandleFuncEntry(); - void HandleFuncExit() const; void SelectDassign(StIdx stIdx, FieldID fieldId, PrimType rhsPType, Operand &opndRhs); void SelectDassignStruct(MIRSymbol &symbol, MemOperand &symbolMem, Operand &opndRhs); - virtual MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId = 0) const = 0; + virtual void HandleFuncExit() const = 0; + virtual MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId = 0, + RegOperand *baseReg = nullptr) = 0; virtual MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uint32 opndSize, int64 offset) const = 0; virtual Operand &GetTargetRetOperand(PrimType primType, int32 sReg) = 0; void SelectBasicOp(Operand &resOpnd, Operand &opnd0, Operand &opnd1, MOperator mOp, PrimType primType); @@ -162,7 +198,6 @@ class MPISel { void SelectBnot(Operand &resOpnd, Operand &opnd0, PrimType primType); void SelectBior(Operand &resOpnd, Operand &opnd0, 
Operand &opnd1, PrimType primType); void SelectExtractbits(RegOperand &resOpnd, RegOperand &opnd0, uint8 bitOffset, uint8 bitSize, PrimType primType); - void SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); virtual RegOperand &GetTargetBasicPointer(PrimType primType) = 0; virtual RegOperand &GetTargetStackPointer(PrimType primType) = 0; void SelectMin(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); diff --git a/src/mapleall/maple_be/include/cg/reg_info.h b/src/mapleall/maple_be/include/cg/reg_info.h index 8a1100c12d..8099522b46 100644 --- a/src/mapleall/maple_be/include/cg/reg_info.h +++ b/src/mapleall/maple_be/include/cg/reg_info.h @@ -20,6 +20,121 @@ namespace maplebe { constexpr size_t kSpillMemOpndNum = 4; +constexpr uint32 kBaseVirtualRegNO = 200; /* avoid conflicts between virtual and physical */ +constexpr uint32 kRegIncrStepLen = 80; /* reg number increase step length */ + +class VirtualRegNode { + public: + VirtualRegNode() = default; + + VirtualRegNode(RegType type, uint32 size) + : regType(type), size(size), regNO(kInvalidRegNO) {} + + virtual ~VirtualRegNode() = default; + + void AssignPhysicalRegister(regno_t phyRegNO) { + regNO = phyRegNO; + } + + RegType GetType() const { + return regType; + } + + uint32 GetSize() const { + return size; + } + + private: + RegType regType = kRegTyUndef; + uint32 size = 0; /* size in bytes */ + regno_t regNO = kInvalidRegNO; /* physical register assigned by register allocation */ +}; + +class VregInfo { + public: + /* Only one place to allocate vreg within cg. + 'static' can be removed and initialized here if only allocation is from only one source. */ + static uint32 virtualRegCount; + static uint32 maxRegCount; + static std::vector vRegTable; + static std::unordered_map vRegOperandTable; + + uint32 GetNextVregNO(RegType type, uint32 size) { + /* when vReg reaches maxRegCount, maxRegCount limit adds 80 every time */ + /* and vRegTable increases 80 elements.
*/ + if (virtualRegCount >= maxRegCount) { + ASSERT(virtualRegCount < maxRegCount + 1, "MAINTAIN FAILED"); + maxRegCount += kRegIncrStepLen; + VRegTableResize(maxRegCount); + } +#if TARGAARCH64 || TARGX86_64 || TARGRISCV64 + if (size < k4ByteSize) { + size = k4ByteSize; + } +#if TARGAARCH64 + /* cannot handle 128 size register */ + if (type == kRegTyInt && size > k8ByteSize) { + size = k8ByteSize; + } +#endif + ASSERT(size == k4ByteSize || size == k8ByteSize || size == k16ByteSize, "check size"); +#endif + VRegTableValuesSet(virtualRegCount, type, size); + + uint32 temp = virtualRegCount; + ++virtualRegCount; + return temp; + } + void Inc(uint32 v) { + virtualRegCount += v; + } + uint32 GetCount() const { + return virtualRegCount; + } + void SetCount(uint32 v) { + /* Vreg number can only increase. */ + if (virtualRegCount < v) { + virtualRegCount = v; + } + } + + /* maxRegCount related stuff */ + uint32 GetMaxRegCount() const { + return maxRegCount; + } + void SetMaxRegCount(uint32 num) { + maxRegCount = num; + } + void IncMaxRegCount(uint32 num) { + maxRegCount += num; + } + + /* vRegTable related stuff */ + void VRegTableResize(uint32 sz) { + vRegTable.resize(sz); + } + uint32 VRegTableSize() const { + return vRegTable.size(); + } + uint32 VRegTableGetSize(uint32 idx) const { + return vRegTable[idx].GetSize(); + } + RegType VRegTableGetType(uint32 idx) const { + return vRegTable[idx].GetType(); + } + VirtualRegNode &VRegTableElementGet(uint32 idx) { + return vRegTable[idx]; + } + void VRegTableElementSet(uint32 idx, VirtualRegNode *node) { + vRegTable[idx] = *node; + } + void VRegTableValuesSet(uint32 idx, RegType rt, uint32 sz) { + new (&vRegTable[idx]) VirtualRegNode(rt, sz); + } + void VRegOperandTableSet(regno_t regNO, RegOperand *rp) { + vRegOperandTable[regNO] = rp; + } +}; class RegisterInfo { public: diff --git a/src/mapleall/maple_be/include/cg/standardize.h b/src/mapleall/maple_be/include/cg/standardize.h index dac0e28702..322497a9e3 100644 --- 
a/src/mapleall/maple_be/include/cg/standardize.h +++ b/src/mapleall/maple_be/include/cg/standardize.h @@ -41,6 +41,10 @@ class Standardize { void DoStandardize(); + CGFunc *GetCgFunc() { + return cgFunc; + } + protected: void SetAddressMapping(bool needMapping) { needAddrMapping = needMapping; @@ -48,16 +52,17 @@ class Standardize { bool NeedAddressMapping(const Insn &insn) { /* Operand number for two addressing mode is 2 */ /* and 3 for three addressing mode */ - needAddrMapping = (insn.GetOperandSize() > 2) || (insn.IsUnaryOp()); - return needAddrMapping; + return needAddrMapping && ((insn.GetOperandSize() > 2) || (insn.IsUnaryOp())); } private: + virtual void Legalize() {}; virtual void StdzMov(Insn &insn) = 0; virtual void StdzStrLdr(Insn &insn) = 0; virtual void StdzBasicOp(Insn &insn) = 0; - virtual void StdzUnaryOp(Insn &insn, CGFunc &cgFunc) = 0; - virtual void StdzCvtOp(Insn &insn, CGFunc &cgFunc) = 0; - virtual void StdzShiftOp(Insn &insn, CGFunc &cgFunc) = 0; + virtual void StdzUnaryOp(Insn &insn) = 0; + virtual void StdzCvtOp(Insn &insn) = 0; + virtual void StdzShiftOp(Insn &insn) = 0; + virtual void StdzCommentOp(Insn &insn) = 0; CGFunc *cgFunc; bool needAddrMapping = false; }; diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_MPISel.h b/src/mapleall/maple_be/include/cg/x86_64/x64_MPISel.h index dba290b0e9..16751bf209 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_MPISel.h +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_MPISel.h @@ -24,8 +24,9 @@ class X64MPIsel : public MPISel { public: X64MPIsel(MemPool &mp, AbstractIRBuilder &aIRBuilder, CGFunc &f) : MPISel(mp, aIRBuilder, f) {} ~X64MPIsel() override = default; - void SelectReturn(NaryStmtNode &retNode, Operand &opnd) override; - void SelectReturn() override; + void HandleFuncExit() const override; + void SelectReturn(NaryStmtNode &retNode) override; + void SelectReturn(bool noOpnd) override; void SelectCall(CallNode &callNode) override; void SelectIcall(IcallNode &icallNode, 
Operand &opnd0) override; Operand &ProcessReturnReg(PrimType primType, int32 sReg) override; @@ -33,14 +34,14 @@ class X64MPIsel : public MPISel { Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent) override; Operand *SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) override; Operand *SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) override; - Operand *SelectFloatingConst(MIRConst &floatingConst, PrimType primType) const override; + Operand *SelectFloatingConst(MIRConst &floatingConst, PrimType primType, const BaseNode &parent) const override; void SelectGoto(GotoNode &stmt) override; void SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) override; void SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd, Operand &opndRhs) override; - void SelectAggDassign(maplebe::MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRhs) override; + void SelectAggDassign(maplebe::MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &rOpnd, const DassignNode &s) override; void SelectAggCopy(MemOperand &lhs, MemOperand &rhs, uint32 copySize) override; void SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) override; - void SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode, Operand &opnd0) override; + void SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode) override; void SelectIgoto(Operand &opnd0) override; Operand *SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; Operand *SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; @@ -55,10 +56,38 @@ class X64MPIsel : public MPISel { Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; Operand *SelectCclz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; Operand *SelectCctz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCsin(IntrinsicopNode &node, Operand &opnd0, const 
BaseNode &parent) override; + Operand *SelectCsinh(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCasin(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCcos(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCcosh(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCacos(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCatan(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; Operand *SelectCexp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectClog(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectClog10(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCsinf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCsinhf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCasinf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCcosf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCcoshf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCacosf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCatanf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCexpf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectClogf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectClog10f(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCffs(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCmemcmp(IntrinsicopNode &node, 
Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrlen(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrcmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrncmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrchr(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrrchr(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectAbs(UnaryNode &node, Operand &opnd0, const BaseNode &parent) override; void SelectAsm(AsmNode &node) override; private: - MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId = 0) const override; + MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId = 0, + RegOperand *baseReg = nullptr) override; MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uint32 opndSize, int64 offset) const override; Insn &AppendCall(x64::X64MOP_t mOp, Operand &targetOpnd, ListOperand ¶mOpnds, ListOperand &retOpnds); @@ -80,8 +109,8 @@ class X64MPIsel : public MPISel { RegOperand &GetTargetBasicPointer(PrimType primType) override; std::tuple GetMemOpndInfoFromAggregateNode(BaseNode &argExpr); void SelectParmListForAggregate(BaseNode &argExpr, X64CallConvImpl &parmLocator, bool isArgUnused); - void CreateCallStructParamPassByReg(MemOperand &memOpnd, regno_t regNo, uint32 parmNum); - void CreateCallStructParamPassByStack(MemOperand &addrOpnd, int32 symSize, int32 baseOffset); + void CreateCallStructParamPassByReg(const MemOperand &memOpnd, regno_t regNo, uint32 parmNum); + void CreateCallStructParamPassByStack(const MemOperand &addrOpnd, uint32 symSize, int32 baseOffset); void SelectAggCopyReturn(const MIRSymbol &symbol, MIRType &symbolType, uint64 symbolSize); uint32 GetAggCopySize(uint32 offset1, uint32 offset2, uint32 alignment) const; bool IsParamStructCopy(const 
MIRSymbol &symbol); diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_abstract_mapping.def b/src/mapleall/maple_be/include/cg/x86_64/x64_abstract_mapping.def index 3880b3cd5a..106b4d0b89 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_abstract_mapping.def +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_abstract_mapping.def @@ -97,7 +97,7 @@ DEFINE_MAPPING(abstract::MOP_neg_16, x64::MOP_negw_r) DEFINE_MAPPING(abstract::MOP_neg_32, x64::MOP_negl_r) DEFINE_MAPPING(abstract::MOP_neg_64, x64::MOP_negq_r) -/* CvtOp */ +/* CvtOp expand */ DEFINE_MAPPING(abstract::MOP_zext_rr_16_8, x64::MOP_movzbw_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_16_8, x64::MOP_movsbw_r_r) DEFINE_MAPPING(abstract::MOP_zext_rr_32_8, x64::MOP_movzbl_r_r) @@ -110,19 +110,32 @@ DEFINE_MAPPING(abstract::MOP_zext_rr_64_16, x64::MOP_movzwl_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_64_16, x64::MOP_movswq_r_r) DEFINE_MAPPING(abstract::MOP_zext_rr_64_32, x64::MOP_movl_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_64_32, x64::MOP_movslq_r_r) -DEFINE_MAPPING(abstract::MOP_sext_rr_64_32, x64::MOP_movslq_r_r) + +/* CvtOp truncate */ +DEFINE_MAPPING(abstract::MOP_zext_rr_8_16, x64::MOP_movzbw_r_r) +DEFINE_MAPPING(abstract::MOP_sext_rr_8_16, x64::MOP_movsbw_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_8_32, x64::MOP_movzbl_r_r) +DEFINE_MAPPING(abstract::MOP_sext_rr_8_32, x64::MOP_movsbl_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_16_32, x64::MOP_movzwl_r_r) +DEFINE_MAPPING(abstract::MOP_sext_rr_16_32, x64::MOP_movswl_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_8_64, x64::MOP_movzbl_r_r) +DEFINE_MAPPING(abstract::MOP_sext_rr_8_64, x64::MOP_movsbq_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_16_64, x64::MOP_movzwl_r_r) +DEFINE_MAPPING(abstract::MOP_sext_rr_16_64, x64::MOP_movswq_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_32_64, x64::MOP_movl_r_r) +DEFINE_MAPPING(abstract::MOP_sext_rr_32_64, x64::MOP_movslq_r_r) /* Floating CvtOp int2float */ -DEFINE_MAPPING(abstract::MOP_cvt_fr_u64, x64::MOP_cvtsi2sdq_r) 
-DEFINE_MAPPING(abstract::MOP_cvt_fr_u32, x64::MOP_cvtsi2ssq_r) -DEFINE_MAPPING(abstract::MOP_cvt_fr_i32, x64::MOP_cvtsi2ssl_r) -DEFINE_MAPPING(abstract::MOP_cvt_fr_i64, x64::MOP_cvtsi2sdq_r) +DEFINE_MAPPING(abstract::MOP_cvt_f64_u64, x64::MOP_cvtsi2sdq_r) +DEFINE_MAPPING(abstract::MOP_cvt_f32_u32, x64::MOP_cvtsi2ssq_r) +DEFINE_MAPPING(abstract::MOP_cvt_f32_i32, x64::MOP_cvtsi2ssl_r) +DEFINE_MAPPING(abstract::MOP_cvt_f64_i64, x64::MOP_cvtsi2sdq_r) /* Floating CvtOp float2int */ -DEFINE_MAPPING(abstract::MOP_cvt_rf_u32, x64::MOP_cvttss2siq_r) -DEFINE_MAPPING(abstract::MOP_cvt_rf_u64, x64::MOP_cvttsd2siq_r) -DEFINE_MAPPING(abstract::MOP_cvt_rf_i32, x64::MOP_cvttss2sil_r) -DEFINE_MAPPING(abstract::MOP_cvt_rf_i64, x64::MOP_cvttsd2siq_r) +DEFINE_MAPPING(abstract::MOP_cvt_u32_f32, x64::MOP_cvttss2siq_r) +DEFINE_MAPPING(abstract::MOP_cvt_u64_f64, x64::MOP_cvttsd2siq_r) +DEFINE_MAPPING(abstract::MOP_cvt_i32_f32, x64::MOP_cvttss2sil_r) +DEFINE_MAPPING(abstract::MOP_cvt_i64_f64, x64::MOP_cvttsd2siq_r) /* Floating CvtOp float2float */ DEFINE_MAPPING(abstract::MOP_cvt_ff_64_32, x64::MOP_cvtss2sd_r) diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_cgfunc.h b/src/mapleall/maple_be/include/cg/x86_64/x64_cgfunc.h index f4a96484da..bd8b9d5850 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_cgfunc.h +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_cgfunc.h @@ -49,7 +49,7 @@ class X64CGFunc : public CGFunc { void SelectAbort() override; void SelectAssertNull(UnaryStmtNode &stmt) override; void SelectAsm(AsmNode &node) override; - void SelectAggDassign(DassignNode &stmt) override; + void SelectAggDassign(const DassignNode &stmt) override; void SelectIassign(IassignNode &stmt) override; void SelectIassignoff(IassignoffNode &stmt) override; void SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) override; diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_standardize.h b/src/mapleall/maple_be/include/cg/x86_64/x64_standardize.h index 46353bc7e0..dd84920da6 
100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_standardize.h +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_standardize.h @@ -31,10 +31,11 @@ class X64Standardize : public Standardize { void StdzMov(Insn &insn) override; void StdzStrLdr(Insn &insn) override; void StdzBasicOp(Insn &insn) override; - void StdzUnaryOp(Insn &insn, CGFunc &cgFunc) override; - void StdzCvtOp(Insn &insn, CGFunc &cgFunc) override; - void StdzShiftOp(Insn &insn, CGFunc &cgFunc) override; - void StdzFloatingNeg(Insn &insn, CGFunc &cgFunc); + void StdzUnaryOp(Insn &insn) override; + void StdzCvtOp(Insn &insn) override; + void StdzShiftOp(Insn &insn) override; + void StdzFloatingNeg(Insn &insn); + void StdzCommentOp(Insn &insn) override; }; } #endif /* MAPLEBE_INCLUDEX_64_STANDARDIZE_H */ diff --git a/src/mapleall/maple_be/src/be/lower.cpp b/src/mapleall/maple_be/src/be/lower.cpp index 8713b9ec1c..c33e843ed0 100644 --- a/src/mapleall/maple_be/src/be/lower.cpp +++ b/src/mapleall/maple_be/src/be/lower.cpp @@ -839,7 +839,7 @@ StmtNode *CGLowerer::WriteBitField(const std::pair &byteBitOffsets BaseNode *CGLowerer::ReadBitField(const std::pair &byteBitOffsets, const MIRBitFieldType *fieldType, BaseNode *baseAddr) { auto bitSize = fieldType->GetFieldSize(); - auto primType = fieldType->GetPrimType(); + auto primType = GetRegPrimType(fieldType->GetPrimType()); auto byteOffset = byteBitOffsets.first; auto bitOffset = byteBitOffsets.second; auto *builder = mirModule.GetMIRBuilder(); diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_MPISel.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_MPISel.cpp index 72ba534f1b..5b679a209c 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_MPISel.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_MPISel.cpp @@ -11,4 +11,750 @@ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR * FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. 
- */ \ No newline at end of file + */ + +#include "aarch64_memlayout.h" +#include "aarch64_cgfunc.h" +#include "aarch64_cg.h" +#include "isel.h" +#include "aarch64_MPISel.h" + +namespace maplebe { +/* local Handle functions in isel, do not delete or move */ +void HandleGoto(StmtNode &stmt, MPISel &iSel); +void HandleLabel(StmtNode &stmt, const MPISel &iSel); + +void AArch64MPIsel::HandleFuncExit() const { + BlockNode *block = cgFunc->GetFunction().GetBody(); + ASSERT(block != nullptr, "get func body block failed in CGFunc::GenerateInstruction"); + cgFunc->GetCurBB()->SetLastStmt(*block->GetLast()); + /* Set lastbb's frequency */ + cgFunc->SetLastBB(*cgFunc->GetCurBB()); +} + +/* Field-ID 0 is assigned to the top level structure. (Field-ID also defaults to 0 if it is not a structure.) */ +MemOperand &AArch64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId, RegOperand *baseReg) { + PrimType symType; + uint64 fieldOffset = 0; + bool isCopy = IsParamStructCopy(symbol); + if (fieldId == 0) { + symType = symbol.GetType()->GetPrimType(); + } else { + MIRType *mirType = symbol.GetType(); + ASSERT((mirType->IsMIRStructType() || mirType->IsMIRUnionType()), "non-structure"); + MIRStructType *structType = static_cast(mirType); + symType = structType->GetFieldType(fieldId)->GetPrimType(); + if (baseReg || !isCopy) { + fieldOffset = static_cast(cgFunc->GetBecommon().GetFieldOffset(*structType, fieldId).first); + } + } + uint32 opndSz = (symType == PTY_agg) ? k64BitSize : GetPrimTypeBitSize(symType); + if (isCopy) { + opndSz = (baseReg) ? 
opndSz : k64BitSize; + } + if (baseReg) { + AArch64CGFunc *a64func = static_cast(cgFunc); + OfstOperand *ofstOpnd = &a64func->GetOrCreateOfstOpnd(fieldOffset, k32BitSize); + return *a64func->CreateMemOperand(opndSz, *baseReg, *ofstOpnd); + } else { + return GetOrCreateMemOpndFromSymbol(symbol, opndSz, fieldOffset); + } +} +MemOperand &AArch64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uint32 opndSize, int64 offset) const { + return static_cast(cgFunc)->GetOrCreateMemOpnd(symbol, offset, opndSize); +} + +Operand *AArch64MPIsel::SelectFloatingConst(MIRConst &mirConst, PrimType primType, const BaseNode &parent) const { + CHECK_FATAL(primType == PTY_f64 || primType == PTY_f32, "wrong const"); + AArch64CGFunc *a64Func = static_cast(cgFunc); + if (primType == PTY_f64) { + auto *dblConst = safe_cast(mirConst); + return a64Func->HandleFmovImm(primType, dblConst->GetIntValue(), *dblConst, parent); + } else { + auto *floatConst = safe_cast(mirConst); + return a64Func->HandleFmovImm(primType, floatConst->GetIntValue(), *floatConst, parent); + } +} + +void AArch64MPIsel::SelectReturn(NaryStmtNode &retNode) { + ASSERT(retNode.NumOpnds() <= 1, "NYI return nodes number > 1"); + Operand *opnd = nullptr; + if (retNode.NumOpnds() != 0) { + if (!cgFunc->GetFunction().StructReturnedInRegs()) { + opnd = cgFunc->HandleExpr(retNode, *retNode.Opnd(0)); + } else { + cgFunc->SelectReturnSendOfStructInRegs(retNode.Opnd(0)); + } + } + cgFunc->SelectReturn(opnd); +} + +void AArch64MPIsel::SelectReturn(bool noOpnd) { + /* if return operand exist, cgFunc->SelectReturn will generate it */ + if (noOpnd) { + MOperator mOp = MOP_xuncond; + LabelOperand &targetOpnd = cgFunc->GetOrCreateLabelOperand(cgFunc->GetReturnLabel()->GetLabelIdx()); + Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, AArch64CG::kMd[mOp]); + jmpInsn.AddOpndChain(targetOpnd); + cgFunc->GetCurBB()->AppendInsn(jmpInsn); + } +} + +void AArch64MPIsel::CreateCallStructParamPassByStack(const MemOperand 
&memOpnd, uint32 symSize, int32 baseOffset) { + uint32 copyTime = RoundUp(symSize, GetPointerSize()) / GetPointerSize(); + for (int32 i = 0; i < copyTime; ++i) { + MemOperand &addrMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize); + addrMemOpnd.SetBaseRegister(*memOpnd.GetBaseRegister()); + ImmOperand &newImmOpnd = static_cast(*memOpnd.GetOffsetOperand()->Clone(*cgFunc->GetMemoryPool())); + newImmOpnd.SetValue(newImmOpnd.GetValue() + i * GetPointerSize()); + addrMemOpnd.SetOffsetOperand(newImmOpnd); + RegOperand &spOpnd = cgFunc->GetOpndBuilder()->CreatePReg(RSP, k64BitSize, kRegTyInt); + Operand &stMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(spOpnd, + (baseOffset + i * GetPointerSize()), k64BitSize); + SelectCopy(stMemOpnd, addrMemOpnd, PTY_u64); + } +} + +void AArch64MPIsel::CreateCallStructParamPassByReg(const MemOperand &memOpnd, regno_t regNo, uint32 parmNum) { + RegOperand &parmOpnd = cgFunc->GetOpndBuilder()->CreatePReg(regNo, k64BitSize, kRegTyInt); + MemOperand &addrMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize); + addrMemOpnd.SetBaseRegister(*memOpnd.GetBaseRegister()); + ImmOperand &newImmOpnd = static_cast(*memOpnd.GetOffsetOperand()->Clone(*cgFunc->GetMemoryPool())); + newImmOpnd.SetValue(newImmOpnd.GetValue() + parmNum * GetPointerSize()); + addrMemOpnd.SetOffsetOperand(newImmOpnd); + paramPassByReg.push_back({&parmOpnd, &addrMemOpnd, PTY_a64}); +} + +std::tuple AArch64MPIsel::GetMemOpndInfoFromAggregateNode(BaseNode &argExpr) { + /* get mirType info */ + auto [fieldId, mirType] = GetFieldIdAndMirTypeFromMirNode(argExpr); + MirTypeInfo symInfo = GetMirTypeInfoFormFieldIdAndMirType(fieldId, mirType); + /* get symbol memOpnd info */ + MemOperand *symMemOpnd = nullptr; + if (argExpr.GetOpCode() == OP_dread) { + AddrofNode &dread = static_cast(argExpr); + MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(dread.GetStIdx()); + symMemOpnd = &GetOrCreateMemOpndFromSymbol(*symbol, dread.GetFieldID()); + } else if 
(argExpr.GetOpCode() == OP_iread) { + IreadNode &iread = static_cast(argExpr); + symMemOpnd = GetOrCreateMemOpndFromIreadNode(iread, symInfo.primType, symInfo.offset); + } else { + CHECK_FATAL_FALSE("unsupported opcode"); + } + return {symMemOpnd, symInfo.size, mirType}; +} + +void AArch64MPIsel::SelectParmListForAggregate(BaseNode &argExpr, AArch64CallConvImpl &parmLocator, bool isArgUnused) { + auto [argOpnd, argSize, mirType] = GetMemOpndInfoFromAggregateNode(argExpr); + ASSERT(argOpnd->IsMemoryAccessOperand(), "wrong opnd"); + MemOperand &memOpnd = static_cast(*argOpnd); + + CCLocInfo ploc; + parmLocator.LocateNextParm(*mirType, ploc); + if (isArgUnused) { + return; + } + + /* create call struct param pass */ + if (argSize > k16ByteSize || ploc.reg0 == kRinvalid) { + CreateCallStructParamPassByStack(memOpnd, argSize, ploc.memOffset); + } else { + CHECK_FATAL(ploc.fpSize == 0, "Unknown call parameter state"); + CreateCallStructParamPassByReg(memOpnd, ploc.reg0, 0); + if (ploc.reg1 != kRinvalid) { + CreateCallStructParamPassByReg(memOpnd, ploc.reg1, kSecondReg); + } + if (ploc.reg2 != kRinvalid) { + CreateCallStructParamPassByReg(memOpnd, ploc.reg2, kThirdReg); + } + if (ploc.reg3 != kRinvalid) { + CreateCallStructParamPassByReg(memOpnd, ploc.reg3, kFourthReg); + } + } +} + +/* + * SelectParmList generates an instruction for each of the parameters + * to load the parameter value into the corresponding register. + * We return a list of registers to the call instruction because + * they may be needed in the register allocation phase.
+ */ +void AArch64MPIsel::SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds) { + AArch64CGFunc *aarch64CGFunc = static_cast(cgFunc); + aarch64CGFunc->SelectParmList(naryNode, srcOpnds); +} + +bool AArch64MPIsel::IsParamStructCopy(const MIRSymbol &symbol) { + if (symbol.GetStorageClass() == kScFormal && + cgFunc->GetBecommon().GetTypeSize(symbol.GetTyIdx().GetIdx()) > k16ByteSize) { + return true; + } + return false; +} + +bool AArch64MPIsel::IsSymbolRequireIndirection(const MIRSymbol &symbol) { + return IsParamStructCopy(symbol); +} + +void AArch64MPIsel::SelectIntAggCopyReturn(MemOperand &symbolMem, uint64 aggSize) { + (void)symbolMem; + (void)aggSize; +} + +void AArch64MPIsel::SelectAggCopy(MemOperand &lhs, MemOperand &rhs, uint32 copySize) { + (void)lhs; + (void)rhs; + (void)copySize; + CHECK_FATAL_FALSE("Invalid MPISel function"); +} + +void AArch64MPIsel::SelectLibCallNoReturn(const std::string &funcName, std::vector &opndVec, + PrimType primType) { + /* generate libcall without return value */ + std::vector pt(opndVec.size(), primType); + SelectLibCallNArg(funcName, opndVec, pt); + return; +} + +void AArch64MPIsel::SelectLibCallNArg(const std::string &funcName, std::vector &opndVec, + std::vector pt) { + std::string newName = funcName; + MIRSymbol *st = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + st->SetNameStrIdx(newName); + st->SetStorageClass(kScExtern); + st->SetSKind(kStFunc); + + /* setup the type of the callee function */ + std::vector vec; + std::vector vecAt; + for (size_t i = 1; i < opndVec.size(); ++i) { + vec.emplace_back(GlobalTables::GetTypeTable().GetTypeTable()[static_cast(pt[i])]->GetTypeIndex()); + vecAt.emplace_back(TypeAttrs()); + } + + /* only support no return function */ + MIRType *retType = GlobalTables::GetTypeTable().GetTypeTable().at(static_cast(PTY_void)); + st->SetTyIdx(cgFunc->GetBecommon().BeGetOrCreateFunctionType(retType->GetTypeIndex(), vec, vecAt)->GetTypeIndex()); + + /* setup actual parameters */ + 
ListOperand ¶mOpnds = cgFunc->GetOpndBuilder()->CreateList(); + + AArch64CallConvImpl parmLocator(cgFunc->GetBecommon()); + CCLocInfo ploc; + for (size_t i = 0; i < opndVec.size(); ++i) { + ASSERT(pt[i] != PTY_void, "primType check"); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast(pt[i])]; + Operand *stOpnd = opndVec[i]; + ASSERT(stOpnd->IsRegister(), "exp result should be reg"); + RegOperand *expRegOpnd = static_cast(stOpnd); + parmLocator.LocateNextParm(*ty, ploc); + if (ploc.reg0 != 0) { /* load to the register */ + RegOperand &parmRegOpnd = cgFunc->GetOpndBuilder()->CreatePReg(ploc.reg0, + expRegOpnd->GetSize(), cgFunc->GetRegTyFromPrimTy(pt[i])); + SelectCopy(parmRegOpnd, *expRegOpnd, pt[i]); + paramOpnds.PushOpnd(parmRegOpnd); + } + ASSERT(ploc.reg1 == 0, "SelectCall NYI"); + } + + MIRSymbol *sym = cgFunc->GetFunction().GetLocalOrGlobalSymbol(st->GetStIdx(), false); + Operand &targetOpnd = cgFunc->GetOpndBuilder()->CreateFuncNameOpnd(*sym); + ListOperand &retOpnds = cgFunc->GetOpndBuilder()->CreateList(); + Insn &callInsn = AppendCall(MOP_xbl, targetOpnd, paramOpnds, retOpnds); + + callInsn.SetRetType(Insn::kRegInt); + if (retType != nullptr) { + callInsn.SetRetSize(static_cast(retType->GetSize())); + callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType())); + } + return; +} + +RegOperand *AArch64MPIsel::PrepareMemcpyParm(MemOperand &memOperand, MOperator mOp) { + RegOperand ®Result = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + Insn &addrInsn = (cgFunc->GetInsnBuilder()->BuildInsn(mOp, AArch64CG::kMd[mOp])); + addrInsn.AddOpndChain(memOperand).AddOpndChain(regResult); + cgFunc->GetCurBB()->AppendInsn(addrInsn); + return ®Result; +} + +RegOperand *AArch64MPIsel::PrepareMemcpyParm(uint64 copySize) { + RegOperand ®Result = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + ImmOperand &sizeOpnd = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, static_cast(copySize)); + 
SelectCopy(regResult, sizeOpnd, PTY_i64); + return ®Result; +} + +void AArch64MPIsel::SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRh, const DassignNode &stmt) { + (void)lhsInfo; + (void)symbolMem; + (void)opndRh; + cgFunc->SelectAggDassign(stmt); +} + +void AArch64MPIsel::SelectAggIassign(IassignNode &stmt, Operand &addrOpnd, Operand &opndRhs) { + (void)opndRhs; + cgFunc->SelectAggIassign(stmt, addrOpnd); +} + +Insn &AArch64MPIsel::AppendCall(AArch64MOP_t mOp, Operand &targetOpnd, + ListOperand ¶mOpnds, ListOperand &retOpnds) { + Insn &callInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, AArch64CG::kMd[mOp]); + callInsn.AddOpndChain(targetOpnd).AddOpndChain(paramOpnds).AddOpndChain(retOpnds); + cgFunc->GetCurBB()->AppendInsn(callInsn); + cgFunc->GetCurBB()->SetHasCall(); + cgFunc->GetFunction().SetHasCall(); + return callInsn; +} + +void AArch64MPIsel::SelectCalleeReturn(MIRType *retType, ListOperand &retOpnds) { + if (retType == nullptr) { + return; + } + auto retSize = retType->GetSize() * kBitsPerByte; + if (retType->GetPrimType() != PTY_agg || retSize <= k128BitSize) { + if (retSize > k0BitSize) { + retOpnds.PushOpnd(cgFunc->GetOpndBuilder()->CreatePReg(R0, k64BitSize, kRegTyInt)); + } + if (retSize > k64BitSize) { + retOpnds.PushOpnd(cgFunc->GetOpndBuilder()->CreatePReg(R1, k64BitSize, kRegTyInt)); + } + } +} + +void AArch64MPIsel::SelectCall(CallNode &callNode) { + cgFunc->SelectCall(callNode); +} + +void AArch64MPIsel::SelectIcall(IcallNode &iCallNode, Operand &opnd0) { + cgFunc->SelectIcall(iCallNode, opnd0); +} + +Operand &AArch64MPIsel::ProcessReturnReg(PrimType primType, int32 sReg) { + return GetTargetRetOperand(primType, sReg); +} + +void AArch64MPIsel::SelectGoto(GotoNode &stmt) { + MOperator mOp = MOP_xuncond; + auto funcName = ".L." 
+ std::to_string(cgFunc->GetUniqueID()) + "__" + std::to_string(stmt.GetOffset()); + LabelOperand &targetOpnd = cgFunc->GetOpndBuilder()->CreateLabel(funcName.c_str(), stmt.GetOffset()); + Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, AArch64CG::kMd[mOp]); + cgFunc->GetCurBB()->AppendInsn(jmpInsn); + jmpInsn.AddOpndChain(targetOpnd); + cgFunc->SetCurBBKind(BB::kBBGoto); + return; +} + +void AArch64MPIsel::SelectIgoto(Operand &opnd0) { + CHECK_FATAL(opnd0.IsRegister(), "only register implemented!"); + MOperator mOp = MOP_xbr; + Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, AArch64CG::kMd[mOp]); + jmpInsn.AddOpndChain(opnd0); + cgFunc->GetCurBB()->AppendInsn(jmpInsn); + return; +} + +/* The second parameter in function va_start does not need to be concerned here, + * it is mainly used in proepilog */ +void AArch64MPIsel::SelectCVaStart(const IntrinsiccallNode &intrnNode) { + AArch64CGFunc *a64func = static_cast(cgFunc); + a64func->SelectCVaStart(intrnNode); +} + +void AArch64MPIsel::SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) { + MIRIntrinsicID intrinsic = intrinsiccallNode.GetIntrinsic(); + + if (intrinsic == INTRN_C_va_start) { + SelectCVaStart(intrinsiccallNode); + return; + } + if (intrinsic == INTRN_C_stack_save || intrinsic == INTRN_C_stack_restore) { + return; + } + + CHECK_FATAL_FALSE("Intrinsic %d: %s not implemented by AArch64 isel CG.", intrinsic, GetIntrinsicName(intrinsic)); +} + +void AArch64MPIsel::SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) { + MIRType *etype = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a64)); + std::vector sizeArray; + const SmallCaseVector &switchTable = rangeGotoNode.GetRangeGotoTable(); + sizeArray.emplace_back(switchTable.size()); + MemPool *memPool = cgFunc->GetMemoryPool(); + MIRArrayType *arrayType = memPool->New(etype->GetTypeIndex(), sizeArray); + MIRAggConst *arrayConst = memPool->New(cgFunc->GetMirModule(), *arrayType); + for (const auto &itPair : 
switchTable) { + LabelIdx labelIdx = itPair.second; + cgFunc->GetCurBB()->PushBackRangeGotoLabel(labelIdx); + MIRConst *mirConst = memPool->New(labelIdx, cgFunc->GetFunction().GetPuidx(), *etype); + arrayConst->AddItem(mirConst, 0); + } + MIRSymbol *lblSt = cgFunc->GetFunction().GetSymTab()->CreateSymbol(kScopeLocal); + lblSt->SetStorageClass(kScFstatic); + lblSt->SetSKind(kStConst); + lblSt->SetTyIdx(arrayType->GetTypeIndex()); + lblSt->SetKonst(arrayConst); + std::string lblStr(".L_"); + uint32 labelIdxTmp = cgFunc->GetLabelIdx(); + lblStr.append(std::to_string(cgFunc->GetUniqueID())).append("_LOCAL_CONST.").append(std::to_string(labelIdxTmp++)); + cgFunc->SetLabelIdx(labelIdxTmp); + lblSt->SetNameStrIdx(lblStr); + cgFunc->AddEmitSt(cgFunc->GetCurBB()->GetId(), *lblSt); + + ImmOperand &stOpnd = cgFunc->GetOpndBuilder()->CreateImm(*lblSt, 0, 0); + /* get index */ + PrimType srcType = rangeGotoNode.Opnd(0)->GetPrimType(); + RegOperand &opnd0 = SelectCopy2Reg(srcOpnd, srcType); + int32 minIdx = switchTable[0].first; + ImmOperand &opnd1 = cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(srcType), + -minIdx - rangeGotoNode.GetTagOffset()); + RegOperand *indexOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(srcType), kRegTyInt); + SelectAdd(*indexOpnd, opnd0, opnd1, srcType); + if (indexOpnd->GetSize() != GetPrimTypeBitSize(PTY_u64)) { + indexOpnd = static_cast(&cgFunc->SelectCopy(*indexOpnd, PTY_u64, PTY_u64)); + } + + /* load the address of the switch table */ + RegOperand &baseOpnd = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + cgFunc->GetCurBB()->AppendInsn(cgFunc->GetInsnBuilder()->BuildInsn(MOP_xadrp, baseOpnd, stOpnd)); + cgFunc->GetCurBB()->AppendInsn(cgFunc->GetInsnBuilder()->BuildInsn(MOP_xadrpl12, baseOpnd, baseOpnd, stOpnd)); + + /* load the displacement into a register by accessing memory at base + index*8 */ + AArch64CGFunc *a64func = static_cast(cgFunc); + BitShiftOperand &bitOpnd = 
a64func->CreateBitShiftOperand(BitShiftOperand::kLSL, k3BitSize, k8BitShift); + Operand *disp = static_cast(cgFunc)->CreateMemOperand(k64BitSize, baseOpnd, *indexOpnd, bitOpnd); + RegOperand &tgt = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + SelectAdd(tgt, baseOpnd, *disp, PTY_u64); + Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_xbr, AArch64CG::kMd[MOP_xbr]); + jmpInsn.AddOpndChain(tgt); + cgFunc->GetCurBB()->AppendInsn(jmpInsn); +} + +Operand *AArch64MPIsel::SelectAddrof(AddrofNode &expr, const BaseNode &parent) { + return cgFunc->SelectAddrof(expr, parent, false); +} + +Operand *AArch64MPIsel::SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) { + return &cgFunc->SelectAddrofFunc(expr, parent); +} + +Operand *AArch64MPIsel::SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) { + (void)parent; + /* adrp reg, label-id */ + uint32 instrSize = static_cast(expr.SizeOfInstr()); + PrimType primType = (instrSize == k8ByteSize) ? PTY_u64 : + (instrSize == k4ByteSize) ? PTY_u32 : + (instrSize == k2ByteSize) ? 
PTY_u16 : PTY_u8; + Operand &dst = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + ImmOperand &immOpnd = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, expr.GetOffset()); + cgFunc->GetCurBB()->AppendInsn(cgFunc->GetInsnBuilder()->BuildInsn(MOP_adrp_label, dst, immOpnd)); + return &dst; +} + +/* + * handle brfalse/brtrue op, opnd0 can be a compare node or non-compare node + * such as a dread for example + */ +void AArch64MPIsel::SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode) { + auto &condGotoNode = static_cast(stmt); + Operand *opnd0 = nullptr; + Operand *opnd1 = nullptr; + if (!kOpcodeInfo.IsCompare(condNode.GetOpCode())) { + Opcode condOp = condGotoNode.GetOpCode(); + if (condNode.GetOpCode() == OP_constval) { + auto &constValNode = static_cast(condNode); + if (((OP_brfalse == condOp) && constValNode.GetConstVal()->IsZero()) || + ((OP_brtrue == condOp) && !constValNode.GetConstVal()->IsZero())) { + auto *gotoStmt = cgFunc->GetMemoryPool()->New(OP_goto); + gotoStmt->SetOffset(condGotoNode.GetOffset()); + HandleGoto(*gotoStmt, *this); // isel's + auto *labelStmt = cgFunc->GetMemoryPool()->New(); + labelStmt->SetLabelIdx(cgFunc->CreateLabel()); + HandleLabel(*labelStmt, *this); + } + return; + } + /* 1 operand condNode, cmp it with zero */ + opnd0 = HandleExpr(stmt, condNode); // isel's + opnd1 = &cgFunc->CreateImmOperand(condNode.GetPrimType(), 0); + } else { + /* 2 operands condNode */ + opnd0 = HandleExpr(stmt, *condNode.Opnd(0)); // isel's + opnd1 = HandleExpr(stmt, *condNode.Opnd(1)); // isel's + } + cgFunc->SelectCondGoto(stmt, *opnd0, *opnd1); + cgFunc->SetCurBBKind(BB::kBBIf); +} + +Operand *AArch64MPIsel::SelectStrLiteral(ConststrNode &constStr) { + return cgFunc->SelectStrConst(*cgFunc->GetMemoryPool()->New( + constStr.GetStrIdx(), *GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a64)))); +} + +Operand &AArch64MPIsel::GetTargetRetOperand(PrimType primType, int32 sReg) { + regno_t retReg = 
0; + switch (sReg) { + case kSregRetval0: + if (IsPrimitiveFloat(primType)) { + retReg = V0; + } else { + retReg = R0; + } + break; + case kSregRetval1: + if (IsPrimitiveFloat(primType)) { + retReg = V1; + } else { + retReg = R1; + } + break; + default: + CHECK_FATAL_FALSE("GetTargetRetOperand: NIY"); + break; + } + uint32 bitSize = GetPrimTypeBitSize(primType); + RegOperand &parmRegOpnd = cgFunc->GetOpndBuilder()->CreatePReg(retReg, bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + return parmRegOpnd; +} + +Operand *AArch64MPIsel::SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + (void)parent; + PrimType dtype = node.GetPrimType(); + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(dtype)) { + resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), + cgFunc->GetRegTyFromPrimTy(dtype)); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, dtype, node.Opnd(0)->GetPrimType()); + RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, dtype, node.Opnd(1)->GetPrimType()); + SelectMpy(*resOpnd, regOpnd0, regOpnd1, dtype); + } else { + /* vector operand */ + CHECK_FATAL_FALSE("NIY"); + } + + return resOpnd; +} + +void AArch64MPIsel::SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + cgFunc->SelectMpy(resOpnd, opnd0, opnd1, primType); +} + +Operand *AArch64MPIsel::SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + return cgFunc->SelectDiv(node, opnd0, opnd1, parent); +} + +Operand *AArch64MPIsel::SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + return cgFunc->SelectRem(node, opnd0, opnd1, parent); +} + +Operand *AArch64MPIsel::SelectDivRem(RegOperand &opnd0, RegOperand &opnd1, PrimType primType, Opcode opcode) { + (void)opnd0; + (void)opnd1; + (void)primType; + (void)opcode; + CHECK_FATAL_FALSE("Invalid MPISel function"); + return nullptr; +} + +Operand *AArch64MPIsel::SelectCmpOp(CompareNode &node, Operand &opnd0, Operand 
&opnd1, const BaseNode &parent) { + return cgFunc->SelectCmpOp(node, opnd0, opnd1, parent); +} + +void AArch64MPIsel::SelectCmp(Operand &opnd0, Operand &opnd1, PrimType primType) { + (void)opnd0; + (void)opnd1; + (void)primType; + CHECK_FATAL_FALSE("Invalid MPISel function"); +} + +Operand *AArch64MPIsel::SelectSelect(TernaryNode &expr, Operand &cond, Operand &trueOpnd, Operand &falseOpnd, + const BaseNode &parent) { + return cgFunc->SelectSelect(expr, cond, trueOpnd, falseOpnd, parent); +} + +Operand *AArch64MPIsel::SelectExtractbits(const BaseNode &parent, ExtractbitsNode &node, Operand &opnd0) { + return cgFunc->SelectExtractbits(node, opnd0, parent); +} + +void AArch64MPIsel::SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + AArch64CGFunc *a64func = static_cast(cgFunc); + a64func->SelectMinOrMax(isMin, resOpnd, opnd0, opnd1, primType); +} + +Operand *AArch64MPIsel::SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrnNode, std::string name, Operand &opnd0, const BaseNode &parent) { + PrimType ptype = intrnNode.Opnd(0)->GetPrimType(); + Operand *opnd = &opnd0; + AArch64CGFunc *a64func = static_cast(cgFunc); + if (intrnNode.GetIntrinsic() == INTRN_C_ffs) { + ASSERT(intrnNode.GetPrimType() == PTY_i32, "Unexpect Size"); + return a64func->SelectAArch64ffs(*opnd, ptype); + } + if (opnd->IsMemoryAccessOperand()) { + RegOperand &ldDest = a64func->CreateRegisterOperandOfType(ptype); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(a64func->PickLdInsn(GetPrimTypeBitSize(ptype), ptype), ldDest, *opnd); + cgFunc->GetCurBB()->AppendInsn(insn); + opnd = &ldDest; + } + std::vector opndVec; + RegOperand *dst = &a64func->CreateRegisterOperandOfType(ptype); + opndVec.push_back(dst); /* result */ + opndVec.push_back(opnd); /* param 0 */ + a64func->SelectLibCall(name, opndVec, ptype, ptype); + + return dst; +} + +Operand *AArch64MPIsel::SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return 
cgFunc->SelectBswap(node, opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCctz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return cgFunc->SelectCctz(node); +} + +Operand *AArch64MPIsel::SelectCclz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return cgFunc->SelectCclz(node); +} + +Operand *AArch64MPIsel::SelectCsin(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "sin", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCsinh(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "sinh", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCasin(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "asin", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCcos(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "cos", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCcosh(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "cosh", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCacos(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "acos", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCatan(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "atan", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCexp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "exp", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectClog(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "log", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectClog10(IntrinsicopNode &node, Operand &opnd0, const BaseNode 
&parent) { + return SelectIntrinsicOpWithOneParam(node, "log10", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCsinf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "sinf", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCsinhf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "sinhf", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCasinf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "asinf", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCcosf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "cosf", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCcoshf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "coshf", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCacosf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "acosf", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCatanf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "atanf", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCexpf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "expf", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectClogf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "logf", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectClog10f(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "log10f", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCffs(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return 
SelectIntrinsicOpWithOneParam(node, "ffs", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCmemcmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL_FALSE("NYI"); + return nullptr; +} + +Operand *AArch64MPIsel::SelectCstrlen(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL_FALSE("NYI"); + return nullptr; +} + +Operand *AArch64MPIsel::SelectCstrcmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL_FALSE("NYI"); + return nullptr; +} + +Operand *AArch64MPIsel::SelectCstrncmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL_FALSE("NYI"); + return nullptr; +} + +Operand *AArch64MPIsel::SelectCstrchr(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL_FALSE("NYI"); + return nullptr; +} + +Operand *AArch64MPIsel::SelectCstrrchr(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL_FALSE("NYI"); + return nullptr; +} + +Operand *AArch64MPIsel::SelectAbs(UnaryNode &node, Operand &opnd0, const BaseNode &parent) { + return cgFunc->SelectAbs(node, opnd0); +} + +void AArch64MPIsel::SelectCvtFloat2Float(Operand &resOpnd, Operand &srcOpnd, PrimType fromType, PrimType toType) { + static_cast(cgFunc)->SelectCvtFloat2Float(resOpnd, srcOpnd, fromType, toType); +} + +void AArch64MPIsel::SelectCvtFloat2Int(Operand &resOpnd, Operand &srcOpnd, PrimType itype, PrimType ftype) { + static_cast(cgFunc)->SelectCvtFloat2Int(resOpnd, srcOpnd, itype, ftype); +} + +RegOperand &AArch64MPIsel::GetTargetStackPointer(PrimType primType) { + return cgFunc->GetOpndBuilder()->CreatePReg(RSP, GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); +} + +RegOperand &AArch64MPIsel::GetTargetBasicPointer(PrimType primType) { + return cgFunc->GetOpndBuilder()->CreatePReg(RFP, GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); +} + +void AArch64MPIsel::SelectAsm(AsmNode &node) { + cgFunc->SelectAsm(node); 
+} +} diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp index 22ada925f9..1002db7291 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp @@ -51,6 +51,20 @@ CondOperand AArch64CGFunc::ccOperands[kCcLast] = { CondOperand(CC_AL), }; +Operand *AArch64CGFunc::HandleExpr(const BaseNode &parent, BaseNode &expr) { +#ifdef NEWCG + Operand *opnd; + if (CGOptions::UseNewCg()) { + MPISel *isel = GetISel(); + opnd = isel->HandleExpr(parent, expr); + } else { + opnd = CGFunc::HandleExpr(parent, expr); + } + return opnd; +#endif + return CGFunc::HandleExpr(parent, expr); +} + namespace { constexpr int32 kSignedDimension = 2; /* signed and unsigned */ constexpr int32 kIntByteSizeDimension = 4; /* 1 byte, 2 byte, 4 bytes, 8 bytes */ @@ -1698,7 +1712,7 @@ bool AArch64CGFunc::IslhsSizeAligned(uint64 lhsSizeCovered, uint32 newAlignUsed, return false; } -void AArch64CGFunc::SelectAggDassign(DassignNode &stmt) { +void AArch64CGFunc::SelectAggDassign(const DassignNode &stmt) { MIRSymbol *lhsSymbol = GetFunction().GetLocalOrGlobalSymbol(stmt.GetStIdx()); uint32 lhsOffset = 0; MIRType *lhsType = lhsSymbol->GetType(); @@ -7314,23 +7328,23 @@ RegOperand *AArch64CGFunc::CreateVirtualRegisterOperand(regno_t vRegNO, uint32 s } RegOperand &AArch64CGFunc::CreateVirtualRegisterOperand(regno_t vRegNO) { - ASSERT((vRegOperandTable.find(vRegNO) == vRegOperandTable.end()), "already exist"); - ASSERT(vRegNO < vRegTable.size(), "index out of range"); - uint8 bitSize = static_cast((static_cast(vRegTable[vRegNO].GetSize())) * kBitsPerByte); - RegOperand *res = CreateVirtualRegisterOperand(vRegNO, bitSize, vRegTable.at(vRegNO).GetType()); - vRegOperandTable[vRegNO] = res; + ASSERT((vReg.vRegOperandTable.find(vRegNO) == vReg.vRegOperandTable.end()), "already exist"); + ASSERT(vRegNO < vReg.VRegTableSize(), "index out of range"); + uint8 bitSize = 
static_cast((static_cast(vReg.VRegTableGetSize(vRegNO))) * kBitsPerByte); + RegOperand *res = CreateVirtualRegisterOperand(vRegNO, bitSize, vReg.VRegTableGetType(vRegNO)); + vReg.vRegOperandTable[vRegNO] = res; return *res; } RegOperand &AArch64CGFunc::GetOrCreateVirtualRegisterOperand(regno_t vRegNO) { - auto it = vRegOperandTable.find(vRegNO); - return (it != vRegOperandTable.end()) ? *(it->second) : CreateVirtualRegisterOperand(vRegNO); + auto it = vReg.vRegOperandTable.find(vRegNO); + return (it != vReg.vRegOperandTable.end()) ? *(it->second) : CreateVirtualRegisterOperand(vRegNO); } RegOperand &AArch64CGFunc::GetOrCreateVirtualRegisterOperand(RegOperand ®Opnd) { regno_t regNO = regOpnd.GetRegisterNumber(); - auto it = vRegOperandTable.find(regNO); - if (it != vRegOperandTable.end()) { + auto it = vReg.vRegOperandTable.find(regNO); + if (it != vReg.vRegOperandTable.end()) { it->second->SetSize(regOpnd.GetSize()); it->second->SetRegisterNumber(regNO); it->second->SetRegisterType(regOpnd.GetRegisterType()); @@ -7339,14 +7353,14 @@ RegOperand &AArch64CGFunc::GetOrCreateVirtualRegisterOperand(RegOperand ®Opnd } else { auto *newRegOpnd = static_cast(regOpnd.Clone(*memPool)); regno_t newRegNO = newRegOpnd->GetRegisterNumber(); - if (newRegNO >= maxRegCount) { - maxRegCount = newRegNO + kRegIncrStepLen; - vRegTable.resize(maxRegCount); + if (newRegNO >= GetMaxRegNum()) { + SetMaxRegNum(newRegNO + kRegIncrStepLen); + vReg.VRegTableResize(GetMaxRegNum()); } - vRegOperandTable[newRegNO] = newRegOpnd; + vReg.vRegOperandTable[newRegNO] = newRegOpnd; VirtualRegNode *vregNode = memPool->New(newRegOpnd->GetRegisterType(), newRegOpnd->GetSize()); - vRegTable[newRegNO] = *vregNode; - vRegCount = maxRegCount; + vReg.VRegTableElementSet(newRegNO, vregNode); + vReg.SetCount(GetMaxRegNum()); return *newRegOpnd; } } @@ -9134,7 +9148,6 @@ void AArch64CGFunc::SelectReturn(Operand *opnd0) { CHECK_FATAL(false, "nyi"); } } - LabelOperand &targetOpnd = 
GetOrCreateLabelOperand(GetReturnLabel()->GetLabelIdx()); GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xuncond, targetOpnd)); } @@ -9997,7 +10010,7 @@ void AArch64CGFunc::SelectAddAfterInsn(Operand &resOpnd, Operand &opnd0, Operand MemOperand *AArch64CGFunc::AdjustMemOperandIfOffsetOutOfRange( MemOperand *memOpnd, regno_t vrNum, bool isDest, Insn &insn, AArch64reg regNum, bool &isOutOfRange) { - if (vrNum >= vRegTable.size()) { + if (vrNum >= vReg.VRegTableSize()) { CHECK_FATAL(false, "index out of range in AArch64CGFunc::AdjustMemOperandIfOffsetOutOfRange"); } uint32 dataSize = GetOrCreateVirtualRegisterOperand(vrNum).GetSize(); @@ -10053,7 +10066,7 @@ MemOperand *AArch64CGFunc::GetOrCreatSpillMem(regno_t vrNum) { auto p = spillRegMemOperands.find(vrNum); if (p == spillRegMemOperands.end()) { - if (vrNum >= vRegTable.size()) { + if (vrNum >= vReg.VRegTableSize()) { CHECK_FATAL(false, "index out of range in AArch64CGFunc::FreeSpillRegMem"); } uint32 memBitSize = k64BitSize; @@ -12554,4 +12567,34 @@ bool AArch64CGFunc::DistanceCheck(const BB &bb, LabelIdx targLabIdx, uint32 targ } CHECK_FATAL(false, "CFG error"); } + +void AArch64CGFunc::Link2ISel(MPISel *p) { + SetISel(p); + CGFunc::InitFactory(); +} + +void AArch64CGFunc::HandleFuncCfg(CGCFG *cfg) { + RemoveUnreachableBB(); + AddCommonExitBB(); + if (GetMirModule().GetSrcLang() != kSrcLangC) { + MarkCatchBBs(); + } + MarkCleanupBB(); + DetermineReturnTypeofCall(); + cfg->UnreachCodeAnalysis(); + if (GetMirModule().GetSrcLang() != kSrcLangC) { + cfg->WontExitAnalysis(); + } + CG *cg = GetCG(); + if (cg->GetCGOptions().IsLazyBinding() && cg->IsLibcore()) { + ProcessLazyBinding(); + } + if (cg->DoPatchLongBranch()) { + PatchLongBranch(); + } + if (cg->GetCGOptions().DoEnableHotColdSplit()) { + cfg->CheckCFGFreq(); + } + NeedStackProtect(); +} } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp index 
4ef9fcaf24..4271aa6277 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp @@ -204,6 +204,11 @@ bool AArch64CGPeepHole::DoSSAOptimize(BB &bb, Insn &insn) { manager->Optimize(true); break; } + case MOP_xmulrrr: + case MOP_wmulrrr: { + manager->Optimize(!cgFunc->IsAfterRegAlloc()); + break; + } default: break; } @@ -2490,6 +2495,51 @@ void RemoveMovingtoSameRegPattern::Run(BB &bb, Insn &insn) { } } +bool MulImmToShiftPattern::CheckCondition(Insn &insn) { + auto &useReg = static_cast(insn.GetOperand(kInsnThirdOpnd)); + movInsn = ssaInfo->GetDefInsn(useReg); + if (movInsn == nullptr) { + return false; + } + MOperator prevMop = movInsn->GetMachineOpcode(); + if (prevMop != MOP_wmovri32 && prevMop != MOP_xmovri64) { + return false; + } + ImmOperand &immOpnd = static_cast(movInsn->GetOperand(kInsnSecondOpnd)); + if (immOpnd.IsNegative()) { + return false; + } + int64 immVal = immOpnd.GetValue(); + /* 0 considered power of 2 */ + if ((immVal & (immVal - 1)) != 0) { + return false; + } + shiftVal = static_cast(log2(immVal)); + newMop = (prevMop == MOP_xmovri64) ? 
MOP_xlslrri6 : MOP_wlslrri5; + return true; +} + +void MulImmToShiftPattern::Run(BB &bb, Insn &insn) { + /* mov x0,imm and mul to shift */ + if (!CheckCondition(insn)) { + return; + } + auto *aarch64CGFunc = static_cast(cgFunc); + ImmOperand &shiftOpnd = aarch64CGFunc->CreateImmOperand(shiftVal, k32BitSize, false); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, insn.GetOperand(kInsnFirstOpnd), + insn.GetOperand(kInsnSecondOpnd), shiftOpnd); + bb.ReplaceInsn(insn, newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + optSuccess = true; + SetCurrInsn(&newInsn); + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(movInsn); + DumpAfterPattern(prevs, &insn, &newInsn); + } +} + void EnhanceStrLdrAArch64::Run(BB &bb, Insn &insn) { Insn *prevInsn = insn.GetPrev(); if (!cgFunc.GetMirModule().IsCModule()) { diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp index 7e6f8e5578..22c83e80bb 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp @@ -154,6 +154,9 @@ void AArch64ReachingDefinition::AddRetPseudoInsn(BB &bb) { if (regNO == kInvalidRegNO) { return; } + if (bb.GetKind() == BB::kBBGoto) { + return; /* a goto block should not have unreachable instr */ + } if (regNO == R0) { RegOperand ®Opnd = diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_standardize.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_standardize.cpp new file mode 100644 index 0000000000..79cfd3b9a3 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_standardize.cpp @@ -0,0 +1,335 @@ +/* + * Copyright (c) [2022] Futurewei Technologies, Inc. All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "aarch64_standardize.h" +#include "aarch64_isa.h" +#include "aarch64_cg.h" +#include "insn.h" + +namespace maplebe { + +using namespace abstract; +static AbstractIR2Target abstract2TargetTable[kMopLast] { + {MOP_undef, {{MOP_pseudo_none, {}, {}}}}, + + {MOP_copy_ri_8, {{MOP_wmovri32, {kAbtractReg, kAbtractImm}, {0, 1}}}}, + {MOP_copy_rr_8, {{MOP_wmovrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_ri_16, {{MOP_wmovri32, {kAbtractReg, kAbtractImm}, {0, 1}}}}, + {MOP_copy_rr_16, {{MOP_wmovrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_ri_32, {{MOP_wmovri32, {kAbtractReg, kAbtractImm}, {0, 1}}}}, + {MOP_copy_rr_32, {{MOP_wmovrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_ri_64, {{MOP_xmovri64, {kAbtractReg, kAbtractImm}, {0, 1}}}}, + {MOP_copy_rr_64, {{MOP_xmovrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {MOP_copy_fi_8, {{MOP_xvmovsr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_ff_8, {{MOP_xvmovs, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_fi_16, {{MOP_xvmovsr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_ff_16, {{MOP_xvmovs, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_fi_32, {{MOP_xvmovsr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_ff_32, {{MOP_xvmovs, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_fi_64, {{MOP_xvmovdr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_ff_64, {{MOP_xvmovd, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {MOP_zext_rr_16_8, {{MOP_xuxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_16_8, {{MOP_xsxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_zext_rr_32_8, {{MOP_xuxtb32, {kAbtractReg, kAbtractReg}, 
{0, 1}}}}, + {MOP_sext_rr_32_8, {{MOP_xsxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_zext_rr_32_16, {{MOP_xuxth32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_32_16, {{MOP_xsxth32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {MOP_zext_rr_64_8, {{MOP_xuxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_64_8, {{MOP_xsxtb64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_zext_rr_64_16, {{MOP_xuxth32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_64_16, {{MOP_xsxth64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_zext_rr_64_32, {{MOP_xuxtw64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_64_32, {{MOP_xsxtw64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {MOP_zext_rr_8_16, {{MOP_xuxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_8_16, {{MOP_xsxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_zext_rr_8_32, {{MOP_xuxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_8_32, {{MOP_xsxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_zext_rr_16_32, {{MOP_xuxth32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_16_32, {{MOP_xsxth32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {MOP_zext_rr_8_64, {{MOP_xuxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_8_64, {{MOP_xsxtb64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_zext_rr_16_64, {{MOP_xuxth32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_16_64, {{MOP_xsxth64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_zext_rr_32_64, {{MOP_xuxtw64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_32_64, {{MOP_xsxtw64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {MOP_cvt_f32_u32, {{MOP_vcvtufr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_f64_u32, {{MOP_vcvtudr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_f32_u64, {{MOP_xvcvtufr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_f64_u64, {{MOP_xvcvtudr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_f32_i32, {{MOP_vcvtfr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_f64_i32, 
{{MOP_vcvtdr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_f32_i64, {{MOP_xvcvtfr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_f64_i64, {{MOP_xvcvtdr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {MOP_cvt_u32_f32, {{MOP_vcvturf, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_u64_f32, {{MOP_xvcvturf, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_u32_f64, {{MOP_vcvturd, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_u64_f64, {{MOP_xvcvturd, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_i32_f32, {{MOP_vcvtrf, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_i64_f32, {{MOP_xvcvtrf, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_i32_f64, {{MOP_vcvtrd, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_i64_f64, {{MOP_xvcvtrd, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {MOP_cvt_ff_64_32, {{MOP_xvcvtdf, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_ff_32_64, {{MOP_xvcvtfd, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {MOP_str_8, {{MOP_wstrb, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_str_16, {{MOP_wstrh, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_str_32, {{MOP_wstr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_str_64, {{MOP_xstr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_load_8, {{MOP_wldrb, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_load_16, {{MOP_wldrh, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_load_32, {{MOP_wldr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_load_64, {{MOP_xldr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_str_f_8, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_str_f_16, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_str_f_32, {{MOP_sstr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_str_f_64, {{MOP_dstr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_load_f_8, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_load_f_16, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_load_f_32, {{MOP_sldr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_load_f_64, {{MOP_dldr, {kAbtractReg, 
kAbtractMem}, {0, 1}}}}, + + {MOP_add_8, {{MOP_waddrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_add_16, {{MOP_waddrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_add_32, {{MOP_waddrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_add_64, {{MOP_xaddrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_sub_8, {{MOP_wsubrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_sub_16, {{MOP_wsubrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_sub_32, {{MOP_wsubrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_sub_64, {{MOP_xsubrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_or_8, {{MOP_wiorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_or_16, {{MOP_wiorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_or_32, {{MOP_wiorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_or_64, {{MOP_xiorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_xor_8, {{MOP_weorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_xor_16, {{MOP_weorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_xor_32, {{MOP_weorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_xor_64, {{MOP_xeorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_and_8, {{MOP_wandrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_and_16, {{MOP_wandrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_and_32, {{MOP_wandrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_and_64, {{MOP_xandrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + + {MOP_and_f_8, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_and_f_16, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_and_f_32, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_and_f_64, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + 
{MOP_add_f_8, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_add_f_16, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_add_f_32, {{MOP_sadd, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_add_f_64, {{MOP_dadd, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_sub_f_8, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_sub_f_16, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_sub_f_32, {{MOP_ssub, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_sub_f_64, {{MOP_dsub, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + + {MOP_shl_8, {{MOP_wlslrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_shl_16, {{MOP_wlslrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_shl_32, {{MOP_wlslrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_shl_64, {{MOP_xlslrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_ashr_8, {{MOP_wasrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_ashr_16, {{MOP_wasrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_ashr_32, {{MOP_wasrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_ashr_64, {{MOP_xasrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_lshr_8, {{MOP_wlsrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_lshr_16, {{MOP_wlsrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_lshr_32, {{MOP_wlsrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_lshr_64, {{MOP_xlsrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + + {MOP_neg_8, {{MOP_winegrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_neg_16, {{MOP_winegrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_neg_32, {{MOP_winegrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_neg_64, {{MOP_xinegrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_neg_f_8, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_neg_f_16, 
{{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_neg_f_32, {{MOP_wfnegrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_neg_f_64, {{MOP_xfnegrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_not_8, {{MOP_wnotrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_not_16, {{MOP_wnotrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_not_32, {{MOP_wnotrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_not_64, {{MOP_xnotrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {MOP_comment, {{MOP_nop, {kAbtractNone}, {}}}}, +}; + +Operand *AArch64Standardize::GetInsnResult(Insn *insn) { + for (uint32 i = 0; i < insn->GetOperandSize(); ++i) { + if (insn->OpndIsDef(i)) { + return &(insn->GetOperand(i)); + } + } + return nullptr; +} + +Insn *AArch64Standardize::HandleTargetImm(Insn *insn, Insn *newInsn, uint32 idx, MOperator targetMop, uint8 order) { + const InsnDesc *md = &AArch64CG::kMd[targetMop]; + ImmOperand &immOpnd = static_cast(insn->GetOperand(idx)); + if (md->IsValidImmOpnd(immOpnd.GetValue())) { + newInsn->SetOperand(order, immOpnd); + } else { + Operand *resOpnd = GetInsnResult(insn); + CHECK_FATAL(resOpnd, "SelectTargetInsn: No result operand"); + AArch64CGFunc *a64func = static_cast(GetCgFunc()); + BB &saveCurBB = *GetCgFunc()->GetCurBB(); + a64func->GetDummyBB()->ClearInsns(); + GetCgFunc()->SetCurBB(*a64func->GetDummyBB()); + a64func->SelectCopyImm(*resOpnd, immOpnd, (resOpnd->GetSize() == k32BitSize) ? 
PTY_i32 : PTY_i64); + insn->GetBB()->InsertBeforeInsn(*a64func->GetDummyBB(), *insn); + GetCgFunc()->SetCurBB(saveCurBB); + newInsn = nullptr; + } + return newInsn; +} + +void AArch64Standardize::SelectTargetInsn(Insn *insn) { + MOperator abstractMop = insn->GetMachineOpcode(); + CHECK_FATAL(abstractMop < kMopLast, "SelectTargetInsn: abstract instruction opcode out-of-bound"); + AbstractIR2Target &entry = abstract2TargetTable[abstractMop]; + CHECK_FATAL(entry.abstractMop == abstractMop, "SelectTargetInsn: Invalid abstract instruction"); + + for (uint32 j = 0; j < entry.targetMap.size(); ++j) { + TargetMopGen &targetMopGen = entry.targetMap[j]; + MOperator targetMop = targetMopGen.targetMop; + if (targetMop == MOP_nop) { + continue; + } + Insn *newInsn = &GetCgFunc()->GetInsnBuilder()->BuildInsn(targetMop, AArch64CG::kMd[targetMop]); + newInsn->ResizeOpnds(targetMopGen.mappingOrder.size()); + for (uint32 i = 0; i < targetMopGen.mappingOrder.size(); ++i) { + uint8 order = targetMopGen.mappingOrder[i]; + switch (targetMopGen.targetOpndAction[i]) { + case kAbtractReg: + case kAbtractMem: + newInsn->SetOperand(order, insn->GetOperand(i)); + break; + case kAbtractImm: { + newInsn = HandleTargetImm(insn, newInsn, i, targetMop, order); + break; + } + case kAbtractNone: + break; + } + } + if (newInsn) { + insn->GetBB()->InsertInsnBefore(*insn, *newInsn); + } + } + insn->GetBB()->RemoveInsn(*insn); +} + +Operand *AArch64Standardize::UpdateRegister(Operand &opnd, std::map ®Map, bool allocate) { + if (!opnd.IsRegister()) { + return &opnd; + } + RegOperand ®Opnd = static_cast(opnd); + if (regOpnd.IsPhysicalRegister()) { + if (allocate && opnd.GetSize() < k32BitSize) { + opnd.SetSize(k32BitSize); + } + return &opnd; + } + if (!allocate && opnd.GetSize() >= k32BitSize) { + return &opnd; + } + regno_t regno = regOpnd.GetRegisterNumber(); + regno_t mappedRegno; + auto regItem = regMap.find(regno); + if (regItem == regMap.end()) { + if (allocate) { + return &opnd; + } + regno_t vreg 
= GetCgFunc()->NewVReg(regOpnd.GetRegisterType(), k4ByteSize); + regMap[regno] = mappedRegno = vreg; + } else { + mappedRegno = regItem->second; + } + if (!allocate) { + return &opnd; + } + return &GetCgFunc()->GetOrCreateVirtualRegisterOperand(mappedRegno); +} + +void AArch64Standardize::TraverseOperands(Insn *insn, std::map ®Map, bool allocate) { + for (uint32 i = 0; i < insn->GetOperandSize(); i++) { + Operand &opnd = insn->GetOperand(i); + if (opnd.IsList()) { + MapleList &list = static_cast(opnd).GetOperands(); + for (uint j = 0; j < list.size(); ++j) { + RegOperand *lopnd = list.front(); + list.pop_front(); + list.push_back(static_cast(UpdateRegister(*lopnd, regMap, allocate))); + } + } else if (opnd.IsMemoryAccessOperand()) { + MemOperand &mopnd = static_cast(opnd); + Operand *base = mopnd.GetBaseRegister(); + if (base) { + RegOperand *ropnd = static_cast(UpdateRegister(*base, regMap, allocate)); + mopnd.SetBaseRegister(*ropnd); + } + } else { + insn->SetOperand(i, *UpdateRegister(opnd, regMap, allocate)); + } + } +} + +void AArch64Standardize::Legalize() { + std::map regMap; + FOR_ALL_BB(bb, GetCgFunc()) { + FOR_BB_INSNS(insn, bb) { + TraverseOperands(insn, regMap, false); + } + } + FOR_ALL_BB(bb, GetCgFunc()) { + FOR_BB_INSNS(insn, bb) { + TraverseOperands(insn, regMap, true); + } + } +} + +void AArch64Standardize::StdzMov(Insn &insn) { + SelectTargetInsn(&insn); +} + +void AArch64Standardize::StdzStrLdr(Insn &insn) { + SelectTargetInsn(&insn); +} + +void AArch64Standardize::StdzBasicOp(Insn &insn) { + SelectTargetInsn(&insn); +} + +void AArch64Standardize::StdzUnaryOp(Insn &insn) { + SelectTargetInsn(&insn); +} + +void AArch64Standardize::StdzCvtOp(Insn &insn) { + SelectTargetInsn(&insn); +} + +void AArch64Standardize::StdzShiftOp(Insn &insn) { + SelectTargetInsn(&insn); +} +void AArch64Standardize::StdzCommentOp(Insn &insn) { + SelectTargetInsn(&insn); +} + +} diff --git a/src/mapleall/maple_be/src/cg/cg.cpp b/src/mapleall/maple_be/src/cg/cg.cpp index 
9fe0fc7afb..ca0aab8f38 100644 --- a/src/mapleall/maple_be/src/cg/cg.cpp +++ b/src/mapleall/maple_be/src/cg/cg.cpp @@ -20,6 +20,11 @@ using namespace maple; #define JAVALANG (mirModule->IsJavaModule()) +uint32 VregInfo::virtualRegCount = kBaseVirtualRegNO; +uint32 VregInfo::maxRegCount = 0; +std::vector VregInfo::vRegTable; +std::unordered_map VregInfo::vRegOperandTable; + void Globals::SetTarget(CG &target) { cg = ⌖ } diff --git a/src/mapleall/maple_be/src/cg/cg_cfg.cpp b/src/mapleall/maple_be/src/cg/cg_cfg.cpp index 1ee586a177..29c11b3cbc 100644 --- a/src/mapleall/maple_be/src/cg/cg_cfg.cpp +++ b/src/mapleall/maple_be/src/cg/cg_cfg.cpp @@ -992,8 +992,10 @@ void CGCFG::ReverseCriticalEdge(BB &cbb) { bool CgHandleCFG::PhaseRun(maplebe::CGFunc &f) { CGCFG *cfg = f.GetMemoryPool()->New(f); f.SetTheCFG(cfg); + cfg->MarkLabelTakenBB(); /* build control flow graph */ f.GetTheCFG()->BuildCFG(); + f.HandleFuncCfg(cfg); return false; } MAPLE_TRANSFORM_PHASE_REGISTER(CgHandleCFG, handlecfg) diff --git a/src/mapleall/maple_be/src/cg/cg_irbuilder.cpp b/src/mapleall/maple_be/src/cg/cg_irbuilder.cpp index 66309c4ab8..cdd5a70b9a 100644 --- a/src/mapleall/maple_be/src/cg/cg_irbuilder.cpp +++ b/src/mapleall/maple_be/src/cg/cg_irbuilder.cpp @@ -91,12 +91,16 @@ ImmOperand &OperandBuilder::CreateImm(const MIRSymbol &symbol, int64 offset, int *alloc.New(symbol, offset, relocs, false); } +OfstOperand &OperandBuilder::CreateOfst(int64 offset, uint32 size, MemPool *mp) { + return mp ? *mp->New(offset, size) : *alloc.New(offset, size); +} + MemOperand &OperandBuilder::CreateMem(uint32 size, MemPool *mp) { return mp ? 
*mp->New(size) : *alloc.New(size); } MemOperand &OperandBuilder::CreateMem(RegOperand &baseOpnd, int64 offset, uint32 size, MemPool *mp) { - ImmOperand &ofstOperand = CreateImm(baseOpnd.GetSize(), offset); + OfstOperand &ofstOperand = CreateOfst(offset, baseOpnd.GetSize()); if (mp != nullptr) { return *mp->New(size, baseOpnd, ofstOperand); } @@ -119,13 +123,16 @@ MemOperand &OperandBuilder::CreateMem(uint32 size, RegOperand &baseOpnd, ImmOper } RegOperand &OperandBuilder::CreateVReg(uint32 size, RegType type, MemPool *mp) { - virtualRegNum++; - regno_t vRegNO = kBaseVirtualRegNO + virtualRegNum; - return mp ? *mp->New(vRegNO, size, type) : *alloc.New(vRegNO, size, type); + regno_t vRegNO = virtualReg.GetNextVregNO(type, size / k8BitSize); + RegOperand &rp = mp ? *mp->New(vRegNO, size, type) : *alloc.New(vRegNO, size, type); + virtualReg.vRegOperandTable[vRegNO] = &rp; + return rp; } RegOperand &OperandBuilder::CreateVReg(regno_t vRegNO, uint32 size, RegType type, MemPool *mp) { - return mp ? *mp->New(vRegNO, size, type) : *alloc.New(vRegNO, size, type); + RegOperand &rp = mp ? *mp->New(vRegNO, size, type) : *alloc.New(vRegNO, size, type); + virtualReg.vRegOperandTable[vRegNO] = &rp; + return rp; } RegOperand &OperandBuilder::CreatePReg(regno_t pRegNO, uint32 size, RegType type, MemPool *mp) { diff --git a/src/mapleall/maple_be/src/cg/cg_option.cpp b/src/mapleall/maple_be/src/cg/cg_option.cpp index 32aaef97e1..e69dfc9bb5 100644 --- a/src/mapleall/maple_be/src/cg/cg_option.cpp +++ b/src/mapleall/maple_be/src/cg/cg_option.cpp @@ -119,6 +119,7 @@ bool CGOptions::doCalleeToSpill = false; bool CGOptions::doRegSavesOpt = false; bool CGOptions::useSsaPreSave = false; bool CGOptions::useSsuPreRestore = false; +bool CGOptions::useNewCg = false; bool CGOptions::replaceASM = false; bool CGOptions::generalRegOnly = false; bool CGOptions::fastMath = false; @@ -511,6 +512,10 @@ bool CGOptions::SolveOptions(bool isDebug) { opts::cg::ssupreRestore ? 
EnableSsuPreRestore() : DisableSsuPreRestore(); } + if (opts::cg::newCg.IsEnabledByUser()) { + opts::cg::newCg ? EnableNewCg() : DisableNewCg(); + } + if (opts::cg::lsraBb.IsEnabledByUser()) { SetLSRABBOptSize(opts::cg::lsraBb); } diff --git a/src/mapleall/maple_be/src/cg/cg_options.cpp b/src/mapleall/maple_be/src/cg/cg_options.cpp index 4d451e7259..2c0d0b620a 100644 --- a/src/mapleall/maple_be/src/cg/cg_options.cpp +++ b/src/mapleall/maple_be/src/cg/cg_options.cpp @@ -166,6 +166,12 @@ maplecl::Option ssupreRestore({"--ssupre-restore"}, {cgCategory}, maplecl::DisableWith("--no-ssupre-restore")); +maplecl::Option newCg({"--newcg"}, + " --newcg \tUse new CG infrastructure\n" + " --no-newcg\n", + {cgCategory}, + maplecl::DisableWith("--no-newcg")); + maplecl::Option prepeep({"--prepeep"}, " --prepeep \tPerform peephole optimization before RA\n" " --no-prepeep\n", diff --git a/src/mapleall/maple_be/src/cg/cg_ssa.cpp b/src/mapleall/maple_be/src/cg/cg_ssa.cpp index 4b4aa8f09e..f28a528e2a 100644 --- a/src/mapleall/maple_be/src/cg/cg_ssa.cpp +++ b/src/mapleall/maple_be/src/cg/cg_ssa.cpp @@ -123,7 +123,7 @@ void CGSSAInfo::RenameBB(BB &bb) { } AddRenamedBB(bb.GetId()); /* record version stack size */ - size_t tempSize = vRegStk.empty() ? allSSAOperands.size() + cgFunc->GetFirstMapleIrVRegNO() + 1 : + size_t tempSize = vRegStk.empty() ? 
allSSAOperands.size() + kBaseVirtualRegNO + 1 : vRegStk.rbegin()->first + 1; std::vector oriStackSize(tempSize, -1); for (auto it : vRegStk) { diff --git a/src/mapleall/maple_be/src/cg/cgbb.cpp b/src/mapleall/maple_be/src/cg/cgbb.cpp index 1a4b3f7216..45d9e1ae0f 100644 --- a/src/mapleall/maple_be/src/cg/cgbb.cpp +++ b/src/mapleall/maple_be/src/cg/cgbb.cpp @@ -25,6 +25,7 @@ const std::string BB::bbNames[BB::kBBLast] = { "BB_goto", "BB_igoto", "BB_ret", + "BB_noret", "BB_intrinsic", "BB_rangegoto", "BB_throw" @@ -180,6 +181,33 @@ void BB::InsertAtBeginning(BB &bb) { bb.firstInsn = bb.lastInsn = nullptr; } +void BB::InsertBeforeInsn(BB &fromBB, Insn &beforeInsn) { + if (fromBB.firstInsn == nullptr) { /* nothing to add */ + return; + } + + BB *toBB = beforeInsn.GetBB(); + FOR_BB_INSNS(insn, &fromBB) { + insn->SetBB(toBB); + } + + if (toBB->GetFirstInsn() == nullptr) { + toBB->SetFirstInsn(fromBB.GetFirstInsn()); + toBB->SetLastInsn(fromBB.GetLastInsn()); + } else { + if (beforeInsn.GetPrev()) { + beforeInsn.GetPrev()->SetNext(fromBB.GetFirstInsn()); + } else { + toBB->SetFirstInsn(fromBB.GetFirstInsn()); + } + fromBB.GetFirstInsn()->SetPrev(beforeInsn.GetPrev()); + beforeInsn.SetPrev(fromBB.GetLastInsn()); + fromBB.GetLastInsn()->SetNext(&beforeInsn); + } + fromBB.SetFirstInsn(nullptr); + fromBB.SetLastInsn(nullptr); +} + /* append all insns from bb into this bb */ void BB::InsertAtEnd(BB &bb) { if (bb.firstInsn == nullptr) { /* nothing to add */ diff --git a/src/mapleall/maple_be/src/cg/cgfunc.cpp b/src/mapleall/maple_be/src/cg/cgfunc.cpp index d80a758c8f..dfd797844b 100644 --- a/src/mapleall/maple_be/src/cg/cgfunc.cpp +++ b/src/mapleall/maple_be/src/cg/cgfunc.cpp @@ -1423,11 +1423,14 @@ void InitHandleStmtFactory() { RegisterFactoryFunction(OP_asm, HandleAsm); } +/* member of CGFunc */ +void CGFunc::InitFactory() { + InitHandleExprFactory(); +} + CGFunc::CGFunc(MIRModule &mod, CG &cg, MIRFunction &mirFunc, BECommon &beCommon, MemPool &memPool, StackMemPool &stackMp, 
MapleAllocator &allocator, uint32 funcId) - : vRegTable(allocator.Adapter()), - bbVec(allocator.Adapter()), - vRegOperandTable(allocator.Adapter()), + : bbVec(allocator.Adapter()), pRegSpillMemOperands(allocator.Adapter()), spillRegMemOperands(allocator.Adapter()), reuseSpillLocMem(allocator.Adapter()), @@ -1468,18 +1471,19 @@ CGFunc::CGFunc(MIRModule &mod, CG &cg, MIRFunction &mirFunc, BECommon &beCommon, SetHasAlloca(func.HasVlaOrAlloca()); dummyBB = CreateNewBB(); - vRegCount = firstMapleIrVRegNO + func.GetPregTab()->Size(); - firstNonPregVRegNO = vRegCount; + vReg.SetCount(kBaseVirtualRegNO + func.GetPregTab()->Size()); + firstNonPregVRegNO = vReg.GetCount(); /* maximum register count initial be increased by 1024 */ - maxRegCount = vRegCount + 1024; + SetMaxRegNum(vReg.GetCount() + 1024); if (func.GetMayWriteToAddrofStack()) { SetStackProtectInfo(kAddrofStack); } + vReg.vRegOperandTable.clear(); insnBuilder = memPool.New(memPool); opndBuilder = memPool.New(memPool, func.GetPregTab()->Size()); - vRegTable.resize(maxRegCount); + vReg.VRegTableResize(GetMaxRegNum()); /* func.GetPregTab()->_preg_table[0] is nullptr, so skip it */ ASSERT(func.GetPregTab()->PregFromPregIdx(0) == nullptr, "PregFromPregIdx(0) must be nullptr"); for (size_t i = 1; i < func.GetPregTab()->Size(); ++i) { @@ -1786,6 +1790,7 @@ void CGFunc::CreateLmbcFormalParamInfo() { AssignLmbcFormalParams(); } + void CGFunc::GenerateInstruction() { InitHandleExprFactory(); InitHandleStmtFactory(); @@ -2102,6 +2107,7 @@ void CGFunc::HandleFunction() { GenSaveMethodInfoCode(*firstBB); /* build control flow graph */ theCFG = memPool->New(*this); + theCFG->MarkLabelTakenBB(); theCFG->BuildCFG(); RemoveUnreachableBB(); AddCommonExitBB(); @@ -2110,7 +2116,6 @@ void CGFunc::HandleFunction() { } MarkCleanupBB(); DetermineReturnTypeofCall(); - theCFG->MarkLabelTakenBB(); theCFG->UnreachCodeAnalysis(); if (mirModule.GetSrcLang() == kSrcLangC) { theCFG->WontExitAnalysis(); @@ -2344,6 +2349,15 @@ bool 
CgHandleFunction::PhaseRun(maplebe::CGFunc &f) { } MAPLE_TRANSFORM_PHASE_REGISTER(CgHandleFunction, handlefunction) +bool CgPatchLongBranch::PhaseRun(maplebe::CGFunc &f) { + f.PatchLongBranch(); + if (!f.GetCG()->GetCGOptions().DoEmitCode() || f.GetCG()->GetCGOptions().DoDumpCFG()) { + f.DumpCFG(); + } + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgPatchLongBranch, patchlongbranch) + bool CgFixCFLocOsft::PhaseRun(maplebe::CGFunc &f) { if (f.GetCG()->GetCGOptions().WithDwarf()) { f.DBGFixCallFrameLocationOffsets(); diff --git a/src/mapleall/maple_be/src/cg/insn.cpp b/src/mapleall/maple_be/src/cg/insn.cpp index ff1edaec7c..3767b15ad6 100644 --- a/src/mapleall/maple_be/src/cg/insn.cpp +++ b/src/mapleall/maple_be/src/cg/insn.cpp @@ -299,7 +299,7 @@ void Insn::SetMOP(const InsnDesc &idesc) { } void Insn::Dump() const { -ASSERT(md != nullptr, "md should not be nullptr"); + ASSERT(md != nullptr, "md should not be nullptr"); LogInfo::MapleLogger() << "< " << GetId() << " > "; LogInfo::MapleLogger() << md->name << "(" << mOp << ")"; diff --git a/src/mapleall/maple_be/src/cg/isel.cpp b/src/mapleall/maple_be/src/cg/isel.cpp index a4f7594549..6f4b9ca3cd 100644 --- a/src/mapleall/maple_be/src/cg/isel.cpp +++ b/src/mapleall/maple_be/src/cg/isel.cpp @@ -18,8 +18,11 @@ #include #include "factory.h" #include "cg.h" +#include "cgfunc.h" namespace maplebe { +/* Does not support size larger than 64 bits */ +#define PTY128MOD(pty) ((pty) = (((pty) == PTY_i128) ? PTY_i64 : (((pty) == PTY_u128) ?
PTY_u64 : (pty)))) /* register, imm , memory, cond */ #define DEF_FAST_ISEL_MAPPING_INT(SIZE) \ MOperator fastIselMapI##SIZE[Operand::OperandType::kOpdPhi][Operand::OperandType::kOpdPhi] = { \ @@ -90,6 +93,13 @@ std::map> fastCvtMappingTableI = { DEF_USE_EXTEND_MAPPING_TBL(16, 32), DEF_USE_EXTEND_MAPPING_TBL(16, 64), DEF_USE_EXTEND_MAPPING_TBL(32, 64), + DEF_USE_EXTEND_MAPPING_TBL(16, 8), /* Truncate Mapping */ + DEF_USE_EXTEND_MAPPING_TBL(32, 8), + DEF_USE_EXTEND_MAPPING_TBL(64, 8), + DEF_USE_EXTEND_MAPPING_TBL(32, 16), + DEF_USE_EXTEND_MAPPING_TBL(64, 16), + DEF_USE_EXTEND_MAPPING_TBL(64, 32), + }; #undef DEF_USE_EXTEND_MAPPING_TBL #undef DEF_EXTEND_MAPPING_TBL @@ -101,7 +111,7 @@ static MOperator GetFastCvtMopI(uint32 fromSize, uint32 toSize, bool isSigned) { if (fromSize < k8BitSize || fromSize > k64BitSize) { CHECK_FATAL(false, "unsupport type"); } - /* Extend: fromSize < toSize */ + /* Extend/Truncate: fromSize != toSize */ auto tableDriven = fastCvtMappingTableI.find({fromSize, toSize}); if (tableDriven == fastCvtMappingTableI.end()) { CHECK_FATAL(false, "unsupport cvt"); @@ -190,6 +200,7 @@ void HandleLabel(StmtNode &stmt, const MPISel &iSel) { ASSERT(stmt.GetOpCode() == OP_label, "error"); auto &label = static_cast(stmt); BB *newBB = cgFunc->StartNewBBImpl(false, label); + newBB->SetKind(BB::kBBFallthru); newBB->AddLabel(label.GetLabelIdx()); cgFunc->SetLab2BBMap(static_cast(newBB->GetLabIdx()), *newBB); cgFunc->SetCurBB(*newBB); @@ -245,10 +256,9 @@ void HandleReturn(StmtNode &stmt, MPISel &iSel) { auto &retNode = static_cast(stmt); ASSERT(retNode.NumOpnds() <= 1, "NYI return nodes number > 1"); if (retNode.NumOpnds() != 0) { - Operand *opnd = iSel.HandleExpr(retNode, *retNode.Opnd(0)); - iSel.SelectReturn(retNode, *opnd); + iSel.SelectReturn(retNode); } - iSel.SelectReturn(); + iSel.SelectReturn(retNode.NumOpnds() == 0); /* return stmt will jump to the ret BB, so curBB is gotoBB */ cgFunc->SetCurBBKind(BB::kBBGoto);
cgFunc->SetCurBB(*cgFunc->StartNewBB(retNode)); @@ -280,8 +290,7 @@ void HandleCondbr(StmtNode &stmt, MPISel &iSel) { ASSERT(condNode != nullptr, "expect first operand of cond br"); /* select cmpOp Insn and get the result "opnd0". However, the opnd0 is not used * in most backend architectures */ - Operand *opnd0 = iSel.HandleExpr(stmt, *condNode); - iSel.SelectCondGoto(condGotoNode, *condNode, *opnd0); + iSel.SelectCondGoto(condGotoNode, *condNode); cgFunc->SetCurBB(*cgFunc->StartNewBB(condGotoNode)); } @@ -371,7 +380,7 @@ Operand *HandleTrunc(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { return iSel.SelectCvt(parent, static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0))); } -Operand *HandleConstVal(const BaseNode &parent [[maybe_unused]], BaseNode &expr, const MPISel &iSel) { +Operand *HandleConstVal(const BaseNode &parent, BaseNode &expr, const MPISel &iSel) { auto &constValNode = static_cast(expr); MIRConst *mirConst = constValNode.GetConstVal(); ASSERT(mirConst != nullptr, "get constval of constvalnode failed"); @@ -380,10 +389,10 @@ Operand *HandleConstVal(const BaseNode &parent [[maybe_unused]], BaseNode &expr, return iSel.SelectIntConst(*mirIntConst, constValNode.GetPrimType()); } else if (mirConst->GetKind() == kConstDoubleConst) { auto *mirDoubleConst = safe_cast(mirConst); - return iSel.SelectFloatingConst(*mirDoubleConst, constValNode.GetPrimType()); + return iSel.SelectFloatingConst(*mirDoubleConst, constValNode.GetPrimType(), parent); } else if (mirConst->GetKind() == kConstFloatConst) { auto *mirFloatConst = safe_cast(mirConst); - return iSel.SelectFloatingConst(*mirFloatConst, constValNode.GetPrimType()); + return iSel.SelectFloatingConst(*mirFloatConst, constValNode.GetPrimType(), parent); } else { CHECK_FATAL(false, "NIY"); } @@ -438,7 +447,7 @@ Operand *HandleCmp(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { } Operand *HandleAbs(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { - return 
iSel.SelectAbs(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0))); + return iSel.SelectAbs(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); } Operand *HandleAlloca(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { @@ -480,6 +489,63 @@ Operand *HandleRetype(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { Operand *HandleIntrinOp(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { auto &intrinsicopNode = static_cast(expr); switch (intrinsicopNode.GetIntrinsic()) { + case INTRN_C_sin: + return iSel.SelectCsin(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_sinh: + return iSel.SelectCsinh(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_asin: + return iSel.SelectCasin(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_cos: + return iSel.SelectCcos(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_cosh: + return iSel.SelectCcosh(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_acos: + return iSel.SelectCacos(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_atan: + return iSel.SelectCatan(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_exp: + return iSel.SelectCexp(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_log: + return iSel.SelectClog(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_log10: + return iSel.SelectClog10(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + + case INTRN_C_sinf: + return iSel.SelectCsinf(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_sinhf: + return iSel.SelectCsinhf(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_asinf: + return iSel.SelectCasinf(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_cosf: + return 
iSel.SelectCcosf(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_coshf: + return iSel.SelectCcoshf(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_acosf: + return iSel.SelectCacosf(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_atanf: + return iSel.SelectCatanf(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_expf: + return iSel.SelectCexpf(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_logf: + return iSel.SelectClogf(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_log10f: + return iSel.SelectClog10f(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + + case INTRN_C_ffs: + return iSel.SelectCffs(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_memcmp: + return iSel.SelectCmemcmp(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_strlen: + return iSel.SelectCstrlen(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_strcmp: + return iSel.SelectCstrcmp(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_strncmp: + return iSel.SelectCstrncmp(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_strchr: + return iSel.SelectCstrchr(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_strrchr: + return iSel.SelectCstrrchr(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_rev16_2: case INTRN_C_rev_4: case INTRN_C_rev_8: @@ -490,10 +556,8 @@ Operand *HandleIntrinOp(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { case INTRN_C_ctz32: case INTRN_C_ctz64: return iSel.SelectCctz(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); - case INTRN_C_exp: - return iSel.SelectCexp(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); default: - 
ASSERT(false, "NIY, unsupported intrinsicop."); + CHECK_FATAL_FALSE("NIY, unsupported intrinsicop."); return nullptr; } } @@ -578,8 +642,14 @@ Operand *MPISel::HandleExpr(const BaseNode &parent, BaseNode &expr) { void MPISel::DoMPIS() { isel::InitHandleStmtFactory(); isel::InitHandleExprFactory(); + GetCurFunc()->Link2ISel(this); + SrcPosition lastLocPos = SrcPosition(); + SrcPosition lastMplPos = SrcPosition(); StmtNode *secondStmt = HandleFuncEntry(); for (StmtNode *stmt = secondStmt; stmt != nullptr; stmt = stmt->GetNext()) { + /* insert Insn for .loc before cg for the stmt */ + GetCurFunc()->GenerateLoc(stmt, lastLocPos, lastMplPos); + auto function = CreateProductFunction(stmt->GetOpCode()); CHECK_FATAL(function != nullptr, "unsupported opCode or has been lowered before"); function(*stmt, *this); @@ -700,14 +770,21 @@ void MPISel::SelectDassign(const DassignNode &stmt, Operand &opndRhs) { /* Generate Insn */ if (rhsType == PTY_agg) { /* Agg Type */ - SelectAggDassign(symbolInfo, symbolMem, opndRhs); + SelectAggDassign(symbolInfo, symbolMem, opndRhs, stmt); return; } PrimType memType = symbolInfo.primType; if (memType == PTY_agg) { memType = PTY_a64; } - SelectCopy(symbolMem, opndRhs, memType, rhsType); + if (IsSymbolRequireIndirection(*symbol)) { + RegOperand ®Opnd = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + SelectCopy(regOpnd, symbolMem, PTY_a64, PTY_a64); + MemOperand &stMem = GetOrCreateMemOpndFromSymbol(*symbol, stmt.GetFieldID(), ®Opnd); + SelectCopy(stMem, opndRhs, memType, rhsType); + } else { + SelectCopy(symbolMem, opndRhs, memType, rhsType); + } return; } @@ -776,6 +853,7 @@ Operand *MPISel::SelectShift(const BinaryNode &node, Operand &opnd0, void MPISel::SelectShift(Operand &resOpnd, Operand &opnd0, Operand &opnd1, Opcode shiftDirect, PrimType opnd0Type, PrimType opnd1Type) { + PTY128MOD(opnd0Type); if (opnd1.IsIntImmediate() && static_cast(opnd1).GetValue() == 0) { SelectCopy(resOpnd, opnd0, opnd0Type); return; @@ -804,8 
+882,10 @@ void MPISel::SelectShift(Operand &resOpnd, Operand &opnd0, Operand &opnd1, Opcod void MPISel::SelectRegassign(RegassignNode &stmt, Operand &opnd0) { PrimType rhsType = stmt.Opnd(0)->GetPrimType(); + PTY128MOD(rhsType); PregIdx pregIdx = stmt.GetRegIdx(); PrimType regType = stmt.GetPrimType(); + PTY128MOD(regType); RegOperand ®Opnd = cgFunc->GetOpndBuilder()->CreateVReg(cgFunc->GetVirtualRegNOFromPseudoRegIdx(pregIdx), GetPrimTypeBitSize(regType), cgFunc->GetRegTyFromPrimTy(regType)); SelectCopy(regOpnd, opnd0, regType, rhsType); @@ -850,17 +930,28 @@ Operand *MPISel::SelectDread(const BaseNode &parent [[maybe_unused]], const Addr CHECK_FATAL(primType == maple::PTY_agg, "NIY"); return &symbolMem; } - /* for BasicType, load symbolVal to register. */ - RegOperand ®Opnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), - cgFunc->GetRegTyFromPrimTy(primType)); /* Generate Insn */ - SelectCopy(regOpnd, symbolMem, primType, symbolType); - return ®Opnd; + if (IsSymbolRequireIndirection(*symbol)) { + RegOperand ®Opnd = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + SelectCopy(regOpnd, symbolMem, PTY_a64, PTY_a64); + RegOperand ®Opnd1 = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + MemOperand &ldMem = GetOrCreateMemOpndFromSymbol(*symbol, expr.GetFieldID(), ®Opnd); + SelectCopy(regOpnd1, ldMem, primType, symbolType); + return ®Opnd1; + } else { + /* for BasicType, load symbolVal to register. 
*/ + RegOperand ®Opnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + SelectCopy(regOpnd, symbolMem, primType, symbolType); + return ®Opnd; + } } Operand *MPISel::SelectAdd(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent [[maybe_unused]]) { PrimType primType = node.GetPrimType(); + PTY128MOD(primType); RegOperand &resReg = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType)); RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType()); @@ -920,7 +1011,7 @@ void MPISel::SelectExtractbits(RegOperand &resOpnd, RegOperand &opnd0, uint8 bit } Operand *MPISel::SelectExtractbits(const BaseNode &parent [[maybe_unused]], - const ExtractbitsNode &node, Operand &opnd0) { + ExtractbitsNode &node, Operand &opnd0) { PrimType fromType = node.Opnd(0)->GetPrimType(); PrimType toType = node.GetPrimType(); uint8 bitSize = node.GetBitsSize(); @@ -943,31 +1034,62 @@ Operand *MPISel::SelectExtractbits(const BaseNode &parent [[maybe_unused]], } Operand *MPISel::SelectCvt(const BaseNode &parent [[maybe_unused]], const TypeCvtNode &node, Operand &opnd0) { - PrimType fromType = node.Opnd(0)->GetPrimType(); + PrimType fromType = node.FromType(); + PTY128MOD(fromType); PrimType toType = node.GetPrimType(); + PTY128MOD(toType); if (fromType == toType) { return &opnd0; } + RegOperand *regOpnd0; + if (!opnd0.IsRegister()) { + RegOperand &result = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(fromType), + cgFunc->GetRegTyFromPrimTy(fromType)); + SelectCopy(result, opnd0, fromType, fromType); + regOpnd0 = &result; + } else { + regOpnd0 = &static_cast(opnd0); + } RegOperand *resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), cgFunc->GetRegTyFromPrimTy(toType)); if (IsPrimitiveInteger(toType) && IsPrimitiveInteger(fromType)) { - SelectIntCvt(*resOpnd, opnd0, toType, fromType); + 
SelectIntCvt(*resOpnd, *regOpnd0, toType, fromType); } else if (IsPrimitiveFloat(toType) && IsPrimitiveInteger(fromType)) { - SelectCvtInt2Float(*resOpnd, opnd0, toType, fromType); + SelectCvtInt2Float(*resOpnd, *regOpnd0, toType, fromType); } else if (IsPrimitiveFloat(toType) && IsPrimitiveFloat(fromType)) { - SelectFloatCvt(*resOpnd, opnd0, toType, fromType); + SelectFloatCvt(*resOpnd, *regOpnd0, toType, fromType); } else if (IsPrimitiveInteger(toType) && IsPrimitiveFloat(fromType)) { - SelectCvtFloat2Int(*resOpnd, opnd0, toType, fromType); + SelectCvtFloat2Int(*resOpnd, *regOpnd0, toType, fromType); } else { CHECK_FATAL(false, "NIY cvt"); } return resOpnd; } - void MPISel::SelectCvtFloat2Int(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType) { uint32 toSize = GetPrimTypeBitSize(toType); bool isSigned = !IsPrimitiveUnsigned(toType); +#if TARGAARCH64 + /* + * Due to fp precision, should use one insn to perform cvt. + */ + MOperator mOp = abstract::MOP_undef; + switch (fromType) { + case PTY_f64: + mOp = (toSize <= k32BitSize) ? ((isSigned) ? MOP_cvt_i32_f64 : MOP_cvt_u32_f64) : + ((isSigned) ? MOP_cvt_i64_f64 : MOP_cvt_u64_f64); + break; + case PTY_f32: + mOp = (toSize <= k32BitSize) ? ((isSigned) ? MOP_cvt_i32_f32 : MOP_cvt_u32_f32) : + ((isSigned) ? MOP_cvt_i64_f32 : MOP_cvt_u64_f32); + break; + default: + CHECK_FATAL(false, "NYI"); + } + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + (void)insn.AddOpndChain(resOpnd).AddOpndChain(opnd0); + cgFunc->GetCurBB()->AppendInsn(insn); +#else PrimType newToType = toType; // cvt f64/32 -> u16 / u8 -> cvt f u32 + cvt u32 -> u8 if (toSize < k32BitSize) { @@ -978,9 +1100,9 @@ void MPISel::SelectCvtFloat2Int(RegOperand &resOpnd, Operand &opnd0, PrimType to SelectFloatCvt(tmpFloatOpnd, opnd0, newToType, fromType); MOperator mOp = abstract::MOP_undef; if (newToSize == k32BitSize) { - mOp = isSigned ? 
abstract::MOP_cvt_rf_i32 : abstract::MOP_cvt_rf_u32; + mOp = isSigned ? abstract::MOP_cvt_i32_f32 : abstract::MOP_cvt_u32_f32; } else if (newToSize == k64BitSize) { - mOp = isSigned ? abstract::MOP_cvt_rf_i64 : abstract::MOP_cvt_rf_u64; + mOp = isSigned ? abstract::MOP_cvt_i64_f64 : abstract::MOP_cvt_u64_f64; } else { CHECK_FATAL(false, "niy"); } @@ -993,18 +1115,39 @@ void MPISel::SelectCvtFloat2Int(RegOperand &resOpnd, Operand &opnd0, PrimType to SelectIntCvt(resOpnd, tmpIntOpnd, toType, newToType); } cgFunc->GetCurBB()->AppendInsn(insn); +#endif } void MPISel::SelectCvtInt2Float(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType) { uint32 fromSize = GetPrimTypeBitSize(fromType); bool isSigned = !IsPrimitiveUnsigned(fromType); +#if TARGAARCH64 + /* Due to fp precision, convert is done with one instruction */ + MOperator mOp = abstract::MOP_undef; + switch (toType) { + case PTY_f64: + mOp = (fromSize <= k32BitSize) ? ((isSigned) ? MOP_cvt_f64_i32 : MOP_cvt_f64_u32) : + ((isSigned) ? MOP_cvt_f64_i64 : MOP_cvt_f64_u64); + break; + case PTY_f32: + mOp = (fromSize <= k32BitSize) ? ((isSigned) ? MOP_cvt_f32_i32 : MOP_cvt_f32_u32) : + ((isSigned) ? MOP_cvt_f32_i64 : MOP_cvt_f32_u64); + break; + default: + CHECK_FATAL(false, "NYI"); + } + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, fromType); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + (void)insn.AddOpndChain(resOpnd).AddOpndChain(regOpnd0); + cgFunc->GetCurBB()->AppendInsn(insn); +#else MOperator mOp = abstract::MOP_undef; PrimType newFromType = PTY_begin; if (fromSize == k32BitSize) { - mOp = isSigned ? abstract::MOP_cvt_fr_i32 : abstract::MOP_cvt_fr_u32; + mOp = isSigned ? abstract::MOP_cvt_f32_i32 : abstract::MOP_cvt_f32_u32; newFromType = PTY_f32; } else if (fromSize == k64BitSize) { - mOp = isSigned ? abstract::MOP_cvt_fr_i64 : abstract::MOP_cvt_fr_u64; + mOp = isSigned ? 
abstract::MOP_cvt_f64_i64 : abstract::MOP_cvt_f64_u64; newFromType = PTY_f64; } else { CHECK_FATAL(false, "niy"); @@ -1016,6 +1159,7 @@ void MPISel::SelectCvtInt2Float(RegOperand &resOpnd, Operand &opnd0, PrimType to (void)insn.AddOpndChain(tmpFloatOpnd).AddOpndChain(regOpnd0); cgFunc->GetCurBB()->AppendInsn(insn); SelectFloatCvt(resOpnd, tmpFloatOpnd, toType, newFromType); +#endif } void MPISel::SelectIntCvt(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType) { @@ -1026,7 +1170,7 @@ void MPISel::SelectIntCvt(RegOperand &resOpnd, Operand &opnd0, PrimType toType, * The signedness of operands would be shown in the expression. */ RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, fromType); - if (toSize <= fromSize) { + if (toSize == fromSize) { resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(regOpnd0.GetRegisterNumber(), GetPrimTypeBitSize(toType), cgFunc->GetRegTyFromPrimTy(toType)); return; @@ -1182,6 +1326,7 @@ Operand *MPISel::SelectIread(const BaseNode &parent [[maybe_unused]], const Irea } /* for BasicType, load val in addr to register. */ PrimType primType = expr.GetPrimType(); + PTY128MOD(primType); RegOperand &result = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType)); SelectCopy(result, memOpnd, primType, lhsInfo.primType); @@ -1256,7 +1401,7 @@ Operand *MPISel::SelectDepositBits(const DepositbitsNode &node, Operand &opnd0, return &resOpnd; } -Operand *MPISel::SelectAbs(UnaryNode &node, Operand &opnd0) { +Operand *MPISel::SelectAbs(UnaryNode &node, Operand &opnd0, const BaseNode &parent) { PrimType primType = node.GetPrimType(); if (IsPrimitiveVector(primType)) { CHECK_FATAL(false, "NIY"); @@ -1273,7 +1418,7 @@ Operand *MPISel::SelectAbs(UnaryNode &node, Operand &opnd0) { double mask = primType == PTY_f64 ? 
kNaNDouble : kNaNFloat; MIRDoubleConst *c = cgFunc->GetMemoryPool()->New(mask, *GlobalTables::GetTypeTable().GetTypeTable().at(PTY_f64)); - Operand *opnd1 = SelectFloatingConst(*c, PTY_f64); + Operand *opnd1 = SelectFloatingConst(*c, PTY_f64, parent); RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType)); @@ -1396,59 +1541,48 @@ StmtNode *MPISel::HandleFuncEntry() { RegOperand &MPISel::SelectCopy2Reg(Operand &src, PrimType toType, PrimType fromType) { uint32 fromSize = GetPrimTypeBitSize(fromType); uint32 toSize = GetPrimTypeBitSize(toType); - if (src.IsRegister() && fromSize == toSize) { + bool isReg = src.IsRegister(); + uint32 srcRegSize = isReg ? src.GetSize() : 0; + if ((isReg && fromSize == toSize) || (fromType == PTY_unknown && isReg && srcRegSize == toSize)) { return static_cast(src); } - RegOperand &dest = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), - cgFunc->GetRegTyFromPrimTy(toType)); - if (fromSize != toSize) { - SelectCopy(dest, src, toType, fromType); - } else { + RegOperand &dest = cgFunc->GetOpndBuilder()->CreateVReg(toSize, cgFunc->GetRegTyFromPrimTy(toType)); + if (fromType == PTY_unknown || fromSize == toSize) { SelectCopy(dest, src, toType); + } else if (fromSize != toSize) { + SelectCopy(dest, src, toType, fromType); } return dest; } -/* Pretty sure that implicit type conversions will not occur. */ -RegOperand &MPISel::SelectCopy2Reg(Operand &src, PrimType dtype) { - ASSERT(src.GetSize() == GetPrimTypeBitSize(dtype), "NIY"); - if (src.IsRegister()) { - return static_cast(src); - } - RegOperand &dest = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), - cgFunc->GetRegTyFromPrimTy(dtype)); - SelectCopy(dest, src, dtype); - return dest; -} /* This function copy/load/store src to a dest, Once the src and dest types * are different, implicit conversion is executed here. 
*/ void MPISel::SelectCopy(Operand &dest, Operand &src, PrimType toType, PrimType fromType) { - if (GetPrimTypeBitSize(fromType) != GetPrimTypeBitSize(toType)) { + PTY128MOD(fromType); + PTY128MOD(toType); + uint32 fromSize = GetPrimTypeBitSize(fromType); + uint32 toSize = GetPrimTypeBitSize(toType); + if (fromType != PTY_unknown && fromSize != toSize) { RegOperand &srcRegOpnd = SelectCopy2Reg(src, fromType); - RegOperand &dstRegOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), - cgFunc->GetRegTyFromPrimTy(toType)); + RegOperand &dstRegOpnd = cgFunc->GetOpndBuilder()->CreateVReg(toSize, cgFunc->GetRegTyFromPrimTy(toType)); SelectIntCvt(dstRegOpnd, srcRegOpnd, toType, fromType); SelectCopy(dest, dstRegOpnd, toType); } else { - SelectCopy(dest, src, toType); - } -} - -/* Pretty sure that implicit type conversions will not occur. */ -void MPISel::SelectCopy(Operand &dest, Operand &src, PrimType type) { - ASSERT(dest.GetSize() == src.GetSize(), "NIY"); - if (dest.GetKind() == Operand::kOpdRegister) { - SelectCopyInsn(dest, src, type); - } else if (dest.GetKind() == Operand::kOpdMem) { - if (src.GetKind() != Operand::kOpdRegister) { - RegOperand &tempReg = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(type), - cgFunc->GetRegTyFromPrimTy(type)); - SelectCopyInsn(tempReg, src, type); - SelectCopyInsn(dest, tempReg, type); - } else { - SelectCopyInsn(dest, src, type); + if (dest.GetKind() == Operand::kOpdMem || src.GetKind() == Operand::kOpdMem) { + if ((dest.GetKind() == Operand::kOpdMem && src.GetKind() == Operand::kOpdRegister) || + (dest.GetKind() == Operand::kOpdRegister && src.GetKind() == Operand::kOpdMem)) { + SelectCopyInsn(dest, src, toType); + } else if (src.GetKind() != Operand::kOpdRegister) { + RegOperand &tempReg = cgFunc->GetOpndBuilder()->CreateVReg(toSize, cgFunc->GetRegTyFromPrimTy(toType)); + SelectCopyInsn(tempReg, src, toType); + SelectCopyInsn(dest, tempReg, toType); + } else { + SelectCopyInsn(dest, src, toType); + } 
+ } else if (dest.GetKind() == Operand::kOpdRegister) { + SelectCopyInsn(dest, src, toType); + }else { + CHECK_FATAL(false, "NIY, CPU supports more than memory and registers"); } - }else { - CHECK_FATAL(false, "NIY, CPU supports more than memory and registers"); } } @@ -1531,18 +1665,6 @@ Operand *MPISel::SelectRetype(TypeCvtNode &node, Operand &opnd0) { return nullptr; } -void MPISel::HandleFuncExit() const { - BlockNode *block = cgFunc->GetFunction().GetBody(); - ASSERT(block != nullptr, "get func body block failed in CGFunc::GenerateInstruction"); - cgFunc->GetCurBB()->SetLastStmt(*block->GetLast()); - /* Set lastbb's frequency */ - cgFunc->SetLastBB(*cgFunc->GetCurBB()); - /* the last BB is return BB */ - cgFunc->GetLastBB()->SetKind(BB::kBBReturn); - - cgFunc->AddCommonExitBB(); -} - void InstructionSelector::GetAnalysisDependence(AnalysisDep &aDep) const { aDep.AddRequired(); aDep.AddPreserved(); diff --git a/src/mapleall/maple_be/src/cg/standardize.cpp b/src/mapleall/maple_be/src/cg/standardize.cpp index cf14b253f5..b3a14f7a0d 100644 --- a/src/mapleall/maple_be/src/cg/standardize.cpp +++ b/src/mapleall/maple_be/src/cg/standardize.cpp @@ -22,6 +22,9 @@ void Standardize::DoStandardize() { /* two address mapping first */ FOR_ALL_BB(bb, cgFunc) { FOR_BB_INSNS(insn, bb) { + if (insn->IsDbgInsn()) { + continue; + } if (insn->IsMachineInstruction()) { continue; } @@ -30,10 +33,13 @@ void Standardize::DoStandardize() { } } } - + Legalize(); /* standardize for each op */ FOR_ALL_BB(bb, cgFunc) { FOR_BB_INSNS(insn, bb) { + if (insn->IsDbgInsn()) { + continue; + } if (insn->IsMachineInstruction()) { continue; } @@ -44,11 +50,13 @@ void Standardize::DoStandardize() { } else if (insn->IsBasicOp()) { StdzBasicOp(*insn); } else if (insn->IsUnaryOp()) { - StdzUnaryOp(*insn, *cgFunc); + StdzUnaryOp(*insn); } else if (insn->IsConversion()) { - StdzCvtOp(*insn, *cgFunc); + StdzCvtOp(*insn); } else if (insn->IsShift()) { - StdzShiftOp(*insn, *cgFunc); + StdzShiftOp(*insn); + } 
else if (insn->IsComment()) { + StdzCommentOp(*insn); } else { LogInfo::MapleLogger() << "Need STDZ function for " << insn->GetDesc()->GetName() << "\n"; CHECK_FATAL(false, "NIY"); @@ -61,19 +69,37 @@ void Standardize::AddressMapping(Insn &insn) const { Operand &dest = insn.GetOperand(kInsnFirstOpnd); Operand &src1 = insn.GetOperand(kInsnSecondOpnd); uint32 destSize = dest.GetSize(); + CHECK_FATAL(dest.IsRegister(), "AddressMapping: not reg operand"); + bool isInt = static_cast(dest).GetRegisterType() == kRegTyInt ? true : false; MOperator mOp = abstract::MOP_undef; switch (destSize) { case k8BitSize: - mOp = abstract::MOP_copy_rr_8; + if (isInt) { + mOp = abstract::MOP_copy_rr_8; + } else { + mOp = abstract::MOP_copy_ff_8; + } break; case k16BitSize: - mOp = abstract::MOP_copy_rr_16; + if (isInt) { + mOp = abstract::MOP_copy_rr_16; + } else { + mOp = abstract::MOP_copy_ff_16; + } break; case k32BitSize: - mOp = abstract::MOP_copy_rr_32; + if (isInt) { + mOp = abstract::MOP_copy_rr_32; + } else { + mOp = abstract::MOP_copy_ff_32; + } break; case k64BitSize: - mOp = abstract::MOP_copy_rr_64; + if (isInt) { + mOp = abstract::MOP_copy_rr_64; + } else { + mOp = abstract::MOP_copy_ff_64; + } break; default: break; diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp index fa3e08afb2..9e66289c19 100644 --- a/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp @@ -21,8 +21,20 @@ #include "isel.h" namespace maplebe { + +void X64MPIsel::HandleFuncExit() const { + BlockNode *block = cgFunc->GetFunction().GetBody(); + ASSERT(block != nullptr, "get func body block failed in CGFunc::GenerateInstruction"); + cgFunc->GetCurBB()->SetLastStmt(*block->GetLast()); + /* Set lastbb's frequency */ + cgFunc->SetLastBB(*cgFunc->GetCurBB()); + /* the last BB is return BB */ + cgFunc->GetLastBB()->SetKind(BB::kBBReturn); + cgFunc->PushBackExitBBsVec(*cgFunc->GetLastBB()); 
+} + /* Field-ID 0 is assigned to the top level structure. (Field-ID also defaults to 0 if it is not a structure.) */ -MemOperand &X64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId) const { +MemOperand &X64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId, RegOperand *baseReg) { PrimType symType; int32 fieldOffset = 0; if (fieldId == 0) { @@ -65,7 +77,8 @@ MemOperand &X64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uin return *result; } -void X64MPIsel::SelectReturn(NaryStmtNode &retNode, Operand &opnd) { +void X64MPIsel::SelectReturn(NaryStmtNode &retNode) { + Operand &opnd = *HandleExpr(retNode, *retNode.Opnd(0)); MIRType *retType = cgFunc->GetFunction().GetReturnType(); X64CallConvImpl retLocator(cgFunc->GetBecommon()); CCLocInfo retMech; @@ -125,7 +138,7 @@ void X64MPIsel::SelectPseduoForReturn(std::vector &retRegs) { } } -void X64MPIsel::SelectReturn() { +void X64MPIsel::SelectReturn(bool noOpnd [[maybe_unused]]) { /* jump to epilogue */ MOperator mOp = x64::MOP_jmpq_l; LabelNode *endLabel = cgFunc->GetEndLabel(); @@ -137,7 +150,7 @@ void X64MPIsel::SelectReturn() { cgFunc->GetExitBBsVec().emplace_back(cgFunc->GetCurBB()); } -void X64MPIsel::CreateCallStructParamPassByStack(MemOperand &memOpnd, int32 symSize, int32 baseOffset) { +void X64MPIsel::CreateCallStructParamPassByStack(const MemOperand &memOpnd, uint32 symSize, int32 baseOffset) { int32 copyTime = RoundUp(symSize, GetPointerSize()) / GetPointerSize(); for (int32 i = 0; i < copyTime; ++i) { ImmOperand &newImmOpnd = static_cast(*memOpnd.GetOffsetOperand()->Clone(*cgFunc->GetMemoryPool())); @@ -150,7 +163,7 @@ void X64MPIsel::CreateCallStructParamPassByStack(MemOperand &memOpnd, int32 symS } } -void X64MPIsel::CreateCallStructParamPassByReg(MemOperand &memOpnd, regno_t regNo, uint32 parmNum) { +void X64MPIsel::CreateCallStructParamPassByReg(const MemOperand &memOpnd, regno_t regNo, uint32 parmNum) { CHECK_FATAL(parmNum < 
kMaxStructParamByReg, "Exceeded maximum allowed fp parameter registers for struct passing"); RegOperand &parmOpnd = cgFunc->GetOpndBuilder()->CreatePReg(regNo, k64BitSize, kRegTyInt); ImmOperand &newImmOpnd = static_cast(*memOpnd.GetOffsetOperand()->Clone(*cgFunc->GetMemoryPool())); @@ -196,13 +209,13 @@ void X64MPIsel::SelectParmListForAggregate(BaseNode &argExpr, X64CallConvImpl &p CHECK_FATAL(ploc.fpSize == 0, "Unknown call parameter state"); CreateCallStructParamPassByReg(memOpnd, ploc.reg0, 0); if (ploc.reg1 != kRinvalid) { - CreateCallStructParamPassByReg(memOpnd, ploc.reg1, 1); + CreateCallStructParamPassByReg(memOpnd, ploc.reg1, kSecondReg); } if (ploc.reg2 != kRinvalid) { - CreateCallStructParamPassByReg(memOpnd, ploc.reg2, 2); + CreateCallStructParamPassByReg(memOpnd, ploc.reg2, kThirdReg); } if (ploc.reg3 != kRinvalid) { - CreateCallStructParamPassByReg(memOpnd, ploc.reg3, 3); + CreateCallStructParamPassByReg(memOpnd, ploc.reg3, kFourthReg); } } } @@ -452,7 +465,7 @@ void X64MPIsel::SelectLibCallNArg(const std::string &funcName, std::vectorGetLabelIdx(); Operand *result = nullptr; @@ -480,7 +493,8 @@ RegOperand *X64MPIsel::PrepareMemcpyParm(uint64 copySize) { return ®Result; } -void X64MPIsel::SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRhs) { +void X64MPIsel::SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRhs, const DassignNode &stmt) { + (void)stmt; /* rhs is Func Return, it must be from Regread */ if (opndRhs.IsRegister()) { SelectIntAggCopyReturn(symbolMem, lhsInfo.size); @@ -621,6 +635,7 @@ void X64MPIsel::SelectIgoto(Operand &opnd0) { Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); jmpInsn.AddOpndChain(opnd0); cgFunc->GetCurBB()->AppendInsn(jmpInsn); + cgFunc->SetCurBBKind(BB::kBBGoto); return; } @@ -801,6 +816,7 @@ void X64MPIsel::SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, 
X64CG::kMd[mOp]); jmpInsn.AddOpndChain(dstMemOpnd); cgFunc->GetCurBB()->AppendInsn(jmpInsn); + cgFunc->SetCurBBKind(BB::kBBIgoto); } Operand *X64MPIsel::SelectAddrof(AddrofNode &expr, const BaseNode &parent) { @@ -912,7 +928,8 @@ static X64MOP_t PickJmpInsn(Opcode brOp, Opcode cmpOp, bool isFloat, bool isSign * handle brfalse/brtrue op, opnd0 can be a compare node or non-compare node * such as a dread for example */ -void X64MPIsel::SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode, Operand &opnd0) { +void X64MPIsel::SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode) { + Operand &opnd0 = *HandleExpr(stmt, condNode); Opcode opcode = stmt.GetOpCode(); X64MOP_t jmpOperator = x64::MOP_begin; if (opnd0.IsImmediate()) { @@ -1335,4 +1352,139 @@ void X64MPIsel::SelectAsm(AsmNode &node) { cgFunc->SetHasAsm(); CHECK_FATAL(false, "NIY"); } + +Operand *X64MPIsel::SelectAbs(UnaryNode &node, Operand &opnd0, const BaseNode &parent) { + PrimType primType = node.GetPrimType(); + if (IsPrimitiveVector(primType)) { + CHECK_FATAL(false, "NIY"); + } else if (IsPrimitiveFloat(primType)) { + CHECK_FATAL(false, "NIY"); + } else if (IsUnsignedInteger(primType)) { + return &opnd0; + } else { + /* + * abs(x) = (x XOR y) - y + * y = x >>> (bitSize - 1) + */ + uint32 bitSize = GetPrimTypeBitSize(primType); + CHECK_FATAL(bitSize == k64BitSize || bitSize == k32BitSize, "only support 32-bits or 64-bits"); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType); + ImmOperand &immOpnd = cgFunc->GetOpndBuilder()->CreateImm(bitSize, bitSize - 1); + RegOperand ®Opndy = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + SelectShift(regOpndy, regOpnd0, immOpnd, OP_ashr, primType, primType); + RegOperand &tmpOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + SelectBxor(tmpOpnd, regOpnd0, regOpndy, primType); + RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); 
+ SelectSub(resOpnd, tmpOpnd, regOpndy, primType); + return &resOpnd; + } +} + +Operand *X64MPIsel::SelectCsin(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCsinh(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCasin(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCcos(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCcosh(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCacos(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCatan(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectClog(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectClog10(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCsinf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCsinhf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCasinf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCcosf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + 
CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCcoshf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCacosf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCatanf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCexpf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectClogf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectClog10f(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCffs(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCmemcmp(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCstrlen(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCstrcmp(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCstrncmp(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCstrchr(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCstrrchr(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + } diff --git 
a/src/mapleall/maple_be/src/cg/x86_64/x64_cgfunc.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_cgfunc.cpp index 360888b726..87a39c4f09 100644 --- a/src/mapleall/maple_be/src/cg/x86_64/x64_cgfunc.cpp +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_cgfunc.cpp @@ -73,7 +73,7 @@ void X64CGFunc::SelectAssertNull(UnaryStmtNode &stmt) { void X64CGFunc::SelectAsm(AsmNode &node) { CHECK_FATAL(false, "NIY"); } -void X64CGFunc::SelectAggDassign(DassignNode &stmt) { +void X64CGFunc::SelectAggDassign(const DassignNode &stmt) { CHECK_FATAL(false, "NIY"); } void X64CGFunc::SelectIassign(IassignNode &stmt) { diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp index a506e0e28f..61247f856b 100644 --- a/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp @@ -56,10 +56,10 @@ void X64Standardize::StdzBasicOp(Insn &insn) { insn.AddOpndChain(src2).AddOpndChain(dest); } -void X64Standardize::StdzUnaryOp(Insn &insn, CGFunc &cgFunc) { +void X64Standardize::StdzUnaryOp(Insn &insn) { MOperator mOp = insn.GetMachineOpcode(); if (mOp == abstract::MOP_neg_f_32 || mOp == abstract::MOP_neg_f_64) { - StdzFloatingNeg(insn, cgFunc); + StdzFloatingNeg(insn); return; } X64MOP_t directlyMappingMop = GetMopFromAbstraceIRMop(insn.GetMachineOpcode()); @@ -69,7 +69,7 @@ void X64Standardize::StdzUnaryOp(Insn &insn, CGFunc &cgFunc) { insn.AddOpndChain(dest); } -void X64Standardize::StdzCvtOp(Insn &insn, CGFunc &cgFunc) { +void X64Standardize::StdzCvtOp(Insn &insn) { uint32 OpndDesSize = insn.GetDesc()->GetOpndDes(kInsnFirstOpnd)->GetSize(); uint32 destSize = OpndDesSize; uint32 OpndSrcSize = insn.GetDesc()->GetOpndDes(kInsnSecondOpnd)->GetSize(); @@ -80,12 +80,30 @@ void X64Standardize::StdzCvtOp(Insn &insn, CGFunc &cgFunc) { case abstract::MOP_zext_rr_64_32: destSize = k32BitSize; break; - case abstract::MOP_cvt_fr_u32: + case abstract::MOP_cvt_f32_u32: srcSize = 
k64BitSize; break; - case abstract::MOP_cvt_rf_u32: + case abstract::MOP_cvt_u32_f32: destSize = k64BitSize; break; + case abstract::MOP_zext_rr_8_16: + case abstract::MOP_sext_rr_8_16: + case abstract::MOP_zext_rr_8_32: + case abstract::MOP_sext_rr_8_32: + case abstract::MOP_zext_rr_16_32: + case abstract::MOP_sext_rr_16_32: + case abstract::MOP_zext_rr_8_64: + case abstract::MOP_sext_rr_8_64: + case abstract::MOP_zext_rr_16_64: + case abstract::MOP_sext_rr_16_64: + case abstract::MOP_sext_rr_32_64: + /* reverse operands */ + destSize = OpndSrcSize; + srcSize = OpndDesSize; + break; + case abstract::MOP_zext_rr_32_64: + srcSize = k32BitSize; + destSize = k32BitSize; default: break; } @@ -95,13 +113,13 @@ void X64Standardize::StdzCvtOp(Insn &insn, CGFunc &cgFunc) { Operand *opnd0 = &insn.GetOperand(kInsnSecondOpnd); RegOperand *src = static_cast(opnd0); if (srcSize != OpndSrcSize) { - src = &cgFunc.GetOpndBuilder()->CreateVReg(src->GetRegisterNumber(), + src = &GetCgFunc()->GetOpndBuilder()->CreateVReg(src->GetRegisterNumber(), srcSize, src->GetRegisterType()); } Operand *opnd1 = &insn.GetOperand(kInsnFirstOpnd); RegOperand *dest = static_cast(opnd1); if (destSize != OpndDesSize) { - dest = &cgFunc.GetOpndBuilder()->CreateVReg(dest->GetRegisterNumber(), + dest = &GetCgFunc()->GetOpndBuilder()->CreateVReg(dest->GetRegisterNumber(), destSize, dest->GetRegisterType()); } insn.CleanAllOperand(); @@ -120,14 +138,14 @@ void X64Standardize::StdzCvtOp(Insn &insn, CGFunc &cgFunc) { * 32: xorl 0x80000000 R1 * movd R1 xmm0 */ -void X64Standardize::StdzFloatingNeg(Insn &insn, CGFunc &cgFunc) { +void X64Standardize::StdzFloatingNeg(Insn &insn) { MOperator mOp = insn.GetMachineOpcode(); uint32 bitSize = mOp == abstract::MOP_neg_f_32 ? k32BitSize : k64BitSize; // mov dest -> tmpOperand0 MOperator movOp = mOp == abstract::MOP_neg_f_32 ? 
x64::MOP_movd_fr_r : x64::MOP_movq_fr_r; - RegOperand *tmpOperand0 = &cgFunc.GetOpndBuilder()->CreateVReg(bitSize, kRegTyInt); - Insn &movInsn0 = cgFunc.GetInsnBuilder()->BuildInsn(movOp, X64CG::kMd[movOp]); + RegOperand *tmpOperand0 = &GetCgFunc()->GetOpndBuilder()->CreateVReg(bitSize, kRegTyInt); + Insn &movInsn0 = GetCgFunc()->GetInsnBuilder()->BuildInsn(movOp, X64CG::kMd[movOp]); Operand &dest = insn.GetOperand(kInsnFirstOpnd); movInsn0.AddOpndChain(dest).AddOpndChain(*tmpOperand0); insn.GetBB()->InsertInsnBefore(insn, movInsn0); @@ -135,26 +153,26 @@ void X64Standardize::StdzFloatingNeg(Insn &insn, CGFunc &cgFunc) { // 32 : xorl 0x80000000 tmpOperand0 // 64 : movabs 0x8000000000000000 tmpOperand1 // xorq tmpOperand1 tmpOperand0 - ImmOperand &imm = cgFunc.GetOpndBuilder()->CreateImm(bitSize, (static_cast(1) << (bitSize - 1))); + ImmOperand &imm = GetCgFunc()->GetOpndBuilder()->CreateImm(bitSize, (static_cast(1) << (bitSize - 1))); if (mOp == abstract::MOP_neg_f_64) { - Operand *tmpOperand1 = &cgFunc.GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); - Insn &movabs = cgFunc.GetInsnBuilder()->BuildInsn(x64::MOP_movabs_i_r, X64CG::kMd[x64::MOP_movabs_i_r]); + Operand *tmpOperand1 = &GetCgFunc()->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + Insn &movabs = GetCgFunc()->GetInsnBuilder()->BuildInsn(x64::MOP_movabs_i_r, X64CG::kMd[x64::MOP_movabs_i_r]); movabs.AddOpndChain(imm).AddOpndChain(*tmpOperand1); insn.GetBB()->InsertInsnBefore(insn, movabs); MOperator xorOp = x64::MOP_xorq_r_r; - Insn &xorq = cgFunc.GetInsnBuilder()->BuildInsn(xorOp, X64CG::kMd[xorOp]); + Insn &xorq = GetCgFunc()->GetInsnBuilder()->BuildInsn(xorOp, X64CG::kMd[xorOp]); xorq.AddOpndChain(*tmpOperand1).AddOpndChain(*tmpOperand0); insn.GetBB()->InsertInsnBefore(insn, xorq); } else { MOperator xorOp = x64::MOP_xorl_i_r; - Insn &xorq = cgFunc.GetInsnBuilder()->BuildInsn(xorOp, X64CG::kMd[xorOp]); + Insn &xorq = GetCgFunc()->GetInsnBuilder()->BuildInsn(xorOp, X64CG::kMd[xorOp]); 
xorq.AddOpndChain(imm).AddOpndChain(*tmpOperand0); insn.GetBB()->InsertInsnBefore(insn, xorq); } // mov tmpOperand0 -> dest - Insn &movq = cgFunc.GetInsnBuilder()->BuildInsn(movOp, X64CG::kMd[movOp]); + Insn &movq = GetCgFunc()->GetInsnBuilder()->BuildInsn(movOp, X64CG::kMd[movOp]); movq.AddOpndChain(*tmpOperand0).AddOpndChain(dest); insn.GetBB()->InsertInsnBefore(insn, movq); @@ -162,17 +180,17 @@ void X64Standardize::StdzFloatingNeg(Insn &insn, CGFunc &cgFunc) { return; } -void X64Standardize::StdzShiftOp(Insn &insn, CGFunc &cgFunc) { +void X64Standardize::StdzShiftOp(Insn &insn) { RegOperand *countOpnd = static_cast(&insn.GetOperand(kInsnThirdOpnd)); /* count operand cvt -> PTY_u8 */ if (countOpnd->GetSize() != GetPrimTypeBitSize(PTY_u8)) { - countOpnd = &cgFunc.GetOpndBuilder()->CreateVReg(countOpnd->GetRegisterNumber(), + countOpnd = &GetCgFunc()->GetOpndBuilder()->CreateVReg(countOpnd->GetRegisterNumber(), GetPrimTypeBitSize(PTY_u8), countOpnd->GetRegisterType()); } /* copy count operand to cl(rcx) register */ - RegOperand &clOpnd = cgFunc.GetOpndBuilder()->CreatePReg(x64::RCX, GetPrimTypeBitSize(PTY_u8), kRegTyInt); + RegOperand &clOpnd = GetCgFunc()->GetOpndBuilder()->CreatePReg(x64::RCX, GetPrimTypeBitSize(PTY_u8), kRegTyInt); X64MOP_t copyMop = x64::MOP_movb_r_r; - Insn ©Insn = cgFunc.GetInsnBuilder()->BuildInsn(copyMop, X64CG::kMd[copyMop]); + Insn ©Insn = GetCgFunc()->GetInsnBuilder()->BuildInsn(copyMop, X64CG::kMd[copyMop]); copyInsn.AddOpndChain(*countOpnd).AddOpndChain(clOpnd); insn.GetBB()->InsertInsnBefore(insn, copyInsn); /* shift OP */ @@ -183,4 +201,8 @@ void X64Standardize::StdzShiftOp(Insn &insn, CGFunc &cgFunc) { insn.AddOpndChain(clOpnd).AddOpndChain(destOpnd); } +void X64Standardize::StdzCommentOp(Insn &insn) { + insn.GetBB()->RemoveInsn(insn); +} + } -- Gitee From ffd8f5b555b7b7e4adfa730827b32c0fd3edc475 Mon Sep 17 00:00:00 2001 From: William Chen Date: Mon, 19 Dec 2022 11:31:50 -0800 Subject: [PATCH 02/25] Update copyright date --- 
src/mapleall/maple_be/BUILD.gn | 2 +- .../maple_be/include/cg/aarch64/aarch64_cg.h | 2 +- .../maple_be/include/cg/aarch64/aarch64_md.def | 2 +- .../maple_be/include/cg/aarch64/aarch64_peep.h | 2 +- src/mapleall/maple_be/include/cg/cg_option.h | 2 +- src/mapleall/maple_be/include/cg/cgbb.h | 2 +- src/mapleall/maple_be/include/cg/immvalid.def | 14 ++++++++++++++ src/mapleall/maple_be/include/cg/isa.h | 2 +- .../maple_be/src/cg/aarch64/aarch64_peep.cpp | 2 +- .../maple_be/src/cg/aarch64/aarch64_reaching.cpp | 2 +- src/mapleall/maple_be/src/cg/cg.cpp | 2 +- src/mapleall/maple_be/src/cg/cg_cfg.cpp | 2 +- src/mapleall/maple_be/src/cg/cg_option.cpp | 2 +- src/mapleall/maple_be/src/cg/cg_ssa.cpp | 2 +- src/mapleall/maple_be/src/cg/cgbb.cpp | 2 +- 15 files changed, 28 insertions(+), 14 deletions(-) diff --git a/src/mapleall/maple_be/BUILD.gn b/src/mapleall/maple_be/BUILD.gn index b949fc9ac4..3a2d7938a6 100644 --- a/src/mapleall/maple_be/BUILD.gn +++ b/src/mapleall/maple_be/BUILD.gn @@ -1,5 +1,5 @@ # -# Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. +# Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. # # OpenArkCompiler is licensed under Mulan PSL v2. # You can use this software according to the terms and conditions of the Mulan PSL v2. diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h index 3da19ed9dc..1e1c87159e 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def b/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def index 8cbf5aabb6..2b906c4582 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under the Mulan PSL v1. * You can use this software according to the terms and conditions of the Mulan PSL v1. diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h index d183afed72..158d65b050 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. diff --git a/src/mapleall/maple_be/include/cg/cg_option.h b/src/mapleall/maple_be/include/cg/cg_option.h index f737972767..d79d5e2ec3 100644 --- a/src/mapleall/maple_be/include/cg/cg_option.h +++ b/src/mapleall/maple_be/include/cg/cg_option.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
diff --git a/src/mapleall/maple_be/include/cg/cgbb.h b/src/mapleall/maple_be/include/cg/cgbb.h index 1db79af742..91931477a4 100644 --- a/src/mapleall/maple_be/include/cg/cgbb.h +++ b/src/mapleall/maple_be/include/cg/cgbb.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. diff --git a/src/mapleall/maple_be/include/cg/immvalid.def b/src/mapleall/maple_be/include/cg/immvalid.def index 4a6df76761..cd63123059 100644 --- a/src/mapleall/maple_be/include/cg/immvalid.def +++ b/src/mapleall/maple_be/include/cg/immvalid.def @@ -1,3 +1,17 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. +*/ static std::set ValidBitmaskImmSet = { #include "valid_bitmask_imm.txt" }; diff --git a/src/mapleall/maple_be/include/cg/isa.h b/src/mapleall/maple_be/include/cg/isa.h index 53ef48af09..391e0013c3 100644 --- a/src/mapleall/maple_be/include/cg/isa.h +++ b/src/mapleall/maple_be/include/cg/isa.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp index 4271aa6277..f2db71c45e 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp index 22c83e80bb..988486f67c 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. diff --git a/src/mapleall/maple_be/src/cg/cg.cpp b/src/mapleall/maple_be/src/cg/cg.cpp index ca0aab8f38..70baa50ee4 100644 --- a/src/mapleall/maple_be/src/cg/cg.cpp +++ b/src/mapleall/maple_be/src/cg/cg.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
diff --git a/src/mapleall/maple_be/src/cg/cg_cfg.cpp b/src/mapleall/maple_be/src/cg/cg_cfg.cpp index 29c11b3cbc..5929c98b50 100644 --- a/src/mapleall/maple_be/src/cg/cg_cfg.cpp +++ b/src/mapleall/maple_be/src/cg/cg_cfg.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. diff --git a/src/mapleall/maple_be/src/cg/cg_option.cpp b/src/mapleall/maple_be/src/cg/cg_option.cpp index e69dfc9bb5..fc08849fbd 100644 --- a/src/mapleall/maple_be/src/cg/cg_option.cpp +++ b/src/mapleall/maple_be/src/cg/cg_option.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. diff --git a/src/mapleall/maple_be/src/cg/cg_ssa.cpp b/src/mapleall/maple_be/src/cg/cg_ssa.cpp index f28a528e2a..4f71d65574 100644 --- a/src/mapleall/maple_be/src/cg/cg_ssa.cpp +++ b/src/mapleall/maple_be/src/cg/cg_ssa.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. diff --git a/src/mapleall/maple_be/src/cg/cgbb.cpp b/src/mapleall/maple_be/src/cg/cgbb.cpp index 45d9e1ae0f..2cfa7712b0 100644 --- a/src/mapleall/maple_be/src/cg/cgbb.cpp +++ b/src/mapleall/maple_be/src/cg/cgbb.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. 
+ * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. -- Gitee From fb997f1ea0c1c0b28f3b5949e9fc95d2641d5264 Mon Sep 17 00:00:00 2001 From: William Chen Date: Mon, 19 Dec 2022 16:33:21 -0800 Subject: [PATCH 03/25] Fix rangegoto adding successor bb --- src/mapleall/maple_be/src/cg/isel.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/mapleall/maple_be/src/cg/isel.cpp b/src/mapleall/maple_be/src/cg/isel.cpp index 6f4b9ca3cd..9ca42024a3 100644 --- a/src/mapleall/maple_be/src/cg/isel.cpp +++ b/src/mapleall/maple_be/src/cg/isel.cpp @@ -239,6 +239,7 @@ void HandleRangeGoto(StmtNode &stmt, MPISel &iSel) { Operand *srcOpnd = iSel.HandleExpr(rangeGotoNode, *srcNode); cgFunc->SetCurBBKind(BB::kBBRangeGoto); iSel.SelectRangeGoto(rangeGotoNode, *srcOpnd); + cgFunc->SetCurBB(*cgFunc->StartNewBB(rangeGotoNode)); } void HandleIgoto(StmtNode &stmt, MPISel &iSel) { -- Gitee From d34c756bb2bc9c3d57b75392f4ec0df03e1a357d Mon Sep 17 00:00:00 2001 From: William Chen Date: Tue, 20 Dec 2022 10:08:35 -0800 Subject: [PATCH 04/25] Fix SelectRetype for different type --- src/mapleall/maple_be/src/cg/isel.cpp | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/mapleall/maple_be/src/cg/isel.cpp b/src/mapleall/maple_be/src/cg/isel.cpp index 9ca42024a3..82ef4871b0 100644 --- a/src/mapleall/maple_be/src/cg/isel.cpp +++ b/src/mapleall/maple_be/src/cg/isel.cpp @@ -1662,6 +1662,18 @@ Operand *MPISel::SelectRetype(TypeCvtNode &node, Operand &opnd0) { if (IsPrimitiveInteger(fromType) && IsPrimitiveInteger(toType)) { return &SelectCopy2Reg(opnd0, toType, fromType); } + if (IsPrimitiveInteger(fromType) && IsPrimitiveFloat(toType)) { + RegOperand *resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), + cgFunc->GetRegTyFromPrimTy(toType)); + SelectCvtInt2Float(*resOpnd, opnd0, toType, fromType); + 
return resOpnd; + } + if (IsPrimitiveFloat(fromType) && IsPrimitiveInteger(toType)) { + RegOperand *resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), + cgFunc->GetRegTyFromPrimTy(toType)); + SelectCvtFloat2Int(*resOpnd, opnd0, toType, fromType); + return resOpnd; + } CHECK_FATAL(false, "NIY, retype"); return nullptr; } -- Gitee From 43722efca751f366531a9815f47c6bf121f16dd4 Mon Sep 17 00:00:00 2001 From: eching Date: Tue, 20 Dec 2022 10:29:44 -0800 Subject: [PATCH 05/25] Fix mapping of abstract MOP to x64 insn for zext with 64 bit target and truncate from 64 bit source. --- .../maple_be/include/cg/x86_64/x64_abstract_mapping.def | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_abstract_mapping.def b/src/mapleall/maple_be/include/cg/x86_64/x64_abstract_mapping.def index 106b4d0b89..6f2daf65e1 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_abstract_mapping.def +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_abstract_mapping.def @@ -104,9 +104,9 @@ DEFINE_MAPPING(abstract::MOP_zext_rr_32_8, x64::MOP_movzbl_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_32_8, x64::MOP_movsbl_r_r) DEFINE_MAPPING(abstract::MOP_zext_rr_32_16, x64::MOP_movzwl_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_32_16, x64::MOP_movswl_r_r) -DEFINE_MAPPING(abstract::MOP_zext_rr_64_8, x64::MOP_movzbl_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_64_8, x64::MOP_movzbq_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_64_8, x64::MOP_movsbq_r_r) -DEFINE_MAPPING(abstract::MOP_zext_rr_64_16, x64::MOP_movzwl_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_64_16, x64::MOP_movzwq_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_64_16, x64::MOP_movswq_r_r) DEFINE_MAPPING(abstract::MOP_zext_rr_64_32, x64::MOP_movl_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_64_32, x64::MOP_movslq_r_r) @@ -118,9 +118,9 @@ DEFINE_MAPPING(abstract::MOP_zext_rr_8_32, x64::MOP_movzbl_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_8_32, x64::MOP_movsbl_r_r) 
DEFINE_MAPPING(abstract::MOP_zext_rr_16_32, x64::MOP_movzwl_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_16_32, x64::MOP_movswl_r_r) -DEFINE_MAPPING(abstract::MOP_zext_rr_8_64, x64::MOP_movzbl_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_8_64, x64::MOP_movzbq_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_8_64, x64::MOP_movsbq_r_r) -DEFINE_MAPPING(abstract::MOP_zext_rr_16_64, x64::MOP_movzwl_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_16_64, x64::MOP_movzwq_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_16_64, x64::MOP_movswq_r_r) DEFINE_MAPPING(abstract::MOP_zext_rr_32_64, x64::MOP_movl_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_32_64, x64::MOP_movslq_r_r) -- Gitee From 1cba6c677bda0d359b2a8c8d450e854e470be2db Mon Sep 17 00:00:00 2001 From: eching Date: Tue, 27 Dec 2022 17:53:53 -0800 Subject: [PATCH 06/25] Fix inconsistency between MOP insn and operand size in x86_64 caused by 1) x64_standardize changing operand size (StdzCvtOp()) after doing MOP mapping (AddressMapping()) based on original operand size 2) isel not generating truncation insn when operand size of Maple OP_cvt is larger than its fromType. 
--- src/mapleall/maple_be/src/cg/isel.cpp | 8 ++++++++ src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp | 1 - 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/mapleall/maple_be/src/cg/isel.cpp b/src/mapleall/maple_be/src/cg/isel.cpp index 82ef4871b0..9111622f0e 100644 --- a/src/mapleall/maple_be/src/cg/isel.cpp +++ b/src/mapleall/maple_be/src/cg/isel.cpp @@ -1548,6 +1548,14 @@ RegOperand &MPISel::SelectCopy2Reg(Operand &src, PrimType toType, PrimType fromT return static_cast(src); } RegOperand &dest = cgFunc->GetOpndBuilder()->CreateVReg(toSize, cgFunc->GetRegTyFromPrimTy(toType)); + if (isReg && srcRegSize > toSize && IsPrimitiveInteger(toType)) { + /* truncate */ + MOperator mOp = GetFastCvtMopI(srcRegSize, toSize, false); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + (void)insn.AddOpndChain(dest).AddOpndChain(static_cast(src)); + cgFunc->GetCurBB()->AppendInsn(insn); + return dest; + } if (fromType == PTY_unknown || fromSize == toSize) { SelectCopy(dest, src, toType); } else if (fromSize != toSize) { diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp index 61247f856b..51fc99a06d 100644 --- a/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp @@ -75,7 +75,6 @@ void X64Standardize::StdzCvtOp(Insn &insn) { uint32 OpndSrcSize = insn.GetDesc()->GetOpndDes(kInsnSecondOpnd)->GetSize(); uint32 srcSize = OpndSrcSize; switch (insn.GetMachineOpcode()) { - case abstract::MOP_zext_rr_64_8: case abstract::MOP_zext_rr_64_16: case abstract::MOP_zext_rr_64_32: destSize = k32BitSize; -- Gitee From 5bafa9ead434c98bbc124df4ace07dc5bc9df10c Mon Sep 17 00:00:00 2001 From: eching Date: Wed, 28 Dec 2022 18:39:10 -0800 Subject: [PATCH 07/25] Moved isel fix for mop and operand size mismatch in commit 1cba6c67 from SelectCopy2Reg() to SelectIntCvt(). 
--- src/mapleall/maple_be/src/cg/isel.cpp | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/src/mapleall/maple_be/src/cg/isel.cpp b/src/mapleall/maple_be/src/cg/isel.cpp index 9111622f0e..73ac57e1b7 100644 --- a/src/mapleall/maple_be/src/cg/isel.cpp +++ b/src/mapleall/maple_be/src/cg/isel.cpp @@ -1170,13 +1170,15 @@ void MPISel::SelectIntCvt(RegOperand &resOpnd, Operand &opnd0, PrimType toType, * It is redundancy to insert "nop" casts (unsigned 32 -> singed 32) in abstract CG IR * The signedness of operands would be shown in the expression. */ - RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, fromType); + bool isSigned = !IsPrimitiveUnsigned(fromType); + uint32 bitSize = opnd0.GetSize(); + PrimType opndType = GetIntegerPrimTypeFromSize(isSigned, bitSize); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, fromType, opndType); if (toSize == fromSize) { resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(regOpnd0.GetRegisterNumber(), GetPrimTypeBitSize(toType), cgFunc->GetRegTyFromPrimTy(toType)); return; } - bool isSigned = !IsPrimitiveUnsigned(fromType); MOperator mOp = GetFastCvtMopI(fromSize, toSize, isSigned); Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); (void)insn.AddOpndChain(resOpnd).AddOpndChain(regOpnd0); @@ -1548,14 +1550,6 @@ RegOperand &MPISel::SelectCopy2Reg(Operand &src, PrimType toType, PrimType fromT return static_cast(src); } RegOperand &dest = cgFunc->GetOpndBuilder()->CreateVReg(toSize, cgFunc->GetRegTyFromPrimTy(toType)); - if (isReg && srcRegSize > toSize && IsPrimitiveInteger(toType)) { - /* truncate */ - MOperator mOp = GetFastCvtMopI(srcRegSize, toSize, false); - Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); - (void)insn.AddOpndChain(dest).AddOpndChain(static_cast(src)); - cgFunc->GetCurBB()->AppendInsn(insn); - return dest; - } if (fromType == PTY_unknown || fromSize == toSize) { SelectCopy(dest, src, toType); } else if (fromSize != toSize) { -- 
Gitee From 55756bbc5b21a9df9d523f6921ea1c2dd0eac007 Mon Sep 17 00:00:00 2001 From: eching Date: Thu, 29 Dec 2022 10:49:59 -0800 Subject: [PATCH 08/25] Fix for testcases CF11134-testsuite-EXP_3-test226 and CF11134-testsuite-EXP_3-test408 --- src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp index f2db71c45e..82befacad6 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp @@ -28,6 +28,7 @@ const std::string kMccLoadRefV = "MCC_LoadVolatileField"; const std::string kMccLoadRefS = "MCC_LoadRefStatic"; const std::string kMccLoadRefVS = "MCC_LoadVolatileStaticField"; const std::string kMccDummy = "MCC_Dummy"; +#define NOMULOPT (true) const std::string GetReadBarrierName(const Insn &insn) { constexpr int32 totalBarrierNamesNum = 5; @@ -2520,6 +2521,9 @@ bool MulImmToShiftPattern::CheckCondition(Insn &insn) { } void MulImmToShiftPattern::Run(BB &bb, Insn &insn) { + if (NOMULOPT) { + return; + } /* mov x0,imm and mul to shift */ if (!CheckCondition(insn)) { return; -- Gitee From f27b70764c0915f166f7732f6789d150d587348f Mon Sep 17 00:00:00 2001 From: William Chen Date: Thu, 29 Dec 2022 17:09:33 -0800 Subject: [PATCH 09/25] fix MulImmToShiftPattern peep --- .../maple_be/src/cg/aarch64/aarch64_peep.cpp | 32 +++++++++++-------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp index c04bbf5e2f..5796a1a110 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp @@ -28,7 +28,6 @@ const std::string kMccLoadRefV = "MCC_LoadVolatileField"; const std::string kMccLoadRefS = "MCC_LoadRefStatic"; const std::string kMccLoadRefVS = 
"MCC_LoadVolatileStaticField"; const std::string kMccDummy = "MCC_Dummy"; -#define NOMULOPT (true) const std::string GetReadBarrierName(const Insn &insn) { constexpr int32 totalBarrierNamesNum = 5; @@ -2510,8 +2509,13 @@ bool MulImmToShiftPattern::CheckCondition(Insn &insn) { if (immOpnd.IsNegative()) { return false; } - int64 immVal = immOpnd.GetValue(); - /* 0 considered power of 2 */ + uint64 immVal = immOpnd.GetValue(); + if (immVal == 0) { + shiftVal = 0; + newMop = insn.GetMachineOpcode() == MOP_xmulrrr ? MOP_xmovri64 : MOP_wmovri32; + return true; + } + /* power of 2 */ if ((immVal & (immVal - 1)) != 0) { return false; } @@ -2521,26 +2525,28 @@ bool MulImmToShiftPattern::CheckCondition(Insn &insn) { } void MulImmToShiftPattern::Run(BB &bb, Insn &insn) { - if (NOMULOPT) { - return; - } /* mov x0,imm and mul to shift */ if (!CheckCondition(insn)) { return; } auto *aarch64CGFunc = static_cast(cgFunc); - ImmOperand &shiftOpnd = aarch64CGFunc->CreateImmOperand(shiftVal, k32BitSize, false); - Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, insn.GetOperand(kInsnFirstOpnd), - insn.GetOperand(kInsnSecondOpnd), shiftOpnd); - bb.ReplaceInsn(insn, newInsn); + ImmOperand &immOpnd = aarch64CGFunc->CreateImmOperand(shiftVal, k32BitSize, false); + Insn *newInsn; + if (newMop == MOP_xmovri64 || newMop == MOP_wmovri32) { + newInsn = &cgFunc->GetInsnBuilder()->BuildInsn(newMop, insn.GetOperand(kInsnFirstOpnd), immOpnd); + } else { + newInsn = &cgFunc->GetInsnBuilder()->BuildInsn(newMop, insn.GetOperand(kInsnFirstOpnd), + insn.GetOperand(kInsnSecondOpnd), immOpnd); + } + bb.ReplaceInsn(insn, *newInsn); /* update ssa info */ - ssaInfo->ReplaceInsn(insn, newInsn); + ssaInfo->ReplaceInsn(insn, *newInsn); optSuccess = true; - SetCurrInsn(&newInsn); + SetCurrInsn(newInsn); if (CG_PEEP_DUMP) { std::vector prevs; prevs.emplace_back(movInsn); - DumpAfterPattern(prevs, &insn, &newInsn); + DumpAfterPattern(prevs, &insn, newInsn); } } -- Gitee From 
1dd16ea0eb08aec2a75d0c62fbf29a90eb3f026e Mon Sep 17 00:00:00 2001 From: William Chen Date: Mon, 19 Dec 2022 10:19:14 -0800 Subject: [PATCH 10/25] arm64 standardize for abstract mop version 1 -O0 running ctorture --- src/mapleall/maple_be/BUILD.gn | 2 + src/mapleall/maple_be/CMakeLists.txt | 2 + .../include/cg/aarch64/aarch64_MPISel.h | 121 +++ .../maple_be/include/cg/aarch64/aarch64_cg.h | 8 + .../include/cg/aarch64/aarch64_cgfunc.h | 33 +- .../include/cg/aarch64/aarch64_md.def | 4 +- .../include/cg/aarch64/aarch64_peep.h | 23 + .../include/cg/aarch64/aarch64_phases.def | 6 +- .../include/cg/aarch64/aarch64_standardize.h | 67 ++ .../maple_be/include/cg/abstract_mmir.def | 41 +- .../maple_be/include/cg/cg_irbuilder.h | 13 +- src/mapleall/maple_be/include/cg/cg_option.h | 18 + src/mapleall/maple_be/include/cg/cg_options.h | 1 + src/mapleall/maple_be/include/cg/cgbb.h | 3 + src/mapleall/maple_be/include/cg/cgfunc.h | 113 +-- src/mapleall/maple_be/include/cg/immvalid.def | 30 + src/mapleall/maple_be/include/cg/isa.h | 5 + src/mapleall/maple_be/include/cg/isel.h | 65 +- src/mapleall/maple_be/include/cg/reg_info.h | 115 +++ .../maple_be/include/cg/standardize.h | 15 +- .../maple_be/include/cg/x86_64/x64_MPISel.h | 45 +- .../cg/x86_64/x64_abstract_mapping.def | 33 +- .../maple_be/include/cg/x86_64/x64_cgfunc.h | 2 +- .../include/cg/x86_64/x64_standardize.h | 9 +- src/mapleall/maple_be/src/be/lower.cpp | 2 +- .../src/cg/aarch64/aarch64_MPISel.cpp | 748 +++++++++++++++++- .../src/cg/aarch64/aarch64_cgfunc.cpp | 81 +- .../maple_be/src/cg/aarch64/aarch64_peep.cpp | 50 ++ .../src/cg/aarch64/aarch64_reaching.cpp | 3 + .../src/cg/aarch64/aarch64_standardize.cpp | 335 ++++++++ src/mapleall/maple_be/src/cg/cg.cpp | 5 + src/mapleall/maple_be/src/cg/cg_cfg.cpp | 2 + src/mapleall/maple_be/src/cg/cg_irbuilder.cpp | 17 +- src/mapleall/maple_be/src/cg/cg_option.cpp | 5 + src/mapleall/maple_be/src/cg/cg_options.cpp | 6 + src/mapleall/maple_be/src/cg/cg_ssa.cpp | 2 +- 
src/mapleall/maple_be/src/cg/cgbb.cpp | 28 + src/mapleall/maple_be/src/cg/cgfunc.cpp | 30 +- src/mapleall/maple_be/src/cg/insn.cpp | 2 +- src/mapleall/maple_be/src/cg/isel.cpp | 292 +++++-- src/mapleall/maple_be/src/cg/standardize.cpp | 42 +- .../maple_be/src/cg/x86_64/x64_MPIsel.cpp | 174 +++- .../maple_be/src/cg/x86_64/x64_cgfunc.cpp | 2 +- .../src/cg/x86_64/x64_standardize.cpp | 62 +- 44 files changed, 2357 insertions(+), 305 deletions(-) create mode 100644 src/mapleall/maple_be/include/cg/aarch64/aarch64_standardize.h create mode 100644 src/mapleall/maple_be/src/cg/aarch64/aarch64_standardize.cpp diff --git a/src/mapleall/maple_be/BUILD.gn b/src/mapleall/maple_be/BUILD.gn index 461e7eefdf..b949fc9ac4 100644 --- a/src/mapleall/maple_be/BUILD.gn +++ b/src/mapleall/maple_be/BUILD.gn @@ -130,6 +130,8 @@ src_libcgaarch64 = [ "src/cg/aarch64/aarch64_cfgo.cpp", "src/cg/aarch64/aarch64_isolate_fastpath.cpp", "src/cg/aarch64/aarch64_rematerialize.cpp", + "src/cg/aarch64/aarch64_MPISel.cpp", + "src/cg/aarch64/aarch64_standardize.cpp", ] src_libcgx86phases = [ diff --git a/src/mapleall/maple_be/CMakeLists.txt b/src/mapleall/maple_be/CMakeLists.txt index 836dbfd396..13f5809920 100755 --- a/src/mapleall/maple_be/CMakeLists.txt +++ b/src/mapleall/maple_be/CMakeLists.txt @@ -106,6 +106,8 @@ if(${TARGET} STREQUAL "aarch64" OR ${TARGET} STREQUAL "aarch64_ilp32") src/cg/aarch64/aarch64_pgo_gen.cpp src/cg/aarch64/aarch64_isolate_fastpath.cpp src/cg/aarch64/aarch64_rematerialize.cpp + src/cg/aarch64/aarch64_MPISel.cpp + src/cg/aarch64/aarch64_standardize.cpp src/cg/cfi_generator.cpp src/cg/cfgo.cpp src/cg/local_opt.cpp diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_MPISel.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_MPISel.h index aac9a9bd87..1b6ea1a2bd 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_MPISel.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_MPISel.h @@ -12,3 +12,124 @@ * FIT FOR A PARTICULAR PURPOSE. 
* See the Mulan PSL v2 for more details. */ + +#ifndef MAPLEBE_INCLUDE_AARCH64_MPISEL_H +#define MAPLEBE_INCLUDE_AARCH64_MPISEL_H + +#include "isel.h" +#include "aarch64_isa.h" +#include "aarch64_call_conv.h" + +namespace maplebe { +class AArch64MPIsel : public MPISel { + public: + AArch64MPIsel(MemPool &mp, AbstractIRBuilder &aIRBuilder, CGFunc &f) : MPISel(mp, aIRBuilder, f) {} + ~AArch64MPIsel() override = default; + + void HandleFuncExit() const override; + void SelectReturn(NaryStmtNode &retNode) override; + void SelectReturn(bool noOpnd) override; + void SelectCall(CallNode &callNode) override; + void SelectIcall(IcallNode &icallNode, Operand &opnd0) override; + Operand &ProcessReturnReg(PrimType primType, int32 sReg) override; + Operand &GetTargetRetOperand(PrimType primType, int32 sReg) override; + Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent) override; + Operand *SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) override; + Operand *SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) override; + Operand *SelectFloatingConst(MIRConst &floatingConst, PrimType primType, const BaseNode &parent) const override; + void SelectGoto(GotoNode &stmt) override; + void SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) override; + void SelectAggIassign(IassignNode &stmt, Operand &addrOpnd, Operand &opndRhs) override; + void SelectAggDassign(maplebe::MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &rOpnd, const DassignNode &s) override; + void SelectAggCopy(MemOperand &lhs, MemOperand &rhs, uint32 copySize) override; + void SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) override; + void SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode) override; + void SelectIgoto(Operand &opnd0) override; + Operand *SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; 
+ Operand *SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectSelect(TernaryNode &expr, Operand &cond, Operand &trueOpnd, Operand &falseOpnd, + const BaseNode &parent) override; + Operand *SelectStrLiteral(ConststrNode &constStr) override; + Operand *SelectExtractbits(const BaseNode &parent, ExtractbitsNode &node, Operand &opnd0) override; + void SelectIntAggCopyReturn(MemOperand &symbolMem, uint64 aggSize) override; + /* Create the operand interface directly */ + MemOperand &CreateMemOpndOrNull(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr, int64 offset = 0); + Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCctz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCclz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCsin(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCsinh(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCasin(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCcos(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCcosh(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCacos(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCatan(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCexp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectClog(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectClog10(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + 
Operand *SelectCsinf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCsinhf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCasinf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCcosf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCcoshf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCacosf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCatanf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCexpf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectClogf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectClog10f(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCffs(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCmemcmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrlen(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrcmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrncmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrchr(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrrchr(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectAbs(UnaryNode &node, Operand &opnd0, const BaseNode &parent) override; + void SelectAsm(AsmNode &node) override; + private: + MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId = 0, + RegOperand *baseReg = nullptr) override; + MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uint32 
opndSize, int64 offset) const override; + void SelectCvtFloat2Float(Operand &resOpnd, Operand &srcOpnd, PrimType fromType, PrimType toType) override; + void SelectCvtFloat2Int(Operand &resOpnd, Operand &srcOpnd, PrimType itype, PrimType ftype) override; + Insn &AppendCall(AArch64MOP_t mOp, Operand &targetOpnd, + ListOperand ¶mOpnds, ListOperand &retOpnds); + void SelectCalleeReturn(MIRType *retType, ListOperand &retOpnds); + + /* Inline function implementation of va_start */ + void GenCVaStartIntrin(RegOperand &opnd, uint32 stkSize); + + /* Subclass private instruction selector function */ + void SelectCVaStart(const IntrinsiccallNode &intrnNode); + void SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds); + void SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); + void SelectCmp(Operand &opnd0, Operand &opnd1, PrimType primType); + void SelectCmpResult(RegOperand &resOpnd, Opcode opCode, PrimType primType, PrimType primOpndType); + Operand *SelectDivRem(RegOperand &opnd0, RegOperand &opnd1, PrimType primType, Opcode opcode); + RegOperand &GetTargetStackPointer(PrimType primType) override; + RegOperand &GetTargetBasicPointer(PrimType primType) override; + std::tuple GetMemOpndInfoFromAggregateNode(BaseNode &argExpr); + void SelectParmListForAggregate(BaseNode &argExpr, AArch64CallConvImpl &parmLocator, bool isArgUnused); + void CreateCallStructParamPassByReg(const MemOperand &memOpnd, regno_t regNo, uint32 parmNum); + void CreateCallStructParamPassByStack(const MemOperand &addrOpnd, uint32 symSize, int32 baseOffset); + void SelectAggCopyReturn(const MIRSymbol &symbol, MIRType &symbolType, uint64 symbolSize); + uint32 GetAggCopySize(uint32 offset1, uint32 offset2, uint32 alignment) const; + bool IsParamStructCopy(const MIRSymbol &symbol); + bool IsSymbolRequireIndirection(const MIRSymbol &symbol) override; + void SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override; + Operand 
*SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrnNode, std::string name, Operand &opnd0, + const BaseNode &parent); + void SelectLibCallNoReturn(const std::string &funcName, std::vector &opndVec, PrimType primType); + void SelectLibCallNArg(const std::string &funcName, std::vector &opndVec, + std::vector pt); + RegOperand *PrepareMemcpyParm(MemOperand &memOperand, MOperator mOp); + RegOperand *PrepareMemcpyParm(uint64 copySize); + + /* save param pass by reg */ + std::vector> paramPassByReg; +}; +} + +#endif /* MAPLEBE_INCLUDE_AARCH64_MPISEL_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h index 6555183b50..3da19ed9dc 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h @@ -32,6 +32,8 @@ #include "aarch64_cfgo.h" #include "aarch64_rematerialize.h" #include "aarch64_pgo_gen.h" +#include "aarch64_MPISel.h" +#include "aarch64_standardize.h" namespace maplebe { constexpr int64 kShortBRDistance = (8 * 1024); @@ -213,6 +215,12 @@ class AArch64CG : public CG { Rematerializer *CreateRematerializer(MemPool &mp) const override { return mp.New(); } + MPISel *CreateMPIsel(MemPool &mp, AbstractIRBuilder &aIRBuilder, CGFunc &f) const override { + return mp.New(mp, aIRBuilder, f); + } + Standardize *CreateStandardize(MemPool &mp, CGFunc &f) const override { + return mp.New(f); + } /* Return the copy operand id of reg1 if it is an insn who just do copy from reg1 to reg2. * i. mov reg2, reg1 * ii. 
add/sub reg2, reg1, 0/zero register diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h index e4e372024a..5ea9b5adb0 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h @@ -67,6 +67,8 @@ class AArch64CGFunc : public CGFunc { return refCount; } + void Link2ISel(MPISel *p) override; + int32 GetBeginOffset() const { return beginOffset; } @@ -75,13 +77,13 @@ class AArch64CGFunc : public CGFunc { MOperator PickMovInsn(const RegOperand &lhs, const RegOperand &rhs) const; regno_t NewVRflag() override { - ASSERT(maxRegCount > kRFLAG, "CG internal error."); + ASSERT(GetMaxRegNum() > kRFLAG, "CG internal error."); constexpr uint8 size = 4; - if (maxRegCount <= kRFLAG) { - maxRegCount += (kRFLAG + kVRegisterNumber); - vRegTable.resize(maxRegCount); + if (GetMaxRegNum() <= kRFLAG) { + IncMaxRegNum(kRFLAG + kVRegisterNumber); + vReg.VRegTableResize(GetMaxRegNum()); } - new (&vRegTable[kRFLAG]) VirtualRegNode(kRegTyCc, size); + vReg.VRegTableValuesSet(kRFLAG, kRegTyCc, size); return kRFLAG; } @@ -89,6 +91,7 @@ class AArch64CGFunc : public CGFunc { RegOperand &GetOrCreateResOperand(const BaseNode &parent, PrimType primType); MIRStructType *GetLmbcStructArgType(BaseNode &stmt, size_t argNo) const; + void HandleFuncCfg(CGCFG *cfg) override; void IntrinsifyGetAndAddInt(ListOperand &srcOpnds, PrimType pty); void IntrinsifyGetAndSetInt(ListOperand &srcOpnds, PrimType pty); void IntrinsifyCompareAndSwapInt(ListOperand &srcOpnds, PrimType pty); @@ -100,6 +103,7 @@ class AArch64CGFunc : public CGFunc { void HandleRetCleanup(NaryStmtNode &retNode) override; void MergeReturn() override; RegOperand *ExtractNewMemBase(const MemOperand &memOpnd); + Operand *HandleExpr(const BaseNode &parent, BaseNode &expr); void SelectDassign(DassignNode &stmt, Operand &opnd0) override; void SelectDassignoff(DassignoffNode &stmt, Operand &opnd0) 
override; void SelectRegassign(RegassignNode &stmt, Operand &opnd0) override; @@ -116,7 +120,7 @@ class AArch64CGFunc : public CGFunc { bool LmbcSmallAggForCall(BlkassignoffNode &bNode, const Operand *src, std::vector **parmList); bool GetNumReturnRegsForIassignfpoff(MIRType &rType, PrimType &primType, uint32 &numRegs); void GenIassignfpoffStore(Operand &srcOpnd, int32 offset, uint32 byteSize, PrimType primType); - void SelectAggDassign(DassignNode &stmt) override; + void SelectAggDassign(const DassignNode &stmt) override; void SelectIassign(IassignNode &stmt) override; void SelectIassignoff(IassignoffNode &stmt) override; void SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) override; @@ -126,6 +130,7 @@ class AArch64CGFunc : public CGFunc { void SelectReturnSendOfStructInRegs(BaseNode *x) override; void SelectReturn(Operand *opnd0) override; void SelectIgoto(Operand *opnd0) override; + void SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, bool isCallNative = false); void SelectCondGoto(CondGotoNode &stmt, Operand &opnd0, Operand &opnd1) override; void SelectCondGoto(LabelOperand &targetOpnd, Opcode jmpOp, Opcode cmpOp, Operand &origOpnd0, Operand &origOpnd1, PrimType primType, bool signedCond); @@ -135,6 +140,7 @@ class AArch64CGFunc : public CGFunc { void SelectCall(CallNode &callNode) override; void SelectIcall(IcallNode &icallNode, Operand &srcOpnd) override; void SelectIntrinCall(IntrinsiccallNode &intrinsicCallNode) override; + Operand *SelectAArch64ffs(Operand &argOpnd, PrimType argType); Operand *SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrnNode, std::string name) override; Operand *SelectIntrinsicOpWithNParams(IntrinsicopNode &intrnNode, PrimType retType, const std::string &name) override; Operand *SelectCclz(IntrinsicopNode &intrnNode) override; @@ -302,7 +308,6 @@ class AArch64CGFunc : public CGFunc { LabelOperand &GetOrCreateLabelOperand(LabelIdx labIdx) override; LabelOperand &GetOrCreateLabelOperand(BB &bb) override; uint32 
GetAggCopySize(uint32 offset1, uint32 offset2, uint32 alignment) const; - RegOperand *SelectVectorAddLong(PrimType rType, Operand *o1, Operand *o2, PrimType otyp, bool isLow) override; RegOperand *SelectVectorAddWiden(Operand *o1, PrimType otyp1, Operand *o2, PrimType otyp2, bool isLow) override; RegOperand *SelectVectorAbs(PrimType rType, Operand *o1) override; @@ -345,10 +350,15 @@ class AArch64CGFunc : public CGFunc { RegOperand *SelectVectorWiden(PrimType rType, Operand *o1, PrimType otyp, bool isLow) override; RegOperand *SelectVectorMovNarrow(PrimType rType, Operand *opnd, PrimType oType) override; + void SelectCvtFloat2Float(Operand &resOpnd, Operand &srcOpnd, PrimType fromType, PrimType toType); + void SelectCvtFloat2Int(Operand &resOpnd, Operand &srcOpnd, PrimType itype, PrimType ftype); + void SelectCvtInt2Float(Operand &resOpnd, Operand &origOpnd0, PrimType toType, PrimType fromType); void SelectVectorCvt(Operand *res, PrimType rType, Operand *o1, PrimType oType); void SelectVectorZip(PrimType rType, Operand *o1, Operand *o2); void SelectStackSave(); void SelectStackRestore(const IntrinsiccallNode &intrnNode); + void SelectCVaStart(const IntrinsiccallNode &intrnNode); + void SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); void PrepareVectorOperands(Operand **o1, PrimType &oty1, Operand **o2, PrimType &oty2); RegOperand *AdjustOneElementVectorOperand(PrimType oType, RegOperand *opnd); @@ -867,16 +877,12 @@ class AArch64CGFunc : public CGFunc { void GenLargeStructCopyForIreadoff(BaseNode &parent, BaseNode &argExpr, int32 &structCopyOffset, size_t argNo); void SelectParmListPreprocessLargeStruct(BaseNode &parent, BaseNode &argExpr, int32 &structCopyOffset, size_t argNo); void SelectParmListPreprocess(StmtNode &naryNode, size_t start, std::set &specialArgs); - void SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, bool isCallNative = false); MOperator SelectExtMopForParmList(PrimType primType); Operand 
*SelectClearStackCallParam(const AddrofNode &expr, int64 &offsetValue); void SelectClearStackCallParmList(const StmtNode &naryNode, ListOperand &srcOpnds, std::vector &stackPostion); void SelectRem(Operand &resOpnd, Operand &lhsOpnd, Operand &rhsOpnd, PrimType primType, bool isSigned, bool is64Bits); void SelectCvtInt2Int(const BaseNode *parent, Operand *&resOpnd, Operand *opnd0, PrimType fromType, PrimType toType); - void SelectCvtFloat2Float(Operand &resOpnd, Operand &srcOpnd, PrimType fromType, PrimType toType); - void SelectCvtFloat2Int(Operand &resOpnd, Operand &srcOpnd, PrimType itype, PrimType ftype); - void SelectCvtInt2Float(Operand &resOpnd, Operand &origOpnd0, PrimType toType, PrimType fromType); Operand *SelectRelationOperator(RelationOperator operatorCode, const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); void SelectRelationOperator(RelationOperator operatorCode, Operand &resOpnd, Operand &opnd0, Operand &opnd1, @@ -884,10 +890,8 @@ class AArch64CGFunc : public CGFunc { MOperator SelectRelationMop(RelationOperator operatorCode, RelationOperatorOpndPattern opndPattern, bool is64Bits, bool isBitmaskImmediate, bool isBitNumLessThan16) const; Operand *SelectMinOrMax(bool isMin, const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); - void SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); Operand *SelectRoundLibCall(RoundType roundType, const TypeCvtNode &node, Operand &opnd0); Operand *SelectRoundOperator(RoundType roundType, const TypeCvtNode &node, Operand &opnd0, const BaseNode &parent); - Operand *SelectAArch64ffs(Operand &argOpnd, PrimType argType); Operand *SelectAArch64align(const IntrinsicopNode &intrnNode, bool isUp /* false for align down */); int64 GetOrCreatSpillRegLocation(regno_t vrNum) { AArch64SymbolAlloc *symLoc = static_cast(GetMemlayout()->GetLocOfSpillRegister(vrNum)); @@ -899,8 +903,7 @@ class AArch64CGFunc : public CGFunc { bool 
GenerateCompareWithZeroInstruction(Opcode jmpOp, Opcode cmpOp, bool is64Bits, PrimType primType, LabelOperand &targetOpnd, Operand &opnd0); void GenCVaStartIntrin(RegOperand &opnd, uint32 stkSize); - void SelectCVaStart(const IntrinsiccallNode &intrnNode); - void SelectCAtomicStoreN(const IntrinsiccallNode &intrinsiccall); + void SelectCAtomicStoreN(const IntrinsiccallNode &intrinsiccallNode); void SelectCAtomicStore(const IntrinsiccallNode &intrinsiccall); void SelectCAtomicLoad(const IntrinsiccallNode &intrinsiccall); void SelectCSyncLockRelease(const IntrinsiccallNode &intrinsiccall, PrimType primType); diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def b/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def index 4e84c64354..8cbf5aabb6 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def @@ -22,9 +22,9 @@ DEFINE_MOP(MOP_xmovrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISMOVE,kLtAlu,"mo /* MOP_wmovrr */ DEFINE_MOP(MOP_wmovrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISMOVE,kLtAlu,"mov","0,1",1) /* MOP_wmovri32 */ -DEFINE_MOP(MOP_wmovri32, {&OpndDesc::Reg32ID,&OpndDesc::Imm32},ISMOVE,kLtAlu,"mov","0,1",1) +DEFINE_MOP(MOP_wmovri32, {&OpndDesc::Reg32ID,&OpndDesc::Imm32},ISMOVE,kLtAlu,"mov","0,1",1,IsSingleInstructionMovable32) /* MOP_xmovri64 */ -DEFINE_MOP(MOP_xmovri64, {&OpndDesc::Reg64ID,&OpndDesc::Imm64},ISMOVE,kLtAlu,"mov","0,1",1) +DEFINE_MOP(MOP_xmovri64, {&OpndDesc::Reg64ID,&OpndDesc::Imm64},ISMOVE,kLtAlu,"mov","0,1",1,IsSingleInstructionMovable64) /* MOP_xmovrr_uxtw -- Remove Redundant uxtw -- used in globalopt:UxtwMovPattern */ DEFINE_MOP(MOP_xmovrr_uxtw, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISMOVE,kLtAlu,"mov","0,1",1) diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h index 9125b9ef71..ea6e76836d 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h +++ 
b/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h @@ -963,6 +963,29 @@ class RemoveMovingtoSameRegPattern : public CGPeepPattern { } }; +/* + * mov dest1, imm + * mul dest2, reg1, dest1 + * ===> if imm is 2^n + * mov dest1, imm + * lsl dest2, reg1, n + */ +class MulImmToShiftPattern : public CGPeepPattern { + public: + MulImmToShiftPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) + : CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~MulImmToShiftPattern() override = default; + std::string GetPatternName() override { + return "MulImmToShiftPattern"; + } + bool CheckCondition(Insn &insn) override; + void Run(BB &bb, Insn &insn) override; + private: + Insn *movInsn = nullptr; + uint32 shiftVal = 0; + MOperator newMop = MOP_undef; +}; + /* * Combining 2 STRs into 1 stp or 2 LDRs into 1 ldp, when they are * back to back and the [MEM] they access is conjointed. diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def b/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def index 7ca39b865f..1727a4fd31 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def @@ -15,9 +15,13 @@ ADDTARGETPHASE("layoutstackframe", true); ADDTARGETPHASE("createstartendlabel", true); ADDTARGETPHASE("buildehfunc", !GetMIRModule()->IsCModule()); - ADDTARGETPHASE("handlefunction", true); + ADDTARGETPHASE("handlefunction", !CGOptions::UseNewCg()); + ADDTARGETPHASE("instructionselector", CGOptions::UseNewCg()); + ADDTARGETPHASE("handlecfg", CGOptions::UseNewCg()); + ADDTARGETPHASE("patchlongbranch", CGOptions::UseNewCg() && CGOptions::DoFixLongBranch()); ADDTARGETPHASE("cgprofuse", Options::profileUse); ADDTARGETPHASE("moveargs", true); + ADDTARGETPHASE("instructionstandardize", CGOptions::UseNewCg()); /* SSA PHASES */ ADDTARGETPHASE("cgssaconstruct", CGOptions::DoCGSSA()); ADDTARGETPHASE("cgcopyprop", CGOptions::DoCGSSA()); diff --git 
a/src/mapleall/maple_be/include/cg/aarch64/aarch64_standardize.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_standardize.h new file mode 100644 index 0000000000..866bce5f43 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_standardize.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) [2022] Futurewei Technologies, Inc. All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef MAPLEBE_INCLUDE_AARCH64_STANDARDIZE_H +#define MAPLEBE_INCLUDE_AARCH64_STANDARDIZE_H + +#include "standardize.h" + +namespace maplebe { + +enum TargetOperandAction : uint8 { + kAbtractReg, + kAbtractMem, + kAbtractImm, + kAbtractNone, +}; + +struct TargetMopGen { + AArch64MOP_t targetMop; + std::vector targetOpndAction; + std::vector mappingOrder; +}; + +class AbstractIR2Target { + public: + abstract::AbstractMOP_t abstractMop; + std::vector targetMap; +}; + +class AArch64Standardize : public Standardize { + public: + explicit AArch64Standardize(CGFunc &f) : Standardize(f) { + SetAddressMapping(false); + } + + ~AArch64Standardize() override = default; + + private: + void Legalize() override; + void StdzMov(Insn &insn) override; + void StdzStrLdr(Insn &insn) override; + void StdzBasicOp(Insn &insn) override; + void StdzUnaryOp(Insn &insn) override; + void StdzCvtOp(Insn &insn) override; + void StdzShiftOp(Insn &insn) override; + void StdzCommentOp(Insn &insn) override; + + Operand *UpdateRegister(Operand &opnd, std::map ®Map, bool allocate); + void TraverseOperands(Insn *insn, std::map ®Map, bool allocate); + 
Operand *GetInsnResult(Insn *insn); + Insn *HandleTargetImm(Insn *insn, Insn *newInsn, uint32 idx, MOperator targetMop, uint8 order); + void SelectTargetInsn(Insn *insn); +}; +} +#endif /* MAPLEBE_INCLUDE_AARCH64_STANDARDIZE_H */ diff --git a/src/mapleall/maple_be/include/cg/abstract_mmir.def b/src/mapleall/maple_be/include/cg/abstract_mmir.def index 83d5658589..acc419fec2 100644 --- a/src/mapleall/maple_be/include/cg/abstract_mmir.def +++ b/src/mapleall/maple_be/include/cg/abstract_mmir.def @@ -51,17 +51,40 @@ DEFINE_MOP(MOP_zext_rr_64_32, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"zext_r64_r32","",1) DEFINE_MOP(MOP_sext_rr_64_32, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"sext_r64_r32","",1) + /* register truncate */ + DEFINE_MOP(MOP_zext_rr_8_16, {&OpndDesc::Reg8ID,&OpndDesc::Reg16IS},ISABSTRACT|ISCONVERSION,0,"zext_r8_r16","",1) + DEFINE_MOP(MOP_sext_rr_8_16, {&OpndDesc::Reg8ID,&OpndDesc::Reg16IS},ISABSTRACT|ISCONVERSION,0,"sext_r8_r16","",1) + DEFINE_MOP(MOP_zext_rr_8_32, {&OpndDesc::Reg8ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"zext_r8_r32","",1) + DEFINE_MOP(MOP_sext_rr_8_32, {&OpndDesc::Reg8ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"sext_r8_r32","",1) + DEFINE_MOP(MOP_zext_rr_16_32, {&OpndDesc::Reg16ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"zext_r16_r32","",1) + DEFINE_MOP(MOP_sext_rr_16_32, {&OpndDesc::Reg16ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"sext_r16_r32","",1) + + DEFINE_MOP(MOP_zext_rr_8_64, {&OpndDesc::Reg8ID,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"zext_r8_r64","",1) + DEFINE_MOP(MOP_sext_rr_8_64, {&OpndDesc::Reg8ID,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"sext_r8_r64","",1) + DEFINE_MOP(MOP_zext_rr_16_64, {&OpndDesc::Reg16ID,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"zext_r16_r64","",1) + DEFINE_MOP(MOP_sext_rr_16_64, {&OpndDesc::Reg16ID,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"sext_r16_r64","",1) + DEFINE_MOP(MOP_zext_rr_32_64, 
{&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"zext_r32_r64","",1) + DEFINE_MOP(MOP_sext_rr_32_64, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"sext_r32_r64","",1) + /* int2float conversion */ - DEFINE_MOP(MOP_cvt_fr_u32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"cvt_fr_u32","",1) - DEFINE_MOP(MOP_cvt_fr_u64, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"cvt_fr_u64","",1) - DEFINE_MOP(MOP_cvt_fr_i32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"cvt_fr_i32","",1) - DEFINE_MOP(MOP_cvt_fr_i64, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"cvt_fr_i64","",1) + DEFINE_MOP(MOP_cvt_f32_u32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"cvt_f32_u32","",1) + DEFINE_MOP(MOP_cvt_f64_u32, {&OpndDesc::Reg64FD,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"cvt_f64_u32","",1) + DEFINE_MOP(MOP_cvt_f32_u64, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"cvt_f32_u64","",1) + DEFINE_MOP(MOP_cvt_f64_u64, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"cvt_f64_u64","",1) + DEFINE_MOP(MOP_cvt_f32_i32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"cvt_f32_i32","",1) + DEFINE_MOP(MOP_cvt_f64_i32, {&OpndDesc::Reg64FD,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"cvt_f64_i32","",1) + DEFINE_MOP(MOP_cvt_f32_i64, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"cvt_f32_i64","",1) + DEFINE_MOP(MOP_cvt_f64_i64, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"cvt_f64_i64","",1) /* float2int conversion */ - DEFINE_MOP(MOP_cvt_rf_u32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_rf_u32","",1) - DEFINE_MOP(MOP_cvt_rf_u64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISABSTRACT|ISCONVERSION,0,"cvt_rf_u64","",1) - DEFINE_MOP(MOP_cvt_rf_i32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_rf_i32","",1) - 
DEFINE_MOP(MOP_cvt_rf_i64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISABSTRACT|ISCONVERSION,0,"cvt_rf_i64","",1) + DEFINE_MOP(MOP_cvt_u32_f32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_u32_f32","",1) + DEFINE_MOP(MOP_cvt_u64_f32, {&OpndDesc::Reg64ID,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_u64_f32","",1) + DEFINE_MOP(MOP_cvt_u32_f64, {&OpndDesc::Reg32ID,&OpndDesc::Reg64FS},ISABSTRACT|ISCONVERSION,0,"cvt_u32_f64","",1) + DEFINE_MOP(MOP_cvt_u64_f64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISABSTRACT|ISCONVERSION,0,"cvt_u64_f64","",1) + DEFINE_MOP(MOP_cvt_i32_f32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_i32_f32","",1) + DEFINE_MOP(MOP_cvt_i64_f32, {&OpndDesc::Reg64ID,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_i64_f32","",1) + DEFINE_MOP(MOP_cvt_i32_f64, {&OpndDesc::Reg32ID,&OpndDesc::Reg64FS},ISABSTRACT|ISCONVERSION,0,"cvt_i32_f64","",1) + DEFINE_MOP(MOP_cvt_i64_f64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISABSTRACT|ISCONVERSION,0,"cvt_i64_f64","",1) /* float conversion */ DEFINE_MOP(MOP_cvt_ff_64_32, {&OpndDesc::Reg64FD,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_ff_64_32","",1) @@ -150,4 +173,4 @@ DEFINE_MOP(MOP_not_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISABSTRACT|ISUNARYOP,0,"not_64","",1) /* MOP_comment */ - DEFINE_MOP(MOP_comment, {&OpndDesc::String0S},ISABSTRACT,0,"//","0", 0) \ No newline at end of file + DEFINE_MOP(MOP_comment, {&OpndDesc::String0S},ISABSTRACT|ISCOMMENT,0,"//","0", 0) diff --git a/src/mapleall/maple_be/include/cg/cg_irbuilder.h b/src/mapleall/maple_be/include/cg/cg_irbuilder.h index 794f7868c9..d18f242855 100644 --- a/src/mapleall/maple_be/include/cg/cg_irbuilder.h +++ b/src/mapleall/maple_be/include/cg/cg_irbuilder.h @@ -16,6 +16,7 @@ #ifndef MAPLEBE_INCLUDE_CG_IRBUILDER_H #define MAPLEBE_INCLUDE_CG_IRBUILDER_H +#include "reg_info.h" #include "insn.h" #include "operand.h" @@ -56,14 +57,18 @@ class InsnBuilder { uint32 createdInsnNum = 0; }; -constexpr 
uint32 kBaseVirtualRegNO = 200; /* avoid conflicts between virtual and physical */ class OperandBuilder { public: - explicit OperandBuilder(MemPool &mp, uint32 mirPregNum = 0) : alloc(&mp), virtualRegNum(mirPregNum) {} + explicit OperandBuilder(MemPool &mp, uint32 mirPregNum = 0) + : alloc(&mp) { + virtualReg.SetCount(mirPregNum); + } + /* create an operand in cgfunc when no mempool is supplied */ ImmOperand &CreateImm(uint32 size, int64 value, MemPool *mp = nullptr); ImmOperand &CreateImm(const MIRSymbol &symbol, int64 offset, int32 relocs, MemPool *mp = nullptr); + OfstOperand &CreateOfst(int64 offset, uint32 size, MemPool *mp = nullptr); MemOperand &CreateMem(uint32 size, MemPool *mp = nullptr); MemOperand &CreateMem(RegOperand &baseOpnd, int64 offset, uint32 size, MemPool *mp = nullptr); MemOperand &CreateMem(uint32 size, RegOperand &baseOpnd, ImmOperand &offImm, MemPool *mp = nullptr); @@ -79,14 +84,14 @@ class OperandBuilder { CommentOperand &CreateComment(const MapleString &s, MemPool *mp = nullptr); uint32 GetCurrentVRegNum() const { - return virtualRegNum; + return virtualReg.GetCount(); } protected: MapleAllocator alloc; private: - uint32 virtualRegNum = 0; + VregInfo virtualReg; /* reg bank for multiple use */ }; diff --git a/src/mapleall/maple_be/include/cg/cg_option.h b/src/mapleall/maple_be/include/cg/cg_option.h index 330710ad27..b8fe5ccf19 100644 --- a/src/mapleall/maple_be/include/cg/cg_option.h +++ b/src/mapleall/maple_be/include/cg/cg_option.h @@ -650,6 +650,22 @@ class CGOptions { return doCFGO; } + static void EnableNewCg() { + useNewCg = true; + } + + static void DisableNewCg() { + useNewCg = false; + } + + static bool UseNewCg() { + return useNewCg; + } + + static bool DoFixLongBranch() { + return CGOptions::GetInstance().GetOptimizeLevel() == kLevel0; + } + static void EnableRegSavesOpt() { doRegSavesOpt = true; } @@ -673,6 +689,7 @@ class CGOptions { static bool UseSsaPreSave() { return useSsaPreSave; } + static void EnableSsuPreRestore() { 
useSsuPreRestore = true; } @@ -1421,6 +1438,7 @@ class CGOptions { static bool doRegSavesOpt; static bool useSsaPreSave; static bool useSsuPreRestore; + static bool useNewCg; static bool dumpOptimizeCommonLog; static bool checkArrayStore; static bool exclusiveEH; diff --git a/src/mapleall/maple_be/include/cg/cg_options.h b/src/mapleall/maple_be/include/cg/cg_options.h index 9912692006..d4dd23dd67 100644 --- a/src/mapleall/maple_be/include/cg/cg_options.h +++ b/src/mapleall/maple_be/include/cg/cg_options.h @@ -48,6 +48,7 @@ extern maplecl::Option lsraOptcallee; extern maplecl::Option calleeregsPlacement; extern maplecl::Option ssapreSave; extern maplecl::Option ssupreRestore; +extern maplecl::Option newCg; extern maplecl::Option prepeep; extern maplecl::Option peep; extern maplecl::Option preschedule; diff --git a/src/mapleall/maple_be/include/cg/cgbb.h b/src/mapleall/maple_be/include/cg/cgbb.h index bbf06c1059..f9d2b210f6 100644 --- a/src/mapleall/maple_be/include/cg/cgbb.h +++ b/src/mapleall/maple_be/include/cg/cgbb.h @@ -203,6 +203,9 @@ class BB { void RemoveInsnSequence(Insn &insn, const Insn &nextInsn); + /* prepend all insns from bb before insn */ + void InsertBeforeInsn(BB &fromBB, Insn &beforeInsn); + /* append all insns from bb into this bb */ void AppendBBInsns(BB &bb); diff --git a/src/mapleall/maple_be/include/cg/cgfunc.h b/src/mapleall/maple_be/include/cg/cgfunc.h index 68498e8208..2456107b55 100644 --- a/src/mapleall/maple_be/include/cg/cgfunc.h +++ b/src/mapleall/maple_be/include/cg/cgfunc.h @@ -32,7 +32,6 @@ #include "mir_function.h" #include "debug_info.h" #include "maple_phase_manager.h" - /* Maple MP header */ #include "mempool_allocator.h" @@ -50,33 +49,6 @@ struct MemOpndCmp { } }; -class VirtualRegNode { - public: - VirtualRegNode() = default; - - VirtualRegNode(RegType type, uint32 size) - : regType(type), size(size), regNO(kInvalidRegNO) {} - - virtual ~VirtualRegNode() = default; - - void AssignPhysicalRegister(regno_t phyRegNO) { - regNO = 
phyRegNO; - } - - RegType GetType() const { - return regType; - } - - uint32 GetSize() const { - return size; - } - - private: - RegType regType = kRegTyUndef; - uint32 size = 0; /* size in bytes */ - regno_t regNO = kInvalidRegNO; /* physical register assigned by register allocation */ -}; - class SpillMemOperandSet { public: explicit SpillMemOperandSet(MapleAllocator &mallocator) : reuseSpillLocMem(mallocator.Adapter()) {} @@ -104,6 +76,8 @@ class SpillMemOperandSet { MapleSet reuseSpillLocMem; }; +class MPISel; + #if defined(TARGARM32) && TARGARM32 class LiveRange; #endif /* TARGARM32 */ @@ -121,6 +95,8 @@ class CGFunc { StackMemPool &stackMp, MapleAllocator &allocator, uint32 funcId); virtual ~CGFunc(); + void InitFactory(); + const std::string &GetName() const { return func.GetName(); } @@ -211,8 +187,14 @@ class CGFunc { void SetCleanupLabel(BB &cleanupEntry); bool ExitbbNotInCleanupArea(const BB &bb) const; uint32 GetMaxRegNum() const { - return maxRegCount; + return vReg.GetMaxRegCount(); }; + void SetMaxRegNum(uint32 num) { + vReg.SetMaxRegCount(num); + } + void IncMaxRegNum(uint32 num) { + vReg.IncMaxRegCount(num); + } void DumpCFG() const; void DumpBBInfo(const BB *bb) const; void DumpCGIR() const; @@ -230,7 +212,7 @@ class CGFunc { virtual void SelectAbort() = 0; virtual void SelectAssertNull(UnaryStmtNode &stmt) = 0; virtual void SelectAsm(AsmNode &node) = 0; - virtual void SelectAggDassign(DassignNode &stmt) = 0; + virtual void SelectAggDassign(const DassignNode &stmt) = 0; virtual void SelectIassign(IassignNode &stmt) = 0; virtual void SelectIassignoff(IassignoffNode &stmt) = 0; virtual void SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) = 0; @@ -417,6 +399,8 @@ class CGFunc { virtual RegOperand *SelectVectorWiden(PrimType rType, Operand *o1, PrimType otyp, bool isLow) = 0; virtual RegOperand *SelectVectorMovNarrow(PrimType rType, Operand *opnd, PrimType oType) = 0; + virtual void HandleFuncCfg(CGCFG *cfg) { AddCommonExitBB(); } + /* For 
ebo issue. */ virtual Operand *GetTrueOpnd() { return nullptr; @@ -430,6 +414,7 @@ class CGFunc { LabelIdx CreateLabel(); RegOperand *GetVirtualRegisterOperand(regno_t vRegNO) { + std::unordered_map &vRegOperandTable = vReg.vRegOperandTable; auto it = vRegOperandTable.find(vRegNO); return it == vRegOperandTable.end() ? nullptr : it->second; } @@ -450,27 +435,7 @@ class CGFunc { if (CGOptions::UseGeneralRegOnly()) { CHECK_FATAL(regType != kRegTyFloat, "cannot use float | SIMD register with --general-reg-only"); } - /* when vRegCount reach to maxRegCount, maxRegCount limit adds 80 every time */ - /* and vRegTable increases 80 elements. */ - if (vRegCount >= maxRegCount) { - ASSERT(vRegCount < maxRegCount + 1, "MAINTIAN FAILED"); - maxRegCount += kRegIncrStepLen; - vRegTable.resize(maxRegCount); - } -#if TARGAARCH64 || TARGX86_64 || TARGRISCV64 - if (size < k4ByteSize) { - size = k4ByteSize; - } -#if TARGAARCH64 - /* cannot handle 128 size register */ - if (regType == kRegTyInt && size > k8ByteSize) { - size = k8ByteSize; - } -#endif - ASSERT(size == k4ByteSize || size == k8ByteSize || size == k16ByteSize, "check size"); -#endif - new (&vRegTable[vRegCount]) VirtualRegNode(regType, size); - return vRegCount++; + return vReg.GetNextVregNO(regType, size); } virtual regno_t NewVRflag() { @@ -524,17 +489,17 @@ class CGFunc { /* return Register Type */ virtual RegType GetRegisterType(regno_t rNum) const { - CHECK(rNum < vRegTable.size(), "index out of range in GetVRegSize"); - return vRegTable[rNum].GetType(); + CHECK(rNum < vReg.VRegTableSize(), "index out of range in GetVRegSize"); + return vReg.VRegTableGetType(rNum); } #if defined(TARGX86_64) && TARGX86_64 uint32 GetMaxVReg() const { - return vRegCount + opndBuilder->GetCurrentVRegNum(); + return vReg.GetCount() + opndBuilder->GetCurrentVRegNum(); } #else uint32 GetMaxVReg() const { - return vRegCount; + return vReg.GetCount(); } #endif @@ -547,7 +512,7 @@ class CGFunc { } uint32 GetVRegSize(regno_t vregNum) { - 
CHECK(vregNum < vRegTable.size(), "index out of range in GetVRegSize"); + CHECK(vregNum < vReg.VRegTableSize(), "index out of range in GetVRegSize"); return GetOrCreateVirtualRegisterOperand(vregNum).GetSize() / kBitsPerByte; } @@ -1100,7 +1065,7 @@ class CGFunc { } regno_t GetVirtualRegNOFromPseudoRegIdx(PregIdx idx) const { - return regno_t(idx + firstMapleIrVRegNO); + return regno_t(idx + kBaseVirtualRegNO); } bool GetHasProEpilogue() const { @@ -1251,10 +1216,6 @@ class CGFunc { vregsToPregsMap[vRegNum] = pidx; } - uint32 GetFirstMapleIrVRegNO() const { - return firstMapleIrVRegNO; - } - void SetHasAsm() { hasAsm = true; } @@ -1275,6 +1236,18 @@ class CGFunc { return needStackProtect; } + virtual void Link2ISel(MPISel *p) { + (void)p; + } + + void SetISel(MPISel *p) { + isel = p; + } + + MPISel *GetISel() { + return isel; + } + MIRPreg *GetPseudoRegFromVirtualRegNO(const regno_t vRegNO, bool afterSSA = false) const { PregIdx pri = afterSSA ? VRegNOToPRegIdx(vRegNO) : GetPseudoRegIdxFromVirtualRegNO(vRegNO); if (pri == -1) { @@ -1292,15 +1265,11 @@ class CGFunc { } protected: - uint32 firstMapleIrVRegNO = 200; /* positioned after physical regs */ uint32 firstNonPregVRegNO; - uint32 vRegCount; /* for assigning a number for each CG virtual register */ + VregInfo vReg; /* for assigning a number for each CG virtual register */ uint32 ssaVRegCount = 0; /* vreg count in ssa */ - uint32 maxRegCount; /* for the current virtual register number limit */ size_t lSymSize; /* size of local symbol table imported */ - MapleVector vRegTable; /* table of CG's virtual registers indexed by v_reg no */ MapleVector bbVec; - MapleUnorderedMap vRegOperandTable; MapleUnorderedMap pRegSpillMemOperands; MapleUnorderedMap spillRegMemOperands; MapleUnorderedMap reuseSpillLocMem; @@ -1313,7 +1282,6 @@ class CGFunc { uint32 totalInsns = 0; int32 structCopySize = 0; int32 maxParamStackSize = 0; - static constexpr int kRegIncrStepLen = 80; /* reg number increate step length */ bool 
hasVLAOrAlloca = false; bool hasAlloca = false; @@ -1338,7 +1306,7 @@ class CGFunc { PregIdx GetPseudoRegIdxFromVirtualRegNO(const regno_t vRegNO) const { if (IsVRegNOForPseudoRegister(vRegNO)) { - return PregIdx(vRegNO - firstMapleIrVRegNO); + return PregIdx(vRegNO - kBaseVirtualRegNO); } return VRegNOToPRegIdx(vRegNO); } @@ -1346,7 +1314,7 @@ class CGFunc { bool IsVRegNOForPseudoRegister(regno_t vRegNum) const { /* 0 is not allowed for preg index */ uint32 n = static_cast(vRegNum); - return (firstMapleIrVRegNO < n && n < firstNonPregVRegNO); + return (kBaseVirtualRegNO < n && n < firstNonPregVRegNO); } PregIdx VRegNOToPRegIdx(regno_t vRegNum) const { @@ -1358,7 +1326,7 @@ class CGFunc { } VirtualRegNode &GetVirtualRegNodeFromPseudoRegIdx(PregIdx idx) { - return vRegTable.at(GetVirtualRegNOFromPseudoRegIdx(idx)); + return vReg.VRegTableElementGet(GetVirtualRegNOFromPseudoRegIdx(idx)); } PrimType GetTypeFromPseudoRegIdx(PregIdx idx) { @@ -1478,12 +1446,17 @@ class CGFunc { uint8 stackProtectInfo = 0; bool needStackProtect = false; uint32 priority = 0; + + /* cross reference isel class pointer */ + MPISel *isel = nullptr; }; /* class CGFunc */ MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgLayoutFrame, maplebe::CGFunc) MAPLE_FUNC_PHASE_DECLARE_END MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgHandleFunction, maplebe::CGFunc) MAPLE_FUNC_PHASE_DECLARE_END +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPatchLongBranch, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgFixCFLocOsft, maplebe::CGFunc) MAPLE_FUNC_PHASE_DECLARE_END MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgGenCfi, maplebe::CGFunc) diff --git a/src/mapleall/maple_be/include/cg/immvalid.def b/src/mapleall/maple_be/include/cg/immvalid.def index 3524d92bce..4a6df76761 100644 --- a/src/mapleall/maple_be/include/cg/immvalid.def +++ b/src/mapleall/maple_be/include/cg/immvalid.def @@ -14,6 +14,24 @@ bool IsBitSizeImmediate(uint64 val, uint32 bitLen, uint32 nLowerZeroBits) { return (mask2 & val) == 0UL && (mask1 & ((static_cast(val)) 
>> nLowerZeroBits)) == 0UL; }; +/* This is a copy from "operand.cpp", temporary fix for me_slp.cpp usage of this file */ +/* was IsMoveWidableImmediate */ +bool IsMoveWidableImmediateCopy(uint64 val, uint32 bitLen) { + if (bitLen == k64BitSize) { + /* 0xHHHH000000000000 or 0x0000HHHH00000000, return true */ + if (((val & ((static_cast(0xffff)) << k48BitSize)) == val) || + ((val & ((static_cast(0xffff)) << k32BitSize)) == val)) { + return true; + } + } else { + /* get lower 32 bits */ + val &= static_cast(0xffffffff); + } + /* 0x00000000HHHH0000 or 0x000000000000HHHH, return true */ + return ((val & ((static_cast(0xffff)) << k16BitSize)) == val || + (val & ((static_cast(0xffff)) << 0)) == val); +} + bool IsBitmaskImmediate(uint64 val, uint32 bitLen) { ASSERT(val != 0, "IsBitmaskImmediate() don's accept 0 or -1"); ASSERT(static_cast(val) != -1, "IsBitmaskImmediate() don's accept 0 or -1"); @@ -77,6 +95,18 @@ bool IsBitmaskImmediate(uint64 val, uint32 bitLen) { #endif } +bool IsSingleInstructionMovable32(int64 value) { + return (IsMoveWidableImmediateCopy(static_cast(value), 32) || + IsMoveWidableImmediateCopy(~static_cast(value), 32) || + IsBitmaskImmediate(static_cast(value), 32)); +} + +bool IsSingleInstructionMovable64(int64 value) { + return (IsMoveWidableImmediateCopy(static_cast(value), 64) || + IsMoveWidableImmediateCopy(~static_cast(value), 64) || + IsBitmaskImmediate(static_cast(value), 64)); +} + bool Imm12BitValid(int64 value) { bool result = IsBitSizeImmediate(static_cast(value), kMaxImmVal12Bits, 0); // for target linux-aarch64-gnu diff --git a/src/mapleall/maple_be/include/cg/isa.h b/src/mapleall/maple_be/include/cg/isa.h index f4d69c919c..53ef48af09 100644 --- a/src/mapleall/maple_be/include/cg/isa.h +++ b/src/mapleall/maple_be/include/cg/isa.h @@ -51,6 +51,7 @@ enum MopProperty : maple::uint8 { kInsnInlineAsm, kInsnSpecialIntrisic, kInsnIsNop, + kInsnIsComment, }; using regno_t = uint32_t; #define ISABSTRACT 1ULL @@ -83,6 +84,7 @@ using regno_t = 
uint32_t; #define INLINEASM (1ULL << kInsnInlineAsm) #define SPINTRINSIC (1ULL << kInsnSpecialIntrisic) #define ISNOP (1ULL << kInsnIsNop) +#define ISCOMMENT (1ULL << kInsnIsComment) constexpr maplebe::regno_t kInvalidRegNO = 0; /* @@ -254,6 +256,9 @@ struct InsnDesc { bool IsSpecialIntrinsic() const { return (properties & SPINTRINSIC) != 0; } + bool IsComment() const { + return properties & ISCOMMENT; + } MOperator GetOpc() const { return opc; } diff --git a/src/mapleall/maple_be/include/cg/isel.h b/src/mapleall/maple_be/include/cg/isel.h index 5490ec3462..9e83f4b38d 100644 --- a/src/mapleall/maple_be/include/cg/isel.h +++ b/src/mapleall/maple_be/include/cg/isel.h @@ -54,9 +54,9 @@ class MPISel { Operand* SelectSub(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); Operand* SelectNeg(const UnaryNode &node, Operand &opnd0, const BaseNode &parent); Operand* SelectCvt(const BaseNode &parent, const TypeCvtNode &node, Operand &opnd0); - Operand* SelectExtractbits(const BaseNode &parent, const ExtractbitsNode &node, Operand &opnd0); Operand *SelectDepositBits(const DepositbitsNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); - virtual Operand *SelectAbs(UnaryNode &node, Operand &opnd0); + virtual Operand* SelectExtractbits(const BaseNode &parent, ExtractbitsNode &node, Operand &opnd0); + virtual Operand *SelectAbs(UnaryNode &node, Operand &opnd0, const BaseNode &parent) = 0; Operand *SelectAlloca(UnaryNode &node, Operand &opnd0); Operand *SelectCGArrayElemAdd(BinaryNode &node, const BaseNode &parent); ImmOperand *SelectIntConst(MIRIntConst &intConst, PrimType primType) const; @@ -67,8 +67,8 @@ class MPISel { void SelectShift(Operand &resOpnd, Operand &opnd0, Operand &opnd1, Opcode shiftDirect, PrimType opnd0Type, PrimType opnd1Type); void SelectBand(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); - virtual void SelectReturn(NaryStmtNode &retNode, Operand &opnd) = 0; - virtual void SelectReturn() = 0; 
+ virtual void SelectReturn(NaryStmtNode &retNode) = 0; + virtual void SelectReturn(bool noOpnd) = 0; virtual void SelectIntAggCopyReturn(MemOperand &symbolMem, uint64 aggSize) = 0; virtual void SelectAggIassign(IassignNode &stmt, Operand &addrOpnd, Operand &opndRhs) = 0; virtual void SelectAggCopy(MemOperand &lhs, MemOperand &rhs, uint32 copySize) = 0; @@ -78,13 +78,13 @@ class MPISel { virtual void SelectCall(CallNode &callNode) = 0; virtual void SelectIcall(IcallNode &icallNode, Operand &opnd0) = 0; virtual void SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) = 0; - virtual Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; - virtual Operand *SelectFloatingConst(MIRConst &floatingConst, PrimType primType) const = 0; + virtual Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd1, const BaseNode &parent) = 0; + virtual Operand *SelectFloatingConst(MIRConst &floatingConst, PrimType primType, const BaseNode &parent) const = 0; virtual Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent) = 0; virtual Operand *SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) = 0; virtual Operand *SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) = 0; virtual Operand &ProcessReturnReg(PrimType primType, int32 sReg) = 0 ; - virtual void SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode, Operand &opnd0) = 0; + virtual void SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode) = 0; Operand *SelectBior(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); Operand *SelectBxor(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); Operand *SelectIread(const BaseNode &parent, const IreadNode &expr, int extraOffset = 0); @@ -98,13 +98,40 @@ class MPISel { virtual Operand *SelectStrLiteral(ConststrNode &constStr) = 0; virtual Operand *SelectCclz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; virtual Operand 
*SelectCctz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCsin(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCsinh(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCasin(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCcos(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCcosh(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCacos(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCatan(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; virtual Operand *SelectCexp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectClog(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectClog10(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCsinf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCsinhf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCasinf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCcosf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCcoshf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCacosf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCatanf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCexpf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectClogf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand 
*SelectClog10f(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCffs(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCmemcmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCstrlen(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCstrcmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCstrncmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCstrchr(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCstrrchr(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; virtual void SelectAsm(AsmNode &node) = 0; - virtual void SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRhs) = 0; + virtual void SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &rOpnd, const DassignNode &s) = 0; Operand *SelectBnot(const UnaryNode &node, Operand &opnd0, const BaseNode &parent); Operand *SelectMin(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); Operand *SelectMax(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); Operand *SelectRetype(TypeCvtNode &node, Operand &opnd0); + void SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); template Operand *SelectLiteral(T &c, MIRFunction &func, uint32 labelIdx) const { @@ -130,10 +157,8 @@ class MPISel { protected: MemPool *isMp; CGFunc *cgFunc; - void SelectCopy(Operand &dest, Operand &src, PrimType toType, PrimType fromType); - void SelectCopy(Operand &dest, Operand &src, PrimType toType); - RegOperand &SelectCopy2Reg(Operand &src, PrimType toType, PrimType fromType); - RegOperand &SelectCopy2Reg(Operand &src, PrimType toType); + void SelectCopy(Operand &dest, Operand &src, PrimType toType, PrimType 
fromType = PTY_unknown); + RegOperand &SelectCopy2Reg(Operand &src, PrimType toType, PrimType fromType = PTY_unknown); void SelectIntCvt(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType); void SelectCvtInt2Float(RegOperand &resOpnd, Operand &origOpnd0, PrimType toType, PrimType fromType); void SelectFloatCvt(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType); @@ -143,12 +168,23 @@ class MPISel { MirTypeInfo GetMirTypeInfoFormFieldIdAndMirType(FieldID fieldId, MIRType *mirType); MirTypeInfo GetMirTypeInfoFromMirNode(const BaseNode &node); MemOperand *GetOrCreateMemOpndFromIreadNode(const IreadNode &expr, PrimType primType, int offset); + + virtual void SelectCvtFloat2Float(Operand &resOpnd, Operand &srcOpnd, PrimType fromType, PrimType toType) { + CHECK_FATAL(false, "NYI"); + } + virtual void SelectCvtFloat2Int(Operand &resOpnd, Operand &srcOpnd, PrimType itype, PrimType ftype) { + CHECK_FATAL(false, "NYI"); + } + virtual bool IsSymbolRequireIndirection(const MIRSymbol &symbol) { + return false; + } private: StmtNode *HandleFuncEntry(); - void HandleFuncExit() const; void SelectDassign(StIdx stIdx, FieldID fieldId, PrimType rhsPType, Operand &opndRhs); void SelectDassignStruct(MIRSymbol &symbol, MemOperand &symbolMem, Operand &opndRhs); - virtual MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId = 0) const = 0; + virtual void HandleFuncExit() const = 0; + virtual MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId = 0, + RegOperand *baseReg = nullptr) = 0; virtual MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uint32 opndSize, int64 offset) const = 0; virtual Operand &GetTargetRetOperand(PrimType primType, int32 sReg) = 0; void SelectBasicOp(Operand &resOpnd, Operand &opnd0, Operand &opnd1, MOperator mOp, PrimType primType); @@ -162,7 +198,6 @@ class MPISel { void SelectBnot(Operand &resOpnd, Operand &opnd0, PrimType primType); void 
SelectBior(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); void SelectExtractbits(RegOperand &resOpnd, RegOperand &opnd0, uint8 bitOffset, uint8 bitSize, PrimType primType); - void SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); virtual RegOperand &GetTargetBasicPointer(PrimType primType) = 0; virtual RegOperand &GetTargetStackPointer(PrimType primType) = 0; void SelectMin(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); diff --git a/src/mapleall/maple_be/include/cg/reg_info.h b/src/mapleall/maple_be/include/cg/reg_info.h index 8a1100c12d..8099522b46 100644 --- a/src/mapleall/maple_be/include/cg/reg_info.h +++ b/src/mapleall/maple_be/include/cg/reg_info.h @@ -20,6 +20,121 @@ namespace maplebe { constexpr size_t kSpillMemOpndNum = 4; +constexpr uint32 kBaseVirtualRegNO = 200; /* avoid conflicts between virtual and physical */ +constexpr uint32 kRegIncrStepLen = 80; /* reg number increate step length */ + +class VirtualRegNode { + public: + VirtualRegNode() = default; + + VirtualRegNode(RegType type, uint32 size) + : regType(type), size(size), regNO(kInvalidRegNO) {} + + virtual ~VirtualRegNode() = default; + + void AssignPhysicalRegister(regno_t phyRegNO) { + regNO = phyRegNO; + } + + RegType GetType() const { + return regType; + } + + uint32 GetSize() const { + return size; + } + + private: + RegType regType = kRegTyUndef; + uint32 size = 0; /* size in bytes */ + regno_t regNO = kInvalidRegNO; /* physical register assigned by register allocation */ +}; + +class VregInfo { + public: + /* Only one place to allocate vreg within cg. + 'static' can be removed and initialized here if only allocation is from only one source. 
*/ + static uint32 virtualRegCount; + static uint32 maxRegCount; + static std::vector vRegTable; + static std::unordered_map vRegOperandTable; + + uint32 GetNextVregNO(RegType type, uint32 size) { + /* when vReg reach to maxRegCount, maxRegCount limit adds 80 every time */ + /* and vRegTable increases 80 elements. */ + if (virtualRegCount >= maxRegCount) { + ASSERT(virtualRegCount < maxRegCount + 1, "MAINTAIN FAILED"); + maxRegCount += kRegIncrStepLen; + VRegTableResize(maxRegCount); + } +#if TARGAARCH64 || TARGX86_64 || TARGRISCV64 + if (size < k4ByteSize) { + size = k4ByteSize; + } +#if TARGAARCH64 + /* cannot handle 128 size register */ + if (type == kRegTyInt && size > k8ByteSize) { + size = k8ByteSize; + } +#endif + ASSERT(size == k4ByteSize || size == k8ByteSize || size == k16ByteSize, "check size"); +#endif + VRegTableValuesSet(virtualRegCount, type, size); + + uint32 temp = virtualRegCount; + ++virtualRegCount; + return temp; + } + void Inc(uint32 v) { + virtualRegCount += v; + } + uint32 GetCount() const { + return virtualRegCount; + } + void SetCount(uint32 v) { + /* Vreg number can only increase. 
*/ + if (virtualRegCount < v) { + virtualRegCount = v; + } + } + + /* maxRegCount related stuff */ + uint32 GetMaxRegCount() const { + return maxRegCount; + } + void SetMaxRegCount(uint32 num) { + maxRegCount = num; + } + void IncMaxRegCount(uint32 num) { + maxRegCount += num; + } + + /* vRegTable related stuff */ + void VRegTableResize(uint32 sz) { + vRegTable.resize(sz); + } + uint32 VRegTableSize() const { + return vRegTable.size(); + } + uint32 VRegTableGetSize(uint32 idx) const { + return vRegTable[idx].GetSize(); + } + RegType VRegTableGetType(uint32 idx) const { + return vRegTable[idx].GetType(); + } + VirtualRegNode &VRegTableElementGet(uint32 idx) { + return vRegTable[idx]; + } + void VRegTableElementSet(uint32 idx, VirtualRegNode *node) { + vRegTable[idx] = *node; + } + void VRegTableValuesSet(uint32 idx, RegType rt, uint32 sz) { + new (&vRegTable[idx]) VirtualRegNode(rt, sz); + } + void VRegOperandTableSet(regno_t regNO, RegOperand *rp) { + vRegOperandTable[regNO] = rp; + } +}; class RegisterInfo { public: diff --git a/src/mapleall/maple_be/include/cg/standardize.h b/src/mapleall/maple_be/include/cg/standardize.h index dac0e28702..322497a9e3 100644 --- a/src/mapleall/maple_be/include/cg/standardize.h +++ b/src/mapleall/maple_be/include/cg/standardize.h @@ -41,6 +41,10 @@ class Standardize { void DoStandardize(); + CGFunc *GetCgFunc() { + return cgFunc; + } + protected: void SetAddressMapping(bool needMapping) { needAddrMapping = needMapping; @@ -48,16 +52,17 @@ class Standardize { bool NeedAddressMapping(const Insn &insn) { /* Operand number for two addressing mode is 2 */ /* and 3 for three addressing mode */ - needAddrMapping = (insn.GetOperandSize() > 2) || (insn.IsUnaryOp()); - return needAddrMapping; + return needAddrMapping && ((insn.GetOperandSize() > 2) || (insn.IsUnaryOp())); } private: + virtual void Legalize() {}; virtual void StdzMov(Insn &insn) = 0; virtual void StdzStrLdr(Insn &insn) = 0; virtual void StdzBasicOp(Insn &insn) = 0; - virtual 
void StdzUnaryOp(Insn &insn, CGFunc &cgFunc) = 0; - virtual void StdzCvtOp(Insn &insn, CGFunc &cgFunc) = 0; - virtual void StdzShiftOp(Insn &insn, CGFunc &cgFunc) = 0; + virtual void StdzUnaryOp(Insn &insn) = 0; + virtual void StdzCvtOp(Insn &insn) = 0; + virtual void StdzShiftOp(Insn &insn) = 0; + virtual void StdzCommentOp(Insn &insn) = 0; CGFunc *cgFunc; bool needAddrMapping = false; }; diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_MPISel.h b/src/mapleall/maple_be/include/cg/x86_64/x64_MPISel.h index dba290b0e9..16751bf209 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_MPISel.h +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_MPISel.h @@ -24,8 +24,9 @@ class X64MPIsel : public MPISel { public: X64MPIsel(MemPool &mp, AbstractIRBuilder &aIRBuilder, CGFunc &f) : MPISel(mp, aIRBuilder, f) {} ~X64MPIsel() override = default; - void SelectReturn(NaryStmtNode &retNode, Operand &opnd) override; - void SelectReturn() override; + void HandleFuncExit() const override; + void SelectReturn(NaryStmtNode &retNode) override; + void SelectReturn(bool noOpnd) override; void SelectCall(CallNode &callNode) override; void SelectIcall(IcallNode &icallNode, Operand &opnd0) override; Operand &ProcessReturnReg(PrimType primType, int32 sReg) override; @@ -33,14 +34,14 @@ class X64MPIsel : public MPISel { Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent) override; Operand *SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) override; Operand *SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) override; - Operand *SelectFloatingConst(MIRConst &floatingConst, PrimType primType) const override; + Operand *SelectFloatingConst(MIRConst &floatingConst, PrimType primType, const BaseNode &parent) const override; void SelectGoto(GotoNode &stmt) override; void SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) override; void SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd, Operand &opndRhs) override; - void 
SelectAggDassign(maplebe::MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRhs) override; + void SelectAggDassign(maplebe::MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &rOpnd, const DassignNode &s) override; void SelectAggCopy(MemOperand &lhs, MemOperand &rhs, uint32 copySize) override; void SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) override; - void SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode, Operand &opnd0) override; + void SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode) override; void SelectIgoto(Operand &opnd0) override; Operand *SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; Operand *SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; @@ -55,10 +56,38 @@ class X64MPIsel : public MPISel { Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; Operand *SelectCclz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; Operand *SelectCctz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCsin(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCsinh(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCasin(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCcos(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCcosh(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCacos(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCatan(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; Operand *SelectCexp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectClog(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand 
*SelectClog10(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCsinf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCsinhf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCasinf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCcosf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCcoshf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCacosf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCatanf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCexpf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectClogf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectClog10f(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCffs(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCmemcmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrlen(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrcmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrncmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrchr(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrrchr(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectAbs(UnaryNode &node, Operand &opnd0, const BaseNode &parent) override; void SelectAsm(AsmNode &node) override; private: - MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId = 0) const override; + MemOperand 
&GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId = 0, + RegOperand *baseReg = nullptr) override; MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uint32 opndSize, int64 offset) const override; Insn &AppendCall(x64::X64MOP_t mOp, Operand &targetOpnd, ListOperand ¶mOpnds, ListOperand &retOpnds); @@ -80,8 +109,8 @@ class X64MPIsel : public MPISel { RegOperand &GetTargetBasicPointer(PrimType primType) override; std::tuple GetMemOpndInfoFromAggregateNode(BaseNode &argExpr); void SelectParmListForAggregate(BaseNode &argExpr, X64CallConvImpl &parmLocator, bool isArgUnused); - void CreateCallStructParamPassByReg(MemOperand &memOpnd, regno_t regNo, uint32 parmNum); - void CreateCallStructParamPassByStack(MemOperand &addrOpnd, int32 symSize, int32 baseOffset); + void CreateCallStructParamPassByReg(const MemOperand &memOpnd, regno_t regNo, uint32 parmNum); + void CreateCallStructParamPassByStack(const MemOperand &addrOpnd, uint32 symSize, int32 baseOffset); void SelectAggCopyReturn(const MIRSymbol &symbol, MIRType &symbolType, uint64 symbolSize); uint32 GetAggCopySize(uint32 offset1, uint32 offset2, uint32 alignment) const; bool IsParamStructCopy(const MIRSymbol &symbol); diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_abstract_mapping.def b/src/mapleall/maple_be/include/cg/x86_64/x64_abstract_mapping.def index 3880b3cd5a..106b4d0b89 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_abstract_mapping.def +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_abstract_mapping.def @@ -97,7 +97,7 @@ DEFINE_MAPPING(abstract::MOP_neg_16, x64::MOP_negw_r) DEFINE_MAPPING(abstract::MOP_neg_32, x64::MOP_negl_r) DEFINE_MAPPING(abstract::MOP_neg_64, x64::MOP_negq_r) -/* CvtOp */ +/* CvtOp expand */ DEFINE_MAPPING(abstract::MOP_zext_rr_16_8, x64::MOP_movzbw_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_16_8, x64::MOP_movsbw_r_r) DEFINE_MAPPING(abstract::MOP_zext_rr_32_8, x64::MOP_movzbl_r_r) @@ -110,19 +110,32 @@ 
DEFINE_MAPPING(abstract::MOP_zext_rr_64_16, x64::MOP_movzwl_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_64_16, x64::MOP_movswq_r_r) DEFINE_MAPPING(abstract::MOP_zext_rr_64_32, x64::MOP_movl_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_64_32, x64::MOP_movslq_r_r) -DEFINE_MAPPING(abstract::MOP_sext_rr_64_32, x64::MOP_movslq_r_r) + +/* CvtOp truncate */ +DEFINE_MAPPING(abstract::MOP_zext_rr_8_16, x64::MOP_movzbw_r_r) +DEFINE_MAPPING(abstract::MOP_sext_rr_8_16, x64::MOP_movsbw_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_8_32, x64::MOP_movzbl_r_r) +DEFINE_MAPPING(abstract::MOP_sext_rr_8_32, x64::MOP_movsbl_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_16_32, x64::MOP_movzwl_r_r) +DEFINE_MAPPING(abstract::MOP_sext_rr_16_32, x64::MOP_movswl_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_8_64, x64::MOP_movzbl_r_r) +DEFINE_MAPPING(abstract::MOP_sext_rr_8_64, x64::MOP_movsbq_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_16_64, x64::MOP_movzwl_r_r) +DEFINE_MAPPING(abstract::MOP_sext_rr_16_64, x64::MOP_movswq_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_32_64, x64::MOP_movl_r_r) +DEFINE_MAPPING(abstract::MOP_sext_rr_32_64, x64::MOP_movslq_r_r) /* Floating CvtOp int2float */ -DEFINE_MAPPING(abstract::MOP_cvt_fr_u64, x64::MOP_cvtsi2sdq_r) -DEFINE_MAPPING(abstract::MOP_cvt_fr_u32, x64::MOP_cvtsi2ssq_r) -DEFINE_MAPPING(abstract::MOP_cvt_fr_i32, x64::MOP_cvtsi2ssl_r) -DEFINE_MAPPING(abstract::MOP_cvt_fr_i64, x64::MOP_cvtsi2sdq_r) +DEFINE_MAPPING(abstract::MOP_cvt_f64_u64, x64::MOP_cvtsi2sdq_r) +DEFINE_MAPPING(abstract::MOP_cvt_f32_u32, x64::MOP_cvtsi2ssq_r) +DEFINE_MAPPING(abstract::MOP_cvt_f32_i32, x64::MOP_cvtsi2ssl_r) +DEFINE_MAPPING(abstract::MOP_cvt_f64_i64, x64::MOP_cvtsi2sdq_r) /* Floating CvtOp float2int */ -DEFINE_MAPPING(abstract::MOP_cvt_rf_u32, x64::MOP_cvttss2siq_r) -DEFINE_MAPPING(abstract::MOP_cvt_rf_u64, x64::MOP_cvttsd2siq_r) -DEFINE_MAPPING(abstract::MOP_cvt_rf_i32, x64::MOP_cvttss2sil_r) -DEFINE_MAPPING(abstract::MOP_cvt_rf_i64, x64::MOP_cvttsd2siq_r) 
+DEFINE_MAPPING(abstract::MOP_cvt_u32_f32, x64::MOP_cvttss2siq_r) +DEFINE_MAPPING(abstract::MOP_cvt_u64_f64, x64::MOP_cvttsd2siq_r) +DEFINE_MAPPING(abstract::MOP_cvt_i32_f32, x64::MOP_cvttss2sil_r) +DEFINE_MAPPING(abstract::MOP_cvt_i64_f64, x64::MOP_cvttsd2siq_r) /* Floating CvtOp float2float */ DEFINE_MAPPING(abstract::MOP_cvt_ff_64_32, x64::MOP_cvtss2sd_r) diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_cgfunc.h b/src/mapleall/maple_be/include/cg/x86_64/x64_cgfunc.h index 00bb4cd236..232a646a3e 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_cgfunc.h +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_cgfunc.h @@ -49,7 +49,7 @@ class X64CGFunc : public CGFunc { void SelectAbort() override; void SelectAssertNull(UnaryStmtNode &stmt) override; void SelectAsm(AsmNode &node) override; - void SelectAggDassign(DassignNode &stmt) override; + void SelectAggDassign(const DassignNode &stmt) override; void SelectIassign(IassignNode &stmt) override; void SelectIassignoff(IassignoffNode &stmt) override; void SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) override; diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_standardize.h b/src/mapleall/maple_be/include/cg/x86_64/x64_standardize.h index 46353bc7e0..dd84920da6 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_standardize.h +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_standardize.h @@ -31,10 +31,11 @@ class X64Standardize : public Standardize { void StdzMov(Insn &insn) override; void StdzStrLdr(Insn &insn) override; void StdzBasicOp(Insn &insn) override; - void StdzUnaryOp(Insn &insn, CGFunc &cgFunc) override; - void StdzCvtOp(Insn &insn, CGFunc &cgFunc) override; - void StdzShiftOp(Insn &insn, CGFunc &cgFunc) override; - void StdzFloatingNeg(Insn &insn, CGFunc &cgFunc); + void StdzUnaryOp(Insn &insn) override; + void StdzCvtOp(Insn &insn) override; + void StdzShiftOp(Insn &insn) override; + void StdzFloatingNeg(Insn &insn); + void StdzCommentOp(Insn &insn) override; }; } 
#endif /* MAPLEBE_INCLUDEX_64_STANDARDIZE_H */ diff --git a/src/mapleall/maple_be/src/be/lower.cpp b/src/mapleall/maple_be/src/be/lower.cpp index 33cedfdead..2b4b33c4c4 100644 --- a/src/mapleall/maple_be/src/be/lower.cpp +++ b/src/mapleall/maple_be/src/be/lower.cpp @@ -839,7 +839,7 @@ StmtNode *CGLowerer::WriteBitField(const std::pair &byteBitOffsets BaseNode *CGLowerer::ReadBitField(const std::pair &byteBitOffsets, const MIRBitFieldType *fieldType, BaseNode *baseAddr) { auto bitSize = fieldType->GetFieldSize(); - auto primType = fieldType->GetPrimType(); + auto primType = GetRegPrimType(fieldType->GetPrimType()); auto byteOffset = byteBitOffsets.first; auto bitOffset = byteBitOffsets.second; auto *builder = mirModule.GetMIRBuilder(); diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_MPISel.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_MPISel.cpp index 72ba534f1b..5b679a209c 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_MPISel.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_MPISel.cpp @@ -11,4 +11,750 @@ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR * FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. - */ \ No newline at end of file + */ + +#include "aarch64_memlayout.h" +#include "aarch64_cgfunc.h" +#include "aarch64_cg.h" +#include "isel.h" +#include "aarch64_MPISel.h" + +namespace maplebe { +/* local Handle functions in isel, do not delete or move */ +void HandleGoto(StmtNode &stmt, MPISel &iSel); +void HandleLabel(StmtNode &stmt, const MPISel &iSel); + +void AArch64MPIsel::HandleFuncExit() const { + BlockNode *block = cgFunc->GetFunction().GetBody(); + ASSERT(block != nullptr, "get func body block failed in CGFunc::GenerateInstruction"); + cgFunc->GetCurBB()->SetLastStmt(*block->GetLast()); + /* Set lastbb's frequency */ + cgFunc->SetLastBB(*cgFunc->GetCurBB()); +} + +/* Field-ID 0 is assigned to the top level structure. 
(Field-ID also defaults to 0 if it is not a structure.) */ +MemOperand &AArch64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId, RegOperand *baseReg) { + PrimType symType; + uint64 fieldOffset = 0; + bool isCopy = IsParamStructCopy(symbol); + if (fieldId == 0) { + symType = symbol.GetType()->GetPrimType(); + } else { + MIRType *mirType = symbol.GetType(); + ASSERT((mirType->IsMIRStructType() || mirType->IsMIRUnionType()), "non-structure"); + MIRStructType *structType = static_cast(mirType); + symType = structType->GetFieldType(fieldId)->GetPrimType(); + if (baseReg || !isCopy) { + fieldOffset = static_cast(cgFunc->GetBecommon().GetFieldOffset(*structType, fieldId).first); + } + } + uint32 opndSz = (symType == PTY_agg) ? k64BitSize : GetPrimTypeBitSize(symType); + if (isCopy) { + opndSz = (baseReg) ? opndSz : k64BitSize; + } + if (baseReg) { + AArch64CGFunc *a64func = static_cast(cgFunc); + OfstOperand *ofstOpnd = &a64func->GetOrCreateOfstOpnd(fieldOffset, k32BitSize); + return *a64func->CreateMemOperand(opndSz, *baseReg, *ofstOpnd); + } else { + return GetOrCreateMemOpndFromSymbol(symbol, opndSz, fieldOffset); + } +} +MemOperand &AArch64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uint32 opndSize, int64 offset) const { + return static_cast(cgFunc)->GetOrCreateMemOpnd(symbol, offset, opndSize); +} + +Operand *AArch64MPIsel::SelectFloatingConst(MIRConst &mirConst, PrimType primType, const BaseNode &parent) const { + CHECK_FATAL(primType == PTY_f64 || primType == PTY_f32, "wrong const"); + AArch64CGFunc *a64Func = static_cast(cgFunc); + if (primType == PTY_f64) { + auto *dblConst = safe_cast(mirConst); + return a64Func->HandleFmovImm(primType, dblConst->GetIntValue(), *dblConst, parent); + } else { + auto *floatConst = safe_cast(mirConst); + return a64Func->HandleFmovImm(primType, floatConst->GetIntValue(), *floatConst, parent); + } +} + +void AArch64MPIsel::SelectReturn(NaryStmtNode &retNode) { + ASSERT(retNode.NumOpnds() 
<= 1, "NYI return nodes number > 1"); + Operand *opnd = nullptr; + if (retNode.NumOpnds() != 0) { + if (!cgFunc->GetFunction().StructReturnedInRegs()) { + opnd = cgFunc->HandleExpr(retNode, *retNode.Opnd(0)); + } else { + cgFunc->SelectReturnSendOfStructInRegs(retNode.Opnd(0)); + } + } + cgFunc->SelectReturn(opnd); +} + +void AArch64MPIsel::SelectReturn(bool noOpnd) { + /* if return operand exist, cgFunc->SelectReturn will generate it */ + if (noOpnd) { + MOperator mOp = MOP_xuncond; + LabelOperand &targetOpnd = cgFunc->GetOrCreateLabelOperand(cgFunc->GetReturnLabel()->GetLabelIdx()); + Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, AArch64CG::kMd[mOp]); + jmpInsn.AddOpndChain(targetOpnd); + cgFunc->GetCurBB()->AppendInsn(jmpInsn); + } +} + +void AArch64MPIsel::CreateCallStructParamPassByStack(const MemOperand &memOpnd, uint32 symSize, int32 baseOffset) { + uint32 copyTime = RoundUp(symSize, GetPointerSize()) / GetPointerSize(); + for (int32 i = 0; i < copyTime; ++i) { + MemOperand &addrMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize); + addrMemOpnd.SetBaseRegister(*memOpnd.GetBaseRegister()); + ImmOperand &newImmOpnd = static_cast(*memOpnd.GetOffsetOperand()->Clone(*cgFunc->GetMemoryPool())); + newImmOpnd.SetValue(newImmOpnd.GetValue() + i * GetPointerSize()); + addrMemOpnd.SetOffsetOperand(newImmOpnd); + RegOperand &spOpnd = cgFunc->GetOpndBuilder()->CreatePReg(RSP, k64BitSize, kRegTyInt); + Operand &stMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(spOpnd, + (baseOffset + i * GetPointerSize()), k64BitSize); + SelectCopy(stMemOpnd, addrMemOpnd, PTY_u64); + } +} + +void AArch64MPIsel::CreateCallStructParamPassByReg(const MemOperand &memOpnd, regno_t regNo, uint32 parmNum) { + RegOperand &parmOpnd = cgFunc->GetOpndBuilder()->CreatePReg(regNo, k64BitSize, kRegTyInt); + MemOperand &addrMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize); + addrMemOpnd.SetBaseRegister(*memOpnd.GetBaseRegister()); + ImmOperand &newImmOpnd = 
static_cast(*memOpnd.GetOffsetOperand()->Clone(*cgFunc->GetMemoryPool())); + newImmOpnd.SetValue(newImmOpnd.GetValue() + parmNum * GetPointerSize()); + addrMemOpnd.SetOffsetOperand(newImmOpnd); + paramPassByReg.push_back({&parmOpnd, &addrMemOpnd, PTY_a64}); +} + +std::tuple AArch64MPIsel::GetMemOpndInfoFromAggregateNode(BaseNode &argExpr) { + /* get mirType info */ + auto [fieldId, mirType] = GetFieldIdAndMirTypeFromMirNode(argExpr); + MirTypeInfo symInfo = GetMirTypeInfoFormFieldIdAndMirType(fieldId, mirType); + /* get symbol memOpnd info */ + MemOperand *symMemOpnd = nullptr; + if (argExpr.GetOpCode() == OP_dread) { + AddrofNode &dread = static_cast(argExpr); + MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(dread.GetStIdx()); + symMemOpnd = &GetOrCreateMemOpndFromSymbol(*symbol, dread.GetFieldID()); + } else if (argExpr.GetOpCode() == OP_iread) { + IreadNode &iread = static_cast(argExpr); + symMemOpnd = GetOrCreateMemOpndFromIreadNode(iread, symInfo.primType, symInfo.offset); + } else { + CHECK_FATAL_FALSE("unsupported opcode"); + } + return {symMemOpnd, symInfo.size, mirType}; +} + +void AArch64MPIsel::SelectParmListForAggregate(BaseNode &argExpr, AArch64CallConvImpl &parmLocator, bool isArgUnused) { + auto [argOpnd, argSize, mirType] = GetMemOpndInfoFromAggregateNode(argExpr); + ASSERT(argOpnd->IsMemoryAccessOperand(), "wrong opnd"); + MemOperand &memOpnd = static_cast(*argOpnd); + + CCLocInfo ploc; + parmLocator.LocateNextParm(*mirType, ploc); + if (isArgUnused) { + return; + } + + /* create call struct param pass */ + if (argSize > k16ByteSize || ploc.reg0 == kRinvalid) { + CreateCallStructParamPassByStack(memOpnd, argSize, ploc.memOffset); + } else { + CHECK_FATAL(ploc.fpSize == 0, "Unknown call parameter state"); + CreateCallStructParamPassByReg(memOpnd, ploc.reg0, 0); + if (ploc.reg1 != kRinvalid) { + CreateCallStructParamPassByReg(memOpnd, ploc.reg1, kSecondReg); + } + if (ploc.reg2 != kRinvalid) { + 
CreateCallStructParamPassByReg(memOpnd, ploc.reg2, kThirdReg); + } + if (ploc.reg3 != kRinvalid) { + CreateCallStructParamPassByReg(memOpnd, ploc.reg3, kFourthReg); + } + } +} + +/* + * SelectParmList generates an instruction for each of the parameters + * to load the parameter value into the corresponding register. + * We return a list of registers to the call instruction because + * they may be needed in the register allocation phase. + */ +void AArch64MPIsel::SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds) { + AArch64CGFunc *aarch64CGFunc = static_cast(cgFunc); + aarch64CGFunc->SelectParmList(naryNode, srcOpnds); +} + +bool AArch64MPIsel::IsParamStructCopy(const MIRSymbol &symbol) { + if (symbol.GetStorageClass() == kScFormal && + cgFunc->GetBecommon().GetTypeSize(symbol.GetTyIdx().GetIdx()) > k16ByteSize) { + return true; + } + return false; +} + +bool AArch64MPIsel::IsSymbolRequireIndirection(const MIRSymbol &symbol) { + return IsParamStructCopy(symbol); +} + +void AArch64MPIsel::SelectIntAggCopyReturn(MemOperand &symbolMem, uint64 aggSize) { + (void)symbolMem; + (void)aggSize; +} + +void AArch64MPIsel::SelectAggCopy(MemOperand &lhs, MemOperand &rhs, uint32 copySize) { + (void)lhs; + (void)rhs; + (void)copySize; + CHECK_FATAL_FALSE("Invalid MPISel function"); +} + +void AArch64MPIsel::SelectLibCallNoReturn(const std::string &funcName, std::vector &opndVec, + PrimType primType) { + /* generate libcall without return value */ + std::vector pt(opndVec.size(), primType); + SelectLibCallNArg(funcName, opndVec, pt); + return; +} + +void AArch64MPIsel::SelectLibCallNArg(const std::string &funcName, std::vector &opndVec, + std::vector pt) { + std::string newName = funcName; + MIRSymbol *st = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + st->SetNameStrIdx(newName); + st->SetStorageClass(kScExtern); + st->SetSKind(kStFunc); + + /* setup the type of the callee function */ + std::vector vec; + std::vector vecAt; + for (size_t i = 1; i < 
opndVec.size(); ++i) { + vec.emplace_back(GlobalTables::GetTypeTable().GetTypeTable()[static_cast(pt[i])]->GetTypeIndex()); + vecAt.emplace_back(TypeAttrs()); + } + + /* only support no return function */ + MIRType *retType = GlobalTables::GetTypeTable().GetTypeTable().at(static_cast(PTY_void)); + st->SetTyIdx(cgFunc->GetBecommon().BeGetOrCreateFunctionType(retType->GetTypeIndex(), vec, vecAt)->GetTypeIndex()); + + /* setup actual parameters */ + ListOperand ¶mOpnds = cgFunc->GetOpndBuilder()->CreateList(); + + AArch64CallConvImpl parmLocator(cgFunc->GetBecommon()); + CCLocInfo ploc; + for (size_t i = 0; i < opndVec.size(); ++i) { + ASSERT(pt[i] != PTY_void, "primType check"); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast(pt[i])]; + Operand *stOpnd = opndVec[i]; + ASSERT(stOpnd->IsRegister(), "exp result should be reg"); + RegOperand *expRegOpnd = static_cast(stOpnd); + parmLocator.LocateNextParm(*ty, ploc); + if (ploc.reg0 != 0) { /* load to the register */ + RegOperand &parmRegOpnd = cgFunc->GetOpndBuilder()->CreatePReg(ploc.reg0, + expRegOpnd->GetSize(), cgFunc->GetRegTyFromPrimTy(pt[i])); + SelectCopy(parmRegOpnd, *expRegOpnd, pt[i]); + paramOpnds.PushOpnd(parmRegOpnd); + } + ASSERT(ploc.reg1 == 0, "SelectCall NYI"); + } + + MIRSymbol *sym = cgFunc->GetFunction().GetLocalOrGlobalSymbol(st->GetStIdx(), false); + Operand &targetOpnd = cgFunc->GetOpndBuilder()->CreateFuncNameOpnd(*sym); + ListOperand &retOpnds = cgFunc->GetOpndBuilder()->CreateList(); + Insn &callInsn = AppendCall(MOP_xbl, targetOpnd, paramOpnds, retOpnds); + + callInsn.SetRetType(Insn::kRegInt); + if (retType != nullptr) { + callInsn.SetRetSize(static_cast(retType->GetSize())); + callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType())); + } + return; +} + +RegOperand *AArch64MPIsel::PrepareMemcpyParm(MemOperand &memOperand, MOperator mOp) { + RegOperand ®Result = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + Insn &addrInsn = 
(cgFunc->GetInsnBuilder()->BuildInsn(mOp, AArch64CG::kMd[mOp])); + addrInsn.AddOpndChain(memOperand).AddOpndChain(regResult); + cgFunc->GetCurBB()->AppendInsn(addrInsn); + return ®Result; +} + +RegOperand *AArch64MPIsel::PrepareMemcpyParm(uint64 copySize) { + RegOperand ®Result = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + ImmOperand &sizeOpnd = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, static_cast(copySize)); + SelectCopy(regResult, sizeOpnd, PTY_i64); + return ®Result; +} + +void AArch64MPIsel::SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRh, const DassignNode &stmt) { + (void)lhsInfo; + (void)symbolMem; + (void)opndRh; + cgFunc->SelectAggDassign(stmt); +} + +void AArch64MPIsel::SelectAggIassign(IassignNode &stmt, Operand &addrOpnd, Operand &opndRhs) { + (void)opndRhs; + cgFunc->SelectAggIassign(stmt, addrOpnd); +} + +Insn &AArch64MPIsel::AppendCall(AArch64MOP_t mOp, Operand &targetOpnd, + ListOperand ¶mOpnds, ListOperand &retOpnds) { + Insn &callInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, AArch64CG::kMd[mOp]); + callInsn.AddOpndChain(targetOpnd).AddOpndChain(paramOpnds).AddOpndChain(retOpnds); + cgFunc->GetCurBB()->AppendInsn(callInsn); + cgFunc->GetCurBB()->SetHasCall(); + cgFunc->GetFunction().SetHasCall(); + return callInsn; +} + +void AArch64MPIsel::SelectCalleeReturn(MIRType *retType, ListOperand &retOpnds) { + if (retType == nullptr) { + return; + } + auto retSize = retType->GetSize() * kBitsPerByte; + if (retType->GetPrimType() != PTY_agg || retSize <= k128BitSize) { + if (retSize > k0BitSize) { + retOpnds.PushOpnd(cgFunc->GetOpndBuilder()->CreatePReg(R0, k64BitSize, kRegTyInt)); + } + if (retSize > k64BitSize) { + retOpnds.PushOpnd(cgFunc->GetOpndBuilder()->CreatePReg(R1, k64BitSize, kRegTyInt)); + } + } +} + +void AArch64MPIsel::SelectCall(CallNode &callNode) { + cgFunc->SelectCall(callNode); +} + +void AArch64MPIsel::SelectIcall(IcallNode &iCallNode, Operand &opnd0) { + 
cgFunc->SelectIcall(iCallNode, opnd0); +} + +Operand &AArch64MPIsel::ProcessReturnReg(PrimType primType, int32 sReg) { + return GetTargetRetOperand(primType, sReg); +} + +void AArch64MPIsel::SelectGoto(GotoNode &stmt) { + MOperator mOp = MOP_xuncond; + auto funcName = ".L." + std::to_string(cgFunc->GetUniqueID()) + "__" + std::to_string(stmt.GetOffset()); + LabelOperand &targetOpnd = cgFunc->GetOpndBuilder()->CreateLabel(funcName.c_str(), stmt.GetOffset()); + Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, AArch64CG::kMd[mOp]); + cgFunc->GetCurBB()->AppendInsn(jmpInsn); + jmpInsn.AddOpndChain(targetOpnd); + cgFunc->SetCurBBKind(BB::kBBGoto); + return; +} + +void AArch64MPIsel::SelectIgoto(Operand &opnd0) { + CHECK_FATAL(opnd0.IsRegister(), "only register implemented!"); + MOperator mOp = MOP_xbr; + Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, AArch64CG::kMd[mOp]); + jmpInsn.AddOpndChain(opnd0); + cgFunc->GetCurBB()->AppendInsn(jmpInsn); + return; +} + +/* The second parameter in function va_start does not need to be concerned here, + * it is mainly used in proepilog */ +void AArch64MPIsel::SelectCVaStart(const IntrinsiccallNode &intrnNode) { + AArch64CGFunc *a64func = static_cast(cgFunc); + a64func->SelectCVaStart(intrnNode); +} + +void AArch64MPIsel::SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) { + MIRIntrinsicID intrinsic = intrinsiccallNode.GetIntrinsic(); + + if (intrinsic == INTRN_C_va_start) { + SelectCVaStart(intrinsiccallNode); + return; + } + if (intrinsic == INTRN_C_stack_save || intrinsic == INTRN_C_stack_restore) { + return; + } + + CHECK_FATAL_FALSE("Intrinsic %d: %s not implemented by AArch64 isel CG.", intrinsic, GetIntrinsicName(intrinsic)); +} + +void AArch64MPIsel::SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) { + MIRType *etype = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a64)); + std::vector sizeArray; + const SmallCaseVector &switchTable = rangeGotoNode.GetRangeGotoTable(); + 
sizeArray.emplace_back(switchTable.size()); + MemPool *memPool = cgFunc->GetMemoryPool(); + MIRArrayType *arrayType = memPool->New(etype->GetTypeIndex(), sizeArray); + MIRAggConst *arrayConst = memPool->New(cgFunc->GetMirModule(), *arrayType); + for (const auto &itPair : switchTable) { + LabelIdx labelIdx = itPair.second; + cgFunc->GetCurBB()->PushBackRangeGotoLabel(labelIdx); + MIRConst *mirConst = memPool->New(labelIdx, cgFunc->GetFunction().GetPuidx(), *etype); + arrayConst->AddItem(mirConst, 0); + } + MIRSymbol *lblSt = cgFunc->GetFunction().GetSymTab()->CreateSymbol(kScopeLocal); + lblSt->SetStorageClass(kScFstatic); + lblSt->SetSKind(kStConst); + lblSt->SetTyIdx(arrayType->GetTypeIndex()); + lblSt->SetKonst(arrayConst); + std::string lblStr(".L_"); + uint32 labelIdxTmp = cgFunc->GetLabelIdx(); + lblStr.append(std::to_string(cgFunc->GetUniqueID())).append("_LOCAL_CONST.").append(std::to_string(labelIdxTmp++)); + cgFunc->SetLabelIdx(labelIdxTmp); + lblSt->SetNameStrIdx(lblStr); + cgFunc->AddEmitSt(cgFunc->GetCurBB()->GetId(), *lblSt); + + ImmOperand &stOpnd = cgFunc->GetOpndBuilder()->CreateImm(*lblSt, 0, 0); + /* get index */ + PrimType srcType = rangeGotoNode.Opnd(0)->GetPrimType(); + RegOperand &opnd0 = SelectCopy2Reg(srcOpnd, srcType); + int32 minIdx = switchTable[0].first; + ImmOperand &opnd1 = cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(srcType), + -minIdx - rangeGotoNode.GetTagOffset()); + RegOperand *indexOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(srcType), kRegTyInt); + SelectAdd(*indexOpnd, opnd0, opnd1, srcType); + if (indexOpnd->GetSize() != GetPrimTypeBitSize(PTY_u64)) { + indexOpnd = static_cast(&cgFunc->SelectCopy(*indexOpnd, PTY_u64, PTY_u64)); + } + + /* load the address of the switch table */ + RegOperand &baseOpnd = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + cgFunc->GetCurBB()->AppendInsn(cgFunc->GetInsnBuilder()->BuildInsn(MOP_xadrp, baseOpnd, stOpnd)); + 
cgFunc->GetCurBB()->AppendInsn(cgFunc->GetInsnBuilder()->BuildInsn(MOP_xadrpl12, baseOpnd, baseOpnd, stOpnd)); + + /* load the displacement into a register by accessing memory at base + index*8 */ + AArch64CGFunc *a64func = static_cast(cgFunc); + BitShiftOperand &bitOpnd = a64func->CreateBitShiftOperand(BitShiftOperand::kLSL, k3BitSize, k8BitShift); + Operand *disp = static_cast(cgFunc)->CreateMemOperand(k64BitSize, baseOpnd, *indexOpnd, bitOpnd); + RegOperand &tgt = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + SelectAdd(tgt, baseOpnd, *disp, PTY_u64); + Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_xbr, AArch64CG::kMd[MOP_xbr]); + jmpInsn.AddOpndChain(tgt); + cgFunc->GetCurBB()->AppendInsn(jmpInsn); +} + +Operand *AArch64MPIsel::SelectAddrof(AddrofNode &expr, const BaseNode &parent) { + return cgFunc->SelectAddrof(expr, parent, false); +} + +Operand *AArch64MPIsel::SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) { + return &cgFunc->SelectAddrofFunc(expr, parent); +} + +Operand *AArch64MPIsel::SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) { + (void)parent; + /* adrp reg, label-id */ + uint32 instrSize = static_cast(expr.SizeOfInstr()); + PrimType primType = (instrSize == k8ByteSize) ? PTY_u64 : + (instrSize == k4ByteSize) ? PTY_u32 : + (instrSize == k2ByteSize) ? 
PTY_u16 : PTY_u8; + Operand &dst = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + ImmOperand &immOpnd = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, expr.GetOffset()); + cgFunc->GetCurBB()->AppendInsn(cgFunc->GetInsnBuilder()->BuildInsn(MOP_adrp_label, dst, immOpnd)); + return &dst; +} + +/* + * handle brfalse/brtrue op, opnd0 can be a compare node or non-compare node + * such as a dread for example + */ +void AArch64MPIsel::SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode) { + auto &condGotoNode = static_cast(stmt); + Operand *opnd0 = nullptr; + Operand *opnd1 = nullptr; + if (!kOpcodeInfo.IsCompare(condNode.GetOpCode())) { + Opcode condOp = condGotoNode.GetOpCode(); + if (condNode.GetOpCode() == OP_constval) { + auto &constValNode = static_cast(condNode); + if (((OP_brfalse == condOp) && constValNode.GetConstVal()->IsZero()) || + ((OP_brtrue == condOp) && !constValNode.GetConstVal()->IsZero())) { + auto *gotoStmt = cgFunc->GetMemoryPool()->New(OP_goto); + gotoStmt->SetOffset(condGotoNode.GetOffset()); + HandleGoto(*gotoStmt, *this); // isel's + auto *labelStmt = cgFunc->GetMemoryPool()->New(); + labelStmt->SetLabelIdx(cgFunc->CreateLabel()); + HandleLabel(*labelStmt, *this); + } + return; + } + /* 1 operand condNode, cmp it with zero */ + opnd0 = HandleExpr(stmt, condNode); // isel's + opnd1 = &cgFunc->CreateImmOperand(condNode.GetPrimType(), 0); + } else { + /* 2 operands condNode */ + opnd0 = HandleExpr(stmt, *condNode.Opnd(0)); // isel's + opnd1 = HandleExpr(stmt, *condNode.Opnd(1)); // isel's + } + cgFunc->SelectCondGoto(stmt, *opnd0, *opnd1); + cgFunc->SetCurBBKind(BB::kBBIf); +} + +Operand *AArch64MPIsel::SelectStrLiteral(ConststrNode &constStr) { + return cgFunc->SelectStrConst(*cgFunc->GetMemoryPool()->New( + constStr.GetStrIdx(), *GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a64)))); +} + +Operand &AArch64MPIsel::GetTargetRetOperand(PrimType primType, int32 sReg) { + regno_t retReg = 
0; + switch (sReg) { + case kSregRetval0: + if (IsPrimitiveFloat(primType)) { + retReg = V0; + } else { + retReg = R0; + } + break; + case kSregRetval1: + if (IsPrimitiveFloat(primType)) { + retReg = V1; + } else { + retReg = R1; + } + break; + default: + CHECK_FATAL_FALSE("GetTargetRetOperand: NIY"); + break; + } + uint32 bitSize = GetPrimTypeBitSize(primType); + RegOperand &parmRegOpnd = cgFunc->GetOpndBuilder()->CreatePReg(retReg, bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + return parmRegOpnd; +} + +Operand *AArch64MPIsel::SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + (void)parent; + PrimType dtype = node.GetPrimType(); + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(dtype)) { + resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), + cgFunc->GetRegTyFromPrimTy(dtype)); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, dtype, node.Opnd(0)->GetPrimType()); + RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, dtype, node.Opnd(1)->GetPrimType()); + SelectMpy(*resOpnd, regOpnd0, regOpnd1, dtype); + } else { + /* vector operand */ + CHECK_FATAL_FALSE("NIY"); + } + + return resOpnd; +} + +void AArch64MPIsel::SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + cgFunc->SelectMpy(resOpnd, opnd0, opnd1, primType); +} + +Operand *AArch64MPIsel::SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + return cgFunc->SelectDiv(node, opnd0, opnd1, parent); +} + +Operand *AArch64MPIsel::SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + return cgFunc->SelectRem(node, opnd0, opnd1, parent); +} + +Operand *AArch64MPIsel::SelectDivRem(RegOperand &opnd0, RegOperand &opnd1, PrimType primType, Opcode opcode) { + (void)opnd0; + (void)opnd1; + (void)primType; + (void)opcode; + CHECK_FATAL_FALSE("Invalid MPISel function"); + return nullptr; +} + +Operand *AArch64MPIsel::SelectCmpOp(CompareNode &node, Operand &opnd0, Operand 
&opnd1, const BaseNode &parent) { + return cgFunc->SelectCmpOp(node, opnd0, opnd1, parent); +} + +void AArch64MPIsel::SelectCmp(Operand &opnd0, Operand &opnd1, PrimType primType) { + (void)opnd0; + (void)opnd1; + (void)primType; + CHECK_FATAL_FALSE("Invalid MPISel function"); +} + +Operand *AArch64MPIsel::SelectSelect(TernaryNode &expr, Operand &cond, Operand &trueOpnd, Operand &falseOpnd, + const BaseNode &parent) { + return cgFunc->SelectSelect(expr, cond, trueOpnd, falseOpnd, parent); +} + +Operand *AArch64MPIsel::SelectExtractbits(const BaseNode &parent, ExtractbitsNode &node, Operand &opnd0) { + return cgFunc->SelectExtractbits(node, opnd0, parent); +} + +void AArch64MPIsel::SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + AArch64CGFunc *a64func = static_cast(cgFunc); + a64func->SelectMinOrMax(isMin, resOpnd, opnd0, opnd1, primType); +} + +Operand *AArch64MPIsel::SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrnNode, std::string name, Operand &opnd0, const BaseNode &parent) { + PrimType ptype = intrnNode.Opnd(0)->GetPrimType(); + Operand *opnd = &opnd0; + AArch64CGFunc *a64func = static_cast(cgFunc); + if (intrnNode.GetIntrinsic() == INTRN_C_ffs) { + ASSERT(intrnNode.GetPrimType() == PTY_i32, "Unexpect Size"); + return a64func->SelectAArch64ffs(*opnd, ptype); + } + if (opnd->IsMemoryAccessOperand()) { + RegOperand &ldDest = a64func->CreateRegisterOperandOfType(ptype); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(a64func->PickLdInsn(GetPrimTypeBitSize(ptype), ptype), ldDest, *opnd); + cgFunc->GetCurBB()->AppendInsn(insn); + opnd = &ldDest; + } + std::vector opndVec; + RegOperand *dst = &a64func->CreateRegisterOperandOfType(ptype); + opndVec.push_back(dst); /* result */ + opndVec.push_back(opnd); /* param 0 */ + a64func->SelectLibCall(name, opndVec, ptype, ptype); + + return dst; +} + +Operand *AArch64MPIsel::SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return 
cgFunc->SelectBswap(node, opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCctz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return cgFunc->SelectCctz(node); +} + +Operand *AArch64MPIsel::SelectCclz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return cgFunc->SelectCclz(node); +} + +Operand *AArch64MPIsel::SelectCsin(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "sin", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCsinh(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "sinh", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCasin(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "asin", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCcos(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "cos", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCcosh(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "cosh", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCacos(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "acos", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCatan(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "atan", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCexp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "exp", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectClog(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "log", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectClog10(IntrinsicopNode &node, Operand &opnd0, const BaseNode 
&parent) { + return SelectIntrinsicOpWithOneParam(node, "log10", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCsinf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "sinf", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCsinhf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "sinhf", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCasinf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "asinf", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCcosf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "cosf", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCcoshf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "coshf", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCacosf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "acosf", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCatanf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "atanf", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCexpf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "expf", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectClogf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "logf", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectClog10f(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "log10f", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCffs(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return 
SelectIntrinsicOpWithOneParam(node, "ffs", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCmemcmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL_FALSE("NYI"); + return nullptr; +} + +Operand *AArch64MPIsel::SelectCstrlen(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL_FALSE("NYI"); + return nullptr; +} + +Operand *AArch64MPIsel::SelectCstrcmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL_FALSE("NYI"); + return nullptr; +} + +Operand *AArch64MPIsel::SelectCstrncmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL_FALSE("NYI"); + return nullptr; +} + +Operand *AArch64MPIsel::SelectCstrchr(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL_FALSE("NYI"); + return nullptr; +} + +Operand *AArch64MPIsel::SelectCstrrchr(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL_FALSE("NYI"); + return nullptr; +} + +Operand *AArch64MPIsel::SelectAbs(UnaryNode &node, Operand &opnd0, const BaseNode &parent) { + return cgFunc->SelectAbs(node, opnd0); +} + +void AArch64MPIsel::SelectCvtFloat2Float(Operand &resOpnd, Operand &srcOpnd, PrimType fromType, PrimType toType) { + static_cast(cgFunc)->SelectCvtFloat2Float(resOpnd, srcOpnd, fromType, toType); +} + +void AArch64MPIsel::SelectCvtFloat2Int(Operand &resOpnd, Operand &srcOpnd, PrimType itype, PrimType ftype) { + static_cast(cgFunc)->SelectCvtFloat2Int(resOpnd, srcOpnd, itype, ftype); +} + +RegOperand &AArch64MPIsel::GetTargetStackPointer(PrimType primType) { + return cgFunc->GetOpndBuilder()->CreatePReg(RSP, GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); +} + +RegOperand &AArch64MPIsel::GetTargetBasicPointer(PrimType primType) { + return cgFunc->GetOpndBuilder()->CreatePReg(RFP, GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); +} + +void AArch64MPIsel::SelectAsm(AsmNode &node) { + cgFunc->SelectAsm(node); 
+} +} diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp index da7b905d7f..bf1443da3e 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp @@ -51,6 +51,20 @@ CondOperand AArch64CGFunc::ccOperands[kCcLast] = { CondOperand(CC_AL), }; +Operand *AArch64CGFunc::HandleExpr(const BaseNode &parent, BaseNode &expr) { +#ifdef NEWCG + Operand *opnd; + if (CGOptions::UseNewCg()) { + MPISel *isel = GetISel(); + opnd = isel->HandleExpr(parent, expr); + } else { + opnd = CGFunc::HandleExpr(parent, expr); + } + return opnd; +#endif + return CGFunc::HandleExpr(parent, expr); +} + namespace { constexpr int32 kSignedDimension = 2; /* signed and unsigned */ constexpr int32 kIntByteSizeDimension = 4; /* 1 byte, 2 byte, 4 bytes, 8 bytes */ @@ -1700,7 +1714,7 @@ bool AArch64CGFunc::IslhsSizeAligned(uint64 lhsSizeCovered, uint32 newAlignUsed, return false; } -void AArch64CGFunc::SelectAggDassign(DassignNode &stmt) { +void AArch64CGFunc::SelectAggDassign(const DassignNode &stmt) { MIRSymbol *lhsSymbol = GetFunction().GetLocalOrGlobalSymbol(stmt.GetStIdx()); uint32 lhsOffset = 0; MIRType *lhsType = lhsSymbol->GetType(); @@ -7326,23 +7340,23 @@ RegOperand *AArch64CGFunc::CreateVirtualRegisterOperand(regno_t vRegNO, uint32 s } RegOperand &AArch64CGFunc::CreateVirtualRegisterOperand(regno_t vRegNO) { - ASSERT((vRegOperandTable.find(vRegNO) == vRegOperandTable.end()), "already exist"); - ASSERT(vRegNO < vRegTable.size(), "index out of range"); - uint8 bitSize = static_cast((static_cast(vRegTable[vRegNO].GetSize())) * kBitsPerByte); - RegOperand *res = CreateVirtualRegisterOperand(vRegNO, bitSize, vRegTable.at(vRegNO).GetType()); - vRegOperandTable[vRegNO] = res; + ASSERT((vReg.vRegOperandTable.find(vRegNO) == vReg.vRegOperandTable.end()), "already exist"); + ASSERT(vRegNO < vReg.VRegTableSize(), "index out of range"); + uint8 bitSize = 
static_cast((static_cast(vReg.VRegTableGetSize(vRegNO))) * kBitsPerByte); + RegOperand *res = CreateVirtualRegisterOperand(vRegNO, bitSize, vReg.VRegTableGetType(vRegNO)); + vReg.vRegOperandTable[vRegNO] = res; return *res; } RegOperand &AArch64CGFunc::GetOrCreateVirtualRegisterOperand(regno_t vRegNO) { - auto it = vRegOperandTable.find(vRegNO); - return (it != vRegOperandTable.end()) ? *(it->second) : CreateVirtualRegisterOperand(vRegNO); + auto it = vReg.vRegOperandTable.find(vRegNO); + return (it != vReg.vRegOperandTable.end()) ? *(it->second) : CreateVirtualRegisterOperand(vRegNO); } RegOperand &AArch64CGFunc::GetOrCreateVirtualRegisterOperand(RegOperand ®Opnd) { regno_t regNO = regOpnd.GetRegisterNumber(); - auto it = vRegOperandTable.find(regNO); - if (it != vRegOperandTable.end()) { + auto it = vReg.vRegOperandTable.find(regNO); + if (it != vReg.vRegOperandTable.end()) { it->second->SetSize(regOpnd.GetSize()); it->second->SetRegisterNumber(regNO); it->second->SetRegisterType(regOpnd.GetRegisterType()); @@ -7351,14 +7365,14 @@ RegOperand &AArch64CGFunc::GetOrCreateVirtualRegisterOperand(RegOperand ®Opnd } else { auto *newRegOpnd = static_cast(regOpnd.Clone(*memPool)); regno_t newRegNO = newRegOpnd->GetRegisterNumber(); - if (newRegNO >= maxRegCount) { - maxRegCount = newRegNO + kRegIncrStepLen; - vRegTable.resize(maxRegCount); + if (newRegNO >= GetMaxRegNum()) { + SetMaxRegNum(newRegNO + kRegIncrStepLen); + vReg.VRegTableResize(GetMaxRegNum()); } - vRegOperandTable[newRegNO] = newRegOpnd; + vReg.vRegOperandTable[newRegNO] = newRegOpnd; VirtualRegNode *vregNode = memPool->New(newRegOpnd->GetRegisterType(), newRegOpnd->GetSize()); - vRegTable[newRegNO] = *vregNode; - vRegCount = maxRegCount; + vReg.VRegTableElementSet(newRegNO, vregNode); + vReg.SetCount(GetMaxRegNum()); return *newRegOpnd; } } @@ -9166,7 +9180,6 @@ void AArch64CGFunc::SelectReturn(Operand *opnd0) { CHECK_FATAL(false, "nyi"); } } - LabelOperand &targetOpnd = 
GetOrCreateLabelOperand(GetReturnLabel()->GetLabelIdx()); GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xuncond, targetOpnd)); } @@ -10030,7 +10043,7 @@ void AArch64CGFunc::SelectAddAfterInsn(Operand &resOpnd, Operand &opnd0, Operand MemOperand *AArch64CGFunc::AdjustMemOperandIfOffsetOutOfRange( MemOperand *memOpnd, regno_t vrNum, bool isDest, Insn &insn, AArch64reg regNum, bool &isOutOfRange) { - if (vrNum >= vRegTable.size()) { + if (vrNum >= vReg.VRegTableSize()) { CHECK_FATAL(false, "index out of range in AArch64CGFunc::AdjustMemOperandIfOffsetOutOfRange"); } uint32 dataSize = GetOrCreateVirtualRegisterOperand(vrNum).GetSize(); @@ -10086,7 +10099,7 @@ MemOperand *AArch64CGFunc::GetOrCreatSpillMem(regno_t vrNum) { auto p = spillRegMemOperands.find(vrNum); if (p == spillRegMemOperands.end()) { - if (vrNum >= vRegTable.size()) { + if (vrNum >= vReg.VRegTableSize()) { CHECK_FATAL(false, "index out of range in AArch64CGFunc::FreeSpillRegMem"); } uint32 memBitSize = k64BitSize; @@ -12587,4 +12600,34 @@ bool AArch64CGFunc::DistanceCheck(const BB &bb, LabelIdx targLabIdx, uint32 targ } CHECK_FATAL(false, "CFG error"); } + +void AArch64CGFunc::Link2ISel(MPISel *p) { + SetISel(p); + CGFunc::InitFactory(); +} + +void AArch64CGFunc::HandleFuncCfg(CGCFG *cfg) { + RemoveUnreachableBB(); + AddCommonExitBB(); + if (GetMirModule().GetSrcLang() != kSrcLangC) { + MarkCatchBBs(); + } + MarkCleanupBB(); + DetermineReturnTypeofCall(); + cfg->UnreachCodeAnalysis(); + if (GetMirModule().GetSrcLang() != kSrcLangC) { + cfg->WontExitAnalysis(); + } + CG *cg = GetCG(); + if (cg->GetCGOptions().IsLazyBinding() && cg->IsLibcore()) { + ProcessLazyBinding(); + } + if (cg->DoPatchLongBranch()) { + PatchLongBranch(); + } + if (cg->GetCGOptions().DoEnableHotColdSplit()) { + cfg->CheckCFGFreq(); + } + NeedStackProtect(); +} } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp index 
ba974f2a8d..80966d8c6a 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp @@ -204,6 +204,11 @@ bool AArch64CGPeepHole::DoSSAOptimize(BB &bb, Insn &insn) { manager->Optimize(true); break; } + case MOP_xmulrrr: + case MOP_wmulrrr: { + manager->Optimize(!cgFunc->IsAfterRegAlloc()); + break; + } default: break; } @@ -2486,6 +2491,51 @@ void RemoveMovingtoSameRegPattern::Run(BB &bb, Insn &insn) { } } +bool MulImmToShiftPattern::CheckCondition(Insn &insn) { + auto &useReg = static_cast(insn.GetOperand(kInsnThirdOpnd)); + movInsn = ssaInfo->GetDefInsn(useReg); + if (movInsn == nullptr) { + return false; + } + MOperator prevMop = movInsn->GetMachineOpcode(); + if (prevMop != MOP_wmovri32 && prevMop != MOP_xmovri64) { + return false; + } + ImmOperand &immOpnd = static_cast(movInsn->GetOperand(kInsnSecondOpnd)); + if (immOpnd.IsNegative()) { + return false; + } + int64 immVal = immOpnd.GetValue(); + /* 0 considered power of 2 */ + if ((immVal & (immVal - 1)) != 0) { + return false; + } + shiftVal = static_cast(log2(immVal)); + newMop = (prevMop == MOP_xmovri64) ? 
MOP_xlslrri6 : MOP_wlslrri5; + return true; +} + +void MulImmToShiftPattern::Run(BB &bb, Insn &insn) { + /* mov x0,imm and mul to shift */ + if (!CheckCondition(insn)) { + return; + } + auto *aarch64CGFunc = static_cast(cgFunc); + ImmOperand &shiftOpnd = aarch64CGFunc->CreateImmOperand(shiftVal, k32BitSize, false); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, insn.GetOperand(kInsnFirstOpnd), + insn.GetOperand(kInsnSecondOpnd), shiftOpnd); + bb.ReplaceInsn(insn, newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + optSuccess = true; + SetCurrInsn(&newInsn); + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(movInsn); + DumpAfterPattern(prevs, &insn, &newInsn); + } +} + void EnhanceStrLdrAArch64::Run(BB &bb, Insn &insn) { Insn *prevInsn = insn.GetPrev(); if (!cgFunc.GetMirModule().IsCModule()) { diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp index 5fbd4e572e..42d80fec7a 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp @@ -154,6 +154,9 @@ void AArch64ReachingDefinition::AddRetPseudoInsn(BB &bb) { if (regNO == kInvalidRegNO) { return; } + if (bb.GetKind() == BB::kBBGoto) { + return; /* a goto block should not have unreachable instr */ + } if (regNO == R0) { RegOperand ®Opnd = diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_standardize.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_standardize.cpp new file mode 100644 index 0000000000..79cfd3b9a3 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_standardize.cpp @@ -0,0 +1,335 @@ +/* + * Copyright (c) [2022] Futurewei Technologies, Inc. All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "aarch64_standardize.h" +#include "aarch64_isa.h" +#include "aarch64_cg.h" +#include "insn.h" + +namespace maplebe { + +using namespace abstract; +static AbstractIR2Target abstract2TargetTable[kMopLast] { + {MOP_undef, {{MOP_pseudo_none, {}, {}}}}, + + {MOP_copy_ri_8, {{MOP_wmovri32, {kAbtractReg, kAbtractImm}, {0, 1}}}}, + {MOP_copy_rr_8, {{MOP_wmovrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_ri_16, {{MOP_wmovri32, {kAbtractReg, kAbtractImm}, {0, 1}}}}, + {MOP_copy_rr_16, {{MOP_wmovrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_ri_32, {{MOP_wmovri32, {kAbtractReg, kAbtractImm}, {0, 1}}}}, + {MOP_copy_rr_32, {{MOP_wmovrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_ri_64, {{MOP_xmovri64, {kAbtractReg, kAbtractImm}, {0, 1}}}}, + {MOP_copy_rr_64, {{MOP_xmovrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {MOP_copy_fi_8, {{MOP_xvmovsr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_ff_8, {{MOP_xvmovs, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_fi_16, {{MOP_xvmovsr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_ff_16, {{MOP_xvmovs, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_fi_32, {{MOP_xvmovsr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_ff_32, {{MOP_xvmovs, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_fi_64, {{MOP_xvmovdr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_copy_ff_64, {{MOP_xvmovd, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {MOP_zext_rr_16_8, {{MOP_xuxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_16_8, {{MOP_xsxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_zext_rr_32_8, {{MOP_xuxtb32, {kAbtractReg, kAbtractReg}, 
{0, 1}}}}, + {MOP_sext_rr_32_8, {{MOP_xsxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_zext_rr_32_16, {{MOP_xuxth32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_32_16, {{MOP_xsxth32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {MOP_zext_rr_64_8, {{MOP_xuxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_64_8, {{MOP_xsxtb64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_zext_rr_64_16, {{MOP_xuxth32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_64_16, {{MOP_xsxth64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_zext_rr_64_32, {{MOP_xuxtw64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_64_32, {{MOP_xsxtw64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {MOP_zext_rr_8_16, {{MOP_xuxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_8_16, {{MOP_xsxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_zext_rr_8_32, {{MOP_xuxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_8_32, {{MOP_xsxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_zext_rr_16_32, {{MOP_xuxth32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_16_32, {{MOP_xsxth32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {MOP_zext_rr_8_64, {{MOP_xuxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_8_64, {{MOP_xsxtb64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_zext_rr_16_64, {{MOP_xuxth32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_16_64, {{MOP_xsxth64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_zext_rr_32_64, {{MOP_xuxtw64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_sext_rr_32_64, {{MOP_xsxtw64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {MOP_cvt_f32_u32, {{MOP_vcvtufr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_f64_u32, {{MOP_vcvtudr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_f32_u64, {{MOP_xvcvtufr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_f64_u64, {{MOP_xvcvtudr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_f32_i32, {{MOP_vcvtfr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_f64_i32, 
{{MOP_vcvtdr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_f32_i64, {{MOP_xvcvtfr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_f64_i64, {{MOP_xvcvtdr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {MOP_cvt_u32_f32, {{MOP_vcvturf, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_u64_f32, {{MOP_xvcvturf, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_u32_f64, {{MOP_vcvturd, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_u64_f64, {{MOP_xvcvturd, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_i32_f32, {{MOP_vcvtrf, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_i64_f32, {{MOP_xvcvtrf, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_i32_f64, {{MOP_vcvtrd, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_i64_f64, {{MOP_xvcvtrd, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {MOP_cvt_ff_64_32, {{MOP_xvcvtdf, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_cvt_ff_32_64, {{MOP_xvcvtfd, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {MOP_str_8, {{MOP_wstrb, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_str_16, {{MOP_wstrh, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_str_32, {{MOP_wstr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_str_64, {{MOP_xstr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_load_8, {{MOP_wldrb, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_load_16, {{MOP_wldrh, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_load_32, {{MOP_wldr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_load_64, {{MOP_xldr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_str_f_8, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_str_f_16, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_str_f_32, {{MOP_sstr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_str_f_64, {{MOP_dstr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_load_f_8, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_load_f_16, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_load_f_32, {{MOP_sldr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {MOP_load_f_64, {{MOP_dldr, {kAbtractReg, 
kAbtractMem}, {0, 1}}}}, + + {MOP_add_8, {{MOP_waddrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_add_16, {{MOP_waddrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_add_32, {{MOP_waddrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_add_64, {{MOP_xaddrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_sub_8, {{MOP_wsubrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_sub_16, {{MOP_wsubrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_sub_32, {{MOP_wsubrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_sub_64, {{MOP_xsubrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_or_8, {{MOP_wiorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_or_16, {{MOP_wiorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_or_32, {{MOP_wiorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_or_64, {{MOP_xiorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_xor_8, {{MOP_weorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_xor_16, {{MOP_weorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_xor_32, {{MOP_weorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_xor_64, {{MOP_xeorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_and_8, {{MOP_wandrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_and_16, {{MOP_wandrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_and_32, {{MOP_wandrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_and_64, {{MOP_xandrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + + {MOP_and_f_8, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_and_f_16, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_and_f_32, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_and_f_64, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + 
{MOP_add_f_8, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_add_f_16, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_add_f_32, {{MOP_sadd, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_add_f_64, {{MOP_dadd, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_sub_f_8, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_sub_f_16, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_sub_f_32, {{MOP_ssub, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_sub_f_64, {{MOP_dsub, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + + {MOP_shl_8, {{MOP_wlslrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_shl_16, {{MOP_wlslrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_shl_32, {{MOP_wlslrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_shl_64, {{MOP_xlslrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_ashr_8, {{MOP_wasrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_ashr_16, {{MOP_wasrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_ashr_32, {{MOP_wasrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_ashr_64, {{MOP_xasrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_lshr_8, {{MOP_wlsrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_lshr_16, {{MOP_wlsrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_lshr_32, {{MOP_wlsrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {MOP_lshr_64, {{MOP_xlsrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + + {MOP_neg_8, {{MOP_winegrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_neg_16, {{MOP_winegrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_neg_32, {{MOP_winegrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_neg_64, {{MOP_xinegrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_neg_f_8, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_neg_f_16, 
{{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {MOP_neg_f_32, {{MOP_wfnegrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_neg_f_64, {{MOP_xfnegrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_not_8, {{MOP_wnotrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_not_16, {{MOP_wnotrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_not_32, {{MOP_wnotrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {MOP_not_64, {{MOP_xnotrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {MOP_comment, {{MOP_nop, {kAbtractNone}, {}}}}, +}; + +Operand *AArch64Standardize::GetInsnResult(Insn *insn) { + for (uint32 i = 0; i < insn->GetOperandSize(); ++i) { + if (insn->OpndIsDef(i)) { + return &(insn->GetOperand(i)); + } + } + return nullptr; +} + +Insn *AArch64Standardize::HandleTargetImm(Insn *insn, Insn *newInsn, uint32 idx, MOperator targetMop, uint8 order) { + const InsnDesc *md = &AArch64CG::kMd[targetMop]; + ImmOperand &immOpnd = static_cast(insn->GetOperand(idx)); + if (md->IsValidImmOpnd(immOpnd.GetValue())) { + newInsn->SetOperand(order, immOpnd); + } else { + Operand *resOpnd = GetInsnResult(insn); + CHECK_FATAL(resOpnd, "SelectTargetInsn: No result operand"); + AArch64CGFunc *a64func = static_cast(GetCgFunc()); + BB &saveCurBB = *GetCgFunc()->GetCurBB(); + a64func->GetDummyBB()->ClearInsns(); + GetCgFunc()->SetCurBB(*a64func->GetDummyBB()); + a64func->SelectCopyImm(*resOpnd, immOpnd, (resOpnd->GetSize() == k32BitSize) ? 
PTY_i32 : PTY_i64); + insn->GetBB()->InsertBeforeInsn(*a64func->GetDummyBB(), *insn); + GetCgFunc()->SetCurBB(saveCurBB); + newInsn = nullptr; + } + return newInsn; +} + +void AArch64Standardize::SelectTargetInsn(Insn *insn) { + MOperator abstractMop = insn->GetMachineOpcode(); + CHECK_FATAL(abstractMop < kMopLast, "SelectTargetInsn: abstract instruction opcode out-of-bound"); + AbstractIR2Target &entry = abstract2TargetTable[abstractMop]; + CHECK_FATAL(entry.abstractMop == abstractMop, "SelectTargetInsn: Invalid abstract instruction"); + + for (uint32 j = 0; j < entry.targetMap.size(); ++j) { + TargetMopGen &targetMopGen = entry.targetMap[j]; + MOperator targetMop = targetMopGen.targetMop; + if (targetMop == MOP_nop) { + continue; + } + Insn *newInsn = &GetCgFunc()->GetInsnBuilder()->BuildInsn(targetMop, AArch64CG::kMd[targetMop]); + newInsn->ResizeOpnds(targetMopGen.mappingOrder.size()); + for (uint32 i = 0; i < targetMopGen.mappingOrder.size(); ++i) { + uint8 order = targetMopGen.mappingOrder[i]; + switch (targetMopGen.targetOpndAction[i]) { + case kAbtractReg: + case kAbtractMem: + newInsn->SetOperand(order, insn->GetOperand(i)); + break; + case kAbtractImm: { + newInsn = HandleTargetImm(insn, newInsn, i, targetMop, order); + break; + } + case kAbtractNone: + break; + } + } + if (newInsn) { + insn->GetBB()->InsertInsnBefore(*insn, *newInsn); + } + } + insn->GetBB()->RemoveInsn(*insn); +} + +Operand *AArch64Standardize::UpdateRegister(Operand &opnd, std::map ®Map, bool allocate) { + if (!opnd.IsRegister()) { + return &opnd; + } + RegOperand ®Opnd = static_cast(opnd); + if (regOpnd.IsPhysicalRegister()) { + if (allocate && opnd.GetSize() < k32BitSize) { + opnd.SetSize(k32BitSize); + } + return &opnd; + } + if (!allocate && opnd.GetSize() >= k32BitSize) { + return &opnd; + } + regno_t regno = regOpnd.GetRegisterNumber(); + regno_t mappedRegno; + auto regItem = regMap.find(regno); + if (regItem == regMap.end()) { + if (allocate) { + return &opnd; + } + regno_t vreg 
= GetCgFunc()->NewVReg(regOpnd.GetRegisterType(), k4ByteSize); + regMap[regno] = mappedRegno = vreg; + } else { + mappedRegno = regItem->second; + } + if (!allocate) { + return &opnd; + } + return &GetCgFunc()->GetOrCreateVirtualRegisterOperand(mappedRegno); +} + +void AArch64Standardize::TraverseOperands(Insn *insn, std::map ®Map, bool allocate) { + for (uint32 i = 0; i < insn->GetOperandSize(); i++) { + Operand &opnd = insn->GetOperand(i); + if (opnd.IsList()) { + MapleList &list = static_cast(opnd).GetOperands(); + for (uint j = 0; j < list.size(); ++j) { + RegOperand *lopnd = list.front(); + list.pop_front(); + list.push_back(static_cast(UpdateRegister(*lopnd, regMap, allocate))); + } + } else if (opnd.IsMemoryAccessOperand()) { + MemOperand &mopnd = static_cast(opnd); + Operand *base = mopnd.GetBaseRegister(); + if (base) { + RegOperand *ropnd = static_cast(UpdateRegister(*base, regMap, allocate)); + mopnd.SetBaseRegister(*ropnd); + } + } else { + insn->SetOperand(i, *UpdateRegister(opnd, regMap, allocate)); + } + } +} + +void AArch64Standardize::Legalize() { + std::map regMap; + FOR_ALL_BB(bb, GetCgFunc()) { + FOR_BB_INSNS(insn, bb) { + TraverseOperands(insn, regMap, false); + } + } + FOR_ALL_BB(bb, GetCgFunc()) { + FOR_BB_INSNS(insn, bb) { + TraverseOperands(insn, regMap, true); + } + } +} + +void AArch64Standardize::StdzMov(Insn &insn) { + SelectTargetInsn(&insn); +} + +void AArch64Standardize::StdzStrLdr(Insn &insn) { + SelectTargetInsn(&insn); +} + +void AArch64Standardize::StdzBasicOp(Insn &insn) { + SelectTargetInsn(&insn); +} + +void AArch64Standardize::StdzUnaryOp(Insn &insn) { + SelectTargetInsn(&insn); +} + +void AArch64Standardize::StdzCvtOp(Insn &insn) { + SelectTargetInsn(&insn); +} + +void AArch64Standardize::StdzShiftOp(Insn &insn) { + SelectTargetInsn(&insn); +} +void AArch64Standardize::StdzCommentOp(Insn &insn) { + SelectTargetInsn(&insn); +} + +} diff --git a/src/mapleall/maple_be/src/cg/cg.cpp b/src/mapleall/maple_be/src/cg/cg.cpp index 
9fe0fc7afb..ca0aab8f38 100644 --- a/src/mapleall/maple_be/src/cg/cg.cpp +++ b/src/mapleall/maple_be/src/cg/cg.cpp @@ -20,6 +20,11 @@ using namespace maple; #define JAVALANG (mirModule->IsJavaModule()) +uint32 VregInfo::virtualRegCount = kBaseVirtualRegNO; +uint32 VregInfo::maxRegCount = 0; +std::vector VregInfo::vRegTable; +std::unordered_map VregInfo::vRegOperandTable; + void Globals::SetTarget(CG &target) { cg = ⌖ } diff --git a/src/mapleall/maple_be/src/cg/cg_cfg.cpp b/src/mapleall/maple_be/src/cg/cg_cfg.cpp index 1ee586a177..29c11b3cbc 100644 --- a/src/mapleall/maple_be/src/cg/cg_cfg.cpp +++ b/src/mapleall/maple_be/src/cg/cg_cfg.cpp @@ -992,8 +992,10 @@ void CGCFG::ReverseCriticalEdge(BB &cbb) { bool CgHandleCFG::PhaseRun(maplebe::CGFunc &f) { CGCFG *cfg = f.GetMemoryPool()->New(f); f.SetTheCFG(cfg); + cfg->MarkLabelTakenBB(); /* build control flow graph */ f.GetTheCFG()->BuildCFG(); + f.HandleFuncCfg(cfg); return false; } MAPLE_TRANSFORM_PHASE_REGISTER(CgHandleCFG, handlecfg) diff --git a/src/mapleall/maple_be/src/cg/cg_irbuilder.cpp b/src/mapleall/maple_be/src/cg/cg_irbuilder.cpp index 66309c4ab8..cdd5a70b9a 100644 --- a/src/mapleall/maple_be/src/cg/cg_irbuilder.cpp +++ b/src/mapleall/maple_be/src/cg/cg_irbuilder.cpp @@ -91,12 +91,16 @@ ImmOperand &OperandBuilder::CreateImm(const MIRSymbol &symbol, int64 offset, int *alloc.New(symbol, offset, relocs, false); } +OfstOperand &OperandBuilder::CreateOfst(int64 offset, uint32 size, MemPool *mp) { + return mp ? *mp->New(offset, size) : *alloc.New(offset, size); +} + MemOperand &OperandBuilder::CreateMem(uint32 size, MemPool *mp) { return mp ? 
*mp->New(size) : *alloc.New(size); } MemOperand &OperandBuilder::CreateMem(RegOperand &baseOpnd, int64 offset, uint32 size, MemPool *mp) { - ImmOperand &ofstOperand = CreateImm(baseOpnd.GetSize(), offset); + OfstOperand &ofstOperand = CreateOfst(offset, baseOpnd.GetSize()); if (mp != nullptr) { return *mp->New(size, baseOpnd, ofstOperand); } @@ -119,13 +123,16 @@ MemOperand &OperandBuilder::CreateMem(uint32 size, RegOperand &baseOpnd, ImmOper } RegOperand &OperandBuilder::CreateVReg(uint32 size, RegType type, MemPool *mp) { - virtualRegNum++; - regno_t vRegNO = kBaseVirtualRegNO + virtualRegNum; - return mp ? *mp->New(vRegNO, size, type) : *alloc.New(vRegNO, size, type); + regno_t vRegNO = virtualReg.GetNextVregNO(type, size / k8BitSize); + RegOperand &rp = mp ? *mp->New(vRegNO, size, type) : *alloc.New(vRegNO, size, type); + virtualReg.vRegOperandTable[vRegNO] = &rp; + return rp; } RegOperand &OperandBuilder::CreateVReg(regno_t vRegNO, uint32 size, RegType type, MemPool *mp) { - return mp ? *mp->New(vRegNO, size, type) : *alloc.New(vRegNO, size, type); + RegOperand &rp = mp ? *mp->New(vRegNO, size, type) : *alloc.New(vRegNO, size, type); + virtualReg.vRegOperandTable[vRegNO] = &rp; + return rp; } RegOperand &OperandBuilder::CreatePReg(regno_t pRegNO, uint32 size, RegType type, MemPool *mp) { diff --git a/src/mapleall/maple_be/src/cg/cg_option.cpp b/src/mapleall/maple_be/src/cg/cg_option.cpp index f94b0167ad..c700b871a0 100644 --- a/src/mapleall/maple_be/src/cg/cg_option.cpp +++ b/src/mapleall/maple_be/src/cg/cg_option.cpp @@ -121,6 +121,7 @@ bool CGOptions::doCalleeToSpill = false; bool CGOptions::doRegSavesOpt = false; bool CGOptions::useSsaPreSave = false; bool CGOptions::useSsuPreRestore = false; +bool CGOptions::useNewCg = false; bool CGOptions::replaceASM = false; bool CGOptions::generalRegOnly = false; bool CGOptions::fastMath = false; @@ -533,6 +534,10 @@ bool CGOptions::SolveOptions(bool isDebug) { opts::cg::ssupreRestore ? 
EnableSsuPreRestore() : DisableSsuPreRestore(); } + if (opts::cg::newCg.IsEnabledByUser()) { + opts::cg::newCg ? EnableNewCg() : DisableNewCg(); + } + if (opts::cg::lsraBb.IsEnabledByUser()) { SetLSRABBOptSize(opts::cg::lsraBb); } diff --git a/src/mapleall/maple_be/src/cg/cg_options.cpp b/src/mapleall/maple_be/src/cg/cg_options.cpp index 9fe2534e17..d39813ac8c 100644 --- a/src/mapleall/maple_be/src/cg/cg_options.cpp +++ b/src/mapleall/maple_be/src/cg/cg_options.cpp @@ -186,6 +186,12 @@ maplecl::Option ssupreRestore({"--ssupre-restore"}, {cgCategory}, maplecl::DisableWith("--no-ssupre-restore")); +maplecl::Option newCg({"--newcg"}, + " --newcg \tUse new CG infrastructure\n" + " --no-newcg\n", + {cgCategory}, + maplecl::DisableWith("--no-newcg")); + maplecl::Option prepeep({"--prepeep"}, " --prepeep \tPerform peephole optimization before RA\n" " --no-prepeep\n", diff --git a/src/mapleall/maple_be/src/cg/cg_ssa.cpp b/src/mapleall/maple_be/src/cg/cg_ssa.cpp index 4b4aa8f09e..f28a528e2a 100644 --- a/src/mapleall/maple_be/src/cg/cg_ssa.cpp +++ b/src/mapleall/maple_be/src/cg/cg_ssa.cpp @@ -123,7 +123,7 @@ void CGSSAInfo::RenameBB(BB &bb) { } AddRenamedBB(bb.GetId()); /* record version stack size */ - size_t tempSize = vRegStk.empty() ? allSSAOperands.size() + cgFunc->GetFirstMapleIrVRegNO() + 1 : + size_t tempSize = vRegStk.empty() ? 
allSSAOperands.size() + kBaseVirtualRegNO + 1 : vRegStk.rbegin()->first + 1; std::vector oriStackSize(tempSize, -1); for (auto it : vRegStk) { diff --git a/src/mapleall/maple_be/src/cg/cgbb.cpp b/src/mapleall/maple_be/src/cg/cgbb.cpp index 1a4b3f7216..45d9e1ae0f 100644 --- a/src/mapleall/maple_be/src/cg/cgbb.cpp +++ b/src/mapleall/maple_be/src/cg/cgbb.cpp @@ -25,6 +25,7 @@ const std::string BB::bbNames[BB::kBBLast] = { "BB_goto", "BB_igoto", "BB_ret", + "BB_noret", "BB_intrinsic", "BB_rangegoto", "BB_throw" @@ -180,6 +181,33 @@ void BB::InsertAtBeginning(BB &bb) { bb.firstInsn = bb.lastInsn = nullptr; } +void BB::InsertBeforeInsn(BB &fromBB, Insn &beforeInsn) { + if (fromBB.firstInsn == nullptr) { /* nothing to add */ + return; + } + + BB *toBB = beforeInsn.GetBB(); + FOR_BB_INSNS(insn, &fromBB) { + insn->SetBB(toBB); + } + + if (toBB->GetFirstInsn() == nullptr) { + toBB->SetFirstInsn(fromBB.GetFirstInsn()); + toBB->SetLastInsn(fromBB.GetLastInsn()); + } else { + if (beforeInsn.GetPrev()) { + beforeInsn.GetPrev()->SetNext(fromBB.GetFirstInsn()); + } else { + toBB->SetFirstInsn(fromBB.GetFirstInsn()); + } + fromBB.GetFirstInsn()->SetPrev(beforeInsn.GetPrev()); + beforeInsn.SetPrev(fromBB.GetLastInsn()); + fromBB.GetLastInsn()->SetNext(&beforeInsn); + } + fromBB.SetFirstInsn(nullptr); + fromBB.SetLastInsn(nullptr); +} + /* append all insns from bb into this bb */ void BB::InsertAtEnd(BB &bb) { if (bb.firstInsn == nullptr) { /* nothing to add */ diff --git a/src/mapleall/maple_be/src/cg/cgfunc.cpp b/src/mapleall/maple_be/src/cg/cgfunc.cpp index 916565dd14..df3b272914 100644 --- a/src/mapleall/maple_be/src/cg/cgfunc.cpp +++ b/src/mapleall/maple_be/src/cg/cgfunc.cpp @@ -1423,11 +1423,14 @@ void InitHandleStmtFactory() { RegisterFactoryFunction(OP_asm, HandleAsm); } +/* member of CGFunc */ +void CGFunc::InitFactory() { + InitHandleExprFactory(); +} + CGFunc::CGFunc(MIRModule &mod, CG &cg, MIRFunction &mirFunc, BECommon &beCommon, MemPool &memPool, StackMemPool &stackMp, 
MapleAllocator &allocator, uint32 funcId) - : vRegTable(allocator.Adapter()), - bbVec(allocator.Adapter()), - vRegOperandTable(allocator.Adapter()), + : bbVec(allocator.Adapter()), pRegSpillMemOperands(allocator.Adapter()), spillRegMemOperands(allocator.Adapter()), reuseSpillLocMem(allocator.Adapter()), @@ -1468,18 +1471,19 @@ CGFunc::CGFunc(MIRModule &mod, CG &cg, MIRFunction &mirFunc, BECommon &beCommon, SetHasAlloca(func.HasVlaOrAlloca()); dummyBB = CreateNewBB(); - vRegCount = firstMapleIrVRegNO + func.GetPregTab()->Size(); - firstNonPregVRegNO = vRegCount; + vReg.SetCount(kBaseVirtualRegNO + func.GetPregTab()->Size()); + firstNonPregVRegNO = vReg.GetCount(); /* maximum register count initial be increased by 1024 */ - maxRegCount = vRegCount + 1024; + SetMaxRegNum(vReg.GetCount() + 1024); if (func.GetMayWriteToAddrofStack()) { SetStackProtectInfo(kAddrofStack); } + vReg.vRegOperandTable.clear(); insnBuilder = memPool.New(memPool); opndBuilder = memPool.New(memPool, func.GetPregTab()->Size()); - vRegTable.resize(maxRegCount); + vReg.VRegTableResize(GetMaxRegNum()); /* func.GetPregTab()->_preg_table[0] is nullptr, so skip it */ ASSERT(func.GetPregTab()->PregFromPregIdx(0) == nullptr, "PregFromPregIdx(0) must be nullptr"); for (size_t i = 1; i < func.GetPregTab()->Size(); ++i) { @@ -1785,6 +1789,7 @@ void CGFunc::CreateLmbcFormalParamInfo() { AssignLmbcFormalParams(); } + void CGFunc::GenerateInstruction() { InitHandleExprFactory(); InitHandleStmtFactory(); @@ -2101,6 +2106,7 @@ void CGFunc::HandleFunction() { GenSaveMethodInfoCode(*firstBB); /* build control flow graph */ theCFG = memPool->New(*this); + theCFG->MarkLabelTakenBB(); theCFG->BuildCFG(); RemoveUnreachableBB(); AddCommonExitBB(); @@ -2109,7 +2115,6 @@ void CGFunc::HandleFunction() { } MarkCleanupBB(); DetermineReturnTypeofCall(); - theCFG->MarkLabelTakenBB(); theCFG->UnreachCodeAnalysis(); if (mirModule.GetSrcLang() == kSrcLangC) { theCFG->WontExitAnalysis(); @@ -2343,6 +2348,15 @@ bool 
CgHandleFunction::PhaseRun(maplebe::CGFunc &f) { } MAPLE_TRANSFORM_PHASE_REGISTER(CgHandleFunction, handlefunction) +bool CgPatchLongBranch::PhaseRun(maplebe::CGFunc &f) { + f.PatchLongBranch(); + if (!f.GetCG()->GetCGOptions().DoEmitCode() || f.GetCG()->GetCGOptions().DoDumpCFG()) { + f.DumpCFG(); + } + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgPatchLongBranch, patchlongbranch) + bool CgFixCFLocOsft::PhaseRun(maplebe::CGFunc &f) { if (f.GetCG()->GetCGOptions().WithDwarf()) { f.DBGFixCallFrameLocationOffsets(); diff --git a/src/mapleall/maple_be/src/cg/insn.cpp b/src/mapleall/maple_be/src/cg/insn.cpp index ff1edaec7c..3767b15ad6 100644 --- a/src/mapleall/maple_be/src/cg/insn.cpp +++ b/src/mapleall/maple_be/src/cg/insn.cpp @@ -299,7 +299,7 @@ void Insn::SetMOP(const InsnDesc &idesc) { } void Insn::Dump() const { -ASSERT(md != nullptr, "md should not be nullptr"); + ASSERT(md != nullptr, "md should not be nullptr"); LogInfo::MapleLogger() << "< " << GetId() << " > "; LogInfo::MapleLogger() << md->name << "(" << mOp << ")"; diff --git a/src/mapleall/maple_be/src/cg/isel.cpp b/src/mapleall/maple_be/src/cg/isel.cpp index a4f7594549..6f4b9ca3cd 100644 --- a/src/mapleall/maple_be/src/cg/isel.cpp +++ b/src/mapleall/maple_be/src/cg/isel.cpp @@ -18,8 +18,11 @@ #include #include "factory.h" #include "cg.h" +#include "cgfunc.h" namespace maplebe { +/* Does not support size larger than 64 bits */ +#define PTY128MOD(pty) ((pty) = (((pty) == PTY_i128) ? PTY_i64 : (((pty) == PTY_u128) ?
PTY_u64 : (pty)))) /* register, imm , memory, cond */ #define DEF_FAST_ISEL_MAPPING_INT(SIZE) \ MOperator fastIselMapI##SIZE[Operand::OperandType::kOpdPhi][Operand::OperandType::kOpdPhi] = { \ @@ -90,6 +93,13 @@ std::map> fastCvtMappingTableI = { DEF_USE_EXTEND_MAPPING_TBL(16, 32), DEF_USE_EXTEND_MAPPING_TBL(16, 64), DEF_USE_EXTEND_MAPPING_TBL(32, 64), + DEF_USE_EXTEND_MAPPING_TBL(16, 8), /* Truncate Mapping */ + DEF_USE_EXTEND_MAPPING_TBL(32, 8), + DEF_USE_EXTEND_MAPPING_TBL(64, 8), + DEF_USE_EXTEND_MAPPING_TBL(32, 16), + DEF_USE_EXTEND_MAPPING_TBL(64, 16), + DEF_USE_EXTEND_MAPPING_TBL(64, 32), + }; #undef DEF_USE_EXTEND_MAPPING_TBL #undef DEF_EXTEND_MAPPING_TBL @@ -101,7 +111,7 @@ static MOperator GetFastCvtMopI(uint32 fromSize, uint32 toSize, bool isSigned) { if (fromSize < k8BitSize || fromSize > k64BitSize) { CHECK_FATAL(false, "unsupport type"); } - /* Extend: fromSize < toSize */ + /* Extend: fromSize < toSize; Truncate: fromSize > toSize */ auto tableDriven = fastCvtMappingTableI.find({fromSize, toSize}); if (tableDriven == fastCvtMappingTableI.end()) { CHECK_FATAL(false, "unsupport cvt"); @@ -190,6 +200,7 @@ void HandleLabel(StmtNode &stmt, const MPISel &iSel) { ASSERT(stmt.GetOpCode() == OP_label, "error"); auto &label = static_cast(stmt); BB *newBB = cgFunc->StartNewBBImpl(false, label); + newBB->SetKind(BB::kBBFallthru); newBB->AddLabel(label.GetLabelIdx()); cgFunc->SetLab2BBMap(static_cast(newBB->GetLabIdx()), *newBB); cgFunc->SetCurBB(*newBB); @@ -245,10 +256,9 @@ void HandleReturn(StmtNode &stmt, MPISel &iSel) { auto &retNode = static_cast(stmt); ASSERT(retNode.NumOpnds() <= 1, "NYI return nodes number > 1"); if (retNode.NumOpnds() != 0) { - Operand *opnd = iSel.HandleExpr(retNode, *retNode.Opnd(0)); - iSel.SelectReturn(retNode, *opnd); + iSel.SelectReturn(retNode); } - iSel.SelectReturn(); + iSel.SelectReturn(retNode.NumOpnds() == 0); /* return stmt will jump to the ret BB, so curBB is gotoBB */ cgFunc->SetCurBBKind(BB::kBBGoto);
cgFunc->SetCurBB(*cgFunc->StartNewBB(retNode)); @@ -280,8 +290,7 @@ void HandleCondbr(StmtNode &stmt, MPISel &iSel) { ASSERT(condNode != nullptr, "expect first operand of cond br"); /* select cmpOp Insn and get the result "opnd0". However, the opnd0 is not used * in most backend architectures */ - Operand *opnd0 = iSel.HandleExpr(stmt, *condNode); - iSel.SelectCondGoto(condGotoNode, *condNode, *opnd0); + iSel.SelectCondGoto(condGotoNode, *condNode); cgFunc->SetCurBB(*cgFunc->StartNewBB(condGotoNode)); } @@ -371,7 +380,7 @@ Operand *HandleTrunc(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { return iSel.SelectCvt(parent, static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0))); } -Operand *HandleConstVal(const BaseNode &parent [[maybe_unused]], BaseNode &expr, const MPISel &iSel) { +Operand *HandleConstVal(const BaseNode &parent, BaseNode &expr, const MPISel &iSel) { auto &constValNode = static_cast(expr); MIRConst *mirConst = constValNode.GetConstVal(); ASSERT(mirConst != nullptr, "get constval of constvalnode failed"); @@ -380,10 +389,10 @@ Operand *HandleConstVal(const BaseNode &parent [[maybe_unused]], BaseNode &expr, return iSel.SelectIntConst(*mirIntConst, constValNode.GetPrimType()); } else if (mirConst->GetKind() == kConstDoubleConst) { auto *mirDoubleConst = safe_cast(mirConst); - return iSel.SelectFloatingConst(*mirDoubleConst, constValNode.GetPrimType()); + return iSel.SelectFloatingConst(*mirDoubleConst, constValNode.GetPrimType(), parent); } else if (mirConst->GetKind() == kConstFloatConst) { auto *mirFloatConst = safe_cast(mirConst); - return iSel.SelectFloatingConst(*mirFloatConst, constValNode.GetPrimType()); + return iSel.SelectFloatingConst(*mirFloatConst, constValNode.GetPrimType(), parent); } else { CHECK_FATAL(false, "NIY"); } @@ -438,7 +447,7 @@ Operand *HandleCmp(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { } Operand *HandleAbs(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { - return 
iSel.SelectAbs(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0))); + return iSel.SelectAbs(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); } Operand *HandleAlloca(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { @@ -480,6 +489,63 @@ Operand *HandleRetype(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { Operand *HandleIntrinOp(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { auto &intrinsicopNode = static_cast(expr); switch (intrinsicopNode.GetIntrinsic()) { + case INTRN_C_sin: + return iSel.SelectCsin(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_sinh: + return iSel.SelectCsinh(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_asin: + return iSel.SelectCasin(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_cos: + return iSel.SelectCcos(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_cosh: + return iSel.SelectCcosh(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_acos: + return iSel.SelectCacos(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_atan: + return iSel.SelectCatan(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_exp: + return iSel.SelectCexp(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_log: + return iSel.SelectClog(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_log10: + return iSel.SelectClog10(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + + case INTRN_C_sinf: + return iSel.SelectCsinf(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_sinhf: + return iSel.SelectCsinhf(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_asinf: + return iSel.SelectCasinf(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_cosf: + return 
iSel.SelectCcosf(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_coshf: + return iSel.SelectCcoshf(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_acosf: + return iSel.SelectCacosf(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_atanf: + return iSel.SelectCatanf(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_expf: + return iSel.SelectCexpf(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_logf: + return iSel.SelectClogf(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_log10f: + return iSel.SelectClog10f(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + + case INTRN_C_ffs: + return iSel.SelectCffs(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_memcmp: + return iSel.SelectCmemcmp(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_strlen: + return iSel.SelectCstrlen(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_strcmp: + return iSel.SelectCstrcmp(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_strncmp: + return iSel.SelectCstrncmp(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_strchr: + return iSel.SelectCstrchr(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_strrchr: + return iSel.SelectCstrrchr(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_rev16_2: case INTRN_C_rev_4: case INTRN_C_rev_8: @@ -490,10 +556,8 @@ Operand *HandleIntrinOp(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { case INTRN_C_ctz32: case INTRN_C_ctz64: return iSel.SelectCctz(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); - case INTRN_C_exp: - return iSel.SelectCexp(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); default: - 
ASSERT(false, "NIY, unsupported intrinsicop."); + CHECK_FATAL_FALSE("NIY, unsupported intrinsicop."); return nullptr; } } @@ -578,8 +642,14 @@ Operand *MPISel::HandleExpr(const BaseNode &parent, BaseNode &expr) { void MPISel::DoMPIS() { isel::InitHandleStmtFactory(); isel::InitHandleExprFactory(); + GetCurFunc()->Link2ISel(this); + SrcPosition lastLocPos = SrcPosition(); + SrcPosition lastMplPos = SrcPosition(); StmtNode *secondStmt = HandleFuncEntry(); for (StmtNode *stmt = secondStmt; stmt != nullptr; stmt = stmt->GetNext()) { + /* insert Insn for .loc before cg for the stmt */ + GetCurFunc()->GenerateLoc(stmt, lastLocPos, lastMplPos); + auto function = CreateProductFunction(stmt->GetOpCode()); CHECK_FATAL(function != nullptr, "unsupported opCode or has been lowered before"); function(*stmt, *this); @@ -700,14 +770,21 @@ void MPISel::SelectDassign(const DassignNode &stmt, Operand &opndRhs) { /* Generate Insn */ if (rhsType == PTY_agg) { /* Agg Type */ - SelectAggDassign(symbolInfo, symbolMem, opndRhs); + SelectAggDassign(symbolInfo, symbolMem, opndRhs, stmt); return; } PrimType memType = symbolInfo.primType; if (memType == PTY_agg) { memType = PTY_a64; } - SelectCopy(symbolMem, opndRhs, memType, rhsType); + if (IsSymbolRequireIndirection(*symbol)) { + RegOperand ®Opnd = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + SelectCopy(regOpnd, symbolMem, PTY_a64, PTY_a64); + MemOperand &stMem = GetOrCreateMemOpndFromSymbol(*symbol, stmt.GetFieldID(), ®Opnd); + SelectCopy(stMem, opndRhs, memType, rhsType); + } else { + SelectCopy(symbolMem, opndRhs, memType, rhsType); + } return; } @@ -776,6 +853,7 @@ Operand *MPISel::SelectShift(const BinaryNode &node, Operand &opnd0, void MPISel::SelectShift(Operand &resOpnd, Operand &opnd0, Operand &opnd1, Opcode shiftDirect, PrimType opnd0Type, PrimType opnd1Type) { + PTY128MOD(opnd0Type); if (opnd1.IsIntImmediate() && static_cast(opnd1).GetValue() == 0) { SelectCopy(resOpnd, opnd0, opnd0Type); return; @@ -804,8 
+882,10 @@ void MPISel::SelectShift(Operand &resOpnd, Operand &opnd0, Operand &opnd1, Opcod void MPISel::SelectRegassign(RegassignNode &stmt, Operand &opnd0) { PrimType rhsType = stmt.Opnd(0)->GetPrimType(); + PTY128MOD(rhsType); PregIdx pregIdx = stmt.GetRegIdx(); PrimType regType = stmt.GetPrimType(); + PTY128MOD(regType); RegOperand ®Opnd = cgFunc->GetOpndBuilder()->CreateVReg(cgFunc->GetVirtualRegNOFromPseudoRegIdx(pregIdx), GetPrimTypeBitSize(regType), cgFunc->GetRegTyFromPrimTy(regType)); SelectCopy(regOpnd, opnd0, regType, rhsType); @@ -850,17 +930,28 @@ Operand *MPISel::SelectDread(const BaseNode &parent [[maybe_unused]], const Addr CHECK_FATAL(primType == maple::PTY_agg, "NIY"); return &symbolMem; } - /* for BasicType, load symbolVal to register. */ - RegOperand ®Opnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), - cgFunc->GetRegTyFromPrimTy(primType)); /* Generate Insn */ - SelectCopy(regOpnd, symbolMem, primType, symbolType); - return ®Opnd; + if (IsSymbolRequireIndirection(*symbol)) { + RegOperand ®Opnd = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + SelectCopy(regOpnd, symbolMem, PTY_a64, PTY_a64); + RegOperand ®Opnd1 = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + MemOperand &ldMem = GetOrCreateMemOpndFromSymbol(*symbol, expr.GetFieldID(), ®Opnd); + SelectCopy(regOpnd1, ldMem, primType, symbolType); + return ®Opnd1; + } else { + /* for BasicType, load symbolVal to register. 
*/ + RegOperand ®Opnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + SelectCopy(regOpnd, symbolMem, primType, symbolType); + return ®Opnd; + } } Operand *MPISel::SelectAdd(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent [[maybe_unused]]) { PrimType primType = node.GetPrimType(); + PTY128MOD(primType); RegOperand &resReg = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType)); RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType()); @@ -920,7 +1011,7 @@ void MPISel::SelectExtractbits(RegOperand &resOpnd, RegOperand &opnd0, uint8 bit } Operand *MPISel::SelectExtractbits(const BaseNode &parent [[maybe_unused]], - const ExtractbitsNode &node, Operand &opnd0) { + ExtractbitsNode &node, Operand &opnd0) { PrimType fromType = node.Opnd(0)->GetPrimType(); PrimType toType = node.GetPrimType(); uint8 bitSize = node.GetBitsSize(); @@ -943,31 +1034,62 @@ Operand *MPISel::SelectExtractbits(const BaseNode &parent [[maybe_unused]], } Operand *MPISel::SelectCvt(const BaseNode &parent [[maybe_unused]], const TypeCvtNode &node, Operand &opnd0) { - PrimType fromType = node.Opnd(0)->GetPrimType(); + PrimType fromType = node.FromType(); + PTY128MOD(fromType); PrimType toType = node.GetPrimType(); + PTY128MOD(toType); if (fromType == toType) { return &opnd0; } + RegOperand *regOpnd0; + if (!opnd0.IsRegister()) { + RegOperand &result = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(fromType), + cgFunc->GetRegTyFromPrimTy(fromType)); + SelectCopy(result, opnd0, fromType, fromType); + regOpnd0 = &result; + } else { + regOpnd0 = &static_cast(opnd0); + } RegOperand *resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), cgFunc->GetRegTyFromPrimTy(toType)); if (IsPrimitiveInteger(toType) && IsPrimitiveInteger(fromType)) { - SelectIntCvt(*resOpnd, opnd0, toType, fromType); + 
SelectIntCvt(*resOpnd, *regOpnd0, toType, fromType); } else if (IsPrimitiveFloat(toType) && IsPrimitiveInteger(fromType)) { - SelectCvtInt2Float(*resOpnd, opnd0, toType, fromType); + SelectCvtInt2Float(*resOpnd, *regOpnd0, toType, fromType); } else if (IsPrimitiveFloat(toType) && IsPrimitiveFloat(fromType)) { - SelectFloatCvt(*resOpnd, opnd0, toType, fromType); + SelectFloatCvt(*resOpnd, *regOpnd0, toType, fromType); } else if (IsPrimitiveInteger(toType) && IsPrimitiveFloat(fromType)) { - SelectCvtFloat2Int(*resOpnd, opnd0, toType, fromType); + SelectCvtFloat2Int(*resOpnd, *regOpnd0, toType, fromType); } else { CHECK_FATAL(false, "NIY cvt"); } return resOpnd; } - void MPISel::SelectCvtFloat2Int(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType) { uint32 toSize = GetPrimTypeBitSize(toType); bool isSigned = !IsPrimitiveUnsigned(toType); +#if TARGAARCH64 + /* + * Due to fp precision, should use one insn to perform cvt. + */ + MOperator mOp = abstract::MOP_undef; + switch (fromType) { + case PTY_f64: + mOp = (toSize <= k32BitSize) ? ((isSigned) ? MOP_cvt_i32_f64 : MOP_cvt_u32_f64) : + ((isSigned) ? MOP_cvt_i64_f64 : MOP_cvt_u64_f64); + break; + case PTY_f32: + mOp = (toSize <= k32BitSize) ? ((isSigned) ? MOP_cvt_i32_f32 : MOP_cvt_u32_f32) : + ((isSigned) ? MOP_cvt_i64_f32 : MOP_cvt_u64_f32); + break; + default: + CHECK_FATAL(false, "NYI"); + } + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + (void)insn.AddOpndChain(resOpnd).AddOpndChain(opnd0); + cgFunc->GetCurBB()->AppendInsn(insn); +#else PrimType newToType = toType; // cvt f64/32 -> u16 / u8 -> cvt f u32 + cvt u32 -> u8 if (toSize < k32BitSize) { @@ -978,9 +1100,9 @@ void MPISel::SelectCvtFloat2Int(RegOperand &resOpnd, Operand &opnd0, PrimType to SelectFloatCvt(tmpFloatOpnd, opnd0, newToType, fromType); MOperator mOp = abstract::MOP_undef; if (newToSize == k32BitSize) { - mOp = isSigned ? 
abstract::MOP_cvt_rf_i32 : abstract::MOP_cvt_rf_u32; + mOp = isSigned ? abstract::MOP_cvt_i32_f32 : abstract::MOP_cvt_u32_f32; } else if (newToSize == k64BitSize) { - mOp = isSigned ? abstract::MOP_cvt_rf_i64 : abstract::MOP_cvt_rf_u64; + mOp = isSigned ? abstract::MOP_cvt_i64_f64 : abstract::MOP_cvt_u64_f64; } else { CHECK_FATAL(false, "niy"); } @@ -993,18 +1115,39 @@ void MPISel::SelectCvtFloat2Int(RegOperand &resOpnd, Operand &opnd0, PrimType to SelectIntCvt(resOpnd, tmpIntOpnd, toType, newToType); } cgFunc->GetCurBB()->AppendInsn(insn); +#endif } void MPISel::SelectCvtInt2Float(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType) { uint32 fromSize = GetPrimTypeBitSize(fromType); bool isSigned = !IsPrimitiveUnsigned(fromType); +#if TARGAARCH64 + /* Due to fp precision, convert is done with one instruction */ + MOperator mOp = abstract::MOP_undef; + switch (toType) { + case PTY_f64: + mOp = (fromSize <= k32BitSize) ? ((isSigned) ? MOP_cvt_f64_i32 : MOP_cvt_f64_u32) : + ((isSigned) ? MOP_cvt_f64_i64 : MOP_cvt_f64_u64); + break; + case PTY_f32: + mOp = (fromSize <= k32BitSize) ? ((isSigned) ? MOP_cvt_f32_i32 : MOP_cvt_f32_u32) : + ((isSigned) ? MOP_cvt_f32_i64 : MOP_cvt_f32_u64); + break; + default: + CHECK_FATAL(false, "NYI"); + } + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, fromType); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + (void)insn.AddOpndChain(resOpnd).AddOpndChain(regOpnd0); + cgFunc->GetCurBB()->AppendInsn(insn); +#else MOperator mOp = abstract::MOP_undef; PrimType newFromType = PTY_begin; if (fromSize == k32BitSize) { - mOp = isSigned ? abstract::MOP_cvt_fr_i32 : abstract::MOP_cvt_fr_u32; + mOp = isSigned ? abstract::MOP_cvt_f32_i32 : abstract::MOP_cvt_f32_u32; newFromType = PTY_f32; } else if (fromSize == k64BitSize) { - mOp = isSigned ? abstract::MOP_cvt_fr_i64 : abstract::MOP_cvt_fr_u64; + mOp = isSigned ? 
abstract::MOP_cvt_f64_i64 : abstract::MOP_cvt_f64_u64; newFromType = PTY_f64; } else { CHECK_FATAL(false, "niy"); @@ -1016,6 +1159,7 @@ void MPISel::SelectCvtInt2Float(RegOperand &resOpnd, Operand &opnd0, PrimType to (void)insn.AddOpndChain(tmpFloatOpnd).AddOpndChain(regOpnd0); cgFunc->GetCurBB()->AppendInsn(insn); SelectFloatCvt(resOpnd, tmpFloatOpnd, toType, newFromType); +#endif } void MPISel::SelectIntCvt(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType) { @@ -1026,7 +1170,7 @@ void MPISel::SelectIntCvt(RegOperand &resOpnd, Operand &opnd0, PrimType toType, * The signedness of operands would be shown in the expression. */ RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, fromType); - if (toSize <= fromSize) { + if (toSize == fromSize) { resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(regOpnd0.GetRegisterNumber(), GetPrimTypeBitSize(toType), cgFunc->GetRegTyFromPrimTy(toType)); return; @@ -1182,6 +1326,7 @@ Operand *MPISel::SelectIread(const BaseNode &parent [[maybe_unused]], const Irea } /* for BasicType, load val in addr to register. */ PrimType primType = expr.GetPrimType(); + PTY128MOD(primType); RegOperand &result = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType)); SelectCopy(result, memOpnd, primType, lhsInfo.primType); @@ -1256,7 +1401,7 @@ Operand *MPISel::SelectDepositBits(const DepositbitsNode &node, Operand &opnd0, return &resOpnd; } -Operand *MPISel::SelectAbs(UnaryNode &node, Operand &opnd0) { +Operand *MPISel::SelectAbs(UnaryNode &node, Operand &opnd0, const BaseNode &parent) { PrimType primType = node.GetPrimType(); if (IsPrimitiveVector(primType)) { CHECK_FATAL(false, "NIY"); @@ -1273,7 +1418,7 @@ Operand *MPISel::SelectAbs(UnaryNode &node, Operand &opnd0) { double mask = primType == PTY_f64 ? 
kNaNDouble : kNaNFloat; MIRDoubleConst *c = cgFunc->GetMemoryPool()->New(mask, *GlobalTables::GetTypeTable().GetTypeTable().at(PTY_f64)); - Operand *opnd1 = SelectFloatingConst(*c, PTY_f64); + Operand *opnd1 = SelectFloatingConst(*c, PTY_f64, parent); RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType)); @@ -1396,59 +1541,48 @@ StmtNode *MPISel::HandleFuncEntry() { RegOperand &MPISel::SelectCopy2Reg(Operand &src, PrimType toType, PrimType fromType) { uint32 fromSize = GetPrimTypeBitSize(fromType); uint32 toSize = GetPrimTypeBitSize(toType); - if (src.IsRegister() && fromSize == toSize) { + bool isReg = src.IsRegister(); + uint32 srcRegSize = isReg ? src.GetSize() : 0; + if ((isReg && fromSize == toSize) || (fromType == PTY_unknown && isReg && srcRegSize == toSize)) { return static_cast(src); } - RegOperand &dest = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), - cgFunc->GetRegTyFromPrimTy(toType)); - if (fromSize != toSize) { - SelectCopy(dest, src, toType, fromType); - } else { + RegOperand &dest = cgFunc->GetOpndBuilder()->CreateVReg(toSize, cgFunc->GetRegTyFromPrimTy(toType)); + if (fromType == PTY_unknown || fromSize == toSize) { SelectCopy(dest, src, toType); + } else if (fromSize != toSize) { + SelectCopy(dest, src, toType, fromType); } return dest; } -/* Pretty sure that implicit type conversions will not occur. */ -RegOperand &MPISel::SelectCopy2Reg(Operand &src, PrimType dtype) { - ASSERT(src.GetSize() == GetPrimTypeBitSize(dtype), "NIY"); - if (src.IsRegister()) { - return static_cast(src); - } - RegOperand &dest = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), - cgFunc->GetRegTyFromPrimTy(dtype)); - SelectCopy(dest, src, dtype); - return dest; -} /* This function copy/load/store src to a dest, Once the src and dest types * are different, implicit conversion is executed here. 
*/ void MPISel::SelectCopy(Operand &dest, Operand &src, PrimType toType, PrimType fromType) { - if (GetPrimTypeBitSize(fromType) != GetPrimTypeBitSize(toType)) { + PTY128MOD(fromType); + PTY128MOD(toType); + uint32 fromSize = GetPrimTypeBitSize(fromType); + uint32 toSize = GetPrimTypeBitSize(toType); + if (fromType != PTY_unknown && fromSize != toSize) { RegOperand &srcRegOpnd = SelectCopy2Reg(src, fromType); - RegOperand &dstRegOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), - cgFunc->GetRegTyFromPrimTy(toType)); + RegOperand &dstRegOpnd = cgFunc->GetOpndBuilder()->CreateVReg(toSize, cgFunc->GetRegTyFromPrimTy(toType)); SelectIntCvt(dstRegOpnd, srcRegOpnd, toType, fromType); SelectCopy(dest, dstRegOpnd, toType); } else { - SelectCopy(dest, src, toType); - } -} - -/* Pretty sure that implicit type conversions will not occur. */ -void MPISel::SelectCopy(Operand &dest, Operand &src, PrimType type) { - ASSERT(dest.GetSize() == src.GetSize(), "NIY"); - if (dest.GetKind() == Operand::kOpdRegister) { - SelectCopyInsn(dest, src, type); - } else if (dest.GetKind() == Operand::kOpdMem) { - if (src.GetKind() != Operand::kOpdRegister) { - RegOperand &tempReg = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(type), - cgFunc->GetRegTyFromPrimTy(type)); - SelectCopyInsn(tempReg, src, type); - SelectCopyInsn(dest, tempReg, type); - } else { - SelectCopyInsn(dest, src, type); + if (dest.GetKind() == Operand::kOpdMem || src.GetKind() == Operand::kOpdMem) { + if ((dest.GetKind() == Operand::kOpdMem && src.GetKind() == Operand::kOpdRegister) || + (dest.GetKind() == Operand::kOpdRegister && src.GetKind() == Operand::kOpdMem)) { + SelectCopyInsn(dest, src, toType); + } else if (src.GetKind() != Operand::kOpdRegister) { + RegOperand &tempReg = cgFunc->GetOpndBuilder()->CreateVReg(toSize, cgFunc->GetRegTyFromPrimTy(toType)); + SelectCopyInsn(tempReg, src, toType); + SelectCopyInsn(dest, tempReg, toType); + } else { + SelectCopyInsn(dest, src, toType); + } 
+ } else if (dest.GetKind() == Operand::kOpdRegister) { + SelectCopyInsn(dest, src, toType); + }else { + CHECK_FATAL(false, "NIY, CPU supports more than memory and registers"); } - }else { - CHECK_FATAL(false, "NIY, CPU supports more than memory and registers"); } } @@ -1531,18 +1665,6 @@ Operand *MPISel::SelectRetype(TypeCvtNode &node, Operand &opnd0) { return nullptr; } -void MPISel::HandleFuncExit() const { - BlockNode *block = cgFunc->GetFunction().GetBody(); - ASSERT(block != nullptr, "get func body block failed in CGFunc::GenerateInstruction"); - cgFunc->GetCurBB()->SetLastStmt(*block->GetLast()); - /* Set lastbb's frequency */ - cgFunc->SetLastBB(*cgFunc->GetCurBB()); - /* the last BB is return BB */ - cgFunc->GetLastBB()->SetKind(BB::kBBReturn); - - cgFunc->AddCommonExitBB(); -} - void InstructionSelector::GetAnalysisDependence(AnalysisDep &aDep) const { aDep.AddRequired(); aDep.AddPreserved(); diff --git a/src/mapleall/maple_be/src/cg/standardize.cpp b/src/mapleall/maple_be/src/cg/standardize.cpp index cf14b253f5..b3a14f7a0d 100644 --- a/src/mapleall/maple_be/src/cg/standardize.cpp +++ b/src/mapleall/maple_be/src/cg/standardize.cpp @@ -22,6 +22,9 @@ void Standardize::DoStandardize() { /* two address mapping first */ FOR_ALL_BB(bb, cgFunc) { FOR_BB_INSNS(insn, bb) { + if (insn->IsDbgInsn()) { + continue; + } if (insn->IsMachineInstruction()) { continue; } @@ -30,10 +33,13 @@ void Standardize::DoStandardize() { } } } - + Legalize(); /* standardize for each op */ FOR_ALL_BB(bb, cgFunc) { FOR_BB_INSNS(insn, bb) { + if (insn->IsDbgInsn()) { + continue; + } if (insn->IsMachineInstruction()) { continue; } @@ -44,11 +50,13 @@ void Standardize::DoStandardize() { } else if (insn->IsBasicOp()) { StdzBasicOp(*insn); } else if (insn->IsUnaryOp()) { - StdzUnaryOp(*insn, *cgFunc); + StdzUnaryOp(*insn); } else if (insn->IsConversion()) { - StdzCvtOp(*insn, *cgFunc); + StdzCvtOp(*insn); } else if (insn->IsShift()) { - StdzShiftOp(*insn, *cgFunc); + StdzShiftOp(*insn); + } 
else if (insn->IsComment()) { + StdzCommentOp(*insn); } else { LogInfo::MapleLogger() << "Need STDZ function for " << insn->GetDesc()->GetName() << "\n"; CHECK_FATAL(false, "NIY"); @@ -61,19 +69,37 @@ void Standardize::AddressMapping(Insn &insn) const { Operand &dest = insn.GetOperand(kInsnFirstOpnd); Operand &src1 = insn.GetOperand(kInsnSecondOpnd); uint32 destSize = dest.GetSize(); + CHECK_FATAL(dest.IsRegister(), "AddressMapping: not reg operand"); + bool isInt = static_cast(dest).GetRegisterType() == kRegTyInt ? true : false; MOperator mOp = abstract::MOP_undef; switch (destSize) { case k8BitSize: - mOp = abstract::MOP_copy_rr_8; + if (isInt) { + mOp = abstract::MOP_copy_rr_8; + } else { + mOp = abstract::MOP_copy_ff_8; + } break; case k16BitSize: - mOp = abstract::MOP_copy_rr_16; + if (isInt) { + mOp = abstract::MOP_copy_rr_16; + } else { + mOp = abstract::MOP_copy_ff_16; + } break; case k32BitSize: - mOp = abstract::MOP_copy_rr_32; + if (isInt) { + mOp = abstract::MOP_copy_rr_32; + } else { + mOp = abstract::MOP_copy_ff_32; + } break; case k64BitSize: - mOp = abstract::MOP_copy_rr_64; + if (isInt) { + mOp = abstract::MOP_copy_rr_64; + } else { + mOp = abstract::MOP_copy_ff_64; + } break; default: break; diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp index fa3e08afb2..9e66289c19 100644 --- a/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp @@ -21,8 +21,20 @@ #include "isel.h" namespace maplebe { + +void X64MPIsel::HandleFuncExit() const { + BlockNode *block = cgFunc->GetFunction().GetBody(); + ASSERT(block != nullptr, "get func body block failed in CGFunc::GenerateInstruction"); + cgFunc->GetCurBB()->SetLastStmt(*block->GetLast()); + /* Set lastbb's frequency */ + cgFunc->SetLastBB(*cgFunc->GetCurBB()); + /* the last BB is return BB */ + cgFunc->GetLastBB()->SetKind(BB::kBBReturn); + cgFunc->PushBackExitBBsVec(*cgFunc->GetLastBB()); 
+} + /* Field-ID 0 is assigned to the top level structure. (Field-ID also defaults to 0 if it is not a structure.) */ -MemOperand &X64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId) const { +MemOperand &X64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId, RegOperand *baseReg) { PrimType symType; int32 fieldOffset = 0; if (fieldId == 0) { @@ -65,7 +77,8 @@ MemOperand &X64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uin return *result; } -void X64MPIsel::SelectReturn(NaryStmtNode &retNode, Operand &opnd) { +void X64MPIsel::SelectReturn(NaryStmtNode &retNode) { + Operand &opnd = *HandleExpr(retNode, *retNode.Opnd(0)); MIRType *retType = cgFunc->GetFunction().GetReturnType(); X64CallConvImpl retLocator(cgFunc->GetBecommon()); CCLocInfo retMech; @@ -125,7 +138,7 @@ void X64MPIsel::SelectPseduoForReturn(std::vector &retRegs) { } } -void X64MPIsel::SelectReturn() { +void X64MPIsel::SelectReturn(bool noOpnd [[maybe_unused]]) { /* jump to epilogue */ MOperator mOp = x64::MOP_jmpq_l; LabelNode *endLabel = cgFunc->GetEndLabel(); @@ -137,7 +150,7 @@ void X64MPIsel::SelectReturn() { cgFunc->GetExitBBsVec().emplace_back(cgFunc->GetCurBB()); } -void X64MPIsel::CreateCallStructParamPassByStack(MemOperand &memOpnd, int32 symSize, int32 baseOffset) { +void X64MPIsel::CreateCallStructParamPassByStack(const MemOperand &memOpnd, uint32 symSize, int32 baseOffset) { int32 copyTime = RoundUp(symSize, GetPointerSize()) / GetPointerSize(); for (int32 i = 0; i < copyTime; ++i) { ImmOperand &newImmOpnd = static_cast(*memOpnd.GetOffsetOperand()->Clone(*cgFunc->GetMemoryPool())); @@ -150,7 +163,7 @@ void X64MPIsel::CreateCallStructParamPassByStack(MemOperand &memOpnd, int32 symS } } -void X64MPIsel::CreateCallStructParamPassByReg(MemOperand &memOpnd, regno_t regNo, uint32 parmNum) { +void X64MPIsel::CreateCallStructParamPassByReg(const MemOperand &memOpnd, regno_t regNo, uint32 parmNum) { CHECK_FATAL(parmNum < 
kMaxStructParamByReg, "Exceeded maximum allowed fp parameter registers for struct passing"); RegOperand &parmOpnd = cgFunc->GetOpndBuilder()->CreatePReg(regNo, k64BitSize, kRegTyInt); ImmOperand &newImmOpnd = static_cast(*memOpnd.GetOffsetOperand()->Clone(*cgFunc->GetMemoryPool())); @@ -196,13 +209,13 @@ void X64MPIsel::SelectParmListForAggregate(BaseNode &argExpr, X64CallConvImpl &p CHECK_FATAL(ploc.fpSize == 0, "Unknown call parameter state"); CreateCallStructParamPassByReg(memOpnd, ploc.reg0, 0); if (ploc.reg1 != kRinvalid) { - CreateCallStructParamPassByReg(memOpnd, ploc.reg1, 1); + CreateCallStructParamPassByReg(memOpnd, ploc.reg1, kSecondReg); } if (ploc.reg2 != kRinvalid) { - CreateCallStructParamPassByReg(memOpnd, ploc.reg2, 2); + CreateCallStructParamPassByReg(memOpnd, ploc.reg2, kThirdReg); } if (ploc.reg3 != kRinvalid) { - CreateCallStructParamPassByReg(memOpnd, ploc.reg3, 3); + CreateCallStructParamPassByReg(memOpnd, ploc.reg3, kFourthReg); } } } @@ -452,7 +465,7 @@ void X64MPIsel::SelectLibCallNArg(const std::string &funcName, std::vectorGetLabelIdx(); Operand *result = nullptr; @@ -480,7 +493,8 @@ RegOperand *X64MPIsel::PrepareMemcpyParm(uint64 copySize) { return ®Result; } -void X64MPIsel::SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRhs) { +void X64MPIsel::SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRhs, const DassignNode &stmt) { + (void)stmt; /* rhs is Func Return, it must be from Regread */ if (opndRhs.IsRegister()) { SelectIntAggCopyReturn(symbolMem, lhsInfo.size); @@ -621,6 +635,7 @@ void X64MPIsel::SelectIgoto(Operand &opnd0) { Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); jmpInsn.AddOpndChain(opnd0); cgFunc->GetCurBB()->AppendInsn(jmpInsn); + cgFunc->SetCurBBKind(BB::kBBGoto); return; } @@ -801,6 +816,7 @@ void X64MPIsel::SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, 
X64CG::kMd[mOp]); jmpInsn.AddOpndChain(dstMemOpnd); cgFunc->GetCurBB()->AppendInsn(jmpInsn); + cgFunc->SetCurBBKind(BB::kBBIgoto); } Operand *X64MPIsel::SelectAddrof(AddrofNode &expr, const BaseNode &parent) { @@ -912,7 +928,8 @@ static X64MOP_t PickJmpInsn(Opcode brOp, Opcode cmpOp, bool isFloat, bool isSign * handle brfalse/brtrue op, opnd0 can be a compare node or non-compare node * such as a dread for example */ -void X64MPIsel::SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode, Operand &opnd0) { +void X64MPIsel::SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode) { + Operand &opnd0 = *HandleExpr(stmt, condNode); Opcode opcode = stmt.GetOpCode(); X64MOP_t jmpOperator = x64::MOP_begin; if (opnd0.IsImmediate()) { @@ -1335,4 +1352,139 @@ void X64MPIsel::SelectAsm(AsmNode &node) { cgFunc->SetHasAsm(); CHECK_FATAL(false, "NIY"); } + +Operand *X64MPIsel::SelectAbs(UnaryNode &node, Operand &opnd0, const BaseNode &parent) { + PrimType primType = node.GetPrimType(); + if (IsPrimitiveVector(primType)) { + CHECK_FATAL(false, "NIY"); + } else if (IsPrimitiveFloat(primType)) { + CHECK_FATAL(false, "NIY"); + } else if (IsUnsignedInteger(primType)) { + return &opnd0; + } else { + /* + * abs(x) = (x XOR y) - y + * y = x >>> (bitSize - 1) + */ + uint32 bitSize = GetPrimTypeBitSize(primType); + CHECK_FATAL(bitSize == k64BitSize || bitSize == k32BitSize, "only support 32-bits or 64-bits"); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType); + ImmOperand &immOpnd = cgFunc->GetOpndBuilder()->CreateImm(bitSize, bitSize - 1); + RegOperand ®Opndy = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + SelectShift(regOpndy, regOpnd0, immOpnd, OP_ashr, primType, primType); + RegOperand &tmpOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + SelectBxor(tmpOpnd, regOpnd0, regOpndy, primType); + RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); 
+ SelectSub(resOpnd, tmpOpnd, regOpndy, primType); + return &resOpnd; + } +} + +Operand *X64MPIsel::SelectCsin(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCsinh(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCasin(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCcos(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCcosh(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCacos(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCatan(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectClog(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectClog10(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCsinf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCsinhf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCasinf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCcosf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + 
CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCcoshf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCacosf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCatanf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCexpf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectClogf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectClog10f(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCffs(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCmemcmp(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCstrlen(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCstrcmp(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCstrncmp(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCstrchr(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + +Operand *X64MPIsel::SelectCstrrchr(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); +} + } diff --git 
a/src/mapleall/maple_be/src/cg/x86_64/x64_cgfunc.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_cgfunc.cpp index 0b29ca4ae9..4146be8dcc 100644 --- a/src/mapleall/maple_be/src/cg/x86_64/x64_cgfunc.cpp +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_cgfunc.cpp @@ -73,7 +73,7 @@ void X64CGFunc::SelectAssertNull(UnaryStmtNode &stmt) { void X64CGFunc::SelectAsm(AsmNode &node) { CHECK_FATAL(false, "NIY"); } -void X64CGFunc::SelectAggDassign(DassignNode &stmt) { +void X64CGFunc::SelectAggDassign(const DassignNode &stmt) { CHECK_FATAL(false, "NIY"); } void X64CGFunc::SelectIassign(IassignNode &stmt) { diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp index a506e0e28f..61247f856b 100644 --- a/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp @@ -56,10 +56,10 @@ void X64Standardize::StdzBasicOp(Insn &insn) { insn.AddOpndChain(src2).AddOpndChain(dest); } -void X64Standardize::StdzUnaryOp(Insn &insn, CGFunc &cgFunc) { +void X64Standardize::StdzUnaryOp(Insn &insn) { MOperator mOp = insn.GetMachineOpcode(); if (mOp == abstract::MOP_neg_f_32 || mOp == abstract::MOP_neg_f_64) { - StdzFloatingNeg(insn, cgFunc); + StdzFloatingNeg(insn); return; } X64MOP_t directlyMappingMop = GetMopFromAbstraceIRMop(insn.GetMachineOpcode()); @@ -69,7 +69,7 @@ void X64Standardize::StdzUnaryOp(Insn &insn, CGFunc &cgFunc) { insn.AddOpndChain(dest); } -void X64Standardize::StdzCvtOp(Insn &insn, CGFunc &cgFunc) { +void X64Standardize::StdzCvtOp(Insn &insn) { uint32 OpndDesSize = insn.GetDesc()->GetOpndDes(kInsnFirstOpnd)->GetSize(); uint32 destSize = OpndDesSize; uint32 OpndSrcSize = insn.GetDesc()->GetOpndDes(kInsnSecondOpnd)->GetSize(); @@ -80,12 +80,30 @@ void X64Standardize::StdzCvtOp(Insn &insn, CGFunc &cgFunc) { case abstract::MOP_zext_rr_64_32: destSize = k32BitSize; break; - case abstract::MOP_cvt_fr_u32: + case abstract::MOP_cvt_f32_u32: srcSize = 
k64BitSize; break; - case abstract::MOP_cvt_rf_u32: + case abstract::MOP_cvt_u32_f32: destSize = k64BitSize; break; + case abstract::MOP_zext_rr_8_16: + case abstract::MOP_sext_rr_8_16: + case abstract::MOP_zext_rr_8_32: + case abstract::MOP_sext_rr_8_32: + case abstract::MOP_zext_rr_16_32: + case abstract::MOP_sext_rr_16_32: + case abstract::MOP_zext_rr_8_64: + case abstract::MOP_sext_rr_8_64: + case abstract::MOP_zext_rr_16_64: + case abstract::MOP_sext_rr_16_64: + case abstract::MOP_sext_rr_32_64: + /* reverse operands */ + destSize = OpndSrcSize; + srcSize = OpndDesSize; + break; + case abstract::MOP_zext_rr_32_64: + srcSize = k32BitSize; + destSize = k32BitSize; default: break; } @@ -95,13 +113,13 @@ void X64Standardize::StdzCvtOp(Insn &insn, CGFunc &cgFunc) { Operand *opnd0 = &insn.GetOperand(kInsnSecondOpnd); RegOperand *src = static_cast(opnd0); if (srcSize != OpndSrcSize) { - src = &cgFunc.GetOpndBuilder()->CreateVReg(src->GetRegisterNumber(), + src = &GetCgFunc()->GetOpndBuilder()->CreateVReg(src->GetRegisterNumber(), srcSize, src->GetRegisterType()); } Operand *opnd1 = &insn.GetOperand(kInsnFirstOpnd); RegOperand *dest = static_cast(opnd1); if (destSize != OpndDesSize) { - dest = &cgFunc.GetOpndBuilder()->CreateVReg(dest->GetRegisterNumber(), + dest = &GetCgFunc()->GetOpndBuilder()->CreateVReg(dest->GetRegisterNumber(), destSize, dest->GetRegisterType()); } insn.CleanAllOperand(); @@ -120,14 +138,14 @@ void X64Standardize::StdzCvtOp(Insn &insn, CGFunc &cgFunc) { * 32: xorl 0x80000000 R1 * movd R1 xmm0 */ -void X64Standardize::StdzFloatingNeg(Insn &insn, CGFunc &cgFunc) { +void X64Standardize::StdzFloatingNeg(Insn &insn) { MOperator mOp = insn.GetMachineOpcode(); uint32 bitSize = mOp == abstract::MOP_neg_f_32 ? k32BitSize : k64BitSize; // mov dest -> tmpOperand0 MOperator movOp = mOp == abstract::MOP_neg_f_32 ? 
x64::MOP_movd_fr_r : x64::MOP_movq_fr_r; - RegOperand *tmpOperand0 = &cgFunc.GetOpndBuilder()->CreateVReg(bitSize, kRegTyInt); - Insn &movInsn0 = cgFunc.GetInsnBuilder()->BuildInsn(movOp, X64CG::kMd[movOp]); + RegOperand *tmpOperand0 = &GetCgFunc()->GetOpndBuilder()->CreateVReg(bitSize, kRegTyInt); + Insn &movInsn0 = GetCgFunc()->GetInsnBuilder()->BuildInsn(movOp, X64CG::kMd[movOp]); Operand &dest = insn.GetOperand(kInsnFirstOpnd); movInsn0.AddOpndChain(dest).AddOpndChain(*tmpOperand0); insn.GetBB()->InsertInsnBefore(insn, movInsn0); @@ -135,26 +153,26 @@ void X64Standardize::StdzFloatingNeg(Insn &insn, CGFunc &cgFunc) { // 32 : xorl 0x80000000 tmpOperand0 // 64 : movabs 0x8000000000000000 tmpOperand1 // xorq tmpOperand1 tmpOperand0 - ImmOperand &imm = cgFunc.GetOpndBuilder()->CreateImm(bitSize, (static_cast(1) << (bitSize - 1))); + ImmOperand &imm = GetCgFunc()->GetOpndBuilder()->CreateImm(bitSize, (static_cast(1) << (bitSize - 1))); if (mOp == abstract::MOP_neg_f_64) { - Operand *tmpOperand1 = &cgFunc.GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); - Insn &movabs = cgFunc.GetInsnBuilder()->BuildInsn(x64::MOP_movabs_i_r, X64CG::kMd[x64::MOP_movabs_i_r]); + Operand *tmpOperand1 = &GetCgFunc()->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + Insn &movabs = GetCgFunc()->GetInsnBuilder()->BuildInsn(x64::MOP_movabs_i_r, X64CG::kMd[x64::MOP_movabs_i_r]); movabs.AddOpndChain(imm).AddOpndChain(*tmpOperand1); insn.GetBB()->InsertInsnBefore(insn, movabs); MOperator xorOp = x64::MOP_xorq_r_r; - Insn &xorq = cgFunc.GetInsnBuilder()->BuildInsn(xorOp, X64CG::kMd[xorOp]); + Insn &xorq = GetCgFunc()->GetInsnBuilder()->BuildInsn(xorOp, X64CG::kMd[xorOp]); xorq.AddOpndChain(*tmpOperand1).AddOpndChain(*tmpOperand0); insn.GetBB()->InsertInsnBefore(insn, xorq); } else { MOperator xorOp = x64::MOP_xorl_i_r; - Insn &xorq = cgFunc.GetInsnBuilder()->BuildInsn(xorOp, X64CG::kMd[xorOp]); + Insn &xorq = GetCgFunc()->GetInsnBuilder()->BuildInsn(xorOp, X64CG::kMd[xorOp]); 
xorq.AddOpndChain(imm).AddOpndChain(*tmpOperand0); insn.GetBB()->InsertInsnBefore(insn, xorq); } // mov tmpOperand0 -> dest - Insn &movq = cgFunc.GetInsnBuilder()->BuildInsn(movOp, X64CG::kMd[movOp]); + Insn &movq = GetCgFunc()->GetInsnBuilder()->BuildInsn(movOp, X64CG::kMd[movOp]); movq.AddOpndChain(*tmpOperand0).AddOpndChain(dest); insn.GetBB()->InsertInsnBefore(insn, movq); @@ -162,17 +180,17 @@ void X64Standardize::StdzFloatingNeg(Insn &insn, CGFunc &cgFunc) { return; } -void X64Standardize::StdzShiftOp(Insn &insn, CGFunc &cgFunc) { +void X64Standardize::StdzShiftOp(Insn &insn) { RegOperand *countOpnd = static_cast(&insn.GetOperand(kInsnThirdOpnd)); /* count operand cvt -> PTY_u8 */ if (countOpnd->GetSize() != GetPrimTypeBitSize(PTY_u8)) { - countOpnd = &cgFunc.GetOpndBuilder()->CreateVReg(countOpnd->GetRegisterNumber(), + countOpnd = &GetCgFunc()->GetOpndBuilder()->CreateVReg(countOpnd->GetRegisterNumber(), GetPrimTypeBitSize(PTY_u8), countOpnd->GetRegisterType()); } /* copy count operand to cl(rcx) register */ - RegOperand &clOpnd = cgFunc.GetOpndBuilder()->CreatePReg(x64::RCX, GetPrimTypeBitSize(PTY_u8), kRegTyInt); + RegOperand &clOpnd = GetCgFunc()->GetOpndBuilder()->CreatePReg(x64::RCX, GetPrimTypeBitSize(PTY_u8), kRegTyInt); X64MOP_t copyMop = x64::MOP_movb_r_r; - Insn ©Insn = cgFunc.GetInsnBuilder()->BuildInsn(copyMop, X64CG::kMd[copyMop]); + Insn ©Insn = GetCgFunc()->GetInsnBuilder()->BuildInsn(copyMop, X64CG::kMd[copyMop]); copyInsn.AddOpndChain(*countOpnd).AddOpndChain(clOpnd); insn.GetBB()->InsertInsnBefore(insn, copyInsn); /* shift OP */ @@ -183,4 +201,8 @@ void X64Standardize::StdzShiftOp(Insn &insn, CGFunc &cgFunc) { insn.AddOpndChain(clOpnd).AddOpndChain(destOpnd); } +void X64Standardize::StdzCommentOp(Insn &insn) { + insn.GetBB()->RemoveInsn(insn); +} + } -- Gitee From 6cc72ee3d646ffd2d320b20970d94b9d94bc7eee Mon Sep 17 00:00:00 2001 From: William Chen Date: Mon, 19 Dec 2022 11:31:50 -0800 Subject: [PATCH 11/25] Update copyright date --- 
src/mapleall/maple_be/BUILD.gn | 2 +- .../maple_be/include/cg/aarch64/aarch64_cg.h | 2 +- .../maple_be/include/cg/aarch64/aarch64_md.def | 2 +- .../maple_be/include/cg/aarch64/aarch64_peep.h | 2 +- src/mapleall/maple_be/include/cg/cg_option.h | 2 +- src/mapleall/maple_be/include/cg/cgbb.h | 2 +- src/mapleall/maple_be/include/cg/immvalid.def | 14 ++++++++++++++ src/mapleall/maple_be/include/cg/isa.h | 2 +- .../maple_be/src/cg/aarch64/aarch64_peep.cpp | 2 +- .../maple_be/src/cg/aarch64/aarch64_reaching.cpp | 2 +- src/mapleall/maple_be/src/cg/cg.cpp | 2 +- src/mapleall/maple_be/src/cg/cg_cfg.cpp | 2 +- src/mapleall/maple_be/src/cg/cg_option.cpp | 2 +- src/mapleall/maple_be/src/cg/cg_ssa.cpp | 2 +- src/mapleall/maple_be/src/cg/cgbb.cpp | 2 +- 15 files changed, 28 insertions(+), 14 deletions(-) diff --git a/src/mapleall/maple_be/BUILD.gn b/src/mapleall/maple_be/BUILD.gn index b949fc9ac4..3a2d7938a6 100644 --- a/src/mapleall/maple_be/BUILD.gn +++ b/src/mapleall/maple_be/BUILD.gn @@ -1,5 +1,5 @@ # -# Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. +# Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. # # OpenArkCompiler is licensed under Mulan PSL v2. # You can use this software according to the terms and conditions of the Mulan PSL v2. diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h index 3da19ed9dc..1e1c87159e 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def b/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def index 8cbf5aabb6..2b906c4582 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under the Mulan PSL v1. * You can use this software according to the terms and conditions of the Mulan PSL v1. diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h index ea6e76836d..2c215ff5ce 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. diff --git a/src/mapleall/maple_be/include/cg/cg_option.h b/src/mapleall/maple_be/include/cg/cg_option.h index b8fe5ccf19..a996adc4f9 100644 --- a/src/mapleall/maple_be/include/cg/cg_option.h +++ b/src/mapleall/maple_be/include/cg/cg_option.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
diff --git a/src/mapleall/maple_be/include/cg/cgbb.h b/src/mapleall/maple_be/include/cg/cgbb.h index f9d2b210f6..511fe3bca9 100644 --- a/src/mapleall/maple_be/include/cg/cgbb.h +++ b/src/mapleall/maple_be/include/cg/cgbb.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. diff --git a/src/mapleall/maple_be/include/cg/immvalid.def b/src/mapleall/maple_be/include/cg/immvalid.def index 4a6df76761..cd63123059 100644 --- a/src/mapleall/maple_be/include/cg/immvalid.def +++ b/src/mapleall/maple_be/include/cg/immvalid.def @@ -1,3 +1,17 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. +*/ static std::set ValidBitmaskImmSet = { #include "valid_bitmask_imm.txt" }; diff --git a/src/mapleall/maple_be/include/cg/isa.h b/src/mapleall/maple_be/include/cg/isa.h index 53ef48af09..391e0013c3 100644 --- a/src/mapleall/maple_be/include/cg/isa.h +++ b/src/mapleall/maple_be/include/cg/isa.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp index 80966d8c6a..d4961cbddc 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp index 42d80fec7a..9124fc4d07 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. diff --git a/src/mapleall/maple_be/src/cg/cg.cpp b/src/mapleall/maple_be/src/cg/cg.cpp index ca0aab8f38..70baa50ee4 100644 --- a/src/mapleall/maple_be/src/cg/cg.cpp +++ b/src/mapleall/maple_be/src/cg/cg.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
diff --git a/src/mapleall/maple_be/src/cg/cg_cfg.cpp b/src/mapleall/maple_be/src/cg/cg_cfg.cpp index 29c11b3cbc..5929c98b50 100644 --- a/src/mapleall/maple_be/src/cg/cg_cfg.cpp +++ b/src/mapleall/maple_be/src/cg/cg_cfg.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. diff --git a/src/mapleall/maple_be/src/cg/cg_option.cpp b/src/mapleall/maple_be/src/cg/cg_option.cpp index c700b871a0..1fce62c2e1 100644 --- a/src/mapleall/maple_be/src/cg/cg_option.cpp +++ b/src/mapleall/maple_be/src/cg/cg_option.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. diff --git a/src/mapleall/maple_be/src/cg/cg_ssa.cpp b/src/mapleall/maple_be/src/cg/cg_ssa.cpp index f28a528e2a..4f71d65574 100644 --- a/src/mapleall/maple_be/src/cg/cg_ssa.cpp +++ b/src/mapleall/maple_be/src/cg/cg_ssa.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. diff --git a/src/mapleall/maple_be/src/cg/cgbb.cpp b/src/mapleall/maple_be/src/cg/cgbb.cpp index 45d9e1ae0f..2cfa7712b0 100644 --- a/src/mapleall/maple_be/src/cg/cgbb.cpp +++ b/src/mapleall/maple_be/src/cg/cgbb.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. 
+ * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. -- Gitee From 5c4f9e4b6fb96a63122bb567d1e4101472cc385a Mon Sep 17 00:00:00 2001 From: William Chen Date: Mon, 19 Dec 2022 16:33:21 -0800 Subject: [PATCH 12/25] Fix rangegoto adding successor bb --- src/mapleall/maple_be/src/cg/isel.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/mapleall/maple_be/src/cg/isel.cpp b/src/mapleall/maple_be/src/cg/isel.cpp index 6f4b9ca3cd..9ca42024a3 100644 --- a/src/mapleall/maple_be/src/cg/isel.cpp +++ b/src/mapleall/maple_be/src/cg/isel.cpp @@ -239,6 +239,7 @@ void HandleRangeGoto(StmtNode &stmt, MPISel &iSel) { Operand *srcOpnd = iSel.HandleExpr(rangeGotoNode, *srcNode); cgFunc->SetCurBBKind(BB::kBBRangeGoto); iSel.SelectRangeGoto(rangeGotoNode, *srcOpnd); + cgFunc->SetCurBB(*cgFunc->StartNewBB(rangeGotoNode)); } void HandleIgoto(StmtNode &stmt, MPISel &iSel) { -- Gitee From b3302253728d3263f027d1527f78d17f4f420c38 Mon Sep 17 00:00:00 2001 From: William Chen Date: Tue, 20 Dec 2022 10:08:35 -0800 Subject: [PATCH 13/25] Fix SelectRetype for different type --- src/mapleall/maple_be/src/cg/isel.cpp | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/mapleall/maple_be/src/cg/isel.cpp b/src/mapleall/maple_be/src/cg/isel.cpp index 9ca42024a3..82ef4871b0 100644 --- a/src/mapleall/maple_be/src/cg/isel.cpp +++ b/src/mapleall/maple_be/src/cg/isel.cpp @@ -1662,6 +1662,18 @@ Operand *MPISel::SelectRetype(TypeCvtNode &node, Operand &opnd0) { if (IsPrimitiveInteger(fromType) && IsPrimitiveInteger(toType)) { return &SelectCopy2Reg(opnd0, toType, fromType); } + if (IsPrimitiveInteger(fromType) && IsPrimitiveFloat(toType)) { + RegOperand *resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), + cgFunc->GetRegTyFromPrimTy(toType)); + SelectCvtInt2Float(*resOpnd, opnd0, toType, fromType); + 
return resOpnd; + } + if (IsPrimitiveFloat(fromType) && IsPrimitiveInteger(toType)) { + RegOperand *resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), + cgFunc->GetRegTyFromPrimTy(toType)); + SelectCvtFloat2Int(*resOpnd, opnd0, toType, fromType); + return resOpnd; + } CHECK_FATAL(false, "NIY, retype"); return nullptr; } -- Gitee From 7758f6d6828742bc66bf72267551dd2a2a74c84b Mon Sep 17 00:00:00 2001 From: eching Date: Tue, 20 Dec 2022 10:29:44 -0800 Subject: [PATCH 14/25] Fix mapping of abstract MOP to x64 insn for zext with 64 bit target and truncate from 64 bit source. --- .../maple_be/include/cg/x86_64/x64_abstract_mapping.def | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_abstract_mapping.def b/src/mapleall/maple_be/include/cg/x86_64/x64_abstract_mapping.def index 106b4d0b89..6f2daf65e1 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_abstract_mapping.def +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_abstract_mapping.def @@ -104,9 +104,9 @@ DEFINE_MAPPING(abstract::MOP_zext_rr_32_8, x64::MOP_movzbl_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_32_8, x64::MOP_movsbl_r_r) DEFINE_MAPPING(abstract::MOP_zext_rr_32_16, x64::MOP_movzwl_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_32_16, x64::MOP_movswl_r_r) -DEFINE_MAPPING(abstract::MOP_zext_rr_64_8, x64::MOP_movzbl_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_64_8, x64::MOP_movzbq_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_64_8, x64::MOP_movsbq_r_r) -DEFINE_MAPPING(abstract::MOP_zext_rr_64_16, x64::MOP_movzwl_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_64_16, x64::MOP_movzwq_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_64_16, x64::MOP_movswq_r_r) DEFINE_MAPPING(abstract::MOP_zext_rr_64_32, x64::MOP_movl_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_64_32, x64::MOP_movslq_r_r) @@ -118,9 +118,9 @@ DEFINE_MAPPING(abstract::MOP_zext_rr_8_32, x64::MOP_movzbl_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_8_32, x64::MOP_movsbl_r_r) 
DEFINE_MAPPING(abstract::MOP_zext_rr_16_32, x64::MOP_movzwl_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_16_32, x64::MOP_movswl_r_r) -DEFINE_MAPPING(abstract::MOP_zext_rr_8_64, x64::MOP_movzbl_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_8_64, x64::MOP_movzbq_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_8_64, x64::MOP_movsbq_r_r) -DEFINE_MAPPING(abstract::MOP_zext_rr_16_64, x64::MOP_movzwl_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_16_64, x64::MOP_movzwq_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_16_64, x64::MOP_movswq_r_r) DEFINE_MAPPING(abstract::MOP_zext_rr_32_64, x64::MOP_movl_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_32_64, x64::MOP_movslq_r_r) -- Gitee From 59040897948fca5262750b49d63766b714c6a4a6 Mon Sep 17 00:00:00 2001 From: eching Date: Tue, 27 Dec 2022 17:53:53 -0800 Subject: [PATCH 15/25] Fix inconsistency between MOP insn and operand size in x86_64 caused by 1) x64_standardize changing operand size (StdzCvtOp()) after doing MOP mapping (AddressMapping()) based on original operand size 2) isel not generating truncation insn when operand size of Maple OP_cvt is larger than its fromType. 
--- src/mapleall/maple_be/src/cg/isel.cpp | 8 ++++++++ src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp | 1 - 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/mapleall/maple_be/src/cg/isel.cpp b/src/mapleall/maple_be/src/cg/isel.cpp index 82ef4871b0..9111622f0e 100644 --- a/src/mapleall/maple_be/src/cg/isel.cpp +++ b/src/mapleall/maple_be/src/cg/isel.cpp @@ -1548,6 +1548,14 @@ RegOperand &MPISel::SelectCopy2Reg(Operand &src, PrimType toType, PrimType fromT return static_cast(src); } RegOperand &dest = cgFunc->GetOpndBuilder()->CreateVReg(toSize, cgFunc->GetRegTyFromPrimTy(toType)); + if (isReg && srcRegSize > toSize && IsPrimitiveInteger(toType)) { + /* truncate */ + MOperator mOp = GetFastCvtMopI(srcRegSize, toSize, false); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + (void)insn.AddOpndChain(dest).AddOpndChain(static_cast(src)); + cgFunc->GetCurBB()->AppendInsn(insn); + return dest; + } if (fromType == PTY_unknown || fromSize == toSize) { SelectCopy(dest, src, toType); } else if (fromSize != toSize) { diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp index 61247f856b..51fc99a06d 100644 --- a/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp @@ -75,7 +75,6 @@ void X64Standardize::StdzCvtOp(Insn &insn) { uint32 OpndSrcSize = insn.GetDesc()->GetOpndDes(kInsnSecondOpnd)->GetSize(); uint32 srcSize = OpndSrcSize; switch (insn.GetMachineOpcode()) { - case abstract::MOP_zext_rr_64_8: case abstract::MOP_zext_rr_64_16: case abstract::MOP_zext_rr_64_32: destSize = k32BitSize; -- Gitee From 2d44b37571291b55d3c2a6ae64e1c05d405489f3 Mon Sep 17 00:00:00 2001 From: eching Date: Wed, 28 Dec 2022 18:39:10 -0800 Subject: [PATCH 16/25] Moved isel fix for mop and operand size mismatch in commit 1cba6c67 from SelectCopy2Reg() to SelectIntCvt(). 
--- src/mapleall/maple_be/src/cg/isel.cpp | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/src/mapleall/maple_be/src/cg/isel.cpp b/src/mapleall/maple_be/src/cg/isel.cpp index 9111622f0e..73ac57e1b7 100644 --- a/src/mapleall/maple_be/src/cg/isel.cpp +++ b/src/mapleall/maple_be/src/cg/isel.cpp @@ -1170,13 +1170,15 @@ void MPISel::SelectIntCvt(RegOperand &resOpnd, Operand &opnd0, PrimType toType, * It is redundancy to insert "nop" casts (unsigned 32 -> singed 32) in abstract CG IR * The signedness of operands would be shown in the expression. */ - RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, fromType); + bool isSigned = !IsPrimitiveUnsigned(fromType); + uint32 bitSize = opnd0.GetSize(); + PrimType opndType = GetIntegerPrimTypeFromSize(isSigned, bitSize); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, fromType, opndType); if (toSize == fromSize) { resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(regOpnd0.GetRegisterNumber(), GetPrimTypeBitSize(toType), cgFunc->GetRegTyFromPrimTy(toType)); return; } - bool isSigned = !IsPrimitiveUnsigned(fromType); MOperator mOp = GetFastCvtMopI(fromSize, toSize, isSigned); Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); (void)insn.AddOpndChain(resOpnd).AddOpndChain(regOpnd0); @@ -1548,14 +1550,6 @@ RegOperand &MPISel::SelectCopy2Reg(Operand &src, PrimType toType, PrimType fromT return static_cast(src); } RegOperand &dest = cgFunc->GetOpndBuilder()->CreateVReg(toSize, cgFunc->GetRegTyFromPrimTy(toType)); - if (isReg && srcRegSize > toSize && IsPrimitiveInteger(toType)) { - /* truncate */ - MOperator mOp = GetFastCvtMopI(srcRegSize, toSize, false); - Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); - (void)insn.AddOpndChain(dest).AddOpndChain(static_cast(src)); - cgFunc->GetCurBB()->AppendInsn(insn); - return dest; - } if (fromType == PTY_unknown || fromSize == toSize) { SelectCopy(dest, src, toType); } else if (fromSize != toSize) { -- 
Gitee From cec127fee178054c02cc45e8c610e1cdb428b459 Mon Sep 17 00:00:00 2001 From: eching Date: Thu, 29 Dec 2022 10:49:59 -0800 Subject: [PATCH 17/25] Fix for testcases CF11134-testsuite-EXP_3-test226 and CF11134-testsuite-EXP_3-test408 --- src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp index d4961cbddc..171ae21847 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp @@ -28,6 +28,7 @@ const std::string kMccLoadRefV = "MCC_LoadVolatileField"; const std::string kMccLoadRefS = "MCC_LoadRefStatic"; const std::string kMccLoadRefVS = "MCC_LoadVolatileStaticField"; const std::string kMccDummy = "MCC_Dummy"; +#define NOMULOPT (true) const std::string GetReadBarrierName(const Insn &insn) { constexpr int32 totalBarrierNamesNum = 5; @@ -2516,6 +2517,9 @@ bool MulImmToShiftPattern::CheckCondition(Insn &insn) { } void MulImmToShiftPattern::Run(BB &bb, Insn &insn) { + if (NOMULOPT) { + return; + } /* mov x0,imm and mul to shift */ if (!CheckCondition(insn)) { return; -- Gitee From 8bee9ce8da8f337b9d9df1b3eb379455a4e15486 Mon Sep 17 00:00:00 2001 From: William Chen Date: Thu, 29 Dec 2022 17:09:33 -0800 Subject: [PATCH 18/25] fix MulImmToShiftPattern peep --- .../maple_be/src/cg/aarch64/aarch64_peep.cpp | 32 +++++++++++-------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp index 171ae21847..c720f0f8af 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp @@ -28,7 +28,6 @@ const std::string kMccLoadRefV = "MCC_LoadVolatileField"; const std::string kMccLoadRefS = "MCC_LoadRefStatic"; const std::string kMccLoadRefVS = 
"MCC_LoadVolatileStaticField"; const std::string kMccDummy = "MCC_Dummy"; -#define NOMULOPT (true) const std::string GetReadBarrierName(const Insn &insn) { constexpr int32 totalBarrierNamesNum = 5; @@ -2506,8 +2505,13 @@ bool MulImmToShiftPattern::CheckCondition(Insn &insn) { if (immOpnd.IsNegative()) { return false; } - int64 immVal = immOpnd.GetValue(); - /* 0 considered power of 2 */ + uint64 immVal = immOpnd.GetValue(); + if (immVal == 0) { + shiftVal = 0; + newMop = insn.GetMachineOpcode() == MOP_xmulrrr ? MOP_xmovri64 : MOP_wmovri32; + return true; + } + /* power of 2 */ if ((immVal & (immVal - 1)) != 0) { return false; } @@ -2517,26 +2521,28 @@ bool MulImmToShiftPattern::CheckCondition(Insn &insn) { } void MulImmToShiftPattern::Run(BB &bb, Insn &insn) { - if (NOMULOPT) { - return; - } /* mov x0,imm and mul to shift */ if (!CheckCondition(insn)) { return; } auto *aarch64CGFunc = static_cast(cgFunc); - ImmOperand &shiftOpnd = aarch64CGFunc->CreateImmOperand(shiftVal, k32BitSize, false); - Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, insn.GetOperand(kInsnFirstOpnd), - insn.GetOperand(kInsnSecondOpnd), shiftOpnd); - bb.ReplaceInsn(insn, newInsn); + ImmOperand &immOpnd = aarch64CGFunc->CreateImmOperand(shiftVal, k32BitSize, false); + Insn *newInsn; + if (newMop == MOP_xmovri64 || newMop == MOP_wmovri32) { + newInsn = &cgFunc->GetInsnBuilder()->BuildInsn(newMop, insn.GetOperand(kInsnFirstOpnd), immOpnd); + } else { + newInsn = &cgFunc->GetInsnBuilder()->BuildInsn(newMop, insn.GetOperand(kInsnFirstOpnd), + insn.GetOperand(kInsnSecondOpnd), immOpnd); + } + bb.ReplaceInsn(insn, *newInsn); /* update ssa info */ - ssaInfo->ReplaceInsn(insn, newInsn); + ssaInfo->ReplaceInsn(insn, *newInsn); optSuccess = true; - SetCurrInsn(&newInsn); + SetCurrInsn(newInsn); if (CG_PEEP_DUMP) { std::vector prevs; prevs.emplace_back(movInsn); - DumpAfterPattern(prevs, &insn, &newInsn); + DumpAfterPattern(prevs, &insn, newInsn); } } -- Gitee From 
0ccdf39488153e0838f9be51ee8b94c8612ce49f Mon Sep 17 00:00:00 2001 From: eching Date: Wed, 4 Jan 2023 16:10:35 -0800 Subject: [PATCH 19/25] Disable changing 64 bit dest size to 32 for MOP_zext_rr_64_16 to fix additional china testcases --- src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp index 51fc99a06d..664af1029e 100644 --- a/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp @@ -75,7 +75,6 @@ void X64Standardize::StdzCvtOp(Insn &insn) { uint32 OpndSrcSize = insn.GetDesc()->GetOpndDes(kInsnSecondOpnd)->GetSize(); uint32 srcSize = OpndSrcSize; switch (insn.GetMachineOpcode()) { - case abstract::MOP_zext_rr_64_16: case abstract::MOP_zext_rr_64_32: destSize = k32BitSize; break; -- Gitee From 317df3060218383b9906e26ea1df63adb91d1281 Mon Sep 17 00:00:00 2001 From: William Chen Date: Thu, 5 Jan 2023 15:35:22 -0800 Subject: [PATCH 20/25] Initialize VregInfo to reset vreg number --- src/mapleall/maple_be/include/cg/cg_irbuilder.h | 1 - src/mapleall/maple_be/include/cg/reg_info.h | 15 +++++++++++++++ src/mapleall/maple_be/src/cg/cg.cpp | 5 +++++ 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/src/mapleall/maple_be/include/cg/cg_irbuilder.h b/src/mapleall/maple_be/include/cg/cg_irbuilder.h index d18f242855..74614c9ae4 100644 --- a/src/mapleall/maple_be/include/cg/cg_irbuilder.h +++ b/src/mapleall/maple_be/include/cg/cg_irbuilder.h @@ -64,7 +64,6 @@ class OperandBuilder { virtualReg.SetCount(mirPregNum); } - /* create an operand in cgfunc when no mempool is supplied */ ImmOperand &CreateImm(uint32 size, int64 value, MemPool *mp = nullptr); ImmOperand &CreateImm(const MIRSymbol &symbol, int64 offset, int32 relocs, MemPool *mp = nullptr); diff --git a/src/mapleall/maple_be/include/cg/reg_info.h 
b/src/mapleall/maple_be/include/cg/reg_info.h index 8099522b46..1b6797ac6b 100644 --- a/src/mapleall/maple_be/include/cg/reg_info.h +++ b/src/mapleall/maple_be/include/cg/reg_info.h @@ -58,6 +58,21 @@ class VregInfo { static uint32 maxRegCount; static std::vector vRegTable; static std::unordered_map vRegOperandTable; + static bool initialized; + + VregInfo() { + if (initialized) { + initialized = false; + return; + } + initialized = true; + virtualRegCount = kBaseVirtualRegNO; + maxRegCount = kBaseVirtualRegNO; + vRegTable.clear(); + vRegOperandTable.clear(); + } + + ~VregInfo() = default; uint32 GetNextVregNO(RegType type, uint32 size) { /* when vReg reach to maxRegCount, maxRegCount limit adds 80 every time */ diff --git a/src/mapleall/maple_be/src/cg/cg.cpp b/src/mapleall/maple_be/src/cg/cg.cpp index 70baa50ee4..a11de75c57 100644 --- a/src/mapleall/maple_be/src/cg/cg.cpp +++ b/src/mapleall/maple_be/src/cg/cg.cpp @@ -24,6 +24,11 @@ uint32 VregInfo::virtualRegCount = kBaseVirtualRegNO; uint32 VregInfo::maxRegCount = 0; std::vector VregInfo::vRegTable; std::unordered_map VregInfo::vRegOperandTable; +/* There are two builders, cgfunc builder (original code selector) and irbuilder (abstract). + * This is to prevent conflict between the two for VregInfo as for arm64 both co-exists. + * When switching to irbuilder completely, then this bool can go away. 
+ */ +bool VregInfo::initialized = false; void Globals::SetTarget(CG &target) { cg = ⌖ -- Gitee From 5ff9691e01b3ddabc948735926143654e5fd63e0 Mon Sep 17 00:00:00 2001 From: William Chen Date: Fri, 6 Jan 2023 17:01:00 -0800 Subject: [PATCH 21/25] Remove fix for bug uncovered by --Be --ilp32 resulting in ubfx with 255 position --- src/mapleall/maple_be/src/be/lower.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/mapleall/maple_be/src/be/lower.cpp b/src/mapleall/maple_be/src/be/lower.cpp index 2b4b33c4c4..33cedfdead 100644 --- a/src/mapleall/maple_be/src/be/lower.cpp +++ b/src/mapleall/maple_be/src/be/lower.cpp @@ -839,7 +839,7 @@ StmtNode *CGLowerer::WriteBitField(const std::pair &byteBitOffsets BaseNode *CGLowerer::ReadBitField(const std::pair &byteBitOffsets, const MIRBitFieldType *fieldType, BaseNode *baseAddr) { auto bitSize = fieldType->GetFieldSize(); - auto primType = GetRegPrimType(fieldType->GetPrimType()); + auto primType = fieldType->GetPrimType(); auto byteOffset = byteBitOffsets.first; auto bitOffset = byteBitOffsets.second; auto *builder = mirModule.GetMIRBuilder(); -- Gitee From 78121126f52e6c407dfe0674d0ef4b3b1e4ea209 Mon Sep 17 00:00:00 2001 From: William Chen Date: Mon, 9 Jan 2023 14:54:33 -0800 Subject: [PATCH 22/25] Change x86 intrinsic assert to same as isel. 
--- .../maple_be/src/cg/x86_64/x64_MPIsel.cpp | 78 ++++++++++++------- 1 file changed, 52 insertions(+), 26 deletions(-) diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp index 9e66289c19..a34f35d506 100644 --- a/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp @@ -1384,107 +1384,133 @@ Operand *X64MPIsel::SelectAbs(UnaryNode &node, Operand &opnd0, const BaseNode &p } Operand *X64MPIsel::SelectCsin(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { - CHECK_FATAL(false, "NIY"); + ASSERT(false, "NIY"); + return nullptr; } Operand *X64MPIsel::SelectCsinh(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { - CHECK_FATAL(false, "NIY"); + ASSERT(false, "NIY"); + return nullptr; } Operand *X64MPIsel::SelectCasin(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { - CHECK_FATAL(false, "NIY"); + ASSERT(false, "NIY"); + return nullptr; } Operand *X64MPIsel::SelectCcos(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { - CHECK_FATAL(false, "NIY"); + ASSERT(false, "NIY"); + return nullptr; } Operand *X64MPIsel::SelectCcosh(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { - CHECK_FATAL(false, "NIY"); + ASSERT(false, "NIY"); + return nullptr; } Operand *X64MPIsel::SelectCacos(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { - CHECK_FATAL(false, "NIY"); + ASSERT(false, "NIY"); + return nullptr; } Operand *X64MPIsel::SelectCatan(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { - CHECK_FATAL(false, "NIY"); + ASSERT(false, "NIY"); + return nullptr; } Operand *X64MPIsel::SelectClog(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { - CHECK_FATAL(false, "NIY"); + ASSERT(false, "NIY"); + return nullptr; } Operand 
*X64MPIsel::SelectClog10(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { - CHECK_FATAL(false, "NIY"); + ASSERT(false, "NIY"); + return nullptr; } Operand *X64MPIsel::SelectCsinf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { - CHECK_FATAL(false, "NIY"); + ASSERT(false, "NIY"); + return nullptr; } Operand *X64MPIsel::SelectCsinhf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { - CHECK_FATAL(false, "NIY"); + ASSERT(false, "NIY"); + return nullptr; } Operand *X64MPIsel::SelectCasinf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { - CHECK_FATAL(false, "NIY"); + ASSERT(false, "NIY"); + return nullptr; } Operand *X64MPIsel::SelectCcosf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { - CHECK_FATAL(false, "NIY"); + ASSERT(false, "NIY"); + return nullptr; } Operand *X64MPIsel::SelectCcoshf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { - CHECK_FATAL(false, "NIY"); + ASSERT(false, "NIY"); + return nullptr; } Operand *X64MPIsel::SelectCacosf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { - CHECK_FATAL(false, "NIY"); + ASSERT(false, "NIY"); + return nullptr; } Operand *X64MPIsel::SelectCatanf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { - CHECK_FATAL(false, "NIY"); + ASSERT(false, "NIY"); + return nullptr; } Operand *X64MPIsel::SelectCexpf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { - CHECK_FATAL(false, "NIY"); + ASSERT(false, "NIY"); + return nullptr; } Operand *X64MPIsel::SelectClogf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { - CHECK_FATAL(false, "NIY"); + ASSERT(false, "NIY"); + return nullptr; } Operand *X64MPIsel::SelectClog10f(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { - 
CHECK_FATAL(false, "NIY"); + ASSERT(false, "NIY"); + return nullptr; } Operand *X64MPIsel::SelectCffs(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { - CHECK_FATAL(false, "NIY"); + ASSERT(false, "NIY"); + return nullptr; } Operand *X64MPIsel::SelectCmemcmp(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { - CHECK_FATAL(false, "NIY"); + ASSERT(false, "NIY"); + return nullptr; } Operand *X64MPIsel::SelectCstrlen(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { - CHECK_FATAL(false, "NIY"); + ASSERT(false, "NIY"); + return nullptr; } Operand *X64MPIsel::SelectCstrcmp(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { - CHECK_FATAL(false, "NIY"); + ASSERT(false, "NIY"); + return nullptr; } Operand *X64MPIsel::SelectCstrncmp(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { - CHECK_FATAL(false, "NIY"); + ASSERT(false, "NIY"); + return nullptr; } Operand *X64MPIsel::SelectCstrchr(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { - CHECK_FATAL(false, "NIY"); + ASSERT(false, "NIY"); + return nullptr; } Operand *X64MPIsel::SelectCstrrchr(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { - CHECK_FATAL(false, "NIY"); + ASSERT(false, "NIY"); + return nullptr; } } -- Gitee From f0f29659bf83b166f911d8cf1d9f3a691811a88e Mon Sep 17 00:00:00 2001 From: William Chen Date: Mon, 9 Jan 2023 15:08:18 -0800 Subject: [PATCH 23/25] Change isel intrinsic assert --- src/mapleall/maple_be/src/cg/isel.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/mapleall/maple_be/src/cg/isel.cpp b/src/mapleall/maple_be/src/cg/isel.cpp index 73ac57e1b7..3da0c78c44 100644 --- a/src/mapleall/maple_be/src/cg/isel.cpp +++ b/src/mapleall/maple_be/src/cg/isel.cpp @@ -558,7 +558,7 @@ Operand *HandleIntrinOp(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { case 
INTRN_C_ctz64: return iSel.SelectCctz(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); default: - CHECK_FATAL_FALSE("NIY, unsupported intrinsicop."); + ASSERT(false, "NIY, unsupported intrinsicop."); return nullptr; } } -- Gitee From f7ec02e21dd656253ac5f772de1c782c1a21f278 Mon Sep 17 00:00:00 2001 From: eching Date: Mon, 9 Jan 2023 19:20:32 -0800 Subject: [PATCH 24/25] Fix X86 BB kind for RangeGoto. Disable integer demotion to fix abort in testcases. --- src/mapleall/maple_be/src/cg/isel.cpp | 9 +-------- src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp | 1 - 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/src/mapleall/maple_be/src/cg/isel.cpp b/src/mapleall/maple_be/src/cg/isel.cpp index 3da0c78c44..5707bb3ce8 100644 --- a/src/mapleall/maple_be/src/cg/isel.cpp +++ b/src/mapleall/maple_be/src/cg/isel.cpp @@ -93,13 +93,6 @@ std::map> fastCvtMappingTableI = { DEF_USE_EXTEND_MAPPING_TBL(16, 32), DEF_USE_EXTEND_MAPPING_TBL(16, 64), DEF_USE_EXTEND_MAPPING_TBL(32, 64), - DEF_USE_EXTEND_MAPPING_TBL(16, 8), /* Truncate Mapping */ - DEF_USE_EXTEND_MAPPING_TBL(32, 8), - DEF_USE_EXTEND_MAPPING_TBL(64, 8), - DEF_USE_EXTEND_MAPPING_TBL(32, 16), - DEF_USE_EXTEND_MAPPING_TBL(64, 16), - DEF_USE_EXTEND_MAPPING_TBL(64, 32), - }; #undef DEF_USE_EXTEND_MAPPING_TBL #undef DEF_EXTEND_MAPPING_TBL @@ -1174,7 +1167,7 @@ void MPISel::SelectIntCvt(RegOperand &resOpnd, Operand &opnd0, PrimType toType, uint32 bitSize = opnd0.GetSize(); PrimType opndType = GetIntegerPrimTypeFromSize(isSigned, bitSize); RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, fromType, opndType); - if (toSize == fromSize) { + if (toSize <= fromSize) { resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(regOpnd0.GetRegisterNumber(), GetPrimTypeBitSize(toType), cgFunc->GetRegTyFromPrimTy(toType)); return; diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp index a34f35d506..688c1d8162 100644 --- 
a/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp @@ -816,7 +816,6 @@ void X64MPIsel::SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); jmpInsn.AddOpndChain(dstMemOpnd); cgFunc->GetCurBB()->AppendInsn(jmpInsn); - cgFunc->SetCurBBKind(BB::kBBIgoto); } Operand *X64MPIsel::SelectAddrof(AddrofNode &expr, const BaseNode &parent) { -- Gitee From 30a520bf27f062c132f2f7d661fd39a127ad8455 Mon Sep 17 00:00:00 2001 From: William Chen Date: Tue, 10 Jan 2023 15:53:44 -0800 Subject: [PATCH 25/25] Fix x86 SelectAbs --- src/mapleall/maple_be/src/cg/isel.cpp | 47 ------------------- .../maple_be/src/cg/x86_64/x64_MPIsel.cpp | 19 +++++++- 2 files changed, 18 insertions(+), 48 deletions(-) diff --git a/src/mapleall/maple_be/src/cg/isel.cpp b/src/mapleall/maple_be/src/cg/isel.cpp index 5707bb3ce8..dd18e59b29 100644 --- a/src/mapleall/maple_be/src/cg/isel.cpp +++ b/src/mapleall/maple_be/src/cg/isel.cpp @@ -1397,53 +1397,6 @@ Operand *MPISel::SelectDepositBits(const DepositbitsNode &node, Operand &opnd0, return &resOpnd; } -Operand *MPISel::SelectAbs(UnaryNode &node, Operand &opnd0, const BaseNode &parent) { - PrimType primType = node.GetPrimType(); - if (IsPrimitiveVector(primType)) { - CHECK_FATAL(false, "NIY"); - } else if (IsPrimitiveFloat(primType)) { - /* - * fabs(x) = x AND 0x7fffffff ffffffff [set sign bit to 0] - */ - const static uint64 kNaN = 0x7fffffffffffffffUL; - const static double kNaNDouble = *(double*)(&kNaN); - const static uint64 kNaNf = 0x7fffffffUL; - const static double kNaNFloat = *(double*)(&kNaNf); - CHECK_FATAL(primType == PTY_f64 || primType == PTY_f32, "niy"); - - double mask = primType == PTY_f64 ? 
kNaNDouble : kNaNFloat; - MIRDoubleConst *c = cgFunc->GetMemoryPool()->New(mask, - *GlobalTables::GetTypeTable().GetTypeTable().at(PTY_f64)); - Operand *opnd1 = SelectFloatingConst(*c, PTY_f64, parent); - - RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), - cgFunc->GetRegTyFromPrimTy(primType)); - SelectBand(resOpnd, opnd0, *opnd1, primType); - return &resOpnd; - } else if (IsUnsignedInteger(primType)) { - return &opnd0; - } else { - /* - * abs(x) = (x XOR y) - y - * y = x >>> (bitSize - 1) - */ - uint32 bitSize = GetPrimTypeBitSize(primType); - CHECK_FATAL(bitSize == k64BitSize || bitSize == k32BitSize, "only support 32-bits or 64-bits"); - RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType); - ImmOperand &immOpnd = cgFunc->GetOpndBuilder()->CreateImm(bitSize, bitSize - 1); - RegOperand ®Opndy = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, - cgFunc->GetRegTyFromPrimTy(primType)); - SelectShift(regOpndy, regOpnd0, immOpnd, OP_ashr, primType, primType); - RegOperand &tmpOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, - cgFunc->GetRegTyFromPrimTy(primType)); - SelectBxor(tmpOpnd, regOpnd0, regOpndy, primType); - RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, - cgFunc->GetRegTyFromPrimTy(primType)); - SelectSub(resOpnd, tmpOpnd, regOpndy, primType); - return &resOpnd; - } -} - Operand *MPISel::SelectAlloca(UnaryNode &node, Operand &opnd0) { ASSERT(node.GetPrimType() == PTY_a64, "wrong type"); PrimType srcType = node.Opnd(0)->GetPrimType(); diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp index 688c1d8162..4196e14a98 100644 --- a/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp @@ -1357,7 +1357,24 @@ Operand *X64MPIsel::SelectAbs(UnaryNode &node, Operand &opnd0, const BaseNode &p if (IsPrimitiveVector(primType)) { CHECK_FATAL(false, "NIY"); } else if (IsPrimitiveFloat(primType)) 
{ - CHECK_FATAL(false, "NIY"); + /* + * fabs(x) = x AND 0x7fffffff ffffffff [set sign bit to 0] + */ + const static uint64 kNaN = 0x7fffffffffffffffUL; + const static double kNaNDouble = *(double*)(&kNaN); + const static uint64 kNaNf = 0x7fffffffUL; + const static double kNaNFloat = *(double*)(&kNaNf); + CHECK_FATAL(primType == PTY_f64 || primType == PTY_f32, "niy"); + + double mask = primType == PTY_f64 ? kNaNDouble : kNaNFloat; + MIRDoubleConst *c = cgFunc->GetMemoryPool()->New(mask, + *GlobalTables::GetTypeTable().GetTypeTable().at(PTY_f64)); + Operand *opnd1 = SelectFloatingConst(*c, PTY_f64, parent); + + RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + SelectBand(resOpnd, opnd0, *opnd1, primType); + return &resOpnd; } else if (IsUnsignedInteger(primType)) { return &opnd0; } else { -- Gitee