diff --git a/src/mapleall/maple_be/BUILD.gn b/src/mapleall/maple_be/BUILD.gn index c44614ba37a5b8daebbcee505e1f0e16ffb561a5..1eac55441aafb73b9ff328538ba9f6cbef074fd0 100644 --- a/src/mapleall/maple_be/BUILD.gn +++ b/src/mapleall/maple_be/BUILD.gn @@ -1,5 +1,5 @@ # -# Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. +# Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. # # OpenArkCompiler is licensed under Mulan PSL v2. # You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -130,6 +130,8 @@ src_libcgaarch64 = [ "src/cg/aarch64/aarch64_cfgo.cpp", "src/cg/aarch64/aarch64_isolate_fastpath.cpp", "src/cg/aarch64/aarch64_rematerialize.cpp", + "src/cg/aarch64/aarch64_MPISel.cpp", + "src/cg/aarch64/aarch64_standardize.cpp", ] src_libcgx86phases = [ diff --git a/src/mapleall/maple_be/CMakeLists.txt b/src/mapleall/maple_be/CMakeLists.txt index b47566449d84619ba230d89fe7adaf97b42b0b1c..f68aa12617f775ccb25605d60eefb1b515d08a9a 100755 --- a/src/mapleall/maple_be/CMakeLists.txt +++ b/src/mapleall/maple_be/CMakeLists.txt @@ -106,6 +106,8 @@ if(${TARGET} STREQUAL "aarch64" OR ${TARGET} STREQUAL "aarch64_ilp32") src/cg/aarch64/aarch64_pgo_gen.cpp src/cg/aarch64/aarch64_isolate_fastpath.cpp src/cg/aarch64/aarch64_rematerialize.cpp + src/cg/aarch64/aarch64_MPISel.cpp + src/cg/aarch64/aarch64_standardize.cpp src/cg/cfi_generator.cpp src/cg/cfgo.cpp src/cg/local_opt.cpp diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_MPISel.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_MPISel.h index aac9a9bd877bf032971be7dfea369b828e65b5c2..1b6ea1a2bdb027d8dc790a81c4667cac2336b837 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_MPISel.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_MPISel.h @@ -12,3 +12,124 @@ * FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. 
*/ + +#ifndef MAPLEBE_INCLUDE_AARCH64_MPISEL_H +#define MAPLEBE_INCLUDE_AARCH64_MPISEL_H + +#include "isel.h" +#include "aarch64_isa.h" +#include "aarch64_call_conv.h" + +namespace maplebe { +class AArch64MPIsel : public MPISel { + public: + AArch64MPIsel(MemPool &mp, AbstractIRBuilder &aIRBuilder, CGFunc &f) : MPISel(mp, aIRBuilder, f) {} + ~AArch64MPIsel() override = default; + + void HandleFuncExit() const override; + void SelectReturn(NaryStmtNode &retNode) override; + void SelectReturn(bool noOpnd) override; + void SelectCall(CallNode &callNode) override; + void SelectIcall(IcallNode &icallNode, Operand &opnd0) override; + Operand &ProcessReturnReg(PrimType primType, int32 sReg) override; + Operand &GetTargetRetOperand(PrimType primType, int32 sReg) override; + Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent) override; + Operand *SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) override; + Operand *SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) override; + Operand *SelectFloatingConst(MIRConst &floatingConst, PrimType primType, const BaseNode &parent) const override; + void SelectGoto(GotoNode &stmt) override; + void SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) override; + void SelectAggIassign(IassignNode &stmt, Operand &addrOpnd, Operand &opndRhs) override; + void SelectAggDassign(maplebe::MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &rOpnd, const DassignNode &s) override; + void SelectAggCopy(MemOperand &lhs, MemOperand &rhs, uint32 copySize) override; + void SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) override; + void SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode) override; + void SelectIgoto(Operand &opnd0) override; + Operand *SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectMpy(BinaryNode &node, 
Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectSelect(TernaryNode &expr, Operand &cond, Operand &trueOpnd, Operand &falseOpnd, + const BaseNode &parent) override; + Operand *SelectStrLiteral(ConststrNode &constStr) override; + Operand *SelectExtractbits(const BaseNode &parent, ExtractbitsNode &node, Operand &opnd0) override; + void SelectIntAggCopyReturn(MemOperand &symbolMem, uint64 aggSize) override; + /* Create the operand interface directly */ + MemOperand &CreateMemOpndOrNull(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr, int64 offset = 0); + Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCctz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCclz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCsin(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCsinh(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCasin(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCcos(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCcosh(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCacos(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCatan(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCexp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectClog(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectClog10(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCsinf(IntrinsicopNode &node, 
Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCsinhf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCasinf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCcosf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCcoshf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCacosf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCatanf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCexpf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectClogf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectClog10f(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCffs(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCmemcmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrlen(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrcmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrncmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrchr(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrrchr(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectAbs(UnaryNode &node, Operand &opnd0, const BaseNode &parent) override; + void SelectAsm(AsmNode &node) override; + private: + MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId = 0, + RegOperand *baseReg = nullptr) override; + MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uint32 opndSize, int64 offset) const override; + 
void SelectCvtFloat2Float(Operand &resOpnd, Operand &srcOpnd, PrimType fromType, PrimType toType) override; + void SelectCvtFloat2Int(Operand &resOpnd, Operand &srcOpnd, PrimType itype, PrimType ftype) override; + Insn &AppendCall(AArch64MOP_t mOp, Operand &targetOpnd, + ListOperand &paramOpnds, ListOperand &retOpnds); + void SelectCalleeReturn(MIRType *retType, ListOperand &retOpnds); + + /* Inline function implementation of va_start */ + void GenCVaStartIntrin(RegOperand &opnd, uint32 stkSize); + + /* Subclass private instruction selector function */ + void SelectCVaStart(const IntrinsiccallNode &intrnNode); + void SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds); + void SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); + void SelectCmp(Operand &opnd0, Operand &opnd1, PrimType primType); + void SelectCmpResult(RegOperand &resOpnd, Opcode opCode, PrimType primType, PrimType primOpndType); + Operand *SelectDivRem(RegOperand &opnd0, RegOperand &opnd1, PrimType primType, Opcode opcode); + RegOperand &GetTargetStackPointer(PrimType primType) override; + RegOperand &GetTargetBasicPointer(PrimType primType) override; + std::tuple GetMemOpndInfoFromAggregateNode(BaseNode &argExpr); + void SelectParmListForAggregate(BaseNode &argExpr, AArch64CallConvImpl &parmLocator, bool isArgUnused); + void CreateCallStructParamPassByReg(const MemOperand &memOpnd, regno_t regNo, uint32 parmNum); + void CreateCallStructParamPassByStack(const MemOperand &addrOpnd, uint32 symSize, int32 baseOffset); + void SelectAggCopyReturn(const MIRSymbol &symbol, MIRType &symbolType, uint64 symbolSize); + uint32 GetAggCopySize(uint32 offset1, uint32 offset2, uint32 alignment) const; + bool IsParamStructCopy(const MIRSymbol &symbol); + bool IsSymbolRequireIndirection(const MIRSymbol &symbol) override; + void SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override; + Operand *SelectIntrinsicOpWithOneParam(IntrinsicopNode 
&intrnNode, std::string name, Operand &opnd0, + const BaseNode &parent); + void SelectLibCallNoReturn(const std::string &funcName, std::vector &opndVec, PrimType primType); + void SelectLibCallNArg(const std::string &funcName, std::vector &opndVec, + std::vector pt); + RegOperand *PrepareMemcpyParm(MemOperand &memOperand, MOperator mOp); + RegOperand *PrepareMemcpyParm(uint64 copySize); + + /* save param pass by reg */ + std::vector> paramPassByReg; +}; +} + +#endif /* MAPLEBE_INCLUDE_AARCH64_MPISEL_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h index d6dd765e972b7b8012116386a7cd5ae2c67ae355..22ec46eeaf05170aca7b1132d3defe3c014bae6a 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -32,6 +32,8 @@ #include "aarch64_cfgo.h" #include "aarch64_rematerialize.h" #include "aarch64_pgo_gen.h" +#include "aarch64_MPISel.h" +#include "aarch64_standardize.h" namespace maplebe { constexpr int64 kShortBRDistance = (8 * 1024); @@ -213,6 +215,12 @@ class AArch64CG : public CG { Rematerializer *CreateRematerializer(MemPool &mp) const override { return mp.New(); } + MPISel *CreateMPIsel(MemPool &mp, AbstractIRBuilder &aIRBuilder, CGFunc &f) const override { + return mp.New(mp, aIRBuilder, f); + } + Standardize *CreateStandardize(MemPool &mp, CGFunc &f) const override { + return mp.New(f); + } /* Return the copy operand id of reg1 if it is an insn who just do copy from reg1 to reg2. * i. mov reg2, reg1 * ii. 
add/sub reg2, reg1, 0/zero register diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h index 260343228a732b0887cd18633b390323fff32898..7a00815d01853805f2c9736a2e3012a6f4739eab 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h @@ -67,6 +67,8 @@ class AArch64CGFunc : public CGFunc { return refCount; } + void Link2ISel(MPISel *p) override; + int32 GetBeginOffset() const { return beginOffset; } @@ -75,13 +77,13 @@ class AArch64CGFunc : public CGFunc { MOperator PickMovInsn(const RegOperand &lhs, const RegOperand &rhs) const; regno_t NewVRflag() override { - ASSERT(maxRegCount > kRFLAG, "CG internal error."); + ASSERT(GetMaxRegNum() > kRFLAG, "CG internal error."); constexpr uint8 size = 4; - if (maxRegCount <= kRFLAG) { - maxRegCount += (kRFLAG + kVRegisterNumber); - vRegTable.resize(maxRegCount); + if (GetMaxRegNum() <= kRFLAG) { + IncMaxRegNum(kRFLAG + kVRegisterNumber); + vReg.VRegTableResize(GetMaxRegNum()); } - new (&vRegTable[kRFLAG]) VirtualRegNode(kRegTyCc, size); + vReg.VRegTableValuesSet(kRFLAG, kRegTyCc, size); return kRFLAG; } @@ -89,6 +91,7 @@ class AArch64CGFunc : public CGFunc { RegOperand &GetOrCreateResOperand(const BaseNode &parent, PrimType primType); MIRStructType *GetLmbcStructArgType(BaseNode &stmt, size_t argNo) const; + void HandleFuncCfg(CGCFG *cfg) override; void IntrinsifyGetAndAddInt(ListOperand &srcOpnds, PrimType pty); void IntrinsifyGetAndSetInt(ListOperand &srcOpnds, PrimType pty); void IntrinsifyCompareAndSwapInt(ListOperand &srcOpnds, PrimType pty); @@ -100,6 +103,7 @@ class AArch64CGFunc : public CGFunc { void HandleRetCleanup(NaryStmtNode &retNode) override; void MergeReturn() override; RegOperand *ExtractNewMemBase(const MemOperand &memOpnd); + Operand *HandleExpr(const BaseNode &parent, BaseNode &expr); void SelectDassign(DassignNode &stmt, Operand &opnd0) override; 
void SelectDassignoff(DassignoffNode &stmt, Operand &opnd0) override; void SelectRegassign(RegassignNode &stmt, Operand &opnd0) override; @@ -116,7 +120,7 @@ class AArch64CGFunc : public CGFunc { bool LmbcSmallAggForCall(BlkassignoffNode &bNode, const Operand *src, std::vector **parmList); bool GetNumReturnRegsForIassignfpoff(MIRType &rType, PrimType &primType, uint32 &numRegs); void GenIassignfpoffStore(Operand &srcOpnd, int32 offset, uint32 byteSize, PrimType primType); - void SelectAggDassign(DassignNode &stmt) override; + void SelectAggDassign(const DassignNode &stmt) override; void SelectIassign(IassignNode &stmt) override; void SelectIassignoff(IassignoffNode &stmt) override; void SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) override; @@ -126,6 +130,7 @@ class AArch64CGFunc : public CGFunc { void SelectReturnSendOfStructInRegs(BaseNode *x) override; void SelectReturn(Operand *opnd0) override; void SelectIgoto(Operand *opnd0) override; + void SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, bool isCallNative = false); void SelectCondGoto(CondGotoNode &stmt, Operand &opnd0, Operand &opnd1) override; void SelectCondGoto(LabelOperand &targetOpnd, Opcode jmpOp, Opcode cmpOp, Operand &origOpnd0, Operand &origOpnd1, PrimType primType, bool signedCond); @@ -135,6 +140,7 @@ class AArch64CGFunc : public CGFunc { void SelectCall(CallNode &callNode) override; void SelectIcall(IcallNode &icallNode, Operand &srcOpnd) override; void SelectIntrinCall(IntrinsiccallNode &intrinsicCallNode) override; + Operand *SelectAArch64ffs(Operand &argOpnd, PrimType argType); Operand *SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrnNode, std::string name) override; Operand *SelectIntrinsicOpWithNParams(IntrinsicopNode &intrnNode, PrimType retType, const std::string &name) override; Operand *SelectCclz(IntrinsicopNode &intrnNode) override; @@ -303,7 +309,6 @@ class AArch64CGFunc : public CGFunc { LabelOperand &GetOrCreateLabelOperand(LabelIdx labIdx) override; 
LabelOperand &GetOrCreateLabelOperand(BB &bb) override; uint32 GetAggCopySize(uint32 offset1, uint32 offset2, uint32 alignment) const; - RegOperand *SelectVectorAddLong(PrimType rType, Operand *o1, Operand *o2, PrimType otyp, bool isLow) override; RegOperand *SelectVectorAddWiden(Operand *o1, PrimType otyp1, Operand *o2, PrimType otyp2, bool isLow) override; RegOperand *SelectVectorAbs(PrimType rType, Operand *o1) override; @@ -347,9 +352,14 @@ class AArch64CGFunc : public CGFunc { RegOperand *SelectVectorMovNarrow(PrimType rType, Operand *opnd, PrimType oType) override; RegOperand *SelectVectorIntrinsics(const IntrinsicopNode &intrinsicOp) override; + void SelectCvtFloat2Float(Operand &resOpnd, Operand &srcOpnd, PrimType fromType, PrimType toType); + void SelectCvtFloat2Int(Operand &resOpnd, Operand &srcOpnd, PrimType itype, PrimType ftype); + void SelectCvtInt2Float(Operand &resOpnd, Operand &origOpnd0, PrimType toType, PrimType fromType); void SelectVectorCvt(Operand *res, PrimType rType, Operand *o1, PrimType oType); void SelectStackSave(); void SelectStackRestore(const IntrinsiccallNode &intrnNode); + void SelectCVaStart(const IntrinsiccallNode &intrnNode); + void SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); void PrepareVectorOperands(Operand **o1, PrimType &oty1, Operand **o2, PrimType &oty2); RegOperand *AdjustOneElementVectorOperand(PrimType oType, RegOperand *opnd); @@ -868,16 +878,12 @@ class AArch64CGFunc : public CGFunc { void GenLargeStructCopyForIreadoff(BaseNode &parent, BaseNode &argExpr, int32 &structCopyOffset, size_t argNo); void SelectParmListPreprocessLargeStruct(BaseNode &parent, BaseNode &argExpr, int32 &structCopyOffset, size_t argNo); void SelectParmListPreprocess(StmtNode &naryNode, size_t start, std::set &specialArgs); - void SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, bool isCallNative = false); MOperator SelectExtMopForParmList(PrimType primType); Operand 
*SelectClearStackCallParam(const AddrofNode &expr, int64 &offsetValue); void SelectClearStackCallParmList(const StmtNode &naryNode, ListOperand &srcOpnds, std::vector &stackPostion); void SelectRem(Operand &resOpnd, Operand &lhsOpnd, Operand &rhsOpnd, PrimType primType, bool isSigned, bool is64Bits); void SelectCvtInt2Int(const BaseNode *parent, Operand *&resOpnd, Operand *opnd0, PrimType fromType, PrimType toType); - void SelectCvtFloat2Float(Operand &resOpnd, Operand &srcOpnd, PrimType fromType, PrimType toType); - void SelectCvtFloat2Int(Operand &resOpnd, Operand &srcOpnd, PrimType itype, PrimType ftype); - void SelectCvtInt2Float(Operand &resOpnd, Operand &origOpnd0, PrimType toType, PrimType fromType); Operand *SelectRelationOperator(RelationOperator operatorCode, const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); void SelectRelationOperator(RelationOperator operatorCode, Operand &resOpnd, Operand &opnd0, Operand &opnd1, @@ -885,10 +891,8 @@ class AArch64CGFunc : public CGFunc { MOperator SelectRelationMop(RelationOperator operatorCode, RelationOperatorOpndPattern opndPattern, bool is64Bits, bool isBitmaskImmediate, bool isBitNumLessThan16) const; Operand *SelectMinOrMax(bool isMin, const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); - void SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); Operand *SelectRoundLibCall(RoundType roundType, const TypeCvtNode &node, Operand &opnd0); Operand *SelectRoundOperator(RoundType roundType, const TypeCvtNode &node, Operand &opnd0, const BaseNode &parent); - Operand *SelectAArch64ffs(Operand &argOpnd, PrimType argType); Operand *SelectAArch64align(const IntrinsicopNode &intrnNode, bool isUp /* false for align down */); int64 GetOrCreatSpillRegLocation(regno_t vrNum) { AArch64SymbolAlloc *symLoc = static_cast(GetMemlayout()->GetLocOfSpillRegister(vrNum)); @@ -900,7 +904,6 @@ class AArch64CGFunc : public CGFunc { bool 
GenerateCompareWithZeroInstruction(Opcode jmpOp, Opcode cmpOp, bool is64Bits, PrimType primType, LabelOperand &targetOpnd, Operand &opnd0); void GenCVaStartIntrin(RegOperand &opnd, uint32 stkSize); - void SelectCVaStart(const IntrinsiccallNode &intrnNode); void SelectCAtomicStoreN(const IntrinsiccallNode &intrinsiccall); void SelectCAtomicStore(const IntrinsiccallNode &intrinsiccall); void SelectCAtomicLoad(const IntrinsiccallNode &intrinsiccall); diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def b/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def index 0a725970b7d63a7a3a967d0b5d9b74ab8f113b9e..8540f762b43cfd6dde518eb60882279b0616000d 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under the Mulan PSL v1. * You can use this software according to the terms and conditions of the Mulan PSL v1. 
@@ -22,9 +22,9 @@ DEFINE_MOP(MOP_xmovrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISMOVE,kLtAlu,"mo /* MOP_wmovrr */ DEFINE_MOP(MOP_wmovrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISMOVE,kLtAlu,"mov","0,1",1) /* MOP_wmovri32 */ -DEFINE_MOP(MOP_wmovri32, {&OpndDesc::Reg32ID,&OpndDesc::Imm32},ISMOVE,kLtAlu,"mov","0,1",1) +DEFINE_MOP(MOP_wmovri32, {&OpndDesc::Reg32ID,&OpndDesc::Imm32},ISMOVE,kLtAlu,"mov","0,1",1,IsSingleInstructionMovable32) /* MOP_xmovri64 */ -DEFINE_MOP(MOP_xmovri64, {&OpndDesc::Reg64ID,&OpndDesc::Imm64},ISMOVE,kLtAlu,"mov","0,1",1) +DEFINE_MOP(MOP_xmovri64, {&OpndDesc::Reg64ID,&OpndDesc::Imm64},ISMOVE,kLtAlu,"mov","0,1",1,IsSingleInstructionMovable64) /* MOP_xmovrr_uxtw -- Remove Redundant uxtw -- used in globalopt:UxtwMovPattern */ DEFINE_MOP(MOP_xmovrr_uxtw, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISMOVE,kLtAlu,"mov","0,1",1) diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h index c96ae0bb7fcb5e13bad1b8fe387c39ac7e6dd1e2..4fc63b5795e0d57f052df7b91b15932b1951374c 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
@@ -969,6 +969,29 @@ class RemoveMovingtoSameRegPattern : public CGPeepPattern { } }; +/* + * mov dest1, imm + * mul dest2, reg1, dest1 + * ===> if imm is 2^n + * mov dest1, imm + * lsl dest2, reg1, n + */ +class MulImmToShiftPattern : public CGPeepPattern { + public: + MulImmToShiftPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) + : CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~MulImmToShiftPattern() override = default; + std::string GetPatternName() override { + return "MulImmToShiftPattern"; + } + bool CheckCondition(Insn &insn) override; + void Run(BB &bb, Insn &insn) override; + private: + Insn *movInsn = nullptr; + uint32 shiftVal = 0; + MOperator newMop = MOP_undef; +}; + /* * Combining 2 STRs into 1 stp or 2 LDRs into 1 ldp, when they are * back to back and the [MEM] they access is conjointed. diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def b/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def index 8e443f1a7558371fceb9f80ee6795b937623f59f..6575357ba290dbaefcd0a3b5d235b9188847e52d 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def @@ -15,9 +15,13 @@ ADDTARGETPHASE("layoutstackframe", true); ADDTARGETPHASE("createstartendlabel", true); ADDTARGETPHASE("buildehfunc", !GetMIRModule()->IsCModule()); - ADDTARGETPHASE("handlefunction", true); + ADDTARGETPHASE("handlefunction", !CGOptions::UseNewCg()); + ADDTARGETPHASE("instructionselector", CGOptions::UseNewCg()); + ADDTARGETPHASE("handlecfg", CGOptions::UseNewCg()); + ADDTARGETPHASE("patchlongbranch", CGOptions::UseNewCg() && CGOptions::DoFixLongBranch()); ADDTARGETPHASE("cgprofuse", Options::profileUse); ADDTARGETPHASE("moveargs", true); + ADDTARGETPHASE("instructionstandardize", CGOptions::UseNewCg()); /* SSA PHASES */ ADDTARGETPHASE("cgssaconstruct", CGOptions::DoCGSSA()); ADDTARGETPHASE("cgcopyprop", CGOptions::DoCGSSA()); diff --git 
a/src/mapleall/maple_be/include/cg/aarch64/aarch64_standardize.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_standardize.h new file mode 100644 index 0000000000000000000000000000000000000000..866bce5f439ecd1d54acc049dddd7e3929a9d4d7 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_standardize.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) [2022] Futurewei Technologies, Inc. All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef MAPLEBE_INCLUDE_AARCH64_STANDARDIZE_H +#define MAPLEBE_INCLUDE_AARCH64_STANDARDIZE_H + +#include "standardize.h" + +namespace maplebe { + +enum TargetOperandAction : uint8 { + kAbtractReg, + kAbtractMem, + kAbtractImm, + kAbtractNone, +}; + +struct TargetMopGen { + AArch64MOP_t targetMop; + std::vector targetOpndAction; + std::vector mappingOrder; +}; + +class AbstractIR2Target { + public: + abstract::AbstractMOP_t abstractMop; + std::vector targetMap; +}; + +class AArch64Standardize : public Standardize { + public: + explicit AArch64Standardize(CGFunc &f) : Standardize(f) { + SetAddressMapping(false); + } + + ~AArch64Standardize() override = default; + + private: + void Legalize() override; + void StdzMov(Insn &insn) override; + void StdzStrLdr(Insn &insn) override; + void StdzBasicOp(Insn &insn) override; + void StdzUnaryOp(Insn &insn) override; + void StdzCvtOp(Insn &insn) override; + void StdzShiftOp(Insn &insn) override; + void StdzCommentOp(Insn &insn) override; + + Operand *UpdateRegister(Operand &opnd, std::map &regMap, bool allocate); + void 
TraverseOperands(Insn *insn, std::map &regMap, bool allocate); + Operand *GetInsnResult(Insn *insn); + Insn *HandleTargetImm(Insn *insn, Insn *newInsn, uint32 idx, MOperator targetMop, uint8 order); + void SelectTargetInsn(Insn *insn); +}; +} +#endif /* MAPLEBE_INCLUDE_AARCH64_STANDARDIZE_H */ diff --git a/src/mapleall/maple_be/include/cg/abstract_mmir.def b/src/mapleall/maple_be/include/cg/abstract_mmir.def index 83d5658589087514e49b7a336bc7851739f12da7..acc419fec286734e05873cb7833c42c8453983c7 100644 --- a/src/mapleall/maple_be/include/cg/abstract_mmir.def +++ b/src/mapleall/maple_be/include/cg/abstract_mmir.def @@ -51,17 +51,40 @@ DEFINE_MOP(MOP_zext_rr_64_32, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"zext_r64_r32","",1) DEFINE_MOP(MOP_sext_rr_64_32, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"sext_r64_r32","",1) + /* register truncate */ + DEFINE_MOP(MOP_zext_rr_8_16, {&OpndDesc::Reg8ID,&OpndDesc::Reg16IS},ISABSTRACT|ISCONVERSION,0,"zext_r8_r16","",1) + DEFINE_MOP(MOP_sext_rr_8_16, {&OpndDesc::Reg8ID,&OpndDesc::Reg16IS},ISABSTRACT|ISCONVERSION,0,"sext_r8_r16","",1) + DEFINE_MOP(MOP_zext_rr_8_32, {&OpndDesc::Reg8ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"zext_r8_r32","",1) + DEFINE_MOP(MOP_sext_rr_8_32, {&OpndDesc::Reg8ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"sext_r8_r32","",1) + DEFINE_MOP(MOP_zext_rr_16_32, {&OpndDesc::Reg16ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"zext_r16_r32","",1) + DEFINE_MOP(MOP_sext_rr_16_32, {&OpndDesc::Reg16ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"sext_r16_r32","",1) + + DEFINE_MOP(MOP_zext_rr_8_64, {&OpndDesc::Reg8ID,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"zext_r8_r64","",1) + DEFINE_MOP(MOP_sext_rr_8_64, {&OpndDesc::Reg8ID,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"sext_r8_r64","",1) + DEFINE_MOP(MOP_zext_rr_16_64, {&OpndDesc::Reg16ID,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"zext_r16_r64","",1) + DEFINE_MOP(MOP_sext_rr_16_64, 
{&OpndDesc::Reg16ID,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"sext_r16_r64","",1) + DEFINE_MOP(MOP_zext_rr_32_64, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"zext_r32_r64","",1) + DEFINE_MOP(MOP_sext_rr_32_64, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"sext_r32_r64","",1) + /* int2float conversion */ - DEFINE_MOP(MOP_cvt_fr_u32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"cvt_fr_u32","",1) - DEFINE_MOP(MOP_cvt_fr_u64, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"cvt_fr_u64","",1) - DEFINE_MOP(MOP_cvt_fr_i32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"cvt_fr_i32","",1) - DEFINE_MOP(MOP_cvt_fr_i64, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"cvt_fr_i64","",1) + DEFINE_MOP(MOP_cvt_f32_u32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"cvt_f32_u32","",1) + DEFINE_MOP(MOP_cvt_f64_u32, {&OpndDesc::Reg64FD,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"cvt_f64_u32","",1) + DEFINE_MOP(MOP_cvt_f32_u64, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"cvt_f32_u64","",1) + DEFINE_MOP(MOP_cvt_f64_u64, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"cvt_f64_u64","",1) + DEFINE_MOP(MOP_cvt_f32_i32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"cvt_f32_i32","",1) + DEFINE_MOP(MOP_cvt_f64_i32, {&OpndDesc::Reg64FD,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"cvt_f64_i32","",1) + DEFINE_MOP(MOP_cvt_f32_i64, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"cvt_f32_i64","",1) + DEFINE_MOP(MOP_cvt_f64_i64, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"cvt_f64_i64","",1) /* float2int conversion */ - DEFINE_MOP(MOP_cvt_rf_u32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_rf_u32","",1) - DEFINE_MOP(MOP_cvt_rf_u64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISABSTRACT|ISCONVERSION,0,"cvt_rf_u64","",1) - 
DEFINE_MOP(MOP_cvt_rf_i32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_rf_i32","",1) - DEFINE_MOP(MOP_cvt_rf_i64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISABSTRACT|ISCONVERSION,0,"cvt_rf_i64","",1) + DEFINE_MOP(MOP_cvt_u32_f32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_u32_f32","",1) + DEFINE_MOP(MOP_cvt_u64_f32, {&OpndDesc::Reg64ID,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_u64_f32","",1) + DEFINE_MOP(MOP_cvt_u32_f64, {&OpndDesc::Reg32ID,&OpndDesc::Reg64FS},ISABSTRACT|ISCONVERSION,0,"cvt_u32_f64","",1) + DEFINE_MOP(MOP_cvt_u64_f64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISABSTRACT|ISCONVERSION,0,"cvt_u64_f64","",1) + DEFINE_MOP(MOP_cvt_i32_f32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_i32_f32","",1) + DEFINE_MOP(MOP_cvt_i64_f32, {&OpndDesc::Reg64ID,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_i64_f32","",1) + DEFINE_MOP(MOP_cvt_i32_f64, {&OpndDesc::Reg32ID,&OpndDesc::Reg64FS},ISABSTRACT|ISCONVERSION,0,"cvt_i32_f64","",1) + DEFINE_MOP(MOP_cvt_i64_f64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISABSTRACT|ISCONVERSION,0,"cvt_i64_f64","",1) /* float conversion */ DEFINE_MOP(MOP_cvt_ff_64_32, {&OpndDesc::Reg64FD,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_ff_64_32","",1) @@ -150,4 +173,4 @@ DEFINE_MOP(MOP_not_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISABSTRACT|ISUNARYOP,0,"not_64","",1) /* MOP_comment */ - DEFINE_MOP(MOP_comment, {&OpndDesc::String0S},ISABSTRACT,0,"//","0", 0) \ No newline at end of file + DEFINE_MOP(MOP_comment, {&OpndDesc::String0S},ISABSTRACT|ISCOMMENT,0,"//","0", 0) diff --git a/src/mapleall/maple_be/include/cg/cg_irbuilder.h b/src/mapleall/maple_be/include/cg/cg_irbuilder.h index 794f7868c9010d5d72a9e33b39abf7fe432b54d7..74614c9ae4c763b2a69a0b67f3daaaaff642090e 100644 --- a/src/mapleall/maple_be/include/cg/cg_irbuilder.h +++ b/src/mapleall/maple_be/include/cg/cg_irbuilder.h @@ -16,6 +16,7 @@ #ifndef MAPLEBE_INCLUDE_CG_IRBUILDER_H #define 
MAPLEBE_INCLUDE_CG_IRBUILDER_H +#include "reg_info.h" #include "insn.h" #include "operand.h" @@ -56,14 +57,17 @@ class InsnBuilder { uint32 createdInsnNum = 0; }; -constexpr uint32 kBaseVirtualRegNO = 200; /* avoid conflicts between virtual and physical */ class OperandBuilder { public: - explicit OperandBuilder(MemPool &mp, uint32 mirPregNum = 0) : alloc(&mp), virtualRegNum(mirPregNum) {} + explicit OperandBuilder(MemPool &mp, uint32 mirPregNum = 0) + : alloc(&mp) { + virtualReg.SetCount(mirPregNum); + } /* create an operand in cgfunc when no mempool is supplied */ ImmOperand &CreateImm(uint32 size, int64 value, MemPool *mp = nullptr); ImmOperand &CreateImm(const MIRSymbol &symbol, int64 offset, int32 relocs, MemPool *mp = nullptr); + OfstOperand &CreateOfst(int64 offset, uint32 size, MemPool *mp = nullptr); MemOperand &CreateMem(uint32 size, MemPool *mp = nullptr); MemOperand &CreateMem(RegOperand &baseOpnd, int64 offset, uint32 size, MemPool *mp = nullptr); MemOperand &CreateMem(uint32 size, RegOperand &baseOpnd, ImmOperand &offImm, MemPool *mp = nullptr); @@ -79,14 +83,14 @@ class OperandBuilder { CommentOperand &CreateComment(const MapleString &s, MemPool *mp = nullptr); uint32 GetCurrentVRegNum() const { - return virtualRegNum; + return virtualReg.GetCount(); } protected: MapleAllocator alloc; private: - uint32 virtualRegNum = 0; + VregInfo virtualReg; /* reg bank for multiple use */ }; diff --git a/src/mapleall/maple_be/include/cg/cg_option.h b/src/mapleall/maple_be/include/cg/cg_option.h index af411ca7da34a16b17b57ae9066ad43d426564ee..0d4267061a015038457c39e1403b89efb3b18da6 100644 --- a/src/mapleall/maple_be/include/cg/cg_option.h +++ b/src/mapleall/maple_be/include/cg/cg_option.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. 
* You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -650,6 +650,22 @@ class CGOptions { return doCFGO; } + static void EnableNewCg() { + useNewCg = true; + } + + static void DisableNewCg() { + useNewCg = false; + } + + static bool UseNewCg() { + return useNewCg; + } + + static bool DoFixLongBranch() { + return CGOptions::GetInstance().GetOptimizeLevel() == kLevel0; + } + static void EnableRegSavesOpt() { doRegSavesOpt = true; } @@ -673,6 +689,7 @@ class CGOptions { static bool UseSsaPreSave() { return useSsaPreSave; } + static void EnableSsuPreRestore() { useSsuPreRestore = true; } @@ -1429,6 +1446,7 @@ class CGOptions { static bool doRegSavesOpt; static bool useSsaPreSave; static bool useSsuPreRestore; + static bool useNewCg; static bool dumpOptimizeCommonLog; static bool checkArrayStore; static bool exclusiveEH; diff --git a/src/mapleall/maple_be/include/cg/cg_options.h b/src/mapleall/maple_be/include/cg/cg_options.h index 25b658a067644c0e1bfb835e387c7d44604d6446..ae952ff9c8a5ebd4ec135e573a25798c1637d4c9 100644 --- a/src/mapleall/maple_be/include/cg/cg_options.h +++ b/src/mapleall/maple_be/include/cg/cg_options.h @@ -47,6 +47,7 @@ extern maplecl::Option lsraOptcallee; extern maplecl::Option calleeregsPlacement; extern maplecl::Option ssapreSave; extern maplecl::Option ssupreRestore; +extern maplecl::Option newCg; extern maplecl::Option prepeep; extern maplecl::Option peep; extern maplecl::Option preschedule; diff --git a/src/mapleall/maple_be/include/cg/cgbb.h b/src/mapleall/maple_be/include/cg/cgbb.h index 737fa4d7a9cc55f717d7a71b80f089d82ecd9c62..8443b30979b912426bab12d6efb56ab97e86722d 100644 --- a/src/mapleall/maple_be/include/cg/cgbb.h +++ b/src/mapleall/maple_be/include/cg/cgbb.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. 
* You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -203,6 +203,9 @@ class BB { void RemoveInsnSequence(Insn &insn, const Insn &nextInsn); + /* prepend all insns from bb before insn */ + void InsertBeforeInsn(BB &fromBB, Insn &beforeInsn); + /* append all insns from bb into this bb */ void AppendBBInsns(BB &bb); diff --git a/src/mapleall/maple_be/include/cg/cgfunc.h b/src/mapleall/maple_be/include/cg/cgfunc.h index 279f51f409b9b533abad72b0793baa042623770f..aea6bad341e7f4f27e3f574ba2280887b66e6200 100644 --- a/src/mapleall/maple_be/include/cg/cgfunc.h +++ b/src/mapleall/maple_be/include/cg/cgfunc.h @@ -32,7 +32,6 @@ #include "mir_function.h" #include "debug_info.h" #include "maple_phase_manager.h" - /* Maple MP header */ #include "mempool_allocator.h" @@ -60,33 +59,6 @@ enum SyncAndAtomicOp { kSyncAndAtomicOpNand }; -class VirtualRegNode { - public: - VirtualRegNode() = default; - - VirtualRegNode(RegType type, uint32 size) - : regType(type), size(size), regNO(kInvalidRegNO) {} - - virtual ~VirtualRegNode() = default; - - void AssignPhysicalRegister(regno_t phyRegNO) { - regNO = phyRegNO; - } - - RegType GetType() const { - return regType; - } - - uint32 GetSize() const { - return size; - } - - private: - RegType regType = kRegTyUndef; - uint32 size = 0; /* size in bytes */ - regno_t regNO = kInvalidRegNO; /* physical register assigned by register allocation */ -}; - class SpillMemOperandSet { public: explicit SpillMemOperandSet(MapleAllocator &mallocator) : reuseSpillLocMem(mallocator.Adapter()) {} @@ -114,6 +86,8 @@ class SpillMemOperandSet { MapleSet reuseSpillLocMem; }; +class MPISel; + #if defined(TARGARM32) && TARGARM32 class LiveRange; #endif /* TARGARM32 */ @@ -131,6 +105,8 @@ class CGFunc { StackMemPool &stackMp, MapleAllocator &allocator, uint32 funcId); virtual ~CGFunc(); + void InitFactory(); + const std::string &GetName() const { return func.GetName(); } @@ -221,8 +197,14 @@ class CGFunc { void SetCleanupLabel(BB 
&cleanupEntry); bool ExitbbNotInCleanupArea(const BB &bb) const; uint32 GetMaxRegNum() const { - return maxRegCount; + return vReg.GetMaxRegCount(); }; + void SetMaxRegNum(uint32 num) { + vReg.SetMaxRegCount(num); + } + void IncMaxRegNum(uint32 num) { + vReg.IncMaxRegCount(num); + } void DumpCFG() const; void DumpBBInfo(const BB *bb) const; void DumpCGIR() const; @@ -240,7 +222,7 @@ class CGFunc { virtual void SelectAbort() = 0; virtual void SelectAssertNull(UnaryStmtNode &stmt) = 0; virtual void SelectAsm(AsmNode &node) = 0; - virtual void SelectAggDassign(DassignNode &stmt) = 0; + virtual void SelectAggDassign(const DassignNode &stmt) = 0; virtual void SelectIassign(IassignNode &stmt) = 0; virtual void SelectIassignoff(IassignoffNode &stmt) = 0; virtual void SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) = 0; @@ -428,6 +410,8 @@ class CGFunc { virtual RegOperand *SelectVectorMovNarrow(PrimType rType, Operand *opnd, PrimType oType) = 0; virtual RegOperand *SelectVectorIntrinsics(const IntrinsicopNode &intrinsicNode) = 0; + virtual void HandleFuncCfg(CGCFG *cfg) { AddCommonExitBB(); } + /* For ebo issue. */ virtual Operand *GetTrueOpnd() { return nullptr; @@ -441,6 +425,7 @@ class CGFunc { LabelIdx CreateLabel(); RegOperand *GetVirtualRegisterOperand(regno_t vRegNO) { + std::unordered_map &vRegOperandTable = vReg.vRegOperandTable; auto it = vRegOperandTable.find(vRegNO); return it == vRegOperandTable.end() ? nullptr : it->second; } @@ -461,27 +446,7 @@ class CGFunc { if (CGOptions::UseGeneralRegOnly()) { CHECK_FATAL(regType != kRegTyFloat, "cannot use float | SIMD register with --general-reg-only"); } - /* when vRegCount reach to maxRegCount, maxRegCount limit adds 80 every time */ - /* and vRegTable increases 80 elements. 
*/ - if (vRegCount >= maxRegCount) { - ASSERT(vRegCount < maxRegCount + 1, "MAINTIAN FAILED"); - maxRegCount += kRegIncrStepLen; - vRegTable.resize(maxRegCount); - } -#if TARGAARCH64 || TARGX86_64 || TARGRISCV64 - if (size < k4ByteSize) { - size = k4ByteSize; - } -#if TARGAARCH64 - /* cannot handle 128 size register */ - if (regType == kRegTyInt && size > k8ByteSize) { - size = k8ByteSize; - } -#endif - ASSERT(size == k4ByteSize || size == k8ByteSize || size == k16ByteSize, "check size"); -#endif - new (&vRegTable[vRegCount]) VirtualRegNode(regType, size); - return vRegCount++; + return vReg.GetNextVregNO(regType, size); } virtual regno_t NewVRflag() { @@ -535,17 +500,17 @@ class CGFunc { /* return Register Type */ virtual RegType GetRegisterType(regno_t rNum) const { - CHECK(rNum < vRegTable.size(), "index out of range in GetVRegSize"); - return vRegTable[rNum].GetType(); + CHECK(rNum < vReg.VRegTableSize(), "index out of range in GetVRegSize"); + return vReg.VRegTableGetType(rNum); } #if defined(TARGX86_64) && TARGX86_64 uint32 GetMaxVReg() const { - return vRegCount + opndBuilder->GetCurrentVRegNum(); + return vReg.GetCount() + opndBuilder->GetCurrentVRegNum(); } #else uint32 GetMaxVReg() const { - return vRegCount; + return vReg.GetCount(); } #endif @@ -558,7 +523,7 @@ class CGFunc { } uint32 GetVRegSize(regno_t vregNum) { - CHECK(vregNum < vRegTable.size(), "index out of range in GetVRegSize"); + CHECK(vregNum < vReg.VRegTableSize(), "index out of range in GetVRegSize"); return GetOrCreateVirtualRegisterOperand(vregNum).GetSize() / kBitsPerByte; } @@ -1111,7 +1076,7 @@ class CGFunc { } regno_t GetVirtualRegNOFromPseudoRegIdx(PregIdx idx) const { - return regno_t(idx + firstMapleIrVRegNO); + return regno_t(idx + kBaseVirtualRegNO); } bool GetHasProEpilogue() const { @@ -1262,10 +1227,6 @@ class CGFunc { vregsToPregsMap[vRegNum] = pidx; } - uint32 GetFirstMapleIrVRegNO() const { - return firstMapleIrVRegNO; - } - void SetHasAsm() { hasAsm = true; } @@ -1286,6 
+1247,18 @@ class CGFunc { return needStackProtect; } + virtual void Link2ISel(MPISel *p) { + (void)p; + } + + void SetISel(MPISel *p) { + isel = p; + } + + MPISel *GetISel() { + return isel; + } + MIRPreg *GetPseudoRegFromVirtualRegNO(const regno_t vRegNO, bool afterSSA = false) const { PregIdx pri = afterSSA ? VRegNOToPRegIdx(vRegNO) : GetPseudoRegIdxFromVirtualRegNO(vRegNO); if (pri == -1) { @@ -1303,15 +1276,11 @@ class CGFunc { } protected: - uint32 firstMapleIrVRegNO = 200; /* positioned after physical regs */ uint32 firstNonPregVRegNO; - uint32 vRegCount; /* for assigning a number for each CG virtual register */ + VregInfo vReg; /* for assigning a number for each CG virtual register */ uint32 ssaVRegCount = 0; /* vreg count in ssa */ - uint32 maxRegCount; /* for the current virtual register number limit */ size_t lSymSize; /* size of local symbol table imported */ - MapleVector vRegTable; /* table of CG's virtual registers indexed by v_reg no */ MapleVector bbVec; - MapleUnorderedMap vRegOperandTable; MapleUnorderedMap pRegSpillMemOperands; MapleUnorderedMap spillRegMemOperands; MapleUnorderedMap reuseSpillLocMem; @@ -1324,7 +1293,6 @@ class CGFunc { uint32 totalInsns = 0; int32 structCopySize = 0; int32 maxParamStackSize = 0; - static constexpr int kRegIncrStepLen = 80; /* reg number increate step length */ bool hasVLAOrAlloca = false; bool hasAlloca = false; @@ -1349,7 +1317,7 @@ class CGFunc { PregIdx GetPseudoRegIdxFromVirtualRegNO(const regno_t vRegNO) const { if (IsVRegNOForPseudoRegister(vRegNO)) { - return PregIdx(vRegNO - firstMapleIrVRegNO); + return PregIdx(vRegNO - kBaseVirtualRegNO); } return VRegNOToPRegIdx(vRegNO); } @@ -1357,7 +1325,7 @@ class CGFunc { bool IsVRegNOForPseudoRegister(regno_t vRegNum) const { /* 0 is not allowed for preg index */ uint32 n = static_cast(vRegNum); - return (firstMapleIrVRegNO < n && n < firstNonPregVRegNO); + return (kBaseVirtualRegNO < n && n < firstNonPregVRegNO); } PregIdx VRegNOToPRegIdx(regno_t vRegNum) 
const { @@ -1369,7 +1337,7 @@ class CGFunc { } VirtualRegNode &GetVirtualRegNodeFromPseudoRegIdx(PregIdx idx) { - return vRegTable.at(GetVirtualRegNOFromPseudoRegIdx(idx)); + return vReg.VRegTableElementGet(GetVirtualRegNOFromPseudoRegIdx(idx)); } PrimType GetTypeFromPseudoRegIdx(PregIdx idx) { @@ -1489,12 +1457,17 @@ class CGFunc { uint8 stackProtectInfo = 0; bool needStackProtect = false; uint32 priority = 0; + + /* cross reference isel class pointer */ + MPISel *isel = nullptr; }; /* class CGFunc */ MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgLayoutFrame, maplebe::CGFunc) MAPLE_FUNC_PHASE_DECLARE_END MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgHandleFunction, maplebe::CGFunc) MAPLE_FUNC_PHASE_DECLARE_END +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPatchLongBranch, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgFixCFLocOsft, maplebe::CGFunc) MAPLE_FUNC_PHASE_DECLARE_END MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgGenCfi, maplebe::CGFunc) diff --git a/src/mapleall/maple_be/include/cg/immvalid.def b/src/mapleall/maple_be/include/cg/immvalid.def index 3524d92bce0ea0d75e54ebfe4311fbf2c3862884..cd631230591a1553a722f47bce5ce0dbd90f9af2 100644 --- a/src/mapleall/maple_be/include/cg/immvalid.def +++ b/src/mapleall/maple_be/include/cg/immvalid.def @@ -1,3 +1,17 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+*/ static std::set ValidBitmaskImmSet = { #include "valid_bitmask_imm.txt" }; @@ -14,6 +28,24 @@ bool IsBitSizeImmediate(uint64 val, uint32 bitLen, uint32 nLowerZeroBits) { return (mask2 & val) == 0UL && (mask1 & ((static_cast(val)) >> nLowerZeroBits)) == 0UL; }; +/* This is a copy from "operand.cpp", temporary fix for me_slp.cpp usage of this file */ +/* was IsMoveWidableImmediate */ +bool IsMoveWidableImmediateCopy(uint64 val, uint32 bitLen) { + if (bitLen == k64BitSize) { + /* 0xHHHH000000000000 or 0x0000HHHH00000000, return true */ + if (((val & ((static_cast(0xffff)) << k48BitSize)) == val) || + ((val & ((static_cast(0xffff)) << k32BitSize)) == val)) { + return true; + } + } else { + /* get lower 32 bits */ + val &= static_cast(0xffffffff); + } + /* 0x00000000HHHH0000 or 0x000000000000HHHH, return true */ + return ((val & ((static_cast(0xffff)) << k16BitSize)) == val || + (val & ((static_cast(0xffff)) << 0)) == val); +} + bool IsBitmaskImmediate(uint64 val, uint32 bitLen) { ASSERT(val != 0, "IsBitmaskImmediate() don's accept 0 or -1"); ASSERT(static_cast(val) != -1, "IsBitmaskImmediate() don's accept 0 or -1"); @@ -77,6 +109,18 @@ bool IsBitmaskImmediate(uint64 val, uint32 bitLen) { #endif } +bool IsSingleInstructionMovable32(int64 value) { + return (IsMoveWidableImmediateCopy(static_cast(value), 32) || + IsMoveWidableImmediateCopy(~static_cast(value), 32) || + IsBitmaskImmediate(static_cast(value), 32)); +} + +bool IsSingleInstructionMovable64(int64 value) { + return (IsMoveWidableImmediateCopy(static_cast(value), 64) || + IsMoveWidableImmediateCopy(~static_cast(value), 64) || + IsBitmaskImmediate(static_cast(value), 64)); +} + bool Imm12BitValid(int64 value) { bool result = IsBitSizeImmediate(static_cast(value), kMaxImmVal12Bits, 0); // for target linux-aarch64-gnu diff --git a/src/mapleall/maple_be/include/cg/isa.h b/src/mapleall/maple_be/include/cg/isa.h index c988422f9a17c8068c348c52abb04b5aed5f3560..4b8f306617649c03d116e1168e9a67a268047b51 100644 --- 
a/src/mapleall/maple_be/include/cg/isa.h +++ b/src/mapleall/maple_be/include/cg/isa.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -51,6 +51,7 @@ enum MopProperty : maple::uint8 { kInsnInlineAsm, kInsnSpecialIntrisic, kInsnIsNop, + kInsnIsComment, }; using regno_t = uint32_t; #define ISABSTRACT 1ULL @@ -83,6 +84,7 @@ using regno_t = uint32_t; #define INLINEASM (1ULL << kInsnInlineAsm) #define SPINTRINSIC (1ULL << kInsnSpecialIntrisic) #define ISNOP (1ULL << kInsnIsNop) +#define ISCOMMENT (1ULL << kInsnIsComment) constexpr maplebe::regno_t kInvalidRegNO = 0; /* @@ -289,6 +291,9 @@ struct InsnDesc { bool IsSpecialIntrinsic() const { return (properties & SPINTRINSIC) != 0; } + bool IsComment() const { + return properties & ISCOMMENT; + } MOperator GetOpc() const { return opc; } diff --git a/src/mapleall/maple_be/include/cg/isel.h b/src/mapleall/maple_be/include/cg/isel.h index 386051a2d0bf45aa1cc6191658ac45357af7d2fb..2293ab80963e038f33460dab897af248bbb3a946 100644 --- a/src/mapleall/maple_be/include/cg/isel.h +++ b/src/mapleall/maple_be/include/cg/isel.h @@ -54,9 +54,9 @@ class MPISel { Operand* SelectSub(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); Operand* SelectNeg(const UnaryNode &node, Operand &opnd0, const BaseNode &parent); Operand* SelectCvt(const BaseNode &parent, const TypeCvtNode &node, Operand &opnd0); - Operand* SelectExtractbits(const BaseNode &parent, const ExtractbitsNode &node, Operand &opnd0); Operand *SelectDepositBits(const DepositbitsNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); - virtual Operand *SelectAbs(UnaryNode &node, Operand &opnd0); + virtual Operand* SelectExtractbits(const BaseNode &parent, ExtractbitsNode &node, 
Operand &opnd0); + virtual Operand *SelectAbs(UnaryNode &node, Operand &opnd0, const BaseNode &parent) = 0; Operand *SelectAlloca(UnaryNode &node, Operand &opnd0); Operand *SelectCGArrayElemAdd(BinaryNode &node, const BaseNode &parent); ImmOperand *SelectIntConst(MIRIntConst &intConst, PrimType primType) const; @@ -67,8 +67,8 @@ class MPISel { void SelectShift(Operand &resOpnd, Operand &opnd0, Operand &opnd1, Opcode shiftDirect, PrimType opnd0Type, PrimType opnd1Type); void SelectBand(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); - virtual void SelectReturn(NaryStmtNode &retNode, Operand &opnd) = 0; - virtual void SelectReturn() = 0; + virtual void SelectReturn(NaryStmtNode &retNode) = 0; + virtual void SelectReturn(bool noOpnd) = 0; virtual void SelectIntAggCopyReturn(MemOperand &symbolMem, uint64 aggSize) = 0; virtual void SelectAggIassign(IassignNode &stmt, Operand &addrOpnd, Operand &opndRhs) = 0; virtual void SelectAggCopy(MemOperand &lhs, MemOperand &rhs, uint32 copySize) = 0; @@ -78,13 +78,13 @@ class MPISel { virtual void SelectCall(CallNode &callNode) = 0; virtual void SelectIcall(IcallNode &icallNode, Operand &opnd0) = 0; virtual void SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) = 0; - virtual Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; - virtual Operand *SelectFloatingConst(MIRConst &floatingConst, PrimType primType) const = 0; + virtual Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd1, const BaseNode &parent) = 0; + virtual Operand *SelectFloatingConst(MIRConst &floatingConst, PrimType primType, const BaseNode &parent) const = 0; virtual Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent) = 0; virtual Operand *SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) = 0; virtual Operand *SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) = 0; virtual Operand &ProcessReturnReg(PrimType primType, int32 sReg) = 0 ; - virtual void 
SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode, Operand &opnd0) = 0; + virtual void SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode) = 0; Operand *SelectBior(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); Operand *SelectBxor(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); Operand *SelectIread(const BaseNode &parent, const IreadNode &expr, int extraOffset = 0); @@ -98,13 +98,40 @@ class MPISel { virtual Operand *SelectStrLiteral(ConststrNode &constStr) = 0; virtual Operand *SelectCclz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; virtual Operand *SelectCctz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCsin(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCsinh(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCasin(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCcos(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCcosh(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCacos(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCatan(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; virtual Operand *SelectCexp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectClog(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectClog10(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCsinf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCsinhf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCasinf(IntrinsicopNode &node, Operand &opnd0, const 
BaseNode &parent) = 0; + virtual Operand *SelectCcosf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCcoshf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCacosf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCatanf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCexpf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectClogf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectClog10f(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCffs(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCmemcmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCstrlen(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCstrcmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCstrncmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCstrchr(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCstrrchr(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; virtual void SelectAsm(AsmNode &node) = 0; - virtual void SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRhs) = 0; + virtual void SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &rOpnd, const DassignNode &s) = 0; Operand *SelectBnot(const UnaryNode &node, Operand &opnd0, const BaseNode &parent); Operand *SelectMin(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); Operand *SelectMax(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); Operand *SelectRetype(TypeCvtNode 
&node, Operand &opnd0); + void SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); template Operand *SelectLiteral(T &c, MIRFunction &func, uint32 labelIdx) const { @@ -130,10 +157,8 @@ class MPISel { protected: MemPool *isMp; CGFunc *cgFunc; - void SelectCopy(Operand &dest, Operand &src, PrimType toType, PrimType fromType); - void SelectCopy(Operand &dest, Operand &src, PrimType toType); - RegOperand &SelectCopy2Reg(Operand &src, PrimType toType, PrimType fromType); - RegOperand &SelectCopy2Reg(Operand &src, PrimType toType); + void SelectCopy(Operand &dest, Operand &src, PrimType toType, PrimType fromType = PTY_unknown); + RegOperand &SelectCopy2Reg(Operand &src, PrimType toType, PrimType fromType = PTY_unknown); void SelectIntCvt(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType); void SelectCvtInt2Float(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType); void SelectFloatCvt(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType); @@ -143,12 +168,23 @@ class MPISel { MirTypeInfo GetMirTypeInfoFormFieldIdAndMirType(FieldID fieldId, MIRType *mirType); MirTypeInfo GetMirTypeInfoFromMirNode(const BaseNode &node); MemOperand *GetOrCreateMemOpndFromIreadNode(const IreadNode &expr, PrimType primType, int offset); + + virtual void SelectCvtFloat2Float(Operand &resOpnd, Operand &srcOpnd, PrimType fromType, PrimType toType) { + CHECK_FATAL(false, "NYI"); + } + virtual void SelectCvtFloat2Int(Operand &resOpnd, Operand &srcOpnd, PrimType itype, PrimType ftype) { + CHECK_FATAL(false, "NYI"); + } + virtual bool IsSymbolRequireIndirection(const MIRSymbol &symbol) { + return false; + } private: StmtNode *HandleFuncEntry() const; - void HandleFuncExit() const; void SelectDassign(StIdx stIdx, FieldID fieldId, PrimType rhsPType, Operand &opndRhs); void SelectDassignStruct(MIRSymbol &symbol, MemOperand &symbolMem, Operand &opndRhs); - virtual MemOperand &GetOrCreateMemOpndFromSymbol(const 
MIRSymbol &symbol, FieldID fieldId = 0) const = 0; + virtual void HandleFuncExit() const = 0; + virtual MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId = 0, + RegOperand *baseReg = nullptr) = 0; virtual MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uint32 opndSize, int64 offset) const = 0; virtual Operand &GetTargetRetOperand(PrimType primType, int32 sReg) = 0; void SelectBasicOp(Operand &resOpnd, Operand &opnd0, Operand &opnd1, MOperator mOp, PrimType primType); @@ -162,7 +198,6 @@ void SelectBnot(Operand &resOpnd, Operand &opnd0, PrimType primType) const; void SelectBior(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); void SelectExtractbits(RegOperand &resOpnd, RegOperand &opnd0, uint8 bitOffset, uint8 bitSize, PrimType primType); - void SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); virtual RegOperand &GetTargetBasicPointer(PrimType primType) = 0; virtual RegOperand &GetTargetStackPointer(PrimType primType) = 0; void SelectMin(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); diff --git a/src/mapleall/maple_be/include/cg/reg_info.h b/src/mapleall/maple_be/include/cg/reg_info.h index 8a1100c12d0f2f2ea8b9a2b3432484f7f853ca16..1b6797ac6b50f72afa4c32fc0b6cd359a2a543fc 100644 --- a/src/mapleall/maple_be/include/cg/reg_info.h +++ b/src/mapleall/maple_be/include/cg/reg_info.h @@ -20,6 +20,136 @@ namespace maplebe { constexpr size_t kSpillMemOpndNum = 4; +constexpr uint32 kBaseVirtualRegNO = 200; /* avoid conflicts between virtual and physical */ +constexpr uint32 kRegIncrStepLen = 80; /* reg number increase step length */ + +class VirtualRegNode { + public: + VirtualRegNode() = default; + + VirtualRegNode(RegType type, uint32 size) + : regType(type), size(size), regNO(kInvalidRegNO) {} + + virtual ~VirtualRegNode() = default; + + void AssignPhysicalRegister(regno_t phyRegNO) { + regNO = phyRegNO; + } + + RegType GetType() 
const { + return regType; + } + + uint32 GetSize() const { + return size; + } + + private: + RegType regType = kRegTyUndef; + uint32 size = 0; /* size in bytes */ + regno_t regNO = kInvalidRegNO; /* physical register assigned by register allocation */ +}; + +class VregInfo { + public: + /* Only one place to allocate vreg within cg. + 'static' can be removed and initialized here if only allocation is from only one source. */ + static uint32 virtualRegCount; + static uint32 maxRegCount; + static std::vector vRegTable; + static std::unordered_map vRegOperandTable; + static bool initialized; + + VregInfo() { + if (initialized) { + initialized = false; + return; + } + initialized = true; + virtualRegCount = kBaseVirtualRegNO; + maxRegCount = kBaseVirtualRegNO; + vRegTable.clear(); + vRegOperandTable.clear(); + } + + ~VregInfo() = default; + + uint32 GetNextVregNO(RegType type, uint32 size) { + /* when vReg reach to maxRegCount, maxRegCount limit adds 80 every time */ + /* and vRegTable increases 80 elements. */ + if (virtualRegCount >= maxRegCount) { + ASSERT(virtualRegCount < maxRegCount + 1, "MAINTAIN FAILED"); + maxRegCount += kRegIncrStepLen; + VRegTableResize(maxRegCount); + } +#if TARGAARCH64 || TARGX86_64 || TARGRISCV64 + if (size < k4ByteSize) { + size = k4ByteSize; + } +#if TARGAARCH64 + /* cannot handle 128 size register */ + if (type == kRegTyInt && size > k8ByteSize) { + size = k8ByteSize; + } +#endif + ASSERT(size == k4ByteSize || size == k8ByteSize || size == k16ByteSize, "check size"); +#endif + VRegTableValuesSet(virtualRegCount, type, size); + + uint32 temp = virtualRegCount; + ++virtualRegCount; + return temp; + } + void Inc(uint32 v) { + virtualRegCount += v; + } + uint32 GetCount() const { + return virtualRegCount; + } + void SetCount(uint32 v) { + /* Vreg number can only increase. 
*/ + if (virtualRegCount < v) { + virtualRegCount = v; + } + } + + /* maxRegCount related stuff */ + uint32 GetMaxRegCount() const { + return maxRegCount; + } + void SetMaxRegCount(uint32 num) { + maxRegCount = num; + } + void IncMaxRegCount(uint32 num) { + maxRegCount += num; + } + + /* vRegTable related stuff */ + void VRegTableResize(uint32 sz) { + vRegTable.resize(sz); + } + uint32 VRegTableSize() const { + return vRegTable.size(); + } + uint32 VRegTableGetSize(uint32 idx) const { + return vRegTable[idx].GetSize(); + } + RegType VRegTableGetType(uint32 idx) const { + return vRegTable[idx].GetType(); + } + VirtualRegNode &VRegTableElementGet(uint32 idx) { + return vRegTable[idx]; + } + void VRegTableElementSet(uint32 idx, VirtualRegNode *node) { + vRegTable[idx] = *node; + } + void VRegTableValuesSet(uint32 idx, RegType rt, uint32 sz) { + new (&vRegTable[idx]) VirtualRegNode(rt, sz); + } + void VRegOperandTableSet(regno_t regNO, RegOperand *rp) { + vRegOperandTable[regNO] = rp; + } +}; class RegisterInfo { public: diff --git a/src/mapleall/maple_be/include/cg/standardize.h b/src/mapleall/maple_be/include/cg/standardize.h index dac0e2870202861d6e81b987474de902d89a296e..322497a9e3c59479fb846a8ebe95d53ae310d271 100644 --- a/src/mapleall/maple_be/include/cg/standardize.h +++ b/src/mapleall/maple_be/include/cg/standardize.h @@ -41,6 +41,10 @@ class Standardize { void DoStandardize(); + CGFunc *GetCgFunc() { + return cgFunc; + } + protected: void SetAddressMapping(bool needMapping) { needAddrMapping = needMapping; @@ -48,16 +52,17 @@ class Standardize { bool NeedAddressMapping(const Insn &insn) { /* Operand number for two addressing mode is 2 */ /* and 3 for three addressing mode */ - needAddrMapping = (insn.GetOperandSize() > 2) || (insn.IsUnaryOp()); - return needAddrMapping; + return needAddrMapping && ((insn.GetOperandSize() > 2) || (insn.IsUnaryOp())); } private: + virtual void Legalize() {}; virtual void StdzMov(Insn &insn) = 0; virtual void StdzStrLdr(Insn 
&insn) = 0; virtual void StdzBasicOp(Insn &insn) = 0; - virtual void StdzUnaryOp(Insn &insn, CGFunc &cgFunc) = 0; - virtual void StdzCvtOp(Insn &insn, CGFunc &cgFunc) = 0; - virtual void StdzShiftOp(Insn &insn, CGFunc &cgFunc) = 0; + virtual void StdzUnaryOp(Insn &insn) = 0; + virtual void StdzCvtOp(Insn &insn) = 0; + virtual void StdzShiftOp(Insn &insn) = 0; + virtual void StdzCommentOp(Insn &insn) = 0; CGFunc *cgFunc; bool needAddrMapping = false; }; diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_MPISel.h b/src/mapleall/maple_be/include/cg/x86_64/x64_MPISel.h index dba290b0e937cf1b9bb3c0657b54cde8834150db..16751bf2099b974aa41f2a3a12491b20fd63b938 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_MPISel.h +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_MPISel.h @@ -24,8 +24,9 @@ class X64MPIsel : public MPISel { public: X64MPIsel(MemPool &mp, AbstractIRBuilder &aIRBuilder, CGFunc &f) : MPISel(mp, aIRBuilder, f) {} ~X64MPIsel() override = default; - void SelectReturn(NaryStmtNode &retNode, Operand &opnd) override; - void SelectReturn() override; + void HandleFuncExit() const override; + void SelectReturn(NaryStmtNode &retNode) override; + void SelectReturn(bool noOpnd) override; void SelectCall(CallNode &callNode) override; void SelectIcall(IcallNode &icallNode, Operand &opnd0) override; Operand &ProcessReturnReg(PrimType primType, int32 sReg) override; @@ -33,14 +34,14 @@ class X64MPIsel : public MPISel { Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent) override; Operand *SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) override; Operand *SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) override; - Operand *SelectFloatingConst(MIRConst &floatingConst, PrimType primType) const override; + Operand *SelectFloatingConst(MIRConst &floatingConst, PrimType primType, const BaseNode &parent) const override; void SelectGoto(GotoNode &stmt) override; void SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) 
override; void SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd, Operand &opndRhs) override; - void SelectAggDassign(maplebe::MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRhs) override; + void SelectAggDassign(maplebe::MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &rOpnd, const DassignNode &s) override; void SelectAggCopy(MemOperand &lhs, MemOperand &rhs, uint32 copySize) override; void SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) override; - void SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode, Operand &opnd0) override; + void SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode) override; void SelectIgoto(Operand &opnd0) override; Operand *SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; Operand *SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; @@ -55,10 +56,38 @@ class X64MPIsel : public MPISel { Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; Operand *SelectCclz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; Operand *SelectCctz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCsin(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCsinh(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCasin(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCcos(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCcosh(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCacos(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCatan(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; Operand *SelectCexp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + 
Operand *SelectClog(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectClog10(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCsinf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCsinhf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCasinf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCcosf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCcoshf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCacosf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCatanf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCexpf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectClogf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectClog10f(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCffs(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCmemcmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrlen(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrcmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrncmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrchr(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCstrrchr(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectAbs(UnaryNode &node, Operand &opnd0, const BaseNode &parent) override; void SelectAsm(AsmNode &node) override; private: - MemOperand 
&GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId = 0) const override; + MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId = 0, + RegOperand *baseReg = nullptr) override; MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uint32 opndSize, int64 offset) const override; Insn &AppendCall(x64::X64MOP_t mOp, Operand &targetOpnd, ListOperand ¶mOpnds, ListOperand &retOpnds); @@ -80,8 +109,8 @@ class X64MPIsel : public MPISel { RegOperand &GetTargetBasicPointer(PrimType primType) override; std::tuple GetMemOpndInfoFromAggregateNode(BaseNode &argExpr); void SelectParmListForAggregate(BaseNode &argExpr, X64CallConvImpl &parmLocator, bool isArgUnused); - void CreateCallStructParamPassByReg(MemOperand &memOpnd, regno_t regNo, uint32 parmNum); - void CreateCallStructParamPassByStack(MemOperand &addrOpnd, int32 symSize, int32 baseOffset); + void CreateCallStructParamPassByReg(const MemOperand &memOpnd, regno_t regNo, uint32 parmNum); + void CreateCallStructParamPassByStack(const MemOperand &addrOpnd, uint32 symSize, int32 baseOffset); void SelectAggCopyReturn(const MIRSymbol &symbol, MIRType &symbolType, uint64 symbolSize); uint32 GetAggCopySize(uint32 offset1, uint32 offset2, uint32 alignment) const; bool IsParamStructCopy(const MIRSymbol &symbol); diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_abstract_mapping.def b/src/mapleall/maple_be/include/cg/x86_64/x64_abstract_mapping.def index 3880b3cd5a8e21b2afab02dadbb017e65fb4ba67..6f2daf65e1471ab61a2be4e142ac55615a3c7122 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_abstract_mapping.def +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_abstract_mapping.def @@ -97,32 +97,45 @@ DEFINE_MAPPING(abstract::MOP_neg_16, x64::MOP_negw_r) DEFINE_MAPPING(abstract::MOP_neg_32, x64::MOP_negl_r) DEFINE_MAPPING(abstract::MOP_neg_64, x64::MOP_negq_r) -/* CvtOp */ +/* CvtOp expand */ DEFINE_MAPPING(abstract::MOP_zext_rr_16_8, x64::MOP_movzbw_r_r) 
DEFINE_MAPPING(abstract::MOP_sext_rr_16_8, x64::MOP_movsbw_r_r) DEFINE_MAPPING(abstract::MOP_zext_rr_32_8, x64::MOP_movzbl_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_32_8, x64::MOP_movsbl_r_r) DEFINE_MAPPING(abstract::MOP_zext_rr_32_16, x64::MOP_movzwl_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_32_16, x64::MOP_movswl_r_r) -DEFINE_MAPPING(abstract::MOP_zext_rr_64_8, x64::MOP_movzbl_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_64_8, x64::MOP_movzbq_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_64_8, x64::MOP_movsbq_r_r) -DEFINE_MAPPING(abstract::MOP_zext_rr_64_16, x64::MOP_movzwl_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_64_16, x64::MOP_movzwq_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_64_16, x64::MOP_movswq_r_r) DEFINE_MAPPING(abstract::MOP_zext_rr_64_32, x64::MOP_movl_r_r) DEFINE_MAPPING(abstract::MOP_sext_rr_64_32, x64::MOP_movslq_r_r) -DEFINE_MAPPING(abstract::MOP_sext_rr_64_32, x64::MOP_movslq_r_r) + +/* CvtOp truncate */ +DEFINE_MAPPING(abstract::MOP_zext_rr_8_16, x64::MOP_movzbw_r_r) +DEFINE_MAPPING(abstract::MOP_sext_rr_8_16, x64::MOP_movsbw_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_8_32, x64::MOP_movzbl_r_r) +DEFINE_MAPPING(abstract::MOP_sext_rr_8_32, x64::MOP_movsbl_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_16_32, x64::MOP_movzwl_r_r) +DEFINE_MAPPING(abstract::MOP_sext_rr_16_32, x64::MOP_movswl_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_8_64, x64::MOP_movzbq_r_r) +DEFINE_MAPPING(abstract::MOP_sext_rr_8_64, x64::MOP_movsbq_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_16_64, x64::MOP_movzwq_r_r) +DEFINE_MAPPING(abstract::MOP_sext_rr_16_64, x64::MOP_movswq_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_32_64, x64::MOP_movl_r_r) +DEFINE_MAPPING(abstract::MOP_sext_rr_32_64, x64::MOP_movslq_r_r) /* Floating CvtOp int2float */ -DEFINE_MAPPING(abstract::MOP_cvt_fr_u64, x64::MOP_cvtsi2sdq_r) -DEFINE_MAPPING(abstract::MOP_cvt_fr_u32, x64::MOP_cvtsi2ssq_r) -DEFINE_MAPPING(abstract::MOP_cvt_fr_i32, x64::MOP_cvtsi2ssl_r) -DEFINE_MAPPING(abstract::MOP_cvt_fr_i64, 
x64::MOP_cvtsi2sdq_r) +DEFINE_MAPPING(abstract::MOP_cvt_f64_u64, x64::MOP_cvtsi2sdq_r) +DEFINE_MAPPING(abstract::MOP_cvt_f32_u32, x64::MOP_cvtsi2ssq_r) +DEFINE_MAPPING(abstract::MOP_cvt_f32_i32, x64::MOP_cvtsi2ssl_r) +DEFINE_MAPPING(abstract::MOP_cvt_f64_i64, x64::MOP_cvtsi2sdq_r) /* Floating CvtOp float2int */ -DEFINE_MAPPING(abstract::MOP_cvt_rf_u32, x64::MOP_cvttss2siq_r) -DEFINE_MAPPING(abstract::MOP_cvt_rf_u64, x64::MOP_cvttsd2siq_r) -DEFINE_MAPPING(abstract::MOP_cvt_rf_i32, x64::MOP_cvttss2sil_r) -DEFINE_MAPPING(abstract::MOP_cvt_rf_i64, x64::MOP_cvttsd2siq_r) +DEFINE_MAPPING(abstract::MOP_cvt_u32_f32, x64::MOP_cvttss2siq_r) +DEFINE_MAPPING(abstract::MOP_cvt_u64_f64, x64::MOP_cvttsd2siq_r) +DEFINE_MAPPING(abstract::MOP_cvt_i32_f32, x64::MOP_cvttss2sil_r) +DEFINE_MAPPING(abstract::MOP_cvt_i64_f64, x64::MOP_cvttsd2siq_r) /* Floating CvtOp float2float */ DEFINE_MAPPING(abstract::MOP_cvt_ff_64_32, x64::MOP_cvtss2sd_r) diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_cgfunc.h b/src/mapleall/maple_be/include/cg/x86_64/x64_cgfunc.h index 828391fe1da2b1bfe3737026fbbb3760bf91ccff..e5d436bbe36964f03f7da52323a1143ebaeb5a0a 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_cgfunc.h +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_cgfunc.h @@ -49,7 +49,7 @@ class X64CGFunc : public CGFunc { void SelectAbort() override; void SelectAssertNull(UnaryStmtNode &stmt) override; void SelectAsm(AsmNode &node) override; - void SelectAggDassign(DassignNode &stmt) override; + void SelectAggDassign(const DassignNode &stmt) override; void SelectIassign(IassignNode &stmt) override; void SelectIassignoff(IassignoffNode &stmt) override; void SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) override; diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_standardize.h b/src/mapleall/maple_be/include/cg/x86_64/x64_standardize.h index 46353bc7e0885b5df86ecf156a23a7f3f78b1eaf..dd84920da6dd34649cff3f24c6f56cb5f1d9c13d 100644 --- 
a/src/mapleall/maple_be/include/cg/x86_64/x64_standardize.h +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_standardize.h @@ -31,10 +31,11 @@ class X64Standardize : public Standardize { void StdzMov(Insn &insn) override; void StdzStrLdr(Insn &insn) override; void StdzBasicOp(Insn &insn) override; - void StdzUnaryOp(Insn &insn, CGFunc &cgFunc) override; - void StdzCvtOp(Insn &insn, CGFunc &cgFunc) override; - void StdzShiftOp(Insn &insn, CGFunc &cgFunc) override; - void StdzFloatingNeg(Insn &insn, CGFunc &cgFunc); + void StdzUnaryOp(Insn &insn) override; + void StdzCvtOp(Insn &insn) override; + void StdzShiftOp(Insn &insn) override; + void StdzFloatingNeg(Insn &insn); + void StdzCommentOp(Insn &insn) override; }; } #endif /* MAPLEBE_INCLUDEX_64_STANDARDIZE_H */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_MPISel.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_MPISel.cpp index 72ba534f1b21bbd023375a8f6b493d9338cbcc20..5b679a209c4d8bc5bd489bfcc07d27ca8268127a 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_MPISel.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_MPISel.cpp @@ -11,4 +11,750 @@ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR * FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. 
- */ \ No newline at end of file + */ + +#include "aarch64_memlayout.h" +#include "aarch64_cgfunc.h" +#include "aarch64_cg.h" +#include "isel.h" +#include "aarch64_MPISel.h" + +namespace maplebe { +/* local Handle functions in isel, do not delete or move */ +void HandleGoto(StmtNode &stmt, MPISel &iSel); +void HandleLabel(StmtNode &stmt, const MPISel &iSel); + +void AArch64MPIsel::HandleFuncExit() const { + BlockNode *block = cgFunc->GetFunction().GetBody(); + ASSERT(block != nullptr, "get func body block failed in CGFunc::GenerateInstruction"); + cgFunc->GetCurBB()->SetLastStmt(*block->GetLast()); + /* Set lastbb's frequency */ + cgFunc->SetLastBB(*cgFunc->GetCurBB()); +} + +/* Field-ID 0 is assigned to the top level structure. (Field-ID also defaults to 0 if it is not a structure.) */ +MemOperand &AArch64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId, RegOperand *baseReg) { + PrimType symType; + uint64 fieldOffset = 0; + bool isCopy = IsParamStructCopy(symbol); + if (fieldId == 0) { + symType = symbol.GetType()->GetPrimType(); + } else { + MIRType *mirType = symbol.GetType(); + ASSERT((mirType->IsMIRStructType() || mirType->IsMIRUnionType()), "non-structure"); + MIRStructType *structType = static_cast(mirType); + symType = structType->GetFieldType(fieldId)->GetPrimType(); + if (baseReg || !isCopy) { + fieldOffset = static_cast(cgFunc->GetBecommon().GetFieldOffset(*structType, fieldId).first); + } + } + uint32 opndSz = (symType == PTY_agg) ? k64BitSize : GetPrimTypeBitSize(symType); + if (isCopy) { + opndSz = (baseReg) ? 
opndSz : k64BitSize; + } + if (baseReg) { + AArch64CGFunc *a64func = static_cast(cgFunc); + OfstOperand *ofstOpnd = &a64func->GetOrCreateOfstOpnd(fieldOffset, k32BitSize); + return *a64func->CreateMemOperand(opndSz, *baseReg, *ofstOpnd); + } else { + return GetOrCreateMemOpndFromSymbol(symbol, opndSz, fieldOffset); + } +} +MemOperand &AArch64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uint32 opndSize, int64 offset) const { + return static_cast(cgFunc)->GetOrCreateMemOpnd(symbol, offset, opndSize); +} + +Operand *AArch64MPIsel::SelectFloatingConst(MIRConst &mirConst, PrimType primType, const BaseNode &parent) const { + CHECK_FATAL(primType == PTY_f64 || primType == PTY_f32, "wrong const"); + AArch64CGFunc *a64Func = static_cast(cgFunc); + if (primType == PTY_f64) { + auto *dblConst = safe_cast(mirConst); + return a64Func->HandleFmovImm(primType, dblConst->GetIntValue(), *dblConst, parent); + } else { + auto *floatConst = safe_cast(mirConst); + return a64Func->HandleFmovImm(primType, floatConst->GetIntValue(), *floatConst, parent); + } +} + +void AArch64MPIsel::SelectReturn(NaryStmtNode &retNode) { + ASSERT(retNode.NumOpnds() <= 1, "NYI return nodes number > 1"); + Operand *opnd = nullptr; + if (retNode.NumOpnds() != 0) { + if (!cgFunc->GetFunction().StructReturnedInRegs()) { + opnd = cgFunc->HandleExpr(retNode, *retNode.Opnd(0)); + } else { + cgFunc->SelectReturnSendOfStructInRegs(retNode.Opnd(0)); + } + } + cgFunc->SelectReturn(opnd); +} + +void AArch64MPIsel::SelectReturn(bool noOpnd) { + /* if return operand exist, cgFunc->SelectReturn will generate it */ + if (noOpnd) { + MOperator mOp = MOP_xuncond; + LabelOperand &targetOpnd = cgFunc->GetOrCreateLabelOperand(cgFunc->GetReturnLabel()->GetLabelIdx()); + Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, AArch64CG::kMd[mOp]); + jmpInsn.AddOpndChain(targetOpnd); + cgFunc->GetCurBB()->AppendInsn(jmpInsn); + } +} + +void AArch64MPIsel::CreateCallStructParamPassByStack(const MemOperand 
&memOpnd, uint32 symSize, int32 baseOffset) { + uint32 copyTime = RoundUp(symSize, GetPointerSize()) / GetPointerSize(); + for (int32 i = 0; i < copyTime; ++i) { + MemOperand &addrMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize); + addrMemOpnd.SetBaseRegister(*memOpnd.GetBaseRegister()); + ImmOperand &newImmOpnd = static_cast(*memOpnd.GetOffsetOperand()->Clone(*cgFunc->GetMemoryPool())); + newImmOpnd.SetValue(newImmOpnd.GetValue() + i * GetPointerSize()); + addrMemOpnd.SetOffsetOperand(newImmOpnd); + RegOperand &spOpnd = cgFunc->GetOpndBuilder()->CreatePReg(RSP, k64BitSize, kRegTyInt); + Operand &stMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(spOpnd, + (baseOffset + i * GetPointerSize()), k64BitSize); + SelectCopy(stMemOpnd, addrMemOpnd, PTY_u64); + } +} + +void AArch64MPIsel::CreateCallStructParamPassByReg(const MemOperand &memOpnd, regno_t regNo, uint32 parmNum) { + RegOperand &parmOpnd = cgFunc->GetOpndBuilder()->CreatePReg(regNo, k64BitSize, kRegTyInt); + MemOperand &addrMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize); + addrMemOpnd.SetBaseRegister(*memOpnd.GetBaseRegister()); + ImmOperand &newImmOpnd = static_cast(*memOpnd.GetOffsetOperand()->Clone(*cgFunc->GetMemoryPool())); + newImmOpnd.SetValue(newImmOpnd.GetValue() + parmNum * GetPointerSize()); + addrMemOpnd.SetOffsetOperand(newImmOpnd); + paramPassByReg.push_back({&parmOpnd, &addrMemOpnd, PTY_a64}); +} + +std::tuple AArch64MPIsel::GetMemOpndInfoFromAggregateNode(BaseNode &argExpr) { + /* get mirType info */ + auto [fieldId, mirType] = GetFieldIdAndMirTypeFromMirNode(argExpr); + MirTypeInfo symInfo = GetMirTypeInfoFormFieldIdAndMirType(fieldId, mirType); + /* get symbol memOpnd info */ + MemOperand *symMemOpnd = nullptr; + if (argExpr.GetOpCode() == OP_dread) { + AddrofNode &dread = static_cast(argExpr); + MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(dread.GetStIdx()); + symMemOpnd = &GetOrCreateMemOpndFromSymbol(*symbol, dread.GetFieldID()); + } else if 
(argExpr.GetOpCode() == OP_iread) { + IreadNode &iread = static_cast(argExpr); + symMemOpnd = GetOrCreateMemOpndFromIreadNode(iread, symInfo.primType, symInfo.offset); + } else { + CHECK_FATAL_FALSE("unsupported opcode"); + } + return {symMemOpnd, symInfo.size, mirType}; +} + +void AArch64MPIsel::SelectParmListForAggregate(BaseNode &argExpr, AArch64CallConvImpl &parmLocator, bool isArgUnused) { + auto [argOpnd, argSize, mirType] = GetMemOpndInfoFromAggregateNode(argExpr); + ASSERT(argOpnd->IsMemoryAccessOperand(), "wrong opnd"); + MemOperand &memOpnd = static_cast(*argOpnd); + + CCLocInfo ploc; + parmLocator.LocateNextParm(*mirType, ploc); + if (isArgUnused) { + return; + } + + /* create call struct param pass */ + if (argSize > k16ByteSize || ploc.reg0 == kRinvalid) { + CreateCallStructParamPassByStack(memOpnd, argSize, ploc.memOffset); + } else { + CHECK_FATAL(ploc.fpSize == 0, "Unknown call parameter state"); + CreateCallStructParamPassByReg(memOpnd, ploc.reg0, 0); + if (ploc.reg1 != kRinvalid) { + CreateCallStructParamPassByReg(memOpnd, ploc.reg1, kSecondReg); + } + if (ploc.reg2 != kRinvalid) { + CreateCallStructParamPassByReg(memOpnd, ploc.reg2, kThirdReg); + } + if (ploc.reg3 != kRinvalid) { + CreateCallStructParamPassByReg(memOpnd, ploc.reg3, kFourthReg); + } + } +} + +/* + * SelectParmList generates an instrunction for each of the parameters + * to load the parameter value into the corresponding register. + * We return a list of registers to the call instruction because + * they may be needed in the register allocation phase. 
+ */ +void AArch64MPIsel::SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds) { + AArch64CGFunc *aarch64CGFunc = static_cast(cgFunc); + aarch64CGFunc->SelectParmList(naryNode, srcOpnds); +} + +bool AArch64MPIsel::IsParamStructCopy(const MIRSymbol &symbol) { + if (symbol.GetStorageClass() == kScFormal && + cgFunc->GetBecommon().GetTypeSize(symbol.GetTyIdx().GetIdx()) > k16ByteSize) { + return true; + } + return false; +} + +bool AArch64MPIsel::IsSymbolRequireIndirection(const MIRSymbol &symbol) { + return IsParamStructCopy(symbol); +} + +void AArch64MPIsel::SelectIntAggCopyReturn(MemOperand &symbolMem, uint64 aggSize) { + (void)symbolMem; + (void)aggSize; +} + +void AArch64MPIsel::SelectAggCopy(MemOperand &lhs, MemOperand &rhs, uint32 copySize) { + (void)lhs; + (void)rhs; + (void)copySize; + CHECK_FATAL_FALSE("Invalid MPISel function"); +} + +void AArch64MPIsel::SelectLibCallNoReturn(const std::string &funcName, std::vector &opndVec, + PrimType primType) { + /* generate libcall withou return value */ + std::vector pt(opndVec.size(), primType); + SelectLibCallNArg(funcName, opndVec, pt); + return; +} + +void AArch64MPIsel::SelectLibCallNArg(const std::string &funcName, std::vector &opndVec, + std::vector pt) { + std::string newName = funcName; + MIRSymbol *st = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + st->SetNameStrIdx(newName); + st->SetStorageClass(kScExtern); + st->SetSKind(kStFunc); + + /* setup the type of the callee function */ + std::vector vec; + std::vector vecAt; + for (size_t i = 1; i < opndVec.size(); ++i) { + vec.emplace_back(GlobalTables::GetTypeTable().GetTypeTable()[static_cast(pt[i])]->GetTypeIndex()); + vecAt.emplace_back(TypeAttrs()); + } + + /* only support no return function */ + MIRType *retType = GlobalTables::GetTypeTable().GetTypeTable().at(static_cast(PTY_void)); + st->SetTyIdx(cgFunc->GetBecommon().BeGetOrCreateFunctionType(retType->GetTypeIndex(), vec, vecAt)->GetTypeIndex()); + + /* setup actual parameters */ + 
ListOperand ¶mOpnds = cgFunc->GetOpndBuilder()->CreateList(); + + AArch64CallConvImpl parmLocator(cgFunc->GetBecommon()); + CCLocInfo ploc; + for (size_t i = 0; i < opndVec.size(); ++i) { + ASSERT(pt[i] != PTY_void, "primType check"); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast(pt[i])]; + Operand *stOpnd = opndVec[i]; + ASSERT(stOpnd->IsRegister(), "exp result should be reg"); + RegOperand *expRegOpnd = static_cast(stOpnd); + parmLocator.LocateNextParm(*ty, ploc); + if (ploc.reg0 != 0) { /* load to the register */ + RegOperand &parmRegOpnd = cgFunc->GetOpndBuilder()->CreatePReg(ploc.reg0, + expRegOpnd->GetSize(), cgFunc->GetRegTyFromPrimTy(pt[i])); + SelectCopy(parmRegOpnd, *expRegOpnd, pt[i]); + paramOpnds.PushOpnd(parmRegOpnd); + } + ASSERT(ploc.reg1 == 0, "SelectCall NYI"); + } + + MIRSymbol *sym = cgFunc->GetFunction().GetLocalOrGlobalSymbol(st->GetStIdx(), false); + Operand &targetOpnd = cgFunc->GetOpndBuilder()->CreateFuncNameOpnd(*sym); + ListOperand &retOpnds = cgFunc->GetOpndBuilder()->CreateList(); + Insn &callInsn = AppendCall(MOP_xbl, targetOpnd, paramOpnds, retOpnds); + + callInsn.SetRetType(Insn::kRegInt); + if (retType != nullptr) { + callInsn.SetRetSize(static_cast(retType->GetSize())); + callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType())); + } + return; +} + +RegOperand *AArch64MPIsel::PrepareMemcpyParm(MemOperand &memOperand, MOperator mOp) { + RegOperand ®Result = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + Insn &addrInsn = (cgFunc->GetInsnBuilder()->BuildInsn(mOp, AArch64CG::kMd[mOp])); + addrInsn.AddOpndChain(memOperand).AddOpndChain(regResult); + cgFunc->GetCurBB()->AppendInsn(addrInsn); + return ®Result; +} + +RegOperand *AArch64MPIsel::PrepareMemcpyParm(uint64 copySize) { + RegOperand ®Result = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + ImmOperand &sizeOpnd = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, static_cast(copySize)); + 
SelectCopy(regResult, sizeOpnd, PTY_i64); + return ®Result; +} + +void AArch64MPIsel::SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRh, const DassignNode &stmt) { + (void)lhsInfo; + (void)symbolMem; + (void)opndRh; + cgFunc->SelectAggDassign(stmt); +} + +void AArch64MPIsel::SelectAggIassign(IassignNode &stmt, Operand &addrOpnd, Operand &opndRhs) { + (void)opndRhs; + cgFunc->SelectAggIassign(stmt, addrOpnd); +} + +Insn &AArch64MPIsel::AppendCall(AArch64MOP_t mOp, Operand &targetOpnd, + ListOperand ¶mOpnds, ListOperand &retOpnds) { + Insn &callInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, AArch64CG::kMd[mOp]); + callInsn.AddOpndChain(targetOpnd).AddOpndChain(paramOpnds).AddOpndChain(retOpnds); + cgFunc->GetCurBB()->AppendInsn(callInsn); + cgFunc->GetCurBB()->SetHasCall(); + cgFunc->GetFunction().SetHasCall(); + return callInsn; +} + +void AArch64MPIsel::SelectCalleeReturn(MIRType *retType, ListOperand &retOpnds) { + if (retType == nullptr) { + return; + } + auto retSize = retType->GetSize() * kBitsPerByte; + if (retType->GetPrimType() != PTY_agg || retSize <= k128BitSize) { + if (retSize > k0BitSize) { + retOpnds.PushOpnd(cgFunc->GetOpndBuilder()->CreatePReg(R0, k64BitSize, kRegTyInt)); + } + if (retSize > k64BitSize) { + retOpnds.PushOpnd(cgFunc->GetOpndBuilder()->CreatePReg(R1, k64BitSize, kRegTyInt)); + } + } +} + +void AArch64MPIsel::SelectCall(CallNode &callNode) { + cgFunc->SelectCall(callNode); +} + +void AArch64MPIsel::SelectIcall(IcallNode &iCallNode, Operand &opnd0) { + cgFunc->SelectIcall(iCallNode, opnd0); +} + +Operand &AArch64MPIsel::ProcessReturnReg(PrimType primType, int32 sReg) { + return GetTargetRetOperand(primType, sReg); +} + +void AArch64MPIsel::SelectGoto(GotoNode &stmt) { + MOperator mOp = MOP_xuncond; + auto funcName = ".L." 
+ std::to_string(cgFunc->GetUniqueID()) + "__" + std::to_string(stmt.GetOffset()); + LabelOperand &targetOpnd = cgFunc->GetOpndBuilder()->CreateLabel(funcName.c_str(), stmt.GetOffset()); + Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, AArch64CG::kMd[mOp]); + cgFunc->GetCurBB()->AppendInsn(jmpInsn); + jmpInsn.AddOpndChain(targetOpnd); + cgFunc->SetCurBBKind(BB::kBBGoto); + return; +} + +void AArch64MPIsel::SelectIgoto(Operand &opnd0) { + CHECK_FATAL(opnd0.IsRegister(), "only register implemented!"); + MOperator mOp = MOP_xbr; + Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, AArch64CG::kMd[mOp]); + jmpInsn.AddOpndChain(opnd0); + cgFunc->GetCurBB()->AppendInsn(jmpInsn); + return; +} + +/* The second parameter in function va_start does not need to be concerned here, + * it is mainly used in proepilog */ +void AArch64MPIsel::SelectCVaStart(const IntrinsiccallNode &intrnNode) { + AArch64CGFunc *a64func = static_cast(cgFunc); + a64func->SelectCVaStart(intrnNode); +} + +void AArch64MPIsel::SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) { + MIRIntrinsicID intrinsic = intrinsiccallNode.GetIntrinsic(); + + if (intrinsic == INTRN_C_va_start) { + SelectCVaStart(intrinsiccallNode); + return; + } + if (intrinsic == INTRN_C_stack_save || intrinsic == INTRN_C_stack_restore) { + return; + } + + CHECK_FATAL_FALSE("Intrinsic %d: %s not implemented by AArch64 isel CG.", intrinsic, GetIntrinsicName(intrinsic)); +} + +void AArch64MPIsel::SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) { + MIRType *etype = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a64)); + std::vector sizeArray; + const SmallCaseVector &switchTable = rangeGotoNode.GetRangeGotoTable(); + sizeArray.emplace_back(switchTable.size()); + MemPool *memPool = cgFunc->GetMemoryPool(); + MIRArrayType *arrayType = memPool->New(etype->GetTypeIndex(), sizeArray); + MIRAggConst *arrayConst = memPool->New(cgFunc->GetMirModule(), *arrayType); + for (const auto &itPair : 
switchTable) { + LabelIdx labelIdx = itPair.second; + cgFunc->GetCurBB()->PushBackRangeGotoLabel(labelIdx); + MIRConst *mirConst = memPool->New(labelIdx, cgFunc->GetFunction().GetPuidx(), *etype); + arrayConst->AddItem(mirConst, 0); + } + MIRSymbol *lblSt = cgFunc->GetFunction().GetSymTab()->CreateSymbol(kScopeLocal); + lblSt->SetStorageClass(kScFstatic); + lblSt->SetSKind(kStConst); + lblSt->SetTyIdx(arrayType->GetTypeIndex()); + lblSt->SetKonst(arrayConst); + std::string lblStr(".L_"); + uint32 labelIdxTmp = cgFunc->GetLabelIdx(); + lblStr.append(std::to_string(cgFunc->GetUniqueID())).append("_LOCAL_CONST.").append(std::to_string(labelIdxTmp++)); + cgFunc->SetLabelIdx(labelIdxTmp); + lblSt->SetNameStrIdx(lblStr); + cgFunc->AddEmitSt(cgFunc->GetCurBB()->GetId(), *lblSt); + + ImmOperand &stOpnd = cgFunc->GetOpndBuilder()->CreateImm(*lblSt, 0, 0); + /* get index */ + PrimType srcType = rangeGotoNode.Opnd(0)->GetPrimType(); + RegOperand &opnd0 = SelectCopy2Reg(srcOpnd, srcType); + int32 minIdx = switchTable[0].first; + ImmOperand &opnd1 = cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(srcType), + -minIdx - rangeGotoNode.GetTagOffset()); + RegOperand *indexOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(srcType), kRegTyInt); + SelectAdd(*indexOpnd, opnd0, opnd1, srcType); + if (indexOpnd->GetSize() != GetPrimTypeBitSize(PTY_u64)) { + indexOpnd = static_cast(&cgFunc->SelectCopy(*indexOpnd, PTY_u64, PTY_u64)); + } + + /* load the address of the switch table */ + RegOperand &baseOpnd = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + cgFunc->GetCurBB()->AppendInsn(cgFunc->GetInsnBuilder()->BuildInsn(MOP_xadrp, baseOpnd, stOpnd)); + cgFunc->GetCurBB()->AppendInsn(cgFunc->GetInsnBuilder()->BuildInsn(MOP_xadrpl12, baseOpnd, baseOpnd, stOpnd)); + + /* load the displacement into a register by accessing memory at base + index*8 */ + AArch64CGFunc *a64func = static_cast(cgFunc); + BitShiftOperand &bitOpnd = 
a64func->CreateBitShiftOperand(BitShiftOperand::kLSL, k3BitSize, k8BitShift); + Operand *disp = static_cast(cgFunc)->CreateMemOperand(k64BitSize, baseOpnd, *indexOpnd, bitOpnd); + RegOperand &tgt = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + SelectAdd(tgt, baseOpnd, *disp, PTY_u64); + Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_xbr, AArch64CG::kMd[MOP_xbr]); + jmpInsn.AddOpndChain(tgt); + cgFunc->GetCurBB()->AppendInsn(jmpInsn); +} + +Operand *AArch64MPIsel::SelectAddrof(AddrofNode &expr, const BaseNode &parent) { + return cgFunc->SelectAddrof(expr, parent, false); +} + +Operand *AArch64MPIsel::SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) { + return &cgFunc->SelectAddrofFunc(expr, parent); +} + +Operand *AArch64MPIsel::SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) { + (void)parent; + /* adrp reg, label-id */ + uint32 instrSize = static_cast(expr.SizeOfInstr()); + PrimType primType = (instrSize == k8ByteSize) ? PTY_u64 : + (instrSize == k4ByteSize) ? PTY_u32 : + (instrSize == k2ByteSize) ? 
PTY_u16 : PTY_u8; + Operand &dst = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + ImmOperand &immOpnd = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, expr.GetOffset()); + cgFunc->GetCurBB()->AppendInsn(cgFunc->GetInsnBuilder()->BuildInsn(MOP_adrp_label, dst, immOpnd)); + return &dst; +} + +/* + * handle brfalse/brtrue op, opnd0 can be a compare node or non-compare node + * such as a dread for example + */ +void AArch64MPIsel::SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode) { + auto &condGotoNode = static_cast(stmt); + Operand *opnd0 = nullptr; + Operand *opnd1 = nullptr; + if (!kOpcodeInfo.IsCompare(condNode.GetOpCode())) { + Opcode condOp = condGotoNode.GetOpCode(); + if (condNode.GetOpCode() == OP_constval) { + auto &constValNode = static_cast(condNode); + if (((OP_brfalse == condOp) && constValNode.GetConstVal()->IsZero()) || + ((OP_brtrue == condOp) && !constValNode.GetConstVal()->IsZero())) { + auto *gotoStmt = cgFunc->GetMemoryPool()->New(OP_goto); + gotoStmt->SetOffset(condGotoNode.GetOffset()); + HandleGoto(*gotoStmt, *this); // isel's + auto *labelStmt = cgFunc->GetMemoryPool()->New(); + labelStmt->SetLabelIdx(cgFunc->CreateLabel()); + HandleLabel(*labelStmt, *this); + } + return; + } + /* 1 operand condNode, cmp it with zero */ + opnd0 = HandleExpr(stmt, condNode); // isel's + opnd1 = &cgFunc->CreateImmOperand(condNode.GetPrimType(), 0); + } else { + /* 2 operands condNode */ + opnd0 = HandleExpr(stmt, *condNode.Opnd(0)); // isel's + opnd1 = HandleExpr(stmt, *condNode.Opnd(1)); // isel's + } + cgFunc->SelectCondGoto(stmt, *opnd0, *opnd1); + cgFunc->SetCurBBKind(BB::kBBIf); +} + +Operand *AArch64MPIsel::SelectStrLiteral(ConststrNode &constStr) { + return cgFunc->SelectStrConst(*cgFunc->GetMemoryPool()->New( + constStr.GetStrIdx(), *GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a64)))); +} + +Operand &AArch64MPIsel::GetTargetRetOperand(PrimType primType, int32 sReg) { + regno_t retReg = 
0; + switch (sReg) { + case kSregRetval0: + if (IsPrimitiveFloat(primType)) { + retReg = V0; + } else { + retReg = R0; + } + break; + case kSregRetval1: + if (IsPrimitiveFloat(primType)) { + retReg = V1; + } else { + retReg = R1; + } + break; + default: + CHECK_FATAL_FALSE("GetTargetRetOperand: NIY"); + break; + } + uint32 bitSize = GetPrimTypeBitSize(primType); + RegOperand &parmRegOpnd = cgFunc->GetOpndBuilder()->CreatePReg(retReg, bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + return parmRegOpnd; +} + +Operand *AArch64MPIsel::SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + (void)parent; + PrimType dtype = node.GetPrimType(); + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(dtype)) { + resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), + cgFunc->GetRegTyFromPrimTy(dtype)); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, dtype, node.Opnd(0)->GetPrimType()); + RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, dtype, node.Opnd(1)->GetPrimType()); + SelectMpy(*resOpnd, regOpnd0, regOpnd1, dtype); + } else { + /* vector operand */ + CHECK_FATAL_FALSE("NIY"); + } + + return resOpnd; +} + +void AArch64MPIsel::SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + cgFunc->SelectMpy(resOpnd, opnd0, opnd1, primType); +} + +Operand *AArch64MPIsel::SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + return cgFunc->SelectDiv(node, opnd0, opnd1, parent); +} + +Operand *AArch64MPIsel::SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + return cgFunc->SelectRem(node, opnd0, opnd1, parent); +} + +Operand *AArch64MPIsel::SelectDivRem(RegOperand &opnd0, RegOperand &opnd1, PrimType primType, Opcode opcode) { + (void)opnd0; + (void)opnd1; + (void)primType; + (void)opcode; + CHECK_FATAL_FALSE("Invalid MPISel function"); + return nullptr; +} + +Operand *AArch64MPIsel::SelectCmpOp(CompareNode &node, Operand &opnd0, Operand 
&opnd1, const BaseNode &parent) { + return cgFunc->SelectCmpOp(node, opnd0, opnd1, parent); +} + +void AArch64MPIsel::SelectCmp(Operand &opnd0, Operand &opnd1, PrimType primType) { + (void)opnd0; + (void)opnd1; + (void)primType; + CHECK_FATAL_FALSE("Invalid MPISel function"); +} + +Operand *AArch64MPIsel::SelectSelect(TernaryNode &expr, Operand &cond, Operand &trueOpnd, Operand &falseOpnd, + const BaseNode &parent) { + return cgFunc->SelectSelect(expr, cond, trueOpnd, falseOpnd, parent); +} + +Operand *AArch64MPIsel::SelectExtractbits(const BaseNode &parent, ExtractbitsNode &node, Operand &opnd0) { + return cgFunc->SelectExtractbits(node, opnd0, parent); +} + +void AArch64MPIsel::SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + AArch64CGFunc *a64func = static_cast(cgFunc); + a64func->SelectMinOrMax(isMin, resOpnd, opnd0, opnd1, primType); +} + +Operand *AArch64MPIsel::SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrnNode, std::string name, Operand &opnd0, const BaseNode &parent) { + PrimType ptype = intrnNode.Opnd(0)->GetPrimType(); + Operand *opnd = &opnd0; + AArch64CGFunc *a64func = static_cast(cgFunc); + if (intrnNode.GetIntrinsic() == INTRN_C_ffs) { + ASSERT(intrnNode.GetPrimType() == PTY_i32, "Unexpect Size"); + return a64func->SelectAArch64ffs(*opnd, ptype); + } + if (opnd->IsMemoryAccessOperand()) { + RegOperand &ldDest = a64func->CreateRegisterOperandOfType(ptype); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(a64func->PickLdInsn(GetPrimTypeBitSize(ptype), ptype), ldDest, *opnd); + cgFunc->GetCurBB()->AppendInsn(insn); + opnd = &ldDest; + } + std::vector opndVec; + RegOperand *dst = &a64func->CreateRegisterOperandOfType(ptype); + opndVec.push_back(dst); /* result */ + opndVec.push_back(opnd); /* param 0 */ + a64func->SelectLibCall(name, opndVec, ptype, ptype); + + return dst; +} + +Operand *AArch64MPIsel::SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return 
cgFunc->SelectBswap(node, opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCctz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return cgFunc->SelectCctz(node); +} + +Operand *AArch64MPIsel::SelectCclz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return cgFunc->SelectCclz(node); +} + +Operand *AArch64MPIsel::SelectCsin(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "sin", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCsinh(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "sinh", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCasin(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "asin", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCcos(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "cos", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCcosh(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "cosh", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCacos(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "acos", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCatan(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "atan", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCexp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "exp", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectClog(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "log", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectClog10(IntrinsicopNode &node, Operand &opnd0, const BaseNode 
&parent) { + return SelectIntrinsicOpWithOneParam(node, "log10", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCsinf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "sinf", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCsinhf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "sinhf", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCasinf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "asinf", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCcosf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "cosf", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCcoshf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "coshf", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCacosf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "acosf", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCatanf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "atanf", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCexpf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "expf", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectClogf(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "logf", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectClog10f(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectIntrinsicOpWithOneParam(node, "log10f", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCffs(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + return 
SelectIntrinsicOpWithOneParam(node, "ffs", opnd0, parent); +} + +Operand *AArch64MPIsel::SelectCmemcmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL_FALSE("NYI"); + return nullptr; +} + +Operand *AArch64MPIsel::SelectCstrlen(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL_FALSE("NYI"); + return nullptr; +} + +Operand *AArch64MPIsel::SelectCstrcmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL_FALSE("NYI"); + return nullptr; +} + +Operand *AArch64MPIsel::SelectCstrncmp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL_FALSE("NYI"); + return nullptr; +} + +Operand *AArch64MPIsel::SelectCstrchr(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL_FALSE("NYI"); + return nullptr; +} + +Operand *AArch64MPIsel::SelectCstrrchr(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL_FALSE("NYI"); + return nullptr; +} + +Operand *AArch64MPIsel::SelectAbs(UnaryNode &node, Operand &opnd0, const BaseNode &parent) { + return cgFunc->SelectAbs(node, opnd0); +} + +void AArch64MPIsel::SelectCvtFloat2Float(Operand &resOpnd, Operand &srcOpnd, PrimType fromType, PrimType toType) { + static_cast(cgFunc)->SelectCvtFloat2Float(resOpnd, srcOpnd, fromType, toType); +} + +void AArch64MPIsel::SelectCvtFloat2Int(Operand &resOpnd, Operand &srcOpnd, PrimType itype, PrimType ftype) { + static_cast(cgFunc)->SelectCvtFloat2Int(resOpnd, srcOpnd, itype, ftype); +} + +RegOperand &AArch64MPIsel::GetTargetStackPointer(PrimType primType) { + return cgFunc->GetOpndBuilder()->CreatePReg(RSP, GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); +} + +RegOperand &AArch64MPIsel::GetTargetBasicPointer(PrimType primType) { + return cgFunc->GetOpndBuilder()->CreatePReg(RFP, GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); +} + +void AArch64MPIsel::SelectAsm(AsmNode &node) { + cgFunc->SelectAsm(node); 
+} +} diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp index 45a25ae9935fde148c572e76d5f26480b6114bcf..cc5e2c67eff655d5528ae378c2c8b9ea16d5570b 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp @@ -53,6 +53,20 @@ CondOperand AArch64CGFunc::ccOperands[kCcLast] = { CondOperand(CC_AL), }; +Operand *AArch64CGFunc::HandleExpr(const BaseNode &parent, BaseNode &expr) { +#ifdef NEWCG + Operand *opnd; + if (CGOptions::UseNewCg()) { + MPISel *isel = GetISel(); + opnd = isel->HandleExpr(parent, expr); + } else { + opnd = CGFunc::HandleExpr(parent, expr); + } + return opnd; +#endif + return CGFunc::HandleExpr(parent, expr); +} + namespace { constexpr int32 kSignedDimension = 2; /* signed and unsigned */ constexpr int32 kIntByteSizeDimension = 4; /* 1 byte, 2 byte, 4 bytes, 8 bytes */ @@ -1713,7 +1727,7 @@ bool AArch64CGFunc::IslhsSizeAligned(uint64 lhsSizeCovered, uint32 newAlignUsed, return false; } -void AArch64CGFunc::SelectAggDassign(DassignNode &stmt) { +void AArch64CGFunc::SelectAggDassign(const DassignNode &stmt) { MIRSymbol *lhsSymbol = GetFunction().GetLocalOrGlobalSymbol(stmt.GetStIdx()); uint32 lhsOffset = 0; MIRType *lhsType = lhsSymbol->GetType(); @@ -7345,23 +7359,23 @@ RegOperand *AArch64CGFunc::CreateVirtualRegisterOperand(regno_t vRegNO, uint32 s } RegOperand &AArch64CGFunc::CreateVirtualRegisterOperand(regno_t vRegNO) { - ASSERT((vRegOperandTable.find(vRegNO) == vRegOperandTable.end()), "already exist"); - ASSERT(vRegNO < vRegTable.size(), "index out of range"); - uint8 bitSize = static_cast((static_cast(vRegTable[vRegNO].GetSize())) * kBitsPerByte); - RegOperand *res = CreateVirtualRegisterOperand(vRegNO, bitSize, vRegTable.at(vRegNO).GetType()); - vRegOperandTable[vRegNO] = res; + ASSERT((vReg.vRegOperandTable.find(vRegNO) == vReg.vRegOperandTable.end()), "already exist"); + ASSERT(vRegNO < 
vReg.VRegTableSize(), "index out of range"); + uint8 bitSize = static_cast((static_cast(vReg.VRegTableGetSize(vRegNO))) * kBitsPerByte); + RegOperand *res = CreateVirtualRegisterOperand(vRegNO, bitSize, vReg.VRegTableGetType(vRegNO)); + vReg.vRegOperandTable[vRegNO] = res; return *res; } RegOperand &AArch64CGFunc::GetOrCreateVirtualRegisterOperand(regno_t vRegNO) { - auto it = vRegOperandTable.find(vRegNO); - return (it != vRegOperandTable.end()) ? *(it->second) : CreateVirtualRegisterOperand(vRegNO); + auto it = vReg.vRegOperandTable.find(vRegNO); + return (it != vReg.vRegOperandTable.end()) ? *(it->second) : CreateVirtualRegisterOperand(vRegNO); } RegOperand &AArch64CGFunc::GetOrCreateVirtualRegisterOperand(RegOperand ®Opnd) { regno_t regNO = regOpnd.GetRegisterNumber(); - auto it = vRegOperandTable.find(regNO); - if (it != vRegOperandTable.end()) { + auto it = vReg.vRegOperandTable.find(regNO); + if (it != vReg.vRegOperandTable.end()) { it->second->SetSize(regOpnd.GetSize()); it->second->SetRegisterNumber(regNO); it->second->SetRegisterType(regOpnd.GetRegisterType()); @@ -7370,14 +7384,14 @@ RegOperand &AArch64CGFunc::GetOrCreateVirtualRegisterOperand(RegOperand ®Opnd } else { auto *newRegOpnd = static_cast(regOpnd.Clone(*memPool)); regno_t newRegNO = newRegOpnd->GetRegisterNumber(); - if (newRegNO >= maxRegCount) { - maxRegCount = newRegNO + kRegIncrStepLen; - vRegTable.resize(maxRegCount); + if (newRegNO >= GetMaxRegNum()) { + SetMaxRegNum(newRegNO + kRegIncrStepLen); + vReg.VRegTableResize(GetMaxRegNum()); } - vRegOperandTable[newRegNO] = newRegOpnd; + vReg.vRegOperandTable[newRegNO] = newRegOpnd; VirtualRegNode *vregNode = memPool->New(newRegOpnd->GetRegisterType(), newRegOpnd->GetSize()); - vRegTable[newRegNO] = *vregNode; - vRegCount = maxRegCount; + vReg.VRegTableElementSet(newRegNO, vregNode); + vReg.SetCount(GetMaxRegNum()); return *newRegOpnd; } } @@ -9187,7 +9201,6 @@ void AArch64CGFunc::SelectReturn(Operand *opnd0) { CHECK_FATAL(false, "nyi"); } } - 
LabelOperand &targetOpnd = GetOrCreateLabelOperand(GetReturnLabel()->GetLabelIdx()); GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xuncond, targetOpnd)); } @@ -10051,7 +10064,7 @@ void AArch64CGFunc::SelectAddAfterInsn(Operand &resOpnd, Operand &opnd0, Operand MemOperand *AArch64CGFunc::AdjustMemOperandIfOffsetOutOfRange( MemOperand *memOpnd, regno_t vrNum, bool isDest, Insn &insn, AArch64reg regNum, bool &isOutOfRange) { - if (vrNum >= vRegTable.size()) { + if (vrNum >= vReg.VRegTableSize()) { CHECK_FATAL(false, "index out of range in AArch64CGFunc::AdjustMemOperandIfOffsetOutOfRange"); } uint32 dataSize = GetOrCreateVirtualRegisterOperand(vrNum).GetSize(); @@ -10106,7 +10119,7 @@ MemOperand *AArch64CGFunc::GetOrCreatSpillMem(regno_t vrNum) { auto p = spillRegMemOperands.find(vrNum); if (p == spillRegMemOperands.end()) { - if (vrNum >= vRegTable.size()) { + if (vrNum >= vReg.VRegTableSize()) { CHECK_FATAL(false, "index out of range in AArch64CGFunc::FreeSpillRegMem"); } uint32 memBitSize = k64BitSize; @@ -12666,4 +12679,34 @@ bool AArch64CGFunc::DistanceCheck(const BB &bb, LabelIdx targLabIdx, uint32 targ } CHECK_FATAL(false, "CFG error"); } + +void AArch64CGFunc::Link2ISel(MPISel *p) { + SetISel(p); + CGFunc::InitFactory(); +} + +void AArch64CGFunc::HandleFuncCfg(CGCFG *cfg) { + RemoveUnreachableBB(); + AddCommonExitBB(); + if (GetMirModule().GetSrcLang() != kSrcLangC) { + MarkCatchBBs(); + } + MarkCleanupBB(); + DetermineReturnTypeofCall(); + cfg->UnreachCodeAnalysis(); + if (GetMirModule().GetSrcLang() != kSrcLangC) { + cfg->WontExitAnalysis(); + } + CG *cg = GetCG(); + if (cg->GetCGOptions().IsLazyBinding() && cg->IsLibcore()) { + ProcessLazyBinding(); + } + if (cg->DoPatchLongBranch()) { + PatchLongBranch(); + } + if (cg->GetCGOptions().DoEnableHotColdSplit()) { + cfg->CheckCFGFreq(); + } + NeedStackProtect(); +} } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp 
b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp index 62e6272ca6090e6c359400616a2fc17f2269492e..1580027c101cd1349dbe4a09de69c620df990efb 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -205,6 +205,11 @@ bool AArch64CGPeepHole::DoSSAOptimize(BB &bb, Insn &insn) { manager->Optimize(true); break; } + case MOP_xmulrrr: + case MOP_wmulrrr: { + manager->Optimize(!cgFunc->IsAfterRegAlloc()); + break; + } default: break; } @@ -2487,6 +2492,61 @@ void RemoveMovingtoSameRegPattern::Run(BB &bb, Insn &insn) { } } +bool MulImmToShiftPattern::CheckCondition(Insn &insn) { + auto &useReg = static_cast(insn.GetOperand(kInsnThirdOpnd)); + movInsn = ssaInfo->GetDefInsn(useReg); + if (movInsn == nullptr) { + return false; + } + MOperator prevMop = movInsn->GetMachineOpcode(); + if (prevMop != MOP_wmovri32 && prevMop != MOP_xmovri64) { + return false; + } + ImmOperand &immOpnd = static_cast(movInsn->GetOperand(kInsnSecondOpnd)); + if (immOpnd.IsNegative()) { + return false; + } + uint64 immVal = immOpnd.GetValue(); + if (immVal == 0) { + shiftVal = 0; + newMop = insn.GetMachineOpcode() == MOP_xmulrrr ? MOP_xmovri64 : MOP_wmovri32; + return true; + } + /* power of 2 */ + if ((immVal & (immVal - 1)) != 0) { + return false; + } + shiftVal = static_cast(log2(immVal)); + newMop = (prevMop == MOP_xmovri64) ? 
MOP_xlslrri6 : MOP_wlslrri5; + return true; +} + +void MulImmToShiftPattern::Run(BB &bb, Insn &insn) { + /* mov x0,imm and mul to shift */ + if (!CheckCondition(insn)) { + return; + } + auto *aarch64CGFunc = static_cast(cgFunc); + ImmOperand &immOpnd = aarch64CGFunc->CreateImmOperand(shiftVal, k32BitSize, false); + Insn *newInsn; + if (newMop == MOP_xmovri64 || newMop == MOP_wmovri32) { + newInsn = &cgFunc->GetInsnBuilder()->BuildInsn(newMop, insn.GetOperand(kInsnFirstOpnd), immOpnd); + } else { + newInsn = &cgFunc->GetInsnBuilder()->BuildInsn(newMop, insn.GetOperand(kInsnFirstOpnd), + insn.GetOperand(kInsnSecondOpnd), immOpnd); + } + bb.ReplaceInsn(insn, *newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, *newInsn); + optSuccess = true; + SetCurrInsn(newInsn); + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(movInsn); + DumpAfterPattern(prevs, &insn, newInsn); + } +} + void EnhanceStrLdrAArch64::Run(BB &bb, Insn &insn) { Insn *prevInsn = insn.GetPrev(); if (!cgFunc.GetMirModule().IsCModule()) { diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp index 5fbd4e572e4789272b81dd6ddab1d73600f3c8e6..9124fc4d075ed4bb63cc52321fb920a16d1dfd36 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
@@ -154,6 +154,9 @@ void AArch64ReachingDefinition::AddRetPseudoInsn(BB &bb) { if (regNO == kInvalidRegNO) { return; } + if (bb.GetKind() == BB::kBBGoto) { + return; /* a goto block should not have unreachable instr */ + } if (regNO == R0) { RegOperand ®Opnd = diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_standardize.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_standardize.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9a16279c9f73f4e05462d7f9932aee9adff2f077 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_standardize.cpp @@ -0,0 +1,335 @@ +/* + * Copyright (c) [2022] Futurewei Technologies, Inc. All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#include "aarch64_standardize.h" +#include "aarch64_isa.h" +#include "aarch64_cg.h" +#include "insn.h" + +namespace maplebe { + +using namespace abstract; +static AbstractIR2Target abstract2TargetTable[abstract::kMopLast] { + {abstract::MOP_undef, {{MOP_pseudo_none, {}, {}}}}, + + {abstract::MOP_copy_ri_8, {{MOP_wmovri32, {kAbtractReg, kAbtractImm}, {0, 1}}}}, + {abstract::MOP_copy_rr_8, {{MOP_wmovrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_copy_ri_16, {{MOP_wmovri32, {kAbtractReg, kAbtractImm}, {0, 1}}}}, + {abstract::MOP_copy_rr_16, {{MOP_wmovrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_copy_ri_32, {{MOP_wmovri32, {kAbtractReg, kAbtractImm}, {0, 1}}}}, + {abstract::MOP_copy_rr_32, {{MOP_wmovrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_copy_ri_64, {{MOP_xmovri64, {kAbtractReg, kAbtractImm}, {0, 1}}}}, + {abstract::MOP_copy_rr_64, {{MOP_xmovrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {abstract::MOP_copy_fi_8, {{MOP_xvmovsr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_copy_ff_8, {{MOP_xvmovs, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_copy_fi_16, {{MOP_xvmovsr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_copy_ff_16, {{MOP_xvmovs, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_copy_fi_32, {{MOP_xvmovsr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_copy_ff_32, {{MOP_xvmovs, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_copy_fi_64, {{MOP_xvmovdr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_copy_ff_64, {{MOP_xvmovd, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {abstract::MOP_zext_rr_16_8, {{MOP_xuxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_sext_rr_16_8, {{MOP_xsxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_zext_rr_32_8, {{MOP_xuxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_sext_rr_32_8, {{MOP_xsxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_zext_rr_32_16, {{MOP_xuxth32, 
{kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_sext_rr_32_16, {{MOP_xsxth32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {abstract::MOP_zext_rr_64_8, {{MOP_xuxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_sext_rr_64_8, {{MOP_xsxtb64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_zext_rr_64_16, {{MOP_xuxth32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_sext_rr_64_16, {{MOP_xsxth64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_zext_rr_64_32, {{MOP_xuxtw64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_sext_rr_64_32, {{MOP_xsxtw64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {abstract::MOP_zext_rr_8_16, {{MOP_xuxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_sext_rr_8_16, {{MOP_xsxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_zext_rr_8_32, {{MOP_xuxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_sext_rr_8_32, {{MOP_xsxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_zext_rr_16_32, {{MOP_xuxth32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_sext_rr_16_32, {{MOP_xsxth32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {abstract::MOP_zext_rr_8_64, {{MOP_xuxtb32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_sext_rr_8_64, {{MOP_xsxtb64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_zext_rr_16_64, {{MOP_xuxth32, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_sext_rr_16_64, {{MOP_xsxth64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_zext_rr_32_64, {{MOP_xuxtw64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_sext_rr_32_64, {{MOP_xsxtw64, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {abstract::MOP_cvt_f32_u32, {{MOP_vcvtufr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_cvt_f64_u32, {{MOP_vcvtudr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_cvt_f32_u64, {{MOP_xvcvtufr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_cvt_f64_u64, {{MOP_xvcvtudr, {kAbtractReg, kAbtractReg}, 
{0, 1}}}}, + {abstract::MOP_cvt_f32_i32, {{MOP_vcvtfr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_cvt_f64_i32, {{MOP_vcvtdr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_cvt_f32_i64, {{MOP_xvcvtfr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_cvt_f64_i64, {{MOP_xvcvtdr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {abstract::MOP_cvt_u32_f32, {{MOP_vcvturf, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_cvt_u64_f32, {{MOP_xvcvturf, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_cvt_u32_f64, {{MOP_vcvturd, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_cvt_u64_f64, {{MOP_xvcvturd, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_cvt_i32_f32, {{MOP_vcvtrf, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_cvt_i64_f32, {{MOP_xvcvtrf, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_cvt_i32_f64, {{MOP_vcvtrd, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_cvt_i64_f64, {{MOP_xvcvtrd, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {abstract::MOP_cvt_ff_64_32, {{MOP_xvcvtdf, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_cvt_ff_32_64, {{MOP_xvcvtfd, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {abstract::MOP_str_8, {{MOP_wstrb, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {abstract::MOP_str_16, {{MOP_wstrh, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {abstract::MOP_str_32, {{MOP_wstr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {abstract::MOP_str_64, {{MOP_xstr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {abstract::MOP_load_8, {{MOP_wldrb, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {abstract::MOP_load_16, {{MOP_wldrh, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {abstract::MOP_load_32, {{MOP_wldr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {abstract::MOP_load_64, {{MOP_xldr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {abstract::MOP_str_f_8, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {abstract::MOP_str_f_16, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {abstract::MOP_str_f_32, {{MOP_sstr, 
{kAbtractReg, kAbtractMem}, {0, 1}}}}, + {abstract::MOP_str_f_64, {{MOP_dstr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {abstract::MOP_load_f_8, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {abstract::MOP_load_f_16, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {abstract::MOP_load_f_32, {{MOP_sldr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + {abstract::MOP_load_f_64, {{MOP_dldr, {kAbtractReg, kAbtractMem}, {0, 1}}}}, + + {abstract::MOP_add_8, {{MOP_waddrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_add_16, {{MOP_waddrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_add_32, {{MOP_waddrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_add_64, {{MOP_xaddrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_sub_8, {{MOP_wsubrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_sub_16, {{MOP_wsubrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_sub_32, {{MOP_wsubrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_sub_64, {{MOP_xsubrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_or_8, {{MOP_wiorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_or_16, {{MOP_wiorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_or_32, {{MOP_wiorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_or_64, {{MOP_xiorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_xor_8, {{MOP_weorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_xor_16, {{MOP_weorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_xor_32, {{MOP_weorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_xor_64, {{MOP_xeorrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_and_8, {{MOP_wandrrr, {kAbtractReg, 
kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_and_16, {{MOP_wandrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_and_32, {{MOP_wandrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_and_64, {{MOP_xandrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + + {abstract::MOP_and_f_8, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {abstract::MOP_and_f_16, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {abstract::MOP_and_f_32, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {abstract::MOP_and_f_64, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {abstract::MOP_add_f_8, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {abstract::MOP_add_f_16, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {abstract::MOP_add_f_32, {{MOP_sadd, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_add_f_64, {{MOP_dadd, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_sub_f_8, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {abstract::MOP_sub_f_16, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {abstract::MOP_sub_f_32, {{MOP_ssub, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_sub_f_64, {{MOP_dsub, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + + {abstract::MOP_shl_8, {{MOP_wlslrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_shl_16, {{MOP_wlslrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_shl_32, {{MOP_wlslrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_shl_64, {{MOP_xlslrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_ashr_8, {{MOP_wasrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_ashr_16, {{MOP_wasrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_ashr_32, {{MOP_wasrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + 
{abstract::MOP_ashr_64, {{MOP_xasrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_lshr_8, {{MOP_wlsrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_lshr_16, {{MOP_wlsrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_lshr_32, {{MOP_wlsrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + {abstract::MOP_lshr_64, {{MOP_xlsrrrr, {kAbtractReg, kAbtractReg, kAbtractReg}, {0, 1, 2}}}}, + + {abstract::MOP_neg_8, {{MOP_winegrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_neg_16, {{MOP_winegrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_neg_32, {{MOP_winegrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_neg_64, {{MOP_xinegrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_neg_f_8, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {abstract::MOP_neg_f_16, {{AArch64MOP_t::MOP_undef, {kAbtractNone}, {}}}}, + {abstract::MOP_neg_f_32, {{MOP_wfnegrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_neg_f_64, {{MOP_xfnegrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_not_8, {{MOP_wnotrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_not_16, {{MOP_wnotrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_not_32, {{MOP_wnotrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + {abstract::MOP_not_64, {{MOP_xnotrr, {kAbtractReg, kAbtractReg}, {0, 1}}}}, + + {abstract::MOP_comment, {{MOP_nop, {kAbtractNone}, {}}}}, +}; + +Operand *AArch64Standardize::GetInsnResult(Insn *insn) { + for (uint32 i = 0; i < insn->GetOperandSize(); ++i) { + if (insn->OpndIsDef(i)) { + return &(insn->GetOperand(i)); + } + } + return nullptr; +} + +Insn *AArch64Standardize::HandleTargetImm(Insn *insn, Insn *newInsn, uint32 idx, MOperator targetMop, uint8 order) { + const InsnDesc *md = &AArch64CG::kMd[targetMop]; + ImmOperand &immOpnd = static_cast(insn->GetOperand(idx)); + if (md->IsValidImmOpnd(immOpnd.GetValue())) { + newInsn->SetOperand(order, 
immOpnd); + } else { + Operand *resOpnd = GetInsnResult(insn); + CHECK_FATAL(resOpnd, "SelectTargetInsn: No result operand"); + AArch64CGFunc *a64func = static_cast(GetCgFunc()); + BB &saveCurBB = *GetCgFunc()->GetCurBB(); + a64func->GetDummyBB()->ClearInsns(); + GetCgFunc()->SetCurBB(*a64func->GetDummyBB()); + a64func->SelectCopyImm(*resOpnd, immOpnd, (resOpnd->GetSize() == k32BitSize) ? PTY_i32 : PTY_i64); + insn->GetBB()->InsertBeforeInsn(*a64func->GetDummyBB(), *insn); + GetCgFunc()->SetCurBB(saveCurBB); + newInsn = nullptr; + } + return newInsn; +} + +void AArch64Standardize::SelectTargetInsn(Insn *insn) { + MOperator abstractMop = insn->GetMachineOpcode(); + CHECK_FATAL(abstractMop < abstract::kMopLast, "SelectTargetInsn: abstract instruction opcode out-of-bound"); + AbstractIR2Target &entry = abstract2TargetTable[abstractMop]; + CHECK_FATAL(entry.abstractMop == abstractMop, "SelectTargetInsn: Invalid abstract instruction"); + + for (uint32 j = 0; j < entry.targetMap.size(); ++j) { + TargetMopGen &targetMopGen = entry.targetMap[j]; + MOperator targetMop = targetMopGen.targetMop; + if (targetMop == MOP_nop) { + continue; + } + Insn *newInsn = &GetCgFunc()->GetInsnBuilder()->BuildInsn(targetMop, AArch64CG::kMd[targetMop]); + newInsn->ResizeOpnds(targetMopGen.mappingOrder.size()); + for (uint32 i = 0; i < targetMopGen.mappingOrder.size(); ++i) { + uint8 order = targetMopGen.mappingOrder[i]; + switch (targetMopGen.targetOpndAction[i]) { + case kAbtractReg: + case kAbtractMem: + newInsn->SetOperand(order, insn->GetOperand(i)); + break; + case kAbtractImm: { + newInsn = HandleTargetImm(insn, newInsn, i, targetMop, order); + break; + } + case kAbtractNone: + break; + } + } + if (newInsn) { + insn->GetBB()->InsertInsnBefore(*insn, *newInsn); + } + } + insn->GetBB()->RemoveInsn(*insn); +} + +Operand *AArch64Standardize::UpdateRegister(Operand &opnd, std::map ®Map, bool allocate) { + if (!opnd.IsRegister()) { + return &opnd; + } + RegOperand ®Opnd = static_cast(opnd); 
+ if (regOpnd.IsPhysicalRegister()) { + if (allocate && opnd.GetSize() < k32BitSize) { + opnd.SetSize(k32BitSize); + } + return &opnd; + } + if (!allocate && opnd.GetSize() >= k32BitSize) { + return &opnd; + } + regno_t regno = regOpnd.GetRegisterNumber(); + regno_t mappedRegno; + auto regItem = regMap.find(regno); + if (regItem == regMap.end()) { + if (allocate) { + return &opnd; + } + regno_t vreg = GetCgFunc()->NewVReg(regOpnd.GetRegisterType(), k4ByteSize); + regMap[regno] = mappedRegno = vreg; + } else { + mappedRegno = regItem->second; + } + if (!allocate) { + return &opnd; + } + return &GetCgFunc()->GetOrCreateVirtualRegisterOperand(mappedRegno); +} + +void AArch64Standardize::TraverseOperands(Insn *insn, std::map ®Map, bool allocate) { + for (uint32 i = 0; i < insn->GetOperandSize(); i++) { + Operand &opnd = insn->GetOperand(i); + if (opnd.IsList()) { + MapleList &list = static_cast(opnd).GetOperands(); + for (uint j = 0; j < list.size(); ++j) { + RegOperand *lopnd = list.front(); + list.pop_front(); + list.push_back(static_cast(UpdateRegister(*lopnd, regMap, allocate))); + } + } else if (opnd.IsMemoryAccessOperand()) { + MemOperand &mopnd = static_cast(opnd); + Operand *base = mopnd.GetBaseRegister(); + if (base) { + RegOperand *ropnd = static_cast(UpdateRegister(*base, regMap, allocate)); + mopnd.SetBaseRegister(*ropnd); + } + } else { + insn->SetOperand(i, *UpdateRegister(opnd, regMap, allocate)); + } + } +} + +void AArch64Standardize::Legalize() { + std::map regMap; + FOR_ALL_BB(bb, GetCgFunc()) { + FOR_BB_INSNS(insn, bb) { + TraverseOperands(insn, regMap, false); + } + } + FOR_ALL_BB(bb, GetCgFunc()) { + FOR_BB_INSNS(insn, bb) { + TraverseOperands(insn, regMap, true); + } + } +} + +void AArch64Standardize::StdzMov(Insn &insn) { + SelectTargetInsn(&insn); +} + +void AArch64Standardize::StdzStrLdr(Insn &insn) { + SelectTargetInsn(&insn); +} + +void AArch64Standardize::StdzBasicOp(Insn &insn) { + SelectTargetInsn(&insn); +} + +void 
AArch64Standardize::StdzUnaryOp(Insn &insn) { + SelectTargetInsn(&insn); +} + +void AArch64Standardize::StdzCvtOp(Insn &insn) { + SelectTargetInsn(&insn); +} + +void AArch64Standardize::StdzShiftOp(Insn &insn) { + SelectTargetInsn(&insn); +} +void AArch64Standardize::StdzCommentOp(Insn &insn) { + SelectTargetInsn(&insn); +} + +} diff --git a/src/mapleall/maple_be/src/cg/cg.cpp b/src/mapleall/maple_be/src/cg/cg.cpp index 9fe0fc7afb0bd583bde9a8ca2186b5c9b20ab150..a11de75c57012d55bad4a12b376c79c9ae2e63d6 100644 --- a/src/mapleall/maple_be/src/cg/cg.cpp +++ b/src/mapleall/maple_be/src/cg/cg.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -20,6 +20,16 @@ using namespace maple; #define JAVALANG (mirModule->IsJavaModule()) +uint32 VregInfo::virtualRegCount = kBaseVirtualRegNO; +uint32 VregInfo::maxRegCount = 0; +std::vector VregInfo::vRegTable; +std::unordered_map VregInfo::vRegOperandTable; +/* There are two builders, cgfunc builder (original code selector) and irbuilder (abstract). + * This is to prevent conflict between the two for VregInfo as for arm64 both co-exists. + * When switching to irbuilder completely, then this bool can go away. + */ +bool VregInfo::initialized = false; + void Globals::SetTarget(CG &target) { cg = ⌖ } diff --git a/src/mapleall/maple_be/src/cg/cg_cfg.cpp b/src/mapleall/maple_be/src/cg/cg_cfg.cpp index d29ea8c596927bc728469633156b39ab02acf4bd..b850973b2a456f5c1f17faaecf2d4bcb39bdbcf4 100644 --- a/src/mapleall/maple_be/src/cg/cg_cfg.cpp +++ b/src/mapleall/maple_be/src/cg/cg_cfg.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. 
* * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -997,8 +997,10 @@ void CGCFG::ReverseCriticalEdge(BB &cbb) { bool CgHandleCFG::PhaseRun(maplebe::CGFunc &f) { CGCFG *cfg = f.GetMemoryPool()->New(f); f.SetTheCFG(cfg); + cfg->MarkLabelTakenBB(); /* build control flow graph */ f.GetTheCFG()->BuildCFG(); + f.HandleFuncCfg(cfg); return false; } MAPLE_TRANSFORM_PHASE_REGISTER(CgHandleCFG, handlecfg) diff --git a/src/mapleall/maple_be/src/cg/cg_irbuilder.cpp b/src/mapleall/maple_be/src/cg/cg_irbuilder.cpp index 66309c4ab893bc2328d0b19fe9cde4b46c4cb706..cdd5a70b9af885143890b416ca57b165fc3b67d6 100644 --- a/src/mapleall/maple_be/src/cg/cg_irbuilder.cpp +++ b/src/mapleall/maple_be/src/cg/cg_irbuilder.cpp @@ -91,12 +91,16 @@ ImmOperand &OperandBuilder::CreateImm(const MIRSymbol &symbol, int64 offset, int *alloc.New(symbol, offset, relocs, false); } +OfstOperand &OperandBuilder::CreateOfst(int64 offset, uint32 size, MemPool *mp) { + return mp ? *mp->New(offset, size) : *alloc.New(offset, size); +} + MemOperand &OperandBuilder::CreateMem(uint32 size, MemPool *mp) { return mp ? *mp->New(size) : *alloc.New(size); } MemOperand &OperandBuilder::CreateMem(RegOperand &baseOpnd, int64 offset, uint32 size, MemPool *mp) { - ImmOperand &ofstOperand = CreateImm(baseOpnd.GetSize(), offset); + OfstOperand &ofstOperand = CreateOfst(offset, baseOpnd.GetSize()); if (mp != nullptr) { return *mp->New(size, baseOpnd, ofstOperand); } @@ -119,13 +123,16 @@ MemOperand &OperandBuilder::CreateMem(uint32 size, RegOperand &baseOpnd, ImmOper } RegOperand &OperandBuilder::CreateVReg(uint32 size, RegType type, MemPool *mp) { - virtualRegNum++; - regno_t vRegNO = kBaseVirtualRegNO + virtualRegNum; - return mp ? *mp->New(vRegNO, size, type) : *alloc.New(vRegNO, size, type); + regno_t vRegNO = virtualReg.GetNextVregNO(type, size / k8BitSize); + RegOperand &rp = mp ? 
*mp->New(vRegNO, size, type) : *alloc.New(vRegNO, size, type); + virtualReg.vRegOperandTable[vRegNO] = &rp; + return rp; } RegOperand &OperandBuilder::CreateVReg(regno_t vRegNO, uint32 size, RegType type, MemPool *mp) { - return mp ? *mp->New(vRegNO, size, type) : *alloc.New(vRegNO, size, type); + RegOperand &rp = mp ? *mp->New(vRegNO, size, type) : *alloc.New(vRegNO, size, type); + virtualReg.vRegOperandTable[vRegNO] = &rp; + return rp; } RegOperand &OperandBuilder::CreatePReg(regno_t pRegNO, uint32 size, RegType type, MemPool *mp) { diff --git a/src/mapleall/maple_be/src/cg/cg_option.cpp b/src/mapleall/maple_be/src/cg/cg_option.cpp index ae2ae8de59fe9b8114fc2b2e1b5dc8caa566aa1c..37ea4d96654cb1d9540d3571928ca098d175b1ee 100644 --- a/src/mapleall/maple_be/src/cg/cg_option.cpp +++ b/src/mapleall/maple_be/src/cg/cg_option.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -122,6 +122,7 @@ bool CGOptions::doCalleeToSpill = false; bool CGOptions::doRegSavesOpt = false; bool CGOptions::useSsaPreSave = false; bool CGOptions::useSsuPreRestore = false; +bool CGOptions::useNewCg = false; bool CGOptions::replaceASM = false; bool CGOptions::generalRegOnly = false; bool CGOptions::fastMath = false; @@ -534,6 +535,10 @@ bool CGOptions::SolveOptions(bool isDebug) { opts::cg::ssupreRestore ? EnableSsuPreRestore() : DisableSsuPreRestore(); } + if (opts::cg::newCg.IsEnabledByUser()) { + opts::cg::newCg ? 
EnableNewCg() : DisableNewCg(); + } + if (opts::cg::lsraBb.IsEnabledByUser()) { SetLSRABBOptSize(opts::cg::lsraBb); } diff --git a/src/mapleall/maple_be/src/cg/cg_options.cpp b/src/mapleall/maple_be/src/cg/cg_options.cpp index d1cbe7892aa7f531c444e20e0365fd338f89135c..484fe7b33aaf174182e32ca0ce8f5d0f99677386 100644 --- a/src/mapleall/maple_be/src/cg/cg_options.cpp +++ b/src/mapleall/maple_be/src/cg/cg_options.cpp @@ -184,6 +184,12 @@ maplecl::Option ssupreRestore({"--ssupre-restore"}, {cgCategory}, maplecl::DisableWith("--no-ssupre-restore")); +maplecl::Option newCg({"--newcg"}, + " --newcg \tUse new CG infrastructure\n" + " --no-newcg\n", + {cgCategory}, + maplecl::DisableWith("--no-newcg")); + maplecl::Option prepeep({"--prepeep"}, " --prepeep \tPerform peephole optimization before RA\n" " --no-prepeep\n", diff --git a/src/mapleall/maple_be/src/cg/cg_ssa.cpp b/src/mapleall/maple_be/src/cg/cg_ssa.cpp index 4b4aa8f09ef73946c965233149c60689094544e0..4f71d655749b8cd51d6eb46219a2e765aa7e1087 100644 --- a/src/mapleall/maple_be/src/cg/cg_ssa.cpp +++ b/src/mapleall/maple_be/src/cg/cg_ssa.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -123,7 +123,7 @@ void CGSSAInfo::RenameBB(BB &bb) { } AddRenamedBB(bb.GetId()); /* record version stack size */ - size_t tempSize = vRegStk.empty() ? allSSAOperands.size() + cgFunc->GetFirstMapleIrVRegNO() + 1 : + size_t tempSize = vRegStk.empty() ? 
allSSAOperands.size() + kBaseVirtualRegNO + 1 : vRegStk.rbegin()->first + 1; std::vector oriStackSize(tempSize, -1); for (auto it : vRegStk) { diff --git a/src/mapleall/maple_be/src/cg/cgbb.cpp b/src/mapleall/maple_be/src/cg/cgbb.cpp index 1a4b3f72163b5709189dca7ef1998de09e16621b..2cfa7712b0dbdefe44854ac0f42b7f64e42c3f6b 100644 --- a/src/mapleall/maple_be/src/cg/cgbb.cpp +++ b/src/mapleall/maple_be/src/cg/cgbb.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -25,6 +25,7 @@ const std::string BB::bbNames[BB::kBBLast] = { "BB_goto", "BB_igoto", "BB_ret", + "BB_noret", "BB_intrinsic", "BB_rangegoto", "BB_throw" @@ -180,6 +181,33 @@ void BB::InsertAtBeginning(BB &bb) { bb.firstInsn = bb.lastInsn = nullptr; } +void BB::InsertBeforeInsn(BB &fromBB, Insn &beforeInsn) { + if (fromBB.firstInsn == nullptr) { /* nothing to add */ + return; + } + + BB *toBB = beforeInsn.GetBB(); + FOR_BB_INSNS(insn, &fromBB) { + insn->SetBB(toBB); + } + + if (toBB->GetFirstInsn() == nullptr) { + toBB->SetFirstInsn(fromBB.GetFirstInsn()); + toBB->SetLastInsn(fromBB.GetLastInsn()); + } else { + if (beforeInsn.GetPrev()) { + beforeInsn.GetPrev()->SetNext(fromBB.GetFirstInsn()); + } else { + toBB->SetFirstInsn(fromBB.GetFirstInsn()); + } + fromBB.GetFirstInsn()->SetPrev(beforeInsn.GetPrev()); + beforeInsn.SetPrev(fromBB.GetLastInsn()); + fromBB.GetLastInsn()->SetNext(&beforeInsn); + } + fromBB.SetFirstInsn(nullptr); + fromBB.SetLastInsn(nullptr); +} + /* append all insns from bb into this bb */ void BB::InsertAtEnd(BB &bb) { if (bb.firstInsn == nullptr) { /* nothing to add */ diff --git a/src/mapleall/maple_be/src/cg/cgfunc.cpp b/src/mapleall/maple_be/src/cg/cgfunc.cpp index 
caa53fe3104cfce19cc5c65919ac7713890fd17e..4c9d9e50e77fe0525db36df5c5fdf437067bdc81 100644 --- a/src/mapleall/maple_be/src/cg/cgfunc.cpp +++ b/src/mapleall/maple_be/src/cg/cgfunc.cpp @@ -1415,11 +1415,14 @@ void InitHandleStmtFactory() { RegisterFactoryFunction(OP_asm, HandleAsm); } +/* member of CGFunc */ +void CGFunc::InitFactory() { + InitHandleExprFactory(); +} + CGFunc::CGFunc(MIRModule &mod, CG &cg, MIRFunction &mirFunc, BECommon &beCommon, MemPool &memPool, StackMemPool &stackMp, MapleAllocator &allocator, uint32 funcId) - : vRegTable(allocator.Adapter()), - bbVec(allocator.Adapter()), - vRegOperandTable(allocator.Adapter()), + : bbVec(allocator.Adapter()), pRegSpillMemOperands(allocator.Adapter()), spillRegMemOperands(allocator.Adapter()), reuseSpillLocMem(allocator.Adapter()), @@ -1460,18 +1463,19 @@ CGFunc::CGFunc(MIRModule &mod, CG &cg, MIRFunction &mirFunc, BECommon &beCommon, SetHasAlloca(func.HasVlaOrAlloca()); dummyBB = CreateNewBB(); - vRegCount = firstMapleIrVRegNO + func.GetPregTab()->Size(); - firstNonPregVRegNO = vRegCount; + vReg.SetCount(kBaseVirtualRegNO + func.GetPregTab()->Size()); + firstNonPregVRegNO = vReg.GetCount(); /* maximum register count initial be increased by 1024 */ - maxRegCount = vRegCount + 1024; + SetMaxRegNum(vReg.GetCount() + 1024); if (func.GetMayWriteToAddrofStack()) { SetStackProtectInfo(kAddrofStack); } + vReg.vRegOperandTable.clear(); insnBuilder = memPool.New(memPool); opndBuilder = memPool.New(memPool, func.GetPregTab()->Size()); - vRegTable.resize(maxRegCount); + vReg.VRegTableResize(GetMaxRegNum()); /* func.GetPregTab()->_preg_table[0] is nullptr, so skip it */ ASSERT(func.GetPregTab()->PregFromPregIdx(0) == nullptr, "PregFromPregIdx(0) must be nullptr"); for (size_t i = 1; i < func.GetPregTab()->Size(); ++i) { @@ -1777,6 +1781,7 @@ void CGFunc::CreateLmbcFormalParamInfo() { AssignLmbcFormalParams(); } + void CGFunc::GenerateInstruction() { InitHandleExprFactory(); InitHandleStmtFactory(); @@ -2093,6 +2098,7 @@ 
void CGFunc::HandleFunction() { GenSaveMethodInfoCode(*firstBB); /* build control flow graph */ theCFG = memPool->New(*this); + theCFG->MarkLabelTakenBB(); theCFG->BuildCFG(); RemoveUnreachableBB(); AddCommonExitBB(); @@ -2101,7 +2107,6 @@ void CGFunc::HandleFunction() { } MarkCleanupBB(); DetermineReturnTypeofCall(); - theCFG->MarkLabelTakenBB(); theCFG->UnreachCodeAnalysis(); if (mirModule.GetSrcLang() == kSrcLangC) { theCFG->WontExitAnalysis(); @@ -2332,6 +2337,15 @@ bool CgHandleFunction::PhaseRun(maplebe::CGFunc &f) { } MAPLE_TRANSFORM_PHASE_REGISTER(CgHandleFunction, handlefunction) +bool CgPatchLongBranch::PhaseRun(maplebe::CGFunc &f) { + f.PatchLongBranch(); + if (!f.GetCG()->GetCGOptions().DoEmitCode() || f.GetCG()->GetCGOptions().DoDumpCFG()) { + f.DumpCFG(); + } + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgPatchLongBranch, patchlongbranch) + bool CgFixCFLocOsft::PhaseRun(maplebe::CGFunc &f) { if (f.GetCG()->GetCGOptions().WithDwarf()) { f.DBGFixCallFrameLocationOffsets(); diff --git a/src/mapleall/maple_be/src/cg/insn.cpp b/src/mapleall/maple_be/src/cg/insn.cpp index 4739452b98c3619a7b0112c7a6183d109b14dbe7..d826f3591744525b3aeb45348a0e21db168bcce1 100644 --- a/src/mapleall/maple_be/src/cg/insn.cpp +++ b/src/mapleall/maple_be/src/cg/insn.cpp @@ -299,7 +299,7 @@ void Insn::SetMOP(const InsnDesc &idesc) { } void Insn::Dump() const { -ASSERT(md != nullptr, "md should not be nullptr"); + ASSERT(md != nullptr, "md should not be nullptr"); LogInfo::MapleLogger() << "< " << GetId() << " > "; LogInfo::MapleLogger() << md->name << "(" << mOp << ")"; diff --git a/src/mapleall/maple_be/src/cg/isel.cpp b/src/mapleall/maple_be/src/cg/isel.cpp index a27b15acfda5342b835906eab0ae259dd83e7884..eda698f3a10cf571b98531c159d7fb44b75cadb6 100644 --- a/src/mapleall/maple_be/src/cg/isel.cpp +++ b/src/mapleall/maple_be/src/cg/isel.cpp @@ -18,8 +18,11 @@ #include #include "factory.h" #include "cg.h" +#include "cgfunc.h" namespace maplebe { +/* Does not support size larget 
than 64 bits */ +#define PTY128MOD(pty) ((pty) = (((pty) == PTY_i128) ? PTY_i64 : (((pty) == PTY_u128) ? PTY_u64 : (pty)))) /* register, imm , memory, cond */ #define DEF_FAST_ISEL_MAPPING_INT(SIZE) \ MOperator fastIselMapI##SIZE[Operand::OperandType::kOpdPhi][Operand::OperandType::kOpdPhi] = { \ @@ -101,7 +104,7 @@ static MOperator GetFastCvtMopI(uint32 fromSize, uint32 toSize, bool isSigned) { if (fromSize < k8BitSize || fromSize > k64BitSize) { CHECK_FATAL(false, "unsupport type"); } - /* Extend: fromSize < toSize */ + /* Extend/Truncate: fromSize < toSize */ auto tableDriven = fastCvtMappingTableI.find({fromSize, toSize}); if (tableDriven == fastCvtMappingTableI.end()) { CHECK_FATAL(false, "unsupport cvt"); @@ -190,6 +193,7 @@ void HandleLabel(StmtNode &stmt, const MPISel &iSel) { ASSERT(stmt.GetOpCode() == OP_label, "error"); auto &label = static_cast(stmt); BB *newBB = cgFunc->StartNewBBImpl(false, label); + newBB->SetKind(BB::kBBFallthru); newBB->AddLabel(label.GetLabelIdx()); cgFunc->SetLab2BBMap(static_cast(newBB->GetLabIdx()), *newBB); cgFunc->SetCurBB(*newBB); @@ -228,6 +232,7 @@ void HandleRangeGoto(StmtNode &stmt, MPISel &iSel) { Operand *srcOpnd = iSel.HandleExpr(rangeGotoNode, *srcNode); cgFunc->SetCurBBKind(BB::kBBRangeGoto); iSel.SelectRangeGoto(rangeGotoNode, *srcOpnd); + cgFunc->SetCurBB(*cgFunc->StartNewBB(rangeGotoNode)); } void HandleIgoto(StmtNode &stmt, MPISel &iSel) { @@ -245,10 +250,9 @@ void HandleReturn(StmtNode &stmt, MPISel &iSel) { auto &retNode = static_cast(stmt); ASSERT(retNode.NumOpnds() <= 1, "NYI return nodes number > 1"); if (retNode.NumOpnds() != 0) { - Operand *opnd = iSel.HandleExpr(retNode, *retNode.Opnd(0)); - iSel.SelectReturn(retNode, *opnd); + iSel.SelectReturn(retNode); } - iSel.SelectReturn(); + iSel.SelectReturn(retNode.NumOpnds() == 0); /* return stmt will jump to the ret BB, so curBB is gotoBB */ cgFunc->SetCurBBKind(BB::kBBGoto); cgFunc->SetCurBB(*cgFunc->StartNewBB(retNode)); @@ -280,8 +284,7 @@ void 
HandleCondbr(StmtNode &stmt, MPISel &iSel) { ASSERT(condNode != nullptr, "expect first operand of cond br"); /* select cmpOp Insn and get the result "opnd0". However, the opnd0 is not used * in most backend architectures */ - Operand *opnd0 = iSel.HandleExpr(stmt, *condNode); - iSel.SelectCondGoto(condGotoNode, *condNode, *opnd0); + iSel.SelectCondGoto(condGotoNode, *condNode); cgFunc->SetCurBB(*cgFunc->StartNewBB(condGotoNode)); } @@ -371,7 +374,7 @@ Operand *HandleTrunc(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { return iSel.SelectCvt(parent, static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0))); } -Operand *HandleConstVal(const BaseNode &parent [[maybe_unused]], BaseNode &expr, const MPISel &iSel) { +Operand *HandleConstVal(const BaseNode &parent, BaseNode &expr, const MPISel &iSel) { auto &constValNode = static_cast(expr); MIRConst *mirConst = constValNode.GetConstVal(); ASSERT(mirConst != nullptr, "get constval of constvalnode failed"); @@ -380,10 +383,10 @@ Operand *HandleConstVal(const BaseNode &parent [[maybe_unused]], BaseNode &expr, return iSel.SelectIntConst(*mirIntConst, constValNode.GetPrimType()); } else if (mirConst->GetKind() == kConstDoubleConst) { auto *mirDoubleConst = safe_cast(mirConst); - return iSel.SelectFloatingConst(*mirDoubleConst, constValNode.GetPrimType()); + return iSel.SelectFloatingConst(*mirDoubleConst, constValNode.GetPrimType(), parent); } else if (mirConst->GetKind() == kConstFloatConst) { auto *mirFloatConst = safe_cast(mirConst); - return iSel.SelectFloatingConst(*mirFloatConst, constValNode.GetPrimType()); + return iSel.SelectFloatingConst(*mirFloatConst, constValNode.GetPrimType(), parent); } else { CHECK_FATAL(false, "NIY"); } @@ -438,7 +441,7 @@ Operand *HandleCmp(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { } Operand *HandleAbs(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { - return iSel.SelectAbs(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0))); + return 
iSel.SelectAbs(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); } Operand *HandleAlloca(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { @@ -480,6 +483,63 @@ Operand *HandleRetype(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { Operand *HandleIntrinOp(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { auto &intrinsicopNode = static_cast(expr); switch (intrinsicopNode.GetIntrinsic()) { + case INTRN_C_sin: + return iSel.SelectCsin(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_sinh: + return iSel.SelectCsinh(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_asin: + return iSel.SelectCasin(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_cos: + return iSel.SelectCcos(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_cosh: + return iSel.SelectCcosh(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_acos: + return iSel.SelectCacos(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_atan: + return iSel.SelectCatan(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_exp: + return iSel.SelectCexp(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_log: + return iSel.SelectClog(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_log10: + return iSel.SelectClog10(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + + case INTRN_C_sinf: + return iSel.SelectCsinf(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_sinhf: + return iSel.SelectCsinhf(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_asinf: + return iSel.SelectCasinf(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_cosf: + return iSel.SelectCcosf(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + 
case INTRN_C_coshf: + return iSel.SelectCcoshf(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_acosf: + return iSel.SelectCacosf(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_atanf: + return iSel.SelectCatanf(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_expf: + return iSel.SelectCexpf(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_logf: + return iSel.SelectClogf(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_log10f: + return iSel.SelectClog10f(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + + case INTRN_C_ffs: + return iSel.SelectCffs(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_memcmp: + return iSel.SelectCmemcmp(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_strlen: + return iSel.SelectCstrlen(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_strcmp: + return iSel.SelectCstrcmp(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_strncmp: + return iSel.SelectCstrncmp(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_strchr: + return iSel.SelectCstrchr(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_strrchr: + return iSel.SelectCstrrchr(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_rev16_2: case INTRN_C_rev_4: case INTRN_C_rev_8: @@ -490,8 +550,6 @@ Operand *HandleIntrinOp(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { case INTRN_C_ctz32: case INTRN_C_ctz64: return iSel.SelectCctz(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); - case INTRN_C_exp: - return iSel.SelectCexp(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); default: ASSERT(false, "NIY, unsupported intrinsicop."); return nullptr; @@ -578,8 +636,14 @@ Operand 
*MPISel::HandleExpr(const BaseNode &parent, BaseNode &expr) { void MPISel::DoMPIS() { isel::InitHandleStmtFactory(); isel::InitHandleExprFactory(); + GetCurFunc()->Link2ISel(this); + SrcPosition lastLocPos = SrcPosition(); + SrcPosition lastMplPos = SrcPosition(); StmtNode *secondStmt = HandleFuncEntry(); for (StmtNode *stmt = secondStmt; stmt != nullptr; stmt = stmt->GetNext()) { + /* insert Insn for .loc before cg for the stmt */ + GetCurFunc()->GenerateLoc(stmt, lastLocPos, lastMplPos); + auto function = CreateProductFunction(stmt->GetOpCode()); CHECK_FATAL(function != nullptr, "unsupported opCode or has been lowered before"); function(*stmt, *this); @@ -700,14 +764,21 @@ void MPISel::SelectDassign(const DassignNode &stmt, Operand &opndRhs) { /* Generate Insn */ if (rhsType == PTY_agg) { /* Agg Type */ - SelectAggDassign(symbolInfo, symbolMem, opndRhs); + SelectAggDassign(symbolInfo, symbolMem, opndRhs, stmt); return; } PrimType memType = symbolInfo.primType; if (memType == PTY_agg) { memType = PTY_a64; } - SelectCopy(symbolMem, opndRhs, memType, rhsType); + if (IsSymbolRequireIndirection(*symbol)) { + RegOperand ®Opnd = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + SelectCopy(regOpnd, symbolMem, PTY_a64, PTY_a64); + MemOperand &stMem = GetOrCreateMemOpndFromSymbol(*symbol, stmt.GetFieldID(), ®Opnd); + SelectCopy(stMem, opndRhs, memType, rhsType); + } else { + SelectCopy(symbolMem, opndRhs, memType, rhsType); + } return; } @@ -776,6 +847,7 @@ Operand *MPISel::SelectShift(const BinaryNode &node, Operand &opnd0, void MPISel::SelectShift(Operand &resOpnd, Operand &opnd0, Operand &opnd1, Opcode shiftDirect, PrimType opnd0Type, PrimType opnd1Type) { + PTY128MOD(opnd0Type); if (opnd1.IsIntImmediate() && static_cast(opnd1).GetValue() == 0) { SelectCopy(resOpnd, opnd0, opnd0Type); return; @@ -804,8 +876,10 @@ void MPISel::SelectShift(Operand &resOpnd, Operand &opnd0, Operand &opnd1, Opcod void MPISel::SelectRegassign(RegassignNode &stmt, Operand &opnd0) 
{ PrimType rhsType = stmt.Opnd(0)->GetPrimType(); + PTY128MOD(rhsType); PregIdx pregIdx = stmt.GetRegIdx(); PrimType regType = stmt.GetPrimType(); + PTY128MOD(regType); RegOperand ®Opnd = cgFunc->GetOpndBuilder()->CreateVReg(cgFunc->GetVirtualRegNOFromPseudoRegIdx(pregIdx), GetPrimTypeBitSize(regType), cgFunc->GetRegTyFromPrimTy(regType)); SelectCopy(regOpnd, opnd0, regType, rhsType); @@ -850,17 +924,28 @@ Operand *MPISel::SelectDread(const BaseNode &parent [[maybe_unused]], const Addr CHECK_FATAL(primType == maple::PTY_agg, "NIY"); return &symbolMem; } - /* for BasicType, load symbolVal to register. */ - RegOperand ®Opnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), - cgFunc->GetRegTyFromPrimTy(primType)); /* Generate Insn */ - SelectCopy(regOpnd, symbolMem, primType, symbolType); - return ®Opnd; + if (IsSymbolRequireIndirection(*symbol)) { + RegOperand ®Opnd = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + SelectCopy(regOpnd, symbolMem, PTY_a64, PTY_a64); + RegOperand ®Opnd1 = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + MemOperand &ldMem = GetOrCreateMemOpndFromSymbol(*symbol, expr.GetFieldID(), ®Opnd); + SelectCopy(regOpnd1, ldMem, primType, symbolType); + return ®Opnd1; + } else { + /* for BasicType, load symbolVal to register. 
*/ + RegOperand ®Opnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + SelectCopy(regOpnd, symbolMem, primType, symbolType); + return ®Opnd; + } } Operand *MPISel::SelectAdd(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent [[maybe_unused]]) { PrimType primType = node.GetPrimType(); + PTY128MOD(primType); RegOperand &resReg = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType)); RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType()); @@ -920,7 +1005,7 @@ void MPISel::SelectExtractbits(RegOperand &resOpnd, RegOperand &opnd0, uint8 bit } Operand *MPISel::SelectExtractbits(const BaseNode &parent [[maybe_unused]], - const ExtractbitsNode &node, Operand &opnd0) { + ExtractbitsNode &node, Operand &opnd0) { PrimType fromType = node.Opnd(0)->GetPrimType(); PrimType toType = node.GetPrimType(); uint8 bitSize = node.GetBitsSize(); @@ -943,31 +1028,62 @@ Operand *MPISel::SelectExtractbits(const BaseNode &parent [[maybe_unused]], } Operand *MPISel::SelectCvt(const BaseNode &parent [[maybe_unused]], const TypeCvtNode &node, Operand &opnd0) { - PrimType fromType = node.Opnd(0)->GetPrimType(); + PrimType fromType = node.FromType(); + PTY128MOD(fromType); PrimType toType = node.GetPrimType(); + PTY128MOD(toType); if (fromType == toType) { return &opnd0; } + RegOperand *regOpnd0; + if (!opnd0.IsRegister()) { + RegOperand &result = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(fromType), + cgFunc->GetRegTyFromPrimTy(fromType)); + SelectCopy(result, opnd0, fromType, fromType); + regOpnd0 = &result; + } else { + regOpnd0 = &static_cast(opnd0); + } RegOperand *resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), cgFunc->GetRegTyFromPrimTy(toType)); if (IsPrimitiveInteger(toType) && IsPrimitiveInteger(fromType)) { - SelectIntCvt(*resOpnd, opnd0, toType, fromType); + 
SelectIntCvt(*resOpnd, *regOpnd0, toType, fromType); } else if (IsPrimitiveFloat(toType) && IsPrimitiveInteger(fromType)) { - SelectCvtInt2Float(*resOpnd, opnd0, toType, fromType); + SelectCvtInt2Float(*resOpnd, *regOpnd0, toType, fromType); } else if (IsPrimitiveFloat(toType) && IsPrimitiveFloat(fromType)) { - SelectFloatCvt(*resOpnd, opnd0, toType, fromType); + SelectFloatCvt(*resOpnd, *regOpnd0, toType, fromType); } else if (IsPrimitiveInteger(toType) && IsPrimitiveFloat(fromType)) { - SelectCvtFloat2Int(*resOpnd, opnd0, toType, fromType); + SelectCvtFloat2Int(*resOpnd, *regOpnd0, toType, fromType); } else { CHECK_FATAL(false, "NIY cvt"); } return resOpnd; } - void MPISel::SelectCvtFloat2Int(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType) { uint32 toSize = GetPrimTypeBitSize(toType); bool isSigned = !IsPrimitiveUnsigned(toType); +#if TARGAARCH64 + /* + * Due to fp precision, should use one insn to perform cvt. + */ + MOperator mOp = abstract::MOP_undef; + switch (fromType) { + case PTY_f64: + mOp = (toSize <= k32BitSize) ? ((isSigned) ? MOP_cvt_i32_f64 : MOP_cvt_u32_f64) : + ((isSigned) ? MOP_cvt_i64_f64 : MOP_cvt_u64_f64); + break; + case PTY_f32: + mOp = (toSize <= k32BitSize) ? ((isSigned) ? MOP_cvt_i32_f32 : MOP_cvt_u32_f32) : + ((isSigned) ? MOP_cvt_i64_f32 : MOP_cvt_u64_f32); + break; + default: + CHECK_FATAL(false, "NYI"); + } + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + (void)insn.AddOpndChain(resOpnd).AddOpndChain(opnd0); + cgFunc->GetCurBB()->AppendInsn(insn); +#else PrimType newToType = toType; // cvt f64/32 -> u16 / u8 -> cvt f u32 + cvt u32 -> u8 if (toSize < k32BitSize) { @@ -978,9 +1094,9 @@ void MPISel::SelectCvtFloat2Int(RegOperand &resOpnd, Operand &opnd0, PrimType to SelectFloatCvt(tmpFloatOpnd, opnd0, newToType, fromType); MOperator mOp = abstract::MOP_undef; if (newToSize == k32BitSize) { - mOp = isSigned ? 
abstract::MOP_cvt_rf_i32 : abstract::MOP_cvt_rf_u32; + mOp = isSigned ? abstract::MOP_cvt_i32_f32 : abstract::MOP_cvt_u32_f32; } else if (newToSize == k64BitSize) { - mOp = isSigned ? abstract::MOP_cvt_rf_i64 : abstract::MOP_cvt_rf_u64; + mOp = isSigned ? abstract::MOP_cvt_i64_f64 : abstract::MOP_cvt_u64_f64; } else { CHECK_FATAL(false, "niy"); } @@ -993,18 +1109,39 @@ void MPISel::SelectCvtFloat2Int(RegOperand &resOpnd, Operand &opnd0, PrimType to SelectIntCvt(resOpnd, tmpIntOpnd, toType, newToType); } cgFunc->GetCurBB()->AppendInsn(insn); +#endif } void MPISel::SelectCvtInt2Float(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType) { uint32 fromSize = GetPrimTypeBitSize(fromType); bool isSigned = !IsPrimitiveUnsigned(fromType); +#if TARGAARCH64 + /* Due to fp precision, convert is done with one instruction */ + MOperator mOp = abstract::MOP_undef; + switch (toType) { + case PTY_f64: + mOp = (fromSize <= k32BitSize) ? ((isSigned) ? MOP_cvt_f64_i32 : MOP_cvt_f64_u32) : + ((isSigned) ? MOP_cvt_f64_i64 : MOP_cvt_f64_u64); + break; + case PTY_f32: + mOp = (fromSize <= k32BitSize) ? ((isSigned) ? MOP_cvt_f32_i32 : MOP_cvt_f32_u32) : + ((isSigned) ? MOP_cvt_f32_i64 : MOP_cvt_f32_u64); + break; + default: + CHECK_FATAL(false, "NYI"); + } + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, fromType); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + (void)insn.AddOpndChain(resOpnd).AddOpndChain(regOpnd0); + cgFunc->GetCurBB()->AppendInsn(insn); +#else MOperator mOp = abstract::MOP_undef; PrimType newFromType = PTY_begin; if (fromSize == k32BitSize) { - mOp = isSigned ? abstract::MOP_cvt_fr_i32 : abstract::MOP_cvt_fr_u32; + mOp = isSigned ? abstract::MOP_cvt_f32_i32 : abstract::MOP_cvt_f32_u32; newFromType = PTY_f32; } else if (fromSize == k64BitSize) { - mOp = isSigned ? abstract::MOP_cvt_fr_i64 : abstract::MOP_cvt_fr_u64; + mOp = isSigned ? 
abstract::MOP_cvt_f64_i64 : abstract::MOP_cvt_f64_u64; newFromType = PTY_f64; } else { CHECK_FATAL(false, "niy"); @@ -1016,6 +1153,7 @@ void MPISel::SelectCvtInt2Float(RegOperand &resOpnd, Operand &opnd0, PrimType to (void)insn.AddOpndChain(tmpFloatOpnd).AddOpndChain(regOpnd0); cgFunc->GetCurBB()->AppendInsn(insn); SelectFloatCvt(resOpnd, tmpFloatOpnd, toType, newFromType); +#endif } void MPISel::SelectIntCvt(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType) { @@ -1025,13 +1163,15 @@ void MPISel::SelectIntCvt(RegOperand &resOpnd, Operand &opnd0, PrimType toType, * It is redundancy to insert "nop" casts (unsigned 32 -> singed 32) in abstract CG IR * The signedness of operands would be shown in the expression. */ - RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, fromType); + bool isSigned = !IsPrimitiveUnsigned(fromType); + uint32 bitSize = opnd0.GetSize(); + PrimType opndType = GetIntegerPrimTypeFromSize(isSigned, bitSize); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, fromType, opndType); if (toSize <= fromSize) { resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(regOpnd0.GetRegisterNumber(), GetPrimTypeBitSize(toType), cgFunc->GetRegTyFromPrimTy(toType)); return; } - bool isSigned = !IsPrimitiveUnsigned(fromType); MOperator mOp = GetFastCvtMopI(fromSize, toSize, isSigned); Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); (void)insn.AddOpndChain(resOpnd).AddOpndChain(regOpnd0); @@ -1182,6 +1322,7 @@ Operand *MPISel::SelectIread(const BaseNode &parent [[maybe_unused]], const Irea } /* for BasicType, load val in addr to register. 
*/ PrimType primType = expr.GetPrimType(); + PTY128MOD(primType); RegOperand &result = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType)); SelectCopy(result, memOpnd, primType, lhsInfo.primType); @@ -1256,53 +1397,6 @@ Operand *MPISel::SelectDepositBits(const DepositbitsNode &node, Operand &opnd0, return &resOpnd; } -Operand *MPISel::SelectAbs(UnaryNode &node, Operand &opnd0) { - PrimType primType = node.GetPrimType(); - if (IsPrimitiveVector(primType)) { - CHECK_FATAL(false, "NIY"); - } else if (IsPrimitiveFloat(primType)) { - /* - * fabs(x) = x AND 0x7fffffff ffffffff [set sign bit to 0] - */ - const static uint64 kNaN = 0x7fffffffffffffffUL; - const static double kNaNDouble = *(double*)(&kNaN); - const static uint64 kNaNf = 0x7fffffffUL; - const static double kNaNFloat = *(double*)(&kNaNf); - CHECK_FATAL(primType == PTY_f64 || primType == PTY_f32, "niy"); - - double mask = primType == PTY_f64 ? kNaNDouble : kNaNFloat; - MIRDoubleConst *c = cgFunc->GetMemoryPool()->New(mask, - *GlobalTables::GetTypeTable().GetTypeTable().at(PTY_f64)); - Operand *opnd1 = SelectFloatingConst(*c, PTY_f64); - - RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), - cgFunc->GetRegTyFromPrimTy(primType)); - SelectBand(resOpnd, opnd0, *opnd1, primType); - return &resOpnd; - } else if (IsUnsignedInteger(primType)) { - return &opnd0; - } else { - /* - * abs(x) = (x XOR y) - y - * y = x >>> (bitSize - 1) - */ - uint32 bitSize = GetPrimTypeBitSize(primType); - CHECK_FATAL(bitSize == k64BitSize || bitSize == k32BitSize, "only support 32-bits or 64-bits"); - RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType); - ImmOperand &immOpnd = cgFunc->GetOpndBuilder()->CreateImm(bitSize, bitSize - 1); - RegOperand ®Opndy = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, - cgFunc->GetRegTyFromPrimTy(primType)); - SelectShift(regOpndy, regOpnd0, immOpnd, OP_ashr, primType, primType); - RegOperand &tmpOpnd = 
cgFunc->GetOpndBuilder()->CreateVReg(bitSize, - cgFunc->GetRegTyFromPrimTy(primType)); - SelectBxor(tmpOpnd, regOpnd0, regOpndy, primType); - RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, - cgFunc->GetRegTyFromPrimTy(primType)); - SelectSub(resOpnd, tmpOpnd, regOpndy, primType); - return &resOpnd; - } -} - Operand *MPISel::SelectAlloca(UnaryNode &node, Operand &opnd0) { ASSERT(node.GetPrimType() == PTY_a64, "wrong type"); PrimType srcType = node.Opnd(0)->GetPrimType(); @@ -1396,59 +1490,48 @@ StmtNode *MPISel::HandleFuncEntry() const { RegOperand &MPISel::SelectCopy2Reg(Operand &src, PrimType toType, PrimType fromType) { uint32 fromSize = GetPrimTypeBitSize(fromType); uint32 toSize = GetPrimTypeBitSize(toType); - if (src.IsRegister() && fromSize == toSize) { + bool isReg = src.IsRegister(); + uint32 srcRegSize = isReg ? src.GetSize() : 0; + if ((isReg && fromSize == toSize) || (fromType == PTY_unknown && isReg && srcRegSize == toSize)) { return static_cast(src); } - RegOperand &dest = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), - cgFunc->GetRegTyFromPrimTy(toType)); - if (fromSize != toSize) { - SelectCopy(dest, src, toType, fromType); - } else { + RegOperand &dest = cgFunc->GetOpndBuilder()->CreateVReg(toSize, cgFunc->GetRegTyFromPrimTy(toType)); + if (fromType == PTY_unknown || fromSize == toSize) { SelectCopy(dest, src, toType); + } else if (fromSize != toSize) { + SelectCopy(dest, src, toType, fromType); } return dest; } -/* Pretty sure that implicit type conversions will not occur. 
*/ -RegOperand &MPISel::SelectCopy2Reg(Operand &src, PrimType dtype) { - ASSERT(src.GetSize() == GetPrimTypeBitSize(dtype), "NIY"); - if (src.IsRegister()) { - return static_cast(src); - } - RegOperand &dest = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), - cgFunc->GetRegTyFromPrimTy(dtype)); - SelectCopy(dest, src, dtype); - return dest; -} /* This function copy/load/store src to a dest, Once the src and dest types * are different, implicit conversion is executed here. */ void MPISel::SelectCopy(Operand &dest, Operand &src, PrimType toType, PrimType fromType) { - if (GetPrimTypeBitSize(fromType) != GetPrimTypeBitSize(toType)) { + PTY128MOD(fromType); + PTY128MOD(toType); + uint32 fromSize = GetPrimTypeBitSize(fromType); + uint32 toSize = GetPrimTypeBitSize(toType); + if (fromType != PTY_unknown && fromSize != toSize) { RegOperand &srcRegOpnd = SelectCopy2Reg(src, fromType); - RegOperand &dstRegOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), - cgFunc->GetRegTyFromPrimTy(toType)); + RegOperand &dstRegOpnd = cgFunc->GetOpndBuilder()->CreateVReg(toSize, cgFunc->GetRegTyFromPrimTy(toType)); SelectIntCvt(dstRegOpnd, srcRegOpnd, toType, fromType); SelectCopy(dest, dstRegOpnd, toType); } else { - SelectCopy(dest, src, toType); - } -} - -/* Pretty sure that implicit type conversions will not occur. 
*/ -void MPISel::SelectCopy(Operand &dest, Operand &src, PrimType type) { - ASSERT(dest.GetSize() == src.GetSize(), "NIY"); - if (dest.GetKind() == Operand::kOpdRegister) { - SelectCopyInsn(dest, src, type); - } else if (dest.GetKind() == Operand::kOpdMem) { - if (src.GetKind() != Operand::kOpdRegister) { - RegOperand &tempReg = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(type), - cgFunc->GetRegTyFromPrimTy(type)); - SelectCopyInsn(tempReg, src, type); - SelectCopyInsn(dest, tempReg, type); - } else { - SelectCopyInsn(dest, src, type); + if (dest.GetKind() == Operand::kOpdMem || src.GetKind() == Operand::kOpdMem) { + if ((dest.GetKind() == Operand::kOpdMem && src.GetKind() == Operand::kOpdRegister) || + (dest.GetKind() == Operand::kOpdRegister && src.GetKind() == Operand::kOpdMem)) { + SelectCopyInsn(dest, src, toType); + } else if (src.GetKind() != Operand::kOpdRegister) { + RegOperand &tempReg = cgFunc->GetOpndBuilder()->CreateVReg(toSize, cgFunc->GetRegTyFromPrimTy(toType)); + SelectCopyInsn(tempReg, src, toType); + SelectCopyInsn(dest, tempReg, toType); + } else { + SelectCopyInsn(dest, src, toType); + } + } else if (dest.GetKind() == Operand::kOpdRegister) { + SelectCopyInsn(dest, src, toType); + }else { + CHECK_FATAL(false, "NIY, CPU supports more than memory and registers"); } - }else { - CHECK_FATAL(false, "NIY, CPU supports more than memory and registers"); } } @@ -1527,22 +1610,22 @@ Operand *MPISel::SelectRetype(TypeCvtNode &node, Operand &opnd0) { if (IsPrimitiveInteger(fromType) && IsPrimitiveInteger(toType)) { return &SelectCopy2Reg(opnd0, toType, fromType); } + if (IsPrimitiveInteger(fromType) && IsPrimitiveFloat(toType)) { + RegOperand *resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), + cgFunc->GetRegTyFromPrimTy(toType)); + SelectCvtInt2Float(*resOpnd, opnd0, toType, fromType); + return resOpnd; + } + if (IsPrimitiveFloat(fromType) && IsPrimitiveInteger(toType)) { + RegOperand *resOpnd = 
&cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), + cgFunc->GetRegTyFromPrimTy(toType)); + SelectCvtFloat2Int(*resOpnd, opnd0, toType, fromType); + return resOpnd; + } CHECK_FATAL(false, "NIY, retype"); return nullptr; } -void MPISel::HandleFuncExit() const { - BlockNode *block = cgFunc->GetFunction().GetBody(); - ASSERT(block != nullptr, "get func body block failed in CGFunc::GenerateInstruction"); - cgFunc->GetCurBB()->SetLastStmt(*block->GetLast()); - /* Set lastbb's frequency */ - cgFunc->SetLastBB(*cgFunc->GetCurBB()); - /* the last BB is return BB */ - cgFunc->GetLastBB()->SetKind(BB::kBBReturn); - - cgFunc->AddCommonExitBB(); -} - void InstructionSelector::GetAnalysisDependence(AnalysisDep &aDep) const { aDep.AddRequired(); aDep.AddPreserved(); diff --git a/src/mapleall/maple_be/src/cg/standardize.cpp b/src/mapleall/maple_be/src/cg/standardize.cpp index cf14b253f5931e460dbebcc406cbdd0131ed9079..b3a14f7a0d5793bc4e6b5a1f703a7aee4083f436 100644 --- a/src/mapleall/maple_be/src/cg/standardize.cpp +++ b/src/mapleall/maple_be/src/cg/standardize.cpp @@ -22,6 +22,9 @@ void Standardize::DoStandardize() { /* two address mapping first */ FOR_ALL_BB(bb, cgFunc) { FOR_BB_INSNS(insn, bb) { + if (insn->IsDbgInsn()) { + continue; + } if (insn->IsMachineInstruction()) { continue; } @@ -30,10 +33,13 @@ void Standardize::DoStandardize() { } } } - + Legalize(); /* standardize for each op */ FOR_ALL_BB(bb, cgFunc) { FOR_BB_INSNS(insn, bb) { + if (insn->IsDbgInsn()) { + continue; + } if (insn->IsMachineInstruction()) { continue; } @@ -44,11 +50,13 @@ void Standardize::DoStandardize() { } else if (insn->IsBasicOp()) { StdzBasicOp(*insn); } else if (insn->IsUnaryOp()) { - StdzUnaryOp(*insn, *cgFunc); + StdzUnaryOp(*insn); } else if (insn->IsConversion()) { - StdzCvtOp(*insn, *cgFunc); + StdzCvtOp(*insn); } else if (insn->IsShift()) { - StdzShiftOp(*insn, *cgFunc); + StdzShiftOp(*insn); + } else if (insn->IsComment()) { + StdzCommentOp(*insn); } else { 
LogInfo::MapleLogger() << "Need STDZ function for " << insn->GetDesc()->GetName() << "\n"; CHECK_FATAL(false, "NIY"); @@ -61,19 +69,37 @@ void Standardize::AddressMapping(Insn &insn) const { Operand &dest = insn.GetOperand(kInsnFirstOpnd); Operand &src1 = insn.GetOperand(kInsnSecondOpnd); uint32 destSize = dest.GetSize(); + CHECK_FATAL(dest.IsRegister(), "AddressMapping: not reg operand"); + bool isInt = static_cast(dest).GetRegisterType() == kRegTyInt ? true : false; MOperator mOp = abstract::MOP_undef; switch (destSize) { case k8BitSize: - mOp = abstract::MOP_copy_rr_8; + if (isInt) { + mOp = abstract::MOP_copy_rr_8; + } else { + mOp = abstract::MOP_copy_ff_8; + } break; case k16BitSize: - mOp = abstract::MOP_copy_rr_16; + if (isInt) { + mOp = abstract::MOP_copy_rr_16; + } else { + mOp = abstract::MOP_copy_ff_16; + } break; case k32BitSize: - mOp = abstract::MOP_copy_rr_32; + if (isInt) { + mOp = abstract::MOP_copy_rr_32; + } else { + mOp = abstract::MOP_copy_ff_32; + } break; case k64BitSize: - mOp = abstract::MOP_copy_rr_64; + if (isInt) { + mOp = abstract::MOP_copy_rr_64; + } else { + mOp = abstract::MOP_copy_ff_64; + } break; default: break; diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp index fa3e08afb2a28a74b35bd06e6a337db95b503b15..4196e14a9876c410fc0b5a3ccd68007671723bd2 100644 --- a/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp @@ -21,8 +21,20 @@ #include "isel.h" namespace maplebe { + +void X64MPIsel::HandleFuncExit() const { + BlockNode *block = cgFunc->GetFunction().GetBody(); + ASSERT(block != nullptr, "get func body block failed in CGFunc::GenerateInstruction"); + cgFunc->GetCurBB()->SetLastStmt(*block->GetLast()); + /* Set lastbb's frequency */ + cgFunc->SetLastBB(*cgFunc->GetCurBB()); + /* the last BB is return BB */ + cgFunc->GetLastBB()->SetKind(BB::kBBReturn); + cgFunc->PushBackExitBBsVec(*cgFunc->GetLastBB()); +} 
+ /* Field-ID 0 is assigned to the top level structure. (Field-ID also defaults to 0 if it is not a structure.) */ -MemOperand &X64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId) const { +MemOperand &X64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId, RegOperand *baseReg) { PrimType symType; int32 fieldOffset = 0; if (fieldId == 0) { @@ -65,7 +77,8 @@ MemOperand &X64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uin return *result; } -void X64MPIsel::SelectReturn(NaryStmtNode &retNode, Operand &opnd) { +void X64MPIsel::SelectReturn(NaryStmtNode &retNode) { + Operand &opnd = *HandleExpr(retNode, *retNode.Opnd(0)); MIRType *retType = cgFunc->GetFunction().GetReturnType(); X64CallConvImpl retLocator(cgFunc->GetBecommon()); CCLocInfo retMech; @@ -125,7 +138,7 @@ void X64MPIsel::SelectPseduoForReturn(std::vector &retRegs) { } } -void X64MPIsel::SelectReturn() { +void X64MPIsel::SelectReturn(bool noOpnd [[maybe_unused]]) { /* jump to epilogue */ MOperator mOp = x64::MOP_jmpq_l; LabelNode *endLabel = cgFunc->GetEndLabel(); @@ -137,7 +150,7 @@ void X64MPIsel::SelectReturn() { cgFunc->GetExitBBsVec().emplace_back(cgFunc->GetCurBB()); } -void X64MPIsel::CreateCallStructParamPassByStack(MemOperand &memOpnd, int32 symSize, int32 baseOffset) { +void X64MPIsel::CreateCallStructParamPassByStack(const MemOperand &memOpnd, uint32 symSize, int32 baseOffset) { int32 copyTime = RoundUp(symSize, GetPointerSize()) / GetPointerSize(); for (int32 i = 0; i < copyTime; ++i) { ImmOperand &newImmOpnd = static_cast(*memOpnd.GetOffsetOperand()->Clone(*cgFunc->GetMemoryPool())); @@ -150,7 +163,7 @@ void X64MPIsel::CreateCallStructParamPassByStack(MemOperand &memOpnd, int32 symS } } -void X64MPIsel::CreateCallStructParamPassByReg(MemOperand &memOpnd, regno_t regNo, uint32 parmNum) { +void X64MPIsel::CreateCallStructParamPassByReg(const MemOperand &memOpnd, regno_t regNo, uint32 parmNum) { CHECK_FATAL(parmNum < 
kMaxStructParamByReg, "Exceeded maximum allowed fp parameter registers for struct passing"); RegOperand &parmOpnd = cgFunc->GetOpndBuilder()->CreatePReg(regNo, k64BitSize, kRegTyInt); ImmOperand &newImmOpnd = static_cast(*memOpnd.GetOffsetOperand()->Clone(*cgFunc->GetMemoryPool())); @@ -196,13 +209,13 @@ void X64MPIsel::SelectParmListForAggregate(BaseNode &argExpr, X64CallConvImpl &p CHECK_FATAL(ploc.fpSize == 0, "Unknown call parameter state"); CreateCallStructParamPassByReg(memOpnd, ploc.reg0, 0); if (ploc.reg1 != kRinvalid) { - CreateCallStructParamPassByReg(memOpnd, ploc.reg1, 1); + CreateCallStructParamPassByReg(memOpnd, ploc.reg1, kSecondReg); } if (ploc.reg2 != kRinvalid) { - CreateCallStructParamPassByReg(memOpnd, ploc.reg2, 2); + CreateCallStructParamPassByReg(memOpnd, ploc.reg2, kThirdReg); } if (ploc.reg3 != kRinvalid) { - CreateCallStructParamPassByReg(memOpnd, ploc.reg3, 3); + CreateCallStructParamPassByReg(memOpnd, ploc.reg3, kFourthReg); } } } @@ -452,7 +465,7 @@ void X64MPIsel::SelectLibCallNArg(const std::string &funcName, std::vectorGetLabelIdx(); Operand *result = nullptr; @@ -480,7 +493,8 @@ RegOperand *X64MPIsel::PrepareMemcpyParm(uint64 copySize) { return ®Result; } -void X64MPIsel::SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRhs) { +void X64MPIsel::SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRhs, const DassignNode &stmt) { + (void)stmt; /* rhs is Func Return, it must be from Regread */ if (opndRhs.IsRegister()) { SelectIntAggCopyReturn(symbolMem, lhsInfo.size); @@ -621,6 +635,7 @@ void X64MPIsel::SelectIgoto(Operand &opnd0) { Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); jmpInsn.AddOpndChain(opnd0); cgFunc->GetCurBB()->AppendInsn(jmpInsn); + cgFunc->SetCurBBKind(BB::kBBGoto); return; } @@ -912,7 +927,8 @@ static X64MOP_t PickJmpInsn(Opcode brOp, Opcode cmpOp, bool isFloat, bool isSign * handle brfalse/brtrue op, opnd0 can be a compare node or 
non-compare node * such as a dread for example */ -void X64MPIsel::SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode, Operand &opnd0) { +void X64MPIsel::SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode) { + Operand &opnd0 = *HandleExpr(stmt, condNode); Opcode opcode = stmt.GetOpCode(); X64MOP_t jmpOperator = x64::MOP_begin; if (opnd0.IsImmediate()) { @@ -1335,4 +1351,182 @@ void X64MPIsel::SelectAsm(AsmNode &node) { cgFunc->SetHasAsm(); CHECK_FATAL(false, "NIY"); } + +Operand *X64MPIsel::SelectAbs(UnaryNode &node, Operand &opnd0, const BaseNode &parent) { + PrimType primType = node.GetPrimType(); + if (IsPrimitiveVector(primType)) { + CHECK_FATAL(false, "NIY"); + } else if (IsPrimitiveFloat(primType)) { + /* + * fabs(x) = x AND 0x7fffffff ffffffff [set sign bit to 0] + */ + const static uint64 kNaN = 0x7fffffffffffffffUL; + const static double kNaNDouble = *(double*)(&kNaN); + const static uint64 kNaNf = 0x7fffffffUL; + const static double kNaNFloat = *(double*)(&kNaNf); + CHECK_FATAL(primType == PTY_f64 || primType == PTY_f32, "niy"); + + double mask = primType == PTY_f64 ? 
kNaNDouble : kNaNFloat; + MIRDoubleConst *c = cgFunc->GetMemoryPool()->New(mask, + *GlobalTables::GetTypeTable().GetTypeTable().at(PTY_f64)); + Operand *opnd1 = SelectFloatingConst(*c, PTY_f64, parent); + + RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + SelectBand(resOpnd, opnd0, *opnd1, primType); + return &resOpnd; + } else if (IsUnsignedInteger(primType)) { + return &opnd0; + } else { + /* + * abs(x) = (x XOR y) - y + * y = x >>> (bitSize - 1) + */ + uint32 bitSize = GetPrimTypeBitSize(primType); + CHECK_FATAL(bitSize == k64BitSize || bitSize == k32BitSize, "only support 32-bits or 64-bits"); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType); + ImmOperand &immOpnd = cgFunc->GetOpndBuilder()->CreateImm(bitSize, bitSize - 1); + RegOperand ®Opndy = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + SelectShift(regOpndy, regOpnd0, immOpnd, OP_ashr, primType, primType); + RegOperand &tmpOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + SelectBxor(tmpOpnd, regOpnd0, regOpndy, primType); + RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + SelectSub(resOpnd, tmpOpnd, regOpndy, primType); + return &resOpnd; + } +} + +Operand *X64MPIsel::SelectCsin(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + ASSERT(false, "NIY"); + return nullptr; +} + +Operand *X64MPIsel::SelectCsinh(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + ASSERT(false, "NIY"); + return nullptr; +} + +Operand *X64MPIsel::SelectCasin(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + ASSERT(false, "NIY"); + return nullptr; +} + +Operand *X64MPIsel::SelectCcos(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + ASSERT(false, "NIY"); + return 
nullptr; +} + +Operand *X64MPIsel::SelectCcosh(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + ASSERT(false, "NIY"); + return nullptr; +} + +Operand *X64MPIsel::SelectCacos(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + ASSERT(false, "NIY"); + return nullptr; +} + +Operand *X64MPIsel::SelectCatan(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + ASSERT(false, "NIY"); + return nullptr; +} + +Operand *X64MPIsel::SelectClog(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + ASSERT(false, "NIY"); + return nullptr; +} + +Operand *X64MPIsel::SelectClog10(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + ASSERT(false, "NIY"); + return nullptr; +} + +Operand *X64MPIsel::SelectCsinf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + ASSERT(false, "NIY"); + return nullptr; +} + +Operand *X64MPIsel::SelectCsinhf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + ASSERT(false, "NIY"); + return nullptr; +} + +Operand *X64MPIsel::SelectCasinf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + ASSERT(false, "NIY"); + return nullptr; +} + +Operand *X64MPIsel::SelectCcosf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + ASSERT(false, "NIY"); + return nullptr; +} + +Operand *X64MPIsel::SelectCcoshf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + ASSERT(false, "NIY"); + return nullptr; +} + +Operand *X64MPIsel::SelectCacosf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + ASSERT(false, "NIY"); + return nullptr; +} + +Operand *X64MPIsel::SelectCatanf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + ASSERT(false, "NIY"); + return nullptr; +} + +Operand 
*X64MPIsel::SelectCexpf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + ASSERT(false, "NIY"); + return nullptr; +} + +Operand *X64MPIsel::SelectClogf(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + ASSERT(false, "NIY"); + return nullptr; +} + +Operand *X64MPIsel::SelectClog10f(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + ASSERT(false, "NIY"); + return nullptr; +} + +Operand *X64MPIsel::SelectCffs(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + ASSERT(false, "NIY"); + return nullptr; +} + +Operand *X64MPIsel::SelectCmemcmp(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + ASSERT(false, "NIY"); + return nullptr; +} + +Operand *X64MPIsel::SelectCstrlen(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + ASSERT(false, "NIY"); + return nullptr; +} + +Operand *X64MPIsel::SelectCstrcmp(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + ASSERT(false, "NIY"); + return nullptr; +} + +Operand *X64MPIsel::SelectCstrncmp(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + ASSERT(false, "NIY"); + return nullptr; +} + +Operand *X64MPIsel::SelectCstrchr(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + ASSERT(false, "NIY"); + return nullptr; +} + +Operand *X64MPIsel::SelectCstrrchr(IntrinsicopNode &node [[maybe_unused]], Operand &opnd0, const BaseNode &parent) { + ASSERT(false, "NIY"); + return nullptr; +} + } diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_cgfunc.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_cgfunc.cpp index f3c15c00a5f945fcbd46850e0ab6eb89e9b1c4cf..4d09a2f97919d1e2bd9ac0e4faaf0b2ce53524d1 100644 --- a/src/mapleall/maple_be/src/cg/x86_64/x64_cgfunc.cpp +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_cgfunc.cpp @@ -73,7 +73,7 @@ void 
X64CGFunc::SelectAssertNull(UnaryStmtNode &stmt) { void X64CGFunc::SelectAsm(AsmNode &node) { CHECK_FATAL(false, "NIY"); } -void X64CGFunc::SelectAggDassign(DassignNode &stmt) { +void X64CGFunc::SelectAggDassign(const DassignNode &stmt) { CHECK_FATAL(false, "NIY"); } void X64CGFunc::SelectIassign(IassignNode &stmt) { diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp index a506e0e28fdff46c7bcdaef3fee4bcd6f93fe861..664af1029e7bc2456b70156aa2b5757bc7165a8d 100644 --- a/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp @@ -56,10 +56,10 @@ void X64Standardize::StdzBasicOp(Insn &insn) { insn.AddOpndChain(src2).AddOpndChain(dest); } -void X64Standardize::StdzUnaryOp(Insn &insn, CGFunc &cgFunc) { +void X64Standardize::StdzUnaryOp(Insn &insn) { MOperator mOp = insn.GetMachineOpcode(); if (mOp == abstract::MOP_neg_f_32 || mOp == abstract::MOP_neg_f_64) { - StdzFloatingNeg(insn, cgFunc); + StdzFloatingNeg(insn); return; } X64MOP_t directlyMappingMop = GetMopFromAbstraceIRMop(insn.GetMachineOpcode()); @@ -69,23 +69,39 @@ void X64Standardize::StdzUnaryOp(Insn &insn, CGFunc &cgFunc) { insn.AddOpndChain(dest); } -void X64Standardize::StdzCvtOp(Insn &insn, CGFunc &cgFunc) { +void X64Standardize::StdzCvtOp(Insn &insn) { uint32 OpndDesSize = insn.GetDesc()->GetOpndDes(kInsnFirstOpnd)->GetSize(); uint32 destSize = OpndDesSize; uint32 OpndSrcSize = insn.GetDesc()->GetOpndDes(kInsnSecondOpnd)->GetSize(); uint32 srcSize = OpndSrcSize; switch (insn.GetMachineOpcode()) { - case abstract::MOP_zext_rr_64_8: - case abstract::MOP_zext_rr_64_16: case abstract::MOP_zext_rr_64_32: destSize = k32BitSize; break; - case abstract::MOP_cvt_fr_u32: + case abstract::MOP_cvt_f32_u32: srcSize = k64BitSize; break; - case abstract::MOP_cvt_rf_u32: + case abstract::MOP_cvt_u32_f32: destSize = k64BitSize; break; + case abstract::MOP_zext_rr_8_16: + case 
abstract::MOP_sext_rr_8_16: + case abstract::MOP_zext_rr_8_32: + case abstract::MOP_sext_rr_8_32: + case abstract::MOP_zext_rr_16_32: + case abstract::MOP_sext_rr_16_32: + case abstract::MOP_zext_rr_8_64: + case abstract::MOP_sext_rr_8_64: + case abstract::MOP_zext_rr_16_64: + case abstract::MOP_sext_rr_16_64: + case abstract::MOP_sext_rr_32_64: + /* reverse operands */ + destSize = OpndSrcSize; + srcSize = OpndDesSize; + break; + case abstract::MOP_zext_rr_32_64: + srcSize = k32BitSize; + destSize = k32BitSize; default: break; } @@ -95,13 +111,13 @@ void X64Standardize::StdzCvtOp(Insn &insn, CGFunc &cgFunc) { Operand *opnd0 = &insn.GetOperand(kInsnSecondOpnd); RegOperand *src = static_cast(opnd0); if (srcSize != OpndSrcSize) { - src = &cgFunc.GetOpndBuilder()->CreateVReg(src->GetRegisterNumber(), + src = &GetCgFunc()->GetOpndBuilder()->CreateVReg(src->GetRegisterNumber(), srcSize, src->GetRegisterType()); } Operand *opnd1 = &insn.GetOperand(kInsnFirstOpnd); RegOperand *dest = static_cast(opnd1); if (destSize != OpndDesSize) { - dest = &cgFunc.GetOpndBuilder()->CreateVReg(dest->GetRegisterNumber(), + dest = &GetCgFunc()->GetOpndBuilder()->CreateVReg(dest->GetRegisterNumber(), destSize, dest->GetRegisterType()); } insn.CleanAllOperand(); @@ -120,14 +136,14 @@ void X64Standardize::StdzCvtOp(Insn &insn, CGFunc &cgFunc) { * 32: xorl 0x80000000 R1 * movd R1 xmm0 */ -void X64Standardize::StdzFloatingNeg(Insn &insn, CGFunc &cgFunc) { +void X64Standardize::StdzFloatingNeg(Insn &insn) { MOperator mOp = insn.GetMachineOpcode(); uint32 bitSize = mOp == abstract::MOP_neg_f_32 ? k32BitSize : k64BitSize; // mov dest -> tmpOperand0 MOperator movOp = mOp == abstract::MOP_neg_f_32 ? 
x64::MOP_movd_fr_r : x64::MOP_movq_fr_r; - RegOperand *tmpOperand0 = &cgFunc.GetOpndBuilder()->CreateVReg(bitSize, kRegTyInt); - Insn &movInsn0 = cgFunc.GetInsnBuilder()->BuildInsn(movOp, X64CG::kMd[movOp]); + RegOperand *tmpOperand0 = &GetCgFunc()->GetOpndBuilder()->CreateVReg(bitSize, kRegTyInt); + Insn &movInsn0 = GetCgFunc()->GetInsnBuilder()->BuildInsn(movOp, X64CG::kMd[movOp]); Operand &dest = insn.GetOperand(kInsnFirstOpnd); movInsn0.AddOpndChain(dest).AddOpndChain(*tmpOperand0); insn.GetBB()->InsertInsnBefore(insn, movInsn0); @@ -135,26 +151,26 @@ void X64Standardize::StdzFloatingNeg(Insn &insn, CGFunc &cgFunc) { // 32 : xorl 0x80000000 tmpOperand0 // 64 : movabs 0x8000000000000000 tmpOperand1 // xorq tmpOperand1 tmpOperand0 - ImmOperand &imm = cgFunc.GetOpndBuilder()->CreateImm(bitSize, (static_cast(1) << (bitSize - 1))); + ImmOperand &imm = GetCgFunc()->GetOpndBuilder()->CreateImm(bitSize, (static_cast(1) << (bitSize - 1))); if (mOp == abstract::MOP_neg_f_64) { - Operand *tmpOperand1 = &cgFunc.GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); - Insn &movabs = cgFunc.GetInsnBuilder()->BuildInsn(x64::MOP_movabs_i_r, X64CG::kMd[x64::MOP_movabs_i_r]); + Operand *tmpOperand1 = &GetCgFunc()->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + Insn &movabs = GetCgFunc()->GetInsnBuilder()->BuildInsn(x64::MOP_movabs_i_r, X64CG::kMd[x64::MOP_movabs_i_r]); movabs.AddOpndChain(imm).AddOpndChain(*tmpOperand1); insn.GetBB()->InsertInsnBefore(insn, movabs); MOperator xorOp = x64::MOP_xorq_r_r; - Insn &xorq = cgFunc.GetInsnBuilder()->BuildInsn(xorOp, X64CG::kMd[xorOp]); + Insn &xorq = GetCgFunc()->GetInsnBuilder()->BuildInsn(xorOp, X64CG::kMd[xorOp]); xorq.AddOpndChain(*tmpOperand1).AddOpndChain(*tmpOperand0); insn.GetBB()->InsertInsnBefore(insn, xorq); } else { MOperator xorOp = x64::MOP_xorl_i_r; - Insn &xorq = cgFunc.GetInsnBuilder()->BuildInsn(xorOp, X64CG::kMd[xorOp]); + Insn &xorq = GetCgFunc()->GetInsnBuilder()->BuildInsn(xorOp, X64CG::kMd[xorOp]); 
xorq.AddOpndChain(imm).AddOpndChain(*tmpOperand0); insn.GetBB()->InsertInsnBefore(insn, xorq); } // mov tmpOperand0 -> dest - Insn &movq = cgFunc.GetInsnBuilder()->BuildInsn(movOp, X64CG::kMd[movOp]); + Insn &movq = GetCgFunc()->GetInsnBuilder()->BuildInsn(movOp, X64CG::kMd[movOp]); movq.AddOpndChain(*tmpOperand0).AddOpndChain(dest); insn.GetBB()->InsertInsnBefore(insn, movq); @@ -162,17 +178,17 @@ void X64Standardize::StdzFloatingNeg(Insn &insn, CGFunc &cgFunc) { return; } -void X64Standardize::StdzShiftOp(Insn &insn, CGFunc &cgFunc) { +void X64Standardize::StdzShiftOp(Insn &insn) { RegOperand *countOpnd = static_cast(&insn.GetOperand(kInsnThirdOpnd)); /* count operand cvt -> PTY_u8 */ if (countOpnd->GetSize() != GetPrimTypeBitSize(PTY_u8)) { - countOpnd = &cgFunc.GetOpndBuilder()->CreateVReg(countOpnd->GetRegisterNumber(), + countOpnd = &GetCgFunc()->GetOpndBuilder()->CreateVReg(countOpnd->GetRegisterNumber(), GetPrimTypeBitSize(PTY_u8), countOpnd->GetRegisterType()); } /* copy count operand to cl(rcx) register */ - RegOperand &clOpnd = cgFunc.GetOpndBuilder()->CreatePReg(x64::RCX, GetPrimTypeBitSize(PTY_u8), kRegTyInt); + RegOperand &clOpnd = GetCgFunc()->GetOpndBuilder()->CreatePReg(x64::RCX, GetPrimTypeBitSize(PTY_u8), kRegTyInt); X64MOP_t copyMop = x64::MOP_movb_r_r; - Insn ©Insn = cgFunc.GetInsnBuilder()->BuildInsn(copyMop, X64CG::kMd[copyMop]); + Insn ©Insn = GetCgFunc()->GetInsnBuilder()->BuildInsn(copyMop, X64CG::kMd[copyMop]); copyInsn.AddOpndChain(*countOpnd).AddOpndChain(clOpnd); insn.GetBB()->InsertInsnBefore(insn, copyInsn); /* shift OP */ @@ -183,4 +199,8 @@ void X64Standardize::StdzShiftOp(Insn &insn, CGFunc &cgFunc) { insn.AddOpndChain(clOpnd).AddOpndChain(destOpnd); } +void X64Standardize::StdzCommentOp(Insn &insn) { + insn.GetBB()->RemoveInsn(insn); +} + }