diff --git a/src/mapleall/maple_be/include/be/common_utils.h b/src/mapleall/maple_be/include/be/common_utils.h index 7009aa96519e99465b3e840ca6a8ee5cdb099bfc..205cbf8161d3c258f94243beb781a40aed2b61ed 100644 --- a/src/mapleall/maple_be/include/be/common_utils.h +++ b/src/mapleall/maple_be/include/be/common_utils.h @@ -65,6 +65,7 @@ constexpr uint32 k12ByteSize = 12; constexpr uint32 k14ByteSize = 14; constexpr uint32 k15ByteSize = 15; constexpr uint32 k16ByteSize = 16; +constexpr uint32 k32ByteSize = 32; constexpr uint32 k1EightBytesSize = 8; constexpr uint32 k2EightBytesSize = 16; diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_call_conv.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_call_conv.h index fe5322398e51560db92c53b7d99a44d45afcd3e7..e66975ca2196b0720400d434a759cc104cdbd2bb 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_call_conv.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_call_conv.h @@ -42,6 +42,9 @@ class AArch64CallConvImpl { void InitCCLocInfo(CCLocInfo &pLoc) const; + /* for lmbc */ + uint32 FloatParamRegRequired(MIRStructType &structType, uint32 &fpSize); + /* return value related */ void InitReturnInfo(MIRType &retTy, CCLocInfo &pLoc); diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h index 5391df48586072dfdf7b4d8f219d950267916438..389fa7a53003fefc293173e475344fa793f63db5 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h @@ -27,6 +27,19 @@ #include "aarch64_call_conv.h" namespace maplebe { +class LmbcArgInfo { + public: + LmbcArgInfo(MapleAllocator &mallocator) + : lmbcCallArgs(mallocator.Adapter()), + lmbcCallArgTypes(mallocator.Adapter()), + lmbcCallArgOffsets(mallocator.Adapter()), + lmbcCallArgNumOfRegs(mallocator.Adapter()) {} + MapleVector lmbcCallArgs; + MapleVector lmbcCallArgTypes; + MapleVector lmbcCallArgOffsets; + MapleVector lmbcCallArgNumOfRegs; // # of regs needed to complete struct +}; + class AArch64CGFunc : public CGFunc { public: AArch64CGFunc(MIRModule &mod, CG &c, MIRFunction &f, BECommon &b, @@ -51,7 +64,7 @@ class AArch64CGFunc : public CGFunc { if (f.GetAttr(FUNCATTR_varargs) || f.HasVlaOrAlloca()) { SetHasVLAOrAlloca(true); } - SetUseFP(CGOptions::UseFramePointer() || HasVLAOrAlloca() || !f.GetModule()->IsCModule()); + SetUseFP(CGOptions::UseFramePointer() || HasVLAOrAlloca() || !f.GetModule()->IsCModule() || f.GetModule()->GetFlavor() == MIRFlavor::kFlavorLmbc); } ~AArch64CGFunc() override = default; @@ -86,6 +99,7 @@ class AArch64CGFunc : public CGFunc { return kRFLAG; } + MIRType *GetAggTyFromCallSite(StmtNode *stmt); RegOperand &GetOrCreateResOperand(const BaseNode &parent, PrimType primType); void IntrinsifyGetAndAddInt(ListOperand &srcOpnds, PrimType pty); @@ -109,9 +123,13 @@ class AArch64CGFunc : public CGFunc { bool needLow12 = false); MemOperand *FixLargeMemOpnd(MemOperand &memOpnd, uint32 align); MemOperand *FixLargeMemOpnd(MOperator mOp, MemOperand &memOpnd, uint32 dSize, uint32 opndIdx); + void LmbcSelectParmList(ListOperand *srcOpnds); void SelectAggDassign(DassignNode &stmt) override; void SelectIassign(IassignNode &stmt) override; void SelectIassignoff(IassignoffNode &stmt) override; + void SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) override; + void SelectIassignspoff(PrimType pTy, int32 offset, Operand &opnd) override; + void SelectBlkassignoff(BlkassignoffNode &bNode, Operand *src) override; void 
SelectAggIassign(IassignNode &stmt, Operand &lhsAddrOpnd) override; void SelectReturn(Operand *opnd0) override; void SelectIgoto(Operand *opnd0) override; @@ -155,7 +173,8 @@ class AArch64CGFunc : public CGFunc { void SelectAddrof(Operand &result, StImmOperand &stImm, FieldID field = 0); void SelectAddrof(Operand &result, MemOperand &memOpnd, FieldID field = 0); Operand *SelectCSyncCmpSwap(const IntrinsicopNode &intrinopNode, PrimType pty, bool retBool = false); - Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent) override; + Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent, bool isAddrofoff = false) override; + Operand *SelectAddrofoff(AddrofoffNode &expr, const BaseNode &parent) override; Operand &SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) override; Operand &SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) override; @@ -164,6 +183,7 @@ class AArch64CGFunc : public CGFunc { Operand *SelectIread(const BaseNode &parent, IreadNode &expr, int extraOffset = 0, PrimType finalBitFieldDestType = kPtyInvalid) override; Operand *SelectIreadoff(const BaseNode &parent, IreadoffNode &ireadoff) override; + Operand *SelectIreadfpoff(const BaseNode &parent, IreadFPoffNode &ireadoff) override; Operand *SelectIntConst(MIRIntConst &intConst) override; Operand *HandleFmovImm(PrimType stype, int64 val, MIRConst &mirConst, const BaseNode &parent); Operand *SelectFloatConst(MIRFloatConst &floatConst, const BaseNode &parent) override; @@ -272,7 +292,7 @@ class AArch64CGFunc : public CGFunc { RegOperand &CreateRegisterOperandOfType(PrimType primType); RegOperand &CreateRegisterOperandOfType(RegType regType, uint32 byteLen); RegOperand &CreateRflagOperand(); - RegOperand &GetOrCreateSpecialRegisterOperand(PregIdx sregIdx, PrimType primType = PTY_i64); + RegOperand &GetOrCreateSpecialRegisterOperand(PregIdx sregIdx, PrimType primType); MemOperand *GetOrCreatSpillMem(regno_t vrNum); void FreeSpillRegMem(regno_t vrNum); RegOperand &GetOrCreatePhysicalRegisterOperand(AArch64reg regNO, uint32 size, RegType type, uint32 flag = 0); @@ -387,7 +407,13 @@ class AArch64CGFunc : public CGFunc { } RegOperand &GetOrCreateStackBaseRegOperand() override { - return GetOrCreatePhysicalRegisterOperand(RFP, kSizeOfPtr * kBitsPerByte, kRegTyInt); + AArch64reg reg; + if (GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + reg = RSP; + } else { + reg = RFP; + } + return GetOrCreatePhysicalRegisterOperand(reg, kSizeOfPtr * kBitsPerByte, kRegTyInt); } RegOperand &GenStructParamIndex(RegOperand &base, const BaseNode &indexExpr, int shift, PrimType baseType, @@ -452,6 +478,9 @@ class AArch64CGFunc : public CGFunc { void GenerateCleanupCode(BB &bb) override; bool NeedCleanup() override; void GenerateCleanupCodeForExtEpilog(BB &bb) override; + uint32 FloatParamRegRequired(MIRStructType &structType, uint32 &fpSize) override; + void AssignLmbcFormalParams() override; + RegOperand *GenLmbcParamLoad(int32 offset, uint32 byteSize, RegType regType, PrimType primType); Operand *GetBaseReg(const AArch64SymbolAlloc &symAlloc); int32 GetBaseOffset(const SymbolAlloc &symAlloc) override; @@ -653,6 +682,44 @@ class AArch64CGFunc : public CGFunc { return alignPow; } + LmbcArgInfo *GetLmbcArgInfo() { + return lmbcArgInfo; + } + + void SetLmbcArgInfo(LmbcArgInfo *p) { + lmbcArgInfo = p; + } + + void SetLmbcArgInfo(RegOperand *reg, PrimType pTy, int32 ofst, int32 regs) { + GetLmbcCallArgs().emplace_back(reg); + GetLmbcCallArgTypes().emplace_back(pTy); + 
GetLmbcCallArgOffsets().emplace_back(ofst); + GetLmbcCallArgNumOfRegs().emplace_back(regs); + } + + void ResetLmbcArgInfo() { + GetLmbcCallArgs().clear(); + GetLmbcCallArgTypes().clear(); + GetLmbcCallArgOffsets().clear(); + GetLmbcCallArgNumOfRegs().clear(); + } + + MapleVector &GetLmbcCallArgs() { + return lmbcArgInfo->lmbcCallArgs; + } + + MapleVector &GetLmbcCallArgTypes() { + return lmbcArgInfo->lmbcCallArgTypes; + } + + MapleVector &GetLmbcCallArgOffsets() { + return lmbcArgInfo->lmbcCallArgOffsets; + } + + MapleVector &GetLmbcCallArgNumOfRegs() { + return lmbcArgInfo->lmbcCallArgNumOfRegs; + } + private: enum RelationOperator : uint8 { kAND, @@ -719,6 +786,7 @@ class AArch64CGFunc : public CGFunc { int32 splitStpldpBaseOffset = 0; regno_t methodHandleVreg = -1; uint32 alignPow = 5; /* function align pow defaults to 5 i.e. 2^5*/ + LmbcArgInfo *lmbcArgInfo = nullptr; void SelectLoadAcquire(Operand &dest, PrimType dtype, Operand &src, PrimType stype, AArch64isa::MemoryOrdering memOrd, bool isDirect); diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def b/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def index 833dc82cce7d283e9c71f75c08a4e84c7a3f9ad3..88255c497a5f50975b54bd7ff4dc42166683b98c 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def @@ -18,15 +18,17 @@ ADDTARGETPHASE("handlefunction", true); ADDTARGETPHASE("moveargs", true); /* SSA PHASES */ - ADDTARGETPHASE("cgssaconstruct", CGOptions::DoCGSSA()); - ADDTARGETPHASE("cgcopyprop", CGOptions::DoCGSSA()); - ADDTARGETPHASE("cgpeephole", CGOptions::DoCGSSA()); - ADDTARGETPHASE("cgvalidbitopt", CGOptions::DoCGSSA()); - ADDTARGETPHASE("cgtargetprop", CGOptions::DoCGSSA()); - ADDTARGETPHASE("cgdeadcodeelimination", CGOptions::DoCGSSA()); - ADDTARGETPHASE("cgsplitcriticaledge", CGOptions::DoCGSSA()); - ADDTARGETPHASE("cgphielimination", CGOptions::DoCGSSA()); - ADDTARGETPHASE("cgregcoalesce", CGOptions::DoCGSSA()); + if (GetMIRModule()->GetFlavor() != MIRFlavor::kFlavorLmbc) { + ADDTARGETPHASE("cgssaconstruct", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgcopyprop", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgpeephole", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgvalidbitopt", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgtargetprop", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgdeadcodeelimination", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgsplitcriticaledge", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgphielimination", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgregcoalesce", CGOptions::DoCGSSA()); + } /* Normal OPT PHASES */ ADDTARGETPHASE("cgprepeephole", CGOptions::DoPrePeephole()); ADDTARGETPHASE("ebo", CGOptions::DoEBO()); @@ -34,20 +36,24 @@ ADDTARGETPHASE("ico", CGOptions::DoICO()) ADDTARGETPHASE("cfgo", !GetMIRModule()->IsCModule() && CGOptions::DoCFGO()); - ADDTARGETPHASE("storeloadopt", CGOptions::DoStoreLoadOpt() && !CGOptions::DoCGSSA()) - ADDTARGETPHASE("globalopt", CGOptions::DoGlobalOpt()) - ADDTARGETPHASE("clearrdinfo", (CGOptions::DoStoreLoadOpt()) || CGOptions::DoGlobalOpt()) + if (GetMIRModule()->GetFlavor() != MIRFlavor::kFlavorLmbc) { + ADDTARGETPHASE("storeloadopt", CGOptions::DoStoreLoadOpt() && !CGOptions::DoCGSSA()); + ADDTARGETPHASE("globalopt", CGOptions::DoGlobalOpt()); + } + ADDTARGETPHASE("clearrdinfo", (CGOptions::DoStoreLoadOpt()) || CGOptions::DoGlobalOpt()); - ADDTARGETPHASE("prepeephole1", CGOptions::DoPrePeephole()) + ADDTARGETPHASE("prepeephole1", CGOptions::DoPrePeephole()); 
ADDTARGETPHASE("ebo1", CGOptions::DoEBO()); ADDTARGETPHASE("prescheduling", !GetMIRModule()->IsJavaModule() && CGOptions::DoPreSchedule()); ADDTARGETPHASE("raopt", CGOptions::DoPreLSRAOpt()); ADDTARGETPHASE("cgsplitcriticaledge", GetMIRModule()->IsCModule()); ADDTARGETPHASE("regalloc", true); - ADDTARGETPHASE("regsaves", GetMIRModule()->IsCModule() && CGOptions::DoRegSavesOpt()) - ADDTARGETPHASE("storeloadopt", GetMIRModule()->IsCModule() && CGOptions::DoStoreLoadOpt()) - ADDTARGETPHASE("globalopt", CGOptions::DoCGSSA()); - ADDTARGETPHASE("clearrdinfo", GetMIRModule()->IsCModule() && (CGOptions::DoStoreLoadOpt() || CGOptions::DoGlobalOpt())) + ADDTARGETPHASE("regsaves", GetMIRModule()->IsCModule() && CGOptions::DoRegSavesOpt()); + if (GetMIRModule()->GetFlavor() != MIRFlavor::kFlavorLmbc) { + ADDTARGETPHASE("storeloadopt", GetMIRModule()->IsCModule() && CGOptions::DoStoreLoadOpt()); + ADDTARGETPHASE("globalopt", CGOptions::DoCGSSA()); + } + ADDTARGETPHASE("clearrdinfo", GetMIRModule()->IsCModule() && (CGOptions::DoStoreLoadOpt() || CGOptions::DoGlobalOpt())); ADDTARGETPHASE("generateproepilog", true); ADDTARGETPHASE("framefinalize", true); ADDTARGETPHASE("dbgfixcallframeoffsets", true); @@ -62,4 +68,4 @@ ADDTARGETPHASE("scheduling", CGOptions::DoSchedule()); ADDTARGETPHASE("alignanalysis", GetMIRModule()->IsCModule() && CGOptions::DoAlignAnalysis()); ADDTARGETPHASE("fixshortbranch", true); - ADDTARGETPHASE("cgemit", true); \ No newline at end of file + ADDTARGETPHASE("cgemit", true); diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_proepilog.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_proepilog.h index 1d35f1f69a7968404059427055b50d4f54c2be73..cd24e2bafa445bec1305fb64a9631e9700e22397 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_proepilog.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_proepilog.h @@ -29,7 +29,11 @@ class AArch64GenProEpilog : public GenProEpilog { public: explicit AArch64GenProEpilog(CGFunc &func) : GenProEpilog(func) { useFP = func.UseFP(); - stackBaseReg = useFP ? R29 : RSP; + if (func.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + stackBaseReg = RFP; + } else { + stackBaseReg = useFP ? 
R29 : RSP; + } exitBB2CallSitesMap.clear(); } ~AArch64GenProEpilog() override = default; diff --git a/src/mapleall/maple_be/include/cg/cgfunc.h b/src/mapleall/maple_be/include/cg/cgfunc.h index 91501c4323bcf4f78d6e2b60dfb34431002e8b13..5d8bc7edde404c69ecf7d1db2ea1b8b3831aa4f5 100644 --- a/src/mapleall/maple_be/include/cg/cgfunc.h +++ b/src/mapleall/maple_be/include/cg/cgfunc.h @@ -76,6 +76,72 @@ class SpillMemOperandSet { MapleSet reuseSpillLocMem; }; +class LmbcFormalParamInfo { + public: + explicit LmbcFormalParamInfo(PrimType pType, uint32 ofst, uint32 sz) : + primType(pType), offset(ofst), size(sz), regNO(0), vregNO(0), numRegs(0), fpSize(0), isPureFloat(false) {} + + virtual ~LmbcFormalParamInfo() = default; + + PrimType GetPrimType() { + return primType; + } + void SetPrimType(PrimType pType) { + primType = pType; + } + uint32 GetOffset() { + return offset; + } + void SetOffset(uint32 ofs) { + offset = ofs; + } + uint32 GetSize() { + return size; + } + void SetSize(uint32 sz) { + size = sz; + } + regno_t GetRegNO() { + return regNO; + } + void SetRegNO(regno_t reg) { + regNO = reg; + } + regno_t GetVregNO() { + return vregNO; + } + void SetVregNO(regno_t reg) { + vregNO = reg; + } + uint32 GetNumRegs() { + return numRegs; + } + void SetNumRegs(uint32 num) { + numRegs = num; + } + uint32 GetFpSize() { + return fpSize; + } + void SetFpSize(uint32 sz) { + fpSize = sz; + } + bool IsPureFloat() { + return isPureFloat; + } + void SetIsPureFloat() { + isPureFloat = true; + } + private: + PrimType primType; + uint32 offset; + uint32 size; /* size primtype or struct */ + regno_t regNO; /* param reg num or starting reg num if numRegs > 0 */ + regno_t vregNO; /* if no explicit regassing from IR, create move from param reg */ + uint32 numRegs; /* number of regs for struct param */ + uint32 fpSize; /* size of fp param if isPureFloat */ + bool isPureFloat; +}; + #if TARGARM32 class LiveRange; #endif /* TARGARM32 */ @@ -147,6 +213,10 @@ class CGFunc { virtual bool NeedCleanup() = 0; virtual void GenerateCleanupCodeForExtEpilog(BB &bb) = 0; + void CreateLmbcFormalParamInfo(); + virtual uint32 FloatParamRegRequired(MIRStructType &structType, uint32 &fpSize) = 0; + virtual void AssignLmbcFormalParams() = 0; + LmbcFormalParamInfo *GetLmbcFormalParamInfo(uint32 offset); void GenerateLoc(StmtNode *stmt, unsigned &lastSrcLoc, unsigned &lastMplLoc); int32 GetFreqFromStmt(uint32 stmtId); void GenerateInstruction(); @@ -184,6 +254,9 @@ class CGFunc { virtual void SelectAggDassign(DassignNode &stmt) = 0; virtual void SelectIassign(IassignNode &stmt) = 0; virtual void SelectIassignoff(IassignoffNode &stmt) = 0; + virtual void SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) = 0; + virtual void SelectIassignspoff(PrimType pTy, int32 offset, Operand &opnd) = 0; + virtual void SelectBlkassignoff(BlkassignoffNode &bNode, Operand *src) = 0; virtual void SelectAggIassign(IassignNode &stmt, Operand &lhsAddrOpnd) = 0; virtual void SelectReturn(Operand *opnd) = 0; virtual void SelectIgoto(Operand *opnd0) = 0; @@ -220,12 +293,14 @@ class CGFunc { /* select expr */ virtual Operand *SelectDread(const BaseNode &parent, AddrofNode &expr) = 0; virtual RegOperand *SelectRegread(RegreadNode &expr) = 0; - virtual Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent) = 0; + virtual Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent, bool isAddrofoff = false) = 0; + virtual Operand *SelectAddrofoff(AddrofoffNode &expr, const BaseNode &parent) = 0; virtual Operand &SelectAddrofFunc(AddroffuncNode 
&expr, const BaseNode &parent) = 0; virtual Operand &SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) = 0; virtual Operand *SelectIread(const BaseNode &parent, IreadNode &expr, int extraOffset = 0, PrimType finalBitFieldDestType = kPtyInvalid) = 0; virtual Operand *SelectIreadoff(const BaseNode &parent, IreadoffNode &ireadoff) = 0; + virtual Operand *SelectIreadfpoff(const BaseNode &parent, IreadFPoffNode &ireadoff) = 0; virtual Operand *SelectIntConst(MIRIntConst &intConst) = 0; virtual Operand *SelectFloatConst(MIRFloatConst &floatConst, const BaseNode &parent) = 0; virtual Operand *SelectDoubleConst(MIRDoubleConst &doubleConst, const BaseNode &parent) = 0; @@ -867,6 +942,39 @@ class CGFunc { loops.emplace_back(&loop); } + MapleVector &GetLmbcParamVec() { + return lmbcParamVec; + } + + void IncLmbcArgsInRegs(RegType ty) { + if (ty == kRegTyInt) { + lmbcIntArgs++; + } else { + lmbcFpArgs++; + } + } + + int16 GetLmbcArgsInRegs(RegType ty) { + return ty == kRegTyInt ? lmbcIntArgs : lmbcFpArgs; + } + + void ResetLmbcArgsInRegs() { + lmbcIntArgs = 0; + lmbcFpArgs = 0; + } + + void IncLmbcTotalArgs() { + lmbcTotalArgs++; + } + + int16 GetLmbcTotalArgs() { + return lmbcTotalArgs; + } + + void ResetLmbcTotalArgs() { + lmbcTotalArgs = 0; + } + MapleVector &GetAllBBs() { return bbVec; } @@ -1204,6 +1312,10 @@ class CGFunc { MapleVector lrVec; #endif /* TARGARM32 */ MapleVector loops; + MapleVector lmbcParamVec; + int32 lmbcIntArgs = 0; + int32 lmbcFpArgs = 0; + int32 lmbcTotalArgs = 0; CGCFG *theCFG = nullptr; uint32 nextSpillLocation = 0; diff --git a/src/mapleall/maple_be/include/cg/memlayout.h b/src/mapleall/maple_be/include/cg/memlayout.h index 1f9b50ea21af9b7fc2beeb5e5ed74a77f37bffaf..6e27c0e7e26894911d0a43b2eb30baefc4f6689d 100644 --- a/src/mapleall/maple_be/include/cg/memlayout.h +++ b/src/mapleall/maple_be/include/cg/memlayout.h @@ -221,7 +221,7 @@ class MemLayout { return segArgsRegPassed; } - const MemSegment &GetSegArgsStkPass() const { + const MemSegment &GetSegArgsToStkPass() const { return segArgsToStkPass; } diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_args.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_args.cpp index 46e0ae1922f62c122e276ac8a05b61a97c515a63..9e1dd18d7ef63ccc46bed4540587418e63bc153d 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_args.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_args.cpp @@ -20,6 +20,9 @@ namespace maplebe { using namespace maple; void AArch64MoveRegArgs::Run() { + if (cgFunc->GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + return; + } MoveVRegisterArgs(); MoveRegisterArgs(); } diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_call_conv.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_call_conv.cpp index 8201883bd3442b32a070162c5c66949ab33fc28d..511302deba2104cda686afb2262deb2e29bb7c26 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_call_conv.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_call_conv.cpp @@ -141,6 +141,7 @@ uint32 ProcessStructWhenClassifyAggregate(const BECommon &be, MIRStructType &str } } if (isF32 || isF64) { + CHECK_FATAL(numRegs <=classesLength, "ClassifyAggregate: num regs exceed limit"); for (uint32 i = 0; i < numRegs; ++i) { classes[i] = kAArch64FloatClass; } @@ -204,6 +205,28 @@ int32 ClassifyAggregate(const BECommon &be, MIRType &mirType, AArch64ArgumentCla } } +/* external interface to look for pure float struct */ +uint32 AArch64CallConvImpl::FloatParamRegRequired(MIRStructType &structType, uint32 &fpSize) { + if 
(structType.GetSize() > k32ByteSize) { + return 0; + } + AArch64ArgumentClass classes[kMaxRegCount]; + uint32 numRegs = ProcessStructWhenClassifyAggregate(beCommon, structType, classes, kMaxRegCount, fpSize); + if (numRegs) { + bool isPure = true; + for (uint i = 0; i < numRegs; ++i) { + if (classes[i] != kAArch64FloatClass) { + isPure = false; + break; + } + } + if (isPure) { + return numRegs; + } + } + return 0; +} + void AArch64CallConvImpl::InitCCLocInfo(CCLocInfo &pLoc) const { pLoc.reg0 = kRinvalid; pLoc.reg1 = kRinvalid; diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp index 92d17e72c5892e6514027733226b0585c6ffcc1e..35564ef8d7448539aa92c64609d071066bb85220 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp @@ -149,6 +149,19 @@ MOperator PickLdStInsn(bool isLoad, uint32 bitSize, PrimType primType, AArch64is } } +bool IsBlkassignForPush(BlkassignoffNode &bNode) +{ + BaseNode *dest = bNode.Opnd(0); + bool spBased = false; + if (dest->GetOpCode() == OP_regread) { + RegreadNode &node = static_cast(*dest); + if (-node.GetRegIdx() == kSregSp) { + spBased = true; + } + } + return spBased; +} + RegOperand &AArch64CGFunc::GetOrCreateResOperand(const BaseNode &parent, PrimType primType) { RegOperand *resOpnd = nullptr; if (parent.GetOpCode() == OP_regassign) { @@ -156,8 +169,7 @@ RegOperand &AArch64CGFunc::GetOrCreateResOperand(const BaseNode &parent, PrimTyp PregIdx pregIdx = regAssignNode.GetRegIdx(); if (IsSpecialPseudoRegister(pregIdx)) { /* if it is one of special registers */ - ASSERT(-pregIdx != kSregRetval0, "the dest of RegAssign node must not be kSregRetval0"); - resOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx); + resOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, primType); } else { resOpnd = &GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx)); } @@ -1400,9 +1412,7 @@ void AArch64CGFunc::SelectRegassign(RegassignNode &stmt, Operand &opnd0) { RegOperand *regOpnd = nullptr; PregIdx pregIdx = stmt.GetRegIdx(); if (IsSpecialPseudoRegister(pregIdx)) { - /* if it is one of special registers */ - ASSERT(-pregIdx != kSregRetval0, "the dest of RegAssign node must not be kSregRetval0"); - regOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx); + regOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, stmt.GetPrimType()); } else { regOpnd = &GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx)); } @@ -1911,6 +1921,152 @@ void AArch64CGFunc::SelectIassignoff(IassignoffNode &stmt) { SelectCopy(memOpnd, destType, srcOpnd, destType); } +void AArch64CGFunc::SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) { + int32 offset = stmt.GetOffset(); + PrimType primType = stmt.GetPrimType(); + uint32 bitlen = GetPrimTypeSize(primType) * kBitsPerByte; + + Operand &srcOpnd = LoadIntoRegister(opnd, primType); + RegOperand *rfp = &GetOrCreatePhysicalRegisterOperand(RFP, k64BitSize, kRegTyInt); + OfstOperand *offsetOpnd = &CreateOfstOpnd(offset, k32BitSize); + MemOperand &memOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, bitlen, rfp, nullptr, offsetOpnd, nullptr); + MOperator mOp = PickStInsn(bitlen, primType); + Insn &store = GetCG()->BuildInstruction(mOp, srcOpnd, memOpnd); + GetCurBB()->AppendInsn(store); + //std::cout << "=======================>SelectIassignfpoff offset " << offset << "\n"; +} + +void AArch64CGFunc::SelectIassignspoff(PrimType pTy, int32 offset, Operand &opnd) { + if 
(GetLmbcArgInfo() == nullptr) { + LmbcArgInfo *p = memPool->New(*GetFuncScopeAllocator()); + SetLmbcArgInfo(p); + } + uint32 byteLen = GetPrimTypeSize(pTy); + uint32 bitLen = byteLen * kBitsPerByte; + RegType regTy = GetRegTyFromPrimTy(pTy); + int32 curRegArgs = GetLmbcArgsInRegs(regTy); + if (curRegArgs < k8ByteSize) { + RegOperand *res = &CreateVirtualRegisterOperand(NewVReg(regTy, byteLen)); + SelectCopy(*res, pTy, opnd, pTy); + SetLmbcArgInfo(res, pTy, offset, 1); + } + else { + /* Move into allocated space */ + Operand &memOpd = CreateMemOpnd(RSP, offset, byteLen); + Operand ® = LoadIntoRegister(opnd, pTy); + GetCurBB()->AppendInsn( + GetCG()->BuildInstruction(PickStInsn(bitLen, pTy), reg, memOpd)); + } + IncLmbcArgsInRegs(regTy); /* num of args in registers */ + IncLmbcTotalArgs(); /* num of args */ +} + +// Search for CALL/ICALL node, must be called from a blkassignoff node +MIRType *AArch64CGFunc::GetAggTyFromCallSite(StmtNode *stmt) { + for ( ; stmt != nullptr; stmt = stmt->GetNext()) { + if (stmt->GetOpCode() == OP_call || stmt->GetOpCode() == OP_icallproto) { + break; + } + } + CHECK_FATAL(stmt && (stmt->GetOpCode() == OP_call || stmt->GetOpCode() == OP_icallproto), + "blkassign sp not followed by call"); + int32 nargs = GetLmbcTotalArgs(); + MIRType *ty = nullptr; + if (stmt->GetOpCode() == OP_call) { + CallNode *callNode = static_cast(stmt); + MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode->GetPUIdx()); + if (fn->GetFormalCount() > 0) { + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fn->GetNthParamTyIdx(nargs)); + } + } else { + /* OP_icall */ + IcallNode *icallNode = static_cast(stmt); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(icallNode->GetRetTyIdx()); + MIRFuncType *fType = static_cast(type); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fType->GetNthParamType(nargs)); + } + CHECK_FATAL(ty->GetPrimType() == PTY_agg, "Type in lmbc function not agg"); + return ty; +} + +/* If blassign for argument, this function loads the agg arguments into + virtual registers, disregard if there is sufficient physicall call + registers. Argument > 16-bytes are copied to preset space and ptr + result is loaded into virtual register. + If blassign is not for argument, this function simply memcpy */ +void AArch64CGFunc::SelectBlkassignoff(BlkassignoffNode &bNode, Operand *src) +{ + if (GetLmbcArgInfo() == nullptr) { + LmbcArgInfo *p = memPool->New(*GetFuncScopeAllocator()); + SetLmbcArgInfo(p); + } + CHECK_FATAL(src->GetKind() == Operand::kOpdRegister, "blkassign src type not in register"); + PrimType pTy = PTY_i64; + uint32 size = bNode.blockSize; + if (bNode.blockSize <= k16ByteSize) { + if (bNode.blockSize <= k1ByteSize) { + pTy = PTY_i8; + } else if (bNode.blockSize <= k2ByteSize) { + pTy = PTY_i16; + } else if (bNode.blockSize <= k4ByteSize) { + pTy = PTY_i32; + } else if (bNode.blockSize > k8ByteSize) { + size = k8ByteSize; + } + } + + Operand *dest = HandleExpr(bNode, *bNode.Opnd(0)); + if (IsBlkassignForPush(bNode)) { + /* follow calling convention */ + AArch64reg regno = static_cast(static_cast(src)->GetRegisterNumber()); + MIRStructType *ty = static_cast(GetAggTyFromCallSite(&bNode)); + uint32 fpregs = FloatParamRegRequired(*ty, size); + if (fpregs > 0) { + /* pure floating point in agg */ + pTy = (size == k4ByteSize) ? PTY_f32 : PTY_f64; + for (uint32 i = 0; i < fpregs; i++) { + int s = (i == 0) ? 
0 : (i * size); + MemOperand &mem = CreateMemOpnd(regno, s, size * kBitsPerByte); + RegOperand *res = &CreateVirtualRegisterOperand(NewVReg(kRegTyFloat, size)); + SelectCopy(*res, pTy, mem, pTy); + SetLmbcArgInfo(res, pTy, bNode.offset + s, fpregs - i); + IncLmbcArgsInRegs(kRegTyFloat); + } + IncLmbcTotalArgs(); + return; + } else if (bNode.blockSize <= k16ByteSize) { + /* integer/mixed types */ + MemOperand &mem = CreateMemOpnd(regno, 0, size * kBitsPerByte); + RegOperand *res = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, size)); + SelectCopy(*res, pTy, mem, pTy); + SetLmbcArgInfo(res, pTy, bNode.offset, 2); + IncLmbcArgsInRegs(kRegTyInt); + if (bNode.blockSize > k8ByteSize) { + MemOperand &mem = CreateMemOpnd(regno, k8ByteSize, size * kBitsPerByte); + RegOperand *res = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, size)); + SelectCopy(*res, pTy, mem, pTy); + SetLmbcArgInfo(res, pTy, bNode.offset + k8ByteSize, 1); + IncLmbcArgsInRegs(kRegTyInt); + } + IncLmbcTotalArgs(); + return; + } + } + /* pass on stack */ + RegOperand *regResult = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + std::vector opndVec; + opndVec.push_back(regResult); /* result */ + opndVec.push_back(PrepareMemcpyParamOpnd(bNode.offset, *dest));/* param 0 */ + opndVec.push_back(src); /* param 1 */ + opndVec.push_back(PrepareMemcpyParamOpnd(bNode.blockSize));/* param 2 */ + SelectLibCall("memcpy", opndVec, PTY_a64, PTY_a64); + if (IsBlkassignForPush(bNode)) { + SetLmbcArgInfo(static_cast(src), pTy, (int32)bNode.offset, 1); + IncLmbcArgsInRegs(kRegTyInt); + IncLmbcTotalArgs(); + } +} + void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) { ASSERT(stmt.Opnd(0) != nullptr, "null ptr check"); Operand &lhsAddrOpnd = LoadIntoRegister(AddrOpnd, stmt.Opnd(0)->GetPrimType()); @@ -2520,18 +2676,23 @@ void AArch64CGFunc::SelectAddrof(Operand &result, MemOperand &memOpnd, FieldID f } } -Operand *AArch64CGFunc::SelectAddrof(AddrofNode &expr, const BaseNode &parent) { +Operand *AArch64CGFunc::SelectAddrof(AddrofNode &expr, const BaseNode &parent, bool isAddrofoff) { MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(expr.GetStIdx()); int32 offset = 0; - if (expr.GetFieldID() != 0) { - MIRStructType *structType = static_cast(symbol->GetType()); - /* with array of structs, it is possible to have nullptr */ - if (structType != nullptr) { - offset = GetBecommon().GetFieldOffset(*structType, expr.GetFieldID()).first; + AddrofoffNode &addrofoffExpr = static_cast(static_cast(expr)); + if (isAddrofoff) { + offset = addrofoffExpr.offset; + } else { + if (expr.GetFieldID() != 0) { + MIRStructType *structType = static_cast(symbol->GetType()); + /* with array of structs, it is possible to have nullptr */ + if (structType != nullptr) { + offset = GetBecommon().GetFieldOffset(*structType, expr.GetFieldID()).first; + } } } if ((symbol->GetStorageClass() == kScFormal) && (symbol->GetSKind() == kStVar) && - ((expr.GetFieldID() != 0) || + ((isAddrofoff == false && expr.GetFieldID() != 0) || (GetBecommon().GetTypeSize(symbol->GetType()->GetTypeIndex().GetIdx()) > k16ByteSize))) { /* * Struct param is copied on the stack by caller if struct size > 16. @@ -2584,10 +2745,14 @@ Operand *AArch64CGFunc::SelectAddrof(AddrofNode &expr, const BaseNode &parent) { return &result; } - SelectAddrof(result, CreateStImmOperand(*symbol, offset, 0), expr.GetFieldID()); + SelectAddrof(result, CreateStImmOperand(*symbol, offset, 0), isAddrofoff ? 
0 : expr.GetFieldID()); return &result; } +Operand *AArch64CGFunc::SelectAddrofoff(AddrofoffNode &expr, const BaseNode &parent) { + return SelectAddrof(static_cast(static_cast(expr)), parent, true); +} + Operand &AArch64CGFunc::SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) { uint32 instrSize = static_cast(expr.SizeOfInstr()); PrimType primType = (instrSize == k8ByteSize) ? PTY_u64 : @@ -2650,6 +2815,96 @@ Operand *AArch64CGFunc::SelectIreadoff(const BaseNode &parent, IreadoffNode &ire return result; } +RegOperand *AArch64CGFunc::GenLmbcParamLoad(int32 offset, uint32 byteSize, RegType regType, PrimType primType) { + RegOperand *rfp = &GetOrCreatePhysicalRegisterOperand(RFP, k64BitSize, kRegTyInt); + OfstOperand *offsetOpnd = &CreateOfstOpnd(offset, k32BitSize); + uint32 bitlen = byteSize * kBitsPerByte; + MemOperand &memOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, bitlen, rfp, nullptr, offsetOpnd, nullptr); + RegOperand *result = &GetOrCreateVirtualRegisterOperand(NewVReg(regType, byteSize)); + MOperator mOp = PickLdInsn(bitlen, primType); + Insn &load = GetCG()->BuildInstruction(mOp, *result, memOpnd); + GetCurBB()->AppendInsn(load); + return result; +} + +Operand *AArch64CGFunc::SelectIreadfpoff(const BaseNode &parent, IreadFPoffNode &ireadoff) { + int32 offset = ireadoff.GetOffset(); + PrimType primType = ireadoff.GetPrimType(); + uint32 bytelen = GetPrimTypeSize(primType); + uint32 bitlen = bytelen * kBitsPerByte; + RegType regty = GetRegTyFromPrimTy(primType); + RegOperand *result = nullptr; + if (offset >= 0) { + LmbcFormalParamInfo *info = GetLmbcFormalParamInfo(offset); + if (info->GetPrimType() == PTY_agg) { + bool inVreg = (info->GetVregNO() > 0); + regno_t reg = inVreg ? info->GetVregNO() : info->GetRegNO(); + if (info->IsPureFloat()) { + if (reg == 0) { + primType = (info->GetFpSize() == k4ByteSize) ? PTY_f32 : PTY_f64; + result = GenLmbcParamLoad(offset, info->GetFpSize(), kRegTyFloat, primType); + } else { + CHECK_FATAL(((offset % info->GetFpSize()) == 0), "SelectIreadfpoff: pure float struct offset not aligned"); + reg += ((offset - info->GetOffset()) / info->GetFpSize()); + result = &GetOrCreatePhysicalRegisterOperand((AArch64reg)(reg), bitlen, regty); + } + } else { + if (reg == 0) { + result = GenLmbcParamLoad(offset, bytelen, regty, primType); + } else { + if (info->GetSize() > k16ByteSize) { + result = GenLmbcParamLoad(offset, bytelen, regty, primType); + } else if (bytelen == k8ByteSize) { + bool isSecondReg = (offset - info->GetOffset()) >= k8ByteSize; + reg += (isSecondReg ? kOneRegister : 0); + if (inVreg) { + result = &GetOrCreateVirtualRegisterOperand(reg); + } else { + result = &GetOrCreatePhysicalRegisterOperand((AArch64reg)(reg), bitlen, regty); + } + } else { + bool isSecondReg = (offset - info->GetOffset()) >= k8ByteSize; + reg += (isSecondReg ? kOneRegister : 0); + RegOperand *paramReg; + if (inVreg) { + paramReg = &GetOrCreateVirtualRegisterOperand(reg); + } else { + paramReg = &GetOrCreatePhysicalRegisterOperand((AArch64reg)(reg), k64BitSize, kRegTyInt); + } + RegOperand *extractReg = &GetOrCreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + uint32 lsbPos = ((isSecondReg ? (offset - k8ByteSize) : offset) - info->GetOffset()) * kBitsPerByte; + ImmOperand *lsb = &CreateImmOperand(lsbPos, k8BitSize, false); + ImmOperand *width = &CreateImmOperand(bitlen, k8BitSize, false); + MOperator mOp = /*(bytelen > k4ByteSize) ? 
*/MOP_xubfxrri6i6/* : MOP_wubfxrri5i5*/; + Insn &extract = GetCG()->BuildInstruction(mOp, *extractReg, *paramReg, *lsb, *width); + GetCurBB()->AppendInsn(extract); + if (IsPrimitiveFloat(primType)) { + mOp = (bytelen > k4ByteSize) ? MOP_xvmovdr : MOP_xvmovsr; + result = &GetOrCreateVirtualRegisterOperand(NewVReg(regty, bytelen)); + Insn &mov = GetCG()->BuildInstruction(mOp, *result, *extractReg); + GetCurBB()->AppendInsn(mov); + } else { + result = extractReg; + } + } + } + } + } else { + CHECK_FATAL(primType == info->GetPrimType(), "Incorrect primtype"); + CHECK_FATAL(offset == info->GetOffset(), "Incorrect offset"); + if (info->GetRegNO() == 0) { + /* TODO : follow lmbc sp offset for now */ + result = GenLmbcParamLoad(offset, bytelen, regty, primType); + } else { + result = &GetOrCreatePhysicalRegisterOperand((AArch64reg)(info->GetRegNO()), bitlen, regty); + } + } + } else { + result = GenLmbcParamLoad(offset, bytelen, regty, primType); + } + return result; +} + Operand *AArch64CGFunc::SelectIread(const BaseNode &parent, IreadNode &expr, int extraOffset, PrimType finalBitFieldDestType) { int32 offset = 0; @@ -3268,9 +3523,7 @@ Operand *AArch64CGFunc::SelectAdd(BinaryNode &node, Operand &opnd0, Operand &opn auto ®AssignNode = static_cast(parent); PregIdx pregIdx = regAssignNode.GetRegIdx(); if (IsSpecialPseudoRegister(pregIdx)) { - /* if it is one of special registers */ - ASSERT(-pregIdx != kSregRetval0, "the dest of RegAssign node must not be kSregRetval0"); - resOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx); + resOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, dtype); } else { resOpnd = &GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx)); } @@ -5864,6 +6117,99 @@ void AArch64CGFunc::GenerateCleanupCode(BB &bb) { SetCleanupBB(*GetCurBB()); } +uint32 AArch64CGFunc::FloatParamRegRequired(MIRStructType &structType, uint32 &fpSize) { + AArch64CallConvImpl parmlocator(GetBecommon()); + return parmlocator.FloatParamRegRequired(structType, fpSize); +} + +/* + * Map param registers to formals. For small structs passed in param registers, + * create a move to vreg since lmbc IR does not create a regassign for them. 
+ */ +void AArch64CGFunc::AssignLmbcFormalParams() { + PrimType primType; + uint32 offset; + regno_t intReg = R0; + regno_t fpReg = V0; + for (auto param : GetLmbcParamVec()) { + primType = param->GetPrimType(); + offset = param->GetOffset(); + if (IsPrimitiveInteger(primType)) { + if (intReg > R7) { + param->SetRegNO(0); + } else { + param->SetRegNO(intReg); + intReg++; + } + } else if (IsPrimitiveFloat(primType)) { + if (fpReg > V7) { + param->SetRegNO(0); + } else { + param->SetRegNO(fpReg); + fpReg++; + } + } else if (primType == PTY_agg) { + if (param->IsPureFloat()) { + uint32 numFpRegs = param->GetNumRegs(); + if ((fpReg + numFpRegs - kOneRegister) > V7) { + param->SetRegNO(0); + } else { + param->SetRegNO(fpReg); + fpReg += numFpRegs; + } + } else if (param->GetSize() > k16ByteSize) { + if (intReg > R7) { + param->SetRegNO(0); + } else { + param->SetRegNO(intReg); + intReg++; + } + } else if (param->GetSize() <= k8ByteSize) { + if (intReg > R7) { + param->SetRegNO(0); + } else { + param->SetRegNO(intReg); + intReg++; + } + } else { + /* size > 8 && size <= 16 */ + if ((intReg + kOneRegister) > R7) { + param->SetRegNO(0); + } else { + param->SetRegNO(intReg); + param->SetNumRegs(kTwoRegister); + intReg += kTwoRegister; + } + } + if (param->GetRegNO() != 0) { + for (int i = 0; i < param->GetNumRegs(); ++i) { + PrimType pType = PTY_i64; + RegType rType = kRegTyInt; + uint32 rSize = k8ByteSize; + if (param->IsPureFloat()) { + rType = kRegTyFloat; + if (param->GetFpSize() <= k4ByteSize) { + pType = PTY_f32; + rSize = k4ByteSize; + } else { + pType = PTY_f64; + } + } + regno_t vreg = NewVReg(rType, rSize); + RegOperand &dest = GetOrCreateVirtualRegisterOperand(vreg); + RegOperand &src = GetOrCreatePhysicalRegisterOperand(static_cast(param->GetRegNO() + i), rSize * kBitsPerByte, rType); + SelectCopy(dest, pType, src, pType); + if (param->GetVregNO() == 0) { + param->SetVregNO(vreg); + } + } + } + } else { + CHECK_FATAL(false, "lmbc formal primtype not handled"); + } + } +} + /* if offset < 0, allocation; otherwise, deallocation */ MemOperand &AArch64CGFunc::CreateCallFrameOperand(int32 offset, int32 size) { MemOperand *memOpnd = CreateStackMemOpnd(RSP, offset, size); @@ -5883,7 +6229,13 @@ AArch64CGFunc::MovkLslOperandArray AArch64CGFunc::movkLslOperands = { LogicalShiftLeftOperand AArch64CGFunc::addSubLslOperand(kShiftAmount12, 4); MemOperand &AArch64CGFunc::CreateStkTopOpnd(uint32 offset, uint32 size) { - MemOperand *memOp = CreateStackMemOpnd(RFP, offset, size); + AArch64reg reg; + if (GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + reg = RSP; + } else { + reg = RFP; + } + MemOperand *memOp = CreateStackMemOpnd(reg, offset, size); return *memOp; } @@ -7698,6 +8050,46 @@ void AArch64CGFunc::IntrinsifyStringIndexOf(ListOperand &srcOpnds, const MIRSymb callBB->AppendBB(*jointBB); SetCurBB(*jointBB); } + +/* Lmbc calls have no argument, they are all explicit iassignspoff or + blkassign. 
Info collected and to be emitted here */ +void AArch64CGFunc::LmbcSelectParmList(ListOperand *srcOpnds) { + if (GetLmbcArgInfo() == nullptr) { + return; /* no arg */ + } + CHECK_FATAL(GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc, "To be called for Lmbc model only"); + MapleVector &args = GetLmbcCallArgs(); + MapleVector &types = GetLmbcCallArgTypes(); + MapleVector &offsets = GetLmbcCallArgOffsets(); + MapleVector ®s = GetLmbcCallArgNumOfRegs(); + int iCnt = 0; + int fCnt = 0; + for (int i = 0; i < args.size(); i++) { + RegType ty = args[i]->GetRegisterType(); + PrimType pTy = types[i]; + if ((i + regs[i]) <= k8ByteSize) { + AArch64reg reg; + if (args[i]->IsOfIntClass() ) { + reg = static_cast(R0 + iCnt++); + } else { + reg = static_cast(V0 + fCnt++); + } + RegOperand *res = &GetOrCreatePhysicalRegisterOperand(reg, GetPrimTypeSize(pTy) * kBitsPerByte, ty); + SelectCopy(*res, pTy, *args[i], pTy); + srcOpnds->PushOpnd(*res); + } else { + int32 pSize = GetPrimTypeSize(pTy); + Operand &memOpd = CreateMemOpnd(RSP, offsets[i], pSize); + GetCurBB()->AppendInsn( + GetCG()->BuildInstruction( + PickStInsn(pSize * kBitsPerByte, pTy), *args[i], memOpd)); + } + } + ResetLmbcArgInfo(); /* reset */ + ResetLmbcArgsInRegs(); + ResetLmbcTotalArgs(); +} + void AArch64CGFunc::SelectCall(CallNode &callNode) { MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode.GetPUIdx()); MIRSymbol *fsym = GetFunction().GetLocalOrGlobalSymbol(fn->GetStIdx(), false); @@ -7709,6 +8101,9 @@ void AArch64CGFunc::SelectCall(CallNode &callNode) { } ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + if (GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + LmbcSelectParmList(srcOpnds); + } bool callNative = false; if ((fsym->GetName() == "MCC_CallFastNative") || (fsym->GetName() == "MCC_CallFastNativeExt") || (fsym->GetName() == "MCC_CallSlowNative0") || (fsym->GetName() == "MCC_CallSlowNative1") || @@ -7774,7 +8169,11 @@ void AArch64CGFunc::SelectCall(CallNode &callNode) { void AArch64CGFunc::SelectIcall(IcallNode &icallNode, Operand &srcOpnd) { ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); - SelectParmList(icallNode, *srcOpnds); + if (GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + LmbcSelectParmList(srcOpnds); + } else { + SelectParmList(icallNode, *srcOpnds); + } Operand *fptrOpnd = &srcOpnd; if (fptrOpnd->GetKind() != Operand::kOpdRegister) { @@ -7840,8 +8239,7 @@ void AArch64CGFunc::SelectReturn(Operand *opnd0) { AArch64CallConvImpl retLocator(GetBecommon()); CCLocInfo retMech; retLocator.InitReturnInfo(*retTyp, retMech); - if (retMech.GetRegCount() > 0) { - CHECK_FATAL(opnd0 != nullptr, "opnd0 must not be nullptr"); + if ((retMech.GetRegCount() > 0) && (opnd0 != nullptr)) { RegType regTyp = is64x1vec ? kRegTyFloat : GetRegTyFromPrimTy(retMech.GetPrimTypeOfReg0()); PrimType oriPrimType = is64x1vec ? 
GetFunction().GetReturnType()->GetPrimType() : retMech.GetPrimTypeOfReg0(); AArch64reg retReg = static_cast(retMech.GetReg0()); @@ -7876,8 +8274,6 @@ void AArch64CGFunc::SelectReturn(Operand *opnd0) { } else { CHECK_FATAL(false, "nyi"); } - } else if (opnd0 != nullptr) { /* pass in memory */ - CHECK_FATAL(false, "SelectReturn: return in memory NYI"); } GetExitBBsVec().emplace_back(GetCurBB()); } @@ -7891,6 +8287,9 @@ RegOperand &AArch64CGFunc::GetOrCreateSpecialRegisterOperand(PregIdx sregIdx, Pr case kSregFp: reg = RFP; break; + case kSregGp: + reg = RFP; + break; case kSregThrownval: { /* uses x0 == R0 */ ASSERT(uCatch.regNOCatch > 0, "regNOCatch should greater than 0."); if (Globals::GetInstance()->GetOptimLevel() == 0) { diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_memlayout.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_memlayout.cpp index 13c7522194e199e92ff7a2cb0bb94f9ae332e5b6..514b2fcf61019b73872460ffb9f6c5ac685ce678 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_memlayout.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_memlayout.cpp @@ -189,6 +189,16 @@ void AArch64MemLayout::LayoutVarargParams() { } void AArch64MemLayout::LayoutFormalParams() { + if (be.GetMIRModule().GetFlavor() == kFlavorLmbc && mirFunction->GetFormalCount() == 0) { + /* + * lmbc : upformalsize - size of formals passed from caller's frame into current function + * framesize - total frame size of current function used by Maple IR + * outparmsize - portion of frame size of current function used by call parameters + */ + segArgsStkPassed.SetSize(mirFunction->GetOutParmSize()); + return; + } + AArch64CallConvImpl parmLocator(be); CCLocInfo ploc; for (size_t i = 0; i < mirFunction->GetFormalCount(); ++i) { @@ -274,6 +284,11 @@ void AArch64MemLayout::LayoutFormalParams() { } void AArch64MemLayout::LayoutLocalVariables(std::vector &tempVar, std::vector &returnDelays) { + if (be.GetMIRModule().GetFlavor() == kFlavorLmbc && mirFunction->GetFormalCount() == 0) { + segLocals.SetSize(mirFunction->GetFrameSize()/* - mirFunction->GetOutParmSize()*/); + return; + } + uint32 symTabSize = mirFunction->GetSymTab()->GetSymbolTableSize(); for (uint32 i = 0; i < symTabSize; ++i) { MIRSymbol *sym = mirFunction->GetSymTab()->GetSymbolFromStIdx(i); diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp index 2d4da9a0389721c204c138d6804a33b27e5e5c10..9f1ae84cd8ad0b0eefa540dada6808a41a3f20e4 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp @@ -22,6 +22,11 @@ void AArch64FPLROffsetAdjustment::Run() { } void AArch64FPLROffsetAdjustment::AdjustmentOffsetForOpnd(Insn &insn, AArch64CGFunc &aarchCGFunc) { + if (aarchCGFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + /* TODO: Need to handle */ + AdjustmentStackPointer(insn, aarchCGFunc); + return; + } uint32 opndNum = insn.GetOperandSize(); MemLayout *memLayout = aarchCGFunc.GetMemlayout(); bool stackBaseOpnd = false; diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_proepilog.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_proepilog.cpp index f4143111c0a8409042e6e59ecc7e85b70e2f2296..59e44f6e28366eae54c7768baa466529f8dbbee5 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_proepilog.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_proepilog.cpp @@ -185,6 +185,9 @@ void AArch64GenProEpilog::TailCallBBOpt(BB &bb, std::set &callInsns, BB & * 
Return value: true if function do not need Prologue/Epilogue. false otherwise. */ bool AArch64GenProEpilog::TailCallOpt() { + if (cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + return false; + } /* Count how many call insns in the whole function. */ uint32 nCount = 0; bool hasGetStackClass = false; @@ -1205,9 +1208,16 @@ void AArch64GenProEpilog::GeneratePushRegs() { Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); Operand &fpOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(stackBaseReg, k64BitSize, kRegTyInt); int64 argsToStkPassSize = cgFunc.GetMemlayout()->SizeOfArgsToStackPass(); - if (argsToStkPassSize > 0) { - Operand &immOpnd = aarchCGFunc.CreateImmOperand(argsToStkPassSize, k32BitSize, true); - aarchCGFunc.SelectAdd(fpOpnd, spOpnd, immOpnd, PTY_u64); + bool isLmbc = cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc; + if ((argsToStkPassSize > 0) || isLmbc) { + Operand *immOpnd; + if (isLmbc) { + int32 size = static_cast(static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + immOpnd = &aarchCGFunc.CreateImmOperand(size, k32BitSize, true); + } else { + immOpnd = &aarchCGFunc.CreateImmOperand(argsToStkPassSize, k32BitSize, true); + } + aarchCGFunc.SelectAdd(fpOpnd, spOpnd, *immOpnd, PTY_u64); cgFunc.GetCurBB()->GetLastInsn()->SetFrameDef(true); if (cgFunc.GenCfi()) { cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiDefCfaInsn(stackBaseReg, @@ -1423,10 +1433,17 @@ void AArch64GenProEpilog::GenerateProlog(BB &bb) { } if (useFP) { Operand &fpOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(stackBaseReg, k64BitSize, kRegTyInt); + bool isLmbc = cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc; int64 argsToStkPassSize = cgFunc.GetMemlayout()->SizeOfArgsToStackPass(); - if (argsToStkPassSize > 0) { - Operand &immOpnd = aarchCGFunc.CreateImmOperand(argsToStkPassSize, k32BitSize, true); - aarchCGFunc.SelectAdd(fpOpnd, spOpnd, immOpnd, PTY_u64); + if ((argsToStkPassSize > 0) || isLmbc) { + Operand *immOpnd; + if (isLmbc) { + int32 size = static_cast(static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + immOpnd = &aarchCGFunc.CreateImmOperand(size, k32BitSize, true); + } else { + immOpnd = &aarchCGFunc.CreateImmOperand(argsToStkPassSize, k32BitSize, true); + } + aarchCGFunc.SelectAdd(fpOpnd, spOpnd, *immOpnd, PTY_u64); cgFunc.GetCurBB()->GetLastInsn()->SetFrameDef(true); if (cgFunc.GenCfi()) { cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiDefCfaInsn( @@ -1821,7 +1838,7 @@ void AArch64GenProEpilog::GenerateEpilog(BB &bb) { } if (cgFunc.HasVLAOrAlloca()) { - auto size = static_cast(cgFunc.GetMemlayout())->GetSegArgsStkPass().GetSize(); + auto size = static_cast(cgFunc.GetMemlayout())->GetSegArgsToStkPass().GetSize(); stackFrameSize = stackFrameSize < size ? 
0 : stackFrameSize - size; } diff --git a/src/mapleall/maple_be/src/cg/cg_phasemanager.cpp b/src/mapleall/maple_be/src/cg/cg_phasemanager.cpp index 47660be46c3075e88a8a3196531b2f1a1b6f322d..f8e5c766fdf7b771ad020667aa89cb3c31ea03d3 100644 --- a/src/mapleall/maple_be/src/cg/cg_phasemanager.cpp +++ b/src/mapleall/maple_be/src/cg/cg_phasemanager.cpp @@ -259,7 +259,9 @@ bool CgFuncPM::PhaseRun(MIRModule &m) { (void)cf.Simplify(mirFunc->GetBody()); } - DoFuncCGLower(m, *mirFunc); + if (m.GetFlavor() != MIRFlavor::kFlavorLmbc) { + DoFuncCGLower(m, *mirFunc); + } /* create CGFunc */ MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(mirFunc->GetStIdx().Idx()); auto funcMp = std::make_unique(memPoolCtrler, funcSt->GetName()); diff --git a/src/mapleall/maple_be/src/cg/cgfunc.cpp b/src/mapleall/maple_be/src/cg/cgfunc.cpp index 5c0a89042cba966c4d1cf8b2259f1b9ce9847a1a..417eb0563ce69ca9c0e76cfea1c99f38c33c659b 100644 --- a/src/mapleall/maple_be/src/cg/cgfunc.cpp +++ b/src/mapleall/maple_be/src/cg/cgfunc.cpp @@ -184,6 +184,11 @@ Operand *HandleAddrof(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { return cgFunc.SelectAddrof(addrofNode, parent); } +Operand *HandleAddrofoff(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + auto &addrofoffNode = static_cast(expr); + return cgFunc.SelectAddrofoff(addrofoffNode, parent); +} + Operand *HandleAddroffunc(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { auto &addroffuncNode = static_cast(expr); return &cgFunc.SelectAddrofFunc(addroffuncNode, parent); @@ -204,6 +209,11 @@ Operand *HandleIreadoff(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) return cgFunc.SelectIreadoff(parent, ireadNode); } +Operand *HandleIreadfpoff(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + auto &ireadNode = static_cast(expr); + return cgFunc.SelectIreadfpoff(parent, ireadNode); +} + Operand *HandleSub(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { return cgFunc.SelectSub(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); @@ -958,10 +968,12 @@ void InitHandleExprFactory() { RegisterFactoryFunction(OP_div, HandleDiv); RegisterFactoryFunction(OP_rem, HandleRem); RegisterFactoryFunction(OP_addrof, HandleAddrof); + RegisterFactoryFunction(OP_addrofoff, HandleAddrofoff); RegisterFactoryFunction(OP_addroffunc, HandleAddroffunc); RegisterFactoryFunction(OP_addroflabel, HandleAddrofLabel); RegisterFactoryFunction(OP_iread, HandleIread); RegisterFactoryFunction(OP_ireadoff, HandleIreadoff); + RegisterFactoryFunction(OP_ireadfpoff, HandleIreadfpoff); RegisterFactoryFunction(OP_sub, HandleSub); RegisterFactoryFunction(OP_band, HandleBand); RegisterFactoryFunction(OP_bior, HandleBior); @@ -1237,6 +1249,28 @@ void HandleIassignoff(StmtNode &stmt, CGFunc &cgFunc) { cgFunc.SelectIassignoff(iassignoffNode); } +void HandleIassignfpoff(StmtNode &stmt, CGFunc &cgFunc) { + ASSERT(stmt.GetOpCode() == OP_iassignfpoff, "expect iassignfpoff"); + auto &iassignfpoffNode = static_cast(stmt); + cgFunc.SelectIassignfpoff(iassignfpoffNode, *cgFunc.HandleExpr(stmt, *stmt.Opnd(0))); +} + +void HandleIassignspoff(StmtNode &stmt, CGFunc &cgFunc) { + ASSERT(stmt.GetOpCode() == OP_iassignspoff, "expect iassignspoff"); + auto &baseNode = static_cast(stmt); /* same as FP */ + BaseNode *rhs = baseNode.GetRHS(); + ASSERT(rhs != nullptr, "get rhs of iassignspoffNode failed"); + Operand *opnd0 = cgFunc.HandleExpr(baseNode, *rhs); + cgFunc.SelectIassignspoff(baseNode.GetPrimType(), 
baseNode.GetOffset(), *opnd0); +} + +void HandleBlkassignoff(StmtNode &stmt, CGFunc &cgFunc) { + ASSERT(stmt.GetOpCode() == OP_blkassignoff, "expect blkassignoff"); + auto &baseNode = static_cast(stmt); + Operand *src = cgFunc.HandleExpr(baseNode, *baseNode.Opnd(1)); + cgFunc.SelectBlkassignoff(baseNode, src); +} + void HandleEval(const StmtNode &stmt, CGFunc &cgFunc) { (void)cgFunc.HandleExpr(stmt, *static_cast(stmt).Opnd(0)); } @@ -1308,6 +1342,7 @@ void InitHandleStmtFactory() { RegisterFactoryFunction(OP_return, HandleReturn); RegisterFactoryFunction(OP_call, HandleCall); RegisterFactoryFunction(OP_icall, HandleICall); + RegisterFactoryFunction(OP_icallproto, HandleICall); RegisterFactoryFunction(OP_intrinsiccall, HandleIntrinCall); RegisterFactoryFunction(OP_intrinsiccallassigned, HandleIntrinCall); RegisterFactoryFunction(OP_intrinsiccallwithtype, HandleIntrinCall); @@ -1317,6 +1352,9 @@ void InitHandleStmtFactory() { RegisterFactoryFunction(OP_regassign, HandleRegassign); RegisterFactoryFunction(OP_iassign, HandleIassign); RegisterFactoryFunction(OP_iassignoff, HandleIassignoff); + RegisterFactoryFunction(OP_iassignfpoff, HandleIassignfpoff); + RegisterFactoryFunction(OP_iassignspoff, HandleIassignspoff); + RegisterFactoryFunction(OP_blkassignoff, HandleBlkassignoff); RegisterFactoryFunction(OP_eval, HandleEval); RegisterFactoryFunction(OP_rangegoto, HandleRangeGoto); RegisterFactoryFunction(OP_membarrelease, HandleMembar); @@ -1361,6 +1399,7 @@ CGFunc::CGFunc(MIRModule &mod, CG &cg, MIRFunction &mirFunc, BECommon &beCommon, lrVec(allocator.Adapter()), #endif /* TARGARM32 */ loops(allocator.Adapter()), + lmbcParamVec(allocator.Adapter()), shortFuncName(cg.ExtractFuncName(mirFunc.GetName()) + "." + std::to_string(funcId), &memPool) { mirModule.SetCurFunction(&func); dummyBB = CreateNewBB(); @@ -1493,11 +1532,99 @@ int32 CGFunc::GetFreqFromStmt(uint32 stmtId) { return GetFunction().GetFreqFromFirstStmt(stmtId); } +LmbcFormalParamInfo *CGFunc::GetLmbcFormalParamInfo(uint32 offset) { + MapleVector ¶mVec = GetLmbcParamVec(); + for (auto *param : paramVec) { + uint32 paramOffset = param->GetOffset(); + uint32 paramSize = param->GetSize(); + if (paramOffset <= offset && offset < (paramOffset + paramSize)) { + return param; + } + } + return nullptr; +} + +/* + * For formals of lmbc, the formal list is deleted if there is no + * passing of aggregate by value. 
+ */ +void CGFunc::CreateLmbcFormalParamInfo() { + if (GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + return; + } + PrimType primType; + uint32 offset; + uint32 typeSize; + MIRFunction &func = GetFunction(); + if (func.GetParamSize() > 0) { + int stackOffset = 0; + for (size_t idx = 0; idx < func.GetParamSize(); ++idx) { + MIRSymbol *sym = func.GetFormal(idx); + MIRType *type; + TyIdx tyIdx; + if (sym) { + tyIdx = func.GetNthParamTyIdx(idx); + type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + } else { + FormalDef vec = const_cast(GetBecommon().GetMIRModule().CurFunction())->GetFormalDefAt(idx); + tyIdx = vec.formalTyIdx; + type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + } + primType = type->GetPrimType(); + offset = stackOffset; + typeSize = GetBecommon().GetTypeSize(tyIdx); + stackOffset += (typeSize + 7) & (-8); + LmbcFormalParamInfo *info = GetMemoryPool()->New(primType, offset, typeSize); + lmbcParamVec.push_back(info); + if (type->GetKind() == kTypeStruct) { + MIRStructType &structType = static_cast(*type); + uint32 fpSize; + uint32 numFpRegs = FloatParamRegRequired(structType, fpSize); + if (numFpRegs > 0) { + info->SetIsPureFloat(); + info->SetNumRegs(numFpRegs); + info->SetFpSize(fpSize); + } + } + } + } else { + /* No aggregate pass by value here */ + for (StmtNode *stmt = func.GetBody()->GetFirst(); stmt != nullptr; stmt = stmt->GetNext()) { + if (stmt == nullptr) { + break; + } + if (stmt->GetOpCode() == OP_label) { + continue; + } + if (stmt->GetOpCode() != OP_regassign) { + break; + } + RegassignNode *regAssignNode = static_cast(stmt); + BaseNode *operand = regAssignNode->Opnd(0); + if (operand->GetOpCode() != OP_ireadfpoff) { + break; + } + IreadFPoffNode *ireadNode = static_cast(operand); + primType = ireadNode->GetPrimType(); + offset = ireadNode->GetOffset(); + typeSize = GetPrimTypeSize(primType); CHECK_FATAL((offset % k8ByteSize) == 0, ""); // scalar only, no struct for now + LmbcFormalParamInfo *info = GetMemoryPool()->New(primType, offset, typeSize); + lmbcParamVec.push_back(info); + } + } + std::sort(lmbcParamVec.begin(), lmbcParamVec.end(), + [] (LmbcFormalParamInfo *x, LmbcFormalParamInfo *y) + { return x->GetOffset() < y->GetOffset(); } + ); + AssignLmbcFormalParams(); +} + void CGFunc::GenerateInstruction() { InitHandleExprFactory(); InitHandleStmtFactory(); StmtNode *secondStmt = HandleFirstStmt(); + CreateLmbcFormalParamInfo(); /* First Pass: Creates the doubly-linked list of BBs (next,prev) */ volReleaseInsn = nullptr; unsigned lastSrcLoc = 0;
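
Note for reviewers (not part of the patch): the register-assignment policy implemented by AArch64CGFunc::AssignLmbcFormalParams above can be hard to follow inside the diff, so here is a minimal, self-contained C++ sketch of the same walk. All names and types below are simplified stand-ins, not MapleBE classes: integer formals take x0-x7, floating-point formals take v0-v7, pure-float aggregates detected by FloatParamRegRequired take one FP register per member, aggregates larger than 16 bytes are passed as a pointer in one GPR, 9-16 byte aggregates take two consecutive GPRs, and a register number of 0 means the formal stays on the stack.

// Simplified illustration only -- stand-in types, not the MapleBE classes
// touched by this patch.
#include <cstdint>
#include <iostream>
#include <vector>

enum class Kind { Int, Float, PureFloatAgg, Agg };

struct Formal {
  Kind kind;
  uint32_t size;       // byte size of the formal
  uint32_t numFpMembers;  // for PureFloatAgg: member count (FloatParamRegRequired)
  int reg = 0;         // assigned register number, 0 == passed on the stack
  int numRegs = 1;     // how many consecutive registers were assigned
};

// Mirrors the walk in AssignLmbcFormalParams: x0-x7 for the integer class,
// v0-v7 for floating point, aggregates > 16 bytes passed as a pointer.
void AssignRegs(std::vector<Formal> &formals) {
  int nextInt = 1, nextFp = 101;       // 1..8 ~ x0..x7, 101..108 ~ v0..v7
  const int lastInt = 8, lastFp = 108;
  for (auto &f : formals) {
    switch (f.kind) {
      case Kind::Int:
        f.reg = (nextInt <= lastInt) ? nextInt++ : 0;
        break;
      case Kind::Float:
        f.reg = (nextFp <= lastFp) ? nextFp++ : 0;
        break;
      case Kind::PureFloatAgg:         // one FP register per float member
        if (nextFp + static_cast<int>(f.numFpMembers) - 1 <= lastFp) {
          f.reg = nextFp;
          f.numRegs = static_cast<int>(f.numFpMembers);
          nextFp += static_cast<int>(f.numFpMembers);
        }
        break;
      case Kind::Agg:
        if (f.size > 16 || f.size <= 8) {   // pointer to caller copy, or fits one GPR
          f.reg = (nextInt <= lastInt) ? nextInt++ : 0;
        } else if (nextInt + 1 <= lastInt) {  // 9..16 bytes: two GPRs
          f.reg = nextInt;
          f.numRegs = 2;
          nextInt += 2;
        }
        break;
    }
  }
}

int main() {
  std::vector<Formal> formals = {
      {Kind::Int, 8, 0}, {Kind::PureFloatAgg, 16, 2}, {Kind::Agg, 12, 0}};
  AssignRegs(formals);
  for (const auto &f : formals) {
    std::cout << "reg=" << f.reg << " numRegs=" << f.numRegs << "\n";
  }
  return 0;
}

This mirrors the convention used throughout the patch, where LmbcFormalParamInfo::SetRegNO(0) marks a formal that could not be assigned a parameter register and is therefore loaded from the frame by GenLmbcParamLoad.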