diff --git a/src/mapleall/maple_be/include/be/becommon.h b/src/mapleall/maple_be/include/be/becommon.h index cc18f3dc20b6bd59879e57837fed731456bac67f..92160fcc568f88adee7b79a0ee1c8fa4fd8f8590 100644 --- a/src/mapleall/maple_be/include/be/becommon.h +++ b/src/mapleall/maple_be/include/be/becommon.h @@ -164,7 +164,7 @@ class BECommon { return 1; } - const MIRModule &GetMIRModule() const { + MIRModule &GetMIRModule() const { return mirModule; } diff --git a/src/mapleall/maple_be/include/be/common_utils.h b/src/mapleall/maple_be/include/be/common_utils.h index f2f0cd9b107c1948bbf93f1e5535871512c7da49..7a3c210537f2755579822a3c2241658235b61849 100644 --- a/src/mapleall/maple_be/include/be/common_utils.h +++ b/src/mapleall/maple_be/include/be/common_utils.h @@ -192,6 +192,19 @@ inline uint64 RoundUp(uint64 offset, uint64 align) { return RoundUpConst(offset, align); } +inline int64 RoundDownConst(int64 offset, int64 align) { + return (-align) & offset; +} + +// align must be a power of 2 +inline int64 RoundDown(int64 offset, int64 align) { + if (align == 0) { + return offset; + } + ASSERT(IsPowerOf2(align), "align must be power of 2!"); + return RoundDownConst(offset, align); +} + inline bool IsAlignedTo(uint64 offset, uint64 align) { ASSERT(IsPowerOf2(align), "align must be power of 2!"); return (offset & (align - 1)) == 0; diff --git a/src/mapleall/maple_driver/include/driver_option_common.h b/src/mapleall/maple_driver/include/driver_option_common.h index 0b79889d36684227fc6e5f33f092202e13e03e8a..508a7879aa4253722b3d91739420064904958a09 100644 --- a/src/mapleall/maple_driver/include/driver_option_common.h +++ b/src/mapleall/maple_driver/include/driver_option_common.h @@ -52,6 +52,7 @@ enum DriverOptionIndex { kTimePhases, kGenMeMpl, kGenMapleBC, + kGenLMBC, kGenVtableImpl, kVerbose, kAllDebug, diff --git a/src/mapleall/maple_driver/include/driver_runner.h b/src/mapleall/maple_driver/include/driver_runner.h index d765a88484b649fb26cf70ac3730841a6c3f60de..ca245d1e22dce392bed0e75f0e33950dc1d7900d 100644 --- a/src/mapleall/maple_driver/include/driver_runner.h +++ b/src/mapleall/maple_driver/include/driver_runner.h @@ -37,7 +37,7 @@ class DriverRunner final { DriverRunner(MIRModule *theModule, const std::vector &exeNames, InputFileType inpFileType, const std::string &mpl2mplInput, const std::string &meInput, const std::string &actualInput, bool dwarf, bool fileParsed = false, bool timePhases = false, - bool genVtableImpl = false, bool genMeMpl = false, bool genMapleBC = false) + bool genVtableImpl = false, bool genMeMpl = false, bool genMapleBC = false, bool genLMBC = false) : theModule(theModule), exeNames(exeNames), mpl2mplInput(mpl2mplInput), @@ -49,6 +49,7 @@ class DriverRunner final { genVtableImpl(genVtableImpl), genMeMpl(genMeMpl), genMapleBC(genMapleBC), + genLMBC(genLMBC), inputFileType(inpFileType) { auto lastDot = actualInput.find_last_of("."); baseName = (lastDot == std::string::npos) ? 
actualInput : actualInput.substr(0, lastDot); @@ -56,9 +57,9 @@ class DriverRunner final { DriverRunner(MIRModule *theModule, const std::vector &exeNames, InputFileType inpFileType, const std::string &actualInput, bool dwarf, bool fileParsed = false, bool timePhases = false, - bool genVtableImpl = false, bool genMeMpl = false, bool genMapleBC = false) + bool genVtableImpl = false, bool genMeMpl = false, bool genMapleBC = false, bool genLMBC = false) : DriverRunner(theModule, exeNames, inpFileType, "", "", actualInput, dwarf, - fileParsed, timePhases, genVtableImpl, genMeMpl, genMapleBC) { + fileParsed, timePhases, genVtableImpl, genMeMpl, genMapleBC, genLMBC) { auto lastDot = actualInput.find_last_of("."); baseName = (lastDot == std::string::npos) ? actualInput : actualInput.substr(0, lastDot); } @@ -107,6 +108,7 @@ class DriverRunner final { bool genVtableImpl = false; bool genMeMpl = false; bool genMapleBC = false; + bool genLMBC = false; std::string printOutExe = ""; std::string baseName; std::string outputFile; diff --git a/src/mapleall/maple_driver/include/mpl_options.h b/src/mapleall/maple_driver/include/mpl_options.h index 68fc647689acd09d60c4967cb7742e523910d9d2..e0bcb3cf624ad96e2fba0ef7745d0dfc27f8ad86 100644 --- a/src/mapleall/maple_driver/include/mpl_options.h +++ b/src/mapleall/maple_driver/include/mpl_options.h @@ -372,6 +372,10 @@ class MplOptions { return genMapleBC; } + bool HasSetGenLMBC() const { + return genLMBC; + } + bool HasSetGenOnlyObj() const { return genObj; } @@ -471,6 +475,7 @@ class MplOptions { bool genObj = false; bool genMeMpl = false; bool genMapleBC = false; + bool genLMBC = false; bool runMaplePhaseOnly = true; bool genVtableImpl = false; bool hasPrinted = false; diff --git a/src/mapleall/maple_driver/src/driver_option_common.cpp b/src/mapleall/maple_driver/src/driver_option_common.cpp index a59b93c8912376e6ba43b9baf8442cfef546901c..83d21dacec9dff4ebc89697f18fd91f4df32252e 100644 --- a/src/mapleall/maple_driver/src/driver_option_common.cpp +++ b/src/mapleall/maple_driver/src/driver_option_common.cpp @@ -330,6 +330,15 @@ const mapleOption::Descriptor kUsages[] = { " --genmaplebc \tGenerate .mbc file\n", "driver", {} }, + { kGenLMBC, + 0, + "", + "genlmbc", + kBuildTypeProduct, + kArgCheckPolicyNone, + " --genlmbc \tGenerate .lmbc file\n", + "driver", + {} }, { kGenObj, kEnable, "c", diff --git a/src/mapleall/maple_driver/src/driver_runner.cpp b/src/mapleall/maple_driver/src/driver_runner.cpp index 741f8ba2c5702b262969d5ddb65753ffe782e71b..3433eed3d017754b47209d8d000724b635687b90 100644 --- a/src/mapleall/maple_driver/src/driver_runner.cpp +++ b/src/mapleall/maple_driver/src/driver_runner.cpp @@ -267,6 +267,7 @@ void DriverRunner::RunNewPM(const std::string &output, const std::string &vtable } MeFuncPM::genMeMpl = genMeMpl; MeFuncPM::genMapleBC = genMapleBC; + MeFuncPM::genLMBC = genLMBC; MeFuncPM::timePhases = timePhases; MPLTimer timer; timer.Start(); diff --git a/src/mapleall/maple_driver/src/maple_comb_compiler.cpp b/src/mapleall/maple_driver/src/maple_comb_compiler.cpp index 1433af7fa2230123e77c6a4bb344239dedeb0b48..64623f455f034954b92817d02b3aeca0e9c8ee0f 100644 --- a/src/mapleall/maple_driver/src/maple_comb_compiler.cpp +++ b/src/mapleall/maple_driver/src/maple_comb_compiler.cpp @@ -181,7 +181,8 @@ ErrorCode MapleCombCompiler::Compile(MplOptions &options, const Action &action, DriverRunner runner(theModule.get(), options.GetSelectedExes(), action.GetInputFileType(), fileName, fileName, fileName, options.WithDwarf(), fileParsed, 
options.HasSetTimePhases(), options.HasSetGenVtableImpl(), - options.HasSetGenMeMpl(), options.HasSetGenMapleBC()); + options.HasSetGenMeMpl(), options.HasSetGenMapleBC(), + options.HasSetGenLMBC()); ErrorCode ret = kErrorNoError; MIRParser parser(*theModule); diff --git a/src/mapleall/maple_driver/src/mpl_options.cpp b/src/mapleall/maple_driver/src/mpl_options.cpp index 9bd86f1dcdb8a958d08e3af3f13d1518957ee0d0..0309d54ab910b4c7e6425638999bfcff5ae54128 100644 --- a/src/mapleall/maple_driver/src/mpl_options.cpp +++ b/src/mapleall/maple_driver/src/mpl_options.cpp @@ -192,6 +192,9 @@ ErrorCode MplOptions::HandleGeneralOptions() { case kGenMapleBC: genMapleBC = true; break; + case kGenLMBC: + genLMBC = true; + break; case kGenVtableImpl: genVtableImpl = true; break; diff --git a/src/mapleall/maple_ir/include/keywords.def b/src/mapleall/maple_ir/include/keywords.def index 15aa540a4b72ffd45da6fbdb21ea6c0dd1dd932b..7aa2960aacf908a2cea9cb880e5ebfe9878262cd 100644 --- a/src/mapleall/maple_ir/include/keywords.def +++ b/src/mapleall/maple_ir/include/keywords.def @@ -55,6 +55,7 @@ // per-function declaration keywords KEYWORD(framesize) KEYWORD(upformalsize) + KEYWORD(outparmsize) KEYWORD(moduleid) KEYWORD(funcsize) KEYWORD(funcid) diff --git a/src/mapleall/maple_ir/include/mir_builder.h b/src/mapleall/maple_ir/include/mir_builder.h index 453eb616336f90c2c844d8593f2138b18cc9dd82..0ee756d70e09253692aed8601a609be06bd61021 100755 --- a/src/mapleall/maple_ir/include/mir_builder.h +++ b/src/mapleall/maple_ir/include/mir_builder.h @@ -240,7 +240,7 @@ class MIRBuilder { RegassignNode *CreateStmtRegassign(PrimType pty, PregIdx regIdx, BaseNode *src); IassignNode *CreateStmtIassign(const MIRType &type, FieldID fieldID, BaseNode *addr, BaseNode *src); IassignoffNode *CreateStmtIassignoff(PrimType pty, int32 offset, BaseNode *opnd0, BaseNode *src); - IassignFPoffNode *CreateStmtIassignFPoff(PrimType pty, int32 offset, BaseNode *src); + IassignFPoffNode *CreateStmtIassignFPoff(Opcode op, PrimType pty, int32 offset, BaseNode *src); CallNode *CreateStmtCall(PUIdx puIdx, const MapleVector &args, Opcode opcode = OP_call); CallNode *CreateStmtCall(const std::string &name, const MapleVector &args); CallNode *CreateStmtVirtualCall(PUIdx puIdx, const MapleVector &args) { diff --git a/src/mapleall/maple_ir/include/mir_function.h b/src/mapleall/maple_ir/include/mir_function.h index bc7a50a3fa436f254ccc4e2465880acdd4698ab5..318d8a7dbc601e3d6248c8732c440087feebf615 100644 --- a/src/mapleall/maple_ir/include/mir_function.h +++ b/src/mapleall/maple_ir/include/mir_function.h @@ -789,6 +789,13 @@ class MIRFunction { upFormalSize = size; } + uint16 GetOutParmSize() const { + return outParmSize; + } + void SetOutParmSize(uint16 size) { + outParmSize = size; + } + uint16 GetModuleId() const { return moduleID; } @@ -810,7 +817,7 @@ class MIRFunction { ++tempCount; } - const uint8 *GetFormalWordsTypeTagged() const { + uint8 *GetFormalWordsTypeTagged() const { return formalWordsTypeTagged; } void SetFormalWordsTypeTagged(uint8 *tagged) { @@ -820,7 +827,7 @@ class MIRFunction { return &formalWordsTypeTagged; } - const uint8 *GetLocalWordsTypeTagged() const { + uint8 *GetLocalWordsTypeTagged() const { return localWordsTypeTagged; } void SetLocalWordsTypeTagged(uint8 *tagged) { @@ -830,7 +837,7 @@ class MIRFunction { return &localWordsTypeTagged; } - const uint8 *GetFormalWordsRefCounted() const { + uint8 *GetFormalWordsRefCounted() const { return formalWordsRefCounted; } void SetFormalWordsRefCounted(uint8 *counted) { @@ -840,7 +847,7 @@ 
class MIRFunction { return &formalWordsRefCounted; } - const uint8 *GetLocalWordsRefCounted() const { + uint8 *GetLocalWordsRefCounted() const { return localWordsRefCounted; } void SetLocalWordsRefCounted(uint8 *counted) { @@ -1188,6 +1195,7 @@ class MIRFunction { uint8_t layoutType = kLayoutUnused; uint16 frameSize = 0; uint16 upFormalSize = 0; + uint16 outParmSize = 0; uint16 moduleID = 0; uint32 funcSize = 0; // size of code in words uint32 tempCount = 0; diff --git a/src/mapleall/maple_ir/include/mir_module.h b/src/mapleall/maple_ir/include/mir_module.h index 4421bfc2b027dc00b7cf543cee9e6c9c585631c2..c62444e5fd39e86cba5dec89763113d11288f169 100644 --- a/src/mapleall/maple_ir/include/mir_module.h +++ b/src/mapleall/maple_ir/include/mir_module.h @@ -46,9 +46,11 @@ enum MIRFlavor { kFeProduced, kMeProduced, kBeLowered, + kFlavorMbc, kMmpl, kCmplV1, - kCmpl // == CMPLv2 + kCmpl, // == CMPLv2 + kFlavorLmbc, }; @@ -174,10 +176,7 @@ class MIRModule { MIRModule &operator=(const MIRModule &module) = delete; ~MIRModule(); - const MemPool *GetMemPool() const { - return memPool; - } - MemPool *GetMemPool() { + MemPool *GetMemPool() const { return memPool; } MemPool *GetPragmaMemPool() { diff --git a/src/mapleall/maple_ir/include/mir_nodes.h b/src/mapleall/maple_ir/include/mir_nodes.h index 219c3338e31c8b90a01c820109153286ee7b8b17..54a99c6b03191a9018c3c2455a0e284f88a47eea 100755 --- a/src/mapleall/maple_ir/include/mir_nodes.h +++ b/src/mapleall/maple_ir/include/mir_nodes.h @@ -533,7 +533,7 @@ class IreadFPoffNode : public BaseNode { public: IreadFPoffNode() : BaseNode(OP_ireadfpoff) {} - IreadFPoffNode(PrimType ptyp, int32 ofst) : BaseNode(OP_ireadfpoff, ptyp), offset(ofst) {} + IreadFPoffNode(PrimType ptyp, int32 ofst) : BaseNode(OP_ireadfpoff, ptyp, 0), offset(ofst) {} virtual ~IreadFPoffNode() = default; @@ -2794,13 +2794,14 @@ class IassignoffNode : public BinaryStmtNode { int32 offset = 0; }; +// for iassignfpoff, iassignspoff, iassignpcoff class IassignFPoffNode : public UnaryStmtNode { public: - IassignFPoffNode() : UnaryStmtNode(OP_iassignfpoff) {} + IassignFPoffNode(Opcode o) : UnaryStmtNode(o) {} - explicit IassignFPoffNode(int32 ofst) : UnaryStmtNode(OP_iassignfpoff), offset(ofst) {} + explicit IassignFPoffNode(Opcode o, int32 ofst) : UnaryStmtNode(o), offset(ofst) {} - IassignFPoffNode(PrimType primType, int32 offset, BaseNode *src) : IassignFPoffNode(offset) { + IassignFPoffNode(Opcode o, PrimType primType, int32 offset, BaseNode *src) : IassignFPoffNode(o, offset) { BaseNodeT::SetPrimType(primType); UnaryStmtNode::SetOpnd(src, 0); } @@ -2831,6 +2832,31 @@ class IassignFPoffNode : public UnaryStmtNode { typedef IassignFPoffNode IassignPCoffNode; +class BlkassignoffNode : public BinaryStmtNode { + public: + BlkassignoffNode() : BinaryStmtNode(OP_blkassignoff) { ptyp = PTY_agg; } + explicit BlkassignoffNode(int32 ofst, int32 bsize) : BinaryStmtNode(OP_blkassignoff), offset(ofst), blockSize(bsize) { ptyp = PTY_agg; } + explicit BlkassignoffNode(int32 ofst, int32 bsize, BaseNode *dest, BaseNode *src) : BinaryStmtNode(OP_blkassignoff), offset(ofst), blockSize(bsize) { + ptyp = PTY_agg; + SetBOpnd(dest, 0); + SetBOpnd(src, 1); + } + ~BlkassignoffNode() = default; + + void Dump(int32 indent) const override; + + BlkassignoffNode *CloneTree(MapleAllocator &allocator) const override { + BlkassignoffNode *node = allocator.GetMemPool()->New(offset, blockSize); + node->SetStmtID(stmtIDNext++); + node->SetBOpnd(GetBOpnd(0)->CloneTree(allocator), 0); + 
node->SetBOpnd(GetBOpnd(1)->CloneTree(allocator), 1); + return node; + } + public: + int32 offset = 0; + int32 blockSize = 0; +}; + // used by return, syncenter, syncexit class NaryStmtNode : public StmtNode, public NaryOpnds { public: diff --git a/src/mapleall/maple_ir/include/mir_parser.h b/src/mapleall/maple_ir/include/mir_parser.h index 14013baf872bf7ff106b60dc1e179742145daeac..249b619b075d5958d068a0737b1dad6e4cf79a30 100755 --- a/src/mapleall/maple_ir/include/mir_parser.h +++ b/src/mapleall/maple_ir/include/mir_parser.h @@ -110,6 +110,7 @@ class MIRParser { bool ParseStmtIassign(StmtNodePtr &stmt); bool ParseStmtIassignoff(StmtNodePtr &stmt); bool ParseStmtIassignFPoff(StmtNodePtr &stmt); + bool ParseStmtBlkassignoff(StmtNodePtr &stmt); bool ParseStmtDoloop(StmtNodePtr&); bool ParseStmtForeachelem(StmtNodePtr&); bool ParseStmtDowhile(StmtNodePtr&); @@ -284,6 +285,7 @@ class MIRParser { bool ParseStmtBlockForType(); bool ParseStmtBlockForFrameSize(); bool ParseStmtBlockForUpformalSize(); + bool ParseStmtBlockForOutParmSize(); bool ParseStmtBlockForModuleID(); bool ParseStmtBlockForFuncSize(); bool ParseStmtBlockForFuncID(); diff --git a/src/mapleall/maple_ir/include/opcodes.def b/src/mapleall/maple_ir/include/opcodes.def index d7863963d12983f3316fc0a63578456c99ece5e3..d1265eda42823ac0ed96f73921da6a9110acec57 100755 --- a/src/mapleall/maple_ir/include/opcodes.def +++ b/src/mapleall/maple_ir/include/opcodes.def @@ -218,3 +218,5 @@ OPCODE(dreadoff, dreadoffNode, OPCODEHASSSAUSE, 12) OPCODE(addrofoff, addrofoffNode, 0, 12) OPCODE(dassignoff, DassignoffNode, (OPCODEISSTMT | OPCODEHASSSADEF), 8) + OPCODE(iassignspoff, IassignFPoffNode, OPCODEISSTMT, 8) + OPCODE(blkassignoff, BlkassignoffNode, OPCODEISSTMT, 8) diff --git a/src/mapleall/maple_ir/src/bin_func_export.cpp b/src/mapleall/maple_ir/src/bin_func_export.cpp index ce263debcfc012d871464c6c0691db880f7079e4..07b1d9eeab5ad3ef0dac8c0f735030a48f4e30d9 100644 --- a/src/mapleall/maple_ir/src/bin_func_export.cpp +++ b/src/mapleall/maple_ir/src/bin_func_export.cpp @@ -40,6 +40,11 @@ void BinaryMplExport::OutputFuncIdInfo(MIRFunction *func) { WriteNum(kBinFuncIdInfoStart); WriteNum(func->GetPuidxOrigin()); // the funcid OutputInfoVector(func->GetInfoVector(), func->InfoIsString()); + if (mod.GetFlavor() == kFlavorLmbc) { + WriteNum(func->GetUpFormalSize()); + WriteNum(func->GetFrameSize()); + WriteNum(func->GetOutParmSize()); + } WriteNum(~kBinFuncIdInfoStart); } @@ -255,6 +260,16 @@ void BinaryMplExport::OutputExpression(BaseNode *e) { WriteNum(irNode->GetFieldID()); break; } + case OP_ireadoff: { + IreadoffNode *irNode = static_cast(e); + WriteNum(irNode->GetOffset()); + break; + } + case OP_ireadfpoff: { + IreadFPoffNode *irNode = static_cast(e); + WriteNum(irNode->GetOffset()); + break; + } case OP_sext: case OP_zext: case OP_extractbits: { @@ -263,6 +278,12 @@ void BinaryMplExport::OutputExpression(BaseNode *e) { WriteNum(extNode->GetBitsSize()); break; } + case OP_depositbits: { + DepositbitsNode *dbNode = static_cast(e); + WriteNum(dbNode->GetBitsOffset()); + WriteNum(dbNode->GetBitsSize()); + break; + } case OP_gcmallocjarray: case OP_gcpermallocjarray: { JarrayMallocNode *gcNode = static_cast(e); @@ -421,6 +442,19 @@ void BinaryMplExport::OutputBlockNode(BlockNode *block) { WriteNum(iassoff->GetOffset()); break; } + case OP_iassignspoff: + case OP_iassignfpoff: { + IassignFPoffNode *iassfpoff = static_cast(s); + WriteNum(iassfpoff->GetPrimType()); + WriteNum(iassfpoff->GetOffset()); + break; + } + case OP_blkassignoff: { + 
BlkassignoffNode *bass = static_cast(s); + WriteNum(bass->offset); + WriteNum(bass->blockSize); + break; + } case OP_call: case OP_virtualcall: case OP_virtualicall: @@ -556,6 +590,16 @@ void BinaryMplExport::OutputBlockNode(BlockNode *block) { } break; } + case OP_rangegoto: { + RangeGotoNode *rgoto = static_cast(s); + WriteNum(rgoto->GetTagOffset()); + WriteNum(static_cast(rgoto->GetRangeGotoTable().size())); + for (SmallCasePair cpair : rgoto->GetRangeGotoTable()) { + WriteNum(cpair.first); + WriteNum(cpair.second); + } + break; + } case OP_jstry: { JsTryNode *tryNode = static_cast(s); WriteNum(tryNode->GetCatchOffset()); @@ -695,7 +739,9 @@ void BinaryMplExport::WriteFunctionBodyField(uint64 contentIdx, std::unordered_s OutputLabelTab(func); OutputLocalTypeNameTab(func->GetTypeNameTab()); OutputFormalsStIdx(func); - OutputAliasMap(func->GetAliasVarMap()); + if (mod.GetFlavor() < kMmpl) { + OutputAliasMap(func->GetAliasVarMap()); + } lastOutputSrcPosition = SrcPosition(); OutputBlockNode(func->GetBody()); size++; diff --git a/src/mapleall/maple_ir/src/bin_func_import.cpp b/src/mapleall/maple_ir/src/bin_func_import.cpp index 0802da340754af59b3838c73fd63d6811290057b..7195b191727648cbe78b1154c617cedec5dfd621 100644 --- a/src/mapleall/maple_ir/src/bin_func_import.cpp +++ b/src/mapleall/maple_ir/src/bin_func_import.cpp @@ -46,6 +46,11 @@ void BinaryMplImport::ImportFuncIdInfo(MIRFunction *func) { CHECK_FATAL(tag == kBinFuncIdInfoStart, "kBinFuncIdInfoStart expected"); func->SetPuidxOrigin(static_cast(ReadNum())); ImportInfoVector(func->GetInfoVector(), func->InfoIsString()); + if (mod.GetFlavor() == kFlavorLmbc) { + func->SetUpFormalSize(ReadNum()); + func->SetFrameSize(ReadNum()); + func->SetOutParmSize(ReadNum()); + } tag = ReadNum(); CHECK_FATAL(tag == ~kBinFuncIdInfoStart, "pattern mismatch in ImportFuncIdInfo()"); } @@ -307,6 +312,17 @@ BaseNode *BinaryMplImport::ImportExpression(MIRFunction *func) { irNode->SetOpnd(ImportExpression(func), 0); return irNode; } + case OP_ireadoff: { + int32 ofst = ReadNum(); + IreadoffNode *irNode = mod.CurFuncCodeMemPool()->New(typ, ofst); + irNode->SetOpnd(ImportExpression(func), 0); + return irNode; + } + case OP_ireadfpoff: { + int32 ofst = ReadNum(); + IreadFPoffNode *irNode = mod.CurFuncCodeMemPool()->New(typ, ofst); + return irNode; + } case OP_sext: case OP_zext: case OP_extractbits: { @@ -316,6 +332,14 @@ BaseNode *BinaryMplImport::ImportExpression(MIRFunction *func) { extNode->SetOpnd(ImportExpression(func), 0); return extNode; } + case OP_depositbits: { + DepositbitsNode *dbNode = mod.CurFuncCodeMemPool()->New(op, typ); + dbNode->SetBitsOffset(static_cast(ReadNum())); + dbNode->SetBitsSize(static_cast(ReadNum())); + dbNode->SetOpnd(ImportExpression(func), 0); + dbNode->SetOpnd(ImportExpression(func), 1); + return dbNode; + } case OP_gcmallocjarray: case OP_gcpermallocjarray: { JarrayMallocNode *gcNode = mod.CurFuncCodeMemPool()->New(op, typ); @@ -524,6 +548,24 @@ BlockNode *BinaryMplImport::ImportBlockNode(MIRFunction *func) { stmt = s; break; } + case OP_iassignspoff: + case OP_iassignfpoff: { + IassignFPoffNode *s = func->GetCodeMemPool()->New(op); + s->SetPrimType((PrimType)ReadNum()); + s->SetOffset(static_cast(ReadNum())); + s->SetOpnd(ImportExpression(func), 0); + stmt = s; + break; + } + case OP_blkassignoff: { + BlkassignoffNode *s = func->GetCodeMemPool()->New(); + s->offset = static_cast(ReadNum()); + s->blockSize = static_cast(ReadNum()); + s->SetOpnd(ImportExpression(func), 0); + s->SetOpnd(ImportExpression(func), 1); + stmt = s; + 
break; + } case OP_call: case OP_virtualcall: case OP_virtualicall: @@ -746,6 +788,19 @@ BlockNode *BinaryMplImport::ImportBlockNode(MIRFunction *func) { stmt = s; break; } + case OP_rangegoto: { + RangeGotoNode *s = mod.CurFuncCodeMemPool()->New(mod); + s->SetTagOffset(ReadNum()); + uint32 tagSize = static_cast(ReadNum()); + for (uint32 i = 0; i < tagSize; ++i) { + uint16 casetag = ReadNum(); + LabelIdx lidx(ReadNum()); + s->AddRangeGoto(casetag, lidx); + } + s->SetOpnd(ImportExpression(func), 0); + stmt = s; + break; + } case OP_jstry: { JsTryNode *s = mod.CurFuncCodeMemPool()->New(); s->SetCatchOffset(static_cast(ReadNum())); @@ -880,7 +935,9 @@ void BinaryMplImport::ReadFunctionBodyField() { ImportLabelTab(fn); ImportLocalTypeNameTable(fn->GetTypeNameTab()); ImportFormalsStIdx(fn); - ImportAliasMap(fn); + if (mod.GetFlavor() < kMmpl) { + ImportAliasMap(fn); + } (void)ImportBlockNode(fn); mod.AddFunction(fn); } diff --git a/src/mapleall/maple_ir/src/bin_mpl_export.cpp b/src/mapleall/maple_ir/src/bin_mpl_export.cpp index 5330c0f093033d9b44f854a50fc07da33ad49fc9..aab834d3316fba5bda240fbab8ff3e054978d4c9 100644 --- a/src/mapleall/maple_ir/src/bin_mpl_export.cpp +++ b/src/mapleall/maple_ir/src/bin_mpl_export.cpp @@ -749,6 +749,9 @@ void BinaryMplExport::WriteHeaderField(uint64 contentIdx) { WriteNum(mod.GetFlavor()); WriteNum(mod.GetSrcLang()); WriteNum(mod.GetID()); + if (mod.GetFlavor() == kFlavorLmbc) { + WriteNum(mod.GetGlobalMemSize()); + } WriteNum(mod.GetNumFuncs()); WriteAsciiStr(mod.GetEntryFuncName()); OutputInfoVector(mod.GetFileInfo(), mod.GetFileInfoIsString()); diff --git a/src/mapleall/maple_ir/src/bin_mpl_import.cpp b/src/mapleall/maple_ir/src/bin_mpl_import.cpp index eab676c12628263f4e72727f79fab831199a055a..64fb01070912fd8b1d181ac9c13f917749a7916a 100644 --- a/src/mapleall/maple_ir/src/bin_mpl_import.cpp +++ b/src/mapleall/maple_ir/src/bin_mpl_import.cpp @@ -1155,6 +1155,9 @@ void BinaryMplImport::ReadHeaderField() { mod.SetFlavor((MIRFlavor)ReadNum()); mod.SetSrcLang((MIRSrcLang)ReadNum()); mod.SetID(static_cast(ReadNum())); + if (mod.GetFlavor() == kFlavorLmbc) { + mod.SetGlobalMemSize(ReadNum()); + } mod.SetNumFuncs(static_cast(ReadNum())); std::string inStr; ReadAsciiStr(inStr); diff --git a/src/mapleall/maple_ir/src/mir_builder.cpp b/src/mapleall/maple_ir/src/mir_builder.cpp index 22873baaa6763a5ee5ccbc81577478c19c621147..abec8b50d9809d10f206974ff5f6d824f72a77b6 100755 --- a/src/mapleall/maple_ir/src/mir_builder.cpp +++ b/src/mapleall/maple_ir/src/mir_builder.cpp @@ -800,8 +800,8 @@ IassignoffNode *MIRBuilder::CreateStmtIassignoff(PrimType pty, int32 offset, Bas return GetCurrentFuncCodeMp()->New(pty, offset, addr, src); } -IassignFPoffNode *MIRBuilder::CreateStmtIassignFPoff(PrimType pty, int32 offset, BaseNode *src) { - return GetCurrentFuncCodeMp()->New(pty, offset, src); +IassignFPoffNode *MIRBuilder::CreateStmtIassignFPoff(Opcode op, PrimType pty, int32 offset, BaseNode *src) { + return GetCurrentFuncCodeMp()->New(op, pty, offset, src); } CallNode *MIRBuilder::CreateStmtCall(PUIdx puIdx, const MapleVector &args, Opcode opCode) { diff --git a/src/mapleall/maple_ir/src/mir_function.cpp b/src/mapleall/maple_ir/src/mir_function.cpp index e9e85f459905b12878c4c381d65641487d04a2d8..75cafccb6b8d978e80c49182cb822073760316f3 100644 --- a/src/mapleall/maple_ir/src/mir_function.cpp +++ b/src/mapleall/maple_ir/src/mir_function.cpp @@ -362,6 +362,8 @@ void MIRFunction::Dump(bool withoutBody) { if (module->GetFlavor() < kMmpl) { DumpFlavorLoweredThanMmpl(); + } else { + 
LogInfo::MapleLogger() << " () void"; } // codeMemPool is nullptr, means maple_ir has been released for memory's sake @@ -382,7 +384,7 @@ void MIRFunction::Dump(bool withoutBody) { void MIRFunction::DumpUpFormal(int32 indent) const { PrintIndentation(indent + 1); - LogInfo::MapleLogger() << "upFormalSize " << GetUpFormalSize() << '\n'; + LogInfo::MapleLogger() << "upformalsize " << GetUpFormalSize() << '\n'; if (localWordsTypeTagged != nullptr) { PrintIndentation(indent + 1); LogInfo::MapleLogger() << "formalWordsTypeTagged = [ "; @@ -411,7 +413,7 @@ void MIRFunction::DumpUpFormal(int32 indent) const { void MIRFunction::DumpFrame(int32 indent) const { PrintIndentation(indent + 1); - LogInfo::MapleLogger() << "frameSize " << static_cast(GetFrameSize()) << '\n'; + LogInfo::MapleLogger() << "framesize " << static_cast(GetFrameSize()) << '\n'; if (localWordsTypeTagged != nullptr) { PrintIndentation(indent + 1); LogInfo::MapleLogger() << "localWordsTypeTagged = [ "; @@ -454,6 +456,11 @@ void MIRFunction::DumpFuncBody(int32 indent) { DumpFrame(indent); } + if (GetOutParmSize() > 0) { + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "outparmsize " << GetOutParmSize() << '\n'; + } + if (GetModuleId() > 0) { PrintIndentation(indent + 1); LogInfo::MapleLogger() << "moduleID " << static_cast(GetModuleId()) << '\n'; diff --git a/src/mapleall/maple_ir/src/mir_module.cpp b/src/mapleall/maple_ir/src/mir_module.cpp index 324fcdc8da67c749cdfa0b3c194900dc154abb45..f4c5b29603d91d49d632bd7efcef9ab218190f18 100644 --- a/src/mapleall/maple_ir/src/mir_module.cpp +++ b/src/mapleall/maple_ir/src/mir_module.cpp @@ -231,7 +231,7 @@ void MIRModule::DumpGlobals(bool emitStructureType) const { } LogInfo::MapleLogger() << std::dec; } - if (flavor < kMmpl) { + if (flavor < kMmpl || flavor == kFlavorLmbc) { for (auto it = typeDefOrder.begin(); it != typeDefOrder.end(); ++it) { TyIdx tyIdx = typeNameTab->GetTyIdxFromGStrIdx(*it); const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(*it); diff --git a/src/mapleall/maple_ir/src/mir_nodes.cpp b/src/mapleall/maple_ir/src/mir_nodes.cpp index 694d2974e801653a35fcd481e6a293b1a786c437..6885f4bfb1675da65abc9668c0b84c530d08b9a0 100755 --- a/src/mapleall/maple_ir/src/mir_nodes.cpp +++ b/src/mapleall/maple_ir/src/mir_nodes.cpp @@ -764,6 +764,13 @@ void IassignFPoffNode::Dump(int32 indent) const { LogInfo::MapleLogger() << '\n'; } +void BlkassignoffNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << " " << offset << " " << blockSize; + BinaryOpnds::Dump(indent); + LogInfo::MapleLogger() << '\n'; +} + void GotoNode::Dump(int32 indent) const { StmtNode::DumpBase(indent); if (offset == 0) { @@ -865,7 +872,7 @@ void UnaryStmtNode::DumpOpnd(int32 indent) const { if (uOpnd != nullptr) { uOpnd->Dump(indent); } - LogInfo::MapleLogger() << ")\n"; + LogInfo::MapleLogger() << ")"; } void UnaryStmtNode::Dump(int32 indent) const { diff --git a/src/mapleall/maple_ir/src/mir_parser.cpp b/src/mapleall/maple_ir/src/mir_parser.cpp index a593036cb05dd90be9b121cc88c36ed54754f0f6..7181283181f741956b7cbe19a95fbf86bc4290d0 100755 --- a/src/mapleall/maple_ir/src/mir_parser.cpp +++ b/src/mapleall/maple_ir/src/mir_parser.cpp @@ -215,12 +215,13 @@ bool MIRParser::ParseStmtIassignoff(StmtNodePtr &stmt) { } bool MIRParser::ParseStmtIassignFPoff(StmtNodePtr &stmt) { + Opcode op = lexer.GetTokenKind() == TK_iassignfpoff ? 
OP_iassignfpoff : OP_iassignspoff; if (!IsPrimitiveType(lexer.NextToken())) { Error("expect type parsing binary operator but get "); return false; } // iassignfpoff ( ) - auto *iAssignOff = mod.CurFuncCodeMemPool()->New(); + auto *iAssignOff = mod.CurFuncCodeMemPool()->New(op); iAssignOff->SetPrimType(GetPrimitiveType(lexer.GetTokenKind())); if (lexer.NextToken() != TK_intconst) { Error("expect offset but get "); @@ -238,6 +239,33 @@ bool MIRParser::ParseStmtIassignFPoff(StmtNodePtr &stmt) { return true; } +bool MIRParser::ParseStmtBlkassignoff(StmtNodePtr &stmt) { + // blkassignoff (, ) + BlkassignoffNode *bassignoff = mod.CurFuncCodeMemPool()->New(); + if (lexer.NextToken() != TK_intconst) { + Error("expect offset but get "); + return false; + } + bassignoff->offset = lexer.GetTheIntVal(); + if (lexer.NextToken() != TK_intconst) { + Error("expect size but get "); + return false; + } + bassignoff->blockSize = lexer.GetTheIntVal(); + lexer.NextToken(); + BaseNode *destAddr = nullptr; + BaseNode *srcAddr = nullptr; + // parse 2 operands, the dest address followed by src address + if (!ParseExprTwoOperand(destAddr, srcAddr)) { + return false; + } + bassignoff->SetOpnd(destAddr, 0); + bassignoff->SetOpnd(srcAddr, 1); + lexer.NextToken(); + stmt = bassignoff; + return true; +} + bool MIRParser::ParseStmtDoloop(StmtNodePtr &stmt) { // syntax: doloop (, , ) { // } @@ -616,10 +644,10 @@ bool MIRParser::ParseStmtRangegoto(StmtNodePtr &stmt) { return false; } if (!IsPrimitiveInteger(expr->GetPrimType())) { - rangeGotoNode->SetOpnd(expr, 0); Error("expect expression return integer but get "); return false; } + rangeGotoNode->SetOpnd(expr, 0); if (lexer.NextToken() == TK_intconst) { rangeGotoNode->SetTagOffset(static_cast(lexer.GetTheIntVal())); } else { @@ -1960,6 +1988,18 @@ bool MIRParser::ParseStmtBlockForUpformalSize() { return true; } +bool MIRParser::ParseStmtBlockForOutParmSize() { + MIRFunction *fn = paramCurrFuncForParseStmtBlock; + lexer.NextToken(); + if (lexer.GetTokenKind() != TK_intconst) { + Error("expect integer after outparmsize but get "); + return false; + } + fn->SetOutParmSize(lexer.GetTheIntVal()); + lexer.NextToken(); + return true; +} + bool MIRParser::ParseStmtBlockForModuleID() { MIRFunction *fn = paramCurrFuncForParseStmtBlock; lexer.NextToken(); @@ -2637,10 +2677,10 @@ bool MIRParser::ParseExprIreadFPoff(BaseNodePtr &expr) { return false; } iReadOff->SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx)); - if (!IsPrimitiveScalar(iReadOff->GetPrimType())) { - Error("only scalar types allowed for ireadoff"); - return false; - } + //if (!IsPrimitiveScalar(iReadOff->GetPrimType())) { + // Error("only scalar types allowed for ireadoff"); + // return false; + //} if (lexer.GetTokenKind() != TK_intconst) { Error("expect offset but get "); return false; @@ -3314,6 +3354,8 @@ std::map MIRParser::InitFuncPtrMapForPar funcPtrMap[TK_iassign] = &MIRParser::ParseStmtIassign; funcPtrMap[TK_iassignoff] = &MIRParser::ParseStmtIassignoff; funcPtrMap[TK_iassignfpoff] = &MIRParser::ParseStmtIassignFPoff; + funcPtrMap[TK_iassignspoff] = &MIRParser::ParseStmtIassignFPoff; + funcPtrMap[TK_blkassignoff] = &MIRParser::ParseStmtBlkassignoff; funcPtrMap[TK_regassign] = &MIRParser::ParseStmtRegassign; funcPtrMap[TK_doloop] = &MIRParser::ParseStmtDoloop; funcPtrMap[TK_foreachelem] = &MIRParser::ParseStmtForeachelem; @@ -3411,6 +3453,7 @@ std::map MIRParser::InitFuncPtrMapF funcPtrMap[TK_type] = &MIRParser::ParseStmtBlockForType; funcPtrMap[TK_framesize] = 
&MIRParser::ParseStmtBlockForFrameSize; funcPtrMap[TK_upformalsize] = &MIRParser::ParseStmtBlockForUpformalSize; + funcPtrMap[TK_outparmsize] = &MIRParser::ParseStmtBlockForOutParmSize; funcPtrMap[TK_moduleid] = &MIRParser::ParseStmtBlockForModuleID; funcPtrMap[TK_funcsize] = &MIRParser::ParseStmtBlockForFuncSize; funcPtrMap[TK_funcid] = &MIRParser::ParseStmtBlockForFuncID; diff --git a/src/mapleall/maple_me/BUILD.gn b/src/mapleall/maple_me/BUILD.gn index 91dc3354dcd1adc5871c6e877aa05d04cde93ce9..bdc24d533e7a6c054582f5f0d605bcfae5b4a05d 100755 --- a/src/mapleall/maple_me/BUILD.gn +++ b/src/mapleall/maple_me/BUILD.gn @@ -23,6 +23,12 @@ include_directories = [ "${MAPLEALL_ROOT}/maple_util/include", "${MAPLEALL_ROOT}/maple_driver/include", "${MAPLEALL_ROOT}/maple_phase/include", + "${MAPLEALL_ROOT}/maple_be/include/be", + "${MAPLEALL_ROOT}/maple_be/include/cg", + "${MAPLEALL_ROOT}/maple_be/include/ad", + "${MAPLEALL_ROOT}/maple_be/include/ad/target", + "${MAPLE_BUILD_OUTPUT}/common/target", + "${THIRD_PARTY_ROOT}/llvm_modified/llvm/include/llvm/BinaryFormat", ] src_libmplme = [ @@ -116,6 +122,8 @@ src_libmplme = [ "src/me_safety_warning.cpp", "src/lfo_unroll.cpp", "src/me_slp.cpp", + "src/lmbc_memlayout.cpp", + "src/lmbc_lower.cpp", ] src_libmplmewpo = [ diff --git a/src/mapleall/maple_me/include/lmbc_lower.h b/src/mapleall/maple_me/include/lmbc_lower.h new file mode 100644 index 0000000000000000000000000000000000000000..90145a49298ad285cba212117e6e973d1b80d937 --- /dev/null +++ b/src/mapleall/maple_me/include/lmbc_lower.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) [2022] Futurewei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. 
+ */ + +#ifndef MAPLEME_INCLUDE_LMBC_LOWER_H_ +#define MAPLEME_INCLUDE_LMBC_LOWER_H_ +#include "mir_builder.h" +#include "lmbc_memlayout.h" + +namespace maple { + +class LMBCLowerer { + public: + MIRModule *mirModule; + MIRFunction *func; + maplebe::BECommon *becommon; + MIRBuilder *mirBuilder; + GlobalMemLayout *globmemlayout; + LMBCMemLayout *memlayout; + + public: + explicit LMBCLowerer(MIRModule *mod, maplebe::BECommon *becmmn, MIRFunction *f, GlobalMemLayout *gmemlayout, LMBCMemLayout *lmemlayout) : + mirModule(mod), func(f), becommon(becmmn), mirBuilder(mod->GetMIRBuilder()), + globmemlayout(gmemlayout), memlayout(lmemlayout) {} + + BaseNode *ReadregNodeForSymbol(MIRSymbol *); + PregIdx GetSpecialRegFromSt(const MIRSymbol *); + BaseNode *LowerDread(AddrofNode *); + BaseNode *LowerAddrof(AddrofNode *); + BaseNode *LowerIread(IreadNode *); + BaseNode *LowerExpr(BaseNode *expr); + void LowerAggDassign(BlockNode *, const DassignNode *); + void LowerDassign(DassignNode *, BlockNode *); + void LowerIassign(IassignNode *, BlockNode *); + void LowerAggIassign(BlockNode *, IassignNode *); + void LowerReturn(NaryStmtNode *retNode, BlockNode *newblk); + void LowerCall(NaryStmtNode *callNode, BlockNode *newblk); + BlockNode *LowerBlock(BlockNode *); + void LoadFormalsAssignedToPregs(); + void LowerFunction(); +}; + +} // namespace maple + +#endif // MAPLEME_INCLUDE_LMBC_LOWER_H_ diff --git a/src/mapleall/maple_me/include/lmbc_memlayout.h b/src/mapleall/maple_me/include/lmbc_memlayout.h new file mode 100644 index 0000000000000000000000000000000000000000..592873654f23d6b2a90967c04f3787a5f6d3d1a3 --- /dev/null +++ b/src/mapleall/maple_me/include/lmbc_memlayout.h @@ -0,0 +1,152 @@ +/* + * Copyright (c) [2022] Futurewei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. 
+ */ +#ifndef MAPLEME_INCLUDE_LMBC_MEMLAYOUT_H +#define MAPLEME_INCLUDE_LMBC_MEMLAYOUT_H + +#include "mir_type.h" +#include "mir_const.h" +#include "mir_symbol.h" +#include "mir_function.h" +#include "becommon.h" + +namespace maple { + +typedef enum { + MS_unknown, + MS_upformal, // for the incoming parameters that are passed on the caller's stack + MS_formal, // for the incoming parameters that are passed in registers + MS_actual, // for the outgoing parameters + MS_local, // for all local variables and temporaries + MS_FPbased, // addressed via offset from the frame pointer + MS_SPbased, // addressed via offset from the stack pointer + MS_GPbased, // addressed via offset from the global pointer +} MemSegmentKind; + +class MemSegment; + +// describes where a symbol is allocated +class SymbolAlloc { + public: + MemSegment *mem_segment; + int32 offset; + + public: + SymbolAlloc() : mem_segment(nullptr), offset(0) {} + + ~SymbolAlloc() {} + +}; // class SymbolAlloc + +// keeps track of the allocation of a memory segment +class MemSegment { + public: + MemSegmentKind kind; + int32 size; // size is negative if allocated offsets are negative + SymbolAlloc how_alloc; // this segment may be allocated inside another segment + public: + MemSegment(MemSegmentKind k) : kind(k), size(0) {} + + MemSegment(MemSegmentKind k, int32 sz) : kind(k), size(sz) {} + + ~MemSegment() {} + +}; // class MemSegment + +class LMBCMemLayout { + public: + MIRFunction *func; + MemSegment seg_upformal; + MemSegment seg_formal; + MemSegment seg_actual; + MemSegment seg_FPbased; + MemSegment seg_SPbased; + MapleVector sym_alloc_table; // index is StIdx + + public: + uint32 FindLargestActualArea(void); + uint32 FindLargestActualArea(StmtNode *, int &); + explicit LMBCMemLayout(MIRFunction *f, MapleAllocator *mallocator) + : func(f), + seg_upformal(MS_upformal), + seg_formal(MS_formal), + seg_actual(MS_actual), + seg_FPbased(MS_FPbased, -GetPrimTypeSize(PTY_ptr)), + seg_SPbased(MS_SPbased), + sym_alloc_table(mallocator->Adapter()) { + sym_alloc_table.resize(f->GetSymTab()->GetSymbolTableSize()); + } + + ~LMBCMemLayout() {} + + void LayoutStackFrame(void); + int32 StackFrameSize(void) const { + return seg_SPbased.size - seg_FPbased.size; + } + + int32 UpformalSize(void) const { + return seg_upformal.size; + } +}; + +class GlobalMemLayout { + public: + MemSegment seg_GPbased; + MapleVector sym_alloc_table; // index is StIdx + private: + maplebe::BECommon *be; + MIRModule *mirModule; + + public: + GlobalMemLayout(maplebe::BECommon *b, MIRModule *mod, MapleAllocator *mallocator); + ~GlobalMemLayout() {} + + private: + void FillScalarValueInMap(uint32 startaddress, PrimType pty, MIRConst *c); + void FillTypeValueInMap(uint32 startaddress, MIRType *ty, MIRConst *c); + void FillSymbolValueInMap(const MIRSymbol *sym); +}; + +// for specifying how a parameter is passed +struct PLocInfo { + int32 memoffset; + int32 memsize; +}; + +// for processing an incoming or outgoing parameter list +class ParmLocator { + private: + int32 parmNum; // number of all types of parameters processed so far + int32 lastMemOffset; + + public: + ParmLocator() : parmNum(0), lastMemOffset(0) {} + + ~ParmLocator() {} + + void LocateNextParm(const MIRType *ty, PLocInfo &ploc); +}; + +// given the type of the return value, determines the return mechanism +class ReturnMechanism { + public: + bool fake_first_parm; // whether returning in memory via fake first parameter + PrimType ptype0; // the primitive type stored in retval0 + + ReturnMechanism(const MIRType 
*retty); +}; + +} /* namespace maple */ + +#endif /* MAPLEME_INCLUDE_LMBC_MEMLAYOUT_H */ diff --git a/src/mapleall/maple_me/include/me_phase_manager.h b/src/mapleall/maple_me/include/me_phase_manager.h index 4dd7dbf3bf19541994f2e9c855f8563a7a010929..dfbf24e15f468e6944ca73040c71403ba9eb907c 100644 --- a/src/mapleall/maple_me/include/me_phase_manager.h +++ b/src/mapleall/maple_me/include/me_phase_manager.h @@ -104,6 +104,7 @@ class MeFuncPM : public FunctionPM { static bool genMeMpl; static bool timePhases; static bool genMapleBC; + static bool genLMBC; void SetMeInput(const std::string &str) { meInput = str; diff --git a/src/mapleall/maple_me/src/lmbc_lower.cpp b/src/mapleall/maple_me/src/lmbc_lower.cpp new file mode 100644 index 0000000000000000000000000000000000000000..30c22ad26ea76e1e6616db95391f31f866db5d08 --- /dev/null +++ b/src/mapleall/maple_me/src/lmbc_lower.cpp @@ -0,0 +1,677 @@ +/* + * Copyright (c) [2022] Futurewei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. + */ + +#include "lmbc_lower.h" + +namespace maple { + +using namespace std; + +inline PrimType UnsignedPrimType(int32 bytesize) { + if (bytesize == 4) { + return PTY_u32; + } + if (bytesize == 2) { + return PTY_u16; + } + if (bytesize == 1) { + return PTY_u8; + } + return PTY_u32; +} + +PregIdx LMBCLowerer::GetSpecialRegFromSt(const MIRSymbol *sym) { + MIRStorageClass storageClass = sym->GetStorageClass(); + PregIdx specreg = 0; + if (storageClass == kScAuto || storageClass == kScFormal) { + CHECK(sym->GetStIndex() < memlayout->sym_alloc_table.size(), + "index out of range in LMBCLowerer::GetSpecialRegFromSt"); + SymbolAlloc symalloc = memlayout->sym_alloc_table[sym->GetStIndex()]; + if (symalloc.mem_segment->kind == MS_upformal || symalloc.mem_segment->kind == MS_formal || + symalloc.mem_segment->kind == MS_FPbased) { + specreg = -kSregFp; + } else if (symalloc.mem_segment->kind == MS_actual || symalloc.mem_segment->kind == MS_SPbased) { + specreg = -kSregSp; + } else { + CHECK_FATAL(false, "LMBCLowerer::LowerDread: bad memory layout for local variable"); + } + } else if (storageClass == kScGlobal || storageClass == kScFstatic || storageClass == kScExtern || storageClass == kScPstatic) { + specreg = -kSregGp; + } else { + CHECK_FATAL(false, "LMBCLowerer::LowerDread: NYI"); + } + return specreg; +} + +BaseNode *LMBCLowerer::ReadregNodeForSymbol(MIRSymbol *sym) { + return mirBuilder->CreateExprRegread(LOWERED_PTR_TYPE, GetSpecialRegFromSt(sym)); +} + +BaseNode *LMBCLowerer::LowerAddrof(AddrofNode *expr) { + MIRSymbol *symbol = func->GetLocalOrGlobalSymbol(expr->GetStIdx()); + if (symbol->GetStorageClass() == kScText) { + return expr; + } + int32 offset = 0; + if (expr->GetFieldID() != 0) { + MIRStructType *structty = dynamic_cast(symbol->GetType()); + CHECK_FATAL(structty, "LMBCLowerer::LowerAddrof: non-zero fieldID for non-structure"); + offset = becommon->GetFieldOffset(*structty, expr->GetFieldID()).first; + } + //BaseNode *rrn = ReadregNodeForSymbol(symbol); + PrimType symty = 
(expr->GetPrimType() == PTY_simplestr || expr->GetPrimType() == PTY_simpleobj) ? expr->GetPrimType() : LOWERED_PTR_TYPE; + BaseNode *rrn = mirBuilder->CreateExprRegread(symty, GetSpecialRegFromSt(symbol)); + offset += symbol->IsLocal() ? memlayout->sym_alloc_table[symbol->GetStIndex()].offset + : globmemlayout->sym_alloc_table[symbol->GetStIndex()].offset; + return (offset == 0) ? rrn + : mirBuilder->CreateExprBinary(OP_add, *GlobalTables::GetTypeTable().GetTypeFromTyIdx((TyIdx)expr->GetPrimType()), rrn, + mirBuilder->GetConstInt(offset)); +} + +BaseNode *LMBCLowerer::LowerDread(AddrofNode *expr) { + MIRSymbol *symbol = func->GetLocalOrGlobalSymbol(expr->GetStIdx()); + PrimType symty = symbol->GetType()->GetPrimType(); + int32 offset = 0; + if (expr->GetFieldID() != 0) { + MIRStructType *structty = dynamic_cast(symbol->GetType()); + CHECK_FATAL(structty, "LMBCLowerer::LowerDread: non-zero fieldID for non-structure"); + FieldPair thepair = structty->TraverseToField(expr->GetFieldID()); + symty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first)->GetPrimType(); + offset = becommon->GetFieldOffset(*structty, expr->GetFieldID()).first; + } + // allow dread class reference + PregIdx spcreg = GetSpecialRegFromSt(symbol); + if (spcreg == -kSregFp) { + CHECK_FATAL(symbol->IsLocal(), "load from fp non local?"); + IreadFPoffNode *ireadoff = mirBuilder->CreateExprIreadFPoff( + symty, memlayout->sym_alloc_table[symbol->GetStIndex()].offset + offset); + return ireadoff; + } else { + BaseNode *rrn = mirBuilder->CreateExprRegread(LOWERED_PTR_TYPE, spcreg); + SymbolAlloc &symalloc = symbol->IsLocal() ? memlayout->sym_alloc_table[symbol->GetStIndex()] + : globmemlayout->sym_alloc_table[symbol->GetStIndex()]; + IreadoffNode *ireadoff = mirBuilder->CreateExprIreadoff(symty, symalloc.offset + offset, rrn); + return ireadoff; + } +} + +static MIRType *GetPointedToType(const MIRPtrType *pointerty) { + MIRType *atype = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerty->GetPointedTyIdx()); + if (atype->GetKind() == kTypeArray) { + MIRArrayType *arraytype = static_cast(atype); + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(arraytype->GetElemTyIdx()); + } + if (atype->GetKind() == kTypeFArray || atype->GetKind() == kTypeJArray) { + MIRFarrayType *farraytype = static_cast(atype); + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(farraytype->GetElemTyIdx()); + } + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerty->GetPointedTyIdx()); +} + +BaseNode *LMBCLowerer::LowerIread(IreadNode *expr) { + int32 offset = 0; + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(expr->GetTyIdx()); + MIRPtrType *pointerty = static_cast(type); + CHECK_FATAL(pointerty, "expect a pointer type at iread node"); + if (expr->GetFieldID() != 0) { + MIRStructType *structty = dynamic_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerty->GetPointedTyIdx())); + CHECK_FATAL(structty, "SelectIread: non-zero fieldID for non-structure"); + FieldPair thepair = structty->TraverseToField(expr->GetFieldID()); + type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + offset = becommon->GetFieldOffset(*structty, expr->GetFieldID()).first; + } else { + type = GetPointedToType(pointerty); + } + BaseNode *ireadoff = mirBuilder->CreateExprIreadoff(type->GetPrimType(), offset, expr->Opnd(0)); + return ireadoff; +} + +BaseNode *LMBCLowerer::LowerExpr(BaseNode *expr) { + for (size_t i = 0; i < expr->NumOpnds(); ++i) { + expr->SetOpnd(LowerExpr(expr->Opnd(i)), i); + } + 
switch (expr->GetOpCode()) { + case OP_dread: return LowerDread(static_cast(expr)); + case OP_addrof: return LowerAddrof(static_cast(expr)); + case OP_iread: return LowerIread(static_cast(expr)); + default: ; + } + return expr; +} + +void LMBCLowerer::LowerAggDassign(BlockNode *newblk, const DassignNode *dsnode) { + MIRSymbol *lhssymbol = func->GetLocalOrGlobalSymbol(dsnode->GetStIdx()); + int32 lhsoffset = 0; + MIRType *lhsty = lhssymbol->GetType(); + if (dsnode->GetFieldID() != 0) { + MIRStructType *structty = dynamic_cast(lhssymbol->GetType()); + CHECK_FATAL(structty, "LowerAggDassign: non-zero fieldID for non-structure"); + FieldPair thepair = structty->TraverseToField(dsnode->GetFieldID()); + lhsty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + lhsoffset = becommon->GetFieldOffset(*structty, dsnode->GetFieldID()).first; + } + uint32 lhsalign = becommon->GetTypeAlign(lhsty->GetTypeIndex()); + uint32 lhssize = becommon->GetTypeSize(lhsty->GetTypeIndex()); + + uint32 rhsalign; + uint32 alignused; + int32 rhsoffset = 0; + BaseNode *loadnode = nullptr; + IassignoffNode *iassignoff = nullptr; + if (dsnode->Opnd(0)->GetOpCode() == OP_dread) { + AddrofNode *rhsdread = static_cast(dsnode->Opnd(0)); + MIRSymbol *rhssymbol = func->GetLocalOrGlobalSymbol(rhsdread->GetStIdx()); + MIRType *rhsty = rhssymbol->GetType(); + if (rhsdread->GetFieldID() != 0) { + MIRStructType *structty = dynamic_cast(rhssymbol->GetType()); + CHECK_FATAL(structty, "SelectDassign: non-zero fieldID for non-structure"); + FieldPair thepair = structty->TraverseToField(rhsdread->GetFieldID()); + rhsty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + rhsoffset = becommon->GetFieldOffset(*structty, rhsdread->GetFieldID()).first; + } + rhsalign = becommon->GetTypeAlign(rhsty->GetTypeIndex()); + BaseNode *rRrn = ReadregNodeForSymbol(rhssymbol); + SymbolAlloc &rsymalloc = rhssymbol->IsLocal() ? memlayout->sym_alloc_table[rhssymbol->GetStIndex()] + : globmemlayout->sym_alloc_table[rhssymbol->GetStIndex()]; + BaseNode *lRrn = ReadregNodeForSymbol(lhssymbol); + SymbolAlloc &lsymalloc = lhssymbol->IsLocal() ? 
memlayout->sym_alloc_table[lhssymbol->GetStIndex()] + : globmemlayout->sym_alloc_table[lhssymbol->GetStIndex()]; + + alignused = std::min(lhsalign, rhsalign); + alignused = std::min(alignused, 4u); // max alignment is 32-bit + if (!alignused) { + alignused = 1u; + } + for (uint32 i = 0; i < (lhssize / alignused); i++) { + // generate the load + loadnode = mirBuilder->CreateExprIreadoff(UnsignedPrimType(alignused), + rsymalloc.offset + rhsoffset + i * alignused, rRrn); + // generate the store + iassignoff = mirBuilder->CreateStmtIassignoff( + UnsignedPrimType(alignused), lsymalloc.offset + lhsoffset + i * alignused, lRrn, loadnode); + newblk->AddStatement(iassignoff); + } + // take care of extra content at the end less than the unit of alignused + uint32 lhssizeCovered = (lhssize / alignused) * alignused; + uint32 newalignused = alignused; + while (lhssizeCovered < lhssize) { + newalignused = newalignused >> 1; + if (lhssizeCovered + newalignused > lhssize) { + continue; + } + // generate the load + loadnode = mirBuilder->CreateExprIreadoff(UnsignedPrimType(newalignused), + rsymalloc.offset + rhsoffset + lhssizeCovered, rRrn); + // generate the store + iassignoff = mirBuilder->CreateStmtIassignoff( + UnsignedPrimType(newalignused), lsymalloc.offset + lhsoffset + lhssizeCovered, lRrn, loadnode); + newblk->AddStatement(iassignoff); + lhssizeCovered += newalignused; + } + } else if (dsnode->Opnd(0)->GetOpCode() == OP_regread) { + RegreadNode *regread = static_cast(dsnode->Opnd(0)); + CHECK_FATAL(regread->GetRegIdx() == -kSregRetval0 && regread->GetPrimType() == PTY_agg, ""); + + BaseNode *lRrn = ReadregNodeForSymbol(lhssymbol); + SymbolAlloc &lsymalloc = lhssymbol->IsLocal() ? memlayout->sym_alloc_table[lhssymbol->GetStIndex()] + : globmemlayout->sym_alloc_table[lhssymbol->GetStIndex()]; + + alignused = std::min(lhsalign, 4u); // max alignment is 32-bit + PregIdx ridx = -kSregRetval0; + for (uint32 i = 0; i < (lhssize / alignused); i++) { + // generate the load + loadnode = mirBuilder->CreateExprRegread(UnsignedPrimType(alignused), ridx - i); + // generate the store + iassignoff = mirBuilder->CreateStmtIassignoff( + UnsignedPrimType(alignused), lsymalloc.offset + lhsoffset + i * alignused, lRrn, loadnode); + newblk->AddStatement(iassignoff); + } + // take care of extra content at the end less than the unit of alignused + uint32 lhssizeCovered = (lhssize / alignused) * alignused; + ridx = -kSregRetval0 - (lhssize / alignused); + uint32 newalignused = alignused; + while (lhssizeCovered < lhssize) { + newalignused = newalignused >> 1; + if (lhssizeCovered + newalignused > lhssize) { + continue; + } + // generate the load + loadnode = mirBuilder->CreateExprRegread(UnsignedPrimType(newalignused), ridx--); + // generate the store + iassignoff = mirBuilder->CreateStmtIassignoff( + UnsignedPrimType(newalignused), lsymalloc.offset + lhsoffset + lhssizeCovered, lRrn, loadnode); + newblk->AddStatement(iassignoff); + lhssizeCovered += newalignused; + } + } else { // iread + IreadNode *rhsiread = static_cast(dsnode->Opnd(0)); + CHECK_FATAL(rhsiread, "LowerAggDassign: illegal rhs for dassign node of structure type"); + rhsiread->SetOpnd(LowerExpr(rhsiread->Opnd(0)), 0); + MIRType *rhsRdTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(rhsiread->GetTyIdx()); + MIRPtrType *pointerty = static_cast(rhsRdTy); + CHECK_FATAL(pointerty, "LowerAggDassign: expect a pointer type at iread node"); + if (rhsiread->GetFieldID() != 0) { + MIRStructType *structty = 
dynamic_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerty->GetPointedTyIdx())); + CHECK_FATAL(structty, "LowerAggDassign: non-zero fieldID for non-structure"); + FieldPair thepair = structty->TraverseToField(rhsiread->GetFieldID()); + rhsRdTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + rhsoffset = becommon->GetFieldOffset(*structty, rhsiread->GetFieldID()).first; + } else { + rhsRdTy = GetPointedToType(pointerty); + } + rhsalign = becommon->GetTypeAlign(rhsRdTy->GetTypeIndex()); + BaseNode *lRrn = ReadregNodeForSymbol(lhssymbol); + CHECK(lhssymbol->GetStIndex() < memlayout->sym_alloc_table.size() && + lhssymbol->GetStIndex() < globmemlayout->sym_alloc_table.size(), + "index oout of range in LMBCLowerer::LowerAggDassign"); + SymbolAlloc &lsymalloc = lhssymbol->IsLocal() ? memlayout->sym_alloc_table[lhssymbol->GetStIndex()] + : globmemlayout->sym_alloc_table[lhssymbol->GetStIndex()]; + + alignused = std::min(lhsalign, rhsalign); + alignused = std::min(alignused, 4u); // max alignment is 32-bit + for (uint32 i = 0; i < (lhssize / alignused); i++) { + // generate the load + loadnode = mirBuilder->CreateExprIreadoff(UnsignedPrimType(alignused), rhsoffset + i * alignused, + rhsiread->Opnd(0)); + // generate the store + iassignoff = mirBuilder->CreateStmtIassignoff( + UnsignedPrimType(alignused), lsymalloc.offset + lhsoffset + i * alignused, lRrn, loadnode); + newblk->AddStatement(iassignoff); + } + // take care of extra content at the end less than the unit of alignused + uint32 lhssizeCovered = (lhssize / alignused) * alignused; + uint32 newalignused = alignused; + while (lhssizeCovered < lhssize) { + newalignused = newalignused >> 1; + if (lhssizeCovered + newalignused > lhssize) { + continue; + } + // generate the load + loadnode = mirBuilder->CreateExprIreadoff(UnsignedPrimType(newalignused), rhsoffset + lhssizeCovered, + rhsiread->Opnd(0)); + // generate the store + iassignoff = mirBuilder->CreateStmtIassignoff( + UnsignedPrimType(newalignused), lsymalloc.offset + lhsoffset + lhssizeCovered, lRrn, loadnode); + newblk->AddStatement(iassignoff); + lhssizeCovered += newalignused; + } + } +} + +void LMBCLowerer::LowerDassign(DassignNode *dsnode, BlockNode *newblk) { + if (dsnode->Opnd(0)->GetPrimType() != PTY_agg) { + dsnode->SetOpnd(LowerExpr(dsnode->Opnd(0)), 0); + MIRSymbol *symbol = func->GetLocalOrGlobalSymbol(dsnode->GetStIdx()); + int32 offset = 0; + PrimType ptypused = symbol->GetType()->GetPrimType(); + if (dsnode->GetFieldID() != 0) { + MIRStructType *structty = dynamic_cast(symbol->GetType()); + CHECK_FATAL(structty, "LMBCLowerer::LowerDassign: non-zero fieldID for non-structure"); + offset = becommon->GetFieldOffset(*structty, dsnode->GetFieldID()).first; + TyIdx ftyidx = structty->TraverseToField(dsnode->GetFieldID()).second.first; + ptypused = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ftyidx)->GetPrimType(); + } + PregIdx spcreg = GetSpecialRegFromSt(symbol); + if (spcreg == -kSregFp) { + IassignFPoffNode *iassignoff = mirBuilder->CreateStmtIassignFPoff(OP_iassignfpoff, + ptypused, memlayout->sym_alloc_table[symbol->GetStIndex()].offset + offset, dsnode->Opnd(0)); + newblk->AddStatement(iassignoff); + } else { + BaseNode *rrn = ReadregNodeForSymbol(symbol); + SymbolAlloc &symalloc = symbol->IsLocal() ? 
memlayout->sym_alloc_table[symbol->GetStIndex()] + : globmemlayout->sym_alloc_table[symbol->GetStIndex()]; + IassignoffNode *iassignoff = + mirBuilder->CreateStmtIassignoff(ptypused, symalloc.offset + offset, rrn, dsnode->Opnd(0)); + newblk->AddStatement(iassignoff); + } + } else { + LowerAggDassign(newblk, dsnode); + } +} + +void LMBCLowerer::LowerAggIassign(BlockNode *newblk, IassignNode *iassign) { + int32 lhsoffset = 0; + MIRType *lhsty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iassign->GetTyIdx()); + MIRPtrType *pointerty = static_cast(lhsty); + if (pointerty->GetKind() != kTypePointer) { + TypeAttrs typeAttrs; + pointerty = static_cast(GlobalTables::GetTypeTable().GetOrCreatePointerType(*lhsty, GetExactPtrPrimType(), typeAttrs)); + } + if (iassign->GetFieldID() != 0) { + MIRStructType *structty = dynamic_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerty->GetPointedTyIdx())); + CHECK_FATAL(structty, "LowerAggDassign: non-zero fieldID for non-structure"); + FieldPair thepair = structty->TraverseToField(iassign->GetFieldID()); + lhsty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + lhsoffset = becommon->GetFieldOffset(*structty, iassign->GetFieldID()).first; + } else { + lhsty = GetPointedToType(pointerty); + } + uint32 lhsalign = becommon->GetTypeAlign(lhsty->GetTypeIndex()); + uint32 lhssize = becommon->GetTypeSize(lhsty->GetTypeIndex()); + + uint32 rhsalign; + uint32 alignused; + int32 rhsoffset = 0; + BaseNode *loadnode = nullptr; + IassignoffNode *iassignoff = nullptr; + if (iassign->GetRHS()->GetOpCode() == OP_dread) { + AddrofNode *rhsdread = static_cast(iassign->GetRHS()); + MIRSymbol *rhssymbol = func->GetLocalOrGlobalSymbol(rhsdread->GetStIdx()); + MIRType *rhsty = rhssymbol->GetType(); + if (rhsdread->GetFieldID() != 0) { + MIRStructType *structty = dynamic_cast(rhssymbol->GetType()); + CHECK_FATAL(structty, "SelectDassign: non-zero fieldID for non-structure"); + FieldPair thepair = structty->TraverseToField(rhsdread->GetFieldID()); + rhsty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + rhsoffset = becommon->GetFieldOffset(*structty, rhsdread->GetFieldID()).first; + } + rhsalign = becommon->GetTypeAlign(rhsty->GetTypeIndex()); + BaseNode *rRrn = ReadregNodeForSymbol(rhssymbol); + CHECK(rhssymbol->GetStIndex() < memlayout->sym_alloc_table.size() && + rhssymbol->GetStIndex() < globmemlayout->sym_alloc_table.size(), + "index out of range in LMBCLowerer::LowerAggIassign"); + SymbolAlloc &rsymalloc = rhssymbol->IsLocal() ? 
memlayout->sym_alloc_table[rhssymbol->GetStIndex()] + : globmemlayout->sym_alloc_table[rhssymbol->GetStIndex()]; + + alignused = std::min(lhsalign, rhsalign); + alignused = std::min(alignused, 4u); // max alignment is 32-bit + for (uint32 i = 0; i < (lhssize / alignused); i++) { + // generate the load + loadnode = mirBuilder->CreateExprIreadoff(UnsignedPrimType(alignused), + rsymalloc.offset + rhsoffset + i * alignused, rRrn); + // generate the store + iassignoff = mirBuilder->CreateStmtIassignoff(UnsignedPrimType(alignused), lhsoffset + i * alignused, + iassign->addrExpr, loadnode); + newblk->AddStatement(iassignoff); + } + // take care of extra content at the end less than the unit of alignused + uint32 lhssizeCovered = (lhssize / alignused) * alignused; + uint32 newalignused = alignused; + while (lhssizeCovered < lhssize) { + newalignused = newalignused >> 1; + if (lhssizeCovered + newalignused > lhssize) { + continue; + } + // generate the load + loadnode = mirBuilder->CreateExprIreadoff(UnsignedPrimType(newalignused), + rsymalloc.offset + rhsoffset + lhssizeCovered, rRrn); + // generate the store + iassignoff = mirBuilder->CreateStmtIassignoff(UnsignedPrimType(newalignused), + lhsoffset + lhssizeCovered, iassign->addrExpr, loadnode); + newblk->AddStatement(iassignoff); + lhssizeCovered += newalignused; + } + } else if (iassign->GetRHS()->GetOpCode() == OP_regread) { + RegreadNode *regread = static_cast(iassign->GetRHS()); + CHECK_FATAL(regread->GetRegIdx() == -kSregRetval0 && regread->GetPrimType() == PTY_agg, ""); + + alignused = std::min(lhsalign, 4u); // max alignment is 32-bit + PregIdx ridx = -kSregRetval0; + for (uint32 i = 0; i < (lhssize / alignused); i++) { + // generate the load + loadnode = mirBuilder->CreateExprRegread(UnsignedPrimType(alignused), ridx - i); + // generate the store + iassignoff = mirBuilder->CreateStmtIassignoff(UnsignedPrimType(alignused), lhsoffset + i * alignused, + iassign->addrExpr, loadnode); + newblk->AddStatement(iassignoff); + } + // take care of extra content at the end less than the unit of alignused + uint32 lhssizeCovered = (lhssize / alignused) * alignused; + ridx = -kSregRetval0 - (lhssize / alignused); + uint32 newalignused = alignused; + while (lhssizeCovered < lhssize) { + newalignused = newalignused >> 1; + if (lhssizeCovered + newalignused > lhssize) { + continue; + } + // generate the load + loadnode = mirBuilder->CreateExprRegread(UnsignedPrimType(newalignused), ridx--); + // generate the store + iassignoff = mirBuilder->CreateStmtIassignoff(UnsignedPrimType(newalignused), + lhsoffset + lhssizeCovered, iassign->addrExpr, loadnode); + newblk->AddStatement(iassignoff); + lhssizeCovered += newalignused; + } + } else { // iread + IreadNode *rhsiread = static_cast(iassign->GetRHS()); + CHECK_FATAL(rhsiread, "LowerAggIassign: illegal rhs for dassign node of structure type"); + rhsiread->SetOpnd(LowerExpr(rhsiread->Opnd(0)), 0); + MIRType *rhsRdTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(rhsiread->GetTyIdx()); + MIRPtrType *pointerty = static_cast(rhsRdTy); + CHECK_FATAL(pointerty, "LowerAggIassign: expect a pointer type at iread node"); + if (rhsiread->GetFieldID() != 0) { + MIRStructType *structty = dynamic_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerty->GetPointedTyIdx())); + CHECK_FATAL(structty, "LowerAggIassign: non-zero fieldID for non-structure"); + FieldPair thepair = structty->TraverseToField(rhsiread->GetFieldID()); + rhsRdTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + rhsoffset = 
becommon->GetFieldOffset(*structty, rhsiread->GetFieldID()).first; + } else { + rhsRdTy = GetPointedToType(pointerty); + } + rhsalign = becommon->GetTypeAlign(rhsRdTy->GetTypeIndex()); + + alignused = std::min(lhsalign, rhsalign); + alignused = std::min(alignused, 4u); // max alignment is 32-bit + for (uint32 i = 0; i < (lhssize / alignused); i++) { + // generate the load + loadnode = mirBuilder->CreateExprIreadoff(UnsignedPrimType(alignused), rhsoffset + i * alignused, + rhsiread->Opnd(0)); + // generate the store + iassignoff = mirBuilder->CreateStmtIassignoff(UnsignedPrimType(alignused), lhsoffset + i * alignused, + iassign->addrExpr, loadnode); + newblk->AddStatement(iassignoff); + } + // take care of extra content at the end less than the unit of alignused + uint32 lhssizeCovered = (lhssize / alignused) * alignused; + uint32 newalignused = alignused; + while (lhssizeCovered < lhssize) { + newalignused = newalignused >> 1; + if (lhssizeCovered + newalignused > lhssize) { + continue; + } + // generate the load + loadnode = mirBuilder->CreateExprIreadoff(UnsignedPrimType(newalignused), rhsoffset + lhssizeCovered, + rhsiread->Opnd(0)); + // generate the store + iassignoff = mirBuilder->CreateStmtIassignoff(UnsignedPrimType(newalignused), + lhsoffset + lhssizeCovered, iassign->addrExpr, loadnode); + newblk->AddStatement(iassignoff); + lhssizeCovered += newalignused; + } + } +} + +void LMBCLowerer::LowerIassign(IassignNode *iassign, BlockNode *newblk) { + iassign->addrExpr = LowerExpr(iassign->Opnd(0)); + if (iassign->GetRHS()->GetPrimType() != PTY_agg) { + iassign->SetRHS(LowerExpr(iassign->GetRHS())); + int32 offset = 0; + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iassign->GetTyIdx()); + MIRPtrType *pointerty = static_cast(type); + CHECK_FATAL(pointerty, "LowerIassign::expect a pointer type at iassign node"); + if (iassign->GetFieldID() != 0) { + MIRStructType *structty = dynamic_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerty->GetPointedTyIdx())); + CHECK_FATAL(structty, "LowerAggIassign: non-zero fieldID for non-structure"); + offset = becommon->GetFieldOffset(*structty, iassign->GetFieldID()).first; + TyIdx ftyidx = structty->TraverseToField(iassign->GetFieldID()).second.first; + type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ftyidx); + } else { + type = GetPointedToType(pointerty); + } + PrimType ptypused = type->GetPrimType(); + IassignoffNode *iassignoff = + mirBuilder->CreateStmtIassignoff(ptypused, offset, iassign->addrExpr, iassign->GetRHS()); + newblk->AddStatement(iassignoff); + } else { + LowerAggIassign(newblk, iassign); + } +} + +// called only if the return has > 1 operand; assume prior lowering already +// converted any return of structs to be via fake parameter +void LMBCLowerer::LowerReturn(NaryStmtNode *retNode, BlockNode *newblk) { + CHECK_FATAL(retNode->NumOpnds() <= 2, "LMBCLowerer::LowerReturn: more than 2 return values NYI"); + for (int i = 0; i < retNode->NumOpnds(); i++) { + CHECK_FATAL(retNode->Opnd(i)->GetPrimType() != PTY_agg, "LMBCLowerer::LowerReturn: return of aggregate needs to be handled first"); + // insert regassign for the returned value + BaseNode *rhs = LowerExpr(retNode->Opnd(i)); + RegassignNode *regasgn = mirBuilder->CreateStmtRegassign(rhs->GetPrimType(), i == 0 ? 
-kSregRetval0 : -kSregRetval1, rhs); + newblk->AddStatement(regasgn); + } + retNode->GetNopnd().clear(); // remove the return operands + retNode->SetNumOpnds(0); + newblk->AddStatement(retNode); +} + +void LMBCLowerer::LowerCall(NaryStmtNode *naryStmt, BlockNode *newblk) { + // go through each parameter + uint32 i = 0; + if (naryStmt->GetOpCode() == OP_icall || naryStmt->GetOpCode() == OP_icallassigned) { + i = 1; + } + ParmLocator parmlocator; + for (; i < naryStmt->NumOpnds(); i++) { + BaseNode *opnd = naryStmt->Opnd(i); + MIRType *ty = nullptr; + // get ty for this parameter + if (opnd->GetPrimType() != PTY_agg) { + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(opnd->GetPrimType())); + } else { + Opcode opnd_opcode = opnd->GetOpCode(); + CHECK_FATAL(opnd_opcode == OP_dread || opnd_opcode == OP_iread, ""); + if (opnd_opcode == OP_dread) { + AddrofNode *dread = static_cast(opnd); + MIRSymbol *sym = func->GetLocalOrGlobalSymbol(dread->GetStIdx()); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(sym->GetTyIdx()); + if (dread->GetFieldID() != 0) { + CHECK_FATAL(ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeClass, ""); + FieldPair thepair = static_cast(ty)->TraverseToField(dread->GetFieldID()); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + } + } else { // OP_iread + IreadNode *iread = static_cast(opnd); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread->GetTyIdx()); + CHECK_FATAL(ty->GetKind() == kTypePointer, ""); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(ty)->GetPointedTyIdx()); + if (iread->GetFieldID() != 0) { + CHECK_FATAL(ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeClass, ""); + FieldPair thepair = static_cast(ty)->TraverseToField(iread->GetFieldID()); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + } + } + } + PLocInfo ploc; + parmlocator.LocateNextParm(ty, ploc); + if (opnd->GetPrimType() != PTY_agg) { + IassignFPoffNode *iass = mirBuilder->CreateStmtIassignFPoff(OP_iassignspoff, opnd->GetPrimType(), ploc.memoffset, LowerExpr(opnd)); + newblk->AddStatement(iass); + } else { + BlkassignoffNode *bass = mirModule->CurFuncCodeMemPool()->New(ploc.memoffset, ploc.memsize); + bass->SetBOpnd(mirBuilder->CreateExprRegread(PTY_a64, -kSregSp), 0); + // the operand is either OP_dread or OP_iread; use its address instead + if (opnd->GetOpCode() == OP_dread) { + opnd->SetOpCode(OP_addrof); + } else { + opnd->SetOpCode(OP_iaddrof); + } + bass->SetBOpnd(opnd, 1); + newblk->AddStatement(bass); + } + } + BaseNode *opnd0 = nullptr; + if (naryStmt->GetOpCode() == OP_icall || naryStmt->GetOpCode() == OP_icallassigned) { + opnd0 = naryStmt->Opnd(0); + naryStmt->GetNopnd().clear(); // remove the call operands + naryStmt->GetNopnd().push_back(opnd0); + naryStmt->SetNumOpnds(1); + } else { + naryStmt->GetNopnd().clear(); // remove the call operands + naryStmt->SetNumOpnds(0); + } + newblk->AddStatement(naryStmt); +} + +BlockNode *LMBCLowerer::LowerBlock(BlockNode *block) { + BlockNode *newblk = mirModule->CurFuncCodeMemPool()->New(); + if (!block->GetFirst()) { + return newblk; + } + StmtNode *nextstmt = block->GetFirst(); + do { + StmtNode *stmt = nextstmt; + if (stmt == block->GetLast()) { + nextstmt = nullptr; + } else { + nextstmt = stmt->GetNext(); + } + stmt->SetNext(nullptr); + switch (stmt->GetOpCode()) { + case OP_dassign: { + LowerDassign(static_cast(stmt), newblk); + break; + } + case OP_iassign: { + LowerIassign(static_cast(stmt), newblk); + break; + } + case 
OP_return: { + NaryStmtNode *retNode = static_cast(stmt); + if (retNode->GetNopndSize() == 0) { + newblk->AddStatement(stmt); + } else { + LowerReturn(retNode, newblk); + } + break; + } + case OP_call: + case OP_icall: { + LowerCall(static_cast(stmt), newblk); + break; + } + default: { + for (size_t i = 0; i < stmt->NumOpnds(); ++i) { + stmt->SetOpnd(LowerExpr(stmt->Opnd(i)), i); + } + newblk->AddStatement(stmt); + break; + } + } + } while (nextstmt != nullptr); + return newblk; +} + +void LMBCLowerer::LoadFormalsAssignedToPregs() { + // go through each formal + for (int32 i = func->GetFormalDefVec().size()-1; i >= 0; i--) { + MIRSymbol *formalSt = func->GetFormalDefVec()[i].formalSym; + if (formalSt->GetSKind() != kStPreg) { + continue; + } + MIRPreg *preg = formalSt->GetPreg(); + uint32 stindex = formalSt->GetStIndex(); + PrimType pty = formalSt->GetType()->GetPrimType(); + IreadFPoffNode *ireadfpoff = mirBuilder->CreateExprIreadFPoff(pty, memlayout->sym_alloc_table[stindex].offset); + RegassignNode *rass = mirBuilder->CreateStmtRegassign(pty, func->GetPregTab()->GetPregIdxFromPregno(preg->GetPregNo()), ireadfpoff); + func->GetBody()->InsertFirst(rass); + } +} + +void LMBCLowerer::LowerFunction() { + BlockNode *origbody = func->GetBody(); + BlockNode *newbody = LowerBlock(origbody); + func->SetBody(newbody); + LoadFormalsAssignedToPregs(); +} + +} // namespace maple diff --git a/src/mapleall/maple_me/src/lmbc_memlayout.cpp b/src/mapleall/maple_me/src/lmbc_memlayout.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f628e8d71fc084109543c64052c2825987fe9846 --- /dev/null +++ b/src/mapleall/maple_me/src/lmbc_memlayout.cpp @@ -0,0 +1,482 @@ +/* + * Copyright (c) [2022] Futurewei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. + */ + +// For each function being compiled, lay out its parameters, return values and +// local variables on its stack frame. This involves determining how parameters +// and return values are passed from analyzing their types. +// +// Allocate all the global variables within the global memory block which is +// addressed via offset from the global pointer GP during execution. Allocate +// this block pointed to by mirModule.globalBlkMap and perform the static +// initializations. 
+ +#include "lmbc_memlayout.h" + +namespace maple { + +uint32 LMBCMemLayout::FindLargestActualArea(StmtNode *stmt, int &maxActualSize) { + if (!stmt) { + return maxActualSize; + } + Opcode opcode = stmt->op; + switch (opcode) { + case OP_block: { + BlockNode *blcknode = static_cast(stmt); + for (StmtNode &s : blcknode->GetStmtNodes()) { + FindLargestActualArea(&s, maxActualSize); + } + break; + } + case OP_if: { + IfStmtNode *ifnode = static_cast(stmt); + FindLargestActualArea(ifnode->GetThenPart(), maxActualSize); + FindLargestActualArea(ifnode->GetElsePart(), maxActualSize); + break; + } + case OP_doloop: { + FindLargestActualArea(static_cast(stmt)->GetDoBody(), maxActualSize); + break; + } + case OP_dowhile: + case OP_while: + FindLargestActualArea(static_cast(stmt)->GetBody(), maxActualSize); + break; + case OP_call: + case OP_icall: + case OP_intrinsiccall: { + ParmLocator parmlocator; // instantiate a parm locator + NaryStmtNode *callstmt = static_cast(stmt); + for (uint32 i = 0; i < callstmt->NumOpnds(); i++) { + BaseNode *opnd = callstmt->Opnd(i); + CHECK_FATAL(opnd->GetPrimType() != PTY_void, ""); + MIRType *ty = nullptr; + if (opnd->GetPrimType() != PTY_agg) { + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(opnd->GetPrimType())); + } else { + Opcode opnd_opcode = opnd->GetOpCode(); + CHECK_FATAL(opnd_opcode == OP_dread || opnd_opcode == OP_iread, ""); + if (opnd_opcode == OP_dread) { + AddrofNode *dread = static_cast(opnd); + MIRSymbol *sym = func->GetLocalOrGlobalSymbol(dread->GetStIdx()); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(sym->GetTyIdx()); + if (dread->GetFieldID() != 0) { + CHECK_FATAL(ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeClass, ""); + FieldPair thepair = static_cast(ty)->TraverseToField(dread->GetFieldID()); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + } + } else { // OP_iread + IreadNode *iread = static_cast(opnd); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread->GetTyIdx()); + CHECK_FATAL(ty->GetKind() == kTypePointer, ""); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(ty)->GetPointedTyIdx()); + if (iread->GetFieldID() != 0) { + CHECK_FATAL(ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeClass, ""); + FieldPair thepair = static_cast(ty)->TraverseToField(iread->GetFieldID()); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + } + } + } + PLocInfo ploc; + parmlocator.LocateNextParm(ty, ploc); + maxActualSize = std::max(maxActualSize, ploc.memoffset + ploc.memsize); + maxActualSize = maplebe::RoundUp(maxActualSize, GetPrimTypeSize(PTY_ptr)); + } + break; + } + default: + return maxActualSize; + } + maxActualSize = maplebe::RoundUp(maxActualSize, GetPrimTypeSize(PTY_ptr)); + return maxActualSize; +} + +// go over all outgoing calls in the function body and get the maximum space +// needed for storing the actuals based on the actual parameters and the ABI; +// this assumes that all nesting of statements has been removed, so that all +// the statements are at only one block level +uint32 LMBCMemLayout::FindLargestActualArea(void) { + int32 maxActualSize = 0; + FindLargestActualArea(func->GetBody(), maxActualSize); + return static_cast(maxActualSize); +} + +void LMBCMemLayout::LayoutStackFrame(void) { + MIRSymbol *sym = nullptr; + // StIdx stIdx; + // go through formal parameters + ParmLocator parmlocator; // instantiate a parm locator + PLocInfo ploc; + for (uint32 i = 0; i < func->GetFormalDefVec().size(); i++) { + FormalDef 
formalDef = func->GetFormalDefAt(i); + sym = formalDef.formalSym; + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(formalDef.formalTyIdx); + parmlocator.LocateNextParm(ty, ploc); + uint32 stindex = sym->GetStIndex(); + // always passed in memory, so allocate in seg_upformal + sym_alloc_table[stindex].mem_segment = &seg_upformal; + seg_upformal.size = maplebe::RoundUp(seg_upformal.size, ty->GetAlign()); + sym_alloc_table[stindex].offset = seg_upformal.size; + seg_upformal.size += ty->GetSize(); + seg_upformal.size = maplebe::RoundUp(seg_upformal.size, GetPrimTypeSize(PTY_ptr)); + // LogInfo::MapleLogger() << "LAYOUT: formal %" << GlobalTables::GetStringFromGstridx(sym->GetNameStridx()); + // LogInfo::MapleLogger() << " at seg_upformal offset " << sym_alloc_table[stindex].offset << " passed in memory\n"; + } + + // allocate seg_formal in seg_FPbased + seg_formal.how_alloc.mem_segment = &seg_FPbased; + seg_FPbased.size = maplebe::RoundDown(seg_FPbased.size, GetPrimTypeSize(PTY_ptr)); + seg_FPbased.size -= seg_formal.size; + seg_FPbased.size = maplebe::RoundDown(seg_FPbased.size, GetPrimTypeSize(PTY_ptr)); + seg_formal.how_alloc.offset = seg_FPbased.size; + //LogInfo::MapleLogger() << "LAYOUT: seg_formal at seg_FPbased offset " << seg_formal.how_alloc.offset << " with size " + // << seg_formal.size << std::endl; + + // allocate the local variables + uint32 symtabsize = func->GetSymTab()->GetSymbolTableSize(); + for (uint32 i = 0; i < symtabsize; i++) { + sym = func->GetSymTab()->GetSymbolFromStIdx(i); + if (!sym) { + continue; + } + if (sym->IsDeleted()) { + continue; + } + if (sym->GetStorageClass() != kScAuto) { + continue; + } + uint32 stindex = sym->GetStIndex(); + sym_alloc_table[stindex].mem_segment = &seg_FPbased; + seg_FPbased.size -= sym->GetType()->GetSize(); + seg_FPbased.size = maplebe::RoundDown(seg_FPbased.size, sym->GetType()->GetAlign()); + sym_alloc_table[stindex].offset = seg_FPbased.size; + // LogInfo::MapleLogger() << "LAYOUT: local %" << GlobalTables::GetStringFromGstridx(sym->GetNameStridx()); + // LogInfo::MapleLogger() << " at FPbased offset " << sym_alloc_table[stindex].offset << std::endl; + } + seg_FPbased.size = maplebe::RoundDown(seg_FPbased.size, GetPrimTypeSize(PTY_ptr)); + + // allocate seg_actual for storing the outgoing parameters; this requires + // going over all outgoing calls and get the maximum space needed for the + // actuals + seg_actual.size = FindLargestActualArea(); + func->SetOutParmSize(seg_actual.size); + + // allocate seg_actual in seg_SPbased + seg_actual.how_alloc.mem_segment = &seg_SPbased; + seg_actual.how_alloc.offset = seg_SPbased.size; + seg_SPbased.size = maplebe::RoundUp(seg_SPbased.size, GetPrimTypeSize(PTY_ptr)); + seg_SPbased.size += seg_actual.size; + seg_SPbased.size = maplebe::RoundUp(seg_SPbased.size, GetPrimTypeSize(PTY_ptr)); + //LogInfo::MapleLogger() << "LAYOUT: seg_actual at seg_SPbased offset " << seg_actual.how_alloc.offset << " with size " + // << seg_actual.size << std::endl; +} + +inline uint8 GetU8Const(MIRConst *c) { + MIRIntConst *intconst = static_cast(c); + return static_cast(intconst->GetValue()); +} + +inline uint16 GetU16Const(MIRConst *c) { + MIRIntConst *intconst = static_cast(c); + return static_cast(intconst->GetValue()); +} + +inline uint32 GetU32Const(MIRConst *c) { + MIRIntConst *intconst = static_cast(c); + return static_cast(intconst->GetValue()); +} + +inline uint64 GetU64Const(MIRConst *c) { + MIRIntConst *intconst = static_cast(c); + return static_cast(intconst->GetValue()); +} + +inline 
uint32 GetF32Const(MIRConst *c) { + MIRFloatConst *floatconst = static_cast(c); + return static_cast(floatconst->GetIntValue()); +} + +inline uint64 GetF64Const(MIRConst *c) { + MIRDoubleConst *doubleconst = static_cast(c); + return static_cast(doubleconst->GetIntValue()); +} + +void GlobalMemLayout::FillScalarValueInMap(uint32 startaddress, PrimType pty, MIRConst *c) { + switch (pty) { + case PTY_u1: + case PTY_u8: + case PTY_i8: { + uint8 *p = &mirModule->GetGlobalBlockMap()[startaddress]; + *p = GetU8Const(c); + break; + } + case PTY_u16: + case PTY_i16: { + uint16 *p = (uint16 *)(&mirModule->GetGlobalBlockMap()[startaddress]); + *p = GetU16Const(c); + break; + } + case PTY_u32: + case PTY_i32: { + uint32 *p = (uint32 *)(&mirModule->GetGlobalBlockMap()[startaddress]); + *p = GetU32Const(c); + break; + } + case PTY_u64: + case PTY_i64: { + uint64 *p = (uint64 *)(&mirModule->GetGlobalBlockMap()[startaddress]); + *p = GetU64Const(c); + break; + } + case PTY_f32: { + uint32 *p = (uint32 *)(&mirModule->GetGlobalBlockMap()[startaddress]); + *p = GetF32Const(c); + break; + } + case PTY_f64: { + uint64 *p = (uint64 *)(&mirModule->GetGlobalBlockMap()[startaddress]); + *p = GetF64Const(c); + break; + } + default: + CHECK_FATAL(false, "FillScalarValueInMap: NYI"); + } + return; +} + +void GlobalMemLayout::FillTypeValueInMap(uint32 startaddress, MIRType *ty, MIRConst *c) { + switch (ty->GetKind()) { + case kTypeScalar: + FillScalarValueInMap(startaddress, ty->GetPrimType(), c); + break; + case kTypeArray: { + MIRArrayType *arraytype = static_cast(ty); + MIRType *elemtype = arraytype->GetElemType(); + int32 elemsize = elemtype->GetSize(); + MIRAggConst *aggconst = dynamic_cast(c); + CHECK_FATAL(aggconst, "FillTypeValueInMap: inconsistent array initialization specification"); + MapleVector &constvec = aggconst->GetConstVec(); + for (MapleVector::iterator it = constvec.begin(); it != constvec.end(); + it++, startaddress += elemsize) { + FillTypeValueInMap(startaddress, elemtype, *it); + } + break; + } + case kTypeStruct: { + MIRStructType *structty = static_cast(ty); + MIRAggConst *aggconst = dynamic_cast(c); + CHECK_FATAL(aggconst, "FillTypeValueInMap: inconsistent struct initialization specification"); + MapleVector &constvec = aggconst->GetConstVec(); + for (uint32 i = 0; i < constvec.size(); i++) { + uint32 fieldID = aggconst->GetFieldIdItem(i); + FieldPair thepair = structty->TraverseToField(fieldID); + MIRType *fieldty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + uint32 offset = be->GetFieldOffset(*structty, fieldID).first; + FillTypeValueInMap(startaddress + offset, fieldty, constvec[i]); + } + break; + } + case kTypeClass: { + MIRClassType *classty = static_cast(ty); + MIRAggConst *aggconst = dynamic_cast(c); + CHECK_FATAL(aggconst, "FillTypeValueInMap: inconsistent class initialization specification"); + MapleVector &constvec = aggconst->GetConstVec(); + for (uint32 i = 0; i < constvec.size(); i++) { + uint32 fieldID = aggconst->GetFieldIdItem(i); + FieldPair thepair = classty->TraverseToField(fieldID); + MIRType *fieldty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + uint32 offset = be->GetFieldOffset(*classty, fieldID).first; + FillTypeValueInMap(startaddress + offset, fieldty, constvec[i]); + } + break; + } + default: + CHECK_FATAL(false, "FillTypeValueInMap: NYI"); + } +} + +void GlobalMemLayout::FillSymbolValueInMap(const MIRSymbol *sym) { + if (sym->GetKonst() == nullptr) { + return; + } + uint32 stindex = sym->GetStIndex(); + 
CHECK(stindex < sym_alloc_table.size(), "index out of range in GlobalMemLayout::FillSymbolValueInMap"); + uint32 symaddress = sym_alloc_table[stindex].offset; + FillTypeValueInMap(symaddress, sym->GetType(), sym->GetKonst()); + return; +} + +GlobalMemLayout::GlobalMemLayout(maplebe::BECommon *b, MIRModule *mod, MapleAllocator *mallocator) + : seg_GPbased(MS_GPbased), sym_alloc_table(mallocator->Adapter()), be(b), mirModule(mod) { + uint32 symtabsize = GlobalTables::GetGsymTable().GetSymbolTableSize(); + sym_alloc_table.resize(symtabsize); + MIRSymbol *sym = nullptr; + // StIdx stIdx; + // allocate the global variables ordered based on alignments + for (int32 curalign = 8; curalign != 0; curalign >>= 1) { + for (uint32 i = 0; i < symtabsize; i++) { + sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(i); + if (!sym) { + continue; + } + if (sym->GetStorageClass() != kScGlobal && sym->GetStorageClass() != kScFstatic) { + continue; + } + if (sym->GetType()->GetAlign() != curalign) { + continue; + } + uint32 stindex = sym->GetStIndex(); + sym_alloc_table[stindex].mem_segment = &seg_GPbased; + seg_GPbased.size = maplebe::RoundUp(seg_GPbased.size, sym->GetType()->GetAlign()); + sym_alloc_table[stindex].offset = seg_GPbased.size; + seg_GPbased.size += sym->GetType()->GetSize(); + // LogInfo::MapleLogger() << "LAYOUT: global %" << GlobalTables::GetStringFromGstridx(sym->GetNameStridx()); + // LogInfo::MapleLogger() << " at GPbased offset " << sym_alloc_table[stindex].offset << std::endl; + } + } + seg_GPbased.size = maplebe::RoundUp(seg_GPbased.size, GetPrimTypeSize(PTY_ptr)); + mirModule->SetGlobalMemSize(seg_GPbased.size); + // allocate the memory map for the GP block + mirModule->SetGlobalBlockMap(static_cast(mirModule->GetMemPool()->Calloc(seg_GPbased.size))); + // perform initialization on globalblkmap + for (uint32 i = 0; i < symtabsize; i++) { + sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(i); + if (!sym) { + continue; + } + if (sym->GetStorageClass() != kScGlobal && sym->GetStorageClass() != kScFstatic) { + continue; + } + // FillSymbolValueInMap(sym); + } +} + +// LocateNextParm should be called with each parameter in the parameter list +// starting from the beginning, one call per parameter in sequence; it returns +// the information on how each parameter is passed in ploc +void ParmLocator::LocateNextParm(const MIRType *ty, PLocInfo &ploc) { + ploc.memoffset = lastMemOffset; + ploc.memsize = ty->GetSize(); + + uint32 rightpad = 0; + parmNum++; + + switch (ty->GetPrimType()) { + case PTY_u1: + case PTY_u8: + case PTY_i8: + case PTY_u16: + case PTY_i16: + rightpad = GetPrimTypeSize(PTY_i32) - ploc.memsize; + break; + case PTY_a32: + case PTY_u32: + case PTY_i32: + case PTY_a64: + case PTY_u64: + case PTY_i64: + case PTY_ptr: + case PTY_ref: +#ifdef DYNAMICLANG + case PTY_simplestr: + case PTY_simpleobj: + case PTY_dynany: + case PTY_dyni32: + case PTY_dynf64: + case PTY_dynstr: + case PTY_dynobj: + case PTY_dynundef: + case PTY_dynbool: + case PTY_dynf32: + case PTY_dynnone: + case PTY_dynnull: +#endif + break; + + case PTY_f32: + rightpad = GetPrimTypeSize(PTY_f64) - ploc.memsize; + break; + case PTY_c64: + case PTY_f64: + break; + + case PTY_c128: + break; + + case PTY_agg: { + ploc.memsize = ty->GetSize(); + // compute rightpad + int32 paddedSize = maplebe::RoundUp(ploc.memsize, 8); + rightpad = paddedSize - ploc.memsize; + break; + } + default: + CHECK_FATAL(false, ""); + } + + lastMemOffset = ploc.memoffset + ploc.memsize + rightpad; + return; +} + +// instantiated with 
the type of the function return value, it describes how +// the return value is to be passed back to the caller +ReturnMechanism::ReturnMechanism(const MIRType *retty) : fake_first_parm(false) { + switch (retty->GetPrimType()) { + case PTY_u1: + case PTY_u8: + case PTY_i8: + case PTY_u16: + case PTY_i16: + case PTY_a32: + case PTY_u32: + case PTY_i32: + case PTY_a64: + case PTY_u64: + case PTY_i64: + case PTY_f32: + case PTY_f64: +#ifdef DYNAMICLANG + case PTY_simplestr: + case PTY_simpleobj: + case PTY_dynany: + case PTY_dyni32: + case PTY_dynstr: + case PTY_dynobj: +#endif + ptype0 = retty->GetPrimType(); + return; + + case PTY_c64: + case PTY_c128: + fake_first_parm = true; + ptype0 = PTY_a32; + return; + + case PTY_agg: { + uint32 size = retty->GetSize(); + if (size > 4) { + fake_first_parm = true; + ptype0 = PTY_a32; + } else { + ptype0 = PTY_u32; + } + return; + } + + default: + return; + } +} + +} // namespace maple diff --git a/src/mapleall/maple_me/src/me_phase_manager.cpp b/src/mapleall/maple_me/src/me_phase_manager.cpp index 9b62f9f1d15f792ae5819e8352cf9835b32fafc8..9194d9232535a5ea12fc276582316d1798f7cc4d 100644 --- a/src/mapleall/maple_me/src/me_phase_manager.cpp +++ b/src/mapleall/maple_me/src/me_phase_manager.cpp @@ -14,6 +14,10 @@ */ #include "me_phase_manager.h" #include "bin_mplt.h" +#include "becommon.h" +#include "lower.h" +#include "lmbc_memlayout.h" +#include "lmbc_lower.h" #define JAVALANG (mirModule.IsJavaModule()) #define CLANG (mirModule.IsCModule()) @@ -21,6 +25,7 @@ namespace maple { bool MeFuncPM::genMeMpl = false; bool MeFuncPM::genMapleBC = false; +bool MeFuncPM::genLMBC = false; bool MeFuncPM::timePhases = false; void MeFuncPM::DumpMEIR(const MeFunction &f, const std::string phaseName, bool isBefore) { @@ -112,6 +117,8 @@ bool MeFuncPM::PhaseRun(maple::MIRModule &m) { m.Emit("comb.me.mpl"); } if (genMapleBC) { + m.SetFlavor(kFlavorMbc); + // output .mbc BinaryMplt binMplt(m); std::string modFileName = m.GetFileName(); std::string::size_type lastdot = modFileName.find_last_of("."); @@ -120,6 +127,38 @@ bool MeFuncPM::PhaseRun(maple::MIRModule &m) { std::string filestem = modFileName.substr(0, lastdot); binMplt.Export(filestem + ".mbc", nullptr); } + if (genLMBC) { + m.SetFlavor(kFlavorLmbc); + maplebe::BECommon beCommon(m); + GlobalMemLayout globalMemLayout(&beCommon, &m, &m.GetMPAllocator()); + maplebe::CGLowerer cgLower(m, beCommon, false, false); + cgLower.RegisterBuiltIns(); + cgLower.RegisterExternalLibraryFunctions(); + for (auto func : compFuncList) { + if (func->GetBody() == nullptr) { + continue; + } + m.SetCurFunction(func); + cgLower.LowerFunc(*func); + MemPool *layoutMp = memPoolCtrler.NewMemPool("layout mempool", true); + MapleAllocator layoutAlloc(layoutMp); + LMBCMemLayout localMemLayout(func, &layoutAlloc); + localMemLayout.LayoutStackFrame(); + LMBCLowerer lmbcLowerer(&m, &beCommon, func, &globalMemLayout, &localMemLayout); + lmbcLowerer.LowerFunction(); + func->SetFrameSize(localMemLayout.StackFrameSize()); + func->SetUpFormalSize(localMemLayout.UpformalSize()); + memPoolCtrler.DeleteMemPool(layoutMp); + } + // output .lmbc + BinaryMplt binMplt(m); + std::string modFileName = m.GetFileName(); + std::string::size_type lastdot = modFileName.find_last_of("."); + + binMplt.GetBinExport().not2mplt = true; + std::string filestem = modFileName.substr(0, lastdot); + binMplt.Export(filestem + ".lmbc", nullptr); + } if (MeFuncPM::timePhases) { DumpPhaseTime(); }
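
Illustrative note (not part of the patch): the aggregate-copy lowering in LowerAggDassign and LowerAggIassign above always follows the same pattern. It picks a copy unit of min(lhsalign, rhsalign, 4) bytes (the "max alignment is 32-bit" comment caps the unit at 4), emits one ireadoff/iassignoff pair per full unit, and then covers the tail with progressively halved units. The standalone C++ sketch below models only that chunking arithmetic; ChunkedCopyPlan is a hypothetical helper written for this note and is not part of the patch or of the MapleAll sources.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

// Returns the (offset, width) pairs that the lowering would emit for an
// aggregate of `size` bytes when the two sides allow lhsAlign/rhsAlign-byte
// accesses. Mirrors the loops in LowerAggDassign/LowerAggIassign.
static std::vector<std::pair<uint32_t, uint32_t>> ChunkedCopyPlan(uint32_t size, uint32_t lhsAlign, uint32_t rhsAlign) {
  std::vector<std::pair<uint32_t, uint32_t>> plan;
  uint32_t alignUsed = std::min(std::min(lhsAlign, rhsAlign), uint32_t{4});  // max unit is 32-bit
  for (uint32_t i = 0; i < size / alignUsed; ++i) {
    plan.emplace_back(i * alignUsed, alignUsed);  // full-width load/store pairs
  }
  uint32_t covered = (size / alignUsed) * alignUsed;
  uint32_t unit = alignUsed;
  while (covered < size) {  // tail smaller than alignUsed
    unit >>= 1;
    if (covered + unit > size) {
      continue;  // unit still too wide; halve again
    }
    plan.emplace_back(covered, unit);
    covered += unit;
  }
  return plan;
}

int main() {
  // A 7-byte struct with 4-byte alignment on both sides is copied as one
  // 32-bit, one 16-bit and one 8-bit access: (0,4), (4,2), (6,1).
  for (const auto &chunk : ChunkedCopyPlan(7, 4, 4)) {
    std::cout << "copy " << chunk.second << " byte(s) at offset " << chunk.first << "\n";
  }
  return 0;
}

The 4-byte cap appears to be a deliberate design choice so that every emitted ireadoff/iassignoff stays within a 32-bit access width regardless of the aggregate's natural alignment; the same halving scheme also handles the -kSregRetval0 regread case, except that only lhsalign is consulted there.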