From 140bd04529fcddfb9792a9fc1dc85d1cc5431860 Mon Sep 17 00:00:00 2001 From: Fred Chow Date: Tue, 5 Apr 2022 16:32:59 -0700 Subject: [PATCH 1/5] Merged in lowering and memory layout code from incubator to produce lowered maple bytecodes (lmbc) when -genlmbc is specified Since mplme needs to call CGLower, need maple_me's build to include some header files from maple_be. After CGLower, it will call memory layout followed by LMBCLowerer to complete the lowering to lmbc. Output is generated in .lmbc file which is in binary form. Defined the new module flavors kFlavorMbc and kFlavorLmbc. The symbol tables and type tables are not yet deleted. Various bug fixes related to the lowered opcodes. The generated .lmbc file is experimental only. --- src/mapleall/maple_be/include/be/becommon.h | 2 +- .../maple_be/include/be/common_utils.h | 13 + .../include/driver_option_common.h | 1 + .../maple_driver/include/driver_runner.h | 8 +- .../maple_driver/include/mpl_options.h | 5 + .../maple_driver/src/driver_option_common.cpp | 9 + .../maple_driver/src/driver_runner.cpp | 1 + .../maple_driver/src/maple_comb_compiler.cpp | 3 +- src/mapleall/maple_driver/src/mpl_options.cpp | 3 + src/mapleall/maple_ir/include/mir_function.h | 8 +- src/mapleall/maple_ir/include/mir_module.h | 9 +- src/mapleall/maple_ir/include/mir_nodes.h | 2 +- src/mapleall/maple_ir/src/bin_func_export.cpp | 40 +- src/mapleall/maple_ir/src/bin_func_import.cpp | 48 +- src/mapleall/maple_ir/src/bin_mpl_export.cpp | 3 + src/mapleall/maple_ir/src/bin_mpl_import.cpp | 3 + src/mapleall/maple_ir/src/mir_function.cpp | 6 +- src/mapleall/maple_ir/src/mir_module.cpp | 2 +- src/mapleall/maple_ir/src/mir_parser.cpp | 10 +- src/mapleall/maple_me/BUILD.gn | 8 + src/mapleall/maple_me/include/lmbc_lower.h | 53 ++ .../maple_me/include/lmbc_memlayout.h | 154 +++++ .../maple_me/include/me_phase_manager.h | 1 + src/mapleall/maple_me/src/lmbc_lower.cpp | 563 ++++++++++++++++++ src/mapleall/maple_me/src/lmbc_memlayout.cpp | 481 +++++++++++++++ .../maple_me/src/me_phase_manager.cpp | 36 ++ 26 files changed, 1447 insertions(+), 25 deletions(-) create mode 100644 src/mapleall/maple_me/include/lmbc_lower.h create mode 100644 src/mapleall/maple_me/include/lmbc_memlayout.h create mode 100644 src/mapleall/maple_me/src/lmbc_lower.cpp create mode 100644 src/mapleall/maple_me/src/lmbc_memlayout.cpp diff --git a/src/mapleall/maple_be/include/be/becommon.h b/src/mapleall/maple_be/include/be/becommon.h index cc18f3dc20..92160fcc56 100644 --- a/src/mapleall/maple_be/include/be/becommon.h +++ b/src/mapleall/maple_be/include/be/becommon.h @@ -164,7 +164,7 @@ class BECommon { return 1; } - const MIRModule &GetMIRModule() const { + MIRModule &GetMIRModule() const { return mirModule; } diff --git a/src/mapleall/maple_be/include/be/common_utils.h b/src/mapleall/maple_be/include/be/common_utils.h index f2f0cd9b10..7a3c210537 100644 --- a/src/mapleall/maple_be/include/be/common_utils.h +++ b/src/mapleall/maple_be/include/be/common_utils.h @@ -192,6 +192,19 @@ inline uint64 RoundUp(uint64 offset, uint64 align) { return RoundUpConst(offset, align); } +inline int64 RoundDownConst(int64 offset, int64 align) { + return (-align) & offset; +} + +// align must be a power of 2 +inline int64 RoundDown(int64 offset, int64 align) { + if (align == 0) { + return offset; + } + ASSERT(IsPowerOf2(align), "align must be power of 2!"); + return RoundDownConst(offset, align); +} + inline bool IsAlignedTo(uint64 offset, uint64 align) { ASSERT(IsPowerOf2(align), "align must be power of 2!"); return (offset & 
(align - 1)) == 0; diff --git a/src/mapleall/maple_driver/include/driver_option_common.h b/src/mapleall/maple_driver/include/driver_option_common.h index 0b79889d36..508a7879aa 100644 --- a/src/mapleall/maple_driver/include/driver_option_common.h +++ b/src/mapleall/maple_driver/include/driver_option_common.h @@ -52,6 +52,7 @@ enum DriverOptionIndex { kTimePhases, kGenMeMpl, kGenMapleBC, + kGenLMBC, kGenVtableImpl, kVerbose, kAllDebug, diff --git a/src/mapleall/maple_driver/include/driver_runner.h b/src/mapleall/maple_driver/include/driver_runner.h index d765a88484..ca245d1e22 100644 --- a/src/mapleall/maple_driver/include/driver_runner.h +++ b/src/mapleall/maple_driver/include/driver_runner.h @@ -37,7 +37,7 @@ class DriverRunner final { DriverRunner(MIRModule *theModule, const std::vector &exeNames, InputFileType inpFileType, const std::string &mpl2mplInput, const std::string &meInput, const std::string &actualInput, bool dwarf, bool fileParsed = false, bool timePhases = false, - bool genVtableImpl = false, bool genMeMpl = false, bool genMapleBC = false) + bool genVtableImpl = false, bool genMeMpl = false, bool genMapleBC = false, bool genLMBC = false) : theModule(theModule), exeNames(exeNames), mpl2mplInput(mpl2mplInput), @@ -49,6 +49,7 @@ class DriverRunner final { genVtableImpl(genVtableImpl), genMeMpl(genMeMpl), genMapleBC(genMapleBC), + genLMBC(genLMBC), inputFileType(inpFileType) { auto lastDot = actualInput.find_last_of("."); baseName = (lastDot == std::string::npos) ? actualInput : actualInput.substr(0, lastDot); @@ -56,9 +57,9 @@ class DriverRunner final { DriverRunner(MIRModule *theModule, const std::vector &exeNames, InputFileType inpFileType, const std::string &actualInput, bool dwarf, bool fileParsed = false, bool timePhases = false, - bool genVtableImpl = false, bool genMeMpl = false, bool genMapleBC = false) + bool genVtableImpl = false, bool genMeMpl = false, bool genMapleBC = false, bool genLMBC = false) : DriverRunner(theModule, exeNames, inpFileType, "", "", actualInput, dwarf, - fileParsed, timePhases, genVtableImpl, genMeMpl, genMapleBC) { + fileParsed, timePhases, genVtableImpl, genMeMpl, genMapleBC, genLMBC) { auto lastDot = actualInput.find_last_of("."); baseName = (lastDot == std::string::npos) ? 
actualInput : actualInput.substr(0, lastDot); } @@ -107,6 +108,7 @@ class DriverRunner final { bool genVtableImpl = false; bool genMeMpl = false; bool genMapleBC = false; + bool genLMBC = false; std::string printOutExe = ""; std::string baseName; std::string outputFile; diff --git a/src/mapleall/maple_driver/include/mpl_options.h b/src/mapleall/maple_driver/include/mpl_options.h index 68fc647689..e0bcb3cf62 100644 --- a/src/mapleall/maple_driver/include/mpl_options.h +++ b/src/mapleall/maple_driver/include/mpl_options.h @@ -372,6 +372,10 @@ class MplOptions { return genMapleBC; } + bool HasSetGenLMBC() const { + return genLMBC; + } + bool HasSetGenOnlyObj() const { return genObj; } @@ -471,6 +475,7 @@ class MplOptions { bool genObj = false; bool genMeMpl = false; bool genMapleBC = false; + bool genLMBC = false; bool runMaplePhaseOnly = true; bool genVtableImpl = false; bool hasPrinted = false; diff --git a/src/mapleall/maple_driver/src/driver_option_common.cpp b/src/mapleall/maple_driver/src/driver_option_common.cpp index a59b93c891..83d21dacec 100644 --- a/src/mapleall/maple_driver/src/driver_option_common.cpp +++ b/src/mapleall/maple_driver/src/driver_option_common.cpp @@ -330,6 +330,15 @@ const mapleOption::Descriptor kUsages[] = { " --genmaplebc \tGenerate .mbc file\n", "driver", {} }, + { kGenLMBC, + 0, + "", + "genlmbc", + kBuildTypeProduct, + kArgCheckPolicyNone, + " --genlmbc \tGenerate .lmbc file\n", + "driver", + {} }, { kGenObj, kEnable, "c", diff --git a/src/mapleall/maple_driver/src/driver_runner.cpp b/src/mapleall/maple_driver/src/driver_runner.cpp index 741f8ba2c5..3433eed3d0 100644 --- a/src/mapleall/maple_driver/src/driver_runner.cpp +++ b/src/mapleall/maple_driver/src/driver_runner.cpp @@ -267,6 +267,7 @@ void DriverRunner::RunNewPM(const std::string &output, const std::string &vtable } MeFuncPM::genMeMpl = genMeMpl; MeFuncPM::genMapleBC = genMapleBC; + MeFuncPM::genLMBC = genLMBC; MeFuncPM::timePhases = timePhases; MPLTimer timer; timer.Start(); diff --git a/src/mapleall/maple_driver/src/maple_comb_compiler.cpp b/src/mapleall/maple_driver/src/maple_comb_compiler.cpp index 1433af7fa2..64623f455f 100644 --- a/src/mapleall/maple_driver/src/maple_comb_compiler.cpp +++ b/src/mapleall/maple_driver/src/maple_comb_compiler.cpp @@ -181,7 +181,8 @@ ErrorCode MapleCombCompiler::Compile(MplOptions &options, const Action &action, DriverRunner runner(theModule.get(), options.GetSelectedExes(), action.GetInputFileType(), fileName, fileName, fileName, options.WithDwarf(), fileParsed, options.HasSetTimePhases(), options.HasSetGenVtableImpl(), - options.HasSetGenMeMpl(), options.HasSetGenMapleBC()); + options.HasSetGenMeMpl(), options.HasSetGenMapleBC(), + options.HasSetGenLMBC()); ErrorCode ret = kErrorNoError; MIRParser parser(*theModule); diff --git a/src/mapleall/maple_driver/src/mpl_options.cpp b/src/mapleall/maple_driver/src/mpl_options.cpp index 9bd86f1dcd..0309d54ab9 100644 --- a/src/mapleall/maple_driver/src/mpl_options.cpp +++ b/src/mapleall/maple_driver/src/mpl_options.cpp @@ -192,6 +192,9 @@ ErrorCode MplOptions::HandleGeneralOptions() { case kGenMapleBC: genMapleBC = true; break; + case kGenLMBC: + genLMBC = true; + break; case kGenVtableImpl: genVtableImpl = true; break; diff --git a/src/mapleall/maple_ir/include/mir_function.h b/src/mapleall/maple_ir/include/mir_function.h index bc7a50a3fa..7468d9ce26 100644 --- a/src/mapleall/maple_ir/include/mir_function.h +++ b/src/mapleall/maple_ir/include/mir_function.h @@ -810,7 +810,7 @@ class MIRFunction { ++tempCount; } - const 
uint8 *GetFormalWordsTypeTagged() const { + uint8 *GetFormalWordsTypeTagged() const { return formalWordsTypeTagged; } void SetFormalWordsTypeTagged(uint8 *tagged) { @@ -820,7 +820,7 @@ class MIRFunction { return &formalWordsTypeTagged; } - const uint8 *GetLocalWordsTypeTagged() const { + uint8 *GetLocalWordsTypeTagged() const { return localWordsTypeTagged; } void SetLocalWordsTypeTagged(uint8 *tagged) { @@ -830,7 +830,7 @@ class MIRFunction { return &localWordsTypeTagged; } - const uint8 *GetFormalWordsRefCounted() const { + uint8 *GetFormalWordsRefCounted() const { return formalWordsRefCounted; } void SetFormalWordsRefCounted(uint8 *counted) { @@ -840,7 +840,7 @@ class MIRFunction { return &formalWordsRefCounted; } - const uint8 *GetLocalWordsRefCounted() const { + uint8 *GetLocalWordsRefCounted() const { return localWordsRefCounted; } void SetLocalWordsRefCounted(uint8 *counted) { diff --git a/src/mapleall/maple_ir/include/mir_module.h b/src/mapleall/maple_ir/include/mir_module.h index 4421bfc2b0..c62444e5fd 100644 --- a/src/mapleall/maple_ir/include/mir_module.h +++ b/src/mapleall/maple_ir/include/mir_module.h @@ -46,9 +46,11 @@ enum MIRFlavor { kFeProduced, kMeProduced, kBeLowered, + kFlavorMbc, kMmpl, kCmplV1, - kCmpl // == CMPLv2 + kCmpl, // == CMPLv2 + kFlavorLmbc, }; @@ -174,10 +176,7 @@ class MIRModule { MIRModule &operator=(const MIRModule &module) = delete; ~MIRModule(); - const MemPool *GetMemPool() const { - return memPool; - } - MemPool *GetMemPool() { + MemPool *GetMemPool() const { return memPool; } MemPool *GetPragmaMemPool() { diff --git a/src/mapleall/maple_ir/include/mir_nodes.h b/src/mapleall/maple_ir/include/mir_nodes.h index 219c3338e3..6c1df6ab78 100755 --- a/src/mapleall/maple_ir/include/mir_nodes.h +++ b/src/mapleall/maple_ir/include/mir_nodes.h @@ -533,7 +533,7 @@ class IreadFPoffNode : public BaseNode { public: IreadFPoffNode() : BaseNode(OP_ireadfpoff) {} - IreadFPoffNode(PrimType ptyp, int32 ofst) : BaseNode(OP_ireadfpoff, ptyp), offset(ofst) {} + IreadFPoffNode(PrimType ptyp, int32 ofst) : BaseNode(OP_ireadfpoff, ptyp, 0), offset(ofst) {} virtual ~IreadFPoffNode() = default; diff --git a/src/mapleall/maple_ir/src/bin_func_export.cpp b/src/mapleall/maple_ir/src/bin_func_export.cpp index ce263debcf..89fb69531a 100644 --- a/src/mapleall/maple_ir/src/bin_func_export.cpp +++ b/src/mapleall/maple_ir/src/bin_func_export.cpp @@ -40,6 +40,10 @@ void BinaryMplExport::OutputFuncIdInfo(MIRFunction *func) { WriteNum(kBinFuncIdInfoStart); WriteNum(func->GetPuidxOrigin()); // the funcid OutputInfoVector(func->GetInfoVector(), func->InfoIsString()); + if (mod.GetFlavor() == kFlavorLmbc) { + WriteNum(func->GetUpFormalSize()); + WriteNum(func->GetFrameSize()); + } WriteNum(~kBinFuncIdInfoStart); } @@ -255,6 +259,16 @@ void BinaryMplExport::OutputExpression(BaseNode *e) { WriteNum(irNode->GetFieldID()); break; } + case OP_ireadoff: { + IreadoffNode *irNode = static_cast(e); + WriteNum(irNode->GetOffset()); + break; + } + case OP_ireadfpoff: { + IreadFPoffNode *irNode = static_cast(e); + WriteNum(irNode->GetOffset()); + break; + } case OP_sext: case OP_zext: case OP_extractbits: { @@ -263,6 +277,12 @@ void BinaryMplExport::OutputExpression(BaseNode *e) { WriteNum(extNode->GetBitsSize()); break; } + case OP_depositbits: { + DepositbitsNode *dbNode = static_cast(e); + WriteNum(dbNode->GetBitsOffset()); + WriteNum(dbNode->GetBitsSize()); + break; + } case OP_gcmallocjarray: case OP_gcpermallocjarray: { JarrayMallocNode *gcNode = static_cast(e); @@ -421,6 +441,12 @@ void 
BinaryMplExport::OutputBlockNode(BlockNode *block) { WriteNum(iassoff->GetOffset()); break; } + case OP_iassignfpoff: { + IassignFPoffNode *iassfpoff = static_cast(s); + WriteNum(iassfpoff->GetPrimType()); + WriteNum(iassfpoff->GetOffset()); + break; + } case OP_call: case OP_virtualcall: case OP_virtualicall: @@ -556,6 +582,16 @@ void BinaryMplExport::OutputBlockNode(BlockNode *block) { } break; } + case OP_rangegoto: { + RangeGotoNode *rgoto = static_cast(s); + WriteNum(rgoto->GetTagOffset()); + WriteNum(static_cast(rgoto->GetRangeGotoTable().size())); + for (SmallCasePair cpair : rgoto->GetRangeGotoTable()) { + WriteNum(cpair.first); + WriteNum(cpair.second); + } + break; + } case OP_jstry: { JsTryNode *tryNode = static_cast(s); WriteNum(tryNode->GetCatchOffset()); @@ -695,7 +731,9 @@ void BinaryMplExport::WriteFunctionBodyField(uint64 contentIdx, std::unordered_s OutputLabelTab(func); OutputLocalTypeNameTab(func->GetTypeNameTab()); OutputFormalsStIdx(func); - OutputAliasMap(func->GetAliasVarMap()); + if (mod.GetFlavor() < kMmpl) { + OutputAliasMap(func->GetAliasVarMap()); + } lastOutputSrcPosition = SrcPosition(); OutputBlockNode(func->GetBody()); size++; diff --git a/src/mapleall/maple_ir/src/bin_func_import.cpp b/src/mapleall/maple_ir/src/bin_func_import.cpp index 0802da3407..5891b5cf72 100644 --- a/src/mapleall/maple_ir/src/bin_func_import.cpp +++ b/src/mapleall/maple_ir/src/bin_func_import.cpp @@ -46,6 +46,10 @@ void BinaryMplImport::ImportFuncIdInfo(MIRFunction *func) { CHECK_FATAL(tag == kBinFuncIdInfoStart, "kBinFuncIdInfoStart expected"); func->SetPuidxOrigin(static_cast(ReadNum())); ImportInfoVector(func->GetInfoVector(), func->InfoIsString()); + if (mod.GetFlavor() == kFlavorLmbc) { + func->SetUpFormalSize(ReadNum()); + func->SetFrameSize(ReadNum()); + } tag = ReadNum(); CHECK_FATAL(tag == ~kBinFuncIdInfoStart, "pattern mismatch in ImportFuncIdInfo()"); } @@ -307,6 +311,17 @@ BaseNode *BinaryMplImport::ImportExpression(MIRFunction *func) { irNode->SetOpnd(ImportExpression(func), 0); return irNode; } + case OP_ireadoff: { + int32 ofst = ReadNum(); + IreadoffNode *irNode = mod.CurFuncCodeMemPool()->New(typ, ofst); + irNode->SetOpnd(ImportExpression(func), 0); + return irNode; + } + case OP_ireadfpoff: { + int32 ofst = ReadNum(); + IreadFPoffNode *irNode = mod.CurFuncCodeMemPool()->New(typ, ofst); + return irNode; + } case OP_sext: case OP_zext: case OP_extractbits: { @@ -316,6 +331,14 @@ BaseNode *BinaryMplImport::ImportExpression(MIRFunction *func) { extNode->SetOpnd(ImportExpression(func), 0); return extNode; } + case OP_depositbits: { + DepositbitsNode *dbNode = mod.CurFuncCodeMemPool()->New(op, typ); + dbNode->SetBitsOffset(static_cast(ReadNum())); + dbNode->SetBitsSize(static_cast(ReadNum())); + dbNode->SetOpnd(ImportExpression(func), 0); + dbNode->SetOpnd(ImportExpression(func), 1); + return dbNode; + } case OP_gcmallocjarray: case OP_gcpermallocjarray: { JarrayMallocNode *gcNode = mod.CurFuncCodeMemPool()->New(op, typ); @@ -524,6 +547,14 @@ BlockNode *BinaryMplImport::ImportBlockNode(MIRFunction *func) { stmt = s; break; } + case OP_iassignfpoff: { + IassignFPoffNode *s = func->GetCodeMemPool()->New(); + s->SetPrimType((PrimType)ReadNum()); + s->SetOffset(static_cast(ReadNum())); + s->SetOpnd(ImportExpression(func), 0); + stmt = s; + break; + } case OP_call: case OP_virtualcall: case OP_virtualicall: @@ -746,6 +777,19 @@ BlockNode *BinaryMplImport::ImportBlockNode(MIRFunction *func) { stmt = s; break; } + case OP_rangegoto: { + RangeGotoNode *s = 
mod.CurFuncCodeMemPool()->New(mod); + s->SetTagOffset(ReadNum()); + uint32 tagSize = static_cast(ReadNum()); + for (uint32 i = 0; i < tagSize; ++i) { + uint16 casetag = ReadNum(); + LabelIdx lidx(ReadNum()); + s->AddRangeGoto(casetag, lidx); + } + s->SetOpnd(ImportExpression(func), 0); + stmt = s; + break; + } case OP_jstry: { JsTryNode *s = mod.CurFuncCodeMemPool()->New(); s->SetCatchOffset(static_cast(ReadNum())); @@ -880,7 +924,9 @@ void BinaryMplImport::ReadFunctionBodyField() { ImportLabelTab(fn); ImportLocalTypeNameTable(fn->GetTypeNameTab()); ImportFormalsStIdx(fn); - ImportAliasMap(fn); + if (mod.GetFlavor() < kMmpl) { + ImportAliasMap(fn); + } (void)ImportBlockNode(fn); mod.AddFunction(fn); } diff --git a/src/mapleall/maple_ir/src/bin_mpl_export.cpp b/src/mapleall/maple_ir/src/bin_mpl_export.cpp index 5330c0f093..aab834d331 100644 --- a/src/mapleall/maple_ir/src/bin_mpl_export.cpp +++ b/src/mapleall/maple_ir/src/bin_mpl_export.cpp @@ -749,6 +749,9 @@ void BinaryMplExport::WriteHeaderField(uint64 contentIdx) { WriteNum(mod.GetFlavor()); WriteNum(mod.GetSrcLang()); WriteNum(mod.GetID()); + if (mod.GetFlavor() == kFlavorLmbc) { + WriteNum(mod.GetGlobalMemSize()); + } WriteNum(mod.GetNumFuncs()); WriteAsciiStr(mod.GetEntryFuncName()); OutputInfoVector(mod.GetFileInfo(), mod.GetFileInfoIsString()); diff --git a/src/mapleall/maple_ir/src/bin_mpl_import.cpp b/src/mapleall/maple_ir/src/bin_mpl_import.cpp index eab676c126..64fb010709 100644 --- a/src/mapleall/maple_ir/src/bin_mpl_import.cpp +++ b/src/mapleall/maple_ir/src/bin_mpl_import.cpp @@ -1155,6 +1155,9 @@ void BinaryMplImport::ReadHeaderField() { mod.SetFlavor((MIRFlavor)ReadNum()); mod.SetSrcLang((MIRSrcLang)ReadNum()); mod.SetID(static_cast(ReadNum())); + if (mod.GetFlavor() == kFlavorLmbc) { + mod.SetGlobalMemSize(ReadNum()); + } mod.SetNumFuncs(static_cast(ReadNum())); std::string inStr; ReadAsciiStr(inStr); diff --git a/src/mapleall/maple_ir/src/mir_function.cpp b/src/mapleall/maple_ir/src/mir_function.cpp index e9e85f4599..4053b90321 100644 --- a/src/mapleall/maple_ir/src/mir_function.cpp +++ b/src/mapleall/maple_ir/src/mir_function.cpp @@ -362,6 +362,8 @@ void MIRFunction::Dump(bool withoutBody) { if (module->GetFlavor() < kMmpl) { DumpFlavorLoweredThanMmpl(); + } else { + LogInfo::MapleLogger() << " () void"; } // codeMemPool is nullptr, means maple_ir has been released for memory's sake @@ -382,7 +384,7 @@ void MIRFunction::Dump(bool withoutBody) { void MIRFunction::DumpUpFormal(int32 indent) const { PrintIndentation(indent + 1); - LogInfo::MapleLogger() << "upFormalSize " << GetUpFormalSize() << '\n'; + LogInfo::MapleLogger() << "upformalsize " << GetUpFormalSize() << '\n'; if (localWordsTypeTagged != nullptr) { PrintIndentation(indent + 1); LogInfo::MapleLogger() << "formalWordsTypeTagged = [ "; @@ -411,7 +413,7 @@ void MIRFunction::DumpUpFormal(int32 indent) const { void MIRFunction::DumpFrame(int32 indent) const { PrintIndentation(indent + 1); - LogInfo::MapleLogger() << "frameSize " << static_cast(GetFrameSize()) << '\n'; + LogInfo::MapleLogger() << "framesize " << static_cast(GetFrameSize()) << '\n'; if (localWordsTypeTagged != nullptr) { PrintIndentation(indent + 1); LogInfo::MapleLogger() << "localWordsTypeTagged = [ "; diff --git a/src/mapleall/maple_ir/src/mir_module.cpp b/src/mapleall/maple_ir/src/mir_module.cpp index 324fcdc8da..f4c5b29603 100644 --- a/src/mapleall/maple_ir/src/mir_module.cpp +++ b/src/mapleall/maple_ir/src/mir_module.cpp @@ -231,7 +231,7 @@ void MIRModule::DumpGlobals(bool emitStructureType) 
const { } LogInfo::MapleLogger() << std::dec; } - if (flavor < kMmpl) { + if (flavor < kMmpl || flavor == kFlavorLmbc) { for (auto it = typeDefOrder.begin(); it != typeDefOrder.end(); ++it) { TyIdx tyIdx = typeNameTab->GetTyIdxFromGStrIdx(*it); const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(*it); diff --git a/src/mapleall/maple_ir/src/mir_parser.cpp b/src/mapleall/maple_ir/src/mir_parser.cpp index a593036cb0..e8773d1071 100755 --- a/src/mapleall/maple_ir/src/mir_parser.cpp +++ b/src/mapleall/maple_ir/src/mir_parser.cpp @@ -616,10 +616,10 @@ bool MIRParser::ParseStmtRangegoto(StmtNodePtr &stmt) { return false; } if (!IsPrimitiveInteger(expr->GetPrimType())) { - rangeGotoNode->SetOpnd(expr, 0); Error("expect expression return integer but get "); return false; } + rangeGotoNode->SetOpnd(expr, 0); if (lexer.NextToken() == TK_intconst) { rangeGotoNode->SetTagOffset(static_cast(lexer.GetTheIntVal())); } else { @@ -2637,10 +2637,10 @@ bool MIRParser::ParseExprIreadFPoff(BaseNodePtr &expr) { return false; } iReadOff->SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx)); - if (!IsPrimitiveScalar(iReadOff->GetPrimType())) { - Error("only scalar types allowed for ireadoff"); - return false; - } + //if (!IsPrimitiveScalar(iReadOff->GetPrimType())) { + // Error("only scalar types allowed for ireadoff"); + // return false; + //} if (lexer.GetTokenKind() != TK_intconst) { Error("expect offset but get "); return false; diff --git a/src/mapleall/maple_me/BUILD.gn b/src/mapleall/maple_me/BUILD.gn index 91dc3354dc..bdc24d533e 100755 --- a/src/mapleall/maple_me/BUILD.gn +++ b/src/mapleall/maple_me/BUILD.gn @@ -23,6 +23,12 @@ include_directories = [ "${MAPLEALL_ROOT}/maple_util/include", "${MAPLEALL_ROOT}/maple_driver/include", "${MAPLEALL_ROOT}/maple_phase/include", + "${MAPLEALL_ROOT}/maple_be/include/be", + "${MAPLEALL_ROOT}/maple_be/include/cg", + "${MAPLEALL_ROOT}/maple_be/include/ad", + "${MAPLEALL_ROOT}/maple_be/include/ad/target", + "${MAPLE_BUILD_OUTPUT}/common/target", + "${THIRD_PARTY_ROOT}/llvm_modified/llvm/include/llvm/BinaryFormat", ] src_libmplme = [ @@ -116,6 +122,8 @@ src_libmplme = [ "src/me_safety_warning.cpp", "src/lfo_unroll.cpp", "src/me_slp.cpp", + "src/lmbc_memlayout.cpp", + "src/lmbc_lower.cpp", ] src_libmplmewpo = [ diff --git a/src/mapleall/maple_me/include/lmbc_lower.h b/src/mapleall/maple_me/include/lmbc_lower.h new file mode 100644 index 0000000000..a0a31a434b --- /dev/null +++ b/src/mapleall/maple_me/include/lmbc_lower.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) [2022] Futurewei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. 
+ */ + +#ifndef MAPLEME_INCLUDE_LMBC_LOWER_H_ +#define MAPLEME_INCLUDE_LMBC_LOWER_H_ +#include "mir_builder.h" +#include "lmbc_memlayout.h" + +namespace maple { + +class LMBCLowerer { + public: + MIRModule *mirModule; + MIRFunction *func; + maplebe::BECommon *becommon; + MIRBuilder *mirBuilder; + GlobalMemLayout *globmemlayout; + LMBCMemLayout *memlayout; + + public: + explicit LMBCLowerer(MIRModule *mod, maplebe::BECommon *becmmn, MIRFunction *f, GlobalMemLayout *gmemlayout, LMBCMemLayout *lmemlayout) : + mirModule(mod), func(f), becommon(becmmn), mirBuilder(mod->GetMIRBuilder()), + globmemlayout(gmemlayout), memlayout(lmemlayout) {} + + BaseNode *ReadregNodeForSymbol(MIRSymbol *); + PregIdx GetSpecialRegFromSt(const MIRSymbol *); + BaseNode *LowerDread(AddrofNode *); + BaseNode *LowerAddrof(AddrofNode *); + BaseNode *LowerIread(IreadNode *); + BaseNode *LowerExpr(BaseNode *expr); + void LowerAggDassign(BlockNode *, const DassignNode *); + void LowerDassign(DassignNode *, BlockNode *); + void LowerIassign(IassignNode *, BlockNode *); + void LowerAggIassign(BlockNode *, IassignNode *); + BlockNode *LowerBlock(BlockNode *); + void LowerFunction(); +}; + +} // namespace maple + +#endif // MAPLEME_INCLUDE_LMBC_LOWER_H_ diff --git a/src/mapleall/maple_me/include/lmbc_memlayout.h b/src/mapleall/maple_me/include/lmbc_memlayout.h new file mode 100644 index 0000000000..67c921c334 --- /dev/null +++ b/src/mapleall/maple_me/include/lmbc_memlayout.h @@ -0,0 +1,154 @@ +/* + * Copyright (c) [2022] Futurewei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. 
+ */ +#ifndef MAPLEME_INCLUDE_LMBC_MEMLAYOUT_H +#define MAPLEME_INCLUDE_LMBC_MEMLAYOUT_H + +#include "mir_type.h" +#include "mir_const.h" +#include "mir_symbol.h" +#include "mir_function.h" +#include "becommon.h" + +namespace maple { + +typedef enum { + MS_unknown, + MS_upformal, // for the incoming parameters that are passed on the caller's stack + MS_formal, // for the incoming parameters that are passed in registers + MS_actual, // for the outgoing parameters + MS_local, // for all local variables and temporaries + MS_FPbased, // addressed via offset from the frame pointer + MS_SPbased, // addressed via offset from the stack pointer + MS_GPbased, // addressed via offset from the global pointer +} MemSegmentKind; + +class MemSegment; + +// describes where a symbol is allocated +class SymbolAlloc { + public: + MemSegment *mem_segment; + int32 offset; + + public: + SymbolAlloc() : mem_segment(nullptr), offset(0) {} + + ~SymbolAlloc() {} + +}; // class SymbolAlloc + +// keeps track of the allocation of a memory segment +class MemSegment { + public: + MemSegmentKind kind; + int32 size; // size is negative if allocated offsets are negative + SymbolAlloc how_alloc; // this segment may be allocated inside another segment + public: + MemSegment(MemSegmentKind k) : kind(k), size(0) {} + + MemSegment(MemSegmentKind k, int32 sz) : kind(k), size(sz) {} + + ~MemSegment() {} + +}; // class MemSegment + +class LMBCMemLayout { + public: + maplebe::BECommon &be; + MIRFunction *func; + MemSegment seg_upformal; + MemSegment seg_formal; + MemSegment seg_actual; + MemSegment seg_FPbased; + MemSegment seg_SPbased; + MapleVector sym_alloc_table; // index is StIdx + + public: + uint32 FindLargestActualArea(void); + uint32 FindLargestActualArea(StmtNode *, int &); + explicit LMBCMemLayout(maplebe::BECommon &b, MIRFunction *f, MapleAllocator *mallocator) + : be(b), + func(f), + seg_upformal(MS_upformal), + seg_formal(MS_formal), + seg_actual(MS_actual), + seg_FPbased(MS_FPbased, -GetPrimTypeSize(PTY_ptr)), + seg_SPbased(MS_SPbased), + sym_alloc_table(mallocator->Adapter()) { + sym_alloc_table.resize(f->GetSymTab()->GetSymbolTableSize()); + } + + ~LMBCMemLayout() {} + + void LayoutStackFrame(void); + int32 StackFrameSize(void) const { + return seg_SPbased.size - seg_FPbased.size; + } + + int32 UpformalSize(void) const { + return seg_upformal.size; + } +}; + +class GlobalMemLayout { + public: + MemSegment seg_GPbased; + MapleVector sym_alloc_table; // index is StIdx + private: + maplebe::BECommon &be_; + + public: + GlobalMemLayout(maplebe::BECommon &be, MapleAllocator *mallocator); + ~GlobalMemLayout() {} + + private: + void FillScalarValueInMap(uint32 startaddress, PrimType pty, MIRConst *c); + void FillTypeValueInMap(uint32 startaddress, MIRType *ty, MIRConst *c); + void FillSymbolValueInMap(const MIRSymbol *sym); +}; + +// for specifying how a parameter is passed +struct PLocInfo { + int32 memoffset; + int32 memsize; +}; + +// for processing an incoming or outgoing parameter list +class ParmLocator { + private: + maplebe::BECommon &be_; + int32 parm_num_; // number of all types of parameters processed so far + int32 last_memoffset_; + + public: + ParmLocator(maplebe::BECommon &b) : be_(b), parm_num_(0), last_memoffset_(0) {} + + ~ParmLocator() {} + + void LocateNextParm(const MIRType *ty, PLocInfo &ploc); +}; + +// given the type of the return value, determines the return mechanism +class ReturnMechanism { + public: + bool fake_first_parm; // whether returning in memory via fake first parameter + PrimType 
ptype0; // the primitive type stored in retval0 + + ReturnMechanism(const MIRType *retty, maplebe::BECommon &be); +}; + +} /* namespace maple */ + +#endif /* MAPLEME_INCLUDE_LMBC_MEMLAYOUT_H */ diff --git a/src/mapleall/maple_me/include/me_phase_manager.h b/src/mapleall/maple_me/include/me_phase_manager.h index 4dd7dbf3bf..dfbf24e15f 100644 --- a/src/mapleall/maple_me/include/me_phase_manager.h +++ b/src/mapleall/maple_me/include/me_phase_manager.h @@ -104,6 +104,7 @@ class MeFuncPM : public FunctionPM { static bool genMeMpl; static bool timePhases; static bool genMapleBC; + static bool genLMBC; void SetMeInput(const std::string &str) { meInput = str; diff --git a/src/mapleall/maple_me/src/lmbc_lower.cpp b/src/mapleall/maple_me/src/lmbc_lower.cpp new file mode 100644 index 0000000000..a33cc3be53 --- /dev/null +++ b/src/mapleall/maple_me/src/lmbc_lower.cpp @@ -0,0 +1,563 @@ +/* + * Copyright (c) [2022] Futurewei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. + */ + +#include "lmbc_lower.h" + +namespace maple { + +using namespace std; + +inline PrimType UnsignedPrimType(int32 bytesize) { + if (bytesize == 4) { + return PTY_u32; + } + if (bytesize == 2) { + return PTY_u16; + } + if (bytesize == 1) { + return PTY_u8; + } + return PTY_u32; +} + +PregIdx LMBCLowerer::GetSpecialRegFromSt(const MIRSymbol *sym) { + MIRStorageClass storageClass = sym->GetStorageClass(); + PregIdx specreg = 0; + if (storageClass == kScAuto || storageClass == kScFormal) { + CHECK(sym->GetStIndex() < memlayout->sym_alloc_table.size(), + "index out of range in LMBCLowerer::GetSpecialRegFromSt"); + SymbolAlloc symalloc = memlayout->sym_alloc_table[sym->GetStIndex()]; + if (symalloc.mem_segment->kind == MS_upformal || symalloc.mem_segment->kind == MS_formal || + symalloc.mem_segment->kind == MS_FPbased) { + specreg = -kSregFp; + } else if (symalloc.mem_segment->kind == MS_actual || symalloc.mem_segment->kind == MS_SPbased) { + specreg = -kSregSp; + } else { + CHECK_FATAL(false, "LMBCLowerer::LowerDread: bad memory layout for local variable"); + } + } else if (storageClass == kScGlobal || storageClass == kScFstatic || storageClass == kScExtern || storageClass == kScPstatic) { + specreg = -kSregGp; + } else { + CHECK_FATAL(false, "LMBCLowerer::LowerDread: NYI"); + } + return specreg; +} + +BaseNode *LMBCLowerer::ReadregNodeForSymbol(MIRSymbol *sym) { + return mirBuilder->CreateExprRegread(LOWERED_PTR_TYPE, GetSpecialRegFromSt(sym)); +} + +BaseNode *LMBCLowerer::LowerAddrof(AddrofNode *expr) { + MIRSymbol *symbol = func->GetLocalOrGlobalSymbol(expr->GetStIdx()); + if (symbol->GetStorageClass() == kScText) { + return expr; + } + int32 offset = 0; + if (expr->GetFieldID() != 0) { + MIRStructType *structty = dynamic_cast(symbol->GetType()); + CHECK_FATAL(structty, "LMBCLowerer::LowerAddrof: non-zero fieldID for non-structure"); + offset = becommon->GetFieldOffset(*structty, expr->GetFieldID()).first; + } + //BaseNode *rrn = ReadregNodeForSymbol(symbol); + PrimType symty = (expr->GetPrimType() == 
PTY_simplestr || expr->GetPrimType() == PTY_simpleobj) ? expr->GetPrimType() : LOWERED_PTR_TYPE; + BaseNode *rrn = mirBuilder->CreateExprRegread(symty, GetSpecialRegFromSt(symbol)); + offset += symbol->IsLocal() ? memlayout->sym_alloc_table[symbol->GetStIndex()].offset + : globmemlayout->sym_alloc_table[symbol->GetStIndex()].offset; + return (offset == 0) ? rrn + : mirBuilder->CreateExprBinary(OP_add, *GlobalTables::GetTypeTable().GetTypeFromTyIdx((TyIdx)expr->GetPrimType()), rrn, + mirBuilder->GetConstInt(offset)); +} + +BaseNode *LMBCLowerer::LowerDread(AddrofNode *expr) { + MIRSymbol *symbol = func->GetLocalOrGlobalSymbol(expr->GetStIdx()); + PrimType symty = symbol->GetType()->GetPrimType(); + int32 offset = 0; + if (expr->GetFieldID() != 0) { + MIRStructType *structty = dynamic_cast(symbol->GetType()); + CHECK_FATAL(structty, "LMBCLowerer::LowerDread: non-zero fieldID for non-structure"); + FieldPair thepair = structty->TraverseToField(expr->GetFieldID()); + symty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first)->GetPrimType(); + offset = becommon->GetFieldOffset(*structty, expr->GetFieldID()).first; + } + // allow dread class reference + PregIdx spcreg = GetSpecialRegFromSt(symbol); + if (spcreg == -kSregFp) { + CHECK_FATAL(symbol->IsLocal(), "load from fp non local?"); + IreadFPoffNode *ireadoff = mirBuilder->CreateExprIreadFPoff( + symty, memlayout->sym_alloc_table[symbol->GetStIndex()].offset + offset); + return ireadoff; + } else { + BaseNode *rrn = mirBuilder->CreateExprRegread(LOWERED_PTR_TYPE, spcreg); + SymbolAlloc &symalloc = symbol->IsLocal() ? memlayout->sym_alloc_table[symbol->GetStIndex()] + : globmemlayout->sym_alloc_table[symbol->GetStIndex()]; + IreadoffNode *ireadoff = mirBuilder->CreateExprIreadoff(symty, symalloc.offset + offset, rrn); + return ireadoff; + } +} + +static MIRType *GetPointedToType(const MIRPtrType *pointerty) { + MIRType *atype = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerty->GetPointedTyIdx()); + if (atype->GetKind() == kTypeArray) { + MIRArrayType *arraytype = static_cast(atype); + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(arraytype->GetElemTyIdx()); + } + if (atype->GetKind() == kTypeFArray || atype->GetKind() == kTypeJArray) { + MIRFarrayType *farraytype = static_cast(atype); + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(farraytype->GetElemTyIdx()); + } + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerty->GetPointedTyIdx()); +} + +BaseNode *LMBCLowerer::LowerIread(IreadNode *expr) { + int32 offset = 0; + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(expr->GetTyIdx()); + MIRPtrType *pointerty = static_cast(type); + CHECK_FATAL(pointerty, "expect a pointer type at iread node"); + if (expr->GetFieldID() != 0) { + MIRStructType *structty = dynamic_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerty->GetPointedTyIdx())); + CHECK_FATAL(structty, "SelectIread: non-zero fieldID for non-structure"); + FieldPair thepair = structty->TraverseToField(expr->GetFieldID()); + type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + offset = becommon->GetFieldOffset(*structty, expr->GetFieldID()).first; + } else { + type = GetPointedToType(pointerty); + } + BaseNode *ireadoff = mirBuilder->CreateExprIreadoff(type->GetPrimType(), offset, expr->Opnd(0)); + return ireadoff; +} + +BaseNode *LMBCLowerer::LowerExpr(BaseNode *expr) { + for (size_t i = 0; i < expr->NumOpnds(); ++i) { + expr->SetOpnd(LowerExpr(expr->Opnd(i)), i); + } + switch (expr->GetOpCode()) 
{ + case OP_dread: return LowerDread(static_cast(expr)); + case OP_addrof: return LowerAddrof(static_cast(expr)); + case OP_iread: return LowerIread(static_cast(expr)); + default: ; + } + return expr; +} + +void LMBCLowerer::LowerAggDassign(BlockNode *newblk, const DassignNode *dsnode) { + MIRSymbol *lhssymbol = func->GetLocalOrGlobalSymbol(dsnode->GetStIdx()); + int32 lhsoffset = 0; + MIRType *lhsty = lhssymbol->GetType(); + if (dsnode->GetFieldID() != 0) { + MIRStructType *structty = dynamic_cast(lhssymbol->GetType()); + CHECK_FATAL(structty, "LowerAggDassign: non-zero fieldID for non-structure"); + FieldPair thepair = structty->TraverseToField(dsnode->GetFieldID()); + lhsty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + lhsoffset = becommon->GetFieldOffset(*structty, dsnode->GetFieldID()).first; + } + uint32 lhsalign = becommon->GetTypeAlign(lhsty->GetTypeIndex()); + uint32 lhssize = becommon->GetTypeSize(lhsty->GetTypeIndex()); + + uint32 rhsalign; + uint32 alignused; + int32 rhsoffset = 0; + BaseNode *loadnode = nullptr; + IassignoffNode *iassignoff = nullptr; + if (dsnode->Opnd(0)->GetOpCode() == OP_dread) { + AddrofNode *rhsdread = static_cast(dsnode->Opnd(0)); + MIRSymbol *rhssymbol = func->GetLocalOrGlobalSymbol(rhsdread->GetStIdx()); + MIRType *rhsty = rhssymbol->GetType(); + if (rhsdread->GetFieldID() != 0) { + MIRStructType *structty = dynamic_cast(rhssymbol->GetType()); + CHECK_FATAL(structty, "SelectDassign: non-zero fieldID for non-structure"); + FieldPair thepair = structty->TraverseToField(rhsdread->GetFieldID()); + rhsty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + rhsoffset = becommon->GetFieldOffset(*structty, rhsdread->GetFieldID()).first; + } + rhsalign = becommon->GetTypeAlign(rhsty->GetTypeIndex()); + BaseNode *rRrn = ReadregNodeForSymbol(rhssymbol); + SymbolAlloc &rsymalloc = rhssymbol->IsLocal() ? memlayout->sym_alloc_table[rhssymbol->GetStIndex()] + : globmemlayout->sym_alloc_table[rhssymbol->GetStIndex()]; + BaseNode *lRrn = ReadregNodeForSymbol(lhssymbol); + SymbolAlloc &lsymalloc = lhssymbol->IsLocal() ? 
memlayout->sym_alloc_table[lhssymbol->GetStIndex()] + : globmemlayout->sym_alloc_table[lhssymbol->GetStIndex()]; + + alignused = std::min(lhsalign, rhsalign); + alignused = std::min(alignused, 4u); // max alignment is 32-bit + if (!alignused) { + alignused = 1u; + } + for (uint32 i = 0; i < (lhssize / alignused); i++) { + // generate the load + loadnode = mirBuilder->CreateExprIreadoff(UnsignedPrimType(alignused), + rsymalloc.offset + rhsoffset + i * alignused, rRrn); + // generate the store + iassignoff = mirBuilder->CreateStmtIassignoff( + UnsignedPrimType(alignused), lsymalloc.offset + lhsoffset + i * alignused, lRrn, loadnode); + newblk->AddStatement(iassignoff); + } + // take care of extra content at the end less than the unit of alignused + uint32 lhssizeCovered = (lhssize / alignused) * alignused; + uint32 newalignused = alignused; + while (lhssizeCovered < lhssize) { + newalignused = newalignused >> 1; + if (lhssizeCovered + newalignused > lhssize) { + continue; + } + // generate the load + loadnode = mirBuilder->CreateExprIreadoff(UnsignedPrimType(newalignused), + rsymalloc.offset + rhsoffset + lhssizeCovered, rRrn); + // generate the store + iassignoff = mirBuilder->CreateStmtIassignoff( + UnsignedPrimType(newalignused), lsymalloc.offset + lhsoffset + lhssizeCovered, lRrn, loadnode); + newblk->AddStatement(iassignoff); + lhssizeCovered += newalignused; + } + } else if (dsnode->Opnd(0)->GetOpCode() == OP_regread) { + RegreadNode *regread = static_cast(dsnode->Opnd(0)); + CHECK_FATAL(regread->GetRegIdx() == -kSregRetval0 && regread->GetPrimType() == PTY_agg, ""); + + BaseNode *lRrn = ReadregNodeForSymbol(lhssymbol); + SymbolAlloc &lsymalloc = lhssymbol->IsLocal() ? memlayout->sym_alloc_table[lhssymbol->GetStIndex()] + : globmemlayout->sym_alloc_table[lhssymbol->GetStIndex()]; + + alignused = std::min(lhsalign, 4u); // max alignment is 32-bit + PregIdx ridx = -kSregRetval0; + for (uint32 i = 0; i < (lhssize / alignused); i++) { + // generate the load + loadnode = mirBuilder->CreateExprRegread(UnsignedPrimType(alignused), ridx - i); + // generate the store + iassignoff = mirBuilder->CreateStmtIassignoff( + UnsignedPrimType(alignused), lsymalloc.offset + lhsoffset + i * alignused, lRrn, loadnode); + newblk->AddStatement(iassignoff); + } + // take care of extra content at the end less than the unit of alignused + uint32 lhssizeCovered = (lhssize / alignused) * alignused; + ridx = -kSregRetval0 - (lhssize / alignused); + uint32 newalignused = alignused; + while (lhssizeCovered < lhssize) { + newalignused = newalignused >> 1; + if (lhssizeCovered + newalignused > lhssize) { + continue; + } + // generate the load + loadnode = mirBuilder->CreateExprRegread(UnsignedPrimType(newalignused), ridx--); + // generate the store + iassignoff = mirBuilder->CreateStmtIassignoff( + UnsignedPrimType(newalignused), lsymalloc.offset + lhsoffset + lhssizeCovered, lRrn, loadnode); + newblk->AddStatement(iassignoff); + lhssizeCovered += newalignused; + } + } else { // iread + IreadNode *rhsiread = static_cast(dsnode->Opnd(0)); + CHECK_FATAL(rhsiread, "LowerAggDassign: illegal rhs for dassign node of structure type"); + rhsiread->SetOpnd(LowerExpr(rhsiread->Opnd(0)), 0); + MIRType *rhsRdTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(rhsiread->GetTyIdx()); + MIRPtrType *pointerty = static_cast(rhsRdTy); + CHECK_FATAL(pointerty, "LowerAggDassign: expect a pointer type at iread node"); + if (rhsiread->GetFieldID() != 0) { + MIRStructType *structty = 
dynamic_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerty->GetPointedTyIdx())); + CHECK_FATAL(structty, "LowerAggDassign: non-zero fieldID for non-structure"); + FieldPair thepair = structty->TraverseToField(rhsiread->GetFieldID()); + rhsRdTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + rhsoffset = becommon->GetFieldOffset(*structty, rhsiread->GetFieldID()).first; + } else { + rhsRdTy = GetPointedToType(pointerty); + } + rhsalign = becommon->GetTypeAlign(rhsRdTy->GetTypeIndex()); + BaseNode *lRrn = ReadregNodeForSymbol(lhssymbol); + CHECK(lhssymbol->GetStIndex() < memlayout->sym_alloc_table.size() && + lhssymbol->GetStIndex() < globmemlayout->sym_alloc_table.size(), + "index oout of range in LMBCLowerer::LowerAggDassign"); + SymbolAlloc &lsymalloc = lhssymbol->IsLocal() ? memlayout->sym_alloc_table[lhssymbol->GetStIndex()] + : globmemlayout->sym_alloc_table[lhssymbol->GetStIndex()]; + + alignused = std::min(lhsalign, rhsalign); + alignused = std::min(alignused, 4u); // max alignment is 32-bit + for (uint32 i = 0; i < (lhssize / alignused); i++) { + // generate the load + loadnode = mirBuilder->CreateExprIreadoff(UnsignedPrimType(alignused), rhsoffset + i * alignused, + rhsiread->Opnd(0)); + // generate the store + iassignoff = mirBuilder->CreateStmtIassignoff( + UnsignedPrimType(alignused), lsymalloc.offset + lhsoffset + i * alignused, lRrn, loadnode); + newblk->AddStatement(iassignoff); + } + // take care of extra content at the end less than the unit of alignused + uint32 lhssizeCovered = (lhssize / alignused) * alignused; + uint32 newalignused = alignused; + while (lhssizeCovered < lhssize) { + newalignused = newalignused >> 1; + if (lhssizeCovered + newalignused > lhssize) { + continue; + } + // generate the load + loadnode = mirBuilder->CreateExprIreadoff(UnsignedPrimType(newalignused), rhsoffset + lhssizeCovered, + rhsiread->Opnd(0)); + // generate the store + iassignoff = mirBuilder->CreateStmtIassignoff( + UnsignedPrimType(newalignused), lsymalloc.offset + lhsoffset + lhssizeCovered, lRrn, loadnode); + newblk->AddStatement(iassignoff); + lhssizeCovered += newalignused; + } + } +} + +void LMBCLowerer::LowerDassign(DassignNode *dsnode, BlockNode *newblk) { + if (dsnode->Opnd(0)->GetPrimType() != PTY_agg) { + dsnode->SetOpnd(LowerExpr(dsnode->Opnd(0)), 0); + MIRSymbol *symbol = func->GetLocalOrGlobalSymbol(dsnode->GetStIdx()); + int32 offset = 0; + PrimType ptypused = symbol->GetType()->GetPrimType(); + if (dsnode->GetFieldID() != 0) { + MIRStructType *structty = dynamic_cast(symbol->GetType()); + CHECK_FATAL(structty, "LMBCLowerer::LowerDassign: non-zero fieldID for non-structure"); + offset = becommon->GetFieldOffset(*structty, dsnode->GetFieldID()).first; + TyIdx ftyidx = structty->TraverseToField(dsnode->GetFieldID()).second.first; + ptypused = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ftyidx)->GetPrimType(); + } + PregIdx spcreg = GetSpecialRegFromSt(symbol); + if (spcreg == -kSregFp) { + IassignFPoffNode *iassignoff = mirBuilder->CreateStmtIassignFPoff( + ptypused, memlayout->sym_alloc_table[symbol->GetStIndex()].offset + offset, dsnode->Opnd(0)); + newblk->AddStatement(iassignoff); + } else { + BaseNode *rrn = ReadregNodeForSymbol(symbol); + SymbolAlloc &symalloc = symbol->IsLocal() ? 
memlayout->sym_alloc_table[symbol->GetStIndex()] + : globmemlayout->sym_alloc_table[symbol->GetStIndex()]; + IassignoffNode *iassignoff = + mirBuilder->CreateStmtIassignoff(ptypused, symalloc.offset + offset, rrn, dsnode->Opnd(0)); + newblk->AddStatement(iassignoff); + } + } else { + LowerAggDassign(newblk, dsnode); + } +} + +void LMBCLowerer::LowerAggIassign(BlockNode *newblk, IassignNode *iassign) { + int32 lhsoffset = 0; + MIRType *lhsty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iassign->GetTyIdx()); + MIRPtrType *pointerty = static_cast(lhsty); + if (pointerty->GetKind() != kTypePointer) { + TypeAttrs typeAttrs; + pointerty = static_cast(GlobalTables::GetTypeTable().GetOrCreatePointerType(*lhsty, GetExactPtrPrimType(), typeAttrs)); + } + if (iassign->GetFieldID() != 0) { + MIRStructType *structty = dynamic_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerty->GetPointedTyIdx())); + CHECK_FATAL(structty, "LowerAggDassign: non-zero fieldID for non-structure"); + FieldPair thepair = structty->TraverseToField(iassign->GetFieldID()); + lhsty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + lhsoffset = becommon->GetFieldOffset(*structty, iassign->GetFieldID()).first; + } else { + lhsty = GetPointedToType(pointerty); + } + uint32 lhsalign = becommon->GetTypeAlign(lhsty->GetTypeIndex()); + uint32 lhssize = becommon->GetTypeSize(lhsty->GetTypeIndex()); + + uint32 rhsalign; + uint32 alignused; + int32 rhsoffset = 0; + BaseNode *loadnode = nullptr; + IassignoffNode *iassignoff = nullptr; + if (iassign->GetRHS()->GetOpCode() == OP_dread) { + AddrofNode *rhsdread = static_cast(iassign->GetRHS()); + MIRSymbol *rhssymbol = func->GetLocalOrGlobalSymbol(rhsdread->GetStIdx()); + MIRType *rhsty = rhssymbol->GetType(); + if (rhsdread->GetFieldID() != 0) { + MIRStructType *structty = dynamic_cast(rhssymbol->GetType()); + CHECK_FATAL(structty, "SelectDassign: non-zero fieldID for non-structure"); + FieldPair thepair = structty->TraverseToField(rhsdread->GetFieldID()); + rhsty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + rhsoffset = becommon->GetFieldOffset(*structty, rhsdread->GetFieldID()).first; + } + rhsalign = becommon->GetTypeAlign(rhsty->GetTypeIndex()); + BaseNode *rRrn = ReadregNodeForSymbol(rhssymbol); + CHECK(rhssymbol->GetStIndex() < memlayout->sym_alloc_table.size() && + rhssymbol->GetStIndex() < globmemlayout->sym_alloc_table.size(), + "index out of range in LMBCLowerer::LowerAggIassign"); + SymbolAlloc &rsymalloc = rhssymbol->IsLocal() ? 
memlayout->sym_alloc_table[rhssymbol->GetStIndex()] + : globmemlayout->sym_alloc_table[rhssymbol->GetStIndex()]; + + alignused = std::min(lhsalign, rhsalign); + alignused = std::min(alignused, 4u); // max alignment is 32-bit + for (uint32 i = 0; i < (lhssize / alignused); i++) { + // generate the load + loadnode = mirBuilder->CreateExprIreadoff(UnsignedPrimType(alignused), + rsymalloc.offset + rhsoffset + i * alignused, rRrn); + // generate the store + iassignoff = mirBuilder->CreateStmtIassignoff(UnsignedPrimType(alignused), lhsoffset + i * alignused, + iassign->addrExpr, loadnode); + newblk->AddStatement(iassignoff); + } + // take care of extra content at the end less than the unit of alignused + uint32 lhssizeCovered = (lhssize / alignused) * alignused; + uint32 newalignused = alignused; + while (lhssizeCovered < lhssize) { + newalignused = newalignused >> 1; + if (lhssizeCovered + newalignused > lhssize) { + continue; + } + // generate the load + loadnode = mirBuilder->CreateExprIreadoff(UnsignedPrimType(newalignused), + rsymalloc.offset + rhsoffset + lhssizeCovered, rRrn); + // generate the store + iassignoff = mirBuilder->CreateStmtIassignoff(UnsignedPrimType(newalignused), + lhsoffset + lhssizeCovered, iassign->addrExpr, loadnode); + newblk->AddStatement(iassignoff); + lhssizeCovered += newalignused; + } + } else if (iassign->GetRHS()->GetOpCode() == OP_regread) { + RegreadNode *regread = static_cast(iassign->GetRHS()); + CHECK_FATAL(regread->GetRegIdx() == -kSregRetval0 && regread->GetPrimType() == PTY_agg, ""); + + alignused = std::min(lhsalign, 4u); // max alignment is 32-bit + PregIdx ridx = -kSregRetval0; + for (uint32 i = 0; i < (lhssize / alignused); i++) { + // generate the load + loadnode = mirBuilder->CreateExprRegread(UnsignedPrimType(alignused), ridx - i); + // generate the store + iassignoff = mirBuilder->CreateStmtIassignoff(UnsignedPrimType(alignused), lhsoffset + i * alignused, + iassign->addrExpr, loadnode); + newblk->AddStatement(iassignoff); + } + // take care of extra content at the end less than the unit of alignused + uint32 lhssizeCovered = (lhssize / alignused) * alignused; + ridx = -kSregRetval0 - (lhssize / alignused); + uint32 newalignused = alignused; + while (lhssizeCovered < lhssize) { + newalignused = newalignused >> 1; + if (lhssizeCovered + newalignused > lhssize) { + continue; + } + // generate the load + loadnode = mirBuilder->CreateExprRegread(UnsignedPrimType(newalignused), ridx--); + // generate the store + iassignoff = mirBuilder->CreateStmtIassignoff(UnsignedPrimType(newalignused), + lhsoffset + lhssizeCovered, iassign->addrExpr, loadnode); + newblk->AddStatement(iassignoff); + lhssizeCovered += newalignused; + } + } else { // iread + IreadNode *rhsiread = static_cast(iassign->GetRHS()); + CHECK_FATAL(rhsiread, "LowerAggIassign: illegal rhs for dassign node of structure type"); + rhsiread->SetOpnd(LowerExpr(rhsiread->Opnd(0)), 0); + MIRType *rhsRdTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(rhsiread->GetTyIdx()); + MIRPtrType *pointerty = static_cast(rhsRdTy); + CHECK_FATAL(pointerty, "LowerAggIassign: expect a pointer type at iread node"); + if (rhsiread->GetFieldID() != 0) { + MIRStructType *structty = dynamic_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerty->GetPointedTyIdx())); + CHECK_FATAL(structty, "LowerAggIassign: non-zero fieldID for non-structure"); + FieldPair thepair = structty->TraverseToField(rhsiread->GetFieldID()); + rhsRdTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + rhsoffset = 
becommon->GetFieldOffset(*structty, rhsiread->GetFieldID()).first; + } else { + rhsRdTy = GetPointedToType(pointerty); + } + rhsalign = becommon->GetTypeAlign(rhsRdTy->GetTypeIndex()); + + alignused = std::min(lhsalign, rhsalign); + alignused = std::min(alignused, 4u); // max alignment is 32-bit + for (uint32 i = 0; i < (lhssize / alignused); i++) { + // generate the load + loadnode = mirBuilder->CreateExprIreadoff(UnsignedPrimType(alignused), rhsoffset + i * alignused, + rhsiread->Opnd(0)); + // generate the store + iassignoff = mirBuilder->CreateStmtIassignoff(UnsignedPrimType(alignused), lhsoffset + i * alignused, + iassign->addrExpr, loadnode); + newblk->AddStatement(iassignoff); + } + // take care of extra content at the end less than the unit of alignused + uint32 lhssizeCovered = (lhssize / alignused) * alignused; + uint32 newalignused = alignused; + while (lhssizeCovered < lhssize) { + newalignused = newalignused >> 1; + if (lhssizeCovered + newalignused > lhssize) { + continue; + } + // generate the load + loadnode = mirBuilder->CreateExprIreadoff(UnsignedPrimType(newalignused), rhsoffset + lhssizeCovered, + rhsiread->Opnd(0)); + // generate the store + iassignoff = mirBuilder->CreateStmtIassignoff(UnsignedPrimType(newalignused), + lhsoffset + lhssizeCovered, iassign->addrExpr, loadnode); + newblk->AddStatement(iassignoff); + lhssizeCovered += newalignused; + } + } +} + +void LMBCLowerer::LowerIassign(IassignNode *iassign, BlockNode *newblk) { + iassign->addrExpr = LowerExpr(iassign->Opnd(0)); + if (iassign->GetRHS()->GetPrimType() != PTY_agg) { + iassign->SetRHS(LowerExpr(iassign->GetRHS())); + int32 offset = 0; + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iassign->GetTyIdx()); + MIRPtrType *pointerty = static_cast(type); + CHECK_FATAL(pointerty, "LowerIassign::expect a pointer type at iassign node"); + if (iassign->GetFieldID() != 0) { + MIRStructType *structty = dynamic_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerty->GetPointedTyIdx())); + CHECK_FATAL(structty, "LowerAggIassign: non-zero fieldID for non-structure"); + offset = becommon->GetFieldOffset(*structty, iassign->GetFieldID()).first; + TyIdx ftyidx = structty->TraverseToField(iassign->GetFieldID()).second.first; + type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ftyidx); + } else { + type = GetPointedToType(pointerty); + } + PrimType ptypused = type->GetPrimType(); + IassignoffNode *iassignoff = + mirBuilder->CreateStmtIassignoff(ptypused, offset, iassign->addrExpr, iassign->GetRHS()); + newblk->AddStatement(iassignoff); + } else { + LowerAggIassign(newblk, iassign); + } +} + +BlockNode *LMBCLowerer::LowerBlock(BlockNode *block) { + BlockNode *newblk = mirModule->CurFuncCodeMemPool()->New(); + if (!block->GetFirst()) { + return newblk; + } + StmtNode *nextstmt = block->GetFirst(); + do { + StmtNode *stmt = nextstmt; + if (stmt == block->GetLast()) { + nextstmt = nullptr; + } else { + nextstmt = stmt->GetNext(); + } + stmt->SetNext(nullptr); + switch (stmt->GetOpCode()) { + case OP_dassign: { + LowerDassign(static_cast(stmt), newblk); + break; + } + case OP_iassign: { + LowerIassign(static_cast(stmt), newblk); + break; + } + default: { + for (size_t i = 0; i < stmt->NumOpnds(); ++i) { + stmt->SetOpnd(LowerExpr(stmt->Opnd(i)), i); + } + newblk->AddStatement(stmt); + break; + } + } + } while (nextstmt != nullptr); + return newblk; +} + +void LMBCLowerer::LowerFunction() { + BlockNode *origbody = func->GetBody(); + BlockNode *newbody = LowerBlock(origbody); + func->SetBody(newbody); +} + 
+} // namespace maple diff --git a/src/mapleall/maple_me/src/lmbc_memlayout.cpp b/src/mapleall/maple_me/src/lmbc_memlayout.cpp new file mode 100644 index 0000000000..2d23f64ede --- /dev/null +++ b/src/mapleall/maple_me/src/lmbc_memlayout.cpp @@ -0,0 +1,481 @@ +/* + * Copyright (c) [2022] Futurewei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. + */ + +// For each function being compiled, lay out its parameters, return values and +// local variables on its stack frame. This involves determining how parameters +// and return values are passed from analyzing their types. +// +// Allocate all the global variables within the global memory block which is +// addressed via offset from the global pointer GP during execution. Allocate +// this block pointed to by mirModule.globalBlkMap and perform the static +// initializations. + +#include "lmbc_memlayout.h" + +namespace maple { + +uint32 LMBCMemLayout::FindLargestActualArea(StmtNode *stmt, int &maxActualSize) { + if (!stmt) { + return maxActualSize; + } + Opcode opcode = stmt->op; + switch (opcode) { + case OP_block: { + BlockNode *blcknode = static_cast(stmt); + for (StmtNode &s : blcknode->GetStmtNodes()) { + FindLargestActualArea(&s, maxActualSize); + } + break; + } + case OP_if: { + IfStmtNode *ifnode = static_cast(stmt); + FindLargestActualArea(ifnode->GetThenPart(), maxActualSize); + FindLargestActualArea(ifnode->GetElsePart(), maxActualSize); + break; + } + case OP_doloop: { + FindLargestActualArea(static_cast(stmt)->GetDoBody(), maxActualSize); + break; + } + case OP_dowhile: + case OP_while: + FindLargestActualArea(static_cast(stmt)->GetBody(), maxActualSize); + break; + case OP_call: + case OP_icall: + case OP_intrinsiccall: { + ParmLocator parmlocator(be); // instantiate a parm locator + NaryStmtNode *callstmt = static_cast(stmt); + for (uint32 i = 0; i < callstmt->NumOpnds(); i++) { + BaseNode *opnd = callstmt->Opnd(i); + CHECK_FATAL(opnd->GetPrimType() != PTY_void, ""); + MIRType *ty = nullptr; + if (opnd->GetPrimType() != PTY_agg) { + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(opnd->GetPrimType())); + } else { + Opcode opnd_opcode = opnd->GetOpCode(); + CHECK_FATAL(opnd_opcode == OP_dread || opnd_opcode == OP_iread, ""); + if (opnd_opcode == OP_dread) { + AddrofNode *dread = static_cast(opnd); + MIRSymbol *sym = func->GetLocalOrGlobalSymbol(dread->GetStIdx()); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(sym->GetTyIdx()); + if (dread->GetFieldID() != 0) { + CHECK_FATAL(ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeClass, ""); + FieldPair thepair = static_cast(ty)->TraverseToField(dread->GetFieldID()); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + } + } else { // OP_iread + IreadNode *iread = static_cast(opnd); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread->GetTyIdx()); + CHECK_FATAL(ty->GetKind() == kTypePointer, ""); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(ty)->GetPointedTyIdx()); + if 
(iread->GetFieldID() != 0) { + CHECK_FATAL(ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeClass, ""); + FieldPair thepair = static_cast(ty)->TraverseToField(iread->GetFieldID()); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + } + } + } + PLocInfo ploc; + parmlocator.LocateNextParm(ty, ploc); + maxActualSize = std::max(maxActualSize, ploc.memoffset + ploc.memsize); + maxActualSize = maplebe::RoundUp(maxActualSize, GetPrimTypeSize(PTY_ptr)); + } + break; + } + default: + return maxActualSize; + } + maxActualSize = maplebe::RoundUp(maxActualSize, GetPrimTypeSize(PTY_ptr)); + return maxActualSize; +} + +// go over all outgoing calls in the function body and get the maximum space +// needed for storing the actuals based on the actual parameters and the ABI; +// this assumes that all nesting of statements has been removed, so that all +// the statements are at only one block level +uint32 LMBCMemLayout::FindLargestActualArea(void) { + int32 maxActualSize = 0; + FindLargestActualArea(func->GetBody(), maxActualSize); + return static_cast(maxActualSize); +} + +void LMBCMemLayout::LayoutStackFrame(void) { + MIRSymbol *sym = nullptr; + // StIdx stIdx; + // go through formal parameters + ParmLocator parmlocator(be); // instantiate a parm locator + PLocInfo ploc; + for (uint32 i = 0; i < func->GetFormalDefVec().size(); i++) { + FormalDef formalDef = func->GetFormalDefAt(i); + sym = formalDef.formalSym; + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(formalDef.formalTyIdx); + parmlocator.LocateNextParm(ty, ploc); + uint32 stindex = sym->GetStIndex(); + // always passed in memory, so allocate in seg_upformal + sym_alloc_table[stindex].mem_segment = &seg_upformal; + seg_upformal.size = maplebe::RoundUp(seg_upformal.size, be.GetTypeAlign(ty->GetTypeIndex())); + sym_alloc_table[stindex].offset = seg_upformal.size; + seg_upformal.size += be.GetTypeSize(ty->GetTypeIndex()); + seg_upformal.size = maplebe::RoundUp(seg_upformal.size, GetPrimTypeSize(PTY_ptr)); + // LogInfo::MapleLogger() << "LAYOUT: formal %" << GlobalTables::GetStringFromGstridx(sym->GetNameStridx()); + // LogInfo::MapleLogger() << " at seg_upformal offset " << sym_alloc_table[stindex].offset << " passed in memory\n"; + } + + // allocate seg_formal in seg_FPbased + seg_formal.how_alloc.mem_segment = &seg_FPbased; + seg_FPbased.size = maplebe::RoundDown(seg_FPbased.size, GetPrimTypeSize(PTY_ptr)); + seg_FPbased.size -= seg_formal.size; + seg_FPbased.size = maplebe::RoundDown(seg_FPbased.size, GetPrimTypeSize(PTY_ptr)); + seg_formal.how_alloc.offset = seg_FPbased.size; + //LogInfo::MapleLogger() << "LAYOUT: seg_formal at seg_FPbased offset " << seg_formal.how_alloc.offset << " with size " + // << seg_formal.size << std::endl; + + // allocate the local variables + uint32 symtabsize = func->GetSymTab()->GetSymbolTableSize(); + for (uint32 i = 0; i < symtabsize; i++) { + sym = func->GetSymTab()->GetSymbolFromStIdx(i); + if (!sym) { + continue; + } + if (sym->IsDeleted()) { + continue; + } + if (sym->GetStorageClass() != kScAuto) { + continue; + } + uint32 stindex = sym->GetStIndex(); + sym_alloc_table[stindex].mem_segment = &seg_FPbased; + seg_FPbased.size -= be.GetTypeSize(sym->GetTyIdx()); + seg_FPbased.size = maplebe::RoundDown(seg_FPbased.size, be.GetTypeAlign(sym->GetTyIdx())); + sym_alloc_table[stindex].offset = seg_FPbased.size; + // LogInfo::MapleLogger() << "LAYOUT: local %" << GlobalTables::GetStringFromGstridx(sym->GetNameStridx()); + // LogInfo::MapleLogger() << " at FPbased offset " << 
sym_alloc_table[stindex].offset << std::endl; + } + seg_FPbased.size = maplebe::RoundDown(seg_FPbased.size, GetPrimTypeSize(PTY_ptr)); + + // allocate seg_actual for storing the outgoing parameters; this requires + // going over all outgoing calls and get the maximum space needed for the + // actuals + seg_actual.size = FindLargestActualArea(); + + // allocate seg_actual in seg_SPbased + seg_actual.how_alloc.mem_segment = &seg_SPbased; + seg_actual.how_alloc.offset = seg_SPbased.size; + seg_SPbased.size = maplebe::RoundUp(seg_SPbased.size, GetPrimTypeSize(PTY_ptr)); + seg_SPbased.size += seg_actual.size; + seg_SPbased.size = maplebe::RoundUp(seg_SPbased.size, GetPrimTypeSize(PTY_ptr)); + //LogInfo::MapleLogger() << "LAYOUT: seg_actual at seg_SPbased offset " << seg_actual.how_alloc.offset << " with size " + // << seg_actual.size << std::endl; +} + +inline uint8 GetU8Const(MIRConst *c) { + MIRIntConst *intconst = static_cast(c); + return static_cast(intconst->GetValue()); +} + +inline uint16 GetU16Const(MIRConst *c) { + MIRIntConst *intconst = static_cast(c); + return static_cast(intconst->GetValue()); +} + +inline uint32 GetU32Const(MIRConst *c) { + MIRIntConst *intconst = static_cast(c); + return static_cast(intconst->GetValue()); +} + +inline uint64 GetU64Const(MIRConst *c) { + MIRIntConst *intconst = static_cast(c); + return static_cast(intconst->GetValue()); +} + +inline uint32 GetF32Const(MIRConst *c) { + MIRFloatConst *floatconst = static_cast(c); + return static_cast(floatconst->GetIntValue()); +} + +inline uint64 GetF64Const(MIRConst *c) { + MIRDoubleConst *doubleconst = static_cast(c); + return static_cast(doubleconst->GetIntValue()); +} + +void GlobalMemLayout::FillScalarValueInMap(uint32 startaddress, PrimType pty, MIRConst *c) { + switch (pty) { + case PTY_u1: + case PTY_u8: + case PTY_i8: { + uint8 *p = &be_.GetMIRModule().GetGlobalBlockMap()[startaddress]; + *p = GetU8Const(c); + break; + } + case PTY_u16: + case PTY_i16: { + uint16 *p = (uint16 *)(&be_.GetMIRModule().GetGlobalBlockMap()[startaddress]); + *p = GetU16Const(c); + break; + } + case PTY_u32: + case PTY_i32: { + uint32 *p = (uint32 *)(&be_.GetMIRModule().GetGlobalBlockMap()[startaddress]); + *p = GetU32Const(c); + break; + } + case PTY_u64: + case PTY_i64: { + uint64 *p = (uint64 *)(&be_.GetMIRModule().GetGlobalBlockMap()[startaddress]); + *p = GetU64Const(c); + break; + } + case PTY_f32: { + uint32 *p = (uint32 *)(&be_.GetMIRModule().GetGlobalBlockMap()[startaddress]); + *p = GetF32Const(c); + break; + } + case PTY_f64: { + uint64 *p = (uint64 *)(&be_.GetMIRModule().GetGlobalBlockMap()[startaddress]); + *p = GetF64Const(c); + break; + } + default: + CHECK_FATAL(false, "FillScalarValueInMap: NYI"); + } + return; +} + +void GlobalMemLayout::FillTypeValueInMap(uint32 startaddress, MIRType *ty, MIRConst *c) { + switch (ty->GetKind()) { + case kTypeScalar: + FillScalarValueInMap(startaddress, ty->GetPrimType(), c); + break; + case kTypeArray: { + MIRArrayType *arraytype = static_cast(ty); + MIRType *elemtype = arraytype->GetElemType(); + int32 elemsize = elemtype->GetSize(); + MIRAggConst *aggconst = dynamic_cast(c); + CHECK_FATAL(aggconst, "FillTypeValueInMap: inconsistent array initialization specification"); + MapleVector &constvec = aggconst->GetConstVec(); + for (MapleVector::iterator it = constvec.begin(); it != constvec.end(); + it++, startaddress += elemsize) { + FillTypeValueInMap(startaddress, elemtype, *it); + } + break; + } + case kTypeStruct: { + MIRStructType *structty = static_cast(ty); + MIRAggConst 
*aggconst = dynamic_cast(c); + CHECK_FATAL(aggconst, "FillTypeValueInMap: inconsistent struct initialization specification"); + MapleVector &constvec = aggconst->GetConstVec(); + for (uint32 i = 0; i < constvec.size(); i++) { + uint32 fieldID = aggconst->GetFieldIdItem(i); + FieldPair thepair = structty->TraverseToField(fieldID); + MIRType *fieldty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + uint32 offset = be_.GetFieldOffset(*structty, fieldID).first; + FillTypeValueInMap(startaddress + offset, fieldty, constvec[i]); + } + break; + } + case kTypeClass: { + MIRClassType *classty = static_cast(ty); + MIRAggConst *aggconst = dynamic_cast(c); + CHECK_FATAL(aggconst, "FillTypeValueInMap: inconsistent class initialization specification"); + MapleVector &constvec = aggconst->GetConstVec(); + for (uint32 i = 0; i < constvec.size(); i++) { + uint32 fieldID = aggconst->GetFieldIdItem(i); + FieldPair thepair = classty->TraverseToField(fieldID); + MIRType *fieldty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + uint32 offset = be_.GetFieldOffset(*classty, fieldID).first; + FillTypeValueInMap(startaddress + offset, fieldty, constvec[i]); + } + break; + } + default: + CHECK_FATAL(false, "FillTypeValueInMap: NYI"); + } +} + +void GlobalMemLayout::FillSymbolValueInMap(const MIRSymbol *sym) { + if (sym->GetKonst() == nullptr) { + return; + } + uint32 stindex = sym->GetStIndex(); + CHECK(stindex < sym_alloc_table.size(), "index out of range in GlobalMemLayout::FillSymbolValueInMap"); + uint32 symaddress = sym_alloc_table[stindex].offset; + FillTypeValueInMap(symaddress, sym->GetType(), sym->GetKonst()); + return; +} + +GlobalMemLayout::GlobalMemLayout(maplebe::BECommon &be, MapleAllocator *mallocator) + : seg_GPbased(MS_GPbased), sym_alloc_table(mallocator->Adapter()), be_(be) { + uint32 symtabsize = GlobalTables::GetGsymTable().GetSymbolTableSize(); + sym_alloc_table.resize(symtabsize); + MIRSymbol *sym = nullptr; + // StIdx stIdx; + // allocate the global variables ordered based on alignments + for (int32 curalign = 8; curalign != 0; curalign >>= 1) { + for (uint32 i = 0; i < symtabsize; i++) { + sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(i); + if (!sym) { + continue; + } + if (sym->GetStorageClass() != kScGlobal && sym->GetStorageClass() != kScFstatic) { + continue; + } + if (be.GetTypeAlign(sym->GetTyIdx()) != curalign) { + continue; + } + uint32 stindex = sym->GetStIndex(); + sym_alloc_table[stindex].mem_segment = &seg_GPbased; + seg_GPbased.size = maplebe::RoundUp(seg_GPbased.size, be.GetTypeAlign(sym->GetTyIdx())); + sym_alloc_table[stindex].offset = seg_GPbased.size; + seg_GPbased.size += be.GetTypeSize(sym->GetTyIdx()); + // LogInfo::MapleLogger() << "LAYOUT: global %" << GlobalTables::GetStringFromGstridx(sym->GetNameStridx()); + // LogInfo::MapleLogger() << " at GPbased offset " << sym_alloc_table[stindex].offset << std::endl; + } + } + seg_GPbased.size = maplebe::RoundUp(seg_GPbased.size, GetPrimTypeSize(PTY_ptr)); + be.GetMIRModule().SetGlobalMemSize(seg_GPbased.size); + // allocate the memory map for the GP block + be.GetMIRModule().SetGlobalBlockMap(static_cast(be.GetMIRModule().GetMemPool()->Calloc(seg_GPbased.size))); + // perform initialization on globalblkmap + for (uint32 i = 0; i < symtabsize; i++) { + sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(i); + if (!sym) { + continue; + } + if (sym->GetStorageClass() != kScGlobal && sym->GetStorageClass() != kScFstatic) { + continue; + } + // FillSymbolValueInMap(sym); + 
} +} + +// LocateNextParm should be called with each parameter in the parameter list +// starting from the beginning, one call per parameter in sequence; it returns +// the information on how each parameter is passed in ploc +void ParmLocator::LocateNextParm(const MIRType *ty, PLocInfo &ploc) { + ploc.memoffset = last_memoffset_; + ploc.memsize = GetPrimTypeSize(ty->GetPrimType()); + + uint32 rightpad = 0; + parm_num_++; + + switch (ty->GetPrimType()) { + case PTY_u1: + case PTY_u8: + case PTY_i8: + case PTY_u16: + case PTY_i16: + rightpad = GetPrimTypeSize(PTY_i32) - ploc.memsize; + break; + case PTY_a32: + case PTY_u32: + case PTY_i32: + case PTY_a64: + case PTY_u64: + case PTY_i64: + case PTY_ptr: + case PTY_ref: +#ifdef DYNAMICLANG + case PTY_simplestr: + case PTY_simpleobj: + case PTY_dynany: + case PTY_dyni32: + case PTY_dynf64: + case PTY_dynstr: + case PTY_dynobj: + case PTY_dynundef: + case PTY_dynbool: + case PTY_dynf32: + case PTY_dynnone: + case PTY_dynnull: +#endif + break; + + case PTY_f32: + rightpad = GetPrimTypeSize(PTY_f64) - ploc.memsize; + break; + case PTY_c64: + case PTY_f64: + break; + + case PTY_c128: + break; + + case PTY_agg: { + ploc.memsize = be_.GetTypeSize(ty->GetTypeIndex()); + // compute rightpad + int32 paddedSize = maplebe::RoundUp(ploc.memsize, 8); + rightpad = paddedSize - ploc.memsize; + break; + } + default: + CHECK_FATAL(false, ""); + } + + last_memoffset_ = ploc.memoffset + ploc.memsize + rightpad; + return; +} + +// instantiated with the type of the function return value, it describes how +// the return value is to be passed back to the caller +ReturnMechanism::ReturnMechanism(const MIRType *retty, maplebe::BECommon &be) : fake_first_parm(false) { + switch (retty->GetPrimType()) { + case PTY_u1: + case PTY_u8: + case PTY_i8: + case PTY_u16: + case PTY_i16: + case PTY_a32: + case PTY_u32: + case PTY_i32: + case PTY_a64: + case PTY_u64: + case PTY_i64: + case PTY_f32: + case PTY_f64: +#ifdef DYNAMICLANG + case PTY_simplestr: + case PTY_simpleobj: + case PTY_dynany: + case PTY_dyni32: + case PTY_dynstr: + case PTY_dynobj: +#endif + ptype0 = retty->GetPrimType(); + return; + + case PTY_c64: + case PTY_c128: + fake_first_parm = true; + ptype0 = PTY_a32; + return; + + case PTY_agg: { + uint32 size = be.GetTypeSize(retty->GetTypeIndex()); + if (size > 4) { + fake_first_parm = true; + ptype0 = PTY_a32; + } else { + ptype0 = PTY_u32; + } + return; + } + + default: + return; + } +} + +} // namespace maple diff --git a/src/mapleall/maple_me/src/me_phase_manager.cpp b/src/mapleall/maple_me/src/me_phase_manager.cpp index 9b62f9f1d1..afea165178 100644 --- a/src/mapleall/maple_me/src/me_phase_manager.cpp +++ b/src/mapleall/maple_me/src/me_phase_manager.cpp @@ -14,6 +14,10 @@ */ #include "me_phase_manager.h" #include "bin_mplt.h" +#include "becommon.h" +#include "lower.h" +#include "lmbc_memlayout.h" +#include "lmbc_lower.h" #define JAVALANG (mirModule.IsJavaModule()) #define CLANG (mirModule.IsCModule()) @@ -21,6 +25,7 @@ namespace maple { bool MeFuncPM::genMeMpl = false; bool MeFuncPM::genMapleBC = false; +bool MeFuncPM::genLMBC = false; bool MeFuncPM::timePhases = false; void MeFuncPM::DumpMEIR(const MeFunction &f, const std::string phaseName, bool isBefore) { @@ -112,6 +117,8 @@ bool MeFuncPM::PhaseRun(maple::MIRModule &m) { m.Emit("comb.me.mpl"); } if (genMapleBC) { + m.SetFlavor(kFlavorMbc); + // output .mbc BinaryMplt binMplt(m); std::string modFileName = m.GetFileName(); std::string::size_type lastdot = modFileName.find_last_of("."); @@ -120,6 +127,35 @@ 
bool MeFuncPM::PhaseRun(maple::MIRModule &m) { std::string filestem = modFileName.substr(0, lastdot); binMplt.Export(filestem + ".mbc", nullptr); } + if (genLMBC) { + m.SetFlavor(kFlavorLmbc); + maplebe::BECommon beCommon(m); + GlobalMemLayout globalMemLayout(beCommon, &m.GetMPAllocator()); + maplebe::CGLowerer cgLower(m, beCommon, false, false); + cgLower.RegisterBuiltIns(); + cgLower.RegisterExternalLibraryFunctions(); + for (auto func : compFuncList) { + m.SetCurFunction(func); + cgLower.LowerFunc(*func); + MemPool *layoutMp = memPoolCtrler.NewMemPool("layout mempool", true); + MapleAllocator layoutAlloc(layoutMp); + LMBCMemLayout localMemLayout(beCommon, func, &layoutAlloc); + localMemLayout.LayoutStackFrame(); + LMBCLowerer lmbcLowerer(&m, &beCommon, func, &globalMemLayout, &localMemLayout); + lmbcLowerer.LowerFunction(); + func->SetFrameSize(localMemLayout.StackFrameSize()); + func->SetUpFormalSize(localMemLayout.UpformalSize()); + memPoolCtrler.DeleteMemPool(layoutMp); + } + // output .lmbc + BinaryMplt binMplt(m); + std::string modFileName = m.GetFileName(); + std::string::size_type lastdot = modFileName.find_last_of("."); + + binMplt.GetBinExport().not2mplt = true; + std::string filestem = modFileName.substr(0, lastdot); + binMplt.Export(filestem + ".lmbc", nullptr); + } if (MeFuncPM::timePhases) { DumpPhaseTime(); } -- Gitee From f434eff7587deb880ee50f9b429c9082fe304354 Mon Sep 17 00:00:00 2001 From: Fred Chow Date: Thu, 7 Apr 2022 17:37:15 -0700 Subject: [PATCH 2/5] In lowering to lmbc, insert reassigns at function entry for any formal promoted to preg --- src/mapleall/maple_me/include/lmbc_lower.h | 1 + src/mapleall/maple_me/src/lmbc_lower.cpp | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/src/mapleall/maple_me/include/lmbc_lower.h b/src/mapleall/maple_me/include/lmbc_lower.h index a0a31a434b..4e9b3ff3b7 100644 --- a/src/mapleall/maple_me/include/lmbc_lower.h +++ b/src/mapleall/maple_me/include/lmbc_lower.h @@ -45,6 +45,7 @@ class LMBCLowerer { void LowerIassign(IassignNode *, BlockNode *); void LowerAggIassign(BlockNode *, IassignNode *); BlockNode *LowerBlock(BlockNode *); + void LoadFormalsAssignedToPregs(); void LowerFunction(); }; diff --git a/src/mapleall/maple_me/src/lmbc_lower.cpp b/src/mapleall/maple_me/src/lmbc_lower.cpp index a33cc3be53..f5dd00a939 100644 --- a/src/mapleall/maple_me/src/lmbc_lower.cpp +++ b/src/mapleall/maple_me/src/lmbc_lower.cpp @@ -554,10 +554,27 @@ BlockNode *LMBCLowerer::LowerBlock(BlockNode *block) { return newblk; } +void LMBCLowerer::LoadFormalsAssignedToPregs() { + // go through each formals + for (int32 i = func->GetFormalDefVec().size()-1; i >= 0; i--) { + MIRSymbol *formalSt = func->GetFormalDefVec()[i].formalSym; + if (formalSt->GetSKind() != kStPreg) { + continue; + } + MIRPreg *preg = formalSt->GetPreg(); + uint32 stindex = formalSt->GetStIndex(); + PrimType pty = formalSt->GetType()->GetPrimType(); + IreadFPoffNode *ireadfpoff = mirBuilder->CreateExprIreadFPoff(pty, memlayout->sym_alloc_table[stindex].offset); + RegassignNode *rass = mirBuilder->CreateStmtRegassign(pty, func->GetPregTab()->GetPregIdxFromPregno(preg->GetPregNo()), ireadfpoff); + func->GetBody()->InsertFirst(rass); + } +} + void LMBCLowerer::LowerFunction() { BlockNode *origbody = func->GetBody(); BlockNode *newbody = LowerBlock(origbody); func->SetBody(newbody); + LoadFormalsAssignedToPregs(); } } // namespace maple -- Gitee From 6824702d65e8a5c8287bd21c2f471eb47dda057e Mon Sep 17 00:00:00 2001 From: Fred Chow Date: Thu, 7 Apr 2022 20:20:45 -0700 
Subject: [PATCH 3/5] fixed a lmbc bug when mpl2mpl is run --- src/mapleall/maple_me/src/me_phase_manager.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/mapleall/maple_me/src/me_phase_manager.cpp b/src/mapleall/maple_me/src/me_phase_manager.cpp index afea165178..1b56bf8c63 100644 --- a/src/mapleall/maple_me/src/me_phase_manager.cpp +++ b/src/mapleall/maple_me/src/me_phase_manager.cpp @@ -135,6 +135,9 @@ bool MeFuncPM::PhaseRun(maple::MIRModule &m) { cgLower.RegisterBuiltIns(); cgLower.RegisterExternalLibraryFunctions(); for (auto func : compFuncList) { + if (func->GetBody() == nullptr) { + continue; + } m.SetCurFunction(func); cgLower.LowerFunc(*func); MemPool *layoutMp = memPoolCtrler.NewMemPool("layout mempool", true); -- Gitee From e9e135be73bb4e13df9778cbbe03ab3e1ac0c5d6 Mon Sep 17 00:00:00 2001 From: Fred Chow Date: Sat, 9 Apr 2022 10:49:39 -0700 Subject: [PATCH 4/5] only put fstatic and pstatic variables in global memory block --- .../maple_me/include/lmbc_memlayout.h | 20 +++---- src/mapleall/maple_me/src/lmbc_memlayout.cpp | 58 +++++++++---------- .../maple_me/src/me_phase_manager.cpp | 4 +- 3 files changed, 40 insertions(+), 42 deletions(-) diff --git a/src/mapleall/maple_me/include/lmbc_memlayout.h b/src/mapleall/maple_me/include/lmbc_memlayout.h index 67c921c334..592873654f 100644 --- a/src/mapleall/maple_me/include/lmbc_memlayout.h +++ b/src/mapleall/maple_me/include/lmbc_memlayout.h @@ -66,7 +66,6 @@ class MemSegment { class LMBCMemLayout { public: - maplebe::BECommon &be; MIRFunction *func; MemSegment seg_upformal; MemSegment seg_formal; @@ -78,9 +77,8 @@ class LMBCMemLayout { public: uint32 FindLargestActualArea(void); uint32 FindLargestActualArea(StmtNode *, int &); - explicit LMBCMemLayout(maplebe::BECommon &b, MIRFunction *f, MapleAllocator *mallocator) - : be(b), - func(f), + explicit LMBCMemLayout(MIRFunction *f, MapleAllocator *mallocator) + : func(f), seg_upformal(MS_upformal), seg_formal(MS_formal), seg_actual(MS_actual), @@ -107,10 +105,11 @@ class GlobalMemLayout { MemSegment seg_GPbased; MapleVector sym_alloc_table; // index is StIdx private: - maplebe::BECommon &be_; + maplebe::BECommon *be; + MIRModule *mirModule; public: - GlobalMemLayout(maplebe::BECommon &be, MapleAllocator *mallocator); + GlobalMemLayout(maplebe::BECommon *b, MIRModule *mod, MapleAllocator *mallocator); ~GlobalMemLayout() {} private: @@ -128,12 +127,11 @@ struct PLocInfo { // for processing an incoming or outgoing parameter list class ParmLocator { private: - maplebe::BECommon &be_; - int32 parm_num_; // number of all types of parameters processed so far - int32 last_memoffset_; + int32 parmNum; // number of all types of parameters processed so far + int32 lastMemOffset; public: - ParmLocator(maplebe::BECommon &b) : be_(b), parm_num_(0), last_memoffset_(0) {} + ParmLocator() : parmNum(0), lastMemOffset(0) {} ~ParmLocator() {} @@ -146,7 +144,7 @@ class ReturnMechanism { bool fake_first_parm; // whether returning in memory via fake first parameter PrimType ptype0; // the primitive type stored in retval0 - ReturnMechanism(const MIRType *retty, maplebe::BECommon &be); + ReturnMechanism(const MIRType *retty); }; } /* namespace maple */ diff --git a/src/mapleall/maple_me/src/lmbc_memlayout.cpp b/src/mapleall/maple_me/src/lmbc_memlayout.cpp index 2d23f64ede..191b6e43bc 100644 --- a/src/mapleall/maple_me/src/lmbc_memlayout.cpp +++ b/src/mapleall/maple_me/src/lmbc_memlayout.cpp @@ -56,7 +56,7 @@ uint32 LMBCMemLayout::FindLargestActualArea(StmtNode *stmt, int &maxActualSize) case OP_call: 
case OP_icall: case OP_intrinsiccall: { - ParmLocator parmlocator(be); // instantiate a parm locator + ParmLocator parmlocator; // instantiate a parm locator NaryStmtNode *callstmt = static_cast(stmt); for (uint32 i = 0; i < callstmt->NumOpnds(); i++) { BaseNode *opnd = callstmt->Opnd(i); @@ -116,7 +116,7 @@ void LMBCMemLayout::LayoutStackFrame(void) { MIRSymbol *sym = nullptr; // StIdx stIdx; // go through formal parameters - ParmLocator parmlocator(be); // instantiate a parm locator + ParmLocator parmlocator; // instantiate a parm locator PLocInfo ploc; for (uint32 i = 0; i < func->GetFormalDefVec().size(); i++) { FormalDef formalDef = func->GetFormalDefAt(i); @@ -126,9 +126,9 @@ void LMBCMemLayout::LayoutStackFrame(void) { uint32 stindex = sym->GetStIndex(); // always passed in memory, so allocate in seg_upformal sym_alloc_table[stindex].mem_segment = &seg_upformal; - seg_upformal.size = maplebe::RoundUp(seg_upformal.size, be.GetTypeAlign(ty->GetTypeIndex())); + seg_upformal.size = maplebe::RoundUp(seg_upformal.size, ty->GetAlign()); sym_alloc_table[stindex].offset = seg_upformal.size; - seg_upformal.size += be.GetTypeSize(ty->GetTypeIndex()); + seg_upformal.size += ty->GetSize(); seg_upformal.size = maplebe::RoundUp(seg_upformal.size, GetPrimTypeSize(PTY_ptr)); // LogInfo::MapleLogger() << "LAYOUT: formal %" << GlobalTables::GetStringFromGstridx(sym->GetNameStridx()); // LogInfo::MapleLogger() << " at seg_upformal offset " << sym_alloc_table[stindex].offset << " passed in memory\n"; @@ -158,8 +158,8 @@ void LMBCMemLayout::LayoutStackFrame(void) { } uint32 stindex = sym->GetStIndex(); sym_alloc_table[stindex].mem_segment = &seg_FPbased; - seg_FPbased.size -= be.GetTypeSize(sym->GetTyIdx()); - seg_FPbased.size = maplebe::RoundDown(seg_FPbased.size, be.GetTypeAlign(sym->GetTyIdx())); + seg_FPbased.size -= sym->GetType()->GetSize(); + seg_FPbased.size = maplebe::RoundDown(seg_FPbased.size, sym->GetType()->GetAlign()); sym_alloc_table[stindex].offset = seg_FPbased.size; // LogInfo::MapleLogger() << "LAYOUT: local %" << GlobalTables::GetStringFromGstridx(sym->GetNameStridx()); // LogInfo::MapleLogger() << " at FPbased offset " << sym_alloc_table[stindex].offset << std::endl; @@ -216,35 +216,35 @@ void GlobalMemLayout::FillScalarValueInMap(uint32 startaddress, PrimType pty, MI case PTY_u1: case PTY_u8: case PTY_i8: { - uint8 *p = &be_.GetMIRModule().GetGlobalBlockMap()[startaddress]; + uint8 *p = &mirModule->GetGlobalBlockMap()[startaddress]; *p = GetU8Const(c); break; } case PTY_u16: case PTY_i16: { - uint16 *p = (uint16 *)(&be_.GetMIRModule().GetGlobalBlockMap()[startaddress]); + uint16 *p = (uint16 *)(&mirModule->GetGlobalBlockMap()[startaddress]); *p = GetU16Const(c); break; } case PTY_u32: case PTY_i32: { - uint32 *p = (uint32 *)(&be_.GetMIRModule().GetGlobalBlockMap()[startaddress]); + uint32 *p = (uint32 *)(&mirModule->GetGlobalBlockMap()[startaddress]); *p = GetU32Const(c); break; } case PTY_u64: case PTY_i64: { - uint64 *p = (uint64 *)(&be_.GetMIRModule().GetGlobalBlockMap()[startaddress]); + uint64 *p = (uint64 *)(&mirModule->GetGlobalBlockMap()[startaddress]); *p = GetU64Const(c); break; } case PTY_f32: { - uint32 *p = (uint32 *)(&be_.GetMIRModule().GetGlobalBlockMap()[startaddress]); + uint32 *p = (uint32 *)(&mirModule->GetGlobalBlockMap()[startaddress]); *p = GetF32Const(c); break; } case PTY_f64: { - uint64 *p = (uint64 *)(&be_.GetMIRModule().GetGlobalBlockMap()[startaddress]); + uint64 *p = (uint64 *)(&mirModule->GetGlobalBlockMap()[startaddress]); *p = GetF64Const(c); break; } 
@@ -281,7 +281,7 @@ void GlobalMemLayout::FillTypeValueInMap(uint32 startaddress, MIRType *ty, MIRCo uint32 fieldID = aggconst->GetFieldIdItem(i); FieldPair thepair = structty->TraverseToField(fieldID); MIRType *fieldty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); - uint32 offset = be_.GetFieldOffset(*structty, fieldID).first; + uint32 offset = be->GetFieldOffset(*structty, fieldID).first; FillTypeValueInMap(startaddress + offset, fieldty, constvec[i]); } break; @@ -295,7 +295,7 @@ void GlobalMemLayout::FillTypeValueInMap(uint32 startaddress, MIRType *ty, MIRCo uint32 fieldID = aggconst->GetFieldIdItem(i); FieldPair thepair = classty->TraverseToField(fieldID); MIRType *fieldty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); - uint32 offset = be_.GetFieldOffset(*classty, fieldID).first; + uint32 offset = be->GetFieldOffset(*classty, fieldID).first; FillTypeValueInMap(startaddress + offset, fieldty, constvec[i]); } break; @@ -316,8 +316,8 @@ void GlobalMemLayout::FillSymbolValueInMap(const MIRSymbol *sym) { return; } -GlobalMemLayout::GlobalMemLayout(maplebe::BECommon &be, MapleAllocator *mallocator) - : seg_GPbased(MS_GPbased), sym_alloc_table(mallocator->Adapter()), be_(be) { +GlobalMemLayout::GlobalMemLayout(maplebe::BECommon *b, MIRModule *mod, MapleAllocator *mallocator) + : seg_GPbased(MS_GPbased), sym_alloc_table(mallocator->Adapter()), be(b), mirModule(mod) { uint32 symtabsize = GlobalTables::GetGsymTable().GetSymbolTableSize(); sym_alloc_table.resize(symtabsize); MIRSymbol *sym = nullptr; @@ -329,25 +329,25 @@ GlobalMemLayout::GlobalMemLayout(maplebe::BECommon &be, MapleAllocator *mallocat if (!sym) { continue; } - if (sym->GetStorageClass() != kScGlobal && sym->GetStorageClass() != kScFstatic) { + if (sym->GetStorageClass() != kScPstatic && sym->GetStorageClass() != kScFstatic) { continue; } - if (be.GetTypeAlign(sym->GetTyIdx()) != curalign) { + if (sym->GetType()->GetAlign() != curalign) { continue; } uint32 stindex = sym->GetStIndex(); sym_alloc_table[stindex].mem_segment = &seg_GPbased; - seg_GPbased.size = maplebe::RoundUp(seg_GPbased.size, be.GetTypeAlign(sym->GetTyIdx())); + seg_GPbased.size = maplebe::RoundUp(seg_GPbased.size, sym->GetType()->GetAlign()); sym_alloc_table[stindex].offset = seg_GPbased.size; - seg_GPbased.size += be.GetTypeSize(sym->GetTyIdx()); + seg_GPbased.size += sym->GetType()->GetSize(); // LogInfo::MapleLogger() << "LAYOUT: global %" << GlobalTables::GetStringFromGstridx(sym->GetNameStridx()); // LogInfo::MapleLogger() << " at GPbased offset " << sym_alloc_table[stindex].offset << std::endl; } } seg_GPbased.size = maplebe::RoundUp(seg_GPbased.size, GetPrimTypeSize(PTY_ptr)); - be.GetMIRModule().SetGlobalMemSize(seg_GPbased.size); + mirModule->SetGlobalMemSize(seg_GPbased.size); // allocate the memory map for the GP block - be.GetMIRModule().SetGlobalBlockMap(static_cast(be.GetMIRModule().GetMemPool()->Calloc(seg_GPbased.size))); + mirModule->SetGlobalBlockMap(static_cast(mirModule->GetMemPool()->Calloc(seg_GPbased.size))); // perform initialization on globalblkmap for (uint32 i = 0; i < symtabsize; i++) { sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(i); @@ -365,11 +365,11 @@ GlobalMemLayout::GlobalMemLayout(maplebe::BECommon &be, MapleAllocator *mallocat // starting from the beginning, one call per parameter in sequence; it returns // the information on how each parameter is passed in ploc void ParmLocator::LocateNextParm(const MIRType *ty, PLocInfo &ploc) { - ploc.memoffset = last_memoffset_; - 
ploc.memsize = GetPrimTypeSize(ty->GetPrimType()); + ploc.memoffset = lastMemOffset; + ploc.memsize = ty->GetSize(); uint32 rightpad = 0; - parm_num_++; + parmNum++; switch (ty->GetPrimType()) { case PTY_u1: @@ -414,7 +414,7 @@ void ParmLocator::LocateNextParm(const MIRType *ty, PLocInfo &ploc) { break; case PTY_agg: { - ploc.memsize = be_.GetTypeSize(ty->GetTypeIndex()); + ploc.memsize = ty->GetSize(); // compute rightpad int32 paddedSize = maplebe::RoundUp(ploc.memsize, 8); rightpad = paddedSize - ploc.memsize; @@ -424,13 +424,13 @@ void ParmLocator::LocateNextParm(const MIRType *ty, PLocInfo &ploc) { CHECK_FATAL(false, ""); } - last_memoffset_ = ploc.memoffset + ploc.memsize + rightpad; + lastMemOffset = ploc.memoffset + ploc.memsize + rightpad; return; } // instantiated with the type of the function return value, it describes how // the return value is to be passed back to the caller -ReturnMechanism::ReturnMechanism(const MIRType *retty, maplebe::BECommon &be) : fake_first_parm(false) { +ReturnMechanism::ReturnMechanism(const MIRType *retty) : fake_first_parm(false) { switch (retty->GetPrimType()) { case PTY_u1: case PTY_u8: @@ -463,7 +463,7 @@ ReturnMechanism::ReturnMechanism(const MIRType *retty, maplebe::BECommon &be) : return; case PTY_agg: { - uint32 size = be.GetTypeSize(retty->GetTypeIndex()); + uint32 size = retty->GetSize(); if (size > 4) { fake_first_parm = true; ptype0 = PTY_a32; diff --git a/src/mapleall/maple_me/src/me_phase_manager.cpp b/src/mapleall/maple_me/src/me_phase_manager.cpp index 1b56bf8c63..9194d92325 100644 --- a/src/mapleall/maple_me/src/me_phase_manager.cpp +++ b/src/mapleall/maple_me/src/me_phase_manager.cpp @@ -130,7 +130,7 @@ bool MeFuncPM::PhaseRun(maple::MIRModule &m) { if (genLMBC) { m.SetFlavor(kFlavorLmbc); maplebe::BECommon beCommon(m); - GlobalMemLayout globalMemLayout(beCommon, &m.GetMPAllocator()); + GlobalMemLayout globalMemLayout(&beCommon, &m, &m.GetMPAllocator()); maplebe::CGLowerer cgLower(m, beCommon, false, false); cgLower.RegisterBuiltIns(); cgLower.RegisterExternalLibraryFunctions(); @@ -142,7 +142,7 @@ bool MeFuncPM::PhaseRun(maple::MIRModule &m) { cgLower.LowerFunc(*func); MemPool *layoutMp = memPoolCtrler.NewMemPool("layout mempool", true); MapleAllocator layoutAlloc(layoutMp); - LMBCMemLayout localMemLayout(beCommon, func, &layoutAlloc); + LMBCMemLayout localMemLayout(func, &layoutAlloc); localMemLayout.LayoutStackFrame(); LMBCLowerer lmbcLowerer(&m, &beCommon, func, &globalMemLayout, &localMemLayout); lmbcLowerer.LowerFunction(); -- Gitee From bdf92437bba3b6b2bef7305ec3ca0dae6514792a Mon Sep 17 00:00:00 2001 From: Fred Chow Date: Wed, 13 Apr 2022 18:31:57 -0700 Subject: [PATCH 5/5] Lowered parameter passing to stores to argument build area Implemented new opcodes OP_iassignspoff and OP_blkassignoff for this purpose. Return statements also lowered to regassign of returned value to retval0. 
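For reference while reading the hunks below: the offsets stored into the outgoing argument area by the new iassignspoff and blkassignoff statements come from ParmLocator::LocateNextParm in lmbc_memlayout.cpp, which assigns each actual the current running offset and then advances it by the actual's size plus right padding (small integers pad to a 4-byte slot, f32 to an 8-byte slot, aggregates to the next multiple of 8). A self-contained C++ sketch of just that bookkeeping; NextActualOffset and ArgKind are illustrative names, not from the patch:

    #include <cstdint>
    #include <cstdio>

    enum class ArgKind { kSmallInt, kInt32or64, kFloat32, kAggregate };

    // Mirrors the right-padding rules of LocateNextParm: return the offset
    // assigned to this actual and advance the running offset past its padding.
    static int32_t NextActualOffset(int32_t &lastOffset, ArgKind kind, int32_t size) {
      int32_t offset = lastOffset;
      int32_t rightPad = 0;
      switch (kind) {
        case ArgKind::kSmallInt:  rightPad = 4 - size; break;                       // u1/u8/i8/u16/i16
        case ArgKind::kInt32or64: rightPad = 0; break;                              // i32/i64/pointers
        case ArgKind::kFloat32:   rightPad = 8 - size; break;                       // f32 widened to f64 slot
        case ArgKind::kAggregate: rightPad = ((size + 7) / 8) * 8 - size; break;    // pad to multiple of 8
      }
      lastOffset = offset + size + rightPad;
      return offset;
    }

    int main() {
      int32_t last = 0;
      // e.g. foo(i8, i32, 12-byte struct): offsets 0, 4, 8 in the outgoing-args area
      std::printf("%d ", NextActualOffset(last, ArgKind::kSmallInt, 1));
      std::printf("%d ", NextActualOffset(last, ArgKind::kInt32or64, 4));
      std::printf("%d\n", NextActualOffset(last, ArgKind::kAggregate, 12));
      return 0;
    }

For a call like foo(i8, i32, 12-byte struct) this yields offsets 0, 4 and 8, which is where the lowered iassignspoff and blkassignoff statements store the actuals relative to the stack pointer.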
--- src/mapleall/maple_ir/include/keywords.def | 1 + src/mapleall/maple_ir/include/mir_builder.h | 2 +- src/mapleall/maple_ir/include/mir_function.h | 8 ++ src/mapleall/maple_ir/include/mir_nodes.h | 32 +++++- src/mapleall/maple_ir/include/mir_parser.h | 2 + src/mapleall/maple_ir/include/opcodes.def | 2 + src/mapleall/maple_ir/src/bin_func_export.cpp | 8 ++ src/mapleall/maple_ir/src/bin_func_import.cpp | 13 ++- src/mapleall/maple_ir/src/mir_builder.cpp | 4 +- src/mapleall/maple_ir/src/mir_function.cpp | 5 + src/mapleall/maple_ir/src/mir_nodes.cpp | 9 +- src/mapleall/maple_ir/src/mir_parser.cpp | 45 ++++++++- src/mapleall/maple_me/include/lmbc_lower.h | 2 + src/mapleall/maple_me/src/lmbc_lower.cpp | 99 ++++++++++++++++++- src/mapleall/maple_me/src/lmbc_memlayout.cpp | 3 +- 15 files changed, 224 insertions(+), 11 deletions(-) diff --git a/src/mapleall/maple_ir/include/keywords.def b/src/mapleall/maple_ir/include/keywords.def index 15aa540a4b..7aa2960aac 100644 --- a/src/mapleall/maple_ir/include/keywords.def +++ b/src/mapleall/maple_ir/include/keywords.def @@ -55,6 +55,7 @@ // per-function declaration keywords KEYWORD(framesize) KEYWORD(upformalsize) + KEYWORD(outparmsize) KEYWORD(moduleid) KEYWORD(funcsize) KEYWORD(funcid) diff --git a/src/mapleall/maple_ir/include/mir_builder.h b/src/mapleall/maple_ir/include/mir_builder.h index 453eb61633..0ee756d70e 100755 --- a/src/mapleall/maple_ir/include/mir_builder.h +++ b/src/mapleall/maple_ir/include/mir_builder.h @@ -240,7 +240,7 @@ class MIRBuilder { RegassignNode *CreateStmtRegassign(PrimType pty, PregIdx regIdx, BaseNode *src); IassignNode *CreateStmtIassign(const MIRType &type, FieldID fieldID, BaseNode *addr, BaseNode *src); IassignoffNode *CreateStmtIassignoff(PrimType pty, int32 offset, BaseNode *opnd0, BaseNode *src); - IassignFPoffNode *CreateStmtIassignFPoff(PrimType pty, int32 offset, BaseNode *src); + IassignFPoffNode *CreateStmtIassignFPoff(Opcode op, PrimType pty, int32 offset, BaseNode *src); CallNode *CreateStmtCall(PUIdx puIdx, const MapleVector &args, Opcode opcode = OP_call); CallNode *CreateStmtCall(const std::string &name, const MapleVector &args); CallNode *CreateStmtVirtualCall(PUIdx puIdx, const MapleVector &args) { diff --git a/src/mapleall/maple_ir/include/mir_function.h b/src/mapleall/maple_ir/include/mir_function.h index 7468d9ce26..318d8a7dbc 100644 --- a/src/mapleall/maple_ir/include/mir_function.h +++ b/src/mapleall/maple_ir/include/mir_function.h @@ -789,6 +789,13 @@ class MIRFunction { upFormalSize = size; } + uint16 GetOutParmSize() const { + return outParmSize; + } + void SetOutParmSize(uint16 size) { + outParmSize = size; + } + uint16 GetModuleId() const { return moduleID; } @@ -1188,6 +1195,7 @@ class MIRFunction { uint8_t layoutType = kLayoutUnused; uint16 frameSize = 0; uint16 upFormalSize = 0; + uint16 outParmSize = 0; uint16 moduleID = 0; uint32 funcSize = 0; // size of code in words uint32 tempCount = 0; diff --git a/src/mapleall/maple_ir/include/mir_nodes.h b/src/mapleall/maple_ir/include/mir_nodes.h index 6c1df6ab78..54a99c6b03 100755 --- a/src/mapleall/maple_ir/include/mir_nodes.h +++ b/src/mapleall/maple_ir/include/mir_nodes.h @@ -2794,13 +2794,14 @@ class IassignoffNode : public BinaryStmtNode { int32 offset = 0; }; +// for iassignfpoff, iassignspoff, iassignpcoff class IassignFPoffNode : public UnaryStmtNode { public: - IassignFPoffNode() : UnaryStmtNode(OP_iassignfpoff) {} + IassignFPoffNode(Opcode o) : UnaryStmtNode(o) {} - explicit IassignFPoffNode(int32 ofst) : UnaryStmtNode(OP_iassignfpoff), 
offset(ofst) {} + explicit IassignFPoffNode(Opcode o, int32 ofst) : UnaryStmtNode(o), offset(ofst) {} - IassignFPoffNode(PrimType primType, int32 offset, BaseNode *src) : IassignFPoffNode(offset) { + IassignFPoffNode(Opcode o, PrimType primType, int32 offset, BaseNode *src) : IassignFPoffNode(o, offset) { BaseNodeT::SetPrimType(primType); UnaryStmtNode::SetOpnd(src, 0); } @@ -2831,6 +2832,31 @@ class IassignFPoffNode : public UnaryStmtNode { typedef IassignFPoffNode IassignPCoffNode; +class BlkassignoffNode : public BinaryStmtNode { + public: + BlkassignoffNode() : BinaryStmtNode(OP_blkassignoff) { ptyp = PTY_agg; } + explicit BlkassignoffNode(int32 ofst, int32 bsize) : BinaryStmtNode(OP_blkassignoff), offset(ofst), blockSize(bsize) { ptyp = PTY_agg; } + explicit BlkassignoffNode(int32 ofst, int32 bsize, BaseNode *dest, BaseNode *src) : BinaryStmtNode(OP_blkassignoff), offset(ofst), blockSize(bsize) { + ptyp = PTY_agg; + SetBOpnd(dest, 0); + SetBOpnd(src, 1); + } + ~BlkassignoffNode() = default; + + void Dump(int32 indent) const override; + + BlkassignoffNode *CloneTree(MapleAllocator &allocator) const override { + BlkassignoffNode *node = allocator.GetMemPool()->New(offset, blockSize); + node->SetStmtID(stmtIDNext++); + node->SetBOpnd(GetBOpnd(0)->CloneTree(allocator), 0); + node->SetBOpnd(GetBOpnd(1)->CloneTree(allocator), 1); + return node; + } + public: + int32 offset = 0; + int32 blockSize = 0; +}; + // used by return, syncenter, syncexit class NaryStmtNode : public StmtNode, public NaryOpnds { public: diff --git a/src/mapleall/maple_ir/include/mir_parser.h b/src/mapleall/maple_ir/include/mir_parser.h index 14013baf87..249b619b07 100755 --- a/src/mapleall/maple_ir/include/mir_parser.h +++ b/src/mapleall/maple_ir/include/mir_parser.h @@ -110,6 +110,7 @@ class MIRParser { bool ParseStmtIassign(StmtNodePtr &stmt); bool ParseStmtIassignoff(StmtNodePtr &stmt); bool ParseStmtIassignFPoff(StmtNodePtr &stmt); + bool ParseStmtBlkassignoff(StmtNodePtr &stmt); bool ParseStmtDoloop(StmtNodePtr&); bool ParseStmtForeachelem(StmtNodePtr&); bool ParseStmtDowhile(StmtNodePtr&); @@ -284,6 +285,7 @@ class MIRParser { bool ParseStmtBlockForType(); bool ParseStmtBlockForFrameSize(); bool ParseStmtBlockForUpformalSize(); + bool ParseStmtBlockForOutParmSize(); bool ParseStmtBlockForModuleID(); bool ParseStmtBlockForFuncSize(); bool ParseStmtBlockForFuncID(); diff --git a/src/mapleall/maple_ir/include/opcodes.def b/src/mapleall/maple_ir/include/opcodes.def index d7863963d1..d1265eda42 100755 --- a/src/mapleall/maple_ir/include/opcodes.def +++ b/src/mapleall/maple_ir/include/opcodes.def @@ -218,3 +218,5 @@ OPCODE(dreadoff, dreadoffNode, OPCODEHASSSAUSE, 12) OPCODE(addrofoff, addrofoffNode, 0, 12) OPCODE(dassignoff, DassignoffNode, (OPCODEISSTMT | OPCODEHASSSADEF), 8) + OPCODE(iassignspoff, IassignFPoffNode, OPCODEISSTMT, 8) + OPCODE(blkassignoff, BlkassignoffNode, OPCODEISSTMT, 8) diff --git a/src/mapleall/maple_ir/src/bin_func_export.cpp b/src/mapleall/maple_ir/src/bin_func_export.cpp index 89fb69531a..07b1d9eeab 100644 --- a/src/mapleall/maple_ir/src/bin_func_export.cpp +++ b/src/mapleall/maple_ir/src/bin_func_export.cpp @@ -43,6 +43,7 @@ void BinaryMplExport::OutputFuncIdInfo(MIRFunction *func) { if (mod.GetFlavor() == kFlavorLmbc) { WriteNum(func->GetUpFormalSize()); WriteNum(func->GetFrameSize()); + WriteNum(func->GetOutParmSize()); } WriteNum(~kBinFuncIdInfoStart); } @@ -441,12 +442,19 @@ void BinaryMplExport::OutputBlockNode(BlockNode *block) { WriteNum(iassoff->GetOffset()); break; } + case 
OP_iassignspoff: case OP_iassignfpoff: { IassignFPoffNode *iassfpoff = static_cast(s); WriteNum(iassfpoff->GetPrimType()); WriteNum(iassfpoff->GetOffset()); break; } + case OP_blkassignoff: { + BlkassignoffNode *bass = static_cast(s); + WriteNum(bass->offset); + WriteNum(bass->blockSize); + break; + } case OP_call: case OP_virtualcall: case OP_virtualicall: diff --git a/src/mapleall/maple_ir/src/bin_func_import.cpp b/src/mapleall/maple_ir/src/bin_func_import.cpp index 5891b5cf72..7195b19172 100644 --- a/src/mapleall/maple_ir/src/bin_func_import.cpp +++ b/src/mapleall/maple_ir/src/bin_func_import.cpp @@ -49,6 +49,7 @@ void BinaryMplImport::ImportFuncIdInfo(MIRFunction *func) { if (mod.GetFlavor() == kFlavorLmbc) { func->SetUpFormalSize(ReadNum()); func->SetFrameSize(ReadNum()); + func->SetOutParmSize(ReadNum()); } tag = ReadNum(); CHECK_FATAL(tag == ~kBinFuncIdInfoStart, "pattern mismatch in ImportFuncIdInfo()"); @@ -547,14 +548,24 @@ BlockNode *BinaryMplImport::ImportBlockNode(MIRFunction *func) { stmt = s; break; } + case OP_iassignspoff: case OP_iassignfpoff: { - IassignFPoffNode *s = func->GetCodeMemPool()->New(); + IassignFPoffNode *s = func->GetCodeMemPool()->New(op); s->SetPrimType((PrimType)ReadNum()); s->SetOffset(static_cast(ReadNum())); s->SetOpnd(ImportExpression(func), 0); stmt = s; break; } + case OP_blkassignoff: { + BlkassignoffNode *s = func->GetCodeMemPool()->New(); + s->offset = static_cast(ReadNum()); + s->blockSize = static_cast(ReadNum()); + s->SetOpnd(ImportExpression(func), 0); + s->SetOpnd(ImportExpression(func), 1); + stmt = s; + break; + } case OP_call: case OP_virtualcall: case OP_virtualicall: diff --git a/src/mapleall/maple_ir/src/mir_builder.cpp b/src/mapleall/maple_ir/src/mir_builder.cpp index 22873baaa6..abec8b50d9 100755 --- a/src/mapleall/maple_ir/src/mir_builder.cpp +++ b/src/mapleall/maple_ir/src/mir_builder.cpp @@ -800,8 +800,8 @@ IassignoffNode *MIRBuilder::CreateStmtIassignoff(PrimType pty, int32 offset, Bas return GetCurrentFuncCodeMp()->New(pty, offset, addr, src); } -IassignFPoffNode *MIRBuilder::CreateStmtIassignFPoff(PrimType pty, int32 offset, BaseNode *src) { - return GetCurrentFuncCodeMp()->New(pty, offset, src); +IassignFPoffNode *MIRBuilder::CreateStmtIassignFPoff(Opcode op, PrimType pty, int32 offset, BaseNode *src) { + return GetCurrentFuncCodeMp()->New(op, pty, offset, src); } CallNode *MIRBuilder::CreateStmtCall(PUIdx puIdx, const MapleVector &args, Opcode opCode) { diff --git a/src/mapleall/maple_ir/src/mir_function.cpp b/src/mapleall/maple_ir/src/mir_function.cpp index 4053b90321..75cafccb6b 100644 --- a/src/mapleall/maple_ir/src/mir_function.cpp +++ b/src/mapleall/maple_ir/src/mir_function.cpp @@ -456,6 +456,11 @@ void MIRFunction::DumpFuncBody(int32 indent) { DumpFrame(indent); } + if (GetOutParmSize() > 0) { + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "outparmsize " << GetOutParmSize() << '\n'; + } + if (GetModuleId() > 0) { PrintIndentation(indent + 1); LogInfo::MapleLogger() << "moduleID " << static_cast(GetModuleId()) << '\n'; diff --git a/src/mapleall/maple_ir/src/mir_nodes.cpp b/src/mapleall/maple_ir/src/mir_nodes.cpp index 694d2974e8..6885f4bfb1 100755 --- a/src/mapleall/maple_ir/src/mir_nodes.cpp +++ b/src/mapleall/maple_ir/src/mir_nodes.cpp @@ -764,6 +764,13 @@ void IassignFPoffNode::Dump(int32 indent) const { LogInfo::MapleLogger() << '\n'; } +void BlkassignoffNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << " " << offset << " " << blockSize; + 
BinaryOpnds::Dump(indent); + LogInfo::MapleLogger() << '\n'; +} + void GotoNode::Dump(int32 indent) const { StmtNode::DumpBase(indent); if (offset == 0) { @@ -865,7 +872,7 @@ void UnaryStmtNode::DumpOpnd(int32 indent) const { if (uOpnd != nullptr) { uOpnd->Dump(indent); } - LogInfo::MapleLogger() << ")\n"; + LogInfo::MapleLogger() << ")"; } void UnaryStmtNode::Dump(int32 indent) const { diff --git a/src/mapleall/maple_ir/src/mir_parser.cpp b/src/mapleall/maple_ir/src/mir_parser.cpp index e8773d1071..7181283181 100755 --- a/src/mapleall/maple_ir/src/mir_parser.cpp +++ b/src/mapleall/maple_ir/src/mir_parser.cpp @@ -215,12 +215,13 @@ bool MIRParser::ParseStmtIassignoff(StmtNodePtr &stmt) { } bool MIRParser::ParseStmtIassignFPoff(StmtNodePtr &stmt) { + Opcode op = lexer.GetTokenKind() == TK_iassignfpoff ? OP_iassignfpoff : OP_iassignspoff; if (!IsPrimitiveType(lexer.NextToken())) { Error("expect type parsing binary operator but get "); return false; } // iassignfpoff ( ) - auto *iAssignOff = mod.CurFuncCodeMemPool()->New(); + auto *iAssignOff = mod.CurFuncCodeMemPool()->New(op); iAssignOff->SetPrimType(GetPrimitiveType(lexer.GetTokenKind())); if (lexer.NextToken() != TK_intconst) { Error("expect offset but get "); @@ -238,6 +239,33 @@ bool MIRParser::ParseStmtIassignFPoff(StmtNodePtr &stmt) { return true; } +bool MIRParser::ParseStmtBlkassignoff(StmtNodePtr &stmt) { + // blkassignoff (, ) + BlkassignoffNode *bassignoff = mod.CurFuncCodeMemPool()->New(); + if (lexer.NextToken() != TK_intconst) { + Error("expect offset but get "); + return false; + } + bassignoff->offset = lexer.GetTheIntVal(); + if (lexer.NextToken() != TK_intconst) { + Error("expect size but get "); + return false; + } + bassignoff->blockSize = lexer.GetTheIntVal(); + lexer.NextToken(); + BaseNode *destAddr = nullptr; + BaseNode *srcAddr = nullptr; + // parse 2 operands, the dest address followed by src address + if (!ParseExprTwoOperand(destAddr, srcAddr)) { + return false; + } + bassignoff->SetOpnd(destAddr, 0); + bassignoff->SetOpnd(srcAddr, 1); + lexer.NextToken(); + stmt = bassignoff; + return true; +} + bool MIRParser::ParseStmtDoloop(StmtNodePtr &stmt) { // syntax: doloop (, , ) { // } @@ -1960,6 +1988,18 @@ bool MIRParser::ParseStmtBlockForUpformalSize() { return true; } +bool MIRParser::ParseStmtBlockForOutParmSize() { + MIRFunction *fn = paramCurrFuncForParseStmtBlock; + lexer.NextToken(); + if (lexer.GetTokenKind() != TK_intconst) { + Error("expect integer after outparmsize but get "); + return false; + } + fn->SetOutParmSize(lexer.GetTheIntVal()); + lexer.NextToken(); + return true; +} + bool MIRParser::ParseStmtBlockForModuleID() { MIRFunction *fn = paramCurrFuncForParseStmtBlock; lexer.NextToken(); @@ -3314,6 +3354,8 @@ std::map MIRParser::InitFuncPtrMapForPar funcPtrMap[TK_iassign] = &MIRParser::ParseStmtIassign; funcPtrMap[TK_iassignoff] = &MIRParser::ParseStmtIassignoff; funcPtrMap[TK_iassignfpoff] = &MIRParser::ParseStmtIassignFPoff; + funcPtrMap[TK_iassignspoff] = &MIRParser::ParseStmtIassignFPoff; + funcPtrMap[TK_blkassignoff] = &MIRParser::ParseStmtBlkassignoff; funcPtrMap[TK_regassign] = &MIRParser::ParseStmtRegassign; funcPtrMap[TK_doloop] = &MIRParser::ParseStmtDoloop; funcPtrMap[TK_foreachelem] = &MIRParser::ParseStmtForeachelem; @@ -3411,6 +3453,7 @@ std::map MIRParser::InitFuncPtrMapF funcPtrMap[TK_type] = &MIRParser::ParseStmtBlockForType; funcPtrMap[TK_framesize] = &MIRParser::ParseStmtBlockForFrameSize; funcPtrMap[TK_upformalsize] = &MIRParser::ParseStmtBlockForUpformalSize; + 
funcPtrMap[TK_outparmsize] = &MIRParser::ParseStmtBlockForOutParmSize; funcPtrMap[TK_moduleid] = &MIRParser::ParseStmtBlockForModuleID; funcPtrMap[TK_funcsize] = &MIRParser::ParseStmtBlockForFuncSize; funcPtrMap[TK_funcid] = &MIRParser::ParseStmtBlockForFuncID; diff --git a/src/mapleall/maple_me/include/lmbc_lower.h b/src/mapleall/maple_me/include/lmbc_lower.h index 4e9b3ff3b7..90145a4929 100644 --- a/src/mapleall/maple_me/include/lmbc_lower.h +++ b/src/mapleall/maple_me/include/lmbc_lower.h @@ -44,6 +44,8 @@ class LMBCLowerer { void LowerDassign(DassignNode *, BlockNode *); void LowerIassign(IassignNode *, BlockNode *); void LowerAggIassign(BlockNode *, IassignNode *); + void LowerReturn(NaryStmtNode *retNode, BlockNode *newblk); + void LowerCall(NaryStmtNode *callNode, BlockNode *newblk); BlockNode *LowerBlock(BlockNode *); void LoadFormalsAssignedToPregs(); void LowerFunction(); diff --git a/src/mapleall/maple_me/src/lmbc_lower.cpp b/src/mapleall/maple_me/src/lmbc_lower.cpp index f5dd00a939..30c22ad26e 100644 --- a/src/mapleall/maple_me/src/lmbc_lower.cpp +++ b/src/mapleall/maple_me/src/lmbc_lower.cpp @@ -325,7 +325,7 @@ void LMBCLowerer::LowerDassign(DassignNode *dsnode, BlockNode *newblk) { } PregIdx spcreg = GetSpecialRegFromSt(symbol); if (spcreg == -kSregFp) { - IassignFPoffNode *iassignoff = mirBuilder->CreateStmtIassignFPoff( + IassignFPoffNode *iassignoff = mirBuilder->CreateStmtIassignFPoff(OP_iassignfpoff, ptypused, memlayout->sym_alloc_table[symbol->GetStIndex()].offset + offset, dsnode->Opnd(0)); newblk->AddStatement(iassignoff); } else { @@ -519,6 +519,90 @@ void LMBCLowerer::LowerIassign(IassignNode *iassign, BlockNode *newblk) { } } +// called only if the return has > 1 operand; assume prior lowering already +// converted any return of structs to be via fake parameter +void LMBCLowerer::LowerReturn(NaryStmtNode *retNode, BlockNode *newblk) { + CHECK_FATAL(retNode->NumOpnds() <= 2, "LMBCLowerer::LowerReturn: more than 2 return values NYI"); + for (int i = 0; i < retNode->NumOpnds(); i++) { + CHECK_FATAL(retNode->Opnd(i)->GetPrimType() != PTY_agg, "LMBCLowerer::LowerReturn: return of aggregate needs to be handled first"); + // insert regassign for the returned value + BaseNode *rhs = LowerExpr(retNode->Opnd(i)); + RegassignNode *regasgn = mirBuilder->CreateStmtRegassign(rhs->GetPrimType(), i == 0 ? 
-kSregRetval0 : -kSregRetval1, rhs);
+    newblk->AddStatement(regasgn);
+  }
+  retNode->GetNopnd().clear(); // remove the return operands
+  retNode->SetNumOpnds(0);
+  newblk->AddStatement(retNode);
+}
+
+void LMBCLowerer::LowerCall(NaryStmtNode *naryStmt, BlockNode *newblk) {
+  // go through each parameter
+  uint32 i = 0;
+  if (naryStmt->GetOpCode() == OP_icall || naryStmt->GetOpCode() == OP_icallassigned) {
+    i = 1;
+  }
+  ParmLocator parmlocator;
+  for (; i < naryStmt->NumOpnds(); i++) {
+    BaseNode *opnd = naryStmt->Opnd(i);
+    MIRType *ty = nullptr;
+    // get ty for this parameter
+    if (opnd->GetPrimType() != PTY_agg) {
+      ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(opnd->GetPrimType()));
+    } else {
+      Opcode opnd_opcode = opnd->GetOpCode();
+      CHECK_FATAL(opnd_opcode == OP_dread || opnd_opcode == OP_iread, "");
+      if (opnd_opcode == OP_dread) {
+        AddrofNode *dread = static_cast<AddrofNode *>(opnd);
+        MIRSymbol *sym = func->GetLocalOrGlobalSymbol(dread->GetStIdx());
+        ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(sym->GetTyIdx());
+        if (dread->GetFieldID() != 0) {
+          CHECK_FATAL(ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeClass, "");
+          FieldPair thepair = static_cast<MIRStructType *>(ty)->TraverseToField(dread->GetFieldID());
+          ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first);
+        }
+      } else { // OP_iread
+        IreadNode *iread = static_cast<IreadNode *>(opnd);
+        ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread->GetTyIdx());
+        CHECK_FATAL(ty->GetKind() == kTypePointer, "");
+        ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<MIRPtrType *>(ty)->GetPointedTyIdx());
+        if (iread->GetFieldID() != 0) {
+          CHECK_FATAL(ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeClass, "");
+          FieldPair thepair = static_cast<MIRStructType *>(ty)->TraverseToField(iread->GetFieldID());
+          ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first);
+        }
+      }
+    }
+    PLocInfo ploc;
+    parmlocator.LocateNextParm(ty, ploc);
+    if (opnd->GetPrimType() != PTY_agg) {
+      IassignFPoffNode *iass = mirBuilder->CreateStmtIassignFPoff(OP_iassignspoff, opnd->GetPrimType(), ploc.memoffset, LowerExpr(opnd));
+      newblk->AddStatement(iass);
+    } else {
+      BlkassignoffNode *bass = mirModule->CurFuncCodeMemPool()->New<BlkassignoffNode>(ploc.memoffset, ploc.memsize);
+      bass->SetBOpnd(mirBuilder->CreateExprRegread(PTY_a64, -kSregSp), 0);
+      // the operand is either OP_dread or OP_iread; use its address instead
+      if (opnd->GetOpCode() == OP_dread) {
+        opnd->SetOpCode(OP_addrof);
+      } else {
+        opnd->SetOpCode(OP_iaddrof);
+      }
+      bass->SetBOpnd(opnd, 1);
+      newblk->AddStatement(bass);
+    }
+  }
+  BaseNode *opnd0 = nullptr;
+  if (naryStmt->GetOpCode() == OP_icall || naryStmt->GetOpCode() == OP_icallassigned) {
+    opnd0 = naryStmt->Opnd(0);
+    naryStmt->GetNopnd().clear(); // remove the call operands
+    naryStmt->GetNopnd().push_back(opnd0);
+    naryStmt->SetNumOpnds(1);
+  } else {
+    naryStmt->GetNopnd().clear(); // remove the call operands
+    naryStmt->SetNumOpnds(0);
+  }
+  newblk->AddStatement(naryStmt);
+}
+
 BlockNode *LMBCLowerer::LowerBlock(BlockNode *block) {
   BlockNode *newblk = mirModule->CurFuncCodeMemPool()->New<BlockNode>();
   if (!block->GetFirst()) {
     return newblk;
   }
@@ -542,6 +626,20 @@ BlockNode *LMBCLowerer::LowerBlock(BlockNode *block) {
         LowerIassign(static_cast<IassignNode *>(stmt), newblk);
         break;
       }
+      case OP_return: {
+        NaryStmtNode *retNode = static_cast<NaryStmtNode *>(stmt);
+        if (retNode->GetNopndSize() == 0) {
+          newblk->AddStatement(stmt);
+        } else {
+          LowerReturn(retNode, newblk);
+        }
+        break;
+      }
+      case OP_call:
+      case OP_icall: {
+        LowerCall(static_cast<NaryStmtNode *>(stmt), newblk);
+        break;
+      }
       default: {
         for (size_t i = 0; i < stmt->NumOpnds(); ++i) {
           stmt->SetOpnd(LowerExpr(stmt->Opnd(i)), i);
diff --git a/src/mapleall/maple_me/src/lmbc_memlayout.cpp b/src/mapleall/maple_me/src/lmbc_memlayout.cpp
index 191b6e43bc..f628e8d71f 100644
--- a/src/mapleall/maple_me/src/lmbc_memlayout.cpp
+++ b/src/mapleall/maple_me/src/lmbc_memlayout.cpp
@@ -170,6 +170,7 @@ void LMBCMemLayout::LayoutStackFrame(void) {
   // going over all outgoing calls and get the maximum space needed for the
   // actuals
   seg_actual.size = FindLargestActualArea();
+  func->SetOutParmSize(seg_actual.size);
 
   // allocate seg_actual in seg_SPbased
   seg_actual.how_alloc.mem_segment = &seg_SPbased;
@@ -329,7 +330,7 @@ GlobalMemLayout::GlobalMemLayout(maplebe::BECommon *b, MIRModule *mod, MapleAll
     if (!sym) {
       continue;
     }
-    if (sym->GetStorageClass() != kScPstatic && sym->GetStorageClass() != kScFstatic) {
+    if (sym->GetStorageClass() != kScGlobal && sym->GetStorageClass() != kScFstatic) {
      continue;
     }
     if (sym->GetType()->GetAlign() != curalign) {
-- 
Gitee
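A closing note on the GP-based global block touched by the last hunk: GlobalMemLayout's constructor walks the global symbol table once per alignment bucket, from 8 down to 1, and appends every global or file-static symbol whose type alignment equals the current bucket, rounding the running block size up to that alignment before recording the symbol's offset, then rounding the whole block up to pointer size at the end. A compact C++ sketch of that placement policy; SymInfo, LayoutGpBlock and RoundUpTo are illustrative names only, and an 8-byte pointer size is assumed:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct SymInfo {
      uint32_t size;
      uint32_t align;   // power of two: 1, 2, 4 or 8
      uint32_t offset;  // filled in by LayoutGpBlock
    };

    static uint32_t RoundUpTo(uint32_t v, uint32_t align) {
      return (v + align - 1) & ~(align - 1);
    }

    // Place symbols into one GP-based block, largest alignment bucket first,
    // mirroring the bucketed loop in GlobalMemLayout's constructor.
    static uint32_t LayoutGpBlock(std::vector<SymInfo> &syms) {
      uint32_t blockSize = 0;
      for (uint32_t curAlign = 8; curAlign != 0; curAlign >>= 1) {
        for (SymInfo &s : syms) {
          if (s.align != curAlign) {
            continue;
          }
          blockSize = RoundUpTo(blockSize, s.align);
          s.offset = blockSize;
          blockSize += s.size;
        }
      }
      return RoundUpTo(blockSize, 8);  // final size rounded up to the assumed 8-byte pointer size
    }

    int main() {
      std::vector<SymInfo> syms = {{8, 8, 0}, {1, 1, 0}, {4, 4, 0}, {2, 2, 0}};
      uint32_t total = LayoutGpBlock(syms);
      for (const SymInfo &s : syms) {
        std::printf("size %u align %u -> offset %u\n", s.size, s.align, s.offset);
      }
      std::printf("block size %u\n", total);
      return 0;
    }

Bucketing by descending alignment keeps padding holes small without having to sort the symbol table itself.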