diff --git a/src/bin/jbc2mpl b/src/bin/jbc2mpl
index ce80cf95074a54a3022d571e57dde99960c168f4..219f1789ff182078e69a61fe86495fbc60f118d5 100755
Binary files a/src/bin/jbc2mpl and b/src/bin/jbc2mpl differ
diff --git a/src/bin/maple b/src/bin/maple
index f10aeeb18bbeba96a01b459167cf703e94cc8306..654c437aa9898c07ccf1d1c10ce448d4ae55c323 100755
Binary files a/src/bin/maple and b/src/bin/maple differ
diff --git a/src/deplibs/libmplphase.a b/src/deplibs/libmplphase.a
index ce197c6660e80be61ab2d04342f195d70313e722..01b920f6caf1e6e9bd5341e918405fce3c0b6b78 100644
Binary files a/src/deplibs/libmplphase.a and b/src/deplibs/libmplphase.a differ
diff --git a/src/deplibs/libmplutil.a b/src/deplibs/libmplutil.a
index 53d674477177435dee7d5dd778a67d7579d71917..4231e2da9201656c77bc6e535d63556b0ffce1cc 100644
Binary files a/src/deplibs/libmplutil.a and b/src/deplibs/libmplutil.a differ
diff --git a/src/maple_driver/defs/phases.def b/src/maple_driver/defs/phases.def
index 7f2a50401d1de5d653580d3c1b8dcd719a33477d..0b9e0b60f2c1e0206e9392cd7f16c11bbcd2d6ab 100644
--- a/src/maple_driver/defs/phases.def
+++ b/src/maple_driver/defs/phases.def
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved.
+ * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved.
  *
  * OpenArkCompiler is licensed under the Mulan PSL v1.
  * You can use this software according to the terms and conditions of the Mulan PSL v1.
@@ -14,23 +14,41 @@
  */
 // Phase arguments are: name, condition. By default, all phases are required, so the condition value is 'true'.
 // You can use condition to control these phases and your custom phases. E.g. ADD_PHASE("custom_phase", option1 == value1 [more conditions...])
+ADD_PHASE("clone", true)
 ADD_PHASE("classhierarchy", true)
+ADD_PHASE("callgraph", true)
 ADD_PHASE("vtableanalysis", true)
 ADD_PHASE("reflectionanalysis", true)
 ADD_PHASE("gencheckcast", true)
 ADD_PHASE("javaintrnlowering", true)
+ADD_PHASE("analyzector", true)
 // mephase begin
+ADD_PHASE("bypatheh", MeOption::optLevel == 2)
+ADD_PHASE("loopcanon", MeOption::optLevel == 2)
+ADD_PHASE("splitcriticaledge", MeOption::optLevel == 2)
 ADD_PHASE("ssatab", true)
 ADD_PHASE("aliasclass", true)
 ADD_PHASE("ssa", true)
-ADD_PHASE("analyzerc", true)
+ADD_PHASE("dse", MeOption::optLevel == 2)
+ADD_PHASE("abcopt", MeOption::optLevel == 2)
+ADD_PHASE("ssadevirt", MeOption::optLevel == 2)
+ADD_PHASE("hprop", MeOption::optLevel == 2)
+ADD_PHASE("hdse", MeOption::optLevel == 2)
+ADD_PHASE("may2dassign", MeOption::optLevel == 2)
+ADD_PHASE("condbasednpc", MeOption::optLevel == 2)
+ADD_PHASE("epre", MeOption::optLevel == 2)
+ADD_PHASE("stmtpre", MeOption::optLevel == 2)
+ADD_PHASE("analyzerc", MeOption::optLevel == 2)
 ADD_PHASE("rclowering", true)
-ADD_PHASE("gclowering", true)
+ADD_PHASE("rename2preg", MeOption::optLevel == 2)
+//ADD_PHASE("lpre", MeOption::optLevel == 2)
+ADD_PHASE("pregrename", MeOption::optLevel == 2)
 ADD_PHASE("emit", true)
 // mephase end
 ADD_PHASE("GenNativeStubFunc", true)
 ADD_PHASE("clinit", true)
 ADD_PHASE("VtableImpl", true)
+ADD_PHASE("CodeReLayout", MeOption::optLevel == 2)
 ADD_PHASE("javaehlower", true)
-ADD_PHASE("DecoupleStatic", true)
 ADD_PHASE("MUIDReplacement", true)
+ADD_PHASE("ConstantFold", true)
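phases.def is an X-macro list: as the comment above says, each entry is a (name, condition) pair, and the new MeOption::optLevel == 2 conditions make a phase run only at O2. A minimal illustrative sketch of how such a list is typically consumed (this is not the driver's actual expansion code; RunPhase is an invented dispatcher):

    // Hypothetical consumer of phases.def: ADD_PHASE's condition argument
    // becomes an ordinary C++ expression at the include site, so
    // "MeOption::optLevel == 2" simply guards a phase at run time.
    void RunPhase(const char *name);  // invented; dispatch to the real phase

    static void RunAllPhases() {
    #define ADD_PHASE(name, condition) \
      if (condition) {                 \
        RunPhase(name);                \
      }
    #include "phases.def"
    #undef ADD_PHASE
    }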
diff --git a/src/maple_driver/include/compiler.h b/src/maple_driver/include/compiler.h
index ecc64d5cdac511bb76fa34e313cc9e1fe640231d..a02e31768a1ec83d456811279af5e6ee83f46e43 100644
--- a/src/maple_driver/include/compiler.h
+++ b/src/maple_driver/include/compiler.h
@@ -116,6 +116,9 @@
 class MapleCombCompiler : public Compiler {
  private:
   std::string realRunningExe;
+  void DecideMeRealLevel(MeOption &meOption, const std::vector<mapleOption::Option> &inputOptions);
+  void DecideMpl2MplRealLevel(Options &mpl2mplOption, const std::vector<mapleOption::Option> &inputOptions,
+                              const MplOptions &options);
   std::unordered_set<std::string> GetFinalOutputs(const MplOptions &mplOptions) const override;
   void GetTmpFilesToDelete(const MplOptions &mplOptions, std::vector<std::string> &tempFiles) const override;
   MeOption *MakeMeOptions(const MplOptions &options, MemPool &memPool);
diff --git a/src/maple_driver/include/compiler_factory.h b/src/maple_driver/include/compiler_factory.h
index 33aad35c5f89516c93f9c2c978ff0ea7f2657acd..d6dc772338e547173a0108c254356f54ee807343 100644
--- a/src/maple_driver/include/compiler_factory.h
+++ b/src/maple_driver/include/compiler_factory.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved.
+ * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved.
  *
  * OpenArkCompiler is licensed under the Mulan PSL v1.
  * You can use this software according to the terms and conditions of the Mulan PSL v1.
@@ -33,6 +33,7 @@ class CompilerFactory {
   ErrorCode Compile(const MplOptions &mplOptions);

  private:
+  bool compileFinished = false;
   CompilerFactory();
   void Insert(const std::string &name, Compiler *value);
   ErrorCode DeleteTmpFiles(const MplOptions &mplOptions, const std::vector<std::string> &tempFiles,
diff --git a/src/maple_driver/include/usages.h b/src/maple_driver/include/usages.h
index 42cf81cb28ecd25fa4cdeeebb526df2514ccc7eb..7f07a1d612e1664b434318a375b79d08c234a9ff 100644
--- a/src/maple_driver/include/usages.h
+++ b/src/maple_driver/include/usages.h
@@ -62,6 +62,7 @@ enum OptionIndex : uint64 {
   kStmtPrepuLimit,
   kLpreLimit,
   kLprepulLimit,
+  kPregreNameLimit,
   kDelrcpuLimit,
   kEpreIncludeRef,
   kNoEpreIncludeRef,
@@ -110,6 +111,10 @@ enum OptionIndex : uint64 {
   kProfileColdCount,
   kProfileHotRate,
   kProfileColdRate,
+  kMpl2MplNativeOpt,
+  kMpl2MplOptL1,
+  kMpl2MplOptL2,
+  kMpl2MplNoDot,
   // ----------mplcg begin---------
   kCGQuiet,
   kPie,
diff --git a/src/maple_driver/src/compiler_factory.cpp b/src/maple_driver/src/compiler_factory.cpp
index ed011cff0dc884eb3b5bb3d98679914289859ba6..f57081ffc41142204ae4d02ea5976b4075320900 100644
--- a/src/maple_driver/src/compiler_factory.cpp
+++ b/src/maple_driver/src/compiler_factory.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved.
+ * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved.
  *
  * OpenArkCompiler is licensed under the Mulan PSL v1.
  * You can use this software according to the terms and conditions of the Mulan PSL v1.
@@ -83,6 +83,11 @@ ErrorCode CompilerFactory::DeleteTmpFiles(const MplOptions &mplOptions, const st
 }

 ErrorCode CompilerFactory::Compile(const MplOptions &mplOptions) {
+  if (compileFinished) {
+    LogInfo::MapleLogger() <<
+        "Failed! Compilation has already completed once, and multi-instance compilation is not supported\n";
+    return kErrorCompileFail;
+  }
   std::vector<Compiler*> compilers;
   if (compilerSelector == nullptr) {
     LogInfo::MapleLogger() << "Failed! Compiler is null."
<< "\n"; @@ -103,6 +108,8 @@ ErrorCode CompilerFactory::Compile(const MplOptions &mplOptions) { return ret; } } + // Compiler finished + compileFinished = true; if (!mplOptions.HasSetSaveTmps() || !mplOptions.GetSaveFiles().empty()) { std::vector tmpFiles; diff --git a/src/maple_driver/src/maple_comb_compiler.cpp b/src/maple_driver/src/maple_comb_compiler.cpp index 7ca5eb8254d01af2421d7deff9c9758052c7c3aa..c73adf39e1a2209d0fedfa019be2d6a193240680 100644 --- a/src/maple_driver/src/maple_comb_compiler.cpp +++ b/src/maple_driver/src/maple_comb_compiler.cpp @@ -97,6 +97,23 @@ void MapleCombCompiler::PrintCommand(const MplOptions &options) const { << GetInputFileName(options) << options.GetPrintCommandStr() << '\n'; } +void MapleCombCompiler::DecideMeRealLevel(MeOption &meOption, const std::vector &inputOptions) { + for (const mapleOption::Option &opt : inputOptions) { + switch (opt.Index()) { + case kMeOptL1: + meOption.optLevel = MeOption::kLevelOne; + break; + case kMeOptL2: + meOption.optLevel = MeOption::kLevelTwo; + // Turn the followings ON only at O2 + meOption.optDirectCall = true; + meOption.epreIncludeRef = true; + break; + default: + break; + } + } +} MeOption *MapleCombCompiler::MakeMeOptions(const MplOptions &options, MemPool &optMp) { MeOption *meOption = new MeOption(optMp); @@ -105,6 +122,10 @@ MeOption *MapleCombCompiler::MakeMeOptions(const MplOptions &options, MemPool &o LogInfo::MapleLogger() << "no me input options\n"; return meOption; } + DecideMeRealLevel(*meOption, it->second); + if (options.HasSetDebugFlag()) { + LogInfo::MapleLogger() << "Real Me level:" << std::to_string(meOption->optLevel) << "\n"; + } for (const mapleOption::Option &opt : it->second) { if (options.HasSetDebugFlag()) { LogInfo::MapleLogger() << "Me options: " << opt.Index() << " " << opt.OptionKey() << " " << opt.Args() << '\n'; @@ -204,6 +225,9 @@ MeOption *MapleCombCompiler::MakeMeOptions(const MplOptions &options, MemPool &o case kLprepulLimit: meOption->lprePULimit = std::stoul(opt.Args(), nullptr); break; + case kPregreNameLimit: + meOption->pregRenameLimit = std::stoul(opt.Args(), nullptr); + break; case kDelrcpuLimit: meOption->delRcPULimit = std::stoul(opt.Args(), nullptr); break; @@ -254,6 +278,25 @@ MeOption *MapleCombCompiler::MakeMeOptions(const MplOptions &options, MemPool &o return meOption; } +void MapleCombCompiler::DecideMpl2MplRealLevel(Options &mpl2mplOption, + const std::vector &inputOptions, + const MplOptions &options) { + for (const mapleOption::Option &opt : inputOptions) { + switch (opt.Index()) { + case kMpl2MplOptL1: + mpl2mplOption.O1 = true; + mpl2mplOption.O2 = true; + mpl2mplOption.usePreg = true; + break; + case kMpl2MplOptL2: + mpl2mplOption.O2 = true; + mpl2mplOption.usePreg = true; + break; + default: + break; + } + } +} Options *MapleCombCompiler::MakeMpl2MplOptions(const MplOptions &options, MemPool &optMp) { auto *mpl2mplOption = new Options(optMp); @@ -262,6 +305,7 @@ Options *MapleCombCompiler::MakeMpl2MplOptions(const MplOptions &options, MemPoo LogInfo::MapleLogger() << "no mpl2mpl input options\n"; return mpl2mplOption; } + DecideMpl2MplRealLevel(*mpl2mplOption, it->second, options); for (const mapleOption::Option &opt : it->second) { if (options.HasSetDebugFlag()) { LogInfo::MapleLogger() << "mpl2mpl options: " << opt.Index() << " " << opt.OptionKey() << " " << opt.Args() @@ -398,6 +442,18 @@ Options *MapleCombCompiler::MakeMpl2MplOptions(const MplOptions &options, MemPoo mpl2mplOption->skipVirtualMethod = (opt.Type() == kEnable); break; #endif + case 
 MeOption *MapleCombCompiler::MakeMeOptions(const MplOptions &options, MemPool &optMp) {
   MeOption *meOption = new MeOption(optMp);
@@ -105,6 +122,10 @@ MeOption *MapleCombCompiler::MakeMeOptions(const MplOptions &options, MemPool &o
     LogInfo::MapleLogger() << "no me input options\n";
     return meOption;
   }
+  DecideMeRealLevel(*meOption, it->second);
+  if (options.HasSetDebugFlag()) {
+    LogInfo::MapleLogger() << "Real Me level:" << std::to_string(meOption->optLevel) << "\n";
+  }
   for (const mapleOption::Option &opt : it->second) {
     if (options.HasSetDebugFlag()) {
       LogInfo::MapleLogger() << "Me options: " << opt.Index() << " " << opt.OptionKey() << " " << opt.Args() << '\n';
@@ -204,6 +225,9 @@ MeOption *MapleCombCompiler::MakeMeOptions(const MplOptions &options, MemPool &o
       case kLprepulLimit:
         meOption->lprePULimit = std::stoul(opt.Args(), nullptr);
         break;
+      case kPregreNameLimit:
+        meOption->pregRenameLimit = std::stoul(opt.Args(), nullptr);
+        break;
       case kDelrcpuLimit:
         meOption->delRcPULimit = std::stoul(opt.Args(), nullptr);
         break;
@@ -254,6 +278,25 @@ MeOption *MapleCombCompiler::MakeMeOptions(const MplOptions &options, MemPool &o
   return meOption;
 }

+void MapleCombCompiler::DecideMpl2MplRealLevel(Options &mpl2mplOption,
+                                               const std::vector<mapleOption::Option> &inputOptions,
+                                               const MplOptions &options) {
+  for (const mapleOption::Option &opt : inputOptions) {
+    switch (opt.Index()) {
+      case kMpl2MplOptL1:
+        mpl2mplOption.O1 = true;
+        mpl2mplOption.O2 = true;
+        mpl2mplOption.usePreg = true;
+        break;
+      case kMpl2MplOptL2:
+        mpl2mplOption.O2 = true;
+        mpl2mplOption.usePreg = true;
+        break;
+      default:
+        break;
+    }
+  }
+}
+
 Options *MapleCombCompiler::MakeMpl2MplOptions(const MplOptions &options, MemPool &optMp) {
   auto *mpl2mplOption = new Options(optMp);
@@ -262,6 +305,7 @@ Options *MapleCombCompiler::MakeMpl2MplOptions(const MplOptions &options, MemPoo
     LogInfo::MapleLogger() << "no mpl2mpl input options\n";
     return mpl2mplOption;
   }
+  DecideMpl2MplRealLevel(*mpl2mplOption, it->second, options);
   for (const mapleOption::Option &opt : it->second) {
     if (options.HasSetDebugFlag()) {
       LogInfo::MapleLogger() << "mpl2mpl options: " << opt.Index() << " " << opt.OptionKey() << " " << opt.Args()
@@ -398,6 +442,18 @@ Options *MapleCombCompiler::MakeMpl2MplOptions(const MplOptions &options, MemPoo
         mpl2mplOption->skipVirtualMethod = (opt.Type() == kEnable);
         break;
 #endif
+      case kMpl2MplNativeOpt:
+        mpl2mplOption->nativeOpt = (opt.Type() == kEnable);
+        break;
+      case kMpl2MplOptL1:
+        // Already handled above in DecideMpl2MplRealLevel
+        break;
+      case kMpl2MplOptL2:
+        // Already handled above in DecideMpl2MplRealLevel
+        break;
+      case kMpl2MplNoDot:
+        mpl2mplOption->noDot = (opt.Type() == kEnable);
+        break;
       default:
         WARN(kLncWarn, "input invalid key for mpl2mpl " + opt.OptionKey());
         break;
diff --git a/src/maple_driver/src/mpl_options.cpp b/src/maple_driver/src/mpl_options.cpp
index 9c6e9cbbe1e7c53e7b988e3a402a0257407e83cf..904671142efb00e1bffff54d315901431e51b1e0 100644
--- a/src/maple_driver/src/mpl_options.cpp
+++ b/src/maple_driver/src/mpl_options.cpp
@@ -274,6 +274,30 @@ const mapleOption::Descriptor usages[] = {
     " \tme\n",
     "all",
     { { nullptr, nullptr, nullptr, nullptr } } },
+  { kMeOptL1,
+    0,
+    nullptr,
+    "O1",
+    nullptr,
+    false,
+    nullptr,
+    mapleOption::BuildType::kBuildTypeAll,
+    mapleOption::ArgCheckPolicy::kArgCheckPolicyOptional,
+    " -O1 \tDo some optimization.\n",
+    "me",
+    { { nullptr, nullptr, nullptr, nullptr } } },
+  { kMeOptL2,
+    0,
+    nullptr,
+    "O2",
+    nullptr,
+    false,
+    nullptr,
+    mapleOption::BuildType::kBuildTypeAll,
+    mapleOption::ArgCheckPolicy::kArgCheckPolicyOptional,
+    " -O2 \tDo some optimization.\n",
+    "me",
+    { { nullptr, nullptr, nullptr, nullptr } } },
   { kMeRange,
     0,
     nullptr,
     "range",
@@ -460,6 +484,136 @@ const mapleOption::Descriptor usages[] = {
     " --no-dump-after \tDo not extra IR dump after the specified phase in me\n",
     "me",
     { { nullptr, nullptr, nullptr, nullptr } } },
+  { kEpreLimit,
+    0,
+    nullptr,
+    "eprelimit",
+    nullptr,
+    false,
+    nullptr,
+    mapleOption::BuildType::kBuildTypeAll,
+    mapleOption::ArgCheckPolicy::kArgCheckPolicyRequired,
+    " --eprelimit \tApply EPRE optimization only for the first NUM expressions\n"
+    " \t--eprelimit=NUM\n",
+    "me",
+    { { nullptr, nullptr, nullptr, nullptr } } },
+  { kEprepuLimit,
+    0,
+    nullptr,
+    "eprepulimit",
+    nullptr,
+    false,
+    nullptr,
+    mapleOption::BuildType::kBuildTypeAll,
+    mapleOption::ArgCheckPolicy::kArgCheckPolicyRequired,
+    " --eprepulimit \tApply EPRE optimization only for the first NUM PUs\n"
+    " \t--eprepulimit=NUM\n",
+    "me",
+    { { nullptr, nullptr, nullptr, nullptr } } },
+  { kStmtPrepuLimit,
+    0,
+    nullptr,
+    "stmtprepulimit",
+    nullptr,
+    false,
+    nullptr,
+    mapleOption::BuildType::kBuildTypeAll,
+    mapleOption::ArgCheckPolicy::kArgCheckPolicyRequired,
+    " --stmtprepulimit \tApply STMTPRE optimization only for the first NUM PUs\n"
+    " \t--stmtprepulimit=NUM\n",
+    "me",
+    { { nullptr, nullptr, nullptr, nullptr } } },
+  { kLpreLimit,
+    0,
+    nullptr,
+    "lprelimit",
+    nullptr,
+    false,
+    nullptr,
+    mapleOption::BuildType::kBuildTypeAll,
+    mapleOption::ArgCheckPolicy::kArgCheckPolicyRequired,
+    " --lprelimit \tApply LPRE optimization only for the first NUM variables\n"
+    " \t--lprelimit=NUM\n",
+    "me",
+    { { nullptr, nullptr, nullptr, nullptr } } },
+  { kLprepulLimit,
+    0,
+    nullptr,
+    "lprepulimit",
+    nullptr,
+    false,
+    nullptr,
+    mapleOption::BuildType::kBuildTypeAll,
+    mapleOption::ArgCheckPolicy::kArgCheckPolicyRequired,
+    " --lprepulimit \tApply LPRE optimization only for the first NUM PUs\n"
+    " \t--lprepulimit=NUM\n",
+    "me",
+    { { nullptr, nullptr, nullptr, nullptr } } },
+  { kPregreNameLimit,
+    0,
+    nullptr,
+    "pregrenamelimit",
+    nullptr,
+    false,
+    nullptr,
+    mapleOption::BuildType::kBuildTypeAll,
+    mapleOption::ArgCheckPolicy::kArgCheckPolicyRequired,
+    " --pregrenamelimit \tApply Preg Renaming optimization only up to NUM times\n"
+    " \t--pregrenamelimit=NUM\n",
+    "me",
+    { { nullptr, nullptr, nullptr, nullptr } } },
+  { kDelrcpuLimit,
+    0,
+    nullptr,
+    "delrcpulimit",
+    nullptr,
+    false,
+    nullptr,
+    mapleOption::BuildType::kBuildTypeAll,
+    mapleOption::ArgCheckPolicy::kArgCheckPolicyRequired,
+    " --delrcpulimit \tApply DELEGATERC optimization only for the first NUM PUs\n"
+    " \t--delrcpulimit=NUM\n",
+    "me",
+    { { nullptr, nullptr, nullptr, nullptr } } },
+  { kEpreIncludeRef,
+    kEnable,
+    nullptr,
+    "epreincluderef",
+    nullptr,
+    false,
+    nullptr,
+    mapleOption::BuildType::kBuildTypeAll,
+    mapleOption::ArgCheckPolicy::kArgCheckPolicyBool,
+    " --epreincluderef \tInclude ref-type expressions when performing epre optimization\n"
+    " --no-epreincluderef \tDon't include ref-type expressions when performing epre optimization\n",
+    "me",
+    { { nullptr, nullptr, nullptr, nullptr } } },
+  { kEpreLocalRefVar,
+    kEnable,
+    nullptr,
+    "eprelocalrefvar",
+    nullptr,
+    false,
+    nullptr,
+    mapleOption::BuildType::kBuildTypeAll,
+    mapleOption::ArgCheckPolicy::kArgCheckPolicyBool,
+    " --eprelocalrefvar \tThe EPRE phase will create new localrefvars when appropriate\n"
+    " --no-eprelocalrefvar \tDisable the EPRE phase from creating new localrefvars\n",
+    "me",
+    { { nullptr, nullptr, nullptr, nullptr } } },
+  { kEprelhSivar,
+    kEnable,
+    nullptr,
+    "eprelhsivar",
+    nullptr,
+    false,
+    nullptr,
+    mapleOption::BuildType::kBuildTypeAll,
+    mapleOption::ArgCheckPolicy::kArgCheckPolicyBool,
+    " --eprelhsivar \tThe EPRE phase will consider iassigns when optimizing ireads\n"
+    " --no-eprelhsivar \tDisable the EPRE phase from considering iassigns when optimizing ireads\n",
+    "me",
+    { { nullptr, nullptr, nullptr, nullptr } } },
   { kLessThrowAlias,
     kEnable,
     nullptr,
     "lessthrowalias",
@@ -884,6 +1038,56 @@ const mapleOption::Descriptor usages[] = {
     "mpl2mpl",
     { { nullptr, nullptr, nullptr, nullptr } } },
 #endif
+  { kMpl2MplNativeOpt,
+    kEnable,
+    nullptr,
+    "nativeopt",
+    nullptr,
+    false,
+    nullptr,
+    mapleOption::BuildType::kBuildTypeAll,
+    mapleOption::ArgCheckPolicy::kArgCheckPolicyBool,
+    " --nativeopt \tEnable native opt\n"
+    " --no-nativeopt \tDisable native opt\n",
+    "mpl2mpl",
+    { { nullptr, nullptr, nullptr, nullptr } } },
+  { kMpl2MplOptL1,
+    0,
+    nullptr,
+    "O1",
+    nullptr,
+    false,
+    nullptr,
+    mapleOption::BuildType::kBuildTypeAll,
+    mapleOption::ArgCheckPolicy::kArgCheckPolicyOptional,
+    " -O1 \tDo some optimization.\n",
+    "mpl2mpl",
+    { { nullptr, nullptr, nullptr, nullptr } } },
+  { kMpl2MplOptL2,
+    0,
+    nullptr,
+    "O2",
+    nullptr,
+    false,
+    nullptr,
+    mapleOption::BuildType::kBuildTypeAll,
+    mapleOption::ArgCheckPolicy::kArgCheckPolicyOptional,
+    " -O2 \tDo some optimization.\n",
+    "mpl2mpl",
+    { { nullptr, nullptr, nullptr, nullptr } } },
+  { kMpl2MplNoDot,
+    kEnable,
+    nullptr,
+    "nodot",
+    nullptr,
+    false,
+    nullptr,
+    mapleOption::BuildType::kBuildTypeAll,
+    mapleOption::ArgCheckPolicy::kArgCheckPolicyBool,
+    " --nodot \tDisable dot file generation from cfg\n"
+    " --no-nodot \tEnable dot file generation from cfg\n",
+    "mpl2mpl",
+    { { nullptr, nullptr, nullptr, nullptr } } },
   // mplcg
   { kPie,
     kEnable,
diff --git a/src/maple_ipa/BUILD.gn b/src/maple_ipa/BUILD.gn
index deb400828130a196880f954cdc29350ba101b75c..2211cce5297de518bd80904fcb0122992d01fe1d 100644
--- a/src/maple_ipa/BUILD.gn
+++ b/src/maple_ipa/BUILD.gn
@@ -26,6 +26,9 @@ include_directories = [
 src_libmplipa = [
   "src/interleaved_manager.cpp",
   "src/module_phase_manager.cpp",
+  "src/clone.cpp",
+  "src/retype.cpp",
+  "src/callgraph.cpp",
 ]

 configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ]
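The usages[] entries above all follow the same positional layout. Here is one of the new entries annotated field by field; the comments are my reading of the pattern visible in this table, not the real mapleOption::Descriptor member names:

    { kMpl2MplNoDot,                                     // option index (usages.h enum)
      kEnable,                                           // default state for bool options
      nullptr,                                           // short option name (none)
      "nodot",                                           // long option name
      nullptr, false, nullptr,                           // descriptor slots unused in this table
      mapleOption::BuildType::kBuildTypeAll,             // applies to all build types
      mapleOption::ArgCheckPolicy::kArgCheckPolicyBool,  // parsed as --nodot / --no-nodot
      " --nodot \tDisable dot file generation from cfg\n"
      " --no-nodot \tEnable dot file generation from cfg\n",  // help text
      "mpl2mpl",                                         // the tool that consumes the option
      { { nullptr, nullptr, nullptr, nullptr } } },      // trailing sub-table (unused)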
diff --git a/src/maple_ipa/include/callgraph.h b/src/maple_ipa/include/callgraph.h
new file mode 100644
index 0000000000000000000000000000000000000000..f779e480370fef5ea155b2c865b4a073e6303134
--- /dev/null
+++ b/src/maple_ipa/include/callgraph.h
@@ -0,0 +1,489 @@
+/*
+ * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */
+#ifndef MAPLE_IPA_INCLUDE_CALLGRAPH_H
+#define MAPLE_IPA_INCLUDE_CALLGRAPH_H
+#include "module_phase.h"
+#include "mir_nodes.h"
+#include "class_hierarchy.h"
+#include "mir_builder.h"
+namespace maple {
+class SCCNode;
+typedef enum {
+  kCallTypeInvalid,
+  kCallTypeCall,
+  kCallTypeVirtualCall,
+  kCallTypeSuperCall,
+  kCallTypeInterfaceCall,
+  kCallTypeIcall,
+  kCallTypeIntrinsicCall,
+  kCallTypeXinitrinsicCall,
+  kCallTypeIntrinsicCallWithType,
+  kCallTypeCustomCall,
+  kCallTypePolymorphicCall,
+  kCallTypeFakeThreadStartRun
+} CallType;
+
+struct NodeComparator {
+  bool operator()(const MIRFunction *lhs, const MIRFunction *rhs) const {
+    return lhs->GetPuidx() < rhs->GetPuidx();
+  }
+};
+
+template <typename T>
+struct Comparator {
+  bool operator()(const T *lhs, const T *rhs) const {
+    return lhs->GetID() < rhs->GetID();
+  }
+};
+
+// Information description of each callsite
+class CallInfo {
+ public:
+  CallInfo(CallType type, MIRFunction *call, StmtNode *s, uint32 ld, uint32 stmtId, bool local = false)
+      : areAllArgsLocal(local), ctype(type), mirFunc(call), callStmt(s), loopDepth(ld), id(stmtId) {}
+
+  virtual ~CallInfo() {}
+
+  uint32 GetID() const {
+    return id;
+  }
+
+  const char *GetCalleeName() const;
+  CallType GetCallType() const {
+    return ctype;
+  }
+
+  uint32 GetLoopDepth() const {
+    return loopDepth;
+  }
+
+  const char *GetCallTypeName() const;
+  StmtNode *GetCallStmt() const {
+    return callStmt;
+  }
+
+  const MIRFunction *GetFunc() const {
+    return mirFunc;
+  }
+
+  bool AreAllArgsLocal() const {
+    return areAllArgsLocal;
+  }
+
+  void SetAllArgsLocal() {
+    areAllArgsLocal = true;
+  }
+
+ private:
+  bool areAllArgsLocal;
+  CallType ctype;        // Call type
+  MIRFunction *mirFunc;  // Used to get signature
+  StmtNode *callStmt;    // Call statement
+  uint32 loopDepth;
+  uint32 id;
+};
+
+class CGNode;
+using Callsite = std::pair<CallInfo*, MapleSet<CGNode*, Comparator<CGNode>>*>;
+using CalleeIt = MapleMap<CallInfo*, MapleSet<CGNode*, Comparator<CGNode>>*, Comparator<CallInfo>>::iterator;
+// Node in callgraph
+class CGNode {
+ public:
+  void AddNumRefs() {
+    ++numReferences;
+  }
+
+  void DecreaseNumRefs() {
+    --numReferences;
+  }
+
+  CGNode(MIRFunction *func, MapleAllocator *allocater, uint32 index)
+      : alloc(allocater),
+        id(index),
+        sccNode(nullptr),
+        mirFunc(func),
+        callees(alloc->Adapter()),
+        vcallCandidates(alloc->Adapter()),
+        isVcallCandidatesValid(false),
+        icallCandidates(alloc->Adapter()),
+        isIcallCandidatesValid(false),
+        numReferences(0),
+        callerSet(alloc->Adapter()),
+        stmtCount(0),
+        nodeCount(0),
+        mustNotBeInlined(false),
+        vcallCands(alloc->Adapter()) {}
+
+  ~CGNode() {}
+
+  void Dump(std::ofstream &fout) const;
+  void DumpDetail() const;
+
+  MIRFunction *GetMIRFunction() const {
+    return mirFunc;
+  }
+
+  void AddCallsite(CallInfo*, CGNode*);
+  void AddCallsite(CallInfo*, MapleSet<CGNode*, Comparator<CGNode>>*);
+  void RemoveCallsite(const CallInfo*, CGNode*);
+
+  uint32 GetID() const {
+    return id;
+  }
+
+  SCCNode *GetSCCNode() {
+    return sccNode;
+  }
+
+  void SetSCCNode(SCCNode *node) {
+    sccNode = node;
+  }
+
+  int32 GetPuIdx() const {
+    return mirFunc ? mirFunc->GetPuidx() : -1;
+  }
+
+  const std::string &GetMIRFuncName() const {
+    return mirFunc ? mirFunc->GetName() : GlobalTables::GetStrTable().GetStringFromStrIdx(GStrIdx(0));
+  }
+
+  void AddCandsForCallNode(const KlassHierarchy *kh);
+  void AddVCallCandidate(MIRFunction *func) {
+    vcallCands.push_back(func);
+  }
+
+  bool HasSetVCallCandidates() const {
+    return !vcallCands.empty();
+  }
+
+  MIRFunction *HasOneCandidate() const;
+  MapleVector<MIRFunction*> &GetVCallCandidates() {
+    return vcallCands;
+  }
+
+  /* add caller to CGNode */
+  void AddCaller(CGNode *caller) {
+    callerSet.insert(caller);
+  }
+
+  void DelCaller(CGNode *caller) {
+    callerSet.erase(caller);
+  }
+
+  bool HasCaller() const {
+    return (!callerSet.empty());
+  }
+
+  uint32 NumberOfUses() const {
+    return callerSet.size();
+  }
+
+  bool IsCalleeOf(CGNode *func);
+  void IncrStmtCount() {
+    ++stmtCount;
+  }
+
+  void IncrNodeCountBy(uint32 x) {
+    nodeCount += x;
+  }
+
+  uint32 GetStmtCount() const {
+    return stmtCount;
+  }
+
+  uint32 GetNodeCount() const {
+    return nodeCount;
+  }
+
+  void Reset() {
+    stmtCount = 0;
+    nodeCount = 0;
+    numReferences = 0;
+    callees.clear();
+    vcallCands.clear();
+  }
+
+  uint32 NumberOfCallSites() const {
+    return callees.size();
+  }
+
+  MapleMap<CallInfo*, MapleSet<CGNode*, Comparator<CGNode>>*, Comparator<CallInfo>> &GetCallee() {
+    return callees;
+  }
+
+  const SCCNode *GetSCCNode() const {
+    return sccNode;
+  }
+
+  MapleSet<CGNode*, Comparator<CGNode>>::iterator CallerBegin() const {
+    return callerSet.begin();
+  }
+
+  MapleSet<CGNode*, Comparator<CGNode>>::iterator CallerEnd() const {
+    return callerSet.end();
+  }
+
+  bool IsMustNotBeInlined() const {
+    return mustNotBeInlined;
+  }
+
+  void SetMustNotBeInlined() {
+    mustNotBeInlined = true;
+  }
+
+  bool IsVcallCandidatesValid() const {
+    return isVcallCandidatesValid;
+  }
+
+  void SetVcallCandidatesValid() {
+    isVcallCandidatesValid = true;
+  }
+
+  void AddVcallCandidate(CGNode *item) {
+    vcallCandidates.insert(item);
+  }
+
+  MapleSet<CGNode*, Comparator<CGNode>> &GetVcallCandidates() {
+    return vcallCandidates;
+  }
+
+  bool IsIcallCandidatesValid() const {
+    return isIcallCandidatesValid;
+  }
+
+  void SetIcallCandidatesValid() {
+    isIcallCandidatesValid = true;
+  }
+
+  void AddIcallCandidate(CGNode *item) {
+    icallCandidates.insert(item);
+  }
+
+  MapleSet<CGNode*, Comparator<CGNode>> &GetIcallCandidates() {
+    return icallCandidates;
+  }
+
+ private:
+  // mirFunc is generated from callStmt's puIdx from the mpl instruction;
+  // mirFunc will be nullptr if this CGNode represents an external/intrinsic call
+  MapleAllocator *alloc;
+  uint32 id;
+  SCCNode *sccNode;  // the scc this cgnode belongs to
+  MIRFunction *mirFunc;
+  // Each callsite corresponds to one element
+  MapleMap<CallInfo*, MapleSet<CGNode*, Comparator<CGNode>>*, Comparator<CallInfo>> callees;
+  MapleSet<CGNode*, Comparator<CGNode>> vcallCandidates;  // vcall candidates of mirFunc
+  bool isVcallCandidatesValid;
+  MapleSet<CGNode*, Comparator<CGNode>> icallCandidates;  // icall candidates of mirFunc
+  bool isIcallCandidatesValid;
+  uint32 numReferences;  // number of times this node appears in this or other CGNodes' callees
+  // function candidates for a virtual call;
+  // for now the candidates share the same function name from base class to subclass;
+  // with type inference, the candidates would be reduced
+  MapleSet<CGNode*, Comparator<CGNode>> callerSet;
+  uint32 stmtCount;  // number of statements in the function; reused as callsite id
+  uint32 nodeCount;  // number of MIR nodes in the function
+  // this flag marks a function that reads the current method invocation stack
+  // (or similar), so it cannot be inlined, and none of the parent nodes that
+  // contain this node may be inlined either
+  bool mustNotBeInlined;
+  MapleVector<MIRFunction*> vcallCands;
+};
+
+class SCCNode {
+ public:
+  uint32 id;
+  MapleVector<CGNode*> cgNodes;
+  MapleSet<SCCNode*, Comparator<SCCNode>> callerScc;
+  MapleSet<SCCNode*, Comparator<SCCNode>> calleeScc;
+  explicit SCCNode(uint32 index, MapleAllocator *alloc)
+      : id(index),
+        cgNodes(alloc->Adapter()),
+        callerScc(alloc->Adapter()),
+        calleeScc(alloc->Adapter()) {}
+
+  virtual ~SCCNode() {}
+
+  void AddCGNode(CGNode *cgn) {
+    cgNodes.push_back(cgn);
+  }
+
+  void Dump();
+  void DumpCycle();
+  void Verify();
+  void Setup();
+  const MapleVector<CGNode*> &GetCGNodes() const {
+    return cgNodes;
+  }
+
+  const MapleSet<SCCNode*, Comparator<SCCNode>> &GetCalles() const {
+    return calleeScc;
+  }
+
+  bool HasRecursion() const;
+  bool HasSelfRecursion() const;
+  bool HasCaller() const {
+    return (!callerScc.empty());
+  }
+
+  uint32 GetID() const {
+    return id;
+  }
+};
+class CallGraph : public AnalysisResult {
+ public:
+  CallGraph(MIRModule *m, MemPool *memPool, KlassHierarchy *kh, const char *fn);
+  ~CallGraph() {}
+
+  CGNode *CallExternal() const {
+    return callExternal;
+  }
+
+  void BuildCallGraph();
+  CGNode *GetEntryNode() const {
+    return entry_node;
+  }
+
+  const MapleVector<CGNode*> &GetRootNodes() const {
+    return rootNodes;
+  }
+
+  const KlassHierarchy *GetKlassh() const {
+    return klassh;
+  }
+
+  const MapleVector<SCCNode*> &GetSCCTopVec() const {
+    return sccTopologicalVec;
+  }
+
+  const MapleMap<MIRFunction*, CGNode*, NodeComparator> &GetNodesMap() const {
+    return nodesMap;
+  }
+
+  void HandleBody(MIRFunction*, BlockNode*, CGNode*, uint32);
+  void AddCallGraphNode(MIRFunction*);
+  void DumpToFile(bool dumpall = true);
+  void Dump() const;
+  CGNode *GetCGNode(MIRFunction *func) const;
+  CGNode *GetCGNode(PUIdx puIdx) const;
+  SCCNode *GetSCCNode(MIRFunction *func) const;
+  bool IsRootNode(MIRFunction *func) const;
+  void UpdateCallGraphNode(CGNode *node);
+  void RecomputeSCC();
+  MIRFunction *CurFunction() const {
+    return mirModule->CurFunction();
+  }
+
+  bool IsInIPA() const {
+    return mirModule->IsInIPA();
+  }
+
+  /* iterator */
+  typedef MapleMap<MIRFunction*, CGNode*, NodeComparator>::iterator iterator;
+  iterator Begin() {
+    return nodesMap.begin();
+  }
+
+  iterator End() {
+    return nodesMap.end();
+  }
+
+  MIRFunction *GetMIRFunction(iterator it) const {
+    return (*it).first;
+  }
+
+  CGNode *GetCGNode(iterator it) const {
+    return (*it).second;
+  }
+
+  void DelNode(CGNode *node);
+  bool debug_flag;
+  bool debug_scc;
+  void BuildSCC();
+  void VerifySCC();
+  void BuildSCCDFS(CGNode *caller, unsigned int &visitIndex, std::vector<SCCNode*> &sccNodes,
+                   std::vector<CGNode*> &cgNodes, std::vector<uint32> &visitedOrder,
+                   std::vector<uint32> &lowestOrder, std::vector<bool> &inStack,
+                   std::vector<uint32> &visitStack);
+
+ private:
+  MIRModule *mirModule;
+  MapleAllocator cgalloc;
+  MIRBuilder *mirBuilder;
+  CGNode *entry_node;  // For the main function; nullptr if there are multiple entries
+  MapleVector<CGNode*> rootNodes;
+  const char *fileName;  /* used for the output dot file */
+  KlassHierarchy *klassh;
+  MapleMap<MIRFunction*, CGNode*, NodeComparator> nodesMap;
+  MapleVector<SCCNode*> sccTopologicalVec;
+  CGNode *callExternal;  /* Auxiliary node used in icall/intrinsic call */
+  uint32 numOfNodes;
+  uint32 numOfSccs;
+  std::unordered_set<uint64> callsiteHash;
+  void GenCallGraph();
+  CGNode *GetOrGenCGNode(PUIdx puidx, bool isVcall = false, bool isIcall = false);
+  CallType GetCallType(Opcode op) const;
+  CallInfo *GenCallInfo(CallType type, MIRFunction *call, StmtNode *s, uint32 loopDepth, uint32 callsiteid) {
+    return cgalloc.GetMemPool()->New<CallInfo>(type, call, s, loopDepth, callsiteid);
+  }
+
+  void FindRootNodes();
+  void SCCTopologicalSort(std::vector<SCCNode*> &sccNodes);
+  void SetCompilationFunclist();
+  void IncrNodesCount(CGNode *cgnode, BaseNode *bn);
+};
+class DoCallGraph : public ModulePhase {
+ public:
+  DoCallGraph(ModulePhaseID id) : ModulePhase(id) {}
+
+  AnalysisResult *Run(MIRModule *module, ModuleResultMgr *m) override;
+  std::string PhaseName() const override {
+    return "callgraph";
+  }
+
+  virtual ~DoCallGraph(){};
+};
+class IPODevirtulize {
+ public:
+  IPODevirtulize(MIRModule *m, MemPool *memPool, KlassHierarchy *kh)
+      : cgalloc(memPool), mirBuilder(cgalloc.GetMemPool()->New<MIRBuilder>(m)), klassh(kh), debugFlag(false) {}
+
+  virtual ~IPODevirtulize() = default;
+  void DevirtualFinal();
+  KlassHierarchy *GetKlassh() const {
+    return klassh;
+  }
+
+ private:
+  MapleAllocator cgalloc;
+  MIRBuilder *mirBuilder;
+  KlassHierarchy *klassh;
+  bool debugFlag;
+  void SearchDefInMemberMethods(const Klass *klass);
+  void SearchDefInClinit(const Klass *klass);
+};
+class DoIPODevirtulize : public ModulePhase {
+ public:
+  explicit DoIPODevirtulize(ModulePhaseID id) : ModulePhase(id) {}
+
+  AnalysisResult *Run(MIRModule *module, ModuleResultMgr *m) override;
+  std::string PhaseName() const override {
+    return "ipodevirtulize";
+  }
+
+  virtual ~DoIPODevirtulize(){};
+};
+
+} // namespace maple
+#endif // MAPLE_IPA_INCLUDE_CALLGRAPH_H
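The core data structure above is the per-node callees map: one CallInfo per call statement, mapped to the set of candidate callee CGNodes. A minimal, hypothetical walk using only the accessors declared in this header (cg is assumed to be an already-built CallGraph):

    // Illustrative traversal of the structures declared in callgraph.h.
    for (auto it = cg.Begin(); it != cg.End(); ++it) {
      CGNode *caller = cg.GetCGNode(it);
      for (auto &callSite : caller->GetCallee()) {
        CallInfo *info = callSite.first;              // one call statement
        for (CGNode *candidate : *callSite.second) {  // its possible callees
          LogInfo::MapleLogger() << caller->GetMIRFuncName() << " -> "
                                 << candidate->GetMIRFuncName()
                                 << " [" << info->GetCallTypeName() << "]\n";
        }
      }
    }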
diff --git a/src/maple_ipa/include/clone.h b/src/maple_ipa/include/clone.h
new file mode 100644
index 0000000000000000000000000000000000000000..88d4dfe63c5593ed0697f9d1e20362ef8cfce6d5
--- /dev/null
+++ b/src/maple_ipa/include/clone.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */
+#ifndef MAPLE_IPA_INCLUDE_CLONE_H
+#define MAPLE_IPA_INCLUDE_CLONE_H
+#include "mir_module.h"
+#include "mir_function.h"
+#include "mir_builder.h"
+#include "mempool.h"
+#include "mempool_allocator.h"
+#include "class_hierarchy.h"
+#include "me_ir.h"
+#include "module_phase.h"
+static constexpr char kFullNameStr[] = "INFO_fullname";
+static constexpr char kClassNameStr[] = "INFO_classname";
+static constexpr char kFuncNameStr[] = "INFO_funcname";
+static constexpr char kVoidRetSuffix[] = "CLONEDignoreret";
+namespace maple {
+class ReplaceRetIgnored {
+ public:
+  ReplaceRetIgnored(MemPool *memPool);
+  virtual ~ReplaceRetIgnored() = default;
+
+  bool ShouldReplaceWithVoidFunc(const CallMeStmt *stmt, const MIRFunction *calleeFunc) const;
+  std::string GenerateNewBaseName(const MIRFunction *originalFunc);
+  std::string GenerateNewFullName(const MIRFunction *originalFunc);
+  const std::set<std::string> *GetTobeClonedFuncNames() const {
+    return &toBeClonedFuncNames;
+  }
+
+  bool IsInCloneList(const std::string &funcName) const {
+    return toBeClonedFuncNames.find(funcName) != toBeClonedFuncNames.end();
+  }
+
+  static bool IsClonedFunc(const std::string &funcName) {
+    return funcName.find(kVoidRetSuffix) != std::string::npos;
+  }
+
+ private:
+  maple::MapleAllocator allocator;
+  std::set<std::string> toBeClonedFuncNames;
+  bool RealShouldReplaceWithVoidFunc(Opcode op, size_t nRetSize, const MIRFunction *calleeFunc) const;
+};
+class Clone : public AnalysisResult {
+ public:
+  Clone(MIRModule *mod, MemPool *memPool, MIRBuilder &builder, KlassHierarchy *kh)
+      : AnalysisResult(memPool), mirModule(mod), allocator(memPool), dexBuilder(builder), kh(kh),
+        replaceRetIgnored(memPool->New<ReplaceRetIgnored>(memPool)) {}
+
+  ~Clone() = default;
+
+  static MIRSymbol *CloneLocalSymbol(const MIRSymbol *oldSym, MIRFunction *newFunc);
+  static void CloneSymbols(MIRFunction *newFunc, const MIRFunction *oldFunc);
+  static void CloneLabels(MIRFunction *newFunc, const MIRFunction *oldFunc);
+  MIRFunction *CloneFunction(MIRFunction *originalFunction, const std::string &newBaseFuncName,
+                             MIRType *returnType = nullptr);
+  MIRFunction *CloneFunctionNoReturn(MIRFunction *originalFunction);
+  void DoClone();
+  void CopyFuncInfo(const MIRFunction *originalFunction, MIRFunction *newFunc) const;
+  void UpdateFuncInfo(MIRFunction *newFunc);
+  void CloneArgument(MIRFunction *originalFunction, ArgVector &argument) const;
+  ReplaceRetIgnored *GetReplaceRetIgnored() {
+    return replaceRetIgnored;
+  }
+
+  void UpdateReturnVoidIfPossible(CallMeStmt *callMeStmt, const MIRFunction *targetFunc);
+
+ private:
+  MIRModule *mirModule;
+  MapleAllocator allocator;
+  MIRBuilder &dexBuilder;
+  KlassHierarchy *kh;
+  ReplaceRetIgnored *replaceRetIgnored;
+};
+class DoClone : public ModulePhase {
+ public:
+  explicit DoClone(ModulePhaseID id) : ModulePhase(id) {}
+
+  virtual ~DoClone() = default;
+
+  AnalysisResult *Run(MIRModule *module, ModuleResultMgr *m) override;
+  std::string PhaseName() const override {
+    return "clone";
+  }
+};
+} // namespace maple
+#endif // MAPLE_IPA_INCLUDE_CLONE_H
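clone.h encodes the "ignore return value" clones purely in the mangled name via kVoidRetSuffix. A small sketch of that convention (the sample name is invented, and the append is what GenerateNewBaseName presumably does; only IsClonedFunc's suffix test is taken directly from the header):

    // Hypothetical demonstration of the cloned-function naming convention.
    #include <cassert>
    #include <string>

    static constexpr char kVoidRetSuffix[] = "CLONEDignoreret";  // as in clone.h

    int main() {
      std::string original = "Lfoo_3B_7Cbar_7C_28_29I";  // invented mangled name
      std::string cloned = original + kVoidRetSuffix;    // assumed naming scheme
      // This is the check ReplaceRetIgnored::IsClonedFunc performs:
      assert(cloned.find(kVoidRetSuffix) != std::string::npos);
      return 0;
    }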
diff --git a/src/maple_ipa/include/module_phases.def b/src/maple_ipa/include/module_phases.def
index b24f8e7a5e32a859bc146827be2eb1797a31e9a8..2e78edcf3a05a274cea190ac5e1c197d18219432 100644
--- a/src/maple_ipa/include/module_phases.def
+++ b/src/maple_ipa/include/module_phases.def
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved.
+ * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved.
  *
  * OpenArkCompiler is licensed under the Mulan PSL v1.
  * You can use this software according to the terms and conditions of the Mulan PSL v1.
@@ -12,8 +12,10 @@
  * FIT FOR A PARTICULAR PURPOSE.
  * See the Mulan PSL v1 for more details.
  */
+MODAPHASE(MoPhase_CLONE, DoClone)
 MODAPHASE(MoPhase_CHA, DoKlassHierarchy)
 MODAPHASE(MoPhase_CLINIT, DoClassInit)
+MODAPHASE(MoPhase_CALLGRAPH_ANALYSIS, DoCallGraph)
 #if MIR_JAVA
 MODTPHASE(MoPhase_GENNATIVESTUBFUNC, DoGenerateNativeStubFunc)
 MODAPHASE(MoPhase_VTABLEANALYSIS, DoVtableAnalysis)
@@ -23,4 +25,7 @@ MODTPHASE(MoPhase_JAVAINTRNLOWERING, DoJavaIntrnLowering)
 MODTPHASE(MoPhase_JAVAEHLOWER, JavaEHLowererPhase)
 MODTPHASE(MoPhase_MUIDREPLACEMENT, DoMUIDReplacement)
 MODTPHASE(MoPhase_CHECKCASTGENERATION, DoCheckCastGeneration)
+MODTPHASE(MoPhase_ANALYZECTOR, DoAnalyzeCtor)
+MODTPHASE(MoPhase_CodeReLayout, DoCodeReLayout)
 #endif
+MODTPHASE(MoPhase_CONSTANTFOLD, DoConstantFold)
diff --git a/src/maple_ipa/include/retype.h b/src/maple_ipa/include/retype.h
new file mode 100644
index 0000000000000000000000000000000000000000..11ad1fa638970d1d5d2635eaec32cb2738852d91
--- /dev/null
+++ b/src/maple_ipa/include/retype.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */
+#ifndef MAPLE_IPA_INCLUDE_RETYPE_H
+#define MAPLE_IPA_INCLUDE_RETYPE_H
+#include "module_phase.h"
+#include "mir_nodes.h"
+#include "class_hierarchy.h"
+
+namespace maple {
+class Retype {
+ public:
+  MIRModule *mirmodule;
+  MapleAllocator allocator;
+  MIRBuilder &dexbuilder;
+  KlassHierarchy *klassh;
+
+ public:
+  explicit Retype(MIRModule *mod, MemPool *memPool, MIRBuilder &builder, KlassHierarchy *k)
+      : mirmodule(mod), allocator(memPool), dexbuilder(builder), klassh(k) {}
+
+  virtual ~Retype() {}
+
+  void ReplaceRetypeExpr(const BaseNode *opnd);
+  void Retypestmt(MIRFunction *func);
+  void DoRetype();
+};
+
+} // namespace maple
+#endif // MAPLE_IPA_INCLUDE_RETYPE_H
diff --git a/src/maple_ipa/src/callgraph.cpp b/src/maple_ipa/src/callgraph.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d9943a1949d524de8df94e6c2b58493585873030
--- /dev/null
+++ b/src/maple_ipa/src/callgraph.cpp
@@ -0,0 +1,1710 @@
+/*
+ * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */
+#include "callgraph.h"
+#include
+#include
+#include
+#include
+#include
+#include "option.h"
+#include "retype.h"
+#include "string_utils.h"
+// Call Graph Analysis
+// This phase is a foundation phase of compilation. It builds the call graph
+// not only for this module but also for the modules it depends on when it is
+// run for IPA. The main procedure is as follows.
+// A. Devirtualize the virtual calls on private, final, static and non-static
+//    variables. This step aims to reduce the callee set of each call site,
+//    which benefits IPA analysis.
+// B. Build the call graph.
+//    i) For IPA, rebuild the call graphs of the modules this module depends
+//       on. All necessary information is stored in mplt.
+//    ii) Analyze each function in this module. For each call statement,
+//        create a CGNode and collect potential callee functions to generate
+//        the call graph.
+// C. Find all root nodes of the call graph.
+// D. Construct SCCs based on Tarjan's algorithm.
+// E. Set the compilation order to the bottom-up order of the call graph, so
+//    a callee is always compiled before its callers. This benefits
+//    optimizations that need interprocedural information, such as escape
+//    analysis.
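Step D of the comment above builds SCCs with Tarjan's algorithm; the BuildSCCDFS declared in callgraph.h is an explicit-stack variant of it. For reference, a minimal recursive Tarjan over a plain adjacency list, independent of the CGNode/SCCNode types here:

    // Minimal recursive Tarjan SCC, for reference only. order[] and low[]
    // must be zero-initialized with adj.size() elements before the first call.
    #include <algorithm>
    #include <stack>
    #include <vector>

    void Tarjan(unsigned u, const std::vector<std::vector<unsigned>> &adj,
                std::vector<unsigned> &order, std::vector<unsigned> &low,
                std::vector<bool> &inStack, std::stack<unsigned> &stk,
                unsigned &counter, std::vector<std::vector<unsigned>> &sccs) {
      order[u] = low[u] = ++counter;
      stk.push(u);
      inStack[u] = true;
      for (unsigned v : adj[u]) {
        if (order[v] == 0) {        // tree edge: recurse, then propagate low-link
          Tarjan(v, adj, order, low, inStack, stk, counter, sccs);
          low[u] = std::min(low[u], low[v]);
        } else if (inStack[v]) {    // back edge into the current DFS stack
          low[u] = std::min(low[u], order[v]);
        }
      }
      if (low[u] == order[u]) {     // u is the root of an SCC: pop it off
        sccs.emplace_back();
        unsigned v;
        do {
          v = stk.top();
          stk.pop();
          inStack[v] = false;
          sccs.back().push_back(v);
        } while (v != u);
      }
    }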
+namespace maple {
+const char *CallInfo::GetCallTypeName() const {
+  switch (ctype) {
+    case kCallTypeCall:
+      return "c";
+    case kCallTypeVirtualCall:
+      return "v";
+    case kCallTypeSuperCall:
+      return "s";
+    case kCallTypeInterfaceCall:
+      return "i";
+    case kCallTypeIcall:
+      return "icall";
+    case kCallTypeIntrinsicCall:
+      return "intrinsiccall";
+    case kCallTypeXinitrinsicCall:
+      return "xintrinsiccall";
+    case kCallTypeIntrinsicCallWithType:
+      return "intrinsiccallwithtype";
+    case kCallTypeFakeThreadStartRun:
+      return "fakecallstartrun";
+    case kCallTypeCustomCall:
+      return "customcall";
+    case kCallTypePolymorphicCall:
+      return "polymorphiccall";
+    default:
+      CHECK_FATAL(false, "unsupported CALL type");
+      return nullptr;
+  }
+}
+
+const char *CallInfo::GetCalleeName() const {
+  if ((ctype >= kCallTypeCall) && (ctype <= kCallTypeInterfaceCall)) {
+    MIRFunction *mirf = mirFunc;
+    return mirf->GetName().c_str();
+  } else if (ctype == kCallTypeIcall) {
+    return "IcallUnknown";
+  } else if ((ctype >= kCallTypeIntrinsicCall) && (ctype <= kCallTypeIntrinsicCallWithType)) {
+    return "IntrinsicCall";
+  } else if (ctype == kCallTypeCustomCall) {
+    return "CustomCall";
+  } else if (ctype == kCallTypePolymorphicCall) {
+    return "PolymorphicCall";
+  }
+  CHECK_FATAL(false, "should not be here");
+  return nullptr;
+}
+
+void CGNode::DumpDetail() const {
+  LogInfo::MapleLogger() << "---CGNode @" << this << ": " << mirFunc->GetName() << "\t";
+  if (HasOneCandidate() != nullptr) {
+    LogInfo::MapleLogger() << "@One Candidate\n";
+  } else {
+    LogInfo::MapleLogger() << std::endl;
+  }
+  if (HasSetVCallCandidates()) {
+    for (uint32 i = 0; i < vcallCands.size(); i++) {
+      LogInfo::MapleLogger() << "   virtual call candidates: " << vcallCands[i]->GetName() << "\n";
+    }
+  }
+  for (auto &callSite : callees) {
+    for (auto &cgIt : *callSite.second) {
+      CallInfo *ci = callSite.first;
+      CGNode *node = cgIt;
+      MIRFunction *mf = node->GetMIRFunction();
+      if (mf) {
+        LogInfo::MapleLogger() << "\tcallee in module : " << mf->GetName() << " ";
+      } else {
+        LogInfo::MapleLogger() << "\tcallee external: " << ci->GetCalleeName();
+      }
+    }
+  }
+  // dump caller
+  for (auto const &callernode : callerSet) {
+    CHECK_FATAL(callernode && callernode->mirFunc, "");
+    LogInfo::MapleLogger() << "\tcaller : " << callernode->mirFunc->GetName() << std::endl;
+  }
+}
"\"" << mirFunc->GetName() << "\";\n"; + return; + } + for (auto &callSite : callees) { + for (auto &cgIt : *callSite.second) { + CallInfo *ci = callSite.first; + CGNode *node = cgIt; + if (!node) { + continue; + } + MIRFunction *func = node->GetMIRFunction(); + fout << "\"" << mirFunc->GetName() << "\" -> "; + if (func) { + if (node->GetSCCNode() != nullptr && node->GetSCCNode()->GetCGNodes().size() > 1) { + fout << "\"" << func->GetName() << "\"[label=" << node->GetSCCNode()->id << " color=red];\n"; + } else { + fout << "\"" << func->GetName() << "\"[label=" << 0 << " color=blue];\n"; + } + } else { + // unknown / external function with empty function body + fout << "\"" << ci->GetCalleeName() << "\"[label=" << ci->GetCallTypeName() << " color=blue];\n"; + } + } + } +} + +void CGNode::AddCallsite(CallInfo *ci, MapleSet> *callee) { + callees.insert(std::pair>*>(ci, callee)); +} + +void CGNode::AddCallsite(CallInfo *ci, CGNode *node) { + CHECK_FATAL(ci->GetCallType() != kCallTypeInterfaceCall, "must be true"); + CHECK_FATAL(ci->GetCallType() != kCallTypeVirtualCall, "must be true"); + auto *cgVector = alloc->GetMemPool()->New>>(alloc->Adapter()); + cgVector->insert(node); + (void)callees.emplace(ci, cgVector); + if (node) { + node->AddNumRefs(); + } +} + +void CGNode::RemoveCallsite(const CallInfo *ci, CGNode *node) { + for (Callsite callSite : GetCallee()) { + if (callSite.first == ci) { + auto cgIt = callSite.second->find(node); + if (cgIt != callSite.second->end()) { + callSite.second->erase(cgIt); + return; + } + CHECK_FATAL(false, "node isn't in ci"); + } + } +} + +bool CGNode::IsCalleeOf(CGNode *func) { + return callerSet.find(func) != callerSet.end(); +} + +void CallGraph::DelNode(CGNode *node) { + if (!node->GetMIRFunction()) { + return; + } + for (auto &callSite : node->GetCallee()) { + for (auto &cgIt : *callSite.second) { + cgIt->DelCaller(node); + if (!cgIt->HasCaller()) { + DelNode(cgIt); + } + } + } + MIRFunction *func = node->GetMIRFunction(); + // Delete the method of class info + if (func->GetClassTyIdx() != 0u) { + MIRType *classType = GlobalTables::GetTypeTable().GetTypeTable().at(func->GetClassTyIdx()); + auto *mirStructType = static_cast(classType); + uint32 j = 0; + for (; j < mirStructType->GetMethods().size(); ++j) { + if (mirStructType->GetMethods()[j].first == func->GetStIdx()) { + mirStructType->GetMethods().erase(mirStructType->GetMethods().begin() + j); + break; + } + } + } + for (uint32 i = 0; i < GlobalTables::GetFunctionTable().GetFuncTable().size(); ++i) { + if (GlobalTables::GetFunctionTable().GetFunctionFromPuidx(i) == func) { + uint32 j = 0; + for (; j < mirModule->GetFunctionList().size(); ++j) { + if (mirModule->GetFunction(j) == GlobalTables::GetFunctionTable().GetFunctionFromPuidx(i)) { + break; + } + } + if (j < mirModule->GetFunctionList().size()) { + mirModule->GetFunctionList().erase(mirModule->GetFunctionList().begin() + j); + } + GlobalTables::GetFunctionTable().GetFuncTable()[i] = nullptr; + break; + } + } + nodesMap.erase(func); + // Update Klass info as it has been built + if (klassh->GetKlassFromFunc(func)) { + klassh->GetKlassFromFunc(func)->DelMethod(*func); + } +} + +CallGraph::CallGraph(MIRModule *m, MemPool *memPool, KlassHierarchy *kh, const char *fn) + : AnalysisResult(memPool), + mirModule(m), + cgalloc(memPool), + mirBuilder(cgalloc.GetMemPool()->New(m)), + entry_node(nullptr), + rootNodes(cgalloc.Adapter()), + fileName(fn), + klassh(kh), + nodesMap(cgalloc.Adapter()), + sccTopologicalVec(cgalloc.Adapter()), + numOfNodes(0), + 
+void CallGraph::DelNode(CGNode *node) {
+  if (!node->GetMIRFunction()) {
+    return;
+  }
+  for (auto &callSite : node->GetCallee()) {
+    for (auto &cgIt : *callSite.second) {
+      cgIt->DelCaller(node);
+      if (!cgIt->HasCaller()) {
+        DelNode(cgIt);
+      }
+    }
+  }
+  MIRFunction *func = node->GetMIRFunction();
+  // Delete the method from the class info
+  if (func->GetClassTyIdx() != 0u) {
+    MIRType *classType = GlobalTables::GetTypeTable().GetTypeTable().at(func->GetClassTyIdx());
+    auto *mirStructType = static_cast<MIRStructType*>(classType);
+    uint32 j = 0;
+    for (; j < mirStructType->GetMethods().size(); ++j) {
+      if (mirStructType->GetMethods()[j].first == func->GetStIdx()) {
+        mirStructType->GetMethods().erase(mirStructType->GetMethods().begin() + j);
+        break;
+      }
+    }
+  }
+  for (uint32 i = 0; i < GlobalTables::GetFunctionTable().GetFuncTable().size(); ++i) {
+    if (GlobalTables::GetFunctionTable().GetFunctionFromPuidx(i) == func) {
+      uint32 j = 0;
+      for (; j < mirModule->GetFunctionList().size(); ++j) {
+        if (mirModule->GetFunction(j) == GlobalTables::GetFunctionTable().GetFunctionFromPuidx(i)) {
+          break;
+        }
+      }
+      if (j < mirModule->GetFunctionList().size()) {
+        mirModule->GetFunctionList().erase(mirModule->GetFunctionList().begin() + j);
+      }
+      GlobalTables::GetFunctionTable().GetFuncTable()[i] = nullptr;
+      break;
+    }
+  }
+  nodesMap.erase(func);
+  // Update Klass info as it has been built
+  if (klassh->GetKlassFromFunc(func)) {
+    klassh->GetKlassFromFunc(func)->DelMethod(*func);
+  }
+}
+
+CallGraph::CallGraph(MIRModule *m, MemPool *memPool, KlassHierarchy *kh, const char *fn)
+    : AnalysisResult(memPool),
+      mirModule(m),
+      cgalloc(memPool),
+      mirBuilder(cgalloc.GetMemPool()->New<MIRBuilder>(m)),
+      entry_node(nullptr),
+      rootNodes(cgalloc.Adapter()),
+      fileName(fn),
+      klassh(kh),
+      nodesMap(cgalloc.Adapter()),
+      sccTopologicalVec(cgalloc.Adapter()),
+      numOfNodes(0),
+      numOfSccs(0) {
+  CHECK_FATAL(fn != nullptr, "");
+  callExternal = cgalloc.GetMemPool()->New<CGNode>(static_cast<MIRFunction*>(nullptr), &cgalloc, numOfNodes++);
+  debug_flag = false;
+  debug_scc = false;
+}
+
+CallType CallGraph::GetCallType(Opcode op) const {
+  CallType t = kCallTypeInvalid;
+  switch (op) {
+    case OP_call:
+    case OP_callassigned:
+      t = kCallTypeCall;
+      break;
+    case OP_virtualcall:
+    case OP_virtualcallassigned:
+      t = kCallTypeVirtualCall;
+      break;
+    case OP_superclasscall:
+    case OP_superclasscallassigned:
+      t = kCallTypeSuperCall;
+      break;
+    case OP_interfacecall:
+    case OP_interfacecallassigned:
+      t = kCallTypeInterfaceCall;
+      break;
+    case OP_icall:
+    case OP_icallassigned:
+      t = kCallTypeIcall;
+      break;
+    case OP_intrinsiccall:
+    case OP_intrinsiccallassigned:
+      t = kCallTypeIntrinsicCall;
+      break;
+    case OP_xintrinsiccall:
+    case OP_xintrinsiccallassigned:
+      t = kCallTypeXinitrinsicCall;
+      break;
+    case OP_intrinsiccallwithtype:
+    case OP_intrinsiccallwithtypeassigned:
+      t = kCallTypeIntrinsicCallWithType;
+      break;
+    case OP_customcall:
+    case OP_customcallassigned:
+      t = kCallTypeCustomCall;
+      break;
+    case OP_polymorphiccall:
+    case OP_polymorphiccallassigned:
+      t = kCallTypePolymorphicCall;
+      break;
+    default:
+      break;
+  }
+  return t;
+}
+
+CGNode *CallGraph::GetCGNode(MIRFunction *func) const {
+  if (nodesMap.find(func) != nodesMap.end()) {
+    return nodesMap.at(func);
+  }
+  return nullptr;
+}
+
+CGNode *CallGraph::GetCGNode(PUIdx puIdx) const {
+  return GetCGNode(GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx));
+}
+
+SCCNode *CallGraph::GetSCCNode(MIRFunction *func) const {
+  CGNode *cgnode = GetCGNode(func);
+  return cgnode ? cgnode->GetSCCNode() : nullptr;
+}
+
+bool CallGraph::IsRootNode(MIRFunction *func) const {
+  if (GetCGNode(func)) {
+    return (!GetCGNode(func)->HasCaller());
+  } else {
+    return false;
+  }
+}
+CGNode *CallGraph::GetOrGenCGNode(PUIdx puIdx, bool isVcall, bool isIcall) {
+  CGNode *node = GetCGNode(puIdx);
+  if (node == nullptr) {
+    MIRFunction *mirFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx);
+    node = cgalloc.GetMemPool()->New<CGNode>(mirFunc, &cgalloc, numOfNodes++);
+    nodesMap.insert(std::make_pair(mirFunc, node));
+  }
+  if (isVcall && !node->IsVcallCandidatesValid()) {
+    MIRFunction *mirFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx);
+    Klass *klass = nullptr;
+    if (StringUtils::StartsWith(mirFunc->GetBaseClassName(), JARRAY_PREFIX_STR)) {  // Array
+      klass = klassh->GetKlassFromName(NameMangler::kJavaLangObjectStr);
+    } else {
+      klass = klassh->GetKlassFromStrIdx(mirFunc->GetBaseClassNameStrIdx());
+    }
+    if (klass == nullptr) {  // Incomplete
+      node->SetVcallCandidatesValid();
+      return node;
+    }
+    // Traverse all subclasses; index-based loop because klassVector grows
+    // during the walk (a range-for would be invalidated by reallocation)
+    std::vector<Klass*> klassVector;
+    klassVector.push_back(klass);
+    GStrIdx calleeFuncStrIdx = mirFunc->GetBaseFuncNameWithTypeStrIdx();
+    for (size_t idx = 0; idx < klassVector.size(); ++idx) {
+      Klass *currKlass = klassVector[idx];
+      const MIRFunction *method = currKlass->GetMethod(calleeFuncStrIdx);
+      if (method != nullptr) {
+        node->AddVcallCandidate(GetOrGenCGNode(method->GetPuidx()));
+      }
+      // add subclasses of currKlass into the vector
+      for (Klass *subKlass : currKlass->GetSubKlasses()) {
+        klassVector.push_back(subKlass);
+      }
+    }
+    if (!klass->GetMIRClassType()->IsAbstract()) {
+      // If klass.foo does not exist, search the superclasses and find the nearest one
+      auto &klassMethods = klass->GetMethods();
+      if (std::find(klassMethods.begin(), klassMethods.end(), mirFunc) == klassMethods.end()) {
+        Klass *superKlass = klass->GetSuperKlass();
+        while (superKlass != nullptr) {
+          const MIRFunction *method = superKlass->GetMethod(calleeFuncStrIdx);
+          if (method != nullptr) {
+            node->AddVcallCandidate(GetOrGenCGNode(method->GetPuidx()));
+            break;
+          }
+          superKlass = superKlass->GetSuperKlass();
+        }
+      }
+    }
+    node->SetVcallCandidatesValid();
+  }
+  if (isIcall && !node->IsIcallCandidatesValid()) {
+    Klass *CallerKlass = nullptr;
+    if (StringUtils::StartsWith(CurFunction()->GetBaseClassName(), JARRAY_PREFIX_STR)) {  // Array
+      CallerKlass = klassh->GetKlassFromName(NameMangler::kJavaLangObjectStr);
+    } else {
+      CallerKlass = klassh->GetKlassFromStrIdx(CurFunction()->GetBaseClassNameStrIdx());
+    }
+    if (CallerKlass == nullptr) {  // Incomplete
+      CHECK_FATAL(false, "class is incomplete, impossible.");
+      return node;
+    }
+    MIRFunction *mirFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx);
+    Klass *klass = nullptr;
+    if (StringUtils::StartsWith(mirFunc->GetBaseClassName(), JARRAY_PREFIX_STR)) {  // Array
+      klass = klassh->GetKlassFromName(NameMangler::kJavaLangObjectStr);
+    } else {
+      klass = klassh->GetKlassFromStrIdx(mirFunc->GetBaseClassNameStrIdx());
+    }
+    if (klass == nullptr) {  // Incomplete
+      node->SetIcallCandidatesValid();
+      return node;
+    }
+    GStrIdx calleeFuncStrIdx = mirFunc->GetBaseFuncNameWithTypeStrIdx();
+    // Traverse all classes which implement the interface
+    for (Klass *implKlass : klass->GetImplKlasses()) {
+      const MIRFunction *method = implKlass->GetMethod(calleeFuncStrIdx);
+      if (method != nullptr) {
+        node->AddIcallCandidate(GetOrGenCGNode(method->GetPuidx()));
+      } else if (!implKlass->GetMIRClassType()->IsAbstract()) {
+        // Search in its parent class
+        Klass *superKlass = implKlass->GetSuperKlass();
+        while (superKlass != nullptr) {
+          const MIRFunction *methodT = superKlass->GetMethod(calleeFuncStrIdx);
+          if (methodT != nullptr) {
+            node->AddIcallCandidate(GetOrGenCGNode(methodT->GetPuidx()));
+            break;
+          }
+          superKlass = superKlass->GetSuperKlass();
+        }
+      }
+    }
+    node->SetIcallCandidatesValid();
+  }
+  return node;
+}
+void CallGraph::HandleBody(MIRFunction *func, BlockNode *body, CGNode *node, uint32 loopDepth) {
+  StmtNode *stmtNext = nullptr;
+  for (StmtNode *stmt = body->GetFirst(); stmt != nullptr; stmt = stmtNext) {
+    stmtNext = static_cast<StmtNode*>(stmt)->GetNext();
+    Opcode op = stmt->GetOpCode();
+    if (op == OP_comment) {
+      continue;
+    } else if (op == OP_doloop) {
+      DoloopNode *n = static_cast<DoloopNode*>(stmt);
+      HandleBody(func, n->GetDoBody(), node, loopDepth + 1);
+    } else if (op == OP_dowhile || op == OP_while) {
+      WhileStmtNode *n = static_cast<WhileStmtNode*>(stmt);
+      HandleBody(func, n->GetBody(), node, loopDepth + 1);
+    } else if (op == OP_if) {
+      IfStmtNode *n = static_cast<IfStmtNode*>(stmt);
+      HandleBody(func, n->GetThenPart(), node, loopDepth);
+      if (n->GetElsePart()) {
+        HandleBody(func, n->GetElsePart(), node, loopDepth);
+      }
+    } else {
+      node->IncrStmtCount();
+      CallType ct = GetCallType(op);
+      switch (ct) {
+        case kCallTypeVirtualCall: {
+          PUIdx calleePUIdx = (static_cast<CallNode*>(stmt))->GetPUIdx();
+          MIRFunction *calleefunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(calleePUIdx);
+          CallInfo *callInfo = GenCallInfo(kCallTypeVirtualCall, calleefunc, stmt, loopDepth, stmt->GetStmtID());
+          // Retype makes the object type less precise; look through a preceding
+          // dassign-of-retype to recover the concrete receiver class.
+          StmtNode *stmtPrev = static_cast<StmtNode*>(stmt)->GetPrev();
+          if (stmtPrev->GetOpCode() == OP_dassign) {
+            DassignNode *dassignNode = static_cast<DassignNode*>(stmtPrev);
+            if (dassignNode->GetRHS()->GetOpCode() == OP_retype) {
+              CallNode *callNode = static_cast<CallNode*>(stmt);
+              CHECK_FATAL(callNode->Opnd(0)->GetOpCode() == OP_dread, "Must be dread.");
+              AddrofNode *dread = static_cast<AddrofNode*>(callNode->Opnd(0));
+              if (dassignNode->GetStIdx() == dread->GetStIdx()) {
+                RetypeNode *retypeNode = static_cast<RetypeNode*>(dassignNode->GetRHS());
+                CHECK_FATAL(retypeNode->Opnd(0)->GetOpCode() == OP_dread, "Must be dread.");
+                AddrofNode *dreadT = static_cast<AddrofNode*>(retypeNode->Opnd(0));
+                MIRType *type = func->GetLocalOrGlobalSymbol(dreadT->GetStIdx())->GetType();
+                CHECK_FATAL(type->IsMIRPtrType(), "Must be ptr type.");
+                MIRPtrType *ptrType = static_cast<MIRPtrType*>(type);
+                MIRType *targetType = ptrType->GetPointedType();
+                MIRFunction *calleefuncT = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(calleePUIdx);
+                GStrIdx calleeFuncStrIdx = calleefuncT->GetBaseFuncNameWithTypeStrIdx();
+                Klass *klass = klassh->GetKlassFromTyIdx(targetType->GetTypeIndex());
+                if (klass != nullptr) {
+                  const MIRFunction *method = klass->GetMethod(calleeFuncStrIdx);
+                  if (method != nullptr) {
+                    calleePUIdx = method->GetPuidx();
+                  } else {
+                    std::string funcName = klass->GetKlassName();
+                    funcName.append((NameMangler::kNameSplitterStr));
+                    funcName.append(calleefuncT->GetBaseFuncNameWithType());
+                    MIRFunction *methodT = mirBuilder->GetOrCreateFunction(funcName, (TyIdx) (PTY_void));
+                    methodT->SetBaseClassNameStrIdx(klass->GetKlassNameStrIdx());
+                    methodT->SetBaseFuncNameWithTypeStrIdx(calleeFuncStrIdx);
+                    calleePUIdx = methodT->GetPuidx();
+                  }
+                }
+              }
+            }
+          }
+          // Add a call node whether or not the calleefunc has a body
+          CGNode *calleeNode = GetOrGenCGNode(calleePUIdx, true);
+          CHECK_FATAL(calleeNode != nullptr, "calleenode is null");
+          CHECK_FATAL(calleeNode->IsVcallCandidatesValid(), "vcall candidate must be valid");
+          node->AddCallsite(callInfo, &calleeNode->GetVcallCandidates());
+          for (auto &cgIt : calleeNode->GetVcallCandidates()) {
+            CGNode *calleeNodeT = cgIt;
+            calleeNodeT->AddCaller(node);
+          }
+          break;
+        }
+        case kCallTypeInterfaceCall: {
+          PUIdx calleePUIdx = (static_cast<CallNode*>(stmt))->GetPUIdx();
+          MIRFunction *calleeFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(calleePUIdx);
+          CallInfo *callInfo = GenCallInfo(kCallTypeInterfaceCall, calleeFunc, stmt, loopDepth, stmt->GetStmtID());
+          // Add a call node whether or not the calleeFunc has a body
+          CGNode *calleeNode = GetOrGenCGNode(calleeFunc->GetPuidx(), false, true);
+          CHECK_FATAL(calleeNode != nullptr, "calleenode is null");
+          CHECK_FATAL(calleeNode->IsIcallCandidatesValid(), "icall candidate must be valid");
+          node->AddCallsite(callInfo, &calleeNode->GetIcallCandidates());
+          for (auto &cgIt : calleeNode->GetIcallCandidates()) {
+            CGNode *calleeNodeT = cgIt;
+            calleeNodeT->AddCaller(node);
+          }
+          break;
+        }
+        case kCallTypeCall: {
+          PUIdx calleePUIdx = (static_cast<CallNode*>(stmt))->GetPUIdx();
+          MIRFunction *calleeFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(calleePUIdx);
+          // Ignore clinit
+          if (!calleeFunc->IsClinit()) {
+            CallInfo *callInfo = GenCallInfo(kCallTypeCall, calleeFunc, stmt, loopDepth, stmt->GetStmtID());
+            CGNode *calleeNode = GetOrGenCGNode(calleeFunc->GetPuidx());
+            ASSERT(calleeNode != nullptr, "calleenode is null");
+            calleeNode->AddCaller(node);
+            node->AddCallsite(callInfo, calleeNode);
+          }
+          break;
+        }
+        case kCallTypeSuperCall: {
+          PUIdx calleePUIdx = (static_cast<CallNode*>(stmt))->GetPUIdx();
+          MIRFunction *calleeFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(calleePUIdx);
+          Klass *klass = klassh->GetKlassFromFunc(calleeFunc);
+          if (klass == nullptr) {  // Fix CI
+            continue;
+          }
+          ASSERT(klass != nullptr, "Klass not found");
+          MapleVector<MIRFunction*> *cands = klass->GetCandidates(calleeFunc->GetBaseFuncNameWithTypeStrIdx());
+          // continue to search its implinterfaces
+          if (!cands) {
+            for (Klass *implinterface : klass->GetImplInterfaces()) {
+              cands = implinterface->GetCandidates(calleeFunc->GetBaseFuncNameWithTypeStrIdx());
+              if (cands && !cands->empty()) {
+                break;
+              }
+            }
+          }
+          if (!cands || cands->empty()) {
+            continue;  // Fix CI
+          }
+          MIRFunction *actualMirfunc = cands->at(0);
+          CallInfo *callInfo = GenCallInfo(kCallTypeCall, actualMirfunc, stmt, loopDepth, stmt->GetStmtID());
+          CGNode *calleeNode = GetOrGenCGNode(actualMirfunc->GetPuidx());
+          ASSERT(calleeNode != nullptr, "calleenode is null");
+          calleeNode->AddCaller(node);
+          (static_cast<CallNode*>(stmt))->SetPUIdx(actualMirfunc->GetPuidx());
+          node->AddCallsite(callInfo, calleeNode);
+          break;
+        }
+        case kCallTypeIntrinsicCall:
+        case kCallTypeIntrinsicCallWithType:
+        case kCallTypeCustomCall:
+        case kCallTypePolymorphicCall:
+        case kCallTypeIcall:
+        case kCallTypeXinitrinsicCall:
+        case kCallTypeInvalid: {
+          break;
+        }
+        default: {
+          CHECK_FATAL(false, "TODO: unsupported call type");
+        }
+      }
+    }
+  }
+}
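For reference, the shape of the statement pair that the kCallTypeVirtualCall case devirtualizes through, written out as C++ comments in pseudo-IR (all names invented):

    // Pseudo-IR of the pattern matched above:
    //   stmtPrev:  dassign %obj (retype <* $SubClass> (dread %tmp))
    //   stmt:      virtualcall &Base_foo (dread %obj, ...)
    // When the dassign target %obj is also the receiver of the virtualcall,
    // the receiver's static type is narrowed from Base to $SubClass, so the
    // callee lookup starts from $SubClass's method table instead of Base's.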
+void CallGraph::UpdateCallGraphNode(CGNode *node) {
+  node->Reset();
+  MIRFunction *func = node->GetMIRFunction();
+  BlockNode *body = func->GetBody();
+  HandleBody(func, body, node, 0);
+}
+
+void CallGraph::RecomputeSCC() {
+  sccTopologicalVec.clear();
+  numOfSccs = 0;
+  BuildSCC();
+}
+
+void CallGraph::AddCallGraphNode(MIRFunction *func) {
+  CGNode *node = GetOrGenCGNode(func->GetPuidx());
+  CHECK_FATAL(node != nullptr, "node is null in CallGraph::GenCallGraph");
+  BlockNode *body = func->GetBody();
+  HandleBody(func, body, node, 0);
+  /* set root if current function is static main */
+  if (func->GetName() == mirModule->GetEntryFuncName()) {
+    mirModule->SetEntryFunction(func);
+    entry_node = node;
+  }
+}
+
+static void ResetInferredType(std::vector<MIRSymbol*> &inferredSymbols) {
+  for (unsigned int i = 0; i < inferredSymbols.size(); i++) {
+    inferredSymbols[i]->SetInferredTyIdx(TyIdx());
+  }
+  inferredSymbols.clear();
+}
+
+static void ResetInferredType(std::vector<MIRSymbol*> &inferredSymbols, MIRSymbol *s) {
+  if (!s) {
+    return;
+  }
+  if (s->GetInferredTyIdx() == kInitTyIdx || s->GetInferredTyIdx() == kNoneTyIdx) {
+    return;
+  }
+  unsigned int i = 0;
+  for (; i < inferredSymbols.size(); i++) {
+    if (inferredSymbols[i] == s) {
+      s->SetInferredTyIdx(TyIdx());
+      inferredSymbols.erase(inferredSymbols.begin() + i);
+      break;
+    }
+  }
+  // ASSERT(i < inferredSymbols.size(), "");
+}
+
+static void SetInferredType(std::vector<MIRSymbol*> &inferredSymbols, MIRSymbol *s, TyIdx idx) {
+  s->SetInferredTyIdx(idx);
+  unsigned int i = 0;
+  for (; i < inferredSymbols.size(); i++) {
+    if (inferredSymbols[i] == s) {
+      break;
+    }
+  }
+  if (i == inferredSymbols.size()) {
+    inferredSymbols.push_back(s);
+  }
+}
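ResetInferredType and SetInferredType above maintain inferredSymbols as the set of symbols whose inferred dynamic type is currently trusted; resetting writes TyIdx() back into the symbol and drops it from the set. A hypothetical flow mirroring the dassign/call handling in the functions below (symX and fooTyIdx are invented):

    // Hypothetical use of the inferred-type helpers.
    std::vector<MIRSymbol*> gcmallocSymbols;
    SetInferredType(gcmallocSymbols, symX, fooTyIdx);  // x = gcmalloc <$Foo>: trust x's type
    ResetInferredType(gcmallocSymbols, symX);          // x escapes into a call: distrust x
    ResetInferredType(gcmallocSymbols);                // leaving the region: distrust everything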
klass->GetKlassName(); + typeName.append(NameMangler::kClinitSuffix); + GStrIdx clinitFuncGstridx = + GlobalTables::GetStrTable().GetStrIdxFromName(NameMangler::GetInternalNameLiteral(typeName)); + if (clinitFuncGstridx == 0u) { + return; + } + MIRFunction *func = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(clinitFuncGstridx)->GetFunction(); + if (!func->GetBody()) { + return; + } + StmtNode *stmtNext = nullptr; + std::vector gcmallocSymbols; + for (StmtNode *stmt = func->GetBody()->GetFirst(); stmt != nullptr; stmt = stmtNext) { + stmtNext = stmt->GetNext(); + Opcode op = stmt->GetOpCode(); + switch (op) { + case OP_comment: + break; + case OP_dassign: { + DassignNode *dassignNode = static_cast(stmt); + MIRSymbol *leftSymbol = func->GetLocalOrGlobalSymbol(dassignNode->GetStIdx()); + unsigned i = 0; + for (; i < staticFinalPrivateSymbols.size(); i++) { + if (staticFinalPrivateSymbols[i] == leftSymbol) { + break; + } + } + if (i < staticFinalPrivateSymbols.size()) { + if (dassignNode->GetRHS()->GetOpCode() == OP_dread) { + DreadNode *dreadNode = static_cast(dassignNode->GetRHS()); + MIRSymbol *rightSymbol = func->GetLocalOrGlobalSymbol(dreadNode->GetStIdx()); + if (rightSymbol->GetInferredTyIdx() != kInitTyIdx && rightSymbol->GetInferredTyIdx() != kNoneTyIdx && + (staticFinalPrivateSymbols[i]->GetInferredTyIdx() == kInitTyIdx || + (staticFinalPrivateSymbols[i]->GetInferredTyIdx() == rightSymbol->GetInferredTyIdx()))) { + staticFinalPrivateSymbols[i]->SetInferredTyIdx(rightSymbol->GetInferredTyIdx()); + } else { + staticFinalPrivateSymbols[i]->SetInferredTyIdx(kInitTyIdx); + staticFinalPrivateSymbols.erase(staticFinalPrivateSymbols.begin() + i); + } + } else { + staticFinalPrivateSymbols[i]->SetInferredTyIdx(kInitTyIdx); + staticFinalPrivateSymbols.erase(staticFinalPrivateSymbols.begin() + i); + } + } else if (dassignNode->GetRHS()->GetOpCode() == OP_gcmalloc) { + GCMallocNode *gcmallocNode = static_cast(dassignNode->GetRHS()); + TyIdx inferredTypeIdx = gcmallocNode->GetTyIdx(); + SetInferredType(gcmallocSymbols, leftSymbol, inferredTypeIdx); + } else { + ResetInferredType(gcmallocSymbols, leftSymbol); + } + break; + } + case OP_call: + case OP_callassigned: { + CallNode *cnode = static_cast(stmt); + MIRFunction *calleefunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(cnode->GetPUIdx()); + if (calleefunc->GetName().find(NameMangler::kClinitSubStr, 0) != std::string::npos) { + // ignore all side effect of initizlizor + continue; + } + for (unsigned int i = 0; i < cnode->GetReturnVec().size(); i++) { + StIdx stidx = cnode->GetReturnVec()[i].first; + MIRSymbol *tmpSymbol = func->GetLocalOrGlobalSymbol(stidx); + ResetInferredType(gcmallocSymbols, tmpSymbol); + } + for (size_t i = 0; i < cnode->GetNopndSize(); i++) { + BaseNode *node = cnode->GetNopndAt(i); + if (node->GetOpCode() != OP_dread) { + continue; + } + // ASSERT(node->op == OP_dread, "Must be dread"); + DreadNode *dreadNode = static_cast(node); + MIRSymbol *tmpSymbol = func->GetLocalOrGlobalSymbol(dreadNode->GetStIdx()); + ResetInferredType(gcmallocSymbols, tmpSymbol); + } + break; + } + case OP_intrinsiccallwithtype: { + IntrinsiccallNode *callnode = static_cast(stmt); + if (callnode->GetIntrinsic() != INTRN_JAVA_CLINIT_CHECK) { + ResetInferredType(gcmallocSymbols); + } + break; + } + default: + ResetInferredType(gcmallocSymbols); + break; + } + } +} + +void IPODevirtulize::SearchDefInMemberMethods(const Klass *klass) { + SearchDefInClinit(klass); + MIRClassType *classtype = static_cast(klass->GetMIRStructType()); + 
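(Editorial aside, not part of the patch.) The kInitTyIdx/kNoneTyIdx bookkeeping in SearchDefInClinit above, and in SearchDefInMemberMethods below, is a small per-symbol type lattice: kInitTyIdx means "no type seen yet", a concrete TyIdx means "every definition seen so far agrees", and a field is dropped from the candidate list on the first disagreeing definition. The merge rule applied at each reaching definition is, schematically (names follow the surrounding code):

    if (rhsTy != kInitTyIdx && rhsTy != kNoneTyIdx &&
        (fieldTy == kInitTyIdx || fieldTy == rhsTy)) {
      fieldTy = rhsTy;       // first definition, or one agreeing with what we saw
    } else {
      fieldTy = kInitTyIdx;  // unknown RHS or disagreement: drop this candidate
    }

so a final field keeps an inferred type only when every initializer assigns it the same concrete gcmalloc'ed type.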
std::vector finalPrivateFieldID; + for (uint32 i = 0; i < classtype->GetFieldsSize(); i++) { + FieldAttrs attribute = classtype->GetFields()[i].second.second; + if (attribute.GetAttr(FLDATTR_final)) { + // Conflict with simplify + if (!strcmp(klass->GetKlassName().c_str(), + "Lcom_2Fandroid_2Fserver_2Fpm_2FPackageManagerService_24ActivityIntentResolver_3B") && + !strcmp(GlobalTables::GetStrTable().GetStringFromStrIdx(classtype->GetFields()[i].first).c_str(), + "mActivities")) { + continue; + } + FieldID id = mirBuilder->GetStructFieldIDFromFieldNameParentFirst( + classtype, GlobalTables::GetStrTable().GetStringFromStrIdx(classtype->GetFields()[i].first)); + finalPrivateFieldID.push_back(id); + } + } + std::vector initMethods; + std::string typeName = klass->GetKlassName(); + typeName.append(NameMangler::kCinitStr); + for (MIRFunction *const &method : klass->GetMethods()) { + if (!strncmp(method->GetName().c_str(), typeName.c_str(), typeName.length())) { + initMethods.push_back(method); + } + } + if (initMethods.empty()) { + return; + } + ASSERT(!initMethods.empty(), "Must have initializor"); + StmtNode *stmtNext = nullptr; + for (unsigned int i = 0; i < initMethods.size(); i++) { + MIRFunction *func = initMethods[i]; + if (!func->GetBody()) { + continue; + } + std::vector gcmallocSymbols; + for (StmtNode *stmt = func->GetBody()->GetFirst(); stmt != nullptr; stmt = stmtNext) { + stmtNext = stmt->GetNext(); + Opcode op = stmt->GetOpCode(); + switch (op) { + case OP_comment: + break; + case OP_dassign: { + DassignNode *dassignNode = static_cast(stmt); + MIRSymbol *leftSymbol = func->GetLocalOrGlobalSymbol(dassignNode->GetStIdx()); + if (dassignNode->GetRHS()->GetOpCode() == OP_gcmalloc) { + GCMallocNode *gcmallocNode = static_cast(dassignNode->GetRHS()); + SetInferredType(gcmallocSymbols, leftSymbol, gcmallocNode->GetTyIdx()); + } else if (dassignNode->GetRHS()->GetOpCode() == OP_retype) { + RetypeNode *retystmt = static_cast(dassignNode->GetRHS()); + BaseNode *fromnode = retystmt->Opnd(0); + if (fromnode->GetOpCode() == OP_dread) { + DreadNode *dreadNode = static_cast(fromnode); + MIRSymbol *fromSymbol = func->GetLocalOrGlobalSymbol(dreadNode->GetStIdx()); + SetInferredType(gcmallocSymbols, leftSymbol, fromSymbol->GetInferredTyIdx()); + } else { + ResetInferredType(gcmallocSymbols, leftSymbol); + } + } else { + ResetInferredType(gcmallocSymbols, leftSymbol); + } + break; + } + case OP_call: + case OP_callassigned: { + CallNode *cnode = static_cast(stmt); + MIRFunction *calleefunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(cnode->GetPUIdx()); + if (calleefunc->GetName().find(NameMangler::kClinitSubStr, 0) != std::string::npos) { + // ignore all side effect of initizlizor + continue; + } + for (size_t j = 0; j < cnode->GetReturnVec().size(); j++) { + StIdx stidx = cnode->GetReturnVec()[j].first; + MIRSymbol *tmpSymbol = func->GetLocalOrGlobalSymbol(stidx); + ResetInferredType(gcmallocSymbols, tmpSymbol); + } + for (size_t j = 0; j < cnode->GetNopndSize(); j++) { + BaseNode *node = cnode->GetNopndAt(j); + if (node->GetOpCode() != OP_dread) { + continue; + } + // ASSERT(node->op == OP_dread, "Must be dread"); + DreadNode *dreadNode = static_cast(node); + MIRSymbol *tmpSymbol = func->GetLocalOrGlobalSymbol(dreadNode->GetStIdx()); + ResetInferredType(gcmallocSymbols, tmpSymbol); + } + break; + } + case OP_intrinsiccallwithtype: { + IntrinsiccallNode *callnode = static_cast(stmt); + if (callnode->GetIntrinsic() != INTRN_JAVA_CLINIT_CHECK) { + ResetInferredType(gcmallocSymbols); + } + 
break; + } + case OP_iassign: { + IassignNode *iassignNode = static_cast(stmt); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iassignNode->GetTyIdx()); + ASSERT(type->GetKind() == kTypePointer, "Must be pointer type"); + MIRPtrType *pointedType = static_cast(type); + if (pointedType->GetPointedTyIdx() == classtype->GetTypeIndex()) { + // set field of current class + FieldID fieldID = iassignNode->GetFieldID(); + unsigned j = 0; + for (; j < finalPrivateFieldID.size(); j++) { + if (finalPrivateFieldID[j] == fieldID) { + break; + } + } + if (j < finalPrivateFieldID.size()) { + if (iassignNode->GetRHS()->GetOpCode() == OP_dread) { + DreadNode *dreadNode = static_cast(iassignNode->GetRHS()); + CHECK_FATAL(dreadNode != nullptr, "Impossible"); + MIRSymbol *rightSymbol = func->GetLocalOrGlobalSymbol(dreadNode->GetStIdx()); + if (rightSymbol->GetInferredTyIdx() != kInitTyIdx && rightSymbol->GetInferredTyIdx() != kNoneTyIdx && + (classtype->GetElemInferredTyIdx(fieldID) == kInitTyIdx || + (classtype->GetElemInferredTyIdx(fieldID) == rightSymbol->GetInferredTyIdx()))) { + classtype->SetElemInferredTyIdx(fieldID, rightSymbol->GetInferredTyIdx()); + } else { + classtype->SetElemInferredTyIdx(fieldID, kInitTyIdx); + finalPrivateFieldID.erase(finalPrivateFieldID.begin() + j); + } + } else { + classtype->SetElemInferredTyIdx(fieldID, kInitTyIdx); + finalPrivateFieldID.erase(finalPrivateFieldID.begin() + j); + } + } + } + break; + } + default: + ResetInferredType(gcmallocSymbols); + break; + } + } + } +} + +void DoDevirtual(const Klass *klass, const KlassHierarchy *klassh) { + MIRClassType *classtype = static_cast(klass->GetMIRStructType()); + for (auto &func : klass->GetMethods()) { + if (!func->GetBody()) { + continue; + } + StmtNode *stmtNext = nullptr; + std::vector inferredSymbols; + for (StmtNode *stmt = func->GetBody()->GetFirst(); stmt != nullptr; stmt = stmtNext) { + stmtNext = stmt->GetNext(); + Opcode op = stmt->GetOpCode(); + switch (op) { + case OP_comment: + case OP_assertnonnull: + case OP_brtrue: + case OP_brfalse: + break; + case OP_dassign: { + DassignNode *dassignNode = static_cast(stmt); + MIRSymbol *leftSymbol = func->GetLocalOrGlobalSymbol(dassignNode->GetStIdx()); + if (dassignNode->GetRHS()->GetOpCode() == OP_dread) { + DreadNode *dreadNode = static_cast(dassignNode->GetRHS()); + if (func->GetLocalOrGlobalSymbol(dreadNode->GetStIdx())->GetInferredTyIdx() != kInitTyIdx) { + SetInferredType(inferredSymbols, leftSymbol, + func->GetLocalOrGlobalSymbol(dreadNode->GetStIdx())->GetInferredTyIdx()); + } + } else if (dassignNode->GetRHS()->GetOpCode() == OP_iread) { + IreadNode *ireadNode = static_cast(dassignNode->GetRHS()); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ireadNode->GetTyIdx()); + ASSERT(type->GetKind() == kTypePointer, "Must be pointer type"); + MIRPtrType *pointedType = static_cast(type); + if (pointedType->GetPointedTyIdx() == classtype->GetTypeIndex()) { + FieldID fieldID = ireadNode->GetFieldID(); + FieldID tmpID = fieldID; + TyIdx tmpTyIdx = classtype->GetElemInferredTyIdx(tmpID); + if (tmpTyIdx != kInitTyIdx && tmpTyIdx != kNoneTyIdx) { + SetInferredType(inferredSymbols, leftSymbol, classtype->GetElemInferredTyIdx(fieldID)); + } + } + } else { + ResetInferredType(inferredSymbols, leftSymbol); + } + break; + } + case OP_interfacecall: + case OP_interfacecallassigned: + case OP_virtualcall: + case OP_virtualcallassigned: { + CallNode *calleeNode = static_cast(stmt); + MIRFunction *calleefunc = 
GlobalTables::GetFunctionTable().GetFunctionFromPuidx(calleeNode->GetPUIdx()); + if (calleeNode->GetNopndAt(0)->GetOpCode() == OP_dread) { + DreadNode *dreadNode = static_cast(calleeNode->GetNopndAt(0)); + MIRSymbol *rightSymbol = func->GetLocalOrGlobalSymbol(dreadNode->GetStIdx()); + if (rightSymbol->GetInferredTyIdx() != kInitTyIdx && rightSymbol->GetInferredTyIdx() != kNoneTyIdx) { + // Devirtual + Klass *currKlass = klassh->GetKlassFromTyIdx(rightSymbol->GetInferredTyIdx()); + if (op == OP_interfacecall || op == OP_interfacecallassigned || op == OP_virtualcall || + op == OP_virtualcallassigned) { + std::vector klassVector; + klassVector.push_back(currKlass); + bool hasDevirtualed = false; + for (unsigned int index = 0; index < klassVector.size(); index++) { + Klass *tmpKlass = klassVector[index]; + for (MIRFunction *const &method : tmpKlass->GetMethods()) { + if (calleefunc->GetBaseFuncNameWithTypeStrIdx() == method->GetBaseFuncNameWithTypeStrIdx()) { + calleeNode->SetPUIdx(method->GetPuidx()); + if (op == OP_virtualcall || op == OP_interfacecall) { + calleeNode->SetOpCode(OP_call); + } + if (op == OP_virtualcallassigned || op == OP_interfacecallassigned) { + calleeNode->SetOpCode(OP_callassigned); + } + hasDevirtualed = true; + if (false) { + LogInfo::MapleLogger() << "Devirtualize In function:" + func->GetName() << '\n'; + LogInfo::MapleLogger() << calleeNode->GetOpCode() << '\n'; + LogInfo::MapleLogger() << " From:" << calleefunc->GetName() << '\n'; + LogInfo::MapleLogger() << " To :" + << GlobalTables::GetFunctionTable().GetFunctionFromPuidx(calleeNode->GetPUIdx())->GetName() + << '\n'; + } + break; + } + } + if (hasDevirtualed) { + break; + } + // add subclass of currKlass into vecotr + for (Klass *superKlass : tmpKlass->GetSuperKlasses()) { + klassVector.push_back(superKlass); + } + } + if (hasDevirtualed) { + for (size_t i = 0; i < calleeNode->GetNopndSize(); i++) { + BaseNode *node = calleeNode->GetNopndAt(i); + if (node->GetOpCode() != OP_dread) { + continue; + } + dreadNode = static_cast(node); + MIRSymbol *tmpSymbol = func->GetLocalOrGlobalSymbol(dreadNode->GetStIdx()); + ResetInferredType(inferredSymbols, tmpSymbol); + } + if (op == OP_interfacecallassigned || op == OP_virtualcallassigned) { + CallNode *cnode = static_cast(stmt); + for (unsigned int i = 0; i < cnode->GetReturnVec().size(); i++) { + StIdx stidx = cnode->GetReturnVec()[i].first; + MIRSymbol *tmpSymbol = func->GetLocalOrGlobalSymbol(stidx); + ResetInferredType(inferredSymbols, tmpSymbol); + } + } + break; + } + // Search default function in interfaces + Klass *tmpInterface = nullptr; + MIRFunction *tmpMethod = nullptr; + for (Klass *iklass : currKlass->GetImplInterfaces()) { + for (MIRFunction *const &method : iklass->GetMethods()) { + if (calleefunc->GetBaseFuncNameWithTypeStrIdx() == method->GetBaseFuncNameWithTypeStrIdx() && + !method->GetFuncAttrs().GetAttr(FUNCATTR_abstract)) { + if (!tmpInterface || klassh->IsSuperKlassForInterface(tmpInterface, iklass)) { + tmpInterface = iklass; + tmpMethod = method; + } + break; + } + } + } + // Add this check for the thirdparty APP compile + if (!tmpMethod) { + Klass *parentKlass = klassh->GetKlassFromName(calleefunc->GetBaseClassName()); + CHECK_FATAL(parentKlass != nullptr, "null ptr check"); + bool flag = false; + if (parentKlass->GetKlassName() == currKlass->GetKlassName()) { + flag = true; + } else { + for (Klass *const &superclass : currKlass->GetSuperKlasses()) { + if (parentKlass->GetKlassName() == superclass->GetKlassName()) { + flag = true; + break; + } + } 
+ if (!flag && parentKlass->IsInterface()) { + for (Klass *const &implClass : currKlass->GetImplKlasses()) { + if (parentKlass->GetKlassName() == implClass->GetKlassName()) { + flag = true; + break; + } + } + } + } + if (!flag) { + LogInfo::MapleLogger() << "warning: func " << calleefunc->GetName() << " is not found in DeVirtual!" + << std::endl; + LogInfo::MapleLogger() << "warning: " << calleefunc->GetBaseClassName() << " is not the parent of " + << currKlass->GetKlassName() << std::endl; + } + } + if (!tmpMethod) { // SearchWithoutRettype, search only in current class now. + MIRType *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(calleefunc->GetReturnTyIdx()); + Klass *targetKlass = nullptr; + bool isCalleeScalar = false; + if (retType->GetKind() == kTypePointer && retType->GetPrimType() == PTY_ref) { + MIRType *ptrType = (static_cast(retType))->GetPointedType(); + targetKlass = klassh->GetKlassFromTyIdx(ptrType->GetTypeIndex()); + } else if (retType->GetKind() == kTypeScalar) { + isCalleeScalar = true; + } else { + targetKlass = klassh->GetKlassFromTyIdx(retType->GetTypeIndex()); + } + if (!targetKlass && !isCalleeScalar) { + CHECK_FATAL(targetKlass != nullptr, "null ptr check"); + } + Klass *curRetKlass = nullptr; + bool isCurrVtabScalar = false; + bool isFindMethod = false; + for (MIRFunction *const &method : currKlass->GetMethods()) { + if (calleefunc->GetBaseFuncSigStrIdx() == method->GetBaseFuncSigStrIdx()) { + Klass *tmpKlass = nullptr; + MIRType *tmpType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(method->GetReturnTyIdx()); + if (tmpType->GetKind() == kTypePointer && tmpType->GetPrimType() == PTY_ref) { + MIRType *ptrType = (static_cast(tmpType))->GetPointedType(); + tmpKlass = klassh->GetKlassFromTyIdx(ptrType->GetTypeIndex()); + } else if (tmpType->GetKind() == kTypeScalar) { + isCurrVtabScalar = true; + } else { + tmpKlass = klassh->GetKlassFromTyIdx(tmpType->GetTypeIndex()); + } + if (!tmpKlass && !isCurrVtabScalar) { + CHECK_FATAL(tmpKlass != nullptr, "null ptr check"); + } + if (isCalleeScalar || isCurrVtabScalar) { + if (isFindMethod) { + LogInfo::MapleLogger() << "warning: this " << currKlass->GetKlassName() + << " has mult methods with the same function name but with different return type!" + << std::endl; + break; + } + tmpMethod = method; + isFindMethod = true; + continue; + } + if (targetKlass->IsClass() && klassh->IsSuperKlass(tmpKlass, targetKlass) && + (!curRetKlass || klassh->IsSuperKlass(curRetKlass, tmpKlass))) { + curRetKlass = tmpKlass; + tmpMethod = method; + } + if (targetKlass->IsClass() && klassh->IsInterfaceImplemented(tmpKlass, targetKlass)) { + tmpMethod = method; + break; + } + if (!targetKlass->IsClass()) { + CHECK_FATAL(tmpKlass != nullptr, "Klass null ptr check"); + if (tmpKlass->IsClass() && klassh->IsInterfaceImplemented(targetKlass, tmpKlass) && + (!curRetKlass || klassh->IsSuperKlass(curRetKlass, tmpKlass))) { + curRetKlass = tmpKlass; + tmpMethod = method; + } + if (!tmpKlass->IsClass() && klassh->IsSuperKlassForInterface(tmpKlass, targetKlass) && + (!curRetKlass || klassh->IsSuperKlass(curRetKlass, tmpKlass))) { + curRetKlass = tmpKlass; + tmpMethod = method; + } + } + } + } + } + if (!tmpMethod && (currKlass->IsClass() || currKlass->IsInterface())) { + LogInfo::MapleLogger() << "warning: func " << calleefunc->GetName() + << " is not found in DeVirtual!" << std::endl; + stmt->SetOpCode(OP_callassigned); + break; + } else if (!tmpMethod) { + LogInfo::MapleLogger() << "Error: func " << calleefunc->GetName() << " is not found!" 
<< std::endl; + ASSERT(tmpMethod, "Must not be null"); + } + calleeNode->SetPUIdx(tmpMethod->GetPuidx()); + if (op == OP_virtualcall || op == OP_interfacecall) { + calleeNode->SetOpCode(OP_call); + } + if (op == OP_virtualcallassigned || op == OP_interfacecallassigned) { + calleeNode->SetOpCode(OP_callassigned); + } + if (false) { + LogInfo::MapleLogger() << "Devirtualize In function:" + func->GetName() << '\n'; + LogInfo::MapleLogger() << calleeNode->GetOpCode() << '\n'; + LogInfo::MapleLogger() << " From:" << calleefunc->GetName() << '\n'; + LogInfo::MapleLogger() << " To :" + << GlobalTables::GetFunctionTable().GetFunctionFromPuidx(calleeNode->GetPUIdx())->GetName() + << '\n'; + } + for (size_t i = 0; i < calleeNode->GetNopndSize(); i++) { + BaseNode *node = calleeNode->GetNopndAt(i); + if (node->GetOpCode() != OP_dread) { + continue; + } + dreadNode = static_cast(node); + MIRSymbol *tmpSymbol = func->GetLocalOrGlobalSymbol(dreadNode->GetStIdx()); + ResetInferredType(inferredSymbols, tmpSymbol); + } + if (op == OP_interfacecallassigned || op == OP_virtualcallassigned) { + CallNode *cnode = static_cast(stmt); + for (unsigned int i = 0; i < cnode->GetReturnVec().size(); i++) { + StIdx stidx = cnode->GetReturnVec()[i].first; + MIRSymbol *tmpSymbol = func->GetLocalOrGlobalSymbol(stidx); + ResetInferredType(inferredSymbols, tmpSymbol); + } + } + break; + } + } + } + } + case OP_call: + case OP_callassigned: { + CallNode *cnode = static_cast(stmt); + for (size_t i = 0; i < cnode->GetReturnVec().size(); i++) { + StIdx stidx = cnode->GetReturnVec()[i].first; + MIRSymbol *tmpSymbol = func->GetLocalOrGlobalSymbol(stidx); + ResetInferredType(inferredSymbols, tmpSymbol); + } + for (size_t i = 0; i < cnode->GetNopndSize(); i++) { + BaseNode *node = cnode->GetNopndAt(i); + if (node->GetOpCode() != OP_dread) { + continue; + } + DreadNode *dreadNode = static_cast(node); + MIRSymbol *tmpSymbol = func->GetLocalOrGlobalSymbol(dreadNode->GetStIdx()); + ResetInferredType(inferredSymbols, tmpSymbol); + } + break; + } + default: + ResetInferredType(inferredSymbols); + break; + } + } + } +} + +void IPODevirtulize::DevirtualFinal() { + // Search all klass in order to find final variables + MapleMap::const_iterator it = klassh->GetKlasses().begin(); + for (; it != klassh->GetKlasses().end(); ++it) { + Klass *klass = it->second; + if (klass->IsClass()) { + MIRClassType *classtype = static_cast(klass->GetMIRStructType()); + // Initialize inferred type of member fileds as kInitTyidx + for (unsigned int i = 0; i < classtype->GetFieldsSize(); i++) { // Don't include parent's field + classtype->SetElemInferredTyIdx(i, kInitTyIdx); + } + SearchDefInMemberMethods(klass); + for (unsigned int i = 0; i < classtype->GetFieldInferredTyIdx().size(); i++) { + if (classtype->GetElemInferredTyIdx(i) != kInitTyIdx && classtype->GetElemInferredTyIdx(i) != kNoneTyIdx) { + if (debugFlag) { + FieldID tmpID = i; + FieldPair pair = classtype->TraverseToFieldRef(tmpID); + LogInfo::MapleLogger() << "Inferred Final Private None-Static Variable:" + klass->GetKlassName() + ":" + + GlobalTables::GetStrTable().GetStringFromStrIdx(pair.first) + << '\n'; + } + } + } + for (uint32 i = 0; i < classtype->GetStaticFields().size(); i++) { + FieldAttrs attribute = classtype->GetStaticFields()[i].second.second; + if (!GlobalTables::GetGsymTable().GetSymbolFromStrIdx(classtype->GetStaticFields()[i].first)) { + continue; + } + if (GlobalTables::GetGsymTable().GetSymbolFromStrIdx(classtype->GetStaticFields()[i].first)->GetInferredTyIdx() + != kInitTyIdx 
&& + GlobalTables::GetGsymTable().GetSymbolFromStrIdx(classtype->GetStaticFields()[i].first)->GetInferredTyIdx() + != kNoneTyIdx) { + CHECK_FATAL(attribute.GetAttr(FLDATTR_final), "Must be final private"); + if (debugFlag) { + LogInfo::MapleLogger() << "Final Private Static Variable:" + + GlobalTables::GetStrTable().GetStringFromStrIdx(classtype->GetStaticFields()[i].first) + << '\n'; + } + } + } + DoDevirtual(klass, GetKlassh()); + } + } +} + +using CallSite = std::pair; +void CallGraph::GenCallGraph() { + // Read existing call graph from mplt, std::map > + // caller_PUIdx and all call site info are needed. Rebuild all other info of CGNode using CHA + for (auto const &it : mirModule->GetMethod2TargetMap()) { + CGNode *node = GetOrGenCGNode(it.first); + CHECK_FATAL(node != nullptr, "node is null"); + std::vector callees = it.second; + for (auto itInner = callees.begin(); itInner != callees.end(); ++itInner) { + CallInfo *info = (*itInner).first; + CGNode *calleeNode = GetOrGenCGNode(info->GetFunc()->GetPuidx(), info->GetCallType() == kCallTypeVirtualCall, + info->GetCallType() == kCallTypeInterfaceCall); + CHECK_FATAL(calleeNode != nullptr, "calleeNode is null"); + if (info->GetCallType() == kCallTypeVirtualCall) { + node->AddCallsite((*itInner).first, &calleeNode->GetVcallCandidates()); + } else if (info->GetCallType() == kCallTypeInterfaceCall) { + node->AddCallsite((*itInner).first, &calleeNode->GetIcallCandidates()); + } else if (info->GetCallType() == kCallTypeCall) { + node->AddCallsite((*itInner).first, calleeNode); + } else if (info->GetCallType() == kCallTypeSuperCall) { + const MIRFunction *calleefunc = info->GetFunc(); + Klass *klass = klassh->GetKlassFromFunc(calleefunc); + if (klass == nullptr) { // Fix CI + continue; + } + MapleVector *cands = klass->GetCandidates(calleefunc->GetBaseFuncNameWithTypeStrIdx()); + // continue to search its implinterfaces + if (cands == nullptr) { + for (Klass *implinterface : klass->GetImplInterfaces()) { + cands = implinterface->GetCandidates(calleefunc->GetBaseFuncNameWithTypeStrIdx()); + if (cands && !cands->empty()) { + break; + } + } + } + if (cands == nullptr || cands->empty()) { + continue; // Fix CI + } + MIRFunction *actualMirfunc = cands->at(0); + CGNode *tempNode = GetOrGenCGNode(actualMirfunc->GetPuidx()); + ASSERT(tempNode != nullptr, "calleenode is null in CallGraph::HandleBody"); + node->AddCallsite(info, tempNode); + } + for (auto &callSite : node->GetCallee()) { + if (callSite.first == info) { + for (auto &cgIt : *callSite.second) { + CGNode *tempNode = cgIt; + tempNode->AddCaller(node); + } + break; + } + } + } + } + // Deal with function override, function in current module override functions from mplt. + // Don't need anymore as we rebuild candidate base on the latest CHA. 
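(Editorial note, not part of the patch.) GenCallGraph is two-pass: the loop above replays call sites recorded in imported mplt files, re-resolving each callee's candidate set against the freshly built class hierarchy, and the loop below rebuilds nodes from the bodies of functions defined in this module, so in-module definitions override anything stale from the mplt side. Roughly:

    for ((caller, callSites) : mirModule->GetMethod2TargetMap())   // pass 1: imported info
      GetOrGenCGNode(caller)->AddCallsite(...);
    for (func : GlobalTables::GetFunctionTable().GetFuncTable())   // pass 2: local bodies
      if (func != nullptr && func->GetBody() != nullptr) AddCallGraphNode(func);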
+  for (auto it = GlobalTables::GetFunctionTable().GetFuncTable().begin();
+       it != GlobalTables::GetFunctionTable().GetFuncTable().end(); it++) {
+    MIRFunction *mirFunc = *it;
+    if (!mirFunc || !mirFunc->GetBody()) {
+      continue;
+    }
+    mirModule->SetCurFunction(mirFunc);
+    AddCallGraphNode(mirFunc);
+  }
+  // Add all root nodes
+  FindRootNodes();
+  BuildSCC();
+}
+
+void CallGraph::FindRootNodes() {
+  if (!rootNodes.empty()) {
+    CHECK_FATAL(false, "rootNodes have already been set");
+  }
+  for (auto const &it : nodesMap) {
+    CGNode *node = it.second;
+    if (!node->HasCaller()) {
+      rootNodes.push_back(node);
+    }
+  }
+}
+
+void CallGraph::Dump() const {
+  for (auto const &it : nodesMap) {
+    CGNode *node = it.second;
+    node->DumpDetail();
+  }
+}
+
+void CallGraph::DumpToFile(bool dumpall) {
+  if (Options::noDot) {
+    return;
+  }
+  std::ofstream cgfile;
+  char *outName = nullptr;
+  MapleString outfile(fileName, GetMempool());
+  if (dumpall) {
+    outName = (outfile.append("-callgraph.dot")).c_str();
+  } else {
+    outName = (outfile.append("-callgraphlight.dot")).c_str();
+  }
+  cgfile.open(outName, std::ios::trunc);
+  cgfile << "digraph graphname {\n";
+  for (auto const &it : nodesMap) {
+    CGNode *node = it.second;
+    // dump user-defined functions only, unless dumpall is set
+    if (dumpall) {
+      node->Dump(cgfile);
+    } else {
+      if ((node->GetMIRFunction() != nullptr) && (!node->GetMIRFunction()->IsEmpty())) {
+        node->Dump(cgfile);
+      }
+    }
+  }
+  cgfile << "}\n";
+  cgfile.close();
+}
+
+void CallGraph::BuildCallGraph() {
+  GenCallGraph();
+  // Dump the callgraph to a dot file
+  if (debug_flag) {
+    DumpToFile(true);
+  }
+  SetCompilationFunclist();
+}
+
+// Sort CGNodes within an SCC. Try our best to arrange a callee before its
+// (direct) caller, so that the caller can benefit from the callee's summary
+// info. If we had iterative inter-procedural analysis, we would not need to
+// bother doing this.
+static bool CGNodeCompare(CGNode *left, CGNode *right) {
+  // special case: left calls right and right calls left; compare by id
+  if (left->IsCalleeOf(right) && right->IsCalleeOf(left)) {
+    return left->GetID() < right->GetID();
+  }
+  // left is right's direct callee, so make left appear first
+  if (left->IsCalleeOf(right)) {
+    return true;
+  } else if (right->IsCalleeOf(left)) {
+    return false;
+  }
+  return left->GetID() < right->GetID();
+}
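(Editorial aside, not part of the patch.) CGNodeCompare is handed to std::sort below, which requires a strict weak ordering; the mutual-recursion special case keeps comp(a, b) and comp(b, a) from both returning true, and the id tie-breakers keep the comparator consistent in the common cases. A self-contained toy with hypothetical Node/Calls types shows the shape of the contract:

    #include <algorithm>
    #include <cstdio>
    #include <set>
    #include <vector>

    struct Node {
      int id;
      std::set<int> callees; // ids this node calls directly
      bool Calls(const Node &other) const { return callees.count(other.id) != 0; }
    };

    // Mirrors CGNodeCompare: callees first, ties (mutual recursion) broken by id.
    static bool NodeCompare(const Node &l, const Node &r) {
      if (l.Calls(r) && r.Calls(l)) {
        return l.id < r.id;
      }
      if (r.Calls(l)) {
        return true; // l is r's callee: place l first
      }
      if (l.Calls(r)) {
        return false;
      }
      return l.id < r.id;
    }

    int main() {
      std::vector<Node> scc = {{3, {1}}, {2, {1}}, {1, {2}}};
      std::sort(scc.begin(), scc.end(), NodeCompare);
      for (const Node &n : scc) {
        std::printf("%d ", n.id); // 1 and 2 (mutual) ordered by id, 1 before its caller 3
      }
      return 0;
    }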
+
+// Set the compilation order to the bottom-up order of the callgraph, so a
+// callee is always compiled before its caller. This benefits those
+// optimizations that need interprocedural information, such as escape analysis.
+void CallGraph::SetCompilationFunclist() {
+  mirModule->GetCompilationList().clear();
+  mirModule->GetFunctionList().clear();
+  const MapleVector<SCCNode*> &sccTopVec = GetSCCTopVec();
+  for (int i = sccTopVec.size() - 1; i >= 0; i--) {
+    SCCNode *sccNode = sccTopVec[i];
+    std::sort(sccNode->cgNodes.begin(), sccNode->cgNodes.end(), CGNodeCompare);
+    for (auto const kIt : sccNode->cgNodes) {
+      CGNode *node = kIt;
+      MIRFunction *func = node->GetMIRFunction();
+      if ((func && func->GetBody() && !IsInIPA()) || (func && !func->IsNative())) {
+        mirModule->GetCompilationList().push_back(func);
+        mirModule->GetFunctionList().push_back(func);
+      }
+    }
+  }
+  if ((mirModule->GetCompilationList().size() != mirModule->GetFunctionList().size() &&
+       mirModule->GetCompilationList().size() != mirModule->GetFunctionList().size() - mirModule->GetOptFuncsSize())) {
+    CHECK_FATAL(false, "should be equal");
+  }
+}
+
+bool SCCNode::HasRecursion() const {
+  if (cgNodes.empty()) {
+    return false;
+  }
+  if (cgNodes.size() > 1) {
+    return true;
+  }
+  CGNode *node = cgNodes[0];
+  for (auto &callSite : node->GetCallee()) {
+    for (auto &cgIt : *callSite.second) {
+      CGNode *calleeNode = cgIt;
+      if (!calleeNode) {
+        continue;
+      }
+      if (node == calleeNode) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+bool SCCNode::HasSelfRecursion() const {
+  if (cgNodes.size() != 1) {
+    return false;
+  }
+  CGNode *node = cgNodes[0];
+  for (auto &callSite : node->GetCallee()) {
+    for (auto &cgIt : *callSite.second) {
+      CGNode *calleeNode = cgIt;
+      if (calleeNode == nullptr) {
+        continue;
+      }
+      if (node == calleeNode) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+void SCCNode::Dump() {
+  printf("SCC %d contains\n", id);
+  for (auto const kIt : cgNodes) {
+    CGNode *node = kIt;
+    if (node->GetMIRFunction()) {
+      printf(" function(%d): %s\n", node->GetMIRFunction()->GetPuidx(), node->GetMIRFunction()->GetName().c_str());
+    } else {
+      printf(" function: external\n");
+    }
+  }
+}
+
+void SCCNode::DumpCycle() {
+  CGNode *currNode = cgNodes[0];
+  std::vector<CGNode*> searched;
+  searched.push_back(currNode);
+  std::vector<CGNode*> invalidNodes;
+  while (true) {
+    bool findNewCallee = false;
+    for (auto &callSite : currNode->GetCallee()) {
+      for (auto &cgIt : *callSite.second) {
+        CGNode *calleeNode = cgIt;
+        if (calleeNode->GetSCCNode() == this) {
+          unsigned int j = 0;
+          for (; j < invalidNodes.size(); j++) {
+            if (invalidNodes[j] == calleeNode) {
+              break;
+            }
+          }
+          // Found an invalid node
+          if (j < invalidNodes.size()) {
+            continue;
+          }
+          for (j = 0; j < searched.size(); j++) {
+            if (searched[j] == calleeNode) {
+              break;
+            }
+          }
+          if (j == searched.size()) {
+            currNode = calleeNode;
+            searched.push_back(currNode);
+            findNewCallee = true;
+            break;
+          }
+        }
+      }
+    }
+    if (searched.size() == cgNodes.size()) {
+      break;
+    }
+    if (!findNewCallee) {
+      invalidNodes.push_back(searched[searched.size() - 1]);
+      searched.pop_back();
+      currNode = searched[searched.size() - 1];
+    }
+  }
+  for (auto it = searched.begin(); it != searched.end(); it++) {
+    LogInfo::MapleLogger() << (*it)->GetMIRFunction()->GetName() << '\n';
+  }
+}
+
+void SCCNode::Verify() {
+  if (cgNodes.size() <= 0) {
+    CHECK_FATAL(false, "");
+  }
+  for (CGNode *const &node : cgNodes) {
+    if (node->GetSCCNode() != this) {
+      CHECK_FATAL(false, "");
+    }
+  }
+}
+
+void SCCNode::Setup() {
+  for (CGNode *const &node : cgNodes) {
+    for (auto &callSite : node->GetCallee()) {
+      for (auto &cgIt : *callSite.second) {
+        CGNode *calleeNode = cgIt;
+        if (!calleeNode) {
+          continue;
+        }
+        if (calleeNode->GetSCCNode() == this) {
+          continue;
+        }
+        calleeScc.insert(calleeNode->GetSCCNode());
+      }
+    }
+    for (auto itCaller = node->CallerBegin(); itCaller != node->CallerEnd(); itCaller++) {
+      CGNode *callerNode = *itCaller;
+      if (callerNode->GetSCCNode() == this) {
+        continue;
+      }
+      callerScc.insert(callerNode->GetSCCNode());
+    }
+  }
+}
+
+void CallGraph::BuildSCCDFS(CGNode *caller, uint32 &visitIndex, std::vector<SCCNode*> &sccNodes,
+                            std::vector<CGNode*> &cgNodes, std::vector<uint32> &visitedOrder,
+                            std::vector<uint32> &lowestOrder, std::vector<bool> &inStack,
+                            std::vector<uint32> &visitStack) {
+  uint32 id = caller->GetID();
+  cgNodes.at(id) = caller;
+  visitedOrder.at(id) = visitIndex;
+  lowestOrder.at(id) = visitIndex;
+  visitIndex++;
+  visitStack.push_back(id);
+  inStack.at(id) = true;
+  for (auto &callSite : caller->GetCallee()) {
+    for (auto &cgIt : *callSite.second) {
+      CGNode *calleeNode = cgIt;
+      if (!calleeNode) {
+        continue;
+      }
+      uint32 calleeId = calleeNode->GetID();
+      if (!visitedOrder.at(calleeId)) {
+        // the callee has not been processed yet
+        BuildSCCDFS(calleeNode, visitIndex, sccNodes, cgNodes, visitedOrder, lowestOrder, inStack, visitStack);
+        if (lowestOrder.at(calleeId) < lowestOrder.at(id)) {
+          lowestOrder.at(id) = lowestOrder.at(calleeId);
+        }
+      } else if (inStack.at(calleeId) && visitedOrder.at(calleeId) < lowestOrder.at(id)) {
+        // back edge
+        lowestOrder.at(id) = visitedOrder.at(calleeId);
+      }
+    }
+  }
+  if (visitedOrder.at(id) == lowestOrder.at(id)) {
+    SCCNode *sccNode = cgalloc.GetMemPool()->New<SCCNode>(numOfSccs++, &cgalloc);
+    uint32 stackTopId;
+    do {
+      stackTopId = visitStack.back();
+      visitStack.pop_back();
+      inStack.at(stackTopId) = false;
+      CGNode *topNode = cgNodes.at(stackTopId);
+      topNode->SetSCCNode(sccNode);
+      sccNode->AddCGNode(topNode);
+    } while (stackTopId != id);
+    sccNodes.push_back(sccNode);
+  }
+}
+
+void CallGraph::VerifySCC() {
+  for (auto const &it : nodesMap) {
+    CGNode *node = it.second;
+    if (node->GetSCCNode() == nullptr) {
+      CHECK_FATAL(false, "");
+    }
+  }
+}
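(Editorial aside, not part of the patch.) BuildSCCDFS above is Tarjan's strongly-connected-components algorithm, with visitedOrder/lowestOrder playing the classic index/lowlink roles. For readers who want the algorithm without the call-graph plumbing, a minimal self-contained sketch over plain adjacency lists (a hypothetical graph representation, not MapleIR types) might look like this:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Tarjan's SCC. order/lowest mirror visitedOrder/lowestOrder in BuildSCCDFS;
    // a component is popped only after everything reachable from it, so callees'
    // components are emitted first, matching the bottom-up compilation order.
    struct TarjanSCC {
      const std::vector<std::vector<uint32_t>> &adj;
      std::vector<uint32_t> order, lowest, sccId, stack;
      std::vector<bool> inStack;
      uint32_t visitIndex = 1;
      uint32_t numSccs = 0;

      explicit TarjanSCC(const std::vector<std::vector<uint32_t>> &g)
          : adj(g), order(g.size(), 0), lowest(g.size(), 0), sccId(g.size(), 0), inStack(g.size(), false) {}

      void Dfs(uint32_t v) {
        order[v] = lowest[v] = visitIndex++;
        stack.push_back(v);
        inStack[v] = true;
        for (uint32_t w : adj[v]) {
          if (order[w] == 0) {       // tree edge: recurse, then propagate the lowlink
            Dfs(w);
            lowest[v] = std::min(lowest[v], lowest[w]);
          } else if (inStack[w]) {   // back edge into the current DFS stack
            lowest[v] = std::min(lowest[v], order[w]);
          }
        }
        if (order[v] == lowest[v]) { // v roots an SCC: pop the whole component
          uint32_t w;
          do {
            w = stack.back();
            stack.pop_back();
            inStack[w] = false;
            sccId[w] = numSccs;
          } while (w != v);
          ++numSccs;
        }
      }

      void Run() { // also cover nodes unreachable from any root, as BuildSCC does
        for (uint32_t v = 0; v < adj.size(); ++v) {
          if (order[v] == 0) {
            Dfs(v);
          }
        }
      }
    };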
+
+void CallGraph::BuildSCC() {
+  // This is the mapping from cg_id to cg_node. We may consider putting it in
+  // the CallGraph if it will be used frequently in the future.
+  std::vector<CGNode*> cgNodes(numOfNodes, nullptr);
+  std::vector<uint32> visitedOrder(numOfNodes, 0);
+  std::vector<uint32> lowestOrder(numOfNodes, 0);
+  std::vector<bool> inStack(numOfNodes, false);
+  std::vector<SCCNode*> sccNodes;
+  uint32 visitIndex = 1;
+  std::vector<uint32> visitStack;
+  // Starting from the roots is a good strategy for the DFS
+  for (CGNode *const &root : rootNodes) {
+    BuildSCCDFS(root, visitIndex, sccNodes, cgNodes, visitedOrder, lowestOrder, inStack, visitStack);
+  }
+  // However, not all SCCs can be reached from the roots.
+  // E.g. foo()->foo(): foo is not considered a root.
+  for (auto const &it : nodesMap) {
+    CGNode *node = it.second;
+    if (!node->GetSCCNode()) {
+      BuildSCCDFS(node, visitIndex, sccNodes, cgNodes, visitedOrder, lowestOrder, inStack, visitStack);
+    }
+  }
+  for (SCCNode *const &scc : sccNodes) {
+    // LogInfo::MapleLogger() << "SCC:" << scc->cgNodes.size() << '\n';
+    scc->Verify();
+    scc->Setup(); // fix caller and callee info.
+    if (debug_scc && scc->HasRecursion()) {
+      scc->Dump();
+    }
+  }
+  SCCTopologicalSort(sccNodes);
+}
+
+void CallGraph::SCCTopologicalSort(std::vector<SCCNode*> &sccNodes) {
+  std::set<SCCNode*> inQueue; // Local variable, no need to use MapleSet
+  for (SCCNode *const &node : sccNodes) {
+    if (!node->HasCaller()) {
+      sccTopologicalVec.push_back(node);
+      inQueue.insert(node);
+    }
+  }
+  // Iterate all nodes top-down
+  for (unsigned i = 0; i < sccTopologicalVec.size(); i++) {
+    SCCNode *sccNode = sccTopologicalVec[i];
+    for (SCCNode *callee : sccNode->calleeScc) {
+      if (inQueue.find(callee) == inQueue.end()) {
+        // the callee has not been visited
+        bool callerAllVisited = true;
+        // Check whether all callers of the current callee have been visited
+        for (SCCNode *caller : callee->callerScc) {
+          if (inQueue.find(caller) == inQueue.end()) {
+            callerAllVisited = false;
+            break;
+          }
+        }
+        if (callerAllVisited) {
+          sccTopologicalVec.push_back(callee);
+          inQueue.insert(callee);
+        }
+      }
+    }
+  }
+}
+
+void CGNode::AddCandsForCallNode(const KlassHierarchy *kh) {
+  /* vcall candidates information already set */
+  if (HasSetVCallCandidates()) {
+    return;
+  }
+  CHECK_FATAL(mirFunc != nullptr, "");
+  Klass *klass = kh->GetKlassFromFunc(mirFunc);
+  if (klass) {
+    MapleVector<MIRFunction*> *v = klass->GetCandidates(mirFunc->GetBaseFuncNameWithTypeStrIdx());
+    if (v) {
+      vcallCands = *v; // Vector copy
+    }
+  }
+}
+
+MIRFunction *CGNode::HasOneCandidate() const {
+  int count = 0;
+  MIRFunction *cand = nullptr;
+  if (!mirFunc->IsEmpty()) {
+    count++;
+    cand = mirFunc;
+  }
+  /* scan candidates */
+  for (uint32 i = 0; i < vcallCands.size(); i++) {
+    if (vcallCands[i] == nullptr) {
+      CHECK_FATAL(false, "");
+    }
+    if (!vcallCands[i]->IsEmpty()) {
+      count++;
+      if (!cand) {
+        cand = vcallCands[i];
+      }
+    }
+  }
+  return count == 1 ? cand : nullptr;
+}
+
+AnalysisResult *DoCallGraph::Run(MIRModule *module, ModuleResultMgr *m) {
+  MemPool *memPool = memPoolCtrler.NewMemPool("callgraph mempool");
+  KlassHierarchy *cha = static_cast<KlassHierarchy*>(m->GetAnalysisResult(MoPhase_CHA, module));
+  CHECK_FATAL(cha != nullptr, "");
+  CallGraph *cg = memPool->New<CallGraph>(module, memPool, cha, module->GetFileName().c_str());
+  cg->debug_flag = TRACE_PHASE;
+  cg->BuildCallGraph();
+  m->AddResult(GetPhaseID(), *module, *cg);
+  // do retype
+  MemPool *localMp = memPoolCtrler.NewMemPool(PhaseName());
+  maple::MIRBuilder dexMirbuilder(module);
+  KlassHierarchy *retypeKh = static_cast<KlassHierarchy*>(m->GetAnalysisResult(MoPhase_CHA, module));
+  Retype retype(module, localMp, dexMirbuilder, retypeKh);
+  retype.DoRetype();
+  memPoolCtrler.DeleteMemPool(localMp);
+  return cg;
+}
+
+AnalysisResult *DoIPODevirtulize::Run(MIRModule *module, ModuleResultMgr *m) {
+  MemPool *memPool = memPoolCtrler.NewMemPool("ipodevirtulize mempool");
+  KlassHierarchy *cha = static_cast<KlassHierarchy*>(m->GetAnalysisResult(MoPhase_CHA, module));
+  CHECK_FATAL(cha != nullptr, "");
+  IPODevirtulize *dev = memPool->New<IPODevirtulize>(module, memPool, cha);
+  // Devirtualize vcalls on final variables
+  dev->DevirtualFinal();
+  memPoolCtrler.DeleteMemPool(memPool);
+  return nullptr;
+}
+
+} // namespace maple
diff --git a/src/maple_ipa/src/clone.cpp b/src/maple_ipa/src/clone.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a76c2708771447ebd3d347c6e3978242b143c3d6
--- /dev/null
+++ b/src/maple_ipa/src/clone.cpp
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +#include "clone.h" +#include +#include +#include + +// For some funcs, when we can ignore their return-values, we clone a new func of +// them without return-values. We configure a list to save these funcs and clone +// at the very beginning so that clones can also enjoy the optimizations after. +// This mainly contains the clone of funcbody(include labels, symbols, arguments, +// etc.) and the update of the new func infomation. +namespace maple { +ReplaceRetIgnored::ReplaceRetIgnored(MemPool *memPool) : allocator(memPool) { +} + +bool ReplaceRetIgnored::RealShouldReplaceWithVoidFunc(Opcode op, size_t nRetSize, + const MIRFunction *calleeFunc) const { + MIRType *returnType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(calleeFunc->GetReturnTyIdx()); + return nRetSize == 0 && (op == OP_virtualcallassigned || op == OP_callassigned || op == OP_superclasscallassigned) && + !calleeFunc->IsNative() && (returnType->GetKind() == kTypePointer) && (returnType->GetPrimType() == PTY_ref); +} + +bool ReplaceRetIgnored::ShouldReplaceWithVoidFunc(const CallMeStmt *stmt, const MIRFunction *calleeFunc) const { + return RealShouldReplaceWithVoidFunc(stmt->GetOp(), stmt->MustDefListSize(), calleeFunc); +} + +std::string ReplaceRetIgnored::GenerateNewBaseName(const MIRFunction *originalFunc) { + return std::string(originalFunc->GetBaseFuncName()).append(kVoidRetSuffix); +} + +std::string ReplaceRetIgnored::GenerateNewFullName(const MIRFunction *originalFunc) { + const std::string &oldSignature = originalFunc->GetSignature(); + auto retPos = oldSignature.find("_29"); + return std::string(originalFunc->GetBaseClassName()) + .append(NameMangler::kNameSplitterStr) + .append(GenerateNewBaseName(originalFunc)) + .append(NameMangler::kNameSplitterStr) + .append(oldSignature.substr(0, retPos + 3)) + .append("V"); +} + +MIRSymbol *Clone::CloneLocalSymbol(const MIRSymbol *oldSym, MIRFunction *newFunc) { + MemPool *newMP = newFunc->GetMemPool(); + MIRSymbol *newSym = newMP->New(*oldSym); + if (oldSym->GetSKind() == kStConst) { + newSym->SetKonst(oldSym->GetKonst()->Clone(*newMP)); + } else if (oldSym->GetSKind() == kStPreg) { + newSym->SetPreg(newMP->New(*oldSym->GetPreg())); + } else if (oldSym->GetSKind() == kStFunc) { + CHECK_FATAL(false, "%s has unexpected local func symbol", oldSym->GetName().c_str()); + } + return newSym; +} + +void Clone::CloneSymbols(MIRFunction *newFunc, const MIRFunction *oldFunc) { + size_t symTabSize = oldFunc->GetSymbolTabSize(); + for (size_t i = oldFunc->GetFormalCount() + 1; i < symTabSize; ++i) { + MIRSymbol *sym = oldFunc->GetSymbolTabItem(i); + if (sym == nullptr) { + continue; + } + MIRSymbol *newSym = CloneLocalSymbol(sym, newFunc); + if (!newFunc->GetSymTab()->AddStOutside(newSym)) { + CHECK_FATAL(false, "%s already existed in func %s", sym->GetName().c_str(), newFunc->GetName().c_str()); + } + } +} + +void Clone::CloneLabels(MIRFunction *newFunc, const MIRFunction *oldFunc) { + size_t labelTabSize = oldFunc->GetLabelTab()->GetLabelTableSize(); + for (size_t i = 1; i < labelTabSize; ++i) { + const std::string &labelName = 
oldFunc->GetLabelTabItem(i); + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(labelName); + newFunc->GetLabelTab()->AddLabel(strIdx); + } +} + +// Clone a function +MIRFunction *Clone::CloneFunction(MIRFunction *originalFunction, const std::string &newBaseFuncName, + MIRType *returnType) { + MapleAllocator cgAlloc(originalFunction->GetCodeMempool()); + ArgVector argument(cgAlloc.Adapter()); + CloneArgument(originalFunction, argument); + MIRType *retType = returnType; + if (retType == nullptr) { + retType = originalFunction->GetReturnType(); + } + std::string fullName = originalFunction->GetBaseClassName(); + const std::string &signature = originalFunction->GetSignature(); + fullName = fullName.append(NameMangler::kNameSplitterStr) + .append(newBaseFuncName) + .append(NameMangler::kNameSplitterStr) + .append(signature); + MIRFunction *newFunc = + dexBuilder.CreateFunction(fullName, *retType, argument, false, originalFunction->GetBody() != nullptr); + CHECK_FATAL(newFunc != nullptr, "create cloned function failed"); + dexBuilder.GetMirModule().AddFunction(newFunc); + Klass *klass = kh->GetKlassFromName(originalFunction->GetBaseClassName()); + CHECK_FATAL(klass != nullptr, "getklass failed"); + klass->AddMethod(newFunc); + newFunc->SetClassTyIdx(originalFunction->GetClassTyIdx()); + MIRClassType *classType = klass->GetMIRClassType(); + classType->GetMethods().push_back( + MethodPair(newFunc->GetStIdx(), TyidxFuncAttrPair(newFunc->GetFuncSymbol()->GetTyIdx(), + originalFunction->GetFuncAttrs()))); + newFunc->SetFlag(originalFunction->GetFlag()); + newFunc->SetSrcPosition(originalFunction->GetSrcPosition()); + newFunc->SetFuncAttrs(originalFunction->GetFuncAttrs()); + newFunc->SetBaseClassFuncNames(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(fullName)); + if (originalFunction->GetBody()) { + CopyFuncInfo(originalFunction, newFunc); + MIRFunction *originalCurrFunction = dexBuilder.GetCurrentFunctionNotNull(); + dexBuilder.SetCurrentFunction(*newFunc); + newFunc->SetBody( + originalFunction->GetBody()->CloneTree(originalFunction->GetModule()->GetCurFuncCodeMPAllocator())); + CloneSymbols(newFunc, originalFunction); + CloneLabels(newFunc, originalFunction); + dexBuilder.SetCurrentFunction(*originalCurrFunction); + } + return newFunc; +} + +void Clone::CloneArgument(MIRFunction *originalFunction, ArgVector &argument) const { + for (size_t i = 0; i < originalFunction->GetFormalCount(); ++i) { + argument.push_back(ArgPair(originalFunction->GetFormal(i)->GetName(), originalFunction->GetNthParamType(i))); + } +} + +void Clone::CopyFuncInfo(const MIRFunction *originalFunction, MIRFunction *newFunc) const { + auto funcNameIdx = newFunc->GetBaseFuncNameStrIdx(); + auto fullNameIdx = newFunc->GetNameStrIdx(); + auto classNameIdx = newFunc->GetBaseClassNameStrIdx(); + auto metaFullNameIdx = dexBuilder.GetOrCreateStringIndex(kFullNameStr); + auto metaClassNameIdx = dexBuilder.GetOrCreateStringIndex(kClassNameStr); + auto metaFuncNameIdx = dexBuilder.GetOrCreateStringIndex(kFuncNameStr); + const MIRInfoVector &fnInfo = originalFunction->GetInfoVector(); + const MapleVector &infoIsString = originalFunction->InfoIsString(); + size_t size = fnInfo.size(); + for (size_t i = 0; i < size; ++i) { + if (fnInfo[i].first == metaFullNameIdx) { + newFunc->PushbackMIRInfo(std::pair(fnInfo[i].first, fullNameIdx)); + } else if (fnInfo[i].first == metaFuncNameIdx) { + newFunc->PushbackMIRInfo(std::pair(fnInfo[i].first, funcNameIdx)); + } else if (fnInfo[i].first == metaClassNameIdx) { + 
newFunc->PushbackMIRInfo(std::pair(fnInfo[i].first, classNameIdx)); + } else { + newFunc->PushbackMIRInfo(std::pair(fnInfo[i].first, fnInfo[i].second)); + } + newFunc->PushbackIsString(infoIsString[i]); + } +} + +void Clone::UpdateFuncInfo(MIRFunction *newFunc) { + auto fullNameIdx = newFunc->GetNameStrIdx(); + auto metaFullNameIdx = dexBuilder.GetOrCreateStringIndex(kFullNameStr); + size_t size = newFunc->GetInfoVector().size(); + for (size_t i = 0; i < size; ++i) { + if (newFunc->GetInfoVector()[i].first == metaFullNameIdx) { + newFunc->SetMIRInfoNum(i, fullNameIdx); + break; + } + } +} + +// Clone all functions that would be invoked with their return value ignored +// @param original_function The original function to be cloned +// @param dexBuilder A helper object +// @return Pointer to the newly cloned function +MIRFunction *Clone::CloneFunctionNoReturn(MIRFunction *originalFunction) { + const std::string oldSignature = originalFunction->GetSignature(); + const std::string kNewMethodBaseName = replaceRetIgnored->GenerateNewBaseName(originalFunction); + MIRFunction *originalCurrFunction = dexBuilder.GetMirModule().CurFunction(); + MIRFunction *newFunction = + CloneFunction(originalFunction, kNewMethodBaseName, GlobalTables::GetTypeTable().GetTypeFromTyIdx(1)); + + // new stmt should be located in the newFunction->codemp, dexBuilder.CreateStmtReturn will use CurFunction().codemp + // to assign space for the new stmt. So we set it correctly here. + dexBuilder.GetMirModule().SetCurFunction(newFunction); + if (originalFunction->GetBody()) { + auto *body = newFunction->GetBody(); + for (auto &stmt : body->GetStmtNodes()) { + if (stmt.GetOpCode() == OP_return) { + body->ReplaceStmt1WithStmt2(&stmt, dexBuilder.CreateStmtReturn(nullptr)); + } + } + } + // setup new names for the newly cloned function + std::string newFuncFullName = replaceRetIgnored->GenerateNewFullName(originalFunction); + GStrIdx fullNameStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(newFuncFullName); + newFunction->OverrideBaseClassFuncNames(fullNameStrIdx); + MIRSymbol *funcSt = newFunction->GetFuncSymbol(); + GlobalTables::GetGsymTable().RemoveFromStringSymbolMap(*funcSt); + funcSt->SetNameStrIdx(fullNameStrIdx); + GlobalTables::GetGsymTable().AddToStringSymbolMap(*funcSt); + UpdateFuncInfo(newFunction); + dexBuilder.GetMirModule().SetCurFunction(originalCurrFunction); + return newFunction; +} + +void Clone::UpdateReturnVoidIfPossible(CallMeStmt *callMeStmt, const MIRFunction *targetFunc) { + if (callMeStmt) { + if (replaceRetIgnored->ShouldReplaceWithVoidFunc(callMeStmt, targetFunc)) { + if (replaceRetIgnored->IsInCloneList(targetFunc->GetName())) { + std::string funcNameReturnVoid = replaceRetIgnored->GenerateNewFullName(targetFunc); + GStrIdx gStrIdx = GlobalTables::GetStrTable().GetStrIdxFromName(funcNameReturnVoid); + MIRFunction *funcReturnVoid = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(gStrIdx)->GetFunction(); + CHECK_FATAL(nullptr != funcReturnVoid, "target function not found at ssadevirtual"); + callMeStmt->SetPUIdx(funcReturnVoid->GetPuidx()); + } + } + } +} + +void Clone::DoClone() { + std::set clonedNewFuncMap; + for (const std::string &funcName : *(replaceRetIgnored->GetTobeClonedFuncNames())) { + GStrIdx gStrIdx = GlobalTables::GetStrTable().GetStrIdxFromName(funcName); + MIRSymbol *symbol = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(gStrIdx); + if (nullptr != symbol) { + GStrIdx gStrIdxOfFunc = GlobalTables::GetStrTable().GetStrIdxFromName(funcName); + MIRFunction *oriFunc = 
+          GlobalTables::GetGsymTable().GetSymbolFromStrIdx(gStrIdxOfFunc)->GetFunction();
+      mirModule->SetCurFunction(oriFunc);
+      clonedNewFuncMap.insert(CloneFunctionNoReturn(oriFunc)->GetName());
+    }
+  }
+}
+
+AnalysisResult *DoClone::Run(MIRModule *module, ModuleResultMgr *mrm) {
+  MemPool *memPool = memPoolCtrler.NewMemPool(PhaseName());
+  maple::MIRBuilder dexMirBuilder(module);
+  KlassHierarchy *kh = static_cast<KlassHierarchy*>(mrm->GetAnalysisResult(MoPhase_CHA, module));
+  Clone *clone = memPool->New<Clone>(module, memPool, dexMirBuilder, kh);
+  clone->DoClone();
+  mrm->AddResult(GetPhaseID(), *module, *clone);
+  return clone;
+}
+
+} // namespace maple
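(Editorial aside, not part of the patch.) The renaming done by GenerateNewBaseName/GenerateNewFullName in clone.cpp above is pure string surgery on mangled names, where "_7C" encodes '|', "_28"/"_29" encode '('/')' and everything after "_29" is the return type. A tiny standalone rehearsal on a made-up method; the suffix and splitter values here are placeholders for the constants defined in clone.h and namemangler:

    #include <iostream>
    #include <string>

    int main() {
      // Hypothetical mangled pieces for a method "String Foo.bar()".
      const std::string kVoidRetSuffix = "IgnoreRet";              // placeholder, see clone.h
      const std::string splitter = "_7C";                          // '|' under Maple mangling
      std::string baseClass = "Lcom_2Fexample_2FFoo_3B";
      std::string baseFunc = "bar";
      std::string oldSignature = "_28_29Ljava_2Flang_2FString_3B"; // "()Ljava/lang/String;"
      // Mirrors GenerateNewFullName: keep the signature up to and including the
      // ')' ("_29"), then force a void ("V") return type.
      auto retPos = oldSignature.find("_29");
      std::string newFullName = baseClass + splitter + baseFunc + kVoidRetSuffix + splitter +
                                oldSignature.substr(0, retPos + 3) + "V";
      std::cout << newFullName << '\n';                            // ..._7CbarIgnoreRet_7C_28_29V
      return 0;
    }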
diff --git a/src/maple_ipa/src/module_phase_manager.cpp b/src/maple_ipa/src/module_phase_manager.cpp
index 22f4985aafdd8cb558b42ed0ccd7572852d8a4b2..d7b96915bfd753087b7989d214f14df74c0c0721 100644
--- a/src/maple_ipa/src/module_phase_manager.cpp
+++ b/src/maple_ipa/src/module_phase_manager.cpp
@@ -18,6 +18,8 @@
 #include "option.h"
 #include "bin_mpl_export.h"
 #include "mpl_timer.h"
+#include "clone.h"
+#include "callgraph.h"
 #if MIR_JAVA
 #include "native_stub_func.h"
 #include "vtable_analysis.h"
@@ -28,6 +30,9 @@
 #include "native_stub_func.h"
 #include "muid_replacement.h"
 #include "gen_check_cast.h"
+#include "analyzector.h"
+#include "coderelayout.h"
+#include "constantfold.h"
 #endif // ~MIR_JAVA
 namespace {
diff --git a/src/maple_ipa/src/retype.cpp b/src/maple_ipa/src/retype.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0e4e1539dfc1a81e0f34be1fcb70c11d791e9a64
--- /dev/null
+++ b/src/maple_ipa/src/retype.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */
+#include "retype.h"
+#include
+#include
+
+namespace maple {
+void Retype::ReplaceRetypeExpr(const BaseNode *expr) {
+  if (expr->NumOpnds() == 0) {
+    return;
+  }
+  for (size_t i = 0; i < expr->NumOpnds(); i++) {
+    BaseNode *opnd = expr->Opnd(i);
+    if (opnd->GetOpCode() == OP_retype) {
+      opnd->SetOpnd(opnd->Opnd(0), i);
+      continue;
+    }
+    ReplaceRetypeExpr(opnd);
+  }
+}
+
+void Retype::Retypestmt(MIRFunction *func) {
+  if (func->IsEmpty()) {
+    return;
+  }
+  for (auto &stmt : func->GetBody()->GetStmtNodes()) {
+    if (stmt.GetOpCode() == OP_comment) {
+      continue;
+    }
+    for (size_t i = 0; i < stmt.NumOpnds(); i++) {
+      BaseNode *opnd = stmt.Opnd(i);
+      if (opnd->GetOpCode() == OP_retype) {
+        stmt.SetOpnd(opnd->Opnd(0), i);
+        continue;
+      } else {
+        ReplaceRetypeExpr(opnd);
+      }
+    }
+  }
+}
+
+void Retype::DoRetype() {
+  for (MIRFunction *func : mirmodule->GetFunctionList()) {
+    if (func->IsEmpty()) {
+      continue;
+    }
+    Retypestmt(func);
+  }
+}
+
+} // namespace maple
diff --git a/src/maple_ir/include/mir_function.h b/src/maple_ir/include/mir_function.h
index b3e20a8b528fc17447627abbd07ff79ac1655aa1..a5b7c6d68d25029080ee42ea10bc3e5438218d2c 100644
--- a/src/maple_ir/include/mir_function.h
+++ b/src/maple_ir/include/mir_function.h
@@ -97,6 +97,9 @@ class MIRFunction {
     return baseFuncWithTypeStrIdx;
   }
 
+  GStrIdx GetBaseFuncSigStrIdx() const {
+    return baseFuncSigStrIdx;
+  }
 
   void SetBaseClassNameStrIdx(GStrIdx id) {
     baseClassStrIdx = id;
@@ -422,6 +425,12 @@ class MIRFunction {
     funcType = type;
   }
 
+  TyIdx GetInferredReturnTyIdx() const {
+    return inferredReturnTyIdx;
+  }
+  void SetInferredReturnTyIdx(TyIdx tyIdx) {
+    inferredReturnTyIdx = tyIdx;
+  }
 
   const MapleVector<TyIdx> &GetArgumentsTyIdx() const {
     return argumentsTyIdx;
@@ -563,6 +572,12 @@ class MIRFunction {
     layoutType = type;
  }
 
+  uint32 GetCallTimes() const {
+    return callTimes;
+  }
+  void SetCallTimes(uint32 times) {
+    callTimes = times;
+  }
 
   uint16 GetFrameSize() const {
     return frameSize;
@@ -728,6 +743,8 @@ class MIRFunction {
   StIdx symbolTableIdx; // the symbol table index of this function
   MIRFuncType *funcType = nullptr;
   TyIdx returnTyIdx{0}; // the declared return type of this function
+  TyIdx inferredReturnTyIdx{0}; // the actual return type of this function (may be a
+                                // subclass of the above). 0 means it cannot be inferred.
TyIdx classTyIdx{0}; // class/interface type this function belongs to MapleVector formals{module->GetMPAllocator().Adapter()}; // formal parameter symbols of this function MapleSet retRefSym{module->GetMPAllocator().Adapter()}; @@ -742,6 +759,7 @@ class MIRFunction { MapleAllocator dataMPAllocator{dataMemPool}; MemPool *codeMemPool = memPoolCtrler.NewMemPool("func code mempool"); MapleAllocator codeMemPoolAllocator{codeMemPool}; + uint32 callTimes = 0; BlockNode *body = nullptr; SrcPosition srcPosition{}; FuncAttrs funcAttrs{}; @@ -798,6 +816,7 @@ class MIRFunction { // the string table index of base function name mangled with type info GStrIdx baseFuncWithTypeStrIdx{0}; // funcname + types of args, no type of retv + GStrIdx baseFuncSigStrIdx{0}; GStrIdx signatureStrIdx{0}; void DumpFlavorLoweredThanMmpl() const; diff --git a/src/maple_ir/include/mir_module.h b/src/maple_ir/include/mir_module.h index c5c6cc66a80699ed847b901c32bec3e743f29ba6..310c878f5fec0d71d1d95b931705465161699e3d 100644 --- a/src/maple_ir/include/mir_module.h +++ b/src/maple_ir/include/mir_module.h @@ -20,6 +20,7 @@ #include "opcodes.h" #include "mpl_logging.h" #include "muid.h" +#include "profile.h" #if MIR_FEATURE_FULL #include #include @@ -187,6 +188,10 @@ class MIRModule { return symbolSet; } + Profile &GetProfile() { + return profile; + } + void SetSomeSymbolNeedForDecl(bool s) { someSymbolNeedForwDecl = s; } @@ -472,6 +477,7 @@ class MIRModule { MapleSet externStructTypeSet; MapleSet symbolSet; MapleVector symbolDefOrder; + Profile profile; bool someSymbolNeedForwDecl = false; // some symbols' addressses used in initialization std::ostream &out; diff --git a/src/maple_ir/include/option.h b/src/maple_ir/include/option.h index 102d7e4f438ca451e259b32a72ee2bf736124571..5d23f602a75f6ccb454cf32462f3a5420d15db02 100644 --- a/src/maple_ir/include/option.h +++ b/src/maple_ir/include/option.h @@ -82,6 +82,15 @@ class Options { #if MIR_JAVA static bool skipVirtualMethod; #endif + static bool nativeOpt; + static bool O1; + static bool O2; + static bool noDot; + static std::string proFileData; + static std::string proFileFuncData; + static std::string proFileClassData; + static bool profileStaticFields; + static bool checkArrayStore; private: MapleAllocator optionAlloc; std::vector phaseSeq; diff --git a/src/maple_ir/src/option.cpp b/src/maple_ir/src/option.cpp index e6eeda9c9388eabf6825852c186d6b42aae663b1..20c3c4a0e273116476a8a6ac8b51e8be10eb59ec 100644 --- a/src/maple_ir/src/option.cpp +++ b/src/maple_ir/src/option.cpp @@ -55,6 +55,15 @@ bool Options::emitVtableImpl = false; #if MIR_JAVA bool Options::skipVirtualMethod = false; #endif +bool Options::nativeOpt = true; +bool Options::O1 = false; +bool Options::O2 = false; +bool Options::noDot = false; +bool Options::profileStaticFields = false; +std::string Options::proFileData = ""; +std::string Options::proFileFuncData = ""; +std::string Options::proFileClassData = ""; +bool Options::checkArrayStore = false; enum OptionIndex { kUnknown, kHelp, diff --git a/src/maple_me/BUILD.gn b/src/maple_me/BUILD.gn index cb2211b99a2280c3f8dd932b4f515524804aef97..cf68b225cfbeb0e3f192532c59c1a68ee5c36639 100644 --- a/src/maple_me/BUILD.gn +++ b/src/maple_me/BUILD.gn @@ -29,6 +29,7 @@ src_libmplme = [ "src/hdse.cpp", "src/prop.cpp", "src/me_alias_class.cpp", + "src/me_critical_edge.cpp", "src/me_bb_layout.cpp", "src/me_bypath_eh.cpp", "src/me_cfg.cpp", @@ -36,6 +37,8 @@ src_libmplme = [ "src/me_hdse.cpp", "src/me_dominance.cpp", "src/me_emit.cpp", + "src/me_ssa_devirtual.cpp", + 
"src/ssa_devirtual.cpp", "src/me_function.cpp", "src/me_loop_analysis.cpp", "src/me_irmap.cpp", @@ -43,9 +46,13 @@ src_libmplme = [ "src/me_option.cpp", "src/me_phase_manager.cpp", "src/me_prop.cpp", + "src/me_analyze_rc.cpp", + "src/me_delegate_rc.cpp", "src/me_cond_based_opt.cpp", "src/me_rc_lowering.cpp", "src/me_lower_globals.cpp", + "src/me_may2dassign.cpp", + "src/preg_renamer.cpp", "src/me_rename2preg.cpp", "src/me_ssa.cpp", "src/me_ssa_tab.cpp", diff --git a/src/maple_me/include/dse.h b/src/maple_me/include/dse.h index 4a7f2eb46f93ccccc5f060a0465a08cb8f0990cf..ed87b8bacbb1bb6e3bed1ef15a2ebb442ca65aff 100644 --- a/src/maple_me/include/dse.h +++ b/src/maple_me/include/dse.h @@ -27,13 +27,12 @@ namespace maple { class DSE { public: DSE(std::vector &&bbVec, BB &commonEntryBB, BB &commonExitBB, SSATab &ssaTab, - Dominance &postDom, bool enableDebug = false, bool decouple = false) + Dominance &postDom, bool enableDebug = false) : enableDebug(enableDebug), bbVec(bbVec), commonEntryBB(commonEntryBB), commonExitBB(commonExitBB), ssaTab(ssaTab), postDom(postDom), bbRequired(bbVec.size(), false), - exprRequired(ssaTab.GetVersionStTableSize(), false), - decoupleStatic(decouple) {} + exprRequired(ssaTab.GetVersionStTableSize(), false) {} ~DSE() = default; @@ -118,7 +117,6 @@ class DSE { std::vector exprRequired; std::forward_list> workList{}; bool cfgUpdated = false; - bool decoupleStatic = false; }; } // namespace maple #endif // MAPLE_ME_INCLUDE_DSE_H diff --git a/src/maple_me/include/hdse.h b/src/maple_me/include/hdse.h index 7509112fdd058f6d363625075ba332de2ac91e79..29c9408c1711417822c4bb0c14634ab96705d05d 100644 --- a/src/maple_me/include/hdse.h +++ b/src/maple_me/include/hdse.h @@ -23,7 +23,8 @@ class MeIRMap; class HDSE { public: HDSE(MIRModule &mod, const MapleVector &bbVec, BB &commonEntryBB, BB &commonExitBB, SSATab &ssaTab, - Dominance &pDom, IRMap &map, bool enabledDebug = false, bool decouple = false) + Dominance &pDom, IRMap &map, bool enabledDebug = false) + : hdseDebug(enabledDebug), mirModule(mod), bbVec(bbVec), @@ -32,8 +33,7 @@ class HDSE { ssaTab(ssaTab), postDom(pDom), irMap(map), - bbRequired(bbVec.size(), false), - decoupleStatic(decouple) {} + bbRequired(bbVec.size(), false) {} virtual ~HDSE() = default; @@ -100,7 +100,6 @@ class HDSE { std::vector bbRequired; std::vector exprLive; std::forward_list workList; - bool decoupleStatic = false; }; } // namespace maple #endif // MAPLE_ME_INCLUDE_HDSE_H diff --git a/src/maple_me/include/me_analyze_rc.h b/src/maple_me/include/me_analyze_rc.h new file mode 100644 index 0000000000000000000000000000000000000000..e166f7a32cdc7a3ceeb4a4013a2c41d0654ab8c5 --- /dev/null +++ b/src/maple_me/include/me_analyze_rc.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
+ */ +#ifndef MAPLE_ME_INCLUDE_MEANALYZERC_H +#define MAPLE_ME_INCLUDE_MEANALYZERC_H +#include "me_function.h" +#include "me_phase.h" +#include "me_alias_class.h" +#include "me_irmap.h" + +namespace maple { +class RCItem { + public: + RCItem(const OriginalSt &o, MapleAllocator &alloc) + : ost(o), + noAlias(false), + nonLocal(false), + isFormal(false), + needSomeRC(false), + versionStack(alloc.Adapter()), + occurBBs(std::less(), alloc.Adapter()) {} + + virtual ~RCItem() = default; + + void Dump(); + private: + friend class AnalyzeRC; + const OriginalSt &ost; + bool noAlias; + bool nonLocal; // need to do placement optimization if (!nonLocal) + bool isFormal; // is an incoming formal parameter + bool needSomeRC; // true if any definition has rhs that needs RC + MapleStack versionStack; + MapleSet occurBBs; // set of BBs where the pointer occurs; only for local ref pointers +}; + +class AnalyzeRC { + public: + AnalyzeRC(MeFunction &f, Dominance &dom, AliasClass &ac, MemPool *memPool) + : func(f), + irMap(*f.GetIRMap()), + ssaTab(*f.GetMeSSATab()), + dominance(dom), + aliasClass(ac), + analyzeRCMp(memPool), + analyzeRCAllocator(memPool), + meBuilder(irMap.GetIRMapAlloc()), + rcItemsMap(std::less(), analyzeRCAllocator.Adapter()), + skipLocalRefVars(false) {} + + virtual ~AnalyzeRC() = default; + void Run(); + + private: + void IdentifyRCStmts(); + void CreateCleanupIntrinsics(); + void RenameRefPtrs(BB *bb); + void OptimizeRC(); + void RemoveUnneededCleanups(); + void RenameUses(MeStmt &meStmt); + RCItem *FindOrCreateRCItem(const OriginalSt &ost); + OriginalSt *GetOriginalSt(MeExpr &refLHS); + VarMeExpr *GetZeroVersionVarMeExpr(const VarMeExpr &var); + bool NeedIncref(const MeStmt &stmt); + UnaryMeStmt *CreateIncrefZeroVersion(const OriginalSt &ost); + DassignMeStmt *CreateDassignInit(OriginalSt &ost, BB &bb); + void TraverseStmt(BB &bb); + bool NeedDecRef(RCItem &rcItem, MeExpr &expr); + bool NeedDecRef(IvarMeExpr &ivar); + bool NeedDecRef(VarMeExpr &var); + + friend class MeDoAnalyzeRC; + MeFunction &func; + IRMap &irMap; + SSATab &ssaTab; + Dominance &dominance; + AliasClass &aliasClass; + MemPool *analyzeRCMp; + MapleAllocator analyzeRCAllocator; + MeBuilder meBuilder; + MapleMap rcItemsMap; + bool skipLocalRefVars; +}; + +class MeDoAnalyzeRC : public MeFuncPhase { + public: + explicit MeDoAnalyzeRC(MePhaseID id) : MeFuncPhase(id) {} + + virtual ~MeDoAnalyzeRC() = default; + AnalysisResult *Run(MeFunction *func, MeFuncResultMgr *m, ModuleResultMgr *mrm) override; + std::string PhaseName() const override { + return "analyzerc"; + } +}; +} // namespace maple +#endif // MAPLE_ME_INCLUDE_MEANALYZERC_H diff --git a/src/maple_me/include/me_critical_edge.h b/src/maple_me/include/me_critical_edge.h new file mode 100644 index 0000000000000000000000000000000000000000..815602b4edcfc7b2703d2aa7b6ec7feeceb3cc69 --- /dev/null +++ b/src/maple_me/include/me_critical_edge.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
+ */ +#ifndef MAPLE_ME_INCLUDE_MECRITICALEDGE_H +#define MAPLE_ME_INCLUDE_MECRITICALEDGE_H + +#include "me_phase.h" +#include "bb.h" + +namespace maple { +// Split critical edge +class MeDoSplitCEdge : public MeFuncPhase { + public: + explicit MeDoSplitCEdge(MePhaseID id) : MeFuncPhase(id) {} + + ~MeDoSplitCEdge() = default; + + AnalysisResult *Run(MeFunction *func, MeFuncResultMgr *m, ModuleResultMgr *mrm) override; + std::string PhaseName() const override { + return "splitcriticaledge"; + } + + private: + void BreakCriticalEdge(MeFunction &func, BB &pred, BB &succ); +}; +} // namespace maple +#endif // MAPLE_ME_INCLUDE_MECRITICALEDGE_H diff --git a/src/maple_me/include/me_delegate_rc.h b/src/maple_me/include/me_delegate_rc.h new file mode 100644 index 0000000000000000000000000000000000000000..fd98afbd1b90ee93c9cbcf937e19870ec54f150c --- /dev/null +++ b/src/maple_me/include/me_delegate_rc.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +#ifndef MAPLE_ME_INCLUDE_MEDELEGATERC_H +#define MAPLE_ME_INCLUDE_MEDELEGATERC_H +#include "me_function.h" +#include "me_phase.h" +#include "me_irmap.h" +namespace maple { +class DelegateRC { + public: + DelegateRC(MeFunction &func, Dominance &dom, MemPool *memPool, bool enabledDebug) + : func(func), + irMap(*func.GetIRMap()), + ssaTab(*func.GetMeSSATab()), + dominance(dom), + delegateRCAllocator(memPool), + verStCantDelegate(irMap.GetVerst2MeExprTableSize(), false, delegateRCAllocator.Adapter()), + verStUseCounts(irMap.GetVerst2MeExprTableSize(), 0, delegateRCAllocator.Adapter()), + refVar2RegMap(delegateRCAllocator.Adapter()), + verStDerefedCopied(irMap.GetVerst2MeExprTableSize(), false, delegateRCAllocator.Adapter()), + verStCantDecrefEarly(irMap.GetVerst2MeExprTableSize(), false, delegateRCAllocator.Adapter()), + enabledDebug(enabledDebug) {} + + virtual ~DelegateRC() = default; + + void SetCantDelegateAndCountUses(); + void DelegateStmtRC(); + std::set RenameAndGetLiveLocalRefVar(); + void CleanUpDeadLocalRefVar(const std::set &liveLocalrefvars); + + private: + bool IsCopiedOrDerefedOp(const Opcode op); + void CollectVstCantDecrefEarly(MeExpr &opnd0, MeExpr &opnd1); + void CollectUseCounts(const MeExpr &x); + void FindAndDecrUseCount(VarMeExpr *rhsVar, MeExpr *x, int32 &remainingUses); + bool MayThrowException(MeStmt &stmt); + bool ContainAllTheUses(VarMeExpr *rhsVar, const MeStmt &fromStmt, const MeStmt *toStmt); + RegMeExpr *RHSTempDelegated(MeExpr *rhs, MeStmt &useStmt); + bool FinalRefNoRC(const MeExpr &x); + void SetCantDelegate(const MapleMap &meVarPhiList); + void SaveDerefedOrCopiedVst(const MeExpr *expr); + void CollectDerefedOrCopied(const MeStmt &stmt); + void CollectDerefedOrCopied(const MeExpr &x); + void CollectUsesInfo(const MeExpr &x); + bool CanOmitRC4LHSVar(const MeStmt &stmt, bool &onlyWithDecref); + void DelegateHandleNoRCStmt(MeStmt &stmt, bool addDecref); + void DelegateRCTemp(MeStmt &stmt); + void RenameDelegatedRefVarUses(MeStmt &meStmt, MeExpr *meExpr); + + MeFunction &func; + IRMap 
&irMap; + SSATab &ssaTab; + Dominance &dominance; + MapleAllocator delegateRCAllocator; + MapleVector verStCantDelegate; // true if it has appearance as phi opnd + MapleVector verStUseCounts; // use counts of each SSA version + MapleMap refVar2RegMap; // map to the replacement preg + MapleVector verStDerefedCopied; // true if it is dereferenced or copied or passed as parameter + MapleVector verStCantDecrefEarly; // true if it is unsafe to insert early decref in form B1 delegation + bool enabledDebug; +}; + +class MeDoDelegateRC : public MeFuncPhase { + public: + explicit MeDoDelegateRC(MePhaseID id) : MeFuncPhase(id) {} + + virtual ~MeDoDelegateRC() = default; + AnalysisResult *Run(MeFunction *func, MeFuncResultMgr *m, ModuleResultMgr *mrm) override; + std::string PhaseName() const override { + return "delegaterc"; + } +}; +} // namespace maple +#endif // MAPLE_ME_INCLUDE_MEDELEGATERC_H diff --git a/src/maple_me/include/me_dse.h b/src/maple_me/include/me_dse.h index 16b5fa47cef74e8d6ce88c7f01ec86ac42daaf69..6efd7ed45d30cb3ac09e29aa8ddebd94a5a02102 100644 --- a/src/maple_me/include/me_dse.h +++ b/src/maple_me/include/me_dse.h @@ -29,7 +29,7 @@ class MeDSE : public DSE { MeDSE(MeFunction *f, Dominance *dom, bool enabledDebug) : DSE(std::vector(f->GetAllBBs().begin(), f->GetAllBBs().end()), *f->GetCommonEntryBB(), *f->GetCommonExitBB(), *f->GetMeSSATab(), - *dom, enabledDebug, MeOption::decoupleStatic), + *dom, enabledDebug), func(*f) {} virtual ~MeDSE() = default; diff --git a/src/maple_me/include/me_hdse.h b/src/maple_me/include/me_hdse.h index 0111484c64ac3062c00c7de906db7b30e8ffc07f..2552246cb2a952c63e113f94478abaad3f07632f 100644 --- a/src/maple_me/include/me_hdse.h +++ b/src/maple_me/include/me_hdse.h @@ -26,7 +26,7 @@ class MeHDSE : public HDSE { public: MeHDSE(MeFunction &f, Dominance &pDom, IRMap &map, bool enabledDebug) : HDSE(f.GetMIRModule(), f.GetAllBBs(), *f.GetCommonEntryBB(), *f.GetCommonExitBB(), *f.GetMeSSATab(), - pDom, map, enabledDebug, MeOption::decoupleStatic) {} + pDom, map, enabledDebug) {} virtual ~MeHDSE() = default; void RunHDSE(); diff --git a/src/maple_me/include/me_ir.h b/src/maple_me/include/me_ir.h index ca36e92de09711938f7042bcf0e7d7773d00cd3c..dca6e145167fbc10b92d486baed5da3174bc39b9 100644 --- a/src/maple_me/include/me_ir.h +++ b/src/maple_me/include/me_ir.h @@ -2372,17 +2372,12 @@ class UnaryMeStmt : public MeStmt { opnd = val; } - bool GetDecrefBeforeExit() const { - return decrefBeforeExit; - } - void Dump(IRMap*) const; StmtNode &EmitStmt(SSATab &ssaTab); private: MeExpr *opnd = nullptr; - bool decrefBeforeExit = false; // true if decref is inserted due to anticipated function exit }; class GotoMeStmt : public MeStmt { diff --git a/src/maple_me/include/me_may2dassign.h b/src/maple_me/include/me_may2dassign.h new file mode 100644 index 0000000000000000000000000000000000000000..793ff62b6c8471f2634bd39d397282286c31ebfe --- /dev/null +++ b/src/maple_me/include/me_may2dassign.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. 
+ * See the Mulan PSL v1 for more details. + */ +#ifndef MAPLE_ME_INCLUDE_MEMAY2DASSIGN_H +#define MAPLE_ME_INCLUDE_MEMAY2DASSIGN_H +#include "me_function.h" +#include "me_phase.h" +#include "me_irmap.h" + +namespace maple { +class May2Dassign { + public: + May2Dassign(MeFunction *f) : func(f), irMap(f->GetIRMap()), ssaTab(f->GetMeSSATab()) {} + + ~May2Dassign() = default; + void DoIt(); + + private: + MeFunction *func; + IRMap *irMap; + SSATab *ssaTab; +}; + +class MeDoMay2Dassign : public MeFuncPhase { + public: + MeDoMay2Dassign(MePhaseID id) : MeFuncPhase(id) {} + + virtual ~MeDoMay2Dassign() = default; + AnalysisResult *Run(MeFunction *func, MeFuncResultMgr *m, ModuleResultMgr *mrm) override; + std::string PhaseName() const override { + return "may2dassign"; + } +}; +} // namespace maple +#endif // MAPLE_ME_INCLUDE_MEMAY2DASSIGN_H diff --git a/src/maple_me/include/me_option.h b/src/maple_me/include/me_option.h index 46d078fd20c0d995995393457c9582b087a242e5..5cf5ce3b099efc1e4e32b5d0f3138a167b2dbc88 100644 --- a/src/maple_me/include/me_option.h +++ b/src/maple_me/include/me_option.h @@ -70,6 +70,7 @@ class MeOption { static uint32 eprePULimit; static uint32 lpreLimit; static uint32 lprePULimit; + static uint32 pregRenameLimit; static uint32 delRcPULimit; static bool ignoreIPA; static bool epreIncludeRef; @@ -93,7 +94,6 @@ class MeOption { static bool lpreSpeculate; static bool spillAtCatch; static bool optDirectCall; - static bool decoupleStatic; private: std::unordered_set skipPhases; MapleAllocator optionAlloc; diff --git a/src/maple_me/include/me_phases.def b/src/maple_me/include/me_phases.def index 8e889e481d6616467765a1496aeb77ac999eecca..20a0c2a2b900b716b2647190645489ac8a0c8441 100644 --- a/src/maple_me/include/me_phases.def +++ b/src/maple_me/include/me_phases.def @@ -19,9 +19,24 @@ FUNCAPHASE(MeFuncPhase_SSA, MeDoSSA) FUNCAPHASE(MeFuncPhase_IRMAP, MeDoIRMap) FUNCAPHASE(MeFuncPhase_BBLAYOUT, MeDoBBLayout) FUNCAPHASE(MeFuncPhase_MELOOP, MeDoMeLoop) +FUNCTPHASE(MeFuncPhase_BYPATHEH, MeDoBypathEH) +FUNCAPHASE(MeFuncPhase_MEABCOPT, MeDoABCOpt) +FUNCAPHASE(MeFuncPhase_CONDBASEDNPC, MeDoCondBasedNPC) +FUNCTPHASE(MeFuncPhase_MAY2DASSIGN, MeDoMay2Dassign) +FUNCTPHASE(MeFuncPhase_LOOPCANON, MeDoLoopCanon) +FUNCTPHASE(MeFuncPhase_SPLITCEDGE, MeDoSplitCEdge) +FUNCTPHASE(MeFuncPhase_DSE, MeDoDSE) +FUNCTPHASE(MeFuncPhase_HPROP, MeDoMeProp) +FUNCTPHASE(MeFuncPhase_HDSE, MeDoHDSE) +FUNCTPHASE(MeFuncPhase_SSADEVIRT, MeDoSSADevirtual) FUNCTPHASE(MeFuncPhase_SSAEPRE, MeDoSSAEPre) FUNCTPHASE(MeFuncPhase_SSALPRE, MeDoSSALPre) FUNCTPHASE(MeFuncPhase_STOREPRE, MeDoStorePre) FUNCTPHASE(MeFuncPhase_STMTPRE, MeDoStmtPre) +FUNCTPHASE(MeFuncPhase_SSARENAME2PREG, MeDoSSARename2Preg) +FUNCTPHASE(MeFuncPhase_PREGRENAMER, MeDoPregRename) +FUNCTPHASE(MeFuncPhase_ANALYZERC, MeDoAnalyzeRC) +FUNCAPHASE(MeFuncPhase_DELEGATERC, MeDoDelegateRC) +FUNCAPHASE(MeFuncPhase_CONDBASEDRC, MeDoCondBasedRC) FUNCTPHASE(MeFuncPhase_RCLOWERING, MeDoRCLowering) FUNCTPHASE(MeFuncPhase_EMIT, MeDoEmit) diff --git a/src/maple_me/include/me_ssa_devirtual.h b/src/maple_me/include/me_ssa_devirtual.h new file mode 100644 index 0000000000000000000000000000000000000000..463d5ce97021b7dac0d0648f13b44ac335abd3bd --- /dev/null +++ b/src/maple_me/include/me_ssa_devirtual.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. 
+ * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +#ifndef MAPLE_ME_INCLUDE_MESSADEVIRTUAL_H +#define MAPLE_ME_INCLUDE_MESSADEVIRTUAL_H + +#include "ssa_devirtual.h" +#include "clone.h" + +namespace maple { +class MeSSADevirtual : public SSADevirtual { + public: + MeSSADevirtual(MemPool *memPool, MIRModule *mod, MeFunction *func, IRMap *irMap, KlassHierarchy *kh, Dominance *dom, + Clone *clone) + : SSADevirtual(memPool, mod, irMap, kh, dom, func->GetAllBBs().size(), clone), func(func) {} + + ~MeSSADevirtual() = default; + + protected: + BB *GetBB(BBId id) override { + return func->GetAllBBs().at(id); + } + + MIRFunction *GetMIRFunction() override { + return func->GetMirFunc(); + } + + private: + MeFunction *func; +}; + +class MeDoSSADevirtual : public MeFuncPhase { + public: + explicit MeDoSSADevirtual(MePhaseID id) : MeFuncPhase(id) {} + + virtual ~MeDoSSADevirtual() = default; + + AnalysisResult *Run(MeFunction *func, MeFuncResultMgr *frm, ModuleResultMgr *mrm) override; + + virtual std::string PhaseName() const override { + return "ssadevirt"; + } +}; +} // namespace maple +#endif // MAPLE_ME_INCLUDE_MESSADEVIRTUAL_H diff --git a/src/maple_me/include/preg_renamer.h b/src/maple_me/include/preg_renamer.h new file mode 100644 index 0000000000000000000000000000000000000000..275530a826ea4aa4b4a084703771714d1f53f16e --- /dev/null +++ b/src/maple_me/include/preg_renamer.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
+ */ +#ifndef MAPLE_ME_INCLUDE_PREGRENAMER_H +#define MAPLE_ME_INCLUDE_PREGRENAMER_H +#include "me_irmap.h" +#include "ssa_pre.h" +namespace maple { +class PregRenamer { + public: + PregRenamer(MemPool *memPool, MeFunction *f, MeIRMap *irMap, bool enabledDebug) + : alloc(memPool), func(f), irMap(irMap), enabledDebug(enabledDebug) {} + virtual ~PregRenamer() = default; + void RunSelf(); + + private: + void EnqueDefUses(std::list &qu, RegMeExpr *node, std::set &curVisited); + MapleAllocator alloc; + MeFunction *func; + MeIRMap *irMap; + bool enabledDebug; +}; +class MeDoPregRename : public MeFuncPhase { + public: + explicit MeDoPregRename(MePhaseID id) : MeFuncPhase(id) {} + + virtual ~MeDoPregRename() = default; + AnalysisResult *Run(MeFunction *ir, MeFuncResultMgr *m, ModuleResultMgr *mrm) override; + std::string PhaseName() const override { + return "pregrename"; + } +}; +} // namespace maple +#endif // MAPLE_ME_INCLUDE_PREGRENAMER_H diff --git a/src/maple_me/include/ssa_devirtual.h b/src/maple_me/include/ssa_devirtual.h new file mode 100644 index 0000000000000000000000000000000000000000..d96702b58a299a14d9000b9f77977061c4d6ad4e --- /dev/null +++ b/src/maple_me/include/ssa_devirtual.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
+ */ +#ifndef MAPLE_ME_INCLUDE_SSADEVIRTUAL_H +#define MAPLE_ME_INCLUDE_SSADEVIRTUAL_H +#include "me_ir.h" +#include "me_phase.h" +#include "me_irmap.h" +#include "dominance.h" +#include "class_hierarchy.h" +#include "clone.h" +namespace maple { +class SSADevirtual { + public: + static bool debug; + SSADevirtual(MemPool *memPool, MIRModule *currMod, IRMap *irMap, KlassHierarchy *currKh, + Dominance *currDom, size_t bbVecSize, Clone *currClone) + : devirtualAlloc(memPool), + mod(currMod), + irMap(irMap), + kh(currKh), + dom(currDom), + bbVisited(bbVecSize, false, devirtualAlloc.Adapter()), + clone(currClone), + retTy(kNotSeen), + inferredRetTyIdx(0), + totalVirtualCalls(0), + optedVirtualCalls(0), + totalInterfaceCalls(0), + optedInterfaceCalls(0), + nullCheckCount(0) {} + + virtual ~SSADevirtual() {} + + void Perform(BB *entryBB); + + protected: + virtual MIRFunction *GetMIRFunction() { + return nullptr; + } + + virtual BB *GetBB(BBId id) = 0; + void TraversalBB(BB*); + void TraversalMeStmt(MeStmt *Stmt); + void VisitVarPhiNode(MeVarPhiNode*); + void VisitMeExpr(MeExpr*); + void PropVarInferredType(VarMeExpr*); + void PropIvarInferredType(IvarMeExpr*); + void ReturnTyIdxInferring(RetMeStmt*); + bool NeedNullCheck(MeExpr*) const; + void InsertNullCheck(CallMeStmt*, MeExpr*); + bool DevirtualizeCall(CallMeStmt*); + void SSADevirtualize(CallNode *stmt); + void ReplaceCall(CallMeStmt*, MIRFunction*); + TyIdx GetInferredTyIdx(MeExpr *expr); + + private: + MapleAllocator devirtualAlloc; + MIRModule *mod; + IRMap *irMap; + KlassHierarchy *kh; + Dominance *dom; + MapleVector bbVisited; // needed because dominator tree is a DAG in wpo + Clone *clone; + enum TagRetTyIdx { + kNotSeen, + kSeen, + kFailed + } retTy; + TyIdx inferredRetTyIdx; + unsigned int totalVirtualCalls; + unsigned int optedVirtualCalls; + unsigned int totalInterfaceCalls; + unsigned int optedInterfaceCalls; + unsigned int nullCheckCount; +}; +} // namespace maple +#endif // MAPLE_ME_INCLUDE_SSADEVIRTUAL_H diff --git a/src/maple_me/src/dse.cpp b/src/maple_me/src/dse.cpp index 27d18131482a967efaee29013e437227d928aaa2..59d5dc714d912663d011d5ef4b3ef577600467fd 100644 --- a/src/maple_me/src/dse.cpp +++ b/src/maple_me/src/dse.cpp @@ -42,7 +42,7 @@ bool DSE::ExprNonDeletable(const BaseNode &expr) { case OP_dread: { auto &dread = static_cast(expr); const MIRSymbol &sym = dread.GetMIRSymbol(); - return sym.IsVolatile() || sym.IsTypeVolatile(dread.GetFieldID()) || (decoupleStatic && sym.IsGlobal()); + return sym.IsVolatile() || sym.IsTypeVolatile(dread.GetFieldID()); } case OP_iread: { auto &iread = static_cast(expr); diff --git a/src/maple_me/src/hdse.cpp b/src/maple_me/src/hdse.cpp index 56c83234cd6f49bc074d1db9a19191a7dbb2e781..7df4fa3b96782c6d8fc2b95dde3fe311dc2771f6 100644 --- a/src/maple_me/src/hdse.cpp +++ b/src/maple_me/src/hdse.cpp @@ -189,8 +189,7 @@ bool HDSE::ExprNonDeletable(MeExpr &meExpr) { } case kMeOpVar: { auto &varMeExpr = static_cast(meExpr); - return varMeExpr.IsVolatile(ssaTab) || - (decoupleStatic && ssaTab.GetSymbolOriginalStFromID(varMeExpr.GetOStIdx())->GetMIRSymbol()->IsGlobal()); + return varMeExpr.IsVolatile(ssaTab); } case kMeOpIvar: { auto &opIvar = static_cast(meExpr); diff --git a/src/maple_me/src/me_analyze_rc.cpp b/src/maple_me/src/me_analyze_rc.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8f67462c18267289f9332bf33ddf4b9462b2c4d3 --- /dev/null +++ b/src/maple_me/src/me_analyze_rc.cpp @@ -0,0 +1,457 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. 
+ * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +#include "me_analyze_rc.h" +#include "me_option.h" +#include "me_dominance.h" + +// This phase analyzes the defs and uses of ref pointers in the function and +// performs the following modifications to the code: +// +// A. Insert a decref for the ref pointer before each of its definitions. If the +// ref pointer is local (a localrefvar) and the analysis shows that it is the +// first definition since entering the function, the decref will be omitted. +// +// B. At each statement that assigns a new value to a ref pointer, insert an +// incref after the assignment. In cases where the incref has already been +// performed when the assigned value is computed, the incref is not inserted. +// +// C. A localrefvar needs to be cleaned up before function exit. This clean-up +// corresponds to performing a decref. Instead of inserting a decref to clean up +// each localrefvar, the cleanup for the localrefvars in the function is +// represented in aggregate via a call to the intrinsic CLEANUP_LOCALREFVARS +// inserted before each return statement. The localrefvars to be cleaned up +// for each return statement are indicated as actual parameters in the +// intrinsiccall statement. If a localrefvar's definition cannot reach a return +// statement, it will not be included in the actual parameters. If the number +// of localrefvar parameters in the intrinsiccall is more than +// `kCleanupLocalRefVarsLimit`, the intrinsiccall will be omitted, in which +// case the code generator will clean up all localrefvars. +// +// For C, analyzerc can try to do more optimization by inserting decrefs for +// individual localrefvars earlier than the return statements. This is done +// when the placementRC flag is set to true. This optimization is performed by +// calling PlacementRC::ApplySSUPre for all localrefvars. +// Under placementRC, the CLEANUP_LOCALREFVARS intrinsiccall will still be +// inserted for the decrefs being inserted before the return statement. +// +// When a formal parameter of ref type is ever assigned inside the function, +// an incref for it needs to be inserted at function entry. This is done +// by the placementRC phase. For such formal parameters, the placementRC phase +// will also insert decrefs to clean them up after their last use. +// +// If the return statement returns a ref pointer variable, an incref for it +// needs to be effected. This is not handled by analyzerc, but is handled by +// the rcLowering phase. +// +// This phase needs to be run before register promotion, because some alias +// information is lost after a pointer is promoted to preg. Because it is run +// after the EPRE phase, there can be regassign's and regread's of ref type but +// they will not cause decref or incref insertion.
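To make rules A, B and C concrete, the following is a hand-written before/after sketch in Maple IR pseudocode (illustrative only, not output of this phase; %tmp and $gRef are hypothetical names):

  # before analyzerc:
  dassign %tmp 0 (dread ref $gRef)
  return (dread ref %tmp)

  # after analyzerc:
  decref (dread ref %tmp)                              # rule A; omitted if this is %tmp's first def
  dassign %tmp 0 (dread ref $gRef)                     # rule B: marked need_incref, so an incref follows
  intrinsiccall CLEANUP_LOCALREFVARS (dread ref %tmp)  # rule C, inserted before the return
  return (dread ref %tmp)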
+ +// if the number of live localrefvars at a return is more than this limit, we +// will not insert the intrinsiccall to CLEANUP_LOCALREFVARS +constexpr int kCleanupLocalRefVarsLimit = 200; + +namespace maple { +void RCItem::Dump() { + ost.Dump(); + if (!noAlias) { + LogInfo::Info() << " aliased"; + } + if (nonLocal) { + LogInfo::Info() << " nonLocal"; + } + if (isFormal) { + LogInfo::Info() << " isFormal"; + } + for (BBId bbId : occurBBs) { + LogInfo::Info() << " " << bbId; + } + LogInfo::Info() << '\n'; +} + +RCItem *AnalyzeRC::FindOrCreateRCItem(const OriginalSt &ost) { + auto mapIt = rcItemsMap.find(ost.GetIndex()); + if (mapIt != rcItemsMap.end()) { + return mapIt->second; + } + RCItem *rcItem = analyzeRCMp->New(ost, analyzeRCAllocator); + rcItemsMap[ost.GetIndex()] = rcItem; + if (ost.GetIndex() >= aliasClass.GetAliasElemCount()) { + rcItem->noAlias = true; + } else { + AliasElem *ae = aliasClass.FindAliasElem(ost); + rcItem->noAlias = ae->GetClassSet() == nullptr; + } + rcItem->nonLocal = ost.GetIndirectLev() > 0 || ost.GetMIRSymbol()->IsGlobal(); + if (!rcItem->nonLocal) { + rcItem->isFormal = ost.GetMIRSymbol()->GetStorageClass() == kScFormal; + } + return rcItem; +} + +OriginalSt *AnalyzeRC::GetOriginalSt(MeExpr &refLHS) { + if (refLHS.GetMeOp() == kMeOpVar) { + auto &varMeExpr = static_cast(refLHS); + return ssaTab.GetSymbolOriginalStFromID(varMeExpr.GetOStIdx()); + } + ASSERT(refLHS.GetMeOp() == kMeOpIvar, "GetOriginalSt: unexpected node type"); + auto &ivarMeExpr = static_cast(refLHS); + if (ivarMeExpr.GetMu() != nullptr) { + return ssaTab.GetSymbolOriginalStFromID(ivarMeExpr.GetMu()->GetOStIdx()); + } + ASSERT(ivarMeExpr.GetDefStmt() != nullptr, "GetOriginalSt: ivar with mu==nullptr has no defStmt"); + IassignMeStmt *iass = ivarMeExpr.GetDefStmt(); + CHECK_FATAL(!iass->GetChiList()->empty(), "GetOriginalSt: ivar with mu==nullptr has empty chiList at its def"); + return ssaTab.GetSymbolOriginalStFromID(iass->GetChiList()->begin()->second->GetLHS()->GetOStIdx()); +} + +VarMeExpr *AnalyzeRC::GetZeroVersionVarMeExpr(const VarMeExpr &var) { + const OriginalSt *ost = ssaTab.GetSymbolOriginalStFromID(var.GetOStIdx()); + return irMap.GetOrCreateZeroVersionVarMeExpr(*ost); +} + +// check if incref needs to be inserted after this ref pointer assignment; +// if it is callassigned, the incref has already been done in the callee; +// if rhs is gcmalloc/gcmallocjarray, the refcount is already 1; +// if rhs is neither dread nor iread, it cannot be a pointer, so no incref is needed +bool AnalyzeRC::NeedIncref(const MeStmt &stmt) { + if (kOpcodeInfo.IsCallAssigned(stmt.GetOp())) { + return false; + } + MeExpr *rhs = stmt.GetRHS(); + CHECK_NULL_FATAL(rhs); + return rhs->PointsToSomethingThatNeedsIncRef(); +} + +// identify assignments to ref pointers and insert a decref before and an +// incref after each one +void AnalyzeRC::IdentifyRCStmts() { + auto eIt = func.valid_end(); + for (auto bIt = func.valid_begin(); bIt != eIt; ++bIt) { + auto &bb = **bIt; + for (auto &stmt : bb.GetMeStmts()) { + MeExpr *lhsRef = stmt.GetLHSRef(ssaTab, skipLocalRefVars); + if (lhsRef != nullptr) { + const OriginalSt *ost = GetOriginalSt(*lhsRef); + ASSERT(ost != nullptr, "IdentifyRCStmts: cannot get SymbolOriginalSt"); + (void)FindOrCreateRCItem(*ost); + // this part for inserting decref + if (lhsRef->GetMeOp() == kMeOpVar) { + // insert a decref statement + UnaryMeStmt &decrefStmt = meBuilder.BuildUnaryMeStmt( + OP_decref, *GetZeroVersionVarMeExpr(static_cast(*lhsRef)), bb, stmt.GetSrcPosition()); + // insertion position is
before stmt + bb.InsertMeStmtBefore(&stmt, &decrefStmt); + } else { + auto *lhsIvar = static_cast(lhsRef); + { + // insert a decref statement + IvarMeExpr &ivarMeExpr = utils::ToRef(safe_cast( + meBuilder.CreateMeExpr(kInvalidExprID, *lhsIvar))); + ivarMeExpr.SetDefStmt(nullptr); + ivarMeExpr.SetMuVal(nullptr); + // form mu from chiList + auto &iass = static_cast(stmt); + MapleMap::iterator xit = iass.GetChiList()->begin(); + for (; xit != iass.GetChiList()->end(); ++xit) { + ChiMeNode *chi = xit->second; + if (chi->GetRHS()->GetOStIdx() == ost->GetIndex()) { + ivarMeExpr.SetMuVal(chi->GetRHS()); + break; + } + } + ASSERT(xit != iass.GetChiList()->end(), "IdentifyRCStmts: failed to find corresponding chi node"); + UnaryMeStmt &decrefStmt = meBuilder.BuildUnaryMeStmt( + OP_decref, *irMap.HashMeExpr(ivarMeExpr), bb, stmt.GetSrcPosition()); + // insertion position is before stmt + bb.InsertMeStmtBefore(&stmt, &decrefStmt); + ost = GetOriginalSt(*decrefStmt.GetOpnd()); + ASSERT(ost != nullptr, "IdentifyRCStmts: cannot get SymbolOriginalSt"); + (void)FindOrCreateRCItem(*ost); + } + } + // this part for inserting incref + if (NeedIncref(stmt)) { + stmt.EnableNeedIncref(); + } + } + } // end of stmt iteration + } +} + +void AnalyzeRC::CreateCleanupIntrinsics() { + for (BB *bb : func.GetCommonExitBB()->GetPred()) { + auto &meStmts = bb->GetMeStmts(); + if (meStmts.empty() || meStmts.back().GetOp() != OP_return) { + continue; + } + std::vector opnds; + for (const auto &mapItem : rcItemsMap) { + RCItem *rcItem = mapItem.second; + if (rcItem->nonLocal || rcItem->isFormal) { + continue; + } + opnds.push_back(irMap.GetOrCreateZeroVersionVarMeExpr(rcItem->ost)); + } + IntrinsiccallMeStmt *intrn = irMap.CreateIntrinsicCallMeStmt(INTRN_MPL_CLEANUP_LOCALREFVARS, opnds); + bb->InsertMeStmtBefore(&(meStmts.back()), intrn); + } +} + +void AnalyzeRC::TraverseStmt(BB &bb) { + if (bb.GetMeStmts().empty()) { + return; + } + for (auto &meStmt : bb.GetMeStmts()) { + if (meStmt.GetOp() == OP_decref || (meStmt.GetOp() == OP_intrinsiccall && + static_cast(meStmt).GetIntrinsic() == INTRN_MPL_CLEANUP_LOCALREFVARS)) { + RenameUses(meStmt); + } else { + MeExpr *lhsRef = meStmt.GetLHSRef(ssaTab, skipLocalRefVars); + if (lhsRef == nullptr) { + continue; + } + const OriginalSt *ost = GetOriginalSt(*lhsRef); + RCItem *rcItem = rcItemsMap[ost->GetIndex()]; + if (!rcItem->nonLocal) { + rcItem->versionStack.push(lhsRef); + } + } + } +} + +void AnalyzeRC::RenameRefPtrs(BB *bb) { + if (skipLocalRefVars || bb == nullptr) { + return; + } + std::map savedStacksize; // to record stack size + // in each RCItem for stack pop-ups + for (const auto &mapItem : rcItemsMap) { + RCItem *rcItem = mapItem.second; + if (rcItem->nonLocal) { + continue; + } + // record stack size + savedStacksize[rcItem] = rcItem->versionStack.size(); + // if there is a phi, push stack + auto phiIt = bb->GetMevarPhiList().find(mapItem.second->ost.GetIndex()); + if (phiIt != bb->GetMevarPhiList().end()) { + rcItem->versionStack.push((*phiIt).second->GetLHS()); + } + } + // traverse the BB stmts + TraverseStmt(*bb); + // recursive call in preorder traversal of dominator tree + ASSERT(bb->GetBBId() < dominance.GetDomChildrenSize(), "index out of range in AnalyzeRC::RenameRefPtrs"); + const MapleSet &domChildren = dominance.GetDomChildren(bb->GetBBId()); + for (const auto &childBBId : domChildren) { + RenameRefPtrs(func.GetAllBBs().at(childBBId)); + } + // restore the stacks to their size at entry to this function invocation + for (const auto &mapItem : rcItemsMap) { 
+ RCItem *rcItem = mapItem.second; + if (rcItem->nonLocal) { + continue; + } + size_t lastSize = savedStacksize[rcItem]; + while (rcItem->versionStack.size() > lastSize) { + rcItem->versionStack.pop(); + } + } +} + +void AnalyzeRC::RenameUses(MeStmt &meStmt) { + for (size_t i = 0; i < meStmt.NumMeStmtOpnds(); ++i) { + const OriginalSt *ost = GetOriginalSt(*meStmt.GetOpnd(i)); + RCItem *rcItem = rcItemsMap[ost->GetIndex()]; + if (meStmt.GetOp() == OP_intrinsiccall) { + ASSERT(!rcItem->nonLocal, "cleanupLocalrefvars only takes locals"); + } + if (!rcItem->nonLocal && !rcItem->versionStack.empty()) { + meStmt.SetOpnd(i, rcItem->versionStack.top()); + } + } +} + +DassignMeStmt *AnalyzeRC::CreateDassignInit(OriginalSt &ost, BB &bb) { + VarMeExpr *lhs = irMap.CreateNewVarMeExpr(ost, PTY_ref, ost.GetFieldID()); + MeExpr *rhs = irMap.CreateIntConstMeExpr(0, PTY_ref); + return irMap.CreateDassignMeStmt(utils::ToRef(lhs), utils::ToRef(rhs), bb); +} + +UnaryMeStmt *AnalyzeRC::CreateIncrefZeroVersion(const OriginalSt &ost) { + return &meBuilder.BuildUnaryMeStmt(OP_incref, *irMap.GetOrCreateZeroVersionVarMeExpr(ost)); +} + +void AnalyzeRC::OptimizeRC() { + auto eIt = func.valid_end(); + for (auto bIt = func.valid_begin(); bIt != eIt; ++bIt) { + auto *bb = *bIt; + for (auto itStmt = bb->GetMeStmts().begin(); itStmt != bb->GetMeStmts().end(); ++itStmt) { + MeStmt *stmt = to_ptr(itStmt); + if (itStmt->GetOp() != OP_decref) { + continue; + } + auto *decref = static_cast(stmt); + MeStmt *refAssign = stmt->GetNext(); + MeExpr *opnd = decref->GetOpnd(); + OriginalSt *ost = GetOriginalSt(*opnd); + if (opnd->GetMeOp() == kMeOpVar && ost->IsLocal() && skipLocalRefVars) { + continue; + } + RCItem *rcItem = rcItemsMap[ost->GetIndex()]; + if (!rcItem->isFormal && opnd->GetMeOp() == kMeOpVar) { + // !nonLocal && noAlias + rcItem->occurBBs.insert(bb->GetBBId()); + rcItem->needSomeRC = true; + } + if (NeedDecRef(*rcItem, *opnd)) { + refAssign->EnableNeedDecref(); + } else { + bb->RemoveMeStmt(stmt); // delete the decref + } + ++itStmt; // next iteration will process the stmt after refAssign + } + } +} + +bool AnalyzeRC::NeedDecRef(RCItem &rcItem, MeExpr &expr) { + CHECK_FATAL((rcItem.nonLocal || rcItem.noAlias), "OptimizeRC: local pointers cannot have alias"); + // see if the decref can be optimized away + if (rcItem.nonLocal) { + // the decref can be avoided for iassign thru this in constructor funcs + if (rcItem.ost.GetIndirectLev() == 1 && func.GetMirFunc()->IsConstructor()) { + auto &ivarMeExpr = static_cast(expr); + return NeedDecRef(ivarMeExpr); + } + return true; + } + if (rcItem.isFormal || expr.GetMeOp() != kMeOpVar) { + return true; + } + auto &varMeExpr = static_cast(expr); + return NeedDecRef(varMeExpr); +} + +bool AnalyzeRC::NeedDecRef(IvarMeExpr &ivar) { + auto *base = ivar.GetBase(); + if (base->GetMeOp() != kMeOpVar) { + return true; + } + auto *baseVar = static_cast(base); + const MIRSymbol *sym = ssaTab.GetMIRSymbolFromID(baseVar->GetOStIdx()); + if (sym->GetStorageClass() != kScFormal || sym != func.GetMirFunc()->GetFormal(0)) { + return true; + } + MIRType *baseType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ivar.GetTyIdx()); + MIRType *type = static_cast(baseType)->GetPointedType(); + if (!instance_of(type)) { + return true; + } + auto *classType = static_cast(type); + // check ivarMeExpr->fieldID is not from base classes + if (!classType->IsOwnField(ivar.GetFieldID())) { + return true; + } + // check the ivar's mu is zero version + OriginalSt *ost = GetOriginalSt(ivar); + return 
ivar.GetMu()->GetVstIdx() != ost->GetZeroVersionIndex() && ivar.GetMu()->GetDefBy() != kDefByNo; +} + +bool AnalyzeRC::NeedDecRef(VarMeExpr &var) { + OriginalSt *ost = GetOriginalSt(var); + return var.GetVstIdx() != ost->GetZeroVersionIndex() && var.GetDefBy() != kDefByNo; +} + +// among the arguments in the intrinsiccall to INTRN_CLEANUP_LOCALREFVARS, those +// that are zero version are not live, and can be deleted; if the number of +// arguments left is > `kCleanupLocalRefVarsLimit`, delete the intrinsiccall. +void AnalyzeRC::RemoveUnneededCleanups() { + for (BB *bb : func.GetCommonExitBB()->GetPred()) { + auto &meStmts = bb->GetMeStmts(); + if (meStmts.empty() || meStmts.back().GetOp() != OP_return) { + continue; + } + MeStmt *meStmt = meStmts.back().GetPrev(); + CHECK_NULL_FATAL(meStmt); + ASSERT(meStmt->GetOp() == OP_intrinsiccall, "RemoveUnneededCleanups: cannot find cleanup intrinsic stmt"); + auto intrn = static_cast(meStmt); + ASSERT(intrn->GetIntrinsic() == INTRN_MPL_CLEANUP_LOCALREFVARS, + "RemoveUnneededCleanups: cannot find cleanup intrinsic stmt"); + size_t nextPos = 0; + size_t i = 0; + for (; i < intrn->NumMeStmtOpnds(); ++i) { + auto varMeExpr = static_cast(intrn->GetOpnds()[i]); + if (varMeExpr->IsZeroVersion(ssaTab)) { + continue; + } + if (nextPos != i) { + intrn->GetOpnds()[nextPos] = varMeExpr; + } + ++nextPos; + } + while (nextPos < i) { + intrn->GetOpnds().pop_back(); + --i; + } + if (intrn->NumMeStmtOpnds() > kCleanupLocalRefVarsLimit) { + bb->RemoveMeStmt(intrn); // delete the intrinsiccall stmt + } + } +} + +void AnalyzeRC::Run() { + func.SetHints(func.GetHints() | kAnalyzeRCed); + IdentifyRCStmts(); + if (!skipLocalRefVars) { + CreateCleanupIntrinsics(); + } + RenameRefPtrs(func.GetCommonEntryBB()); + if (MeOption::optLevel > 0 && !skipLocalRefVars) { + RemoveUnneededCleanups(); + } + OptimizeRC(); +} + +AnalysisResult *MeDoAnalyzeRC::Run(MeFunction *func, MeFuncResultMgr *m, ModuleResultMgr*) { + auto *dom = static_cast(m->GetAnalysisResult(MeFuncPhase_DOMINANCE, func)); + ASSERT(dom != nullptr, "dominance phase has problem"); + auto *aliasClass = static_cast(m->GetAnalysisResult(MeFuncPhase_ALIASCLASS, func)); + ASSERT(aliasClass != nullptr, "aliasClass phase has problem"); + ASSERT_NOT_NULL(m->GetAnalysisResult(MeFuncPhase_IRMAP, func)); + if (DEBUGFUNC(func)) { + LogInfo::Info() << " Processing " << func->GetMirFunc()->GetName() << '\n'; + } + // add extra scope so destructor for analyzerc will be invoked earlier + AnalyzeRC analyzerc(*func, *dom, *aliasClass, NewMemPool()); + analyzerc.Run(); + if (DEBUGFUNC(func)) { + LogInfo::Info() << "\n============== After ANALYZE RC =============" << '\n'; + func->Dump(false); + } + if (!MeOption::noDelegateRC && MeOption::rcLowering && MeOption::optLevel > 0) { + m->GetAnalysisResult(MeFuncPhase_DELEGATERC, func); + } + if (!MeOption::noCondBasedRC && MeOption::rcLowering && MeOption::optLevel > 0) { + m->GetAnalysisResult(MeFuncPhase_CONDBASEDRC, func); + } + if (DEBUGFUNC(func)) { + LogInfo::Info() << "\n============== After delegate RC and condbased RC =============" << '\n'; + func->Dump(false); + } + return nullptr; +} +} // namespace maple diff --git a/src/maple_me/src/me_bb_layout.cpp b/src/maple_me/src/me_bb_layout.cpp index 26ea51f7b0f7f3fe3ba16c67d4b96fc197e70642..418083aa7be37f60291f7ccc732f274a3b5003b1 100644 --- a/src/maple_me/src/me_bb_layout.cpp +++ b/src/maple_me/src/me_bb_layout.cpp @@ -235,7 +235,7 @@ void BBLayout::OptimizeBranchTarget(BB &bb) { } do { ASSERT(!bb.GetSucc().empty(), "container
check"); - BB *brTargetBB = bb.GetKind() == kBBCondGoto ? bb.GetSucc(1) : bb.GetSucc(0); + BB *brTargetBB = bb.GetKind() == kBBCondGoto ? bb.GetSucc().back() : bb.GetSucc().front(); if (brTargetBB->GetAttributes(kBBAttrWontExit)) { return; } @@ -247,7 +247,7 @@ void BBLayout::OptimizeBranchTarget(BB &bb) { // optimize stmt BB *newTargetBB = brTargetBB->GetSucc().front(); if (brTargetBB->GetKind() == kBBCondGoto) { - newTargetBB = brTargetBB->GetSucc(1); + newTargetBB = brTargetBB->GetSucc().back(); } LabelIdx newTargetLabel = func.GetOrCreateBBLabel(*newTargetBB); if (func.GetIRMap() != nullptr) { diff --git a/src/maple_me/src/me_critical_edge.cpp b/src/maple_me/src/me_critical_edge.cpp new file mode 100644 index 0000000000000000000000000000000000000000..08e02510b1652b53c4f3496daf734408f9b7c663 --- /dev/null +++ b/src/maple_me/src/me_critical_edge.cpp @@ -0,0 +1,151 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +#include "me_critical_edge.h" +#include +#include "me_cfg.h" +#include "me_option.h" +#include "dominance.h" +#include "me_function.h" + +// This phase finds critical edges and split them into two, because their +// presence would restrict the optimizations performed by SSAPRE-based phases. +// An edge is a critical edge when its pred has more than one succ and its succ +// has more than one pred +// pred +// / \ +// newbb \ <-- newbb (newbb is an empty bb but may carry a label) +// \ / \ +// succ +// +// newbb is always appended at the end of bb_vec_ and pred/succ will be updated. +// The bblayout phase will determine the final layout order of the bbs. 
+ +namespace maple { +void MeDoSplitCEdge::BreakCriticalEdge(MeFunction &func, BB &pred, BB &succ) { + if (DEBUGFUNC(&func)) { + LogInfo::MapleLogger() << "******before break : critical edge : BB" << pred.GetBBId() << " -> BB" << + succ.GetBBId() << "\n"; + pred.Dump(&func.GetMIRModule()); + succ.Dump(&func.GetMIRModule()); + } + ASSERT(!succ.GetAttributes(kBBAttrIsCatch), "BreakCriticalEdge: cannot break an EH edge"); + // create newBB and set pred/succ + BB *newBB = nullptr; + // use replace instead of remove/add to keep position in pred/succ + if (&pred == func.GetCommonEntryBB()) { + newBB = &func.InsertNewBasicBlock(*func.GetFirstBB()); + pred.ReplaceSuccOfCommonEntryBB(&succ, newBB); + newBB->GetSucc().push_back(&succ); + succ.GetPred().push_back(newBB); + newBB->SetAttributes(kBBAttrIsEntry); + succ.ClearAttributes(kBBAttrIsEntry); + } else { + newBB = func.NewBasicBlock(); + pred.ReplaceSucc(&succ, newBB); + succ.ReplacePred(&pred, newBB); + } + newBB->SetKind(kBBFallthru); // default kind + newBB->SetAttributes(kBBAttrArtificial); + + // update statement offset if succ is goto target + if (pred.GetKind() == kBBCondGoto) { + auto &gotoStmt = static_cast(pred.GetStmtNodes().back()); + BB *gotoBB = pred.GetSucc().at(1); + LabelIdx oldLabelIdx = gotoStmt.GetOffset(); + if (oldLabelIdx != gotoBB->GetBBLabel()) { + // original gotoBB is replaced by newBB + LabelIdx label = func.GetOrCreateBBLabel(*gotoBB); + gotoStmt.SetOffset(label); + } + if (DEBUGFUNC(&func)) { + LogInfo::MapleLogger() << "******after break: dump updated condgoto_BB *****\n"; + pred.Dump(&func.GetMIRModule()); + newBB->Dump(&func.GetMIRModule()); + succ.Dump(&func.GetMIRModule()); + } + } else if (pred.GetKind() == kBBSwitch) { + auto &switchStmt = static_cast(pred.GetStmtNodes().back()); + LabelIdx oldLabelIdx = succ.GetBBLabel(); + LabelIdx label = func.GetOrCreateBBLabel(*newBB); + if (switchStmt.GetDefaultLabel() == oldLabelIdx) { + switchStmt.SetDefaultLabel(label); + } + for (size_t i = 0; i < switchStmt.GetSwitchTable().size(); ++i) { + LabelIdx labelIdx = switchStmt.GetCasePair(i).second; + if (labelIdx == oldLabelIdx) { + switchStmt.UpdateCaseLabelAt(i, label); + } + } + if (DEBUGFUNC(&func)) { + LogInfo::MapleLogger() << "******after break: dump updated switchBB *****\n"; + pred.Dump(&func.GetMIRModule()); + newBB->Dump(&func.GetMIRModule()); + succ.Dump(&func.GetMIRModule()); + } + } +} + +AnalysisResult *MeDoSplitCEdge::Run(MeFunction *func, MeFuncResultMgr *m, ModuleResultMgr*) { + std::vector> criticalEdge; + auto eIt = func->valid_end(); + for (auto bIt = func->valid_begin(); bIt != eIt; ++bIt) { + if (bIt == func->common_exit()) { + continue; + } + auto *bb = *bIt; + MapleVector &preds = bb->GetPred(); + // skip fallthrough bb or bb is handler block + if (preds.size() < 2 || bb->GetAttributes(kBBAttrIsCatch)) { + continue; + } + // current BB is a merge + for (BB *pred : preds) { + if (pred->GetKind() == kBBGoto) { + continue; + } + if (pred->GetSucc().size() > 1) { + // pred has more than one succ + criticalEdge.push_back(std::make_pair(pred, bb)); + } + } + } + // separate treatment for commonEntryBB's succ BBs + for (BB *entryBB : func->GetCommonEntryBB()->GetSucc()) { + if (!entryBB->GetPred().empty()) { + criticalEdge.push_back(std::make_pair(func->GetCommonEntryBB(), entryBB)); + } + } + if (!criticalEdge.empty()) { + if (DEBUGFUNC(func)) { + LogInfo::MapleLogger() << "*******************before break dump function*****************\n"; + func->GetTheCfg()->DumpToFile("cfgbeforebreak"); + } + for 
(auto it = criticalEdge.begin(); it != criticalEdge.end(); ++it) { + BreakCriticalEdge(*func, *((*it).first), *((*it).second)); + } + if (DEBUGFUNC(func)) { + LogInfo::MapleLogger() << "******************after break dump function******************\n"; + func->Dump(true); + func->GetTheCfg()->DumpToFile("cfgafterbreak"); + } + if (func->GetMIRModule().IsInIPA()) { + m->InvalidAnalysisResult(MeFuncPhase_DOMINANCE, func); + } else { + m->InvalidAllResults(); + } + } + return nullptr; +} +} // namespace maple diff --git a/src/maple_me/src/me_delegate_rc.cpp b/src/maple_me/src/me_delegate_rc.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f2d33d5d78ab15cfa7ad19b52a13ef96e26335ec --- /dev/null +++ b/src/maple_me/src/me_delegate_rc.cpp @@ -0,0 +1,808 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +#include "me_delegate_rc.h" +#include "mir_builder.h" +#include "me_hdse.h" +// This phase finds local ref pointer variables that are delegated and thus can +// have their RC omitted. This optimization is done on a per-SSA version basis. +// As a result, an overall criterion is that the SSA version must not have +// appeared as a phi operand. +// +// There are two forms of delegation: +// +// Form A. The following conditions have to be satisfied for an SSA version to be +// delegated: +// (1) In the BB where it is defined, there is a last use of it as the assigned +// value in an assignment statement, which is either iassign or +// dassign-to-global, and the assignment statement is marked need_incref. +// (2) Between its definition and last use, there is no statement that may +// potentially raise an exception. +// For each such case, the optimization is as follows: +// (a) The SSA version is renamed to a new and unique preg with PTY_ptr type; +// this has the effect of eliminating its store and loads and its +// decref-before-return; +// (b) The need_incref flag in the respective assignment statement is cleared. +// The reference count correctness is maintained because the removed decref and +// incref are for the same object. +// +// Form B. An SSA version is determined to not need incref at its def and not +// need decref when it becomes dead. In this case, the SSA version is renamed +// to a new and unique preg with PTY_ptr type as in (a) above. +// +// There are different ways to establish that an SSA version does not need any RC: +// B1: The SSA version is never dereferenced and never copied to another variable +// or passed on as a parameter. +// B2: The RHS of its definition is iread of a final field with this as base or +// dread of a static final field. +// B3: Within the SSA version's live range, there is no operation that can result +// in decref of any object.
(TODO) + +namespace { +// following intrinsics can throw exception +const std::set canThrowIntrinsicsList { + maple::INTRN_MPL_CLINIT_CHECK, + maple::INTRN_MPL_BOUNDARY_CHECK, + maple::INTRN_JAVA_CLINIT_CHECK, + maple::INTRN_JAVA_CHECK_CAST, + maple::INTRN_JAVA_THROW_ARITHMETIC, +}; +} + +namespace maple { +static bool CheckOp(const MeStmt &stmt, const Opcode op) { + return stmt.GetOp() == op; +} +static bool IsIntrinsic(const MeStmt &stmt) { + return CheckOp(stmt, OP_intrinsiccall) || CheckOp(stmt, OP_intrinsiccallassigned) || + CheckOp(stmt, OP_xintrinsiccall) || CheckOp(stmt, OP_xintrinsiccallassigned) || + CheckOp(stmt, OP_intrinsiccallwithtype) || CheckOp(stmt, OP_intrinsiccallwithtypeassigned); +} + +static bool IsCleanUpStmt(const MeStmt &stmt) { + return CheckOp(stmt, OP_intrinsiccall) && + static_cast(stmt).GetIntrinsic() == INTRN_MPL_CLEANUP_LOCALREFVARS; +} + +static bool IsVarDecRefStmt(const MeStmt &stmt) { + return (CheckOp(stmt, OP_decref) && stmt.GetOpnd(0)->GetMeOp() == kMeOpVar) || + CheckOp(stmt, OP_decrefreset); +} + +void DelegateRC::SetCantDelegate(const MapleMap &meVarPhiList) { + for (auto it = meVarPhiList.begin(); it != meVarPhiList.end(); ++it) { + const OriginalSt *ost = ssaTab.GetOriginalStFromID(it->first); + if (!ost->IsSymbolOst() || ost->GetIndirectLev() != 0) { + continue; + } + MeVarPhiNode *mePhi = it->second; + if (!mePhi->GetIsLive()) { + continue; + } + for (VarMeExpr *phiOpnd : mePhi->GetOpnds()) { + verStCantDelegate[phiOpnd->GetVstIdx()] = true; + } + } +} + +void DelegateRC::SaveDerefedOrCopiedVst(const MeExpr *expr) { + CHECK_NULL_FATAL(expr); + while (expr->GetOp() == OP_retype) { + expr = expr->GetOpnd(0); + } + if (expr->GetMeOp() == kMeOpVar) { + const auto *varExpr = static_cast(expr); + verStDerefedCopied[varExpr->GetVstIdx()] = true; + } +} + +bool DelegateRC::IsCopiedOrDerefedOp(const Opcode op) { + return op == OP_dassign || op == OP_maydassign || op == OP_regassign || op == OP_syncenter || + op == OP_syncexit || op == OP_throw || op == OP_return || op == OP_iassign || // cause var copied + kOpcodeInfo.IsCall(op); // callstmt need considering parameter +} + +void DelegateRC::CollectDerefedOrCopied(const MeStmt &stmt) { + Opcode op = stmt.GetOp(); + if (!IsCopiedOrDerefedOp(op)) { + return; + } + for (size_t i = 0; i < stmt.NumMeStmtOpnds(); ++i) { + MeExpr *curOpnd = stmt.GetOpnd(i); + SaveDerefedOrCopiedVst(curOpnd); + } +} + +void DelegateRC::CollectDerefedOrCopied(const MeExpr &x) { + Opcode op = x.GetOp(); + if (op == OP_iaddrof || op == OP_add) { + for (size_t i = 0; i < x.GetNumOpnds(); ++i) { + SaveDerefedOrCopiedVst(x.GetOpnd(i)); + } + return; + } + + if (op == OP_cvt) { + // in some cases, we have cvt from int to ref + // dassign %Reg0_I 0 (cvt i32 ref (dread ref %Reg0_XXXX)) + // cvt ref i32 (dread i32 %Reg0_I) + if (x.GetPrimType() == PTY_ref && x.GetOpnd(0)->GetMeOp() == kMeOpVar) { + auto *baseVar = static_cast(x.GetOpnd(0)); + verStDerefedCopied[baseVar->GetVstIdx()] = true; + // collect the def of basevar + if (baseVar->GetDefBy() == kDefByStmt) { + MeStmt *defStmt = baseVar->GetDefStmt(); + if (defStmt->GetOp() == OP_dassign && defStmt->GetRHS()->GetOp() == OP_cvt) { + SaveDerefedOrCopiedVst(defStmt->GetRHS()->GetOpnd(0)); + } + } else if (baseVar->GetDefBy() == kDefByPhi) { + MeVarPhiNode &defPhi = baseVar->GetDefPhi(); + for (VarMeExpr *phiOpnd : defPhi.GetOpnds()) { + if (phiOpnd->GetDefBy() == kDefByStmt) { + MeStmt *defStmt = phiOpnd->GetDefStmt(); + if (defStmt->GetOp() == OP_dassign && defStmt->GetRHS()->GetOp() 
== OP_cvt) { + SaveDerefedOrCopiedVst(defStmt->GetRHS()->GetOpnd(0)); + } + } + } // next phi opnd + } // end of all phi + } + return; + } + + if (op == OP_array || + (op == OP_intrinsicop && static_cast(x).GetIntrinsic() == INTRN_JAVA_ARRAY_LENGTH) || + (op == OP_intrinsicopwithtype && static_cast(x).GetIntrinsic() == INTRN_JAVA_INSTANCE_OF)) { + CHECK_FATAL(x.GetNumOpnds() != 0, "container check"); + SaveDerefedOrCopiedVst(x.GetOpnd(0)); + return; + } + + if (x.GetMeOp() == kMeOpIvar) { + SaveDerefedOrCopiedVst(x.GetOpnd(0)); + } +} + +void DelegateRC::CollectVstCantDecrefEarly(MeExpr &opnd0, MeExpr &opnd1) { + if (opnd0.GetMeOp() != kMeOpVar || opnd1.GetMeOp() != kMeOpVar) { + return; + } + auto &varOpnd0 = static_cast(opnd0); + auto &varOpnd1 = static_cast(opnd1); + if (varOpnd0.GetDefBy() == kDefByNo || varOpnd1.GetDefBy() == kDefByNo) { + return; + } + BB *opnd0DefBB = varOpnd0.DefByBB(); + BB *opnd1DefBB = varOpnd1.DefByBB(); + + if (opnd0DefBB == opnd1DefBB) { + verStCantDecrefEarly[varOpnd0.GetVstIdx()] = true; + verStCantDecrefEarly[varOpnd1.GetVstIdx()] = true; + } else if (dominance.Dominate(*opnd0DefBB, *opnd1DefBB)) { + verStCantDecrefEarly[varOpnd0.GetVstIdx()] = true; + } else { + verStCantDecrefEarly[varOpnd1.GetVstIdx()] = true; + } +} + +void DelegateRC::CollectUseCounts(const MeExpr &x) { + if (x.GetMeOp() == kMeOpVar) { + const auto &varMeExpr = static_cast(x); + ASSERT(varMeExpr.GetVstIdx() < verStUseCounts.size(), "CollectUsesInfo: vstIdx out of bounds"); + verStUseCounts[varMeExpr.GetVstIdx()]++; + } +} + +void DelegateRC::CollectUsesInfo(const MeExpr &x) { + for (size_t i = 0; i < x.GetNumOpnds(); ++i) { + CollectUsesInfo(*x.GetOpnd(i)); + } + if ((x.GetOp() == OP_eq || x.GetOp() == OP_ne) && static_cast(x).GetOpndType() == PTY_ref) { + CollectVstCantDecrefEarly(*x.GetOpnd(0), *x.GetOpnd(1)); + } + CollectDerefedOrCopied(x); + CollectUseCounts(x); +} + +// traverse expression x; at each occurrence of rhsvar in x, decrement +// remainingUses +void DelegateRC::FindAndDecrUseCount(VarMeExpr *rhsVar, MeExpr *x, int32 &remainingUses) { + for (size_t i = 0; i < x->GetNumOpnds(); ++i) { + FindAndDecrUseCount(rhsVar, x->GetOpnd(i), remainingUses); + } + if (x == rhsVar) { + remainingUses--; + } +} + +bool DelegateRC::MayThrowException(MeStmt &stmt) { + if (CheckOp(stmt, OP_maydassign) || CheckOp(stmt, OP_throw)) { + return true; + } + + if (CheckOp(stmt, OP_dassign)) { + auto &dass = static_cast(stmt); + const MeExpr *rhs = dass.GetRHS(); + CHECK_NULL_FATAL(rhs); + return dass.GetWasMayDassign() || rhs->GetOp() == OP_gcmalloc || rhs->GetOp() == OP_gcmallocjarray; + } + + if (CheckOp(stmt, OP_regassign)) { + return stmt.GetRHS()->GetOp() == OP_gcmalloc || stmt.GetRHS()->GetOp() == OP_gcmallocjarray; + } + + if (IsIntrinsic(stmt)) { + auto &intrn = static_cast(stmt); + return canThrowIntrinsicsList.find(intrn.GetIntrinsic()) != canThrowIntrinsicsList.end(); + } + + if (kOpcodeInfo.IsCall(stmt.GetOp())) { + auto &callStmt = static_cast(stmt); + MIRFunction *callee = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callStmt.GetPUIdx()); + return (!callee->GetFuncAttrs().GetAttr(FUNCATTR_nothrow_exception)) || + (!MeOption::ignoreIPA && !callee->IsNoThrowException()); + } + return false; +} + +// Traverse backwards from fromstmt to tostmt to see if the single use of rhsvar +// in fromstmt is the last use of rhsvar; tostmt is the statement that defines +// rhsvar, so it can be assumed that tostmt does not contain any use; this check +// makes use of verStUseCounts in its
determination. In addition, if it comes +// across any stmt that can raise exception, also return false. +bool DelegateRC::ContainAllTheUses(VarMeExpr *rhsVar, const MeStmt &fromStmt, const MeStmt *toStmt) { + int32 remainingUses = static_cast(verStUseCounts[rhsVar->GetVstIdx()]) - 1; + for (MeStmt *cur = fromStmt.GetPrev(); cur != toStmt; cur = cur->GetPrev()) { + // do not count decref operands + if (IsVarDecRefStmt(*cur)) { + continue; + } + if (MayThrowException(*cur)) { + return false; + } + if (IsCleanUpStmt(*cur)) { + continue; + } + for (size_t i = 0; i < cur->NumMeStmtOpnds(); ++i) { + FindAndDecrUseCount(rhsVar, cur->GetOpnd(i), remainingUses); + } + } + CHECK_FATAL(remainingUses >= 0, "ContainAllTheUses: inconsistent use count"); + return remainingUses == 0; +} + +// return the RegMeExpr node to replace the original temp; nullptr if not successful +RegMeExpr *DelegateRC::RHSTempDelegated(MeExpr *rhs, MeStmt &useStmt) { + if (rhs->GetMeOp() != kMeOpVar) { + return nullptr; + } + auto *rhsVar = static_cast(rhs); + if (verStCantDelegate[rhsVar->GetVstIdx()]) { + return nullptr; + } + if (refVar2RegMap.find(rhsVar) != refVar2RegMap.end()) { + return nullptr; // already delegated by another assignment + } + const OriginalSt *ost = ssaTab.GetOriginalStFromID(rhsVar->GetOStIdx()); + if (ost->IsFormal() || ost->GetMIRSymbol()->IsGlobal()) { + return nullptr; + } + if (rhsVar->GetDefBy() == kDefByMustDef) { + MustDefMeNode &mustDef = rhsVar->GetDefMustDef(); + ASSERT(mustDef.GetLHS() == rhsVar, "DelegateRCTemp: inconsistent mustdef"); + MeStmt *callStmt = mustDef.GetBase(); + if (callStmt->GetBB() != useStmt.GetBB()) { + return nullptr; + } + if (!ContainAllTheUses(rhsVar, useStmt, callStmt)) { + return nullptr; + } + if (enabledDebug) { + LogInfo::MapleLogger() << "delegaterc of form A for func " << func.GetName() << '\n'; + LogInfo::MapleLogger() << "\nreplace stmt :\n" << '\n'; + callStmt->Dump(func.GetIRMap()); + } + // replace temp by a new preg + rhsVar->SetDefBy(kDefByNo); + RegMeExpr *curReg = nullptr; + if (ost->GetMIRSymbol()->GetType()->GetPrimType() == PTY_ptr) { + curReg = irMap.CreateRegMeExpr(PTY_ptr); + } else { + curReg = irMap.CreateRegRefMeExpr(*rhsVar); + } + refVar2RegMap[rhsVar] = curReg; // record this replacement + mustDef.UpdateLHS(*curReg); + if (enabledDebug) { + LogInfo::MapleLogger() << "with stmt :\n" << '\n'; + mustDef.GetBase()->Dump(func.GetIRMap()); + } + return curReg; + } else if (rhsVar->GetDefBy() == kDefByStmt) { + MeStmt *defStmt = rhsVar->GetDefStmt(); + if (defStmt->GetBB() != useStmt.GetBB()) { + return nullptr; + } + if (!ContainAllTheUses(rhsVar, useStmt, defStmt)) { + return nullptr; + } + MeExpr *rhsExpr = defStmt->GetRHS(); + bool defStmtNeedIncref = defStmt->NeedIncref(); + CHECK_FATAL(defStmt->GetOp() == OP_dassign || defStmt->GetOp() == OP_maydassign, + "DelegateRCTemp: unexpected stmt op for kDefByStmt"); + ASSERT(defStmt->GetVarLHS() == rhsVar, "DelegateRCTemp: inconsistent def by dassign"); + if (enabledDebug) { + LogInfo::MapleLogger() << "delegaterc of form A for func " << func.GetName() << '\n'; + LogInfo::MapleLogger() << "\nreplace stmt :\n" << '\n'; + defStmt->Dump(func.GetIRMap()); + } + // replace temp by a new preg + rhsVar->SetDefBy(kDefByNo); + RegMeExpr *curReg = irMap.CreateRegRefMeExpr(*rhsVar); + refVar2RegMap[rhsVar] = curReg; // record this replacement + // create new regassign statement + MeStmt *regass = irMap.CreateRegassignMeStmt(*curReg, *rhsExpr, *defStmt->GetBB()); + curReg->SetDefByStmt(*regass); + if 
(defStmtNeedIncref) { + regass->EnableNeedIncref(); + } else { + regass->DisableNeedIncref(); + } + defStmt->GetBB()->ReplaceMeStmt(defStmt, regass); + regass->SetSrcPos(defStmt->GetSrcPosition()); + if (enabledDebug) { + LogInfo::MapleLogger() << "with stmt :\n" << '\n'; + regass->Dump(func.GetIRMap()); + } + return curReg; + } + return nullptr; +} + +// process each assignment statement for Form A delegation +void DelegateRC::DelegateRCTemp(MeStmt &stmt) { + switch (stmt.GetOp()) { + case OP_iassign: { + if (!stmt.NeedIncref()) { + break; + } + IvarMeExpr *lhs = static_cast(stmt).GetLHSVal(); + if (lhs->IsRCWeak() || lhs->IsVolatile()) { + break; + } + if (lhs->GetBase()->GetOp() == OP_array) { + // array may raise exception + break; + } + MIRType *baseType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(lhs->GetTyIdx()); + ASSERT(baseType->GetKind() == kTypePointer, "DelegateRCTemp: unexpected type"); + MIRType *pointedType = static_cast(baseType)->GetPointedType(); + if (pointedType->GetKind() == kTypeClass && + static_cast(pointedType)->IsFieldRCUnownedRef(lhs->GetFieldID())) { + break; + } + MeExpr *rhs = stmt.GetRHS(); + CHECK_FATAL(rhs != nullptr, "null rhs check"); + RegMeExpr *curReg = RHSTempDelegated(rhs, stmt); + if (curReg != nullptr) { + rhs = curReg; + stmt.DisableNeedIncref(); + } + break; + } + case OP_dassign: { + if (!stmt.NeedIncref()) { + break; + } + VarMeExpr *lhsVar = stmt.GetVarLHS(); + CHECK_FATAL(lhsVar != nullptr, "null lhs check"); + MeExpr *rhs = stmt.GetRHS(); + CHECK_FATAL(rhs != nullptr, "null rhs check"); + RegMeExpr *curReg = RHSTempDelegated(rhs, stmt); + if (curReg != nullptr) { + rhs = curReg; + stmt.DisableNeedIncref(); + } + break; + } + case OP_return: { + auto &retStmt = static_cast(stmt); + if (!retStmt.NumMeStmtOpnds()) { + break; + } + MeExpr *ret = retStmt.GetOpnd(0); + if (ret->GetPrimType() != PTY_ref && ret->GetPrimType() != PTY_ptr) { + break; + } + if (ret->GetMeOp() == kMeOpVar) { + auto *val = static_cast(ret); + if (val->GetDefBy() == kDefByMustDef) { + MeStmt *defStmt = val->GetDefMustDef().GetBase(); + if (retStmt.GetBB() == defStmt->GetBB() && ContainAllTheUses(val, stmt, defStmt)) { + RegMeExpr *curReg = RHSTempDelegated(ret, stmt); + if (curReg != nullptr) { + retStmt.SetOpnd(0, curReg); + } + } + } else if (val->GetDefBy() == kDefByStmt) { + MeStmt *defStmt = val->GetDefStmt(); + MeExpr *rhs = defStmt->GetRHS(); + CHECK_FATAL(rhs != nullptr, "null rhs check"); + const OriginalSt *ost = nullptr; + if (rhs->GetMeOp() == kMeOpVar) { + auto *theVar = static_cast(rhs); + ost = ssaTab.GetSymbolOriginalStFromID(theVar->GetOStIdx()); + } + if (rhs->IsGcmalloc() || (rhs->GetMeOp() == kMeOpIvar && !static_cast(rhs)->IsFinal()) || + (rhs->GetMeOp() == kMeOpVar && !ost->IsFinal() && ost->GetMIRSymbol()->IsGlobal()) || + (rhs->GetOp() == OP_regread && static_cast(rhs)->GetRegIdx() == -kSregThrownval)) { + if (retStmt.GetBB() == defStmt->GetBB() && ContainAllTheUses(val, stmt, defStmt)) { + RegMeExpr *curReg = RHSTempDelegated(ret, stmt); + if (curReg != nullptr) { + retStmt.SetOpnd(0, curReg); + // Convert following cases: + // dassign %Reg_xxx (iread ref xxx) + // return (dread ref %Reg_xxx) + // To: + // // iread will be converted to LoadRefField + // regassign %1 (iread ref xxx) [RC+] + // return (regread ref %1) + if (rhs->GetMeOp() == kMeOpIvar || + (rhs->GetMeOp() == kMeOpVar && !ost->IsFinal() && ost->GetMIRSymbol()->IsGlobal())) { + curReg->GetDefStmt()->EnableNeedIncref(); + } + } + } + } + } + } + break; + } + default:; + } +} + 
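+// Editor's note: an illustrative sketch of the Form A rewrite above, not part
+// of this patch. IR spellings are approximate; %tmp, %1 and <*SomeClass> are
+// hypothetical names. Given a local ref temp whose only use follows its def
+// in the same BB:
+//   dassign %tmp (rhs)                          # defStmt
+//   iassign <*SomeClass> 1 (base, dread %tmp)   # useStmt, marked NeedIncref
+// RHSTempDelegated() replaces the temp with a preg and moves the incref
+// obligation onto the new regassign, so DelegateRCTemp() can clear it on the
+// iassign:
+//   regassign ref %1 (rhs)                      # inherits defStmt's NeedIncref
+//   iassign <*SomeClass> 1 (base, regread %1)   # NeedIncref disabled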
+bool DelegateRC::FinalRefNoRC(const MeExpr &x) { + if (x.GetMeOp() == kMeOpVar) { + const auto &theVar = static_cast(x); + const OriginalSt *ost = ssaTab.GetSymbolOriginalStFromID(theVar.GetOStIdx()); + return ost->IsFinal() && ost->GetMIRSymbol()->IsGlobal(); + } else if (x.GetMeOp() == kMeOpIvar) { + if (func.GetMirFunc()->IsConstructor() || func.GetMirFunc()->IsStatic() || + func.GetMirFunc()->GetFormalCount() == 0) { + return false; + } + const auto &ivar = static_cast(x); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ivar.GetTyIdx()); + ASSERT(ty->GetKind() == kTypePointer, "FinalRefNoRC: pointer type expected"); + MIRType *pointedTy = static_cast(ty)->GetPointedType(); + if (pointedTy->GetKind() == kTypeClass) { + auto *structType = static_cast(pointedTy); + FieldID fieldID = ivar.GetFieldID(); + if (structType->IsFieldFinal(fieldID) && !structType->IsFieldRCUnownedRef(fieldID)) { + if (ivar.GetBase()->GetMeOp() != kMeOpVar) { + return false; + } + const auto *varMeExpr = static_cast(ivar.GetBase()); + const OriginalSt *ost = ssaTab.GetOriginalStFromID(varMeExpr->GetOStIdx()); + if (ost->IsSymbolOst()) { + const MIRSymbol *mirst = ost->GetMIRSymbol(); + return mirst == func.GetMirFunc()->GetFormal(0); + } + } + } + } + return false; +} + +// return true if it is OK to omit reference counting for the LHS variable; if +// returning true, only_with_decref specifies whether a decref needs inserted +bool DelegateRC::CanOmitRC4LHSVar(const MeStmt &stmt, bool &onlyWithDecref) { + onlyWithDecref = false; + switch (stmt.GetOp()) { + case OP_dassign: + case OP_maydassign: { + const VarMeExpr *theLhs = stmt.GetVarLHS(); + MeExpr *theRhs = stmt.GetRHS(); + CHECK_FATAL(theLhs != nullptr, "null ptr check"); + CHECK_FATAL(theRhs != nullptr, "null ptr check"); + if (theLhs->GetPrimType() != PTY_ref || theLhs->GetNoDelegateRC()) { + return false; + } + const OriginalSt *ost = ssaTab.GetOriginalStFromID(theLhs->GetOStIdx()); + if (!ost->IsLocal() || ost->IsFormal()) { + return false; + } + if (ost->GetMIRSymbol()->IsInstrumented()) { + return false; + } + if (verStCantDelegate[theLhs->GetVstIdx()]) { + return false; + } + if (theRhs->GetMeOp() == kMeOpIvar) { + auto *ivarRhs = static_cast(theRhs); + if (ivarRhs->IsVolatile() || ivarRhs->IsRCWeak()) { + return false; + } + } + // condition B2 + if (FinalRefNoRC(*theRhs)) { + return true; + } + // condition B1 + if (!verStDerefedCopied[theLhs->GetVstIdx()]) { + onlyWithDecref = theRhs->GetOp() == OP_gcmalloc || theRhs->GetOp() == OP_gcmallocjarray || + (theRhs->GetOp() == OP_regread && static_cast(theRhs)->GetRegIdx() == -kSregThrownval); + if (onlyWithDecref && verStCantDecrefEarly[theLhs->GetVstIdx()]) { + onlyWithDecref = false; + return false; + } + return true; + } + break; + } + default: + if (kOpcodeInfo.IsCallAssigned(stmt.GetOp())) { + const MapleVector &mustdefList = stmt.GetMustDefList(); + if (mustdefList.empty()) { + return false; + } + const MeExpr *lhs = mustdefList.front().GetLHS(); + if (lhs->GetMeOp() != kMeOpVar) { + return false; + } + const auto *theLhs = static_cast(lhs); + if (theLhs->GetPrimType() != PTY_ref) { + return false; + } + const OriginalSt *ost = ssaTab.GetOriginalStFromID(theLhs->GetOStIdx()); + if (!ost->IsLocal() || ost->IsFormal()) { + return false; + } + if (verStCantDelegate[theLhs->GetVstIdx()]) { + return false; + } + if (!verStDerefedCopied[theLhs->GetVstIdx()]) { + // condition B1 + if (!verStCantDecrefEarly[theLhs->GetVstIdx()]) { + onlyWithDecref = true; + return true; + } + } + } + break; + 
} + return false; +} + +void DelegateRC::DelegateHandleNoRCStmt(MeStmt &stmt, bool addDecref) { + VarMeExpr *theLhs = nullptr; + MeExpr *rhsExpr = stmt.GetRHS(); + if (CheckOp(stmt, OP_dassign) || CheckOp(stmt, OP_maydassign)) { + theLhs = stmt.GetVarLHS(); + } else if (kOpcodeInfo.IsCallAssigned(stmt.GetOp()) && addDecref) { + theLhs = static_cast(stmt.GetAssignedLHS()); + } else { + return; + } + CHECK_FATAL(theLhs != nullptr, "null ptr check"); + if (theLhs->GetPrimType() != PTY_ref) { + return; + } + + BB &bb = *stmt.GetBB(); + // bool defstmt_need_incref; + if (enabledDebug) { + LogInfo::MapleLogger() << "delegaterc of form B for func " << func.GetName() << '\n'; + LogInfo::MapleLogger() << "\nreplace stmt :\n" << '\n'; + stmt.Dump(func.GetIRMap()); + } + // replace temp by a new preg + MeStmt *newStmt = &stmt; + theLhs->SetDefBy(kDefByNo); + RegMeExpr *curReg = irMap.CreateRegRefMeExpr(*theLhs); + refVar2RegMap[theLhs] = curReg; // record this replacement + if (rhsExpr != nullptr) { + // create new regassign statement + MeStmt *regass = irMap.CreateRegassignMeStmt(*curReg, *rhsExpr, *stmt.GetBB()); + curReg->SetDefByStmt(*regass); + bb.ReplaceMeStmt(newStmt, regass); + newStmt = regass; // for inserting defref after it below + } else { + // callassigned + static_cast(stmt).SetCallReturn(*curReg); + } + if (enabledDebug) { + LogInfo::MapleLogger() << "with stmt :\n" << '\n'; + newStmt->Dump(func.GetIRMap()); + } + if (addDecref) { + // We use RC intrinsic instead of direct function call, + // so that mplcg can decide how to generate code for it. + // for example: when GCONLY is enabled, decref will be omitted. + std::vector opnds = { curReg }; + IntrinsiccallMeStmt *decRefIntrin = irMap.CreateIntrinsicCallMeStmt(INTRN_MCCDecRef, opnds); + decRefIntrin->SetSrcPos(newStmt->GetSrcPosition()); + bb.InsertMeStmtAfter(newStmt, decRefIntrin); + } +} + +void DelegateRC::RenameDelegatedRefVarUses(MeStmt &meStmt, MeExpr *meExpr) { + CHECK_NULL_FATAL(meExpr); + for (size_t i = 0; i < meExpr->GetNumOpnds(); ++i) { + RenameDelegatedRefVarUses(meStmt, meExpr->GetOpnd(i)); + } + if (meExpr->GetMeOp() == kMeOpVar) { + auto *varMeExpr = static_cast(meExpr); + auto it = refVar2RegMap.find(varMeExpr); + if (it != refVar2RegMap.end()) { + irMap.ReplaceMeExprStmt(meStmt, *varMeExpr, *it->second); + } + } +} + +void DelegateRC::SetCantDelegateAndCountUses() { + auto eIt = func.valid_end(); + for (auto bIt = func.valid_begin(); bIt != eIt; ++bIt) { + auto &bb = **bIt; + SetCantDelegate(bb.GetMevarPhiList()); + for (auto &stmt : bb.GetMeStmts()) { + // do not count decref operands + if (IsVarDecRefStmt(stmt)) { + continue; + } + // do not count the cleanup intrinsic + if (IsCleanUpStmt(stmt)) { + continue; + } + for (size_t i = 0; i < stmt.NumMeStmtOpnds(); i++) { + CHECK_FATAL(stmt.GetOpnd(i), "null mestmtopnd check"); + CollectUsesInfo(*stmt.GetOpnd(i)); + } + CollectDerefedOrCopied(stmt); + } + } +} + +void DelegateRC::DelegateStmtRC() { + auto eIt = func.valid_end(); + for (auto bIt = func.valid_begin(); bIt != eIt; ++bIt) { + auto &bb = **bIt; + for (auto &stmt : bb.GetMeStmts()) { + bool withDecref = false; + if (CanOmitRC4LHSVar(stmt, withDecref)) { + DelegateHandleNoRCStmt(stmt, withDecref); // Form B + } else { + DelegateRCTemp(stmt); // Form A + } + } + } +} + +std::set DelegateRC::RenameAndGetLiveLocalRefVar() { + std::set liveLocalrefvars; + auto eIt = func.valid_end(); + for (auto bIt = func.valid_begin(); bIt != eIt; ++bIt) { + auto &bb = **bIt; + for (auto &stmt : bb.GetMeStmts()) { + if 
(IsVarDecRefStmt(stmt)) { + continue; // it is wrong to replace decref operand as it is intended for the original localrefvar + } + // no need process the cleanup intrinsic + if (IsCleanUpStmt(stmt)) { + continue; + } + for (size_t i = 0; i < stmt.NumMeStmtOpnds(); ++i) { + CHECK_FATAL(stmt.GetOpnd(i), "null mestmtopnd check"); + RenameDelegatedRefVarUses(stmt, stmt.GetOpnd(i)); + } + // for live_localrefvars + if (CheckOp(stmt, OP_dassign) || CheckOp(stmt, OP_maydassign)) { + VarMeExpr *lhs = stmt.GetVarLHS(); + CHECK_FATAL(lhs != nullptr, "null ptr check"); + const OriginalSt *ost = ssaTab.GetOriginalStFromID(lhs->GetOStIdx()); + if (ost->IsLocal() && !ost->IsFormal() && !ost->IsIgnoreRC() && lhs->GetPrimType() == PTY_ref) { + liveLocalrefvars.insert(lhs->GetOStIdx()); + } + } else if (kOpcodeInfo.IsCallAssigned(stmt.GetOp())) { + MapleVector *mustdefList = stmt.GetMustDefList(); + CHECK_NULL_FATAL(mustdefList); + if (mustdefList->empty()) { + continue; + } + MeExpr *theLhs = mustdefList->front().GetLHS(); + if (theLhs->GetMeOp() == kMeOpVar && theLhs->GetPrimType() == PTY_ref) { + auto *varLhs = static_cast(theLhs); + const OriginalSt *ost = ssaTab.GetOriginalStFromID(varLhs->GetOStIdx()); + if (ost->IsLocal() && !ost->IsFormal() && !ost->IsIgnoreRC()) { + liveLocalrefvars.insert(varLhs->GetOStIdx()); + } + } + } + } + } + return liveLocalrefvars; +} + +void DelegateRC::CleanUpDeadLocalRefVar(const std::set &liveLocalrefvars) { + for (BB *bb : func.GetCommonExitBB()->GetPred()) { + auto &meStmts = bb->GetMeStmts(); + if (meStmts.empty() || meStmts.back().GetOp() != OP_return) { + continue; + } + MeStmt *stmt = meStmts.back().GetPrev(); + while (stmt != nullptr && stmt->GetOp() != OP_intrinsiccall) { + stmt = stmt->GetPrev(); + } + if (stmt == nullptr) { + continue; + } + if (IsCleanUpStmt(*stmt)) { + continue; + } + // delete the operands that are not live + size_t nextPos = 0; + IntrinsiccallMeStmt *intrin = static_cast(stmt); + for (size_t i = 0; i < intrin->NumMeStmtOpnds(); ++i) { + auto *varMeExpr = static_cast(intrin->GetOpnd(i)); + if (liveLocalrefvars.find(varMeExpr->GetOStIdx()) == liveLocalrefvars.end()) { + continue; + } + if (nextPos != i) { + intrin->SetOpnd(nextPos, varMeExpr); + } + ++nextPos; + } + intrin->GetOpnds().erase(intrin->GetOpnds().begin() + nextPos, intrin->GetOpnds().end()); + } +} + +AnalysisResult *MeDoDelegateRC::Run(MeFunction *func, MeFuncResultMgr *m, ModuleResultMgr*) { + static uint32 pUcount = 0; + auto *dom = static_cast(m->GetAnalysisResult(MeFuncPhase_DOMINANCE, func)); + ASSERT(dom != nullptr, "dominance phase has problem"); + { + // invoke hdse to update isLive only + MeHDSE hdse(*func, *dom, *func->GetIRMap(), DEBUGFUNC(func)); + hdse.InvokeHDSEUpdateLive(); + } + if (DEBUGFUNC(func)) { + LogInfo::MapleLogger() << " Processing " << func->GetMirFunc()->GetName() << '\n'; + } + DelegateRC delegaterc(*func, *dom, NewMemPool(), DEBUGFUNC(func)); + if (pUcount > MeOption::delRcPULimit) { + ++pUcount; + return nullptr; + } + if (pUcount == MeOption::delRcPULimit) { + LogInfo::MapleLogger() << func->GetMirFunc()->GetName() + << " is last PU optimized by delegaterc under -delrcpulimit option" << '\n'; + } + // first pass + delegaterc.SetCantDelegateAndCountUses(); + // main pass + delegaterc.DelegateStmtRC(); + // final pass: rename the uses of the delegated ref pointer variable versions; + // set live_localrefvars based on appearances on LHS + // to detect dead localrefvars + std::set liveLocalrefvars = delegaterc.RenameAndGetLiveLocalRefVar(); + // 
postpass: go through the cleanup intrinsics to delete dead localrefvars + delegaterc.CleanUpDeadLocalRefVar(liveLocalrefvars); + if (DEBUGFUNC(func)) { + LogInfo::MapleLogger() << "\n============== After DELEGATE RC =============" << '\n'; + func->GetIRMap()->Dump(); + } + ++pUcount; + return nullptr; +} +} // namespace maple diff --git a/src/maple_me/src/me_may2dassign.cpp b/src/maple_me/src/me_may2dassign.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4fbe780dd6eb30722f262b382032aa60f4e0ff84 --- /dev/null +++ b/src/maple_me/src/me_may2dassign.cpp @@ -0,0 +1,49 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +#include "me_may2dassign.h" + +// this phase converts all maydassign back to dassign +namespace maple { +void May2Dassign::DoIt() { + auto eIt = func->valid_end(); + for (auto bIt = func->valid_begin(); bIt != eIt; ++bIt) { + auto *bb = *bIt; + for (auto &stmt : bb->GetMeStmts()) { + if (stmt.GetOp() != OP_maydassign) { + continue; + } + auto &mass = static_cast(stmt); + // chiList for Maydassign has only 1 element + CHECK_FATAL(!mass.GetChiList()->empty(), "chiList is empty in DoIt"); + VarMeExpr *thelhs = mass.GetChiList()->begin()->second->GetLHS(); + ASSERT(mass.GetMayDassignSym() == ssaTab->GetOriginalStFromID(thelhs->GetOStIdx()), + "MeDoMay2Dassign: cannot find maydassign lhs"); + auto *dass = static_cast(irMap->CreateDassignMeStmt(*thelhs, *mass.GetRHS(), *bb)); + dass->SetNeedDecref(mass.NeedDecref()); + dass->SetNeedIncref(mass.NeedIncref()); + dass->SetWasMayDassign(true); + dass->SetChiList(*mass.GetChiList()); + dass->GetChiList()->clear(); + bb->ReplaceMeStmt(&mass, dass); + } + } +} + +AnalysisResult *MeDoMay2Dassign::Run(MeFunction *func, MeFuncResultMgr*, ModuleResultMgr*) { + May2Dassign may2dassign(func); + may2dassign.DoIt(); + return nullptr; +} +} // namespace maple diff --git a/src/maple_me/src/me_option.cpp b/src/maple_me/src/me_option.cpp index 6e121ff963269f3a2d2882ec9e1cc45e5037e407..fe82545779fc7d3b1e120fad733a9e244ee024be 100644 --- a/src/maple_me/src/me_option.cpp +++ b/src/maple_me/src/me_option.cpp @@ -49,6 +49,7 @@ uint32 MeOption::epreLimit = UINT32_MAX; uint32 MeOption::eprePULimit = UINT32_MAX; uint32 MeOption::lpreLimit = UINT32_MAX; uint32 MeOption::lprePULimit = UINT32_MAX; +uint32 MeOption::pregRenameLimit = UINT32_MAX; bool MeOption::noDelegateRC = false; bool MeOption::noCondBasedRC = false; bool MeOption::clinitPre = true; @@ -62,6 +63,8 @@ bool MeOption::lpreSpeculate = false; bool MeOption::spillAtCatch = false; bool MeOption::rcLowering = true; bool MeOption::optDirectCall = false; +bool MeOption::propAtPhi = true; +bool MeOption::dseKeepRef = false; void MeOption::SplitPhases(const std::string &str, std::unordered_set &set) const { std::string s{ str }; diff --git a/src/maple_me/src/me_phase_manager.cpp b/src/maple_me/src/me_phase_manager.cpp index 53437b15485ecba9a4a5cbcdb11f65e310f906aa..b29be385697dc7fb8c5e42fefd839a95dd4607c0 100644 --- a/src/maple_me/src/me_phase_manager.cpp +++ 
b/src/maple_me/src/me_phase_manager.cpp @@ -20,12 +20,26 @@ #include "me_phase.h" #include "me_cfg.h" #include "me_alias_class.h" +#include "me_bypath_eh.h" +#include "me_critical_edge.h" +#include "me_loop_canon.h" +#include "me_abco.h" +#include "me_dse.h" +#include "me_hdse.h" +#include "me_prop.h" +#include "me_rename2preg.h" #include "me_ssa_lpre.h" #include "me_ssa_epre.h" #include "me_stmt_pre.h" #include "me_store_pre.h" #include "me_cond_based_rc.h" #include "me_cond_based_npc.h" +#include "preg_renamer.h" +#include "me_ssa_devirtual.h" +#include "me_delegate_rc.h" +#include "me_analyze_rc.h" +#include "me_may2dassign.h" +#include "me_loop_analysis.h" #include "me_ssa.h" #include "me_irmap.h" #include "me_bb_layout.h" diff --git a/src/maple_me/src/me_rc_lowering.cpp b/src/maple_me/src/me_rc_lowering.cpp index d5467bab225bf3c72b68b75159bfd6aebc41eab1..3fe6599922ece9714bdb518f127bf245b86a3905 100644 --- a/src/maple_me/src/me_rc_lowering.cpp +++ b/src/maple_me/src/me_rc_lowering.cpp @@ -499,26 +499,17 @@ void RCLowering::RCLower() { MeExpr *RCLowering::HandleIncRefAndDecRefStmt(MeStmt &stmt) { Opcode opCode = stmt.GetOp(); - MIRIntrinsicID rcCallID = INTRN_UNDEFINED; - - if (opCode == OP_incref) { - rcCallID = INTRN_MCCIncRef; - } else if (opCode == OP_decref) { - rcCallID = INTRN_MCCDecRef; - } else if (opCode == OP_decrefreset) { - rcCallID = INTRN_MCCDecRefReset; + if (opCode == OP_decref) { + stmt.GetBB()->RemoveMeStmt(&stmt); + return stmt.GetOpnd(0); } - if (rcCallID == INTRN_UNDEFINED) { + if (opCode != OP_incref && opCode != OP_decrefreset) { return nullptr; } - auto &unaryMeStmt = static_cast(stmt); - if (opCode == OP_decref && !unaryMeStmt.GetDecrefBeforeExit()) { - stmt.GetBB()->RemoveMeStmt(&stmt); - return unaryMeStmt.GetOpnd(); - } - std::vector opnds = { unaryMeStmt.GetOpnd() }; + MIRIntrinsicID rcCallID = opCode == OP_incref ? INTRN_MCCIncRef : INTRN_MCCDecRefReset; + std::vector opnds = { stmt.GetOpnd(0) }; IntrinsiccallMeStmt *rcCall = CreateRCIntrinsic(rcCallID, stmt, opnds); stmt.GetBB()->ReplaceMeStmt(&stmt, rcCall); return nullptr; @@ -556,7 +547,9 @@ void RCLowering::BBLower(BB &bb) { initializedFields.clear(); needSpecialHandleException = bb.GetAttributes(kBBAttrIsCatch); for (auto &stmt : bb.GetMeStmts()) { - pendingDec = stmt.GetLHSRef(ssaTab, false); + if ((func.GetHints() & kAnalyzeRCed) == 0) { + pendingDec = stmt.GetLHSRef(ssaTab, false); + } Opcode opcode = stmt.GetOp(); if (opcode == OP_return) { rets.push_back(&stmt); diff --git a/src/maple_me/src/me_ssa_devirtual.cpp b/src/maple_me/src/me_ssa_devirtual.cpp new file mode 100644 index 0000000000000000000000000000000000000000..80dd519bee0fe6574fada77277149e39cc10283f --- /dev/null +++ b/src/maple_me/src/me_ssa_devirtual.cpp @@ -0,0 +1,44 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
+ */ +#include "me_ssa_devirtual.h" +#include "me_function.h" +#include "me_option.h" + +namespace maple { +AnalysisResult *MeDoSSADevirtual::Run(MeFunction *func, MeFuncResultMgr *frm, ModuleResultMgr *mrm) { + auto *dom = static_cast(frm->GetAnalysisResult(MeFuncPhase_DOMINANCE, func)); + ASSERT(dom != nullptr, "dominance phase has problem"); + auto *hMap = static_cast(frm->GetAnalysisResult(MeFuncPhase_IRMAP, func)); + ASSERT(hMap != nullptr, "hssaMap has problem"); + CHECK_FATAL(mrm != nullptr, "Needs module result manager for ipa"); + auto *kh = static_cast(mrm->GetAnalysisResult(MoPhase_CHA, &func->GetMIRModule())); + ASSERT(kh != nullptr, "KlassHierarchy has problem"); + Clone *clone = nullptr; + if (Options::O2) { + clone = static_cast(mrm->GetAnalysisResult(MoPhase_CLONE, &func->GetMIRModule())); + } + MeSSADevirtual meSSADevirtual(NewMemPool(), &func->GetMIRModule(), func, hMap, kh, dom, clone); + if (DEBUGFUNC(func)) { + SSADevirtual::debug = true; + } + meSSADevirtual.Perform(func->GetCommonEntryBB()); + if (DEBUGFUNC(func)) { + SSADevirtual::debug = false; + LogInfo::MapleLogger() << "\n============== After SSA Devirtualization =============" << "\n"; + func->Dump(false); + } + return nullptr; +} +} // namespace maple diff --git a/src/maple_me/src/me_stmt_pre.cpp b/src/maple_me/src/me_stmt_pre.cpp index 671788f639a600bd705df809d943bd5970e801c5..02ad99facf916917a4f55af33ed7d55e9e392b17 100644 --- a/src/maple_me/src/me_stmt_pre.cpp +++ b/src/maple_me/src/me_stmt_pre.cpp @@ -370,7 +370,7 @@ MeStmt *MeStmtPre::PhiOpndFromRes4Stmt(MeRealOcc *realZ, size_t j, MeExpr *&lhsV default: ASSERT(false, "MeStmtPre::PhiOpndFromRes4Stmt: NYI"); } - if (stmtQ->GetOp() == OP_dassign || stmtQ->GetOp() == OP_callassigned) { + if ((stmtQ->GetOp() == OP_dassign || stmtQ->GetOp() == OP_callassigned) && realZ->GetMeExpr() != nullptr) { MeExpr *retOpnd = GetReplaceMeExpr(realZ->GetMeExpr(), phiBB, j); if (retOpnd != nullptr) { lhsVar = retOpnd; @@ -532,7 +532,7 @@ void MeStmtPre::CreateSortedOccs() { PreWorkCand *workCand = GetWorkCand(); auto *stmtWkCand = static_cast(workCand); if ((stmtWkCand->GetTheMeStmt()->GetOp() == OP_dassign || stmtWkCand->GetTheMeStmt()->GetOp() == OP_callassigned) && - !stmtWkCand->LHSIsFinal()) { + stmtWkCand->GetTheMeStmt()->GetVarLHS() != nullptr && !stmtWkCand->LHSIsFinal()) { VarMeExpr *lhsVar = stmtWkCand->GetTheMeStmt()->GetVarLHS(); OStIdx ostIdx = lhsVar->GetOStIdx(); MapleMap*>::iterator uMapIt = useOccurMap.find(ostIdx); @@ -732,6 +732,9 @@ void MeStmtPre::ConstructUseOccurMap() { continue; } VarMeExpr *lhsVar = stmtWkCand->GetTheMeStmt()->GetVarLHS(); + if (lhsVar == nullptr) { + continue; + } OStIdx ostIdx = lhsVar->GetOStIdx(); if (useOccurMap.find(ostIdx) == useOccurMap.end()) { // add an entry for ostIdx @@ -763,7 +766,7 @@ PreStmtWorkCand *MeStmtPre::CreateStmtRealOcc(MeStmt &meStmt, int seqStmt) { wkCand = static_cast(wkCand->GetNext()); } MeExpr *meExpr = nullptr; - if (meStmt.GetOp() == OP_dassign || meStmt.GetOp() == OP_callassigned) { + if ((meStmt.GetOp() == OP_dassign || meStmt.GetOp() == OP_callassigned) && meStmt.GetVarLHS() != nullptr) { MapleStack *pStack = versionStackVec.at(meStmt.GetVarLHS()->GetOStIdx()); meExpr = pStack->top(); } diff --git a/src/maple_me/src/preg_renamer.cpp b/src/maple_me/src/preg_renamer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f25342a945b6f3ed1700ebd0a4108458c86e58cb --- /dev/null +++ b/src/maple_me/src/preg_renamer.cpp @@ -0,0 +1,129 @@ +/* + * Copyright (c) [2020] Huawei Technologies 
Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +#include "preg_renamer.h" +#include "alias_class.h" +#include "mir_builder.h" +#include "me_irmap.h" +namespace maple { +void PregRenamer::EnqueDefUses(std::list &qu, RegMeExpr *node, std::set &curVisited) { + // get its define + if (node->GetDefBy() == kDefByPhi) { + MeRegPhiNode &defPhi = node->GetDefPhi(); + for (auto it : defPhi.GetOpnds()) { + RegMeExpr *neibNode = it; // node's connected register node + if (neibNode != node && curVisited.find(neibNode) == curVisited.end()) { + qu.push_back(neibNode); + curVisited.insert(neibNode); + } + } + } + // get the phi which uses node as an operand + MapleSet &phiUseSet = node->GetPhiUseSet(); + for (auto setIt : phiUseSet) { + MeRegPhiNode *meRegPhi = setIt; + RegMeExpr *lhsReg = meRegPhi->GetLHS(); + if (lhsReg != node && curVisited.find(lhsReg) == curVisited.end()) { + qu.push_back(lhsReg); + curVisited.insert(lhsReg); + } + for (auto opdIt : meRegPhi->GetOpnds()) { + RegMeExpr *opndReg = opdIt; + if (opndReg != node && curVisited.find(opndReg) == curVisited.end()) { + qu.push_back(opndReg); + curVisited.insert(opndReg); + } + } + } +} + +void PregRenamer::RunSelf() { + // BFS the graph of register phi node; + std::set curVisited; + const MapleVector ®MeExprTable = irMap->GetRegMeExprTable(); + MIRPregTable *pregTab = func->GetMirFunc()->GetPregTab(); + std::vector firstAppearTable(pregTab->GetPregTable().size()); + uint32 renameCount = 0; + for (auto it : regMeExprTable) { + RegMeExpr *regMeExpr = it; + if (regMeExpr->GetRegIdx() < 0) { + continue; // special register + } + if (curVisited.find(regMeExpr) != curVisited.end()) { + continue; + } + // BFS the node and add all related nodes to the vector; + std::vector candidates; + std::list qu; + qu.push_back(regMeExpr); + candidates.push_back(regMeExpr); + bool useDefFromZeroVersion = false; + bool definedInTryBlock = false; + while (!qu.empty()) { + RegMeExpr *curNode = qu.back(); + qu.pop_back(); + // put all its neighbors into the queue + EnqueDefUses(qu, curNode, curVisited); + curVisited.insert(curNode); + candidates.push_back(curNode); + if (curNode->GetDefBy() == kDefByNo) { + // if any use are from zero version, we stop renaming all the candidates related to it issue #1420 + useDefFromZeroVersion = true; + } else if (curNode->DefByBB()->GetAttributes(kBBAttrIsTry)) { + definedInTryBlock = true; + } + } + if (useDefFromZeroVersion || definedInTryBlock) { + continue; // must be zero version. issue #1420 + } + // get all the nodes in candidates the same register + PregIdx newPregIdx = regMeExpr->GetRegIdx(); + ASSERT(static_cast(newPregIdx) < firstAppearTable.size(), "oversize "); + if (!firstAppearTable[newPregIdx]) { + // use the previous register + firstAppearTable[newPregIdx] = true; + continue; + } + newPregIdx = (regMeExpr->GetPrimType() == PTY_ref) ? 
+        pregTab->CreateRefPreg(*pregTab->PregFromPregIdx(regMeExpr->GetRegIdx())->GetMIRType()) :
+        pregTab->CreatePreg(regMeExpr->GetPrimType());
+    ++renameCount;
+    if (enabledDebug) {
+      LogInfo::MapleLogger() << "%" <<
+          pregTab->PregFromPregIdx(static_cast(regMeExpr->GetRegIdx()))->GetPregNo();
+      LogInfo::MapleLogger() << " renamed to %" << pregTab->PregFromPregIdx(newPregIdx)->GetPregNo() << '\n';
+    }
+    // rename all the registers in this candidate set
+    for (auto candiIt : candidates) {
+      RegMeExpr *candiRegNode = candiIt;
+      candiRegNode->SetRegIdx(newPregIdx); // rename it to a new register
+    }
+    if (renameCount == MeOption::pregRenameLimit) {
+      break;
+    }
+  }
+}
+
+AnalysisResult *MeDoPregRename::Run(MeFunction *func, MeFuncResultMgr *m, ModuleResultMgr*) {
+  auto *irMap = static_cast(m->GetAnalysisResult(MeFuncPhase_IRMAP, func));
+  CHECK_NULL_FATAL(irMap);
+  PregRenamer pregRenamer(NewMemPool(), func, irMap, DEBUGFUNC(func));
+  pregRenamer.RunSelf();
+  if (DEBUGFUNC(func)) {
+    LogInfo::MapleLogger() << "------------after pregrename:-------------------\n";
+    func->Dump(false);
+  }
+  return nullptr;
+}
+} // namespace maple
diff --git a/src/maple_me/src/ssa_devirtual.cpp b/src/maple_me/src/ssa_devirtual.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..38dd25d56b779f1af5c87e698e27c3c977f11c61
--- /dev/null
+++ b/src/maple_me/src/ssa_devirtual.cpp
@@ -0,0 +1,659 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */
+#include "ssa_devirtual.h"
+// This phase performs devirtualization based on SSA. Ideally, we should have
+// precise alias information, so that for each reference we know exactly the
+// objects it refers to, then the exact method it calls. However, precise alias
+// analysis costs a lot.
+//
+// For now, we only use a simple policy to help devirtualize, e.g.
+// {
+//   Base b = new Derived();
+//   b.foo();
+// }
+// We can devirtualize b.foo() to Derived::foo().
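+// Editor's note: an illustrative sketch of the rewrite for the snippet above,
+// not part of this patch; the mangled names are hypothetical, the opcodes are
+// the ones handled by ReplaceCall() below. Once the receiver's type is
+// inferred as <LDerived_3B>,
+//   virtualcallassigned &LBase_3B_7Cfoo_7C_28_29V (dread ref %b)
+// becomes a direct call with the callee's puIdx substituted:
+//   callassigned &LDerived_3B_7Cfoo_7C_28_29V (dread ref %b)
+// with an assertnonnull on %b inserted first if the receiver may be null.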
+namespace maple { +bool SSADevirtual::debug = false; + +static bool MaybeNull(MeExpr *expr) { + if (expr->GetMeOp() == kMeOpVar) { + return static_cast(expr)->GetMaybeNull(); + } + if (expr->GetMeOp() == kMeOpIvar) { + return static_cast(expr)->GetMaybeNull(); + } + if (expr->GetOp() == OP_retype) { + MeExpr *retypeRHS = (static_cast(expr))->GetOpnd(0); + if (retypeRHS->GetMeOp() == kMeOpVar) { + return static_cast(retypeRHS)->GetMaybeNull(); + } + } + return true; +} + +static bool IsFinalMethod(const MIRFunction *mirFunc) { + if (mirFunc == nullptr) { + return false; + } + const auto *classType = static_cast(mirFunc->GetClassType()); + // Return true if the method or its class is declared as final + return (classType != nullptr && (mirFunc->IsFinal() || classType->IsFinal())); +} + +TyIdx SSADevirtual::GetInferredTyIdx(MeExpr *expr) { + if (expr->GetMeOp() == kMeOpVar) { + auto *varMeExpr = static_cast(expr); + if (varMeExpr->GetInferredTyIdx() == 0u) { + // If varMeExpr->inferredTyIdx has not been set, we can double check + // if it is coming from a static final field + const OriginalSt *ost = irMap->GetSSATab().GetOriginalStFromID(varMeExpr->GetOStIdx()); + const MIRSymbol *mirSym = ost->GetMIRSymbol(); + if (mirSym->IsStatic() && mirSym->IsFinal() && mirSym->GetInferredTyIdx() != kInitTyIdx && + mirSym->GetInferredTyIdx() != kNoneTyIdx) { + varMeExpr->SetInferredTyIdx(mirSym->GetInferredTyIdx()); + } + if (mirSym->GetType()->GetKind() == kTypePointer) { + MIRType *pointedType = (static_cast(mirSym->GetType()))->GetPointedType(); + if (pointedType->GetKind() == kTypeClass) { + if ((static_cast(pointedType))->IsFinal()) { + varMeExpr->SetInferredTyIdx(pointedType->GetTypeIndex()); + } + } + } + } + return varMeExpr->GetInferredTyIdx(); + } + if (expr->GetMeOp() == kMeOpIvar) { + return static_cast(expr)->GetInferredTyIdx(); + } + if (expr->GetOp() == OP_retype) { + MeExpr *retypeRHS = (static_cast(expr))->GetOpnd(0); + if (retypeRHS->GetMeOp() == kMeOpVar) { + return static_cast(retypeRHS)->GetInferredTyIdx(); + } + } + return TyIdx(0); +} + +void SSADevirtual::ReplaceCall(CallMeStmt *callStmt, MIRFunction *targetFunc) { + if (SSADevirtual::debug) { + MIRFunction &mirFunc = callStmt->GetTargetFunction(); + LogInfo::MapleLogger() << "[SSA-DEVIRT] " << kOpcodeInfo.GetTableItemAt(callStmt->GetOp()).name << " " << + NameMangler::DecodeName(mirFunc.GetName()); + } + if (callStmt->GetOp() == OP_virtualicall || callStmt->GetOp() == OP_virtualicallassigned || + callStmt->GetOp() == OP_interfaceicall || callStmt->GetOp() == OP_interfaceicallassigned) { + // delete 1st argument + MapleVector::iterator opndIt = callStmt->GetOpnds().begin(); + callStmt->GetOpnds().erase(opndIt); + } + MeExpr *receiver = callStmt->GetOpnd(0); + if (NeedNullCheck(receiver)) { + InsertNullCheck(callStmt, receiver); + ++nullCheckCount; + } + // Set the actuall callee puIdx + callStmt->SetPUIdx(targetFunc->GetPuidx()); + if (callStmt->GetOp() == OP_virtualcall || callStmt->GetOp() == OP_virtualicall) { + callStmt->SetOp(OP_call); + ++optedVirtualCalls; + } else if (callStmt->GetOp() == OP_virtualcallassigned || callStmt->GetOp() == OP_virtualicallassigned) { + callStmt->SetOp(OP_callassigned); + ++optedVirtualCalls; + } else if (callStmt->GetOp() == OP_interfacecall || callStmt->GetOp() == OP_interfaceicall) { + callStmt->SetOp(OP_call); + ++optedInterfaceCalls; + } else if (callStmt->GetOp() == OP_interfacecallassigned || callStmt->GetOp() == OP_interfaceicallassigned) { + callStmt->SetOp(OP_callassigned); + 
++optedInterfaceCalls; + } + if (clone != nullptr && OP_callassigned == callStmt->GetOp()) { + clone->UpdateReturnVoidIfPossible(callStmt, targetFunc); + } + if (SSADevirtual::debug) { + LogInfo::MapleLogger() << "\t -> \t" << kOpcodeInfo.GetTableItemAt(callStmt->GetOp()).name << " " << + NameMangler::DecodeName(targetFunc->GetName()); + if (NeedNullCheck(receiver)) { + LogInfo::MapleLogger() << " with null-check "; + } + LogInfo::MapleLogger() << "\t at " << mod->GetFileNameFromFileNum(callStmt->GetSrcPosition().FileNum()) << ":" << + callStmt->GetSrcPosition().LineNum() << '\n'; + } +} + +bool SSADevirtual::DevirtualizeCall(CallMeStmt *callStmt) { + switch (callStmt->GetOp()) { + case OP_interfacecall: + case OP_interfaceicall: + case OP_interfacecallassigned: + case OP_interfaceicallassigned: + totalInterfaceCalls++; // FALLTHROUGH + case OP_virtualcall: + case OP_virtualicall: + case OP_virtualcallassigned: + case OP_virtualicallassigned: { + totalVirtualCalls++; // actually the number of interfacecalls + virtualcalls + const MapleVector &parms = callStmt->GetOpnds(); + if (parms.empty() || parms[0] == nullptr) { + break; + } + MeExpr *thisParm = parms[0]; + if (callStmt->GetOp() == OP_interfaceicall || callStmt->GetOp() == OP_interfaceicallassigned || + callStmt->GetOp() == OP_virtualicall || callStmt->GetOp() == OP_virtualicallassigned) { + thisParm = parms[1]; + } + TyIdx receiverInferredTyIdx = GetInferredTyIdx(thisParm); + MIRFunction &mirFunc = callStmt->GetTargetFunction(); + if (thisParm->GetPrimType() == PTY_ref && receiverInferredTyIdx != 0u) { + Klass *inferredKlass = kh->GetKlassFromTyIdx(receiverInferredTyIdx); + if (inferredKlass == nullptr) { + break; + } + GStrIdx funcName = mirFunc.GetBaseFuncNameWithTypeStrIdx(); + MIRFunction *inferredFunction = inferredKlass->GetClosestMethod(funcName); + if (inferredFunction == nullptr) { + if (SSADevirtual::debug) { + LogInfo::MapleLogger() << "Can not get function for " << inferredKlass->GetKlassName() << + mirFunc.GetBaseFuncNameWithType() << '\n'; + } + break; + } + if (thisParm->GetMeOp() != kMeOpVar && thisParm->GetMeOp() != kMeOpIvar) { + break; + } + ReplaceCall(callStmt, inferredFunction); + return true; + } else if (IsFinalMethod(&mirFunc)) { + GStrIdx uniqFuncNameStrIdx = mirFunc.GetNameStrIdx(); + CHECK_FATAL(uniqFuncNameStrIdx != 0u, "check"); + MIRSymbol *uniqFuncSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(uniqFuncNameStrIdx); + ASSERT(uniqFuncSym != nullptr, "The real callee %s has not been seen in any imported .mplt file", + mirFunc.GetName().c_str()); + MIRFunction *uniqFunc = uniqFuncSym->GetFunction(); + ASSERT(uniqFunc != nullptr, "Invalid function replacement in devirtualization"); + ASSERT(mirFunc.GetBaseFuncNameWithType() == uniqFunc->GetBaseFuncNameWithType(), + "Invalid function replacement in devirtualization"); + ReplaceCall(callStmt, uniqFunc); + return true; + } else { + if (thisParm->GetMeOp() == kMeOpVar) { + auto *varMeExpr = static_cast(thisParm); + const MapleVector inferredTypeCandidates = varMeExpr->GetInferredTypeCandidates(); + if (inferredTypeCandidates.size() > 0) { + GStrIdx funcName = mirFunc.GetBaseFuncNameWithTypeStrIdx(); + MIRFunction *inferredFunction = nullptr; + size_t i = 0; + for (; i < inferredTypeCandidates.size(); ++i) { + Klass *inferredKlass = kh->GetKlassFromTyIdx(inferredTypeCandidates.at(i)); + if (inferredKlass == nullptr) { + break; + } + MIRFunction *tmpFunction = inferredKlass->GetClosestMethod(funcName); + if (tmpFunction == nullptr) { + break; + } + if 
(inferredFunction == nullptr) {
+                inferredFunction = tmpFunction;
+              } else if (inferredFunction != tmpFunction) {
+                break;
+              }
+            }
+            if (i == inferredTypeCandidates.size() && inferredFunction != nullptr) {
+              if (SSADevirtual::debug) {
+                LogInfo::MapleLogger() << "Devirtualize based on set of inferred types: In " <<
+                    GetMIRFunction()->GetName() << "; Devirtualize: " << mirFunc.GetName() << '\n';
+              }
+              ReplaceCall(callStmt, inferredFunction);
+              return true;
+            }
+          }
+        }
+      }
+      break;
+    }
+    default:
+      break;
+  }
+  return false;
+}
+
+bool SSADevirtual::NeedNullCheck(MeExpr *receiver) const {
+  return MaybeNull(receiver);
+}
+
+// Java requires throwing a NullPointerException if the receiver of
+// the virtualcall is null. We insert an eval(iread receiver, 0)
+// statement to perform the null-check.
+void SSADevirtual::InsertNullCheck(CallMeStmt *callStmt, MeExpr *receiver) {
+  UnaryMeStmt *nullCheck = irMap->New(OP_assertnonnull);
+  nullCheck->SetBB(callStmt->GetBB());
+  nullCheck->SetSrcPos(callStmt->GetSrcPosition());
+  nullCheck->SetMeStmtOpndValue(receiver);
+  callStmt->GetBB()->InsertMeStmtBefore(callStmt, nullCheck);
+}
+
+void SSADevirtual::PropVarInferredType(VarMeExpr *VarMeExpr) {
+  if (VarMeExpr->GetInferredTyIdx() != 0u) {
+    return;
+  }
+  if (VarMeExpr->GetDefBy() == kDefByStmt) {
+    DassignMeStmt &defStmt = utils::ToRef(safe_cast(VarMeExpr->GetDefStmt()));
+    MeExpr *rhs = defStmt.GetRHS();
+    if (rhs->GetOp() == OP_gcmalloc) {
+      VarMeExpr->SetInferredTyIdx(static_cast(rhs)->GetTyIdx());
+      VarMeExpr->SetMaybeNull(false);
+      if (SSADevirtual::debug) {
+        MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(VarMeExpr->GetInferredTyIdx());
+        LogInfo::MapleLogger() << "[SSA-DEVIRT] [TYPE-INFERRING] mx" << VarMeExpr->GetExprID() << " ";
+        type->Dump(0, false);
+        LogInfo::MapleLogger() << '\n';
+      }
+    } else {
+      TyIdx tyIdx = GetInferredTyIdx(rhs);
+      VarMeExpr->SetMaybeNull(MaybeNull(rhs));
+      if (tyIdx != 0u) {
+        VarMeExpr->SetInferredTyIdx(tyIdx);
+        if (SSADevirtual::debug) {
+          MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(VarMeExpr->GetInferredTyIdx());
+          LogInfo::MapleLogger() << "[SSA-DEVIRT] [TYPE-INFERRING] mx" << VarMeExpr->GetExprID() << " ";
+          type->Dump(0, false);
+          LogInfo::MapleLogger() << '\n';
+        }
+      }
+    }
+    if (VarMeExpr->GetInferredTyIdx() != 0u) {
+      OriginalSt *ost = irMap->GetSSATab().GetOriginalStFromID(defStmt.GetVarLHS()->GetOStIdx());
+      MIRSymbol *mirSym = ost->GetMIRSymbol();
+      if (mirSym->IsStatic() && mirSym->IsFinal()) {
+        // static final field can store and propagate inferred typeinfo
+        if (mirSym->GetInferredTyIdx() == kInitTyIdx) {
+          // mirSym->_inferred_tyIdx has not been set before
+          mirSym->SetInferredTyIdx(VarMeExpr->GetInferredTyIdx());
+        } else if (mirSym->GetInferredTyIdx() != VarMeExpr->GetInferredTyIdx()) {
+          // If mirSym->_inferred_tyIdx has been set before, it means we have
+          // seen a divergence on control flow. Set to NONE if not all
+          // branches reach the same conclusion.
+ mirSym->SetInferredTyIdx(kNoneTyIdx); + } + } + } + } else if (VarMeExpr->GetDefBy() == kDefByPhi) { + if (SSADevirtual::debug) { + LogInfo::MapleLogger() << "[SSA-DEVIRT] [TYPE-INFERRING] " << "Def by phi " << '\n'; + } + } +} + +void SSADevirtual::PropIvarInferredType(IvarMeExpr *ivar) { + if (ivar->GetInferredTyIdx() != 0u) { + return; + } + IassignMeStmt *defStmt = ivar->GetDefStmt(); + if (defStmt == nullptr) { + return; + } + MeExpr *rhs = defStmt->GetRHS(); + CHECK_NULL_FATAL(rhs); + if (rhs->GetOp() == OP_gcmalloc) { + ivar->GetInferredTyIdx() = static_cast(rhs)->GetTyIdx(); + ivar->SetMaybeNull(false); + if (SSADevirtual::debug) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ivar->GetInferredTyIdx()); + LogInfo::MapleLogger() << "[SSA-DEVIRT] [TYPE-INFERRING] mx" << ivar->GetExprID() << " "; + type->Dump(0, false); + LogInfo::MapleLogger() << '\n'; + } + } else { + TyIdx tyIdx = GetInferredTyIdx(rhs); + ivar->SetMaybeNull(MaybeNull(rhs)); + if (tyIdx != 0u) { + ivar->SetInferredTyidx(tyIdx); + if (SSADevirtual::debug) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ivar->GetInferredTyIdx()); + LogInfo::MapleLogger() << "[SSA-DEVIRT] [TYPE-INFERRING] mx" << ivar->GetExprID() << " "; + type->Dump(0, false); + LogInfo::MapleLogger() << '\n'; + } + } + } +} + +void SSADevirtual::VisitVarPhiNode(MeVarPhiNode *varPhi) { + MapleVector opnds = varPhi->GetOpnds(); + auto *lhs = varPhi->GetLHS(); + const MapleVector &inferredTypeCandidates = lhs->GetInferredTypeCandidates(); + for (size_t i = 0; i < opnds.size(); ++i) { + VarMeExpr *opnd = opnds[i]; + PropVarInferredType(opnd); + if (opnd->GetInferredTyIdx() != 0u) { + size_t j = 0; + for (; j < inferredTypeCandidates.size(); j++) { + if (inferredTypeCandidates.at(j) == opnd->GetInferredTyIdx()) { + break; + } + } + if (j == inferredTypeCandidates.size()) { + lhs->AddInferredTypeCandidate(opnd->GetInferredTyIdx()); + } + } else { + lhs->ClearInferredTypeCandidates(); + break; + } + } +} + +void SSADevirtual::VisitMeExpr(MeExpr *meExpr) { + if (meExpr == nullptr) { + return; + } + MeExprOp meOp = meExpr->GetMeOp(); + switch (meOp) { + case kMeOpVar: { + auto *varExpr = static_cast(meExpr); + PropVarInferredType(varExpr); + break; + } + case kMeOpReg: + break; + case kMeOpIvar: { + auto *iVar = static_cast(meExpr); + PropIvarInferredType(iVar); + break; + } + case kMeOpOp: { + auto *meOpExpr = static_cast(meExpr); + for (uint32 i = 0; i < 3; ++i) { + VisitMeExpr(meOpExpr->GetOpnd(i)); + } + break; + } + case kMeOpNary: { + auto *naryMeExpr = static_cast(meExpr); + for (MeExpr *opnd : naryMeExpr->GetOpnds()) { + VisitMeExpr(opnd); + } + break; + } + case kMeOpAddrof: + case kMeOpAddroffunc: + case kMeOpGcmalloc: + case kMeOpConst: + case kMeOpConststr: + case kMeOpConststr16: + case kMeOpSizeoftype: + case kMeOpFieldsDist: + break; + default: + CHECK_FATAL(false, "MeOP NIY"); + break; + } +} + +void SSADevirtual::ReturnTyIdxInferring(RetMeStmt *retMeStmt) { + MapleVector &opnds = retMeStmt->GetOpnds(); + CHECK_FATAL(opnds.size() <= 1, "Assume at most one return value for now"); + for (size_t i = 0; i < opnds.size(); ++i) { + MeExpr *opnd = opnds[i]; + TyIdx tyIdx = GetInferredTyIdx(opnd); + if (retTy == kNotSeen) { + // seen the first return stmt + retTy = kSeen; + inferredRetTyIdx = tyIdx; + } else if (retTy == kSeen) { + // has seen an inferred type before, check if they agreed + if (inferredRetTyIdx != tyIdx) { + retTy = kFailed; + inferredRetTyIdx = TyIdx(0); // not agreed, cleared. 
+ } + } + } +} + +void SSADevirtual::TraversalMeStmt(MeStmt *meStmt) { + Opcode op = meStmt->GetOp(); + switch (op) { + case OP_dassign: { + auto *varMeStmt = static_cast(meStmt); + VisitMeExpr(varMeStmt->GetRHS()); + break; + } + case OP_regassign: { + auto *regMeStmt = static_cast(meStmt); + VisitMeExpr(regMeStmt->GetRHS()); + break; + } + case OP_maydassign: { + auto *maydStmt = static_cast(meStmt); + VisitMeExpr(maydStmt->GetRHS()); + break; + } + case OP_iassign: { + auto *ivarStmt = static_cast(meStmt); + VisitMeExpr(ivarStmt->GetRHS()); + break; + } + case OP_syncenter: + case OP_syncexit: { + auto *syncMeStmt = static_cast(meStmt); + MapleVector &opnds = syncMeStmt->GetOpnds(); + for (size_t i = 0; i < opnds.size(); ++i) { + MeExpr *opnd = opnds[i]; + VisitMeExpr(opnd); + } + break; + } + case OP_throw: { + auto *thrMeStmt = static_cast(meStmt); + VisitMeExpr(thrMeStmt->GetOpnd()); + break; + } + case OP_assertnonnull: + case OP_eval: + case OP_free: { + auto *umeStmt = static_cast(meStmt); + VisitMeExpr(umeStmt->GetOpnd()); + break; + } + case OP_call: + case OP_virtualcall: + case OP_virtualicall: + case OP_superclasscall: + case OP_interfacecall: + case OP_interfaceicall: + case OP_customcall: + case OP_polymorphiccall: + case OP_callassigned: + case OP_virtualcallassigned: + case OP_virtualicallassigned: + case OP_superclasscallassigned: + case OP_interfacecallassigned: + case OP_interfaceicallassigned: + case OP_customcallassigned: + case OP_polymorphiccallassigned: { + auto *callMeStmt = static_cast(meStmt); + MapleVector &opnds = callMeStmt->GetOpnds(); + for (size_t i = 0; i < opnds.size(); ++i) { + MeExpr *opnd = opnds[i]; + VisitMeExpr(opnd); + } + (void)DevirtualizeCall(callMeStmt); + if (clone != nullptr && OP_callassigned == callMeStmt->GetOp()) { + MIRFunction &targetFunc = callMeStmt->GetTargetFunction(); + clone->UpdateReturnVoidIfPossible(callMeStmt, &targetFunc); + } + break; + } + case OP_icall: + case OP_icallassigned: { + auto *icallMeStmt = static_cast(meStmt); + MapleVector &opnds = icallMeStmt->GetOpnds(); + for (size_t i = 0; i < opnds.size(); ++i) { + MeExpr *opnd = opnds[i]; + VisitMeExpr(opnd); + } + break; + } + case OP_intrinsiccallwithtype: + case OP_intrinsiccall: + case OP_xintrinsiccall: + case OP_intrinsiccallwithtypeassigned: + case OP_intrinsiccallassigned: + case OP_xintrinsiccallassigned: { + auto *intrinCallStmt = static_cast(meStmt); + MapleVector &opnds = intrinCallStmt->GetOpnds(); + for (size_t i = 0; i < opnds.size(); ++i) { + MeExpr *opnd = opnds[i]; + VisitMeExpr(opnd); + } + break; + } + case OP_brtrue: + case OP_brfalse: { + auto *condGotoStmt = static_cast(meStmt); + VisitMeExpr(condGotoStmt->GetOpnd()); + break; + } + case OP_switch: { + auto *switchStmt = static_cast(meStmt); + VisitMeExpr(switchStmt->GetOpnd()); + break; + } + case OP_return: { + auto *retMeStmt = static_cast(meStmt); + MapleVector &opnds = retMeStmt->GetOpnds(); + for (size_t i = 0; i < opnds.size(); ++i) { + MeExpr *opnd = opnds[i]; + VisitMeExpr(opnd); + } + ReturnTyIdxInferring(retMeStmt); + break; + } + case OP_assertlt: + case OP_assertge: { + auto *assMeStmt = static_cast(meStmt); + VisitMeExpr(assMeStmt->GetOpnd(0)); + VisitMeExpr(assMeStmt->GetOpnd(1)); + break; + } + case OP_jstry: + case OP_jscatch: + case OP_finally: + case OP_endtry: + case OP_cleanuptry: + case OP_try: + case OP_catch: + case OP_goto: + case OP_gosub: + case OP_retsub: + case OP_comment: + case OP_membaracquire: + case OP_membarrelease: + case OP_membarstoreload: + case 
OP_membarstorestore:
+      break;
+    default:
+      CHECK_FATAL(false, "unexpected stmt in ssadevirt or NYI");
+  }
+  if (meStmt->GetOp() != OP_callassigned) {
+    return;
+  }
+  MapleVector *mustDefList = meStmt->GetMustDefList();
+  if (mustDefList->empty()) {
+    return;
+  }
+  MeExpr *meLHS = mustDefList->front().GetLHS();
+  if (meLHS->GetMeOp() != kMeOpVar) {
+    return;
+  }
+  auto *lhsVar = static_cast(meLHS);
+  auto *callMeStmt = static_cast(meStmt);
+  MIRFunction &called = callMeStmt->GetTargetFunction();
+  if (called.GetInferredReturnTyIdx() != 0u) {
+    lhsVar->SetInferredTyIdx(called.GetInferredReturnTyIdx());
+    if (SSADevirtual::debug) {
+      MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(lhsVar->GetInferredTyIdx());
+      LogInfo::MapleLogger() << "[SSA-DEVIRT] [TYPE-INFERRING] mx" << lhsVar->GetExprID() << " ";
+      type->Dump(0, false);
+      LogInfo::MapleLogger() << '\n';
+    }
+  }
+}
+
+void SSADevirtual::TraversalBB(BB *bb) {
+  if (bb == nullptr) {
+    return;
+  }
+  if (bbVisited[bb->GetBBId()]) {
+    return;
+  }
+  bbVisited[bb->GetBBId()] = true;
+  // traverse var phi nodes
+  MapleMap &meVarPhiList = bb->GetMevarPhiList();
+  for (auto it = meVarPhiList.begin(); it != meVarPhiList.end(); ++it) {
+    MeVarPhiNode *phiMeNode = it->second;
+    VisitVarPhiNode(phiMeNode);
+  }
+  // traverse reg phi nodes (NYI)
+  // traverse stmts
+  for (auto &meStmt : bb->GetMeStmts()) {
+    TraversalMeStmt(&meStmt);
+  }
+}
+
+void SSADevirtual::Perform(BB *entryBB) {
+  // Pre-order traverse the dominance tree, so that each def is traversed
+  // before its use
+  std::queue bbList;
+  bbList.push(entryBB);
+  while (!bbList.empty()) {
+    BB *bb = bbList.front();
+    bbList.pop();
+    TraversalBB(bb);
+    const MapleSet &domChildren = dom->GetDomChildren(bb->GetBBId());
+    for (const BBId &bbId : domChildren) {
+      bbList.push(GetBB(bbId));
+    }
+  }
+  MIRFunction *mirFunc = GetMIRFunction();
+  if (mirFunc == nullptr) {
+    return; // maybe wpo
+  }
+  if (retTy == kSeen) {
+    mirFunc->SetInferredReturnTyIdx(this->inferredRetTyIdx);
+  }
+  // Simple rule: if the method's declared returning type is a final class, then
+  // the actual returning type is the same as the declared returning type.
+ MIRType *declReturnType = mirFunc->GetReturnType(); + if (declReturnType->GetPrimType() == PTY_ref && declReturnType->GetKind() == kTypePointer) { + MIRType *pointedType = static_cast(declReturnType)->GetPointedType(); + MIRClassType *declReturnClass = safe_cast(pointedType); + if (declReturnClass != nullptr && declReturnClass->IsFinal()) { + mirFunc->SetInferredReturnTyIdx(declReturnClass->GetTypeIndex()); + } + } + if (SSADevirtual::debug) { + LogInfo::MapleLogger() << "[SSA-DEVIRT]" << " {virtualcalls: total " << (totalVirtualCalls - totalInterfaceCalls) << + ", devirtualized " << optedVirtualCalls << "}" << " {interfacecalls: total " << + totalInterfaceCalls << ", devirtualized " << optedInterfaceCalls << "}" << + ", {null-checks: " << nullCheckCount << "}" << "\t" << mirFunc->GetName() << '\n'; + if (mirFunc != nullptr && mirFunc->GetInferredReturnTyIdx() != 0u) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(mirFunc->GetInferredReturnTyIdx()); + LogInfo::MapleLogger() << "[SSA-DEVIRT] [FUNC-RETTYPE] "; + type->Dump(0, false); + LogInfo::MapleLogger() << '\n'; + } + } +} +} // namespace maple diff --git a/src/maple_util/include/mpl_scheduler.h b/src/maple_util/include/mpl_scheduler.h index eafdf5e448b7189460426916bbe2069908640401..c0994403c2c5fe6170db87a9af9066ea83d29b02 100644 --- a/src/maple_util/include/mpl_scheduler.h +++ b/src/maple_util/include/mpl_scheduler.h @@ -44,14 +44,6 @@ class MplTask { virtual ~MplTask() {} - virtual int Run(MplTaskParam*) { - return 0; - } - - virtual int Finish(MplTaskParam*) { - return 0; - } - void SetTaskId(uint32 id) { taskId = id; } @@ -60,7 +52,23 @@ class MplTask { return taskId; } + int Run(MplTaskParam *param = nullptr) { + return RunImpl(param); + } + + int Finish(MplTaskParam *param = nullptr) { + return FinishImpl(param); + } + protected: + virtual int RunImpl(MplTaskParam *param) { + return 0; + } + + virtual int FinishImpl(MplTaskParam *param) { + return 0; + } + uint32 taskId; }; diff --git a/src/maple_util/include/profile.h b/src/maple_util/include/profile.h new file mode 100644 index 0000000000000000000000000000000000000000..27f4365f57d7aef454f75d0f5aaf19d14c0bed5c --- /dev/null +++ b/src/maple_util/include/profile.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
+ */ +#ifndef MAPLE_UTIL_INCLUDE_PROFILE_H +#define MAPLE_UTIL_INCLUDE_PROFILE_H +#include +#include +#include +#include +#include "profile_type.h" +#include "mpl_logging.h" +#include "types_def.h" +#include "option.h" + +namespace maple { +class Profile { + public: + struct FuncItem { + uint32 callTimes; + uint8 type; + }; + + static const uint8 kStringEnd; + bool CheckFuncHot(const std::string &className) const; + bool CheckMethodHot(const std::string &className) const; + bool CheckFieldHot(const std::string &className) const; + bool CheckClassHot(const std::string &className) const; + bool CheckLiteralHot(const std::string &literal) const; + bool CheckReflectionStrHot(const std::string &str, uint8 &layoutType) const; + void InitPreHot(); + // default get all kind profile + bool DeCompress(const std::string &fileName, const std::string &dexName, ProfileType type = kAll); + const std::unordered_map &GetFunctionProf() const; + size_t GetLiteralProfileSize() const; + bool CheckProfValid() const; + bool CheckDexValid(uint32 idx); + void SetProfileMode(); + void Dump() const; + Profile(); + ~Profile() = default; + + private: + bool valid; + bool profileMode = false; + bool isCoreSo = false; + bool isAppProfile = false; + static bool debug; + static uint32 hotFuncCountThreshold; + std::vector strMap; + std::string dexName; + std::string appPackageName; + std::unordered_set classMeta; + std::unordered_set methodMeta; + std::unordered_set fieldMeta; + std::unordered_set literal; + std::unordered_map reflectionStrData; + std::unordered_map funcProfData; + std::unordered_set &GetMeta(uint8 type); + bool CheckProfileHeader(const Header *header) const; + std::string GetProfileNameByType(uint8 type) const; + void ParseMeta(const char *data, int fileNum, std::unordered_set &metaData); + void ParseReflectionStr(const char *data, int fileNum); + void ParseFunc(const char *data, int fileNum); + void ParseLiteral(const char *data, const char *end); +}; + +} // namespace maple +#endif // MAPLE_UTIL_INCLUDE_PROFILE_H diff --git a/src/maple_util/include/profile_type.h b/src/maple_util/include/profile_type.h new file mode 100644 index 0000000000000000000000000000000000000000..cb871f321dbdfbcbdc08ddd66828ee368055a675 --- /dev/null +++ b/src/maple_util/include/profile_type.h @@ -0,0 +1,93 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
+ */
+#ifndef PROFILE_TYPE_H
+#define PROFILE_TYPE_H
+
+static constexpr uint8_t kProfileMagic[] = { 'm', 'a', 'p', 'l', 'e', '.', 'p', 'r', 'o', 'f', 'i', 'l', 'e', '\0' };
+static constexpr uint8_t kVer[] = { 0, 0, 1 };
+
+enum ProfileFileType : uint8_t {
+  kSystemServer = 0x00,
+  kApp = 0x01
+};
+
+enum ProfileType : uint8_t {
+  kFunction = 0x00,
+  kClassMeta = 0x01,
+  kFieldMeta = 0x02,
+  kMethodMeta = 0x03,
+  kReflectionStr = 0x04,
+  kLiteral = 0x05,
+  kBBInfo = 0x06,
+  kAll = 0x06,
+  kFileDesc = 0xFF
+};
+
+struct ProfileDataInfo {
+  uint32_t profileDataOff;
+  uint8_t profileType;
+  uint8_t mapleFileNum;
+  uint16_t pad = 0;
+  ProfileDataInfo() = default;
+  ProfileDataInfo(uint32_t profileDataOff, uint8_t profileType, uint8_t mapleFileNum)
+      : profileDataOff(profileDataOff), profileType(profileType), mapleFileNum(mapleFileNum) {}
+};
+
+struct FunctionItem {
+  uint32_t classIdx;
+  uint32_t methodIdx;
+  uint32_t sigIdx;
+  uint32_t callTimes;
+  uint8_t type;
+  FunctionItem(uint32_t classIdx, uint32_t methodIdx, uint32_t sigIdx, uint32_t callTimes, uint8_t type)
+      : classIdx(classIdx), methodIdx(methodIdx), sigIdx(sigIdx), callTimes(callTimes), type(type) {}
+};
+
+struct MetaItem {
+  uint32_t idx;
+  MetaItem(uint32_t idx) : idx(idx) {}
+};
+
+struct ReflectionStrItem {
+  uint8_t type;
+  uint32_t idx;
+  ReflectionStrItem(uint32_t idx, uint8_t type) : type(type), idx(idx) {}
+};
+
+template <typename T>
+struct MapleFileProf {
+  uint32_t idx;
+  uint32_t num;
+  uint32_t size;
+  T items[1];
+};
+
+constexpr int kMagicNum = 14;
+constexpr int kVerNum = 3;
+constexpr int kCheckSumNum = 4;
+struct Header {
+  uint8_t magic[kMagicNum] = {};
+  uint8_t ver[kVerNum] = {};
+  uint8_t checkSum[kCheckSumNum] = {};
+  uint8_t profileNum = 0;
+  uint8_t profileFileType = 0;
+  uint8_t pad = 0;
+  uint32_t headerSize = 0;
+  uint32_t stringCount = 0;
+  uint32_t stringTabOff = 0;
+  ProfileDataInfo data[1] = {}; // profile data info determined by runtime
+};
+
+#endif
diff --git a/src/maple_util/src/profile.cpp b/src/maple_util/src/profile.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4bd141aa497f6eb949f7d77d7334211f0b956e97
--- /dev/null
+++ b/src/maple_util/src/profile.cpp
@@ -0,0 +1,444 @@
+/*
+ * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */
+#include "profile.h"
+#include <algorithm>
+#include <cerrno>
+#include <cstring>
+#include <fstream>
+#include <functional>
+#include <iostream>
+#include <sstream>
+#include <string>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+#include "name_mangler.h"
+#include "file_layout.h"
+#include "types_def.h"
+
+namespace maple {
+const uint8 Profile::kStringEnd = 0x00;
+uint32 Profile::hotFuncCountThreshold = 0;
+bool Profile::debug = false;
+const uint32 kPrecision = 1000000;
+// preHot data
+static const std::string preClassHot[] = {
+  "Ljava/lang/Class;",
+  "Ljava/lang/Object;",
+  "Ljava/lang/reflect/Field;",
+  "Ljava/lang/reflect/Constructor;",
+  "Ljava/lang/reflect/Method;",
+  "Ljava/lang/reflect/Executable;",
+  "Ljava/lang/String;"
+};
+
+Profile::Profile() : valid(false) {}
+
+bool Profile::CheckProfileHeader(const Header *header) const {
+  return (memcmp(header->magic, kProfileMagic, sizeof(kProfileMagic)) == 0);
+}
+
+std::string Profile::GetProfileNameByType(uint8 type) const {
+  switch (type) {
+    case kFunction:
+      return "FUNCTION";
+    case kClassMeta:
+      return "CLASSMETA";
+    case kFieldMeta:
+      return "FIELDMETA";
+    case kMethodMeta:
+      return "METHODMETA";
+    case kReflectionStr:
+      return "ReflectionStr";
+    case kLiteral:
+      return "Literal";
+    case kFileDesc:
+      return "FileDescription";
+    default:
+      CHECK_FATAL(false, "type not found");
+  }
+  return "";
+}
+
+bool Profile::CheckDexValid(uint32 idx) {
+  if (isAppProfile) {
+    return true; // for app profiles, don't check the dex name
+  }
+  return (dexName.empty() || dexName.find(strMap.at(idx)) != std::string::npos);
+}
+
+void Profile::ParseLiteral(const char *data, const char *end) {
+  if (data > end) {
+    LogInfo::MapleLogger() << "parse Literal error" << std::endl;
+    return;
+  }
+  std::string str(data, end - data);
+  std::stringstream ss;
+  ss.str(str);
+  std::string item;
+  while (std::getline(ss, item)) {
+    literal.insert(item);
+  }
+  if (debug) {
+    LogInfo::MapleLogger() << "parse Literal succeeded, literal size " << literal.size() << "\n";
+  }
+}
+
+void Profile::ParseFunc(const char *data, int fileNum) {
+  const MapleFileProf<FunctionItem> *funcProf = nullptr;
+  const FunctionItem *funcItem = nullptr;
+  uint32 offset = 0;
+  for (int32 mapleFileIdx = 0; mapleFileIdx < fileNum; mapleFileIdx++) {
+    funcProf = reinterpret_cast<const MapleFileProf<FunctionItem>*>(data + offset);
+    if (CheckDexValid(funcProf->idx)) {
+      if (debug) {
+        LogInfo::MapleLogger() << "FuncProfile" << ":" << strMap.at(funcProf->idx) << ":" << funcProf->num << "\n";
+      }
+      for (uint32 item = 0; item < funcProf->num; item++) {
+        funcItem = &(funcProf->items[item]);
+        if (funcItem->type >= kLayoutTypeCount) {
+          if (debug) {
+            LogInfo::MapleLogger() << "ParseFunc error: unsupported type " << funcItem->type << "\n";
+          }
+          continue;
+        }
+        std::string className = NameMangler::EncodeName(strMap.at(funcItem->classIdx));
+        std::string methodName = NameMangler::EncodeName(strMap.at(funcItem->methodIdx));
+        std::string sigName = NameMangler::EncodeName(strMap.at(funcItem->sigIdx));
+        std::string funcName = className + "_7C" + methodName + "_7C" + sigName;
+        funcProfData.insert(
+            std::make_pair(funcName, (FuncItem){ .callTimes = funcItem->callTimes, .type = funcItem->type }));
+      }
+    }
+    // next maple file's profile
+    offset += sizeof(MapleFileProf<FunctionItem>) + sizeof(FunctionItem) * (funcProf->num - 1);
+  }
+}
+
+void Profile::ParseMeta(const char *data, int fileNum, std::unordered_set<std::string> &metaData) {
+  const MapleFileProf<MetaItem> *metaProf = nullptr;
+  uint32 offset = 0;
+  for (int32 mapleFileIdx = 0; mapleFileIdx < fileNum; mapleFileIdx++) {
+    metaProf = reinterpret_cast<const MapleFileProf<MetaItem>*>(data + offset);
+    if (CheckDexValid(metaProf->idx)) {
+      if (debug) {
+        LogInfo::MapleLogger() << "dex name " << strMap.at(metaProf->idx) << std::endl;
+      }
+      for (uint32 item = 0; item < metaProf->num; item++) {
+        const MetaItem *metaItem = &(metaProf->items[item]);
+        metaData.insert(strMap.at(metaItem->idx));
+      }
+    }
+    offset += sizeof(MapleFileProf<MetaItem>) + sizeof(MetaItem) * (metaProf->num - 1);
+  }
+}
+
+void Profile::ParseReflectionStr(const char *data, int fileNum) {
+  const MapleFileProf<ReflectionStrItem> *metaProf = nullptr;
+  uint32 offset = 0;
+  for (int32 mapleFileIdx = 0; mapleFileIdx < fileNum; mapleFileIdx++) {
+    metaProf = reinterpret_cast<const MapleFileProf<ReflectionStrItem>*>(data + offset);
+    if (CheckDexValid(metaProf->idx)) {
+      if (debug) {
+        LogInfo::MapleLogger() << "dex name " << strMap.at(metaProf->idx) << std::endl;
+      }
+      for (uint32 item = 0; item < metaProf->num; item++) {
+        const ReflectionStrItem *strItem = &(metaProf->items[item]);
+        reflectionStrData.insert(std::make_pair(strMap.at(strItem->idx), strItem->type));
+      }
+    }
+    offset += sizeof(MapleFileProf<ReflectionStrItem>) + sizeof(ReflectionStrItem) * (metaProf->num - 1);
+  }
+}
+
+void Profile::InitPreHot() {
+  const char *kCoreDexName = "core-all";
+  if (dexName.find(kCoreDexName) != std::string::npos) {
+    for (auto &item : preClassHot) {
+      classMeta.insert(item);
+    }
+    isCoreSo = true;
+  }
+}
+
+bool Profile::DeCompress(const std::string &path, const std::string &dexNameInner, ProfileType type) {
+  this->dexName = dexNameInner;
+  InitPreHot();
+  bool res = true;
+  std::ifstream in(path, std::ios::binary);
+  if (!in) {
+    if (errno != ENOENT && errno != EACCES) {
+      LogInfo::MapleLogger() << "WARN: DeCompress(" << path << "), failed to open, " << strerror(errno) << std::endl;
+    }
+    res = false;
+    return res;
+  }
+  in.seekg(0, std::ios::end);
+  size_t byteCount = in.tellg();
+  in.seekg(0, std::ios::beg);
+  std::vector<uint8> bufVector;
+  bufVector.resize(byteCount);
+  char *buf = reinterpret_cast<char*>(bufVector.data());
+  if (!in.read(buf, byteCount)) {
+    LogInfo::MapleLogger() << "WARN: DeCompress(" << path << "), failed to read all data, " << strerror(errno) << std::endl;
+    res = false;
+    return res;
+  }
+  if (byteCount < sizeof(Header)) {
+    LogInfo::MapleLogger() << "WARN: DeCompress(" << path << "), file is too small for a profile header" << std::endl;
+    res = false;
+    return res;
+  }
+  Header *header = reinterpret_cast<Header*>(buf);
+  if (!CheckProfileHeader(header)) {
+    if (debug) {
+      LogInfo::MapleLogger() << "invalid magic number " << reinterpret_cast<const char*>(header->magic) << std::endl;
+    }
+    res = false;
+    return res;
+  }
+  this->isAppProfile = (header->profileFileType == kApp);
+  uint32 stringTabSize = byteCount - header->stringTabOff + 1;
+  if (debug) {
+    LogInfo::MapleLogger() << "Header summary: profile num " << static_cast<uint32>(header->profileNum)
+                           << " string table size " << stringTabSize << std::endl;
+  }
+  const char *strBuf = buf + header->stringTabOff;
+  uint32 idx = 0;
+  strMap.push_back(strBuf);
+  while (idx < stringTabSize) {
+    if (*(strBuf + idx) == kStringEnd) {
+      strMap.push_back(strBuf + idx + 1);
+    }
+    idx++;
+  }
+  if (debug) {
+    LogInfo::MapleLogger() << "str size " << idx << std::endl;
+    for (auto item : strMap) {
+      LogInfo::MapleLogger() << item << std::endl;
+    }
+    LogInfo::MapleLogger() << "str size print end " << std::endl;
+  }
+  for (idx = 0; idx < header->profileNum; idx++) {
+    ProfileDataInfo *profileDataInfo = &(header->data[idx]);
+    if (debug) {
+      LogInfo::MapleLogger() << "profile file num for type " << GetProfileNameByType(profileDataInfo->profileType)
+                             << " " << static_cast<uint32>(profileDataInfo->mapleFileNum) << std::endl;
+      LogInfo::MapleLogger() << GetProfileNameByType(profileDataInfo->profileType) << " Start" << std::endl;
+    }
+    char *proFileData = buf + profileDataInfo->profileDataOff;
+    if (type != kAll && type != profileDataInfo->profileType) {
+      continue; // only parse the indicated type
+    }
+    switch (profileDataInfo->profileType) {
+      case kFunction:
+        ParseFunc(proFileData, profileDataInfo->mapleFileNum);
+        break;
+      case kClassMeta:
+        ParseMeta(proFileData, profileDataInfo->mapleFileNum, classMeta);
+        break;
+      case kFieldMeta:
+        ParseMeta(proFileData, profileDataInfo->mapleFileNum, fieldMeta);
+        break;
+      case kMethodMeta:
+        ParseMeta(proFileData, profileDataInfo->mapleFileNum, methodMeta);
+        break;
+      case kReflectionStr:
+        ParseReflectionStr(proFileData, profileDataInfo->mapleFileNum);
+        break;
+      case kLiteral:
+        ParseLiteral(proFileData, strBuf);
+        break;
+      case kFileDesc: {
+        uint32_t appPackageNameIdx = *reinterpret_cast<uint32_t*>(proFileData);
+        const std::string &profileAppName = strMap.at(appPackageNameIdx);
+        if (!appPackageName.empty() && appPackageName != profileAppName) {
+          LogInfo::MapleLogger() << "app profile doesn't match: expected " << appPackageName
+                                 << " but got " << profileAppName << std::endl;
+          return false;
+        }
+        this->appPackageName = profileAppName;
+        break;
+      }
+      default:
+        LogInfo::MapleLogger() << "unsupported tag " << static_cast<uint32>(profileDataInfo->profileType) << std::endl;
+        break;
+    }
+  }
+  LogInfo::MapleLogger() << "SUCC parse " << path << std::endl;
+  valid = true;
+  return res;
+}
+
+void Profile::SetProfileMode() {
+  profileMode = true;
+}
+
+bool Profile::CheckFuncHot(const std::string &funcName) const {
+  if (funcProfData.empty()) {
+    return false;
+  }
+  if (valid) {
+    auto iter = funcProfData.find(funcName);
+    if (iter == funcProfData.end()) {
+      return false;
+    }
+    if (hotFuncCountThreshold == 0) {
+      if (Options::profileHotCountSeted) {
+        hotFuncCountThreshold = Options::profileHotCount;
+      } else {
+        std::vector<uint32> times;
+        for (auto item : funcProfData) {
+          times.push_back((item.second).callTimes);
+        }
+        std::sort(times.begin(), times.end(), std::greater<uint32>());
+        size_t index = static_cast<size_t>(static_cast<double>(times.size()) / kPrecision * Options::profileHotRate);
+        hotFuncCountThreshold = times.at(index);
+      }
+    }
+    return (iter->second).callTimes >= hotFuncCountThreshold;
+  }
+  return false;
+}
+
+bool Profile::CheckMethodHot(const std::string &className) const {
+  if (methodMeta.empty()) {
+    return true;
+  }
+  if (valid) {
+    if (methodMeta.find(className) == methodMeta.end()) {
+      return false;
+    }
+    return true;
+  }
+  return false;
+}
+
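+// Note on the meta-based checks (method/field/class): an empty meta set means
+// the corresponding section was absent from the profile, so everything is
+// conservatively treated as hot; with a valid profile, set membership decides,
+// and an invalid profile reports cold.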
+bool Profile::CheckFieldHot(const std::string &className) const {
+  if (fieldMeta.empty()) {
+    return true;
+  }
+  if (valid) {
+    if (fieldMeta.find(className) == fieldMeta.end()) {
+      return false;
+    }
+    return true;
+  }
+  return false;
+}
+
+bool Profile::CheckClassHot(const std::string &className) const {
+  // In sampling mode, treat every class as cold, except for the core-all so;
+  // core-all has some pre-defined hot classes.
+  if (profileMode && !isCoreSo) {
+    return false;
+  }
+  if (classMeta.empty()) {
+    return true;
+  }
+  if (valid || isCoreSo) {
+    return classMeta.find(className) != classMeta.end();
+  }
+  return false;
+}
+
+bool Profile::CheckLiteralHot(const std::string &literalInner) const {
+  if (valid) {
+    if ((this->literal).find(literalInner) == (this->literal).end()) {
+      return false;
+    }
+    return true;
+  }
+  return false;
+}
+
+size_t Profile::GetLiteralProfileSize() const {
+  if (valid) {
+    return literal.size();
+  }
+  return 0;
+}
+
+bool Profile::CheckReflectionStrHot(const std::string &str, uint8 &layoutType) const {
+  if (valid) {
+    auto item = reflectionStrData.find(str);
+    if (item == reflectionStrData.end()) {
+      return false;
+    }
+    layoutType = item->second;
+    return true;
+  }
+  return false;
+}
+
+const std::unordered_map<std::string, Profile::FuncItem>& Profile::GetFunctionProf() const {
+  return funcProfData;
+}
+
+std::unordered_set<std::string> &Profile::GetMeta(uint8 type) {
+  switch (type) {
+    case kClassMeta:
+      return classMeta;
+    case kFieldMeta:
+      return fieldMeta;
+    case kMethodMeta:
+      return methodMeta;
+    default:
+      CHECK_FATAL(0, "type not found");
+      return classMeta;
+  }
+}
+
+void Profile::Dump() const {
+  std::ofstream outfile;
+  outfile.open("prof.dump");
+  outfile << "classMeta profile start " << std::endl;
+  for (const auto &item : classMeta) {
+    outfile << item << std::endl;
+  }
+  outfile << "func profile start " << std::endl;
+  for (const auto &item : funcProfData) {
+    outfile << item.first << " " << static_cast<uint32>((item.second).type) << " " << (item.second).callTimes << std::endl;
+  }
+
+  outfile << "reflectStr profile start " << std::endl;
+  for (const auto &item : reflectionStrData) {
+    outfile << item.first << " " << static_cast<uint32>(item.second) << std::endl;
+  }
+}
+
+} // namespace maple
diff --git a/src/mpl2mpl/BUILD.gn b/src/mpl2mpl/BUILD.gn
index c45af2a446079b90a25fcfefb46480257f52fd03..90bdf75c994eb11fa215a836645aab30009587c9 100644
--- a/src/mpl2mpl/BUILD.gn
+++ b/src/mpl2mpl/BUILD.gn
@@ -35,6 +35,9 @@ src_libmpl2mpl = [
   "src/native_stub_func.cpp",
   "src/vtable_impl.cpp",
   "src/class_hierarchy.cpp",
+  "src/constantfold.cpp",
+  "src/analyzector.cpp",
+  "src/coderelayout.cpp",
 ]
 
 configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ]
diff --git a/src/mpl2mpl/include/analyzector.h b/src/mpl2mpl/include/analyzector.h
new file mode 100644
index 0000000000000000000000000000000000000000..f30c274c4eaaac47881663f43a90eb3db582890e
--- /dev/null
+++ b/src/mpl2mpl/include/analyzector.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */
+#ifndef MPL2MPL_INCLUDE_ANALYZECTOR_H
+#define MPL2MPL_INCLUDE_ANALYZECTOR_H
+#include "module_phase.h"
+#include "phase_impl.h"
+namespace maple {
+class AnalyzeCtor : public FuncOptimizeImpl {
+ public:
+  AnalyzeCtor(MIRModule *mod, KlassHierarchy *kh, bool trace) : FuncOptimizeImpl(mod, kh, trace) {}
+  ~AnalyzeCtor() = default;
+
+  FuncOptimizeImpl *Clone() override {
+    return new AnalyzeCtor(*this);
+  }
+
+  void ProcessFunc(MIRFunction *func) override;
+  void Finish() override;
+
+ private:
+  bool hasSideEffect = false;
+  std::unordered_set<FieldID> fieldSet;
+  void ProcessStmt(StmtNode&) override;
+};
+
+class DoAnalyzeCtor : public ModulePhase {
+ public:
+  explicit DoAnalyzeCtor(ModulePhaseID id) : ModulePhase(id) {}
+
+  ~DoAnalyzeCtor() = default;
+
+  std::string PhaseName() const override {
+    return "analyzector";
+  }
+
+  AnalysisResult *Run(MIRModule *mod, ModuleResultMgr *mrm) override {
+    OPT_TEMPLATE(AnalyzeCtor);
+    return nullptr;
+  }
+};
+} // namespace maple
+#endif // MPL2MPL_INCLUDE_ANALYZECTOR_H
diff --git a/src/mpl2mpl/include/coderelayout.h b/src/mpl2mpl/include/coderelayout.h
new file mode 100644
index 0000000000000000000000000000000000000000..a4aecf9fc746bf37dbe5d3f27cf784730a50e5a3
--- /dev/null
+++ b/src/mpl2mpl/include/coderelayout.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */
+#ifndef MPL2MPL_INCLUDE_CODERELAYOUT_H
+#define MPL2MPL_INCLUDE_CODERELAYOUT_H
+#include <unordered_map>
+#include "phase_impl.h"
+#include "module_phase.h"
+#include "file_layout.h"
+namespace maple {
+class CodeReLayout : public FuncOptimizeImpl {
+ public:
+  explicit CodeReLayout(MIRModule *mod, KlassHierarchy *kh, bool dump);
+  ~CodeReLayout() = default;
+
+  FuncOptimizeImpl *Clone() override {
+    return new CodeReLayout(*this);
+  }
+
+  void ProcessFunc(MIRFunction *func) override;
+  void Finish() override;
+
+ private:
+  const std::string kExeFuncTag = "executedFuncStart";
+  const std::string kProfileStartTag = "#profile_start";
+  const std::string kProfileSummaryTag = "#profile_summary";
+  std::unordered_map<std::string, MIRSymbol*> str2SymMap;
+  uint32 layoutCount[static_cast<uint32>(LayoutType::kLayoutTypeCount)] = {};
+  std::string StaticFieldFilename(const std::string &mplFile);
+  void GenLayoutSym();
+  void AddStaticFieldRecord();
+  CallNode *CreateRecordFieldStaticCall(BaseNode *node, const std::string &name);
+  void FindDreadRecur(StmtNode *stmt, BaseNode *node);
+  void InsertProfileBeforeDread(StmtNode *stmt, BaseNode *opnd);
+  MIRSymbol *GetorCreateStaticFieldSym(const std::string &fieldName);
+  MIRSymbol *GenStrSym(const std::string &str);
+};
+
+class DoCodeReLayout : public ModulePhase {
+ public:
+  explicit DoCodeReLayout(ModulePhaseID id) : ModulePhase(id) {}
+
+  ~DoCodeReLayout() = default;
+
+  std::string PhaseName() const override {
+    return "CodeReLayout";
+  }
+
+  AnalysisResult *Run(MIRModule *mod, ModuleResultMgr *mrm) override {
+    OPT_TEMPLATE(CodeReLayout);
+    return nullptr;
+  }
+};
+} // namespace maple
+#endif // MPL2MPL_INCLUDE_CODERELAYOUT_H
diff --git a/src/mpl2mpl/include/constantfold.h b/src/mpl2mpl/include/constantfold.h
new file mode 100644
index 0000000000000000000000000000000000000000..e2cf04cc42996ded3c5060336cc1100806b10505
--- /dev/null
+++ b/src/mpl2mpl/include/constantfold.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */
+#ifndef MPL2MPL_INCLUDE_CONSTANTFOLD_H
+#define MPL2MPL_INCLUDE_CONSTANTFOLD_H
+#include "mir_nodes.h"
+#include "module_phase.h"
+#include "phase_impl.h"
+
+namespace maple {
+class ConstantFold : public FuncOptimizeImpl {
+ public:
+  // Fold an expression.
+  //
+  // It returns a new expression if there was something to fold, or
+  // nullptr otherwise.
+  BaseNode *Fold(BaseNode *node);
+
+  // Simplify a statement.
+  //
+  // It returns the original statement, or the changed statement if a
+  // simplification happened. If the statement can be deleted after a
+  // simplification, it returns nullptr.
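+  //
+  // For example, a brfalse whose condition folds to a nonzero constant can be
+  // removed outright, and an if whose condition folds to a constant can be
+  // reduced to the taken branch (see SimplifyCondGoto and SimplifyIf below).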
+  StmtNode *Simplify(StmtNode *node);
+
+  ConstantFold(MIRModule *mod, KlassHierarchy *kh, bool trace) : FuncOptimizeImpl(mod, kh, trace), mirModule(mod) {}
+
+  explicit ConstantFold(MIRModule *mod) : FuncOptimizeImpl(mod, nullptr, false), mirModule(mod) {}
+
+  FuncOptimizeImpl *Clone() override {
+    return new ConstantFold(*this);
+  }
+
+  void ProcessFunc(MIRFunction *func) override;
+  virtual ~ConstantFold() = default;
+
+  MIRConst *FoldFloorMIRConst(MIRConst*, PrimType, PrimType);
+  MIRConst *FoldRoundMIRConst(MIRConst*, PrimType, PrimType);
+  MIRConst *FoldTypeCvtMIRConst(MIRConst*, PrimType, PrimType);
+  MIRConst *FoldSignExtendMIRConst(Opcode, PrimType, uint8, MIRConst*);
+  MIRConst *FoldConstComparisonMIRConst(Opcode, PrimType, PrimType, MIRConst&, MIRConst&);
+
+ private:
+  MIRModule *mirModule;
+  StmtNode *SimplifyBinary(BinaryStmtNode *node);
+  StmtNode *SimplifyBlock(BlockNode *node);
+  StmtNode *SimplifyCondGoto(CondGotoNode *node);
+  StmtNode *SimplifyCondGotoSelect(CondGotoNode *node);
+  StmtNode *SimplifyDassign(DassignNode *node);
+  StmtNode *SimplifyIassign(IassignNode *node);
+  StmtNode *SimplifyNary(NaryStmtNode *node);
+  StmtNode *SimplifyIcall(IcallNode *node);
+  StmtNode *SimplifyIf(IfStmtNode *node);
+  StmtNode *SimplifySwitch(SwitchNode *node);
+  StmtNode *SimplifyUnary(UnaryStmtNode *node);
+  StmtNode *SimplifyWhile(WhileStmtNode *node);
+  std::pair<BaseNode*, int64> FoldArray(ArrayNode *node);
+  std::pair<BaseNode*, int64> FoldBase(BaseNode *node) const;
+  std::pair<BaseNode*, int64> FoldBinary(BinaryNode *node);
+  std::pair<BaseNode*, int64> FoldCompare(CompareNode *node);
+  std::pair<BaseNode*, int64> FoldDepositbits(DepositbitsNode *node);
+  std::pair<BaseNode*, int64> FoldExtractbits(ExtractbitsNode *node);
+  ConstvalNode *FoldSignExtend(Opcode opcode, PrimType resultType, uint8 size, ConstvalNode *cst);
+  std::pair<BaseNode*, int64> FoldIread(IreadNode *node);
+  std::pair<BaseNode*, int64> FoldSizeoftype(SizeoftypeNode *node);
+  std::pair<BaseNode*, int64> FoldRetype(RetypeNode *node);
+  std::pair<BaseNode*, int64> FoldGcmallocjarray(JarrayMallocNode *node);
+  std::pair<BaseNode*, int64> FoldUnary(UnaryNode *node);
+  std::pair<BaseNode*, int64> FoldTernary(TernaryNode *node);
+  std::pair<BaseNode*, int64> FoldTypeCvt(TypeCvtNode *node);
+  ConstvalNode *FoldCeil(ConstvalNode *cst, PrimType fromType, PrimType toType);
+  ConstvalNode *FoldFloor(ConstvalNode *cst, PrimType fromType, PrimType toType);
+  ConstvalNode *FoldRound(ConstvalNode *cst, PrimType fromType, PrimType toType);
+  ConstvalNode *FoldTrunk(ConstvalNode *cst, PrimType fromType, PrimType toType);
+  ConstvalNode *FoldTypeCvt(ConstvalNode *cst, PrimType fromType, PrimType toType);
+  ConstvalNode *FoldConstComparison(Opcode opcode, PrimType resultType, PrimType opndType, ConstvalNode &const0,
+                                    ConstvalNode &const1);
+  ConstvalNode *FoldConstBinary(Opcode opcode, PrimType resultType, ConstvalNode &const0, ConstvalNode &const1);
+  ConstvalNode *FoldIntConstComparison(Opcode opcode, PrimType resultType, ConstvalNode &const0, ConstvalNode &const1);
+  MIRIntConst *FoldIntConstComparisonMIRConst(Opcode, PrimType, const MIRIntConst&, const MIRIntConst&);
+  ConstvalNode *FoldIntConstBinary(Opcode opcode, PrimType resultType, ConstvalNode &const0, ConstvalNode &const1);
+  ConstvalNode *FoldFPConstComparison(Opcode opcode, PrimType resultType, PrimType opndType, ConstvalNode &const0,
+                                      ConstvalNode &const1);
+  MIRIntConst *FoldFPConstComparisonMIRConst(Opcode opcode, PrimType resultType, PrimType opndType, MIRConst &const0,
+                                             MIRConst &const1);
+  ConstvalNode *FoldFPConstBinary(Opcode opcode, PrimType resultType, ConstvalNode &const0, ConstvalNode &const1);
+  ConstvalNode *FoldConstUnary(Opcode opcode, PrimType resultType, ConstvalNode *c);
+  ConstvalNode *FoldIntConstUnary(Opcode opcode, PrimType resultType, ConstvalNode *c);
+  template <typename T>
+  ConstvalNode *FoldFPConstUnary(Opcode opcode, PrimType resultType, ConstvalNode *c);
+  BaseNode *NegateTree(BaseNode *node);
+  BaseNode *Negate(BaseNode *node);
+  BaseNode *Negate(UnaryNode *node);
+  BaseNode *Negate(ConstvalNode *node);
+  BinaryNode *NewBinaryNode(BinaryNode *old, Opcode op, PrimType primeType, BaseNode *l, BaseNode *r);
+  UnaryNode *NewUnaryNode(UnaryNode *old, Opcode op, PrimType primeType, BaseNode *e);
+  std::pair<BaseNode*, int64> DispatchFold(BaseNode *node);
+  BaseNode *PairToExpr(PrimType resultType, const std::pair<BaseNode*, int64> &p);
+  BaseNode *SimplifyDoubleCompare(CompareNode *node);
+};
+
+class DoConstantFold : public ModulePhase {
+ public:
+  explicit DoConstantFold(ModulePhaseID id) : ModulePhase(id) {}
+
+  ~DoConstantFold() = default;
+
+  std::string PhaseName() const override {
+    return "ConstantFold";
+  }
+
+  AnalysisResult *Run(MIRModule *mod, ModuleResultMgr *mrm) override {
+    OPT_TEMPLATE(ConstantFold);
+    return nullptr;
+  }
+};
+} // namespace maple
+#endif // MPL2MPL_INCLUDE_CONSTANTFOLD_H
diff --git a/src/mpl2mpl/src/analyzector.cpp b/src/mpl2mpl/src/analyzector.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..7559c3538936c755cbd74c4f7bc5033a50e18f04
--- /dev/null
+++ b/src/mpl2mpl/src/analyzector.cpp
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */ +#include "analyzector.h" +#include "utils.h" +// AnalyzeCtor analyzes which fields are assgiend inside of of each constructor +namespace maple { +void AnalyzeCtor::ProcessFunc(MIRFunction *func) { + if (!func->IsConstructor() || func->IsEmpty() || func->GetParamSize() == 0) { + return; + } + SetCurrentFunction(*func); + hasSideEffect = false; + fieldSet.clear(); + if (func->GetBody() != nullptr) { + ProcessBlock(*func->GetBody()); + } + PUIdx puIdx = func->GetPuidx(); + const MapleMap*> &puIdxFieldMap = GetMIRModule().GetPuIdxFieldInitializedMap(); + CHECK_FATAL(puIdxFieldMap.find(puIdx) == puIdxFieldMap.end(), + "%s has been processed before", func->GetName().c_str()); + // if the function has calls with sideeffect, conservatively + // we assume all fields are modified in ctor + if (hasSideEffect) { + MapleSet *fieldSubSet = + GetMIRModule().GetMemPool()->New>(std::less(), + GetMIRModule().GetMPAllocator().Adapter()); + fieldSubSet->insert(0); // write to all + GetMIRModule().SetPuIdxFieldSet(puIdx, fieldSubSet); + } else if (!fieldSet.empty()) { + MapleSet *fieldSubSet = + GetMIRModule().GetMemPool()->New>(std::less(), + GetMIRModule().GetMPAllocator().Adapter()); + std::copy(fieldSet.begin(), fieldSet.end(), std::inserter(*fieldSubSet, fieldSubSet->begin())); + GetMIRModule().SetPuIdxFieldSet(puIdx, fieldSubSet); + } else { + // no fields are assigned in constructor + GetMIRModule().SetPuIdxFieldSet(puIdx, nullptr); + } +} + +// collect field ids which are assigned inside the stmt and mark sideeffect +// flag for non-ctor calls +void AnalyzeCtor::ProcessStmt(StmtNode &stmt) { + switch (stmt.GetOpCode()) { + case OP_iassign: { + auto &iassign = static_cast(stmt); + MIRType *baseType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iassign.GetTyIdx()); + MIRType *pointedType = utils::ToRef(safe_cast(baseType)).GetPointedType(); + auto structType = safe_cast(pointedType); + if (structType != nullptr) { + MIRType *fieldType = structType->GetFieldType(iassign.GetFieldID()); + if (fieldType->GetPrimType() != PTY_ref) { + break; + } + } + fieldSet.insert(iassign.GetFieldID()); + break; + } + case OP_callassigned: + case OP_call: + case OP_icall: + case OP_intrinsiccall: + case OP_xintrinsiccall: + case OP_virtualcall: + case OP_superclasscall: + case OP_interfacecall: { + hasSideEffect = true; + break; + } + default: + break; + } +} + +void AnalyzeCtor::Finish() { + if (!trace) { + return; + } + for (auto &pit : GetMIRModule().GetPuIdxFieldInitializedMap()) { + GlobalTables::GetFunctionTable().GetFunctionFromPuidx(pit.first)->Dump(true); + LogInfo::MapleLogger() << "field:"; + MapleSet *fieldIDSet = pit.second; + if (fieldIDSet == nullptr) { + LogInfo::MapleLogger() << "write nothing\n"; + continue; + } + for (FieldID fid : *fieldIDSet) { + LogInfo::MapleLogger() << fid << " "; + } + LogInfo::MapleLogger() << '\n'; + } +} +} // namespace maple diff --git a/src/mpl2mpl/src/coderelayout.cpp b/src/mpl2mpl/src/coderelayout.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4779c0e4141155d2bd21194796296f46453f0cad --- /dev/null +++ b/src/mpl2mpl/src/coderelayout.cpp @@ -0,0 +1,273 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. 
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */
+#include "coderelayout.h"
+#include <algorithm>
+#include <fstream>
+#include <iostream>
+#include "profile.h"
+// This phase lays out functions according to the profile.
+// It first parses the profile and finds the corresponding dex file's
+// function type info. The profile distinguishes three function types:
+// 1. boot-only-hot: hot only during the phone boot phase
+// 2. both-hot: hot during both the boot phase and the run phase
+// 3. run-hot: hot only during the run phase
+// Every function has exactly one layout type, and functions with the same
+// layout type are placed together. The current layout order is:
+//
+// [BootHot]
+// [BothHot]
+// [RunHot]
+// [StartupOnly]
+// [UsedOnce]
+// [Executed]  function executed under some condition
+// [Unused]
+namespace maple {
+CodeReLayout::CodeReLayout(MIRModule *mod, KlassHierarchy *kh, bool dump) : FuncOptimizeImpl(mod, kh, dump) {
+  if (!Options::proFileData.empty()) {
+    size_t pos = 0;
+    if ((pos = Options::proFileData.find(':')) != std::string::npos) {
+      Options::proFileFuncData = Options::proFileData.substr(0, pos);
+      Options::proFileClassData = Options::proFileData.substr(pos + 1);
+    } else {
+      Options::proFileFuncData = Options::proFileData;
+    }
+    LogInfo::MapleLogger() << "func profile " << Options::proFileFuncData << " class profile "
+                           << Options::proFileClassData << "\n";
+  }
+  if (Options::profileStaticFields) {
+    std::string staticFieldsFile = StaticFieldFilename(GetMIRModule().GetFileName());
+    LogInfo::MapleLogger() << staticFieldsFile << "\n";
+    std::ofstream staticFields;
+    staticFields.open(staticFieldsFile, std::ofstream::trunc);
+    if (!staticFields.is_open()) {
+      ERR(kLncErr, " %s open failed!", staticFieldsFile.c_str());
+    }
+    staticFields.close();
+  }
+}
+
+CallNode *CodeReLayout::CreateRecordFieldStaticCall(BaseNode *node, const std::string &name) {
+  MIRFunction *callee = builder->GetOrCreateFunction("MCC_RecordStaticField", static_cast<TyIdx>(PTY_void));
+  BaseNode *nameAddr = builder->CreateExprAddrof(0, *GetorCreateStaticFieldSym(name));
+  MapleVector<BaseNode*> args(builder->GetCurrentFuncCodeMpAllocator()->Adapter());
+  args.push_back(node);
+  args.push_back(nameAddr);
+  return builder->CreateStmtCall(callee->GetPuidx(), args);
+}
+
+std::string CodeReLayout::StaticFieldFilename(const std::string &mplFile) {
+  size_t pos = mplFile.rfind(".mpl");
+  size_t postfixSize = 4;
+  CHECK_FATAL(pos != std::string::npos && pos == mplFile.length() - postfixSize, "Not compiling .mpl file?");
+  std::string smryFileName = mplFile.substr(0, pos) + ".staticfields";
+  return smryFileName;
+}
+
+void CodeReLayout::AddStaticFieldRecord() {
+  StmtNode *stmt = currFunc->GetBody()->GetFirst();
+  StmtNode *next = nullptr;
+  while (stmt != nullptr) {
+    next = stmt->GetNext();
+    FindDreadRecur(stmt, stmt);
+    if (stmt->GetOpCode() == OP_dassign) {
+      MIRSymbol *mirSym = currFunc->GetLocalOrGlobalSymbol(static_cast<DassignNode*>(stmt)->GetStIdx());
+      if (mirSym->IsStatic()) {
+        BaseNode *node = builder->CreateExprAddrof((static_cast<DassignNode*>(stmt))->GetFieldID(),
+                                                   (static_cast<DassignNode*>(stmt))->GetStIdx());
+        CallNode *call = CreateRecordFieldStaticCall(node, mirSym->GetName());
+        currFunc->GetBody()->InsertBefore(stmt, call);
+      }
+    }
+    stmt = next;
+  }
+}
+
+void CodeReLayout::FindDreadRecur(StmtNode *stmt, BaseNode *node) {
+  if (node == nullptr) {
+    return;
+  }
+  BinaryOpnds *bOpnds = nullptr;
+  switch (node->GetOpCode()) {
+    case OP_dread:
+    case OP_addrof: {
+      return InsertProfileBeforeDread(stmt, node);
+    }
+    case OP_array:
+    case OP_intrinsicop:
+    case OP_intrinsicopwithtype:
+    case OP_call:
+    case OP_callassigned:
+    case OP_icall:
+    case OP_icallassigned:
+    case OP_intrinsiccall:
+    case OP_intrinsiccallwithtype:
+    case OP_return:
+    case OP_switch:
+    case OP_dassign:
+    case OP_iassign: {
+      for (size_t i = 0; i < node->NumOpnds(); i++) {
+        FindDreadRecur(stmt, node->Opnd(i));
+      }
+      break;
+    }
+    default: {
+      if (node->IsUnaryNode()) {
+        UnaryNode *uNode = static_cast<UnaryNode*>(node);
+        FindDreadRecur(stmt, uNode->Opnd(0));
+      } else if (node->IsBinaryNode()) {
+        BinaryNode *bNode = static_cast<BinaryNode*>(node);
+        bOpnds = static_cast<BinaryOpnds*>(bNode);
+        FindDreadRecur(stmt, bOpnds->GetBOpnd(0));
+        FindDreadRecur(stmt, bOpnds->GetBOpnd(1));
+      } else {
+        break;
+      }
+    }
+  }
+}
+
+void CodeReLayout::InsertProfileBeforeDread(StmtNode *stmt, BaseNode *opnd) {
+  if (!opnd || (opnd->GetOpCode() != OP_dread && opnd->GetOpCode() != OP_addrof)) {
+    return;
+  }
+  DreadNode *dreadNode = static_cast<DreadNode*>(opnd);
+  MIRSymbol *mirSym = currFunc->GetLocalOrGlobalSymbol(dreadNode->GetStIdx());
+  if (mirSym->IsStatic()) {
+    BaseNode *node = opnd;
+    if (opnd->GetOpCode() == OP_dread) {
+      node = builder->CreateExprAddrof(dreadNode->GetFieldID(), dreadNode->GetStIdx());
+    }
+    CallNode *call = CreateRecordFieldStaticCall(node, mirSym->GetName());
+    currFunc->GetBody()->InsertBefore(stmt, call);
+  }
+}
+
+void CodeReLayout::ProcessFunc(MIRFunction *func) {
+  if (func->IsEmpty()) {
+    return;
+  }
+  SetCurrentFunction(*func);
+  if (Options::profileStaticFields) {
+    AddStaticFieldRecord();
+  }
+  if (func->IsClinit()) {
+    func->SetLayoutType(kLayoutUsedOnce);
+  }
+}
+
+void CodeReLayout::Finish() {
+  const auto &proFileData = GetMIRModule().GetProfile().GetFunctionProf();
+  for (auto &item : proFileData) {
+    std::string funcName = item.first;
+    MIRFunction *sortFunction = builder->GetFunctionFromName(funcName);
+    if (sortFunction != nullptr && sortFunction->GetBody()) {
+      sortFunction->SetCallTimes((item.second).callTimes);
+      sortFunction->SetLayoutType((item.second).type);
+    }
+  }
+  for (auto &function : GetMIRModule().GetFunctionList()) {
+    ++layoutCount[static_cast<uint32>(function->GetLayoutType())];
+  }
+  if (trace) {
+    for (uint32 i = 0; i < static_cast<uint32>(LayoutType::kLayoutTypeCount); ++i) {
+      LogInfo::MapleLogger() << "function in category\t" << i << "\tcount=" << layoutCount[i] << "\n";
+    }
+  }
+  std::stable_sort(GetMIRModule().GetFunctionList().begin(), GetMIRModule().GetFunctionList().end(),
+                   [](const MIRFunction *a, const MIRFunction *b) { return a->GetLayoutType() < b->GetLayoutType(); });
+  uint32 last = 0;
+  for (uint32 i = 0; i <= static_cast<uint32>(LayoutType::kLayoutRunHot); i++) {
+    if (trace) {
+      LogInfo::MapleLogger() << "last\t" << last << "\tcount\t" << layoutCount[i] << "\n";
+    }
+    std::stable_sort(GetMIRModule().GetFunctionList().begin() + last,
+                     GetMIRModule().GetFunctionList().begin() + last + layoutCount[i],
+                     [](const MIRFunction *a, const MIRFunction *b) { return a->GetCallTimes() < b->GetCallTimes(); });
+    last += layoutCount[i];
+  }
+  // Create layoutInfo
+  GenLayoutSym();
+}
+
+MIRSymbol *CodeReLayout::GenStrSym(const std::string &str) {
+  std::string newStr = str + '\0';
+  MIRArrayType &strTabType =
+      *GlobalTables::GetTypeTable().GetOrCreateArrayType(*GlobalTables::GetTypeTable().GetUInt8(), newStr.length());
+  std::string strTabName = NameMangler::kStaticFieldNamePrefixStr + str;
+  MIRSymbol *staticSym = builder->CreateGlobalDecl(strTabName, strTabType);
+  MIRAggConst *strTabAggConst = GetMIRModule().GetMemPool()->New<MIRAggConst>(GetMIRModule(), strTabType);
+  staticSym->SetStorageClass(kScFstatic);
+  for (const char &c : newStr) {
+    MIRConst *newConst = GetMIRModule().GetMemPool()->New<MIRIntConst>(c, *GlobalTables::GetTypeTable().GetUInt8());
+    strTabAggConst->PushBack(newConst);
+  }
+  staticSym->SetKonst(strTabAggConst);
+  return staticSym;
+}
+
+MIRSymbol *CodeReLayout::GetorCreateStaticFieldSym(const std::string &fieldName) {
+  auto it = str2SymMap.find(fieldName);
+  if (it != str2SymMap.end()) {
+    return it->second;
+  }
+  MIRSymbol *sym = GenStrSym(fieldName);
+  str2SymMap.insert(std::make_pair(fieldName, sym));
+  return sym;
+}
+
+void CodeReLayout::GenLayoutSym() {
+  MIRArrayType &arrayType =
+      *GlobalTables::GetTypeTable().GetOrCreateArrayType(*GlobalTables::GetTypeTable().GetVoidPtr(), 1);
+  MIRAggConst *funcLayoutConst = GetMIRModule().GetMemPool()->New<MIRAggConst>(GetMIRModule(), arrayType);
+  uint32 funcIdx = 0;
+  MIRConst *fieldConst = nullptr;
+  MIRFunction *vMethod = nullptr;
+  for (uint32 i = 0; i < static_cast<uint32>(LayoutType::kLayoutTypeCount); ++i) {
+    if (funcIdx < GetMIRModule().GetFunctionList().size()) {
+      vMethod = GetMIRModule().GetFunction(funcIdx);
+    } else {
+      std::cerr << "no method for codelayout type " << GetLayoutTypeString(i) << "\n";
+      return;
+    }
+    while (vMethod->IsAbstract() || vMethod->GetBody() == nullptr) {
+      // skip functions that are abstract or have no body
+      if (trace) {
+        LogInfo::MapleLogger() << "skip invalid method " << funcIdx << "\n";
+      }
+      funcIdx++;
+      if (funcIdx < GetMIRModule().GetFunctionList().size()) {
+        vMethod = GetMIRModule().GetFunction(funcIdx);
+      } else {
+        std::cerr << "no method for codelayout type " << GetLayoutTypeString(i) << "\n";
+        return;
+      }
+    }
+    if (trace) {
+      LogInfo::MapleLogger() << "Start of category " << i << " in funcIdx " << funcIdx << " "
+                             << vMethod->GetName() << "\n";
+    }
+    AddroffuncNode *addroffuncExpr = builder->CreateExprAddroffunc(vMethod->GetPuidx(), GetMIRModule().GetMemPool());
+    fieldConst = GetMIRModule().GetMemPool()->New<MIRAddroffuncConst>(addroffuncExpr->GetPUIdx(),
+                                                                      *GlobalTables::GetTypeTable().GetVoidPtr());
+    funcLayoutConst->PushBack(fieldConst);
+    funcIdx += layoutCount[i];
+  }
+  std::string funcLayoutSymName = NameMangler::kFunctionLayoutStr + GetMIRModule().GetFileNameAsPostfix();
+  MIRSymbol *funcLayoutSym = builder->CreateGlobalDecl(funcLayoutSymName, arrayType);
+  funcLayoutSym->SetKonst(funcLayoutConst);
+  funcLayoutSym->SetStorageClass(kScFstatic);
+}
+} // namespace maple
diff --git a/src/mpl2mpl/src/constantfold.cpp b/src/mpl2mpl/src/constantfold.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..5d409909a8a1509b2dba501ef59284f9cbe993d4
--- /dev/null
+++ b/src/mpl2mpl/src/constantfold.cpp
@@ -0,0 +1,1979 @@
+/*
+ * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */
+#include "constantfold.h"
+#include <cmath>
+#include <climits>
+#include "mpl_logging.h"
+#include "mir_function.h"
+#include "mir_builder.h"
+#include "global_tables.h"
+
+namespace {
+constexpr maple::uint64 kJsTypeNumber = 4; // JSTYPE_NUMBER
+constexpr maple::uint64 kJsTypeNumberInHigh32Bit = kJsTypeNumber << 32; // set high 32 bit as JSTYPE_NUMBER
+constexpr maple::uint32 kByteSizeOfBit64 = 8; // byte number for 64 bit
+enum CompareRes : maple::int64 {
+  kLess = -1,
+  kEqual = 0,
+  kGreater = 1
+};
+}
+
+namespace maple {
+// This phase is designed to achieve compiler optimization by
+// simplifying constant expressions. A constant expression is
+// evaluated and replaced by its value, computed at compile time,
+// to save execution time at runtime.
+//
+// The main procedure is as follows:
+// A. Analyze the expression type
+// B. Analyze the operator type
+// C. Replace the expression with the result of the operation
+BinaryNode *ConstantFold::NewBinaryNode(BinaryNode *old, Opcode op, PrimType primType, BaseNode *lhs, BaseNode *rhs) {
+  CHECK_NULL_FATAL(old);
+  BinaryNode *result = nullptr;
+  if (old->GetOpCode() == op && old->GetPrimType() == primType && old->Opnd(0) == lhs && old->Opnd(1) == rhs) {
+    result = old;
+  } else {
+    result = mirModule->CurFuncCodeMemPool()->New<BinaryNode>(op, primType, lhs, rhs);
+  }
+  return result;
+}
+
+UnaryNode *ConstantFold::NewUnaryNode(UnaryNode *old, Opcode op, PrimType primType, BaseNode *e) {
+  CHECK_NULL_FATAL(old);
+  UnaryNode *result = nullptr;
+  if (old->GetOpCode() == op && old->GetPrimType() == primType && old->Opnd(0) == e) {
+    result = old;
+  } else {
+    result = mirModule->CurFuncCodeMemPool()->New<UnaryNode>(op, primType, e);
+  }
+  return result;
+}
+
+BaseNode *ConstantFold::PairToExpr(PrimType resultType, const std::pair<BaseNode*, int64> &p) {
+  CHECK_NULL_FATAL(p.first);
+  BaseNode *result = p.first;
+  if (p.second == 0) {
+    return result;
+  }
+  if (p.first->GetOpCode() == OP_neg && p.second > 0) {
+    // -a, 5 -> 5 - a
+    ConstvalNode *val = mirModule->GetMIRBuilder()->CreateIntConst(p.second, resultType);
+    BaseNode *r = static_cast<UnaryNode*>(p.first)->Opnd(0);
+    result = mirModule->CurFuncCodeMemPool()->New<BinaryNode>(OP_sub, resultType, val, r);
+  } else {
+    if (p.second > 0) {
+      // +-a, 5 -> a + 5
+      ConstvalNode *val = mirModule->GetMIRBuilder()->CreateIntConst(p.second, resultType);
+      result = mirModule->CurFuncCodeMemPool()->New<BinaryNode>(OP_add, resultType, p.first, val);
+    } else {
+      // +-a, -5 -> a + -5
+      ConstvalNode *val = mirModule->GetMIRBuilder()->CreateIntConst(-p.second, resultType);
+      result = mirModule->CurFuncCodeMemPool()->New<BinaryNode>(OP_sub, resultType, p.first, val);
+    }
+  }
+  return result;
+}
+
+std::pair<BaseNode*, int64> ConstantFold::FoldBase(BaseNode *node) const {
+  return std::make_pair(node, 0);
+}
+
+StmtNode *ConstantFold::Simplify(StmtNode *node) {
+  CHECK_NULL_FATAL(node);
+  switch (node->GetOpCode()) {
+    case OP_dassign:
+    case OP_maydassign:
+      return SimplifyDassign(static_cast<DassignNode*>(node));
+    case OP_iassign:
+      return SimplifyIassign(static_cast<IassignNode*>(node));
+    case OP_block:
+      return SimplifyBlock(static_cast<BlockNode*>(node));
+    case OP_if:
+      return SimplifyIf(static_cast<IfStmtNode*>(node));
+    case OP_dowhile:
+    case OP_while:
+      return SimplifyWhile(static_cast<WhileStmtNode*>(node));
+    case OP_switch:
+      return SimplifySwitch(static_cast<SwitchNode*>(node));
+    case OP_eval:
+    case OP_throw:
+    case OP_free:
+    case OP_decref:
+    case OP_incref:
+    case OP_decrefreset:
+    case OP_regassign:
+    case OP_assertnonnull:
+      return SimplifyUnary(static_cast<UnaryStmtNode*>(node));
+    case OP_assertge:
+    case OP_assertlt:
+      return SimplifyBinary(static_cast<BinaryStmtNode*>(node));
+    case OP_brfalse:
+    case OP_brtrue:
+      return SimplifyCondGoto(static_cast<CondGotoNode*>(node));
+    case OP_return:
+    case OP_syncenter:
+    case OP_syncexit:
+    case OP_call:
+    case OP_virtualcall:
+    case OP_superclasscall:
+    case OP_interfacecall:
+    case OP_customcall:
+    case OP_polymorphiccall:
+    case OP_intrinsiccall:
+    case OP_xintrinsiccall:
+    case OP_intrinsiccallwithtype:
+    case OP_callassigned:
+    case OP_virtualcallassigned:
+    case OP_superclasscallassigned:
+    case OP_interfacecallassigned:
+    case OP_customcallassigned:
+    case OP_polymorphiccallassigned:
+    case OP_intrinsiccallassigned:
+    case OP_intrinsiccallwithtypeassigned:
+    case OP_xintrinsiccallassigned:
+    case OP_callinstant:
+    case OP_callinstantassigned:
+    case OP_virtualcallinstant:
+    case OP_virtualcallinstantassigned:
+    case OP_superclasscallinstant:
+    case OP_superclasscallinstantassigned:
+    case OP_interfacecallinstant:
+    case OP_interfacecallinstantassigned:
+      return SimplifyNary(static_cast<NaryStmtNode*>(node));
+    case OP_icall:
+    case OP_icallassigned:
+      return SimplifyIcall(static_cast<IcallNode*>(node));
+    default:
+      return node;
+  }
+}
+
+std::pair<BaseNode*, int64> ConstantFold::DispatchFold(BaseNode *node) {
+  CHECK_NULL_FATAL(node);
+  switch (node->GetOpCode()) {
+    case OP_sizeoftype:
+      return FoldSizeoftype(static_cast<SizeoftypeNode*>(node));
+    case OP_abs:
+    case OP_bnot:
+    case OP_lnot:
+    case OP_neg:
+    case OP_recip:
+    case OP_sqrt:
+      return FoldUnary(static_cast<UnaryNode*>(node));
+    case OP_ceil:
+    case OP_floor:
+    case OP_round:
+    case OP_trunc:
+    case OP_cvt:
+      return FoldTypeCvt(static_cast<TypeCvtNode*>(node));
+    case OP_sext:
+    case OP_zext:
+    case OP_extractbits:
+      return FoldExtractbits(static_cast<ExtractbitsNode*>(node));
+    case OP_iaddrof:
+    case OP_iread:
+      return FoldIread(static_cast<IreadNode*>(node));
+    case OP_add:
+    case OP_ashr:
+    case OP_band:
+    case OP_bior:
+    case OP_bxor:
+    case OP_cand:
+    case OP_cior:
+    case OP_div:
+    case OP_land:
+    case OP_lior:
+    case OP_lshr:
+    case OP_max:
+    case OP_min:
+    case OP_mul:
+    case OP_rem:
+    case OP_shl:
+    case OP_sub:
+      return FoldBinary(static_cast<BinaryNode*>(node));
+    case OP_eq:
+    case OP_ne:
+    case OP_ge:
+    case OP_gt:
+    case OP_le:
+    case OP_lt:
+    case OP_cmp:
+      return FoldCompare(static_cast<CompareNode*>(node));
+    case OP_depositbits:
+      return FoldDepositbits(static_cast<DepositbitsNode*>(node));
+    case OP_select:
+      return FoldTernary(static_cast<TernaryNode*>(node));
+    case OP_array:
+      return FoldArray(static_cast<ArrayNode*>(node));
+    case OP_retype:
+      return FoldRetype(static_cast<RetypeNode*>(node));
+    case OP_gcmallocjarray:
+    case OP_gcpermallocjarray:
+      return FoldGcmallocjarray(static_cast<JarrayMallocNode*>(node));
+    default:
+      return FoldBase(node);
+  }
+}
+
+BaseNode *ConstantFold::Negate(BaseNode *node) {
+  CHECK_NULL_FATAL(node);
+  return mirModule->CurFuncCodeMemPool()->New<UnaryNode>(OP_neg, PrimType(node->GetPrimType()), node);
+}
+
+BaseNode *ConstantFold::Negate(UnaryNode *node) {
+  CHECK_NULL_FATAL(node);
+  BaseNode *result = nullptr;
+  if (node->GetOpCode() == OP_neg) {
+    result = static_cast<BaseNode*>(node->Opnd(0));
+  } else {
+    BaseNode *n = static_cast<BaseNode*>(node);
+    result = NewUnaryNode(node, OP_neg, node->GetPrimType(), n);
+  }
+  return result;
+}
+
+BaseNode *ConstantFold::Negate(ConstvalNode *node) {
+  CHECK_NULL_FATAL(node);
+  ConstvalNode *copy = node->CloneTree(mirModule->GetCurFuncCodeMPAllocator());
+  CHECK_NULL_FATAL(copy);
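+  // negate the cloned constant in place; the original constval node is left untouched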
copy->GetConstVal()->Neg(); + return copy; +} + +BaseNode *ConstantFold::NegateTree(BaseNode *node) { + CHECK_NULL_FATAL(node); + if (node->IsUnaryNode()) { + return Negate(static_cast(node)); + } else if (node->GetOpCode() == OP_constval) { + return Negate(static_cast(node)); + } else { + return Negate(static_cast(node)); + } +} + +MIRIntConst *ConstantFold::FoldIntConstComparisonMIRConst(Opcode opcode, PrimType resultType, + const MIRIntConst &intConst0, + const MIRIntConst &intConst1) { + int64 result = 0; + bool greater = (intConst0.GetValue() > intConst1.GetValue()); + bool equal = (intConst0.GetValue() == intConst1.GetValue()); + bool less = (intConst0.GetValue() < intConst1.GetValue()); + switch (opcode) { + case OP_eq: { + result = equal; + break; + } + case OP_ge: { + result = (greater || equal); + break; + } + case OP_gt: { + result = greater; + break; + } + case OP_le: { + result = (less || equal); + break; + } + case OP_lt: { + result = less; + break; + } + case OP_ne: { + result = !equal; + break; + } + case OP_cmp: { + if (greater) { + result = kGreater; + } else if (equal) { + result = kEqual; + } else { + result = kLess; + } + break; + } + default: + ASSERT(false, "Unknown opcode for FoldIntConstComparison"); + } + // determine the type + MIRType &type = *GlobalTables::GetTypeTable().GetPrimType(resultType); + // form the constant + MIRIntConst *constValue = nullptr; + if (type.GetPrimType() == PTY_dyni32) { + constValue = mirModule->GetMemPool()->New(0, type); + constValue->SetValue(kJsTypeNumberInHigh32Bit | (static_cast(result))); + } else { + constValue = mirModule->GetMemPool()->New(result, type); + } + return constValue; +} + +ConstvalNode *ConstantFold::FoldIntConstComparison(Opcode opcode, PrimType resultType, + ConstvalNode &const0, ConstvalNode &const1) { + MIRIntConst *intConst0 = safe_cast(const0.GetConstVal()); + MIRIntConst *intConst1 = safe_cast(const1.GetConstVal()); + CHECK_NULL_FATAL(intConst0); + CHECK_NULL_FATAL(intConst1); + MIRIntConst *constValue = FoldIntConstComparisonMIRConst(opcode, resultType, *intConst0, *intConst1); + // form the ConstvalNode + ConstvalNode *resultConst = mirModule->CurFuncCodeMemPool()->New(); + resultConst->SetPrimType(resultType); + resultConst->SetConstVal(constValue); + return resultConst; +} + +ConstvalNode *ConstantFold::FoldIntConstBinary(Opcode opcode, PrimType resultType, ConstvalNode &const0, + ConstvalNode &const1) { + MIRIntConst *intConst0 = safe_cast(const0.GetConstVal()); + MIRIntConst *intConst1 = safe_cast(const1.GetConstVal()); + CHECK_NULL_FATAL(intConst0); + CHECK_NULL_FATAL(intConst1); + int64 intValueOfConst0 = intConst0->GetValue(); + int64 intValueOfConst1 = intConst1->GetValue(); + uint64 result64 = 0; + uint32 result32 = 0; + bool useResult64 = (GetPrimTypeSize(resultType) == kByteSizeOfBit64); + switch (opcode) { + case OP_add: { + if (useResult64) { + result64 = intValueOfConst0 + intValueOfConst1; + } else { + result32 = static_cast(intValueOfConst0) + static_cast(intValueOfConst1); + } + break; + } + case OP_sub: { + if (useResult64) { + result64 = intValueOfConst0 - intValueOfConst1; + } else { + result32 = static_cast(intValueOfConst0) - static_cast(intValueOfConst1); + } + break; + } + case OP_mul: { + if (useResult64) { + result64 = intValueOfConst0 * intValueOfConst1; + } else { + result32 = static_cast(intValueOfConst0) * static_cast(intValueOfConst1); + } + break; + } + case OP_div: { + if (IsUnsignedInteger(const0.GetPrimType())) { + if (useResult64) { + result64 = 
static_cast(intValueOfConst0) / static_cast(intValueOfConst1); + } else { + result32 = static_cast(intValueOfConst0) / static_cast(intValueOfConst1); + } + } else { + if (useResult64) { + result64 = intValueOfConst0 / intValueOfConst1; + } else { + result32 = static_cast(intValueOfConst0) / static_cast(intValueOfConst1); + } + } + break; + } + case OP_rem: { + if (IsUnsignedInteger(const0.GetPrimType())) { + if (useResult64) { + result64 = static_cast(intValueOfConst0) % static_cast(intValueOfConst1); + } else { + result32 = static_cast(intValueOfConst0) % static_cast(intValueOfConst1); + } + } else { + if (useResult64) { + result64 = intValueOfConst0 % intValueOfConst1; + } else { + result32 = static_cast(intValueOfConst0) % static_cast(intValueOfConst1); + } + } + break; + } + case OP_ashr: { + if (useResult64) { + result64 = intValueOfConst0 >> intValueOfConst1; + } else { + result32 = static_cast(intValueOfConst0) >> static_cast(intValueOfConst1); + } + break; + } + case OP_lshr: { + if (useResult64) { + result64 = static_cast(intValueOfConst0) >> intValueOfConst1; + } else { + result32 = static_cast(intValueOfConst0) >> static_cast(intValueOfConst1); + } + break; + } + case OP_shl: { + if (useResult64) { + result64 = static_cast(intValueOfConst0) << static_cast(intValueOfConst1); + } else { + result32 = static_cast(intValueOfConst0) << static_cast(intValueOfConst1); + } + break; + } + case OP_max: { + if (IsUnsignedInteger(const0.GetPrimType())) { + if (useResult64) { + result64 = (static_cast(intValueOfConst0) >= static_cast(intValueOfConst1)) ? + intValueOfConst0 : intValueOfConst1; + } else { + result32 = (static_cast(intValueOfConst0) >= static_cast(intValueOfConst1)) ? + intValueOfConst0 : intValueOfConst1; + } + } else { + if (useResult64) { + result64 = (intValueOfConst0 >= intValueOfConst1) ? intValueOfConst0 : intValueOfConst1; + } else { + result32 = (static_cast(intValueOfConst0) >= static_cast(intValueOfConst1)) ? + intValueOfConst0 : intValueOfConst1; + } + } + break; + } + case OP_min: { + if (IsUnsignedInteger(const0.GetPrimType())) { + if (useResult64) { + result64 = (static_cast(intValueOfConst0) <= static_cast(intValueOfConst1)) ? + intValueOfConst0 : intValueOfConst1; + } else { + result32 = (static_cast(intValueOfConst0) <= static_cast(intValueOfConst1)) ? + intValueOfConst0 : intValueOfConst1; + } + } else { + if (useResult64) { + result64 = (intValueOfConst0 <= intValueOfConst1) ? intValueOfConst0 : intValueOfConst1; + } else { + result32 = (static_cast(intValueOfConst0) <= static_cast(intValueOfConst1)) ? 
+ intValueOfConst0 : intValueOfConst1; + } + } + break; + } + case OP_band: { + if (useResult64) { + result64 = static_cast(intValueOfConst0) & static_cast(intValueOfConst1); + } else { + result32 = static_cast(intValueOfConst0) & static_cast(intValueOfConst1); + } + break; + } + case OP_bior: { + if (useResult64) { + result64 = static_cast(intValueOfConst0) | static_cast(intValueOfConst1); + } else { + result32 = static_cast(intValueOfConst0) | static_cast(intValueOfConst1); + } + break; + } + case OP_bxor: { + if (useResult64) { + result64 = static_cast(intValueOfConst0) ^ static_cast(intValueOfConst1); + } else { + result32 = static_cast(intValueOfConst0) ^ static_cast(intValueOfConst1); + } + break; + } + case OP_cand: + case OP_land: { + if (useResult64) { + result64 = static_cast(intValueOfConst0) && static_cast(intValueOfConst1); + } else { + result32 = static_cast(intValueOfConst0) && static_cast(intValueOfConst1); + } + break; + } + case OP_cior: + case OP_lior: { + if (useResult64) { + result64 = static_cast(intValueOfConst0) || static_cast(intValueOfConst1); + } else { + result32 = static_cast(intValueOfConst0) || static_cast(intValueOfConst1); + } + break; + } + case OP_depositbits: { + // handled in FoldDepositbits + ASSERT(false, "Unexpected opcode in FoldIntConstBinary"); + break; + } + default: + ASSERT(false, "Unknown opcode for FoldIntConstBinary"); + } + // determine the type + MIRType &type = *GlobalTables::GetTypeTable().GetPrimType(resultType); + // form the constant + MIRIntConst *constValue = nullptr; + if (type.GetPrimType() == PTY_dyni32) { + constValue = mirModule->GetMemPool()->New(0, type); + constValue->SetValue(kJsTypeNumberInHigh32Bit | (static_cast(result32))); + } else if (useResult64) { + constValue = mirModule->GetMemPool()->New(result64, type); + } else { + constValue = mirModule->GetMemPool()->New(result32, type); + } + // form the ConstvalNode + ConstvalNode *resultConst = mirModule->CurFuncCodeMemPool()->New(); + resultConst->SetPrimType(type.GetPrimType()); + resultConst->SetConstVal(constValue); + return resultConst; +} + +ConstvalNode *ConstantFold::FoldFPConstBinary(Opcode opcode, PrimType resultType, ConstvalNode &const0, + ConstvalNode &const1) { + ASSERT(const0.GetPrimType() == const1.GetPrimType(), "The types of the operands must match"); + MIRDoubleConst *doubleConst0 = nullptr; + MIRDoubleConst *doubleConst1 = nullptr; + MIRFloatConst *floatConst0 = nullptr; + MIRFloatConst *floatConst1 = nullptr; + bool useDouble = (const0.GetPrimType() == PTY_f64); + ConstvalNode *resultConst = mirModule->CurFuncCodeMemPool()->New(); + resultConst->SetPrimType(resultType); + if (useDouble) { + doubleConst0 = safe_cast(const0.GetConstVal()); + doubleConst1 = safe_cast(const1.GetConstVal()); + CHECK_NULL_FATAL(doubleConst0); + CHECK_NULL_FATAL(doubleConst1); + } else { + floatConst0 = safe_cast(const0.GetConstVal()); + floatConst1 = safe_cast(const1.GetConstVal()); + CHECK_NULL_FATAL(floatConst0); + CHECK_NULL_FATAL(floatConst1); + } + float constValuefloat = 0.0; + double constValueDouble = 0.0; + switch (opcode) { + case OP_add: { + if (useDouble) { + constValueDouble = doubleConst0->GetValue() + doubleConst1->GetValue(); + } else { + constValuefloat = floatConst0->GetValue() + floatConst1->GetValue(); + } + break; + } + case OP_sub: { + if (useDouble) { + constValueDouble = doubleConst0->GetValue() - doubleConst1->GetValue(); + } else { + constValuefloat = floatConst0->GetValue() - floatConst1->GetValue(); + } + break; + } + case OP_mul: { + if 
(useDouble) { + constValueDouble = doubleConst0->GetValue() * doubleConst1->GetValue(); + } else { + constValuefloat = floatConst0->GetValue() * floatConst1->GetValue(); + } + break; + } + case OP_div: { + // for floats div by 0 is well defined + if (useDouble) { + constValueDouble = doubleConst0->GetValue() / doubleConst1->GetValue(); + } else { + constValuefloat = floatConst0->GetValue() / floatConst1->GetValue(); + } + break; + } + case OP_max: { + if (useDouble) { + constValueDouble = (doubleConst0->GetValue() >= doubleConst1->GetValue()) ? doubleConst0->GetValue() + : doubleConst1->GetValue(); + } else { + constValuefloat = (floatConst0->GetValue() >= floatConst1->GetValue()) ? floatConst0->GetValue() + : floatConst1->GetValue(); + } + break; + } + case OP_min: { + if (useDouble) { + constValueDouble = (doubleConst0->GetValue() <= doubleConst1->GetValue()) ? doubleConst0->GetValue() + : doubleConst1->GetValue(); + } else { + constValuefloat = (floatConst0->GetValue() <= floatConst1->GetValue()) ? floatConst0->GetValue() + : floatConst1->GetValue(); + } + break; + } + case OP_rem: + case OP_ashr: + case OP_lshr: + case OP_shl: + case OP_band: + case OP_bior: + case OP_bxor: + case OP_cand: + case OP_land: + case OP_cior: + case OP_lior: + case OP_depositbits: { + ASSERT(false, "Unexpected opcode in FoldFPConstBinary"); + break; + } + default: + ASSERT(false, "Unknown opcode for FoldFPConstBinary"); + } + if (resultType == PTY_f64) { + resultConst->SetConstVal(GlobalTables::GetFpConstTable().GetOrCreateDoubleConst(constValueDouble)); + } else { + resultConst->SetConstVal(GlobalTables::GetFpConstTable().GetOrCreateFloatConst(constValuefloat)); + } + return resultConst; +} + +MIRIntConst *ConstantFold::FoldFPConstComparisonMIRConst(Opcode opcode, PrimType resultType, PrimType opndType, + MIRConst &const0, MIRConst &const1) { + MIRDoubleConst *doubleConst0 = nullptr; + MIRDoubleConst *doubleConst1 = nullptr; + MIRFloatConst *floatConst0 = nullptr; + MIRFloatConst *floatConst1 = nullptr; + bool useDouble = (opndType == PTY_f64); + if (useDouble) { + doubleConst0 = safe_cast(&const0); + CHECK_FATAL(doubleConst0 != nullptr, "doubleConst0 is nullptr"); + doubleConst1 = safe_cast(&const1); + CHECK_FATAL(doubleConst1 != nullptr, "doubleConst1 is nullptr"); + } else { + floatConst0 = safe_cast(&const0); + CHECK_FATAL(floatConst0 != nullptr, "floatConst0 is nullptr"); + floatConst1 = safe_cast(&const1); + CHECK_FATAL(floatConst1 != nullptr, "floatConst1 is nullptr"); + } + MIRType &type = *GlobalTables::GetTypeTable().GetPrimType(resultType); + int64 constValue = 0; + switch (opcode) { + case OP_eq: { + if (useDouble) { + constValue = (doubleConst0->GetValue() == doubleConst1->GetValue()) ? 1 : 0; + } else { + constValue = (floatConst0->GetValue() == floatConst1->GetValue()) ? 1 : 0; + } + break; + } + case OP_ge: { + if (useDouble) { + constValue = (doubleConst0->GetValue() >= doubleConst1->GetValue()) ? 1 : 0; + } else { + constValue = (floatConst0->GetValue() >= floatConst1->GetValue()) ? 1 : 0; + } + break; + } + case OP_gt: { + if (useDouble) { + constValue = (doubleConst0->GetValue() > doubleConst1->GetValue()) ? 1 : 0; + } else { + constValue = (floatConst0->GetValue() > floatConst1->GetValue()) ? 1 : 0; + } + break; + } + case OP_le: { + if (useDouble) { + constValue = (doubleConst0->GetValue() <= doubleConst1->GetValue()) ? 1 : 0; + } else { + constValue = (floatConst0->GetValue() <= floatConst1->GetValue()) ? 
1 : 0; + } + break; + } + case OP_lt: { + if (useDouble) { + constValue = (doubleConst0->GetValue() < doubleConst1->GetValue()) ? 1 : 0; + } else { + constValue = (floatConst0->GetValue() < floatConst1->GetValue()) ? 1 : 0; + } + break; + } + case OP_ne: { + if (useDouble) { + constValue = (doubleConst0->GetValue() != doubleConst1->GetValue()) ? 1 : 0; + } else { + constValue = (floatConst0->GetValue() != floatConst1->GetValue()) ? 1 : 0; + } + break; + } + case OP_cmp: + case OP_cmpl: + case OP_cmpg: { + if (useDouble) { + CHECK_NULL_FATAL(doubleConst0); + CHECK_NULL_FATAL(doubleConst1); + if (doubleConst0->GetValue() > doubleConst1->GetValue() || + (opcode == OP_cmpg && (std::isnan(doubleConst0->GetValue()) || std::isnan(doubleConst1->GetValue())))) { + constValue = 1; + } else if (doubleConst0->GetValue() == doubleConst1->GetValue()) { + constValue = 0; + } else if (doubleConst0->GetValue() < doubleConst1->GetValue() || + (opcode == OP_cmpl && (std::isnan(doubleConst0->GetValue()) || + std::isnan(doubleConst1->GetValue())))) { + constValue = -1; + } + } else { + if (floatConst0->GetValue() > floatConst1->GetValue() || + (opcode == OP_cmpg && (std::isnan(floatConst0->GetValue()) || std::isnan(floatConst1->GetValue())))) { + constValue = 1; + } else if (floatConst0->GetValue() == floatConst1->GetValue()) { + constValue = 0; + } else if (floatConst0->GetValue() < floatConst1->GetValue() || + (opcode == OP_cmpl && (std::isnan(floatConst0->GetValue()) || + std::isnan(floatConst1->GetValue())))) { + constValue = -1; + } + } + break; + } + default: + ASSERT(false, "Unknown opcode for FoldFPConstComparison"); + } + MIRIntConst *resultConst = mirModule->GetMemPool()->New(constValue, type); + return resultConst; +} + +ConstvalNode *ConstantFold::FoldFPConstComparison(Opcode opcode, PrimType resultType, PrimType opndType, + ConstvalNode &const0, ConstvalNode &const1) { + ASSERT(const0.GetPrimType() == const1.GetPrimType(), "The types of the operands must match"); + ConstvalNode *resultConst = mirModule->CurFuncCodeMemPool()->New(); + resultConst->SetPrimType(resultType); + resultConst->SetConstVal(FoldFPConstComparisonMIRConst(opcode, resultType, opndType, *const0.GetConstVal(), + *const1.GetConstVal())); + return resultConst; +} + +MIRConst *ConstantFold::FoldConstComparisonMIRConst(Opcode opcode, PrimType resultType, PrimType opndType, + MIRConst &const0, MIRConst &const1) { + MIRConst *returnValue = nullptr; + if (IsPrimitiveInteger(opndType) || IsPrimitiveDynInteger(opndType)) { + returnValue = FoldIntConstComparisonMIRConst(opcode, resultType, *safe_cast(&const0), + *safe_cast(&const1)); + } else if (opndType == PTY_f32 || opndType == PTY_f64) { + returnValue = FoldFPConstComparisonMIRConst(opcode, resultType, opndType, const0, const1); + } else { + ASSERT(false, "Unhandled case for FoldConstComparisonMIRConst"); + } + return returnValue; +} + +ConstvalNode *ConstantFold::FoldConstComparison(Opcode opcode, PrimType resultType, PrimType opndType, + ConstvalNode &const0, ConstvalNode &const1) { + ConstvalNode *returnValue = nullptr; + if (IsPrimitiveInteger(opndType) || IsPrimitiveDynInteger(opndType)) { + returnValue = FoldIntConstComparison(opcode, resultType, const0, const1); + } else if (opndType == PTY_f32 || opndType == PTY_f64) { + returnValue = FoldFPConstComparison(opcode, resultType, opndType, const0, const1); + } else { + ASSERT(false, "Unhandled case for FoldConstComparison"); + } + return returnValue; +} + +ConstvalNode *ConstantFold::FoldConstBinary(Opcode opcode, PrimType 
resultType, ConstvalNode &const0, + ConstvalNode &const1) { + ConstvalNode *returnValue = nullptr; + if (IsPrimitiveInteger(resultType) || IsPrimitiveDynInteger(resultType)) { + returnValue = FoldIntConstBinary(opcode, resultType, const0, const1); + } else if (resultType == PTY_f32 || resultType == PTY_f64) { + returnValue = FoldFPConstBinary(opcode, resultType, const0, const1); + } else { + ASSERT(false, "Unhandled case for FoldConstBinary"); + } + return returnValue; +} + +ConstvalNode *ConstantFold::FoldIntConstUnary(Opcode opcode, PrimType resultType, ConstvalNode *c) { + CHECK_NULL_FATAL(c); + MIRIntConst *cst = safe_cast(c->GetConstVal()); + uint32 result32 = 0; + uint64 result64 = 0; + bool useResult64 = (GetPrimTypeSize(resultType) == kByteSizeOfBit64); + switch (opcode) { + case OP_abs: { + if (IsUnsignedInteger(resultType)) { + if (useResult64) { + result64 = static_cast(cst->GetValue()); + } else { + result32 = static_cast(cst->GetValue()); + } + } else { + if (useResult64) { + result64 = (cst->GetValue() >= 0) ? cst->GetValue() : -cst->GetValue(); + } else { + result32 = (static_cast(cst->GetValue()) >= 0) ? + cst->GetValue() : -static_cast(cst->GetValue()); + } + } + break; + } + case OP_bnot: { + if (useResult64) { + result64 = ~static_cast(cst->GetValue()); + } else { + result32 = ~static_cast(cst->GetValue()); + } + break; + } + case OP_lnot: { + if (useResult64) { + result64 = cst->GetValue() == 0; + } else { + result32 = static_cast(cst->GetValue()) == 0; + } + break; + } + case OP_neg: { + if (useResult64) { + result64 = -cst->GetValue(); + } else { + result32 = -static_cast(cst->GetValue()); + } + break; + } + case OP_sext: // handled in FoldExtractbits + case OP_zext: // handled in FoldExtractbits + case OP_extractbits: // handled in FoldExtractbits + case OP_recip: + case OP_sqrt: { + ASSERT(false, "Unexpected opcode in FoldIntConstUnary"); + break; + } + default: + ASSERT(false, "Unknown opcode for FoldIntConstUnary"); + } + // determine the type + MIRType &type = *GlobalTables::GetTypeTable().GetPrimType(resultType); + // form the constant + MIRIntConst *constValue = nullptr; + if (type.GetPrimType() == PTY_dyni32) { + constValue = mirModule->GetMemPool()->New(0, type); + constValue->SetValue(kJsTypeNumberInHigh32Bit | (static_cast(result32))); + } else if (useResult64) { + constValue = mirModule->GetMemPool()->New(result64, type); + } else { + constValue = mirModule->GetMemPool()->New(result32, type); + } + // form the ConstvalNode + ConstvalNode *resultConst = mirModule->CurFuncCodeMemPool()->New(); + resultConst->SetPrimType(type.GetPrimType()); + resultConst->SetConstVal(constValue); + return resultConst; +} + +template +ConstvalNode *ConstantFold::FoldFPConstUnary(Opcode opcode, PrimType resultType, ConstvalNode *c) { + CHECK_NULL_FATAL(c); + ConstvalNode *resultConst = c; + typename T::value_type constValue = 0.0; + T *fpCst = static_cast(c->GetConstVal()); + switch (opcode) { + case OP_recip: + case OP_neg: + case OP_abs: { + if (OP_recip == opcode) { + constValue = typename T::value_type(1.0 / fpCst->GetValue()); + } else if (OP_neg == opcode) { + constValue = typename T::value_type(-fpCst->GetValue()); + } else if (OP_abs == opcode) { + constValue = typename T::value_type(fabs(fpCst->GetValue())); + } + break; + } + case OP_sqrt: { + constValue = typename T::value_type(sqrt(fpCst->GetValue())); + break; + } + case OP_bnot: + case OP_lnot: + case OP_sext: + case OP_zext: + case OP_extractbits: { + ASSERT(false, "Unexpected opcode in FoldFPConstUnary"); + 
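+      // these opcodes apply to integer operands only and should have been
+      // routed to FoldIntConstUnary / FoldExtractbits instead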
+      break;
+    }
+    default:
+      ASSERT(false, "Unknown opcode for FoldFPConstUnary");
+  }
+  resultConst = mirModule->CurFuncCodeMemPool()->New<ConstvalNode>();
+  resultConst->SetPrimType(resultType);
+  if (resultType == PTY_f32) {
+    resultConst->SetConstVal(GlobalTables::GetFpConstTable().GetOrCreateFloatConst(constValue));
+  } else {
+    resultConst->SetConstVal(GlobalTables::GetFpConstTable().GetOrCreateDoubleConst(constValue));
+  }
+  return resultConst;
+}
+
+ConstvalNode *ConstantFold::FoldConstUnary(Opcode opcode, PrimType resultType, ConstvalNode *c) {
+  ConstvalNode *returnValue = nullptr;
+  if (IsPrimitiveInteger(resultType) || IsPrimitiveDynInteger(resultType)) {
+    returnValue = FoldIntConstUnary(opcode, resultType, c);
+  } else if (resultType == PTY_f32) {
+    returnValue = FoldFPConstUnary<MIRFloatConst>(opcode, resultType, c);
+  } else if (resultType == PTY_f64) {
+    returnValue = FoldFPConstUnary<MIRDoubleConst>(opcode, resultType, c);
+  } else if (PTY_f128 == resultType) {
+  } else {
+    ASSERT(false, "Unhandled case for FoldConstUnary");
+  }
+  return returnValue;
+}
+
+std::pair<BaseNode*, int64> ConstantFold::FoldSizeoftype(SizeoftypeNode *node) {
+  CHECK_NULL_FATAL(node);
+  BaseNode *result = node;
+  MIRType *argType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(node->GetTyIdx());
+  if (argType->GetKind() == kTypeScalar) {
+    MIRType &resultType = *GlobalTables::GetTypeTable().GetPrimType(node->GetPrimType());
+    uint32 size = GetPrimTypeSize(argType->GetPrimType());
+    ConstvalNode *constValueNode = mirModule->CurFuncCodeMemPool()->New<ConstvalNode>();
+    constValueNode->SetPrimType(node->GetPrimType());
+    constValueNode->SetConstVal(mirModule->GetMemPool()->New<MIRIntConst>(static_cast<int64>(size), resultType));
+    result = constValueNode;
+  }
+  return std::make_pair(result, 0);
+}
+
+std::pair<BaseNode*, int64> ConstantFold::FoldRetype(RetypeNode *node) {
+  CHECK_NULL_FATAL(node);
+  BaseNode *result = node;
+  std::pair<BaseNode*, int64> p = DispatchFold(node->Opnd(0));
+  if (node->Opnd(0) != p.first) {
+    RetypeNode *newRetNode = node->CloneTree(mirModule->GetCurFuncCodeMPAllocator());
+    CHECK_FATAL(newRetNode != nullptr, "newRetNode is null in ConstantFold::FoldRetype");
+    newRetNode->SetOpnd(PairToExpr(node->Opnd(0)->GetPrimType(), p), 0);
+    result = newRetNode;
+  }
+  return std::make_pair(result, 0);
+}
+
+std::pair<BaseNode*, int64> ConstantFold::FoldGcmallocjarray(JarrayMallocNode *node) {
+  CHECK_NULL_FATAL(node);
+  BaseNode *result = node;
+  std::pair<BaseNode*, int64> p = DispatchFold(node->Opnd(0));
+  if (node->Opnd(0) != p.first) {
+    JarrayMallocNode *newRetNode;
+    newRetNode = node->CloneTree(mirModule->GetCurFuncCodeMPAllocator());
+    CHECK_FATAL(newRetNode != nullptr, "newRetNode is null in ConstantFold::FoldGcmallocjarray");
+    newRetNode->SetOpnd(PairToExpr(node->Opnd(0)->GetPrimType(), p), 0);
+    result = newRetNode;
+  }
+  return std::make_pair(result, 0);
+}
+
+std::pair<BaseNode*, int64> ConstantFold::FoldUnary(UnaryNode *node) {
+  CHECK_NULL_FATAL(node);
+  BaseNode *result = nullptr;
+  int64 sum = 0;
+  std::pair<BaseNode*, int64> p = DispatchFold(node->Opnd(0));
+  ConstvalNode *cst = safe_cast<ConstvalNode>(p.first);
+  if (cst != nullptr) {
+    result = FoldConstUnary(node->GetOpCode(), node->GetPrimType(), cst);
+    sum = 0;
+  } else {
+    bool isInt = IsPrimitiveInteger(node->GetPrimType());
+    if (isInt && node->GetOpCode() == OP_neg) {
+      result = NegateTree(p.first);
+      if (result->GetOpCode() == OP_neg && result->GetPrimType() == node->GetPrimType() &&
+          static_cast<UnaryNode*>(result)->Opnd(0) == node->Opnd(0)) {
+        // NegateTree returned a UnaryNode equivalent to `node`, so keep the
+        // original UnaryNode to preserve identity
+        result = node;
+      }
+      sum = -p.second;
+    } else {
+      result =
+          NewUnaryNode(node, node->GetOpCode(), node->GetPrimType(), PairToExpr(node->Opnd(0)->GetPrimType(), p));
+      sum = 0;
+    }
+  }
+  return std::make_pair(result, sum);
+}
+
+ConstvalNode *ConstantFold::FoldCeil(ConstvalNode *cst, PrimType fromType, PrimType toType) {
+  ConstvalNode *resultConst = mirModule->CurFuncCodeMemPool()->New<ConstvalNode>();
+  resultConst->SetPrimType(toType);
+  MIRType &resultType = *GlobalTables::GetTypeTable().GetPrimType(toType);
+  CHECK_NULL_FATAL(cst);
+  if (fromType == PTY_f32) {
+    MIRFloatConst *constValue = safe_cast<MIRFloatConst>(cst->GetConstVal());
+    float floatValue = ceil(constValue->GetValue());
+    resultConst->SetConstVal(mirModule->GetMemPool()->New<MIRIntConst>(static_cast<int64>(floatValue), resultType));
+  } else {
+    MIRDoubleConst *constValue = safe_cast<MIRDoubleConst>(cst->GetConstVal());
+    double doubleValue = ceil(constValue->GetValue());
+    resultConst->SetConstVal(mirModule->GetMemPool()->New<MIRIntConst>(static_cast<int64>(doubleValue), resultType));
+  }
+  return resultConst;
+}
+
+MIRConst *ConstantFold::FoldFloorMIRConst(MIRConst *cst, PrimType fromType, PrimType toType) {
+  MIRType &resultType = *GlobalTables::GetTypeTable().GetPrimType(toType);
+  CHECK_NULL_FATAL(cst);
+  if (fromType == PTY_f32) {
+    MIRFloatConst *constValue = safe_cast<MIRFloatConst>(cst);
+    float floatValue = floor(constValue->GetValue());
+    return mirModule->GetMemPool()->New<MIRIntConst>(static_cast<int64>(floatValue), resultType);
+  } else {
+    MIRDoubleConst *constValue = safe_cast<MIRDoubleConst>(cst);
+    double doubleValue = floor(constValue->GetValue());
+    return mirModule->GetMemPool()->New<MIRIntConst>(static_cast<int64>(doubleValue), resultType);
+  }
+}
+
+ConstvalNode *ConstantFold::FoldFloor(ConstvalNode *cst, PrimType fromType, PrimType toType) {
+  ConstvalNode *resultConst = mirModule->CurFuncCodeMemPool()->New<ConstvalNode>();
+  resultConst->SetPrimType(toType);
+  resultConst->SetConstVal(FoldFloorMIRConst(cst->GetConstVal(), fromType, toType));
+  return resultConst;
+}
+
+MIRConst *ConstantFold::FoldRoundMIRConst(MIRConst *cst, PrimType fromType, PrimType toType) {
+  MIRType &resultType = *GlobalTables::GetTypeTable().GetPrimType(toType);
+  if (fromType == PTY_f32) {
+    MIRFloatConst *constValue = safe_cast<MIRFloatConst>(cst);
+    float floatValue = round(constValue->GetValue());
+    return mirModule->GetMemPool()->New<MIRIntConst>(static_cast<int64>(floatValue), resultType);
+  } else if (fromType == PTY_f64) {
+    MIRDoubleConst *constValue = safe_cast<MIRDoubleConst>(cst);
+    double doubleValue = round(constValue->GetValue());
+    return mirModule->GetMemPool()->New<MIRIntConst>(static_cast<int64>(doubleValue), resultType);
+  } else if (toType == PTY_f32 && IsPrimitiveInteger(fromType)) {
+    MIRIntConst *constValue = safe_cast<MIRIntConst>(cst);
+    int64 fromValue = constValue->GetValue();
+    float floatValue = round(static_cast<float>(fromValue));
+    if (static_cast<int64>(floatValue) == fromValue) {
+      return GlobalTables::GetFpConstTable().GetOrCreateFloatConst(floatValue);
+    }
+  } else if (toType == PTY_f64 && IsPrimitiveInteger(fromType)) {
+    MIRIntConst *constValue = safe_cast<MIRIntConst>(cst);
+    int64 fromValue = constValue->GetValue();
+    double doubleValue = round(static_cast<double>(fromValue));
+    if (static_cast<int64>(doubleValue) == fromValue) {
+      return GlobalTables::GetFpConstTable().GetOrCreateDoubleConst(doubleValue);
+    }
+  }
+  return nullptr;
+}
+
+ConstvalNode *ConstantFold::FoldRound(ConstvalNode *cst, PrimType fromType, PrimType toType) {
+  ConstvalNode *resultConst = mirModule->CurFuncCodeMemPool()->New<ConstvalNode>();
+  resultConst->SetPrimType(toType);
+  CHECK_NULL_FATAL(cst);
+  resultConst->SetConstVal(FoldRoundMIRConst(cst->GetConstVal(), fromType, toType));
+  return resultConst;
+}
+
+ConstvalNode *ConstantFold::FoldTrunk(ConstvalNode *cst, PrimType fromType, PrimType toType) {
+  ConstvalNode *resultConst = mirModule->CurFuncCodeMemPool()->New<ConstvalNode>();
+  resultConst->SetPrimType(toType);
+  MIRType &resultType = *GlobalTables::GetTypeTable().GetPrimType(toType);
+  CHECK_NULL_FATAL(cst);
+  if (fromType == PTY_f32) {
+    MIRFloatConst *constValue = safe_cast<MIRFloatConst>(cst->GetConstVal());
+    float floatValue = trunc(constValue->GetValue());
+    resultConst->SetConstVal(mirModule->GetMemPool()->New<MIRIntConst>(static_cast<int64>(floatValue), resultType));
+  } else {
+    MIRDoubleConst *constValue = safe_cast<MIRDoubleConst>(cst->GetConstVal());
+    double doubleValue = trunc(constValue->GetValue());
+    resultConst->SetConstVal(mirModule->GetMemPool()->New<MIRIntConst>(static_cast<int64>(doubleValue), resultType));
+  }
+  return resultConst;
+}
+
+MIRConst *ConstantFold::FoldTypeCvtMIRConst(MIRConst *cst, PrimType fromType, PrimType toType) {
+  if (IsPrimitiveDynType(fromType) || IsPrimitiveDynType(toType)) {
+    // do not fold
+    return nullptr;
+  }
+  if (IsPrimitiveInteger(fromType) && IsPrimitiveInteger(toType)) {
+    MIRConst *toConst = nullptr;
+    uint32 fromSize = GetPrimTypeBitSize(fromType);
+    uint32 toSize = GetPrimTypeBitSize(toType);
+    if (toSize > fromSize) {
+      Opcode op = OP_zext;
+      if (IsSignedInteger(toType) && IsSignedInteger(fromType)) {
+        op = OP_sext;
+      }
+      toConst = FoldSignExtendMIRConst(op, toType, fromSize, cst);
+    } else {
+      MIRIntConst *c = safe_cast<MIRIntConst>(cst);
+      MIRType &type = *GlobalTables::GetTypeTable().GetPrimType(toType);
+      toConst = mirModule->GetMemPool()->New<MIRIntConst>(c->GetValue(), type);
+    }
+    return toConst;
+  }
+  if (IsPrimitiveFloat(fromType) && IsPrimitiveFloat(toType)) {
+    MIRConst *toConst = nullptr;
+    if (GetPrimTypeBitSize(toType) < GetPrimTypeBitSize(fromType)) {
+      ASSERT(GetPrimTypeBitSize(toType) == 32, "We support F32 and F64");
+      MIRDoubleConst *fromValue = safe_cast<MIRDoubleConst>(cst);
+      float floatValue = static_cast<float>(fromValue->GetValue());
+      MIRFloatConst *toValue = GlobalTables::GetFpConstTable().GetOrCreateFloatConst(floatValue);
+      toConst = toValue;
+    } else {
+      ASSERT(GetPrimTypeBitSize(toType) == 64, "We support F32 and F64");
+      MIRFloatConst *fromValue = safe_cast<MIRFloatConst>(cst);
+      double doubleValue = static_cast<double>(fromValue->GetValue());
+      MIRDoubleConst *toValue = GlobalTables::GetFpConstTable().GetOrCreateDoubleConst(doubleValue);
+      toConst = toValue;
+    }
+    return toConst;
+  }
+  if (IsPrimitiveFloat(fromType) && IsPrimitiveInteger(toType)) {
+    return FoldFloorMIRConst(cst, fromType, toType);
+  }
+  if (IsPrimitiveInteger(fromType) && IsPrimitiveFloat(toType)) {
+    return FoldRoundMIRConst(cst, fromType, toType);
+  }
+  CHECK_FATAL(false, "Unexpected case in ConstFoldTypeCvt");
+  return nullptr;
+}
+
+ConstvalNode *ConstantFold::FoldTypeCvt(ConstvalNode *cst, PrimType fromType, PrimType toType) {
+  CHECK_NULL_FATAL(cst);
+  MIRConst *toConstValue = FoldTypeCvtMIRConst(cst->GetConstVal(), fromType, toType);
+  if (toConstValue == nullptr) {
+    return nullptr;
+  }
+  ConstvalNode *toConst = mirModule->CurFuncCodeMemPool()->New<ConstvalNode>();
+  toConst->SetPrimType(toConstValue->GetType().GetPrimType());
+  toConst->SetConstVal(toConstValue);
+  return toConst;
+}
+
+std::pair<BaseNode*, int64> ConstantFold::FoldTypeCvt(TypeCvtNode *node) {
+  CHECK_NULL_FATAL(node);
+  BaseNode *result = nullptr;
+  std::pair<BaseNode*, int64> p = DispatchFold(node->Opnd(0));
+  ConstvalNode *cst = safe_cast<ConstvalNode>(p.first);
+  if (cst != nullptr) {
+    switch (node->GetOpCode()) {
+      case OP_ceil: {
+        result = FoldCeil(cst, node->FromType(), node->GetPrimType());
+        break;
+      }
+      case OP_cvt: {
+        result = FoldTypeCvt(cst, node->FromType(), node->GetPrimType());
+        break;
+      }
+      case
OP_floor: { + result = FoldFloor(cst, node->FromType(), node->GetPrimType()); + break; + } + case OP_round: { + result = FoldRound(cst, node->FromType(), node->GetPrimType()); + break; + } + case OP_trunc: { + result = FoldTrunk(cst, node->FromType(), node->GetPrimType()); + break; + } + default: + result = nullptr; + ASSERT(false, "Unexpected opcode in TypeCvtNodeConstFold"); + } + } + if (result == nullptr) { + BaseNode *e = PairToExpr(node->Opnd(0)->GetPrimType(), p); + if (e != node->Opnd(0)) { + result = mirModule->CurFuncCodeMemPool()->New(Opcode(node->GetOpCode()), + PrimType(node->GetPrimType()), + PrimType(node->FromType()), e); + } else { + result = node; + } + } + return std::make_pair(result, 0); +} + +MIRConst *ConstantFold::FoldSignExtendMIRConst(Opcode opcode, PrimType resultType, uint8 size, MIRConst *cst) { + MIRIntConst *c = safe_cast(cst); + uint64 result64 = 0; + if (opcode == OP_sext) { + result64 = (c->GetValue() << (64u - size)) >> (64u - size); + } else { + result64 = ((static_cast(c->GetValue())) << (64u - size)) >> (64u - size); + } + MIRType &type = *GlobalTables::GetTypeTable().GetPrimType(resultType); + MIRIntConst *constValue = mirModule->GetMemPool()->New(result64, type); + return constValue; +} + +ConstvalNode *ConstantFold::FoldSignExtend(Opcode opcode, PrimType resultType, uint8 size, ConstvalNode *cst) { + ConstvalNode *resultConst = mirModule->CurFuncCodeMemPool()->New(); + CHECK_NULL_FATAL(cst); + MIRConst *toConst = FoldSignExtendMIRConst(opcode, resultType, size, cst->GetConstVal()); + resultConst->SetPrimType(toConst->GetType().GetPrimType()); + resultConst->SetConstVal(toConst); + return resultConst; +} + +// sext and zext also handled automatically +std::pair ConstantFold::FoldExtractbits(ExtractbitsNode *node) { + CHECK_NULL_FATAL(node); + BaseNode *result = nullptr; + uint8 offset = node->GetBitsOffset(); + uint8 size = node->GetBitsSize(); + Opcode opcode = node->GetOpCode(); + std::pair p = DispatchFold(node->Opnd(0)); + ConstvalNode *cst = safe_cast(p.first); + if (cst && (opcode == OP_sext || opcode == OP_zext)) { + result = FoldSignExtend(opcode, node->GetPrimType(), size, cst); + } else { + BaseNode *e = PairToExpr(node->Opnd(0)->GetPrimType(), p); + if (e != node->Opnd(0)) { + result = mirModule->CurFuncCodeMemPool()->New(opcode, PrimType(node->GetPrimType()), + offset, size, e); + } else { + result = node; + } + } + return std::make_pair(result, 0); +} + +std::pair ConstantFold::FoldIread(IreadNode *node) { + CHECK_NULL_FATAL(node); + BaseNode *result = node; + Opcode op = node->GetOpCode(); + FieldID fieldID = node->GetFieldID(); + std::pair p = DispatchFold(node->Opnd(0)); + BaseNode *e = PairToExpr(node->Opnd(0)->GetPrimType(), p); + if (op == OP_iaddrof && e->GetOpCode() == OP_addrof) { + AddrofNode *addrofNode = static_cast(e); + AddrofNode *newAddrof = addrofNode->CloneTree(mirModule->GetCurFuncCodeMPAllocator()); + CHECK_NULL_FATAL(newAddrof); + newAddrof->SetFieldID(newAddrof->GetFieldID() + fieldID); + result = newAddrof; + } else if (op == OP_iread && e->GetOpCode() == OP_addrof) { + AddrofNode *addrofNode = static_cast(e); + MIRSymbol *msy = mirModule->CurFunction()->GetLocalOrGlobalSymbol(addrofNode->GetStIdx()); + TyIdx typeId = msy->GetTyIdx(); + CHECK_FATAL(GlobalTables::GetTypeTable().GetTypeTable().empty() == false, "container check"); + MIRType *msyType = GlobalTables::GetTypeTable().GetTypeTable()[typeId]; + if (msyType->GetKind() == kTypeStruct || msyType->GetKind() == kTypeClass) { + FieldID newFieldId = fieldID + 
+        addrofNode->GetFieldID();
+      MIRStructType *stty = static_cast<MIRStructType*>(msyType);
+      MIRType *fieldTy = stty->GetFieldType(newFieldId);
+      result = mirModule->CurFuncCodeMemPool()->New<AddrofNode>(OP_dread, fieldTy->GetPrimType(),
+                                                                addrofNode->GetStIdx(), newFieldId);
+    }
+  } else if (e != node->Opnd(0)) {
+    result = mirModule->CurFuncCodeMemPool()->New<IreadNode>(
+        op, PrimType(node->GetPrimType()), node->GetTyIdx(), fieldID, static_cast<BaseNode*>(e));
+  }
+  return std::make_pair(result, 0);
+}
+
+std::pair<BaseNode*, int64> ConstantFold::FoldBinary(BinaryNode *node) {
+  CHECK_NULL_FATAL(node);
+  BaseNode *result = nullptr;
+  int64 sum;
+  Opcode op = node->GetOpCode();
+  PrimType primType = node->GetPrimType();
+  PrimType lPrimTypes = node->Opnd(0)->GetPrimType();
+  PrimType rPrimTypes = node->Opnd(1)->GetPrimType();
+  std::pair<BaseNode*, int64> lp = DispatchFold(node->Opnd(0));
+  std::pair<BaseNode*, int64> rp = DispatchFold(node->Opnd(1));
+  BaseNode *l = lp.first;
+  BaseNode *r = rp.first;
+  ConstvalNode *lConst = safe_cast<ConstvalNode>(l);
+  ConstvalNode *rConst = safe_cast<ConstvalNode>(r);
+  bool isInt = IsPrimitiveInteger(primType);
+  if (lConst != nullptr && rConst != nullptr) {
+    // Don't fold integer div by 0 (for floats, div by 0 is well defined).
+    if ((op == OP_div || op == OP_rem) && isInt &&
+        (safe_cast<MIRIntConst>(rConst->GetConstVal())->GetValue() == 0 ||
+         safe_cast<MIRIntConst>(lConst->GetConstVal())->GetValue() == LONG_MIN ||
+         safe_cast<MIRIntConst>(lConst->GetConstVal())->GetValue() == INT_MIN)) {
+      result = NewBinaryNode(node, op, primType, lConst, rConst);
+      sum = 0;
+    } else {
+      // 4 + 2 -> return a pair(result = ConstValNode(6), sum = 0)
+      // Create a new ConstvalNode for 6 but keep the sum = 0. This simplifies the
+      // logic since the alternative is to return pair(result = nullptr, sum = 6).
+      // Doing so would introduce many nullptr checks in the code. See previous
+      // commits that implemented that logic for a comparison.
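+      // By contrast, when only one operand is constant the sum half does the
+      // work: (x + 4) folds to pair(x, 4) in the OP_add handling below.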
+ result = FoldConstBinary(op, primType, *lConst, *rConst); + sum = 0; + } + } else if (lConst != nullptr && isInt) { + MIRIntConst *mcst = safe_cast(lConst->GetConstVal()); + PrimType cstTyp = mcst->GetType().GetPrimType(); + int64 cst = mcst->GetValue(); + if (op == OP_add) { + sum = cst + rp.second; + result = r; + } else if (op == OP_sub) { + sum = cst - rp.second; + result = NegateTree(r); + } else if ((op == OP_mul || op == OP_div || op == OP_rem || op == OP_ashr || op == OP_lshr || op == OP_shl || + op == OP_band || op == OP_cand || op == OP_land) && + cst == 0) { + // 0 * X -> 0 + // 0 / X -> 0 + // 0 % X -> 0 + // 0 >> X -> 0 + // 0 << X -> 0 + // 0 & X -> 0 + // 0 && X -> 0 + sum = 0; + result = mirModule->GetMIRBuilder()->CreateIntConst(0, cstTyp); + } else if (op == OP_mul && cst == 1) { + // 1 * X --> X + sum = rp.second; + result = r; + } else if (op == OP_bior && cst == -1) { + // (-1) | X -> -1 + sum = 0; + result = mirModule->GetMIRBuilder()->CreateIntConst(-1, cstTyp); + } else if ((op == OP_lior || op == OP_cior) && cst >= 0) { + sum = 0; + if (cst > 0) { + // 5 || X -> 1 + result = mirModule->GetMIRBuilder()->CreateIntConst(1, cstTyp); + } else { + // when cst is zero + // 0 || X -> X; + result = r; + } + } else if ((op == OP_cand || op == OP_land) && cst == -1) { + // (-1) && X -> X + sum = 0; + result = r; + } else if ((op == OP_bior || op == OP_bxor) && cst == 0) { + // 0 | X -> X + // 0 ^ X -> X + sum = 0; + result = r; + } else { + result = NewBinaryNode(node, op, primType, l, PairToExpr(rPrimTypes, rp)); + sum = 0; + } + } else if (rConst != nullptr && isInt) { + MIRIntConst *mcst = safe_cast(rConst->GetConstVal()); + PrimType cstTyp = mcst->GetType().GetPrimType(); + int64 cst = mcst->GetValue(); + if (op == OP_add) { + result = l; + sum = lp.second + cst; + } else if (op == OP_sub) { + result = l; + sum = lp.second - cst; + } else if ((op == OP_mul || op == OP_band || op == OP_cand || op == OP_land) && cst == 0) { + // X * 0 -> 0 + // X & 0 -> 0 + // X && 0 -> 0 + sum = 0; + result = mirModule->GetMIRBuilder()->CreateIntConst(0, cstTyp); + } else if ((op == OP_mul || op == OP_div) && cst == 1) { + // case [X * 1 -> X] + // case [X / 1 = X] + sum = lp.second; + result = l; + } else if (op == OP_band && cst == -1) { + // X & (-1) -> X + sum = 0; + result = l; + } else if (op == OP_bior && cst == -1) { + // X | (-1) -> -1 + sum = 0; + result = mirModule->GetMIRBuilder()->CreateIntConst(-1, cstTyp); + } else if ((op == OP_lior || op == OP_cior)) { + sum = 0; + if (cst > 0) { + // X || 5 -> 1 + result = mirModule->GetMIRBuilder()->CreateIntConst(1, cstTyp); + } else if (cst == 0) { + // X || 0 -> X + result = l; + } else { + result = NewBinaryNode(node, op, primType, PairToExpr(lPrimTypes, lp), r); + } + } else if ((op == OP_ashr || op == OP_lshr || op == OP_shl || op == OP_bior || op == OP_bxor) && cst == 0) { + // X >> 0 -> X + // X << 0 -> X + // X | 0 -> X + // X ^ 0 -> X + sum = 0; + result = l; + } else if (op == OP_bxor && cst == 1 && primType != PTY_u1) { + // bxor i32 ( + // cvt i32 u1 (regread u1 %13), + // constValue i32 1), + result = NewBinaryNode(node, op, primType, PairToExpr(lPrimTypes, lp), PairToExpr(rPrimTypes, rp)); + sum = 0; + if (l->GetOpCode() == OP_cvt) { + TypeCvtNode *cvtNode = static_cast(l); + if (cvtNode->Opnd(0)->GetPrimType() == PTY_u1) { + BaseNode *base = cvtNode->Opnd(0); + BaseNode *constValue = mirModule->GetMIRBuilder()->CreateIntConst(1, base->GetPrimType()); + std::pair p = DispatchFold(base); + BinaryNode *temp = 
NewBinaryNode(node, op, PTY_u1, PairToExpr(base->GetPrimType(), p), constValue); + result = mirModule->CurFuncCodeMemPool()->New(OP_cvt, primType, PTY_u1, temp); + } + } + } else if (op == OP_rem && cst == 1) { + // X % 1 -> 0 + sum = 0; + result = mirModule->GetMIRBuilder()->CreateIntConst(0, cstTyp); + } else { + result = NewBinaryNode(node, op, primType, PairToExpr(lPrimTypes, lp), r); + sum = 0; + } + } else if (isInt && (op == OP_add || op == OP_sub)) { + if (op == OP_add) { + result = NewBinaryNode(node, op, primType, l, r); + sum = lp.second + rp.second; + } else { + result = NewBinaryNode(node, op, primType, l, r); + sum = lp.second - rp.second; + } + } else { + result = NewBinaryNode(node, op, primType, PairToExpr(lPrimTypes, lp), PairToExpr(rPrimTypes, rp)); + sum = 0; + } + return std::make_pair(result, sum); +} + +BaseNode *ConstantFold::SimplifyDoubleCompare(CompareNode *node) { + // For cases on gitlab issue 636. + // See arm manual B.cond(P2993) and FCMP(P1091) + CHECK_NULL_FATAL(node); + BaseNode *result = node; + BaseNode *l = node->Opnd(0); + BaseNode *r = node->Opnd(1); + if (node->GetOpCode() == OP_ne || node->GetOpCode() == OP_eq) { + if (l->GetOpCode() == OP_cmp && r->GetOpCode() == OP_constval) { + ConstvalNode *constNode = static_cast(r); + if (constNode->GetConstVal()->GetKind() == kConstInt && constNode->GetConstVal()->IsZero()) { + const CompareNode *compNode = static_cast(l); + result = mirModule->CurFuncCodeMemPool()->New(Opcode(node->GetOpCode()), + PrimType(node->GetPrimType()), + compNode->GetOpndType(), + compNode->Opnd(0), + compNode->Opnd(1)); + } + } else if (r->GetOpCode() == OP_cmp && l->GetOpCode() == OP_constval) { + ConstvalNode *constNode = static_cast(l); + if (constNode->GetConstVal()->GetKind() == kConstInt && constNode->GetConstVal()->IsZero()) { + const CompareNode *compNode = static_cast(r); + result = mirModule->CurFuncCodeMemPool()->New(Opcode(node->GetOpCode()), + PrimType(node->GetPrimType()), + compNode->GetOpndType(), + compNode->Opnd(0), + compNode->Opnd(1)); + } + } else if (node->GetOpCode() == OP_ne && r->GetOpCode() == OP_constval) { + // ne (u1 x, constValue 0) <==> x + ConstvalNode *constNode = static_cast(r); + if (constNode->GetConstVal()->GetKind() == kConstInt && constNode->GetConstVal()->IsZero()) { + BaseNode *opnd = l; + do { + if (opnd->GetPrimType() == PTY_u1) { + result = opnd; + break; + } else if (opnd->GetOpCode() == OP_cvt) { + TypeCvtNode *cvtNode = static_cast(opnd); + opnd = cvtNode->Opnd(0); + } else { + opnd = nullptr; + } + } while (opnd != nullptr); + } + } + } else if (node->GetOpCode() == OP_gt || node->GetOpCode() == OP_lt) { + if (l->GetOpCode() == OP_cmp && r->GetOpCode() == OP_constval) { + ConstvalNode *constNode = static_cast(r); + if (constNode->GetConstVal()->GetKind() == kConstInt && constNode->GetConstVal()->IsZero()) { + const CompareNode *compNode = static_cast(l); + result = mirModule->CurFuncCodeMemPool()->New(Opcode(node->GetOpCode()), + PrimType(node->GetPrimType()), + compNode->GetOpndType(), + compNode->Opnd(0), + compNode->Opnd(1)); + } + } else if (r->GetOpCode() == OP_cmp && l->GetOpCode() == OP_constval) { + ConstvalNode *constNode = static_cast(l); + if (constNode->GetConstVal()->GetKind() == kConstInt && constNode->GetConstVal()->IsZero()) { + const CompareNode *compNode = static_cast(r); + result = mirModule->CurFuncCodeMemPool()->New(Opcode(node->GetOpCode()), + PrimType(node->GetPrimType()), + compNode->GetOpndType(), + compNode->Opnd(1), + compNode->Opnd(0)); + } + } + } + return 
result; +} + +std::pair ConstantFold::FoldCompare(CompareNode *node) { + CHECK_NULL_FATAL(node); + BaseNode *result = nullptr; + std::pair lp = DispatchFold(node->Opnd(0)); + std::pair rp = DispatchFold(node->Opnd(1)); + ConstvalNode *lConst = safe_cast(lp.first); + ConstvalNode *rConst = safe_cast(rp.first); + if (lConst != nullptr && rConst != nullptr && !IsPrimitiveDynType(node->GetOpndType())) { + result = FoldConstComparison(node->GetOpCode(), node->GetPrimType(), node->Opnd(0)->GetPrimType(), + *lConst, *rConst); + } else { + BaseNode *l = PairToExpr(node->Opnd(0)->GetPrimType(), lp); + BaseNode *r = PairToExpr(node->Opnd(1)->GetPrimType(), rp); + if (l != node->Opnd(0) || r != node->Opnd(1)) { + result = mirModule->CurFuncCodeMemPool()->New( + Opcode(node->GetOpCode()), PrimType(node->GetPrimType()), PrimType(node->GetOpndType()), l, r); + } else { + result = node; + } + result = SimplifyDoubleCompare(static_cast(result)); + } + return std::make_pair(result, 0); +} + +BaseNode *ConstantFold::Fold(BaseNode *node) { + if (node == nullptr || kOpcodeInfo.IsStmt(node->GetOpCode())) { + return nullptr; + } + std::pair p = DispatchFold(node); + BaseNode *result = PairToExpr(node->GetPrimType(), p); + if (result == node) { + result = nullptr; + } + return result; +} + +std::pair ConstantFold::FoldDepositbits(DepositbitsNode *node) { + CHECK_NULL_FATAL(node); + BaseNode *result = nullptr; + uint8 bOffset = node->GetBitsOffset(); + uint8 bSize = node->GetBitsSize(); + std::pair lp = DispatchFold(node->Opnd(0)); + std::pair rp = DispatchFold(node->Opnd(1)); + ConstvalNode *lConst = safe_cast(lp.first); + ConstvalNode *rConst = safe_cast(rp.first); + if (lConst != nullptr && rConst != nullptr) { + MIRIntConst *intConst0 = safe_cast(lConst->GetConstVal()); + MIRIntConst *intConst1 = safe_cast(rConst->GetConstVal()); + ConstvalNode *resultConst = mirModule->CurFuncCodeMemPool()->New(); + resultConst->SetPrimType(node->GetPrimType()); + MIRType &type = *GlobalTables::GetTypeTable().GetPrimType(node->GetPrimType()); + MIRIntConst *constValue = mirModule->GetMemPool()->New(0, type); + uint64 op0ExtractVal = 0; + uint64 op1ExtractVal = 0; + uint64 mask0 = (1LLU << (bSize + bOffset)) - 1; + uint64 mask1 = (1LLU << bOffset) - 1; + uint64 op0Mask = ~(mask0 ^ mask1); + op0ExtractVal = (static_cast(intConst0->GetValue()) & op0Mask); + op1ExtractVal = (static_cast(intConst1->GetValue()) << bOffset) & ((1ULL << (bSize + bOffset)) - 1); + constValue->SetValue(op0ExtractVal | op1ExtractVal); + resultConst->SetConstVal(constValue); + result = resultConst; + } else { + BaseNode *l = PairToExpr(node->Opnd(0)->GetPrimType(), lp); + BaseNode *r = PairToExpr(node->Opnd(1)->GetPrimType(), rp); + if (l != node->Opnd(0) || r != node->Opnd(1)) { + result = mirModule->CurFuncCodeMemPool()->New(Opcode(node->GetOpCode()), + PrimType(node->GetPrimType()), + bOffset, bSize, l, r); + } else { + result = node; + } + } + return std::make_pair(result, 0); +} + +std::pair ConstantFold::FoldArray(ArrayNode *node) { + CHECK_NULL_FATAL(node); + BaseNode *result = nullptr; + size_t i = 0; + bool isFolded = false; + ArrayNode *arrNode = + mirModule->CurFuncCodeMemPool()->New(*mirModule, + PrimType(node->GetPrimType()), + node->GetTyIdx(), + node->GetBoundsCheck()); + for (i = 0; i < node->GetNopndSize(); i++) { + std::pair p = DispatchFold(node->GetNopndAt(i)); + BaseNode *tmpNode = PairToExpr(node->GetNopndAt(i)->GetPrimType(), p); + if (tmpNode != node->GetNopndAt(i)) { + isFolded = true; + } + 
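+    // append the (possibly folded) index operand; isFolded records whether any operand changed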
arrNode->GetNopnd().push_back(tmpNode); + arrNode->SetNumOpnds(arrNode->GetNumOpnds() + 1); + } + if (isFolded) { + result = arrNode; + } else { + result = node; + } + return std::make_pair(result, 0); +} + +std::pair ConstantFold::FoldTernary(TernaryNode *node) { + CHECK_NULL_FATAL(node); + BaseNode *result = node; + std::vector primTypes; + std::vector> p; + for (size_t i = 0; i < node->NumOpnds(); i++) { + BaseNode *tempNopnd = node->Opnd(i); + CHECK_NULL_FATAL(tempNopnd); + primTypes.push_back(tempNopnd->GetPrimType()); + p.push_back(DispatchFold(tempNopnd)); + } + if (node->GetOpCode() == OP_select) { + ConstvalNode *const0 = safe_cast(p[0].first); + if (const0 != nullptr) { + MIRIntConst *intConst0 = safe_cast(const0->GetConstVal()); + // Selecting the first value if not 0, selecting the second value otherwise. + if (intConst0->GetValue()) { + result = PairToExpr(primTypes[1], p[1]); + } else { + result = PairToExpr(primTypes[2], p[2]); + } + } else if (node->Opnd(0) && node->Opnd(0)->GetOpCode() == OP_dread && primTypes[0] == PTY_u1) { + ConstvalNode *const1 = safe_cast(p[1].first); + ConstvalNode *const2 = safe_cast(p[2].first); + if (const1 != nullptr && const2 != nullptr) { + MIRIntConst *intConst1 = safe_cast(const1->GetConstVal()); + MIRIntConst *int2 = safe_cast(const2->GetConstVal()); + if (intConst1->GetValue() == 1 && int2->GetValue() == 0) { + BaseNode *tmpNode = node->Opnd(0); + if (node->GetPrimType() != PTY_u1) { + tmpNode = mirModule->CurFuncCodeMemPool()->New(OP_cvt, PrimType(node->GetPrimType()), + PTY_u1, node->Opnd(0)); + } + std::pair pairTemp = DispatchFold(tmpNode); + result = PairToExpr(node->GetPrimType(), pairTemp); + } else if (intConst1->GetValue() == 0 && int2->GetValue() == 1) { + BaseNode *lnot = mirModule->CurFuncCodeMemPool()->New(OP_lnot, PTY_u1, node->Opnd(0)); + BaseNode *tmpNode = lnot; + if (node->GetPrimType() != PTY_u1) { + tmpNode = mirModule->CurFuncCodeMemPool()->New(OP_cvt, PrimType(node->GetPrimType()), + PTY_u1, lnot); + } + std::pair pairTemp = DispatchFold(tmpNode); + result = PairToExpr(node->GetPrimType(), pairTemp); + } + } + } + } else { + BaseNode *e0 = PairToExpr(primTypes[0], p[0]); + BaseNode *e1 = PairToExpr(primTypes[1], p[1]); + BaseNode *e2 = PairToExpr(primTypes[2], p[2]); // count up to 3 for ternary node + if (e0 != node->Opnd(0) || e1 != node->Opnd(1) || e2 != node->Opnd(2)) { + result = mirModule->CurFuncCodeMemPool()->New(Opcode(node->GetOpCode()), + PrimType(node->GetPrimType()), + e0, e1, e2); + } + } + return std::make_pair(result, 0); +} + +StmtNode *ConstantFold::SimplifyDassign(DassignNode *node) { + CHECK_NULL_FATAL(node); + BaseNode *returnValue = nullptr; + returnValue = Fold(node->GetRHS()); + if (returnValue != nullptr) { + node->SetRHS(returnValue); + } + return node; +} + +StmtNode *ConstantFold::SimplifyIassign(IassignNode *node) { + CHECK_NULL_FATAL(node); + BaseNode *returnValue = nullptr; + returnValue = Fold(node->Opnd(0)); + if (returnValue != nullptr) { + node->SetOpnd(returnValue, 0); + } + returnValue = Fold(node->GetRHS()); + if (returnValue != nullptr) { + node->SetRHS(returnValue); + } + switch (node->Opnd(0)->GetOpCode()) { + case OP_addrof: { + AddrofNode *addrofNode = static_cast(node->Opnd(0)); + DassignNode *dassignNode = mirModule->CurFuncCodeMemPool()->New(); + dassignNode->SetStIdx(addrofNode->GetStIdx()); + dassignNode->SetRHS(node->GetRHS()); + dassignNode->SetFieldID(addrofNode->GetFieldID() + node->GetFieldID()); + return dassignNode; + } + case OP_iaddrof: { + IreadNode *iaddrofNode 
= static_cast(node->Opnd(0)); + if (iaddrofNode->Opnd(0)->GetOpCode() == OP_dread) { + AddrofNode *dreadNode = static_cast(iaddrofNode->Opnd(0)); + node->SetFieldID(node->GetFieldID() + iaddrofNode->GetFieldID()); + node->SetOpnd(dreadNode, 0); + node->SetTyIdx(iaddrofNode->GetTyIdx()); + } + break; + } + default: + break; + } + return node; +} + +StmtNode *ConstantFold::SimplifyCondGoto(CondGotoNode *node) { + CHECK_NULL_FATAL(node); + BaseNode *returnValue = nullptr; + returnValue = Fold(node->Opnd(0)); + if (returnValue != nullptr) { + node->SetOpnd(returnValue, 0); + ConstvalNode *cst = safe_cast(node->Opnd(0)); + if (cst == nullptr) { + return node; + } + MIRIntConst *intConst = safe_cast(cst->GetConstVal()); + if ((OP_brtrue == node->GetOpCode() && intConst->GetValueUnderType() != 0) || + (OP_brfalse == node->GetOpCode() && intConst->GetValueUnderType() == 0)) { + GotoNode *gotoNode = mirModule->CurFuncCodeMemPool()->New(OP_goto); + gotoNode->SetOffset(node->GetOffset()); + return gotoNode; + } else { + return nullptr; + } + } else if (node->Opnd(0)->GetOpCode() == OP_select) { + return SimplifyCondGotoSelect(node); + } + return node; +} + +StmtNode *ConstantFold::SimplifyCondGotoSelect(CondGotoNode *node) { + CHECK_NULL_FATAL(node); + TernaryNode *sel = static_cast(node->Opnd(0)); + if (sel == nullptr || sel->GetOpCode() != OP_select) { + return node; + } + ConstvalNode *const1 = safe_cast(sel->Opnd(1)); + ConstvalNode *const2 = safe_cast(sel->Opnd(2)); + if (const1 != nullptr && const2 != nullptr) { + MIRIntConst *intConst1 = safe_cast(const1->GetConstVal()); + MIRIntConst *int2 = safe_cast(const2->GetConstVal()); + if (intConst1->GetValue() == 1 && int2->GetValue() == 0) { + node->SetOpnd(sel->Opnd(0), 0); + } else if (intConst1->GetValue() == 0 && int2->GetValue() == 1) { + node->SetOpCode((node->GetOpCode() == OP_brfalse) ? 
OP_brtrue : OP_brfalse); + node->SetOpnd(sel->Opnd(0), 0); + } + } + return node; +} + +StmtNode *ConstantFold::SimplifySwitch(SwitchNode *node) { + CHECK_NULL_FATAL(node); + BaseNode *returnValue = nullptr; + returnValue = Fold(node->GetSwitchOpnd()); + if (returnValue != nullptr) { + node->SetSwitchOpnd(returnValue); + ConstvalNode *cst = safe_cast(node->GetSwitchOpnd()); + if (cst == nullptr) { + return node; + } + MIRIntConst *intConst = safe_cast(cst->GetConstVal()); + GotoNode *gotoNode = mirModule->CurFuncCodeMemPool()->New(OP_goto); + bool isdefault = true; + for (unsigned i = 0; i < node->GetSwitchTable().size(); i++) { + if (node->GetCasePair(i).first == intConst->GetValue()) { + isdefault = false; + gotoNode->SetOffset((LabelIdx)node->GetCasePair(i).second); + break; + } + } + if (isdefault) { + gotoNode->SetOffset(node->GetDefaultLabel()); + } + return gotoNode; + } + return node; +} + +StmtNode *ConstantFold::SimplifyUnary(UnaryStmtNode *node) { + CHECK_NULL_FATAL(node); + BaseNode *returnValue = nullptr; + if (node->Opnd(0) == nullptr) { + return node; + } + returnValue = Fold(node->Opnd(0)); + if (returnValue != nullptr) { + node->SetOpnd(returnValue, 0); + } + return node; +} + +StmtNode *ConstantFold::SimplifyBinary(BinaryStmtNode *node) { + CHECK_NULL_FATAL(node); + BaseNode *returnValue = nullptr; + returnValue = Fold(node->GetBOpnd(0)); + if (returnValue != nullptr) { + node->SetBOpnd(returnValue, 0); + } + returnValue = Fold(node->GetBOpnd(1)); + if (returnValue != nullptr) { + node->SetBOpnd(returnValue, 1); + } + return node; +} + +StmtNode *ConstantFold::SimplifyBlock(BlockNode *node) { + CHECK_NULL_FATAL(node); + if (node->GetFirst() == nullptr) { + return node; + } + StmtNode *s = node->GetFirst(); + StmtNode *prevStmt = nullptr; + do { + StmtNode *returnValue = Simplify(s); + if (returnValue != nullptr) { + if (returnValue->GetOpCode() == OP_block) { + BlockNode *blk = static_cast(returnValue); + node->ReplaceStmtWithBlock(*s, *blk); + } else { + node->ReplaceStmt1WithStmt2(s, returnValue); + } + prevStmt = s; + s = s->GetNext(); + } else { + // delete s from block + StmtNode *nextStmt = s->GetNext(); + if (s == node->GetFirst()) { + node->SetFirst(nextStmt); + if (nextStmt != nullptr) { + nextStmt->SetPrev(nullptr); + } + } else { + CHECK_NULL_FATAL(prevStmt); + prevStmt->SetNext(nextStmt); + if (nextStmt != nullptr) { + nextStmt->SetPrev(prevStmt); + } + } + if (s == node->GetLast()) { + node->SetLast(prevStmt); + } + s = nextStmt; + } + } while (s != nullptr); + return node; +} + +StmtNode *ConstantFold::SimplifyIf(IfStmtNode *node) { + CHECK_NULL_FATAL(node); + BaseNode *returnValue = nullptr; + (void)Simplify(node->GetThenPart()); + if (node->GetElsePart()) { + (void)Simplify(node->GetElsePart()); + } + returnValue = Fold(node->Opnd()); + if (returnValue != nullptr) { + node->SetOpnd(returnValue, 0); + ConstvalNode *cst = safe_cast(node->Opnd()); + if (cst == nullptr) { + return node; + } + MIRIntConst *intConst = safe_cast(cst->GetConstVal()); + if (0 == intConst->GetValue()) { + return node->GetElsePart(); + } else { + return node->GetThenPart(); + } + } + return node; +} + +StmtNode *ConstantFold::SimplifyWhile(WhileStmtNode *node) { + CHECK_NULL_FATAL(node); + BaseNode *returnValue = nullptr; + if (node->Opnd(0) == nullptr) { + return node; + } + if (node->GetBody()) { + (void)Simplify(node->GetBody()); + } + returnValue = Fold(node->Opnd(0)); + if (returnValue != nullptr) { + node->SetOpnd(returnValue, 0); + ConstvalNode *cst = safe_cast(node->Opnd(0)); 
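+    // a condition folded to constant zero means an OP_while loop never runs;
+    // an OP_dowhile body still executes once, so it is kept below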
+    if (cst == nullptr) {
+      return node;
+    }
+    if (cst->GetConstVal()->IsZero()) {
+      if (OP_while == node->GetOpCode()) {
+        return nullptr;
+      } else {
+        return node->GetBody();
+      }
+    }
+  }
+  return node;
+}
+
+StmtNode *ConstantFold::SimplifyNary(NaryStmtNode *node) {
+  CHECK_NULL_FATAL(node);
+  BaseNode *returnValue = nullptr;
+  for (size_t i = 0; i < node->NumOpnds(); i++) {
+    returnValue = Fold(node->GetNopndAt(i));
+    if (returnValue != nullptr) {
+      node->SetNOpndAt(i, returnValue);
+    }
+  }
+  return node;
+}
+
+StmtNode *ConstantFold::SimplifyIcall(IcallNode *node) {
+  CHECK_NULL_FATAL(node);
+  BaseNode *returnValue = nullptr;
+  for (size_t i = 0; i < node->NumOpnds(); i++) {
+    returnValue = Fold(node->GetNopndAt(i));
+    if (returnValue != nullptr) {
+      node->SetNOpndAt(i, returnValue);
+    }
+  }
+  // transform an icall node into a call node when the callee address is known
+  CHECK_FATAL(node->GetNopnd().empty() == false, "container check");
+  switch (node->GetNopndAt(0)->GetOpCode()) {
+    case OP_addroffunc: {
+      AddroffuncNode *addrofNode = static_cast<AddroffuncNode*>(node->GetNopndAt(0));
+      CallNode *callNode =
+          mirModule->CurFuncCodeMemPool()->New<CallNode>(*mirModule,
+                                                         node->GetOpCode() == OP_icall ? OP_call : OP_callassigned);
+      if (node->GetOpCode() == OP_icallassigned) {
+        callNode->SetReturnVec(node->GetReturnVec());
+      }
+      callNode->SetPUIdx(addrofNode->GetPUIdx());
+      for (size_t i = 1; i < node->GetNopndSize(); i++) {
+        callNode->GetNopnd().push_back(node->GetNopndAt(i));
+      }
+      callNode->SetNumOpnds(callNode->GetNopndSize());
+      return callNode;
+    }
+    default:
+      break;
+  }
+  return node;
+}
+
+void ConstantFold::ProcessFunc(MIRFunction *func) {
+  if (func->IsEmpty()) {
+    return;
+  }
+  mirModule->SetCurFunction(func);
+  (void)Simplify(func->GetBody());
+}
+}  // namespace maple
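The pair(BaseNode*, int64) convention that the FoldBinary comment above describes can be seen in isolation in the sketch below. It is an illustration only, using a toy Expr tree with hypothetical NewConst/NewAdd helpers rather than the MIR node classes: constants met along an add chain accumulate in the int64 half of the pair, and a PairToExpr-style rebuild materializes the pending sum exactly once at the end.

#include <cstdint>
#include <iostream>
#include <utility>

struct Expr {
  enum Kind { kConst, kVar, kAdd } kind;
  int64_t value = 0;  // constant payload for kConst
  Expr *lhs = nullptr;
  Expr *rhs = nullptr;
};

Expr *NewConst(int64_t v) { return new Expr{Expr::kConst, v}; }
Expr *NewAdd(Expr *l, Expr *r) { return new Expr{Expr::kAdd, 0, l, r}; }

// Materialize the pending constant offset, mirroring PairToExpr.
Expr *PairToExpr(const std::pair<Expr*, int64_t> &p) {
  return (p.second == 0) ? p.first : NewAdd(p.first, NewConst(p.second));
}

// Mirror of the OP_add handling in FoldBinary: a constant operand is absorbed
// into the int64 half of the pair instead of returning a nullptr tree.
std::pair<Expr*, int64_t> Fold(Expr *e) {
  if (e->kind != Expr::kAdd) {
    return {e, 0};
  }
  std::pair<Expr*, int64_t> lp = Fold(e->lhs);
  std::pair<Expr*, int64_t> rp = Fold(e->rhs);
  if (lp.first->kind == Expr::kConst && rp.first->kind == Expr::kConst) {
    // both sides constant: fold outright, like FoldConstBinary
    return {NewConst(lp.first->value + lp.second + rp.first->value + rp.second), 0};
  }
  if (lp.first->kind == Expr::kConst) {  // c + X -> pair(X, c + pending sums)
    return {rp.first, lp.first->value + lp.second + rp.second};
  }
  if (rp.first->kind == Expr::kConst) {  // X + c -> pair(X, c + pending sums)
    return {lp.first, rp.first->value + lp.second + rp.second};
  }
  return {NewAdd(lp.first, rp.first), lp.second + rp.second};
}

int main() {
  Expr *x = new Expr{Expr::kVar};
  // (x + 4) + 2 linearizes to pair(x, 6); the add is rebuilt exactly once.
  std::pair<Expr*, int64_t> p = Fold(NewAdd(NewAdd(x, NewConst(4)), NewConst(2)));
  std::cout << "pending sum: " << p.second << '\n';                               // 6
  std::cout << "rebuilt is add: " << (PairToExpr(p)->kind == Expr::kAdd) << '\n'; // 1
  return 0;
}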