diff --git a/.gitignore b/.gitignore index f71bf2703f6ebf9a684037b3df228a6082d56a73..dcf0dec08f06bb5391c8935caae1a02ce71b0c3c 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,8 @@ third_party/aosp_10.0.0_r35* third_party/aosp_modified* third_party/ctorture* third_party/llvm_modified* +third_party/llvm-* +tools/lib tools/bin* tools/android* tools/aosp* diff --git a/BUILD.gn b/BUILD.gn index 6801cf5b4813146f60163e5bf837271829983af0..40e691379fdb548eb9a6292775b15262bfa1c51d 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -13,52 +13,56 @@ # See the Mulan PSL v2 for more details. # group("maple") { - deps = [ - "${MAPLEALL_ROOT}:maple", - ] + deps = [ "${MAPLEALL_ROOT}:maple" ] } group("irbuild") { - deps = [ - "${MAPLEALL_ROOT}:irbuild", - ] + deps = [ "${MAPLEALL_ROOT}:irbuild" ] } group("hir2mpl") { - deps = [ - "${HIR2MPL_ROOT}:hir2mpl", - ] + deps = [ "${HIR2MPL_ROOT}:hir2mpl" ] } group("hir2mplUT") { - deps = [ - "${HIR2MPL_ROOT}/test:hir2mplUT", - ] + deps = [ "${HIR2MPL_ROOT}/test:hir2mplUT" ] } group("maple-rt") { - deps = [ - "${MAPLE_MRT_ROOT}:maple-rt", - ] + deps = [ "${MAPLE_MRT_ROOT}:maple-rt" ] } group("ast2mpl") { deps = [] if (IS_AST2MPL_EXISTS == "1") { - deps = [ - "${AST2MPL_ROOT}/src:ast2mpl", - ] + deps = [ "${AST2MPL_ROOT}/src:ast2mpl" ] } } +group("mapleallUT") { + deps = [ + "${MAPLEALL_ROOT}/test:mapleallUT", + ] +} + group("maplegendef") { - exec_script("${MAPLEALL_ROOT}/maple_be/mdgen/gendef.py", - [ - rebase_path("${GN_BINARY_OUTPUT_DIRECTORY}/maplegen", - root_build_dir), - rebase_path("${MAPLEALL_ROOT}/maple_be/include/ad/cortex_a55", - root_build_dir), - rebase_path("${MAPLE_BUILD_OUTPUT}/common/target", - root_build_dir), - ]) + exeTool = "-e" + rebase_path("${GN_BINARY_OUTPUT_DIRECTORY}/maplegen", root_build_dir) + mdDir = "-m" + rebase_path("${MAPLEALL_ROOT}/maple_be/include/ad/cortex_a55", root_build_dir) + outDir = "-o" + rebase_path("${MAPLE_BUILD_OUTPUT}/common/target", root_build_dir) + if (ASAN == 1) { + exec_script("${MAPLEALL_ROOT}/maple_be/mdgen/gendef.py", + [ + "-aLD_PRELOAD=${LLVMLIBDIR}/libclang_rt.asan-x86_64.so", + exeTool, + mdDir, + outDir, + ]) + } else { + exec_script("${MAPLEALL_ROOT}/maple_be/mdgen/gendef.py", + [ + exeTool, + mdDir, + outDir + ]) + } } diff --git a/Makefile b/Makefile index 250f27b5c0b3e6adb5888bb50a361d67a321f1e7..71227d9d4d11e7bc700e701a8d4df39cf943efda 100644 --- a/Makefile +++ b/Makefile @@ -26,7 +26,8 @@ RELEASE_VERSION := $(MAPLE_RELEASE_VERSION) BUILD_VERSION := $(MAPLE_BUILD_VERSION) GIT_REVISION := $(shell git log --pretty=format:"%H" -1) MAST := 0 - +ASAN := 0 +ONLY_C := 0 ifeq ($(DEBUG),0) BUILD_TYPE := RELEASE else @@ -55,7 +56,9 @@ GN_OPTIONS := \ RELEASE_VERSION="$(RELEASE_VERSION)" \ BUILD_VERSION="$(BUILD_VERSION)" \ GIT_REVISION="$(GIT_REVISION)" \ - MAST=$(MAST) + MAST=$(MAST) \ + ASAN=$(ASAN) \ + ONLY_C=$(ONLY_C) .PHONY: default default: install @@ -117,13 +120,17 @@ libcore: maple-rt maple-rt: java-core-def $(call build_gn, $(GN_OPTIONS), maple-rt) +.PHONY: mapleallUT +mapleallUT: install_patch + $(call build_gn, $(GN_OPTIONS), mapleallUT) + .PHONY: java-core-def java-core-def: install mkdir -p $(LIB_CORE_PATH); \ cp -rp $(MAPLE_ROOT)/libjava-core/* $(LIB_CORE_PATH)/; \ cd $(LIB_CORE_PATH); \ ln -f -s $(MAPLE_ROOT)/build/core/libcore.mk ./makefile; \ - $(MAKE) gen-def OPT=$(OPT) DEBUG=$(DEBUG) OPS_ANDROID=$(OPS_ANDROID) + $(MAKE) gen-def OPT=$(OPT) DEBUG=$(DEBUG) .PHONY: install install: maple dex2mpl_install irbuild hir2mpl @@ -158,6 +165,10 @@ ctorture-ci: ctorture: (cd third_party/ctorture; git checkout 
.; git pull; ./run.sh work.list) +.PHONY: ctorture2 +ctorture2: + (cd third_party/ctorture; git checkout .; git pull; ./run.sh work.list hir2mpl) + THREADS := 50 ifneq ($(findstring test,$(MAKECMDGOALS)),) TESTTARGET := $(MAKECMDGOALS) diff --git a/Readme.md b/Readme.md index 36f87f02582b5b8c3612d3080da5f0a8c59c0062..870f27a720326ec28ca5afd48ac9a87b4d16265f 100644 --- a/Readme.md +++ b/Readme.md @@ -38,7 +38,7 @@ Open the front end, back end, and compilation optimization of compilers. Support - source build/envsetup.sh arm release or - source build/envsetup.sh arm debug -- make setup +- make setup (In this step, you may need to configure the proxy or VPN to download all dependencies.) - make - make libcore - make testall @@ -79,4 +79,4 @@ C example: - 9:00 am - 10:30 am everyday, developers can discuss community issues in the voice channel "panel discussion" - [Discord-MapleFE](https://discord.gg/sBj3cc4GhM) -- we have weekly meeting about projects related to MapleFE \ No newline at end of file +- we have weekly meeting about projects related to MapleFE diff --git a/Readme_zh.md b/Readme_zh.md index e1eb04f06f47761671c74246c7cc94ff275111bb..a2aab5783752ad526e2a212a7374fd72ca03abbd 100644 --- a/Readme_zh.md +++ b/Readme_zh.md @@ -38,7 +38,7 @@ OpenArkCompiler是来自华为方舟编译器的开源项目。 - source build/envsetup.sh arm release 或 - source build/envsetup.sh arm debug -- make setup +- make setup (这一步可能需要配置代理或者vpn,才能将所有依赖下载下来) - make - make libcore - make testall @@ -73,4 +73,4 @@ OpenArkCompiler是来自华为方舟编译器的开源项目。 - 每天上午9:00~10:30不定时在语音频道“panel disscussion”交流社区议题。 ## 如何提交代码 -- [wiki](https://gitee.com/openarkcompiler/OpenArkCompiler/wikis/%E4%BB%A3%E7%A0%81%E6%8F%90%E4%BA%A4%E6%B5%81%E7%A8%8B?sort_id=2447213) \ No newline at end of file +- [wiki](https://gitee.com/openarkcompiler/OpenArkCompiler/wikis/%E4%BB%A3%E7%A0%81%E6%8F%90%E4%BA%A4%E6%B5%81%E7%A8%8B?sort_id=2447213) diff --git a/build/config/BUILDCONFIG.gn b/build/config/BUILDCONFIG.gn index 81fc233d64b05293fc648a8cf990823947250c91..efe75a739dccc3c7aab81a16bd9b50ff48e092cd 100755 --- a/build/config/BUILDCONFIG.gn +++ b/build/config/BUILDCONFIG.gn @@ -31,6 +31,8 @@ declare_args() { BUILD_VERSION = "" GIT_REVISION = "" MAST = 0 + ASAN = 0 + ONLY_C = 0 } # Define global args diff --git a/doc/cn/DeveloperGuide.md b/doc/cn/DeveloperGuide.md index c99545f42b7b02dbf8f7f1fc527d5294395d8fd6..969f52b08816f29a9a796d8852ce78f9b60395ee 100644 --- a/doc/cn/DeveloperGuide.md +++ b/doc/cn/DeveloperGuide.md @@ -27,17 +27,17 @@ make - `make` 编译OpenArkCompiler的Release版本; - `make BUILD_TYPE=DEBUG` 编译OpenArkCompiler的Debug版本。 -在openarkcompiler目录下执行以下命令,编译出OpenArkCompiler及maple runtime部分,默认输出路径 openarkcompiler/output/TYPE。 +在openarkcompiler目录下执行以下命令,编译出OpenArkCompiler及maple runtime部分,默认输出路径 openarkcompiler/output/TYPE, TYPE: aarch64-clang-release。 ``` source build/envsetup.sh arm release +make setup make libcore ``` 命令说明: - `make libcore` 编译OpenArkCompiler及maple runtime部分的Release版本; -- `make libcore OPS_ANDROID=1` 编译Android版本的OpenArkCompiler及maple runtime部分的Release版本; - `make libcore BUILD_TYPE=DEBUG` 编译OpenArkCompiler及maple runtime部分的Debug版本; 此外,方舟编译器还提供了源码编译脚本,开发者也可以通过在openarkcompiler目录下执行该脚本,默认编译出OpenArkCompiler及maple runtime部分的Release版本。执行命令如下: diff --git a/doc/en/DeveloperGuide.md b/doc/en/DeveloperGuide.md index d5ecfa04d30547db1fbea61a12d32a8a1c0a8dd2..7b604e2540d76f5c04f236e94ee23ebed3a2717e 100644 --- a/doc/en/DeveloperGuide.md +++ b/doc/en/DeveloperGuide.md @@ -17,10 +17,11 @@ You can download the OpenArkCompiler source code in `Clone` or `Download` mode. 
## Compiling Source Code -Run the following command in the openarkcompiler directory to compile OpenArkCompiler. The output path is openarkcompiler/output/bin by default. +Run the following command in the openarkcompiler directory to compile OpenArkCompiler. The default output path is openarkcompiler/output/TYPE/bin, where TYPE is aarch64-clang-release. ``` source build/envsetup.sh +make setup make ``` Command description: @@ -29,6 +30,19 @@ Command description: - `make`: Compile the release version of OpenArkCompiler. - `make BUILD_TYPE=DEBUG`: Compile the debug version of OpenArkCompiler. +Run the following command in the openarkcompiler directory to compile OpenArkCompiler and maple runtime. The default output path is openarkcompiler/output/TYPE, where TYPE is aarch64-clang-release. + +``` +source build/envsetup.sh arm release +make setup +make libcore +``` + +Command description: + +- `make libcore`: Compile the release version of OpenArkCompiler and maple runtime; +- `make libcore BUILD_TYPE=DEBUG`: Compile the debug version of OpenArkCompiler and maple runtime; + In addition, the OpenArkCompiler also provides a shell script which contains the command to compile OpenArkCompiler. The developer can run the script in the openarkcompiler directory to compile OpenArkCompiler. The command to run the script: ``` diff --git a/src/MapleFE/.gitignore b/src/MapleFE/.gitignore index 46890e54e45d66060ba85adb278ca197da8524d8..533695ff01a419af9567d36b8d2cf41970296906 100644 --- a/src/MapleFE/.gitignore +++ b/src/MapleFE/.gitignore @@ -10,6 +10,7 @@ output/ *.png *.out.ts *.ts.ast +*.java.ast gdbfile test/typescript/**/*.cpp test/typescript/**/*.h diff --git a/src/MapleFE/Makefile b/src/MapleFE/Makefile index a0d1730fdc576a8dff4cb459cd068a3ad16241a1..f1dbcac61dd5bed54e947fda4a6c6be58a2144c1 100644 --- a/src/MapleFE/Makefile +++ b/src/MapleFE/Makefile @@ -14,7 +14,7 @@ include Makefile.in -TARGS = autogen shared recdetect ladetect astopt java2mpl ast2mpl ts2ast ast2cpp c2ast +TARGS = autogen shared recdetect ladetect astopt java2mpl ast2mpl ts2ast ast2cpp c2ast obfuscate # create BUILDDIR first $(shell $(MKDIR_P) $(BUILDDIR)) @@ -22,7 +22,7 @@ $(shell $(MKDIR_P) $(BUILDDIR)) ifeq ($(SRCLANG),java) TARGET := java2ast ast2mpl else ifeq ($(SRCLANG),typescript) - TARGET := ts2ast ast2cpp + TARGET := ts2ast ast2cpp obfuscate else ifeq ($(SRCLANG),c) TARGET := c2ast endif @@ -53,6 +53,9 @@ astopt: shared recdetect ladetect ast2cpp: astopt ts2ast $(MAKE) -C ast2cpp +obfuscate: astopt ts2ast ast2cpp + $(MAKE) -C tools/obfuscate + shared: autogen $(MAKE) -C shared diff --git a/src/MapleFE/ast2cpp/include/cpp_declaration.h b/src/MapleFE/ast2cpp/include/cpp_declaration.h index fcf6b61c323171b9e610b6907c739231132cc512..352a0006bc51c858d06c97b5bd1a24562cb4e327 100644 --- a/src/MapleFE/ast2cpp/include/cpp_declaration.h +++ b/src/MapleFE/ast2cpp/include/cpp_declaration.h @@ -36,8 +36,6 @@ public: return EmitTreeNode(GetASTModule()); } - std::string GenFunctionClass(FunctionNode* node); - void AddImportedModule(const std::string& module); bool IsImportedModule(const std::string& module); @@ -60,7 +58,6 @@ public: std::string EmitCallNode(CallNode *node) override; std::string EmitFunctionNode(FunctionNode *node) override; std::string EmitPrimTypeNode(PrimTypeNode *node) override; - std::string EmitPrimArrayTypeNode(PrimArrayTypeNode *node) override; std::string EmitModuleNode(ModuleNode *node) override; std::string EmitClassNode(ClassNode *node) override; @@ -71,13 +68,15 @@ public: std::string EmitStructNode(StructNode *node) override; std::string 
EmitTypeAliasNode(TypeAliasNode* node) override; std::string EmitLiteralNode(LiteralNode* node) override; + std::string EmitArrayTypeNode(ArrayTypeNode *node) override; std::string GetTypeString(TreeNode *node, TreeNode *child = nullptr); - std::string EmitArrayLiteral(ArrayLiteralNode *node, int dim, std::string type); std::string EmitTSEnum(StructNode *node); std::string EmitInterface(StructNode *node); void CollectFuncArgInfo(TreeNode* node); + std::string ConstructArray(ArrayLiteralNode* node, int dim, std::string type); + std::string ConstructArrayAny(ArrayLiteralNode* node); }; inline bool IsVarInitStructLiteral(DeclNode* node) { @@ -92,8 +91,6 @@ inline bool IsVarInitClass(DeclNode* node) { node->GetInit()->IsIdentifier(); } -bool IsBuiltinObj(std::string name); - template <typename T> bool HasAttrStatic(T* node) { for (unsigned i = 0; i < node->GetAttrsNum(); ++i) { diff --git a/src/MapleFE/ast2cpp/include/cpp_definition.h b/src/MapleFE/ast2cpp/include/cpp_definition.h index 07dfec249ed6cc74b436aac3a7d428f40a6bcd12..ab4c1b077356d1a8b6d1ac7f80797a6e58a25583 100644 --- a/src/MapleFE/ast2cpp/include/cpp_definition.h +++ b/src/MapleFE/ast2cpp/include/cpp_definition.h @@ -26,8 +26,9 @@ class CppDef : public CppEmitter { public: CppDecl &mCppDecl; bool mIsInit; + bool mIsGenerator; - CppDef(Module_Handler *h, CppDecl &d) : CppEmitter(h), mCppDecl(d), mIsInit(false) {} + CppDef(Module_Handler *h, CppDecl &d) : CppEmitter(h), mCppDecl(d), mIsInit(false), mIsGenerator(false) {} std::string Emit() { return EmitTreeNode(GetASTModule()); @@ -64,24 +65,26 @@ public: std::string EmitAsTypeNode(AsTypeNode *node) override; std::string EmitNamespaceNode(NamespaceNode *node) override; std::string EmitRegExprNode(RegExprNode *node); + std::string EmitStructNode(StructNode *node) override; + std::string EmitStructLiteralNode(StructLiteralNode* node) override; + std::string EmitWhileLoopNode(WhileLoopNode *node) override; + std::string EmitYieldNode(YieldNode *node) override; std::string& HandleTreeNode(std::string &str, TreeNode *node) override; std::string EmitClassProps(TreeNode *node); std::string EmitFuncScopeVarDecls(FunctionNode *node); - std::string EmitStructNode(StructNode *node); - std::string EmitStructLiteralNode(StructLiteralNode* node); std::string EmitCppCtor(ClassNode* node); std::string EmitCtorInstance(ClassNode *c); std::string EmitDefaultCtor(ClassNode *c); std::string EmitBracketNotationProp(ArrayElementNode* ae, OprId binOpId, bool isLhs, bool& isDynProp); - std::string EmitArrayLiteral(TreeNode* arrType, TreeNode* arrLiteral); - std::string EmitArrayLiterals(TreeNode* arrLiteral, int dim, std::string type); TypeId GetTypeIdFromDecl(TreeNode* id); bool IsClassField(ArrayElementNode* node, std::string propKey); std::string GetTypeForTemplateArg(TreeNode* node); TreeNode* FindDeclType(TreeNode* node); std::string GetThisParamObjType(TreeNode *node); - std::string GenArrayOfAny(TreeNode* node); + + std::string ConstructArray(ArrayLiteralNode* node, int dim, std::string type); + std::string ConstructArrayAny(ArrayLiteralNode* node); std::string GenObjectLiteral(TreeNode* var, std::string varName, TreeNode* idType, StructLiteralNode* n); std::string GenDirectFieldInit(std::string varName, StructLiteralNode* node); }; diff --git a/src/MapleFE/ast2cpp/include/cpp_emitter.h b/src/MapleFE/ast2cpp/include/cpp_emitter.h index 74d81734fdf2746d8b6853074ccabc0cf39d7ef1..ec2f269dba259ae718c2db3d18ca98b74b05c500 100644 --- a/src/MapleFE/ast2cpp/include/cpp_emitter.h +++ 
b/src/MapleFE/ast2cpp/include/cpp_emitter.h @@ -34,6 +34,11 @@ public: bool IsClassId(TreeNode *node); bool IsVarTypeClass(TreeNode* var); void InsertEscapes(std::string& str); + bool IsGenerator(TreeNode *node); + FunctionNode* GetGeneratorFunc(TreeNode *node); + void GetArrayTypeInfo(ArrayLiteralNode* node, int& numDim, std::string& type); + std::string FunctionHeader(FunctionNode* node, std::string retType); + std::string GetClassName(TreeNode* node); }; } // namespace maplefe diff --git a/src/MapleFE/ast2cpp/include/emitter.h b/src/MapleFE/ast2cpp/include/emitter.h index 7ad2b1e895eccb8911887cf04aa5a5431ea06007..f8447db75616fe3886cdee9d83e863082a074955 100644 --- a/src/MapleFE/ast2cpp/include/emitter.h +++ b/src/MapleFE/ast2cpp/include/emitter.h @@ -120,11 +120,13 @@ public: virtual std::string EmitAwaitNode(AwaitNode *node); virtual std::string EmitNameTypePairNode(NameTypePairNode *node); virtual std::string EmitTupleTypeNode(TupleTypeNode *node); + virtual std::string EmitTripleSlashNode(TripleSlashNode *node); virtual std::string EmitModuleNode(ModuleNode *node); virtual std::string EmitAttrNode(AttrNode *node); + virtual std::string EmitArrayTypeNode(ArrayTypeNode *node); + virtual std::string EmitFunctionTypeNode(FunctionTypeNode *node); virtual std::string EmitPrimTypeNode(PrimTypeNode *node); virtual std::string EmitPrimArrayTypeNode(PrimArrayTypeNode *node); - virtual std::string EmitArrayTypeNode(ArrayTypeNode *node); virtual std::string EmitTreeNode(TreeNode *node); virtual std::string& HandleTreeNode(std::string &str, TreeNode *node); @@ -141,6 +143,7 @@ public: //static const char *GetEnumStructProp(StructProp k); //static const char *GetEnumForLoopProp(ForLoopProp k); //static const char *GetEnumLambdaProperty(LambdaProperty k); + const char *GetEnumTripleSlashProp(TripleSlashProp k); std::string &AddParentheses(std::string &str, TreeNode *node); }; diff --git a/src/MapleFE/ast2cpp/include/helper.h b/src/MapleFE/ast2cpp/include/helper.h index 297d57f4070a956300db59c14899edab68cef580..0eb8e89843ecb23e65507e68955a116bbc319deb 100644 --- a/src/MapleFE/ast2cpp/include/helper.h +++ b/src/MapleFE/ast2cpp/include/helper.h @@ -29,13 +29,16 @@ using namespace std::string_literals; namespace maplefe { +extern std::string GeneratorFn_start; +extern std::string GeneratorFn_return; extern std::unordered_map<TypeId, std::string> TypeIdToJSType; extern std::unordered_map<TypeId, std::string> TypeIdToJSTypeCXX; extern TypeId hlpGetTypeId(TreeNode* node); extern std::string GenClassFldAddProp(std::string, std::string, std::string, std::string, std::string); -extern std::string FunctionTemplate(std::string retType, std::string funcName, std::string params, std::string args); -extern std::string GenGeneratorClass(std::string funcName, std::vector<std::pair<std::string, std::string>> args); +extern std::string FunctionClassDecl(std::string retType, std::string funcName, unsigned nodeId); +extern std::string GeneratorClassDecl(std::string funcName, unsigned nodeId); +extern std::string GeneratorClassDef(std::string ns, std::string funcName, unsigned nodeId); extern std::string tab(int n); extern bool IsClassMethod(TreeNode* node); extern std::string GetClassOfAssignedFunc(TreeNode* node); @@ -43,9 +46,31 @@ extern std::string GenAnonFuncName(TreeNode* node); inline std::string ClsName(std::string func) { return "Cls_"s + func; } inline std::string GeneratorName(std::string func) { return "Generator_"s + func; } inline std::string GeneratorFuncName(std::string func) { return "GeneratorFunc_"s + func; } -extern void HandleThisParam(unsigned nParams, TreeNode* node, 
std::string& params, std::string&args); extern std::string hlpGetJSValTypeStr(TypeId typeId); extern std::string ArrayCtorName(int dim, std::string type); +extern bool IsBuiltinObj(std::string name); +extern std::string ObjectTypeStr(std::string name); +extern std::string GeneratorFuncHeader(std::string cls, unsigned nodeId); +extern std::string FunctionParams(unsigned nodeId, bool handleThis, bool argsOnly = false, bool byRef = false, bool fdInit = false, bool capture = false); + +class GeneratorLabels { +private: + unsigned GenLoopId = 0; + unsigned GenYieldId= 0; +public: + std::string NextLoopLabel(void) { + std::string label = "_loop_" + std::to_string(++GenLoopId); + return label; + } + std::string NextYieldLabel(void) { + std::string label = "_yield_" + std::to_string(++GenYieldId); + return label; + } + void ResetLabels(void) { + GenLoopId = 0; + GenYieldId = 0; + } +}; class FuncTable { private: @@ -111,6 +136,7 @@ public: }; extern FuncTable hFuncTable; +extern GeneratorLabels GenFnLabels; } #endif // __HELPER_H__ diff --git a/src/MapleFE/ast2cpp/runtime/include/builtins.h b/src/MapleFE/ast2cpp/runtime/include/builtins.h index 8acac0e030cdd30ce4605930dec3629cac8ddc83..af0fe8f50dd34f39cf5912d7ed54593be4ce5748 100644 --- a/src/MapleFE/ast2cpp/runtime/include/builtins.h +++ b/src/MapleFE/ast2cpp/runtime/include/builtins.h @@ -133,12 +133,15 @@ class Error : public Object { // ecma-262 section references are based on ecma-262 edition 12.0 // ecma262 27.1.1.5 IteratorResult interface: -struct IteratorResult { - bool _done; // status of iterator next() call - JS_Val _value; // done=false: current iteration element value - // done=true: return value of the iterator, undefined if none returned - IteratorResult() : _done(true), _value(undefined) { } - IteratorResult(bool done, JS_Val val) : _done(done), _value(val) { } +struct IteratorResult : public Object { + bool done; // status of iterator next() call + JS_Val value; // done=false: current iteration element value + // done=true: return value of the iterator, undefined if none returned + IteratorResult() : done(true), value(undefined) { + this->AddProp("done", t2crt::ClassFld(&IteratorResult::done).NewProp(this, t2crt::TY_CXX_Bool)); + this->AddProp("value", t2crt::ClassFld(&IteratorResult::value).NewProp(this, t2crt::TY_CXX_Any)); + } + IteratorResult(bool done, JS_Val val) : done(done), value(val) { } ~IteratorResult() { } }; @@ -159,12 +162,13 @@ struct IteratorResult { // 3) %IteratorPrototype%[Symbol.iterator]() = this (current iterator instance) - used in for loops class IteratorProto : public Object { public: + IteratorResult _res; IteratorProto(Function* ctor, Object* proto) : Object(ctor, proto) { } ~IteratorProto() { } // note: the arg on an iterator's 1st next() call is ignored per spec 27.5.1.2 - virtual IteratorResult _next (JS_Val* arg = nullptr) { return IteratorResult(); } - virtual IteratorResult _return(JS_Val* val = nullptr) { return IteratorResult(); } - virtual IteratorResult _throw(Error exception) { return IteratorResult(); } + virtual IteratorResult* next (JS_Val* arg = nullptr) { return &_res; } + virtual IteratorResult* _return(JS_Val* val = nullptr) { return &_res; } + virtual IteratorResult* _throw(Error exception) { return &_res; } // TODO: %IteratorPrototype%[Symbol.iterator]() = this (current iterator instance) }; @@ -182,13 +186,12 @@ public: bool _finished = false; // flag if generator is in finished state bool _firstNext = true; // flag if first next has been called on iterator (27.5.1.2) - 
IteratorResult _return(JS_Val* arg = nullptr) override { - IteratorResult res; + IteratorResult* _return(JS_Val* arg = nullptr) override { _finished = true; if (arg != nullptr) { - res._value = *arg; + _res.value = *arg; } - return res; + return &_res; } }; diff --git a/src/MapleFE/ast2cpp/runtime/include/ts2cpp.h b/src/MapleFE/ast2cpp/runtime/include/ts2cpp.h index c94ea6fd2d282526fd2e10f8fea0dde29a300334..cdf48d2d55cc3ac65f17f6585192b2efbcd8fb25 100644 --- a/src/MapleFE/ast2cpp/runtime/include/ts2cpp.h +++ b/src/MapleFE/ast2cpp/runtime/include/ts2cpp.h @@ -52,6 +52,7 @@ typedef enum JS_Type : uint8_t { TY_Function, // "function" TY_Object, // "object" TY_Array, + TY_Any, // JS_Val::x.field points to a JS_Val TY_LAST, TY_CXX_Undef = TY_Undef | TY_CXX, TY_CXX_Null, @@ -64,7 +65,7 @@ typedef enum JS_Type : uint8_t { TY_CXX_Function, TY_CXX_Object, TY_CXX_Array, - TY_CXX_Any, //indicate JS_Val::x.field pointing to a JS_Val + TY_CXX_Any, // JS_Val::x.field points to a JS_Val TY_CXX_LAST, } JS_Type; diff --git a/src/MapleFE/ast2cpp/runtime/src/ts2cpp.cpp b/src/MapleFE/ast2cpp/runtime/src/ts2cpp.cpp index dc0177419dadae333d0bdc07177ff59f42d093d4..50fecb38bc70c6b4c71e517d5bf98f7392e4178e 100644 --- a/src/MapleFE/ast2cpp/runtime/src/ts2cpp.cpp +++ b/src/MapleFE/ast2cpp/runtime/src/ts2cpp.cpp @@ -16,6 +16,7 @@ std::ostream& operator<< (std::ostream& out, const t2crt::JS_Val& v) { case t2crt::TY_Symbol: out << "symbol"; break; case t2crt::TY_Function: out << "function"; break; case t2crt::TY_Object: out << v.x.val_obj; break; + case t2crt::TY_Any: out << *(t2crt::JS_Val*)v.x.field; break; case t2crt::TY_CXX_Undef: out << "undefined"; break; case t2crt::TY_CXX_Null: out << "null"; break; @@ -27,11 +28,14 @@ std::ostream& operator<< (std::ostream& out, const t2crt::JS_Val& v) { case t2crt::TY_CXX_Symbol: out << "symbol"; break; case t2crt::TY_CXX_Function: out << "function"; break; case t2crt::TY_CXX_Object: out << *(Object**)v.x.field; break; + case t2crt::TY_CXX_Any: out << *(t2crt::JS_Val*)v.x.field; break; } return out; } std::ostream& operator<< (std::ostream& out, t2crt::Object *obj) { + if (obj == nullptr) + return out; out << obj->Dump(); return out; } diff --git a/src/MapleFE/ast2cpp/src/Makefile b/src/MapleFE/ast2cpp/src/Makefile index 32bb62fbc396ac746d0bc80f8b53c2182d27d328..084bcad114a3f5728966433d2aaf28c3569731ba 100644 --- a/src/MapleFE/ast2cpp/src/Makefile +++ b/src/MapleFE/ast2cpp/src/Makefile @@ -27,10 +27,12 @@ SRCG := $(wildcard $(BUILDGEN)/gen*.cpp) OBJG := $(patsubst %.cpp, %.o, $(SRCG)) DEPG := $(patsubst %.cpp, %.d, $(SRCG)) -OBJS :=$(foreach obj,$(OBJ), $(BUILD)/$(obj)) $(OBJG) -DEPS :=$(foreach dep,$(DEP), $(BUILD)/$(dep)) $(DEPG) +LOCALOBJS :=$(foreach obj,$(OBJ), $(BUILD)/$(obj)) +LOCALDEPS :=$(foreach dep,$(DEP), $(BUILD)/$(dep)) +OBJS :=$(LOCALOBJS) $(OBJG) +DEPS :=$(LOCALDEPS) $(DEPG) -LIBOBJS :=$(patsubst $(BUILD)/main.o,,$(OBJS)) +LIBOBJS :=$(patsubst $(BUILD)/main.o,,$(LOCALOBJS)) GENDIR:=${BUILDDIR}/ast_gen/shared @@ -45,6 +47,8 @@ INCLUDES := -I $(MAPLEFE_ROOT)/shared/include \ INCLUDEGEN := -I $(MAPLEFE_ROOT)/shared/include -I $(BUILDDIR)/gen -I $(BUILDASTGEN) TARGET=ast2cpp +TARGET_A=ast2cpp.a + SHAREDLIB = $(BUILDDIR)/astopt/astopt.a $(BUILDDIR)/shared/shared.a $(BUILDASTGEN)/genast.a LANGSPEC=$(BUILDDIR)/typescript/lang_spec.o @@ -78,9 +82,12 @@ $(BUILDGEN)/%.d : $(BUILDGEN)/%.cpp # TARGET depends on OBJS and shared OBJS from shared directory # as well as mapleall libraries -$(BUILDBIN)/$(TARGET): $(OBJS) $(SHAREDLIB) +$(BUILD)/$(TARGET_A): $(LIBOBJS) + 
/usr/bin/ar rcs $(BUILD)/$(TARGET_A) $(LIBOBJS) + +$(BUILDBIN)/$(TARGET): $(BUILD)/$(TARGET_A) $(OBJS) $(SHAREDLIB) @mkdir -p $(BUILDBIN) - $(LD) -o $(BUILDBIN)/$(TARGET) $(OBJS) $(LANGSPEC) $(SHAREDLIB) + $(LD) -o $(BUILDBIN)/$(TARGET) $(BUILD)/main.o $(BUILD)/$(TARGET_A) $(OBJG) $(LANGSPEC) $(SHAREDLIB) clean: rm -rf $(BUILD) diff --git a/src/MapleFE/ast2cpp/src/cpp_declaration.cpp b/src/MapleFE/ast2cpp/src/cpp_declaration.cpp index 96a1e654b0ad4ce3770cbe39f3821cb78f2548cd..315c8624b6a528f0959b5770b4a3d2642a03470b 100644 --- a/src/MapleFE/ast2cpp/src/cpp_declaration.cpp +++ b/src/MapleFE/ast2cpp/src/cpp_declaration.cpp @@ -327,28 +327,6 @@ bool CppDecl::IsImportedModule(const std::string& module) { return res != mImportedModules.end(); } -// Generate class to encap TS/JS required func interfaces -// note: for top level and nested functions only. Not for class methods. -std::string CppDecl::GenFunctionClass(FunctionNode* node) { - std::string params, args, retType; - for (unsigned i = 0; i < node->GetParamsNum(); ++i) { - if (i) { - params += ", "s; - args += ", "s; - } - if (auto n = node->GetParam(i)) { - params += EmitTreeNode(n); - args += GetIdentifierName(n); - if (i==0) - HandleThisParam(node->GetParamsNum(), n, params, args); - } - } - if (node->GetParamsNum() == 0) - HandleThisParam(0, nullptr, params, args); - - return FunctionTemplate(GetTypeString(node->GetType(), nullptr), GetIdentifierName(node), params, args); -} - void CppDecl::CollectFuncArgInfo(TreeNode* node) { if (!node->IsFunction()) return; @@ -403,26 +381,34 @@ namespace )""" + module + R"""( { for(unsigned i = 0; i < num; ++i) { CfgFunc *func = mod->GetNestedFuncAtIndex(i); TreeNode *node = func->GetFuncNode(); + std::string funcName = GetIdentifierName(node); + + CollectFuncArgInfo(node); if (!IsClassMethod(node)) { - bool isGenerator = static_cast<FunctionNode*>(node)->IsGenerator(); - CollectFuncArgInfo(node); std::string ns = GetNamespace(node); if (!ns.empty()) str += "namespace "s + ns + " {\n"s; - if (isGenerator) - str += GenGeneratorClass(GetIdentifierName(node), hFuncTable.GetArgInfo(node->GetNodeId())); - else - str += GenFunctionClass(static_cast<FunctionNode*>(node)); // gen func cls for each top level func + bool isGenerator = static_cast<FunctionNode*>(node)->IsGenerator(); + std::string generatorClassDef; + if (isGenerator) { + str += GeneratorClassDecl(funcName, node->GetNodeId()); + generatorClassDef = GeneratorClassDef(ns, funcName, node->GetNodeId()); + AddDefinition(generatorClassDef); + } + else { + // gen function class for each top level function + str += FunctionClassDecl(GetTypeString(static_cast<FunctionNode*>(node)->GetRetType(), nullptr), GetIdentifierName(node), node->GetNodeId()); + } if (!mHandler->IsFromLambda(node)) { // top level funcs instantiated here as function objects from their func class // top level lambda funcs instantiated later in assignment stmts - std::string typeName = isGenerator? GeneratorFuncName(node->GetName()): ClsName(node->GetName()); - std::string funcinit = typeName + "* "s + node->GetName() + " = new "s + typeName + "();\n"s; + std::string typeName = isGenerator? 
GeneratorFuncName(funcName): ClsName(funcName); + std::string funcinit = typeName + "* "s + funcName + " = new "s + typeName + "();\n"s; if (ns.empty()) AddDefinition(funcinit); else AddDefinition("namespace "s + ns + " {\n"s + funcinit + "\n}\n"s); - str += "extern "s + typeName + "* "s + node->GetName() + ";\n"s; + str += "extern "s + typeName + "* "s + funcName + ";\n"s; } if (!ns.empty()) str += "\n} // namespace " + ns + '\n'; @@ -455,7 +441,7 @@ namespace )""" + module + R"""( { std::string CppDecl::EmitFunctionNode(FunctionNode *node) { if (node == nullptr) return std::string(); - std::string str(GetTypeString(node->GetType(), node->GetType())); + std::string str(GetTypeString(node->GetRetType(), node->GetRetType())); if(node->GetStrIdx()) str += " "s + node->GetName(); str += "("s; @@ -527,6 +513,11 @@ std::string CppDecl::EmitIdentifierNode(IdentifierNode *node) { if (HasAttrStatic(node)) str = "static "s + str; + else if (auto n = node->GetInit()) { + // emit init for non static class field + if (node->GetParent() && node->GetParent()->IsClass()) + str += " = "s + EmitTreeNode(n); + } return str; } @@ -571,25 +562,36 @@ std::string CppDecl::EmitAssertNode(AssertNode *node) { return std::string(); } -std::string CppDecl::EmitArrayLiteralNode(ArrayLiteralNode *node) { - if (node == nullptr) +// Generate code to construct an array of type any from an ArrayLiteral. TODO: merge with similar in cppdef +std::string CppDecl::ConstructArrayAny(ArrayLiteralNode *node) { + if (node == nullptr || !node->IsArrayLiteral()) return std::string(); - std::string str("{"s); + + // Generate array ctor call to instantiate array + std::string literals; for (unsigned i = 0; i < node->GetLiteralsNum(); ++i) { if (i) - str += ", "s; + literals += ", "s; if (auto n = node->GetLiteral(i)) { - str += EmitTreeNode(n); + if (n->IsArrayLiteral()) + // Recurse to handle array elements that are arrays + literals += ConstructArrayAny(static_cast<ArrayLiteralNode*>(n)); + else { + // Wrap element in JS_Val. C++ class constructor of JS_Val + // will set type tag in JS_Val according to element type. 
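
The wrapping below works because every t2crt::JS_Val constructor overload records the dynamic type of the value it is handed. A minimal, self-contained sketch of such constructor-driven type tagging, with mock types standing in for the real t2crt runtime (illustration only, not emitter output):

    #include <iostream>
    #include <string>
    #include <vector>

    // Stand-in for t2crt::JS_Val: each constructor sets the type tag, so a
    // heterogeneous TS literal like [1, "x", true] fits one C++ container.
    struct Val {
      enum Tag { Long, Str, Bool } tag;
      long l = 0; std::string s; bool b = false;
      Val(long v) : tag(Long), l(v) {}
      Val(const std::string& v) : tag(Str), s(v) {}
      Val(bool v) : tag(Bool), b(v) {}
    };

    int main() {
      // Mirrors the emitted shape: ctor._new({JS_Val(1), JS_Val("x"), JS_Val(true)}).
      // Note the explicit std::string: a bare "x" would convert to bool first.
      std::vector<Val> arr = { Val(1L), Val(std::string("x")), Val(true) };
      std::cout << arr.size() << " elements, first tag " << arr[0].tag << "\n";
      return 0;
    }
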
+ literals += "t2crt::JS_Val("s + EmitTreeNode(n) + ")"s; + } } } - str += "}"s; + std::string str = ArrayCtorName(1, "t2crt::JS_Val") + "._new({"s + literals + "})"s; return str; } -std::string CppDecl::EmitArrayLiteral(ArrayLiteralNode *node, int dim, std::string type) { - if (node == nullptr) - return std::string(); - +// Generate code to construct an array object with brace-enclosed initializer list TODO: merge with similar in cppdef +std::string CppDecl::ConstructArray(ArrayLiteralNode *node, int dim, std::string type) { + if (type.empty()) { + return ConstructArrayAny(node); // proceed as array of type any if no type info + } // Generate array ctor call to instantiate array std::string str = ArrayCtorName(dim, type) + "._new({"s; for (unsigned i = 0; i < node->GetLiteralsNum(); ++i) { @@ -597,7 +599,7 @@ std::string CppDecl::EmitArrayLiteral(ArrayLiteralNode *node, int dim, std::stri str += ", "s; if (auto n = node->GetLiteral(i)) { if (n->IsArrayLiteral()) - str += EmitArrayLiteral(static_cast<ArrayLiteralNode*>(n), dim-1, type); + str += ConstructArray(static_cast<ArrayLiteralNode*>(n), dim-1, type); else str += EmitTreeNode(n); } @@ -606,7 +608,35 @@ std::string CppDecl::EmitArrayLiteral(ArrayLiteralNode *node, int dim, std::stri return str; } -std::string GetArrayTypeString(int dim, std::string typeStr) { +std::string CppDecl::EmitArrayLiteralNode(ArrayLiteralNode *node) { // TODO: merge with similar in cppdef + if (node == nullptr) + return std::string(); + if (node->GetParent() && + (node->GetParent()->IsDecl() || // for var decl init + node->GetParent()->IsIdentifier() || // for default val init in class field decl + node->GetParent()->IsFieldLiteral())) { // for obj decl with struct literal init + // emit code to construct array object with brace-enclosed initializer list + int dim; + std::string str, type; + GetArrayTypeInfo(node, dim, type); + str = ConstructArray(node, dim, type); + return str; + } + + // emit code to build a brace-enclosed initializer list (for rhs of array var assignment op) + std::string str("{"s); + for (unsigned i = 0; i < node->GetLiteralsNum(); ++i) { + if (i) + str += ", "s; + if (auto n = node->GetLiteral(i)) { + str += EmitTreeNode(n); + } + } + str += "}"s; + return str; +} + +std::string BuildArrayType(int dim, std::string typeStr) { std::string str; str = "t2crt::Array<"s + typeStr + ">*"s;; for (unsigned i = 1; i < dim; ++i) { @@ -615,20 +645,16 @@ std::string GetArrayTypeString(int dim, std::string typeStr) { str = "t2crt::Array<"s + str + ">*"s; } return str; } -std::string CppDecl::EmitPrimArrayTypeNode(PrimArrayTypeNode *node) { +std::string CppDecl::EmitArrayTypeNode(ArrayTypeNode *node) { if (node == nullptr) return std::string(); std::string str; - if (node->GetPrim() && node->GetDims()) { - str = GetArrayTypeString( - static_cast<DimensionNode*>(node->GetDims())->GetDimensionsNum(), - EmitPrimTypeNode(node->GetPrim())); - } - /* - if (auto n = node->GetDims()) { - str += EmitDimensionNode(n); + + if (node->GetElemType() && node->GetDims()) { + str = BuildArrayType( - node->GetDims()->GetDimensionsNum(), + node->GetDims()->GetDimensionsNum(), + EmitTreeNode(node->GetElemType())); } - */ return str; } @@ -636,27 +662,13 @@ std::string CppDecl::EmitFieldNode(FieldNode *node) { return std::string(); } -// note: entries below are to match values from ast nodes. 
Do not prepend with "t2crt::" -std::vector<std::string> builtins = {"Object", "Function", "Number", "Array", "Record"}; - -bool IsBuiltinObj(std::string name) { - return std::find(builtins.begin(), builtins.end(), name) != builtins.end(); -} - -std::string GetUserTypeString(UserTypeNode* n) { - std::string str=""; - if (n->GetId()->IsTypeIdClass()) - str = n->GetId()->GetName() + "*"s; - else if (IsBuiltinObj(n->GetId()->GetName())) - str = "t2crt::"s + n->GetId()->GetName() + "*"s; - else // TypeAlias Id - str = n->GetId()->GetName(); - return str; -} - std::string CppDecl::GetTypeString(TreeNode *node, TreeNode *child) { std::string str; if (node) { + if (IsGenerator(node)) { // check generator type + if (auto func = GetGeneratorFunc(node)) + return GeneratorName(GetIdentifierName(func)) + "*"s; + } TypeId k = node->GetTypeId(); if (k == TY_None || k == TY_Class) { switch(node->GetKind()) { @@ -706,11 +718,6 @@ std::string CppDecl::GetTypeString(TreeNode *node, TreeNode *child) { if (str != "none"s) return str + " "s; } - if (mHandler->IsGeneratorUsed(node->GetNodeId())) { - // check if generator type - if (auto func = mHandler->GetGeneratorUsed(node->GetNodeId())) - return GeneratorName(GetIdentifierName(func)) + "*"s; - } } return "t2crt::JS_Val "s; } @@ -727,18 +734,20 @@ std::string CppDecl::EmitUserTypeNode(UserTypeNode *node) { std::string str, usrType; if (auto n = node->GetId()) { - if (n->IsTypeIdClass()) - usrType = n->GetName() + "*"s; + if (n->IsTypeIdClass()) { + if (mHandler->IsGeneratorUsed(n->GetNodeId())) { + // Check if a generator type. TODO: this needs TI + auto func = mHandler->GetGeneratorUsed(n->GetNodeId()); + usrType = GetIdentifierName(func) + "*"s; + } else + usrType = n->GetName() + "*"s; + } else if (IsBuiltinObj(n->GetName())) usrType = "t2crt::"s + n->GetName() + "*"s; else // TypeAlias Id gets returned here usrType = n->GetName(); - if (node->GetDims()) { - return GetArrayTypeString(node->GetDims()->GetDimensionsNum(), usrType); - } else { - str = usrType; - } + str = usrType; // note: array dimension now comes from ArrayTypeNode auto num = node->GetTypeGenericsNum(); if(num) { std::string lastChar = ""; @@ -796,26 +805,13 @@ std::string CppDecl::EmitClassNode(ClassNode *node) { // class field decl and init. TODO: handle private, protected attrs. 
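
The AddProp calls generated below back each JS property with a real C++ class member. A rough, self-contained illustration of the pointer-to-member idea (mock types; the actual t2crt ClassFld/NewProp API also carries a type tag):

    #include <iostream>
    #include <string>
    #include <unordered_map>

    // Mock property table: maps a JS property name to a C++ pointer-to-member,
    // so property lookups resolve to ordinary fields.
    struct Obj {
      long count = 0;
      std::unordered_map<std::string, long Obj::*> props;
      void AddProp(const std::string& name, long Obj::* fld) { props[name] = fld; }
      long& Prop(const std::string& name) { return this->*props.at(name); }
    };

    int main() {
      Obj o;
      o.AddProp("count", &Obj::count); // cf. AddProp("done", ClassFld(&IteratorResult::done)...)
      o.Prop("count") = 42;
      std::cout << o.Prop("count") << "\n"; // prints 42
      return 0;
    }
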
for (unsigned i = 0; i < node->GetFieldsNum(); ++i) { auto n = node->GetField(i); - if (n->IsIdentifier() && GetIdentifierName(n).compare("private")==0) - continue; str += " "s + EmitTreeNode(n); if (n->IsIdentifier()) { - IdentifierNode* id = static_cast<IdentifierNode*>(n); - if (HasAttrStatic(id)) { + if (HasAttrStatic(static_cast<IdentifierNode*>(n))) { // static field - add field to ctor prop and init later at field def in cpp staticProps += tab(3) + "this->AddProp(\""s + clsName + "\", t2crt::JS_Val("s + - TypeIdToJSTypeCXX[n->GetTypeId()] + ", &"s + clsName + "::"s + id->GetName() + "));\n"s; - } else if (auto init = id->GetInit()) { - if (init->IsArrayLiteral() && id->GetType() && id->GetType()->IsPrimArrayType()) { - // Generate initializer for t2crt::Array member field decl in header file - PrimArrayTypeNode* mtype = static_cast<PrimArrayTypeNode*>(id->GetType()); - str += " = "s + EmitArrayLiteral(static_cast<ArrayLiteralNode*>(init), - mtype->GetDims()->GetDimensionsNum(), - EmitPrimTypeNode(mtype->GetPrim())); - } else { - str += " = "s + EmitTreeNode(init); - } + TypeIdToJSTypeCXX[n->GetTypeId()] + ", &"s + clsName + "::"s + GetIdentifierName(n) + "));\n"s; } } str += ";\n"; @@ -906,11 +902,8 @@ std::string CppDecl::EmitInterface(StructNode *node) { if (superClass.back() == '*') superClass.pop_back(); } - - if (auto n = node->GetStructId()) { - ifName = GetIdentifierName(n); - str = "class "s + ifName + " : public "s + superClass + " {\n"s; - } + ifName = GetIdentifierName(node); + str = "class "s + ifName + " : public "s + superClass + " {\n"s; str += " public:\n"s; // Generate code to add prop in class constructor @@ -1124,17 +1117,7 @@ std::string CppDecl::EmitLiteralNode(LiteralNode *node) { mPrecedence = '\030'; str = HandleTreeNode(str, node); if (auto n = node->GetType()) { - if (str.compare("this") == 0) { - // handle special literal "this" - std::string type = EmitTreeNode(n); - if (type.compare("t2crt::JS_Val ") == 0) - // map type ANY for "this" to generic object type - str = "t2crt::Object* "s + "_this"s; - else - str = type + " _this"; - } - else - str += ": "s + EmitTreeNode(n); + str += ": "s + EmitTreeNode(n); } if (auto n = node->GetInit()) { str += " = "s + EmitTreeNode(n); diff --git a/src/MapleFE/ast2cpp/src/cpp_definition.cpp b/src/MapleFE/ast2cpp/src/cpp_definition.cpp index 6ac4cfd41551455954c094d2a829b45992f6f6e9..d6b1935446c577ae09fecea01366394615c1d2df 100644 --- a/src/MapleFE/ast2cpp/src/cpp_definition.cpp +++ b/src/MapleFE/ast2cpp/src/cpp_definition.cpp @@ -188,14 +188,6 @@ std::string CppDef::EmitXXportAsPairNode(XXportAsPairNode *node) { return std::string(); } -// Return class name from class method or class field -inline std::string GetClassName(TreeNode* node) { - TreeNode* n = node->GetParent(); - if (n && n->IsClass()) - return n->GetName(); - return ""s; -} - inline bool IsClassMethod(FunctionNode* f) { return (f && f->GetParent() && f->GetParent()->IsClass()); } @@ -236,7 +228,7 @@ std::string CppDef::EmitClassProps(TreeNode* node) { // a decl list with unique names for insert into function definition. 
// // The var may be initialized to different values in different -// blocks which will done in the individual DeclNodes (re: var-dup.ts) +// blocks which will be done in the individual DeclNodes (re: var-dup.ts) std::string CppDef::EmitFuncScopeVarDecls(FunctionNode *node) { std::unordered_map<std::string, DeclNode*> varDeclsInScope; ASTScope* s = node->GetScope(); @@ -258,57 +250,89 @@ std::string CppDef::EmitFuncScopeVarDecls(FunctionNode *node) { } std::string str; for (auto const&[key, val] : varDeclsInScope) { - // Emit decl for the var (just type, name - init part emitted - // when corresponding DeclNode processed + // Emit decl for the var (just type and name). The init part + // to be emitted when corresponding DeclNode is processed str += tab(1) + mCppDecl.EmitTreeNode(val->GetVar()) + ";\n"s; } return str; } -std::string CppDef::EmitFunctionNode(FunctionNode *node) { - bool isTopLevel = hFuncTable.IsTopLevelFunc(node); - if (mIsInit || node == nullptr) +std::string CppDef::EmitYieldNode(YieldNode *node) { + if (node == nullptr) return std::string(); + //std::string str(node->IsTransfer() ? "yield* " : "yield "); + std::string str, res; + if (auto n = node->GetResult()) + res = EmitTreeNode(n); + else + res = "undefined"; - std::string str, className, ns = GetNamespace(node); - if (!ns.empty()) - ns += "::"s; - if (node->IsConstructor()) { - className = ns + GetClassName(node); - str = "\n"s; - str += className + "* "s + className + "::Ctor::operator()("s + className + "* obj"s; - } else { - str = mCppDecl.GetTypeString(node->GetType(), node->GetType()); - std::string funcName = GetIdentifierName(node); - str += " "s; - - if (IsClassMethod(node)) - str += ns + GetClassName(node) + "::"s + funcName; - else if (isTopLevel) - str += ns + "Cls_"s + funcName + "::_body"s; // emit body of top level function - else - str += ns + funcName; - str += "("s; - } - - std::string params, unused; - for (unsigned i = 0; i < node->GetParamsNum(); ++i) { - if (i || node->IsConstructor()) - params += ", "s; - if (auto n = node->GetParam(i)) { - params += mCppDecl.EmitTreeNode(n); - if (isTopLevel && i == 0) { - HandleThisParam(node->GetParamsNum(), n, params, unused); - } + std::string yieldLabel = GenFnLabels.NextYieldLabel(); + str += " yield = &&" + yieldLabel + ";\n"; // save yp + str += " res.value = t2crt::JS_Val(" +res+ ");\n"; // init value and return + str += " res.done = false;\n"; + str += " return;\n"; + str += yieldLabel + ":\n"; // label for this yp + + mPrecedence = '\024'; + return str; +} + +std::string CppDef::EmitWhileLoopNode(WhileLoopNode *node) { +// return(Emitter::EmitWhileLoopNode(node)); + + if (node == nullptr) + return std::string(); + std::string str; + std::string loopLabel; + + if(auto n = node->GetLabel()) { + str = EmitTreeNode(n) + ":\n"s; + } + + if (mIsGenerator) { // insert label and loop cond check + loopLabel = GenFnLabels.NextLoopLabel(); + str += loopLabel + ":\n"; + if (auto n = node->GetCond()) { + std::string cond = EmitTreeNode(n); + str += " if (!(" +cond+ "))\n"; + str += " goto " +loopLabel+ "_exit;\n"; + } + } else { // normal while loop + str += "while("s; + if (auto n = node->GetCond()) { + str += EmitTreeNode(n); } + str += ')'; } - if (isTopLevel && !IsClassMethod(node)) { - if (node->GetParamsNum() == 0) { - HandleThisParam(0, nullptr, params, unused); + if (auto n = node->GetBody()) { + str += EmitTreeNode(n) + GetEnding(n); + if (mIsGenerator) { + str.insert(str.find_first_of("{"), " "); + str.insert(str.find_last_of("}"), " "); } } - str += params + ") "s; + + if 
(mIsGenerator) { // insert loop back and label at loop exit + str += " goto " +loopLabel+ ";\n"; + str += loopLabel + "_exit:"; + } + + return HandleTreeNode(str, node); +} + + +std::string CppDef::EmitFunctionNode(FunctionNode *node) { + if (mIsInit || node == nullptr) + return std::string(); + + bool isTopLevel = hFuncTable.IsTopLevelFunc(node); + std::string str; + str += "\n"; + str += FunctionHeader(node, mCppDecl.GetTypeString(node->GetRetType(), node->GetRetType())); + mIsGenerator = node->IsGenerator(); + int bodyPos = str.size(); if (auto n = node->GetBody()) { auto varDecls = EmitFuncScopeVarDecls(node); @@ -323,6 +347,11 @@ std::string CppDef::EmitFunctionNode(FunctionNode *node) { } else str += "{}\n"s; + if (mIsGenerator) { + str.insert(str.find_first_of("{")+1, GeneratorFn_start); + str.insert(str.find_last_of("}"), GeneratorFn_return); + } + if (node->IsConstructor()) { Emitter::Replace(str, "this->", "obj->", 0); std::string ctorBody; @@ -331,6 +360,10 @@ std::string CppDef::EmitFunctionNode(FunctionNode *node) { str += EmitCtorInstance(static_cast<ClassNode*>(node->GetParent())); } + if (mIsGenerator) { + mIsGenerator = false; + GenFnLabels.ResetLabels(); + } return str; } @@ -370,7 +403,7 @@ std::string CppDef::EmitStructLiteralNode(StructLiteralNode* node) { case TY_Function: break; case TY_Array: - fieldVal = EmitArrayLiteral(nullptr, lit); + fieldVal = EmitTreeNode(lit); // ArrayLiteralNode str += "std::make_pair(\""s + fieldName + "\", t2crt::JS_Val("s + fieldVal + "))"s; break; case TY_Boolean: @@ -456,19 +489,19 @@ std::string CppDef::GenObjectLiteral(TreeNode* var, std::string varName, TreeNod } // Generate code to construct an array of type any from an ArrayLiteral. -std::string CppDef::GenArrayOfAny(TreeNode *node) { +std::string CppDef::ConstructArrayAny(ArrayLiteralNode *node) { if (node == nullptr || !node->IsArrayLiteral()) return std::string(); // Generate array ctor call to instantiate array std::string literals; - for (unsigned i = 0; i < static_cast<ArrayLiteralNode*>(node)->GetLiteralsNum(); ++i) { + for (unsigned i = 0; i < node->GetLiteralsNum(); ++i) { if (i) literals += ", "s; - if (auto n = static_cast<ArrayLiteralNode*>(node)->GetLiteral(i)) { + if (auto n = node->GetLiteral(i)) { if (n->IsArrayLiteral()) // Recurse to handle array elements that are arrays - literals += GenArrayOfAny(n); + literals += ConstructArrayAny(static_cast<ArrayLiteralNode*>(n)); else { // Wrap element in JS_Val. C++ class constructor of JS_Val // will set type tag in JS_Val according to element type. 
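
The yield and while-loop lowering above (EmitYieldNode, EmitWhileLoopNode, and the GeneratorFn_start/GeneratorFn_return fragments spliced into generator bodies) relies on the GCC/Clang "labels as values" extension: &&label saves a resume point and goto *ptr jumps back to it. A hand-written miniature of that resume protocol, assuming g++ or clang++ (illustration only, not actual emitter output):

    #include <iostream>

    // One resumable body: 'yield' holds the saved resume label, while
    // 'value'/'done' play the role of the IteratorResult fields.
    void body(void*& yield, long& value, bool& done) {
      if (yield != nullptr)
        goto *yield;                 // GeneratorFn_start: resume where we left off
      value = 1; done = false; yield = &&_yield_1; return;
    _yield_1:
      value = 2; done = false; yield = &&_yield_2; return;
    _yield_2:
      done = true;                   // GeneratorFn_return: iteration finished
    }

    int main() {
      void* yield = nullptr; long v = 0; bool done = false;
      while (body(yield, v, done), !done)
        std::cout << v << "\n";      // prints 1 then 2
      return 0;
    }
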
@@ -480,21 +513,19 @@ std::string CppDef::GenArrayOfAny(TreeNode *node) { return str; } -std::string CppDef::EmitArrayLiterals(TreeNode *node, int dim, std::string type) { - if (node == nullptr || !node->IsArrayLiteral()) - return std::string(); - - if (type.back() == ' ') - type.pop_back(); - +// Generate code to construct an array object with brace-enclosed initializer list +std::string CppDef::ConstructArray(ArrayLiteralNode *node, int dim, std::string type) { + if (type.empty()) { + return ConstructArrayAny(node); // proceed as array of type any if no type info + } // Generate array ctor call to instantiate array std::string str = ArrayCtorName(dim, type) + "._new({"s; - for (unsigned i = 0; i < static_cast<ArrayLiteralNode*>(node)->GetLiteralsNum(); ++i) { + for (unsigned i = 0; i < node->GetLiteralsNum(); ++i) { if (i) str += ", "s; - if (auto n = static_cast<ArrayLiteralNode*>(node)->GetLiteral(i)) { + if (auto n = node->GetLiteral(i)) { if (n->IsArrayLiteral()) - str += EmitArrayLiterals(static_cast<ArrayLiteralNode*>(n), dim-1, type); + str += ConstructArray(static_cast<ArrayLiteralNode*>(n), dim-1, type); else str += EmitTreeNode(n); } @@ -503,36 +534,19 @@ std::string CppDef::EmitArrayLiterals(TreeNode *node, int dim, std::string type) return str; } -std::string CppDef::EmitArrayLiteral(TreeNode* arrType, TreeNode* arrLiteral) { - std::string str, type; - int dims = 1; // default to 1 dim array if no Dims info - if (arrLiteral == nullptr) - return "nullptr"s; - - if (arrType == nullptr) { - // if no arrary type info proceed as array of type any (JS_Val) - str = GenArrayOfAny(arrLiteral); - } else if (arrType->IsUserType()) { // array of usertyp - if (static_cast<UserTypeNode*>(arrType)->GetDims()) - dims = static_cast<UserTypeNode*>(arrType)->GetDimsNum(); - if (auto id = static_cast<UserTypeNode*>(arrType)->GetId()) { - type = id->GetName(); - if (type.compare("t2crt::Object") == 0 || type.compare("Object") == 0) - type = "t2crt::Object*"; - } - str = EmitArrayLiterals(arrLiteral, dims, type); - } else if (arrType->IsPrimArrayType()) { // array of prim type - if (static_cast<PrimArrayTypeNode*>(arrType)->GetDims()) - dims = static_cast<PrimArrayTypeNode*>(arrType)->GetDims()->GetDimensionsNum(); - type= EmitPrimTypeNode(static_cast<PrimArrayTypeNode*>(arrType)->GetPrim()); - str = EmitArrayLiterals(arrLiteral, dims, type); - } - return str; -} - // decl of global var is handled by EmitDeclNode in cpp_declaration // decl of function vars of type JS_Var is handled in EmitFuncSCopeVarDecls // This function handles init of global/func var, and decl/init of func let/const. +// +// Declaration of Javascript "var", "let" and "const" variables: +// - "var" decls are function/global scoped +// - "let" and "const" are block scoped +// TS/JS allows duplicate "var" declarations in global scope as well as +// function scope. Duplicate global scope var decls are resolved +// by the front end which makes only 1 decl for the dup and changes any inits +// in the dup decls to assignments. Duplicate function scope var decls are +// handled in CppDef::EmitFuncScopeVarDecls. 
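
To make the duplicate-"var" rule above concrete: EmitFuncScopeVarDecls emits a single hoisted declaration per function-scoped name, and each DeclNode's initializer then appears as a plain assignment. A hedged sketch of the emitted shape for a var-dup.ts-style input (hand-written, not actual tool output):

    #include <iostream>

    // TS input (roughly):  function f() { var x = 1; { var x = 2; } return x; }
    long f() {
      long x;   // single decl hoisted by EmitFuncScopeVarDecls
      x = 1;    // init from the first DeclNode
      {
        x = 2;  // duplicate decl reduced to an assignment
      }
      return x;
    }

    int main() { std::cout << f() << "\n"; return 0; } // prints 2
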
+// std::string CppDef::EmitDeclNode(DeclNode *node) { if (node == nullptr) return std::string(); @@ -560,9 +574,7 @@ std::string CppDef::EmitDeclNode(DeclNode *node) { } } if (auto n = node->GetInit()) { - if (n->IsArrayLiteral()) - str += varStr + " = " + EmitArrayLiteral(idType, n); - else if (n->IsStructLiteral()) + if (n->IsStructLiteral()) str += GenObjectLiteral(node->GetVar(), varStr, idType, static_cast<StructLiteralNode*>(n)); else if (node->GetVar()->IsIdentifier() && n->IsIdentifier() && n->IsTypeIdClass()) str += varStr + "= &"s + n->GetName() + "::ctor"s; // init with ctor address @@ -572,9 +584,8 @@ std::string CppDef::EmitDeclNode(DeclNode *node) { hFuncTable.AddNameIsTopLevelFunc(varStr); } } else { - // if no type info, assume type is any and wrap initializer in JS_Val. str += varStr + " = "; - if (varType == TY_None) + if (varType == TY_None) // no type info. assume TY_Any and wrap val in JS_Val str += "t2crt::JS_Val("s + EmitTreeNode(n) + ")"s; else str += EmitTreeNode(n); @@ -713,9 +724,23 @@ std::string CppDef::EmitArrayElementNode(ArrayElementNode *node) { return HandleTreeNode(str, node); } + std::string CppDef::EmitArrayLiteralNode(ArrayLiteralNode *node) { if (node == nullptr) return std::string(); + if (node->GetParent() && + (node->GetParent()->IsDecl() || // for var decl init + node->GetParent()->IsIdentifier() || // for default val init in class field decl + node->GetParent()->IsFieldLiteral())) { // for obj decl with struct literal init + // emit code to construct array object with brace-enclosed initializer list + int dim; + std::string str, type; + GetArrayTypeInfo(node, dim, type); + str = ConstructArray(node, dim, type); + return str; + } + + // emit code to build a brace-enclosed initializer list (for rhs of array var assignment op) std::string str("{"s); for (unsigned i = 0; i < node->GetLiteralsNum(); ++i) { if (i) @@ -734,9 +759,11 @@ std::string CppDef::EmitFieldNode(FieldNode *node) { std::string upper, field, propType; bool isRhs = false; // indicate if field is rhs (val) or lhs (ref) auto upnode = node->GetUpper(); + bool upperIsGenerator = false; if (upnode) { upper = EmitTreeNode(upnode); isRhs = !mHandler->IsDef(upnode); + upperIsGenerator = IsGenerator(upnode); // TODO: await TI fix for generator3.ts } if (auto n = node->GetField()) { if (isRhs) { @@ -796,6 +823,10 @@ std::string CppDef::EmitBlockNode(BlockNode *node) { for (unsigned i = 0; i < node->GetChildrenNum(); ++i) { if (auto n = node->GetChildAtIndex(i)) { std::string s = EmitTreeNode(n); + if (n->IsYield()) { + str += s; + continue; + } if (!s.empty()) str += " "s + s + GetEnding(n); } @@ -1013,6 +1044,9 @@ std::string CppDef::EmitBracketNotationProp(ArrayElementNode* ae, OprId binOpId, case TY_Object: str = objName + "->GetPropObj("s + propKey + ")"s; break; + case TY_Any: + str = objName + "->GetProp("s + propKey + ")"s; + break; default: str = "(*"s + objName + ")["s + propKey + ']'; } @@ -1243,7 +1277,7 @@ std::string CppDef::GetThisParamObjType(TreeNode *node) { std::string str = "t2crt::Object"; if (static_cast<FunctionNode*>(node)->GetParamsNum()) { auto n = static_cast<FunctionNode*>(node)->GetParam(0); - if (n->IsIdentifier() && n->IsThis()) { + if (n->IsThis()) { TreeNode* tn = static_cast<IdentifierNode*>(n)->GetType(); str = mCppDecl.GetTypeString(tn, nullptr); if (str.back() == '*') @@ -1287,6 +1321,7 @@ std::string CppDef::EmitNewNode(NewNode *node) { str = fnName + "->ctor("s + newObj + ", "s; // call ctor function with new obj as this arg } } else { + // for builtins str = "new "s + EmitTreeNode(node->GetId()); str += "("s; } diff --git 
a/src/MapleFE/ast2cpp/src/cpp_emitter.cpp b/src/MapleFE/ast2cpp/src/cpp_emitter.cpp index ab730ccb84fc15d9f1f607c160f8bddd81f275db..c5c92c0b0409646a598f2cc081f2b3d50197f638 100644 --- a/src/MapleFE/ast2cpp/src/cpp_emitter.cpp +++ b/src/MapleFE/ast2cpp/src/cpp_emitter.cpp @@ -26,7 +26,13 @@ std::string CppEmitter::GetIdentifierName(TreeNode *node) { case NK_Decl: return GetIdentifierName(static_cast<DeclNode*>(node)->GetVar()); case NK_Struct: - return GetIdentifierName(static_cast<StructNode*>(node)->GetStructId()); + // Named StructNode has name in StructId. Unnamed StructNode is assigned + // anonymous name by frontend and can be accessed using node mStrIdx + // through node GetName() interface. + if (auto n = static_cast<StructNode*>(node)->GetStructId()) + return GetIdentifierName(n); + else + return node->GetName(); // for anonymous name case NK_Function: if (static_cast<FunctionNode*>(node)->GetFuncName()) return GetIdentifierName(static_cast<FunctionNode*>(node)->GetFuncName()); @@ -126,4 +132,114 @@ void CppEmitter::InsertEscapes(std::string& str) { Emitter::Replace(str, "\"", "\\\"", 0); } +bool CppEmitter::IsGenerator(TreeNode* node) { + return mHandler->IsGeneratorUsed(node->GetNodeId()); +} + +FunctionNode* CppEmitter::GetGeneratorFunc(TreeNode* node) { + return mHandler->GetGeneratorUsed(node->GetNodeId()); +} + +// +// Interface to get array type and dimension info for an ArrayLiteral +// (should be just a wrapper to call TI interfaces GetArrayElemTypeId() +// and GetArrayDim(), but until the usage of those 2 interfaces can cover all +// use cases, this interface encapsulates any additional work to get array type info. +// +void CppEmitter::GetArrayTypeInfo(ArrayLiteralNode* node, int& numDim, std::string& type) { + TypeId typeId = mHandler->GetArrayElemTypeId(node->GetNodeId()); + DimensionNode* dim = mHandler->GetArrayDim(node->GetNodeId()); + if (dim) + numDim = dim->GetDimensionsNum(); + switch(typeId) { + case TY_Class: { + unsigned tIdx = mHandler->GetArrayElemTypeIdx(node->GetNodeId()); + TreeNode* tp = gTypeTable.GetTypeFromTypeIdx(tIdx); + type = ObjectTypeStr(tp->GetName()); + break; + } + case TY_Int: + type = "long"; + break; + case TY_String: + type = "std::string"; + break; + case TY_Double: + type = "double"; + break; + case TY_None: + type = "t2crt::JS_Val"; + break; +#if 0 + case TY_Array: + type = "t2crt::Array*"; + break; +#endif + case TY_Function: + default: + // TODO + dim = 0; + type = "TBD"; + break; + } + return; + +#if 0 + if (!node->GetParent()) + return; + + switch(node->GetParent()->GetKind()) { + case NK_Decl: + // e.g. var arr: number[]=[1,2,3]; + //GetArrInfoByVarId(node, dim, type); + break; + case NK_Identifier: + // e.g. class Foo { arr: number[]=[1,2,3]; } + //GetArrInfoByClassFieldId(node, dim, type); + break; + case NK_FieldLiteral: + // e.g. var: {arr:number[]} = { n:[1,2,3] }; + //GetArrInfoByObjLiteralClassField(node, dim, type); + break; + } +#endif +} + +// C++ function header for different TS function types: +// Generator: t2crt::IteratorResult [<ns>::]GeneratorFunc_<funcName>::_body(t2crt::Object* _this, void*& yield[, <type>&]...) +// Class ctor: [<ns>::]<className>* <className>::Ctor::operator()(<className>* obj[, <params>]...) +// Class method: <retType> [<ns>::]<className>::<funcName>([params]...) +// Function: <retType> [<ns>::]Cls_<funcName>::_body(t2crt::Object|<classType>* _this[, params]...) +std::string CppEmitter::FunctionHeader(FunctionNode* node, std::string retType) { + std::string str; + std::string ns = GetNamespace(node).empty() ? ""s : GetNamespace(node)+"::"; + std::string funcName = GetIdentifierName(node); + std::string className= ns + GetClassName(node); + bool isTopLevel = hFuncTable.IsTopLevelFunc(node); + retType = retType + " "s + ns; + + if (node->IsGenerator()) // generator + str += GeneratorFuncHeader(ns+GeneratorFuncName(funcName)+"::", node->GetNodeId()); + else if (node->IsConstructor()) { // class constructor + std::string param = FunctionParams(node->GetNodeId(), false); + param = param.empty() ? ""s : (", "s+param); + str += className + "* "s + className + "::Ctor::operator()" + "(" +className+ "* obj" +param+ ") "; + } + else if (IsClassMethod(node)) // class method + str += retType + GetClassName(node) + "::" + funcName + "(" + FunctionParams(node->GetNodeId(), false) + ") "; + else if (isTopLevel) // top level function + str += retType + "Cls_" + funcName + "::_body" + "(" + FunctionParams(node->GetNodeId(), true) + ") "; + else + str += retType + funcName + "(" + FunctionParams(node->GetNodeId(), false) + ") "; + return str; +} + +// Return class name from class method or class field +std::string CppEmitter::GetClassName(TreeNode* node) { + TreeNode* n = node->GetParent(); + if (n && n->IsClass()) + return n->GetName(); + return ""s; +} + } // namespace maplefe diff --git a/src/MapleFE/ast2cpp/src/emitter.cpp b/src/MapleFE/ast2cpp/src/emitter.cpp index e4926858fae238e405496ce4a490d8e76b6d6703..fbf695857eaf577226ec8854914a7f84f48dba2c 100644 --- a/src/MapleFE/ast2cpp/src/emitter.cpp +++ b/src/MapleFE/ast2cpp/src/emitter.cpp @@ -45,7 +45,8 @@ std::string Emitter::GetEnding(TreeNode *n) { std::string str; switch(n->GetKind()) { case NK_Function: - str = "\n"s; + case NK_TripleSlash: + str += '\n'; break; default: str += ';'; @@ -60,7 +61,7 @@ std::string Emitter::GetEnding(TreeNode *n) { case NK_Namespace: case NK_Declare: case NK_Module: - str += "\n"s; + str += '\n'; } return str; } @@ -284,7 +285,7 @@ std::string Emitter::EmitFunctionNode(FunctionNode *node) { str += " : asserts "s + EmitTreeNode(n); auto body = node->GetBody(); - if (auto n = node->GetType()) { + if (auto n = node->GetRetType()) { std::string s = EmitTreeNode(n); if(!s.empty()) { str += (body || has_name || inside ? " : "s : " => "s) + s; } @@ -703,6 +704,7 @@ std::string Emitter::EmitConditionalTypeNode(ConditionalTypeNode *node) { precd = mPrecedence; } if (auto n = node->GetTypeB()) { + str = Clean(str); if (precd < '\024') str = '(' + str + ')'; str += " extends "s + EmitTreeNode(n); @@ -1536,7 +1538,7 @@ std::string Emitter::EmitCallNode(CallNode *node) { bool optional = n->IsOptional(); if (optional && !s.empty() && s.back() == '?') s.pop_back(); - if(n->IsFunction() || n->IsLambda()) + if(n->IsFunction() || n->IsLambda() || n->IsTerOperator()) str += '(' + s + ')'; else str += s; @@ -1764,7 +1766,7 @@ std::string Emitter::EmitLambdaNode(LambdaNode *node) { str += ')'; if (auto n = node->GetBody()) { - if (auto t = node->GetType()) { + if (auto t = node->GetRetType()) { str += ": "s + EmitTreeNode(t); } std::string s = EmitTreeNode(n); @@ -1774,7 +1776,7 @@ std::string Emitter::EmitLambdaNode(LambdaNode *node) { str += " => "s + s; } else { - if (auto t = node->GetType()) { + if (auto t = node->GetRetType()) { str += " => "s + EmitTreeNode(t); } } @@ -1925,6 +1927,19 @@ std::string Emitter::EmitTupleTypeNode(TupleTypeNode *node) { mPrecedence = '\030'; return str; } + +std::string Emitter::EmitTripleSlashNode(TripleSlashNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + str += "/// <reference "s + GetEnumTripleSlashProp(node->GetProp()); + if (auto n = node->GetValue()) { + str += '=' + EmitTreeNode(n); + } + str += " />"s; + return str; +} + std::string Emitter::EmitModuleNode(ModuleNode *node) { if (node == nullptr) return std::string(); @@ -1953,6 +1968,18 @@ std::string Emitter::EmitAttrNode(AttrNode *node) { return HandleTreeNode(str, node); } +std::string Emitter::EmitArrayTypeNode(ArrayTypeNode *node) { + // TODO + std::string str = ""; + return str; +} + +std::string Emitter::EmitFunctionTypeNode(FunctionTypeNode *node) { + // TODO + std::string str = ""; + return str; +} + std::string Emitter::EmitPrimTypeNode(PrimTypeNode *node) { if (node == nullptr) return std::string(); @@ -1986,12 +2013,6 @@ std::string Emitter::EmitPrimArrayTypeNode(PrimArrayTypeNode *node) { return HandleTreeNode(str, node); } -std::string Emitter::EmitArrayTypeNode(ArrayTypeNode *node) { - // TODO - std::string str = ""; - return str; -} - std::string Emitter::EmitTreeNode(TreeNode *node) { if (node == nullptr) return std::string(); @@ -2041,6 +2062,9 @@ std::string Emitter::EmitTreeNode(TreeNode *node) { case NK_ArrayType: return EmitArrayTypeNode(static_cast<ArrayTypeNode*>(node)); break; + case NK_FunctionType: + return EmitFunctionTypeNode(static_cast<FunctionTypeNode*>(node)); + break; case NK_UserType: return EmitUserTypeNode(static_cast<UserTypeNode*>(node)); break; @@ -2140,6 +2164,9 @@ std::string Emitter::EmitTreeNode(TreeNode *node) { case NK_Infer: return EmitInferNode(static_cast<InferNode*>(node)); break; + case NK_TripleSlash: + return EmitTripleSlashNode(static_cast<TripleSlashNode*>(node)); + break; case NK_Block: return EmitBlockNode(static_cast<BlockNode*>(node)); break; @@ -2405,4 +2432,22 @@ const char *Emitter::GetEnumOprId(OprId k) { return "UNEXPECTED OprId"; } +const char *Emitter::GetEnumTripleSlashProp(TripleSlashProp k) { + switch (k) { + case TSP_Path: + return "path"; + case TSP_Types: + return "types"; + case TSP_Lib: + return "lib"; + case TSP_NoDefaultLib: + return "no-default-lib"; + case TSP_NA: + return "TSP_NA"; + default: + MASSERT(0 && "Unexpected enumerator"); + } + return "UNEXPECTED TripleSlashProp"; +} + } // namespace maplefe diff --git a/src/MapleFE/ast2cpp/src/helper.cpp b/src/MapleFE/ast2cpp/src/helper.cpp index 
5ee7e1b5f90db1160d66573baa814b8228c9af0f..1222f2341802e47bf06284101f033cd65dd21376 100644 --- a/src/MapleFE/ast2cpp/src/helper.cpp +++ b/src/MapleFE/ast2cpp/src/helper.cpp @@ -16,6 +16,9 @@ namespace maplefe { +FuncTable hFuncTable; +GeneratorLabels GenFnLabels; + std::unordered_mapTypeIdToJSTypeCXX = { // AST TypeId to t2crt JS_Type mapping for JS_Val type of obj props that pts to CXX class fields {TY_Object, "t2crt::TY_CXX_Object"}, @@ -30,15 +33,25 @@ std::unordered_mapTypeIdToJSTypeCXX = { {TY_Any, "t2crt::TY_CXX_Any"}, }; -FuncTable hFuncTable; +std::string GeneratorFn_start = R"""( + if (yield != nullptr) + goto *yield; +)"""; + +std::string GeneratorFn_return = R"""( + res.value = undefined; + res.done = true; + return; +)"""; // Used to build GetProp for calls to get Object (class Object in ts2cpp.h) property std::string hlpGetJSValTypeStr(TypeId typeId) { switch(typeId) { case TY_Object: case TY_Class: - case TY_Any: return "Obj"; + case TY_Any: + return ""; case TY_Function: return "Func"; case TY_Boolean: @@ -95,6 +108,66 @@ std::string GenClassFldAddProp(std::string objName, return str; } +// From TS func param info, generate param and arg list for corresponding mapped C++ func. +// +// Different formats of arg list as needed by C++ mapping of function/class/generators +// - args for function class functor and generation class constructor +// () - generator class constructor field init list +// & - args passed by reference to generation function _body method +// ; - generator class fields for capturing closure +// +std::string FunctionParams(unsigned nodeId, bool handleThis, bool argsOnly, bool byRef, bool fdInit, bool capture) { + std::vector> funcParams = hFuncTable.GetArgInfo(nodeId); + std::string ObjT = "t2crt::Object*"; + std::string str; + + // "this" in TS function paramter mapping to C++: + // + // TS2cpp's C++ mapping for TS func has a "this" obj in the c++ func param list + // which will be generated from AST if "this" is declared as a TS func parameter + // as required by TS strict mode. However TS funcs that do not reference 'this' + // are not required to declare it, in which case emitter has to insert one. + // + // Cases: + // if TS func has no param + // - insert param "ts2crt::Object* _this" + // if 1st TS func param is not "this" + // - insert param "ts2crt::Object* _this" + // if 1st TS func param is "this" + // - rename to "_this" + // - if type is Any (JS_Val), change to "ts2crt::Object*" + // + if (handleThis) { + if (funcParams.size() == 0) // func has no param + return argsOnly ? "_this"s : (ObjT + " _this"); + } + + for (bool first=true; auto elem : funcParams) { + std::string type = elem.first, name = elem.second; + if (!first) + str += ", "s; + else { // 1st param of TS func + if (handleThis) { + if (name.compare("this") != 0) // if not "this", insert _this + str += argsOnly? ("_this, "s): (ObjT + " _this, "s); + else { // if "this" + name = "_this"; // rename to "_this" + if (type.compare("t2crt::JS_Val") == 0) + type = ObjT; // change type Any to Object* + } + } + first = false; + } + if (fdInit) + str += name + "(" + name + ")"; + else if (capture) + str += type + " " + name + ";\n"; + else + str += argsOnly? 
name: (type + (byRef?"\&":"") + " "s + name); + } + return str; +} + // Each first level function is instantiated from a corresponding class generated with interfaces below: // Body - user defined function code // () - functor for OrdinaryCallEvaluteBody [9.2.1.3] @@ -108,19 +181,22 @@ std::string GenClassFldAddProp(std::string objName, // for ctor(), it calls _body() but ignores return val from _body(), and instead returns _this // per TS/JS spec. -std::string FunctionTemplate(std::string retType, std::string funcName, std::string params, std::string args) { - std::string str; +std::string FunctionClassDecl(std::string retType, std::string funcName, unsigned nodeId) { + std::string str, args, params, thisType; + std::string clsName = ClsName(funcName); - std::string functorArgs = args; + params = FunctionParams(nodeId, true, false); + args = FunctionParams(nodeId, true, true); + thisType = params.substr(0, params.find(" ")); // extract return type of "this" parameter + std::string functorParams = params; - std::string thisType; + std::string functorArgs = args; functorArgs.replace(0, 5, "_thisArg"); // replace _this with _thisArg size_t pos; if ((pos = functorParams.find("_this, ")) != std::string::npos) functorParams.erase(0, pos+7); else if ((pos = functorParams.find("_this")) != std::string::npos) functorParams.erase(0, pos+5); - thisType = params.substr(0, pos-1); str = R"""( class )""" + clsName + R"""( : public t2crt::Function { @@ -146,65 +222,87 @@ class )""" + clsName + R"""( : public t2crt::Function { return str; } -// Template for generating Generators and Generator Functions: +// build generator function header for _body +std::string GeneratorFuncHeader(std::string cls, unsigned nodeId) { + std::string params = FunctionParams(nodeId, false, false, true); // pass params by ref into _body() + if (!params.empty()) + params = ", " + params; + return "void " + cls + "_body(t2crt::Object* _this, void*& yield, t2crt::IteratorResult& res" + params + ")"; +} + +// Generating Generators and Generator Functions: // For each TS generator function, 2 C++ classes: generator and generator function are emitted. // The generator function has only a single instance. It is called to create generator instances. -std::string GenGeneratorClass(std::string funcName, std::vector> args) { +std::string GeneratorClassDecl(std::string funcName, unsigned nodeId) { std::string str; std::string generatorName = GeneratorName(funcName); std::string generatorFuncName = GeneratorFuncName(funcName); - // Different formats of arg list as needed by generator and generator function interfaces: - // - args for function class functor and generation class constructor - // () - generator class constructor field init list - // & - args passed by reference to generation function _body method - // ; - generator class fields for capturing closure - std::string functorArgs, ctorArgs, refArgs, initList, captureFields; - - for (bool hasArg=false; auto elem : args) { - if (!hasArg) - hasArg = true; - else { - functorArgs += ", "s; - refArgs += ", "s; - initList += ", "s; - } - std::string type = elem.first, name = elem.second; - functorArgs += type + " " + name; - refArgs += type + "& "+ name; - initList += name + "("s+ name + ")"s; - captureFields += tab(1) + type + " " + name + ";\n"s; - } - if (!refArgs.empty()) - refArgs = ", " + refArgs; - if (!initList.empty()) - initList = ", " + initList; - ctorArgs = functorArgs.empty()? 
std::string(): (", "s + functorArgs); + std::string functorArgs = FunctionParams(nodeId, false, false); + std::string initList = FunctionParams(nodeId, false, false, false, true) ; + std::string captureFields = FunctionParams(nodeId, false, false, false, false, true); + std::string ctorArgs = functorArgs.empty()? std::string(): (", "s + functorArgs); + initList = initList.empty()? "": (", "s + initList); - str = R"""( -// )""" + funcName + R"""( generators -class )""" + generatorName + R"""( : public t2crt::GeneratorProto { -public: - )""" + generatorName + R"""((t2crt::Function* ctor, t2crt::Object* proto)""" + ctorArgs + R"""() : t2crt::GeneratorProto(ctor, proto))""" + initList + R"""( {} - ~)""" + generatorName + R"""(() {} - - // closure capture fields -)""" + captureFields + R"""( - // iterator interface (override _return and _throw when needed) - t2crt::IteratorResult _next(t2crt::JS_Val* arg) override; -}; + std::string genClsDecl[] = { +"// " +funcName+ " generators", +"class " +generatorName+ " : public t2crt::GeneratorProto {", +"public:", +" " +generatorName+ "(t2crt::Function* ctor, t2crt::Object* proto" +ctorArgs+ ") : t2crt::GeneratorProto(ctor, proto)" +initList+ " {}", +" ~" +generatorName+ "() {}", +" // closure capture fields", +" " +captureFields, +" // iterator interface (override _return and _throw when needed)", +" t2crt::IteratorResult* next(t2crt::JS_Val* arg = nullptr) override;", +"};", +"// " +funcName+ " generator function", +"class " +generatorFuncName+ " : public t2crt::GeneratorFuncPrototype {", +"public:", +" " +generatorFuncName+ "() : t2crt::GeneratorFuncPrototype(&t2crt::GeneratorFunction, &t2crt::Generator, t2crt::GeneratorPrototype) {}", +" ~" +generatorFuncName+ "() {}", +" // call operator returns generator instances", +" " +generatorName+ "* operator()(" +functorArgs+ ");", +" // generator function body", +" " +GeneratorFuncHeader("", nodeId)+ ";", +"};", +"" + }; -// )""" + funcName + R"""( generator function -class )""" + generatorFuncName + R"""( : public t2crt::GeneratorFuncPrototype { -public: - )""" + generatorFuncName + R"""(() : t2crt::GeneratorFuncPrototype(&t2crt::GeneratorFunction, &t2crt::Generator, t2crt::GeneratorPrototype) {} - ~)""" + generatorFuncName + R"""(() {} + str += "\n"; + for (auto elem : genClsDecl) + str += elem + "\n"; + return str; +} - // call operator returns generator instances - )""" + generatorName + R"""(* operator()()""" + functorArgs + R"""(); - // generator function body - t2crt::IteratorResult _body(t2crt::Object* _this, void*& yield)""" + refArgs + R"""(); -}; +std::string GeneratorClassDef(std::string ns, std::string funcName, unsigned nodeId) { + std::string str; + std::string generatorName = ns + GeneratorName(funcName); + std::string generatorFuncName = ns + GeneratorFuncName(funcName); + + if (!ns.empty()) + funcName = ns + "::" + funcName; + + std::string params = FunctionParams(nodeId, false, false); + std::string args = FunctionParams(nodeId, false, true); + if (!args.empty()) + args = ", " + args; + + str = R"""( +t2crt::IteratorResult* )""" + generatorName + R"""(::next(t2crt::JS_Val* arg) { + if (_finished) { + _res.done = true; + return &_res; + } + // iterate by calling generation function with captures in generator + )""" + funcName + R"""(->_body(this, _yield, _res)""" + args + R"""(); + if (_res.done == true) + _finished = true; + return &_res; +} + +)""" + generatorName + "* "s + generatorFuncName + R"""(::operator()()""" + params + R"""() { + return new )""" + generatorName + 
R"""((&t2crt::Generator, foo->prototype)""" + args + R"""(); +} )"""; return str; @@ -222,29 +320,6 @@ std::string GenAnonFuncName(TreeNode* node) { return "_anon_func_"s + std::to_string(node->GetNodeId()); } -// Check 1st param of top level function for "this" and do substitution. -void HandleThisParam(unsigned nParams, TreeNode* node, std::string& params, std::string&args) { - if (nParams == 0) { - // ts2cpp's C++ mapping for TS func has a "this" obj in the c++ func param list - // which will be generated from AST if "this" is declared as a TS func parameter - // as required by TS strict mode. However TS funcs that do not reference 'this' - // are not required to declare it, so emitter has to check and insert one. - params = "t2crt::Object* _this"s; - args = "_this"s; - return; - } - - if (node->IsThis()) { - args = "_this"; - Emitter::Replace(params, "this", "_this"); // change this to _this to avoid c++ keyword - Emitter::Replace(params, "t2crt::JS_Val", "t2crt::Object*"); // change type any (JS_Val) to Object* per ts2cpp func mapping to C++ interface - } else { - // if 1st func param is not "this", insert one to work with c++ mapping for TS func - args = "_this, "s + args; - params = "t2crt::Object* _this, "s + params; - } -} - // return array constructor name of given type // format: // 1D array: t2crt::Array::ctor @@ -263,4 +338,18 @@ std::string ArrayCtorName(int dim, std::string type) { return str; } +// note: entries below are to match values from ast nodes. Do not prepend with "t2crt::" +std::vectorbuiltins = {"Object", "Function", "Number", "Array", "Record"}; + +bool IsBuiltinObj(std::string name) { + return std::find(builtins.begin(), builtins.end(), name) != builtins.end(); +} + +std::string ObjectTypeStr(std::string name) { + if (IsBuiltinObj(name)) + return "t2crt::" + name + "*"; + else + return name + "*"; +} + } // namespace maplefe diff --git a/src/MapleFE/ast2mpl/src/mpl_processor.cpp b/src/MapleFE/ast2mpl/src/mpl_processor.cpp index e66c75643e45e1dd2d50e92777fdf1c50cf68cb8..378c18860ff09647cbe0d32e5dd21e526a94b0f3 100644 --- a/src/MapleFE/ast2mpl/src/mpl_processor.cpp +++ b/src/MapleFE/ast2mpl/src/mpl_processor.cpp @@ -609,6 +609,14 @@ maple::BaseNode *Ast2MplBuilder::ProcessInfer(StmtExprKind skind, TreeNode *tnod return nullptr; } +maple::BaseNode *Ast2MplBuilder::ProcessTripleSlash(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessFunctionType(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + return nullptr; +} + maple::BaseNode *Ast2MplBuilder::ProcessBlockDecl(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { BlockNode *ast_block = static_cast(tnode); for (int i = 0; i < ast_block->GetChildrenNum(); i++) { @@ -650,7 +658,7 @@ maple::BaseNode *Ast2MplBuilder::ProcessFuncDecl(StmtExprKind skind, TreeNode *t // SmallVector mAttrs; // SmallVector mAnnotations; //annotation or pragma // SmallVector mThrows; // exceptions it can throw - TreeNode *ast_rettype = ast_func->GetType(); // return type + TreeNode *ast_rettype = ast_func->GetRetType(); // return type // SmallVector mParams; // BlockNode *ast_body = ast_func->GetBody(); // DimensionNode *mDims; diff --git a/src/MapleFE/astopt/include/ast_info.h b/src/MapleFE/astopt/include/ast_info.h index 4753f50dace00bff128a2e2753bec54cd5d5773f..83a0fc37122e10231fdde2f6d7050bd0eaf908ff 100644 --- a/src/MapleFE/astopt/include/ast_info.h +++ b/src/MapleFE/astopt/include/ast_info.h @@ -77,6 +77,7 @@ class AST_INFO { TypeAliasNode 
*CreateTypeAliasNode(TreeNode *to, TreeNode *from); StructNode *CreateStructFromStructLiteral(StructLiteralNode *node); + unsigned GetAnonymousName(); TreeNode *GetAnonymousStruct(TreeNode *node); bool IsInterface(TreeNode *node); @@ -153,6 +154,21 @@ class ClassStructVisitor : public AstVisitor { FunctionNode *VisitFunctionNode(FunctionNode *node); }; +class FunctionVisitor : public AstVisitor { + private: + Module_Handler *mHandler; + AST_INFO *mInfo; + + public: + explicit FunctionVisitor(Module_Handler *h, unsigned f, bool base = false) + : AstVisitor((f & FLG_trace_1) && base), mHandler(h) { + mInfo= mHandler->GetINFO(); + } + ~FunctionVisitor() = default; + + FunctionNode *VisitFunctionNode(FunctionNode *node); +}; + class FindStrIdxVisitor : public AstVisitor { private: Module_Handler *mHandler; diff --git a/src/MapleFE/astopt/include/ast_scp.h b/src/MapleFE/astopt/include/ast_scp.h index fd2dcb485df20f92f96bb929b1b4fc31c5662012..1a9a7124a2adbd4d89b4da617fa21fbb57763f6a 100644 --- a/src/MapleFE/astopt/include/ast_scp.h +++ b/src/MapleFE/astopt/include/ast_scp.h @@ -93,7 +93,7 @@ class BuildScopeVisitor : public BuildScopeBaseVisitor { void SetRunIt(bool b) { mRunIt = b; } void InitInternalTypes(); - ClassNode *AddClass(std::string name, unsigned tyidx = 0); + ClassNode *AddClass(unsigned stridx, unsigned tyidx = 0); FunctionNode *AddFunction(std::string name); void AddType(ASTScope *scope, TreeNode *node); diff --git a/src/MapleFE/astopt/src/ast_adj.cpp b/src/MapleFE/astopt/src/ast_adj.cpp index d7015ef214abb17ceabce1118442277c042c16e6..64b8e82bf1f0de9ea9ecc9c1e7f3ef79609bd954 100644 --- a/src/MapleFE/astopt/src/ast_adj.cpp +++ b/src/MapleFE/astopt/src/ast_adj.cpp @@ -53,6 +53,18 @@ ClassNode *AdjustASTVisitor::VisitClassNode(ClassNode *node) { (void) AstVisitor::VisitClassNode(node); CheckAndRenameCppKeywords(node); AssignPseudoName(node); + + // record names + gStringPool.AddAltStrIdx(node->GetStrIdx()); + for (unsigned i = 0; i < node->GetFieldsNum(); i++) { + TreeNode *n = node->GetField(i); + gStringPool.AddAltStrIdx(n->GetStrIdx()); + } + for (unsigned i = 0; i < node->GetMethodsNum(); i++) { + TreeNode *n = node->GetMethod(i); + gStringPool.AddAltStrIdx(n->GetStrIdx()); + } + // skip getting canonical type if not only fields if (node->GetMethodsNum() || node->GetSuperClassesNum() || node->GetSuperInterfacesNum() || node->GetSuperClassesNum() || node->GetTypeParamsNum()) { @@ -76,6 +88,18 @@ ClassNode *AdjustASTVisitor::VisitClassNode(ClassNode *node) { InterfaceNode *AdjustASTVisitor::VisitInterfaceNode(InterfaceNode *node) { (void) AstVisitor::VisitInterfaceNode(node); CheckAndRenameCppKeywords(node); + + // record names + gStringPool.AddAltStrIdx(node->GetStrIdx()); + for (unsigned i = 0; i < node->GetFieldsNum(); i++) { + TreeNode *n = node->GetField(i); + gStringPool.AddAltStrIdx(n->GetStrIdx()); + } + for (unsigned i = 0; i < node->GetMethodsNum(); i++) { + TreeNode *n = node->GetMethod(i); + gStringPool.AddAltStrIdx(n->GetStrIdx()); + } + // skip getting canonical type if not only fields if (node->GetMethodsNum() || node->GetSuperInterfacesNum()) { return node; @@ -108,6 +132,7 @@ StructLiteralNode *AdjustASTVisitor::VisitStructLiteralNode(StructLiteralNode *n } TreeNode *newnode = mInfo->GetCanonicStructNode(node); + gStringPool.AddAltStrIdx(newnode->GetStrIdx()); if (newnode != node) { node->SetTypeIdx(newnode->GetTypeIdx()); } @@ -118,6 +143,18 @@ StructLiteralNode *AdjustASTVisitor::VisitStructLiteralNode(StructLiteralNode *n StructNode 
*AdjustASTVisitor::VisitStructNode(StructNode *node) { (void) AstVisitor::VisitStructNode(node); CheckAndRenameCppKeywords(node); + + // record names + gStringPool.AddAltStrIdx(node->GetStrIdx()); + for (unsigned i = 0; i < node->GetFieldsNum(); i++) { + TreeNode *n = node->GetField(i); + gStringPool.AddAltStrIdx(n->GetStrIdx()); + } + for (unsigned i = 0; i < node->GetMethodsNum(); i++) { + TreeNode *n = node->GetMethod(i); + gStringPool.AddAltStrIdx(n->GetStrIdx()); + } + // skip getting canonical type for TypeAlias TreeNode *parent_orig = node->GetParent(); TreeNode *p = parent_orig; @@ -214,6 +251,7 @@ UserTypeNode *AdjustASTVisitor::VisitUserTypeNode(UserTypeNode *node) { ArrayTypeNode *arr = mHandler->NewTreeNode(); arr->SetDims(dim); arr->SetElemType(etype); + node->SetDims(NULL); node = (UserTypeNode *)arr; } @@ -283,7 +321,14 @@ FunctionNode *AdjustASTVisitor::VisitFunctionNode(FunctionNode *node) { (void) AstVisitor::VisitFunctionNode(node); CheckAndRenameCppKeywords(node); - TreeNode *type = node->GetType(); + gStringPool.AddAltStrIdx(node->GetStrIdx()); + + for(unsigned i = 0; i < node->GetParamsNum(); i++) { + TreeNode *it = node->GetParam(i); + gStringPool.AddAltStrIdx(it->GetStrIdx()); + } + + TreeNode *type = node->GetRetType(); if (type && type->IsUserType()) { type->SetParent(node); } @@ -313,6 +358,7 @@ DeclNode *AdjustASTVisitor::VisitDeclNode(DeclNode *node) { unsigned stridx = inode->GetStrIdx(); if (stridx) { node->SetStrIdx(stridx); + gStringPool.AddAltStrIdx(stridx); mUpdated = true; } @@ -508,7 +554,7 @@ LambdaNode *AdjustASTVisitor::VisitLambdaNode(LambdaNode *node) { if (tn) { if (tn->IsBlock()) { func->SetBody(static_cast(tn)); - func->SetType(node->GetType()); + func->SetRetType(node->GetRetType()); } else { BlockNode *blk = mHandler->NewTreeNode(); ReturnNode *ret = mHandler->NewTreeNode(); @@ -520,8 +566,8 @@ LambdaNode *AdjustASTVisitor::VisitLambdaNode(LambdaNode *node) { } // func return type - if (node->GetType()) { - func->SetType(node->GetType()); + if (node->GetRetType()) { + func->SetRetType(node->GetRetType()); } mUpdated = true; diff --git a/src/MapleFE/astopt/src/ast_info.cpp b/src/MapleFE/astopt/src/ast_info.cpp index e841e0d65a0c061c6c4ee9df7481a9b74f4b654b..5c8ef881ec5d39d5ac2f54f6a6cae737fbb8cb82 100644 --- a/src/MapleFE/astopt/src/ast_info.cpp +++ b/src/MapleFE/astopt/src/ast_info.cpp @@ -67,15 +67,31 @@ void AST_INFO::CollectInfo() { mPass = 2; MSGNOLOC0("============== merge class/interface/struct =============="); visitor.Visit(module); + + // collect function types + FunctionVisitor func_visitor(mHandler, mFlags, true); + func_visitor.Visit(module); } void AST_INFO::AddBuiltInTypes() { + unsigned size = gTypeTable.size(); + for (unsigned idx = 1; idx < size; idx++) { + TreeNode *node = gTypeTable.GetTypeFromTypeIdx(idx); + if (node->IsUserType()) { + mStrIdx2TypeIdxMap[node->GetStrIdx()] = node->GetTypeIdx(); + } + } + // add language builtin types TreeNode *node = NULL; + unsigned stridx = 0; #define BUILTIN(T) \ - node = gTypeTable.CreateBuiltinType(#T, TY_Class);\ - gTypeTable.AddType(node);\ - mStrIdx2TypeIdxMap[node->GetStrIdx()] = node->GetTypeIdx(); + stridx = gStringPool.GetStrIdx(#T);\ + if (mStrIdx2TypeIdxMap.find(stridx) == mStrIdx2TypeIdxMap.end()) {\ + node = gTypeTable.CreateBuiltinType(#T, TY_Class);\ + gTypeTable.AddType(node);\ + mStrIdx2TypeIdxMap[stridx] = node->GetTypeIdx();\ + } #include "lang_builtin.def" } @@ -245,16 +261,16 @@ bool AST_INFO::IsTypeCompatible(TreeNode *node1, TreeNode *node2) { if ((!node1 && node2) 
|| (node1 && !node2)) { return false; } + // not same kind + if (node1->GetKind() != node2->GetKind()) { + return false; + } // at least one is prim if (node1->IsPrimType() || node2->IsPrimType()) { TypeId tid_field = GetTypeId(node2); TypeId tid_target = GetTypeId(node1); return (tid_field == tid_target); } - // not same kind - if (node1->GetKind() != node2->GetKind()) { - return false; - } bool result = false; // same kind NodeKind nk = node1->GetKind(); @@ -490,10 +506,15 @@ StructNode *AST_INFO::CreateStructFromStructLiteral(StructLiteralNode *node) { return newnode; } -TreeNode *AST_INFO::GetAnonymousStruct(TreeNode *node) { - std::string str("AnonymousStruct_"); +unsigned AST_INFO::GetAnonymousName() { + std::string str("AnonymousStruct__"); str += std::to_string(mNum++); unsigned stridx = gStringPool.GetStrIdx(str); + return stridx; +} + +TreeNode *AST_INFO::GetAnonymousStruct(TreeNode *node) { + unsigned stridx = GetAnonymousName(); TreeNode *newnode = node; if (newnode->IsStructLiteral()) { StructLiteralNode *sl = static_cast(node); @@ -665,7 +686,7 @@ IdentifierNode *FillNodeInfoVisitor::VisitIdentifierNode(IdentifierNode *node) { FunctionNode *FillNodeInfoVisitor::VisitFunctionNode(FunctionNode *node) { (void) AstVisitor::VisitFunctionNode(node); - TreeNode *type = node->GetType(); + TreeNode *type = node->GetRetType(); if (type) { mInfo->SetTypeId(node, type->GetTypeId()); mInfo->SetTypeIdx(node, type->GetTypeIdx()); @@ -673,7 +694,7 @@ FunctionNode *FillNodeInfoVisitor::VisitFunctionNode(FunctionNode *node) { unsigned stridx = gStringPool.GetStrIdx("Generator"); unsigned tidx = mInfo->GetBuiltInTypeIdx(stridx); UserTypeNode *ut = mInfo->CreateUserTypeNode(stridx); - node->SetType(ut); + node->SetRetType(ut); } return node; } @@ -729,7 +750,15 @@ LiteralNode *FillNodeInfoVisitor::VisitLiteralNode(LiteralNode *node) { PrimTypeNode *FillNodeInfoVisitor::VisitPrimTypeNode(PrimTypeNode *node) { (void) AstVisitor::VisitPrimTypeNode(node); - mInfo->SetTypeIdx(node, node->GetPrimType()); + TypeId prim = node->GetPrimType(); + bool isprim = gTypeTable.IsPrimTypeId(prim); + + if (isprim) { + mInfo->SetTypeIdx(node, prim); + } else { + TreeNode *type = gTypeTable.GetTypeFromTypeId(prim); + mInfo->SetTypeIdx(node, type->GetTypeIdx()); + } return node; } @@ -739,7 +768,6 @@ UserTypeNode *FillNodeInfoVisitor::VisitUserTypeNode(UserTypeNode *node) { if (id) { unsigned tidx = mInfo->GetBuiltInTypeIdx(id); if (tidx) { - mInfo->SetTypeId(id, TY_Class); mInfo->SetTypeIdx(id, tidx); } if (!id->IsTypeIdNone()) { @@ -793,6 +821,9 @@ StructNode *ClassStructVisitor::VisitStructNode(StructNode *node) { if (id && node->GetStrIdx() == 0) { node->SetStrIdx(id->GetStrIdx()); } + if (node->GetStrIdx() == 0) { + node->SetStrIdx(mInfo->GetAnonymousName()); + } mInfo->SetStrIdx2Struct(node->GetStrIdx(), node); for (unsigned i = 0; i < node->GetFieldsNum(); ++i) { if (TreeNode *t = node->GetField(i)) { @@ -823,6 +854,9 @@ ClassNode *ClassStructVisitor::VisitClassNode(ClassNode *node) { mInfo->SetTypeId(node, TY_Class); (void) AstVisitor::VisitClassNode(node); if (mInfo->GetPass() == 0) { + if (node->GetStrIdx() == 0) { + node->SetStrIdx(mInfo->GetAnonymousName()); + } gTypeTable.AddType(node); mInfo->SetStrIdx2Struct(node->GetStrIdx(), node); for (unsigned i = 0; i < node->GetFieldsNum(); ++i) { @@ -854,6 +888,9 @@ InterfaceNode *ClassStructVisitor::VisitInterfaceNode(InterfaceNode *node) { mInfo->SetTypeId(node, TY_Class); (void) AstVisitor::VisitInterfaceNode(node); if (mInfo->GetPass() == 0) { + if 
(node->GetStrIdx() == 0) { + node->SetStrIdx(mInfo->GetAnonymousName()); + } gTypeTable.AddType(node); mInfo->SetStrIdx2Struct(node->GetStrIdx(), node); for (unsigned i = 0; i < node->GetFieldsNum(); ++i) { @@ -903,6 +940,24 @@ FunctionNode *ClassStructVisitor::VisitFunctionNode(FunctionNode *node) { return node; } +FunctionNode *FunctionVisitor::VisitFunctionNode(FunctionNode *node) { + FunctionTypeNode *functype = mHandler->NewTreeNode(); + TreeNode *n = NULL; + for (unsigned i = 0; i < node->GetParamsNum(); i++) { + n = node->GetParam(i); + functype->AddParam(n ? n->GetTypeIdx() : 0); + } + + // add return + n = node->GetRetType(); + functype->AddParam(n ? n->GetTypeIdx() : 0); + + unsigned tidx = gTypeTable.GetOrCreateFunctionTypeIdx(functype); + node->SetTypeIdx(tidx); + + return node; +} + IdentifierNode *FindStrIdxVisitor::VisitIdentifierNode(IdentifierNode *node) { (void) AstVisitor::VisitIdentifierNode(node); if (node->GetStrIdx() == mStrIdx) { diff --git a/src/MapleFE/astopt/src/ast_scp.cpp b/src/MapleFE/astopt/src/ast_scp.cpp index 9179704ab7e33a937ccc8b436abd125fe2d06911..fe472ccd5efd3289ee290da38b51789e456a4f01 100644 --- a/src/MapleFE/astopt/src/ast_scp.cpp +++ b/src/MapleFE/astopt/src/ast_scp.cpp @@ -128,29 +128,36 @@ void BuildScopeVisitor::InitInternalTypes() { TreeNode *node = gTypeTable.GetTypeFromTypeIdx(i); node->SetScope(scope); if (node->IsUserType()) { - static_cast(node)->GetId()->SetScope(scope); - AddType(scope, node); - AddDecl(scope, node); + UserTypeNode *ut = static_cast(node); + TreeNode *id = ut->GetId(); + id->SetScope(scope); + AddType(scope, ut); + // id as a decl + AddDecl(scope, id); } else { AddType(scope, node); } } // add dummpy console.log() - ClassNode *console = AddClass("console"); - ASTScope *scp = NewScope(scope, console); - mStrIdx2ScopeMap[console->GetStrIdx()] = scp; - FunctionNode *log = AddFunction("log"); - log->SetTypeIdx(TY_Void); - console->AddMethod(log); - log->SetScope(scp); - AddDecl(scp, log); -} - -ClassNode *BuildScopeVisitor::AddClass(std::string name, unsigned tyidx) { + unsigned size = gStringPool.GetSize(); + unsigned stridx = gStringPool.GetStrIdx("console"); + TreeNode *type = gTypeTable.GetTypeFromStrIdx(stridx); + if (!type) { + ClassNode *console = AddClass(stridx); + ASTScope *scp = NewScope(scope, console); + mStrIdx2ScopeMap[console->GetStrIdx()] = scp; + FunctionNode *log = AddFunction("log"); + log->SetTypeIdx(TY_Void); + console->AddMethod(log); + log->SetScope(scp); + AddDecl(scp, log); + } +} + +ClassNode *BuildScopeVisitor::AddClass(unsigned stridx, unsigned tyidx) { ClassNode *node = mHandler->NewTreeNode(); - unsigned idx = gStringPool.GetStrIdx(name); - node->SetStrIdx(idx); + node->SetStrIdx(stridx); node->SetTypeIdx(tyidx); ModuleNode *module = mHandler->GetASTModule(); @@ -428,6 +435,7 @@ NamespaceNode *BuildScopeVisitor::VisitNamespaceNode(NamespaceNode *node) { DeclNode *BuildScopeVisitor::VisitDeclNode(DeclNode *node) { BuildScopeBaseVisitor::VisitDeclNode(node); ASTScope *scope = NULL; + bool deep = true; if (node->GetProp() == JS_Var) { // promote to use function or module scope scope = mUserScopeStack.top(); @@ -438,10 +446,41 @@ DeclNode *BuildScopeVisitor::VisitDeclNode(DeclNode *node) { node->GetVar()->SetScope(scope); } } else { - // use current scope scope = mScopeStack.top(); + // for body of function use function scope instead of body scope + TreeNode *b = node->GetParent(); + if (b && b->IsBlock()) { + TreeNode *f = b->GetParent(); + if (f && f->IsFunction()) { + scope = 
mUserScopeStack.top(); + } + } + // restrict to current scope + deep = false; + } + // check if it is already a decl in the scope + unsigned stridx = node->GetStrIdx(); + TreeNode *decl = scope->FindDeclOf(stridx, deep); + if (decl) { + if (decl != node) { + // replace with an assignment if apply + if (node->GetInit()) { + BinOperatorNode *bop = mHandler->NewTreeNode(); + bop->SetOprId(OPR_Assign); + IdentifierNode *id = mHandler->NewTreeNode(); + id->SetStrIdx(stridx); + id->SetScope(scope); + + bop->SetOpndA(id); + bop->SetOpndB(node->GetInit()); + node = (DeclNode *)bop; + } else { + node = NULL; + } + } + } else { + AddDecl(scope, node); } - AddDecl(scope, node); return node; } @@ -453,7 +492,7 @@ UserTypeNode *BuildScopeVisitor::VisitUserTypeNode(UserTypeNode *node) { if (p->IsFunction()) { // exclude function return type FunctionNode *f = static_cast(p); - if (f->GetType() == node) { + if (f->GetRetType() == node) { return node; } } else if (p->IsTypeAlias()) { @@ -726,6 +765,7 @@ void AST_SCP::RenameVar() { str += std::to_string(size); visitor.mOldStrIdx = stridx; visitor.mNewStrIdx = gStringPool.GetStrIdx(str); + gStringPool.AddAltStrIdx(visitor.mNewStrIdx); TreeNode *tn = mHandler->GetAstOpt()->GetNodeFromNodeId(nid); ASTScope *scope = tn->GetScope(); tn = scope->GetTree(); diff --git a/src/MapleFE/astopt/src/ast_ti.cpp b/src/MapleFE/astopt/src/ast_ti.cpp index 22706289ed7bda7a3ff2fc87224962a9d16fc330..99439b59de694b757e30393cd8db4d3a3456d115 100644 --- a/src/MapleFE/astopt/src/ast_ti.cpp +++ b/src/MapleFE/astopt/src/ast_ti.cpp @@ -85,7 +85,7 @@ IdentifierNode *BuildIdNodeToDeclVisitor::VisitIdentifierNode(IdentifierNode *no (void) AstVisitor::VisitIdentifierNode(node); // mHandler->FindDecl() will use/add entries to mNodeId2Decl TreeNode *decl = mHandler->FindDecl(node); - if (decl) { + if (decl && decl != node) { mHandler->GetUtil()->SetTypeId(node, decl->GetTypeId()); mHandler->GetUtil()->SetTypeIdx(node, decl->GetTypeIdx()); } @@ -93,8 +93,10 @@ IdentifierNode *BuildIdNodeToDeclVisitor::VisitIdentifierNode(IdentifierNode *no if (type && type->IsPrimType()) { PrimTypeNode *ptn = static_cast(type); TypeId tid = ptn->GetPrimType(); - // mHandler->GetUtil()->SetTypeId(node, tid); - mHandler->GetUtil()->SetTypeIdx(node, tid); + if (gTypeTable.IsPrimTypeId(tid)) { + // mHandler->GetUtil()->SetTypeId(node, tid); + mHandler->GetUtil()->SetTypeIdx(node, tid); + } } return node; } @@ -226,6 +228,7 @@ TypeId TypeInferVisitor::MergeTypeId(TypeId tia, TypeId tib) { switch (tia) { case TY_None: result = tib; break; + case TY_Class: case TY_Object: case TY_User: result = tia; break; @@ -233,22 +236,34 @@ TypeId TypeInferVisitor::MergeTypeId(TypeId tia, TypeId tib) { case TY_Undefined: case TY_String: case TY_Function: - case TY_Class: case TY_Array: result = TY_Merge; break; + case TY_Number: { + switch (tib) { + case TY_Int: + case TY_Long: + case TY_Float: + case TY_Double: result = tib; break; + default: result = TY_Merge; break; + } + break; + } + case TY_Boolean: { switch (tib) { case TY_Int: case TY_Long: case TY_Float: case TY_Double: result = tib; break; + case TY_Number: result = tia; break; default: result = TY_Merge; break; } break; } case TY_Int: { switch (tib) { - case TY_Boolean: result = TY_Int; break; + case TY_Number: + case TY_Boolean: result = tia; break; case TY_Long: case TY_Float: case TY_Double: result = tib; break; @@ -258,8 +273,9 @@ TypeId TypeInferVisitor::MergeTypeId(TypeId tia, TypeId tib) { } case TY_Long: { switch (tib) { + case TY_Number: case TY_Boolean: - 
case TY_Int: result = TY_Long; break; + case TY_Int: result = tia; break; case TY_Float: case TY_Double: result = TY_Double; break; default: result = TY_Merge; break; @@ -268,8 +284,9 @@ TypeId TypeInferVisitor::MergeTypeId(TypeId tia, TypeId tib) { } case TY_Float: { switch (tib) { + case TY_Number: case TY_Boolean: - case TY_Int: result = TY_Float; break; + case TY_Int: result = tia; break; case TY_Long: case TY_Double: result = TY_Double; break; default: result = TY_Merge; break; @@ -278,10 +295,11 @@ TypeId TypeInferVisitor::MergeTypeId(TypeId tia, TypeId tib) { } case TY_Double: { switch (tib) { + case TY_Number: case TY_Boolean: case TY_Int: case TY_Long: - case TY_Double: result = TY_Double; break; + case TY_Double: result = tia; break; default: result = TY_Merge; break; } break; @@ -302,11 +320,11 @@ TypeId TypeInferVisitor::MergeTypeId(TypeId tia, TypeId tib) { } unsigned TypeInferVisitor::MergeTypeIdx(unsigned tia, unsigned tib) { - if (tia == tib || tib == 0) { + if (tia == tib || tib <= 1) { return tia; } - if (tia == 0) { + if (tia <= 1) { return tib; } @@ -415,7 +433,11 @@ PrimTypeNode *TypeInferVisitor::GetOrClonePrimTypeNode(PrimTypeNode *pt, TypeId new_pt->SetPrimType(pt->GetPrimType()); } SetTypeId(new_pt, tid); - SetTypeIdx(new_pt, tid); + if (IsPrimTypeId(tid)) { + SetTypeIdx(new_pt, tid); + } else { + SetTypeIdx(new_pt, gTypeTable.GetTypeFromTypeId(tid)->GetTypeIdx()); + } SetUpdated(); } return new_pt; @@ -483,7 +505,7 @@ void TypeInferVisitor::UpdateTypeUseNode(TreeNode *target, TreeNode *input) { TypeId new_elemTypeId = GetArrayElemTypeId(target); TreeNode *type = static_cast(target)->GetType(); MASSERT(target->IsIdentifier() && "target node not identifier"); - if (type->IsPrimArrayType()) { + if (type && type->IsPrimArrayType()) { unsigned nid = target->GetNodeId(); mParam2ArgArrayDeclMap[nid].insert(decl); if (old_elemTypeId != new_elemTypeId) { @@ -552,13 +574,13 @@ void TypeInferVisitor::UpdateFuncRetTypeId(FunctionNode *node, TypeId tid, unsig if (!node || (node->GetTypeId() == tid && node->GetTypeIdx() == tidx)) { return; } - TreeNode *type = node->GetType(); + TreeNode *type = node->GetRetType(); // create new return type node if it was shared if (type) { if (type->IsPrimType() && type->IsTypeIdNone()) { type = GetOrClonePrimTypeNode((PrimTypeNode *)type, tid); - node->SetType(type); + node->SetRetType(type); } tid = MergeTypeId(type->GetTypeId(), tid); SetTypeId(type, tid); @@ -625,7 +647,7 @@ bool TypeInferVisitor::UpdateVarTypeWithInit(TreeNode *var, TreeNode *init) { IdentifierNode *idnode = static_cast(var); TreeNode *type = idnode->GetType(); // use init NewNode to set decl type - if (!type && init) { + if (init) { if (init->IsNew()) { NewNode *n = static_cast(init); if (n->GetId()) { @@ -647,28 +669,39 @@ bool TypeInferVisitor::UpdateVarTypeWithInit(TreeNode *var, TreeNode *init) { result = true; } } + } else if (init->IsStructLiteral()) { + if (!type && init->GetTypeIdx() != 0) { + type = gTypeTable.GetTypeFromTypeIdx(init->GetTypeIdx()); + UserTypeNode *utype = mInfo->CreateUserTypeNode(type->GetStrIdx(), var->GetScope()); + utype->SetParent(idnode); + idnode->SetType(utype); + SetUpdated(); + } } else if (init->IsArrayLiteral()) { TypeId tid = GetArrayElemTypeId(init); unsigned tidx = GetArrayElemTypeIdx(init); - if (IsPrimTypeId(tid)) { - PrimTypeNode *pt = mHandler->NewTreeNode(); - pt->SetPrimType(tid); - - PrimArrayTypeNode *pat = mHandler->NewTreeNode(); - pat->SetPrim(pt); - - DimensionNode *dims = mHandler->GetArrayDim(init->GetNodeId()); - 
pat->SetDims(dims); + if (type) { + if (type->IsArrayType()) { + ArrayTypeNode *pat = static_cast(type); + // update array element type + SetTypeId(pat->GetElemType(), tid); + SetTypeIdx(pat->GetElemType(), tidx); + SetUpdated(); + } else { + NOTYETIMPL("array type not ArrayTypeNode"); + } + return result; + } - pat->SetParent(idnode); - idnode->SetType(pat); - SetUpdated(); + TreeNode *elemtype = NULL; + if (IsPrimTypeId(tid)) { + elemtype = gTypeTable.GetTypeFromTypeId(tid); } else if (tidx != 0) { - TreeNode *t = gTypeTable.GetTypeFromTypeIdx(tidx); - UserTypeNode *utype = mInfo->CreateUserTypeNode(t->GetStrIdx(), var->GetScope()); - + elemtype = gTypeTable.GetTypeFromTypeIdx(tidx); + } + if (elemtype) { ArrayTypeNode *pat = mHandler->NewTreeNode(); - pat->SetElemType(utype); + pat->SetElemType(elemtype); DimensionNode *dims = mHandler->GetArrayDim(init->GetNodeId()); pat->SetDims(dims); @@ -835,15 +868,13 @@ FieldLiteralNode *TypeInferVisitor::VisitFieldLiteralNode(FieldLiteralNode *node ArrayLiteralNode *TypeInferVisitor::VisitArrayLiteralNode(ArrayLiteralNode *node) { UpdateTypeId(node, TY_Array); (void) AstVisitor::VisitArrayLiteralNode(node); - ArrayLiteralNode *al = node; if (node->IsArrayLiteral()) { - al = static_cast(node); - unsigned size = al->GetLiteralsNum(); + unsigned size = node->GetLiteralsNum(); TypeId tid = TY_None; unsigned tidx = 0; bool allElemArray = true; for (unsigned i = 0; i < size; i++) { - TreeNode *n = al->GetLiteral(i); + TreeNode *n = node->GetLiteral(i); TypeId id = n->GetTypeId(); unsigned idx = n->GetTypeIdx(); tid = MergeTypeId(tid, id); @@ -861,9 +892,9 @@ ArrayLiteralNode *TypeInferVisitor::VisitArrayLiteralNode(ArrayLiteralNode *node unsigned elemdim = DEFAULTVALUE; // recalculate element typeid tid = TY_None; - unsigned tidx = 0; + tidx = 0; for (unsigned i = 0; i < size; i++) { - TreeNode *n = al->GetLiteral(i); + TreeNode *n = node->GetLiteral(i); if (n->IsArrayLiteral()) { DimensionNode * dn = mHandler->GetArrayDim(n->GetNodeId()); unsigned currdim = dn ? 
dn->GetDimensionsNum() : 0; @@ -1062,8 +1093,8 @@ CallNode *TypeInferVisitor::VisitCallNode(CallNode *node) { mHandler->AddGeneratorUsed(node->GetNodeId(), func); } // update call's return type - if (func->GetType()) { - UpdateTypeId(node, func->GetType()->GetTypeId()); + if (func->GetRetType()) { + UpdateTypeId(node, func->GetRetType()->GetTypeId()); } // skip imported and exported functions as they are generic // so should not restrict their types @@ -1125,6 +1156,13 @@ CallNode *TypeInferVisitor::VisitCallNode(CallNode *node) { } } else if (decl->IsLiteral()) { NOTYETIMPL("VisitCallNode literal node"); + } else if (decl->IsTypeIdClass()) { + // object + if (node->GetArgsNum()) { + TreeNode *arg = node->GetArg(0); + SetTypeId(arg, TY_Object); + SetTypeIdx(arg, decl->GetTypeIdx()); + } } else { NOTYETIMPL("VisitCallNode not function node"); } @@ -1158,13 +1196,17 @@ CastNode *TypeInferVisitor::VisitCastNode(CastNode *node) { AsTypeNode *TypeInferVisitor::VisitAsTypeNode(AsTypeNode *node) { (void) AstVisitor::VisitAsTypeNode(node); TreeNode *dest = node->GetType(); - SetTypeId(node, dest); + if (node->GetTypeIdx() == 0) { + SetTypeId(node, dest); + } TreeNode *parent = node->GetParent(); if (parent) { // pass to parent, need refine if multiple AsTypeNode if (parent->GetAsTypesNum() == 1 && parent->GetAsTypeAtIndex(0) == node) { - SetTypeId(parent, dest); + if (parent->GetTypeIdx() == 0) { + SetTypeId(parent, dest); + } } } return node; @@ -1505,9 +1547,6 @@ FunctionNode *TypeInferVisitor::VisitFunctionNode(FunctionNode *node) { if (node->GetFuncName()) { SetTypeId(node->GetFuncName(), node->GetTypeId()); } - if (node->GetType()) { - SetTypeIdx(node, node->GetType()->GetTypeIdx()); - } return node; } @@ -1532,10 +1571,26 @@ IdentifierNode *TypeInferVisitor::VisitIdentifierNode(IdentifierNode *node) { SetUpdated(); } } - if (node->GetInit()) { - UpdateTypeId(node, node->GetInit()->GetTypeId()); - UpdateTypeIdx(node, node->GetInit()->GetTypeIdx()); + TreeNode *init = node->GetInit(); + if (init) { + if (node->GetTypeId() == TY_None) { + SetTypeId(node, init->GetTypeId()); + } + if (node->GetTypeIdx() == 0) { + SetTypeIdx(node, init->GetTypeIdx()); + } SetUpdated(); + if (init->IsArrayLiteral()) { + // pass array element info + TypeId tid = mHandler->GetArrayElemTypeId(init->GetNodeId()); + unsigned tidx = mHandler->GetArrayElemTypeIdx(init->GetNodeId()); + UpdateArrayElemTypeMap(node, tid, tidx); + if (type && type->IsArrayType()) { + TreeNode *et = static_cast(type)->GetElemType(); + et->SetTypeId(tid); + et->SetTypeIdx(tidx); + } + } return node; } TreeNode *parent = node->GetParent(); @@ -1583,6 +1638,8 @@ IdentifierNode *TypeInferVisitor::VisitIdentifierNode(IdentifierNode *node) { UpdateTypeId(node, decl); UpdateTypeIdx(node, decl); } + // pass IsGeneratorUsed + mHandler->UpdateGeneratorUsed(node->GetNodeId(), decl->GetNodeId()); } else { NOTYETIMPL("node not declared"); MSGNOLOC0(node->GetName()); @@ -1607,7 +1664,7 @@ IsNode *TypeInferVisitor::VisitIsNode(IsNode *node) { TreeNode *parent = node->GetParent(); if (parent->IsFunction()) { FunctionNode *func = static_cast(parent); - if (func->GetType() == node) { + if (func->GetRetType() == node) { TreeNode *right = node->GetRight(); if (right->IsUserType()) { TreeNode *id = static_cast(right)->GetId(); @@ -1724,16 +1781,16 @@ ReturnNode *TypeInferVisitor::VisitReturnNode(ReturnNode *node) { if (tn) { FunctionNode *func = static_cast(tn); // use dummy PrimTypeNode as return type of function if not set to carry return TypeId - if 
(!func->GetType()) { + if (!func->GetRetType()) { PrimTypeNode *type = mHandler->NewTreeNode(); type->SetPrimType(TY_None); - func->SetType(type); + func->SetRetType(type); } if (!func->IsGenerator() && !func->IsIterator()) { UpdateFuncRetTypeId(func, node->GetTypeId(), node->GetTypeIdx()); if (res) { // use res to update function's return type - UpdateTypeUseNode(func->GetType(), res); + UpdateTypeUseNode(func->GetRetType(), res); } } } @@ -1819,7 +1876,10 @@ UserTypeNode *TypeInferVisitor::VisitUserTypeNode(UserTypeNode *node) { SetTypeId(node, TY_Array); SetTypeIdx(node, TY_Array); } else if (node->GetId()) { - UpdateTypeId(node, node->GetId()); + // non-enum user type which keep TY_None + if (node->GetId()->GetTypeId() != TY_None) { + SetTypeId(node, TY_Class); + } UpdateTypeIdx(node, node->GetId()); } TreeNode *parent = node->GetParent(); diff --git a/src/MapleFE/autogen/reserved.spec b/src/MapleFE/autogen/reserved.spec index 445cc388cc7acb78a21415b5119eb36e5e3b8aff..cd258684dc2ad9b79fe06683a1d021930fe3fa98 100644 --- a/src/MapleFE/autogen/reserved.spec +++ b/src/MapleFE/autogen/reserved.spec @@ -34,10 +34,9 @@ rule HEXDIGIT : ONEOF(DIGIT, 'a', 'b', 'c', 'd', 'e', 'f', 'A', 'B', 'C', 'D', ' # irregular char like \n, \, DEL, etc. will be handled in lexer.cpp if some language allows them in string literal. rule IRREGULAR_CHAR : "this_is_for_fake_rule" -# We will catch any utf-8 char in lexer in a short-cut. -rule UTF8 : "this_is_for_fake_rule" - -# Below are special rules handled in lexer source code. Since it'll be in lexer code, it means +# Below are special rules handled in lexer.cpp. Since it'll be in lexer code, it means # it's a shared rule of all languages. It has to be in reserved.spec. +rule UTF8 : "this_is_for_fake_rule" rule TemplateLiteral : "this_is_for_fake_rule" rule RegularExpression : "this_is_for_fake_rule" +rule NoLineTerminator : "this_is_for_fake_rule" diff --git a/src/MapleFE/c/src/main.cpp b/src/MapleFE/c/src/main.cpp index f2247efd5cadaca1e2d44f82036d1e305a77ee5f..b055675ccd5e23deec5d5292167d7cea2d3a57a6 100644 --- a/src/MapleFE/c/src/main.cpp +++ b/src/MapleFE/c/src/main.cpp @@ -112,7 +112,7 @@ int main (int argc, char *argv[]) { std::ofstream ofs; std::string fname(module->GetFilename()); - fname += ".mast"; + fname = fname.replace(fname.find(".c"), 2, ".mast"); ofs.open(fname, std::ofstream::out); const char *addr = (const char *)(&(ast_buf[0])); ofs.write(addr, ast_buf.size()); diff --git a/src/MapleFE/c/stmt.spec b/src/MapleFE/c/stmt.spec index 4b640fd0198c0619706904b2bc27c3b6a0b9f6e0..f71d36b960266b2d4a5e42e799f73bf60a0223ff 100644 --- a/src/MapleFE/c/stmt.spec +++ b/src/MapleFE/c/stmt.spec @@ -26,11 +26,14 @@ rule DimExprs : DimExpr + ZEROORMORE(DimExpr) rule DimExpr : '[' + Expression + ']' rule Expression : ONEOF( - PrimaryExpression) + PrimaryExpression, + UnaryExpression) rule UnaryExpression : ONEOF( PreIncrementExpression, - PreDecrementExpression) + PreDecrementExpression, + PostIncrementExpression, + PostDecrementExpression) rule PreIncrementExpression : "++" + PrimaryExpression attr.action : BuildUnaryOperation(%1, %2) @@ -38,6 +41,12 @@ rule PreIncrementExpression : "++" + PrimaryExpression rule PreDecrementExpression : "--" + PrimaryExpression attr.action : BuildUnaryOperation(%1, %2) +rule PostIncrementExpression : PrimaryExpression + "++" + attr.action : BuildPostfixOperation(%2, %1) + +rule PostDecrementExpression : PrimaryExpression + "--" + attr.action : BuildPostfixOperation(%2, %1) + 
###################################################################### # Variable # ###################################################################### @@ -85,12 +94,24 @@ rule Dim : '[' + ']' ###################################################################### rule Statement : ONEOF(LocalVariableDeclarationStatement, + ExpressionStatement, ReturnStatement) attr.property: Single +rule ExpressionStatement : StatementExpression + ';' + +rule StatementExpression : ONEOF( + PreIncrementExpression, + PreDecrementExpression, + PostIncrementExpression, + PostDecrementExpression, + ) + attr.property: Single + rule ReturnStatement : "return" + ZEROORONE(Expression) + ';' attr.action : BuildReturn(%2) + ###################################################################### # Function # ###################################################################### diff --git a/src/MapleFE/docs/builtin-constructors.md b/src/MapleFE/docs/builtin-constructors.md index b38060c8add67af8b593b04c5294ecb66a0f55e2..aa844737710707a03f7a67d1b7751fb919b9b24c 100644 --- a/src/MapleFE/docs/builtin-constructors.md +++ b/src/MapleFE/docs/builtin-constructors.md @@ -25,7 +25,7 @@ Not all built-in objects work as object constructors. The following is a list of JavaScript built-in objects that works as object constructors to create objects of corresponding built-in type: -## List of JavaScript built-in object constructors +### 1. List of JavaScript built-in object constructors ``` 1 AggregateError 2 Array @@ -49,30 +49,58 @@ of corresponding built-in type: 20 Int8Array 21 InternalError (Mozilla only) 22 Map - 23 Math - 24 Number - 25 Object - 26 Promise - 27 Proxy - 28 RangeError - 29 ReferenceError - 30 RegExp - 31 Set - 32 SharedArrayBuffer - 33 String - 34 Symbol - 35 SyntaxError - 36 TypeError - 37 Uint16Array - 38 Uint32Array - 39 Uint8Array - 40 Uint8ClampedArray - 41 URIError - 42 WeakMap - 43 WeakRef - 44 WeakSet + 23 Number + 24 Object + 25 Promise + 26 Proxy + 27 RangeError + 28 ReferenceError + 29 RegExp + 30 Set + 31 SharedArrayBuffer + 32 String + 33 Symbol + 34 SyntaxError + 35 TypeError + 36 Uint16Array + 37 Uint32Array + 38 Uint8Array + 39 Uint8ClampedArray + 40 URIError + 41 WeakMap + 42 WeakRef + 43 WeakSet ``` +### 2. JavaScript builtin String/Number/Boolean object constructor and string/number/boolean primitive +https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String#string_primitives_and_string_objects + +"Note that JavaScript distinguishes between String objects and primitive string values. (The same is true of Boolean and Numbers.) + +String literals (denoted by double or single quotes) and strings returned from String calls in a non-constructor context (that is, called without using the new keyword) are primitive strings. JavaScript automatically converts primitives to String objects, so that it's possible to use String object methods for primitive strings. In contexts where a method is to be invoked on a primitive string or a property lookup occurs, JavaScript will automatically wrap the string primitive and call the method or perform the property lookup." 
+``` + 1 var s1 : string = "test"; // string literal + 2 var s2 : String = "test"; // string literal + 3 var s3 : string = String("test"); // string literal + 4 var s4 : String = String("test"); // string literal + 5 var s5 : String = new String("test"); // String object + 6 console.log(typeof(s1)); // string + 7 console.log(s1.slice(1,2)); // string literal s1 wrapped/converted to String object for call + 8 console.log(typeof(s2)); // string + 9 console.log(typeof(s3)); // string + 10 console.log(typeof(s4)); // string + 11 console.log(typeof(s5)); // object +``` + +For TypeScript to C++ mapping, string primitive maps to std::string, and String objects maples to builtin String object t2crt::String (same for Booelan and Numbers). + +The type returned by JavaScript/TypeScript String/Number/Boolean builtin/constructor function depends on the usage: +- when used as a function, it is a type converter (convert between literal typeis), returns primitve/literal type string/number/boolean +- when used with new op(), it is a constructor and returns an object +- A variable declared as primitve type string/number/boolean will be wrapped/converted to a String/Number/Boolean object if any object property/method is referenced + For TypeScript to C++, this conversion can be done by the runtime, but there is opportunity for optimization if it can be determined at compile time whether a primitive will be used as an object, in which case the primitve literal can be generated as object intead. + + ## TypeScript types Additionally these TypeScript types will be treated as built-in object types too: diff --git a/src/MapleFE/docs/readme b/src/MapleFE/docs/readme index 732f9f3725c3049fc9bbad1ecaa4016af160b2d0..9ff4bbefc70014338c1bd442785b82389db6b262 100644 --- a/src/MapleFE/docs/readme +++ b/src/MapleFE/docs/readme @@ -47,10 +47,19 @@ alias gounit='cd $unit' - For graphic view of JavaScript object inheritance relations: o cd $MFE/docs/util; node proto.js | ./viewdot.sh -3. TypeScript/JavaScript inheritance modeling in C++ +3. Run a single case from .ts + 1) ~/OpenArkCompiler/src/MapleFE/output/typescript/bin/ts2ast while-stmt.ts + This creates while-stmt.ts.ast at the same directory + 2) ~/OpenArkCompiler/src/MapleFE/output/typescript/bin/ast2cpp while-stmt.ts.ast + This generates the while-stmt.cpp + 3) g++ -g -o run -I/home/ubuntu/OpenArkCompiler/src/MapleFE/ast2cpp/runtime/include -I/home/ubuntu/OpenArkCompiler/src/MapleFE/astopt/include -std=c++17 while-stmt.cpp /home/ubuntu/OpenArkCompiler/src/MapleFE/ast2cpp/runtime/src/*.cpp + This generates the executable run. + 4) ./run to test the result. + +4. TypeScript/JavaScript inheritance modeling in C++ ==================================================== -3.1 JavaScript Object Properties +4.1 JavaScript Object Properties JavaScript objects have both instance properties and inherited properties. Instance properties are held in the object's own property @@ -67,7 +76,7 @@ In Javascript, object property lookup order is: - then lookup inherited property (from property list of object pointed to by chain of __proto__ starting from __proto__ of current object -3.2 TypeScript/JavaScript inheritance modeling in C++ +4.2 TypeScript/JavaScript inheritance modeling in C++ The inheritance relationship of TS/JS objects is mapped to C++ as classes derived hierarchically along the __proto__ chain. 
This allows @@ -100,7 +109,7 @@ instead of a prototype object, there is still only a single copy of inheited properties, because in JavaScript, there is only 1 single instance of each function constructor. -3.3 Property inheritance with __proto__ chain +4.3 Property inheritance with __proto__ chain See environment section in readme for instruction to view graphic display of JavaScript object inheritance relationship. The following diff --git a/src/MapleFE/scripts/maplefe-autogen.py b/src/MapleFE/scripts/maplefe-autogen.py index 1dd330b3171d70be47f029bddf72fa28b7e02749..f0ffce0cf1f8723de889853653e340c32e058df9 100755 --- a/src/MapleFE/scripts/maplefe-autogen.py +++ b/src/MapleFE/scripts/maplefe-autogen.py @@ -818,7 +818,7 @@ bool PutNode(TreeNode *n) {{ case NK_UserType: *mOs << EnumVal(UserTypeNode, UT_Type, Type); break; case NK_XXportAsPair: *mOs << (static_cast(n)->IsDefault() ? "default" : ""); *mOs << (static_cast(n)->IsRef() ? " ref" : " copy"); break; - case NK_Struct: *mOs << EnumVal(StructNode, StructProp, Prop); break; + case NK_Struct: *mOs << EnumVal(StructNode, StructProp, Prop); *mOs << "\\\\n" << n->GetName(); break; }} if(n->IsStmt()) *mOs << "\\",penwidth=2,color=\\"tomato"; diff --git a/src/MapleFE/scripts/perf-java.sh b/src/MapleFE/scripts/perf-java.sh new file mode 100755 index 0000000000000000000000000000000000000000..b838fd3675cf0b76f7ba0a6bc9aee3d477e59d70 --- /dev/null +++ b/src/MapleFE/scripts/perf-java.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# This script is to measure the runtime performance of java2ast and other executables +# If perf is not installed yet, please install the package linux-tools-common with all dependencies +TS2AST=$(dirname $0)/../output/java/bin/java2ast +CMD="sudo perf record -e cpu-cycles,cache-misses --call-graph fp -F 10000 -o perf.data" +if [ $# -eq 0 ]; then + echo "Usage: $0 " + echo " $0 " + exit 1 +elif [ $# -eq 1 -a "$(basename $1)" != "$(basename $1 .ts)" ]; then + echo $CMD $TS2AST "$@" + $CMD $TS2AST "$@" +else + echo $CMD "$@" + $CMD "$@" +fi +echo sudo perf report +sudo perf report diff --git a/src/MapleFE/shared/include/ast.h b/src/MapleFE/shared/include/ast.h index 58d327ce4fc805b2834e182011b870f95f73f99f..347f8d443493ec5bb3bf048c33a8f7d29e0328a6 100644 --- a/src/MapleFE/shared/include/ast.h +++ b/src/MapleFE/shared/include/ast.h @@ -1830,7 +1830,7 @@ private: SmallVector mThrows; // exceptions it can throw SmallVector mTypeParams; TreeNode *mFuncName; // function name, usually an identifier - TreeNode *mType; // return type + TreeNode *mRetType; // return type SmallVector mParams; // BlockNode *mBody; DimensionNode *mDims; @@ -1909,8 +1909,8 @@ public: void SetTypeParamAtIndex(unsigned i, TreeNode* n) {*(mTypeParams.RefAtIndex(i)) = n; SETPARENT(n);} void AddTypeParam(TreeNode *); - void SetType(TreeNode *t) {mType = t; SETPARENT(t);} - TreeNode* GetType(){return mType;} + void SetRetType(TreeNode *t) {mRetType = t; SETPARENT(t);} + TreeNode* GetRetType(){return mRetType;} DimensionNode* GetDims() {return mDims;} void SetDims(DimensionNode *t) {mDims = t;} @@ -2120,7 +2120,7 @@ enum LambdaProperty { class LambdaNode : public TreeNode { private: LambdaProperty mProperty; - TreeNode *mType; // The return type. nullptr as Java Lambda. + TreeNode *mRetType; // The return type. nullptr as Java Lambda. SmallVector mParams; // A param could be an IdentifierNode or DeclNode. TreeNode *mBody; // the body could be an expression, or block. 
// nullptr as TS FunctionType and ConstructorType @@ -2128,7 +2128,7 @@ private: SmallVector mAttrs; public: LambdaNode() : TreeNode(NK_Lambda), - mBody(nullptr), mProperty(LP_JSArrowFunction), mType(nullptr) {} + mBody(nullptr), mProperty(LP_JSArrowFunction), mRetType(nullptr) {} ~LambdaNode(){Release();} @@ -2138,8 +2138,8 @@ public: LambdaProperty GetProperty() {return mProperty;} void SetProperty(LambdaProperty p) {mProperty = p;} - TreeNode* GetType() {return mType;} - void SetType(TreeNode* t) {mType = t; SETPARENT(t);} + TreeNode* GetRetType() {return mRetType;} + void SetRetType(TreeNode* t) {mRetType = t; SETPARENT(t);} unsigned GetParamsNum() {return mParams.GetNum();} TreeNode* GetParam(unsigned i) {return mParams.ValueAtIndex(i);} @@ -2388,5 +2388,35 @@ public: void Dump(unsigned); }; +////////////////////////////////////////////////////// +// Triple Slash Directive +////////////////////////////////////////////////////// + +enum TripleSlashProp { + TSP_Path, + TSP_Types, + TSP_Lib, + TSP_NoDefaultLib, + TSP_NA +}; + +class TripleSlashNode : public TreeNode { +private: + TripleSlashProp mProp; + TreeNode *mValue; +public: + TripleSlashNode() : TreeNode(NK_TripleSlash) {mValue = NULL; mProp = TSP_NA;} + ~TripleSlashNode() {} + + TreeNode* GetValue() {return mValue;} + void SetValue(TreeNode *n) {mValue = n; SETPARENT(n);} + + TripleSlashProp GetProp() {return mProp;} + void SetProp(TripleSlashProp p) {mProp = p;} + + void Dump(unsigned); +}; + + } #endif diff --git a/src/MapleFE/shared/include/ast_nk.def b/src/MapleFE/shared/include/ast_nk.def index df99869326ebf7eb81550f2a848277d7d739c885..091b6931090a7a6a88b7c0b802624032fbf30ddc 100644 --- a/src/MapleFE/shared/include/ast_nk.def +++ b/src/MapleFE/shared/include/ast_nk.def @@ -43,6 +43,7 @@ NODEKIND(AsType) NODEKIND(TypeAlias) NODEKIND(ConditionalType) // Conditional types in Typescript NODEKIND(TupleType) // tuple types in Typescript +NODEKIND(FunctionType) // NODEKIND(Cast) @@ -88,6 +89,8 @@ NODEKIND(In) NODEKIND(Is) NODEKIND(Infer) +NODEKIND(TripleSlash) // TS triple-slash directive + NODEKIND(Block) NODEKIND(Function) NODEKIND(Class) diff --git a/src/MapleFE/shared/include/ast_scope.h b/src/MapleFE/shared/include/ast_scope.h index 32131f660cd0b39e07589e967b8660f1f9c5c7b0..088072c362f6c492203110f4d0689223a2281428 100644 --- a/src/MapleFE/shared/include/ast_scope.h +++ b/src/MapleFE/shared/include/ast_scope.h @@ -83,7 +83,7 @@ public: TreeNode* GetExportedDecl(unsigned i) {return mExportedDecls.ValueAtIndex(i);} TreeNode* GetType(unsigned i) {return mTypes.ValueAtIndex(i);} - TreeNode* FindDeclOf(unsigned stridx); + TreeNode* FindDeclOf(unsigned stridx, bool deep = true); TreeNode* FindExportedDeclOf(unsigned stridx); TreeNode* FindTypeOf(unsigned stridx); diff --git a/src/MapleFE/shared/include/ast_type.h b/src/MapleFE/shared/include/ast_type.h index d120eafa87e479250a90ad64622fc82126e45fbc..0f4afe5ee73d61dff5d91d629ab2d40b51d7d471 100644 --- a/src/MapleFE/shared/include/ast_type.h +++ b/src/MapleFE/shared/include/ast_type.h @@ -103,7 +103,7 @@ public: void SetId(TreeNode *n) {mId = n; SETPARENT(n);} unsigned GetUnionInterTypesNum() {return mUnionInterTypes.GetNum();} - void AddUnionInterType(TreeNode *n); + void AddUnionInterType(TreeNode *n, bool front = false); TreeNode* GetUnionInterType(unsigned i) {return mUnionInterTypes.ValueAtIndex(i);} void SetUnionInterType(unsigned i, TreeNode* n) {*(mUnionInterTypes.RefAtIndex(i)) = n; SETPARENT(n);} @@ -136,6 +136,11 @@ public: void Dump(unsigned); }; 
+///////////////////////////////////////////////////////////////////////////////
+// ArrayTypeNode
+// It is used to specify Array types, including element type and dimensions
+///////////////////////////////////////////////////////////////////////////////
+
 class ArrayTypeNode : public TreeNode {
 private:
   TreeNode *mElemType;
@@ -160,6 +165,29 @@ public:
   void Dump(unsigned);
 };
 
+///////////////////////////////////////////////////////////////////////////////
+// FunctionTypeNode
+// It is used to specify a function type with its parameters and return type
+///////////////////////////////////////////////////////////////////////////////
+class FunctionTypeNode : public TreeNode {
+private:
+  SmallVector<unsigned> mParams;  // type indexes of the formal parameters,
+                                  // with the return type as the last one
+
+public:
+  FunctionTypeNode() : TreeNode(NK_FunctionType) {}
+  ~FunctionTypeNode(){}
+
+  unsigned GetParamsNum() {return mParams.GetNum();}
+  unsigned GetParam(unsigned i) {return mParams.ValueAtIndex(i);}
+  void SetParam(unsigned i, unsigned n) {*(mParams.RefAtIndex(i)) = n;}
+  void AddParam(unsigned i) {mParams.PushBack(i);}
+  void ClearParam() {mParams.Clear();}
+
+  bool IsEqual(FunctionTypeNode *f);
+  void Dump(unsigned);
+};
+
 ///////////////////////////////////////////////////////////////////////////////
 // PrimTypeNode & PrimTypePool
 // The size of PrimTypeNode is fixed, so it's good to use container for the storage.
diff --git a/src/MapleFE/shared/include/container.h b/src/MapleFE/shared/include/container.h
index 39066b4e9a211b7f2ce0b102e307216ce8099dd9..828a8379e00034339b1eeca24fbc55045b9a79f9 100644
--- a/src/MapleFE/shared/include/container.h
+++ b/src/MapleFE/shared/include/container.h
@@ -1,5 +1,6 @@
 /*
 * Copyright (C) [2020] Futurewei Technologies, Inc. All rights reserved.
+* Copyright (C) 2022 Tencent. All rights reserved.
 *
 * OpenArkFE is licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
@@ -33,6 +34,7 @@
 #define __CONTAINER_H__
 
 #include
+#include <unordered_map>
 #include "mempool.h"
 #include "massert.h"
 #include "macros.h"
@@ -777,6 +779,338 @@ public:
   }
 };
 
+////////////////////////////////////////////////////////////////////////
+// GuamianFast uses an unordered map to store the Knobs in order to
+// speed up searching over large numbers of entries.
+////////////////////////////////////////////////////////////////////////
+
+template <class K, class E, class D> class GuamianFast {
+private:
+  struct Elem{
+    E mData;
+    Elem *mNext;
+  };
+
+  // Sometimes people need to save certain additional information to
+  // each knob. So we define mData.
+  struct Knob{
+    D mData;
+    Elem *mChildren;  // pointing to the first child
+  };
+
+  MemPool mMemPool;
+  std::unordered_map<K, Knob*> mKnobs;
+
+  // allocate a new knob
+  Knob* NewKnob() {
+    Knob *knob = (Knob*)mMemPool.Alloc(sizeof(Knob));
+    knob->mData = 0;
+    knob->mChildren = NULL;
+    return knob;
+  }
+
+  // allocate a new element
+  Elem* NewElem() {
+    Elem *elem = (Elem*)mMemPool.Alloc(sizeof(Elem));
+    elem->mNext = NULL;
+    elem->mData = 0;
+    return elem;
+  }
+
+  // Sometimes people want to have a sequence of operations like,
+  //   Get the knob,
+  //   Add one element, on the knob
+  //   Add more elements, on the knob.
+  // This is a common scenario. To implement it, we need a temporary
+  // pointer to the located knob. This temp knob is used ONLY by the
+  // paired operations, PairedFindOrCreateKnob() and PairedAddElem().
+  struct {
+    Knob *mKnob;
+    K mKey;
+  }mTempKnob;
+
+private:
+  // Just try to find the Knob.
+  // return NULL if it fails.
+  Knob* FindKnob(K key) {
+    Knob *result = NULL;
+    auto search = mKnobs.find(key);
+    if (search != mKnobs.end())
+      result = search->second;
+    return result;
+  }
+
+  // Try to find the Knob. Create one if not found.
+  Knob* FindOrCreateKnob(K key) {
+    Knob *knob = FindKnob(key);
+    if (!knob) {
+      knob = NewKnob();
+      mKnobs.insert(std::make_pair(key, knob));
+    }
+    return knob;
+  }
+
+  // Add an element to knob. It's the caller's duty to assure
+  // knob is not NULL.
+  void AddElem(Knob *knob, E data) {
+    Elem *elem = knob->mChildren;
+    Elem *found = NULL;
+    while (elem) {
+      if (elem->mData == data) {
+        found = elem;
+        break;
+      }
+      elem = elem->mNext;
+    }
+
+    if (!found) {
+      Elem *e = NewElem();
+      e->mData = data;
+      e->mNext = knob->mChildren;
+      knob->mChildren = e;
+    }
+  }
+
+  // return true : if the element is found
+  //        false : otherwise
+  bool FindElem(Knob *knob, E data) {
+    Elem *elem = knob->mChildren;
+    while (elem) {
+      if (elem->mData == data)
+        return true;
+      elem = elem->mNext;
+    }
+    return false;
+  }
+
+  // Remove elem from the list. If elem doesn't exist, exit quietly.
+  void RemoveElem(Knob *knob, E data) {
+    Elem *elem = knob->mChildren;
+    Elem *elem_prev = NULL;
+    Elem *target = NULL;
+    while (elem) {
+      if (elem->mData == data) {
+        target = elem;
+        break;
+      }
+      elem_prev = elem;
+      elem = elem->mNext;
+    }
+
+    if (target) {
+      if (target == knob->mChildren)
+        knob->mChildren = target->mNext;
+      else
+        elem_prev->mNext = target->mNext;
+    }
+  }
+
+  // Move the element to be the first child of knob.
+  // It's the caller's duty to make sure 'data' does exist
+  // in knob's children.
+  void MoveElemToHead(Knob *knob, E data) {
+    Elem *target_elem = NULL;
+    Elem *elem = knob->mChildren;
+    Elem *elem_prev = NULL;
+    while (elem) {
+      if (elem->mData == data) {
+        target_elem = elem;
+        break;
+      }
+      elem_prev = elem;
+      elem = elem->mNext;
+    }
+
+    if (target_elem && (target_elem != knob->mChildren)) {
+      elem_prev->mNext = target_elem->mNext;
+      target_elem->mNext = knob->mChildren;
+      knob->mChildren = target_elem;
+    }
+  }
+
+  // Try to find the first child of Knob k. Return the data.
+  // 'found' is set to false on failure, true on success.
+  // [NOTE] It's the user's responsibility to make sure the Knob
+  //        of 'key' exists.
+  E FindFirstElem(Knob *knob, bool &found) {
+    Elem *e = knob->mChildren;
+    if (!e) {
+      found = false;
+      return 0;
+    }
+    found = true;
+    return e->mData;
+  }
+
+  // return num of elements in knob.
+  // It's the caller's duty to assure knob is not NULL.
+  unsigned NumOfElem(Knob *knob) {
+    Elem *e = knob->mChildren;
+    unsigned c = 0;
+    while(e) {
+      c++;
+      e = e->mNext;
+    }
+    return c;
+  }
+
+  // Return the idx-th element in knob.
+  // It's the caller's duty to assure the validity of the return value.
+  // It doesn't check validity here.
+  // Index starts from 0.
+  E GetElemAtIndex(Knob *knob, unsigned idx) {
+    Elem *e = knob->mChildren;
+    unsigned c = 0;
+    E data;
+    while(e) {
+      if (c == idx) {
+        data = e->mData;
+        break;
+      }
+      c++;
+      e = e->mNext;
+    }
+    return data;
+  }
+
+public:
+  GuamianFast() {mTempKnob.mKnob = NULL;}
+  ~GuamianFast(){Release();}
+
+  void AddElem(K key, E data) {
+    Knob *knob = FindOrCreateKnob(key);
+    AddElem(knob, data);
+  }
+
+  // If 'data' doesn't exist, it ends quietly
+  void RemoveElem(K key, E data) {
+    Knob *knob = FindOrCreateKnob(key);
+    RemoveElem(knob, data);
+  }
+
+  // Try to find the first child of Knob k. Return the data.
+  // 'found' is set to false on failure, true on success.
+  // [NOTE] It's the user's responsibility to make sure the Knob
+  //        of 'key' exists.
+  E FindFirstElem(K key, bool &found) {
+    Knob *knob = FindKnob(key);
+    if (!knob) {
+      found = false;
+      return 0;  // return value doesn't matter when it fails.
+    }
+    E data = FindFirstElem(knob, found);
+    return data;
+  }
+
+  // return true : if the element is found
+  //        false : otherwise
+  bool FindElem(K key, E data) {
+    Knob *knob = FindKnob(key);
+    if (!knob)
+      return false;
+    return FindElem(knob, data);
+  }
+
+  // Move the element to the head.
+  // If 'data' doesn't exist, it ends quietly.
+  void MoveElemToHead(K key, E data) {
+    Knob *knob = FindKnob(key);
+    if (!knob)
+      return;
+    MoveElemToHead(knob, data);
+  }
+
+  /////////////////////////////////////////////////////////
+  // Paired operations start with finding a knob. It can
+  // be either PairedFindKnob() or PairedFindOrCreateKnob()
+  // Following that, there could be any number of operations
+  // like searching, adding, moving an element.
+  /////////////////////////////////////////////////////////
+
+  void PairedFindOrCreateKnob(K key) {
+    mTempKnob.mKnob = FindOrCreateKnob(key);
+    mTempKnob.mKey = key;
+  }
+
+  bool PairedFindKnob(K key) {
+    mTempKnob.mKnob = FindKnob(key);
+    mTempKnob.mKey = key;
+    if (mTempKnob.mKnob)
+      return true;
+    else
+      return false;
+  }
+
+  void PairedAddElem(E data) {
+    AddElem(mTempKnob.mKnob, data);
+  }
+
+  // If 'data' doesn't exist, it ends quietly
+  void PairedRemoveElem(E data) {
+    RemoveElem(mTempKnob.mKnob, data);
+  }
+
+  bool PairedFindElem(E data) {
+    return FindElem(mTempKnob.mKnob, data);
+  }
+
+  // If 'data' doesn't exist, it ends quietly.
+  void PairedMoveElemToHead(E data) {
+    MoveElemToHead(mTempKnob.mKnob, data);
+  }
+
+  E PairedFindFirstElem(bool &found) {
+    return FindFirstElem(mTempKnob.mKnob, found);
+  }
+
+  // return num of elements in the current temp knob.
+  // It's the caller's duty to assure the knob is not NULL.
+  unsigned PairedNumOfElem() {
+    return NumOfElem(mTempKnob.mKnob);
+  }
+
+  // Return the idx-th element in the knob.
+  // It's the caller's duty to assure the validity of the return value.
+  // It doesn't check validity here.
+  // Index starts from 0.
+  E PairedGetElemAtIndex(unsigned idx) {
+    return GetElemAtIndex(mTempKnob.mKnob, idx);
+  }
+
+  // Reduce the element at index exc_idx.
+  // It's the caller's duty to assure the element exists.
+  void PairedReduceElems(unsigned exc_idx) {
+    ReduceElems(mTempKnob.mKnob, exc_idx);
+  }
+
+  void PairedSetKnobData(D d) {
+    mTempKnob.mKnob->mData = d;
+  }
+
+  D PairedGetKnobData() {
+    return mTempKnob.mKnob->mData;
+  }
+
+  K PairedGetKnobKey() {
+    return mTempKnob.mKey;
+  }
+
+  /////////////////////////////////////////////////////////
+  // Other functions
+  /////////////////////////////////////////////////////////
+
+  void Clear(){
+    mTempKnob.mKnob = NULL;
+    mKnobs.clear();
+    mMemPool.Clear();
+  }
+
+  void Release(){
+    mMemPool.Release();
+  }
+};
+
 //////////////////////////////////////////////////////////////////////////////////////
 // Tree
 // This is a regular tree.
It simply maintains the basic operations of a tree, like diff --git a/src/MapleFE/shared/include/lexer.h b/src/MapleFE/shared/include/lexer.h index 151f55b258b5ebb5e8a6c92faecead47c5edb363..00a3f708629d9e8e8d347b56e6e942b241c768fb 100644 --- a/src/MapleFE/shared/include/lexer.h +++ b/src/MapleFE/shared/include/lexer.h @@ -135,6 +135,8 @@ public: virtual bool FindNextTLFormat(unsigned start, std::string& s, unsigned& end) {return false;} virtual bool FindNextTLPlaceHolder(unsigned start, std::string& s, unsigned& end) {return false;} + virtual bool FindTripleSlash() {return false;} + // replace keyword/opr/sep... with tokens //void PlantTokens(); //void PlantTraverseRuleTable(RuleTable*); diff --git a/src/MapleFE/shared/include/parser.h b/src/MapleFE/shared/include/parser.h index a747c23656ba23838301a06b86ad6ed762098a68..5b4a1072bc48fd4bce6da0d79d0ffe37756fbfdb 100644 --- a/src/MapleFE/shared/include/parser.h +++ b/src/MapleFE/shared/include/parser.h @@ -1,7 +1,8 @@ /* * Copyright (C) [2020-2022] Futurewei Technologies, Inc. All rights reverved. +* Copyright 2022 Tencent. All rights reverved. * -* OpenArkFE is licensed under the Mulan PSL v2. +* MapleFE is licensed under the Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. * You may obtain a copy of Mulan PSL v2 at: * @@ -43,6 +44,7 @@ class TreeNode; typedef enum AppealStatus { FailWasFailed, FailNotRightToken, + FailNotRightString, FailNotIdentifier, FailNotLiteral, FailNotRegExpr, @@ -302,6 +304,7 @@ public: // 1. If the target is a token, we just need compare mCurToken with it. // 2. If the target is a special rule table, like literal, identifier, we just // need check the type of mCurToken. + bool TraverseStringSucc(Token*, AppealNode*, AppealNode *&); bool TraverseToken(Token*, AppealNode*, AppealNode *&); bool TraverseLiteral(RuleTable*, AppealNode*); bool TraverseIdentifier(RuleTable*, AppealNode*); diff --git a/src/MapleFE/shared/include/rule_summary.h b/src/MapleFE/shared/include/rule_summary.h index 5487d3d070eaa4751915bc26ecf5e23303fddbcc..e6803bcd73b5f35eb5b870cb2e33d32d6c272942 100644 --- a/src/MapleFE/shared/include/rule_summary.h +++ b/src/MapleFE/shared/include/rule_summary.h @@ -1,3 +1,19 @@ +/* +* Copyright (C) [2020-2022] Futurewei Technologies, Inc. All rights reverved. +* Copyright 2022 Tencent. All rights reverved. +* +* MapleFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
+*/ + #ifndef __RULE_SUMMARY_H__ #define __RULE_SUMMARY_H__ #include "ruletable.h" @@ -28,6 +44,7 @@ extern RuleTable TblHEXDIGIT; extern RuleTable TblUTF8; extern RuleTable TblIRREGULAR_CHAR; +extern RuleTable TblNoLineTerminator; extern RuleTable TblTemplateLiteral; extern RuleTable TblRegularExpression; extern RuleTable TblExpression; diff --git a/src/MapleFE/shared/include/stringpool.h b/src/MapleFE/shared/include/stringpool.h index 259e33e6209f4e48f58465f7c7ba4ef7b7cc5cb7..fd8dfee8ab8012e29a2bf5586db57f588e7d1721 100644 --- a/src/MapleFE/shared/include/stringpool.h +++ b/src/MapleFE/shared/include/stringpool.h @@ -23,6 +23,8 @@ #include #include +#include +#include #include #include "massert.h" @@ -45,18 +47,32 @@ private: StringMap *mMap; std::vector mBlocks; int mFirstAvail; // -1 means no available. + bool mUseAltStr; // use alter string std::vector mLongStrings; // for strings longer than block size, // we allocate them by malloc. std::vector mStringTable; + // alternate string which can be used for obfuscation + std::unordered_set mAltStrIdxSet; + std::unordered_map mAltStrIdxMap; + friend class StringMap; public: StringPool(); ~StringPool(); + void SetUseAltStr(bool b) { mUseAltStr = b; } + void AddAltStrIdx(unsigned idx) { mAltStrIdxSet.insert(idx); } + unsigned GetAltStrSize() { return mAltStrIdxSet.size(); } + bool IsAltStrIdx(unsigned idx) { + return mAltStrIdxSet.find(idx) != mAltStrIdxSet.end(); + } + void AddAltStrIdxMap(unsigned orig, unsigned alt) { mAltStrIdxMap[orig] = alt; } + void SetAltStrIdxMap(); + char* AllocBlock(); char* Alloc(const size_t); char* Alloc(const std::string&); @@ -71,12 +87,12 @@ public: unsigned GetStrIdx(const char*); unsigned GetStrIdx(const char*, size_t); - const char *GetStringFromStrIdx(unsigned idx) { - MASSERT(idx < mStringTable.size() && "string index out of range"); - return mStringTable[idx]; - } + unsigned GetSize() {return mStringTable.size();} + + const char *GetStringFromStrIdx(unsigned idx); void Dump(); + void DumpAlt(); }; // Lexing, Parsing, AST Building and IR Building all share one global diff --git a/src/MapleFE/shared/include/succ_match.h b/src/MapleFE/shared/include/succ_match.h index fe78a9ecb27f05f2b4480742b4f3c9998d4556f3..648dc0be294fdb3a04afba79679b5ecb3596c819 100644 --- a/src/MapleFE/shared/include/succ_match.h +++ b/src/MapleFE/shared/include/succ_match.h @@ -1,7 +1,8 @@ /* * Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* Copyright 2022 Tencent. All rights reverved. * -* OpenArkFE is licensed under the Mulan PSL v2. +* MapleFE is licensed under the Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. * You may obtain a copy of Mulan PSL v2 at: * @@ -47,8 +48,8 @@ namespace maplefe { class AppealNode; class SuccMatch { private: - Guamian mNodes; - Guamian mMatches; + GuamianFast mNodes; + GuamianFast mMatches; public: SuccMatch(){} diff --git a/src/MapleFE/shared/include/supported_actions.def b/src/MapleFE/shared/include/supported_actions.def index 1b3f48505e78db1c50ff16b47ee3e1e016e44553..ef4710d3862514ff281d00f5f277cbf93d708a66 100644 --- a/src/MapleFE/shared/include/supported_actions.def +++ b/src/MapleFE/shared/include/supported_actions.def @@ -237,5 +237,7 @@ ACTION(AddTypeParameterExtends) ACTION(BuildNameTypePair) +ACTION(BuildTripleSlash) // TS triple-slash directive. 
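[Editor's note] A hedged sketch of the paired-operation pattern the new GuamianFast container supports; SuccMatch above is the real client. The template-argument order <K, E, D> (key, element, knob data) is an assumption, since the declarations in this patch lost their angle-bracket text:

    GuamianFast<unsigned, unsigned, unsigned> matches;
    matches.PairedFindOrCreateKnob(7);    // knob keyed by a rule's start token
    matches.PairedAddElem(12);            // record a match ending at token 12
    matches.PairedAddElem(15);            // and another candidate ending at token 15
    if (matches.PairedFindElem(15))
      matches.PairedMoveElemToHead(15);   // preferred match; PairedFindFirstElem() returns the head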
+ // This is a special action to pass a child to parent ACTION(PassChild) diff --git a/src/MapleFE/shared/include/supported_operators.def b/src/MapleFE/shared/include/supported_operators.def index b09021d0b2e245794d97cc4a420c3e0898afdc3b..2ffa34dd44fa1f53b361757b863c4d9ae861e8b4 100644 --- a/src/MapleFE/shared/include/supported_operators.def +++ b/src/MapleFE/shared/include/supported_operators.def @@ -81,4 +81,5 @@ OPERATOR(StNe, Binary) OPERATOR(ArrowFunction, Binary) OPERATOR(NullCoalesce, Binary) OPERATOR(NullAssign, Binary) +OPERATOR(TripleSlash, Binary) // triple slash directive diff --git a/src/MapleFE/shared/include/typetable.h b/src/MapleFE/shared/include/typetable.h index 2817fadd4efbb22a9dd34536abdabc411f503f5d..d53b17719156f086fefca6fbc7d2f9ec02844e92 100644 --- a/src/MapleFE/shared/include/typetable.h +++ b/src/MapleFE/shared/include/typetable.h @@ -22,10 +22,12 @@ #define __TYPETABLE_H__ #include +#include #include #include #include "massert.h" #include "ast.h" +#include "ast_type.h" namespace maplefe { @@ -51,6 +53,8 @@ private: std::vector mTypeTable; std::unordered_map mNodeId2TypeIdxMap; std::unordered_map mTypeId2TypeMap; + std::unordered_set mPrimTypeId; + std::unordered_set mFuncTypeIdx; unsigned mPrimSize; unsigned mPreBuildSize; @@ -60,14 +64,23 @@ public: unsigned size() { return mTypeTable.size(); } unsigned GetPreBuildSize() { return mPreBuildSize; } + + bool IsPrimTypeId(TypeId tid) { return mPrimTypeId.find(tid) != mPrimTypeId.end(); } unsigned GetPrimSize() { return mPrimSize; } TreeNode *CreatePrimType(std::string name, TypeId tid); TreeNode *CreateBuiltinType(std::string name, TypeId tid); + + void AddPrimTypeId(TypeId tid); void AddPrimAndBuiltinTypes(); bool AddType(TreeNode *node); + TypeEntry *GetTypeEntryFromTypeIdx(unsigned tidx); TreeNode *GetTypeFromTypeIdx(unsigned tidx); TreeNode *GetTypeFromTypeId(TypeId tid) { return mTypeId2TypeMap[tid]; } + TreeNode *GetTypeFromStrIdx(unsigned strid); + + unsigned GetOrCreateFunctionTypeIdx(FunctionTypeNode *type); + void Dump(); }; diff --git a/src/MapleFE/shared/src/ast.cpp b/src/MapleFE/shared/src/ast.cpp index d0772ef4090bbd156e7069f79e4068d715da7edf..f42e21d7ff0442a6825f5ca3dcb6e92968630f91 100644 --- a/src/MapleFE/shared/src/ast.cpp +++ b/src/MapleFE/shared/src/ast.cpp @@ -1775,7 +1775,7 @@ void ClassNode::Dump(unsigned indent) { ////////////////////////////////////////////////////////////////////////////////////// FunctionNode::FunctionNode() : TreeNode(NK_Function), - mFuncName(NULL), mType(NULL), mBody(NULL), mDims(NULL), + mFuncName(NULL), mRetType(NULL), mBody(NULL), mDims(NULL), mIsConstructor(false), mIsGenerator(false), mIsIterator(false), mIsGetAccessor(false), mIsSetAccessor(false), mIsCallSignature(false), mIsConstructSignature(false), mAssert(NULL) {} @@ -1797,7 +1797,7 @@ void FunctionNode::AddTypeParam(TreeNode *param) { // and parameter types. So languages require Type Erasure at first, like Java. // Type erasure should be done earlier in language specific process. 
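// [Editor's note] The mType -> mRetType rename tracked by these hunks keeps a
// function's return type from being confused with the generic notion of a
// node's type. A hedged sketch of the call-site effect (the node values are
// hypothetical):
//   FunctionNode *f = ...;
//   TreeNode *ret = f->GetRetType();   // was f->GetType()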
bool FunctionNode::OverrideEquivalent(FunctionNode *fun) { - if (!mType->TypeEquivalent(fun->GetType())) + if (!mRetType->TypeEquivalent(fun->GetRetType())) return false; if (GetStrIdx() != fun->GetStrIdx()) return false; @@ -2153,4 +2153,30 @@ void InterfaceNode::Dump(unsigned indent) { } } +void TripleSlashNode::Dump(unsigned indent) { + DumpIndentation(indent); + DUMP0_NORETURN("trip-slash reference "); + + switch(mProp) { + case TSP_Path: + DUMP0_NORETURN("path = "); + break; + case TSP_Types: + DUMP0_NORETURN("types = "); + break; + case TSP_NoDefaultLib: + DUMP0_NORETURN("no-default-lib = "); + break; + case TSP_Lib: + DUMP0_NORETURN("lib = "); + break; + case TSP_NA: + default: + DUMP0_NORETURN("NA = "); + break; + } + + mValue->Dump(0); +} + } diff --git a/src/MapleFE/shared/src/ast_builder.cpp b/src/MapleFE/shared/src/ast_builder.cpp index 8657146ac7d6991cbbd1fc9724777133e07a4f91..016d6a9ae7466c363596d053f3a6e289141f05e6 100644 --- a/src/MapleFE/shared/src/ast_builder.cpp +++ b/src/MapleFE/shared/src/ast_builder.cpp @@ -176,14 +176,14 @@ static void add_type_to(TreeNode *tree, TreeNode *type) { lit->SetType(type); } else if (tree->IsLambda()) { LambdaNode *lam = (LambdaNode*)tree; - lam->SetType(type); + lam->SetRetType(type); } else if (tree->IsVarList()) { VarListNode *vl = (VarListNode*)tree; for (unsigned i = 0; i < vl->GetVarsNum(); i++) vl->GetVarAtIndex(i)->SetType(type); } else if (tree->IsFunction()) { FunctionNode *func = (FunctionNode*)tree; - func->SetType(type); + func->SetRetType(type); } else if (tree->IsBindingPattern()) { BindingPatternNode *bp = (BindingPatternNode*)tree; bp->SetType(type); @@ -1040,6 +1040,8 @@ TreeNode* ASTBuilder::SetIsConst() { } treenode->SetIsConst(); + + mLastTreeNode = treenode; return mLastTreeNode; } @@ -1672,9 +1674,9 @@ TreeNode* ASTBuilder::AddAsType() { if (tree_type) { node->AddAsTypes(tree_type); - mLastTreeNode = node; } + mLastTreeNode = node; return mLastTreeNode; } @@ -3819,7 +3821,7 @@ TreeNode* ASTBuilder::BuildUnionUserType() { // assert, both children cannot be UnionUserType at the same time. MASSERT(!user_type); user_type = ut; - user_type->AddUnionInterType(child_a); + user_type->AddUnionInterType(child_a, true); } } @@ -3865,7 +3867,7 @@ TreeNode* ASTBuilder::BuildInterUserType() { // assert, both children cannot be UnionUserType at the same time. 
MASSERT(!user_type); user_type = ut; - user_type->AddUnionInterType(child_a); + user_type->AddUnionInterType(child_a, true); } } @@ -4256,6 +4258,50 @@ TreeNode* ASTBuilder::BuildInfer() { return mLastTreeNode; } +//////////////////////////////////////////////////////////////////////////////// +// Triple Slash Directive of TypeScript +//////////////////////////////////////////////////////////////////////////////// + +TreeNode* ASTBuilder::BuildTripleSlash() { + if (mTrace) + std::cout << "In BuildTripleSlash" << std::endl; + + Param l_param = mParams[0]; + MASSERT(!l_param.mIsEmpty); + MASSERT(l_param.mIsTreeNode); + TreeNode *left = l_param.mData.mTreeNode; + + Param r_param = mParams[1]; + MASSERT(!r_param.mIsEmpty); + MASSERT(r_param.mIsTreeNode); + TreeNode *right = r_param.mData.mTreeNode; + + TripleSlashNode *tsnode = (TripleSlashNode*)gTreePool.NewTreeNode(sizeof(TripleSlashNode)); + new (tsnode) TripleSlashNode(); + + TripleSlashProp prop = TSP_NA; + if (left->IsIdentifier()) { + // no-default-lib + if ((strlen(left->GetName()) == 14) && !strncmp(left->GetName(), "no-default-lib", 14)) + prop = TSP_NoDefaultLib; + // lib + if ((strlen(left->GetName()) == 3) && !strncmp(left->GetName(), "lib", 3)) + prop = TSP_Lib; + // types + if ((strlen(left->GetName()) == 5) && !strncmp(left->GetName(), "types", 5)) + prop = TSP_Types; + // path + if ((strlen(left->GetName()) == 4) && !strncmp(left->GetName(), "path", 4)) + prop = TSP_Path; + } + tsnode->SetProp(prop); + + tsnode->SetValue(right); + + mLastTreeNode = tsnode; + return mLastTreeNode; +} + //////////////////////////////////////////////////////////////////////////////// // Await //////////////////////////////////////////////////////////////////////////////// diff --git a/src/MapleFE/shared/src/ast_scope.cpp b/src/MapleFE/shared/src/ast_scope.cpp index 7441ddac5ef88164751bc1e475463b9ff510dbc7..f1e5505be67a6b5e36140edc8200f9c4c4f62fd3 100644 --- a/src/MapleFE/shared/src/ast_scope.cpp +++ b/src/MapleFE/shared/src/ast_scope.cpp @@ -31,7 +31,7 @@ void ASTScope::AddChild(ASTScope *s) { // This is to find the decl having the name as stridx // starting from local scope -TreeNode* ASTScope::FindDeclOf(unsigned stridx) { +TreeNode* ASTScope::FindDeclOf(unsigned stridx, bool deep) { ASTScope *scope = this; while (scope) { for (unsigned i = 0; i < scope->GetDeclNum(); i++) { @@ -46,8 +46,8 @@ TreeNode* ASTScope::FindDeclOf(unsigned stridx) { return tree; } } - // search parent scope - scope = scope->mParent; + // search parent scope if deep is set + scope = deep ? 
scope->mParent : NULL;
   }
   return NULL;
 }
diff --git a/src/MapleFE/shared/src/ast_type.cpp b/src/MapleFE/shared/src/ast_type.cpp
index 48744a1850407b0574586ad28d1e4eb1a7da73e7..74ab86984068083463faea968862b181c685d6b0 100644
--- a/src/MapleFE/shared/src/ast_type.cpp
+++ b/src/MapleFE/shared/src/ast_type.cpp
@@ -27,7 +27,7 @@ namespace maplefe {
 //                           UserTypeNode                                //
 //////////////////////////////////////////////////////////////////////////
 
-void UserTypeNode::AddUnionInterType(TreeNode *args) {
+void UserTypeNode::AddUnionInterType(TreeNode *args, bool front) {
   if (args->IsIdentifier() ||
       args->IsPrimType() ||
       args->IsPrimArrayType() ||
@@ -42,8 +42,14 @@
       args->IsKeyOf() ||
       args->IsImport() ||
       args->IsField() ||
+      args->IsTemplateLiteral() ||
       args->IsStruct()) {
     mUnionInterTypes.PushBack(args);
+    if (front) {
+      for(unsigned i = mUnionInterTypes.GetNum() - 1; i > 0; --i)
+        mUnionInterTypes.SetElem(i, mUnionInterTypes.ValueAtIndex(i-1));
+      mUnionInterTypes.SetElem(0, args);
+    }
     SETPARENT(args);
   } else if (args->IsPass()) {
     PassNode *p = (PassNode*)args;
@@ -175,6 +181,29 @@ static TypeId FindPrimTypeId(const char *keyword) {
   return TY_NA;
 }
 
+//////////////////////////////////////////////////////////////////////////
+//                          FunctionTypeNode                             //
+//////////////////////////////////////////////////////////////////////////
+
+bool FunctionTypeNode::IsEqual(FunctionTypeNode *node) {
+  bool result = true;
+  if (node->GetParamsNum() != GetParamsNum()) {
+    result = false;
+  } else {
+    for (unsigned i = 0; i < GetParamsNum(); i++) {
+      if (node->GetParam(i) != GetParam(i)) {
+        result = false;
+        break;
+      }
+    }
+  }
+  return result;
+}
+
+void FunctionTypeNode::Dump(unsigned ind) {
+  DUMP0_NORETURN("functiontype-TBD");
+}
+
 //////////////////////////////////////////////////////////////////////////
 //                           PrimTypeNode                                //
 //////////////////////////////////////////////////////////////////////////
diff --git a/src/MapleFE/shared/src/lexer.cpp b/src/MapleFE/shared/src/lexer.cpp
index ccd8a5a2550bfde4a62a4a98b4df4660e8a2d61f..694f86a8edafedea9ff0584a140a9b0e25777951 100644
--- a/src/MapleFE/shared/src/lexer.cpp
+++ b/src/MapleFE/shared/src/lexer.cpp
@@ -541,6 +541,9 @@ LitData Lexer::GetLiteral() {
 //
 // Return true if a comment is read. The contents are ignored.
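// [Editor's note] A sketch of the intended interplay: for a source line like
//     /// <reference path="jquery.d.ts" />
// an overriding FindTripleSlash() (declared in lexer.h above) makes GetComment()
// below return false, so the line is lexed as ordinary tokens rather than being
// skipped, and the parser can then reduce it via the BuildTripleSlash action
// into a TripleSlashNode.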
bool Lexer::GetComment() { + if (FindTripleSlash()) + return false; + if (line[curidx] == '/' && line[curidx+1] == '/') { curidx = current_line_size; return true; @@ -654,7 +657,9 @@ bool Lexer::TraverseTableData(TableData *data) { // Need to make sure the following text is a separator curidx += strlen(data->mData.mString); if (mCheckSeparator || special_need_check) { - if ((TraverseSepTable() != SEP_NA) || (TraverseOprTable() != OPR_NA)) { + if ((TraverseSepTable() != SEP_NA) || + (TraverseOprTable() != OPR_NA) || + EndOfLine()) { // TraverseSepTable() moves 'curidx', need restore it curidx = old_pos + strlen(data->mData.mString); // Put into gStringPool diff --git a/src/MapleFE/shared/src/parser.cpp b/src/MapleFE/shared/src/parser.cpp index 12e8d953dc4e61a8ad5ddeb8efc18b7c7d9251a4..a3f8139a95310f5d692004b8e687b0ca5c6bbca6 100644 --- a/src/MapleFE/shared/src/parser.cpp +++ b/src/MapleFE/shared/src/parser.cpp @@ -576,14 +576,6 @@ ParseStatus Parser::ParseStmt() { mRootNode = mAppealNodePool.NewAppealNode(); mAppealNodes.push_back(mRootNode); - // mActiveTokens contain some un-matched tokens from last time of TraverseStmt(), - // because at the end of every TraverseStmt() when it finishes its matching it always - // MoveCurToken() which in turn calls LexOneLine() to read new tokens of a new line. - // - // This means in LexOneLine() we also need check if there are already tokens pending. - // - // [TODO] Later on, we will move thoes pending tokens to a separate data structure. - unsigned token_num = LexOneLine(); // No more token, end of file if (!token_num) @@ -806,6 +798,8 @@ void Parser::DumpExitTable(const char *table_name, unsigned indent, std::cout << " fail@WasFailed" << "}" << std::endl; else if (reason == FailNotRightToken) std::cout << " fail@NotRightToken" << "}" << std::endl; + else if (reason == FailNotRightString) + std::cout << " fail@NotRightString" << "}" << std::endl; else if (reason == FailNotIdentifier) std::cout << " fail@NotIdentifer" << "}" << std::endl; else if (reason == FailNotLiteral) @@ -876,6 +870,10 @@ bool Parser::LookAheadFail(RuleTable *rule_table, unsigned token) { if (curr_token->IsTempLit() || curr_token->IsRegExpr()) found = true; } + if (rule_table == &TblNoLineTerminator) { + if (!curr_token->mLineBegin) + found = true; + } } break; case LA_Identifier: @@ -1210,6 +1208,14 @@ bool Parser::TraverseRuleTableRegular(RuleTable *rule_table, AppealNode *appeal) if ((rule_table == &TblRegularExpression)) return TraverseRegularExpression(rule_table, appeal); + if (rule_table == &TblNoLineTerminator) { + Token *token = mActiveTokens.ValueAtIndex(mCurToken); + if (token->mLineBegin) + return false; + else + return true; + } + EntryType type = rule_table->mType; switch(type) { case ET_Oneof: @@ -1260,6 +1266,41 @@ bool Parser::TraverseRuleTableRegular(RuleTable *rule_table, AppealNode *appeal) } } +// Returns 1. true if succ. +// 2. child_node which represents 'token'. 
+bool Parser::TraverseStringSucc(Token *token, AppealNode *parent, AppealNode *&child_node) { + AppealNode *appeal = NULL; + mIndentation += 2; + + if (mTraceTable) { + std::string name = "string:"; + name += token->GetName(); + name += " curr_token matches"; + DumpEnterTable(name.c_str(), mIndentation); + } + + appeal = mAppealNodePool.NewAppealNode(); + child_node = appeal; + mAppealNodes.push_back(appeal); + appeal->SetToken(token); + appeal->SetStartIndex(mCurToken); + appeal->SetParent(parent); + parent->AddChild(appeal); + appeal->mResult = Succ; + appeal->AddMatch(mCurToken); + MoveCurToken(); + + if (mTraceTable) { + std::string name; + name = "string:"; + name += token->GetName(); + DumpExitTable(name.c_str(), mIndentation, appeal); + } + + mIndentation -= 2; + return true; +} + // Returns 1. true if succ. // 2. child_node which represents 'token'. bool Parser::TraverseToken(Token *token, AppealNode *parent, AppealNode *&child_node) { @@ -1668,17 +1709,21 @@ bool Parser::TraverseConcatenate(RuleTable *rule_table, AppealNode *appeal) { bool turned_on_AltToken = false; for (unsigned i = 0; i < rule_table->mNum; i++) { - bool is_zeroxxx = false; + bool is_zeroxxx = false; // If the table is Zeroorxxx(), or NoLineTerminator. + bool no_line_term = false; // If the table is NoLineTerminator + bool no_line_term_met = false; // If the table is NoLineTerminator and token is no line term. bool is_asi = false; bool is_token = false; bool old_mInAltTokensMatching = mInAltTokensMatching; TableData *data = rule_table->mData + i; if (data->mType == DT_Subtable) { - RuleTable *zero_rt = data->mData.mEntry; - if (zero_rt->mType == ET_Zeroormore || zero_rt->mType == ET_Zeroorone) + RuleTable *curr_rt = data->mData.mEntry; + if (curr_rt == &TblNoLineTerminator) + no_line_term = true; + if (curr_rt->mType == ET_Zeroormore || curr_rt->mType == ET_Zeroorone) is_zeroxxx = true; - if (zero_rt->mType == ET_ASI) + if (curr_rt->mType == ET_ASI) is_asi = true; } else if (data->mType == DT_Token) { is_token = true; @@ -1720,11 +1765,18 @@ bool Parser::TraverseConcatenate(RuleTable *rule_table, AppealNode *appeal) { } } + if ((prev_succ_tokens.GetNum() == 1) && no_line_term) { + unsigned prev = prev_succ_tokens.ValueAtIndex(0); + Token *t = GetActiveToken(prev + 1); + if (!t->mLineBegin) + no_line_term_met = true; + } + // for Zeroorone/Zeroormore node it always returns true. NO matter how // many tokens it really matches, 'zero' is also a correct match. we // need take it into account so that the next rule table can try // on it. - if (!is_zeroxxx) + if (!is_zeroxxx && !no_line_term_met) prev_succ_tokens.Clear(); // is_zeroxxx seems redundant because the traversal should always be true. @@ -1806,10 +1858,14 @@ bool Parser::TraverseTableData(TableData *data, AppealNode *appeal, AppealNode * switch (data->mType) { case DT_Char: + MASSERT(0 && "Hit Char in TableData during matching!"); + break; case DT_String: - //MASSERT(0 && "Hit Char/String in TableData during matching!"); - //TODO: Need compare literal. But so far looks like it's impossible to - // have a literal token able to match a string/char in rules. + if (curr_token->IsIdentifier() && + !strncmp(curr_token->GetName(), data->mData.mString, strlen(data->mData.mString)) && + strlen(curr_token->GetName()) == strlen(data->mData.mString) ){ + found = TraverseStringSucc(curr_token, appeal, child_node); + } break; // separator, operator, keywords are generated as DT_Token. 
 // just need to check the pointer of the token
@@ -1854,8 +1910,10 @@ void Parser::SetIsDone(RuleTable *rt, unsigned start_token) {
   SuccMatch *succ = &gSucc[rt->mIndex];
   bool found = succ->GetStartToken(start_token);
-  MASSERT(found);
-  succ->SetIsDone();
+  if (rt != &TblNoLineTerminator) {
+    MASSERT(found);
+    succ->SetIsDone();
+  }
 }
 
 /////////////////////////////////////////////////////////////////////////////
@@ -2243,7 +2301,7 @@ void Parser::SortOutConcatenate(AppealNode *parent) {
     if (!child) {
       if (data->mType == DT_Subtable) {
         RuleTable *table = data->mData.mEntry;
-        if (table->mType == ET_Zeroorone || table->mType == ET_Zeroormore)
+        if (table->mType == ET_Zeroorone || table->mType == ET_Zeroormore || table == &TblNoLineTerminator)
           good_child = true;
         if (table->mType == ET_ASI)
           good_child = true;
diff --git a/src/MapleFE/shared/src/stringpool.cpp b/src/MapleFE/shared/src/stringpool.cpp
index 1c96f4553e687943d4a866b4be67693bcb9dc7ab..3b6e292486e4199e33c3347bc6bc941298d08b41 100644
--- a/src/MapleFE/shared/src/stringpool.cpp
+++ b/src/MapleFE/shared/src/stringpool.cpp
@@ -20,6 +20,7 @@
 #include
 #include
+#include
 #include "stringpool.h"
 #include "stringmap.h"
@@ -31,6 +32,7 @@ namespace maplefe {
 StringPool gStringPool;
 
 StringPool::StringPool() {
+  mUseAltStr = false;
   mMap = new StringMap();
   mMap->SetPool(this);
   mFirstAvail = -1;
@@ -173,11 +175,107 @@ unsigned StringPool::GetStrIdx(const char *str, size_t len) {
   return mMap->LookupEntryFor(s)->GetStrIdx();
 }
 
+const char *StringPool::GetStringFromStrIdx(unsigned idx) {
+  MASSERT(idx < mStringTable.size() && "string index out of range");
+  if (mUseAltStr) {
+    if (mAltStrIdxMap.find(idx) != mAltStrIdxMap.end()) {
+      idx = mAltStrIdxMap[idx];
+    }
+  }
+  return mStringTable[idx];
+}
+
+// This is the public interface to set up AltStrIdxMap, used for obfuscation:
+// a name is mapped to a fixed-length, random, unused name,
+// starting from 2-letter names [a-zA-Z][a-zA-Z], which cover over 2K names:
+// AA Aa AB Ab, ...., zz
+// If that is not enough, it extends to 3-letter or 4-letter names, for over 7 million names.
+void StringPool::SetAltStrIdxMap() {
+  // starting from 2-letter names
+  unsigned len = 2;
+  bool done = false;
+
+  // names use [A-Z] and [a-z], 52 letters in total
+  int k = 52;
+
+  // total number of names that can be handled for len = 4, 3, 2, 1 respectively
+  int Size[4] = {k*k*k*k, k*k*k, k*k, k};
+
+  // name buffer, with trailing '\0'
+  char A[5] = {0, 0, 0, 0, 0};
+
+  // names already encountered, either existing names or new ones
+  std::unordered_set<int> used;
+
+  for (auto stridx : mAltStrIdxSet) {
+    done = false;
+    while (!done) {
+      unsigned offset = 4 - len;
+      int mod = Size[offset];
+
+      int n = rand();
+      int r = n % mod;
+
+      // check if already encountered
+      if (used.find(r) != used.end()) {
+        // expand to use one more letter if close to the limit
+        if (used.size() > mod - Size[offset + 1]) {
+          len++;
+          MASSERT(len < 5 && "Need more names");
+        }
+        continue;
+      }
+
+      // we have an unencountered name
+      used.insert(r);
+
+      int q;
+      bool odd;
+      int i = 0;
+      while (i < len - 1) {
+        mod = Size[offset + 1 + i];
+        q = r / mod;
+        r = r % mod;
+
+        // char, use upper case for odd number
+        odd = q%2;
+        A[i++] = (odd ? 'A' : 'a') + q/2;
+      }
+
+      // last char, use upper case for odd number
+      odd = r%2;
+      A[i] = (odd ?
'A' : 'a') + r/2; + + unsigned size = GetSize(); + unsigned alt = GetStrIdx(A); + // make sure alt is a new string + if (alt == size) { + mAltStrIdxMap[stridx] = alt; + done = true; + } + } + } +} + void StringPool::Dump() { std::cout << "===================== StringTable =====================" << std::endl; for (unsigned idx = 1; idx < mStringTable.size(); idx++) { std::cout << " " << idx << " : " << mStringTable[idx] << std::endl; } } + +void StringPool::DumpAlt() { + std::cout << "================= Alt String Map ======================" << std::endl; + unsigned count = 0; + for (auto stridx : mAltStrIdxSet) { + unsigned alt = mAltStrIdxMap[stridx]; + std::cout << "count #" << stridx + << " str " << GetStringFromStrIdx(stridx) + << " --> " + << " alt " << GetStringFromStrIdx(alt) + << std::endl; + } +} + } diff --git a/src/MapleFE/shared/src/typetable.cpp b/src/MapleFE/shared/src/typetable.cpp index ad58543fd809f6c4f6a31c1204ba28f853c1db07..8a1d8a93e96c53f2c8636eafbdebf5d22e352a57 100644 --- a/src/MapleFE/shared/src/typetable.cpp +++ b/src/MapleFE/shared/src/typetable.cpp @@ -69,7 +69,8 @@ TreeNode *TypeTable::CreateBuiltinType(std::string name, TypeId tid) { unsigned stridx = gStringPool.GetStrIdx(name); IdentifierNode *id = (IdentifierNode*)gTreePool.NewTreeNode(sizeof(IdentifierNode)); new (id) IdentifierNode(stridx); - id->SetTypeId(tid); + // use TY_Class for Object type + (tid == TY_Object) ? id->SetTypeId(TY_Class) : id->SetTypeId(tid); UserTypeNode *utype = (UserTypeNode*)gTreePool.NewTreeNode(sizeof(UserTypeNode)); new (utype) UserTypeNode(id); @@ -82,18 +83,25 @@ TreeNode *TypeTable::CreateBuiltinType(std::string name, TypeId tid) { } bool TypeTable::AddType(TreeNode *node) { - unsigned id = node->GetNodeId(); - if (mNodeId2TypeIdxMap.find(id) != mNodeId2TypeIdxMap.end()) { + unsigned nid = node->GetNodeId(); + if (mNodeId2TypeIdxMap.find(nid) != mNodeId2TypeIdxMap.end()) { return false; } - unsigned tid = mTypeTable.size(); - mNodeId2TypeIdxMap[id] = tid; - node->SetTypeIdx(tid); + unsigned tidx = mTypeTable.size(); + mNodeId2TypeIdxMap[nid] = tidx; + node->SetTypeIdx(tidx); + if (node->IsUserType()) { + static_cast(node)->GetId()->SetTypeIdx(tidx); + } TypeEntry *entry = new TypeEntry(node); mTypeTable.push_back(entry); return true; } +void TypeTable::AddPrimTypeId(TypeId tid) { + mPrimTypeId.insert(tid); +} + #undef TYPE #undef PRIMTYPE void TypeTable::AddPrimAndBuiltinTypes() { @@ -109,7 +117,7 @@ void TypeTable::AddPrimAndBuiltinTypes() { // first are primitive types, and their typeid TY_Xyz is their typeidx as well #define TYPE(T) -#define PRIMTYPE(T) node = CreatePrimType(#T, TY_##T); AddType(node); +#define PRIMTYPE(T) node = CreatePrimType(#T, TY_##T); AddType(node); AddPrimTypeId(TY_##T); #include "supported_types.def" // add additional primitive types for number and string PRIMTYPE(Number); @@ -117,7 +125,7 @@ void TypeTable::AddPrimAndBuiltinTypes() { mPrimSize = size(); -#define TYPE(T) node = CreateBuiltinType(#T, TY_##T); AddType(node); +#define TYPE(T) node = CreateBuiltinType(#T, TY_##T); AddType(node); #define PRIMTYPE(T) // additional usertype Boolean TYPE(Boolean); @@ -127,14 +135,46 @@ void TypeTable::AddPrimAndBuiltinTypes() { return; } -TypeEntry *TypeTable::GetTypeEntryFromTypeIdx(unsigned idx) { - MASSERT(idx < mTypeTable.size() && "type index out of range"); - return mTypeTable[idx]; +TypeEntry *TypeTable::GetTypeEntryFromTypeIdx(unsigned tidx) { + MASSERT(tidx < mTypeTable.size() && "type index out of range"); + return mTypeTable[tidx]; +} + +TreeNode 
*TypeTable::GetTypeFromTypeIdx(unsigned tidx) { + MASSERT(tidx < mTypeTable.size() && "type index out of range"); + return mTypeTable[tidx]->GetType(); +} + +TreeNode *TypeTable::GetTypeFromStrIdx(unsigned stridx) { + for (auto entry : mTypeTable) { + TreeNode *node = entry->GetType(); + if (node && node->GetStrIdx() == stridx) { + return node; + } + } + return NULL; } -TreeNode *TypeTable::GetTypeFromTypeIdx(unsigned idx) { - MASSERT(idx < mTypeTable.size() && "type index out of range"); - return mTypeTable[idx]->GetType(); +unsigned TypeTable::GetOrCreateFunctionTypeIdx(FunctionTypeNode *node) { + for (auto tidx: mFuncTypeIdx) { + TreeNode *type = GetTypeFromTypeIdx(tidx); + FunctionTypeNode *functype = static_cast(type); + bool found = functype->IsEqual(node); + if (found) { + return tidx; + } + } + bool status = AddType(node); + MASSERT(status && "failed to add a functiontype"); + unsigned tidx = node->GetTypeIdx(); + mFuncTypeIdx.insert(tidx); + + std::string str("FuncType__"); + str += std::to_string(tidx); + unsigned stridx = gStringPool.GetStrIdx(str); + node->SetStrIdx(stridx); + + return tidx; } void TypeTable::Dump() { diff --git a/src/MapleFE/shared/src/vfy.cpp b/src/MapleFE/shared/src/vfy.cpp index cb03fb5439385aabf65f7bf4684633acb8c0cb85..07809d0f89ed58d2191117ee254c9fe0ca949449 100644 --- a/src/MapleFE/shared/src/vfy.cpp +++ b/src/MapleFE/shared/src/vfy.cpp @@ -568,4 +568,12 @@ void Verifier::VerifyArrayType(ArrayTypeNode *tree){ return; } +void Verifier::VerifyTripleSlash(TripleSlashNode *tree){ + return; +} + +void Verifier::VerifyFunctionType(FunctionTypeNode *tree){ + return; +} + } diff --git a/src/MapleFE/test/astdump.sh b/src/MapleFE/test/astdump.sh index db62279ca83707ad71e16a7c3827851c59a1cebb..2260c58f95c703efdf91b6bb871d883f67219a3f 100755 --- a/src/MapleFE/test/astdump.sh +++ b/src/MapleFE/test/astdump.sh @@ -110,8 +110,6 @@ for ts in $LIST; do T=$(sed -e "s/\(.*\)\(\.d\)\(\.ts-$PROCID.out\)/\1\2\3\2/" <<< "$ts-$PROCID.out.ts") eval $cmd <<< "$out" > "$T" [ -z "$NAME" ] || sed -i 's/__v[0-9][0-9]*//g' "$T" - clang-format-10 -i --style="{ColumnLimit: 120, JavaScriptWrapImports: false, AlignOperands: false}" "$T" - sed -i -e 's/?? =/??=/g' -e 's/ int\[/ number[/g' "$T" echo -e "\n====== TS Reformatted ======\n" $HIGHLIGHT "$T" echo TREEDIFF=$TREEDIFF @@ -132,8 +130,6 @@ for ts in $LIST; do [ -n "$KEEP" ] || rm -f "$T" else cp $ts $ts.tmp.ts - clang-format-10 -i --style="{ColumnLimit: 120, JavaScriptWrapImports: false, AlignOperands: false, JavaScriptQuotes: Double}" $ts.tmp.ts - sed -i 's/?? =/??=/g' $ts.tmp.ts $TS2AST $ts.tmp.ts if [ $? -eq 0 ]; then $AST2CPP $ts.tmp.ts.ast $TREEDIFF | sed -n '/^AstDump:/,/^}/p' | sed 's/\(mStrIdx: unsigned int, \)[0-9]* =>/\1=>/' diff --git a/src/MapleFE/test/typescript/ms_tests/checkInterfaceBases.ts.result b/src/MapleFE/test/typescript/ms_tests/checkInterfaceBases.ts.result index 73efc592e98fb5a4c189e1b0bf162db5ebfcec9a..06e1681196983bfca57f375df768dced69a0d430 100644 --- a/src/MapleFE/test/typescript/ms_tests/checkInterfaceBases.ts.result +++ b/src/MapleFE/test/typescript/ms_tests/checkInterfaceBases.ts.result @@ -1,10 +1,13 @@ Matched 16 tokens. Matched 24 tokens. Matched 32 tokens. +Matched 40 tokens. 
============= Module =========== == Sub Tree == ts_interface: JQueryEventObjectTest {data;which;metaKey } == Sub Tree == +trip-slash reference path = "jquery.d.ts" +== Sub Tree == ts_interface: SecondEvent {data } == Sub Tree == ts_interface: Third { } diff --git a/src/MapleFE/test/typescript/unit_tests/class6.ts b/src/MapleFE/test/typescript/unit_tests/class6.ts new file mode 100644 index 0000000000000000000000000000000000000000..ad924fc06795ae44aa9fc447ddf7a8480482c9f2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class6.ts @@ -0,0 +1,20 @@ +class Klass { + public num: number = 1; + + if(n: number): boolean { + return this.num == n; + } + + try(n: number): void { + if(n == this.num) + console.log("EQ"); + else + console.log("NE"); + } +} + +var obj: Klass = new Klass(); +console.log(obj.if(0)); +console.log(obj.if(1)); +obj.try(0); +obj.try(1); diff --git a/src/MapleFE/test/typescript/unit_tests/class6.ts.result b/src/MapleFE/test/typescript/unit_tests/class6.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..08a20f91d9cb7bde8eba40afc7635cd73adeb7ca --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class6.ts.result @@ -0,0 +1,34 @@ +Matched 61 tokens. +Matched 71 tokens. +Matched 83 tokens. +Matched 95 tokens. +Matched 102 tokens. +Matched 109 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + num=1 + Instance Initializer: + Constructors: + Methods: + func if(n) throws: + return this.num EQ n + func try(n) throws: + cond-branch cond:n EQ this.num + true branch : + console.log("EQ") false branch : + console.log("NE") + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +console.log(obj.if(0)) +== Sub Tree == +console.log(obj.if(1)) +== Sub Tree == +obj.try(0) +== Sub Tree == +obj.try(1) diff --git a/src/MapleFE/test/typescript/unit_tests/else-as-prop-name.ts b/src/MapleFE/test/typescript/unit_tests/else-as-prop-name.ts new file mode 100644 index 0000000000000000000000000000000000000000..dd9965a57cfc8382a32c2ae6a50b371fa371ef6d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/else-as-prop-name.ts @@ -0,0 +1,6 @@ +class Klass { + else: number = 0; +} + +var obj: Klass = new Klass(); +console.log(obj, obj.else); diff --git a/src/MapleFE/test/typescript/unit_tests/else-as-prop-name.ts.result b/src/MapleFE/test/typescript/unit_tests/else-as-prop-name.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..b14f7e7d22946f638f60fdd316acf89110a295ee --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/else-as-prop-name.ts.result @@ -0,0 +1,18 @@ +Matched 10 tokens. +Matched 20 tokens. +Matched 31 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + else=0 + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +console.log(obj,obj.else) diff --git a/src/MapleFE/test/typescript/unit_tests/import-ambient-module.ts.result b/src/MapleFE/test/typescript/unit_tests/import-ambient-module.ts.result index 42db4d50cd1948ed6342f61d5646f75724f35a84..c71e900f9a1c38da2856901eb5858a378b61611c 100644 --- a/src/MapleFE/test/typescript/unit_tests/import-ambient-module.ts.result +++ b/src/MapleFE/test/typescript/unit_tests/import-ambient-module.ts.result @@ -1,7 +1,10 @@ -Matched 7 tokens. -Matched 14 tokens. +Matched 8 tokens. +Matched 15 tokens. +Matched 22 tokens. 
============= Module =========== == Sub Tree == +trip-slash reference path = "import-in-module.ts" +== Sub Tree == import {X} "M1" == Sub Tree == import {NS} "M2" diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing15.ts b/src/MapleFE/test/typescript/unit_tests/semicolon-missing15.ts new file mode 100644 index 0000000000000000000000000000000000000000..a21a6ce8a1190aa1bc6a8f85e5d85179ee1dbbb8 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing15.ts @@ -0,0 +1,16 @@ +var n: number = 1; +switch (true) { + case n < 5: + console.log(n, " is less than 5"); + case n > 2 && n < 5: + console.log(n, " + 1 is equal to", n + 1); + break; + case n == 6: + console.log(n, " is equal to 6"); + break; + case n < 8: + console.log(n, " is greater than 4 and less than 8"); + break + default: + console.log(n, " is greater than 7"); +} diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing15.ts.result b/src/MapleFE/test/typescript/unit_tests/semicolon-missing15.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..78c460f980018fd17fd60b48153b7b3b98ca4a9e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing15.ts.result @@ -0,0 +1,8 @@ +Matched 7 tokens. +Matched 93 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: n=1 +== Sub Tree == +A switch + diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing16.ts b/src/MapleFE/test/typescript/unit_tests/semicolon-missing16.ts new file mode 100644 index 0000000000000000000000000000000000000000..3caad616a659fe6951afcd48192e7063c6a0f87c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing16.ts @@ -0,0 +1,8 @@ +function func(arg: number): number | undefined { + if(arg < 1) return + for(let i = 0; i < arg; i++) + console.log(i); + return arg * 10; +} +console.log(func(3)); + diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing16.ts.result b/src/MapleFE/test/typescript/unit_tests/semicolon-missing16.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..5a85dd3971104c3fd5b7e648634c0a1e7b0ff316 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing16.ts.result @@ -0,0 +1,15 @@ +Matched 46 tokens. +Matched 56 tokens. +============= Module =========== +== Sub Tree == +func func(arg) throws: + cond-branch cond:arg LT 1 + true branch : + return false branch : + + for ( ) + console.log(i) + return arg Mul 10 + +== Sub Tree == +console.log(func(3)) diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing17.ts b/src/MapleFE/test/typescript/unit_tests/semicolon-missing17.ts new file mode 100644 index 0000000000000000000000000000000000000000..f5c4aff91fda35a3bfecf34ccd70dfe72cdf7624 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing17.ts @@ -0,0 +1,9 @@ +function func(arg: number): number | undefined { + for(let i = 0; i < arg; i++) { + if(i % 2 > 0) continue + console.log(i); + } + return arg * 10; +} +console.log(func(5)); + diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing17.ts.result b/src/MapleFE/test/typescript/unit_tests/semicolon-missing17.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..6de52c4b72ce0abb85198d4eeeaf62bc051bb110 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing17.ts.result @@ -0,0 +1,17 @@ +Matched 50 tokens. +Matched 60 tokens. 
+============= Module =========== +== Sub Tree == +func func(arg) throws: + for ( ) + cond-branch cond:i Mod 2 GT 0 + true branch : + continue: + false branch : + + console.log(i) + + return arg Mul 10 + +== Sub Tree == +console.log(func(5)) diff --git a/src/MapleFE/test/typescript/unit_tests/template-literal-type.ts b/src/MapleFE/test/typescript/unit_tests/template-literal-type.ts new file mode 100644 index 0000000000000000000000000000000000000000..073bae7598a49870fb29a75da43f6c479ed99402 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literal-type.ts @@ -0,0 +1,12 @@ +class Num { + neg: boolean = false; + val: number = 0; +} + +function func(v: Num): `${string}n` | `-${string}n` { + return `${v.neg ? '-' : ''}${v.val}n`; +} + +var obj : Num = {neg: true, val: 123}; +console.log(func(obj)); +console.log(typeof func(obj)); diff --git a/src/MapleFE/test/typescript/unit_tests/template-literal-type.ts.result b/src/MapleFE/test/typescript/unit_tests/template-literal-type.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..70ca48c63fb79ef537785175df99a5d33a80e5f2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literal-type.ts.result @@ -0,0 +1,30 @@ +Matched 16 tokens. +Matched 32 tokens. +Matched 47 tokens. +Matched 57 tokens. +Matched 68 tokens. +Matched 69 tokens. +Matched 70 tokens. +Matched 77 tokens. +Matched 80 tokens. +============= Module =========== +== Sub Tree == +class Num + Fields: + neg=false val=0 + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +func func(v) throws: + return template-literal: NULL,v.neg ? "-" : "",NULL,v.val,"n",NULL + +== Sub Tree == +js_var Decl: obj= {neg:true, val:123} +== Sub Tree == +console.log(func(obj)) +== Sub Tree == +console.log( typeof func(obj)) diff --git a/src/MapleFE/test/typescript/unit_tests/triple-slash-comment.ts b/src/MapleFE/test/typescript/unit_tests/triple-slash-comment.ts new file mode 100644 index 0000000000000000000000000000000000000000..29e5a31ca3b3a3ba94f49fbc89206d3bd06ec68c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/triple-slash-comment.ts @@ -0,0 +1,10 @@ +enum ET { + TOP = "top", /// < top string + BOTTOM = "bottom", ///< bottom string +} + +/// +/// + +let et = ET.TOP; +console.log(et); diff --git a/src/MapleFE/test/typescript/unit_tests/triple-slash-comment.ts.result b/src/MapleFE/test/typescript/unit_tests/triple-slash-comment.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..e4afff30c02415e10dfeab2420a63675b1286d5d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/triple-slash-comment.ts.result @@ -0,0 +1,10 @@ +Matched 12 tokens. +Matched 19 tokens. +Matched 26 tokens. +============= Module =========== +== Sub Tree == +ts_enum: ET {TOP="top";BOTTOM="bottom" } +== Sub Tree == +js_let Decl: et=ET.TOP +== Sub Tree == +console.log(et) diff --git a/src/MapleFE/test/typescript/unit_tests/triple-slash-dir.d.ts.result b/src/MapleFE/test/typescript/unit_tests/triple-slash-dir.d.ts.result index f9052bd4b2db8b75cc78e04aad0acd979d29b938..0e74c743cd4c0c3b70270bc58d388ad13603002f 100644 --- a/src/MapleFE/test/typescript/unit_tests/triple-slash-dir.d.ts.result +++ b/src/MapleFE/test/typescript/unit_tests/triple-slash-dir.d.ts.result @@ -1 +1,7 @@ +Matched 8 tokens. +Matched 16 tokens. 
============= Module =========== +== Sub Tree == +trip-slash reference no-default-lib = "true" +== Sub Tree == +trip-slash reference lib = "es5" diff --git a/src/MapleFE/test/typescript/unit_tests/var-as-prop-name.ts b/src/MapleFE/test/typescript/unit_tests/var-as-prop-name.ts new file mode 100644 index 0000000000000000000000000000000000000000..bd97f5cab3b2af51e3f45bca1c7320496a53b5ff --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/var-as-prop-name.ts @@ -0,0 +1,10 @@ +const obj = { + else() { + return this; + }, + var() { + console.log("var"); + } +} + +obj.else().var(); diff --git a/src/MapleFE/test/typescript/unit_tests/var-as-prop-name.ts.result b/src/MapleFE/test/typescript/unit_tests/var-as-prop-name.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..7fcef6b23a7beaf3d03a4115b40cfebcf5da9d1f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/var-as-prop-name.ts.result @@ -0,0 +1,11 @@ +Matched 26 tokens. +Matched 36 tokens. +============= Module =========== +== Sub Tree == +js_const Decl: obj= {else:func else() throws: + return this +, var:func var() throws: + console.log("var") +} +== Sub Tree == +obj.else().var() diff --git a/src/MapleFE/tools/obfuscate/include/obfuscate.h b/src/MapleFE/tools/obfuscate/include/obfuscate.h new file mode 100644 index 0000000000000000000000000000000000000000..63574ba6d02657eb5293605a36236e4a14374fe3 --- /dev/null +++ b/src/MapleFE/tools/obfuscate/include/obfuscate.h @@ -0,0 +1,52 @@ +/* +* Copyright (C) [2022] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ + +////////////////////////////////////////////////////////////////////////////////////////////// +// This is the interface to translate AST to C++ +////////////////////////////////////////////////////////////////////////////////////////////// + +#ifndef __OBFUSCATE_HEADER__ +#define __OBFUSCATE_HEADER__ + +#include "astopt.h" +#include "ast_handler.h" +#include "ast_module.h" + +namespace maplefe { + +class Obfuscate : public AstOpt { +private: + AST_Handler *mASTHandler; + unsigned mFlags; + unsigned mIndexImported; + +public: + explicit Obfuscate(AST_Handler *h, unsigned flags) : + AstOpt(h, flags), + mASTHandler(h), + mFlags(flags), + mIndexImported(0) {} + ~Obfuscate() = default; + + void EmitTS(); + bool LoadImportedModules(); + + // return 0 if successful + // return non-zero if failed + int ProcessAST(); +}; + +} +#endif diff --git a/src/MapleFE/tools/obfuscate/src/main.cpp b/src/MapleFE/tools/obfuscate/src/main.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0865f8545ed5116743448c3bb710b7e33cb9289a --- /dev/null +++ b/src/MapleFE/tools/obfuscate/src/main.cpp @@ -0,0 +1,109 @@ +/* +* Copyright (C) [2022] Futurewei Technologies, Inc. All rights reverved. +* Copyright (C) [2022] Tencent. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. 
+* You may obtain a copy of Mulan PSL v2 at:
+*
+*  http://license.coscl.org.cn/MulanPSL2
+*
+* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+* FIT FOR A PARTICULAR PURPOSE.
+* See the Mulan PSL v2 for more details.
+*/
+
+#include
+#include
+#include
+#include "gen_astload.h"
+#include "ast_handler.h"
+#include "obfuscate.h"
+
+static void help() {
+  std::cout << "obfuscate a.ast[,b.ast] [options]:" << std::endl;
+  std::cout << "   --out=x.ts      : ts output file" << std::endl;
+  std::cout << "   --help          : print this help" << std::endl;
+  std::cout << "   --trace=n       : Emit trace with 4-bit combo levels 1...15" << std::endl;
+  std::cout << "                     1 : Emit ast tree visits" << std::endl;
+  std::cout << "                     2 : Emit graph" << std::endl;
+  std::cout << "   --emit-ts-only  : Emit ts code only" << std::endl;
+  std::cout << "   --emit-ts       : Emit ts code" << std::endl;
+  std::cout << "   --format-cpp    : Format cpp" << std::endl;
+  std::cout << "   --no-imported   : Do not process the imported modules" << std::endl;
+  std::cout << "default out name uses the first input name: a.ts" << std::endl;
+}
+
+int main (int argc, char *argv[]) {
+  if (argc == 1 || (!strncmp(argv[1], "--help", 6) && (strlen(argv[1]) == 6))) {
+    help();
+    exit(-1);
+  }
+
+  unsigned flags = 0;
+  // one or more input .ast files separated by ','
+  const char *inputname = argv[1];
+  // output .ast file
+  const char *outputname = nullptr;
+
+  // Parse the argument
+  for (unsigned i = 2; i < argc; i++) {
+    if (!strncmp(argv[i], "--trace=", 8)) {
+      int val = atoi(argv[i] + 8);
+      if (val < 1 || val > 15) {
+        help();
+        exit(-1);
+      }
+      flags |= val;
+    } else if (!strncmp(argv[i], "--emit-ts-only", 14)) {
+      flags |= maplefe::FLG_emit_ts_only;
+    } else if (!strncmp(argv[i], "--emit-ts", 9)) {
+      flags |= maplefe::FLG_emit_ts;
+    } else if (!strncmp(argv[i], "--format-cpp", 12)) {
+      flags |= maplefe::FLG_format_cpp;
+    } else if (!strncmp(argv[i], "--no-imported", 13)) {
+      flags |= maplefe::FLG_no_imported;
+    } else if (!strncmp(argv[i], "--in=", 5)) {
+      inputname = argv[i]+5;
+    } else if (!strncmp(argv[i], "--out=", 6)) {
+      outputname = argv[i]+6;
+    } else {
+      std::cerr << "unknown option " << argv[i] << std::endl;
+      exit(-1);
+    }
+  }
+
+  // input ast files
+  std::vector<std::string> inputfiles;
+  if (inputname) {
+    std::stringstream ss;
+    ss.str(inputname);
+    std::string item;
+    while (std::getline(ss, item, ',')) {
+      // std::cout << "item " << item << " xxx"<< std::endl;
+      inputfiles.push_back(item);
+    }
+  }
+
+  unsigned trace = (flags & maplefe::FLG_trace);
+  maplefe::AST_Handler handler(trace);
+  for (auto astfile: inputfiles) {
+    std::ifstream input(astfile, std::ifstream::binary);
+    input >> std::noskipws;
+    std::istream_iterator<uint8_t> s(input), e;
+    maplefe::AstBuffer vec(s, e);
+    maplefe::AstLoad loadAst;
+    maplefe::ModuleNode *mod = loadAst.LoadFromAstBuf(vec);
+    // add mod to the vector
+    while(mod) {
+      handler.AddModule(mod);
+      mod = loadAst.Next();
+    }
+  }
+
+  maplefe::Obfuscate *obfuscate = new maplefe::Obfuscate(&handler, flags);
+  int res = obfuscate->ProcessAST();
+
+  return res;
+}
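[Editor's note] A hedged sketch, hypothetical driver code rather than patch content, of how the obfuscator is expected to drive the new alternate-string API of StringPool shown earlier:

    unsigned idx = gStringPool.GetStrIdx("secretHelper");  // name to obfuscate
    gStringPool.AddAltStrIdx(idx);     // mark it for renaming
    gStringPool.SetAltStrIdxMap();     // map it to a random unused short name, e.g. "Qz"
    gStringPool.SetUseAltStr(true);    // from now on emitters see the alias
    const char *s = gStringPool.GetStringFromStrIdx(idx);  // returns the alias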
All rights reverved. +* Copyright (C) [2022] Tencent. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ + +#include +#include +#include +#include +#include + +#include "obfuscate.h" +#include "ast_handler.h" +#include "gen_astdump.h" +#include "gen_astgraph.h" +#include "gen_aststore.h" +#include "gen_astload.h" +#include "cpp_definition.h" +#include "cpp_declaration.h" +#include "a2c_util.h" + +namespace maplefe { + +bool Obfuscate::LoadImportedModules() { + std::queue queue; + for (HandlerIndex i = 0; i < GetModuleNum(); i++) { + Module_Handler *handler = mASTHandler->GetModuleHandler(i); + ModuleNode *module = handler->GetASTModule(); + ImportedFiles imported(module); + imported.VisitTreeNode(module); + for(const auto &e: imported.mFilenames) + queue.push(e); + } + + bool err = false; + while(!queue.empty()) { + std::string filename = queue.front(); + queue.pop(); + if(mASTHandler->GetHandlerIndex(filename.c_str()) == HandlerNotFound) { + std::ifstream input(filename, std::ifstream::binary); + if(input.fail()) { + std::cerr << "Error: File " << filename << " not found for imported module" << std::endl; + err = true; + continue; + } + input >> std::noskipws; + std::istream_iterator s(input), e; + maplefe::AstBuffer vec(s, e); + maplefe::AstLoad loadAst; + maplefe::ModuleNode *mod = loadAst.LoadFromAstBuf(vec); + // add mod to the vector + while(mod) { + mASTHandler->AddModule(mod); + ImportedFiles imported(mod); + imported.VisitTreeNode(mod); + for(const auto &e: imported.mFilenames) + queue.push(e); + mod = loadAst.Next(); + } + } + } + return err; +} + +// starting point of AST +int Obfuscate::ProcessAST() { + mIndexImported = GetModuleNum(); + + // load all imported modules + if (!(mFlags & FLG_no_imported)) { + if (LoadImportedModules()) { + return 1; + } + } + + // loop through module handlers + for (HandlerIndex i = 0; i < GetModuleNum(); i++) { + Module_Handler *handler = mASTHandler->GetModuleHandler(i); + ModuleNode *module = handler->GetASTModule(); + + if (mFlags & FLG_trace_1) { + std::cout << "============= in ProcessAST ===========" << std::endl; + std::cout << "srcLang : " << module->GetSrcLangString() << std::endl; + + for(unsigned k = 0; k < module->GetTreesNum(); k++) { + TreeNode *tnode = module->GetTree(k); + if (mFlags & FLG_trace_1) { + tnode->Dump(0); + std::cout << std::endl; + } + } + } + + if (mFlags & FLG_trace_2) { + std::cout << "============= AstGraph ===========" << std::endl; + AstGraph graph(module); + graph.DumpGraph("After LoadFromAstBuf()", &std::cout); + } + } + + // build dependency of modules + PreprocessModules(); + + // loop through module handlers in import/export dependency order + for (auto handler: mHandlersInOrder) { + ModuleNode *module = handler->GetASTModule(); + + // basic analysis + handler->BasicAnalysis(); + + if (mFlags & FLG_trace_2) { + std::cout << "============= After BasicAnalysis ===========" << std::endl; + for(unsigned k = 0; k < module->GetTreesNum(); k++) { + TreeNode *tnode = module->GetTree(k); + if (mFlags & FLG_trace_1) { + tnode->Dump(0); + std::cout << std::endl; + } 
+ } + AstGraph graph(module); + graph.DumpGraph("After BasicAnalysis()", &std::cout); + } + } + + if (mFlags & FLG_trace_3) { + gStringPool.Dump(); + } + + gStringPool.SetAltStrIdxMap(); + + if (mFlags & FLG_trace_3) { + gStringPool.Dump(); + gStringPool.DumpAlt(); + } + + gStringPool.SetUseAltStr(true); + + for (auto handler: mHandlersInOrder) { + ModuleNode *module = handler->GetASTModule(); + + std::cout << "============= Emitter ===========" << std::endl; + maplefe::Emitter emitter(handler); + std::string code = emitter.Emit("Convert AST to TypeScript code"); + + // Emit to file + std::string of_name(module->GetFilename()); + of_name += ".obf"; + std::ofstream ofs; + ofs.open(of_name.c_str(), std::ofstream::out); + ofs << code; + ofs.close(); + } + + return 0; +} + +} // namespace maplefe diff --git a/src/MapleFE/typescript/identifier.spec b/src/MapleFE/typescript/identifier.spec index 6cfe3baf6e1b7b01004cdd387139257599dd0990..5723b480b3cd59e04d86d6d935f9bd949971c926 100644 --- a/src/MapleFE/typescript/identifier.spec +++ b/src/MapleFE/typescript/identifier.spec @@ -16,4 +16,5 @@ # rule JavaChar : ONEOF(CHAR, '_' , '$') rule CharOrDigit : ONEOF(JavaChar, DIGIT) -rule Identifier : JavaChar + ZEROORMORE(CharOrDigit) +rule Identifier : ONEOF("no-default-lib", + JavaChar + ZEROORMORE(CharOrDigit)) diff --git a/src/MapleFE/typescript/include/lang_builtin.def b/src/MapleFE/typescript/include/lang_builtin.def index 3b529b2cbb3922b4cb24343f2f10422991afe1ee..791452525da5c9d65a88608d27840c712bfd885e 100644 --- a/src/MapleFE/typescript/include/lang_builtin.def +++ b/src/MapleFE/typescript/include/lang_builtin.def @@ -37,7 +37,6 @@ BUILTIN(Int32Array) BUILTIN(Int8Array) BUILTIN(InternalError (Mozilla only)) BUILTIN(Map) -BUILTIN(Math) BUILTIN(Number) BUILTIN(Object) BUILTIN(Promise) diff --git a/src/MapleFE/typescript/include/lang_spec.h b/src/MapleFE/typescript/include/lang_spec.h index 929de87c24b46706d9964306411a854a052ce0e2..96aef8bf12195fd32df8cf8630e6beb6632f8c9a 100644 --- a/src/MapleFE/typescript/include/lang_spec.h +++ b/src/MapleFE/typescript/include/lang_spec.h @@ -49,6 +49,7 @@ public: TempLitData* GetTempLit(); bool FindNextTLFormat(unsigned start, std::string& s, unsigned& end); bool FindNextTLPlaceHolder(unsigned start, std::string& s, unsigned& end); + bool FindTripleSlash(); }; //////////////////////////////////////////////////////////////////////////////////// diff --git a/src/MapleFE/typescript/operator.spec b/src/MapleFE/typescript/operator.spec index 48d5037f5a9897174513ea3644d3a34ffcabee28..d09db223d3cdc7069e1300fde8861a08f6ff9b8d 100644 --- a/src/MapleFE/typescript/operator.spec +++ b/src/MapleFE/typescript/operator.spec @@ -72,5 +72,8 @@ STRUCT Operator : ( ("|=", BorAssign), ("^=", BxorAssign), (">>>=", ZextAssign), + + ("///", TripleSlash), + # arrow function ("=>", ArrowFunction)) diff --git a/src/MapleFE/typescript/src/lang_spec.cpp b/src/MapleFE/typescript/src/lang_spec.cpp index b2022817b90b2ef798909c39710dcb1cc7649f29..d51d1f312d0184badb28eac07e422c6a24a65632 100644 --- a/src/MapleFE/typescript/src/lang_spec.cpp +++ b/src/MapleFE/typescript/src/lang_spec.cpp @@ -394,6 +394,37 @@ bool TypescriptLexer::FindNextTLPlaceHolder(unsigned start_idx, std::string& str return true; } +// This is to catch TS triple-slash directives : /// ' + MemberExpression, - PrimaryExpression + "as" + "const", MemberExpression + '.' 
+ KeywordPropName) attr.action.%1 : AddAsType(%1, %2) attr.action.%2 : BuildArrayElement(%1, %3) @@ -407,12 +403,9 @@ rule MemberExpression : ONEOF( attr.action.%8 : AddAsType(%6) attr.action.%10: BuildArrayElement(%1, %3) attr.action.%11: SetIsNonNull(%1) - attr.action.%12: BuildField(%1, %3) - attr.action.%12: SetIsConst() + attr.action.%12: SetIsConst(%1) attr.action.%13: BuildCast(%2, %4) - attr.action.%14: PassChild(%1) - attr.action.%14: SetIsConst() - attr.action.%15 : BuildField(%1, %3) + attr.action.%14 : BuildField(%1, %3) rule IsExpression: ONEOF(PrimaryExpression + "is" + Type, ArrowFunction + "is" + Type) @@ -472,7 +465,8 @@ rule CallExpression : ONEOF( "set" + ZEROORONE(TypeArguments) + Arguments + ZEROORMORE(AsType), "get" + ZEROORONE(TypeArguments) + Arguments + ZEROORMORE(AsType), CallExpression + "?." + Arguments + ZEROORMORE(AsType), - ImportFunction) + ImportFunction, + CallExpression + '.' + KeywordPropName + ZEROORMORE(AsType)) attr.action.%1,%3,%10,%11 : BuildCall(%1) attr.action.%1,%10,%11 : AddAsType(%4) attr.action.%1,%10,%11 : AddTypeGenerics(%2) @@ -481,8 +475,8 @@ rule CallExpression : ONEOF( attr.action.%3 : AddAsType(%3) attr.action.%4 : BuildArrayElement(%1, %3) attr.action.%4 : AddAsType(%5) - attr.action.%5 : BuildField(%1, %3) - attr.action.%5 : AddAsType(%4) + attr.action.%5,%14 : BuildField(%1, %3) + attr.action.%5,%14 : AddAsType(%4) attr.action.%7 : SetIsNonNull(%1) attr.action.%7 : AddAsType(%1, %3) attr.action.%8 : SetIsOptional(%1) @@ -790,6 +784,12 @@ rule Expression : ONEOF( # Statements #------------------------------------------------------------------------------- +rule TripleSlash : ONEOF( "///" + '<' + "reference" + "path" + '=' + Literal + '/' + '>', + "///" + '<' + "reference" + "types" + '=' + Literal + '/' + '>', + "///" + '<' + "reference" + "lib" + '=' + Literal + '/' + '>', + "///" + '<' + "reference" + "no-default-lib" + '=' + Literal + '/' + '>') + attr.action.%1,%2,%3,%4 : BuildTripleSlash(%4, %6) + ##----------------------------------- ##rule Statement[Yield, Return] : ## BlockStatement[?Yield, ?Return] @@ -820,7 +820,8 @@ rule Statement : ONEOF( # WithStatement[?Yield, ?Return] LabelledStatement, ThrowStatement, - TryStatement) + TryStatement, + TripleSlash) # DebuggerStatement attr.property : Top attr.property : Single # This is extremely important to give CallExpression the @@ -1137,9 +1138,9 @@ rule ForBinding : ONEOF(BindingIdentifier, ## continue [no LineTerminator here] LabelIdentifier[?Yield] ; rule ContinueStatement : ONEOF( "continue" + ZEROORONE(';'), - "continue" + LabelIdentifier + ZEROORONE(';')) + "continue" + NoLineTerminator + LabelIdentifier + ZEROORONE(';')) attr.action.%1 : BuildContinue() - attr.action.%2 : BuildContinue(%2) + attr.action.%2 : BuildContinue(%3) ##----------------------------------- ##rule BreakStatement[Yield] : @@ -1147,18 +1148,18 @@ rule ContinueStatement : ONEOF( ## break [no LineTerminator here] LabelIdentifier[?Yield] ; rule BreakStatement : ONEOF( "break" + ZEROORONE(';'), - "break" + LabelIdentifier + ZEROORONE(';')) + "break" + NoLineTerminator + LabelIdentifier + ZEROORONE(';')) attr.action.%1 : BuildBreak() - attr.action.%2 : BuildBreak(%2) + attr.action.%2 : BuildBreak(%3) ##----------------------------------- ##rule ReturnStatement[Yield] : ## return ; ## return [no LineTerminator here] Expression[In, ?Yield] ; rule ReturnStatement :ONEOF("return" + ZEROORONE(';'), - "return" + Expression + ZEROORONE(';')) + "return" + NoLineTerminator + Expression + ZEROORONE(';')) attr.action.%1 
: BuildReturn() - attr.action.%2 : BuildReturn(%2) + attr.action.%2 : BuildReturn(%3) ##----------------------------------- ##rule WithStatement[Yield, Return] : @@ -1734,11 +1735,8 @@ rule Type : ONEOF(UnionOrIntersectionOrPrimaryType, InferType, IsExpression, PrimaryType + '[' + TypeQuery + ']', - TemplateLiteral, - ImportFunction, - ImportFunction + '.' + TypeReference) + TemplateLiteral) attr.action.%7,%11 : BuildArrayElement(%1, %3) - attr.action.%14 : BuildField(%1, %3) #rule UnionOrIntersectionOrPrimaryType: ONEOF(UnionType, # IntersectionOrPrimaryType) @@ -1847,9 +1845,10 @@ rule TupleElementType: ONEOF(ZEROORONE(JSIdentifier + ':') + Type, rule UnionType : ONEOF(ZEROORONE('|') + UnionOrIntersectionOrPrimaryType + '|' + IntersectionOrPrimaryType, UnionOrIntersectionOrPrimaryType + '|' + KeyOf, KeyOf + '|' + UnionOrIntersectionOrPrimaryType, - TypeQuery + '|' + UnionOrIntersectionOrPrimaryType) + TypeQuery + '|' + UnionOrIntersectionOrPrimaryType, + TemplateLiteral + '|' + TemplateLiteral) attr.action.%1 : BuildUnionUserType(%2, %4) - attr.action.%2,%3,%4 : BuildUnionUserType(%1, %3) + attr.action.%2,%3,%4,%5 : BuildUnionUserType(%1, %3) ## rule IntersectionType: IntersectionOrPrimaryType & PrimaryType rule IntersectionType: ONEOF(IntersectionOrPrimaryType + '&' + PrimaryType, @@ -2215,11 +2214,11 @@ rule PropertyMemberDeclaration: ONEOF(MemberVariableDeclaration, ## MemberVariableDeclaration: AccessibilityModifieropt staticopt PropertyName TypeAnnotationopt Initializeropt ; rule MemberVariableDeclaration: ONEOF( - ZEROORMORE(Annotation) + ZEROORMORE(AccessibilityModifier) + PropertyName + ZEROORONE(TypeAnnotation) + ZEROORONE(Initializer) + ZEROORONE(';'), - ZEROORMORE(Annotation) + ZEROORMORE(AccessibilityModifier) + PropertyName + '?' + ZEROORONE(TypeAnnotation) + ZEROORONE(Initializer) + ZEROORONE(';'), + ZEROORMORE(Annotation) + ZEROORMORE(AccessibilityModifier) + PropertySignatureName + ZEROORONE(TypeAnnotation) + ZEROORONE(Initializer) + ZEROORONE(';'), + ZEROORMORE(Annotation) + ZEROORMORE(AccessibilityModifier) + PropertySignatureName + '?' 
+ ZEROORONE(TypeAnnotation) + ZEROORONE(Initializer) + ZEROORONE(';'), ZEROORMORE(Annotation) + ZEROORMORE(AccessibilityModifier) + "get" + '=' + ArrowFunction + ZEROORONE(';'), ZEROORMORE(Annotation) + ZEROORMORE(AccessibilityModifier) + "set" + '=' + ArrowFunction + ZEROORONE(';'), - '#' + PropertyName + ZEROORONE(TypeAnnotation) + ZEROORONE(Initializer) + ZEROORONE(';'), + '#' + PropertySignatureName + ZEROORONE(TypeAnnotation) + ZEROORONE(Initializer) + ZEROORONE(';'), '#' + "private" + ZEROORONE(TypeAnnotation) + ZEROORONE(Initializer) + ZEROORONE(';'), ZEROORMORE(Annotation) + ZEROORMORE(AccessibilityModifier) + "if" + ZEROORONE(TypeAnnotation) + ZEROORONE(Initializer) + ZEROORONE(';')) attr.action.%1: AddInitTo(%3, %5) @@ -2256,6 +2255,13 @@ rule KeywordMemberFunctionName : ONEOF("return", "set", "continue", "break", + "const", + "let", + "var", + "if", + "else", + "for", + "try", "export") attr.action : BuildIdentifier() diff --git a/src/hir2mpl/BUILD.gn b/src/hir2mpl/BUILD.gn index 80b4e5e920ecc003426b7bf3ef3ded50aad58a32..ac416ae0f8675f2932d4311cd7594b4d748b8ff2 100644 --- a/src/hir2mpl/BUILD.gn +++ b/src/hir2mpl/BUILD.gn @@ -23,6 +23,13 @@ cflags += [ "-DJAVA_OBJ_IN_MFILE=1", ] +if (ONLY_C == 1) { + cflags += [ + "-w", + "-DONLY_C", + ] +} + if (MAST == 1) { cflags += [ "-w", @@ -151,10 +158,7 @@ executable("hir2mpl") { deps = [ ":lib_hir2mpl_ast_input_clang", ":lib_hir2mpl_ast_input_common", - ":lib_hir2mpl_bytecode_input_common", - ":lib_hir2mpl_bytecode_input_dex", ":lib_hir2mpl_input_helper", - ":lib_hir2mpl_bytecode_input_class", ":lib_hir2mpl_common", ":lib_hir2mpl_optimize", "${MAPLEALL_ROOT}/maple_driver:libdriver_option", @@ -164,6 +168,15 @@ executable("hir2mpl") { "${MAPLEALL_ROOT}/mpl2mpl:libmpl2mpl", "${THIRD_PARTY_ROOT}/bounds_checking_function:libHWSecureC", ] + + if (ONLY_C != 1) { + deps += [ + ":lib_hir2mpl_bytecode_input_common", + ":lib_hir2mpl_bytecode_input_dex", + ":lib_hir2mpl_bytecode_input_class", + ] + } + if (MAST == 1) { deps += [ ":lib_hir2mpl_ast_input_maple", @@ -218,96 +231,98 @@ static_library("lib_hir2mpl_bytecode_input_class") { output_dir = "${root_out_dir}/ar" } -include_bytecode_input_common_directories = [ - "${HIR2MPL_ROOT}/common/include", - "${HIR2MPL_ROOT}/optimize/include", - "${HIR2MPL_ROOT}/bytecode_input/common/include", - "${HIR2MPL_ROOT}/bytecode_input/dex/include", - "${MAPLEALL_ROOT}/maple_ir/include", - "${MAPLEALL_ROOT}/maple_util/include", - "${MAPLEALL_ROOT}/maple_driver/include", - "${MAPLEALL_ROOT}/mempool/include", - "${THIRD_PARTY_ROOT}/bounds_checking_function/include", -] - -static_library("lib_hir2mpl_bytecode_input_common") { - sources = [ - "${HIR2MPL_ROOT}/bytecode_input/common/src/ark_annotation_map.cpp", - "${HIR2MPL_ROOT}/bytecode_input/common/src/ark_annotation_processor.cpp", - "${HIR2MPL_ROOT}/bytecode_input/common/src/bc_class.cpp", - "${HIR2MPL_ROOT}/bytecode_input/common/src/bc_class2fe_helper.cpp", - "${HIR2MPL_ROOT}/bytecode_input/common/src/bc_function.cpp", - "${HIR2MPL_ROOT}/bytecode_input/common/src/bc_instruction.cpp", - "${HIR2MPL_ROOT}/bytecode_input/common/src/bc_io.cpp", - "${HIR2MPL_ROOT}/bytecode_input/common/src/bc_parser_base.cpp", - "${HIR2MPL_ROOT}/bytecode_input/common/src/bc_pragma.cpp", - "${HIR2MPL_ROOT}/bytecode_input/common/src/bc_util.cpp", - "${HIR2MPL_ROOT}/bytecode_input/common/src/rc_setter.cpp", +if (ONLY_C != 1) { + include_bytecode_input_common_directories = [ + "${HIR2MPL_ROOT}/common/include", + "${HIR2MPL_ROOT}/optimize/include", + 
"${HIR2MPL_ROOT}/bytecode_input/common/include", + "${HIR2MPL_ROOT}/bytecode_input/dex/include", + "${MAPLEALL_ROOT}/maple_ir/include", + "${MAPLEALL_ROOT}/maple_util/include", + "${MAPLEALL_ROOT}/maple_driver/include", + "${MAPLEALL_ROOT}/mempool/include", + "${THIRD_PARTY_ROOT}/bounds_checking_function/include", ] - include_dirs = include_bytecode_input_common_directories - output_dir = "${root_out_dir}/ar" -} -include_bytecode_input_dex_directories = [ - "${HIR2MPL_ROOT}/common/include", - "${HIR2MPL_ROOT}/optimize/include", - "${HIR2MPL_ROOT}/bytecode_input/common/include", - "${HIR2MPL_ROOT}/bytecode_input/dex/include", - "${MAPLEALL_ROOT}/maple_ir/include", - "${MAPLEALL_ROOT}/maple_util/include", - "${MAPLEALL_ROOT}/maple_driver/include", - "${MAPLEALL_ROOT}/mempool/include", - "${THIRD_PARTY_ROOT}/bounds_checking_function/include", -] + static_library("lib_hir2mpl_bytecode_input_common") { + sources = [ + "${HIR2MPL_ROOT}/bytecode_input/common/src/ark_annotation_map.cpp", + "${HIR2MPL_ROOT}/bytecode_input/common/src/ark_annotation_processor.cpp", + "${HIR2MPL_ROOT}/bytecode_input/common/src/bc_class.cpp", + "${HIR2MPL_ROOT}/bytecode_input/common/src/bc_class2fe_helper.cpp", + "${HIR2MPL_ROOT}/bytecode_input/common/src/bc_function.cpp", + "${HIR2MPL_ROOT}/bytecode_input/common/src/bc_instruction.cpp", + "${HIR2MPL_ROOT}/bytecode_input/common/src/bc_io.cpp", + "${HIR2MPL_ROOT}/bytecode_input/common/src/bc_parser_base.cpp", + "${HIR2MPL_ROOT}/bytecode_input/common/src/bc_pragma.cpp", + "${HIR2MPL_ROOT}/bytecode_input/common/src/bc_util.cpp", + "${HIR2MPL_ROOT}/bytecode_input/common/src/rc_setter.cpp", + ] + include_dirs = include_bytecode_input_common_directories + output_dir = "${root_out_dir}/ar" + } -static_library("lib_hir2mpl_bytecode_input_dex") { - sources = [ - "${HIR2MPL_ROOT}/bytecode_input/dex/src/class_linker.cpp", - "${HIR2MPL_ROOT}/bytecode_input/dex/src/class_loader_context.cpp", - "${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_class.cpp", - "${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_class2fe_helper.cpp", - "${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_encode_value.cpp", - "${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_file_util.cpp", - "${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_op.cpp", - "${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_pragma.cpp", - "${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_strfac.cpp", + include_bytecode_input_dex_directories = [ + "${HIR2MPL_ROOT}/common/include", + "${HIR2MPL_ROOT}/optimize/include", + "${HIR2MPL_ROOT}/bytecode_input/common/include", + "${HIR2MPL_ROOT}/bytecode_input/dex/include", + "${MAPLEALL_ROOT}/maple_ir/include", + "${MAPLEALL_ROOT}/maple_util/include", + "${MAPLEALL_ROOT}/maple_driver/include", + "${MAPLEALL_ROOT}/mempool/include", + "${THIRD_PARTY_ROOT}/bounds_checking_function/include", ] - # for libdexfile - include_dirs_dex = [ "${THIRD_PARTY_ROOT}/aosp_modified/system/core/include" ] - include_dirs_libdexfile = [ - #for libdexfile -start - "${THIRD_PARTY_ROOT}/aosp_modified/system/core/liblog/include", - "${THIRD_PARTY_ROOT}/aosp_modified/system/core/libutils/include", - "${THIRD_PARTY_ROOT}/aosp_modified/system/core/base/include", - "${THIRD_PARTY_ROOT}/aosp_modified/system/core/libziparchive/include", - "${THIRD_PARTY_ROOT}/aosp_modified/art/libartpalette/include", - "${THIRD_PARTY_ROOT}/aosp_modified/art/libartbase", - "${THIRD_PARTY_ROOT}/aosp_modified/art/libdexfile", - "${THIRD_PARTY_ROOT}/aosp_modified/include", - "${THIRD_PARTY_ROOT}/aosp_modified/libnativehelper/include_jni", - - #for libdexfile -end - ] + 
static_library("lib_hir2mpl_bytecode_input_dex") { + sources = [ + "${HIR2MPL_ROOT}/bytecode_input/dex/src/class_linker.cpp", + "${HIR2MPL_ROOT}/bytecode_input/dex/src/class_loader_context.cpp", + "${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_class.cpp", + "${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_class2fe_helper.cpp", + "${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_encode_value.cpp", + "${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_file_util.cpp", + "${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_op.cpp", + "${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_pragma.cpp", + "${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_strfac.cpp", + ] - sources += [ - "${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_parser.cpp", - "${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_reader.cpp", - "${HIR2MPL_ROOT}/bytecode_input/dex/src/dexfile_factory.cpp", - "${HIR2MPL_ROOT}/bytecode_input/dex/src/dexfile_interface.cpp", - "${HIR2MPL_ROOT}/bytecode_input/dex/src/dexfile_libdexfile.cpp", - ] - deps_libdexfile = [ - "${THIRD_PARTY_ROOT}/aosp_modified/art/libdexfile:libdexfile", - "${THIRD_PARTY_ROOT}/aosp_modified/system/core/libziparchive:libziparchive", - "${THIRD_PARTY_ROOT}/aosp_modified/system/core/base:libbase", - ] - lib_dex = [ "${THIRD_PARTY_ROOT}/aosp_modified/system/core/liblog/liblog.a" ] - deps = deps_libdexfile - libs = lib_dex - include_dirs = - include_bytecode_input_dex_directories + include_dirs_libdexfile + include_dirs_dex - output_dir = "${root_out_dir}/ar" + # for libdexfile + include_dirs_dex = [ "${THIRD_PARTY_ROOT}/aosp_modified/system/core/include" ] + include_dirs_libdexfile = [ + #for libdexfile -start + "${THIRD_PARTY_ROOT}/aosp_modified/system/core/liblog/include", + "${THIRD_PARTY_ROOT}/aosp_modified/system/core/libutils/include", + "${THIRD_PARTY_ROOT}/aosp_modified/system/core/base/include", + "${THIRD_PARTY_ROOT}/aosp_modified/system/core/libziparchive/include", + "${THIRD_PARTY_ROOT}/aosp_modified/art/libartpalette/include", + "${THIRD_PARTY_ROOT}/aosp_modified/art/libartbase", + "${THIRD_PARTY_ROOT}/aosp_modified/art/libdexfile", + "${THIRD_PARTY_ROOT}/aosp_modified/include", + "${THIRD_PARTY_ROOT}/aosp_modified/libnativehelper/include_jni", + + #for libdexfile -end + ] + + sources += [ + "${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_parser.cpp", + "${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_reader.cpp", + "${HIR2MPL_ROOT}/bytecode_input/dex/src/dexfile_factory.cpp", + "${HIR2MPL_ROOT}/bytecode_input/dex/src/dexfile_interface.cpp", + "${HIR2MPL_ROOT}/bytecode_input/dex/src/dexfile_libdexfile.cpp", + ] + deps_libdexfile = [ + "${THIRD_PARTY_ROOT}/aosp_modified/art/libdexfile:libdexfile", + "${THIRD_PARTY_ROOT}/aosp_modified/system/core/libziparchive:libziparchive", + "${THIRD_PARTY_ROOT}/aosp_modified/system/core/base:libbase", + ] + lib_dex = [ "${THIRD_PARTY_ROOT}/aosp_modified/system/core/liblog/liblog.a" ] + deps = deps_libdexfile + libs = lib_dex + include_dirs = + include_bytecode_input_dex_directories + include_dirs_libdexfile + include_dirs_dex + output_dir = "${root_out_dir}/ar" + } } static_library("lib_hir2mpl_ast_input_clang_lib") { diff --git a/src/hir2mpl/ast_input/clang/include/ast_builtin_func.def b/src/hir2mpl/ast_input/clang/include/ast_builtin_func.def index 08747d4fbf5d36c34644539de5625b80938a342b..1ecffd079cffd062a14b1d5783ffa88302ca2f11 100644 --- a/src/hir2mpl/ast_input/clang/include/ast_builtin_func.def +++ b/src/hir2mpl/ast_input/clang/include/ast_builtin_func.def @@ -6,6 +6,7 @@ BUILTIN_FUNC(strcpy) BUILTIN_FUNC(strcmp) BUILTIN_FUNC(strlen) BUILTIN_FUNC(strchr) 
+BUILTIN_FUNC(strrchr) BUILTIN_FUNC(memcmp) BUILTIN_FUNC(memcpy) BUILTIN_FUNC(memset) diff --git a/src/hir2mpl/ast_input/clang/include/ast_expr.h b/src/hir2mpl/ast_input/clang/include/ast_expr.h index 9c7c52ad3e183080ba57b8bdf250fd7fe4349a29..b3eaf1e8f5d26fe98441b9ab38165ac4063291ac 100644 --- a/src/hir2mpl/ast_input/clang/include/ast_expr.h +++ b/src/hir2mpl/ast_input/clang/include/ast_expr.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -51,6 +51,12 @@ enum ParentFlag { kStructParent }; +enum EvaluatedFlag : uint8 { + EvaluatedAsZero, + EvaluatedAsNonZero, + NotEvaluated +}; + class ASTExpr { public: explicit ASTExpr(ASTOp o) : op(o) {} @@ -108,6 +114,15 @@ class ASTExpr { return srcFileLineNum; } + void SetEvaluatedFlag(EvaluatedFlag flag) { + evaluatedflag = flag; + return; + } + + EvaluatedFlag GetEvaluatedFlag() const { + return evaluatedflag; + } + virtual void SetShortCircuitIdx(uint32 leftIdx, uint32 rightIdx) {} protected: @@ -129,11 +144,12 @@ class ASTExpr { uint32 srcFileIdx = 0; uint32 srcFileLineNum = 0; + EvaluatedFlag evaluatedflag = NotEvaluated; }; class ASTCastExpr : public ASTExpr { public: - ASTCastExpr() : ASTExpr(kASTOpCast) {} + explicit ASTCastExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpCast) {} ~ASTCastExpr() = default; void SetASTExpr(ASTExpr *expr) { @@ -219,7 +235,6 @@ class ASTCastExpr : public ASTExpr { MIRConst *GenerateMIRFloatConst() const; MIRConst *GenerateMIRIntConst() const; UniqueFEIRExpr EmitExprVdupVector(PrimType primtype, UniqueFEIRExpr &subExpr) const; - void CheckNonnullFieldInStruct() const; ASTExpr *child = nullptr; MIRType *src = nullptr; @@ -236,7 +251,7 @@ class ASTCastExpr : public ASTExpr { class ASTDeclRefExpr : public ASTExpr { public: - ASTDeclRefExpr() : ASTExpr(kASTOpRef) {} + explicit ASTDeclRefExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpRef) {} ~ASTDeclRefExpr() = default; protected: @@ -247,6 +262,7 @@ class ASTDeclRefExpr : public ASTExpr { class ASTUnaryOperatorExpr : public ASTExpr { public: explicit ASTUnaryOperatorExpr(ASTOp o) : ASTExpr(o) {} + ASTUnaryOperatorExpr(MapleAllocator &allocatorIn, ASTOp o) : ASTExpr(o) {} virtual ~ASTUnaryOperatorExpr() = default; void SetUOExpr(ASTExpr*); @@ -297,7 +313,7 @@ class ASTUnaryOperatorExpr : public ASTExpr { class ASTUOMinusExpr: public ASTUnaryOperatorExpr { public: - ASTUOMinusExpr() : ASTUnaryOperatorExpr(kASTOpMinus) {} + explicit ASTUOMinusExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpMinus) {} ~ASTUOMinusExpr() = default; private: @@ -306,7 +322,7 @@ class ASTUOMinusExpr: public ASTUnaryOperatorExpr { class ASTUONotExpr: public ASTUnaryOperatorExpr { public: - ASTUONotExpr() : ASTUnaryOperatorExpr(kASTOpNot) {} + explicit ASTUONotExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpNot) {} ~ASTUONotExpr() = default; private: @@ -315,7 +331,7 @@ class ASTUONotExpr: public ASTUnaryOperatorExpr { class ASTUOLNotExpr: public ASTUnaryOperatorExpr { public: - ASTUOLNotExpr() : ASTUnaryOperatorExpr(kASTOpLNot) {} + explicit ASTUOLNotExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpLNot) {} ~ASTUOLNotExpr() = default; void SetShortCircuitIdx(uint32 leftIdx, uint32 rightIdx) override { @@ -331,7 +347,8 @@ class ASTUOLNotExpr: 
public ASTUnaryOperatorExpr { class ASTUOPostIncExpr: public ASTUnaryOperatorExpr { public: - ASTUOPostIncExpr() : ASTUnaryOperatorExpr(kASTOpPostInc), tempVarName(FEUtils::GetSequentialName("postinc_")) {} + explicit ASTUOPostIncExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpPostInc), + tempVarName(FEUtils::GetSequentialName("postinc_")) {} ~ASTUOPostIncExpr() = default; private: @@ -341,7 +358,8 @@ class ASTUOPostIncExpr: public ASTUnaryOperatorExpr { class ASTUOPostDecExpr: public ASTUnaryOperatorExpr { public: - ASTUOPostDecExpr() : ASTUnaryOperatorExpr(kASTOpPostDec), tempVarName(FEUtils::GetSequentialName("postdec_")) {} + explicit ASTUOPostDecExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpPostDec), + tempVarName(FEUtils::GetSequentialName("postdec_")) {} ~ASTUOPostDecExpr() = default; private: @@ -351,7 +369,7 @@ class ASTUOPostDecExpr: public ASTUnaryOperatorExpr { class ASTUOPreIncExpr: public ASTUnaryOperatorExpr { public: - ASTUOPreIncExpr() : ASTUnaryOperatorExpr(kASTOpPreInc) {} + explicit ASTUOPreIncExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpPreInc) {} ~ASTUOPreIncExpr() = default; private: @@ -360,7 +378,7 @@ class ASTUOPreIncExpr: public ASTUnaryOperatorExpr { class ASTUOPreDecExpr: public ASTUnaryOperatorExpr { public: - ASTUOPreDecExpr() : ASTUnaryOperatorExpr(kASTOpPreDec) {} + explicit ASTUOPreDecExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpPreDec) {} ~ASTUOPreDecExpr() = default; private: @@ -371,6 +389,7 @@ class ASTUOPreDecExpr: public ASTUnaryOperatorExpr { class ASTUOAddrOfExpr: public ASTUnaryOperatorExpr { public: ASTUOAddrOfExpr() : ASTUnaryOperatorExpr(kASTOpAddrOf) {} + explicit ASTUOAddrOfExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpAddrOf) {} ~ASTUOAddrOfExpr() = default; protected: @@ -382,24 +401,29 @@ class ASTUOAddrOfExpr: public ASTUnaryOperatorExpr { class ASTUOAddrOfLabelExpr : public ASTUnaryOperatorExpr { public: - ASTUOAddrOfLabelExpr() : ASTUnaryOperatorExpr(kASTOpAddrOfLabel) {} + explicit ASTUOAddrOfLabelExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpAddrOfLabel), + labelName("", allocatorIn.GetMemPool()) {} ~ASTUOAddrOfLabelExpr() = default; void SetLabelName(const std::string &name) { labelName = name; } + const std::string GetLabelName() const { + return labelName.c_str() == nullptr ? 
"" : labelName.c_str(); + } + protected: MIRConst *GenerateMIRConstImpl() const override; private: UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; - std::string labelName; + MapleString labelName; }; class ASTUODerefExpr: public ASTUnaryOperatorExpr { public: - ASTUODerefExpr() : ASTUnaryOperatorExpr(kASTOpDeref) {} + explicit ASTUODerefExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpDeref) {} ~ASTUODerefExpr() = default; private: @@ -410,7 +434,7 @@ class ASTUODerefExpr: public ASTUnaryOperatorExpr { class ASTUOPlusExpr: public ASTUnaryOperatorExpr { public: - ASTUOPlusExpr() : ASTUnaryOperatorExpr(kASTOpPlus) {} + explicit ASTUOPlusExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpPlus) {} ~ASTUOPlusExpr() = default; private: @@ -419,7 +443,7 @@ class ASTUOPlusExpr: public ASTUnaryOperatorExpr { class ASTUORealExpr: public ASTUnaryOperatorExpr { public: - ASTUORealExpr() : ASTUnaryOperatorExpr(kASTOpReal) {} + explicit ASTUORealExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpReal) {} ~ASTUORealExpr() = default; void SetElementType(MIRType *type) { @@ -433,7 +457,7 @@ class ASTUORealExpr: public ASTUnaryOperatorExpr { class ASTUOImagExpr: public ASTUnaryOperatorExpr { public: - ASTUOImagExpr() : ASTUnaryOperatorExpr(kASTOpImag) {} + explicit ASTUOImagExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpImag) {} ~ASTUOImagExpr() = default; void SetElementType(MIRType *type) { @@ -447,7 +471,7 @@ class ASTUOImagExpr: public ASTUnaryOperatorExpr { class ASTUOExtensionExpr: public ASTUnaryOperatorExpr { public: - ASTUOExtensionExpr() : ASTUnaryOperatorExpr(kASTOpExtension) {} + explicit ASTUOExtensionExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpExtension) {} ~ASTUOExtensionExpr() = default; private: @@ -456,7 +480,7 @@ class ASTUOExtensionExpr: public ASTUnaryOperatorExpr { class ASTUOCoawaitExpr: public ASTUnaryOperatorExpr { public: - ASTUOCoawaitExpr() : ASTUnaryOperatorExpr(kASTOpCoawait) {} + explicit ASTUOCoawaitExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpCoawait) {} ~ASTUOCoawaitExpr() = default; private: @@ -465,7 +489,7 @@ class ASTUOCoawaitExpr: public ASTUnaryOperatorExpr { class ASTPredefinedExpr : public ASTExpr { public: - ASTPredefinedExpr() : ASTExpr(kASTOpPredefined) {} + explicit ASTPredefinedExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpPredefined) {} ~ASTPredefinedExpr() = default; void SetASTExpr(ASTExpr*); @@ -476,7 +500,7 @@ class ASTPredefinedExpr : public ASTExpr { class ASTOpaqueValueExpr : public ASTExpr { public: - ASTOpaqueValueExpr() : ASTExpr(kASTOpOpaqueValue) {} + explicit ASTOpaqueValueExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpOpaqueValue) {} ~ASTOpaqueValueExpr() = default; void SetASTExpr(ASTExpr*); @@ -487,7 +511,7 @@ class ASTOpaqueValueExpr : public ASTExpr { class ASTNoInitExpr : public ASTExpr { public: - ASTNoInitExpr() : ASTExpr(kASTOpNoInitExpr) {} + explicit ASTNoInitExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpNoInitExpr) {} ~ASTNoInitExpr() = default; void SetNoInitType(MIRType *type); @@ -498,39 +522,27 @@ class ASTNoInitExpr : public ASTExpr { class ASTCompoundLiteralExpr : public ASTExpr { public: - ASTCompoundLiteralExpr() : ASTExpr(kASTOpCompoundLiteralExp) {} + explicit ASTCompoundLiteralExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpCompoundLiteralExpr) {} ~ASTCompoundLiteralExpr() = default; void SetCompoundLiteralType(MIRType *clType); 
- - MIRType *GetCompoundLiteralType() const { - return compoundLiteralType; - } - void SetASTExpr(ASTExpr*); - const ASTExpr *GetASTExpr() const { - return child; - } - - void SetInitName(const std::string &argInitName) { - initName = argInitName; - } - - const std::string &GetInitName() const { - return initName; + void SetAddrof(bool flag) { + isAddrof = flag; } private: UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; MIRConst *GenerateMIRConstImpl() const override; + MIRConst *GenerateMIRPtrConst() const; ASTExpr *child = nullptr; MIRType *compoundLiteralType = nullptr; - std::string initName; + bool isAddrof = false; }; class ASTOffsetOfExpr : public ASTExpr { public: - ASTOffsetOfExpr() : ASTExpr(kASTOpOffsetOfExpr) {} + explicit ASTOffsetOfExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpOffsetOfExpr) {} ~ASTOffsetOfExpr() = default; void SetStructType(MIRType *stype); void SetFieldName(const std::string &fName); @@ -548,7 +560,8 @@ class ASTOffsetOfExpr : public ASTExpr { class ASTInitListExpr : public ASTExpr { public: - ASTInitListExpr() : ASTExpr(kASTOpInitListExpr) {} + explicit ASTInitListExpr(MapleAllocator &allocatorIn) + : ASTExpr(kASTOpInitListExpr), initExprs(allocatorIn.Adapter()), varName("", allocatorIn.GetMemPool()) {} ~ASTInitListExpr() = default; void SetInitExprs(ASTExpr *astExpr); void SetInitListType(MIRType *type); @@ -557,7 +570,7 @@ class ASTInitListExpr : public ASTExpr { return initListType; } - std::vector GetInitExprs() const { + MapleVector GetInitExprs() const { return initExprs; } @@ -565,8 +578,8 @@ class ASTInitListExpr : public ASTExpr { varName = argVarName; } - const std::string &GetInitListVarName() const { - return varName; + const std::string GetInitListVarName() const { + return varName.c_str() == nullptr ? 
"" : varName.c_str(); } void SetParentFlag(ParentFlag argParentFlag) { @@ -633,20 +646,21 @@ class ASTInitListExpr : public ASTExpr { std::list &stmts) const; MIRConst *GenerateMIRConstForArray() const; MIRConst *GenerateMIRConstForStruct() const; - std::vector initExprs; + MapleVector initExprs; ASTExpr *arrayFillerExpr = nullptr; MIRType *initListType = nullptr; - std::string varName; + MapleString varName; ParentFlag parentFlag = kNoParent; uint32 unionInitFieldIdx = UINT32_MAX; bool hasArrayFiller = false; bool isTransparent = false; bool hasVectorType = false; + mutable bool isGenerating = false; }; class ASTBinaryConditionalOperator : public ASTExpr { public: - ASTBinaryConditionalOperator() : ASTExpr(kASTOpBinaryConditionalOperator) {} + explicit ASTBinaryConditionalOperator(MapleAllocator &allocatorIn) : ASTExpr(kASTOpBinaryConditionalOperator) {} ~ASTBinaryConditionalOperator() = default; void SetCondExpr(ASTExpr *expr); void SetFalseExpr(ASTExpr *expr); @@ -659,8 +673,8 @@ class ASTBinaryConditionalOperator : public ASTExpr { class ASTBinaryOperatorExpr : public ASTExpr { public: - explicit ASTBinaryOperatorExpr(ASTOp o) : ASTExpr(o) {} - ASTBinaryOperatorExpr() + ASTBinaryOperatorExpr(MapleAllocator &allocatorIn, ASTOp o) : ASTExpr(o) {} + explicit ASTBinaryOperatorExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpBO), varName(FEUtils::GetSequentialName("shortCircuit_")) {} ~ASTBinaryOperatorExpr() override = default; @@ -745,7 +759,7 @@ class ASTBinaryOperatorExpr : public ASTExpr { class ASTImplicitValueInitExpr : public ASTExpr { public: - ASTImplicitValueInitExpr() : ASTExpr(kASTImplicitValueInitExpr) {} + explicit ASTImplicitValueInitExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTImplicitValueInitExpr) {} ~ASTImplicitValueInitExpr() = default; protected: @@ -757,7 +771,8 @@ class ASTImplicitValueInitExpr : public ASTExpr { class ASTStringLiteral : public ASTExpr { public: - ASTStringLiteral() : ASTExpr(kASTStringLiteral) {} + explicit ASTStringLiteral(MapleAllocator &allocatorIn) : ASTExpr(kASTStringLiteral), + codeUnits(allocatorIn.Adapter()), str(allocatorIn.Adapter()) {} ~ASTStringLiteral() = default; void SetLength(size_t len) { @@ -768,14 +783,26 @@ class ASTStringLiteral : public ASTExpr { return length; } - void SetCodeUnits(std::vector &units) { + void SetCodeUnits(MapleVector &units) { codeUnits = std::move(units); } - const std::vector &GetCodeUnits() const { + const MapleVector &GetCodeUnits() const { return codeUnits; } + void SetStr(const std::string &strIn) { + if (str.size() > 0) { + str.clear(); + str.shrink_to_fit(); + } + str.insert(str.end(), strIn.begin(), strIn.end()); + } + + const std::string GetStr() const { + return std::string(str.begin(), str.end()); + } + void SetIsArrayToPointerDecay(bool argIsArrayToPointerDecay) { isArrayToPointerDecay = argIsArrayToPointerDecay; } @@ -790,13 +817,14 @@ class ASTStringLiteral : public ASTExpr { private: UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; size_t length = 0; - std::vector codeUnits; + MapleVector codeUnits; + MapleVector str; // Ascii string bool isArrayToPointerDecay = false; }; class ASTArraySubscriptExpr : public ASTExpr { public: - ASTArraySubscriptExpr() : ASTExpr(kASTSubscriptExpr) {} + explicit ASTArraySubscriptExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTSubscriptExpr) {} ~ASTArraySubscriptExpr() = default; void SetBaseExpr(ASTExpr *astExpr) { @@ -847,7 +875,7 @@ class ASTArraySubscriptExpr : public ASTExpr { class ASTExprUnaryExprOrTypeTraitExpr : public ASTExpr { 
public: - ASTExprUnaryExprOrTypeTraitExpr() : ASTExpr(kASTExprUnaryExprOrTypeTraitExpr) {} + explicit ASTExprUnaryExprOrTypeTraitExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTExprUnaryExprOrTypeTraitExpr) {} ~ASTExprUnaryExprOrTypeTraitExpr() = default; void SetIsType(bool type) { @@ -871,7 +899,8 @@ class ASTExprUnaryExprOrTypeTraitExpr : public ASTExpr { class ASTMemberExpr : public ASTExpr { public: - ASTMemberExpr() : ASTExpr(kASTMemberExpr) {} + explicit ASTMemberExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTMemberExpr), + memberName("", allocatorIn.GetMemPool()) {} ~ASTMemberExpr() = default; void SetBaseExpr(ASTExpr *astExpr) { @@ -887,7 +916,7 @@ class ASTMemberExpr : public ASTExpr { } std::string GetMemberName() const { - return memberName; + return memberName.c_str() == nullptr ? "" : memberName.c_str(); } void SetMemberType(MIRType *type) { @@ -929,7 +958,7 @@ class ASTMemberExpr : public ASTExpr { void InsertNonnullChecking(std::list &stmts, UniqueFEIRExpr baseFEExpr) const; ASTExpr *baseExpr = nullptr; - std::string memberName; + MapleString memberName; MIRType *memberType = nullptr; MIRType *baseType = nullptr; bool isArrow = false; @@ -938,7 +967,7 @@ class ASTMemberExpr : public ASTExpr { class ASTDesignatedInitUpdateExpr : public ASTExpr { public: - ASTDesignatedInitUpdateExpr() : ASTExpr(kASTASTDesignatedInitUpdateExpr) {} + explicit ASTDesignatedInitUpdateExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTASTDesignatedInitUpdateExpr) {} ~ASTDesignatedInitUpdateExpr() = default; void SetBaseExpr(ASTExpr *astExpr) { @@ -984,7 +1013,8 @@ class ASTDesignatedInitUpdateExpr : public ASTExpr { class ASTAssignExpr : public ASTBinaryOperatorExpr { public: - ASTAssignExpr() : ASTBinaryOperatorExpr(kASTOpAssign), isCompoundAssign(false) {} + explicit ASTAssignExpr(MapleAllocator &allocatorIn) : ASTBinaryOperatorExpr(allocatorIn, kASTOpAssign), + isCompoundAssign(false) {} ~ASTAssignExpr() override = default; void SetIsCompoundAssign(bool argIsCompoundAssign) { @@ -1000,7 +1030,7 @@ class ASTAssignExpr : public ASTBinaryOperatorExpr { class ASTBOComma : public ASTBinaryOperatorExpr { public: - ASTBOComma() : ASTBinaryOperatorExpr(kASTOpComma) {} + explicit ASTBOComma(MapleAllocator &allocatorIn) : ASTBinaryOperatorExpr(allocatorIn, kASTOpComma) {} ~ASTBOComma() override = default; private: @@ -1009,7 +1039,7 @@ class ASTBOComma : public ASTBinaryOperatorExpr { class ASTBOPtrMemExpr : public ASTBinaryOperatorExpr { public: - ASTBOPtrMemExpr() : ASTBinaryOperatorExpr(kASTOpPtrMemD) {} + ASTBOPtrMemExpr(MapleAllocator &allocatorIn) : ASTBinaryOperatorExpr(allocatorIn, kASTOpPtrMemD) {} ~ASTBOPtrMemExpr() override = default; private: @@ -1018,7 +1048,8 @@ class ASTBOPtrMemExpr : public ASTBinaryOperatorExpr { class ASTCallExpr : public ASTExpr { public: - ASTCallExpr() : ASTExpr(kASTOpCall), varName(FEUtils::GetSequentialName("retVar_")) {} + explicit ASTCallExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpCall), args(allocatorIn.Adapter()), + funcName("", allocatorIn.GetMemPool()), varName(FEUtils::GetSequentialName("retVar_"), allocatorIn.GetMemPool()) {} ~ASTCallExpr() = default; void SetCalleeExpr(ASTExpr *astExpr) { calleeExpr = astExpr; @@ -1028,11 +1059,11 @@ class ASTCallExpr : public ASTExpr { return calleeExpr; } - void SetArgs(std::vector &argsVector){ + void SetArgs(MapleVector &argsVector){ args = std::move(argsVector); } - const std::vector &GetArgsExpr() const { + const MapleVector &GetArgsExpr() const { return args; } @@ -1044,16 +1075,16 @@ class ASTCallExpr : public 
ASTExpr { return retType; } - const std::string &GetRetVarName() const { - return varName; + const std::string GetRetVarName() const { + return varName.c_str() == nullptr ? "" : varName.c_str(); } void SetFuncName(const std::string &name) { funcName = name; } - const std::string &GetFuncName() const { - return funcName; + const std::string GetFuncName() const { + return funcName.c_str() == nullptr ? "" : funcName.c_str(); } void SetFuncAttrs(const FuncAttrs &attrs) { @@ -1101,6 +1132,7 @@ class ASTCallExpr : public ASTExpr { static std::unordered_map InitBuiltinFuncPtrMap(); UniqueFEIRExpr CreateIntrinsicopForC(std::list &stmts, MIRIntrinsicID argIntrinsicID, bool genTempVar = true) const; + UniqueFEIRExpr CreateIntrinsicCallAssignedForC(std::list &stmts, MIRIntrinsicID argIntrinsicID) const; UniqueFEIRExpr CreateBinaryExpr(std::list &stmts, Opcode op) const; UniqueFEIRExpr EmitBuiltinFunc(std::list &stmts) const; UniqueFEIRExpr EmitBuiltinVectorZip(std::list &stmts, bool &isFinish) const; @@ -1180,23 +1212,71 @@ class ASTCallExpr : public ASTExpr { UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncAddAndFetch8); UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncAddAndFetch4); UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncAddAndFetch2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncAddAndFetch1); UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncSubAndFetch8); UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncSubAndFetch4); UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncSubAndFetch2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncSubAndFetch1); UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndSub8); UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndSub4); UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndSub2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndSub1); UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndAdd8); UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndAdd4); UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndAdd2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndAdd1); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncBoolCompareAndSwap1); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncBoolCompareAndSwap2); UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncBoolCompareAndSwap4); UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncBoolCompareAndSwap8); UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncLockTestAndSet8); UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncLockTestAndSet4); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncLockTestAndSet2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncLockTestAndSet1); UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncValCompareAndSwap8); UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncValCompareAndSwap4); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncValCompareAndSwap2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncValCompareAndSwap1); UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncLockRelease8); UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncLockRelease4); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncLockRelease2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncLockRelease1); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndAnd1); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndAnd2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndAnd4); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndAnd8); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndOr1); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndOr2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndOr4); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndOr8); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndXor1); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndXor2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndXor4); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndXor8); + UniqueFEIRExpr 
EMIT_BUILTIIN_FUNC(SyncFetchAndNand1); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndNand2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndNand4); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndNand8); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncAndAndFetch1); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncAndAndFetch2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncAndAndFetch4); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncAndAndFetch8); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncOrAndFetch1); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncOrAndFetch2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncOrAndFetch4); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncOrAndFetch8); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncXorAndFetch1); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncXorAndFetch2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncXorAndFetch4); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncXorAndFetch8); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncNandAndFetch1); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncNandAndFetch2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncNandAndFetch4); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncNandAndFetch8); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncSynchronize); + + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(AtomicExchangeN); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(ReturnAddress); UniqueFEIRExpr EMIT_BUILTIIN_FUNC(ExtractReturnAddr); @@ -1209,19 +1289,19 @@ UniqueFEIRExpr EmitBuiltin##STR(std::list &stmts) const; UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; static std::unordered_map builtingFuncPtrMap; - std::vector args; + MapleVector args; ASTExpr *calleeExpr = nullptr; MIRType *retType = nullptr; - std::string funcName; + MapleString funcName; FuncAttrs funcAttrs; bool isIcall = false; - std::string varName; + MapleString varName; ASTFunc *funcDecl = nullptr; }; class ASTParenExpr : public ASTExpr { public: - ASTParenExpr() : ASTExpr(kASTParen) {} + explicit ASTParenExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTParen) {} ~ASTParenExpr() = default; void SetASTExpr(ASTExpr *astExpr) { @@ -1252,7 +1332,7 @@ class ASTParenExpr : public ASTExpr { class ASTIntegerLiteral : public ASTExpr { public: - ASTIntegerLiteral() : ASTExpr(kASTIntegerLiteral) {} + explicit ASTIntegerLiteral(MapleAllocator &allocatorIn) : ASTExpr(kASTIntegerLiteral) {} ~ASTIntegerLiteral() = default; int64 GetVal() const { @@ -1279,7 +1359,7 @@ enum FloatKind { class ASTFloatingLiteral : public ASTExpr { public: - ASTFloatingLiteral() : ASTExpr(kASTFloatingLiteral) {} + explicit ASTFloatingLiteral(MapleAllocator &allocatorIn) : ASTExpr(kASTFloatingLiteral) {} ~ASTFloatingLiteral() = default; double GetVal() const { @@ -1307,7 +1387,7 @@ class ASTFloatingLiteral : public ASTExpr { class ASTCharacterLiteral : public ASTExpr { public: - ASTCharacterLiteral() : ASTExpr(kASTCharacterLiteral) {} + explicit ASTCharacterLiteral(MapleAllocator &allocatorIn) : ASTExpr(kASTCharacterLiteral) {} ~ASTCharacterLiteral() = default; int64 GetVal() const { @@ -1340,7 +1420,7 @@ struct VaArgInfo { class ASTVAArgExpr : public ASTExpr { public: - ASTVAArgExpr() : ASTExpr(kASTVAArgExpr) {} + explicit ASTVAArgExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTVAArgExpr) {} ~ASTVAArgExpr() = default; void SetASTExpr(ASTExpr *astExpr) { @@ -1363,12 +1443,16 @@ class ASTVAArgExpr : public ASTExpr { class ASTConstantExpr : public ASTExpr { public: - ASTConstantExpr() : ASTExpr(kConstantExpr) {} + explicit ASTConstantExpr(MapleAllocator &allocatorIn) : ASTExpr(kConstantExpr) {} ~ASTConstantExpr() = default; void SetASTExpr(ASTExpr *astExpr) { child = astExpr; } + ASTExpr 
*GetChild() { + return child; + } + protected: MIRConst *GenerateMIRConstImpl() const override; @@ -1379,7 +1463,7 @@ class ASTConstantExpr : public ASTExpr { class ASTImaginaryLiteral : public ASTExpr { public: - ASTImaginaryLiteral() : ASTExpr(kASTImaginaryLiteral) {} + explicit ASTImaginaryLiteral(MapleAllocator &allocatorIn) : ASTExpr(kASTImaginaryLiteral) {} ~ASTImaginaryLiteral() = default; void SetASTExpr(ASTExpr *astExpr) { child = astExpr; @@ -1402,7 +1486,7 @@ class ASTImaginaryLiteral : public ASTExpr { class ASTConditionalOperator : public ASTExpr { public: - ASTConditionalOperator() : ASTExpr(kASTConditionalOperator) {} + explicit ASTConditionalOperator(MapleAllocator &allocatorIn) : ASTExpr(kASTConditionalOperator) {} ~ASTConditionalOperator() = default; void SetCondExpr(ASTExpr *astExpr) { @@ -1437,7 +1521,7 @@ class ASTConditionalOperator : public ASTExpr { class ASTArrayInitLoopExpr : public ASTExpr { public: - ASTArrayInitLoopExpr() : ASTExpr(kASTOpArrayInitLoop) {} + explicit ASTArrayInitLoopExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpArrayInitLoop) {} ~ASTArrayInitLoopExpr() = default; void SetCommonExpr(ASTExpr *expr) { @@ -1455,7 +1539,7 @@ class ASTArrayInitLoopExpr : public ASTExpr { class ASTArrayInitIndexExpr : public ASTExpr { public: - ASTArrayInitIndexExpr() : ASTExpr(kASTOpArrayInitLoop) {} + explicit ASTArrayInitIndexExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpArrayInitLoop) {} ~ASTArrayInitIndexExpr() = default; void SetPrimType(MIRType *pType) { @@ -1482,7 +1566,7 @@ class ASTArrayInitIndexExpr : public ASTExpr { class ASTExprWithCleanups : public ASTExpr { public: - ASTExprWithCleanups() : ASTExpr(kASTOpExprWithCleanups) {} + explicit ASTExprWithCleanups(MapleAllocator &allocatorIn) : ASTExpr(kASTOpExprWithCleanups) {} ~ASTExprWithCleanups() = default; void SetSubExpr(ASTExpr *sub) { @@ -1500,7 +1584,7 @@ class ASTExprWithCleanups : public ASTExpr { class ASTMaterializeTemporaryExpr : public ASTExpr { public: - ASTMaterializeTemporaryExpr() : ASTExpr(kASTOpMaterializeTemporary) {} + explicit ASTMaterializeTemporaryExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpMaterializeTemporary) {} ~ASTMaterializeTemporaryExpr() = default; private: @@ -1509,7 +1593,7 @@ class ASTMaterializeTemporaryExpr : public ASTExpr { class ASTSubstNonTypeTemplateParmExpr : public ASTExpr { public: - ASTSubstNonTypeTemplateParmExpr() : ASTExpr(kASTOpSubstNonTypeTemplateParm) {} + explicit ASTSubstNonTypeTemplateParmExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpSubstNonTypeTemplateParm) {} ~ASTSubstNonTypeTemplateParmExpr() = default; private: @@ -1518,7 +1602,7 @@ class ASTSubstNonTypeTemplateParmExpr : public ASTExpr { class ASTDependentScopeDeclRefExpr : public ASTExpr { public: - ASTDependentScopeDeclRefExpr() : ASTExpr(kASTOpDependentScopeDeclRef) {} + explicit ASTDependentScopeDeclRefExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpDependentScopeDeclRef) {} ~ASTDependentScopeDeclRefExpr() = default; private: @@ -1527,7 +1611,7 @@ class ASTDependentScopeDeclRefExpr : public ASTExpr { class ASTAtomicExpr : public ASTExpr { public: - ASTAtomicExpr() : ASTExpr(kASTOpAtomic) {} + explicit ASTAtomicExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpAtomic) {} ~ASTAtomicExpr() = default; void SetRefType(MIRType *ref) { @@ -1609,7 +1693,7 @@ class ASTAtomicExpr : public ASTExpr { class ASTExprStmtExpr : public ASTExpr { public: - ASTExprStmtExpr() : ASTExpr(kASTOpStmtExpr) {} + explicit ASTExprStmtExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpStmtExpr) {} 
~ASTExprStmtExpr() = default; void SetCompoundStmt(ASTStmt *sub) { cpdStmt = sub; diff --git a/src/hir2mpl/ast_input/clang/include/ast_op.h b/src/hir2mpl/ast_input/clang/include/ast_op.h index 795a9181daf91f5b909f2facc9b631668f6f1d94..f7df6c97f7d9ebf61b40cb5f88790da8c997277c 100644 --- a/src/hir2mpl/ast_input/clang/include/ast_op.h +++ b/src/hir2mpl/ast_input/clang/include/ast_op.h @@ -112,7 +112,7 @@ enum ASTOp { kASTOpOpaqueValue, kASTOpBinaryConditionalOperator, kASTOpNoInitExpr, - kASTOpCompoundLiteralExp, + kASTOpCompoundLiteralExpr, kASTOpOffsetOfExpr, kASTOpGenericSelectionExpr, kASTOpInitListExpr, @@ -169,6 +169,8 @@ enum ASTStmtOp { kASTOffsetOfStmt, kASTGenericSelectionExprStmt, kASTStmtAttributed, + kASTStmtDeclRefExpr, + kASTStmtUnaryExprOrTypeTraitExpr, }; } // namespace maple #endif // HIR2MPL_AST_INPUT_INCLUDE_AST_OP_H diff --git a/src/hir2mpl/ast_input/clang/include/ast_parser.h b/src/hir2mpl/ast_input/clang/include/ast_parser.h index 7d05e26fc4f69d5bdb28dfc379dbb40268cf98d2..8de885cd76a4f9a08a4896f3dd2f362326f66923 100644 --- a/src/hir2mpl/ast_input/clang/include/ast_parser.h +++ b/src/hir2mpl/ast_input/clang/include/ast_parser.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -26,13 +26,15 @@ class ASTParser { ASTParser(MapleAllocator &allocatorIn, uint32 fileIdxIn, const std::string &fileNameIn, MapleList &astStructsIn, MapleList &astFuncsIn, MapleList &astVarsIn, MapleList &astFileScopeAsmsIn) - : fileIdx(fileIdxIn), fileName(fileNameIn), globalVarDecles(allocatorIn.Adapter()), + : fileIdx(fileIdxIn), fileName(fileNameIn, allocatorIn.GetMemPool()), globalVarDecles(allocatorIn.Adapter()), funcDecles(allocatorIn.Adapter()), recordDecles(allocatorIn.Adapter()), globalEnumDecles(allocatorIn.Adapter()), globalTypeDefDecles(allocatorIn.Adapter()), globalFileScopeAsm(allocatorIn.Adapter()), astStructs(astStructsIn), astFuncs(astFuncsIn), astVars(astVarsIn), astFileScopeAsms(astFileScopeAsmsIn) {} virtual ~ASTParser() = default; - bool OpenFile(); + bool OpenFile(MapleAllocator &allocator); + bool Release(); + bool Verify() const; bool PreProcessAST(); @@ -43,7 +45,7 @@ class ASTParser { bool ProcessGlobalTypeDef(MapleAllocator &allocator); - const std::string &GetSourceFileName() const; + const std::string GetSourceFileName() const; const uint32 GetFileIdx() const; // ProcessStmt @@ -84,6 +86,8 @@ class ASTParser { ASTStmt *PROCESS_STMT(GCCAsmStmt); ASTStmt *PROCESS_STMT(OffsetOfExpr); ASTStmt *PROCESS_STMT(GenericSelectionExpr); + ASTStmt *PROCESS_STMT(DeclRefExpr); + ASTStmt *PROCESS_STMT(UnaryExprOrTypeTraitExpr); bool HasDefault(const clang::Stmt &stmt); // ProcessExpr @@ -161,7 +165,7 @@ class ASTParser { private: void ProcessNonnullFuncAttrs(const clang::FunctionDecl &funcDecl, ASTFunc &astFunc); - void ProcessNonnullFuncPtrAttrs(const clang::ValueDecl &valueDecl, ASTDecl &astVar); + void ProcessNonnullFuncPtrAttrs(MapleAllocator &allocator, const clang::ValueDecl &valueDecl, ASTDecl &astVar); void ProcessBoundaryFuncAttrs(MapleAllocator &allocator, const clang::FunctionDecl &funcDecl, ASTFunc &astFunc); void ProcessByteBoundaryFuncAttrs(MapleAllocator &allocator, const clang::FunctionDecl &funcDecl, ASTFunc &astFunc); void ProcessBoundaryFuncAttrsByIndex(MapleAllocator &allocator, const 
clang::FunctionDecl &funcDecl, @@ -179,8 +183,8 @@ class ASTParser { template bool ProcessBoundaryFuncPtrAttrsForRet(T *attr, MapleAllocator &allocator, const MIRFuncType &funcType, const clang::FunctionType &clangFuncType, TypeAttrs &retAttr); - void ProcessBoundaryFuncPtrAttrsByIndex(const clang::ValueDecl &valueDecl, ASTDecl &astDecl, - const MIRFuncType &funcType); + void ProcessBoundaryFuncPtrAttrsByIndex(MapleAllocator &allocator, const clang::ValueDecl &valueDecl, + ASTDecl &astDecl, const MIRFuncType &funcType); template bool ProcessBoundaryFuncPtrAttrsByIndexForParams(T *attr, ASTDecl &astDecl, const MIRFuncType &funcType, std::vector &attrsVec); @@ -238,8 +242,8 @@ ASTExpr *ParseBuiltinFunc(MapleAllocator &allocator, const clang::CallExpr &expr static std::map builtingFuncPtrMap; uint32 fileIdx; - const std::string fileName; - std::unique_ptr astFile; + const MapleString fileName; + LibAstFile *astFile = nullptr; AstUnitDecl *astUnitDecl = nullptr; MapleList globalVarDecles; MapleList funcDecles; diff --git a/src/hir2mpl/ast_input/clang/include/ast_stmt.h b/src/hir2mpl/ast_input/clang/include/ast_stmt.h index 4a053bb43957715bdcf1de748b3fb429c28cfe38..517d9f14453fd458edcf0d9fc6433017109fc3b8 100644 --- a/src/hir2mpl/ast_input/clang/include/ast_stmt.h +++ b/src/hir2mpl/ast_input/clang/include/ast_stmt.h @@ -29,7 +29,7 @@ enum SafeSS { class ASTStmt { public: - explicit ASTStmt(ASTStmtOp o = kASTStmtNone) : op(o) {} + explicit ASTStmt(MapleAllocator &allocatorIn, ASTStmtOp o = kASTStmtNone) : exprs(allocatorIn.Adapter()), op(o) {} virtual ~ASTStmt() = default; void SetASTExpr(ASTExpr* astExpr); @@ -41,7 +41,7 @@ class ASTStmt { return op; } - const std::vector &GetExprs() const { + const MapleVector &GetExprs() const { return exprs; } @@ -60,8 +60,8 @@ class ASTStmt { protected: virtual std::list Emit2FEStmtImpl() const = 0; + MapleVector exprs; ASTStmtOp op; - std::vector exprs; uint32 srcFileIdx = 0; uint32 srcFileLineNum = 0; @@ -69,7 +69,7 @@ class ASTStmt { class ASTStmtDummy : public ASTStmt { public: - ASTStmtDummy() : ASTStmt(kASTStmtDummy) {} + explicit ASTStmtDummy(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtDummy) {} ~ASTStmtDummy() = default; private: @@ -78,11 +78,12 @@ class ASTStmtDummy : public ASTStmt { class ASTCompoundStmt : public ASTStmt { public: - ASTCompoundStmt() : ASTStmt(kASTStmtCompound) {} + explicit ASTCompoundStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtCompound), + astStmts(allocatorIn.Adapter()){} ~ASTCompoundStmt() = default; void SetASTStmt(ASTStmt*); void InsertASTStmtsAtFront(const std::list &stmts); - const std::list &GetASTStmtList() const; + const MapleList &GetASTStmtList() const; void SetSafeSS(SafeSS state) { safeSS = state; @@ -94,14 +95,14 @@ class ASTCompoundStmt : public ASTStmt { private: SafeSS safeSS = kNoneSS; - std::list astStmts; // stmts + MapleList astStmts; // stmts std::list Emit2FEStmtImpl() const override; }; // Any other expressions or stmts should be extended here class ASTReturnStmt : public ASTStmt { public: - ASTReturnStmt() : ASTStmt(kASTStmtReturn) {} + explicit ASTReturnStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtReturn) {} ~ASTReturnStmt() = default; private: @@ -110,7 +111,7 @@ class ASTReturnStmt : public ASTStmt { class ASTAttributedStmt : public ASTStmt { public: - ASTAttributedStmt() : ASTStmt(kASTStmtAttributed) {} + explicit ASTAttributedStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtAttributed) {} ~ASTAttributedStmt() override = 
default; private: @@ -119,7 +120,7 @@ class ASTAttributedStmt : public ASTStmt { class ASTIfStmt : public ASTStmt { public: - ASTIfStmt() : ASTStmt(kASTStmtIf) {} + explicit ASTIfStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtIf) {} ~ASTIfStmt() override = default; void SetCondExpr(ASTExpr *astExpr) { @@ -143,7 +144,7 @@ class ASTIfStmt : public ASTStmt { class ASTForStmt : public ASTStmt { public: - ASTForStmt() : ASTStmt(kASTStmtFor) {} + explicit ASTForStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtFor) {} ~ASTForStmt() override = default; void SetInitStmt(ASTStmt *astStmt) { @@ -172,7 +173,7 @@ class ASTForStmt : public ASTStmt { class ASTWhileStmt : public ASTStmt { public: - ASTWhileStmt() : ASTStmt(kASTStmtWhile) {} + explicit ASTWhileStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtWhile) {} ~ASTWhileStmt() override = default; void SetCondExpr(ASTExpr *astExpr) { @@ -191,7 +192,7 @@ class ASTWhileStmt : public ASTStmt { class ASTDoStmt : public ASTStmt { public: - ASTDoStmt() : ASTStmt(kASTStmtDo) {} + explicit ASTDoStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtDo) {} ~ASTDoStmt() override = default; void SetCondExpr(ASTExpr *astExpr) { @@ -210,7 +211,7 @@ class ASTDoStmt : public ASTStmt { class ASTBreakStmt : public ASTStmt { public: - ASTBreakStmt() : ASTStmt(kASTStmtBreak) {} + explicit ASTBreakStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtBreak) {} ~ASTBreakStmt() override = default; private: @@ -219,7 +220,8 @@ class ASTBreakStmt : public ASTStmt { class ASTLabelStmt : public ASTStmt { public: - ASTLabelStmt() : ASTStmt(kASTStmtLabel) {} + explicit ASTLabelStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtLabel), + labelName("", allocatorIn.GetMemPool()) {} ~ASTLabelStmt() override = default; void SetSubStmt(ASTStmt *stmt) { @@ -230,15 +232,19 @@ class ASTLabelStmt : public ASTStmt { labelName = name; } + const std::string GetLabelName() const { + return labelName.c_str() == nullptr ? "" : labelName.c_str(); + } + private: std::list Emit2FEStmtImpl() const override; - std::string labelName; + MapleString labelName; ASTStmt *subStmt = nullptr; }; class ASTContinueStmt : public ASTStmt { public: - ASTContinueStmt() : ASTStmt(kASTStmtContinue) {} + explicit ASTContinueStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtContinue) {} ~ASTContinueStmt() override = default; private: @@ -247,7 +253,7 @@ class ASTContinueStmt : public ASTStmt { class ASTUnaryOperatorStmt : public ASTStmt { public: - ASTUnaryOperatorStmt() : ASTStmt(kASTStmtUO) {} + explicit ASTUnaryOperatorStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtUO) {} ~ASTUnaryOperatorStmt() = default; private: @@ -256,7 +262,7 @@ class ASTUnaryOperatorStmt : public ASTStmt { class ASTBinaryOperatorStmt : public ASTStmt { public: - ASTBinaryOperatorStmt() : ASTStmt(kASTStmtBO) {} + explicit ASTBinaryOperatorStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtBO) {} ~ASTBinaryOperatorStmt() override = default; private: @@ -265,11 +271,12 @@ class ASTBinaryOperatorStmt : public ASTStmt { class ASTGotoStmt : public ASTStmt { public: - ASTGotoStmt() : ASTStmt(kASTStmtGoto) {} + explicit ASTGotoStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtGoto), + labelName("", allocatorIn.GetMemPool()) {} ~ASTGotoStmt() = default; std::string GetLabelName() const { - return labelName; + return labelName.c_str() == nullptr ? 
"" : labelName.c_str(); } void SetLabelName(const std::string &name) { @@ -278,12 +285,12 @@ class ASTGotoStmt : public ASTStmt { private: std::list Emit2FEStmtImpl() const override; - std::string labelName; + MapleString labelName; }; class ASTIndirectGotoStmt : public ASTStmt { public: - ASTIndirectGotoStmt() : ASTStmt(kASTStmtIndirectGoto) {} + explicit ASTIndirectGotoStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtIndirectGoto) {} ~ASTIndirectGotoStmt() = default; protected: @@ -292,7 +299,7 @@ class ASTIndirectGotoStmt : public ASTStmt { class ASTSwitchStmt : public ASTStmt { public: - ASTSwitchStmt() : ASTStmt(kASTStmtSwitch) {} + explicit ASTSwitchStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtSwitch) {} ~ASTSwitchStmt() = default; void SetCondStmt(ASTStmt *cond) { @@ -342,7 +349,7 @@ class ASTSwitchStmt : public ASTStmt { class ASTCaseStmt : public ASTStmt { public: - ASTCaseStmt() : ASTStmt(kASTStmtCase) {} + explicit ASTCaseStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtCase) {} ~ASTCaseStmt() = default; void SetLHS(ASTExpr *l) { @@ -396,7 +403,7 @@ class ASTCaseStmt : public ASTStmt { class ASTDefaultStmt : public ASTStmt { public: - ASTDefaultStmt() : ASTStmt(kASTStmtDefault) {} + explicit ASTDefaultStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtDefault) {} ~ASTDefaultStmt() = default; void SetChildStmt(ASTStmt* ch) { @@ -414,7 +421,7 @@ class ASTDefaultStmt : public ASTStmt { class ASTNullStmt : public ASTStmt { public: - ASTNullStmt() : ASTStmt(kASTStmtNull) {} + explicit ASTNullStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtNull) {} ~ASTNullStmt() = default; private: @@ -423,14 +430,15 @@ class ASTNullStmt : public ASTStmt { class ASTDeclStmt : public ASTStmt { public: - ASTDeclStmt() : ASTStmt(kASTStmtDecl) {} + explicit ASTDeclStmt(MapleAllocator &allocatorIn) + : ASTStmt(allocatorIn, kASTStmtDecl), subDecls(allocatorIn.Adapter()) {} ~ASTDeclStmt() = default; void SetSubDecl(ASTDecl *decl) { subDecls.emplace_back(decl); } - const std::list& GetSubDecls() const { + const MapleList& GetSubDecls() const { return subDecls; } @@ -438,12 +446,12 @@ class ASTDeclStmt : public ASTStmt { std::list Emit2FEStmtImpl() const override; void InsertBoundaryVar(ASTDecl *ptrDecl, std::list &stmts) const; - std::list subDecls; + MapleList subDecls; }; class ASTCompoundAssignOperatorStmt : public ASTStmt { public: - ASTCompoundAssignOperatorStmt() : ASTStmt(kASTStmtCAO) {} + explicit ASTCompoundAssignOperatorStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtCAO) {} ~ASTCompoundAssignOperatorStmt() override = default; private: @@ -452,7 +460,7 @@ class ASTCompoundAssignOperatorStmt : public ASTStmt { class ASTImplicitCastExprStmt : public ASTStmt { public: - ASTImplicitCastExprStmt() : ASTStmt(kASTStmtImplicitCastExpr) {} + explicit ASTImplicitCastExprStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtImplicitCastExpr) {} ~ASTImplicitCastExprStmt() override = default; private: @@ -461,7 +469,7 @@ class ASTImplicitCastExprStmt : public ASTStmt { class ASTParenExprStmt : public ASTStmt { public: - ASTParenExprStmt() : ASTStmt(kASTStmtParenExpr) {} + explicit ASTParenExprStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtParenExpr) {} ~ASTParenExprStmt() override = default; private: @@ -470,7 +478,7 @@ class ASTParenExprStmt : public ASTStmt { class ASTIntegerLiteralStmt : public ASTStmt { public: - ASTIntegerLiteralStmt() : ASTStmt(kASTStmtIntegerLiteral) {} + 
explicit ASTIntegerLiteralStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtIntegerLiteral) {} ~ASTIntegerLiteralStmt() override = default; private: @@ -479,7 +487,7 @@ class ASTIntegerLiteralStmt : public ASTStmt { class ASTFloatingLiteralStmt : public ASTStmt { public: - ASTFloatingLiteralStmt() : ASTStmt(kASTStmtFloatingLiteral) {} + explicit ASTFloatingLiteralStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtFloatingLiteral) {} ~ASTFloatingLiteralStmt() override = default; private: @@ -488,7 +496,7 @@ class ASTFloatingLiteralStmt : public ASTStmt { class ASTVAArgExprStmt : public ASTStmt { public: - ASTVAArgExprStmt() : ASTStmt(kASTStmtVAArgExpr) {} + explicit ASTVAArgExprStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtVAArgExpr) {} ~ASTVAArgExprStmt() override = default; private: @@ -497,7 +505,7 @@ class ASTVAArgExprStmt : public ASTStmt { class ASTConditionalOperatorStmt : public ASTStmt { public: - ASTConditionalOperatorStmt() : ASTStmt(kASTStmtConditionalOperator) {} + explicit ASTConditionalOperatorStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtConditionalOperator) {} ~ASTConditionalOperatorStmt() override = default; private: @@ -506,7 +514,7 @@ class ASTConditionalOperatorStmt : public ASTStmt { class ASTCharacterLiteralStmt : public ASTStmt { public: - ASTCharacterLiteralStmt() : ASTStmt(kASTStmtCharacterLiteral) {} + explicit ASTCharacterLiteralStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtCharacterLiteral) {} ~ASTCharacterLiteralStmt() override = default; private: @@ -515,7 +523,7 @@ class ASTCharacterLiteralStmt : public ASTStmt { class ASTStmtExprStmt : public ASTStmt { public: - ASTStmtExprStmt() : ASTStmt(kASTStmtStmtExpr) {} + explicit ASTStmtExprStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtStmtExpr) {} ~ASTStmtExprStmt() override = default; void SetBodyStmt(ASTStmt *stmt) { @@ -534,7 +542,7 @@ class ASTStmtExprStmt : public ASTStmt { class ASTCStyleCastExprStmt : public ASTStmt { public: - ASTCStyleCastExprStmt() : ASTStmt(kASTStmtCStyleCastExpr) {} + explicit ASTCStyleCastExprStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtCStyleCastExpr) {} ~ASTCStyleCastExprStmt() override = default; private: @@ -543,7 +551,8 @@ class ASTCStyleCastExprStmt : public ASTStmt { class ASTCallExprStmt : public ASTStmt { public: - ASTCallExprStmt() : ASTStmt(kASTStmtCallExpr), varName(FEUtils::GetSequentialName("retVar_")) {} + explicit ASTCallExprStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtCallExpr), + varName(FEUtils::GetSequentialName("retVar_")) {} ~ASTCallExprStmt() override = default; private: @@ -556,7 +565,7 @@ class ASTCallExprStmt : public ASTStmt { class ASTAtomicExprStmt : public ASTStmt { public: - ASTAtomicExprStmt() : ASTStmt(kASTStmtAtomicExpr) {} + explicit ASTAtomicExprStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtAtomicExpr) {} ~ASTAtomicExprStmt() override = default; private: @@ -565,13 +574,19 @@ class ASTAtomicExprStmt : public ASTStmt { class ASTGCCAsmStmt : public ASTStmt { public: - ASTGCCAsmStmt() : ASTStmt(kASTStmtGCCAsmStmt) {} + explicit ASTGCCAsmStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtGCCAsmStmt), + asmStr("", allocatorIn.GetMemPool()), outputs(allocatorIn.Adapter()), inputs(allocatorIn.Adapter()), + clobbers(allocatorIn.Adapter()), labels(allocatorIn.Adapter()) {} ~ASTGCCAsmStmt() override = default; void SetAsmStr(const std::string &str) { asmStr = str; } + const std::string 
GetAsmStr() const { + return asmStr.c_str() == nullptr ? "" : asmStr.c_str(); + } + void InsertOutput(std::tuple &&output) { outputs.emplace_back(output); } @@ -598,18 +613,18 @@ class ASTGCCAsmStmt : public ASTStmt { private: std::list Emit2FEStmtImpl() const override; - std::string asmStr; - std::vector> outputs; - std::vector> inputs; - std::vector clobbers; - std::vector labels; + MapleString asmStr; + MapleVector> outputs; + MapleVector> inputs; + MapleVector clobbers; + MapleVector labels; bool isGoto = false; bool isVolatile = false; }; class ASTOffsetOfStmt : public ASTStmt { public: - ASTOffsetOfStmt() : ASTStmt(kASTOffsetOfStmt) {} + explicit ASTOffsetOfStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTOffsetOfStmt) {} ~ASTOffsetOfStmt() override = default; private: @@ -618,11 +633,29 @@ class ASTOffsetOfStmt : public ASTStmt { class ASTGenericSelectionExprStmt : public ASTStmt { public: - ASTGenericSelectionExprStmt() : ASTStmt(kASTGenericSelectionExprStmt) {} + explicit ASTGenericSelectionExprStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTGenericSelectionExprStmt) {} ~ASTGenericSelectionExprStmt() override = default; private: std::list Emit2FEStmtImpl() const override; }; + +class ASTDeclRefExprStmt : public ASTStmt { + public: + explicit ASTDeclRefExprStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtDeclRefExpr) {} + ~ASTDeclRefExprStmt() override = default; + + private: + std::list Emit2FEStmtImpl() const override; +}; + +class ASTUnaryExprOrTypeTraitExprStmt : public ASTStmt { + public: + explicit ASTUnaryExprOrTypeTraitExprStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtDeclRefExpr) {} + ~ASTUnaryExprOrTypeTraitExprStmt() override = default; + + private: + std::list Emit2FEStmtImpl() const override; +}; } // namespace maple #endif // HIR2MPL_AST_INPUT_INCLUDE_AST_STMT_H diff --git a/src/hir2mpl/ast_input/clang/include/ast_struct2fe_helper.h b/src/hir2mpl/ast_input/clang/include/ast_struct2fe_helper.h index 772a0639055c5cc70f6fd4e8fbae97b8f9002fd9..f4f1de8bd415a31cf1cf60f0d9f6c58c7e951d7b 100644 --- a/src/hir2mpl/ast_input/clang/include/ast_struct2fe_helper.h +++ b/src/hir2mpl/ast_input/clang/include/ast_struct2fe_helper.h @@ -93,7 +93,7 @@ class ASTFunc2FEHelper : public FEInputMethodHelper { return func; } - const std::string &GetSrcFileName() const; + const std::string GetSrcFileName() const; protected: bool ProcessDeclImpl(MapleAllocator &allocator) override; diff --git a/src/hir2mpl/ast_input/clang/include/builtin_func_emit.def b/src/hir2mpl/ast_input/clang/include/builtin_func_emit.def index 1b4efe5299d793b87aa3a9fd3fcc82b927877245..874b607b1c60fbdb6f9f1a05935c6ee6b86d1a15 100644 --- a/src/hir2mpl/ast_input/clang/include/builtin_func_emit.def +++ b/src/hir2mpl/ast_input/clang/include/builtin_func_emit.def @@ -110,22 +110,73 @@ BUILTIN_FUNC_EMIT("__builtin_rotateright64", &ASTCallExpr::EmitBuiltinRotateRigh BUILTIN_FUNC_EMIT("__sync_add_and_fetch_8", &ASTCallExpr::EmitBuiltinSyncAddAndFetch8) BUILTIN_FUNC_EMIT("__sync_add_and_fetch_4", &ASTCallExpr::EmitBuiltinSyncAddAndFetch4) BUILTIN_FUNC_EMIT("__sync_add_and_fetch_2", &ASTCallExpr::EmitBuiltinSyncAddAndFetch2) +BUILTIN_FUNC_EMIT("__sync_add_and_fetch_1", &ASTCallExpr::EmitBuiltinSyncAddAndFetch1) BUILTIN_FUNC_EMIT("__sync_sub_and_fetch_8", &ASTCallExpr::EmitBuiltinSyncSubAndFetch8) BUILTIN_FUNC_EMIT("__sync_sub_and_fetch_4", &ASTCallExpr::EmitBuiltinSyncSubAndFetch4) BUILTIN_FUNC_EMIT("__sync_sub_and_fetch_2", &ASTCallExpr::EmitBuiltinSyncSubAndFetch2) 
+BUILTIN_FUNC_EMIT("__sync_sub_and_fetch_1", &ASTCallExpr::EmitBuiltinSyncSubAndFetch1) BUILTIN_FUNC_EMIT("__sync_fetch_and_sub_8", &ASTCallExpr::EmitBuiltinSyncFetchAndSub8) BUILTIN_FUNC_EMIT("__sync_fetch_and_sub_4", &ASTCallExpr::EmitBuiltinSyncFetchAndSub4) BUILTIN_FUNC_EMIT("__sync_fetch_and_sub_2", &ASTCallExpr::EmitBuiltinSyncFetchAndSub2) +BUILTIN_FUNC_EMIT("__sync_fetch_and_sub_1", &ASTCallExpr::EmitBuiltinSyncFetchAndSub1) BUILTIN_FUNC_EMIT("__sync_fetch_and_add_8", &ASTCallExpr::EmitBuiltinSyncFetchAndAdd8) BUILTIN_FUNC_EMIT("__sync_fetch_and_add_4", &ASTCallExpr::EmitBuiltinSyncFetchAndAdd4) BUILTIN_FUNC_EMIT("__sync_fetch_and_add_2", &ASTCallExpr::EmitBuiltinSyncFetchAndAdd2) +BUILTIN_FUNC_EMIT("__sync_fetch_and_add_1", &ASTCallExpr::EmitBuiltinSyncFetchAndAdd1) BUILTIN_FUNC_EMIT("__sync_bool_compare_and_swap_8", &ASTCallExpr::EmitBuiltinSyncBoolCompareAndSwap8) BUILTIN_FUNC_EMIT("__sync_bool_compare_and_swap_4", &ASTCallExpr::EmitBuiltinSyncBoolCompareAndSwap4) +BUILTIN_FUNC_EMIT("__sync_bool_compare_and_swap_2", &ASTCallExpr::EmitBuiltinSyncBoolCompareAndSwap2) +BUILTIN_FUNC_EMIT("__sync_bool_compare_and_swap_1", &ASTCallExpr::EmitBuiltinSyncBoolCompareAndSwap1) BUILTIN_FUNC_EMIT("__sync_val_compare_and_swap_8", &ASTCallExpr::EmitBuiltinSyncValCompareAndSwap8) BUILTIN_FUNC_EMIT("__sync_val_compare_and_swap_4", &ASTCallExpr::EmitBuiltinSyncValCompareAndSwap4) +BUILTIN_FUNC_EMIT("__sync_val_compare_and_swap_2", &ASTCallExpr::EmitBuiltinSyncValCompareAndSwap2) +BUILTIN_FUNC_EMIT("__sync_val_compare_and_swap_1", &ASTCallExpr::EmitBuiltinSyncValCompareAndSwap1) BUILTIN_FUNC_EMIT("__sync_lock_test_and_set_8", &ASTCallExpr::EmitBuiltinSyncLockTestAndSet8) BUILTIN_FUNC_EMIT("__sync_lock_test_and_set_4", &ASTCallExpr::EmitBuiltinSyncLockTestAndSet4) +BUILTIN_FUNC_EMIT("__sync_lock_test_and_set_2", &ASTCallExpr::EmitBuiltinSyncLockTestAndSet2) +BUILTIN_FUNC_EMIT("__sync_lock_test_and_set_1", &ASTCallExpr::EmitBuiltinSyncLockTestAndSet1) BUILTIN_FUNC_EMIT("__sync_lock_release_8", &ASTCallExpr::EmitBuiltinSyncLockRelease8) BUILTIN_FUNC_EMIT("__sync_lock_release_4", &ASTCallExpr::EmitBuiltinSyncLockRelease4) +BUILTIN_FUNC_EMIT("__sync_lock_release_2", &ASTCallExpr::EmitBuiltinSyncLockRelease2) +BUILTIN_FUNC_EMIT("__sync_lock_release_1", &ASTCallExpr::EmitBuiltinSyncLockRelease1) +BUILTIN_FUNC_EMIT("__sync_fetch_and_and_1", &ASTCallExpr::EmitBuiltinSyncFetchAndAnd1) +BUILTIN_FUNC_EMIT("__sync_fetch_and_and_2", &ASTCallExpr::EmitBuiltinSyncFetchAndAnd2) +BUILTIN_FUNC_EMIT("__sync_fetch_and_and_4", &ASTCallExpr::EmitBuiltinSyncFetchAndAnd4) +BUILTIN_FUNC_EMIT("__sync_fetch_and_and_8", &ASTCallExpr::EmitBuiltinSyncFetchAndAnd8) +BUILTIN_FUNC_EMIT("__sync_fetch_and_or_1", &ASTCallExpr::EmitBuiltinSyncFetchAndOr1) +BUILTIN_FUNC_EMIT("__sync_fetch_and_or_2", &ASTCallExpr::EmitBuiltinSyncFetchAndOr2) +BUILTIN_FUNC_EMIT("__sync_fetch_and_or_4", &ASTCallExpr::EmitBuiltinSyncFetchAndOr4) +BUILTIN_FUNC_EMIT("__sync_fetch_and_or_8", &ASTCallExpr::EmitBuiltinSyncFetchAndOr8) +BUILTIN_FUNC_EMIT("__sync_fetch_and_xor_1", &ASTCallExpr::EmitBuiltinSyncFetchAndXor1) +BUILTIN_FUNC_EMIT("__sync_fetch_and_xor_2", &ASTCallExpr::EmitBuiltinSyncFetchAndXor2) +BUILTIN_FUNC_EMIT("__sync_fetch_and_xor_4", &ASTCallExpr::EmitBuiltinSyncFetchAndXor4) +BUILTIN_FUNC_EMIT("__sync_fetch_and_xor_8", &ASTCallExpr::EmitBuiltinSyncFetchAndXor8) +BUILTIN_FUNC_EMIT("__sync_fetch_and_nand_1", &ASTCallExpr::EmitBuiltinSyncFetchAndNand1) +BUILTIN_FUNC_EMIT("__sync_fetch_and_nand_2", &ASTCallExpr::EmitBuiltinSyncFetchAndNand2) 
+BUILTIN_FUNC_EMIT("__sync_fetch_and_nand_4", &ASTCallExpr::EmitBuiltinSyncFetchAndNand4) +BUILTIN_FUNC_EMIT("__sync_fetch_and_nand_8", &ASTCallExpr::EmitBuiltinSyncFetchAndNand8) +BUILTIN_FUNC_EMIT("__sync_and_and_fetch_1", &ASTCallExpr::EmitBuiltinSyncAndAndFetch1) +BUILTIN_FUNC_EMIT("__sync_and_and_fetch_2", &ASTCallExpr::EmitBuiltinSyncAndAndFetch2) +BUILTIN_FUNC_EMIT("__sync_and_and_fetch_4", &ASTCallExpr::EmitBuiltinSyncAndAndFetch4) +BUILTIN_FUNC_EMIT("__sync_and_and_fetch_8", &ASTCallExpr::EmitBuiltinSyncAndAndFetch8) +BUILTIN_FUNC_EMIT("__sync_or_and_fetch_1", &ASTCallExpr::EmitBuiltinSyncOrAndFetch1) +BUILTIN_FUNC_EMIT("__sync_or_and_fetch_2", &ASTCallExpr::EmitBuiltinSyncOrAndFetch2) +BUILTIN_FUNC_EMIT("__sync_or_and_fetch_4", &ASTCallExpr::EmitBuiltinSyncOrAndFetch4) +BUILTIN_FUNC_EMIT("__sync_or_and_fetch_8", &ASTCallExpr::EmitBuiltinSyncOrAndFetch8) +BUILTIN_FUNC_EMIT("__sync_xor_and_fetch_1", &ASTCallExpr::EmitBuiltinSyncXorAndFetch1) +BUILTIN_FUNC_EMIT("__sync_xor_and_fetch_2", &ASTCallExpr::EmitBuiltinSyncXorAndFetch2) +BUILTIN_FUNC_EMIT("__sync_xor_and_fetch_4", &ASTCallExpr::EmitBuiltinSyncXorAndFetch4) +BUILTIN_FUNC_EMIT("__sync_xor_and_fetch_8", &ASTCallExpr::EmitBuiltinSyncXorAndFetch8) +BUILTIN_FUNC_EMIT("__sync_nand_and_fetch_1", &ASTCallExpr::EmitBuiltinSyncNandAndFetch1) +BUILTIN_FUNC_EMIT("__sync_nand_and_fetch_2", &ASTCallExpr::EmitBuiltinSyncNandAndFetch2) +BUILTIN_FUNC_EMIT("__sync_nand_and_fetch_4", &ASTCallExpr::EmitBuiltinSyncNandAndFetch4) +BUILTIN_FUNC_EMIT("__sync_nand_and_fetch_8", &ASTCallExpr::EmitBuiltinSyncNandAndFetch8) +BUILTIN_FUNC_EMIT("__sync_synchronize", &ASTCallExpr::EmitBuiltinSyncSynchronize) + +BUILTIN_FUNC_EMIT("__atomic_exchange_1", &ASTCallExpr::EmitBuiltinAtomicExchangeN) +BUILTIN_FUNC_EMIT("__atomic_exchange_2", &ASTCallExpr::EmitBuiltinAtomicExchangeN) +BUILTIN_FUNC_EMIT("__atomic_exchange_4", &ASTCallExpr::EmitBuiltinAtomicExchangeN) +BUILTIN_FUNC_EMIT("__atomic_exchange_8", &ASTCallExpr::EmitBuiltinAtomicExchangeN) + BUILTIN_FUNC_EMIT("__builtin_return_address", &ASTCallExpr::EmitBuiltinReturnAddress) BUILTIN_FUNC_EMIT("__builtin_extract_return_addr", &ASTCallExpr::EmitBuiltinExtractReturnAddr) \ No newline at end of file diff --git a/src/hir2mpl/ast_input/clang/lib/ast_interface.cpp b/src/hir2mpl/ast_input/clang/lib/ast_interface.cpp index 0315f2e402663b8e5fa260327ebaca32e5454451..b073ee32859fa927922a7d8c581cace0e2f09e34 100644 --- a/src/hir2mpl/ast_input/clang/lib/ast_interface.cpp +++ b/src/hir2mpl/ast_input/clang/lib/ast_interface.cpp @@ -19,11 +19,11 @@ #include "fe_manager.h" namespace maple { -bool LibAstFile::Open(const std::string &fileName, +bool LibAstFile::Open(const MapleString &fileName, int excludeDeclFromPCH, int displayDiagnostics) { astFileName = fileName; - CXIndex index = clang_createIndex(excludeDeclFromPCH, displayDiagnostics); - CXTranslationUnit translationUnit = clang_createTranslationUnit(index, fileName.c_str()); + index = clang_createIndex(excludeDeclFromPCH, displayDiagnostics); + translationUnit = clang_createTranslationUnit(index, fileName.c_str()); if (translationUnit == nullptr) { return false; } @@ -46,6 +46,13 @@ bool LibAstFile::Open(const std::string &fileName, return true; } +void LibAstFile::DisposeTranslationUnit() { + clang_disposeIndex(index); + clang_disposeTranslationUnit(translationUnit); + translationUnit = nullptr; + index = nullptr; +} + const AstASTContext *LibAstFile::GetAstContext() const { return astContext; } @@ -179,6 +186,14 @@ void LibAstFile::GetStorageAttrs(const 
clang::NamedDecl &decl, GenericAttrs &gen const auto *funcDecl = llvm::cast(&decl); const clang::StorageClass storageClass = funcDecl->getStorageClass(); GetSClassAttrs(storageClass, genAttrs); + // static or extern maybe missing in current FunctionDecls, + // Since a given function can be declared several times in a program, + // Only one of those FunctionDecls will be found when traversing the list of declarations in the context. + const clang::FunctionDecl *prev = funcDecl->getPreviousDecl(); + while (prev != nullptr && prev->isDefined()) { + GetStorageAttrs(*prev, genAttrs); + prev = prev->getPreviousDecl(); + } break; } case clang::Decl::ParmVar: @@ -274,6 +289,9 @@ void LibAstFile::CollectFuncAttrs(const clang::FunctionDecl &decl, GenericAttrs } else if (decl.hasAttr()) { genAttrs.SetAttr(GENATTR_noinline); } + if (decl.hasAttr()) { + genAttrs.SetAttr(GENATTR_always_inline); + } if (decl.isDefaulted()) { genAttrs.SetAttr(GENATTR_default); } @@ -309,27 +327,17 @@ void LibAstFile::CollectFuncAttrs(const clang::FunctionDecl &decl, GenericAttrs } void LibAstFile::CheckUnsupportedFuncAttrs(const clang::FunctionDecl &decl) { - std::string unsupportedFuncAttrs = ""; - if (decl.hasAttr()) { - unsupportedFuncAttrs += " no_instrument_function"; - } - if (decl.hasAttr()) { - unsupportedFuncAttrs += " stdcall"; - } - if (decl.hasAttr()) { - unsupportedFuncAttrs += " cdecl"; - } - if (decl.hasAttr()) { - unsupportedFuncAttrs += " long_call"; - } - if (decl.hasAttr()) { - unsupportedFuncAttrs += " short_call"; - } - if(decl.hasAttr() || decl.hasAttr()) { - unsupportedFuncAttrs += " interrupt"; + if (!decl.hasAttrs()) { + return; } - if (decl.hasAttr()) { - unsupportedFuncAttrs += " naked"; + std::string unsupportedFuncAttrs = ""; + const clang::AttrVec &funcAttrs = decl.getAttrs(); + for (const auto *attr : funcAttrs) { + clang::attr::Kind attrKind = attr->getKind(); + auto iterator = unsupportedFuncAttrsMap.find(attrKind); + if (iterator != unsupportedFuncAttrsMap.end()) { + unsupportedFuncAttrs += iterator->second + " "; + } } CHECK_FATAL(unsupportedFuncAttrs.empty(), "%s:%d error: The function %s has unsupported attribute(s): %s", FEManager::GetModule().GetFileNameFromFileNum(GetLOC(decl.getLocation()).first).c_str(), @@ -354,15 +362,17 @@ void LibAstFile::CollectVarAttrs(const clang::VarDecl &decl, GenericAttrs &genAt } void LibAstFile::CheckUnsupportedVarAttrs(const clang::VarDecl &decl) { - std::string unsupportedVarAttrs = ""; - if(decl.hasAttr()) { - unsupportedVarAttrs += " mode"; + if (!decl.hasAttrs()) { + return; } - if(decl.hasAttr()) { - unsupportedVarAttrs += " nocommon"; - } - if(decl.hasAttr()) { - unsupportedVarAttrs += " transparent_union"; + std::string unsupportedVarAttrs = ""; + const clang::AttrVec &varAttrs = decl.getAttrs(); + for (const auto *attr : varAttrs) { + clang::attr::Kind attrKind = attr->getKind(); + auto iterator = unsupportedVarAttrsMap.find(attrKind); + if (iterator != unsupportedVarAttrsMap.end()) { + unsupportedVarAttrs += iterator->second + " "; + } } CHECK_FATAL(unsupportedVarAttrs.empty(), "%s:%d error: The variable %s has unsupported attribute(s): %s", FEManager::GetModule().GetFileNameFromFileNum(GetLOC(decl.getLocation()).first).c_str(), @@ -371,6 +381,49 @@ void LibAstFile::CheckUnsupportedVarAttrs(const clang::VarDecl &decl) { unsupportedVarAttrs.c_str()); } +void LibAstFile::CollectRecordAttrs(const clang::RecordDecl &decl, GenericAttrs &genAttrs, AccessKind access) { + clang::PackedAttr *packedAttr = decl.getAttr(); + if (packedAttr != nullptr) { 
+ genAttrs.SetAttr(GENATTR_pack); + genAttrs.InsertIntContentMap(GENATTR_pack, 1); // 1 byte + } + clang::MaxFieldAlignmentAttr *maxFieldAlignAttr = decl.getAttr(); + if (maxFieldAlignAttr != nullptr) { + genAttrs.SetAttr(GENATTR_pack); + genAttrs.InsertIntContentMap(GENATTR_pack, static_cast(maxFieldAlignAttr->getAlignment() / 8)); + } + CheckUnsupportedTypeAttrs(decl); +} + +void LibAstFile::CheckUnsupportedTypeAttrs(const clang::RecordDecl &decl) { + if (!decl.hasAttrs()) { + return; + } + std::string unsupportedTypeAttrs = ""; + const clang::AttrVec &typeAttrs = decl.getAttrs(); + for (const auto *attr : typeAttrs) { + clang::attr::Kind attrKind = attr->getKind(); + auto iterator = unsupportedTypeAttrsMap.find(attrKind); + if (iterator != unsupportedTypeAttrsMap.end()) { + unsupportedTypeAttrs += iterator->second + " "; + } + } + CHECK_FATAL(unsupportedTypeAttrs.empty(), "%s:%d error: struct or union %s has unsupported type attribute(s): %s", + FEManager::GetModule().GetFileNameFromFileNum(GetLOC(decl.getLocation()).first).c_str(), + GetLOC(decl.getLocation()).second, + GetMangledName(decl).c_str(), + unsupportedTypeAttrs.c_str()); +} + +void LibAstFile::CollectFieldAttrs(const clang::FieldDecl &decl, GenericAttrs &genAttrs, AccessKind access) { + CollectAttrs(decl, genAttrs, access); + clang::PackedAttr *packedAttr = decl.getAttr(); + if (packedAttr != nullptr) { + genAttrs.SetAttr(GENATTR_pack); + genAttrs.InsertIntContentMap(GENATTR_pack, 1); // 1 byte + } +} + void LibAstFile::EmitTypeName(const clang::QualType qualType, std::stringstream &ss) { switch (qualType->getTypeClass()) { case clang::Type::LValueReference: { @@ -412,23 +465,11 @@ const std::string LibAstFile::GetOrCreateMappedUnnamedName(uint32_t id) { std::map::iterator it = unnamedSymbolMap.find(id); if (it == unnamedSymbolMap.end()) { std::string name = FEUtils::GetSequentialName("unNamed"); - if (FEOptions::GetInstance().GetFuncInlineSize() != 0) { - name = name + GetAstFileNameHashStr(); - } unnamedSymbolMap[id] = name; } return unnamedSymbolMap[id]; } -const std::string LibAstFile::GetOrCreateCompoundLiteralExprInitName(uint32_t id) { - std::map::iterator it = CompoundLiteralExprInitSymbolMap.find(id); - if (it == CompoundLiteralExprInitSymbolMap.end()) { - const std::string name = FEUtils::GetSequentialName("unNamedInit"); - CompoundLiteralExprInitSymbolMap[id] = name; - } - return CompoundLiteralExprInitSymbolMap[id]; -} - void LibAstFile::EmitTypeName(const clang::RecordType &recoType, std::stringstream &ss) { clang::RecordDecl *recoDecl = recoType.getDecl(); std::string str = recoType.desugar().getAsString(); @@ -458,6 +499,9 @@ void LibAstFile::EmitTypeName(const clang::RecordType &recoType, std::stringstre nsStack.pop(); } auto nameStr = recoDecl->getName().str(); + if (nameStr.empty()) { + nameStr = GetTypedefNameFromUnnamedStruct(*recoDecl); + } if (nameStr.empty()) { uint32_t id = recoType.getDecl()->getLocation().getRawEncoding(); nameStr = GetOrCreateMappedUnnamedName(id); @@ -473,4 +517,13 @@ void LibAstFile::EmitTypeName(const clang::RecordType &recoType, std::stringstre ss << "_" << p.first << "_" << p.second; } } + +// get TypedefDecl name for the unnamed struct, e.g. 
typedef struct {} foo; +std::string LibAstFile::GetTypedefNameFromUnnamedStruct(const clang::RecordDecl &recoDecl) { + auto *defnameDcel = recoDecl.getTypedefNameForAnonDecl(); + if (defnameDcel != nullptr) { + return defnameDcel->getQualifiedNameAsString(); + } + return std::string(); +} } // namespace maple diff --git a/src/hir2mpl/ast_input/clang/lib/ast_interface.h b/src/hir2mpl/ast_input/clang/lib/ast_interface.h index 5f0255b41ca52a2fc48a6666ff73b1c62383b4c6..acca5a78486d27b49f47651ef6c3b8a08710b37c 100644 --- a/src/hir2mpl/ast_input/clang/lib/ast_interface.h +++ b/src/hir2mpl/ast_input/clang/lib/ast_interface.h @@ -21,6 +21,7 @@ #include "mpl_logging.h" #include "generic_attrs.h" #include "fe_utils.h" +#include "clang/Basic/AttrKinds.h" namespace maple { using Pos = std::pair; @@ -30,25 +31,60 @@ enum AccessKind { kPrivate, kNone }; +const std::unordered_map unsupportedFuncAttrsMap = { + {clang::attr::NoInstrumentFunction, "no_instrument_function"}, + {clang::attr::StdCall, "stdcall"}, + {clang::attr::CDecl, "cdecl"}, + {clang::attr::MipsLongCall, "mips_long_call"}, + {clang::attr::MipsShortCall, "mips_short_call"}, + {clang::attr::ARMInterrupt, "arm_interrupt"}, + {clang::attr::AnyX86Interrupt, "x86_interrupt"}, + {clang::attr::Naked, "naked"}, + {clang::attr::AllocAlign, "alloc_align"}, + {clang::attr::AssumeAligned, "assume_aligned"}, + {clang::attr::Flatten, "flatten"}, + {clang::attr::GNUInline, "gnu_inline"}, + {clang::attr::Cold, "cold"}, + {clang::attr::IFunc, "ifunc"}, + {clang::attr::NoSanitize, "no_sanitize"}, + {clang::attr::NoSplitStack, "no_split_stack"}, + {clang::attr::PatchableFunctionEntry, "patchable_function_entry"}, + {clang::attr::Target, "target"} +}; +const std::unordered_map unsupportedVarAttrsMap = { + {clang::attr::Mode, "mode"}, + {clang::attr::NoCommon, "nocommon"}, + {clang::attr::TransparentUnion, "transparent_union"}, + {clang::attr::Alias, "alias"}, + {clang::attr::Cleanup, "cleanup"}, + {clang::attr::Common, "common"}, + {clang::attr::Uninitialized, "uninitialized"} +}; +const std::unordered_map unsupportedTypeAttrsMap = { + {clang::attr::MSStruct, "ms_struct"} +}; class LibAstFile { public: - explicit LibAstFile(MapleList &recordDeclesIn) : recordDecles(recordDeclesIn) {} + explicit LibAstFile(MapleAllocator &allocatorIn, MapleList &recordDeclesIn) + : recordDeclMap(allocatorIn.Adapter()), recordDeclSet(allocatorIn.Adapter()), + unnamedSymbolMap(allocatorIn.Adapter()), CompoundLiteralExprInitSymbolMap(allocatorIn.Adapter()), + recordDecles(recordDeclesIn), astFileName("", allocatorIn.GetMemPool()) {} ~LibAstFile() = default; - bool Open(const std::string &fileName, + bool Open(const MapleString &fileName, int excludeDeclFromPCH, int displayDiagnostics); + void DisposeTranslationUnit(); const AstASTContext *GetAstContext() const; AstASTContext *GetNonConstAstContext() const; AstUnitDecl *GetAstUnitDecl(); std::string GetMangledName(const clang::NamedDecl &decl); const std::string GetOrCreateMappedUnnamedName(uint32_t id); - const std::string GetOrCreateCompoundLiteralExprInitName(uint32_t id); void EmitTypeName(const clang::QualType qualType, std::stringstream &ss); void EmitTypeName(const clang::RecordType &qualType, std::stringstream &ss); void EmitQualifierName(const clang::QualType qualType, std::stringstream &ss); - + std::string GetTypedefNameFromUnnamedStruct(const clang::RecordDecl &recoDecl); void CollectBaseEltTypeAndSizesFromConstArrayDecl(const clang::QualType &qualType, MIRType *&elemType, TypeAttrs &elemAttr, std::vector &operands); @@ 
-68,6 +104,9 @@ class LibAstFile { void CheckUnsupportedFuncAttrs(const clang::FunctionDecl &decl); void CollectVarAttrs(const clang::VarDecl &decl, GenericAttrs &genAttrs, AccessKind access); void CheckUnsupportedVarAttrs(const clang::VarDecl &decl); + void CollectRecordAttrs(const clang::RecordDecl &decl, GenericAttrs &genAttrs, AccessKind access); + void CheckUnsupportedTypeAttrs(const clang::RecordDecl &decl); + void CollectFieldAttrs(const clang::FieldDecl &decl, GenericAttrs &genAttrs, AccessKind access); MIRType *CvtPrimType(const clang::QualType qualType) const; PrimType CvtPrimType(const clang::BuiltinType::Kind) const; MIRType *CvtType(const clang::QualType qualType); @@ -87,7 +126,8 @@ class LibAstFile { } const std::string GetAstFileNameHashStr() const { - return FEUtils::GetFileNameHashStr(astFileName); + std::string fileName = (astFileName.c_str() == nullptr ? "" : astFileName.c_str()); + return FEUtils::GetFileNameHashStr(fileName); } Pos GetDeclPosInfo(const clang::Decl &decl) const; @@ -98,11 +138,11 @@ class LibAstFile { uint32 RetrieveAggTypeAlign(const clang::Type *ty) const; private: - using RecordDeclMap = std::map; + using RecordDeclMap = MapleMap; RecordDeclMap recordDeclMap; - std::set recordDeclSet; - std::map unnamedSymbolMap; - std::map CompoundLiteralExprInitSymbolMap; + MapleSet recordDeclSet; + MapleMap unnamedSymbolMap; + MapleMap CompoundLiteralExprInitSymbolMap; MIRModule *module = nullptr; MapleList &recordDecles; @@ -110,7 +150,9 @@ class LibAstFile { clang::ASTContext *astContext = nullptr; clang::TranslationUnitDecl *astUnitDecl = nullptr; clang::MangleContext *mangleContext = nullptr; - std::string astFileName; + CXTranslationUnit translationUnit = nullptr; + CXIndex index = nullptr; + MapleString astFileName; }; } // namespace maple #endif // HIR2MPL_AST_FILE_INCLUDE_AST_INTERFACE_H diff --git a/src/hir2mpl/ast_input/clang/lib/ast_type.cpp b/src/hir2mpl/ast_input/clang/lib/ast_type.cpp index 4d935dba70536748cdfb5726424bab181ab681fe..a2d3b89ccbbd02852804b38ce7ee4d0fe2f26d98 100644 --- a/src/hir2mpl/ast_input/clang/lib/ast_type.cpp +++ b/src/hir2mpl/ast_input/clang/lib/ast_type.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
@@ -78,7 +78,6 @@ PrimType LibAstFile::CvtPrimType(const clang::BuiltinType::Kind kind) const { return PTY_i64; case clang::BuiltinType::Int128: return PTY_i128; - case clang::BuiltinType::Half: // PTY_f16, NOTYETHANDLED case clang::BuiltinType::Float: return PTY_f32; case clang::BuiltinType::Double: @@ -88,6 +87,10 @@ PrimType LibAstFile::CvtPrimType(const clang::BuiltinType::Kind kind) const { return PTY_f64; case clang::BuiltinType::NullPtr: // default 64-bit, need to update return PTY_a64; + case clang::BuiltinType::Half: // PTY_f16, NOTYETHANDLED + case clang::BuiltinType::Float16: + CHECK_FATAL(false, "Float16 types not implemented yet"); + return PTY_void; case clang::BuiltinType::Void: default: return PTY_void; @@ -202,10 +205,16 @@ MIRType *LibAstFile::CvtRecordType(const clang::QualType srcType) { uint32_t id = recordType->getDecl()->getLocation().getRawEncoding(); name = GetOrCreateMappedUnnamedName(id); } else if (FEOptions::GetInstance().GetFuncInlineSize() != 0) { - name = name + GetAstFileNameHashStr(); + std::string recordLayoutStr = recordDecl->getDefinition() == nullptr ? "" : + ASTUtil::GetRecordLayoutString(astContext->getASTRecordLayout(recordDecl->getDefinition())); + std::string filename = astContext->getSourceManager().getFilename(recordDecl->getLocation()).str(); + name = name + FEUtils::GetFileNameHashStr(filename + recordLayoutStr); } type = FEManager::GetTypeManager().GetOrCreateStructType(name); type->SetMIRTypeKind(srcType->isUnionType() ? kTypeUnion : kTypeStruct); + if(recordDecl->getDefinition() == nullptr) { + type->SetMIRTypeKind(kTypeStructIncomplete); + } return recordDecl->isLambda() ? GlobalTables::GetTypeTable().GetOrCreatePointerType(*type) : type; } diff --git a/src/hir2mpl/ast_input/clang/lib/ast_util.cpp b/src/hir2mpl/ast_input/clang/lib/ast_util.cpp index 99fe86588548c1169119d5e8927a64e940ac1eb0..007eab98cadbd9c4c7096f42a15a65bb0e9b99fb 100644 --- a/src/hir2mpl/ast_input/clang/lib/ast_util.cpp +++ b/src/hir2mpl/ast_input/clang/lib/ast_util.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -328,4 +328,15 @@ bool ASTUtil::InsertFuncSet(const GStrIdx &idx) { static std::set funcIdxSet; return funcIdxSet.insert(idx).second; } + +std::string ASTUtil::GetRecordLayoutString(const clang::ASTRecordLayout &recordLayout) { + std::string recordLayoutStr = ""; + unsigned int fieldCount = recordLayout.getFieldCount(); + uint64_t recordSize = static_cast(recordLayout.getSize().getQuantity()); + recordLayoutStr += (std::to_string(fieldCount) + std::to_string(recordSize)); + for (unsigned int i = 0; i < fieldCount; ++i) { + recordLayoutStr += std::to_string(recordLayout.getFieldOffset(i)); + } + return recordLayoutStr; +} } // namespace maple diff --git a/src/hir2mpl/ast_input/clang/lib/ast_util.h b/src/hir2mpl/ast_input/clang/lib/ast_util.h index 503193269c6e283e0940a771c874d8cc6da81560..40d151685e565473691e3291f034043a7f246ab2 100644 --- a/src/hir2mpl/ast_input/clang/lib/ast_util.h +++ b/src/hir2mpl/ast_input/clang/lib/ast_util.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. 
* You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -15,6 +15,7 @@ #ifndef AST2MPL_INCLUDE_ASTUTIL_H #define AST2MPL_INCLUDE_ASTUTIL_H #include "clang/AST/AST.h" +#include "clang/AST/RecordLayout.h" #include "mir_type.h" #include "ast_macros.h" @@ -66,6 +67,7 @@ class ASTUtil { } return os.str(); } + static std::string GetRecordLayoutString(const clang::ASTRecordLayout &recordLayout); }; } // namespace maple #endif // AST2MPL_INCLUDE_ASTUTIL_H_ diff --git a/src/hir2mpl/ast_input/clang/src/ast_expr.cpp b/src/hir2mpl/ast_input/clang/src/ast_expr.cpp index b2082dd8a4487761eb4ea6f79773c1e08a4048ba..b7dacf5ecaeb3f0f6370185b33f8c0f2cd2baee2 100644 --- a/src/hir2mpl/ast_input/clang/src/ast_expr.cpp +++ b/src/hir2mpl/ast_input/clang/src/ast_expr.cpp @@ -226,7 +226,7 @@ void ASTCallExpr::AddArgsExpr(const std::unique_ptr &callStmt, s callStmt->AddExprArgReverse(std::move(expr)); } if (IsFirstArgRet()) { - UniqueFEIRVar var = FEIRBuilder::CreateVarNameForC(varName, *retType, false, false); + UniqueFEIRVar var = FEIRBuilder::CreateVarNameForC(GetRetVarName(), *retType, false, false); UniqueFEIRExpr expr = FEIRBuilder::CreateExprAddrofVar(var->Clone()); callStmt->AddExprArgReverse(std::move(expr)); } @@ -250,7 +250,7 @@ void ASTCallExpr::InsertNonnullCheckingForIcall(const UniqueFEIRExpr &expr, std: } UniqueFEIRExpr ASTCallExpr::AddRetExpr(const std::unique_ptr &callStmt) const { - UniqueFEIRVar var = FEIRBuilder::CreateVarNameForC(varName, *retType, false, false); + UniqueFEIRVar var = FEIRBuilder::CreateVarNameForC(GetRetVarName(), *retType, false, false); UniqueFEIRVar dreadVar = var->Clone(); if (!IsFirstArgRet()) { callStmt->SetVar(var->Clone()); @@ -264,7 +264,7 @@ std::unique_ptr ASTCallExpr::GenCallStmt() const { if (isIcall) { callStmt = std::make_unique(); } else { - StructElemNameIdx *nameIdx = mp->New(funcName); + StructElemNameIdx *nameIdx = mp->New(GetFuncName()); FEStructMethodInfo *info = static_cast( FEManager::GetTypeManager().RegisterStructMethodInfo(*nameIdx, kSrcLangC, false)); info->SetFuncAttrs(funcAttrs); @@ -319,6 +319,9 @@ MIRConst *ASTCastExpr::GenerateMIRConstImpl() const { MIRSymbol *mirSymbol = static_cast(astDecl)->Translate2MIRSymbol(); return FEManager::GetModule().GetMemPool()->New(mirSymbol->GetStIdx(), 0, *(astDecl->GetTypeDesc().front())); + } else if (isArrayToPointerDecay && child->GetASTOp() == kASTOpCompoundLiteralExpr) { + static_cast(child)->SetAddrof(true); + return child->GenerateMIRConst(); } else if (isNeededCvt) { if (dst->GetPrimType() == PTY_f64) { return GenerateMIRDoubleConst(); @@ -334,6 +337,9 @@ MIRConst *ASTCastExpr::GenerateMIRConstImpl() const { MIRConst *ASTCastExpr::GenerateMIRDoubleConst() const { MIRConst *childConst = child->GenerateMIRConst(); + if (childConst == nullptr) { + return nullptr; + } switch (childConst->GetKind()) { case kConstFloatConst: { return FEManager::GetModule().GetMemPool()->New( @@ -359,6 +365,9 @@ MIRConst *ASTCastExpr::GenerateMIRDoubleConst() const { MIRConst *ASTCastExpr::GenerateMIRFloatConst() const { MIRConst *childConst = child->GenerateMIRConst(); + if (childConst == nullptr) { + return nullptr; + } switch (childConst->GetKind()) { case kConstDoubleConst: { return FEManager::GetModule().GetMemPool()->New( @@ -379,6 +388,9 @@ MIRConst *ASTCastExpr::GenerateMIRFloatConst() const { MIRConst *ASTCastExpr::GenerateMIRIntConst() const { MIRConst *childConst = child->GenerateMIRConst(); + if (childConst == nullptr) { + return nullptr; + } switch (childConst->GetKind()) { case 
kConstDoubleConst: case kConstInt: { @@ -515,7 +527,6 @@ UniqueFEIRExpr ASTCastExpr::Emit2FEExprImpl(std::list &stmts) co } UniqueFEIRType dstType = std::make_unique(*dst); if (dst->GetKind() == kTypePointer) { - CheckNonnullFieldInStruct(); return subExpr; } else { return std::make_unique(std::move(dstType), OP_retype, std::move(subExpr)); @@ -680,20 +691,9 @@ UniqueFEIRExpr ASTUOPreDecExpr::Emit2FEExprImpl(std::list &stmts MIRConst *ASTUOAddrOfExpr::GenerateMIRConstImpl() const { switch (expr->GetASTOp()) { - case kASTOpCompoundLiteralExp: { - auto astCompoundLiteralExpr = static_cast(expr); - // CompoundLiteral Symbol - MIRSymbol *compoundLiteralMirSymbol = FEManager::GetMIRBuilder().GetOrCreateGlobalDecl( - astCompoundLiteralExpr->GetInitName(), - *astCompoundLiteralExpr->GetCompoundLiteralType()); - - auto child = static_cast(expr)->GetASTExpr(); - auto mirConst = child->GenerateMIRConst(); // InitListExpr in CompoundLiteral gen struct - compoundLiteralMirSymbol->SetKonst(mirConst); - - MIRAddrofConst *mirAddrofConst = FEManager::GetModule().GetMemPool()->New( - compoundLiteralMirSymbol->GetStIdx(), 0, *astCompoundLiteralExpr->GetCompoundLiteralType()); - return mirAddrofConst; + case kASTOpCompoundLiteralExpr: { + static_cast(expr)->SetAddrof(true); + return expr->GenerateMIRConst(); } case kASTOpRef: case kASTSubscriptExpr: @@ -731,6 +731,12 @@ UniqueFEIRExpr ASTUOAddrOfExpr::Emit2FEExprImpl(std::list &stmts } else if (childFEIRExpr->GetKind() == kExprIAddrof || childFEIRExpr->GetKind() == kExprAddrofVar || childFEIRExpr->GetKind() == kExprAddrofFunc || childFEIRExpr->GetKind() == kExprAddrof) { return childFEIRExpr; + } else if (childFEIRExpr->GetKind() == kExprConst) { + std::string tmpName = FEUtils::GetSequentialName("tmpvar_"); + UniqueFEIRVar tmpVar = FEIRBuilder::CreateVarNameForC(tmpName, childFEIRExpr->GetType()->Clone()); + auto tmpStmt = FEIRBuilder::CreateStmtDAssign(tmpVar->Clone(), std::move(childFEIRExpr)); + stmts.emplace_back(std::move(tmpStmt)); + return FEIRBuilder::CreateExprAddrofVar(std::move(tmpVar)); } else { CHECK_FATAL(false, "unsupported expr kind %d", childFEIRExpr->GetKind()); } @@ -740,14 +746,14 @@ UniqueFEIRExpr ASTUOAddrOfExpr::Emit2FEExprImpl(std::list &stmts // ---------- ASTUOAddrOfLabelExpr --------- MIRConst *ASTUOAddrOfLabelExpr::GenerateMIRConstImpl() const { return FEManager::GetMIRBuilder().GetCurrentFuncCodeMp()->New( - FEManager::GetMIRBuilder().GetOrCreateMIRLabel(labelName), + FEManager::GetMIRBuilder().GetOrCreateMIRLabel(GetLabelName()), FEManager::GetMIRBuilder().GetCurrentFunction()->GetPuidx(), // GetCurrentFunction need to be optimized *GlobalTables::GetTypeTable().GetVoidPtr()); // when parallel features } UniqueFEIRExpr ASTUOAddrOfLabelExpr::Emit2FEExprImpl(std::list &stmts) const { (void)stmts; - return FEIRBuilder::CreateExprAddrofLabel(labelName, std::make_unique(*uoType)); + return FEIRBuilder::CreateExprAddrofLabel(GetLabelName(), std::make_unique(*uoType)); } UniqueFEIRExpr ASTUODerefExpr::Emit2FEExprImpl(std::list &stmts) const { @@ -927,8 +933,12 @@ UniqueFEIRExpr ASTCompoundLiteralExpr::Emit2FEExprImpl(std::list if (child->GetASTOp() == kASTOpInitListExpr) { // other potential expr should concern std::string tmpName = FEUtils::GetSequentialName("clvar_"); static_cast(child)->SetInitListVarName(tmpName); - child->Emit2FEExpr(stmts); UniqueFEIRVar tmpVar = FEIRBuilder::CreateVarNameForC(tmpName, *compoundLiteralType); + auto expr = child->Emit2FEExpr(stmts); + if (expr != nullptr) { + auto tmpStmt = 
FEIRBuilder::CreateStmtDAssign(tmpVar->Clone(), std::move(expr)); + stmts.emplace_back(std::move(tmpStmt)); + } feirExpr = FEIRBuilder::CreateExprDRead(std::move(tmpVar)); } else { feirExpr = child->Emit2FEExpr(stmts); @@ -936,7 +946,27 @@ UniqueFEIRExpr ASTCompoundLiteralExpr::Emit2FEExprImpl(std::list return feirExpr; } +MIRConst *ASTCompoundLiteralExpr::GenerateMIRPtrConst() const { + CHECK_NULL_FATAL(compoundLiteralType); + std::string tmpName = FEUtils::GetSequentialName("cle."); + if (FEOptions::GetInstance().GetFuncInlineSize() != 0) { + tmpName = tmpName + FEUtils::GetFileNameHashStr(FEManager::GetModule().GetFileName()); + } + // If a var is pointer type, agg value cannot be directly assigned to it + // Create a temporary symbol for addrof agg value + MIRSymbol *cleSymbol = FEManager::GetMIRBuilder().GetOrCreateGlobalDecl( + tmpName, *compoundLiteralType); + auto mirConst = child->GenerateMIRConst(); // InitListExpr in CompoundLiteral gen struct + cleSymbol->SetKonst(mirConst); + MIRAddrofConst *mirAddrofConst = FEManager::GetModule().GetMemPool()->New( + cleSymbol->GetStIdx(), 0, *compoundLiteralType); + return mirAddrofConst; +} + MIRConst *ASTCompoundLiteralExpr::GenerateMIRConstImpl() const { + if (isAddrof) { + return GenerateMIRPtrConst(); + } return child->GenerateMIRConst(); } @@ -964,6 +994,11 @@ UniqueFEIRExpr ASTOffsetOfExpr::Emit2FEExprImpl(std::list &stmts // ---------- ASTInitListExpr ---------- MIRConst *ASTInitListExpr::GenerateMIRConstImpl() const { + // avoid the infinite loop + if (isGenerating) { + return nullptr; + } + isGenerating = true; if (initListType->GetKind() == kTypeArray) { return GenerateMIRConstForArray(); } else if (initListType->GetKind() == kTypeStruct || initListType->GetKind() == kTypeUnion) { @@ -985,9 +1020,8 @@ MIRConst *ASTInitListExpr::GenerateMIRConstForArray() const { CHECK_FATAL(initExprs.size() <= arrayMirType->GetSizeArrayItem(0), "InitExpr size must less or equal array size"); for (size_t i = 0; i < initExprs.size(); ++i) { auto konst = initExprs[i]->GenerateMIRConst(); - if (konst->GetKind() == kConstLblConst) { - // init by initListExpr, Only MIRConst kind is set here. - return konst; + if (konst == nullptr) { + return nullptr; } aggConst->AddItem(konst, 0); } @@ -1016,6 +1050,9 @@ MIRConst *ASTInitListExpr::GenerateMIRConstForStruct() const { } MIRAggConst *aggConst = FEManager::GetModule().GetMemPool()->New(FEManager::GetModule(), *initListType); CHECK_FATAL(initExprs.size() <= UINT_MAX, "Too large elem size"); + if (initListType->GetKind() == kTypeUnion) { + CHECK_FATAL(initExprs.size() == 1, "union should only have one elem"); + } for (uint32 i = 0; i < static_cast(initExprs.size()); ++i) { if (initExprs[i] == nullptr) { continue; @@ -1025,14 +1062,15 @@ MIRConst *ASTInitListExpr::GenerateMIRConstForStruct() const { // init by initListExpr, Only MIRConst kind is set here. return konst; } - aggConst->AddItem(konst, i + 1); + uint32 fieldIdx = (initListType->GetKind() == kTypeUnion) ? 
unionInitFieldIdx : i; + aggConst->AddItem(konst, fieldIdx + 1); } ENCChecker::CheckNullFieldInGlobalStruct(*initListType, *aggConst, initExprs); return aggConst; } UniqueFEIRExpr ASTInitListExpr::Emit2FEExprImpl(std::list &stmts) const { - UniqueFEIRVar feirVar = FEIRBuilder::CreateVarNameForC(varName, *initListType); + UniqueFEIRVar feirVar = FEIRBuilder::CreateVarNameForC(GetInitListVarName(), *initListType); if (initListType->GetKind() == MIRTypeKind::kTypeArray) { UniqueFEIRExpr arrayExpr = FEIRBuilder::CreateExprAddrofVar(feirVar->Clone()); auto base = std::variant, UniqueFEIRExpr>(arrayExpr->Clone()); @@ -1196,7 +1234,8 @@ void ASTInitListExpr::ProcessStructInitList(std::variant>(base).second; } - if (initList->initExprs.size() == 0) { + if (initList->initExprs.size() == 0 || (!FEOptions::GetInstance().IsNpeCheckDynamic() && + initList->GetEvaluatedFlag() == EvaluatedAsZero)) { UniqueFEIRExpr addrOfExpr = std::make_unique(var->Clone(), 0); ProcessImplicitInit(addrOfExpr->Clone(), 0, curStructMirType->GetSize(), 1, stmts); return; @@ -1305,20 +1344,40 @@ void ASTInitListExpr::ProcessArrayInitList(const UniqueFEIRExpr &addrOfArray, AS auto elementPtrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*elementType); auto elementPtrFEType = FEIRTypeHelper::CreateTypeNative(*elementPtrType); CHECK_FATAL(initExprs.size() <= INT_MAX, "invalid index"); + if (!FEOptions::GetInstance().IsNpeCheckDynamic() && initList->GetEvaluatedFlag() == EvaluatedAsZero) { + ProcessImplicitInit(addrOfArray->Clone(), 0, arrayMirType->GetSize(), 1, stmts); + return; + } for (size_t i = 0; i < initList->initExprs.size(); ++i) { std::list indexExprs; UniqueFEIRExpr indexExpr = FEIRBuilder::CreateExprConstI32(static_cast(i)); indexExprs.emplace_back(std::move(indexExpr)); auto addrOfElemExpr = FEIRBuilder::CreateExprAddrofArray(arrayFEType->Clone(), addrOfArray->Clone(), "", indexExprs); - if (initList->initExprs[i]->GetASTOp() == kASTOpInitListExpr) { + ASTExpr *subExpr = initList->initExprs[i]; + while (subExpr->GetASTOp() == kConstantExpr) { + subExpr = static_cast(subExpr)->GetChild(); + } + if (subExpr->GetASTOp() == kASTOpInitListExpr) { auto base = std::variant, UniqueFEIRExpr>(addrOfElemExpr->Clone()); - ProcessInitList(base, static_cast(initList->initExprs[i]), stmts); + if (!FEOptions::GetInstance().IsNpeCheckDynamic() && subExpr->GetEvaluatedFlag() == EvaluatedAsZero) { + UniqueFEIRExpr realAddr = addrOfArray->Clone(); + if (i > 0) { + UniqueFEIRExpr indexExpr = FEIRBuilder::CreateExprConstI32(static_cast(i)); + UniqueFEIRExpr elemSizeExpr = FEIRBuilder::CreateExprConstI32(static_cast(elementType->GetSize())); + UniqueFEIRExpr offsetSizeExpr = FEIRBuilder::CreateExprBinary(OP_mul, std::move(indexExpr), + elemSizeExpr->Clone()); + realAddr = FEIRBuilder::CreateExprBinary(OP_add, std::move(realAddr), offsetSizeExpr->Clone()); + } + ProcessImplicitInit(realAddr->Clone(), 0, elementType->GetSize(), 1, stmts); + } else { + ProcessInitList(base, static_cast(subExpr), stmts); + } } else { - UniqueFEIRExpr elemExpr = initList->initExprs[i]->Emit2FEExpr(stmts); - if (elementType->GetKind() == kTypeArray && initList->initExprs[i]->GetASTOp() == kASTStringLiteral) { + UniqueFEIRExpr elemExpr = subExpr->Emit2FEExpr(stmts); + if (elementType->GetKind() == kTypeArray && subExpr->GetASTOp() == kASTStringLiteral) { ProcessStringLiteralInitList(addrOfElemExpr->Clone(), elemExpr->Clone(), - static_cast(initList->initExprs[i])->GetLength(), stmts); + static_cast(subExpr)->GetLength(), stmts); } else { auto stmt = 
FEIRBuilder::CreateStmtIAssign(elementPtrFEType->Clone(), addrOfElemExpr->Clone(), elemExpr->Clone(), @@ -1443,7 +1502,9 @@ MIRConst *ASTStringLiteral::GenerateMIRConstImpl() const { UniqueFEIRExpr ASTStringLiteral::Emit2FEExprImpl(std::list &stmts) const { (void)stmts; MIRType *elemType = static_cast(mirType)->GetElemType(); - UniqueFEIRExpr expr = std::make_unique(codeUnits, elemType); + std::vector codeUnitsVec; + codeUnitsVec.insert(codeUnitsVec.end(), codeUnits.begin(), codeUnits.end()); + UniqueFEIRExpr expr = std::make_unique(codeUnitsVec, elemType, GetStr()); CHECK_NULL_FATAL(expr); return expr; } @@ -1564,7 +1625,7 @@ UniqueFEIRExpr ASTArraySubscriptExpr::Emit2FEExprImpl(std::list } else { sizeType = std::make_unique(*GlobalTables::GetTypeTable().GetPrimType(PTY_ptr)); feIdxExpr = FEIRBuilder::CreateExprCvtPrim(std::move(feIdxExpr), - GetSignedPrimType(GetRegPrimType(indexPty)), PTY_ptr); + GetRegPrimType(indexPty), PTY_ptr); } if (mirType->GetSize() != 1) { auto typeSizeExpr = std::make_unique(mirType->GetSize(), sizeType->GetPrimType()); @@ -1630,7 +1691,7 @@ MIRConst *ASTMemberExpr::GenerateMIRConstImpl() const { const ASTMemberExpr *ASTMemberExpr::FindFinalMember(const ASTMemberExpr *startExpr, std::list &memberNames) const { - memberNames.emplace_back(startExpr->memberName); + memberNames.emplace_back(startExpr->GetMemberName()); if (startExpr->isArrow || startExpr->baseExpr->GetASTOp() != kASTMemberExpr) { return startExpr; } @@ -1650,12 +1711,12 @@ void ASTMemberExpr::InsertNonnullChecking(std::list &stmts, Uniq UniqueFEIRExpr ASTMemberExpr::Emit2FEExprImpl(std::list &stmts) const { UniqueFEIRExpr baseFEExpr; - std::string fieldName = memberName; + std::string fieldName = GetMemberName(); bool tmpIsArrow = this->isArrow; MIRType *tmpBaseType = this->baseType; if (baseExpr->GetASTOp() == kASTMemberExpr) { std::list memberNameList; - memberNameList.emplace_back(memberName); + memberNameList.emplace_back(GetMemberName()); const ASTMemberExpr *finalMember = FindFinalMember(static_cast(baseExpr), memberNameList); baseFEExpr = finalMember->baseExpr->Emit2FEExpr(stmts); tmpIsArrow = finalMember->isArrow; @@ -1740,8 +1801,8 @@ MIRConst *ASTBinaryOperatorExpr::GenerateMIRConstImpl() const { } rightConst = rightExpr->GenerateMIRConst(); if (leftConst->GetKind() == kConstLblConst || rightConst->GetKind() == kConstLblConst) { - // init by initListExpr, Only MIRConst kind is set here. - return leftConst->GetKind() == kConstLblConst ? leftConst : rightConst; + // if left or right is label mirconst, not currently implemented + return nullptr; } if (opcode == OP_land || opcode == OP_cand) { if (leftConst->IsZero()) { @@ -2192,13 +2253,13 @@ UniqueFEIRExpr ASTConditionalOperator::Emit2FEExprImpl(std::list std::list falseStmts; UniqueFEIRExpr falseFEIRExpr = falseExpr->Emit2FEExpr(falseStmts); // when subExpr is void - if (trueFEIRExpr == nullptr || falseFEIRExpr == nullptr) { + if (trueFEIRExpr == nullptr || falseFEIRExpr == nullptr || mirType->GetPrimType() == PTY_void) { UniqueFEIRStmt stmtIf = FEIRBuilder::CreateStmtIf(std::move(condFEIRExpr), trueStmts, falseStmts); stmts.emplace_back(std::move(stmtIf)); return nullptr; } // Otherwise, (e.g., a < 1 ? 
1 : a++) create a temporary var to hold the return trueExpr or falseExpr value - MIRType *retType = trueFEIRExpr->GetType()->GenerateMIRTypeAuto(); + MIRType *retType = mirType; if (retType->GetKind() == kTypeBitField) { retType = GlobalTables::GetTypeTable().GetPrimType(retType->GetPrimType()); } diff --git a/src/hir2mpl/ast_input/clang/src/ast_function.cpp b/src/hir2mpl/ast_input/clang/src/ast_function.cpp index 5f87736bb3db04381ceda5697557ca449cc000a9..381afbef3ab1e913461cd612d60991316d695433 100644 --- a/src/hir2mpl/ast_input/clang/src/ast_function.cpp +++ b/src/hir2mpl/ast_input/clang/src/ast_function.cpp @@ -76,6 +76,14 @@ bool ASTFunction::ProcessFEIRFunction() { } void ASTFunction::FinishImpl() { + if (FEOptions::GetInstance().IsDumpFEIRBB()) { + (void)LowerFunc("low feir func"); + (void)DumpFEIRBBs("dump bb list"); + } + if (FEOptions::GetInstance().IsDumpFEIRCFGGraph(GetGeneralFuncName())) { + (void)LowerFunc("low feir func"); + (void)DumpFEIRCFGGraph("dump cfg graph"); + } (void)EmitToMIR("finish/emit to mir"); (void)GenerateAliasVars("finish/generate alias vars"); } diff --git a/src/hir2mpl/ast_input/clang/src/ast_parser.cpp b/src/hir2mpl/ast_input/clang/src/ast_parser.cpp index 1d578dde1277ec064f58d39d14048f1178b08911..c4572a841f0d9f574e3965230af507ec2172fcea 100644 --- a/src/hir2mpl/ast_input/clang/src/ast_parser.cpp +++ b/src/hir2mpl/ast_input/clang/src/ast_parser.cpp @@ -28,8 +28,8 @@ namespace maple { std::map ASTDeclsBuilder::declesTable; -bool ASTParser::OpenFile() { - astFile = std::make_unique(recordDecles); +bool ASTParser::OpenFile(MapleAllocator &allocator) { + astFile = allocator.GetMemPool()->New(allocator, recordDecles); bool res = astFile->Open(fileName, 0, 0); if (!res) { return false; @@ -38,6 +38,11 @@ bool ASTParser::OpenFile() { return true; } +bool ASTParser::Release() { + astFile->DisposeTranslationUnit(); + return true; +} + bool ASTParser::Verify() const { return true; } @@ -145,6 +150,8 @@ ASTStmt *ASTParser::ProcessStmt(MapleAllocator &allocator, const clang::Stmt &st STMT_CASE(OffsetOfExpr); STMT_CASE(GenericSelectionExpr); STMT_CASE(AttributedStmt); + STMT_CASE(DeclRefExpr); + STMT_CASE(UnaryExprOrTypeTraitExpr); default: { CHECK_FATAL(false, "ASTStmt: %s NIY", stmt.getStmtClassName()); return nullptr; @@ -430,7 +437,14 @@ ASTStmt *ASTParser::ProcessStmtWhileStmt(MapleAllocator &allocator, const clang: ASTStmt *ASTParser::ProcessStmtGotoStmt(MapleAllocator &allocator, const clang::GotoStmt &gotoStmt) { ASTGotoStmt *astStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); CHECK_FATAL(astStmt != nullptr, "astStmt is nullptr"); - astStmt->SetLabelName(gotoStmt.getLabel()->getStmt()->getName()); + std::string name; + if (gotoStmt.getLabel() != nullptr) { + ASTDecl *astDecl = ProcessDecl(allocator, *gotoStmt.getLabel()); + name = astDecl->GetName(); + } else { + name = gotoStmt.getLabel()->getStmt()->getName(); + } + astStmt->SetLabelName(name); return astStmt; } @@ -482,20 +496,24 @@ bool ASTParser::HasDefault(const clang::Stmt &stmt) { const auto *cpdStmt = llvm::cast(&stmt); clang::CompoundStmt::const_body_iterator it; for (it = cpdStmt->body_begin(); it != cpdStmt->body_end(); ++it) { - if (llvm::isa(*it)) { + const auto *stmt = llvm::dyn_cast(*it); + if (stmt == nullptr) { + continue; + } + if (HasDefault(*stmt)) { return true; - } else if (llvm::isa(*it)) { - auto *caseStmt = llvm::cast(*it); - if (HasDefault(*caseStmt->getSubStmt())) { - return true; - } } } } else if (llvm::isa(stmt)) { const auto *caseStmt = llvm::cast(&stmt); - if 
(HasDefault(*caseStmt->getSubStmt())) { - return true; - } + if (HasDefault(*caseStmt->getSubStmt())) { + return true; + } + } else if (llvm::isa(stmt)) { + const auto *labelStmt = llvm::cast(&stmt); + if (HasDefault(*labelStmt->getSubStmt())) { + return true; + } } return false; } @@ -544,8 +562,14 @@ ASTStmt *ASTParser::ProcessStmtBreakStmt(MapleAllocator &allocator, const clang: ASTStmt *ASTParser::ProcessStmtLabelStmt(MapleAllocator &allocator, const clang::LabelStmt &stmt) { auto *astStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); CHECK_FATAL(astStmt != nullptr, "astStmt is nullptr"); + std::string name; ASTStmt *astSubStmt = ProcessStmt(allocator, *stmt.getSubStmt()); - std::string name(stmt.getName()); + if (stmt.getDecl() != nullptr) { + ASTDecl *astDecl = ProcessDecl(allocator, *stmt.getDecl()); + name = astDecl->GetName(); + } else { + name = stmt.getName(); + } astStmt->SetLabelName(name); astStmt->SetSubStmt(astSubStmt); return astStmt; @@ -636,6 +660,27 @@ ASTStmt *ASTParser::ProcessStmtDeclStmt(MapleAllocator &allocator, const clang:: return astStmt; } +ASTStmt *ASTParser::ProcessStmtDeclRefExpr(MapleAllocator &allocator, const clang::DeclRefExpr &expr) { + ASTDeclRefExprStmt *astStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + ASTExpr *astExpr = ProcessExpr(allocator, &expr); + if (astExpr == nullptr) { + return nullptr; + } + astStmt->SetASTExpr(astExpr); + return astStmt; +} + +ASTStmt *ASTParser::ProcessStmtUnaryExprOrTypeTraitExpr(MapleAllocator &allocator, + const clang::UnaryExprOrTypeTraitExpr &expr) { + auto *astStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + ASTExpr *astExpr = ProcessExpr(allocator, &expr); + if (astExpr == nullptr) { + return nullptr; + } + astStmt->SetASTExpr(astExpr); + return astStmt; +} + ASTValue *ASTParser::TranslateConstantValue2ASTValue(MapleAllocator &allocator, const clang::Expr *expr) const { ASTValue *astValue = nullptr; clang::Expr::EvalResult result; @@ -684,6 +729,13 @@ ASTValue *ASTParser::TranslateConstantValue2ASTValue(MapleAllocator &allocator, case PTY_i128: astValue->val.i64 = static_cast(result.Val.getInt().getSExtValue()); astValue->pty = PTY_i128; + static bool i128Warning = true; + if (i128Warning) { + WARN(kLncWarn, "%s:%d PTY_i128 is not fully supported", + FEManager::GetModule().GetFileNameFromFileNum(astFile->GetExprLOC(*expr).first).c_str(), + astFile->GetExprLOC(*expr).second); + i128Warning = false; + } break; case PTY_u8: astValue->val.u8 = static_cast(result.Val.getInt().getExtValue()); @@ -708,6 +760,13 @@ ASTValue *ASTParser::TranslateConstantValue2ASTValue(MapleAllocator &allocator, case PTY_u128: astValue->val.u64 = static_cast(result.Val.getInt().getZExtValue()); astValue->pty = PTY_u128; + static bool u128Warning = true; + if (u128Warning) { + WARN(kLncWarn, "%s:%d PTY_u128 is not fully supported", + FEManager::GetModule().GetFileNameFromFileNum(astFile->GetExprLOC(*expr).first).c_str(), + astFile->GetExprLOC(*expr).second); + u128Warning = false; + } break; case PTY_u1: astValue->val.u8 = (result.Val.getInt().getExtValue() == 0 ? 
0 : 1); @@ -731,7 +790,16 @@ ASTValue *ASTParser::TranslateConstantValue2ASTValue(MapleAllocator &allocator, break; case llvm::APFloat::S_IEEEquad: case llvm::APFloat::S_PPCDoubleDouble: - case llvm::APFloat::S_x87DoubleExtended: + case llvm::APFloat::S_x87DoubleExtended: { + auto ty = expr->getType().getCanonicalType(); + static bool f128Warning = true; + if (f128Warning && (ty->isFloat128Type() || + (ty->isRealFloatingType() && astFile->GetAstContext()->getTypeSize(ty) == 128))) { + WARN(kLncWarn, "%s:%d PTY_f128 is not fully supported", + FEManager::GetModule().GetFileNameFromFileNum(astFile->GetExprLOC(*expr).first).c_str(), + astFile->GetExprLOC(*expr).second); + f128Warning = false; + } bool LosesInfo; if (constMirType->GetPrimType() == PTY_f64) { (void)fValue.convert(llvm::APFloat::IEEEdouble(), @@ -745,6 +813,7 @@ ASTValue *ASTParser::TranslateConstantValue2ASTValue(MapleAllocator &allocator, astValue->val.f32 = fValue.convertToFloat(); } break; + } default: CHECK_FATAL(false, "unsupported semantics"); } @@ -783,6 +852,13 @@ ASTValue *ASTParser::TranslateLValue2ASTValue( const clang::StringLiteral &strExpr = llvm::cast(*lvExpr); std::string str = ""; if (strExpr.isWide() || strExpr.isUTF16() || strExpr.isUTF32()) { + static bool wcharWarning = true; + if (wcharWarning && strExpr.isWide()) { + WARN(kLncWarn, "%s:%d wchar is not fully supported", + FEManager::GetModule().GetFileNameFromFileNum(astFile->GetExprLOC(*lvExpr).first).c_str(), + astFile->GetExprLOC(*lvExpr).second); + wcharWarning = false; + } str = strExpr.getBytes().str(); } else { str = strExpr.getString().str(); @@ -850,12 +926,17 @@ ASTExpr *ASTParser::EvaluateExprAsConst(MapleAllocator &allocator, const clang:: clang::APValue constVal = constResult.Val; if (constVal.isInt()) { - ASTIntegerLiteral *intExpr = allocator.New(); + ASTIntegerLiteral *intExpr = allocator.New(allocator); llvm::APSInt intVal = constVal.getInt(); intExpr->SetVal(intVal.getExtValue()); + if (intVal.getExtValue() == 0) { + intExpr->SetEvaluatedFlag(EvaluatedAsZero); + } else { + intExpr->SetEvaluatedFlag(EvaluatedAsNonZero); + } return intExpr; } else if (constVal.isFloat()) { - ASTFloatingLiteral *floatExpr = allocator.New(); + ASTFloatingLiteral *floatExpr = allocator.New(allocator); llvm::APFloat floatVal = constVal.getFloat(); const llvm::fltSemantics &fltSem = floatVal.getSemantics(); double val = 0; @@ -878,9 +959,13 @@ ASTExpr *ASTParser::EvaluateExprAsConst(MapleAllocator &allocator, const clang:: } else { return nullptr; } + if (floatVal.isPosZero()) { + floatExpr->SetEvaluatedFlag(EvaluatedAsZero); + } else { + floatExpr->SetEvaluatedFlag(EvaluatedAsNonZero); + } return floatExpr; } - return nullptr; } @@ -1071,8 +1156,9 @@ ASTExpr *ASTParser::ProcessExprUnaryOperator(MapleAllocator &allocator, const cl ASTExpr *ASTParser::ProcessExprAddrLabelExpr(MapleAllocator &allocator, const clang::AddrLabelExpr &expr) { ASTUOAddrOfLabelExpr *astAddrOfLabelExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); const clang::LabelDecl *lbDecl = expr.getLabel(); - std::string labelName = lbDecl->getName().str(); - astAddrOfLabelExpr->SetLabelName(labelName); + CHECK_NULL_FATAL(lbDecl); + ASTDecl *astDecl = ProcessDecl(allocator, *lbDecl); + astAddrOfLabelExpr->SetLabelName(astDecl->GetName()); astAddrOfLabelExpr->SetUOType(GlobalTables::GetTypeTable().GetPrimType(PTY_ptr)); return astAddrOfLabelExpr; } @@ -1135,20 +1221,10 @@ ASTExpr *ASTParser::ProcessExprCompoundLiteralExpr(MapleAllocator &allocator, CHECK_FATAL(initExpr != nullptr, "initExpr is 
nullptr"); clang::QualType qualType = initExpr->getType(); astCompoundLiteralExpr->SetCompoundLiteralType(astFile->CvtType(qualType)); - - const auto *initListExpr = llvm::dyn_cast(initExpr); - ASTExpr *astExpr = nullptr; - if (initListExpr != nullptr) { - astExpr = ProcessExpr(allocator, initListExpr); - } else { - astExpr = ProcessExpr(allocator, initExpr); - } + ASTExpr *astExpr = ProcessExpr(allocator, initExpr); if (astExpr == nullptr) { return nullptr; } - static uint32 unNamedCount = 0; - auto initListName = astFile->GetOrCreateCompoundLiteralExprInitName(unNamedCount++); - astCompoundLiteralExpr->SetInitName(initListName); astCompoundLiteralExpr->SetASTExpr(astExpr); return astCompoundLiteralExpr; } @@ -1165,6 +1241,7 @@ ASTExpr *ASTParser::ProcessExprInitListExpr(MapleAllocator &allocator, const cla } uint32 n = expr.getNumInits(); clang::Expr * const *le = expr.getInits(); + std::unordered_set evaluatedFlags; if (aggType->isRecordType()) { const auto *recordType = llvm::cast(aggType); clang::RecordDecl *recordDecl = recordType->getDecl(); @@ -1180,6 +1257,7 @@ ASTExpr *ASTParser::ProcessExprInitListExpr(MapleAllocator &allocator, const cla const clang::Expr *eExpr = le[i]; ASTExpr *astExpr = ProcessExpr(allocator, eExpr); CHECK_FATAL(astExpr != nullptr, "Invalid InitListExpr"); + evaluatedFlags.insert(astExpr->GetEvaluatedFlag()); astInitListExpr->SetInitExprs(astExpr); i++; } @@ -1207,9 +1285,15 @@ ASTExpr *ASTParser::ProcessExprInitListExpr(MapleAllocator &allocator, const cla if (astExpr == nullptr) { return nullptr; } + evaluatedFlags.insert(astExpr->GetEvaluatedFlag()); astInitListExpr->SetInitExprs(astExpr); } } + if (evaluatedFlags.count(NotEvaluated) || evaluatedFlags.count(EvaluatedAsNonZero)) { + astInitListExpr->SetEvaluatedFlag(EvaluatedAsNonZero); + } else { + astInitListExpr->SetEvaluatedFlag(EvaluatedAsZero); + } return astInitListExpr; } @@ -1303,6 +1387,7 @@ ASTExpr *ASTParser::ProcessExprImplicitValueInitExpr(MapleAllocator &allocator, auto *astImplicitValueInitExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); CHECK_FATAL(astImplicitValueInitExpr != nullptr, "astImplicitValueInitExpr is nullptr"); astImplicitValueInitExpr->SetType(astFile->CvtType(expr.getType())); + astImplicitValueInitExpr->SetEvaluatedFlag(EvaluatedAsZero); return astImplicitValueInitExpr; } @@ -1311,11 +1396,14 @@ ASTExpr *ASTParser::ProcessExprStringLiteral(MapleAllocator &allocator, const cl CHECK_FATAL(astStringLiteral != nullptr, "astStringLiteral is nullptr"); astStringLiteral->SetType(astFile->CvtType(expr.getType())); astStringLiteral->SetLength(expr.getLength()); - std::vector codeUnits; + MapleVector codeUnits(allocator.Adapter()); for (size_t i = 0; i < expr.getLength(); ++i) { codeUnits.emplace_back(expr.getCodeUnit(i)); } astStringLiteral->SetCodeUnits(codeUnits); + if (expr.isAscii()) { + astStringLiteral->SetStr(expr.getString().str()); + } return astStringLiteral; } @@ -1430,6 +1518,7 @@ ASTExpr *ASTParser::GetAddrShiftExpr(MapleAllocator &allocator, ASTExpr *expr, u shiftExpr->SetOpcode(OP_mul); shiftExpr->SetRetType(retType); shiftExpr->SetCvtNeeded(true); + shiftExpr->SetSrcLOC(expr->GetSrcFileIdx(), expr->GetSrcFileLineNum()); return shiftExpr; } @@ -1669,7 +1758,7 @@ ASTExpr *ASTParser::ProcessExprCallExpr(MapleAllocator &allocator, const clang:: MIRType *retType = astFile->CvtType(expr.getCallReturnType(*astFile->GetAstContext())); astCallExpr->SetRetType(retType); // args - std::vector args; + MapleVector args(allocator.Adapter()); for (uint32_t i = 0; i < 
expr.getNumArgs(); ++i) { const clang::Expr *subExpr = expr.getArg(i); ASTExpr *arg = ProcessExpr(allocator, subExpr); @@ -1724,6 +1813,7 @@ ASTExpr *ASTParser::ProcessExprParenExpr(MapleAllocator &allocator, const clang: if (astExpr == nullptr) { return nullptr; } + astParenExpr->SetEvaluatedFlag(astExpr->GetEvaluatedFlag()); astParenExpr->SetASTExpr(astExpr); return astParenExpr; } @@ -1883,6 +1973,7 @@ ASTExpr *ASTParser::ProcessExprCastExpr(MapleAllocator &allocator, const clang:: if (astExpr == nullptr) { return nullptr; } + astCastExpr->SetEvaluatedFlag(astExpr->GetEvaluatedFlag()); astCastExpr->SetASTExpr(astExpr); return astCastExpr; } @@ -2191,15 +2282,15 @@ bool ASTParser::PreProcessAST() { return true; } -#define DECL_CASE(CLASS) \ - case clang::Decl::CLASS: { \ +#define DECL_CASE(CLASS) \ + case clang::Decl::CLASS: { \ ASTDecl *astDeclaration = ProcessDecl##CLASS##Decl(allocator, llvm::cast(decl)); \ if (astDeclaration != nullptr) { \ astDeclaration->SetDeclPos(astFile->GetDeclPosInfo(decl)); \ astDeclaration->SetGlobal(decl.isDefinedOutsideFunctionOrMethod()); \ - Pos loc = astFile->GetLOC(decl.getLocation()); \ + Pos loc = astFile->GetLOC(decl.getLocation()); \ astDeclaration->SetSrcLOC(loc.first, loc.second); \ - } \ + } \ return astDeclaration; \ } ASTDecl *ASTParser::ProcessDecl(MapleAllocator &allocator, const clang::Decl &decl) { @@ -2245,16 +2336,25 @@ ASTDecl *ASTParser::ProcessDeclRecordDecl(MapleAllocator &allocator, const clang return nullptr; } GenericAttrs attrs; - astFile->CollectAttrs(recDecl, attrs, kNone); + astFile->CollectRecordAttrs(recDecl, attrs, kNone); std::string structName = recName.str(); if (structName.empty() || !ASTUtil::IsValidName(structName)) { - uint32 id = qType->getAs()->getDecl()->getLocation().getRawEncoding(); - structName = astFile->GetOrCreateMappedUnnamedName(id); + structName = astFile->GetTypedefNameFromUnnamedStruct(recDecl); + if (structName.empty()) { + uint32 id = qType->getAs()->getDecl()->getLocation().getRawEncoding(); + structName = astFile->GetOrCreateMappedUnnamedName(id); + } } else if (FEOptions::GetInstance().GetFuncInlineSize() != 0) { - structName = structName + astFile->GetAstFileNameHashStr(); - } - curStructOrUnion = ASTDeclsBuilder::ASTStructBuilder( - allocator, fileName, structName, std::vector{recType}, attrs, recDecl.getID()); + std::string recordLayoutStr = recDecl.getDefinition() == nullptr ? 
"" : + ASTUtil::GetRecordLayoutString(astFile->GetContext()->getASTRecordLayout(recDecl.getDefinition())); + std::string filename = astFile->GetContext()->getSourceManager().getFilename(recDecl.getLocation()).str(); + structName = structName + FEUtils::GetFileNameHashStr(filename + recordLayoutStr); + } + curStructOrUnion = ASTDeclsBuilder::ASTStructBuilder(allocator, + fileName, + structName,MapleVector({recType}, allocator.Adapter()), + attrs, + recDecl.getID()); if (recDecl.isUnion()) { curStructOrUnion->SetIsUnion(); } @@ -2306,7 +2406,7 @@ ASTDecl *ASTParser::ProcessDeclFunctionDecl(MapleAllocator &allocator, const cla if (!ASTUtil::IsValidName(funcName)) { ASTUtil::AdjustName(funcName); } - std::vector typeDescIn; + MapleVector typeDescIn(allocator.Adapter()); clang::QualType funcQualType = funcDecl.getType(); MIRType *mirFuncType = astFile->CvtType(funcQualType); typeDescIn.push_back(mirFuncType); @@ -2315,7 +2415,7 @@ ASTDecl *ASTParser::ProcessDeclFunctionDecl(MapleAllocator &allocator, const cla if (retType == nullptr) { return nullptr; } - std::vector paramDecls; + MapleVector paramDecls(allocator.Adapter()); typeDescIn.push_back(retType); unsigned int numParam = funcDecl.getNumParams(); std::list implicitStmts; @@ -2433,13 +2533,14 @@ ASTDecl *ASTParser::ProcessDeclFieldDecl(MapleAllocator &allocator, const clang: fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(bfTypeIdx); } GenericAttrs attrs; - astFile->CollectAttrs(decl, attrs, kNone); + astFile->CollectFieldAttrs(decl, attrs, kNone); // one elem vector type if (LibAstFile::IsOneElementVector(qualType)) { attrs.SetAttr(GENATTR_oneelem_simd); } auto fieldDecl = ASTDeclsBuilder::ASTFieldBuilder( - allocator, fileName, fieldName, std::vector{fieldType}, attrs, decl.getID(), isAnonymousField); + allocator, fileName, fieldName, MapleVector({fieldType}, allocator.Adapter()), + attrs, decl.getID(), isAnonymousField); clang::CharUnits alignment = astFile->GetContext()->getDeclAlign(&decl); clang::CharUnits unadjust = astFile->GetContext()->toCharUnitsFromBits( astFile->GetContext()->getTypeUnadjustedAlign(qualType)); @@ -2447,7 +2548,7 @@ ASTDecl *ASTParser::ProcessDeclFieldDecl(MapleAllocator &allocator, const clang: fieldDecl->SetAlign(maxAlign); const auto *valueDecl = llvm::dyn_cast(&decl); if (valueDecl != nullptr) { - ProcessNonnullFuncPtrAttrs(*valueDecl, *fieldDecl); + ProcessNonnullFuncPtrAttrs(allocator, *valueDecl, *fieldDecl); ProcessBoundaryFuncPtrAttrs(allocator, *valueDecl, *fieldDecl); } return fieldDecl; @@ -2481,7 +2582,8 @@ ASTDecl *ASTParser::ProcessDeclVarDecl(MapleAllocator &allocator, const clang::V varType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*elementType); } astVar = ASTDeclsBuilder::ASTVarBuilder( - allocator, fileName, varName, std::vector{varType}, attrs, varDecl.getID()); + allocator, fileName, varName, MapleVector({varType}, allocator.Adapter()), attrs, varDecl.getID()); + astVar->SetIsMacro(varDecl.getLocation().isMacroID()); clang::SectionAttr *sa = varDecl.getAttr(); if (sa != nullptr && !sa->isImplicit()) { astVar->SetSectionAttr(sa->getName().str()); @@ -2491,13 +2593,21 @@ ASTDecl *ASTParser::ProcessDeclVarDecl(MapleAllocator &allocator, const clang::V astVar->SetAsmAttr(ala->getLabel().str()); } if (varDecl.hasInit()) { + bool isStaticStorageVar = (varDecl.getStorageDuration() == clang::SD_Static); astVar->SetDeclPos(astFile->GetDeclPosInfo(varDecl)); auto initExpr = varDecl.getInit(); auto astInitExpr = ProcessExpr(allocator, initExpr); if (initExpr->getStmtClass() == 
clang::Stmt::InitListExprClass && astInitExpr->GetASTOp() == kASTOpInitListExpr) { static_cast(astInitExpr)->SetInitListVarName(astVar->GenerateUniqueVarName()); } - astVar->SetInitExpr(astInitExpr); + EvaluatedFlag flag = astInitExpr->GetEvaluatedFlag(); + // For those global and static local variables initialized to zero, or whose init list contains only + // zeros, initExpr is not set, so they are placed in the .bss section instead of .data to reduce + // code size. However, when '-npe-check-dynamic' is passed, initExpr must always be set; otherwise + // '-npe-check-dynamic' cannot work as expected. + if (FEOptions::GetInstance().IsNpeCheckDynamic() || !isStaticStorageVar || flag != EvaluatedAsZero) { + astVar->SetInitExpr(astInitExpr); + } } if (llvm::isa(varDecl.getType())) { CHECK_FATAL(FEOptions::GetInstance().IsEnableVariableArray(), @@ -2526,7 +2636,7 @@ ASTDecl *ASTParser::ProcessDeclVarDecl(MapleAllocator &allocator, const clang::V } const auto *valueDecl = llvm::dyn_cast(&varDecl); if (valueDecl != nullptr) { - ProcessNonnullFuncPtrAttrs(*valueDecl, *astVar); + ProcessNonnullFuncPtrAttrs(allocator, *valueDecl, *astVar); ProcessBoundaryFuncPtrAttrs(allocator, *valueDecl, *astVar); } ProcessBoundaryVarAttrs(allocator, varDecl, *astVar); @@ -2552,19 +2662,23 @@ ASTDecl *ASTParser::ProcessDeclParmVarDecl(MapleAllocator &allocator, const clan if (LibAstFile::IsOneElementVector(parmQualType)) { attrs.SetAttr(GENATTR_oneelem_simd); } - parmVar = ASTDeclsBuilder::ASTVarBuilder( - allocator, fileName, parmName, std::vector{paramType}, attrs, parmVarDecl.getID()); + parmVar = ASTDeclsBuilder::ASTVarBuilder(allocator, + fileName, + parmName, + MapleVector({paramType}, allocator.Adapter()), + attrs, + parmVarDecl.getID()); parmVar->SetIsParam(true); const auto *valueDecl = llvm::dyn_cast(&parmVarDecl); if (valueDecl != nullptr) { - ProcessNonnullFuncPtrAttrs(*valueDecl, *parmVar); + ProcessNonnullFuncPtrAttrs(allocator, *valueDecl, *parmVar); ProcessBoundaryFuncPtrAttrs(allocator, *valueDecl, *parmVar); } return parmVar; } ASTDecl *ASTParser::ProcessDeclFileScopeAsmDecl(MapleAllocator &allocator, const clang::FileScopeAsmDecl &asmDecl) { - ASTFileScopeAsm *astAsmDecl = allocator.GetMemPool()->New(fileName); + ASTFileScopeAsm *astAsmDecl = allocator.GetMemPool()->New(allocator, fileName); astAsmDecl->SetAsmStr(asmDecl.getAsmString()->getString().str()); return astAsmDecl; } @@ -2578,7 +2692,7 @@ ASTDecl *ASTParser::ProcessDeclEnumDecl(MapleAllocator &allocator, const clang:: astFile->CollectAttrs(*clang::dyn_cast(&enumDecl), attrs, kNone); const std::string &enumName = clang::dyn_cast(&enumDecl)->getNameAsString(); localEnumDecl = ASTDeclsBuilder::ASTLocalEnumDeclBuilder(allocator, fileName, enumName, - std::vector{}, attrs, enumDecl.getID()); + MapleVector({}, allocator.Adapter()), attrs, enumDecl.getID()); TraverseDecl(&enumDecl, [&](clang::Decl *child) { CHECK_FATAL(child->getKind() == clang::Decl::EnumConstant, "Unsupported decl kind: %u", child->getKind()); localEnumDecl->PushConstant(static_cast(ProcessDecl(allocator, *child))); @@ -2610,16 +2724,23 @@ ASTDecl *ASTParser::ProcessDeclEnumConstantDecl(MapleAllocator &allocator, const const std::string &varName = clang::dyn_cast(&decl)->getNameAsString(); MIRType *mirType = astFile->CvtType(clang::dyn_cast(&decl)->getType()); astConst = ASTDeclsBuilder::ASTEnumConstBuilder( - allocator, fileName, varName, std::vector{mirType}, attrs, decl.getID()); + allocator, fileName, varName, MapleVector({mirType}, allocator.Adapter()), attrs,
decl.getID()); astConst->SetValue(static_cast(clang::dyn_cast(&decl)->getInitVal().getExtValue())); return astConst; } ASTDecl *ASTParser::ProcessDeclLabelDecl(MapleAllocator &allocator, const clang::LabelDecl &decl) { - (void)allocator; - (void)decl; - return nullptr; + ASTDecl *astDecl= static_cast(ASTDeclsBuilder::GetASTDecl(decl.getID())); + if (astDecl != nullptr) { + return astDecl; + } + std::string varName = astFile->GetMangledName(decl); + CHECK_FATAL(!varName.empty(), "label string is null"); + varName = FEUtils::GetSequentialName0(varName + "@", FEUtils::GetSequentialNumber()); + MapleVector typeDescVec(allocator.Adapter()); + astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, fileName, varName, typeDescVec, decl.getID()); + return astDecl; } bool ASTParser::RetrieveStructs(MapleAllocator &allocator) { @@ -2709,8 +2830,8 @@ bool ASTParser::ProcessGlobalTypeDef(MapleAllocator &allocator) { return true; } -const std::string &ASTParser::GetSourceFileName() const { - return fileName; +const std::string ASTParser::GetSourceFileName() const { + return fileName.c_str() == nullptr ? "" : fileName.c_str(); } const uint32 ASTParser::GetFileIdx() const { diff --git a/src/hir2mpl/ast_input/clang/src/ast_parser_builting_func.cpp b/src/hir2mpl/ast_input/clang/src/ast_parser_builting_func.cpp index 930f628624a592a5d76333c7e35c13ba1cc1ef93..4418326b0b37c618e82b56a814a0173d4851b586 100644 --- a/src/hir2mpl/ast_input/clang/src/ast_parser_builting_func.cpp +++ b/src/hir2mpl/ast_input/clang/src/ast_parser_builting_func.cpp @@ -73,6 +73,28 @@ UniqueFEIRExpr ASTCallExpr::CreateIntrinsicopForC(std::list &stm } } +UniqueFEIRExpr ASTCallExpr::CreateIntrinsicCallAssignedForC(std::list &stmts, + MIRIntrinsicID argIntrinsicID) const { + std::unique_ptr> argExprList = std::make_unique>(); + for (auto arg : args) { + argExprList->emplace_back(arg->Emit2FEExpr(stmts)); + } + if (!IsNeedRetExpr()) { + auto stmt = std::make_unique(argIntrinsicID, nullptr, nullptr, + std::move(argExprList)); + stmt->SetSrcFileInfo(GetSrcFileIdx(), GetSrcFileLineNum()); + stmts.emplace_back(std::move(stmt)); + return nullptr; + } + UniqueFEIRVar retVar = FEIRBuilder::CreateVarNameForC(GetRetVarName(), *retType, false); + auto stmt = std::make_unique(argIntrinsicID, nullptr, retVar->Clone(), + std::move(argExprList)); + stmt->SetSrcFileInfo(GetSrcFileIdx(), GetSrcFileLineNum()); + stmts.emplace_back(std::move(stmt)); + UniqueFEIRExpr dread = FEIRBuilder::CreateExprDRead(std::move(retVar)); + return dread; +} + UniqueFEIRExpr ASTCallExpr::CreateBinaryExpr(std::list &stmts, Opcode op) const { auto feTy = std::make_unique(*mirType); auto arg1 = args[0]->Emit2FEExpr(stmts); @@ -83,7 +105,7 @@ UniqueFEIRExpr ASTCallExpr::CreateBinaryExpr(std::list &stmts, O UniqueFEIRExpr ASTCallExpr::ProcessBuiltinFunc(std::list &stmts, bool &isFinish) const { // process a kind of builtinFunc std::string prefix = "__builtin_mpl_vector_load"; - if (funcName.compare(0, prefix.size(), prefix) == 0) { + if (GetFuncName().compare(0, prefix.size(), prefix) == 0) { auto argExpr = args[0]->Emit2FEExpr(stmts); UniqueFEIRType type = FEIRTypeHelper::CreateTypeNative(*mirType); UniqueFEIRType ptrType = FEIRTypeHelper::CreateTypeNative( @@ -92,7 +114,7 @@ UniqueFEIRExpr ASTCallExpr::ProcessBuiltinFunc(std::list &stmts, return FEIRBuilder::CreateExprIRead(std::move(type), std::move(ptrType), std::move(argExpr)); } prefix = "__builtin_mpl_vector_store"; - if (funcName.compare(0, prefix.size(), prefix) == 0) { + if (GetFuncName().compare(0, prefix.size(), prefix) 
== 0) { auto arg1Expr = args[0]->Emit2FEExpr(stmts); auto arg2Expr = args[1]->Emit2FEExpr(stmts); UniqueFEIRType type = FEIRTypeHelper::CreateTypeNative( @@ -104,11 +126,11 @@ UniqueFEIRExpr ASTCallExpr::ProcessBuiltinFunc(std::list &stmts, return nullptr; } prefix = "__builtin_mpl_vector_zip"; - if (funcName.compare(0, prefix.size(), prefix) == 0) { + if (GetFuncName().compare(0, prefix.size(), prefix) == 0) { return EmitBuiltinVectorZip(stmts, isFinish); } prefix = "__builtin_mpl_vector_shli"; - if (funcName.compare(0, prefix.size(), prefix) == 0) { + if (GetFuncName().compare(0, prefix.size(), prefix) == 0) { isFinish = true; UniqueFEIRType type = FEIRTypeHelper::CreateTypeNative(*args[0]->GetType()); auto arg1Expr = args[0]->Emit2FEExpr(stmts); @@ -116,7 +138,7 @@ UniqueFEIRExpr ASTCallExpr::ProcessBuiltinFunc(std::list &stmts, return FEIRBuilder::CreateExprBinary(std::move(type), OP_shl, std::move(arg1Expr), std::move(arg2Expr)); } prefix = "__builtin_mpl_vector_shri"; - if (funcName.compare(0, prefix.size(), prefix) == 0) { + if (GetFuncName().compare(0, prefix.size(), prefix) == 0) { isFinish = true; UniqueFEIRType type = FEIRTypeHelper::CreateTypeNative(*args[0]->GetType()); auto arg1Expr = args[0]->Emit2FEExpr(stmts); @@ -124,7 +146,7 @@ UniqueFEIRExpr ASTCallExpr::ProcessBuiltinFunc(std::list &stmts, return FEIRBuilder::CreateExprBinary(std::move(type), OP_ashr, std::move(arg1Expr), std::move(arg2Expr)); } prefix = "__builtin_mpl_vector_shru"; - if (funcName.compare(0, prefix.size(), prefix) == 0) { + if (GetFuncName().compare(0, prefix.size(), prefix) == 0) { isFinish = true; UniqueFEIRType type = FEIRTypeHelper::CreateTypeNative(*args[0]->GetType()); auto arg1Expr = args[0]->Emit2FEExpr(stmts); @@ -132,7 +154,7 @@ UniqueFEIRExpr ASTCallExpr::ProcessBuiltinFunc(std::list &stmts, return FEIRBuilder::CreateExprBinary(std::move(type), OP_lshr, std::move(arg1Expr), std::move(arg2Expr)); } // process a single builtinFunc - auto ptrFunc = builtingFuncPtrMap.find(funcName); + auto ptrFunc = builtingFuncPtrMap.find(GetFuncName()); if (ptrFunc != builtingFuncPtrMap.end()) { isFinish = true; return EmitBuiltinFunc(stmts); @@ -140,17 +162,17 @@ UniqueFEIRExpr ASTCallExpr::ProcessBuiltinFunc(std::list &stmts, isFinish = false; if (FEOptions::GetInstance().GetDumpLevel() >= FEOptions::kDumpLevelInfo) { prefix = "__builtin"; - if (funcName.compare(0, prefix.size(), prefix) == 0) { + if (GetFuncName().compare(0, prefix.size(), prefix) == 0) { FE_INFO_LEVEL(FEOptions::kDumpLevelInfo, "%s:%d BuiltinFunc (%s) has not been implemented", FEManager::GetModule().GetFileNameFromFileNum(GetSrcFileIdx()).c_str(), GetSrcFileLineNum(), - funcName.c_str()); + GetFuncName().c_str()); } } return nullptr; } UniqueFEIRExpr ASTCallExpr::EmitBuiltinFunc(std::list &stmts) const { - return (this->*(builtingFuncPtrMap[funcName]))(stmts); + return (this->*(builtingFuncPtrMap[GetFuncName()]))(stmts); } #define DEF_MIR_INTRINSIC(STR, NAME, INTRN_CLASS, RETURN_TYPE, ...) 
\ @@ -176,7 +198,7 @@ UniqueFEIRExpr ASTCallExpr::EmitBuiltinVectorZip(std::list &stmt UniqueFEIRVar retVar = FEIRBuilder::CreateVarNameForC(retName, *retType); #define VECTOR_INTRINSICCALL_TYPE(OP_NAME, VECTY) \ - if (FEUtils::EndsWith(funcName, #VECTY)) { \ + if (FEUtils::EndsWith(GetFuncName(), #VECTY)) { \ stmt = std::make_unique( \ INTRN_vector_##OP_NAME##_##VECTY, nullptr, retVar->Clone(), std::move(argExprList)); \ } @@ -336,83 +358,267 @@ UniqueFEIRExpr ASTCallExpr::EmitBuiltinAlignDown(std::list &stmt } UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncAddAndFetch8(std::list &stmts) const { - return CreateIntrinsicopForC(stmts, INTRN_C___sync_add_and_fetch_8); + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_add_and_fetch_8); } UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncAddAndFetch4(std::list &stmts) const { - return CreateIntrinsicopForC(stmts, INTRN_C___sync_add_and_fetch_4); + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_add_and_fetch_4); } UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncAddAndFetch2(std::list &stmts) const { - return CreateIntrinsicopForC(stmts, INTRN_C___sync_add_and_fetch_2); + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_add_and_fetch_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncAddAndFetch1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_add_and_fetch_1); } UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncSubAndFetch8(std::list &stmts) const { - return CreateIntrinsicopForC(stmts, INTRN_C___sync_sub_and_fetch_8); + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_sub_and_fetch_8); } UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncSubAndFetch4(std::list &stmts) const { - return CreateIntrinsicopForC(stmts, INTRN_C___sync_sub_and_fetch_4); + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_sub_and_fetch_4); } UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncSubAndFetch2(std::list &stmts) const { - return CreateIntrinsicopForC(stmts, INTRN_C___sync_sub_and_fetch_2); + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_sub_and_fetch_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncSubAndFetch1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_sub_and_fetch_1); } UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndSub8(std::list &stmts) const { - return CreateIntrinsicopForC(stmts, INTRN_C___sync_fetch_and_sub_8); + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_sub_8); } UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndSub4(std::list &stmts) const { - return CreateIntrinsicopForC(stmts, INTRN_C___sync_fetch_and_sub_4); + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_sub_4); } UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndSub2(std::list &stmts) const { - return CreateIntrinsicopForC(stmts, INTRN_C___sync_fetch_and_sub_2); + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_sub_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndSub1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_sub_1); } UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndAdd8(std::list &stmts) const { - return CreateIntrinsicopForC(stmts, INTRN_C___sync_fetch_and_add_8); + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_add_8); } UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndAdd4(std::list &stmts) const { - return CreateIntrinsicopForC(stmts, INTRN_C___sync_fetch_and_add_4); + return 
CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_add_4); } UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndAdd2(std::list &stmts) const { - return CreateIntrinsicopForC(stmts, INTRN_C___sync_fetch_and_add_2); + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_add_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndAdd1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_add_1); } UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncValCompareAndSwap8(std::list &stmts) const { - return CreateIntrinsicopForC(stmts, INTRN_C___sync_val_compare_and_swap_8); + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_val_compare_and_swap_8); } UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncValCompareAndSwap4(std::list &stmts) const { - return CreateIntrinsicopForC(stmts, INTRN_C___sync_val_compare_and_swap_4); + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_val_compare_and_swap_4); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncValCompareAndSwap2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_val_compare_and_swap_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncValCompareAndSwap1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_val_compare_and_swap_1); } UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncLockRelease8(std::list &stmts) const { - return CreateIntrinsicopForC(stmts, INTRN_C___sync_lock_release_8); + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_lock_release_8); } UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncLockRelease4(std::list &stmts) const { - return CreateIntrinsicopForC(stmts, INTRN_C___sync_lock_release_4); + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_lock_release_4); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncLockRelease2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_lock_release_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncLockRelease1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_lock_release_1); } UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncBoolCompareAndSwap8(std::list &stmts) const { - return CreateIntrinsicopForC(stmts, INTRN_C___sync_bool_compare_and_swap_8); + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_bool_compare_and_swap_8); } UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncBoolCompareAndSwap4(std::list &stmts) const { - return CreateIntrinsicopForC(stmts, INTRN_C___sync_bool_compare_and_swap_4); + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_bool_compare_and_swap_4); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncBoolCompareAndSwap2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_bool_compare_and_swap_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncBoolCompareAndSwap1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_bool_compare_and_swap_1); } UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncLockTestAndSet8(std::list &stmts) const { - return CreateIntrinsicopForC(stmts, INTRN_C___sync_lock_test_and_set_8); + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_lock_test_and_set_8); } UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncLockTestAndSet4(std::list &stmts) const { - return CreateIntrinsicopForC(stmts, INTRN_C___sync_lock_test_and_set_4); + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_lock_test_and_set_4); +} + 
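The renames in this region uniformly retarget the `__sync_*` builtins from `CreateIntrinsicopForC`, which produced a pure intrinsic expression, to `CreateIntrinsicCallAssignedForC`, which emits an intrinsic call statement and only materializes a return value when one is actually needed. A minimal sketch of the two shapes at the source level, using the standard GCC/Clang `__sync_fetch_and_add` builtin that these `Emit*` helpers lower; the snippet is illustrative and not part of the patch:

```cpp
// Illustrative only: the two cases CreateIntrinsicCallAssignedForC handles.
int counter = 0;

int main() {
  // Result discarded: IsNeedRetExpr() is false, so the frontend emits a bare
  // intrinsic call statement and returns nullptr (no temporary is created).
  (void)__sync_fetch_and_add(&counter, 1);

  // Result used: a temporary return var (named via GetRetVarName()) is
  // created, the intrinsic call assigns into it, and a dread of that
  // variable is returned as the expression's value.
  int old = __sync_fetch_and_add(&counter, 1);
  return old;
}
```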
+UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncLockTestAndSet2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_lock_test_and_set_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncLockTestAndSet1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_lock_test_and_set_1); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndAnd1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_and_1); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndAnd2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_and_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndAnd4(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_and_4); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndAnd8(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_and_8); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndOr1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_or_1); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndOr2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_or_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndOr4(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_or_4); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndOr8(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_or_8); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndXor1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_xor_1); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndXor2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_xor_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndXor4(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_xor_4); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndXor8(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_xor_8); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndNand1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_nand_1); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndNand2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_nand_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndNand4(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_nand_4); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndNand8(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_nand_8); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncAndAndFetch1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_and_and_fetch_1); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncAndAndFetch2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_and_and_fetch_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncAndAndFetch4(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, 
INTRN_C___sync_and_and_fetch_4); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncAndAndFetch8(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_and_and_fetch_8); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncOrAndFetch1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_or_and_fetch_1); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncOrAndFetch2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_or_and_fetch_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncOrAndFetch4(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_or_and_fetch_4); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncOrAndFetch8(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_or_and_fetch_8); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncXorAndFetch1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_xor_and_fetch_1); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncXorAndFetch2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_xor_and_fetch_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncXorAndFetch4(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_xor_and_fetch_4); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncXorAndFetch8(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_xor_and_fetch_8); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncNandAndFetch1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_nand_and_fetch_1); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncNandAndFetch2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_nand_and_fetch_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncNandAndFetch4(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_nand_and_fetch_4); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncNandAndFetch8(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_nand_and_fetch_8); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncSynchronize(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_synchronize); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinAtomicExchangeN(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___atomic_exchange_n); } UniqueFEIRExpr ASTCallExpr::EmitBuiltinReturnAddress(std::list &stmts) const { diff --git a/src/hir2mpl/ast_input/clang/src/ast_stmt.cpp b/src/hir2mpl/ast_input/clang/src/ast_stmt.cpp index 4184cc0e7379fb4c0a807c3feee68ee724f28c71..4886cbc0b0a00a72f9564e5246c3405dde5b0b28 100644 --- a/src/hir2mpl/ast_input/clang/src/ast_stmt.cpp +++ b/src/hir2mpl/ast_input/clang/src/ast_stmt.cpp @@ -48,7 +48,7 @@ void ASTCompoundStmt::InsertASTStmtsAtFront(const std::list &stmts) { astStmts.insert(astStmts.begin(), stmts.begin(), stmts.end()); } -const std::list &ASTCompoundStmt::GetASTStmtList() const { +const MapleList &ASTCompoundStmt::GetASTStmtList() const { return astStmts; } @@ -227,7 +227,7 @@ std::list ASTBreakStmt::Emit2FEStmtImpl() const { std::list ASTLabelStmt::Emit2FEStmtImpl() const { std::list stmts; - auto feStmt = std::make_unique(labelName); + auto feStmt = std::make_unique(GetLabelName()); feStmt->SetSrcFileInfo(GetSrcFileIdx(), GetSrcFileLineNum()); 
stmts.emplace_back(std::move(feStmt)); stmts.splice(stmts.end(), subStmt->Emit2FEStmt()); @@ -264,7 +264,7 @@ std::list ASTUnaryOperatorStmt::Emit2FEStmtImpl() const { // ---------- ASTGotoStmt ---------- std::list ASTGotoStmt::Emit2FEStmtImpl() const { std::list stmts; - UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtGoto(labelName); + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtGoto(GetLabelName()); stmt->SetSrcFileInfo(GetSrcFileIdx(), GetSrcFileLineNum()); stmts.emplace_back(std::move(stmt)); return stmts; @@ -529,13 +529,15 @@ std::list ASTGCCAsmStmt::Emit2FEStmtImpl() const { std::list stmts; std::vector outputsExprs; std::vector inputsExprs; - std::unique_ptr stmt = std::make_unique(asmStr, isGoto, isVolatile); - stmt->SetOutputs(outputs); + std::unique_ptr stmt = std::make_unique(GetAsmStr(), isGoto, isVolatile); + std::vector> outputsVec(outputs.begin(), outputs.end()); + stmt->SetOutputs(outputsVec); for (uint32 i = 0; i < outputs.size(); ++i) { outputsExprs.emplace_back(exprs[i]->Emit2FEExpr(stmts)); } stmt->SetOutputsExpr(outputsExprs); - stmt->SetInputs(inputs); + std::vector> inputsVec(inputs.begin(), inputs.end()); + stmt->SetInputs(inputsVec); for (uint32 i = 0; i < inputs.size(); ++i) { UniqueFEIRExpr expr; if (inputs[i].second == "m") { @@ -548,8 +550,10 @@ std::list ASTGCCAsmStmt::Emit2FEStmtImpl() const { inputsExprs.emplace_back(std::move(expr)); } stmt->SetInputsExpr(inputsExprs); - stmt->SetClobbers(clobbers); - stmt->SetLabels(labels); + std::vector clobbersVec(clobbers.begin(), clobbers.end()); + stmt->SetClobbers(clobbersVec); + std::vector labelsVec(labels.begin(), labels.end()); + stmt->SetLabels(labelsVec); stmts.emplace_back(std::move(stmt)); return stmts; } @@ -581,4 +585,20 @@ std::list ASTGenericSelectionExprStmt::Emit2FEStmtImpl() const { } return stmts; } + +std::list ASTDeclRefExprStmt::Emit2FEStmtImpl() const { + std::list stmts; + for (auto expr : exprs) { + (void)expr->Emit2FEExpr(stmts); + } + return stmts; +} + +std::list ASTUnaryExprOrTypeTraitExprStmt::Emit2FEStmtImpl() const { + std::list stmts; + for (auto expr : exprs) { + (void)expr->Emit2FEExpr(stmts); + } + return stmts; +} } // namespace maple diff --git a/src/hir2mpl/ast_input/clang/src/ast_struct2fe_helper.cpp b/src/hir2mpl/ast_input/clang/src/ast_struct2fe_helper.cpp index 1ea52ec118a3b10e1562a719f0732fcab51d6f26..10d7c589a63dd9e74c196eb8a1422224c0a80d17 100644 --- a/src/hir2mpl/ast_input/clang/src/ast_struct2fe_helper.cpp +++ b/src/hir2mpl/ast_input/clang/src/ast_struct2fe_helper.cpp @@ -29,6 +29,7 @@ bool ASTStruct2FEHelper::ProcessDeclImpl() { if (mirStructType == nullptr) { return false; } + mirStructType->SetTypeAttrs(GetStructAttributeFromInput()); // Process Fields InitFieldHelpers(); ProcessFieldDef(); @@ -166,6 +167,7 @@ bool ASTGlobalVar2FEHelper::ProcessDeclImpl(MapleAllocator &allocator) { ENCChecker::InsertBoundaryInAtts(typeAttrs, astVar.GetBoundaryInfo()); // do not allow extern var override global var if (mirSymbol->GetAttrs().GetAttrFlag() != 0 && typeAttrs.GetAttr(ATTR_extern)) { + mirSymbol->AddAttrs(typeAttrs); ASTExpr *initExpr = astVar.GetInitExpr(); if (initExpr == nullptr) { return true; @@ -183,7 +185,7 @@ bool ASTGlobalVar2FEHelper::ProcessDeclImpl(MapleAllocator &allocator) { mirSymbol->SetStorageClass(MIRStorageClass::kScGlobal); } typeAttrs.SetAlign(astVar.GetAlign()); - mirSymbol->SetAttrs(typeAttrs); + mirSymbol->AddAttrs(typeAttrs); if (!astVar.GetSectionAttr().empty()) { mirSymbol->sectionAttr = 
GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(astVar.GetSectionAttr()); } @@ -247,10 +249,10 @@ bool ASTFunc2FEHelper::ProcessDeclImpl(MapleAllocator &allocator) { } funSym->SetWeakrefAttr(std::pair { true, idx }); } - std::vector paramDecls = func.GetParamDecls(); + MapleVector paramDecls = func.GetParamDecls(); if (firstArgRet) { - ASTDecl *returnParamVar = ASTDeclsBuilder::ASTVarBuilder( - allocator, "", "first_arg_return", std::vector{}, GenericAttrs()); + ASTDecl *returnParamVar = ASTDeclsBuilder::ASTVarBuilder(allocator, MapleString("", allocator.GetMemPool()), + "first_arg_return", MapleVector({}, allocator.Adapter()), GenericAttrs()); returnParamVar->SetIsParam(true); paramDecls.insert(paramDecls.begin(), returnParamVar); } @@ -272,13 +274,13 @@ bool ASTFunc2FEHelper::ProcessDeclImpl(MapleAllocator &allocator) { return true; } -const std::string &ASTFunc2FEHelper::GetSrcFileName() const { +const std::string ASTFunc2FEHelper::GetSrcFileName() const { return func.GetSrcFileName(); } void ASTFunc2FEHelper::SolveReturnAndArgTypesImpl(MapleAllocator &allocator) { (void)allocator; - const std::vector &returnAndArgTypeNames = func.GetTypeDesc(); + const MapleVector &returnAndArgTypeNames = func.GetTypeDesc(); retMIRType = returnAndArgTypeNames[1]; // skip funcType and returnType argMIRTypes.insert(argMIRTypes.begin(), returnAndArgTypeNames.begin() + 2, returnAndArgTypeNames.end()); diff --git a/src/hir2mpl/ast_input/common/include/ast_decl.h b/src/hir2mpl/ast_input/common/include/ast_decl.h index cc66fac4e766940773cb98f350e0f070c81254ed..2fc42a632fd204b76969ae1938a860d0a181f55e 100644 --- a/src/hir2mpl/ast_input/common/include/ast_decl.h +++ b/src/hir2mpl/ast_input/common/include/ast_decl.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
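The `ast_decl.h` hunks that follow move `ASTDecl` and its subclasses from heap-owned `std::string`/`std::vector`/`std::list` members to pool-backed `MapleString`/`MapleVector`/`MapleList`, so every declaration's storage is carved out of the parser's memory pool and released wholesale instead of node by node. A self-contained toy of that ownership model using the standard `<memory_resource>` arena as a stand-in; the pmr names are illustrative substitutes, not Maple's own allocator API:

```cpp
#include <memory_resource>
#include <string>
#include <vector>

int main() {
  // The buffer resource plays the role of MemPool; pmr containers taking
  // `&pool` mirror MapleString/MapleVector taking allocator.Adapter().
  std::pmr::monotonic_buffer_resource pool;

  // Pool-backed string and vector, analogous to the `name` and `typeDesc`
  // members of ASTDecl after this change.
  std::pmr::string name("first_arg_return", &pool);
  std::pmr::vector<int> typeDesc(&pool);
  typeDesc.push_back(1);

  // No per-member delete: everything is reclaimed when `pool` is destroyed.
  return static_cast<int>(name.size() + typeDesc.size());
}
```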
@@ -45,13 +45,15 @@ struct BoundaryInfo { class ASTDecl { public: - ASTDecl(const std::string &srcFile, const std::string &nameIn, const std::vector &typeDescIn) - : isGlobalDecl(false), srcFileName(srcFile), name(nameIn), typeDesc(typeDescIn) {} + ASTDecl(const MapleString &srcFile, const MapleString &nameIn, const MapleVector &typeDescIn) + : isGlobalDecl(false), srcFileName(srcFile), name(nameIn), typeDesc(typeDescIn) { + isDbgFriendly = FEOptions::GetInstance().IsDbgFriendly(); + } virtual ~ASTDecl() = default; - const std::string &GetSrcFileName() const; - const std::string &GetName() const; - const std::vector &GetTypeDesc() const; - void SetTypeDesc(const std::vector &typeVecIn); + const std::string GetSrcFileName() const; + const std::string GetName() const; + const MapleVector &GetTypeDesc() const; + void SetTypeDesc(const MapleVector &typeVecIn); GenericAttrs GetGenericAttrs() const { return genAttrs; } @@ -72,6 +74,18 @@ class ASTDecl { return isParam; } + void SetIsMacro(bool flag) { + if (flag) { + isMacroID = FEUtils::GetSequentialNumber(); + } else { + isMacroID = flag; + } + } + + uint32 IsMacroID() const { + return isMacroID; + } + void SetAlign(uint32 n) { if (n > align) { align = n; @@ -159,14 +173,17 @@ class ASTDecl { virtual void GenerateInitStmtImpl(std::list &stmts) {} bool isGlobalDecl; bool isParam = false; + bool isDbgFriendly = false; uint32 align = 1; // in byte - const std::string srcFileName; - std::string name; - std::vector typeDesc; + const MapleString srcFileName; + + MapleString name; + MapleVector typeDesc; GenericAttrs genAttrs; Pos pos = { 0, 0 }; uint32 srcFileIdx = 0; uint32 srcFileLineNum = 0; + uint32 isMacroID = false; DeclKind declKind = kASTDecl; BoundaryInfo boundary; std::string sectionAttr; @@ -174,7 +191,7 @@ class ASTDecl { class ASTField : public ASTDecl { public: - ASTField(const std::string &srcFile, const std::string &nameIn, const std::vector &typeDescIn, + ASTField(const MapleString &srcFile, const MapleString &nameIn, const MapleVector &typeDescIn, const GenericAttrs &genAttrsIn, bool isAnonymous = false) : ASTDecl(srcFile, nameIn, typeDescIn), isAnonymousField(isAnonymous) { genAttrs = genAttrsIn; @@ -191,8 +208,8 @@ class ASTField : public ASTDecl { class ASTFunc : public ASTDecl { public: - ASTFunc(const std::string &srcFile, const std::string &nameIn, const std::vector &typeDescIn, - const GenericAttrs &genAttrsIn, const std::vector ¶mDeclsIn) + ASTFunc(const MapleString &srcFile, const MapleString &nameIn, const MapleVector &typeDescIn, + const GenericAttrs &genAttrsIn, const MapleVector ¶mDeclsIn) : ASTDecl(srcFile, nameIn, typeDescIn), compound(nullptr), paramDecls(paramDeclsIn) { genAttrs = genAttrsIn; declKind = kASTFunc; @@ -203,7 +220,7 @@ class ASTFunc : public ASTDecl { void SetCompoundStmt(ASTStmt*); void InsertStmtsIntoCompoundStmtAtFront(const std::list &stmts); const ASTStmt *GetCompoundStmt() const; - const std::vector &GetParamDecls() const { + const MapleVector &GetParamDecls() const { return paramDecls; } std::vector> GenArgVarList() const; @@ -230,16 +247,17 @@ class ASTFunc : public ASTDecl { private: // typeDesc format: [funcType, retType, arg0, arg1 ... 
argN] ASTStmt *compound = nullptr; // func body - std::vector paramDecls; + MapleVector paramDecls; std::pair weakrefAttr; uint32 bodySize = 0; }; class ASTStruct : public ASTDecl { public: - ASTStruct(const std::string &srcFile, const std::string &nameIn, const std::vector &typeDescIn, - const GenericAttrs &genAttrsIn) - : ASTDecl(srcFile, nameIn, typeDescIn), isUnion(false) { + ASTStruct(MapleAllocator &allocatorIn, const MapleString &srcFile, const MapleString &nameIn, + const MapleVector &typeDescIn, const GenericAttrs &genAttrsIn) + : ASTDecl(srcFile, nameIn, typeDescIn), + isUnion(false), fields(allocatorIn.Adapter()), methods(allocatorIn.Adapter()) { genAttrs = genAttrsIn; declKind = kASTStruct; } @@ -251,7 +269,7 @@ class ASTStruct : public ASTDecl { fields.emplace_back(f); } - const std::list &GetFields() const { + const MapleList &GetFields() const { return fields; } @@ -265,13 +283,13 @@ class ASTStruct : public ASTDecl { private: bool isUnion = false; - std::list fields; - std::list methods; + MapleList fields; + MapleList methods; }; class ASTVar : public ASTDecl { public: - ASTVar(const std::string &srcFile, const std::string &nameIn, const std::vector &typeDescIn, + ASTVar(const MapleString &srcFile, const MapleString &nameIn, const MapleVector &typeDescIn, const GenericAttrs &genAttrsIn) : ASTDecl(srcFile, nameIn, typeDescIn) { genAttrs = genAttrsIn; @@ -314,8 +332,8 @@ class ASTVar : public ASTDecl { class ASTFileScopeAsm : public ASTDecl { public: - ASTFileScopeAsm(const std::string &srcFile) - : ASTDecl(srcFile, "", std::vector{}) { + ASTFileScopeAsm(MapleAllocator &allocatorIn, const MapleString &srcFile) + : ASTDecl(srcFile, MapleString("", allocatorIn.GetMemPool()), MapleVector(allocatorIn.Adapter())) { declKind = kASTFileScopeAsm; } ~ASTFileScopeAsm() = default; @@ -334,8 +352,8 @@ class ASTFileScopeAsm : public ASTDecl { class ASTEnumConstant : public ASTDecl { public: - ASTEnumConstant(const std::string &srcFile, const std::string &nameIn, const std::vector &typeDescIn, - const GenericAttrs &genAttrsIn) + ASTEnumConstant(const MapleString &srcFile, const MapleString &nameIn, const MapleVector &typeDescIn, + const GenericAttrs &genAttrsIn) : ASTDecl(srcFile, nameIn, typeDescIn) { genAttrs = genAttrsIn; declKind = kASTEnumConstant; @@ -353,9 +371,9 @@ class ASTEnumConstant : public ASTDecl { // only process local `EnumDecl` here class ASTEnumDecl : public ASTDecl { public: - ASTEnumDecl(const std::string &srcFile, const std::string &nameIn, const std::vector &typeDescIn, - const GenericAttrs &genAttrsIn) - : ASTDecl(srcFile, nameIn, typeDescIn) { + ASTEnumDecl(MapleAllocator &allocatorIn, const MapleString &srcFile, const MapleString &nameIn, + const MapleVector &typeDescIn, const GenericAttrs &genAttrsIn) + : ASTDecl(srcFile, nameIn, typeDescIn), consts(allocatorIn.Adapter()) { genAttrs = genAttrsIn; declKind = kASTEnumDecl; } @@ -366,7 +384,7 @@ class ASTEnumDecl : public ASTDecl { } private: - std::list consts; + MapleList consts; }; } // namespace maple #endif // HIR2MPL_AST_INPUT_INCLUDE_AST_DECL_H diff --git a/src/hir2mpl/ast_input/common/include/ast_decl_builder.h b/src/hir2mpl/ast_input/common/include/ast_decl_builder.h index 45aaf4fdc795954cee5e8652e711f5f6e6fd054e..579f0da56a5b1865383d6eec1c7590179650b7bd 100644 --- a/src/hir2mpl/ast_input/common/include/ast_decl_builder.h +++ b/src/hir2mpl/ast_input/common/include/ast_decl_builder.h @@ -25,86 +25,98 @@ class ASTDeclsBuilder { return decl; } - static ASTDecl *ASTDeclBuilder(const MapleAllocator &allocator, 
const std::string &srcFile, - const std::string &nameIn, const std::vector &typeDescIn, int64 id = INT64_MAX) { + static ASTDecl *ASTDeclBuilder(const MapleAllocator &allocator, const MapleString &srcFile, + const std::string &nameIn, const MapleVector &typeDescIn, int64 id = INT64_MAX) { + MapleString nameStr(nameIn, allocator.GetMemPool()); if (id == INT64_MAX) { - return allocator.GetMemPool()->New(srcFile, nameIn, typeDescIn); // for temp decl + return allocator.GetMemPool()->New(srcFile, nameStr, typeDescIn); // for temp decl } else if (declesTable[id] == nullptr) { - declesTable[id] = allocator.GetMemPool()->New(srcFile, nameIn, typeDescIn); + declesTable[id] = allocator.GetMemPool()->New(srcFile, nameStr, typeDescIn); } return declesTable[id]; } - static ASTVar *ASTVarBuilder(const MapleAllocator &allocator, const std::string &srcFile, const std::string &varName, - const std::vector &desc, const GenericAttrs &genAttrsIn, int64 id = INT64_MAX) { + static ASTVar *ASTVarBuilder(const MapleAllocator &allocator, const MapleString &srcFile, const std::string &varName, + const MapleVector &desc, const GenericAttrs &genAttrsIn, int64 id = INT64_MAX) { + MapleString varNameStr(varName, allocator.GetMemPool()); if (id == INT64_MAX) { - return allocator.GetMemPool()->New(srcFile, varName, desc, genAttrsIn); + return allocator.GetMemPool()->New(srcFile, varNameStr, desc, genAttrsIn); } else if (declesTable[id] == nullptr) { - declesTable[id] = allocator.GetMemPool()->New(srcFile, varName, desc, genAttrsIn); + declesTable[id] = allocator.GetMemPool()->New(srcFile, varNameStr, desc, genAttrsIn); } return static_cast(declesTable[id]); } - static ASTEnumConstant *ASTEnumConstBuilder(const MapleAllocator &allocator, const std::string &srcFile, - const std::string &varName, const std::vector &desc, + static ASTEnumConstant *ASTEnumConstBuilder(const MapleAllocator &allocator, const MapleString &srcFile, + const std::string &varName, const MapleVector &desc, const GenericAttrs &genAttrsIn, int64 id = INT64_MAX) { + MapleString varNameStr(varName, allocator.GetMemPool()); if (id == INT64_MAX) { - return allocator.GetMemPool()->New(srcFile, varName, desc, genAttrsIn); + return allocator.GetMemPool()->New(srcFile, varNameStr, desc, genAttrsIn); } else if (declesTable[id] == nullptr) { - declesTable[id] = allocator.GetMemPool()->New(srcFile, varName, desc, genAttrsIn); + declesTable[id] = allocator.GetMemPool()->New(srcFile, varNameStr, desc, genAttrsIn); } return static_cast(declesTable[id]); } - static ASTEnumDecl *ASTLocalEnumDeclBuilder(const MapleAllocator &allocator, const std::string &srcFile, - const std::string &varName, const std::vector &desc, const GenericAttrs &genAttrsIn, + static ASTEnumDecl *ASTLocalEnumDeclBuilder(MapleAllocator &allocator, const MapleString &srcFile, + const std::string &varName, const MapleVector &desc, const GenericAttrs &genAttrsIn, int64 id = INT64_MAX) { + MapleString varNameStr(varName, allocator.GetMemPool()); if (id == INT64_MAX) { - return allocator.GetMemPool()->New(srcFile, varName, desc, genAttrsIn); + return allocator.GetMemPool()->New(allocator, srcFile, varNameStr, desc, genAttrsIn); } else if (declesTable[id] == nullptr) { - declesTable[id] = allocator.GetMemPool()->New(srcFile, varName, desc, genAttrsIn); + declesTable[id] = allocator.GetMemPool()->New(allocator, srcFile, varNameStr, desc, genAttrsIn); } return static_cast(declesTable[id]); } - static ASTFunc *ASTFuncBuilder(const MapleAllocator &allocator, const std::string &srcFile, const std::string 
&nameIn, - const std::vector &typeDescIn, const GenericAttrs &genAttrsIn, - const std::vector ¶mDeclsIn, int64 id = INT64_MAX) { + static ASTFunc *ASTFuncBuilder(const MapleAllocator &allocator, const MapleString &srcFile, const std::string &nameIn, + const MapleVector &typeDescIn, const GenericAttrs &genAttrsIn, + MapleVector ¶mDeclsIn, int64 id = INT64_MAX) { + MapleString funcNameStr(nameIn, allocator.GetMemPool()); if (id == INT64_MAX) { - return allocator.GetMemPool()->New(srcFile, nameIn, typeDescIn, genAttrsIn, paramDeclsIn); + return allocator.GetMemPool()->New(srcFile, funcNameStr, typeDescIn, genAttrsIn, paramDeclsIn); } else if (declesTable[id] == nullptr) { - declesTable[id] = allocator.GetMemPool()->New(srcFile, nameIn, typeDescIn, genAttrsIn, paramDeclsIn); + declesTable[id] = allocator.GetMemPool()->New(srcFile, funcNameStr, typeDescIn, genAttrsIn, + paramDeclsIn); } return static_cast(declesTable[id]); } template - static T *ASTStmtBuilder(const MapleAllocator &allocator) { - return allocator.GetMemPool()->New(); + static T *ASTStmtBuilder(MapleAllocator &allocator) { + return allocator.GetMemPool()->New(allocator); } template - static T *ASTExprBuilder(const MapleAllocator &allocator) { - return allocator.GetMemPool()->New(); + static T *ASTExprBuilder(MapleAllocator &allocator) { + return allocator.GetMemPool()->New(allocator); } - static ASTStruct *ASTStructBuilder(const MapleAllocator &allocator, const std::string &srcFile, const std::string &nameIn, - const std::vector &typeDescIn, const GenericAttrs &genAttrsIn, int64 id = INT64_MAX) { + static ASTStruct *ASTStructBuilder(MapleAllocator &allocator, const MapleString &srcFile, + const std::string &nameIn, const MapleVector &typeDescIn, + const GenericAttrs &genAttrsIn, int64 id = INT64_MAX) { + MapleString structNameStr(nameIn, allocator.GetMemPool()); if (id == INT64_MAX) { - return allocator.GetMemPool()->New(srcFile, nameIn, typeDescIn, genAttrsIn); + return allocator.GetMemPool()->New(allocator, srcFile, structNameStr, typeDescIn, genAttrsIn); } else if (declesTable[id] == nullptr) { - declesTable[id] = allocator.GetMemPool()->New(srcFile, nameIn, typeDescIn, genAttrsIn); + declesTable[id] = allocator.GetMemPool()->New(allocator, srcFile, structNameStr, typeDescIn, + genAttrsIn); } return static_cast(declesTable[id]); } - static ASTField *ASTFieldBuilder(const MapleAllocator &allocator, const std::string &srcFile, const std::string &varName, - const std::vector &desc, const GenericAttrs &genAttrsIn, int64 id = INT64_MAX, - bool isAnonymous = false) { + static ASTField * ASTFieldBuilder(const MapleAllocator &allocator, const MapleString &srcFile, + const std::string &varName, const MapleVector &desc, + const GenericAttrs &genAttrsIn, int64 id = INT64_MAX, + bool isAnonymous = false) { + MapleString varNameStr(varName, allocator.GetMemPool()); if (id == INT64_MAX) { - return allocator.GetMemPool()->New(srcFile, varName, desc, genAttrsIn, isAnonymous); + return allocator.GetMemPool()->New(srcFile, varNameStr, desc, genAttrsIn, isAnonymous); } else if (declesTable[id] == nullptr) { - declesTable[id] = allocator.GetMemPool()->New(srcFile, varName, desc, genAttrsIn, isAnonymous); + declesTable[id] = allocator.GetMemPool()->New(srcFile, varNameStr, desc, genAttrsIn, + isAnonymous); } return static_cast(declesTable[id]); } diff --git a/src/hir2mpl/ast_input/common/include/ast_input-inl.h b/src/hir2mpl/ast_input/common/include/ast_input-inl.h index 
ed577ba5862eaca799c6456b1dadd5ee8ca5607a..a0155a8ac374261f36b56d7773e33496233bf499 100644 --- a/src/hir2mpl/ast_input/common/include/ast_input-inl.h +++ b/src/hir2mpl/ast_input/common/include/ast_input-inl.h @@ -27,7 +27,7 @@ template bool ASTInput::ReadASTFile(MapleAllocator &allocatorIn, uint32 index, const std::string &fileName) { T *parser = allocator.GetMemPool()->New(allocator, index, fileName, astStructs, astFuncs, astVars, astFileScopeAsms); - TRY_DO(parser->OpenFile()); + TRY_DO(parser->OpenFile(allocatorIn)); TRY_DO(parser->Verify()); TRY_DO(parser->PreProcessAST()); // Some implicit record decl would be retrieved in func body at use, @@ -36,6 +36,7 @@ bool ASTInput::ReadASTFile(MapleAllocator &allocatorIn, uint32 index, const s TRY_DO(parser->RetrieveStructs(allocatorIn)); TRY_DO(parser->RetrieveGlobalVars(allocatorIn)); TRY_DO(parser->RetrieveFileScopeAsms(allocatorIn)); + TRY_DO(parser->Release()); parserMap.emplace(fileName, parser); return true; } diff --git a/src/hir2mpl/ast_input/common/src/ast_decl.cpp b/src/hir2mpl/ast_input/common/src/ast_decl.cpp index 56c9747695190252267d080b87c74acc1f9def70..a5d3ce2b2a16e09417335271da90a2825b82aefc 100644 --- a/src/hir2mpl/ast_input/common/src/ast_decl.cpp +++ b/src/hir2mpl/ast_input/common/src/ast_decl.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -24,19 +24,19 @@ namespace maple { // ---------- ASTDecl --------- -const std::string &ASTDecl::GetSrcFileName() const { - return srcFileName; +const std::string ASTDecl::GetSrcFileName() const { + return srcFileName.c_str() == nullptr ? "" : srcFileName.c_str(); } -const std::string &ASTDecl::GetName() const { - return name; +const std::string ASTDecl::GetName() const { + return name.c_str() == nullptr ? 
"" : name.c_str(); } -const std::vector &ASTDecl::GetTypeDesc() const { +const MapleVector &ASTDecl::GetTypeDesc() const { return typeDesc; } -void ASTDecl::SetTypeDesc(const std::vector &typeVecIn) { +void ASTDecl::SetTypeDesc(const MapleVector &typeVecIn) { typeDesc = typeVecIn; } @@ -46,10 +46,18 @@ MIRConst *ASTDecl::Translate2MIRConst() const { std::string ASTDecl::GenerateUniqueVarName() const { // add `_line_column` suffix for avoiding local var name conflict - if (isGlobalDecl || isParam) { - return name; + if (isGlobalDecl || isParam || isDbgFriendly) { + return GetName(); } else { - return name + "_" + std::to_string(pos.first) + "_" + std::to_string(pos.second); + std::stringstream os; + os << GetName(); + if (isMacroID) { + // for macro expansion, variable names of same location need to be unique + os << "_" << std::to_string(isMacroID); + } else { + os << "_" << std::to_string(pos.first) << "_" << std::to_string(pos.second); + } + return os.str(); } } @@ -174,10 +182,8 @@ MIRSymbol *ASTVar::Translate2MIRSymbol() const { MIRSymbol *mirSymbol = feirVar->GenerateMIRSymbol(FEManager::GetMIRBuilder()); if (initExpr != nullptr && genAttrs.GetAttr(GENATTR_static)) { MIRConst *cst = initExpr->GenerateMIRConst(); - if (cst->GetKind() != kConstLblConst) { + if (cst != nullptr && cst->GetKind() != kConstInvalid) { mirSymbol->SetKonst(cst); - } else { - mirSymbol->SetKonst(nullptr); } } if (!sectionAttr.empty()) { @@ -228,7 +234,7 @@ std::list ASTFunc::EmitASTStmtToFEIR() const { return stmts; } const ASTCompoundStmt *astCpdStmt = static_cast(astStmt); - const std::list &astStmtList = astCpdStmt->GetASTStmtList(); + const MapleList &astStmtList = astCpdStmt->GetASTStmtList(); for (auto stmtNode : astStmtList) { std::list childStmts = stmtNode->Emit2FEStmt(); for (auto &stmt : childStmts) { @@ -255,6 +261,6 @@ std::list ASTFunc::EmitASTStmtToFEIR() const { // ---------- ASTStruct ---------- std::string ASTStruct::GetStructName(bool mapled) const { - return mapled ? namemangler::EncodeName(name) : name; + return mapled ? namemangler::EncodeName(GetName()) : GetName(); } } // namespace maple diff --git a/src/hir2mpl/ast_input/maple/src/maple_ast_parser.cpp b/src/hir2mpl/ast_input/maple/src/maple_ast_parser.cpp index 01765dc2d6ba18de8a7a18985e4ce35779c2773f..edbfc19b28aeadcacea583996cbc687b034f92b9 100644 --- a/src/hir2mpl/ast_input/maple/src/maple_ast_parser.cpp +++ b/src/hir2mpl/ast_input/maple/src/maple_ast_parser.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
@@ -97,7 +97,7 @@ ASTDecl *MapleASTParser::ProcessDeclIdentifierNode(MapleAllocator &allocator, ma } GenericAttrs attrs; astVar = ASTDeclsBuilder::ASTVarBuilder( - allocator, fileName, varName, std::vector{varType}, attrs, identifierDecl->GetNodeId()); + allocator, fileName, varName, MapleVector{varType}, attrs, identifierDecl->GetNodeId()); if (identifierDecl->GetInit() != nullptr) { auto astInitExpr = ProcessExpr(allocator, identifierDecl->GetInit()); @@ -129,7 +129,7 @@ ASTDecl *MapleASTParser::ProcessDeclFunctionNode(MapleAllocator &allocator, mapl return nullptr; } - std::vector typeDescIn; + MapleVector typeDescIn; typeDescIn.push_back(nullptr); // mirFuncType MIRType *retType = astFile->MapType(funcDecl->GetType()); if (retType == nullptr) { @@ -137,7 +137,7 @@ ASTDecl *MapleASTParser::ProcessDeclFunctionNode(MapleAllocator &allocator, mapl } typeDescIn.push_back(retType); - std::vector paramDecls; + MapleVector paramDecls; uint32 numParam = funcDecl->GetParamsNum(); for (uint32 i = 0; i < numParam; ++i) { maplefe::TreeNode *param = funcDecl->GetParam(i); diff --git a/src/hir2mpl/bytecode_input/class/src/jbc_function.cpp b/src/hir2mpl/bytecode_input/class/src/jbc_function.cpp index 63a92f0870acfb9196587a7c00f20b0fa4e8c63b..55b4bd740b80f87aa66d24c3929f9ea8dfc4f683 100644 --- a/src/hir2mpl/bytecode_input/class/src/jbc_function.cpp +++ b/src/hir2mpl/bytecode_input/class/src/jbc_function.cpp @@ -36,8 +36,8 @@ JBCFunction::~JBCFunction() { void JBCFunction::InitImpl() { FEFunction::InitImpl(); - generalCFG = std::make_unique(genStmtHead, genStmtTail); - generalCFG->Init(); + feirCFG = std::make_unique(feirStmtHead, feirStmtHead); + feirCFG->Init(); pesudoBBCatchPred = RegisterFEIRBB(std::make_unique()); } @@ -80,11 +80,11 @@ void JBCFunction::FinishImpl() { if (FEOptions::GetInstance().IsDumpJBCStmt() && dumpFunc) { DumpGeneralStmts(); } - if (FEOptions::GetInstance().IsDumpJBCBB() && dumpFunc) { - DumpFEIRBBs(); + if (FEOptions::GetInstance().IsDumpFEIRBB() && dumpFunc) { + INFO(kLncInfo, "not implemented"); } - if (FEOptions::GetInstance().IsDumpFEIRCFGGraph() && dumpFunc) { - DumpFEIRCFGGraph(); + if (FEOptions::GetInstance().IsDumpFEIRCFGGraph(method.GetFullName())) { + (void)DumpFEIRCFGGraph("dump cfg graph"); } (void)UpdateFormal("finish/update formal"); // Not gen func body for abstract method @@ -367,7 +367,7 @@ FEIRStmt *JBCFunction::BuildStmtFromInstructionForBranch(const jbc::JBCOp &op) { FEIRStmt *stmt = uniStmt.get(); const jbc::JBCOpBranch &opBranch = static_cast(op); FEIRStmt *target = BuildAndUpdateLabel(opBranch.GetTarget(), uniStmt); - static_cast(stmt)->AddSucc(*target); + static_cast(stmt)->AddExtraSucc(*target); return stmt; } @@ -378,7 +378,7 @@ FEIRStmt *JBCFunction::BuildStmtFromInstructionForGoto(const jbc::JBCOp &op) { stmt->SetFallThru(false); const jbc::JBCOpGoto &opGoto = static_cast(op); FEIRStmt *target = BuildAndUpdateLabel(opGoto.GetTarget(), uniStmt); - static_cast(stmt)->AddSucc(*target); + static_cast(stmt)->AddExtraSucc(*target); return stmt; } @@ -390,10 +390,10 @@ FEIRStmt *JBCFunction::BuildStmtFromInstructionForSwitch(const jbc::JBCOp &op) { const jbc::JBCOpSwitch &opSwitch = static_cast(op); for (const std::pair &targetInfo : opSwitch.GetTargets()) { FEIRStmt *target = BuildAndUpdateLabel(targetInfo.second, uniStmt); - static_cast(stmt)->AddSucc(*target); + static_cast(stmt)->AddExtraSucc(*target); } FEIRStmt *target = BuildAndUpdateLabel(opSwitch.GetDefaultTarget(), uniStmt); - static_cast(stmt)->AddSucc(*target); + 
static_cast(stmt)->AddExtraSucc(*target); return stmt; } @@ -404,7 +404,7 @@ FEIRStmt *JBCFunction::BuildStmtFromInstructionForJsr(const jbc::JBCOp &op) { stmt->SetFallThru(false); const jbc::JBCOpJsr &opJsr = static_cast(op); FEIRStmt *target = BuildAndUpdateLabel(opJsr.GetTarget(), uniStmt); - static_cast(stmt)->AddSucc(*target); + static_cast(stmt)->AddExtraSucc(*target); return stmt; } @@ -423,7 +423,7 @@ FEIRStmt *JBCFunction::BuildStmtFromInstructionForRet(const jbc::JBCOp &op) { for (auto itTarget : itJsrInfo->second) { uint32 pc = itTarget.second; FEIRStmt *target = BuildAndUpdateLabel(pc, uniStmt); - static_cast(stmt)->AddSucc(*target); + static_cast(stmt)->AddExtraSucc(*target); } return stmt; } @@ -627,7 +627,7 @@ FEIRStmt *JBCFunction::BuildAndUpdateLabel(uint32 dstPC, const std::unique_ptrsecond; } ASSERT(stmtLabel != nullptr, "null ptr check"); - stmtLabel->AddPred(*srcStmt); + stmtLabel->AddExtraPred(*srcStmt); return stmtLabel; } diff --git a/src/hir2mpl/bytecode_input/class/src/jbc_stmt.cpp b/src/hir2mpl/bytecode_input/class/src/jbc_stmt.cpp index ddfbd358c4a2e90d9b35f4b5ddf9963a2751e9fc..708395d4a5620ec2dc3be0a642ad5119b1276252 100644 --- a/src/hir2mpl/bytecode_input/class/src/jbc_stmt.cpp +++ b/src/hir2mpl/bytecode_input/class/src/jbc_stmt.cpp @@ -1141,7 +1141,7 @@ void JBCStmtInstBranch::DumpImpl(const std::string &prefix) const { "kind=" << JBCStmtKindHelper::JBCStmtKindName(JBCkind) << ", " << "op=" << op.GetOpcodeName() << ", " << "targets={"; - for (FEIRStmt *stmt : predsOrSuccs) { + for (FEIRStmt *stmt : extraSuccs) { std::cout << stmt->GetID() << " "; } std::cout << "})" << std::endl; @@ -1416,7 +1416,7 @@ void JBCStmtPesudoLabel::DumpImpl(const std::string &prefix) const { std::cout << prefix << "JBCStmtPesudoLabel (id=" << id << "," << "kind=" << JBCStmtKindHelper::JBCStmtKindName(JBCkind) << ", " << "preds={"; - for (FEIRStmt *stmt : predsOrSuccs) { + for (FEIRStmt *stmt : extraPreds) { std::cout << stmt->GetID() << " "; } std::cout << "})" << std::endl; @@ -1442,7 +1442,7 @@ void JBCStmtPesudoCatch::DumpImpl(const std::string &prefix) const { std::cout << prefix << "JBCStmtPesudoCatch (id=" << id << "," << "kind=" << JBCStmtKindHelper::JBCStmtKindName(JBCkind) << ", " << "preds={"; - for (FEIRStmt *stmt : predsOrSuccs) { + for (FEIRStmt *stmt : extraPreds) { std::cout << stmt->GetID() << " "; } std::cout << "})" << std::endl; diff --git a/src/hir2mpl/common/include/enhance_c_checker.h b/src/hir2mpl/common/include/enhance_c_checker.h index 66eb5b720c32e934e9824d586b699ee0310e292f..ba4db35eb733c6b7326b126e6d00531d06816cce 100644 --- a/src/hir2mpl/common/include/enhance_c_checker.h +++ b/src/hir2mpl/common/include/enhance_c_checker.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
@@ -29,7 +29,7 @@ class ENCChecker { static bool HasNonnullAttrInExpr(MIRBuilder &mirBuilder, const UniqueFEIRExpr &expr, bool isNested = false); static bool HasNullExpr(const UniqueFEIRExpr &expr); static void CheckNonnullGlobalVarInit(const MIRSymbol &sym, const MIRConst *cst); - static void CheckNullFieldInGlobalStruct(MIRType &type, MIRAggConst &cst, const std::vector &initExprs); + static void CheckNullFieldInGlobalStruct(MIRType &type, MIRAggConst &cst, const MapleVector &initExprs); static void CheckNonnullLocalVarInit(const MIRSymbol &sym, const ASTExpr *initExpr); static void CheckNonnullLocalVarInit(const MIRSymbol &sym, const UniqueFEIRExpr &initFEExpr, std::list &stmts); @@ -39,6 +39,7 @@ class ENCChecker { uint32 fileNum, uint32 fileLine); static bool HasNonnullFieldInStruct(const MIRType &mirType); static bool HasNonnullFieldInPtrStruct(const MIRType &mirType); + static void CheckNonnullFieldInStruct(const MIRType &src, const MIRType &dst, uint32 fileIdx, uint32 fileLine); static bool IsSameBoundary(const AttrBoundary &arg1, const AttrBoundary &arg2); static void CheckBoundaryArgsAndRetForFuncPtr(const MIRType &dstType, const UniqueFEIRExpr &srcExpr, uint32 fileNum, uint32 fileLine); @@ -61,7 +62,7 @@ class ENCChecker { const ASTCallExpr &astCallExpr); static UniqueFEIRExpr GetRealBoundaryLenExprInField(const UniqueFEIRExpr &lenExpr, MIRStructType &baseType, const UniqueFEIRExpr &dstExpr); - static void InitBoundaryVarFromASTDecl(const MapleAllocator &allocator, ASTDecl *ptrDecl, + static void InitBoundaryVarFromASTDecl(MapleAllocator &allocator, ASTDecl *ptrDecl, ASTExpr *lenExpr, std::list &stmts); static void InitBoundaryVar(MIRFunction &curFunction, const ASTDecl &ptrDecl, UniqueFEIRExpr lenExpr, std::list &stmts); @@ -90,6 +91,7 @@ class ENCChecker { static void InsertBoundaryInAtts(FuncAttrs &attr, const BoundaryInfo &boundary); static bool IsSafeRegion(const MIRBuilder &mirBuilder); static bool IsUnsafeRegion(const MIRBuilder &mirBuilder); + static void CheckLenExpr(const ASTExpr &lenExpr); }; // class ENCChecker } // namespace maple #endif // HIR2MPL_INCLUDE_COMMON_ENCCHECKER_H diff --git a/src/hir2mpl/common/include/fe_file_type.h b/src/hir2mpl/common/include/fe_file_type.h index 65c3a3dbe09b19432567bb9e7d874cbb86543aab..b4e51d55513c211e7e942cb2fb2d8c70c2b0bb38 100644 --- a/src/hir2mpl/common/include/fe_file_type.h +++ b/src/hir2mpl/common/include/fe_file_type.h @@ -54,7 +54,7 @@ class FEFileType { static const uint32 kMagicZip = 0x04034B50; static const uint32 kMagicDex = 0x0A786564; static const uint32 kMagicAST = 0x48435043; - static const uint32 kMagicMAST = 0x854C504D; + static const uint32 kMagicMAST = 0xAB4C504D; std::map mapExtNameType; std::map mapTypeMagic; std::map mapMagicType; diff --git a/src/hir2mpl/common/include/fe_function.h b/src/hir2mpl/common/include/fe_function.h index cd953736117b490f56db172e7c479f5607716a5e..68ba49da312ca6ac4054973d999a92c01ec143d9 100644 --- a/src/hir2mpl/common/include/fe_function.h +++ b/src/hir2mpl/common/include/fe_function.h @@ -25,6 +25,7 @@ #include "feir_bb.h" #include "feir_stmt.h" #include "fe_timer_ns.h" +#include "feir_lower.h" #include "feir_cfg.h" #include "fe_function_phase_result.h" #include "feir_type_infer.h" @@ -38,9 +39,6 @@ class FEFunction { public: FEFunction(MIRFunction &argMIRFunction, const std::unique_ptr &argPhaseResultTotal); virtual ~FEFunction(); - void LabelGenStmt(); - void LabelGenBB(); - bool HasDeadBB(); // element memory manage method FEIRStmt *RegisterGeneralStmt(std::unique_ptr stmt); @@ 
-55,6 +53,16 @@ class FEFunction { srcFileName = fileName; } + const FEIRStmt *GetFEIRStmtHead() const { + ASSERT_NOT_NULL(feirStmtHead); + return feirStmtHead; + } + + const FEIRStmt *GetFEIRStmtTail() const { + ASSERT_NOT_NULL(feirStmtTail); + return feirStmtTail; + } + void Init() { InitImpl(); } @@ -112,6 +120,9 @@ class FEFunction { virtual bool SetupFEIRStmtJavaTry(const std::string &phaseName); virtual bool SetupFEIRStmtBranch(const std::string &phaseName); virtual bool UpdateRegNum2This(const std::string &phaseName); + bool LowerFunc(const std::string &phaseName); + bool DumpFEIRBBs(const std::string &phaseName); + bool DumpFEIRCFGGraph(const std::string &phaseName); bool BuildFEIRDFG(const std::string &phaseName); // process fe ir check point, build fe ir DFG bool BuildFEIRUDDU(const std::string &phaseName); // build fe ir UD DU chain bool TypeInfer(const std::string &phaseName); // feir based Type Infer @@ -135,15 +146,11 @@ class FEFunction { virtual bool VerifyGeneral() = 0; virtual void VerifyGeneralFailCallBack() = 0; virtual void DumpGeneralStmts(); - virtual void DumpFEIRBBs(); - virtual void DumpFEIRCFGGraph(); virtual std::string GetGeneralFuncName() const; void EmitToMIRStmt(); void PhaseTimerStart(FETimerNS &timer); void PhaseTimerStopAndDump(FETimerNS &timer, const std::string &label); - virtual void DumpFEIRCFGGraphForBB(std::ofstream &file, const FEIRBB &bb); - virtual void DumpFEIRCFGGraphForCFGEdge(std::ofstream &file); virtual void DumpFEIRCFGGraphForDFGEdge(std::ofstream &file); virtual bool HasThis() = 0; virtual bool IsNative() = 0; @@ -155,11 +162,11 @@ class FEFunction { std::list genStmtListRaw; FEIRBB *genBBHead; FEIRBB *genBBTail; - std::unique_ptr generalCFG; FEIRStmt *feirStmtHead; FEIRStmt *feirStmtTail; FEIRBB *feirBBHead; FEIRBB *feirBBTail; + std::unique_ptr feirLower; std::unique_ptr feirCFG; std::map genStmtBBMap; std::vector> argVarList; @@ -213,6 +220,8 @@ class FEFunction { std::list> genStmtList; std::list> genBBList; + // FeirStmts generated by feirLower are inserted after the kStmtPesudoFuncEnd and linked, + // Access sequential feirStmt instructions through a doubly linked FELinkListNode from feirStmtHead std::list feirStmtList; std::list> feirBBList; std::map feirStmtBBMap; diff --git a/src/hir2mpl/common/include/fe_options.h b/src/hir2mpl/common/include/fe_options.h index e00a26ba3251d267a2fd4050fbaac58ed3ef39cc..d7ec8672511bd6adc02d6405d3a988f21f8a33c6 100644 --- a/src/hir2mpl/common/include/fe_options.h +++ b/src/hir2mpl/common/include/fe_options.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -236,6 +236,16 @@ class FEOptions { return isDumpLOC; } + void SetDbgFriendly(bool flag) { + isDbgFriendly = flag; + // set isDumpLOC if flag is true + isDumpLOC = flag ? 
flag : isDumpLOC; + } + + bool IsDbgFriendly() const { + return isDbgFriendly; + } + void SetIsDumpPhaseTime(bool flag) { isDumpPhaseTime = flag; } @@ -285,28 +295,21 @@ class FEOptions { return isDumpJBCStmt; } - void SetIsDumpJBCBB(bool flag) { - isDumpJBCBB = flag; - } - - bool IsDumpJBCBB() const { - return isDumpJBCBB; + void SetIsDumpFEIRBB(bool flag) { + isDumpBB = flag; } - void SetIsDumpFEIRCFGGraph(bool flag) { - isDumpGenCFGGraph = flag; + bool IsDumpFEIRBB() const { + return isDumpBB; } - bool IsDumpFEIRCFGGraph() const { - return isDumpGenCFGGraph; + void AddFuncNameForDumpCFGGraph(const std::string &funcName) { + funcNamesForDumpCFGGraph.insert(funcName); } - void SetFEIRCFGGraphFileName(const std::string &fileName) { - genCFGGraphFileName = fileName; - } - - const std::string GetJBCCFGGraphFileName() const { - return genCFGGraphFileName; + bool IsDumpFEIRCFGGraph(const std::string &funcName) const { + return funcNamesForDumpCFGGraph.find(funcName) != + funcNamesForDumpCFGGraph.end(); } void SetIsDumpJBCAll(bool flag) { @@ -472,6 +475,14 @@ class FEOptions { return funcInlineSize; } + void SetWPAA(bool flag) { + wpaa = flag; + } + + bool GetWPAA() const { + return wpaa; + } + private: static FEOptions options; // input control options @@ -504,6 +515,7 @@ class FEOptions { bool isDumpTime; bool isDumpComment = false; bool isDumpLOC = true; + bool isDbgFriendly = false; bool isDumpPhaseTime = false; bool isDumpPhaseTimeDetail = false; @@ -512,7 +524,6 @@ class FEOptions { bool isJBCUseImpreciseType = false; bool isJBCInfoUsePathName = false; bool isDumpJBCStmt = false; - bool isDumpJBCBB = false; bool isDumpJBCAll = false; bool isDumpJBCErrorOnly = false; std::set dumpJBCFuncNames; @@ -530,8 +541,8 @@ class FEOptions { bool isBigEndian = false; // general stmt/bb/cfg debug options - bool isDumpGenCFGGraph = false; - std::string genCFGGraphFileName = ""; + bool isDumpBB = false; + std::set funcNamesForDumpCFGGraph; // parallel uint32 nthreads; @@ -549,6 +560,7 @@ class FEOptions { bool isEnableSafeRegion = false; uint32 funcInlineSize = 0; + bool wpaa = false; FEOptions(); ~FEOptions() = default; diff --git a/src/hir2mpl/common/include/fe_utils.h b/src/hir2mpl/common/include/fe_utils.h index 92184fce85df2d9e457fc6b1668af32833ab43d4..e932fbf80775bdd2b023afdbf3799a86419c6391 100644 --- a/src/hir2mpl/common/include/fe_utils.h +++ b/src/hir2mpl/common/include/fe_utils.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
@@ -39,9 +39,10 @@ class FEUtils { static std::string GetBaseTypeName(const std::string &typeName); static PrimType GetPrimType(const GStrIdx &typeNameIdx); static uint32 GetSequentialNumber(); - static std::string GetFileNameHashStr(const std::string &fileName); + static std::string GetFileNameHashStr(const std::string &fileName, uint32 seed = 211); static std::string GetSequentialName0(const std::string &prefix, uint32_t num); static std::string GetSequentialName(const std::string &prefix); + static std::string CreateLabelName(); static FieldID GetStructFieldID(MIRStructType *base, const std::string &fieldName); static bool TraverseToNamedField(MIRStructType &structType, const GStrIdx &nameIdx, FieldID &fieldID, bool isTopLevel = true); @@ -262,6 +263,7 @@ class FELinkListNode { void InsertAfter(FELinkListNode *ins); static void InsertBefore(FELinkListNode *ins, FELinkListNode *pos); static void InsertAfter(FELinkListNode *ins, FELinkListNode *pos); + static void SpliceNodes(FELinkListNode *head, FELinkListNode *tail, FELinkListNode *newTail); FELinkListNode *GetPrev() const { return prev; } diff --git a/src/hir2mpl/common/include/fe_utils_java.h b/src/hir2mpl/common/include/fe_utils_java.h index e914e6ff4c5a9e9bd739d798de1135d841599ffa..1f58dddd688b3a0a0627d33963af62b812f5b604 100644 --- a/src/hir2mpl/common/include/fe_utils_java.h +++ b/src/hir2mpl/common/include/fe_utils_java.h @@ -17,6 +17,8 @@ #include #include #include "feir_type.h" +#include "global_tables.h" +#include "types_def.h" namespace maple { class FEUtilJava { @@ -24,9 +26,39 @@ class FEUtilJava { static std::vector SolveMethodSignature(const std::string &signature); static std::string SolveParamNameInJavaFormat(const std::string &signature); + static GStrIdx &GetMultiANewArrayClassIdx() { + static GStrIdx multiANewArrayClassIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName( + namemangler::EncodeName("Ljava/lang/reflect/Array;")); + return multiANewArrayClassIdx; + } + + static GStrIdx &GetMultiANewArrayElemIdx() { + static GStrIdx multiANewArrayElemIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName( + namemangler::EncodeName("newInstance")); + return multiANewArrayElemIdx; + } + + static GStrIdx &GetMultiANewArrayTypeIdx() { + static GStrIdx multiANewArrayTypeIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName( + namemangler::EncodeName("(Ljava/lang/Class;[I)Ljava/lang/Object;")); + return multiANewArrayTypeIdx; + } + + static GStrIdx &GetMultiANewArrayFullIdx() { + static GStrIdx multiANewArrayFullIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName( + namemangler::EncodeName("Ljava/lang/reflect/Array;|newInstance|(Ljava/lang/Class;[I)Ljava/lang/Object;")); + return multiANewArrayFullIdx; + } + + static GStrIdx &GetJavaThrowableNameMplIdx() { + static GStrIdx javaThrowableNameMplIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName( "Ljava/lang/Throwable;")); + return javaThrowableNameMplIdx; + } + private: FEUtilJava() = default; ~FEUtilJava() = default; }; } // namespace maple -#endif // HIR2MPL_INCLUDE_FE_UTILS_JAVA_H \ No newline at end of file +#endif // HIR2MPL_INCLUDE_FE_UTILS_JAVA_H diff --git a/src/hir2mpl/common/include/feir_node_kind.def b/src/hir2mpl/common/include/feir_node_kind.def index 8139d80342b420ae4afc9bb891e13e64eef9fb64..42d9797447be5639f57e569b28b2a089a4b39c9b 100644 --- a/src/hir2mpl/common/include/feir_node_kind.def +++ b/src/hir2mpl/common/include/feir_node_kind.def @@ -1,6 +1,6 @@ // FEIR_NODE_KIND (kind, description) 
FEIR_NODE_KIND(Stmt, "Stmt") -FEIR_NODE_KIND(FEIRStmtNary, "FEIRStmtNary") +FEIR_NODE_KIND(StmtNary, "StmtNary") FEIR_NODE_KIND(StmtAssign, "StmtAssign") FEIR_NODE_KIND(StmtNonAssign, "StmtNonAssign") FEIR_NODE_KIND(StmtPesudo, "StmtPesudo") @@ -83,3 +83,5 @@ FEIR_NODE_KIND(StmtContinue, "StmtContinue") FEIR_NODE_KIND(StmtLabel, "StmtLabel") FEIR_NODE_KIND(StmtAtomic, "StmtAtomic") FEIR_NODE_KIND(StmtGCCAsm, "StmtGCCAsm") +FEIR_NODE_KIND(StmtPesudoHead, "StmtPesudoHead") +FEIR_NODE_KIND(StmtPesudoTail, "StmtPesudoTail") diff --git a/src/hir2mpl/common/include/feir_stmt.h b/src/hir2mpl/common/include/feir_stmt.h old mode 100755 new mode 100644 index 4ffc046f50a57a3802f1718fd6593a50addaecc2..eb814a86ddc3e167791a54c2a3c91b78d7220cae --- a/src/hir2mpl/common/include/feir_stmt.h +++ b/src/hir2mpl/common/include/feir_stmt.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -138,17 +138,13 @@ class FEIRStmt : public FELinkListNode { } bool IsFallThru() const { - return isFallThru; + return IsFallThroughImpl(); } void SetFallThru(bool arg) { isFallThru = arg; } - bool IsFallThrough() const { - return IsFallThroughImpl(); - } - bool IsBranch() const { return IsBranchImpl(); } @@ -189,28 +185,20 @@ class FEIRStmt : public FELinkListNode { return isAuxPre || isAuxPost; } - const std::vector &GetPredsOrSuccs() const { - return predsOrSuccs; - } - - void AddPredOrSucc(FEIRStmt &stmt) { - predsOrSuccs.push_back(&stmt); - } - - const std::vector &GetPreds() const { - return predsOrSuccs; + const std::vector &GetExtraPreds() const { + return extraPreds; } - const std::vector &GetSuccs() const { - return predsOrSuccs; + const std::vector &GetExtraSuccs() const { + return extraSuccs; } - void AddPred(FEIRStmt &stmt) { - predsOrSuccs.push_back(&stmt); + void AddExtraPred(FEIRStmt &stmt) { + extraPreds.push_back(&stmt); } - void AddSucc(FEIRStmt &stmt) { - predsOrSuccs.push_back(&stmt); + void AddExtraSucc(FEIRStmt &stmt) { + extraSuccs.push_back(&stmt); } bool HasDef() const { @@ -277,7 +265,7 @@ class FEIRStmt : public FELinkListNode { virtual std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const; virtual bool IsStmtInstImpl() const; virtual bool IsFallThroughImpl() const { - return true; + return isFallThru; } virtual bool IsBranchImpl() const { @@ -313,11 +301,12 @@ class FEIRStmt : public FELinkListNode { uint32 srcFileLineNum = 0; uint32 hexPC = UINT32_MAX; bool isDummy = false; - bool isFallThru = false; + bool isFallThru = true; bool isAuxPre = false; bool isAuxPost = false; bool isThrowable = false; - std::vector predsOrSuccs; + std::vector extraPreds; + std::vector extraSuccs; }; using UniqueFEIRStmt = std::unique_ptr; @@ -668,12 +657,14 @@ class FEIRExprRegRead : public FEIRExpr { // ---------- FEIRExprAddrofConstArray ---------- class FEIRExprAddrofConstArray : public FEIRExpr { public: - FEIRExprAddrofConstArray(const std::vector &arrayIn, MIRType *typeIn); - FEIRExprAddrofConstArray(const std::string &arrayNameIn, const std::vector &arrayIn, MIRType *typeIn) + FEIRExprAddrofConstArray(const std::vector &arrayIn, MIRType *typeIn, const std::string &strIn); + FEIRExprAddrofConstArray(const std::string &arrayNameIn, const std::vector &arrayIn, MIRType *typeIn, + const std::string &strIn) : FEIRExpr(FEIRNodeKind::kExprAddrof, 
FEIRTypeHelper::CreateTypeNative(*GlobalTables::GetTypeTable().GetPtrType())), arrayName(arrayNameIn), - elemType(typeIn) { + elemType(typeIn), + str(strIn) { std::copy(arrayIn.begin(), arrayIn.end(), std::back_inserter(array)); } @@ -711,6 +702,7 @@ class FEIRExprAddrofConstArray : public FEIRExpr { std::string arrayName; std::vector array; MIRType *elemType = nullptr; + std::string str; }; // ---------- FEIRExprAddrOfLabel ---------- @@ -1040,6 +1032,11 @@ class FEIRExprIRead : public FEIRExpr { return type->Clone(); } + void SetClonedOpnd(UniqueFEIRExpr argOpnd) { + CHECK_FATAL(argOpnd != nullptr, "opnd is nullptr"); + subExpr = std::move(argOpnd); + } + UniqueFEIRExpr GetClonedOpnd() const { return subExpr->Clone(); } @@ -2034,10 +2031,6 @@ class FEIRStmtCondGotoForC : public FEIRStmt { } protected: - bool IsFallThroughImpl() const override { - return false; - } - bool IsBranchImpl() const override { return true; } @@ -2151,14 +2144,11 @@ class FEIRStmtSwitch : public FEIRStmt { } protected: - bool IsFallThroughImpl() const override { - return true; - } - bool IsBranchImpl() const override { return true; } + bool IsFallThroughImpl() const override; std::string DumpDotStringImpl() const override; void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; bool CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) override; @@ -2215,14 +2205,11 @@ class FEIRStmtSwitch2 : public FEIRStmt { } protected: - bool IsFallThroughImpl() const override { - return true; - } - bool IsBranchImpl() const override { return true; } + bool IsFallThroughImpl() const override; std::string DumpDotStringImpl() const override; std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; @@ -2702,6 +2689,10 @@ class FEIRStmtIf : public FEIRStmt { condExpr = std::move(argCondExpr); } + const UniqueFEIRExpr &GetCondExpr() const { + return condExpr; + } + void SetHasElse(bool argHasElse) { hasElse = argHasElse; } @@ -2725,10 +2716,7 @@ class FEIRStmtIf : public FEIRStmt { protected: std::string DumpDotStringImpl() const override; std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; - - bool IsFallThroughImpl() const override { - return true; - } + bool IsFallThroughImpl() const override; bool IsBranchImpl() const override { return true; @@ -2755,6 +2743,7 @@ class FEIRStmtDoWhile : public FEIRStmt { return true; } + bool IsFallThroughImpl() const override; std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; private: @@ -2807,6 +2796,10 @@ class FEIRStmtLabel : public FEIRStmt { explicit FEIRStmtLabel(const std::string &name) : FEIRStmt(FEIRNodeKind::kStmtLabel), labelName(name) {} ~FEIRStmtLabel() = default; + const std::string &GetLabelName() const { + return labelName; + } + protected: bool IsBranchImpl() const override { return true; @@ -2860,7 +2853,12 @@ class FEIRStmtGCCAsm : public FEIRStmt { protected: std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; - bool HandleConstraintPlusQm(MIRBuilder &mirBuilder, AsmNode *asmNode, uint32 index) const; + bool HandleConstraintPlusQm(MIRBuilder &mirBuilder, AsmNode *asmNode, uint32 index, + std::list &stmts, std::list &initStmts) const; + std::pair HandleGlobalAsmOutOperand(const UniqueFEIRVar &asmOut, + const FieldID fieldID, + std::list &stmts, + MIRBuilder &mirBuilder) const; private: std::vector> outputs; diff --git a/src/hir2mpl/common/include/hir2mpl_compiler.h b/src/hir2mpl/common/include/hir2mpl_compiler.h index 
f00e270545cc624dc236a881b4218f811fcc44d4..3645e767a062616ce53de8fb59bdf45e2ad9f4c9 100644 --- a/src/hir2mpl/common/include/hir2mpl_compiler.h +++ b/src/hir2mpl/common/include/hir2mpl_compiler.h @@ -70,7 +70,7 @@ class HIR2MPLCompiler { std::string firstInputName; std::string outputPath; std::string outputName; - std::string outputInlineName; + std::string outNameWithoutType; std::list> components; std::set compileFailedFEFunctions; }; diff --git a/src/hir2mpl/common/include/hir2mpl_options.h b/src/hir2mpl/common/include/hir2mpl_options.h index cb3d184ce6772bf8ab17e33c86b514995341662c..b7e5cefb6022cf888172babce29c74170e9d80a5 100644 --- a/src/hir2mpl/common/include/hir2mpl_options.h +++ b/src/hir2mpl/common/include/hir2mpl_options.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -75,6 +75,7 @@ class HIR2MPLOptions : public maple::MapleDriverOptionBase { bool ProcessDumpTime(const mapleOption::Option &opt); bool ProcessDumpComment(const mapleOption::Option &opt); bool ProcessDumpLOC(const mapleOption::Option &opt); + bool ProcessDbgFriendly(const mapleOption::Option &opt); bool ProcessDumpPhaseTime(const mapleOption::Option &opt); bool ProcessDumpPhaseTimeDetail(const mapleOption::Option &opt); @@ -82,7 +83,6 @@ class HIR2MPLOptions : public maple::MapleDriverOptionBase { bool ProcessModeForJavaStaticFieldName(const mapleOption::Option &opt); bool ProcessJBCInfoUsePathName(const mapleOption::Option &opt); bool ProcessDumpJBCStmt(const mapleOption::Option &opt); - bool ProcessDumpJBCBB(const mapleOption::Option &opt); bool ProcessDumpJBCAll(const mapleOption::Option &opt); bool ProcessDumpJBCErrorOnly(const mapleOption::Option &opt); bool ProcessDumpJBCFuncName(const mapleOption::Option &opt); @@ -95,12 +95,14 @@ class HIR2MPLOptions : public maple::MapleDriverOptionBase { bool ProcessSimplifyShortCircuit(const mapleOption::Option &opt); bool ProcessEnableVariableArray(const mapleOption::Option &opt); bool ProcessFuncInlineSize(const mapleOption::Option &opt); + bool ProcessWPAA(const mapleOption::Option &opt); // ast compiler options bool ProcessUseSignedChar(const mapleOption::Option &opt); bool ProcessBigEndian(const mapleOption::Option &opt); - // general stmt/bb/cfg debug options + // general stmt/bb/cfg options + bool ProcessDumpFEIRBB(const mapleOption::Option &opt); bool ProcessDumpFEIRCFGGraph(const mapleOption::Option &opt); // multi-thread control options diff --git a/src/hir2mpl/common/src/enhance_c_checker.cpp b/src/hir2mpl/common/src/enhance_c_checker.cpp index beecaa10fabf90be7d88f71156771d3ce6e44eca..b163a102f1339793035eced6119c102e74188273 100755 --- a/src/hir2mpl/common/src/enhance_c_checker.cpp +++ b/src/hir2mpl/common/src/enhance_c_checker.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
@@ -22,7 +22,8 @@ #include "fe_macros.h" namespace maple { -void ASTParser::ProcessNonnullFuncPtrAttrs(const clang::ValueDecl &valueDecl, ASTDecl &astVar) { +void ASTParser::ProcessNonnullFuncPtrAttrs(MapleAllocator &allocator, const clang::ValueDecl &valueDecl, + ASTDecl &astVar) { const MIRFuncType *funcType = FEUtils::GetFuncPtrType(*astVar.GetTypeDesc().front()); if (funcType == nullptr) { return; @@ -48,8 +49,8 @@ void ASTParser::ProcessNonnullFuncPtrAttrs(const clang::ValueDecl &valueDecl, AS } MIRType *newFuncType = GlobalTables::GetTypeTable().GetOrCreateFunctionType( funcType->GetRetTyIdx(), funcType->GetParamTypeList(), attrsVec, funcType->IsVarargs(), retAttr); - astVar.SetTypeDesc(std::vector{GlobalTables::GetTypeTable().GetOrCreatePointerType( - *GlobalTables::GetTypeTable().GetOrCreatePointerType(*newFuncType))}); + astVar.SetTypeDesc(MapleVector({GlobalTables::GetTypeTable().GetOrCreatePointerType( + *GlobalTables::GetTypeTable().GetOrCreatePointerType(*newFuncType))}, allocator.Adapter())); } bool ENCChecker::HasNonnullAttrInExpr(MIRBuilder &mirBuilder, const UniqueFEIRExpr &expr, bool isNested) { @@ -112,7 +113,7 @@ void ENCChecker::CheckNonnullGlobalVarInit(const MIRSymbol &sym, const MIRConst } } -void ENCChecker::CheckNullFieldInGlobalStruct(MIRType &type, MIRAggConst &cst, const std::vector &initExprs) { +void ENCChecker::CheckNullFieldInGlobalStruct(MIRType &type, MIRAggConst &cst, const MapleVector &initExprs) { if (!FEOptions::GetInstance().IsNpeCheckDynamic() || !ENCChecker::HasNonnullFieldInStruct(type)) { return; } @@ -280,9 +281,9 @@ void ASTCallExpr::CheckNonnullFieldInStruct() const { } std::list nullStmts; UniqueFEIRExpr baseExpr = nullptr; - if (funcName == "bzero" && args.size() == 2) { + if (GetFuncName() == "bzero" && args.size() == 2) { baseExpr = args[0]->Emit2FEExpr(nullStmts); - } else if (funcName == "memset" && args.size() == 3 && + } else if (GetFuncName() == "memset" && args.size() == 3 && FEIRBuilder::IsZeroConstExpr(args[1]->Emit2FEExpr(nullStmts))) { baseExpr = args[0]->Emit2FEExpr(nullStmts); } @@ -291,18 +292,21 @@ void ASTCallExpr::CheckNonnullFieldInStruct() const { MIRType *mirType = baseExpr->GetType()->GenerateMIRTypeAuto(); if (ENCChecker::HasNonnullFieldInPtrStruct(*mirType)) { FE_ERR(kLncErr, "%s:%d error: null assignment of nonnull structure field pointer in %s", - FEManager::GetModule().GetFileNameFromFileNum(srcFileIdx).c_str(), srcFileLineNum, funcName.c_str()); + FEManager::GetModule().GetFileNameFromFileNum(srcFileIdx).c_str(), srcFileLineNum, GetFuncName().c_str()); } } } -void ASTCastExpr::CheckNonnullFieldInStruct() const { - if (!FEOptions::GetInstance().IsNpeCheckDynamic() || dst->GetTypeIndex() == src->GetTypeIndex()) { +void ENCChecker::CheckNonnullFieldInStruct(const MIRType &src, const MIRType &dst, + uint32 fileIdx, uint32 fileLine) { + if (!FEOptions::GetInstance().IsNpeCheckDynamic() || + !dst.IsMIRPtrType() || !src.IsMIRPtrType() || + dst.GetTypeIndex() == src.GetTypeIndex()) { return; } - if (ENCChecker::HasNonnullFieldInPtrStruct(*dst)) { + if (ENCChecker::HasNonnullFieldInPtrStruct(dst)) { FE_ERR(kLncErr, "%s:%d error: null assignment risk of nonnull field pointer", - FEManager::GetModule().GetFileNameFromFileNum(srcFileIdx).c_str(), srcFileLineNum); + FEManager::GetModule().GetFileNameFromFileNum(fileIdx).c_str(), fileLine); } } @@ -552,10 +556,10 @@ void ASTParser::ProcessBoundaryFuncPtrAttrs(MapleAllocator &allocator, const cla if (isUpdated) { MIRType *newFuncType = 
GlobalTables::GetTypeTable().GetOrCreateFunctionType( funcType->GetRetTyIdx(), funcType->GetParamTypeList(), attrsVec, funcType->IsVarargs(), retAttr); - astDecl.SetTypeDesc(std::vector{GlobalTables::GetTypeTable().GetOrCreatePointerType( - *GlobalTables::GetTypeTable().GetOrCreatePointerType(*newFuncType))}); + astDecl.SetTypeDesc(MapleVector({GlobalTables::GetTypeTable().GetOrCreatePointerType( + *GlobalTables::GetTypeTable().GetOrCreatePointerType(*newFuncType))}, allocator.Adapter())); } - ProcessBoundaryFuncPtrAttrsByIndex(valueDecl, astDecl, *funcType); + ProcessBoundaryFuncPtrAttrsByIndex(allocator, valueDecl, astDecl, *funcType); } template @@ -575,8 +579,8 @@ bool ASTParser::ProcessBoundaryFuncPtrAttrsForParams(T *attr, MapleAllocator &al continue; } MIRType *ptrType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(typesVec[idx]); - ASTVar *tmpDecl = ASTDeclsBuilder::ASTVarBuilder( - allocator, "", "tmpVar", std::vector{ptrType}, GenericAttrs()); + ASTVar *tmpDecl = ASTDeclsBuilder::ASTVarBuilder(allocator, MapleString("", allocator.GetMemPool()), "tmpVar", + MapleVector({ptrType}, allocator.Adapter()), GenericAttrs()); bool isByte = std::is_same::type, clang::ByteCountAttr>::value; ProcessBoundaryLenExprInVar(allocator, *tmpDecl, proto.getParamType(idx), lenExpr, !isByte); ENCChecker::InsertBoundaryInAtts(attrsVec[idx], tmpDecl->GetBoundaryInfo()); @@ -594,16 +598,16 @@ bool ASTParser::ProcessBoundaryFuncPtrAttrsForRet(T *attr, MapleAllocator &alloc return false; } MIRType *ptrType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcType.GetRetTyIdx()); - ASTVar *tmpRetDecl = ASTDeclsBuilder::ASTVarBuilder( - allocator, "", "tmpRetVar", std::vector{ptrType}, GenericAttrs()); + ASTVar *tmpRetDecl = ASTDeclsBuilder::ASTVarBuilder(allocator, MapleString("", allocator.GetMemPool()), + "tmpRetVar", MapleVector({ptrType}, allocator.Adapter()), GenericAttrs()); bool isByte = std::is_same::type, clang::ReturnsByteCountAttr>::value; ProcessBoundaryLenExprInVar(allocator, *tmpRetDecl, clangFuncType.getReturnType(), lenExpr, !isByte); ENCChecker::InsertBoundaryInAtts(retAttr, tmpRetDecl->GetBoundaryInfo()); return true; } -void ASTParser::ProcessBoundaryFuncPtrAttrsByIndex(const clang::ValueDecl &valueDecl, ASTDecl &astDecl, - const MIRFuncType &funcType) { +void ASTParser::ProcessBoundaryFuncPtrAttrsByIndex(MapleAllocator &allocator, const clang::ValueDecl &valueDecl, + ASTDecl &astDecl, const MIRFuncType &funcType) { std::vector attrsVec = funcType.GetParamAttrsList(); TypeAttrs retAttr = funcType.GetRetAttrs(); bool isUpdated = false; @@ -631,8 +635,8 @@ void ASTParser::ProcessBoundaryFuncPtrAttrsByIndex(const clang::ValueDecl &value if (isUpdated) { MIRType *newFuncType = GlobalTables::GetTypeTable().GetOrCreateFunctionType( funcType.GetRetTyIdx(), funcType.GetParamTypeList(), attrsVec, funcType.IsVarargs(), retAttr); - astDecl.SetTypeDesc(std::vector{GlobalTables::GetTypeTable().GetOrCreatePointerType( - *GlobalTables::GetTypeTable().GetOrCreatePointerType(*newFuncType))}); + astDecl.SetTypeDesc(MapleVector({GlobalTables::GetTypeTable().GetOrCreatePointerType( + *GlobalTables::GetTypeTable().GetOrCreatePointerType(*newFuncType))}, allocator.Adapter())); } } @@ -745,10 +749,31 @@ void ASTParser::ProcessBoundaryLenExpr(MapleAllocator &allocator, ASTDecl &ptrDe } lenExpr = GetAddrShiftExpr(allocator, lenExpr, lenSize); } + ENCChecker::CheckLenExpr(*lenExpr); ptrDecl.SetBoundaryLenExpr(lenExpr); ptrDecl.SetIsBytedLen(!isSize); } +void ENCChecker::CheckLenExpr(const ASTExpr &lenExpr) { + 
std::list nullstmts; + (void)lenExpr.Emit2FEExpr(nullstmts); + for (const auto &stmt : nullstmts) { + bool isAssertStmt = false; + if (stmt->GetKind() == kStmtNary) { + FEIRStmtNary *nary = static_cast(stmt.get()); + if (kOpcodeInfo.IsAssertBoundary(nary->GetOP()) || kOpcodeInfo.IsAssertNonnull(nary->GetOP())) { + isAssertStmt = true; + } + } + if (!isAssertStmt) { + FE_ERR(kLncErr, "%s:%d error: The boundary length expr containing statement is invalid", + FEManager::GetModule().GetFileNameFromFileNum(lenExpr.GetSrcFileIdx()).c_str(), + lenExpr.GetSrcFileLineNum()); + break; + } + } +} + void ASTParser::ProcessBoundaryLenExprInFunc(MapleAllocator &allocator, const clang::FunctionDecl &funcDecl, unsigned int idx, ASTFunc &astFunc, ASTExpr *lenExpr, bool isSize) { ASTDecl *ptrDecl = nullptr; @@ -1196,15 +1221,15 @@ void ENCChecker::AssignUndefVal(MIRBuilder &mirBuilder, MIRSymbol &sym) { } } -void ENCChecker::InitBoundaryVarFromASTDecl(const MapleAllocator &allocator, ASTDecl *ptrDecl, ASTExpr *lenExpr, +void ENCChecker::InitBoundaryVarFromASTDecl(MapleAllocator &allocator, ASTDecl *ptrDecl, ASTExpr *lenExpr, std::list &stmts) { MIRType *ptrType = ptrDecl->GetTypeDesc().front(); // insert lower boundary stmt ASTDeclRefExpr *lowerRefExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); lowerRefExpr->SetASTDecl(ptrDecl); std::string lowerVarName = "_boundary." + ptrDecl->GetName() + ".lower"; - ASTVar *lowerDecl = ASTDeclsBuilder::ASTVarBuilder( - allocator, "", lowerVarName, std::vector{ptrType}, GenericAttrs()); + ASTVar *lowerDecl = ASTDeclsBuilder::ASTVarBuilder(allocator, MapleString("", allocator.GetMemPool()), + lowerVarName, MapleVector({ptrType}, allocator.Adapter()), GenericAttrs()); lowerDecl->SetIsParam(true); lowerDecl->SetInitExpr(lowerRefExpr); ASTDeclStmt *lowerStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); @@ -1219,8 +1244,8 @@ void ENCChecker::InitBoundaryVarFromASTDecl(const MapleAllocator &allocator, AST upperBinExpr->SetRetType(ptrType); upperBinExpr->SetCvtNeeded(true); std::string upperVarName = "_boundary." 
+ ptrDecl->GetName() + ".upper"; - ASTVar *upperDecl = ASTDeclsBuilder::ASTVarBuilder( - allocator, "", upperVarName, std::vector{ptrType}, GenericAttrs()); + ASTVar *upperDecl = ASTDeclsBuilder::ASTVarBuilder(allocator, MapleString("", allocator.GetMemPool()), + upperVarName, MapleVector({ptrType}, allocator.Adapter()), GenericAttrs()); upperDecl->SetIsParam(true); upperDecl->SetInitExpr(upperBinExpr); ASTDeclStmt *upperStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); @@ -1370,7 +1395,7 @@ bool ENCChecker::IsConstantIndex(const UniqueFEIRExpr &expr) { void ENCChecker::PeelNestedBoundaryChecking(std::list &stmts, const UniqueFEIRExpr &baseExpr) { std::list::iterator i = stmts.begin(); while (i != stmts.end()) { - bool flag = ((*i)->GetKind() == kFEIRStmtNary); + bool flag = ((*i)->GetKind() == kStmtNary); if (flag) { FEIRStmtNary *nary = static_cast((*i).get()); flag = kOpcodeInfo.IsAssertBoundary(nary->GetOP()) && @@ -1432,6 +1457,15 @@ UniqueFEIRExpr ENCChecker::GetRealBoundaryLenExprInFunc(const UniqueFEIRExpr &le return nullptr; } } + if (lenExpr->GetKind() == kExprIRead) { + FEIRExprIRead *ireadExpr = static_cast(lenExpr.get()); + UniqueFEIRExpr subExpr = GetRealBoundaryLenExprInFunc(ireadExpr->GetClonedOpnd(), astFunc, astCallExpr); + if (subExpr != nullptr) { + ireadExpr->SetClonedOpnd(std::move(subExpr)); + } else { + return nullptr; + } + } // formal parameter length expr -> actual parameter expr std::list nullStmts; if (lenExpr->GetKind() == kExprDRead) { @@ -1967,7 +2001,7 @@ void ENCChecker::ReduceBoundaryChecking(std::list &stmts, const } std::list::iterator iter = stmts.begin(); for (; iter != stmts.end(); ++iter) { - if ((*iter)->GetKind() != kFEIRStmtNary) { + if ((*iter)->GetKind() != kStmtNary) { continue; } FEIRStmtNary *nary = static_cast((*iter).get()); @@ -1977,7 +2011,7 @@ void ENCChecker::ReduceBoundaryChecking(std::list &stmts, const } nary->SetOP(OP_calcassertge); std::list::iterator nextedIter = std::next(iter, 1); - if (nextedIter != stmts.end() && (*nextedIter)->GetKind() == kFEIRStmtNary) { + if (nextedIter != stmts.end() && (*nextedIter)->GetKind() == kStmtNary) { FEIRStmtNary *nextedNary = static_cast((*nextedIter).get()); if (nextedNary->GetOP() == OP_assertlt) { nextedNary->SetOP(OP_calcassertlt); @@ -2080,7 +2114,7 @@ void ASTCallExpr::InsertBoundaryVarInRet(std::list &stmts) const } // GetCurrentFunction need to be optimized when parallel features MIRFunction *curFunction = FEManager::GetMIRBuilder().GetCurrentFunctionNotNull(); - ENCChecker::InitBoundaryVar(*curFunction, varName, *retType, std::move(realLenExpr), stmts); + ENCChecker::InitBoundaryVar(*curFunction, GetRetVarName(), *retType, std::move(realLenExpr), stmts); } bool ENCChecker::IsSameBoundary(const AttrBoundary &arg1, const AttrBoundary &arg2) { diff --git a/src/hir2mpl/common/src/fe_function.cpp b/src/hir2mpl/common/src/fe_function.cpp index 22e4a3a8921bc1fc7a13a17397e60d4ca40ec685..3da74fcd73cc329d83b4bf126cd4c46ce0322844 100644 --- a/src/hir2mpl/common/src/fe_function.cpp +++ b/src/hir2mpl/common/src/fe_function.cpp @@ -97,113 +97,61 @@ FEIRBB *FEFunction::RegisterFEIRBB(std::unique_ptr bb) { return feirBBList.back().get(); } -void FEFunction::LabelGenStmt() { +void FEFunction::DumpGeneralStmts() { FELinkListNode *nodeStmt = genStmtHead; - uint32 idx = 0; while (nodeStmt != nullptr) { FEIRStmt *stmt = static_cast(nodeStmt); - stmt->SetID(idx); - idx++; + stmt->Dump(); nodeStmt = nodeStmt->GetNext(); } } -void FEFunction::LabelGenBB() { - FELinkListNode *nodeBB = genBBHead; - uint32 idx 
= 0; - while (nodeBB != nullptr) { - FEIRBB *bb = static_cast(nodeBB); - bb->SetID(idx); - idx++; - nodeBB = nodeBB->GetNext(); - } -} - -bool FEFunction::HasDeadBB() { - FELinkListNode *nodeBB = genBBHead->GetNext(); - while (nodeBB != nullptr && nodeBB != genBBTail) { - FEIRBB *bb = static_cast(nodeBB); - if (bb->IsDead()) { - return true; - } - nodeBB = nodeBB->GetNext(); - } - return false; -} - -void FEFunction::DumpGeneralStmts() { - FELinkListNode *nodeStmt = genStmtHead; - while (nodeStmt != nullptr) { - FEIRStmt *stmt = static_cast(nodeStmt); - stmt->Dump(); - nodeStmt = nodeStmt->GetNext(); +bool FEFunction::LowerFunc(const std::string &phaseName) { + phaseResult.RegisterPhaseNameAndStart(phaseName); + if (feirLower == nullptr) { + feirLower = std::make_unique(*this); + feirLower->LowerFunc(); + feirStmtHead = feirLower->GetlowerStmtHead(); + feirStmtTail = feirLower->GetlowerStmtTail(); } + return phaseResult.Finish(); } -void FEFunction::DumpFEIRBBs() { - FELinkListNode *nodeBB = genBBHead->GetNext(); - while (nodeBB != nullptr && nodeBB != genBBTail) { - FEIRBB *bb = static_cast(nodeBB); - bb->Dump(); - nodeBB = nodeBB->GetNext(); +bool FEFunction::DumpFEIRBBs(const std::string &phaseName) { + HIR2MPL_PARALLEL_FORBIDDEN(); + phaseResult.RegisterPhaseNameAndStart(phaseName); + if (feirCFG == nullptr) { + feirCFG = std::make_unique(feirStmtHead, feirStmtTail); + feirCFG->GenerateCFG(); } + std::cout << "****** CFG built by FEIR for " << GetGeneralFuncName() << " *******\n"; + feirCFG->DumpBBs(); + std::cout << "****** END CFG built for " << GetGeneralFuncName() << " *******\n\n"; + return phaseResult.Finish(); } -void FEFunction::DumpFEIRCFGGraph() { +bool FEFunction::DumpFEIRCFGGraph(const std::string &phaseName) { HIR2MPL_PARALLEL_FORBIDDEN(); - if (!FEOptions::GetInstance().IsDumpFEIRCFGGraph()) { - return; + phaseResult.RegisterPhaseNameAndStart(phaseName); + std::string outName = FEManager::GetModule().GetFileName(); + size_t lastDot = outName.find_last_of("."); + if (lastDot != std::string::npos) { + outName = outName.substr(0, lastDot); } - std::string fileName = FEOptions::GetInstance().GetJBCCFGGraphFileName(); - CHECK_FATAL(!fileName.empty(), "General CFG Graph FileName is empty"); + CHECK_FATAL(!outName.empty(), "General CFG Graph FileName is empty"); + std::string fileName = outName + "." 
+ GetGeneralFuncName() + ".dot"; std::ofstream file(fileName); CHECK_FATAL(file.is_open(), "Failed to open General CFG Graph FileName: %s", fileName.c_str()); - file << "digraph {" << std::endl; - file << " # /* " << GetGeneralFuncName() << " */" << std::endl; - FELinkListNode *nodeBB = genBBHead->GetNext(); - while (nodeBB != nullptr && nodeBB != genBBTail) { - FEIRBB *bb = static_cast(nodeBB); - DumpFEIRCFGGraphForBB(file, *bb); - nodeBB = nodeBB->GetNext(); + if (feirCFG == nullptr) { + feirCFG = std::make_unique(feirStmtHead, feirStmtTail); + feirCFG->GenerateCFG(); } - DumpFEIRCFGGraphForCFGEdge(file); - DumpFEIRCFGGraphForDFGEdge(file); - file << "}" << std::endl; + file << "digraph {" << std::endl; + file << " label=\"" << GetGeneralFuncName() << "\"\n"; + file << " labelloc=t\n"; + feirCFG->DumpCFGGraph(file); file.close(); -} - -void FEFunction::DumpFEIRCFGGraphForBB(std::ofstream &file, const FEIRBB &bb) { - file << " BB" << bb.GetID() << " [shape=record,label=\"{" << std::endl; - const FELinkListNode *nodeStmt = bb.GetStmtHead(); - while (nodeStmt != nullptr) { - const FEIRStmt *stmt = static_cast(nodeStmt); - file << " " << stmt->DumpDotString(); - if (nodeStmt == bb.GetStmtTail()) { - file << std::endl; - break; - } else { - file << " |" << std::endl; - } - nodeStmt = nodeStmt->GetNext(); - } - file << " }\"];" << std::endl; -} - -void FEFunction::DumpFEIRCFGGraphForCFGEdge(std::ofstream &file) { - file << " subgraph cfg_edges {" << std::endl; - file << " edge [color=\"#000000\",weight=0.3,len=3];" << std::endl; - const FELinkListNode *nodeBB = genBBHead->GetNext(); - while (nodeBB != nullptr && nodeBB != genBBTail) { - const FEIRBB *bb = static_cast(nodeBB); - const FEIRStmt *stmtS = bb->GetStmtTail(); - for (FEIRBB *bbNext : bb->GetSuccBBs()) { - const FEIRStmt *stmtE = bbNext->GetStmtHead(); - file << " BB" << bb->GetID() << ":stmt" << stmtS->GetID() << " -> "; - file << "BB" << bbNext->GetID() << ":stmt" << stmtE->GetID() << std::endl; - } - nodeBB = nodeBB->GetNext(); - } - file << " }" << std::endl; + return phaseResult.Finish(); } void FEFunction::DumpFEIRCFGGraphForDFGEdge(std::ofstream &file) { @@ -538,7 +486,7 @@ bool FEFunction::IsBBEnd(const FEIRStmt &stmt) const { } bool FEFunction::MayBeBBEnd(const FEIRStmt &stmt) const { - return (stmt.IsBranch() || !stmt.IsFallThrough()); + return (stmt.IsBranch() || !stmt.IsFallThru()); } void FEFunction::LinkFallThroughBBAndItsNext(FEIRBB &bb) { diff --git a/src/hir2mpl/common/src/fe_struct_elem_info.cpp b/src/hir2mpl/common/src/fe_struct_elem_info.cpp index 82584923eb08608b000be43cebeea4b9c9fe758d..0a31e14cd97aaaccc28c074b7974176c23219229 100644 --- a/src/hir2mpl/common/src/fe_struct_elem_info.cpp +++ b/src/hir2mpl/common/src/fe_struct_elem_info.cpp @@ -18,11 +18,11 @@ #include "namemangler.h" #include "feir_builder.h" #include "feir_var_name.h" +#include "fe_utils_java.h" #include "fe_utils.h" #include "fe_manager.h" -#include "jbc_util.h" #include "fe_options.h" -#include "bc_util.h" + namespace maple { // ---------- FEStructElemInfo ---------- @@ -300,7 +300,7 @@ void FEStructMethodInfo::LoadMethodType() { void FEStructMethodInfo::LoadMethodTypeJava() { std::string signatureJava = namemangler::DecodeName(GlobalTables::GetStrTable().GetStringFromStrIdx(structElemNameIdx.full)); - std::vector typeNames = jbc::JBCUtil::SolveMethodSignature(signatureJava); + std::vector typeNames = FEUtilJava::SolveMethodSignature(signatureJava); CHECK_FATAL(typeNames.size() > 0, "invalid method signature: %s", signatureJava.c_str()); // 
constructor check const std::string &funcName = GetElemName(); diff --git a/src/hir2mpl/common/src/fe_utils.cpp b/src/hir2mpl/common/src/fe_utils.cpp index 772620e1c45cc9108da4a73c7b5d1d9e9bf90172..31c17b6590983a46e1f19eed2c350fdc59c5c3fd 100644 --- a/src/hir2mpl/common/src/fe_utils.cpp +++ b/src/hir2mpl/common/src/fe_utils.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -191,11 +191,14 @@ uint32 FEUtils::GetSequentialNumber() { return unnamedSymbolIdx++; } -std::string FEUtils::GetFileNameHashStr(const std::string &fileName) { - static const std::hash hasher; - auto tmp = static_cast(hasher(fileName)); - uint32 result = tmp & 0x7fffff; // The highest bit is always 0, so it can be converted to int32 safely - return "_" + std::to_string(static_cast(result)); +std::string FEUtils::GetFileNameHashStr(const std::string &fileName, uint32 seed) { + const char *name = fileName.c_str(); + uint32 hash = 0; + while (*name) { + uint8_t uName = *name++; + hash = hash * seed + uName; + } + return kRenameKeyWord + std::to_string(hash); } std::string FEUtils::GetSequentialName(const std::string &prefix) { @@ -203,6 +206,11 @@ std::string FEUtils::GetSequentialName(const std::string &prefix) { return name; } +std::string FEUtils::CreateLabelName() { + static uint32 unnamedSymbolIdx = 1; + return "L." + std::to_string(unnamedSymbolIdx++); +} + bool FEUtils::TraverseToNamedField(MIRStructType &structType, const GStrIdx &nameIdx, FieldID &fieldID, bool isTopLevel) { for (uint32 fieldIdx = 0; fieldIdx < structType.GetFieldsSize(); ++fieldIdx) { @@ -465,6 +473,16 @@ void FELinkListNode::InsertAfter(FELinkListNode *ins, FELinkListNode *pos) { ins->next = posNext; } +void FELinkListNode::SpliceNodes(FELinkListNode *head, FELinkListNode *tail, FELinkListNode *newTail) { + FELinkListNode *stmt = head->GetNext(); + FELinkListNode *nextStmt = stmt; + do { + stmt = nextStmt; + nextStmt = stmt->GetNext(); + newTail->InsertBefore(stmt); + } while (nextStmt != nullptr && nextStmt != tail); +} + uint32_t AstSwitchUtil::tempVarNo = 0; const char *AstSwitchUtil::cleanLabel = "clean"; const char *AstSwitchUtil::exitLabel = "exit"; diff --git a/src/hir2mpl/common/src/feir_stmt.cpp b/src/hir2mpl/common/src/feir_stmt.cpp old mode 100755 new mode 100644 index 9b6e73dc8eb44146cf8b7e85368335c1e44d28db..91386198aea11c4cdb3a37e33ca9a21104e7a21c --- a/src/hir2mpl/common/src/feir_stmt.cpp +++ b/src/hir2mpl/common/src/feir_stmt.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
@@ -24,11 +24,13 @@ #include "feir_var_type_scatter.h" #include "fe_options.h" #include "feir_type_helper.h" -#include "bc_util.h" -#include "rc_setter.h" #include "fe_utils.h" +#include "fe_utils_java.h" #include "enhance_c_checker.h" #include "fe_macros.h" +#ifndef ONLY_C +#include "rc_setter.h" +#endif namespace maple { std::string GetFEIRNodeKindDescription(FEIRNodeKind kindArg) { @@ -108,7 +110,7 @@ std::string FEIRStmt::DumpDotStringImpl() const { } void FEIRStmt::DumpImpl(const std::string &prefix) const { - std::cout << prefix << "FEIRStmt" << id << "(kind=" << GetKind() << ")" << std::endl; + std::cout << prefix << "FEIRStmt" << id << "(kind=" << GetFEIRNodeKindDescription(kind) << ")\n"; } // ---------- FEIRStmtCheckPoint ---------- @@ -224,7 +226,7 @@ std::string FEIRStmtCheckPoint::DumpDotStringImpl() const { // ---------- FEIRStmtNary ---------- FEIRStmtNary::FEIRStmtNary(Opcode opIn, std::list> argExprsIn) - : FEIRStmt(kFEIRStmtNary), op(opIn), argExprs(std::move(argExprsIn)) {} + : FEIRStmt(kStmtNary), op(opIn), argExprs(std::move(argExprsIn)) {} std::list FEIRStmtNary::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { std::list stmts; @@ -335,19 +337,23 @@ void FEIRStmtDAssign::InsertNonnullChecking(MIRBuilder &mirBuilder, const MIRSym if (!FEOptions::GetInstance().IsNpeCheckDynamic()) { return; } + MIRType *srcType = expr->GetType()->GenerateMIRTypeAuto(); if (fieldID == 0) { + ENCChecker::CheckNonnullFieldInStruct(*srcType, *dstSym.GetType(), srcFileIndex,srcFileLineNum); if (!dstSym.GetAttr(ATTR_nonnull)) { return; } } else { FieldID tmpID = fieldID; FieldPair fieldPair = static_cast(dstSym.GetType())->TraverseToFieldRef(tmpID); + MIRType *dstType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldPair.second.first); + ENCChecker::CheckNonnullFieldInStruct(*srcType, *dstType, srcFileIndex,srcFileLineNum); if (!fieldPair.second.second.GetAttr(FLDATTR_nonnull)) { return; } } if (ENCChecker::HasNullExpr(expr)) { - FE_ERR(kLncErr, "%s:%d error: null assignment of nonnull pointer", + FE_ERR(kLncErr, "%s:%d errorz: null assignment of nonnull pointer", FEManager::GetModule().GetFileNameFromFileNum(srcFileIndex).c_str(), srcFileLineNum); return; } @@ -706,10 +712,10 @@ FEStructMethodInfo &FEIRStmtJavaMultiANewArray::GetMethodInfoNewInstance() { if (methodInfoNewInstance != nullptr) { return *methodInfoNewInstance; } - StructElemNameIdx structElemNameIdx(bc::BCUtil::GetMultiANewArrayClassIdx(), - bc::BCUtil::GetMultiANewArrayElemIdx(), - bc::BCUtil::GetMultiANewArrayTypeIdx(), - bc::BCUtil::GetMultiANewArrayFullIdx()); + StructElemNameIdx structElemNameIdx(FEUtilJava::GetMultiANewArrayClassIdx(), + FEUtilJava::GetMultiANewArrayElemIdx(), + FEUtilJava::GetMultiANewArrayTypeIdx(), + FEUtilJava::GetMultiANewArrayFullIdx()); methodInfoNewInstance = static_cast(FEManager::GetTypeManager().RegisterStructMethodInfo( structElemNameIdx, kSrcLangJava, true)); return *methodInfoNewInstance; @@ -833,7 +839,9 @@ std::list FEIRStmtAssertBoundary::GenMIRStmtsImpl(MIRBuilder &mirBuil // ---------- FEIRStmtReturn ---------- FEIRStmtReturn::FEIRStmtReturn(std::unique_ptr argExpr) - : FEIRStmtUseOnly(FEIRNodeKind::kStmtReturn, OP_return, std::move(argExpr)) {} + : FEIRStmtUseOnly(FEIRNodeKind::kStmtReturn, OP_return, std::move(argExpr)) { + isFallThru = true; + } std::list FEIRStmtReturn::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { std::list ans; @@ -863,6 +871,9 @@ void FEIRStmtReturn::InsertNonnullChecking(MIRBuilder &mirBuilder, std::listGetType()->GenerateMIRTypeAuto(); + MIRType *dstType = 
mirBuilder.GetCurrentFunction()->GetReturnType(); + ENCChecker::CheckNonnullFieldInStruct(*srcType, *dstType, srcFileIndex, srcFileLineNum); if (!mirBuilder.GetCurrentFunction()->GetAttrs().GetAttr(FUNCATTR_nonnull)) { return; } @@ -1026,6 +1037,13 @@ bool FEIRStmtSwitch::CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, F return expr->CalculateDefs4AllUses(checkPoint, udChain); } +bool FEIRStmtSwitch::IsFallThroughImpl() const { + WARN(kLncWarn, "%s:%d stmt[%s] need to be lowed when building bb", + FEManager::GetModule().GetFileNameFromFileNum(srcFileIndex).c_str(), srcFileLineNum, + GetFEIRNodeKindDescription(kind).c_str()); + return false; +} + std::list FEIRStmtSwitch::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { std::list ans; CaseVector switchTable(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); @@ -1059,6 +1077,13 @@ FEIRStmtSwitch2::~FEIRStmtSwitch2() { defaultTarget = nullptr; } +bool FEIRStmtSwitch2::IsFallThroughImpl() const { + WARN(kLncWarn, "%s:%d stmt[%s] need to be lowed when building bb", + FEManager::GetModule().GetFileNameFromFileNum(srcFileIndex).c_str(), srcFileLineNum, + GetFEIRNodeKindDescription(kind).c_str()); + return false; +} + std::list FEIRStmtSwitch2::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { std::list ans; CaseVector switchTable(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); @@ -1101,6 +1126,13 @@ FEIRStmtIf::FEIRStmtIf(UniqueFEIRExpr argCondExpr, } } +bool FEIRStmtIf::IsFallThroughImpl() const { + WARN(kLncWarn, "%s:%d stmt[%s] need to be lowed when building bb", + FEManager::GetModule().GetFileNameFromFileNum(srcFileIndex).c_str(), srcFileLineNum, + GetFEIRNodeKindDescription(kind).c_str()); + return false; +} + std::list FEIRStmtIf::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { BaseNode *condBase = condExpr->GenMIRNode(mirBuilder); IfStmtNode *stmt = nullptr; @@ -1564,9 +1596,11 @@ std::list FEIRStmtFieldStore::GenMIRStmtsImplForNonStatic(MIRBuilder BaseNode *nodeObj = exprDReadObj->GenMIRNode(mirBuilder); BaseNode *nodeField = exprDReadField->GenMIRNode(mirBuilder); StmtNode *stmt = mirBuilder.CreateStmtIassign(*ptrStructType, fieldID, nodeObj, nodeField); +#ifndef ONLY_C if (FEOptions::GetInstance().IsRC()) { bc::RCSetter::GetRCSetter().CollectInputStmtField(stmt, fieldInfo.GetElemNameIdx()); } +#endif ans.emplace_back(stmt); if (!FEOptions::GetInstance().IsNoBarrier() && fieldInfo.IsVolatile()) { StmtNode *barrier = mirBuilder.GetMirModule().CurFuncCodeMemPool()->New(OP_membarrelease); @@ -1862,6 +1896,9 @@ void FEIRStmtCallAssign::InsertNonnullCheckingInArgs(const UniqueFEIRExpr &expr, if (index >= methodInfo.GetMirFunc()->GetParamSize()) { // Skip variable parameter return; } + MIRType *srcType = expr->GetType()->GenerateMIRTypeAuto(); + MIRType *dstType = methodInfo.GetMirFunc()->GetNthParamType(index); + ENCChecker::CheckNonnullFieldInStruct(*srcType, *dstType, srcFileIndex, srcFileLineNum); if (!methodInfo.GetMirFunc()->GetNthParamAttr(index).GetAttr(ATTR_nonnull)) { return; } @@ -1976,7 +2013,13 @@ void FEIRStmtICallAssign::InsertNonnullCheckingInArgs(MIRBuilder &mirBuilder, st size_t size = funcType->GetParamAttrsList().size(); for (const auto &expr : exprArgs) { ++idx; - if (idx < 0 || idx >= static_cast(size) || !funcType->GetNthParamAttrs(idx).GetAttr(ATTR_nonnull)) { + if (idx < 0 || idx >= static_cast(size)) { + continue; + } + MIRType *srcType = expr->GetType()->GenerateMIRTypeAuto(); + MIRType *dstType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcType->GetNthParamType(idx)); + 
ENCChecker::CheckNonnullFieldInStruct(*srcType, *dstType, srcFileIndex, srcFileLineNum); + if (!funcType->GetNthParamAttrs(idx).GetAttr(ATTR_nonnull)) { continue; } if (ENCChecker::HasNullExpr(expr)) { @@ -2141,17 +2184,14 @@ std::list FEIRStmtIntrinsicCallAssign::GenMIRStmtsImpl(MIRBuilder &mi args.push_back(node); } } - stmtCall = mirBuilder.CreateStmtIntrinsicCall(intrinsicId, std::move(args), TyIdx(0)); - } else if (intrinsicId == INTRN_C_memset) { - MapleVector args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); - if (exprList != nullptr) { - for (const auto &expr : *exprList) { - BaseNode *node = expr->GenMIRNode(mirBuilder); - args.push_back(node); - } + MIRSymbol *retVarSym = nullptr; + if (var != nullptr) { + retVarSym = var->GenerateLocalMIRSymbol(mirBuilder); + stmtCall = mirBuilder.CreateStmtIntrinsicCallAssigned(intrinsicId, std::move(args), retVarSym); + } else { + stmtCall = mirBuilder.CreateStmtIntrinsicCall(intrinsicId, std::move(args), TyIdx(0)); } - stmtCall = mirBuilder.CreateStmtIntrinsicCall(INTRN_C_memset, std::move(args), TyIdx(0)); - } else if (intrinsicId >= INTRN_vector_zip_v2i32 && intrinsicId <= INTRN_vector_zip_v2f32) { + } else { MapleVector args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); if (exprList != nullptr) { for (const auto &expr : *exprList) { @@ -2162,8 +2202,10 @@ std::list FEIRStmtIntrinsicCallAssign::GenMIRStmtsImpl(MIRBuilder &mi MIRSymbol *retVarSym = nullptr; if ((var != nullptr) && (var.get() != nullptr)) { retVarSym = var->GenerateLocalMIRSymbol(mirBuilder); + stmtCall = mirBuilder.CreateStmtIntrinsicCallAssigned(intrinsicId, std::move(args), retVarSym); + } else { + stmtCall = mirBuilder.CreateStmtIntrinsicCall(intrinsicId, std::move(args), TyIdx(0)); } - stmtCall = mirBuilder.CreateStmtIntrinsicCallAssigned(intrinsicId, std::move(args), retVarSym); } // other intrinsic call should be implemented ans.emplace_back(stmtCall); @@ -2368,6 +2410,8 @@ BaseNode *FEIRExprConst::GenMIRNodeImpl(MIRBuilder &mirBuilder) const { case PTY_i16: case PTY_i32: case PTY_i64: + case PTY_i128: + case PTY_u128: case PTY_ref: case PTY_ptr: return mirBuilder.CreateIntConst(value.i64, primType); @@ -2516,21 +2560,28 @@ BaseNode *FEIRExprIRead::GenMIRNodeImpl(MIRBuilder &mirBuilder) const { } // ---------- FEIRExprAddrofConstArray ---------- -FEIRExprAddrofConstArray::FEIRExprAddrofConstArray(const std::vector &arrayIn, MIRType *typeIn) +FEIRExprAddrofConstArray::FEIRExprAddrofConstArray(const std::vector &arrayIn, MIRType *typeIn, + const std::string &strIn) : FEIRExpr(FEIRNodeKind::kExprAddrof, FEIRTypeHelper::CreateTypeNative(*GlobalTables::GetTypeTable().GetPtrType())), arrayName(FEOptions::GetInstance().GetFuncInlineSize() != 0 ? 
FEUtils::GetSequentialName("const_array_") + FEUtils::GetFileNameHashStr(FEManager::GetModule().GetFileName()) : FEUtils::GetSequentialName("const_array_")), - elemType(typeIn) { + elemType(typeIn), + str(strIn) { std::copy(arrayIn.begin(), arrayIn.end(), std::back_inserter(array)); } std::unique_ptr FEIRExprAddrofConstArray::CloneImpl() const { - std::unique_ptr expr = std::make_unique(arrayName, array, elemType); + std::unique_ptr expr = std::make_unique(arrayName, array, elemType, str); return expr; } BaseNode *FEIRExprAddrofConstArray::GenMIRNodeImpl(MIRBuilder &mirBuilder) const { + if (!str.empty()) { + MIRModule &module = mirBuilder.GetMirModule(); + UStrIdx StrIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(str); + return module.GetMemPool()->New(PTY_ptr, StrIdx); + } MIRType *arrayTypeWithSize = GlobalTables::GetTypeTable().GetOrCreateArrayType( *elemType,static_cast(array.size())); MIRSymbol *arrayVar = mirBuilder.GetOrCreateGlobalDecl(arrayName, *arrayTypeWithSize); @@ -3790,7 +3841,7 @@ BaseNode *FEIRExprAtomic::GenMIRNodeImpl(MIRBuilder &mirBuilder) const { {kAtomicOpStoreN, INTRN_C___atomic_store_n}, {kAtomicOpStore, INTRN_C___atomic_store}, {kAtomicOpExchangeN, INTRN_C___atomic_exchange_n}, - {kAtomicOpExchange, INTRN_C___atomic_compare_exchange}, + {kAtomicOpExchange, INTRN_C___atomic_exchange}, {kAtomicOpAddFetch, INTRN_C___atomic_add_fetch}, {kAtomicOpSubFetch, INTRN_C___atomic_sub_fetch}, {kAtomicOpAndFetch, INTRN_C___atomic_and_fetch}, @@ -3990,7 +4041,7 @@ std::list FEIRStmtPesudoCatch2::GenMIRStmtsImpl(MIRBuilder &mirBuilde void FEIRStmtPesudoCatch2::AddCatchTypeNameIdx(GStrIdx typeNameIdx) { UniqueFEIRType type; if (typeNameIdx == FEUtils::GetVoidIdx()) { - type = std::make_unique(PTY_ref, bc::BCUtil::GetJavaThrowableNameMplIdx()); + type = std::make_unique(PTY_ref, FEUtilJava::GetJavaThrowableNameMplIdx()); } else { type = std::make_unique(PTY_ref, typeNameIdx); } @@ -4131,6 +4182,9 @@ void FEIRStmtIAssign::InsertNonnullChecking(MIRBuilder &mirBuilder, const MIRTyp } FieldID tmpID = fieldID; FieldPair fieldPair = static_cast(baseType).TraverseToFieldRef(tmpID); + MIRType *srcType = baseExpr->GetType()->GenerateMIRTypeAuto(); + MIRType *dstType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldPair.second.first); + ENCChecker::CheckNonnullFieldInStruct(*srcType, *dstType, srcFileIndex, srcFileLineNum); if (fieldPair.second.second.GetAttr(FLDATTR_nonnull)) { if (ENCChecker::HasNullExpr(baseExpr)) { FE_ERR(kLncErr, "%s:%d error: null assignment of nonnull pointer", @@ -4144,6 +4198,13 @@ void FEIRStmtIAssign::InsertNonnullChecking(MIRBuilder &mirBuilder, const MIRTyp } // ---------- FEIRStmtDoWhile ---------- +bool FEIRStmtDoWhile::IsFallThroughImpl() const { + WARN(kLncWarn, "%s:%d stmt[%s] need to be lowed when building bb", + FEManager::GetModule().GetFileNameFromFileNum(srcFileIndex).c_str(), srcFileLineNum, + GetFEIRNodeKindDescription(kind).c_str()); + return false; +} + std::list FEIRStmtDoWhile::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { std::list stmts; auto *whileStmtNode = mirBuilder.GetCurrentFuncCodeMp()->New(opcode); @@ -4194,21 +4255,29 @@ std::list FEIRStmtAtomic::GenMIRStmtsImpl(MIRBuilder &mirBuilder) con return stmts; } -bool FEIRStmtGCCAsm::HandleConstraintPlusQm(MIRBuilder &mirBuilder, AsmNode *asmNode, uint32 index) const { +bool FEIRStmtGCCAsm::HandleConstraintPlusQm(MIRBuilder &mirBuilder, AsmNode *asmNode, uint32 index, + std::list &stmts, std::list &initStmts) const { if (std::get<1>(outputs[index]) != "+Q" && 
std::get<1>(outputs[index]) != "+m") { return false; } FieldID fieldID = outputsExprs[index]->GetFieldID(); MIRSymbol *sym = outputsExprs[index]->GetVarUses().front()->GenerateMIRSymbol(mirBuilder); - - CallReturnPair retPair(sym->GetStIdx(), RegFieldPair(fieldID, 0)); - asmNode->asmOutputs.emplace_back(retPair); - UStrIdx strIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(std::get<1>(outputs[index])); - asmNode->outputConstraints.emplace_back(strIdx); - + UniqueFEIRVar asmOut = outputsExprs[index]->GetVarUses().front()->Clone(); + MIRSymbol *localSym = nullptr; + UniqueFEIRVar localAsmOut = nullptr; BaseNode *node; if (outputsExprs[index]->GetKind() == kExprDRead) { - node = static_cast(mirBuilder.CreateExprAddrof(fieldID, *sym)); + if (asmOut->IsGlobal()) { + auto pair = HandleGlobalAsmOutOperand(asmOut, fieldID, stmts, mirBuilder); + localSym = pair.first; + localAsmOut = pair.second->Clone(); + // '+' means that asm out operand is both read and written, copy the initial value of global var into the + // local temp var and then add local temp var into the input list. + auto stmt = FEIRBuilder::CreateStmtDAssign(localAsmOut->Clone(), outputsExprs[index]->Clone()); + std::list node = stmt->GenMIRStmts(mirBuilder); + initStmts.splice(initStmts.end(), node); + } + node = static_cast(mirBuilder.CreateExprAddrof(fieldID, localSym != nullptr ? *localSym : *sym)); } else if (outputsExprs[index]->GetKind() == kExprIRead) { FEIRExprIRead *iread = static_cast(outputsExprs[index].get()); if (iread->GetFieldID() == 0) { @@ -4221,11 +4290,43 @@ bool FEIRStmtGCCAsm::HandleConstraintPlusQm(MIRBuilder &mirBuilder, AsmNode *asm } else { CHECK_FATAL(false, "FEIRStmtGCCAsm NYI."); } + + UStrIdx strIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(std::get<1>(outputs[index])); asmNode->PushOpnd(node); asmNode->inputConstraints.emplace_back(strIdx); + + CallReturnPair retPair(localSym != nullptr ? 
localSym->GetStIdx() : sym->GetStIdx(), RegFieldPair(fieldID, 0)); + asmNode->asmOutputs.emplace_back(retPair); + asmNode->outputConstraints.emplace_back(strIdx); return true; } +std::pair FEIRStmtGCCAsm::HandleGlobalAsmOutOperand(const UniqueFEIRVar &asmOut, + const FieldID fieldID, + std::list &stmts, + MIRBuilder &mirBuilder) const { + MIRSymbol *localSym = nullptr; + UniqueFEIRExpr srcExpr; + UniqueFEIRStmt stmt; + UniqueFEIRVar localAsmOut = FEIRBuilder::CreateVarNameForC(FEUtils::GetSequentialName("asm_out_"), + asmOut->GetType()->Clone(), false); + localSym = localAsmOut->GenerateLocalMIRSymbol(mirBuilder); + if (fieldID) { + MIRStructType *structType = static_cast(asmOut->GetType()->GenerateMIRTypeAuto()); + FieldPair fieldPair = structType->TraverseToField(fieldID); + UniqueFEIRType fieldType = FEIRTypeHelper::CreateTypeNative(*GlobalTables::GetTypeTable() + .GetTypeFromTyIdx(fieldPair.second.first)); + srcExpr = FEIRBuilder::CreateExprDReadAggField(localAsmOut->Clone(), fieldID, fieldType->Clone()); + stmt = FEIRBuilder::CreateStmtDAssignAggField(asmOut->Clone(), std::move(srcExpr), fieldID); + } else { + srcExpr = FEIRBuilder::CreateExprDRead(localAsmOut->Clone()); + stmt = FEIRBuilder::CreateStmtDAssign(asmOut->Clone(), std::move(srcExpr)); + } + std::list node = stmt->GenMIRStmts(mirBuilder); + stmts.splice(stmts.end(), node); + return std::make_pair(localSym, localAsmOut->Clone()); +} + std::list FEIRStmtGCCAsm::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { std::list stmts; std::list initStmts; @@ -4239,17 +4340,24 @@ std::list FEIRStmtGCCAsm::GenMIRStmtsImpl(MIRBuilder &mirBuilder) con asmNode->inputConstraints.emplace_back(strIdx); } for (uint32 i = 0; i < outputs.size(); ++i) { - if (HandleConstraintPlusQm(mirBuilder, asmNode, i)) { + if (HandleConstraintPlusQm(mirBuilder, asmNode, i, stmts, initStmts)) { continue; } FieldID fieldID = 0; MIRSymbol *sym = nullptr; + MIRSymbol *localSym = nullptr; + UniqueFEIRVar localAsmOut = nullptr; UniqueFEIRVar asmOut; if (outputsExprs[i]->GetKind() == kExprDRead) { FEIRExprDRead *dread = static_cast(outputsExprs[i].get()); fieldID = dread->GetFieldID(); sym = dread->GetVarUses().front()->GenerateMIRSymbol(mirBuilder); asmOut = dread->GetVarUses().front()->Clone(); + if (asmOut->IsGlobal()) { + auto pair = HandleGlobalAsmOutOperand(asmOut, fieldID, stmts, mirBuilder); + localSym = pair.first; + localAsmOut = pair.second->Clone(); + } } else if (outputsExprs[i]->GetKind() == kExprIRead) { FEIRExprIRead *iread = static_cast(outputsExprs[i].get()); fieldID = iread->GetFieldID(); @@ -4269,18 +4377,19 @@ std::list FEIRStmtGCCAsm::GenMIRStmtsImpl(MIRBuilder &mirBuilder) con CHECK_FATAL(false, "FEIRStmtGCCAsm NYI."); } - CallReturnPair retPair(sym->GetStIdx(), RegFieldPair(fieldID, 0)); + CallReturnPair retPair(localSym != nullptr ? localSym->GetStIdx() : sym->GetStIdx(), RegFieldPair(fieldID, 0)); asmNode->asmOutputs.emplace_back(retPair); UStrIdx strIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(std::get<1>(outputs[i])); asmNode->outputConstraints.emplace_back(strIdx); // If this is a read/write, copy the initial value into the temp before and added to the input list if (std::get<2>(outputs[i])) { - auto stmt = FEIRBuilder::CreateStmtDAssign(asmOut->Clone(), outputsExprs[i]->Clone()); + auto stmt = FEIRBuilder::CreateStmtDAssign(localAsmOut != nullptr ? 
localAsmOut->Clone() : asmOut->Clone(), + outputsExprs[i]->Clone()); std::list node = stmt->GenMIRStmts(mirBuilder); initStmts.splice(initStmts.end(), node); - AddrofNode *rNode = mirBuilder.CreateExprDread(*sym); + AddrofNode *rNode = mirBuilder.CreateExprDread(localSym != nullptr ? *localSym : *sym); asmNode->PushOpnd(static_cast(rNode)); asmNode->inputConstraints.emplace_back(strIdx); } diff --git a/src/hir2mpl/common/src/generic_attrs.cpp b/src/hir2mpl/common/src/generic_attrs.cpp index 65a94a8f5a21e42c85d7148fb19e718415052c8e..b516283f9c2f0d111dbc3a7cd52f5adee60c350b 100644 --- a/src/hir2mpl/common/src/generic_attrs.cpp +++ b/src/hir2mpl/common/src/generic_attrs.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -38,6 +38,11 @@ TypeAttrs GenericAttrs::ConvertToTypeAttrs() { break; } } + for(auto iter = contentMap.begin(); iter != contentMap.end(); ++iter) { + if (iter->first == GENATTR_pack) { + attr.SetPack(static_cast(std::get(iter->second))); + } + } return attr; } @@ -102,4 +107,4 @@ FieldAttrs GenericAttrs::ConvertToFieldAttrs() { } return attr; } -} \ No newline at end of file +} diff --git a/src/hir2mpl/common/src/hir2mpl_compiler.cpp b/src/hir2mpl/common/src/hir2mpl_compiler.cpp index 04f1ca53034b0c24d95c24d423d9aee652e336ed..0a9f5d0a154514bad36e17b2a6e259dbe692127c 100644 --- a/src/hir2mpl/common/src/hir2mpl_compiler.cpp +++ b/src/hir2mpl/common/src/hir2mpl_compiler.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
@@ -17,7 +17,9 @@ #include "fe_manager.h" #include "fe_file_type.h" #include "fe_timer.h" +#ifndef ONLY_C #include "rc_setter.h" +#endif namespace maple { HIR2MPLCompiler::HIR2MPLCompiler(MIRModule &argModule) @@ -34,9 +36,11 @@ void HIR2MPLCompiler::Init() { FEManager::Init(module); module.SetFlavor(maple::kFeProduced); module.GetImportFiles().clear(); +#ifndef ONLY_C if (FEOptions::GetInstance().IsRC()) { bc::RCSetter::InitRCSetter(""); } +#endif } void HIR2MPLCompiler::Release() { @@ -61,11 +65,15 @@ int HIR2MPLCompiler::Run() { if (!FEOptions::GetInstance().IsGenMpltOnly()) { FETypeHierarchy::GetInstance().InitByGlobalTable(); ProcessFunctions(); +#ifndef ONLY_C if (FEOptions::GetInstance().IsRC()) { bc::RCSetter::GetRCSetter().MarkRCAttributes(); } } bc::RCSetter::ReleaseRCSetter(); +#else + } +#endif FEManager::GetManager().ReleaseStructElemMempool(); CHECK_FATAL(success, "Compile Error"); ExportMpltFile(); @@ -155,14 +163,12 @@ void HIR2MPLCompiler::SetupOutputPathAndName() { module.SetFileName(outName); // mapleall need outName with type, but mplt file no need size_t lastDot = outName.find_last_of("."); - std::string outNameWithoutType; if (lastDot == std::string::npos) { outNameWithoutType = outName; } else { outNameWithoutType = outName.substr(0, lastDot); } std::string mpltName = outNameWithoutType + ".mplt"; - outputInlineName = outNameWithoutType + ".mplt_inline"; if (srcLang != kSrcLangC) { GStrIdx strIdx = module.GetMIRBuilder()->GetOrCreateStringIndex(mpltName); module.GetImportFiles().push_back(strIdx); @@ -216,8 +222,8 @@ void HIR2MPLCompiler::ExportMplFile() { emitStructureType = true; } module.OutputAsciiMpl("", ".mpl", nullptr, emitStructureType, false); - if (FEOptions::GetInstance().GetFuncInlineSize() != 0) { - module.DumpInlineCandidateToFile(outputInlineName); + if (FEOptions::GetInstance().GetFuncInlineSize() != 0 && !FEOptions::GetInstance().GetWPAA()) { + module.DumpInlineCandidateToFile(outNameWithoutType + ".mplt_inline"); } timer.StopAndDumpTimeMS("Output mpl"); } @@ -308,6 +314,7 @@ void HIR2MPLCompiler::ProcessFunctions() { } void HIR2MPLCompiler::RegisterCompilerComponent() { +#ifndef ONLY_C if (FEOptions::GetInstance().HasJBC()) { FEOptions::GetInstance().SetTypeInferKind(FEOptions::TypeInferKind::kNo); std::unique_ptr jbcCompilerComp = std::make_unique(module); @@ -319,6 +326,7 @@ void HIR2MPLCompiler::RegisterCompilerComponent() { std::make_unique>(module); RegisterCompilerComponent(std::move(bcCompilerComp)); } +#endif if (FEOptions::GetInstance().GetInputASTFiles().size() != 0) { srcLang = kSrcLangC; std::unique_ptr astCompilerComp = diff --git a/src/hir2mpl/common/src/hir2mpl_options.cpp b/src/hir2mpl/common/src/hir2mpl_options.cpp index 3a18069b5b0bee9a51d022a9b66a8a40567b9c17..7b9d88e5470ff739bcb92d4669bea8dd89112dc7 100644 --- a/src/hir2mpl/common/src/hir2mpl_options.cpp +++ b/src/hir2mpl/common/src/hir2mpl_options.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
@@ -47,6 +47,7 @@ enum OptionIndex : uint32 { kDumpTime, kDumpComment, kDumpLOC, + kDbgFriendly, kDumpPhaseTime, kDumpPhaseTimeDetail, // bc bytecode compile options @@ -56,7 +57,6 @@ enum OptionIndex : uint32 { kJavaStaticFieldName, kJBCInfoUsePathName, kDumpJBCStmt, - kDumpJBCBB, kDumpJBCAll, kDumpJBCErrorOnly, kDumpJBCFuncName, @@ -65,7 +65,8 @@ enum OptionIndex : uint32 { kUseSignedChar, kFEBigEndian, // general stmt/bb/cfg debug options - kDumpGenCFGGraph, + kDumpFEIRBB, + kDumpFEIRCFGGraph, // multi-thread control options kNThreads, kDumpThreadTime, @@ -85,6 +86,7 @@ enum OptionIndex : uint32 { kSimplifyShortCircuit, kEnableVariableArray, kFuncInlineSize, + kWPAA, }; const Descriptor kUsage[] = { @@ -163,6 +165,11 @@ const Descriptor kUsage[] = { { kUnknown, 0, "", "", kBuildTypeAll, kArgCheckPolicyUnknown, "\n====== Debug Info Control Options ======", "hir2mpl", {} }, + { kDbgFriendly, 0, "", "g", + kBuildTypeAll, kArgCheckPolicyNone, + " -g : emit debug friendly mpl, including\n" + " no variable renaming\n" + " gen LOC", "hir2mpl", {} }, { kDumpLevel, 0, "d", "dump-level", kBuildTypeAll, kArgCheckPolicyNumeric, " -d, -dump-level xx : debug info dump level\n" @@ -186,6 +193,13 @@ const Descriptor kUsage[] = { kBuildTypeAll, kArgCheckPolicyNone, " -dump-phase-time-detail\n" \ " : dump phase time for each method", "hir2mpl", {} }, + { kDumpFEIRBB, 0, "", "dump-bb", + kBuildTypeAll, kArgCheckPolicyNone, + " -dump-bb : dump basic blocks info", "hir2mpl", {} }, + { kDumpFEIRCFGGraph, 0, "", "dump-cfg", + kBuildTypeAll, kArgCheckPolicyRequired, + " -dump-cfg funcname1,funcname2\n"\ + " : dump cfg graph to dot file", "hir2mpl", {} }, // bc bytecode compile options { kUnknown, 0, "", "", @@ -222,6 +236,9 @@ const Descriptor kUsage[] = { { kFuncInlineSize, 0, "", "func-inline-size", kBuildTypeAll, kArgCheckPolicyRequired, " -func-inline-size : set func inline size", "hir2mpl", {} }, + { kWPAA, 0, "", "wpaa", + kBuildTypeAll, kArgCheckPolicyNone, + " -wpaa : enable whole program ailas analysis", "hir2mpl", {} }, // multi-thread control { kUnknown, 0, "", "", @@ -278,7 +295,7 @@ const Descriptor kUsage[] = { }; HIR2MPLOptions::HIR2MPLOptions() { - CreateUsages(kUsage); + CreateUsages(kUsage, sizeof(kUsage)/sizeof(kUsage[0])); Init(); } @@ -335,6 +352,8 @@ bool HIR2MPLOptions::InitFactory() { &HIR2MPLOptions::ProcessDumpComment); RegisterFactoryFunction(kDumpLOC, &HIR2MPLOptions::ProcessDumpLOC); + RegisterFactoryFunction(kDbgFriendly, + &HIR2MPLOptions::ProcessDbgFriendly); RegisterFactoryFunction(kDumpPhaseTime, &HIR2MPLOptions::ProcessDumpPhaseTime); RegisterFactoryFunction(kDumpPhaseTimeDetail, @@ -347,8 +366,6 @@ bool HIR2MPLOptions::InitFactory() { &HIR2MPLOptions::ProcessJBCInfoUsePathName); RegisterFactoryFunction(kDumpJBCStmt, &HIR2MPLOptions::ProcessDumpJBCStmt); - RegisterFactoryFunction(kDumpJBCBB, - &HIR2MPLOptions::ProcessDumpJBCBB); RegisterFactoryFunction(kDumpJBCErrorOnly, &HIR2MPLOptions::ProcessDumpJBCErrorOnly); RegisterFactoryFunction(kDumpJBCFuncName, @@ -357,7 +374,9 @@ bool HIR2MPLOptions::InitFactory() { &HIR2MPLOptions::ProcessEmitJBCLocalVarInfo); // general stmt/bb/cfg debug options - RegisterFactoryFunction(kDumpGenCFGGraph, + RegisterFactoryFunction(kDumpFEIRBB, + &HIR2MPLOptions::ProcessDumpFEIRBB); + RegisterFactoryFunction(kDumpFEIRCFGGraph, &HIR2MPLOptions::ProcessDumpFEIRCFGGraph); // multi-thread control options @@ -403,6 +422,8 @@ bool HIR2MPLOptions::InitFactory() { &HIR2MPLOptions::ProcessEnableVariableArray); RegisterFactoryFunction(kFuncInlineSize, 
&HIR2MPLOptions::ProcessFuncInlineSize); + RegisterFactoryFunction(kWPAA, + &HIR2MPLOptions::ProcessWPAA); return true; } @@ -583,6 +604,11 @@ bool HIR2MPLOptions::ProcessDumpLOC(const Option &opt) { return true; } +bool HIR2MPLOptions::ProcessDbgFriendly(const Option &opt) { + FEOptions::GetInstance().SetDbgFriendly(true); + return true; +} + bool HIR2MPLOptions::ProcessDumpPhaseTime(const Option &opt) { FEOptions::GetInstance().SetIsDumpPhaseTime(true); return true; @@ -619,11 +645,6 @@ bool HIR2MPLOptions::ProcessDumpJBCStmt(const Option &opt) { return true; } -bool HIR2MPLOptions::ProcessDumpJBCBB(const Option &opt) { - FEOptions::GetInstance().SetIsDumpJBCBB(true); - return true; -} - bool HIR2MPLOptions::ProcessDumpJBCAll(const Option &opt) { FEOptions::GetInstance().SetIsDumpJBCAll(true); return true; @@ -677,9 +698,16 @@ bool HIR2MPLOptions::ProcessBigEndian(const Option &opt) { } // general stmt/bb/cfg debug options +bool HIR2MPLOptions::ProcessDumpFEIRBB(const Option &opt) { + FEOptions::GetInstance().SetIsDumpFEIRBB(true); + return true; +} + bool HIR2MPLOptions::ProcessDumpFEIRCFGGraph(const Option &opt) { - FEOptions::GetInstance().SetIsDumpFEIRCFGGraph(true); - FEOptions::GetInstance().SetFEIRCFGGraphFileName(opt.Args()); + std::list funcNameList = SplitByComma(opt.Args()); + for (const std::string &funcName : funcNameList) { + FEOptions::GetInstance().AddFuncNameForDumpCFGGraph(funcName); + } return true; } @@ -819,6 +847,12 @@ bool HIR2MPLOptions::ProcessFuncInlineSize(const mapleOption::Option &opt) { return true; } +bool HIR2MPLOptions::ProcessWPAA(const mapleOption::Option &opt) { + FEOptions::GetInstance().SetWPAA(true); + FEOptions::GetInstance().SetFuncInlineSize(UINT32_MAX); + return true; +} + // AOT bool HIR2MPLOptions::ProcessAOT(const Option &opt) { FEOptions::GetInstance().SetIsAOT(true); diff --git a/src/hir2mpl/optimize/include/feir_cfg.h b/src/hir2mpl/optimize/include/feir_cfg.h index aee9e8d3c9e537b2c7039c6d48d08f472fb365a3..31f9308f130bce82e9f23ef0af3130dd21644715 100644 --- a/src/hir2mpl/optimize/include/feir_cfg.h +++ b/src/hir2mpl/optimize/include/feir_cfg.h @@ -32,8 +32,16 @@ class FEIRCFG { void Init(); void BuildBB(); bool BuildCFG(); + void GenerateCFG(); const FEIRBB *GetHeadBB(); const FEIRBB *GetNextBB(); + void LabelStmtID(); + void LabelBBID(); + bool HasDeadBB() const; + void DumpBBs(); + void DumpCFGGraph(std::ofstream &file); + void DumpCFGGraphForBB(std::ofstream &file, const FEIRBB &bb); + void DumpCFGGraphForEdge(std::ofstream &file); FEIRBB *GetDummyHead() const { return bbHead.get(); @@ -47,10 +55,15 @@ class FEIRCFG { return std::make_unique(); } - private: + bool IsGeneratedCFG() const { + return isGeneratedCFG; + } + + LLT_PRIVATE: void AppendAuxStmt(); FEIRBB *NewBBAppend(); + bool isGeneratedCFG = false; FEIRStmt *stmtHead; FEIRStmt *stmtTail; FELinkListNode *currBBNode = nullptr; diff --git a/src/hir2mpl/optimize/include/feir_lower.h b/src/hir2mpl/optimize/include/feir_lower.h index 201be4e32f0764bbb41c7eeed0c1554f8d7faebc..8e8d7c2a3bf0116f01a791712b7a6db054c26b41 100644 --- a/src/hir2mpl/optimize/include/feir_lower.h +++ b/src/hir2mpl/optimize/include/feir_lower.h @@ -15,14 +15,38 @@ #ifndef HIR2MPL_FEIR_LOWER_H #define HIR2MPL_FEIR_LOWER_H -#include "fe_function.h" +#include "feir_stmt.h" namespace maple { +class FEFunction; class FEIRLower { -public: - void LowerFunc(FEFunction *func); + public: + explicit FEIRLower(FEFunction &funcIn); + void LowerFunc(); + void LowerStmt(FEIRStmt *stmt, FEIRStmt *ptrTail); + void 
LowerStmt(const std::list &stmts, FEIRStmt *ptrTail); -private: + FEIRStmt *GetlowerStmtHead() { + return lowerStmtHead; + } + + FEIRStmt *GetlowerStmtTail() { + return lowerStmtTail; + } + + private: + void Init(); + void Clear(); + FEIRStmt *CreateHeadAndTail(); + FEIRStmt *RegisterAuxFEIRStmt(UniqueFEIRStmt stmt); + FEIRStmt *RegisterAndInsertFEIRStmt(UniqueFEIRStmt stmt, FEIRStmt *ptrTail, uint32 fileIdx = 0, uint32 fileLine = 0); + void LowerIfStmt(FEIRStmtIf &ifStmt, FEIRStmt *ptrTail); + void CreateAndInsertCondStmt(Opcode op, FEIRStmtIf &ifStmt, FEIRStmt *head, FEIRStmt *tail, FEIRStmt *ptrTail); + + FEFunction &func; + FEIRStmt *lowerStmtHead; + FEIRStmt *lowerStmtTail; + std::list auxFEIRStmtList; // auxiliary feir stmt list }; } // namespace maple #endif // HIR2MPL_FEIR_LOWER_H diff --git a/src/hir2mpl/optimize/src/feir_bb.cpp b/src/hir2mpl/optimize/src/feir_bb.cpp index afd4265b0d8843b0d62a122c0c98256721a3ddb9..42786149d5765d210bd7e546c9cf84011d1cd54e 100644 --- a/src/hir2mpl/optimize/src/feir_bb.cpp +++ b/src/hir2mpl/optimize/src/feir_bb.cpp @@ -38,7 +38,7 @@ void FEIRBB::AppendStmt(FEIRStmt *stmt) { stmtHead = stmt; } stmtTail = stmt; - if (stmt->IsAux()) { + if (!stmt->IsAux()) { if (stmtNoAuxHead == nullptr) { stmtNoAuxHead = stmt; } @@ -89,6 +89,15 @@ void FEIRBB::Dump() const { std::cout << bb->GetID() << " "; } std::cout << "})" << std::endl; + FELinkListNode *nodeStmt = stmtHead; + while (nodeStmt != nullptr) { + FEIRStmt *stmt = static_cast(nodeStmt); + stmt->Dump(" "); + if (nodeStmt == stmtTail) { + return; + } + nodeStmt = nodeStmt->GetNext(); + } } std::string FEIRBB::GetBBKindName() const { diff --git a/src/hir2mpl/optimize/src/feir_cfg.cpp b/src/hir2mpl/optimize/src/feir_cfg.cpp index 826f2f19769e7ccf9620fb2de71092cf5468890b..0aa98d6a6d08b95992c37b7964a865062905e013 100644 --- a/src/hir2mpl/optimize/src/feir_cfg.cpp +++ b/src/hir2mpl/optimize/src/feir_cfg.cpp @@ -13,13 +13,12 @@ * See the Mulan PSL v2 for more details. 
*/ #include "feir_cfg.h" +#include #include "mpl_logging.h" namespace maple { FEIRCFG::FEIRCFG(FEIRStmt *argStmtHead, FEIRStmt *argStmtTail) - : stmtHead(argStmtHead), stmtTail(argStmtTail) { - (void)stmtTail; -} + : stmtHead(argStmtHead), stmtTail(argStmtTail) {} void FEIRCFG::Init() { bbHead = std::make_unique(kBBKindPesudoHead); @@ -28,21 +27,32 @@ void FEIRCFG::Init() { bbTail->SetPrev(bbHead.get()); } +void FEIRCFG::GenerateCFG() { + if (isGeneratedCFG) { + return; + } + Init(); + LabelStmtID(); + BuildBB(); + BuildCFG(); + LabelBBID(); +} + void FEIRCFG::BuildBB() { FELinkListNode *nodeStmt = stmtHead->GetNext(); FEIRBB *currBB = nullptr; - while (nodeStmt != nullptr) { + while (nodeStmt != nullptr && nodeStmt != stmtTail) { FEIRStmt *stmt = static_cast(nodeStmt); if (!stmt->IsAux()) { // check start of BB - if (currBB == nullptr) { // Additional conditions need to be added + if (currBB == nullptr || !stmt->GetExtraPreds().empty()) { currBB = NewBBAppend(); bbTail->InsertBefore(currBB); } CHECK_FATAL(currBB != nullptr, "nullptr check of currBB"); currBB->AppendStmt(stmt); // check end of BB - if (!stmt->IsFallThru()) { // Additional conditions need to be added + if (!stmt->IsFallThru() || !stmt->GetExtraSuccs().empty()) { currBB = nullptr; } } @@ -123,7 +133,7 @@ bool FEIRCFG::BuildCFG() { bb->AddSuccBB(bbNext); bbNext->AddPredBB(bb); } - for (FEIRStmt *stmt : locStmtTail->GetSuccs()) { + for (FEIRStmt *stmt : locStmtTail->GetExtraSuccs()) { auto itBB = mapTargetStmtBB.find(stmt); CHECK_FATAL(itBB != mapTargetStmtBB.end(), "Target BB is not found"); FEIRBB *bbNext = itBB->second; @@ -132,7 +142,8 @@ bool FEIRCFG::BuildCFG() { } nodeBB = nodeBB->GetNext(); } - return true; + isGeneratedCFG = true; + return isGeneratedCFG; } const FEIRBB *FEIRCFG::GetHeadBB() { @@ -150,4 +161,92 @@ const FEIRBB *FEIRCFG::GetNextBB() { } return static_cast(currBBNode); } + +void FEIRCFG::LabelStmtID() { + FELinkListNode *nodeStmt = stmtHead; + uint32 idx = 0; + while (nodeStmt != nullptr) { + FEIRStmt *stmt = static_cast(nodeStmt); + stmt->SetID(idx); + idx++; + nodeStmt = nodeStmt->GetNext(); + } +} + +void FEIRCFG::LabelBBID() { + FELinkListNode *nodeBB = bbHead.get(); + uint32 idx = 0; + while (nodeBB != nullptr) { + FEIRBB *bb = static_cast(nodeBB); + bb->SetID(idx); + idx++; + nodeBB = nodeBB->GetNext(); + } +} + +bool FEIRCFG::HasDeadBB() const { + FELinkListNode *nodeBB = bbHead->GetNext(); + while (nodeBB != nullptr && nodeBB != bbTail.get()) { + FEIRBB *bb = static_cast(nodeBB); + if (bb->IsDead()) { + return true; + } + nodeBB = nodeBB->GetNext(); + } + return false; +} + +void FEIRCFG::DumpBBs() { + FELinkListNode *nodeBB = bbHead->GetNext(); + while (nodeBB != nullptr && nodeBB != bbTail.get()) { + FEIRBB *bb = static_cast(nodeBB); + bb->Dump(); + nodeBB = nodeBB->GetNext(); + } +} + +void FEIRCFG::DumpCFGGraph(std::ofstream &file) { + FELinkListNode *nodeBB = bbHead->GetNext(); + while (nodeBB != nullptr && nodeBB != bbTail.get()) { + FEIRBB *bb = static_cast(nodeBB); + DumpCFGGraphForBB(file, *bb); + nodeBB = nodeBB->GetNext(); + } + DumpCFGGraphForEdge(file); + file << "}" << std::endl; +} + +void FEIRCFG::DumpCFGGraphForBB(std::ofstream &file, const FEIRBB &bb) { + file << " BB" << bb.GetID() << " [shape=record,label=\"{\n"; + const FELinkListNode *nodeStmt = bb.GetStmtHead(); + while (nodeStmt != nullptr) { + const FEIRStmt *stmt = static_cast(nodeStmt); + file << " " << stmt->DumpDotString(); + if (nodeStmt == bb.GetStmtTail()) { + file << "\n"; + break; + } else { + file << " |\n"; + 
} + nodeStmt = nodeStmt->GetNext(); + } + file << " }\"];\n"; +} + +void FEIRCFG::DumpCFGGraphForEdge(std::ofstream &file) { + file << " subgraph cfg_edges {\n"; + file << " edge [color=\"#000000\",weight=0.3,len=3];\n"; + const FELinkListNode *nodeBB = bbHead->GetNext(); + while (nodeBB != nullptr && nodeBB != bbTail.get()) { + const FEIRBB *bb = static_cast(nodeBB); + const FEIRStmt *stmtS = bb->GetStmtTail(); + for (FEIRBB *bbNext : bb->GetSuccBBs()) { + const FEIRStmt *stmtE = bbNext->GetStmtHead(); + file << " BB" << bb->GetID() << ":stmt" << stmtS->GetID() << " -> "; + file << "BB" << bbNext->GetID() << ":stmt" << stmtE->GetID() << "\n"; + } + nodeBB = nodeBB->GetNext(); + } + file << " }\n"; +} } // namespace maple diff --git a/src/hir2mpl/optimize/src/feir_lower.cpp b/src/hir2mpl/optimize/src/feir_lower.cpp index 059b1e5fab744120bf2d6491941a64701d0f04e7..cbca8cf70e6bfdacf251f1a930b29b73924b06bf 100644 --- a/src/hir2mpl/optimize/src/feir_lower.cpp +++ b/src/hir2mpl/optimize/src/feir_lower.cpp @@ -13,3 +13,155 @@ * See the Mulan PSL v2 for more details. */ #include "feir_lower.h" +#include "fe_function.h" +#include "feir_builder.h" + +namespace maple { +FEIRLower::FEIRLower(FEFunction &funcIn) : func(funcIn) { + Init(); +} + +void FEIRLower::Init() { + lowerStmtHead = func.RegisterFEIRStmt(std::make_unique(FEIRNodeKind::kStmtPesudoFuncStart)); + lowerStmtTail = func.RegisterFEIRStmt(std::make_unique(FEIRNodeKind::kStmtPesudoFuncEnd)); + lowerStmtHead->SetNext(lowerStmtTail); + lowerStmtTail->SetPrev(lowerStmtHead); +} + +void FEIRLower::Clear() { + auxFEIRStmtList.clear(); +} + +FEIRStmt *FEIRLower::RegisterAuxFEIRStmt(UniqueFEIRStmt stmt) { + auxFEIRStmtList.push_back(std::move(stmt)); + return auxFEIRStmtList.back().get(); +} + +FEIRStmt *FEIRLower::CreateHeadAndTail() { + FEIRStmt *head = RegisterAuxFEIRStmt(std::make_unique(FEIRNodeKind::kStmtPesudoHead)); + FEIRStmt *tail = RegisterAuxFEIRStmt(std::make_unique(FEIRNodeKind::kStmtPesudoTail)); + head->SetNext(tail); + tail->SetPrev(head); + return head; +} + +FEIRStmt *FEIRLower::RegisterAndInsertFEIRStmt(UniqueFEIRStmt stmt, FEIRStmt *ptrTail, + uint32 fileIdx, uint32 fileLine) { + stmt->SetSrcFileInfo(fileIdx, fileLine); + FEIRStmt *prtStmt = func.RegisterFEIRStmt(std::move(stmt)); + ptrTail->InsertBefore(prtStmt); + return prtStmt; +} + +void FEIRLower::LowerFunc() { + FELinkListNode *nodeStmt = func.GetFEIRStmtHead()->GetNext(); + if (nodeStmt != func.GetFEIRStmtTail()) { + LowerStmt(static_cast(nodeStmt), lowerStmtTail); + } + Clear(); +} + +void FEIRLower::LowerStmt(const std::list &stmts, FEIRStmt *ptrTail) { + FEIRStmt *tmpHead = CreateHeadAndTail(); + FEIRStmt *tmpTail = static_cast(tmpHead->GetNext()); + for (auto &stmt : stmts) { + tmpTail->InsertBefore(stmt.get()); + } + LowerStmt(static_cast(tmpHead->GetNext()), ptrTail); +} + +void FEIRLower::LowerStmt(FEIRStmt *stmt, FEIRStmt *ptrTail) { + FEIRStmt *nextStmt = stmt; + do { + stmt = nextStmt; + nextStmt = static_cast(stmt->GetNext()); + switch (stmt->GetKind()) { + case kStmtIf: + LowerIfStmt(*static_cast(stmt), ptrTail); + break; + case kStmtPesudoTail: + case kStmtPesudoFuncEnd: + return; + default: + ptrTail->InsertBefore(stmt); + break; + } + } while (nextStmt != nullptr); +} + +void FEIRLower::LowerIfStmt(FEIRStmtIf &ifStmt, FEIRStmt *ptrTail) { + FEIRStmt *thenHead = nullptr; + FEIRStmt *thenTail = nullptr; + FEIRStmt *elseHead = nullptr; + FEIRStmt *elseTail = nullptr; + if (!ifStmt.GetThenStmt().empty()) { + thenHead = CreateHeadAndTail(); + thenTail = 
static_cast(thenHead->GetNext()); + LowerStmt(ifStmt.GetThenStmt(), thenTail); + } + if (!ifStmt.GetElseStmt().empty()) { + elseHead = CreateHeadAndTail(); + elseTail = static_cast(elseHead->GetNext()); + LowerStmt(ifStmt.GetElseStmt(), elseTail); + } + if (ifStmt.GetThenStmt().empty() && ifStmt.GetElseStmt().empty()) { + // eval statement + std::list feExprs; + feExprs.emplace_back(ifStmt.GetCondExpr()->Clone()); + (void)RegisterAndInsertFEIRStmt( + std::make_unique(OP_eval, std::move(feExprs)), + ptrTail, ifStmt.GetSrcFileIdx(), ifStmt.GetSrcFileLineNum()); + } else if (ifStmt.GetElseStmt().empty()) { + // brfalse + // + // label + CreateAndInsertCondStmt(OP_brfalse, ifStmt, thenHead, thenTail, ptrTail); + } else if (ifStmt.GetThenStmt().empty()) { + // brtrue + // + // label + CreateAndInsertCondStmt(OP_brtrue, ifStmt, elseHead, elseTail, ptrTail); + } else { + // brfalse + // + // goto + // label + // + // label + std::string elseName = FEUtils::CreateLabelName(); + UniqueFEIRStmt condFEStmt = std::make_unique( + ifStmt.GetCondExpr()->Clone(), OP_brfalse, elseName); + auto condStmt = RegisterAndInsertFEIRStmt( + std::move(condFEStmt), ptrTail, ifStmt.GetSrcFileIdx(), ifStmt.GetSrcFileLineNum()); + // + FELinkListNode::SpliceNodes(thenHead, thenTail, ptrTail); + // goto + std::string endName = FEUtils::CreateLabelName(); + auto gotoStmt = RegisterAndInsertFEIRStmt(FEIRBuilder::CreateStmtGoto(endName), ptrTail); + // label + auto elseLabelStmt = RegisterAndInsertFEIRStmt(std::make_unique(elseName), ptrTail); + // + FELinkListNode::SpliceNodes(elseHead, elseTail, ptrTail); + // label + auto endLabelStmt = RegisterAndInsertFEIRStmt(std::make_unique(endName), ptrTail); + // link bb + condStmt->AddExtraSucc(*elseLabelStmt); + elseLabelStmt->AddExtraPred(*condStmt); + gotoStmt->AddExtraSucc(*endLabelStmt); + endLabelStmt->AddExtraPred(*gotoStmt); + } +} + +void FEIRLower::CreateAndInsertCondStmt(Opcode op, FEIRStmtIf &ifStmt, + FEIRStmt *head, FEIRStmt *tail, FEIRStmt *ptrTail) { + std::string labelName = FEUtils::CreateLabelName(); + UniqueFEIRStmt condFEStmt = std::make_unique(ifStmt.GetCondExpr()->Clone(), op, labelName); + FEIRStmt *condStmt = RegisterAndInsertFEIRStmt( + std::move(condFEStmt), ptrTail, ifStmt.GetSrcFileIdx(), ifStmt.GetSrcFileLineNum()); + FELinkListNode::SpliceNodes(head, tail, ptrTail); + FEIRStmt *labelStmt = RegisterAndInsertFEIRStmt(std::make_unique(labelName), ptrTail); + // link bb + condStmt->AddExtraSucc(*labelStmt); + labelStmt->AddExtraPred(*condStmt); +} +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/test/BUILD.gn b/src/hir2mpl/test/BUILD.gn index c138e4e94dc7a0ec2f1940cb1e6247058b568602..ac663e060f93d270191ae1a3aeead4435de503cf 100644 --- a/src/hir2mpl/test/BUILD.gn +++ b/src/hir2mpl/test/BUILD.gn @@ -62,13 +62,18 @@ executable("hir2mplUT") { "${MAPLEALL_ROOT}/mpl2mpl:libmpl2mpl", "${HIR2MPL_ROOT}:lib_hir2mpl_ast_input_clang", "${HIR2MPL_ROOT}:lib_hir2mpl_ast_input_common", - "${HIR2MPL_ROOT}:lib_hir2mpl_bytecode_input_common", "${HIR2MPL_ROOT}:lib_hir2mpl_common", "${HIR2MPL_ROOT}:lib_hir2mpl_optimize", - "${HIR2MPL_ROOT}:lib_hir2mpl_bytecode_input_dex", - "${HIR2MPL_ROOT}:lib_hir2mpl_bytecode_input_class", ] + if (ONLY_C != 1) { + deps += [ + "${HIR2MPL_ROOT}:lib_hir2mpl_bytecode_input_dex", + "${HIR2MPL_ROOT}:lib_hir2mpl_bytecode_input_common", + "${HIR2MPL_ROOT}:lib_hir2mpl_bytecode_input_class", + ] + } + ldflags = [ "-rdynamic", "-L${LLVMLIBDIR}/", @@ -161,7 +166,7 @@ source_set("lib_hir2mpl_test_common") { 
"${HIR2MPL_ROOT}/test/common/fe_algorithm_test.cpp", "${HIR2MPL_ROOT}/test/common/fe_file_ops_test.cpp", "${HIR2MPL_ROOT}/test/common/fe_file_type_test.cpp", - "${HIR2MPL_ROOT}/test/common/fe_function_test.cpp", + "${HIR2MPL_ROOT}/test/common/feir_cfg_test.cpp", "${HIR2MPL_ROOT}/test/common/fe_struct_elem_info_test.cpp", "${HIR2MPL_ROOT}/test/common/fe_type_hierarchy_test.cpp", "${HIR2MPL_ROOT}/test/common/fe_type_manager_test.cpp", @@ -174,6 +179,7 @@ source_set("lib_hir2mpl_test_common") { "${HIR2MPL_ROOT}/test/common/feir_type_infer_test.cpp", "${HIR2MPL_ROOT}/test/common/feir_type_test.cpp", "${HIR2MPL_ROOT}/test/common/feir_var_test.cpp", + "${HIR2MPL_ROOT}/test/common/feir_lower_test.cpp", "${HIR2MPL_ROOT}/test/common/hir2mplUT.cpp", "${HIR2MPL_ROOT}/test/common/hir2mpl_ut_options.cpp", "${HIR2MPL_ROOT}/test/common/hir2mpl_ut_regx.cpp", diff --git a/src/hir2mpl/test/ast_input/clang/ast_expr_test.cpp b/src/hir2mpl/test/ast_input/clang/ast_expr_test.cpp index 859f317060e7f5f2bb534d3bc09f5347973e1a82..385b256bbb3a85c0dc87512cfc1078cb20e3ec2d 100644 --- a/src/hir2mpl/test/ast_input/clang/ast_expr_test.cpp +++ b/src/hir2mpl/test/ast_input/clang/ast_expr_test.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -66,14 +66,14 @@ TEST_F(AstExprTest, IntegerLiteral) { TEST_F(AstExprTest, ImaginaryLiteral) { MIRType *elemType = FEManager::GetTypeManager().GetOrCreateTypeFromName("I", FETypeFlag::kSrcUnknown, false); MIRType *complexType = FEManager::GetTypeManager().GetOrCreateComplexStructType(*elemType); - std::unique_ptr astExpr = std::make_unique(); + ASTImaginaryLiteral *astExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); astExpr->SetComplexType(complexType); astExpr->SetElemType(elemType); // create child expr - std::unique_ptr childExpr = std::make_unique(); + ASTIntegerLiteral *childExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); childExpr->SetVal(2); childExpr->SetType(GlobalTables::GetTypeTable().GetInt32()); - astExpr->SetASTExpr(childExpr.get()); + astExpr->SetASTExpr(childExpr); std::list stmts; UniqueFEIRExpr feExpr = astExpr->Emit2FEExpr(stmts); @@ -109,7 +109,8 @@ TEST_F(AstExprTest, ASTUnaryOperatorExpr_1) { PrimType ouPrimType = PTY_i32; MIRType *ouType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ouPrimType); ASTDeclRefExpr *astRefExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); - ASTDecl *astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, "", "aVar", std::vector{subType}); + ASTDecl *astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "aVar", + MapleVector({subType}, allocator.Adapter())); astRefExpr->SetASTDecl(astDecl); // ASTUOMinusExpr @@ -164,7 +165,8 @@ TEST_F(AstExprTest, ASTUnaryOperatorExpr_2) { PrimType primType = PTY_i32; MIRType *subType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(primType); ASTDeclRefExpr *astRefExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); - ASTDecl *astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, "", "aVar", std::vector{subType}); + ASTDecl *astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "aVar", + MapleVector({subType}, allocator.Adapter())); astRefExpr->SetASTDecl(astDecl); // ASTUOPostIncExpr @@ -244,7 +246,8 @@ TEST_F(AstExprTest, ASTUnaryOperatorExpr_3) { PrimType primType = PTY_i32; MIRType *subType 
= GlobalTables::GetTypeTable().GetTypeFromTyIdx(primType); ASTDeclRefExpr *astRefExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); - ASTDecl *astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, "", "aVar", std::vector{subType}); + ASTDecl *astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "aVar", + MapleVector({subType}, allocator.Adapter())); astRefExpr->SetASTDecl(astDecl); // ASTUOAddrOfExpr @@ -262,7 +265,8 @@ TEST_F(AstExprTest, ASTUnaryOperatorExpr_3) { MIRType *uoType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_i32); subType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*uoType, PTY_ptr); astRefExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); - astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, "", "aVar", std::vector{subType}); + astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "aVar", + MapleVector({subType}, allocator.Adapter())); astRefExpr->SetASTDecl(astDecl); astUODerefExpr->SetUOType(uoType); astUODerefExpr->SetSubType(subType); @@ -278,7 +282,7 @@ TEST_F(AstExprTest, ASTUnaryOperatorExpr_3) { TEST_F(AstExprTest, ASTCharacterLiteral) { RedirectCout(); std::list stmts; - std::unique_ptr astExpr = std::make_unique(); + ASTCharacterLiteral *astExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); astExpr->SetVal('a'); astExpr->SetPrimType(PTY_i32); UniqueFEIRExpr feExpr = astExpr->Emit2FEExpr(stmts); @@ -309,10 +313,12 @@ TEST_F(AstExprTest, ASTBinaryOperatorExpr_1) { PrimType primType = PTY_i32; MIRType *subType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(primType); ASTDeclRefExpr *astRefExpr1 = ASTDeclsBuilder::ASTExprBuilder(allocator); - ASTDecl *astDecl1 = ASTDeclsBuilder::ASTDeclBuilder(allocator, "", "aVar", std::vector{subType}); + ASTDecl *astDecl1 = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "aVar", + MapleVector({subType}, allocator.Adapter())); astRefExpr1->SetASTDecl(astDecl1); ASTDeclRefExpr *astRefExpr2 = ASTDeclsBuilder::ASTExprBuilder(allocator); - ASTDecl *astDecl2 = ASTDeclsBuilder::ASTDeclBuilder(allocator, "", "bVar", std::vector{subType}); + ASTDecl *astDecl2 = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "bVar", + MapleVector({subType}, allocator.Adapter())); astRefExpr2->SetASTDecl(astDecl2); ASTBinaryOperatorExpr *astBinaryOperatorExpr1 = ASTDeclsBuilder::ASTExprBuilder(allocator); @@ -343,10 +349,12 @@ TEST_F(AstExprTest, ASTBinaryOperatorExpr_2) { PrimType primType = PTY_i32; MIRType *subType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(primType); ASTDeclRefExpr *astRefExpr1 = ASTDeclsBuilder::ASTExprBuilder(allocator); - ASTDecl *astDecl1 = ASTDeclsBuilder::ASTDeclBuilder(allocator, "", "aVar", std::vector{subType}); + ASTDecl *astDecl1 = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "aVar", + MapleVector({subType}, allocator.Adapter())); astRefExpr1->SetASTDecl(astDecl1); ASTDeclRefExpr *astRefExpr2 = ASTDeclsBuilder::ASTExprBuilder(allocator); - ASTDecl *astDecl2 = ASTDeclsBuilder::ASTDeclBuilder(allocator, "", "bVar", std::vector{subType}); + ASTDecl *astDecl2 = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "bVar", + MapleVector({subType}, allocator.Adapter())); astRefExpr2->SetASTDecl(astDecl2); ASTAssignExpr *astAssignExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); @@ -371,10 +379,12 @@ TEST_F(AstExprTest, ConditionalOperator) { // create ast cond expr MIRType *subType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_i32); ASTDeclRefExpr *astRefExpr1 = 
ASTDeclsBuilder::ASTExprBuilder(allocator); - ASTDecl *astDecl1 = ASTDeclsBuilder::ASTDeclBuilder(allocator, "", "aVar", std::vector{ subType }); + ASTDecl *astDecl1 = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "aVar", + MapleVector({ subType }, allocator.Adapter())); astRefExpr1->SetASTDecl(astDecl1); ASTDeclRefExpr *astRefExpr2 = ASTDeclsBuilder::ASTExprBuilder(allocator); - ASTDecl *astDecl2 = ASTDeclsBuilder::ASTDeclBuilder(allocator, "", "bVar", std::vector{ subType }); + ASTDecl *astDecl2 = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "bVar", + MapleVector({ subType }, allocator.Adapter())); astRefExpr2->SetASTDecl(astDecl2); ASTBinaryOperatorExpr *astBinaryOperatorExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); astBinaryOperatorExpr->SetLeftExpr(astRefExpr1); @@ -410,10 +420,12 @@ TEST_F(AstExprTest, ConditionalOperator_NestedExpr) { // create ast cond expr MIRType *subType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_i32); ASTDeclRefExpr *astRefExpr1 = ASTDeclsBuilder::ASTExprBuilder(allocator); - ASTDecl *astDecl1 = ASTDeclsBuilder::ASTDeclBuilder(allocator, "", "aVar", std::vector{ subType }); + ASTDecl *astDecl1 = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "aVar", + MapleVector({ subType }, allocator.Adapter())); astRefExpr1->SetASTDecl(astDecl1); ASTDeclRefExpr *astRefExpr2 = ASTDeclsBuilder::ASTExprBuilder(allocator); - ASTDecl *astDecl2 = ASTDeclsBuilder::ASTDeclBuilder(allocator, "", "bVar", std::vector{ subType }); + ASTDecl *astDecl2 = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "bVar", + MapleVector({ subType }, allocator.Adapter())); astRefExpr2->SetASTDecl(astDecl2); ASTBinaryOperatorExpr *astBinaryOperatorExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); astBinaryOperatorExpr->SetLeftExpr(astRefExpr1); @@ -454,7 +466,8 @@ TEST_F(AstExprTest, ConditionalOperator_Noncomparative) { // create ast cond expr MIRType *subType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_f64); ASTDeclRefExpr *astRefExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); - ASTDecl *astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, "", "aVar", std::vector{ subType }); + ASTDecl *astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "aVar", + MapleVector({ subType }, allocator.Adapter())); astRefExpr->SetASTDecl(astDecl); // create true expr ASTIntegerLiteral *trueAstExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); @@ -495,10 +508,12 @@ TEST_F(AstExprTest, BinaryConditionalOperator) { // create ast cond expr MIRType *subType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_i32); ASTDeclRefExpr *astRefExpr1 = ASTDeclsBuilder::ASTExprBuilder(allocator); - ASTDecl *astDecl1 = ASTDeclsBuilder::ASTDeclBuilder(allocator, "", "aVar", std::vector{ subType }); + ASTDecl *astDecl1 = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "aVar", + MapleVector({ subType }, allocator.Adapter())); astRefExpr1->SetASTDecl(astDecl1); ASTDeclRefExpr *astRefExpr2 = ASTDeclsBuilder::ASTExprBuilder(allocator); - ASTDecl *astDecl2 = ASTDeclsBuilder::ASTDeclBuilder(allocator, "", "bVar", std::vector{ subType }); + ASTDecl *astDecl2 = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "bVar", + MapleVector({ subType },allocator.Adapter())); astRefExpr2->SetASTDecl(astDecl2); ASTBinaryOperatorExpr *astBinaryOperatorExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); astBinaryOperatorExpr->SetLeftExpr(astRefExpr1); @@ -537,7 +552,8 @@ TEST_F(AstExprTest, 
BinaryConditionalOperator_Noncomparative) { // create ast cond expr MIRType *subType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_i32); ASTDeclRefExpr *astRefExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); - ASTDecl *astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, "", "aVar", std::vector{ subType }); + ASTDecl *astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "aVar", + MapleVector({ subType }, allocator.Adapter())); astRefExpr->SetASTDecl(astDecl); // create false expr ASTIntegerLiteral *falseAstExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); @@ -567,7 +583,8 @@ TEST_F(AstExprTest, ASTCstyleCastExpr) { PrimType srcPrimType = PTY_f32; MIRType *srcType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(srcPrimType); ASTDeclRefExpr *astRefExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); - ASTDecl *astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, "", "a", std::vector{srcType}); + ASTDecl *astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "a", + MapleVector({srcType}, allocator.Adapter())); astRefExpr->SetASTDecl(astDecl); ASTCastExpr *imCastExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); imCastExpr->SetASTExpr(astRefExpr); @@ -593,7 +610,8 @@ TEST_F(AstExprTest, ASTArraySubscriptExpr) { const std::string &refName = "arr"; MIRArrayType *arrayType = static_cast( GlobalTables::GetTypeTable().GetOrCreateArrayType(*GlobalTables::GetTypeTable().GetDouble(), 10)); - ASTDecl *astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, fileName, refName, std::vector{arrayType}); + ASTDecl *astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString(fileName, mp), refName, + MapleVector({arrayType}, allocator.Adapter())); ASTDeclRefExpr *astRefExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); astRefExpr->SetASTDecl(astDecl); // astDecl is var @@ -641,7 +659,8 @@ TEST_F(AstExprTest, InitListExpr_Array) { const std::string &refName = "arr"; MIRArrayType *arrayType = static_cast( GlobalTables::GetTypeTable().GetOrCreateArrayType(*GlobalTables::GetTypeTable().GetDouble(), 4)); - ASTDecl *astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, fileName, refName, std::vector{arrayType}); + ASTDecl *astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString(fileName, mp), refName, + MapleVector({arrayType}, allocator.Adapter())); ASTDeclRefExpr *astRefExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); astRefExpr->SetASTDecl(astDecl); // astDecl is var @@ -685,8 +704,8 @@ TEST_F(AstExprTest, InitListExpr_Array) { uint32 arraySize[2] = {4, 10}; MIRArrayType *arrayMulDimType = static_cast( GlobalTables::GetTypeTable().GetOrCreateArrayType(*GlobalTables::GetTypeTable().GetDouble(), 2, arraySize)); - ASTDecl *astMulDimDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, fileName, "xxx", - std::vector{arrayMulDimType}); + ASTDecl *astMulDimDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString(fileName, mp), "xxx", + MapleVector({arrayMulDimType}, allocator.Adapter())); ASTDeclRefExpr *astMulDimRefExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); astMulDimRefExpr->SetASTDecl(astMulDimDecl); // astDecl is var astMulDimRefExpr->SetType(arrayMulDimType); diff --git a/src/hir2mpl/test/ast_input/clang/ast_var_test.cpp b/src/hir2mpl/test/ast_input/clang/ast_var_test.cpp index cf668fca684ff3176ac302076e44b060b7e251d1..1a478db53d247a6184d7ae24e590cd7193190af1 100644 --- a/src/hir2mpl/test/ast_input/clang/ast_var_test.cpp +++ b/src/hir2mpl/test/ast_input/clang/ast_var_test.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies 
Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -34,7 +34,8 @@ TEST_F(FEIRVarNameTest, FEIRVarInAST) { GenericAttrs attrs; attrs.SetAttr(GENATTR_const); MIRType *type = GlobalTables::GetTypeTable().GetInt32(); - auto astVar = ASTDeclsBuilder::ASTVarBuilder(allocator, "foo.c", "a", std::vector{type}, attrs); + auto astVar = ASTDeclsBuilder::ASTVarBuilder(allocator, MapleString("foo.c", mp), "a", + MapleVector({type}, allocator.Adapter()), attrs); astVar->SetGlobal(false); auto feirVar = astVar->Translate2FEIRVar(); EXPECT_EQ(feirVar->GetKind(), kFEIRVarName); @@ -55,7 +56,8 @@ TEST_F(FEIRVarNameTest, FEIRVarInAST) { RestoreCout(); MIRType *type1 = GlobalTables::GetTypeTable().GetInt32(); - auto astVar1 = ASTDeclsBuilder::ASTVarBuilder(allocator, "foo.c", "a", std::vector{type1}, attrs); + auto astVar1 = ASTDeclsBuilder::ASTVarBuilder(allocator, MapleString("foo.c", mp), "a", + MapleVector({type1}, allocator.Adapter()), attrs); astVar1->SetGlobal(false); auto feirVar1 = astVar1->Translate2FEIRVar(); EXPECT_EQ(feirVar1->EqualsTo(feirVar), true); @@ -66,7 +68,8 @@ TEST_F(FEIRVarNameTest, FEIRVarInAST) { // array type uint32 arraySize[3] = {3, 4, 5}; MIRType *arrayType = GlobalTables::GetTypeTable().GetOrCreateArrayType(*type, 3, arraySize); - auto astArrVar = ASTDeclsBuilder::ASTVarBuilder(allocator, "foo.c", "array", std::vector{arrayType}, attrs); + auto astArrVar = ASTDeclsBuilder::ASTVarBuilder(allocator, MapleString("foo.c", mp), "array", + MapleVector({arrayType}, allocator.Adapter()), attrs); astArrVar->SetGlobal(true); auto feirArrVar = astArrVar->Translate2FEIRVar(); EXPECT_EQ(feirArrVar->GetType()->IsArray(), true); diff --git a/src/hir2mpl/test/common/fe_function_test.cpp b/src/hir2mpl/test/common/fe_function_test.cpp deleted file mode 100644 index 8e5919af3ec20a4cf4f00047558aa1a5f5770718..0000000000000000000000000000000000000000 --- a/src/hir2mpl/test/common/fe_function_test.cpp +++ /dev/null @@ -1,245 +0,0 @@ -/* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. - * - * OpenArkCompiler is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. - * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR - * FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. 
- */ -#include -#include -#include "fe_function.h" -#include "redirect_buffer.h" -#include "hir2mpl_ut_environment.h" - -namespace maple { -class GeneralStmtAuxPre : public FEIRStmt { - public: - GeneralStmtAuxPre() : FEIRStmt(kStmtPesudo) { - isAuxPre = true; - } - ~GeneralStmtAuxPre() = default; -}; - -class GeneralStmtAuxPost : public FEIRStmt { - public: - GeneralStmtAuxPost() : FEIRStmt(kStmtPesudo) { - isAuxPost = true; - } - ~GeneralStmtAuxPost() = default; -}; - -class FEFunctionDemo : public FEFunction { - public: - FEFunctionDemo(MapleAllocator &allocator, MIRFunction &argMIRFunction) - : FEFunction(argMIRFunction, std::make_unique(true)), - mapIdxStmt(allocator.Adapter()) {} - ~FEFunctionDemo() = default; - - bool PreProcessTypeNameIdx() override { - return false; - } - - bool GenerateGeneralStmt(const std::string &phaseName) override { - return true; - } - - void GenerateGeneralStmtFailCallBack() override {} - void GenerateGeneralDebugInfo() override {} - bool GenerateArgVarList(const std::string &phaseName) override { - return true; - } - - bool HasThis() override { - return false; - } - - bool IsNative() override { - return false; - } - - bool VerifyGeneral() override { - return false; - } - - void VerifyGeneralFailCallBack() override {} - bool EmitToFEIRStmt(const std::string &phaseName) override { - return true; - } - - bool GenerateAliasVars(const std::string &phaseName) override { - return true; - } - - void LoadGenStmtDemo1(); - void LoadGenStmtDemo2(); - void LoadGenStmtDemo3(); - void LoadGenStmtDemo4(); - void LoadGenStmtDemo5(); - - FEIRStmt *GetStmtByIdx(uint32 idx) { - CHECK_FATAL(mapIdxStmt.find(idx) != mapIdxStmt.end(), "invalid idx"); - return mapIdxStmt[idx]; - } - - template - T *NewGenStmt(uint32 idx) { - FEIRStmt *ptrStmt = RegisterGeneralStmt(std::make_unique()); - genStmtTail->InsertBefore(ptrStmt); - mapIdxStmt[idx] = ptrStmt; - return static_cast(ptrStmt); - } - - private: - MapleMap mapIdxStmt; -}; - -class FEFunctionTest : public testing::Test, public RedirectBuffer { - public: - static MemPool *mp; - MapleAllocator allocator; - MIRFunction func; - FEFunctionDemo demoFunc; - FEFunctionTest() - : allocator(mp), - func(&HIR2MPLUTEnvironment::GetMIRModule(), StIdx(0, 0)), - demoFunc(allocator, func) {} - ~FEFunctionTest() = default; - - static void SetUpTestCase() { - mp = FEUtils::NewMempool("MemPool for FEFunctionTest", false /* isLocalPool */); - } - - static void TearDownTestCase() { - delete mp; - mp = nullptr; - } -}; -MemPool *FEFunctionTest::mp = nullptr; - -/* GenStmtDemo1:BB - * 0 StmtHead - * 1 Stmt (fallthru = true) - * 2 Stmt (fallthru = false) - * 3 StmtTail - */ -void FEFunctionDemo::LoadGenStmtDemo1() { - Init(); - mapIdxStmt.clear(); - (void)NewGenStmt(1); - FEIRStmt *stmt2 = NewGenStmt(2); - stmt2->SetFallThru(false); -} - -/* GenStmtDemo2:BB_StmtAux - * 0 StmtHead - * 1 StmtAuxPre - * 2 Stmt (fallthru = true) - * 3 Stmt (fallthru = false) - * 4 StmtAuxPost - * 5 StmtTail - */ -void FEFunctionDemo::LoadGenStmtDemo2() { - Init(); - mapIdxStmt.clear(); - (void)NewGenStmt(1); - (void)NewGenStmt(2); - FEIRStmt *stmt3 = NewGenStmt(3); - (void)NewGenStmt(4); - stmt3->SetFallThru(false); -} - -/* GenStmtDemo3:CFG - * --- BB0 --- - * 0 StmtHead - * --- BB1 --- - * 1 StmtAuxPre - * 2 StmtMultiOut (fallthru = true, out = {8}) - * 3 StmtAuxPost - * --- BB2 --- - * 4 StmtAuxPre - * 5 Stmt (fallthru = false) - * 6 StmtAuxPost - * --- BB3 --- - * 7 StmtAuxPre - * 8 StmtMultiIn (fallthru = true, in = {2}) - * 9 Stmt (fallthru = false) - * 10 StmtAuxPos 
- * --- BB4 --- - * 11 StmtTail - * - * GenStmtDemo3_CFG: - * BB0 - * | - * BB1 - * / \ - * BB2 BB3 - */ -void FEFunctionDemo::LoadGenStmtDemo3() { - Init(); - mapIdxStmt.clear(); - // --- BB1 --- - (void)NewGenStmt(1); - FEIRStmt *stmt2 = NewGenStmt(2); - (void)NewGenStmt(3); - // --- BB2 --- - (void)NewGenStmt(4); - FEIRStmt *stmt5 = NewGenStmt(5); - stmt5->SetFallThru(false); - (void)NewGenStmt(6); - // --- BB3 --- - (void)NewGenStmt(7); - FEIRStmt *stmt8 = NewGenStmt(8); - FEIRStmt *stmt9 = NewGenStmt(9); - stmt9->SetFallThru(false); - (void)NewGenStmt(10); - // Link - stmt2->AddSucc(*stmt8); - stmt8->AddPred(*stmt2); -} - -/* GenStmtDemo4:CFG_Fail - * 0 StmtHead - * 1 Stmt (fallthru = true) - * 2 Stmt (fallthru = true) - * 3 StmtTail - */ -void FEFunctionDemo::LoadGenStmtDemo4() { - Init(); - mapIdxStmt.clear(); - (void)NewGenStmt(1); - (void)NewGenStmt(2); -} - -/* GenStmtDemo5:CFG_DeadBB - * --- BB0 --- - * 0 StmtHead - * --- BB1 --- - * 1 Stmt (fallthru = true) - * 2 Stmt (fallthru = false) - * --- BB2 --- - * 3 Stmt (fallthru = false) - * --- BB3 --- - * 4 StmtTail - * - * GenStmtDemo5_CFG: - * BB0 - * | - * BB1 BB2(DeadBB) - */ -void FEFunctionDemo::LoadGenStmtDemo5() { - Init(); - mapIdxStmt.clear(); - (void)NewGenStmt(1); - FEIRStmt *stmt2 = NewGenStmt(2); - stmt2->SetFallThru(false); - FEIRStmt *stmt3 = NewGenStmt(3); - stmt3->SetFallThru(false); -} -} // namespace maple diff --git a/src/hir2mpl/test/common/feir_cfg_test.cpp b/src/hir2mpl/test/common/feir_cfg_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d3c328b911675430f818c7321113229bf041176e --- /dev/null +++ b/src/hir2mpl/test/common/feir_cfg_test.cpp @@ -0,0 +1,576 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
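The replacement file feir_cfg_test.cpp below encodes every demo CFG the same way: statements are chained in layout order, SetFallThru(false) closes a basic block, and AddExtraSucc/AddExtraPred supply the branch edges drawn in the ASCII diagrams. A self-contained sketch of that leader rule follows; it illustrates the invariant the tests check, not the FEIRCFG implementation, and it deliberately ignores the aux pre/post statements, which attach to a neighbouring block:

#include <cstddef>
#include <vector>

struct DemoStmt {
  bool fallThru = true;                 // SetFallThru(false) ends the block
  std::vector<std::size_t> extraSuccs;  // AddExtraSucc targets, by statement index
};

// A statement opens a new basic block if it is first, follows a statement
// that does not fall through (or that branches), or is an extra-edge target.
std::vector<bool> ComputeLeaders(const std::vector<DemoStmt> &stmts) {
  std::vector<bool> isLeader(stmts.size(), false);
  if (!stmts.empty()) {
    isLeader[0] = true;
  }
  for (std::size_t i = 0; i < stmts.size(); ++i) {
    for (std::size_t target : stmts[i].extraSuccs) {
      isLeader[target] = true;                    // branch target starts a block
    }
    bool endsBlock = !stmts[i].fallThru || !stmts[i].extraSuccs.empty();
    if (endsBlock && i + 1 < stmts.size()) {
      isLeader[i + 1] = true;                     // next statement starts a block
    }
  }
  return isLeader;
}

For example, in GenStmtDemo6 the extra edge 1 -> 3 makes statement 3 a leader, which is exactly why that test expects three blocks: BB1 = {1}, BB2 = {2}, BB3 = {3, 4}.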
+ */ +#include +#include +#include "feir_cfg.h" +#include "redirect_buffer.h" +#include "hir2mpl_ut_environment.h" + +namespace maple { +class GeneralStmtHead : public FEIRStmt { + public: + GeneralStmtHead() : FEIRStmt(kStmtPesudo) {} + ~GeneralStmtHead() = default; +}; + +class GeneralStmtTail : public FEIRStmt { + public: + GeneralStmtTail() : FEIRStmt(kStmtPesudo) {} + ~GeneralStmtTail() = default; +}; + +class GeneralStmtAuxPre : public FEIRStmt { + public: + GeneralStmtAuxPre() : FEIRStmt(kStmtPesudo) { + isAuxPre = true; + } + ~GeneralStmtAuxPre() = default; +}; + +class GeneralStmtAuxPost : public FEIRStmt { + public: + GeneralStmtAuxPost() : FEIRStmt(kStmtPesudo) { + isAuxPost = true; + } + ~GeneralStmtAuxPost() = default; +}; + +class FEIRCFGDemo : public FEIRCFG { + public: + FEIRCFGDemo(MapleAllocator &argAllocator, FEIRStmt *argStmtHead, FEIRStmt *argStmtTail) + : FEIRCFG(argStmtHead, argStmtTail), + allocator(argAllocator), + mapIdxStmt(allocator.Adapter()) {} + ~FEIRCFGDemo() = default; + + void LoadGenStmtDemo1(); + void LoadGenStmtDemo2(); + void LoadGenStmtDemo3(); + void LoadGenStmtDemo4(); + void LoadGenStmtDemo5(); + void LoadGenStmtDemo6(); + void LoadGenStmtDemo7(); + void LoadGenStmtDemo8(); + + FEIRStmt *GetStmtByIdx(uint32 idx) { + CHECK_FATAL(mapIdxStmt.find(idx) != mapIdxStmt.end(), "invalid idx"); + return mapIdxStmt[idx]; + } + + template <typename T> + T *NewTemporaryStmt(uint32 idx) { + FEIRStmt *ptrStmt = allocator.New<T>(); + ptrStmt->SetFallThru(true); + stmtTail->InsertBefore(ptrStmt); + mapIdxStmt[idx] = ptrStmt; + return static_cast<T*>(ptrStmt); + } + + private: + MapleAllocator allocator; + MapleMap<uint32, FEIRStmt*> mapIdxStmt; +}; + +class FEIRCFGTest : public testing::Test, public RedirectBuffer { + public: + static MemPool *mp; + static FEIRStmt *genStmtHead; + static FEIRStmt *genStmtTail; + MapleAllocator allocator; + FEIRCFGDemo demoCFG; + FEIRCFGTest() + : allocator(mp), + demoCFG(allocator, genStmtHead, genStmtTail) {} + ~FEIRCFGTest() = default; + + static void SetUpTestCase() { + mp = FEUtils::NewMempool("MemPool for FEIRCFGTest", false /* isLocalPool */); + genStmtHead = mp->New<GeneralStmtHead>(); + genStmtTail = mp->New<GeneralStmtTail>(); + } + + static void TearDownTestCase() { + delete mp; + mp = nullptr; + } + + virtual void SetUp() { + // reset head and tail stmt + genStmtHead->SetNext(genStmtTail); + genStmtTail->SetPrev(genStmtHead); + } +}; +MemPool *FEIRCFGTest::mp = nullptr; +FEIRStmt *FEIRCFGTest::genStmtHead = nullptr; +FEIRStmt *FEIRCFGTest::genStmtTail = nullptr; + +/* GenStmtDemo1:BB + * 0 StmtHead + * 1 Stmt (fallthru = true) + * 2 Stmt (fallthru = false) + * 3 StmtTail + */ +void FEIRCFGDemo::LoadGenStmtDemo1() { + mapIdxStmt.clear(); + (void)NewTemporaryStmt(1); + FEIRStmt *stmt2 = NewTemporaryStmt(2); + stmt2->SetFallThru(false); +} + +TEST_F(FEIRCFGTest, CFGBuildForBB) { + demoCFG.LoadGenStmtDemo1(); + demoCFG.Init(); + demoCFG.LabelStmtID(); + demoCFG.BuildBB(); + bool resultCFG = demoCFG.BuildCFG(); + ASSERT_EQ(resultCFG, true); + demoCFG.LabelBBID(); + FEIRBB *bb1 = static_cast<FEIRBB*>(demoCFG.GetDummyHead()->GetNext()); + EXPECT_EQ(bb1->GetNext(), demoCFG.GetDummyTail()); + EXPECT_EQ(bb1->GetStmtHead()->GetID(), 1); + EXPECT_EQ(bb1->GetStmtTail()->GetID(), 2); + EXPECT_EQ(demoCFG.HasDeadBB(), false); +} + +/* GenStmtDemo2:BB_StmtAux + * 0 StmtHead + * 1 StmtAuxPre + * 2 Stmt (fallthru = true) + * 3 Stmt (fallthru = false) + * 4 StmtAuxPost + * 5 StmtTail + */ +void FEIRCFGDemo::LoadGenStmtDemo2() { + mapIdxStmt.clear(); + (void)NewTemporaryStmt(1); + (void)NewTemporaryStmt(2); + FEIRStmt
*stmt3 = NewTemporaryStmt(3); + (void)NewTemporaryStmt(4); + stmt3->SetFallThru(false); +} + +TEST_F(FEIRCFGTest, CFGBuildForBB_StmtAux) { + demoCFG.LoadGenStmtDemo2(); + demoCFG.Init(); + demoCFG.LabelStmtID(); + demoCFG.BuildBB(); + bool resultCFG = demoCFG.BuildCFG(); + ASSERT_EQ(resultCFG, true); + demoCFG.LabelBBID(); + FEIRBB *bb1 = static_cast(demoCFG.GetDummyHead()->GetNext()); + EXPECT_EQ(bb1->GetNext(), demoCFG.GetDummyTail()); + EXPECT_EQ(bb1->GetStmtHead()->GetID(), 1); + EXPECT_EQ(bb1->GetStmtTail()->GetID(), 4); + EXPECT_EQ(bb1->GetStmtNoAuxHead()->GetID(), 2); + EXPECT_EQ(bb1->GetStmtNoAuxTail()->GetID(), 3); + EXPECT_EQ(demoCFG.HasDeadBB(), false); +} + +/* GenStmtDemo3:CFG + * --- BB0 --- + * 0 StmtHead + * --- BB1 --- + * 1 StmtAuxPre + * 2 StmtMultiOut (fallthru = true, out = {8}) + * 3 StmtAuxPost + * --- BB2 --- + * 4 StmtAuxPre + * 5 Stmt (fallthru = false) + * 6 StmtAuxPost + * --- BB3 --- + * 7 StmtAuxPre + * 8 StmtMultiIn (fallthru = true, in = {2}) + * 9 Stmt (fallthru = false) + * 10 StmtAuxPos + * --- BB4 --- + * 11 StmtTail + * + * GenStmtDemo3_CFG: + * BB0 + * | + * BB1 + * / \ + * BB2 BB3 + */ +void FEIRCFGDemo::LoadGenStmtDemo3() { + mapIdxStmt.clear(); + // --- BB1 --- + (void)NewTemporaryStmt(1); + FEIRStmt *stmt2 = NewTemporaryStmt(2); + (void)NewTemporaryStmt(3); + // --- BB2 --- + (void)NewTemporaryStmt(4); + FEIRStmt *stmt5 = NewTemporaryStmt(5); + stmt5->SetFallThru(false); + (void)NewTemporaryStmt(6); + // --- BB3 --- + (void)NewTemporaryStmt(7); + FEIRStmt *stmt8 = NewTemporaryStmt(8); + FEIRStmt *stmt9 = NewTemporaryStmt(9); + stmt9->SetFallThru(false); + (void)NewTemporaryStmt(10); + // Link + stmt2->AddExtraSucc(*stmt8); + stmt8->AddExtraPred(*stmt2); +} + +TEST_F(FEIRCFGTest, CFGBuildForCFG) { + demoCFG.LoadGenStmtDemo3(); + demoCFG.LabelStmtID(); + demoCFG.Init(); + demoCFG.BuildBB(); + bool resultCFG = demoCFG.BuildCFG(); + ASSERT_EQ(resultCFG, true); + demoCFG.LabelBBID(); + // Check BB + FEIRBB *bb1 = static_cast(demoCFG.GetDummyHead()->GetNext()); + ASSERT_NE(bb1, demoCFG.GetDummyTail()); + FEIRBB *bb2 = static_cast(bb1->GetNext()); + ASSERT_NE(bb2, demoCFG.GetDummyTail()); + FEIRBB *bb3 = static_cast(bb2->GetNext()); + ASSERT_NE(bb3, demoCFG.GetDummyTail()); + // Check BB's detail + EXPECT_EQ(bb1->GetStmtHead()->GetID(), 1); + EXPECT_EQ(bb1->GetStmtNoAuxHead()->GetID(), 2); + EXPECT_EQ(bb1->GetStmtNoAuxTail()->GetID(), 2); + EXPECT_EQ(bb1->GetStmtTail()->GetID(), 3); + EXPECT_EQ(bb2->GetStmtHead()->GetID(), 4); + EXPECT_EQ(bb2->GetStmtNoAuxHead()->GetID(), 5); + EXPECT_EQ(bb2->GetStmtNoAuxTail()->GetID(), 5); + EXPECT_EQ(bb2->GetStmtTail()->GetID(), 6); + EXPECT_EQ(bb3->GetStmtHead()->GetID(), 7); + EXPECT_EQ(bb3->GetStmtNoAuxHead()->GetID(), 8); + EXPECT_EQ(bb3->GetStmtNoAuxTail()->GetID(), 9); + EXPECT_EQ(bb3->GetStmtTail()->GetID(), 10); + // Check CFG + EXPECT_EQ(bb1->GetPredBBs().size(), 1); + EXPECT_EQ(bb1->IsPredBB(0U), true); + EXPECT_EQ(bb1->GetSuccBBs().size(), 2); + EXPECT_EQ(bb1->IsSuccBB(2), true); + EXPECT_EQ(bb1->IsSuccBB(3), true); + EXPECT_EQ(bb2->GetPredBBs().size(), 1); + EXPECT_EQ(bb2->IsPredBB(1), true); + EXPECT_EQ(bb2->GetSuccBBs().size(), 0); + EXPECT_EQ(bb3->GetPredBBs().size(), 1); + EXPECT_EQ(bb3->IsPredBB(1), true); + EXPECT_EQ(bb3->GetSuccBBs().size(), 0); + EXPECT_EQ(demoCFG.HasDeadBB(), false); +} + +/* GenStmtDemo4:CFG_Fail + * 0 StmtHead + * 1 Stmt (fallthru = true) + * 2 Stmt (fallthru = true) + * 3 StmtTail + */ +void FEIRCFGDemo::LoadGenStmtDemo4() { + mapIdxStmt.clear(); + (void)NewTemporaryStmt(1); + 
(void)NewTemporaryStmt(2); +} + +TEST_F(FEIRCFGTest, CFGBuildForCFG_Fail) { + demoCFG.Init(); + demoCFG.LoadGenStmtDemo4(); + demoCFG.BuildBB(); + bool resultCFG = demoCFG.BuildCFG(); + ASSERT_EQ(resultCFG, false); +} + +/* GenStmtDemo5:CFG_DeadBB + * --- BB0 --- + * 0 StmtHead + * --- BB1 --- + * 1 Stmt (fallthru = true) + * 2 Stmt (fallthru = false) + * --- BB2 --- + * 3 Stmt (fallthru = false) + * --- BB3 --- + * 4 StmtTail + * + * GenStmtDemo5_CFG: + * BB0 + * | + * BB1 BB2(DeadBB) + */ +void FEIRCFGDemo::LoadGenStmtDemo5() { + mapIdxStmt.clear(); + (void)NewTemporaryStmt(1); + FEIRStmt *stmt2 = NewTemporaryStmt(2); + stmt2->SetFallThru(false); + FEIRStmt *stmt3 = NewTemporaryStmt(3); + stmt3->SetFallThru(false); +} + +TEST_F(FEIRCFGTest, CFGBuildForCFG_DeadBB) { + demoCFG.Init(); + demoCFG.LoadGenStmtDemo5(); + demoCFG.BuildBB(); + bool resultCFG = demoCFG.BuildCFG(); + ASSERT_EQ(resultCFG, true); + demoCFG.LabelStmtID(); + demoCFG.LabelBBID(); + // Check BB + FEIRBB *bb1 = static_cast(demoCFG.GetDummyHead()->GetNext()); + ASSERT_NE(bb1, demoCFG.GetDummyTail()); + FEIRBB *bb2 = static_cast(bb1->GetNext()); + ASSERT_NE(bb2, demoCFG.GetDummyTail()); + // Check BB's detail + EXPECT_EQ(bb1->GetStmtHead()->GetID(), 1); + EXPECT_EQ(bb1->GetStmtTail()->GetID(), 2); + EXPECT_EQ(bb2->GetStmtHead()->GetID(), 3); + EXPECT_EQ(bb2->GetStmtTail()->GetID(), 3); + // Check CFG + EXPECT_EQ(bb1->GetPredBBs().size(), 1); + EXPECT_EQ(bb1->IsPredBB(0U), true); + EXPECT_EQ(bb2->GetSuccBBs().size(), 0); + EXPECT_EQ(demoCFG.HasDeadBB(), true); +} + +/* GenStmtDemo6:CFG + * --- BB0 --- + * 0 StmtHead + * --- BB1 --- + * 1 StmtMultiOut (fallthru = true, out = {3}) + * --- BB2 --- + * 2 Stmt (fallthru = true) + * --- BB3 --- + * 3 StmtMultiIn (fallthru = true, in = {1}) + * 4 Stmt (fallthru = false) + * --- BB4 --- + * 5 StmtTail + * + * GenStmtDemo6_CFG: + * BB0 + * | + * BB1 + * / \ + * BB2 | + * \ / + * BB3 + */ +void FEIRCFGDemo::LoadGenStmtDemo6() { + mapIdxStmt.clear(); + // --- BB1 --- + FEIRStmt *stmt1 = NewTemporaryStmt(1); + // --- BB2 --- + (void)NewTemporaryStmt(2); + // --- BB3 --- + FEIRStmt *stmt3 = NewTemporaryStmt(3); + FEIRStmt *stmt4 = NewTemporaryStmt(4); + stmt4->SetFallThru(false); + // Link + stmt1->AddExtraSucc(*stmt3); + stmt3->AddExtraPred(*stmt1); +} + +TEST_F(FEIRCFGTest, CFGBuildForCFG1) { + demoCFG.Init(); + demoCFG.LoadGenStmtDemo6(); + demoCFG.LabelStmtID(); + demoCFG.BuildBB(); + bool resultCFG = demoCFG.BuildCFG(); + ASSERT_EQ(resultCFG, true); + demoCFG.LabelBBID(); + // Check BB + FEIRBB *bb1 = static_cast(demoCFG.GetDummyHead()->GetNext()); + ASSERT_NE(bb1, demoCFG.GetDummyTail()); + FEIRBB *bb2 = static_cast(bb1->GetNext()); + ASSERT_NE(bb2, demoCFG.GetDummyTail()); + FEIRBB *bb3 = static_cast(bb2->GetNext()); + ASSERT_NE(bb3, demoCFG.GetDummyTail()); + // Check CFG + EXPECT_EQ(bb1->GetPredBBs().size(), 1); + EXPECT_EQ(bb1->IsPredBB(0U), true); + EXPECT_EQ(bb1->GetSuccBBs().size(), 2); + EXPECT_EQ(bb1->IsSuccBB(2), true); + EXPECT_EQ(bb1->IsSuccBB(3), true); + EXPECT_EQ(bb2->GetPredBBs().size(), 1); + EXPECT_EQ(bb2->IsPredBB(1), true); + EXPECT_EQ(bb2->GetSuccBBs().size(), 1); + EXPECT_EQ(bb2->IsSuccBB(3), true); + EXPECT_EQ(bb3->GetPredBBs().size(), 2); + EXPECT_EQ(bb3->IsPredBB(1), true); + EXPECT_EQ(bb3->IsPredBB(2), true); + EXPECT_EQ(demoCFG.HasDeadBB(), false); +} + +/* GenStmtDemo7:CFG + * --- BB0 --- + * 0 StmtHead + * --- BB1 --- + * 1 StmtMultiOut (fallthru = true, out = {5}) + * --- BB2 --- + * 2 Stmt (fallthru = true) + * --- BB3 --- + * 3 StmtMultiIn (fallthru 
= true, in = {6}) + * 4 Stmt (fallthru = false) + * --- BB4 --- + * 5 StmtMultiIn (fallthru = true, in = {1}) + * 6 Stmt (fallthru = false, out = {3}) + * --- BB5 --- + * 7 StmtTail + * + * GenStmtDemo7_CFG: + * BB0 + * | + * BB1 + * / \ + * BB2 BB4 + * \ / + * BB3 + */ +void FEIRCFGDemo::LoadGenStmtDemo7() { + mapIdxStmt.clear(); + // --- BB1 --- + FEIRStmt *stmt1 = NewTemporaryStmt(1); + // --- BB2 --- + (void)NewTemporaryStmt(2); + // --- BB3 --- + FEIRStmt *stmt3 = NewTemporaryStmt(3); + FEIRStmt *stmt4 = NewTemporaryStmt(4); + stmt4->SetFallThru(false); + // --- BB4 --- + FEIRStmt *stmt5 = NewTemporaryStmt(5); + FEIRStmt *stmt6 = NewTemporaryStmt(6); + stmt6->SetFallThru(false); + // Link + stmt1->AddExtraSucc(*stmt5); + stmt5->AddExtraPred(*stmt1); + stmt6->AddExtraSucc(*stmt3); + stmt3->AddExtraPred(*stmt6); +} + +TEST_F(FEIRCFGTest, CFGBuildForCFG2) { + demoCFG.Init(); + demoCFG.LoadGenStmtDemo7(); + demoCFG.LabelStmtID(); + demoCFG.BuildBB(); + bool resultCFG = demoCFG.BuildCFG(); + ASSERT_EQ(resultCFG, true); + demoCFG.LabelBBID(); + // Check BB + FEIRBB *bb1 = static_cast(demoCFG.GetDummyHead()->GetNext()); + ASSERT_NE(bb1, demoCFG.GetDummyTail()); + FEIRBB *bb2 = static_cast(bb1->GetNext()); + ASSERT_NE(bb2, demoCFG.GetDummyTail()); + FEIRBB *bb3 = static_cast(bb2->GetNext()); + ASSERT_NE(bb3, demoCFG.GetDummyTail()); + FEIRBB *bb4 = static_cast(bb3->GetNext()); + ASSERT_NE(bb4, demoCFG.GetDummyTail()); + // Check CFG + EXPECT_EQ(bb2->GetPredBBs().size(), 1); + EXPECT_EQ(bb2->IsPredBB(1), true); + EXPECT_EQ(bb2->GetSuccBBs().size(), 1); + EXPECT_EQ(bb2->IsSuccBB(3), true); + EXPECT_EQ(bb4->GetPredBBs().size(), 1); + EXPECT_EQ(bb4->IsPredBB(1), true); + EXPECT_EQ(bb4->GetSuccBBs().size(), 1); + EXPECT_EQ(bb4->IsSuccBB(3), true); + EXPECT_EQ(bb3->GetPredBBs().size(), 2); + EXPECT_EQ(bb3->IsPredBB(2), true); + EXPECT_EQ(bb3->IsPredBB(4), true); + EXPECT_EQ(demoCFG.HasDeadBB(), false); +} + +/* GenStmtDemo8:CFG + * --- BB0 --- + * 0 StmtHead + * --- BB1 --- + * 1 StmtMultiOut (fallthru = true, out = {6}) + * --- BB2 --- + * 2 StmtMultiIn (fallthru = true, in = {5}) + * 3 Stmt (fallthru = true, out = {4}) + * --- BB3 --- + * 4 Stmt (fallthru = true, in = {3}) + * 5 StmtMultiOut (fallthru = true, in = {2}) + * --- BB4 --- + * 6 StmtMultiIn (fallthru = true, in = {1}) + * 7 Stmt (fallthru = false) + * --- BB5 --- + * 8 StmtTail + * + * GenStmtDemo8_CFG_while: + * BB0 + * | + * BB1 ----- + * | | + * BB2 <- | + * | | | + * BB3 -- | + * | | + * BB4 <---- + */ +void FEIRCFGDemo::LoadGenStmtDemo8() { + mapIdxStmt.clear(); + // --- BB1 --- + FEIRStmt *stmt1 = NewTemporaryStmt(1); + // --- BB2 --- + FEIRStmt *stmt2 = NewTemporaryStmt(2); + FEIRStmt *stmt3 = NewTemporaryStmt(3); + // --- BB3 --- + FEIRStmt *stmt4 = NewTemporaryStmt(4); + FEIRStmt *stmt5 = NewTemporaryStmt(5); + // --- BB4 --- + FEIRStmt *stmt6 = NewTemporaryStmt(6); + FEIRStmt *stmt7 = NewTemporaryStmt(7); + stmt7->SetFallThru(false); + // Link + stmt1->AddExtraSucc(*stmt6); + stmt6->AddExtraPred(*stmt1); + stmt5->AddExtraSucc(*stmt2); + stmt2->AddExtraPred(*stmt5); + stmt3->AddExtraSucc(*stmt4); + stmt4->AddExtraPred(*stmt3); +} + +TEST_F(FEIRCFGTest, CFGBuildForCFG_while) { + demoCFG.Init(); + demoCFG.LoadGenStmtDemo8(); + demoCFG.LabelStmtID(); + demoCFG.BuildBB(); + bool resultCFG = demoCFG.BuildCFG(); + ASSERT_EQ(resultCFG, true); + demoCFG.LabelBBID(); + // Check BB + demoCFG.DumpBBs(); + FEIRBB *bb1 = static_cast(demoCFG.GetDummyHead()->GetNext()); + ASSERT_NE(bb1, demoCFG.GetDummyTail()); + FEIRBB *bb2 = 
static_cast(bb1->GetNext()); + ASSERT_NE(bb2, demoCFG.GetDummyTail()); + FEIRBB *bb3 = static_cast(bb2->GetNext()); + ASSERT_NE(bb3, demoCFG.GetDummyTail()); + FEIRBB *bb4 = static_cast(bb3->GetNext()); + ASSERT_NE(bb4, demoCFG.GetDummyTail()); + // Check CFG + EXPECT_EQ(bb1->GetSuccBBs().size(), 2); + EXPECT_EQ(bb1->IsSuccBB(2), true); + EXPECT_EQ(bb1->IsSuccBB(4), true); + EXPECT_EQ(bb2->GetPredBBs().size(), 2); + EXPECT_EQ(bb2->IsPredBB(1), true); + EXPECT_EQ(bb2->IsPredBB(3), true); + EXPECT_EQ(bb2->GetSuccBBs().size(), 1); + EXPECT_EQ(bb2->IsSuccBB(3), true); + EXPECT_EQ(bb3->GetPredBBs().size(), 1); + EXPECT_EQ(bb3->IsPredBB(2), true); + EXPECT_EQ(bb3->GetSuccBBs().size(), 2); + EXPECT_EQ(bb3->IsSuccBB(2), true); + EXPECT_EQ(bb3->IsSuccBB(4), true); + EXPECT_EQ(bb4->GetPredBBs().size(), 2); + EXPECT_EQ(bb4->IsPredBB(1), true); + EXPECT_EQ(bb4->IsPredBB(3), true); + EXPECT_EQ(demoCFG.HasDeadBB(), false); +} +} // namespace maple diff --git a/src/hir2mpl/test/common/feir_lower_test.cpp b/src/hir2mpl/test/common/feir_lower_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c22396feb196ff51bcaad65460576e3228397269 --- /dev/null +++ b/src/hir2mpl/test/common/feir_lower_test.cpp @@ -0,0 +1,130 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
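The new feir_lower_test.cpp below pins down the if-statement lowering with a regex; written out, the shape that pattern accepts is the classic two-label form (the label names here are placeholders, since the pattern only matches them as @.* by position):

brfalse @else_label (dread u32 %Reg0_Z)  # skip the then-part when the condition is false
dassign %Reg0_I 0 (dread i32 %Reg1_I)    # then-statement
goto @end_label
@else_label
dassign %Reg0_F 0 (dread i32 %Reg1_I)    # else-statement
@end_label

The two trailing GetLabelName comparisons then confirm the wiring: the brfalse target is the label in front of the else-statement, and the goto target is the final label.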
+ */ +#include +#include +#include "feir_test_base.h" +#include "hir2mpl_ut_environment.h" +#include "hir2mpl_ut_regx.h" +#include "fe_function.h" +#include "feir_lower.h" +#include "feir_var_reg.h" +#include "feir_builder.h" + +namespace maple { +class FEFunctionDemo : public FEFunction { + public: + FEFunctionDemo(MIRFunction &argMIRFunction) + : FEFunction(argMIRFunction, std::make_unique<FEFunctionPhaseResult>(true)) {} + ~FEFunctionDemo() = default; + + bool PreProcessTypeNameIdx() override { + return false; + } + + bool GenerateGeneralStmt(const std::string &phaseName) override { + return true; + } + + void GenerateGeneralStmtFailCallBack() override {} + void GenerateGeneralDebugInfo() override {} + bool GenerateArgVarList(const std::string &phaseName) override { + return true; + } + + bool HasThis() override { + return false; + } + + bool IsNative() override { + return false; + } + + bool VerifyGeneral() override { + return false; + } + + void VerifyGeneralFailCallBack() override {} + bool EmitToFEIRStmt(const std::string &phaseName) override { + return true; + } + + bool GenerateAliasVars(const std::string &phaseName) override { + return true; + } + + void LoadGenStmtDemo1(); + +}; + +class FEIRLowerTest : public FEIRTestBase { + public: + FEFunctionDemo feFunc; + + FEIRLowerTest() + : feFunc(*func) { + feFunc.Init(); + } + ~FEIRLowerTest() = default; +}; + +// ifStmt +void FEFunctionDemo::LoadGenStmtDemo1() { + UniqueFEIRVar varReg = FEIRBuilder::CreateVarReg(0, PTY_u1); + std::unique_ptr<FEIRExprDRead> exprDReadReg = std::make_unique<FEIRExprDRead>(std::move(varReg)); + // ThenStmts + UniqueFEIRVar dstVar = FEIRBuilder::CreateVarReg(0, PTY_i32); + UniqueFEIRVar srcVar = std::make_unique<FEIRVarReg>(1, PTY_i32); + UniqueFEIRExpr exprDRead = std::make_unique<FEIRExprDRead>(std::move(srcVar)); + UniqueFEIRStmt stmtDAssign = std::make_unique<FEIRStmtDAssign>(std::move(dstVar), exprDRead->Clone()); + std::list<UniqueFEIRStmt> thenStmts; + thenStmts.emplace_back(std::move(stmtDAssign)); + // ElseStmts + UniqueFEIRVar dstVar2 = FEIRBuilder::CreateVarReg(0, PTY_f32); + UniqueFEIRStmt stmtDAssign1 = std::make_unique<FEIRStmtDAssign>(std::move(dstVar2), std::move(exprDRead)); + std::list<UniqueFEIRStmt> elseStmts; + elseStmts.emplace_back(std::move(stmtDAssign1)); + + std::list<UniqueFEIRStmt> stmts; + stmts.emplace_back(std::make_unique<FEIRStmtIf>(std::move(exprDReadReg), thenStmts, elseStmts)); + AppendFEIRStmts(stmts); +} + +TEST_F(FEIRLowerTest, IfStmtLower) { + feFunc.LoadGenStmtDemo1(); + bool res = feFunc.LowerFunc("fert lower"); + ASSERT_EQ(res, true); + RedirectCout(); + const FEIRStmt *head = feFunc.GetFEIRStmtHead(); + FEIRStmt *stmt = static_cast<FEIRStmt*>(head->GetNext()); + while (stmt != nullptr && stmt->GetKind() != kStmtPesudoFuncEnd) { + std::list<StmtNode*> baseNodes = stmt->GenMIRStmts(mirBuilder); + baseNodes.front()->Dump(); + stmt = static_cast<FEIRStmt*>(stmt->GetNext()); + } + std::string pattern = + "brfalse @.* \\(dread u32 %Reg0_Z\\)\n\n"\ + "dassign %Reg0_I 0 \\(dread i32 %Reg1_I\\)\n\n"\ + "goto @.*\n\n"\ + "@.*\n"\ + "dassign %Reg0_F 0 \\(dread i32 %Reg1_I\\)\n\n"\ + "@.*\n"; + ASSERT_EQ(HIR2MPLUTRegx::Match(GetBufferString(), pattern), true); + ASSERT_EQ(static_cast(head->GetNext())->GetLabelName(), + static_cast(feFunc.GetFEIRStmtTail()->GetPrev()->GetPrev()->GetPrev())->GetLabelName()); + ASSERT_EQ(static_cast(head->GetNext()->GetNext()->GetNext())->GetLabelName(), + static_cast(feFunc.GetFEIRStmtTail()->GetPrev())->GetLabelName()); + RestoreCout(); +} +} // namespace maple diff --git a/src/hir2mpl/test/common/hir2mpl_ut_options.cpp b/src/hir2mpl/test/common/hir2mpl_ut_options.cpp index
d4e1df5b18e021c095a1cfc0e49298653dd755b7..130e4a673b31d2b3ddf2696e9b0e2b6f279fb541 100644 --- a/src/hir2mpl/test/common/hir2mpl_ut_options.cpp +++ b/src/hir2mpl/test/common/hir2mpl_ut_options.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -58,7 +58,7 @@ HIR2MPLUTOptions::HIR2MPLUTOptions() genBase64(false), base64SrcFileName(""), coreMpltName("") { - CreateUsages(kUsage); + CreateUsages(kUsage, sizeof(kUsage)/sizeof(kUsage[0])); } void HIR2MPLUTOptions::DumpUsage() const { diff --git a/src/hir2mpl/test/common/hir2mpl_ut_regx.cpp b/src/hir2mpl/test/common/hir2mpl_ut_regx.cpp index 2c6daa838c67a52bfea9510d9e04e3a5367134d3..19f9a4d79ec223ebdea9085969f51816b6452688 100644 --- a/src/hir2mpl/test/common/hir2mpl_ut_regx.cpp +++ b/src/hir2mpl/test/common/hir2mpl_ut_regx.cpp @@ -22,7 +22,7 @@ bool HIR2MPLUTRegx::Match(const std::string &str, const std::string &pattern) { return true; } else { std::cerr << "Pattern: " << pattern << std::endl; - std::cerr << "String: " << str << std::endl; + std::cerr << "String: " << str << std::endl; return false; } } diff --git a/src/mapleall/BUILD.gn b/src/mapleall/BUILD.gn index 4d087663c09d8f0860ec4aea671a9d2b384ef9be..d13b5d389574f520727b5d050ff5cce9c5e5f44c 100644 --- a/src/mapleall/BUILD.gn +++ b/src/mapleall/BUILD.gn @@ -19,6 +19,15 @@ config("mapleallcompilecfg") { "-fno-common", ] + if (ASAN == 1) { + cflags_cc += [ + "-fsanitize=address" + ] + libs = [ + "${LLVMLIBDIR}/libclang_rt.asan-x86_64.a" + ] + } + if (TARGET == "aarch64") { cflags_cc += [ "-DTARGAARCH64", @@ -73,6 +82,10 @@ config("mapleallcompilecfg") { ] } + if (ASAN == 1) { + ldflags += ["-ldl"] + } + if (MAJOR_VERSION != "") { cflags_cc += [ "-DMAJOR_VERSION=${MAJOR_VERSION}", ] } diff --git a/src/mapleall/bin/dex2mpl b/src/mapleall/bin/dex2mpl index 7f9489c146f13a6a3aabc4d262f8916757ae2e40..0b22917c6087fa1700235d1870015fedd0bbfa32 100755 Binary files a/src/mapleall/bin/dex2mpl and b/src/mapleall/bin/dex2mpl differ diff --git a/src/mapleall/maple_be/BUILD.gn b/src/mapleall/maple_be/BUILD.gn index 2c45bffdfc22b7c47b19af11aac0e6c9875945b3..9c3ae929cea4c9c57e719a5f8ae4ef6e8b0dd058 100644 --- a/src/mapleall/maple_be/BUILD.gn +++ b/src/mapleall/maple_be/BUILD.gn @@ -96,7 +96,6 @@ src_libcgaarch64 = [ "src/cg/aarch64/aarch64_fixshortbranch.cpp", "src/cg/aarch64/aarch64_global.cpp", "src/cg/aarch64/aarch64_proepilog.cpp", - "src/cg/aarch64/aarch64_immediate.cpp", "src/cg/aarch64/aarch64_operand.cpp", "src/cg/aarch64/aarch64_color_ra.cpp", "src/cg/aarch64/aarch64_reg_info.cpp", @@ -124,6 +123,7 @@ src_libcgaarch64 = [ "src/cg/aarch64/aarch64_regsaves.cpp", "src/cg/aarch64/aarch64_utils.cpp", "src/cg/aarch64/aarch64_cg.cpp", + "src/cg/aarch64/aarch64_validbit_opt.cpp", ] src_libcgx8664 = [ @@ -132,9 +132,13 @@ src_libcgx8664 = [ "src/cg/x86_64/x64_cgfunc.cpp", "src/cg/x86_64/x64_memlayout.cpp", "src/cg/x86_64/x64_rt.cpp", - "src/cg/x86_64/x64_reg_alloc.cpp", "src/cg/x86_64/x64_emitter.cpp", - "src/cg/x86_64/x64_irbuilder.cpp", + "src/cg/x86_64/x64_abi.cpp", + "src/cg/x86_64/x64_call_conv.cpp", + "src/cg/x86_64/x64_standardize.cpp", + "src/cg/x86_64/x64_reg_info.cpp", + "src/cg/x86_64/x64_proepilog.cpp", + "src/cg/x86_64/x64_args.cpp", ] src_libcgriscv64 = [ @@ -194,6 +198,8 @@ src_libcgphases = [ "src/cg/cg_ssa_pre.cpp", 
"src/cg/regsaves.cpp", "src/cg/cg_critical_edge.cpp", + "src/cg/alignment.cpp", + "src/cg/cg_validbit_opt.cpp", ] src_libcg = [ @@ -201,6 +207,7 @@ src_libcg = [ "src/cg/cg_irbuilder.cpp", "src/cg/cfi.cpp", "src/cg/cgbb.cpp", + "src/cg/operand.cpp", "src/cg/cgfunc.cpp", "src/cg/cg_cfg.cpp", "src/cg/cg_option.cpp", @@ -210,7 +217,8 @@ src_libcg = [ "src/cg/emit.cpp", "src/cg/live.cpp", "src/cg/loop.cpp", - "src/cg/Isel.cpp", + "src/cg/isel.cpp", + "src/cg/standardize.cpp", "src/cg/memlayout.cpp", "src/cg/yieldpoint.cpp", "src/cg/label_creation.cpp", @@ -218,8 +226,9 @@ src_libcg = [ "src/cg/reg_alloc.cpp", "src/cg/reg_alloc_basic.cpp", "src/cg/proepilog.cpp", - "src/cg/alignment.cpp", "src/cg/cg.cpp", + "src/cg/isa.cpp", + "src/cg/insn.cpp", "src/cg/cg_phasemanager.cpp", ] diff --git a/src/mapleall/maple_be/include/be/becommon.h b/src/mapleall/maple_be/include/be/becommon.h index d876104abaf88897e20fc059fa7b596704898fe8..92160fcc568f88adee7b79a0ee1c8fa4fd8f8590 100644 --- a/src/mapleall/maple_be/include/be/becommon.h +++ b/src/mapleall/maple_be/include/be/becommon.h @@ -26,7 +26,6 @@ namespace maplebe { using namespace maple; - #if TARGX86_64 || TARGAARCH64 || TARGRISCV64 #if ILP32 #define LOWERED_PTR_TYPE PTY_a32 @@ -165,7 +164,7 @@ class BECommon { return 1; } - const MIRModule &GetMIRModule() const { + MIRModule &GetMIRModule() const { return mirModule; } diff --git a/src/mapleall/maple_be/include/be/common_utils.h b/src/mapleall/maple_be/include/be/common_utils.h index f2f0cd9b107c1948bbf93f1e5535871512c7da49..205cbf8161d3c258f94243beb781a40aed2b61ed 100644 --- a/src/mapleall/maple_be/include/be/common_utils.h +++ b/src/mapleall/maple_be/include/be/common_utils.h @@ -65,6 +65,12 @@ constexpr uint32 k12ByteSize = 12; constexpr uint32 k14ByteSize = 14; constexpr uint32 k15ByteSize = 15; constexpr uint32 k16ByteSize = 16; +constexpr uint32 k32ByteSize = 32; + +constexpr uint32 k1EightBytesSize = 8; +constexpr uint32 k2EightBytesSize = 16; +constexpr uint32 k3EightBytesSize = 24; +constexpr uint32 k4EightBytesSize = 32; constexpr uint32 k4BitShift = 2; /* 4 is 1 << 2; */ constexpr uint32 k8BitShift = 3; /* 8 is 1 << 3; */ @@ -192,6 +198,19 @@ inline uint64 RoundUp(uint64 offset, uint64 align) { return RoundUpConst(offset, align); } +inline int64 RoundDownConst(int64 offset, int64 align) { + return (-align) & offset; +} + +// align must be a power of 2 +inline int64 RoundDown(int64 offset, int64 align) { + if (align == 0) { + return offset; + } + ASSERT(IsPowerOf2(align), "align must be power of 2!"); + return RoundDownConst(offset, align); +} + inline bool IsAlignedTo(uint64 offset, uint64 align) { ASSERT(IsPowerOf2(align), "align must be power of 2!"); return (offset & (align - 1)) == 0; diff --git a/src/mapleall/maple_be/include/be/lower.h b/src/mapleall/maple_be/include/be/lower.h index 827331c2409af42498318d1b51ec23732db400ab..d4c3a7c82417d8beebc19de6e6555ab49f5c3e01 100644 --- a/src/mapleall/maple_be/include/be/lower.h +++ b/src/mapleall/maple_be/include/be/lower.h @@ -98,7 +98,7 @@ class CGLowerer { BaseNode *LowerExpr(BaseNode&, BaseNode&, BlockNode&); - BaseNode *LowerDread(DreadNode &dread); + BaseNode *LowerDread(DreadNode &dread, BlockNode& block); BaseNode *LowerIread(IreadNode &iread) { /* use PTY_u8 for boolean type in dread/iread */ @@ -108,10 +108,12 @@ class CGLowerer { return (iread.GetFieldID() == 0 ? 
&iread : LowerIreadBitfield(iread)); } - BaseNode *LowerIreadBitfield(IreadNode &iread); - BaseNode *LowerCastExpr(BaseNode &expr); + BaseNode *ExtractSymbolAddress(StIdx &stIdx, BlockNode &block); + BaseNode *LowerDreadToThreadLocal(BaseNode &expr, BlockNode &block); + StmtNode *LowerDassignToThreadLocal(StmtNode &stmt, BlockNode &block); + void LowerDassign(DassignNode &dassign, BlockNode &block); void LowerResetStmt(StmtNode &stmt, BlockNode &block); @@ -138,6 +140,7 @@ class CGLowerer { BaseNode *SplitBinaryNodeOpnd1(BinaryNode &bNode, BlockNode &blkNode); BaseNode *SplitTernaryNodeResult(TernaryNode &tNode, BaseNode &parent, BlockNode &blkNode); bool IsComplexSelect(const TernaryNode &tNode) const; + int32 FindTheCurrentStmtFreq(StmtNode *stmt) const; BaseNode *LowerComplexSelect(const TernaryNode &tNode, BaseNode &parent, BlockNode &blkNode); BaseNode *LowerFarray(ArrayNode &array); BaseNode *LowerArrayDim(ArrayNode &array, int32 dim); @@ -148,6 +151,7 @@ class CGLowerer { DassignNode *SaveReturnValueInLocal(StIdx, uint16); void LowerCallStmt(StmtNode&, StmtNode*&, BlockNode&, MIRType *retty = nullptr, bool uselvar = false, bool isIntrinAssign = false); + BlockNode *LowerIntrinsiccallAassignedToAssignStmt(IntrinsiccallNode &intrinsicCall); BlockNode *LowerCallAssignedStmt(StmtNode &stmt, bool uselvar = false); bool LowerStructReturn(BlockNode &blk, StmtNode *stmt, StmtNode *nextStmt, bool &lvar); BlockNode *LowerMemop(StmtNode&); @@ -182,8 +186,12 @@ class CGLowerer { void LowerTypePtr(BaseNode &expr) const; + BaseNode *GetBitField(int32 byteOffset, BaseNode *baseAddr, PrimType fieldPrimType); + StmtNode *WriteBitField(std::pair byteBitOffsets, MIRBitFieldType *fieldType, BaseNode *baseAddr, + BaseNode *rhs, BlockNode *block); + BaseNode *ReadBitField(std::pair byteBitOffsets, MIRBitFieldType *fieldType, BaseNode *baseAddr); BaseNode *LowerDreadBitfield(DreadNode &dread); - + BaseNode *LowerIreadBitfield(IreadNode &iread); StmtNode *LowerDassignBitfield(DassignNode &dassign, BlockNode &block); StmtNode *LowerIassignBitfield(IassignNode &iassign, BlockNode &block); diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_alignment.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_alignment.h index a9d0326288a5a9045bab89549471a88da1a036b4..34ce4f7e0bbf8adf4ee6b1d984a19c6248221195 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_alignment.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_alignment.h @@ -20,17 +20,18 @@ #include "aarch64_cgfunc.h" namespace maplebe { -/* if bb size in (16byte, 96byte) , the bb need align */ -constexpr uint32 kAlignMinBBSize = 16; -constexpr uint32 kAlignMaxBBSize = 96; constexpr uint32 kAlignRegionPower = 4; constexpr uint32 kAlignInsnLength = 4; constexpr uint32 kAlignMaxNopNum = 1; -struct AlignInfo { +struct AArch64AlignInfo { + /* if bb size in (16byte, 96byte) , the bb need align */ + uint32 alignMinBBSize = 16; + uint32 alignMaxBBSize = 96; /* default loop & jump align power, related to the target machine. eg. 
2^5 */ uint32 loopAlign = 4; uint32 jumpAlign = 5; + /* record func_align_power in CGFunc */ }; class AArch64AlignAnalysis : public AlignAnalysis { @@ -58,7 +59,7 @@ class AArch64AlignAnalysis : public AlignAnalysis { bool IsInSameAlignedRegion(uint32 addr1, uint32 addr2, uint32 alignedRegionSize) const; private: - AArch64CGFunc *aarFunc; + AArch64CGFunc *aarFunc = nullptr; }; } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_args.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_args.h index 52ecfd9e05719cefa5aa2e545b8d597ed4022aa6..00dccf4a1b57742ceaa9b048b8eca129e530bf4b 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_args.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_args.h @@ -50,7 +50,7 @@ class AArch64MoveRegArgs : public MoveRegArgs { ArgInfo GetArgInfo(std::map &argsList, std::vector &numFpRegs, std::vector &fpSize, uint32 argIndex) const; bool IsInSameSegment(const ArgInfo &firstArgInfo, const ArgInfo &secondArgInfo) const; - void GenOneInsn(ArgInfo &argInfo, AArch64RegOperand &baseOpnd, uint32 stBitSize, AArch64reg dest, int32 offset); + void GenOneInsn(ArgInfo &argInfo, RegOperand &baseOpnd, uint32 stBitSize, AArch64reg dest, int32 offset); void GenerateStpInsn(const ArgInfo &firstArgInfo, const ArgInfo &secondArgInfo); void GenerateStrInsn(ArgInfo &argInfo, AArch64reg reg2, uint32 numFpRegs, uint32 fpSize); void MoveRegisterArgs(); diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_call_conv.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_call_conv.h index fe5322398e51560db92c53b7d99a44d45afcd3e7..e66975ca2196b0720400d434a759cc104cdbd2bb 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_call_conv.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_call_conv.h @@ -42,6 +42,9 @@ class AArch64CallConvImpl { void InitCCLocInfo(CCLocInfo &pLoc) const; + /* for lmbc */ + uint32 FloatParamRegRequired(MIRStructType &structType, uint32 &fpSize); + /* return value related */ void InitReturnInfo(MIRType &retTy, CCLocInfo &pLoc); diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h index 4ac264470ed533f9096599dbdb84927e640842a4..0901fda44b9f90de96e33f247fec9a8d39c6cbb6 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h @@ -24,6 +24,8 @@ #include "aarch64_live.h" #include "aarch64_args.h" #include "aarch64_alignment.h" +#include "aarch64_validbit_opt.h" +#include "aarch64_reg_coalesce.h" namespace maplebe { constexpr int64 kShortBRDistance = (8 * 1024); @@ -166,15 +168,21 @@ class AArch64CG : public CG { CGSSAInfo *CreateCGSSAInfo(MemPool &mp, CGFunc &f, DomAnalysis &da, MemPool &tmp) const override { return mp.New(f, da, mp, tmp); } + LiveIntervalAnalysis *CreateLLAnalysis(MemPool &mp, CGFunc &f) const override { + return mp.New(f, mp); + }; PhiEliminate *CreatePhiElimintor(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const override { return mp.New(f, ssaInfo, mp); } - CGProp *CreateCGProp(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const override { - return mp.New(mp, f, ssaInfo); + CGProp *CreateCGProp(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo, LiveIntervalAnalysis &ll) const override { + return mp.New(mp, f, ssaInfo, ll); } CGDce *CreateCGDce(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const override { return mp.New(mp, f, ssaInfo); } + ValidBitOpt *CreateValidBitOpt(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const override { + return mp.New(f, 
ssaInfo); + } static const AArch64MD kMd[kMopLast]; enum : uint8 { diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h index 174e9dedf64eab3ba2188b13461c613dde689760..28066b6c3738c9124ed6bafe33069f7b6f65e204 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -27,6 +27,19 @@ #include "aarch64_call_conv.h" namespace maplebe { +class LmbcArgInfo { + public: + LmbcArgInfo(MapleAllocator &mallocator) + : lmbcCallArgs(mallocator.Adapter()), + lmbcCallArgTypes(mallocator.Adapter()), + lmbcCallArgOffsets(mallocator.Adapter()), + lmbcCallArgNumOfRegs(mallocator.Adapter()) {} + MapleVector lmbcCallArgs; + MapleVector lmbcCallArgTypes; + MapleVector lmbcCallArgOffsets; + MapleVector lmbcCallArgNumOfRegs; // # of regs needed to complete struct +}; + class AArch64CGFunc : public CGFunc { public: AArch64CGFunc(MIRModule &mod, CG &c, MIRFunction &f, BECommon &b, @@ -51,7 +64,8 @@ class AArch64CGFunc : public CGFunc { if (f.GetAttr(FUNCATTR_varargs) || f.HasVlaOrAlloca()) { SetHasVLAOrAlloca(true); } - SetUseFP(CGOptions::UseFramePointer() || HasVLAOrAlloca() || !f.GetModule()->IsCModule()); + SetUseFP(CGOptions::UseFramePointer() || HasVLAOrAlloca() || !f.GetModule()->IsCModule() || + f.GetModule()->GetFlavor() == MIRFlavor::kFlavorLmbc); } ~AArch64CGFunc() override = default; @@ -86,12 +100,13 @@ class AArch64CGFunc : public CGFunc { return kRFLAG; } + MIRType *GetAggTyFromCallSite(StmtNode *stmt); RegOperand &GetOrCreateResOperand(const BaseNode &parent, PrimType primType); - void IntrinsifyGetAndAddInt(AArch64ListOperand &srcOpnds, PrimType pty); - void IntrinsifyGetAndSetInt(AArch64ListOperand &srcOpnds, PrimType pty); - void IntrinsifyCompareAndSwapInt(AArch64ListOperand &srcOpnds, PrimType pty); - void IntrinsifyStringIndexOf(AArch64ListOperand &srcOpnds, const MIRSymbol &funcSym); + void IntrinsifyGetAndAddInt(ListOperand &srcOpnds, PrimType pty); + void IntrinsifyGetAndSetInt(ListOperand &srcOpnds, PrimType pty); + void IntrinsifyCompareAndSwapInt(ListOperand &srcOpnds, PrimType pty); + void IntrinsifyStringIndexOf(ListOperand &srcOpnds, const MIRSymbol &funcSym); void GenSaveMethodInfoCode(BB &bb) override; void DetermineReturnTypeofCall() override; void HandleRCCall(bool begin, const MIRSymbol *retRef = nullptr) override; @@ -105,13 +120,19 @@ class AArch64CGFunc : public CGFunc { void SelectAbort() override; void SelectAssertNull(UnaryStmtNode &stmt) override; void SelectAsm(AsmNode &stmt) override; - AArch64MemOperand *GenLargeAggFormalMemOpnd(const MIRSymbol &sym, uint32 alignUsed, int64 offset, - bool needLow12 = false); - AArch64MemOperand *FixLargeMemOpnd(MemOperand &memOpnd, uint32 align); - AArch64MemOperand *FixLargeMemOpnd(MOperator mOp, MemOperand &memOpnd, uint32 dSize, uint32 opndIdx); + MemOperand *GenLargeAggFormalMemOpnd(const MIRSymbol &sym, uint32 alignUsed, int64 offset, + bool needLow12 = false); + MemOperand *FixLargeMemOpnd(MemOperand &memOpnd, uint32 align); + MemOperand *FixLargeMemOpnd(MOperator mOp, MemOperand &memOpnd, uint32 dSize, uint32 opndIdx); + void LmbcSelectParmList(ListOperand 
*srcOpnds, bool isArgReturn); + bool LmbcSmallAggForRet(BlkassignoffNode &bNode, Operand *src); + bool LmbcSmallAggForCall(BlkassignoffNode &bNode, Operand *src); void SelectAggDassign(DassignNode &stmt) override; void SelectIassign(IassignNode &stmt) override; void SelectIassignoff(IassignoffNode &stmt) override; + void SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) override; + void SelectIassignspoff(PrimType pTy, int32 offset, Operand &opnd) override; + void SelectBlkassignoff(BlkassignoffNode &bNode, Operand *src) override; void SelectAggIassign(IassignNode &stmt, Operand &lhsAddrOpnd) override; void SelectReturn(Operand *opnd0) override; void SelectIgoto(Operand *opnd0) override; @@ -124,25 +145,27 @@ class AArch64CGFunc : public CGFunc { void SelectCall(CallNode &callNode) override; void SelectIcall(IcallNode &icallNode, Operand &fptrOpnd) override; void SelectIntrinCall(IntrinsiccallNode &intrinsicCallNode) override; - Operand *SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrinopNode, std::string name) override; - Operand *SelectIntrinsicOpWithNParams(IntrinsicopNode &intrinopNode, PrimType retType, std::string &name) override; - Operand *SelectCclz(IntrinsicopNode &intrinopNode) override; - Operand *SelectCctz(IntrinsicopNode &intrinopNode) override; - Operand *SelectCpopcount(IntrinsicopNode &intrinopNode) override; - Operand *SelectCparity(IntrinsicopNode &intrinopNode) override; - Operand *SelectCclrsb(IntrinsicopNode &intrinopNode) override; - Operand *SelectCisaligned(IntrinsicopNode &intrinopNode) override; - Operand *SelectCalignup(IntrinsicopNode &intrinopNode) override; - Operand *SelectCaligndown(IntrinsicopNode &intrinopNode) override; - Operand *SelectCSyncAddFetch(IntrinsicopNode &intrinopNode, PrimType pty) override; - Operand *SelectCSyncFetchAdd(IntrinsicopNode &intrinopNode, PrimType pty) override; - Operand *SelectCSyncSubFetch(IntrinsicopNode &intrinopNode, PrimType pty) override; - Operand *SelectCSyncFetchSub(IntrinsicopNode &intrinopNode, PrimType pty) override; - Operand *SelectCSyncBoolCmpSwap(IntrinsicopNode &intrinopNode, PrimType pty) override; - Operand *SelectCSyncValCmpSwap(IntrinsicopNode &intrinopNode, PrimType pty) override; - Operand *SelectCSyncLockTestSet(IntrinsicopNode &intrinopNode, PrimType pty) override; - Operand *SelectCSyncLockRelease(IntrinsicopNode &intrinopNode, PrimType pty) override; - Operand *SelectCReturnAddress(IntrinsicopNode &intrinopNode) override; + Operand *SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrinsicopNode, std::string name) override; + Operand *SelectIntrinsicOpWithNParams(IntrinsicopNode &intrinsicopNode, PrimType retType, + const std::string &name) override; + Operand *SelectCclz(IntrinsicopNode &intrinsicopNode) override; + Operand *SelectCctz(IntrinsicopNode &intrinsicopNode) override; + Operand *SelectCpopcount(IntrinsicopNode &intrinsicopNode) override; + Operand *SelectCparity(IntrinsicopNode &intrinsicopNode) override; + Operand *SelectCclrsb(IntrinsicopNode &intrinsicopNode) override; + Operand *SelectCisaligned(IntrinsicopNode &intrinsicopNode) override; + Operand *SelectCalignup(IntrinsicopNode &intrinsicopNode) override; + Operand *SelectCaligndown(IntrinsicopNode &intrinsicopNode) override; + Operand *SelectCSyncFetch(IntrinsicopNode &intrinsicopNode, Opcode op, bool fetchBefore) override; + Operand *SelectCSyncBoolCmpSwap(IntrinsicopNode &intrinsicopNode, PrimType pty) override; + Operand *SelectCSyncValCmpSwap(IntrinsicopNode &intrinsicopNode, PrimType pty) override; + Operand 
*SelectCSyncLockTestSet(IntrinsicopNode &intrinsicopNode, PrimType pty) override; + Operand *SelectCSyncSynchronize(IntrinsicopNode &intrinsicopNode) override; + AArch64isa::MemoryOrdering PickMemOrder(std::memory_order memOrder, bool isLdr); + Operand *SelectCAtomicLoadN(IntrinsicopNode &intrinsicopNode) override; + Operand *SelectCAtomicExchangeN(IntrinsicopNode &intrinsicopNode) override; + Operand *SelectAtomicLoad(Operand &addrOpnd, PrimType primType, AArch64isa::MemoryOrdering memOrder); + Operand *SelectCReturnAddress(IntrinsicopNode &intrinsicopNode) override; void SelectMembar(StmtNode &membar) override; void SelectComment(CommentNode &comment) override; @@ -151,9 +174,10 @@ class AArch64CGFunc : public CGFunc { RegOperand *SelectRegread(RegreadNode &expr) override; void SelectAddrof(Operand &result, StImmOperand &stImm, FieldID field = 0); - void SelectAddrof(Operand &result, AArch64MemOperand &memOpnd, FieldID field = 0); + void SelectAddrof(Operand &result, MemOperand &memOpnd, FieldID field = 0); Operand *SelectCSyncCmpSwap(const IntrinsicopNode &intrinopNode, PrimType pty, bool retBool = false); - Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent) override; + Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent, bool isAddrofoff = false) override; + Operand *SelectAddrofoff(AddrofoffNode &expr, const BaseNode &parent) override; Operand &SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) override; Operand &SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) override; @@ -161,7 +185,8 @@ class AArch64CGFunc : public CGFunc { Operand *SelectIread(const BaseNode &parent, IreadNode &expr, int extraOffset = 0, PrimType finalBitFieldDestType = kPtyInvalid) override; - + Operand *SelectIreadoff(const BaseNode &parent, IreadoffNode &ireadoff) override; + Operand *SelectIreadfpoff(const BaseNode &parent, IreadFPoffNode &ireadoff) override; Operand *SelectIntConst(MIRIntConst &intConst) override; Operand *HandleFmovImm(PrimType stype, int64 val, MIRConst &mirConst, const BaseNode &parent); Operand *SelectFloatConst(MIRFloatConst &floatConst, const BaseNode &parent) override; @@ -212,7 +237,7 @@ class AArch64CGFunc : public CGFunc { MemOperand *AdjustMemOperandIfOffsetOutOfRange(MemOperand *memOpnd, regno_t regNO, bool isDest, Insn &insn, AArch64reg regNum, bool &isOutOfRange); void SelectAddAfterInsn(Operand &resOpnd, Operand &o0, Operand &o1, PrimType primType, bool isDest, Insn &insn); - bool IsImmediateOffsetOutOfRange(const AArch64MemOperand &memOpnd, uint32 bitLen); + bool IsImmediateOffsetOutOfRange(const MemOperand &memOpnd, uint32 bitLen); bool IsOperandImmValid(MOperator mOp, Operand *o, uint32 opndIdx); Operand *SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; void SelectDiv(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override; @@ -220,6 +245,7 @@ class AArch64CGFunc : public CGFunc { Operand *SelectAbsSub(Insn &lastInsn, const UnaryNode &node, Operand &newOpnd0); Operand *SelectAbs(UnaryNode &node, Operand &opnd0) override; Operand *SelectBnot(UnaryNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; Operand *SelectExtractbits(ExtractbitsNode &node, Operand &opnd0, const BaseNode &parent) override; Operand *SelectRegularBitFieldLoad(ExtractbitsNode &node, const BaseNode &parent) override; Operand *SelectDepositBits(DepositbitsNode &node, Operand &opnd0, 
Operand &opnd1, const BaseNode &parent) override; @@ -259,7 +285,7 @@ class AArch64CGFunc : public CGFunc { bool IsRegRematCand(const RegOperand ®); void ClearRegRematInfo(const RegOperand ®); bool IsRegSameRematInfo(const RegOperand ®Dest, const RegOperand ®Src); - void ReplaceOpndInInsn(RegOperand ®Dest, RegOperand ®Src, Insn &insn) override; + void ReplaceOpndInInsn(RegOperand ®Dest, RegOperand ®Src, Insn &insn, regno_t destNO) override; void CleanupDeadMov(bool dump = false) override; void GetRealCallerSaveRegs(const Insn &insn, std::set &realSaveRegs) override; Operand &GetTargetRetOperand(PrimType primType, int32 sReg) override; @@ -269,18 +295,18 @@ class AArch64CGFunc : public CGFunc { RegOperand &CreateRegisterOperandOfType(PrimType primType); RegOperand &CreateRegisterOperandOfType(RegType regType, uint32 byteLen); RegOperand &CreateRflagOperand(); - RegOperand &GetOrCreateSpecialRegisterOperand(PregIdx sregIdx, PrimType primType = PTY_i64); + RegOperand &GetOrCreateSpecialRegisterOperand(PregIdx sregIdx, PrimType primType); MemOperand *GetOrCreatSpillMem(regno_t vrNum); void FreeSpillRegMem(regno_t vrNum); - AArch64RegOperand &GetOrCreatePhysicalRegisterOperand(AArch64reg regNO, uint32 size, RegType type, uint32 flag = 0); - AArch64RegOperand &GetOrCreatePhysicalRegisterOperand(std::string &asmAttr); + RegOperand &GetOrCreatePhysicalRegisterOperand(AArch64reg regNO, uint32 size, RegType type, uint32 flag = 0); + RegOperand &GetOrCreatePhysicalRegisterOperand(std::string &asmAttr); + RegOperand *CreateVirtualRegisterOperand(regno_t vRegNO, uint32 size, RegType kind, uint32 flg = 0); RegOperand &CreateVirtualRegisterOperand(regno_t vregNO) override; RegOperand &GetOrCreateVirtualRegisterOperand(regno_t vregNO) override; RegOperand &GetOrCreateVirtualRegisterOperand(RegOperand ®Opnd) override; const LabelOperand *GetLabelOperand(LabelIdx labIdx) const override; LabelOperand &GetOrCreateLabelOperand(LabelIdx labIdx) override; LabelOperand &GetOrCreateLabelOperand(BB &bb) override; - LabelOperand &CreateFuncLabelOperand(const MIRSymbol &func); uint32 GetAggCopySize(uint32 offset1, uint32 offset2, uint32 alignment) const; RegOperand *SelectVectorAddLong(PrimType rTy, Operand *o1, Operand *o2, PrimType oty, bool isLow) override; @@ -336,16 +362,10 @@ class AArch64CGFunc : public CGFunc { return nType; } - AArch64ImmOperand &CreateImmOperand(PrimType ptyp, int64 val) override { + ImmOperand &CreateImmOperand(PrimType ptyp, int64 val) override { return CreateImmOperand(val, GetPrimTypeBitSize(ptyp), IsSignedInteger(ptyp)); } - Operand *CreateZeroOperand(PrimType ptyp) override { - (void)ptyp; - ASSERT(false, "NYI"); - return nullptr; - } - Operand &CreateFPImmZero(PrimType ptyp) override { return GetOrCreateFpZeroOperand(GetPrimTypeBitSize(ptyp)); } @@ -354,27 +374,31 @@ class AArch64CGFunc : public CGFunc { return nullptr; } /* create an integer immediate operand */ - AArch64ImmOperand &CreateImmOperand(int64 val, uint32 size, bool isSigned, VaryType varyType = kNotVary, + ImmOperand &CreateImmOperand(int64 val, uint32 size, bool isSigned, VaryType varyType = kNotVary, bool isFmov = false) { - return *memPool->New(val, size, isSigned, varyType, isFmov); + return *memPool->New(val, size, isSigned, varyType, isFmov); + } + + ListOperand *CreateListOpnd(MapleAllocator &allocator) { + return memPool->New(allocator); } ImmFPZeroOperand &GetOrCreateFpZeroOperand(uint8 size) { return *ImmFPZeroOperand::allocate(size); } - AArch64OfstOperand &GetOrCreateOfstOpnd(uint64 offset, uint32 size); + 
OfstOperand &GetOrCreateOfstOpnd(uint64 offset, uint32 size); - AArch64OfstOperand &CreateOfstOpnd(uint64 offset, uint32 size) { - return *memPool->New(offset, size); + OfstOperand &CreateOfstOpnd(uint64 offset, uint32 size) { + return *memPool->New(offset, size); } - AArch64OfstOperand &CreateOfstOpnd(const MIRSymbol &mirSymbol, int32 relocs) { - return *memPool->New(mirSymbol, 0, relocs); + OfstOperand &CreateOfstOpnd(const MIRSymbol &mirSymbol, int32 relocs) { + return *memPool->New(mirSymbol, 0, relocs); } - AArch64OfstOperand &CreateOfstOpnd(const MIRSymbol &mirSymbol, int64 offset, int32 relocs) { - return *memPool->New(mirSymbol, 0, offset, relocs); + OfstOperand &CreateOfstOpnd(const MIRSymbol &mirSymbol, int64 offset, int32 relocs) { + return *memPool->New(mirSymbol, 0, offset, relocs); } StImmOperand &CreateStImmOperand(const MIRSymbol &mirSymbol, int64 offset, int32 relocs) { @@ -386,28 +410,36 @@ class AArch64CGFunc : public CGFunc { } RegOperand &GetOrCreateStackBaseRegOperand() override { - return GetOrCreatePhysicalRegisterOperand(RFP, kSizeOfPtr * kBitsPerByte, kRegTyInt); + AArch64reg reg; + if (GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + reg = RSP; + } else { + reg = RFP; + } + return GetOrCreatePhysicalRegisterOperand(reg, kSizeOfPtr * kBitsPerByte, kRegTyInt); } RegOperand &GenStructParamIndex(RegOperand &base, const BaseNode &indexExpr, int shift, PrimType baseType, PrimType targetType); void SelectAddrofAfterRa(Operand &result, StImmOperand &stImm, std::vector& rematInsns); MemOperand &GetOrCreateMemOpndAfterRa(const MIRSymbol &symbol, int32 offset, uint32 size, - bool needLow12, AArch64RegOperand *regOp, std::vector& rematInsns); + bool needLow12, RegOperand *regOp, std::vector& rematInsns); MemOperand &GetOrCreateMemOpnd(const MIRSymbol &symbol, int64 offset, uint32 size, bool forLocalRef = false, - bool needLow12 = false, AArch64RegOperand *regOp = nullptr); + bool needLow12 = false, RegOperand *regOp = nullptr); - AArch64MemOperand &GetOrCreateMemOpnd(AArch64MemOperand::AArch64AddressingMode, uint32, RegOperand*, RegOperand*, - OfstOperand*, const MIRSymbol*); + MemOperand &HashMemOpnd(MemOperand &tMemOpnd); - AArch64MemOperand &GetOrCreateMemOpnd(AArch64MemOperand::AArch64AddressingMode, uint32 size, RegOperand *base, - RegOperand *index, int32 shift, bool isSigned = false); + MemOperand &GetOrCreateMemOpnd(MemOperand::AArch64AddressingMode, uint32, RegOperand*, RegOperand*, + ImmOperand*, const MIRSymbol*); - AArch64MemOperand &GetOrCreateMemOpnd(AArch64MemOperand &oldMem); + MemOperand &GetOrCreateMemOpnd(MemOperand::AArch64AddressingMode, uint32 size, RegOperand *base, + RegOperand *index, int32 shift, bool isSigned = false); + + MemOperand &GetOrCreateMemOpnd(MemOperand &oldMem); MemOperand &CreateMemOpnd(AArch64reg reg, int64 offset, uint32 size) { - AArch64RegOperand &baseOpnd = GetOrCreatePhysicalRegisterOperand(reg, kSizeOfPtr * kBitsPerByte, kRegTyInt); + RegOperand &baseOpnd = GetOrCreatePhysicalRegisterOperand(reg, kSizeOfPtr * kBitsPerByte, kRegTyInt); return CreateMemOpnd(baseOpnd, offset, size); } @@ -449,6 +481,9 @@ class AArch64CGFunc : public CGFunc { void GenerateCleanupCode(BB &bb) override; bool NeedCleanup() override; void GenerateCleanupCodeForExtEpilog(BB &bb) override; + uint32 FloatParamRegRequired(MIRStructType *structType, uint32 &fpSize) override; + void AssignLmbcFormalParams() override; + RegOperand *GenLmbcParamLoad(int32 offset, uint32 byteSize, RegType regType, PrimType primType); Operand *GetBaseReg(const 
AArch64SymbolAlloc &symAlloc); int32 GetBaseOffset(const SymbolAlloc &symAlloc) override; @@ -528,13 +563,21 @@ class AArch64CGFunc : public CGFunc { return cleanEANode; } - AArch64MemOperand &CreateStkTopOpnd(uint32 offset, uint32 size); + MemOperand &CreateStkTopOpnd(uint32 offset, uint32 size); + MemOperand *CreateStackMemOpnd(regno_t preg, int32 offset, uint32 size); + MemOperand *CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 size, + RegOperand &base, RegOperand *index, ImmOperand *offset, const MIRSymbol *symbol); + MemOperand *CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 size, + RegOperand &base, RegOperand &index, ImmOperand *offset, const MIRSymbol &symbol, bool noExtend); + MemOperand *CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 dSize, + RegOperand &base, RegOperand &indexOpnd, uint32 shift, bool isSigned = false); + MemOperand *CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 dSize, const MIRSymbol &sym); /* if offset < 0, allocation; otherwise, deallocation */ - AArch64MemOperand &CreateCallFrameOperand(int32 offset, int32 size); + MemOperand &CreateCallFrameOperand(int32 offset, int32 size); void AppendCall(const MIRSymbol &func); - Insn &AppendCall(const MIRSymbol &func, AArch64ListOperand &srcOpnds, bool isCleanCall = false); + Insn &AppendCall(const MIRSymbol &func, ListOperand &srcOpnds, bool isCleanCall = false); static constexpr uint32 kDwarfScalarRegBegin = 0; static constexpr uint32 kDwarfFpRegBegin = 64; @@ -579,13 +622,14 @@ class AArch64CGFunc : public CGFunc { AArch64isa::MemoryOrdering memOrd = AArch64isa::kMoNone) const; MOperator PickExtInsn(PrimType dtype, PrimType stype) const; - bool CheckIfSplitOffsetWithAdd(const AArch64MemOperand &memOpnd, uint32 bitLen); + bool CheckIfSplitOffsetWithAdd(const MemOperand &memOpnd, uint32 bitLen); RegOperand *GetBaseRegForSplit(uint32 baseRegNum); - AArch64MemOperand &SplitOffsetWithAddInstruction(const AArch64MemOperand &memOpnd, uint32 bitLen, - uint32 baseRegNum = AArch64reg::kRinvalid, bool isDest = false, - Insn *insn = nullptr, bool forPair = false); - AArch64MemOperand &CreateReplacementMemOperand(uint32 bitLen, RegOperand &baseReg, int64 offset); + MemOperand &ConstraintOffsetToSafeRegion(uint32 bitLen, MemOperand &memOpnd); + MemOperand &SplitOffsetWithAddInstruction(const MemOperand &memOpnd, uint32 bitLen, + uint32 baseRegNum = AArch64reg::kRinvalid, bool isDest = false, + Insn *insn = nullptr, bool forPair = false); + MemOperand &CreateReplacementMemOperand(uint32 bitLen, RegOperand &baseReg, int64 offset); bool HasStackLoadStore(); @@ -637,6 +681,48 @@ class AArch64CGFunc : public CGFunc { return proEpilogSavedRegs; } + uint32 GetDefaultAlignPow() { + return alignPow; + } + + LmbcArgInfo *GetLmbcArgInfo() { + return lmbcArgInfo; + } + + void SetLmbcArgInfo(LmbcArgInfo *p) { + lmbcArgInfo = p; + } + + void SetLmbcArgInfo(RegOperand *reg, PrimType pTy, int32 ofst, int32 regs) { + GetLmbcCallArgs().emplace_back(reg); + GetLmbcCallArgTypes().emplace_back(pTy); + GetLmbcCallArgOffsets().emplace_back(ofst); + GetLmbcCallArgNumOfRegs().emplace_back(regs); + } + + void ResetLmbcArgInfo() { + GetLmbcCallArgs().clear(); + GetLmbcCallArgTypes().clear(); + GetLmbcCallArgOffsets().clear(); + GetLmbcCallArgNumOfRegs().clear(); + } + + MapleVector &GetLmbcCallArgs() { + return lmbcArgInfo->lmbcCallArgs; + } + + MapleVector &GetLmbcCallArgTypes() { + return lmbcArgInfo->lmbcCallArgTypes; + } + + MapleVector &GetLmbcCallArgOffsets() { + return 
lmbcArgInfo->lmbcCallArgOffsets; + } + + MapleVector &GetLmbcCallArgNumOfRegs() { + return lmbcArgInfo->lmbcCallArgNumOfRegs; + } + private: enum RelationOperator : uint8 { kAND, @@ -666,18 +752,18 @@ class AArch64CGFunc : public CGFunc { Insn *yieldPointInsn = nullptr; /* The insn of yield point at the entry of the func. */ IntrinsiccallNode *cleanEANode = nullptr; - MapleUnorderedMap phyRegOperandTable; /* machine register operand table */ + MapleUnorderedMap phyRegOperandTable; /* machine register operand table */ MapleUnorderedMap hashLabelOpndTable; - MapleUnorderedMap hashOfstOpndTable; - MapleUnorderedMap hashMemOpndTable; + MapleUnorderedMap hashOfstOpndTable; + MapleUnorderedMap hashMemOpndTable; /* * Local variables, formal parameters that are passed via registers * need offset adjustment after callee-saved registers are known. */ - MapleUnorderedMap memOpndsRequiringOffsetAdjustment; - MapleUnorderedMap memOpndsForStkPassedArguments; - MapleUnorderedMap immOpndsRequiringOffsetAdjustment; - MapleUnorderedMap immOpndsRequiringOffsetAdjustmentForRefloc; + MapleUnorderedMap memOpndsRequiringOffsetAdjustment; + MapleUnorderedMap memOpndsForStkPassedArguments; + MapleUnorderedMap immOpndsRequiringOffsetAdjustment; + MapleUnorderedMap immOpndsRequiringOffsetAdjustmentForRefloc; union { regno_t regNOCatch; /* For O2. */ Operand *opndCatch; /* For O0-O1. */ @@ -702,15 +788,20 @@ class AArch64CGFunc : public CGFunc { bool usedStpSubPairToAllocateCallFrame = false; int32 splitStpldpBaseOffset = 0; regno_t methodHandleVreg = -1; + uint32 alignPow = 5; /* function align pow defaults to 5, i.e. 2^5 */ + LmbcArgInfo *lmbcArgInfo = nullptr; void SelectLoadAcquire(Operand &dest, PrimType dtype, Operand &src, PrimType stype, AArch64isa::MemoryOrdering memOrd, bool isDirect); void SelectStoreRelease(Operand &dest, PrimType dtype, Operand &src, PrimType stype, AArch64isa::MemoryOrdering memOrd, bool isDirect); MOperator PickJmpInsn(Opcode brOp, Opcode cmpOp, bool isFloat, bool isSigned) const; - Operand &GetZeroOpnd(uint32 size) override; bool IsFrameReg(const RegOperand &opnd) const override; + bool IsSPOrFP(const RegOperand &opnd) const override; + bool IsReturnReg(const RegOperand &opnd) const override; + bool IsSaveReg(const RegOperand &reg, MIRType &mirType, BECommon &beCommon) const override; + RegOperand &GetZeroOpnd(uint32 size) override;
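The hash*OpndTable members above are uniquing caches: an operand is built once per key in the function's memory pool and shared from then on (compare HashMemOpnd earlier in this class), so later passes can treat pointer identity as operand identity. A toy sketch of the GetOrCreate idiom with illustrative types; the real tables are MapleUnorderedMaps keyed on operand hashes, and allocation goes through a MemPool rather than bare new:

#include <cstdint>
#include <unordered_map>

struct OfstOpnd {
  int64_t value;
  uint32_t size;
};

class OpndCache {
 public:
  // Returns the canonical operand for (offset, size), creating it on first use.
  OfstOpnd &GetOrCreateOfstOpnd(int64_t offset, uint32_t size) {
    uint64_t key = (static_cast<uint64_t>(offset) << 8) | size;  // toy hash key
    auto it = table.find(key);
    if (it != table.end()) {
      return *it->second;
    }
    auto *opnd = new OfstOpnd{offset, size};  // arena/MemPool-allocated in maplebe
    table.emplace(key, opnd);
    return *opnd;
  }
 private:
  std::unordered_map<uint64_t, OfstOpnd*> table;
};

PrimType GetOperandTy(bool isIntty, uint32 dsize, bool isSigned) const { ASSERT(!isSigned || isIntty, ""); return (isIntty ? ((dsize == k64BitSize) ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ?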
PTY_i32 : PTY_u32)) @@ -737,34 +828,34 @@ class AArch64CGFunc : public CGFunc { } void CreateCallStructParamPassByStack(int32 symSize, const MIRSymbol *sym, RegOperand *addrOpnd, int32 baseOffset); - AArch64RegOperand *SelectParmListDreadAccessField(const MIRSymbol &sym, FieldID fieldID, const CCLocInfo &ploc, - int32 offset, uint32 parmNum); - void CreateCallStructParamPassByReg(regno_t reg, MemOperand &memOpnd, AArch64ListOperand &srcOpnds, + RegOperand *SelectParmListDreadAccessField(const MIRSymbol &sym, FieldID fieldID, const CCLocInfo &ploc, + int32 offset, uint32 parmNum); + void CreateCallStructParamPassByReg(regno_t reg, MemOperand &memOpnd, ListOperand &srcOpnds, fpParamState state); void CreateCallStructParamMemcpy(const MIRSymbol *sym, RegOperand *addropnd, uint32 structSize, int32 copyOffset, int32 fromOffset); - AArch64RegOperand *CreateCallStructParamCopyToStack(uint32 numMemOp, const MIRSymbol *sym, RegOperand *addrOpd, - int32 copyOffset, int32 fromOffset, const CCLocInfo &ploc); + RegOperand *CreateCallStructParamCopyToStack(uint32 numMemOp, const MIRSymbol *sym, RegOperand *addrOpd, + int32 copyOffset, int32 fromOffset, const CCLocInfo &ploc); void SelectParmListDreadSmallAggregate(const MIRSymbol &sym, MIRType &structType, - AArch64ListOperand &srcOpnds, + ListOperand &srcOpnds, int32 offset, AArch64CallConvImpl &parmLocator, FieldID fieldID); - void SelectParmListIreadSmallAggregate(const IreadNode &iread, MIRType &structType, AArch64ListOperand &srcOpnds, + void SelectParmListIreadSmallAggregate(const IreadNode &iread, MIRType &structType, ListOperand &srcOpnds, int32 offset, AArch64CallConvImpl &parmLocator); void SelectParmListDreadLargeAggregate(const MIRSymbol &sym, MIRType &structType, - AArch64ListOperand &srcOpnds, + ListOperand &srcOpnds, AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, int32 fromOffset); - void SelectParmListIreadLargeAggregate(const IreadNode &iread, MIRType &structType, AArch64ListOperand &srcOpnds, + void SelectParmListIreadLargeAggregate(const IreadNode &iread, MIRType &structType, ListOperand &srcOpnds, AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, int32 fromOffset); void CreateCallStructMemcpyToParamReg(MIRType &structType, int32 structCopyOffset, AArch64CallConvImpl &parmLocator, - AArch64ListOperand &srcOpnds); - void SelectParmListForAggregate(BaseNode &argExpr, AArch64ListOperand &srcOpnds, AArch64CallConvImpl &parmLocator, + ListOperand &srcOpnds); + void SelectParmListForAggregate(BaseNode &argExpr, ListOperand &srcOpnds, AArch64CallConvImpl &parmLocator, int32 &structCopyOffset); size_t SelectParmListGetStructReturnSize(StmtNode &naryNode); void SelectParmListPreprocessLargeStruct(BaseNode &argExpr, int32 &structCopyOffset); void SelectParmListPreprocess(const StmtNode &naryNode, size_t start); - void SelectParmList(StmtNode &naryNode, AArch64ListOperand &srcOpnds, bool isCallNative = false); + void SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, bool isCallNative = false); Operand *SelectClearStackCallParam(const AddrofNode &expr, int64 &offsetValue); - void SelectClearStackCallParmList(const StmtNode &naryNode, AArch64ListOperand &srcOpnds, + void SelectClearStackCallParmList(const StmtNode &naryNode, ListOperand &srcOpnds, std::vector &stackPostion); void SelectRem(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType, bool isSigned, bool is64Bits); void SelectCvtInt2Int(const BaseNode *parent, Operand *&resOpnd, Operand *opnd0, PrimType fromType, PrimType toType); @@ -794,10 
+885,17 @@ class AArch64CGFunc : public CGFunc { LabelOperand &targetOpnd, Operand &opnd0); void GenCVaStartIntrin(RegOperand &opnd, uint32 stkSize); void SelectCVaStart(const IntrinsiccallNode &intrnNode); + void SelectCAtomicStoreN(const IntrinsiccallNode &intrinsiccallNode); + void SelectCSyncLockRelease(const IntrinsiccallNode &intrinsiccallNode, PrimType primType); + void SelectAtomicStore(Operand &srcOpnd, Operand &addrOpnd, PrimType primType, AArch64isa::MemoryOrdering memOrder); + void SelectAddrofThreadLocal(Operand &result, StImmOperand &stImm); + void SelectCTlsLocalDesc(Operand &result, StImmOperand &stImm); + void SelectCTlsGlobalDesc(Operand &result, StImmOperand &stImm); void SelectMPLClinitCheck(const IntrinsiccallNode&); void SelectMPLProfCounterInc(const IntrinsiccallNode &intrnNode); + void SelectArithmeticAndLogical(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType, Opcode op); - Operand *SelectAArch64CSyncFetch(const maple::IntrinsicopNode &intrinopNode, PrimType pty, bool CalculBefore, bool isAdd); + Operand *SelectAArch64CSyncFetch(const maple::IntrinsicopNode &intrinsicopNode, Opcode op, bool fetchBefore); /* Helper functions for translating complex Maple IR instructions/inrinsics */ void SelectDassign(StIdx stIdx, FieldID fieldId, PrimType rhsPType, Operand &opnd0); LabelIdx CreateLabeledBB(StmtNode &stmt); @@ -806,8 +904,8 @@ class AArch64CGFunc : public CGFunc { /* Translation for load-link store-conditional, and atomic RMW operations. */ MemOrd OperandToMemOrd(Operand &opnd) const; MOperator PickLoadStoreExclInsn(uint32 byteP2Size, bool store, bool acqRel) const; - RegOperand *SelectLoadExcl(PrimType valPrimType, AArch64MemOperand &loc, bool acquire); - RegOperand *SelectStoreExcl(PrimType valPty, AArch64MemOperand &loc, RegOperand &newVal, bool release); + RegOperand *SelectLoadExcl(PrimType valPrimType, MemOperand &loc, bool acquire); + RegOperand *SelectStoreExcl(PrimType valPty, MemOperand &loc, RegOperand &newVal, bool release); MemOperand *GetPseudoRegisterSpillMemoryOperand(PregIdx i) override; void ProcessLazyBinding() override; @@ -817,9 +915,9 @@ class AArch64CGFunc : public CGFunc { bool IsStoreMop(MOperator mOp) const; bool IsImmediateValueInRange(MOperator mOp, int64 immVal, bool is64Bits, bool isIntactIndexed, bool isPostIndexed, bool isPreIndexed) const; - Insn &GenerateGlobalLongCallAfterInsn(const MIRSymbol &func, AArch64ListOperand &srcOpnds, + Insn &GenerateGlobalLongCallAfterInsn(const MIRSymbol &func, ListOperand &srcOpnds, bool isCleanCall = false); - Insn &GenerateLocalLongCallAfterInsn(const MIRSymbol &func, AArch64ListOperand &srcOpnds, + Insn &GenerateLocalLongCallAfterInsn(const MIRSymbol &func, ListOperand &srcOpnds, bool isCleanCall = false); bool IsDuplicateAsmList(const MIRSymbol &sym) const; RegOperand *CheckStringIsCompressed(BB &bb, RegOperand &str, int32 countOffset, PrimType countPty, diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_color_ra.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_color_ra.h index efab364ade18bd8b3c6a5da8202862a97d2e5cfd..6074b524892d36662cdeb2d19f03c73c128853d9 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_color_ra.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_color_ra.h @@ -18,6 +18,7 @@ #include "aarch64_operand.h" #include "aarch64_insn.h" #include "aarch64_abi.h" +#include "aarch64_cgfunc.h" #include "loop.h" #include "cg_dominance.h" #include "cg_pre.h" @@ -672,7 +673,7 @@ class LiveRange { } bool IsRematerializable(AArch64CGFunc 
&cgFunc, uint8 rematLevel) const; - std::vector Rematerialize(AArch64CGFunc *cgFunc, AArch64RegOperand &regOp); + std::vector Rematerialize(AArch64CGFunc *cgFunc, RegOperand &regOp); private: regno_t regNO = 0; @@ -1272,7 +1273,7 @@ class GraphColorRegAllocator : public RegAllocator { const MapleMap &GetLrMap() const { return lrMap; } - Insn *SpillOperand(Insn &insn, const Operand &opnd, bool isDef, AArch64RegOperand &phyOpnd, bool forCall = false); + Insn *SpillOperand(Insn &insn, const Operand &opnd, bool isDef, RegOperand &phyOpnd, bool forCall = false); private: struct SetLiveRangeCmpFunc { bool operator()(const LiveRange *lhs, const LiveRange *rhs) const { @@ -1366,7 +1367,7 @@ class GraphColorRegAllocator : public RegAllocator { MemOperand *GetSpillOrReuseMem(LiveRange &lr, uint32 regSize, bool &isOutOfRange, Insn &insn, bool isDef); void SpillOperandForSpillPre(Insn &insn, const Operand &opnd, RegOperand &phyOpnd, uint32 spillIdx, bool needSpill); void SpillOperandForSpillPost(Insn &insn, const Operand &opnd, - AArch64RegOperand &phyOpnd, uint32 spillIdx, bool needSpill); + RegOperand &phyOpnd, uint32 spillIdx, bool needSpill); MemOperand *GetConsistentReuseMem(const uint64 *conflict, const std::set &usedMemOpnd, uint32 size, RegType regType); MemOperand *GetCommonReuseMem(const uint64 *conflict, const std::set &usedMemOpnd, uint32 size, diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_dependence.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_dependence.h index 61a7ef038e0f37c4aecdb93662999df1194e15d9..d1e0e762d2bd648c1eeb6c676700f3b280e8c969 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_dependence.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_dependence.h @@ -46,12 +46,12 @@ class AArch64DepAnalysis : public DepAnalysis { void BuildDepsDefReg(Insn &insn, regno_t regNO) override; void BuildDepsAmbiInsn(Insn &insn) override; void BuildDepsMayThrowInsn(Insn &insn) override; - bool NeedBuildDepsMem(const AArch64MemOperand &memOpnd, - const AArch64MemOperand *nextMemOpnd, const Insn &memInsn) const; + bool NeedBuildDepsMem(const MemOperand &memOpnd, + const MemOperand *nextMemOpnd, const Insn &memInsn) const; void BuildDepsUseMem(Insn &insn, MemOperand &memOpnd) override; void BuildDepsDefMem(Insn &insn, MemOperand &memOpnd) override; - void BuildAntiDepsDefStackMem(Insn &insn, AArch64MemOperand &memOpnd, const AArch64MemOperand *nextMemOpnd); - void BuildOutputDepsDefStackMem(Insn &insn, AArch64MemOperand &memOpnd, const AArch64MemOperand *nextMemOpnd); + void BuildAntiDepsDefStackMem(Insn &insn, MemOperand &memOpnd, const MemOperand *nextMemOpnd); + void BuildOutputDepsDefStackMem(Insn &insn, MemOperand &memOpnd, const MemOperand *nextMemOpnd); void BuildDepsMemBar(Insn &insn) override; void BuildDepsSeparator(DepNode &newSepNode, MapleVector &nodes) override; void BuildDepsControlAll(DepNode &depNode, const MapleVector &nodes) override; @@ -67,9 +67,8 @@ class AArch64DepAnalysis : public DepAnalysis { bool IsFrameReg(const RegOperand&) const override; private: - AArch64MemOperand *GetNextMemOperand(const Insn &insn, - const AArch64MemOperand &aarchMemOpnd) const; - void BuildMemOpndDependency(Insn &insn, Operand &opnd, const AArch64OpndProp &regProp); + MemOperand *GetNextMemOperand(const Insn &insn, const MemOperand &aarchMemOpnd) const; + void BuildMemOpndDependency(Insn &insn, Operand &opnd, const OpndProp &regProp); void BuildOpndDependency(Insn &insn); void BuildSpecialInsnDependency(Insn &insn, DepNode &depNode, const MapleVector &nodes); void SeperateDependenceGraph(MapleVector &nodes, uint32 &nodeSum); @@ -78,7 +77,7 @@ class AArch64DepAnalysis : public DepAnalysis { void BuildMayThrowInsnDependency(Insn &insn); void UpdateRegUseAndDef(Insn &insn, DepNode &depNode, MapleVector &nodes); void UpdateStackAndHeapDependency(DepNode &depNode, Insn &insn, const Insn &locInsn); - AArch64MemOperand *BuildNextMemOperandByByteSize(const AArch64MemOperand &aarchMemOpnd, uint32 byteSize) const; + MemOperand *BuildNextMemOperandByByteSize(const MemOperand &aarchMemOpnd, uint32 byteSize) const; void AddDependence4InsnInVectorByType(MapleVector &insns, Insn &insn, const DepType &type); void AddDependence4InsnInVectorByTypeAndCmp(MapleVector &insns, Insn &insn, const DepType &type); void ReplaceDepNodeWithNewInsn(DepNode &firstNode, DepNode &secondNode, Insn& newInsn, bool isFromClinit) const; diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_ebo.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_ebo.h index 638f09ffde66f495a5129295d48abdf29f64c699..18e6413fb65a006ede865ee843064c35d997221e 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_ebo.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_ebo.h @@ -63,6 +63,9 @@ class AArch64Ebo : public Ebo { bool IsSameRedefine(BB &bb, Insn &insn, OpndInfo &opndInfo) const override; bool ResIsNotDefAndUse(Insn &insn) const override; bool LiveOutOfBB(const Operand &opnd, const BB &bb) const override; + bool IsInvalidReg(const RegOperand &opnd) const override; + bool IsZeroRegister(const Operand &opnd) const override; + bool IsConstantImmOrReg(const Operand &opnd) const override; bool OperandLiveAfterInsn(const RegOperand &regOpnd, Insn &insn); bool ValidPatternForCombineExtAndLoad(OpndInfo *prevOpndInfo, Insn *insn, MOperator newMop, MOperator oldMop, const RegOperand& opnd); @@ -70,7 +73,6 @@ class AArch64Ebo : public Ebo { private: /* The number of elements in callerSaveRegTable must be less than 45.
*/ static constexpr int32 kMaxCallerSaveReg = 45; - bool IsZeroRegister(const Operand &opnd) const; MOperator ExtLoadSwitchBitSize(MOperator lowMop) const; bool CheckCondCode(const CondOperand &cond) const; bool CombineMultiplyAdd(Insn *insn, const Insn *prevInsn, InsnInfo *insnInfo, Operand *addOpnd, @@ -78,7 +80,7 @@ class AArch64Ebo : public Ebo { bool CheckCanDoMadd(Insn *insn, OpndInfo *opndInfo, int32 pos, bool is64bits, bool isFp); bool CombineMultiplySub(Insn *insn, OpndInfo *opndInfo, bool is64bits, bool isFp); bool CombineMultiplyNeg(Insn *insn, OpndInfo *opndInfo, bool is64bits, bool isFp); - bool SimplifyBothConst(BB &bb, Insn &insn, const AArch64ImmOperand &immOperand0, const AArch64ImmOperand &immOperand1, + bool SimplifyBothConst(BB &bb, Insn &insn, const ImmOperand &immOperand0, const ImmOperand &immOperand1, uint32 opndSize); AArch64CC_t GetReverseCond(const CondOperand &cond) const; bool CombineLsrAnd(Insn &insn, const OpndInfo &opndInfo, bool is64bits, bool isFp); diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_emitter.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_emitter.h index c6c8d14d2762b7aa11f7a389d92060151c0ae6d6..15f1a31f1be50c189be2761c487338c953f8a613 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_emitter.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_emitter.h @@ -34,6 +34,34 @@ class AArch64AsmEmitter : public AsmEmitter { void EmitJavaInsnAddr(FuncEmitInfo &funcEmitInfo) override; void RecordRegInfo(FuncEmitInfo &funcEmitInfo); void Run(FuncEmitInfo &funcEmitInfo) override; + + private: + /* cfi & dbg need target info ? */ + void EmitAArch64CfiInsn(Emitter &emitter, Insn &insn); + void EmitAArch64DbgInsn(Emitter &emitter, Insn &insn); + + void EmitAArch64Insn(Emitter &emitter, Insn &insn); + void EmitClinit(Emitter &emitter, Insn &insn) const; + void EmitAdrpLdr(Emitter &emitter, Insn &insn) const; + void EmitCounter(Emitter &emitter, Insn &insn) const; + void EmitInlineAsm(Emitter &emitter, Insn &insn) const; + void EmitClinitTail(Emitter &emitter, Insn &insn) const; + void EmitLazyLoad(Emitter &emitter, Insn &insn) const; + void EmitAdrpLabel(Emitter &emitter, Insn &insn) const; + void EmitLazyLoadStatic(Emitter &emitter, Insn &insn) const; + void EmitArrayClassCacheLoad(Emitter &emitter, Insn &insn) const; + void EmitGetAndAddInt(Emitter &emitter, Insn &insn) const; + void EmitGetAndSetInt(Emitter &emitter, Insn &insn) const; + void EmitCompareAndSwapInt(Emitter &emitter, Insn &insn) const; + void EmitStringIndexOf(Emitter &emitter, Insn &insn) const; + void EmitLazyBindingRoutine(Emitter &emitter, Insn &insn) const; + void EmitCheckThrowPendingException(Emitter &emitter, Insn &insn) const; + void EmitCTlsDescRel(Emitter &emitter, Insn &insn) const; + void EmitCTlsDescCall(Emitter &emitter, Insn &insn) const; + void EmitSyncLockTestSet(Emitter &emitter, Insn &insn) const; + + void PrepareVectorOperand(RegOperand *regOpnd, uint32 &compositeOpnds, Insn &insn) const; + bool CheckInsnRefField(Insn &insn, size_t opndIndex) const; }; } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_global.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_global.h index 6b3059f170191caac0e61df0ef0b9da559672269..0e41f70039af3570833c72f71234b691b2fcc15a 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_global.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_global.h @@ -102,9 +102,7 @@ class ForwardPropPattern : public OptimizePattern { */ class BackPropPattern : public 
OptimizePattern { public: - explicit BackPropPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) { - globalProp = cgFunc.GetMirModule().IsCModule() && !cgFunc.IsAfterRegAlloc(); - } + explicit BackPropPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {} ~BackPropPattern() override = default; bool CheckCondition(Insn &insn) final; void Optimize(Insn &insn) final; @@ -389,8 +387,12 @@ class SameDefPattern : public OptimizePattern { void Init() final; private: - bool IsSameDef(Insn &currInsn, Insn &sameInsn); - bool SrcRegIsRedefined(Insn &currInsn, Insn &sameInsn, regno_t regNo); + bool IsSameDef(); + bool SrcRegIsRedefined(regno_t regNo); + bool IsSameOperand(Operand &opnd0, Operand &opnd1); + + Insn *currInsn; + Insn *sameInsn; }; /* diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_insn.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_insn.h index ce76201c0778ae6da6211a84dfa96783d9a591c9..fb71afd639b9a06515e6de88af58c26113abb168 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_insn.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_insn.h @@ -65,8 +65,6 @@ class AArch64Insn : public Insn { return (mOp == MOP_pseudo_ret_int || mOp == MOP_pseudo_ret_float); } - bool IsUseSpecReg() const override; - bool OpndIsDef(uint32 id) const override; bool OpndIsUse(uint32 id) const override; bool IsEffectiveCopy() const override { @@ -152,9 +150,7 @@ class AArch64Insn : public Insn { bool IsDMBInsn() const override; - void PrepareVectorOperand(AArch64RegOperand *regOpnd, uint32 &compositeOpnds) const; - - void Emit(const CG&, Emitter&) const override; + void PrepareVectorOperand(RegOperand *regOpnd, uint32 &compositeOpnds) const; void Dump() const override; @@ -174,8 +170,6 @@ class AArch64Insn : public Insn { MOperator FlipConditionOp(MOperator flippedOp, uint32 &targetIdx) override; - bool CheckRefField(size_t opndIndex, bool isEmit) const; - uint8 GetLoadStoreSize() const; bool IsRegDefined(regno_t regNO) const override; @@ -188,21 +182,6 @@ class AArch64Insn : public Insn { private: void CheckOpnd(const Operand &opnd, const OpndProp &mopd) const; - void EmitClinit(const CG&, Emitter&) const; - void EmitAdrpLdr(const CG&, Emitter&) const; - void EmitLazyBindingRoutine(Emitter&) const; - void EmitClinitTail(Emitter&) const; - void EmitAdrpLabel(Emitter&) const; - void EmitLazyLoad(Emitter&) const; - void EmitLazyLoadStatic(Emitter&) const; - void EmitArrayClassCacheLoad(Emitter&) const; - void EmitCheckThrowPendingException(const CG&, Emitter&) const; - void EmitGetAndAddInt(Emitter &emitter) const; - void EmitGetAndSetInt(Emitter &emitter) const; - void EmitCompareAndSwapInt(Emitter &emitter) const; - void EmitStringIndexOf(Emitter &emitter) const; - void EmitCounter(const CG&, Emitter&) const; - void EmitInlineAsm(const CG&, Emitter&) const; }; struct VectorRegSpec { @@ -292,6 +271,93 @@ class AArch64cleancallInsn : public AArch64Insn { private: int32 refSkipIndex; }; + +class OpndEmitVisitor : public OperandVisitorBase, + public OperandVisitors { + public: + OpndEmitVisitor(Emitter &asmEmitter): emitter(asmEmitter) {} + virtual ~OpndEmitVisitor() = default; + protected: + Emitter &emitter; +}; + +class A64OpndEmitVisitor : public OpndEmitVisitor { + public: + A64OpndEmitVisitor(Emitter &emitter, const OpndProp *operandProp) + : OpndEmitVisitor(emitter), + opndProp(operandProp) {} + ~A64OpndEmitVisitor() override = default; + + private: + void Visit(RegOperand *v) final; + void Visit(ImmOperand *v) final; + void Visit(MemOperand *v) final; + void Visit(CondOperand 
*v) final; + void Visit(StImmOperand *v) final; + void Visit(BitShiftOperand *v) final; + void Visit(ExtendShiftOperand *v) final; + void Visit(LabelOperand *v) final; + void Visit(FuncNameOperand *v) final; + void Visit(LogicalShiftLeftOperand *v) final; + void Visit(CommentOperand *v) final; + void Visit(OfstOperand *v) final; + void Visit(ListOperand *v) final; + + void EmitVectorOperand(RegOperand &v); + void EmitIntReg(RegOperand &v, uint8 opndSz = kMaxSimm32); + + const OpndProp *opndProp; +}; + +/* TODO: Delete */ +class OpndTmpDumpVisitor : public OperandVisitorBase, + public OperandVisitors { + public: + OpndTmpDumpVisitor() {} + virtual ~OpndTmpDumpVisitor() = default; +}; + +class A64OpndDumpVisitor : public OpndTmpDumpVisitor { + public: + A64OpndDumpVisitor() : OpndTmpDumpVisitor() {} + ~A64OpndDumpVisitor() override = default; + + private: + void Visit(RegOperand *v) final; + void Visit(ImmOperand *v) final; + void Visit(MemOperand *v) final; + void Visit(CondOperand *v) final; + void Visit(StImmOperand *v) final; + void Visit(BitShiftOperand *v) final; + void Visit(ExtendShiftOperand *v) final; + void Visit(LabelOperand *v) final; + void Visit(FuncNameOperand *v) final; + void Visit(LogicalShiftLeftOperand *v) final; + void Visit(PhiOperand *v) final; +}; + } /* namespace maplebe */ #endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_INSN_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_isa.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_isa.h index 9c4e7ade380c89027a9c1e1b0bba4320b178d477..6dac1c2d0604b76c6f607bddbd7f366114a2b3fa 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_isa.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_isa.h @@ -15,8 +15,7 @@ #ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ISA_H #define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ISA_H -#include "operand.h" -#include "mad.h" +#include "isa.h"
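The A64OpndEmitVisitor/A64OpndDumpVisitor classes added above replace the per-operand Emit and Dump virtuals deleted elsewhere in this patch: operand classes keep only dispatch, and each target contributes one visitor with a Visit overload per operand kind, so adding a target means adding a visitor instead of touching every operand class. A compact sketch of that double-dispatch shape, with stand-in types rather than the real OperandVisitorBase/OperandVisitors machinery:

#include <iostream>

struct RegOperand;
struct ImmOperand;

// Target-neutral visitor interface: one Visit per operand kind.
struct OpndVisitor {
  virtual ~OpndVisitor() = default;
  virtual void Visit(const RegOperand &opnd) = 0;
  virtual void Visit(const ImmOperand &opnd) = 0;
};

struct RegOperand {
  int regNo;
  void Accept(OpndVisitor &v) const { v.Visit(*this); }
};
struct ImmOperand {
  long val;
  void Accept(OpndVisitor &v) const { v.Visit(*this); }
};

// AArch64-specific emission lives in the visitor, not in the operands.
struct A64EmitVisitor : OpndVisitor {
  void Visit(const RegOperand &opnd) override { std::cout << "x" << opnd.regNo; }
  void Visit(const ImmOperand &opnd) override { std::cout << "#" << opnd.val; }
};

#define DEFINE_MOP(op, ...)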
op, enum AArch64MOP_t : maple::uint32 { @@ -33,8 +32,8 @@ namespace maplebe { constexpr int kAarch64StackPtrAlignment = 16; constexpr int32 kOffsetAlign = 8; -constexpr uint32 kIntregBytelen = 8; /* 64-bit */ -constexpr uint32 kFpregBytelen = 8; /* only lower 64 bits are used */ +constexpr uint32 kIntregBytelen = 8; /* 64-bit */ +constexpr uint32 kFpregBytelen = 8; /* only lower 64 bits are used */ constexpr int kSizeOfFplr = 16; enum StpLdpImmBound : int { @@ -48,6 +47,7 @@ enum StrLdrPerPostBound : int64 { kStrLdrPerPostLowerBound = -256, kStrLdrPerPostUpperBound = 255 }; + constexpr int64 kStrAllLdrAllImmLowerBound = 0; enum StrLdrImmUpperBound : int64 { kStrLdrImm32UpperBound = 16380, /* must be a multiple of 4 */ @@ -144,169 +144,23 @@ static inline RegType GetRegType(AArch64reg r) { enum MemoryOrdering : uint32 { kMoNone = 0, - kMoAcquire = (1ULL << 0), /* ARMv8 */ - kMoAcquireRcpc = (1ULL << 1), /* ARMv8.3 */ - kMoLoacquire = (1ULL << 2), /* ARMv8.1 */ - kMoRelease = (1ULL << 3), /* ARMv8 */ - kMoLorelease = (1ULL << 4) /* ARMv8.1 */ -}; -} /* namespace AArch64isa */ - -enum RegPropState : uint32 { - kRegPropUndef = 0, - kRegPropDef = 0x1, - kRegPropUse = 0x2 -}; -enum RegAddress : uint32 { - kRegHigh = 0x4, - kRegLow = 0x8 -}; -constexpr uint32 kMemLow12 = 0x10; -constexpr uint32 kLiteralLow12 = kMemLow12; -constexpr uint32 kPreInc = 0x20; -constexpr uint32 kPostInc = 0x40; -constexpr uint32 kLoadLiteral = 0x80; -constexpr uint32 kVector = 0x100; - -class RegProp { - public: - RegProp(RegType t, AArch64reg r, uint32 d) : regType(t), physicalReg(r), defUse(d) {} - virtual ~RegProp() = default; - const RegType &GetRegType() const { - return regType; - } - const AArch64reg &GetPhysicalReg() const { - return physicalReg; - } - uint32 GetDefUse() const { - return defUse; - } - private: - RegType regType; - AArch64reg physicalReg; - uint32 defUse; /* used for register use/define and other properties of other operand */ -}; - -class X86OpndProp : public OpndProp { - -}; - -class AArch64OpndProp : public OpndProp { - public: - AArch64OpndProp(Operand::OperandType t, RegProp p, uint8 s) : opndType(t), regProp(p), size(s) {} - virtual ~AArch64OpndProp() = default; - Operand::OperandType GetOperandType() const { - return opndType; - } - - const RegProp &GetRegProp() const { - return regProp; - } - - bool IsPhysicalRegister() const { - return opndType == Operand::kOpdRegister && regProp.GetPhysicalReg() < kMaxRegNum; - } - - bool IsRegister() const { - return opndType == Operand::kOpdRegister; - } - - bool IsRegDef() const { - return opndType == Operand::kOpdRegister && (regProp.GetDefUse() & kRegPropDef); - } - - bool IsRegUse() const { - return opndType == Operand::kOpdRegister && (regProp.GetDefUse() & kRegPropUse); - } - - bool IsMemLow12() const { - return opndType == Operand::kOpdMem && (regProp.GetDefUse() & kMemLow12); - } - - bool IsLiteralLow12() const { - return opndType == Operand::kOpdStImmediate && (regProp.GetDefUse() & kLiteralLow12); - } - - bool IsDef() const { - return regProp.GetDefUse() & kRegPropDef; - } - - bool IsUse() const { - return regProp.GetDefUse() & kRegPropUse; - } - - bool IsLoadLiteral() const { - return regProp.GetDefUse() & kLoadLiteral; - } - - uint8 GetSize() const { - return size; - } - - uint32 GetOperandSize() const { - return static_cast(size); - } - - bool IsVectorOperand() const { - return regProp.GetDefUse() & kVector; - } - - void SetContainImm() { - isContainImm = true; - } - - bool IsContainImm() const { - return isContainImm; - } - - protected: 
- bool isContainImm = false; - - private: - Operand::OperandType opndType; - RegProp regProp; - uint8 size; -}; - -/* - * Operand which might include immediate value. - * function ptr returns whether a immediate is legal in specific target - */ -class AArch64ImmOpndProp : public AArch64OpndProp { - public: - AArch64ImmOpndProp(Operand::OperandType t, const RegProp &p, uint8 s, const std::function f) - : AArch64OpndProp(t, p, s), - validFunc(f) { - SetContainImm(); - } - virtual ~AArch64ImmOpndProp() = default; - - bool IsValidImmOpnd(int64 value) const { - CHECK_FATAL(validFunc, " Have not set valid function yet in AArch64ImmOpndProp"); - return validFunc(value); - } - - private: - std::function validFunc; -}; - -struct X86MD { - + kMoAcquire = (1ULL << 0), /* ARMv8 */ + kMoAcquireRcpc = (1ULL << 1), /* ARMv8.3 */ + kMoLoacquire = (1ULL << 2), /* ARMv8.1 */ + kMoRelease = (1ULL << 3), /* ARMv8 */ + kMoLorelease = (1ULL << 4) /* ARMv8.1 */ }; +} /* namespace AArch64isa */ struct AArch64MD { MOperator opc; std::vector operand; uint64 properties; - LatencyType latencyType; + uint32 latencyType; const std::string &name; const std::string &format; uint32 atomicNum; /* indicate how many asm instructions it will emit. */ - bool UseSpecReg() const { - return properties & USESPECREG; - } - uint32 GetAtomicNum() const { return atomicNum; } @@ -327,9 +181,9 @@ struct AArch64MD { return properties & CANTHROW; } - AArch64OpndProp *GetOperand(int nth) const { + OpndProp *GetOperand(int nth) const { ASSERT(nth < operand.size(), "index of Operand should not be bigger than MaxOperandNum"); - return static_cast(operand[nth]); + return operand[nth]; } uint32 GetOperandSize() const { @@ -417,7 +271,11 @@ struct AArch64MD { return properties & ISVECTOR; } - LatencyType GetLatencyType() const { + bool IsNop() const { + return properties & ISNOP; + } + + uint32 GetLatencyType() const { return latencyType; } }; @@ -433,6 +291,6 @@ inline void GetNextOffsetCalleeSaved(int &offset) { } MOperator GetMopPair(MOperator mop); -} /* namespace maplebe */ +} /* namespace maplebe */ -#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ISA_H */ +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ISA_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def b/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def index 1ef6c323d9e1554d96d23a696883ef3369598e2b..54c3294260177914bfa4d6e6c3136d16334c34b1 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def @@ -50,9 +50,10 @@ DEFINE_MOP(MOP_xadrp, {mopdReg64ID,mopdLiteral},ISLOADADDR,kLtShift,"adrp","0,1" /* MOP_xadr */ DEFINE_MOP(MOP_xadri64, {mopdReg64ID,mopdImm64},ISLOADADDR,kLtShift,"adr","0,1",1) /* MOP_xadrpl12 */ -DEFINE_MOP(MOP_xadrpl12, {mopdReg64ID,mopdReg64IS,mopdLiteralL12},0,kLtAlu,"add","0,1,2",1) +DEFINE_MOP(MOP_xadrpl12, {mopdReg64ID,mopdReg64IS,mopdLiteralL12},ISLOADADDR,kLtAlu,"add","0,1,2",1) -/* MOP_xaddrrr AARCH64 Arithmetic: add */ +/* AARCH64 Arithmetic: add */ +/* MOP_xaddrrr */ DEFINE_MOP(MOP_xaddrrr, {mopdReg64ID,mopdReg64IS,mopdReg64IS},0,kLtAlu,"add","0,1,2",1) /* MOP_xaddrrrs */ DEFINE_MOP(MOP_xaddrrrs, {mopdReg64ID,mopdReg64IS,mopdReg64IS,mopdBitShift64},0,kLtAluShift,"add","0,1,2,3",1) @@ -289,6 +290,13 @@ DEFINE_MOP(MOP_vnotui, {mopdReg64VD,mopdImm8},ISVECTOR,kLtFpalu,"mvni","0,1",1) /* MOP_vnotvi */ DEFINE_MOP(MOP_vnotvi, {mopdReg128VD,mopdImm8},ISVECTOR,kLtFpalu,"mvni","0,1",1) +/* MOP_xrevrr */ +DEFINE_MOP(MOP_xrevrr, 
{mopdReg64ID,mopdReg64IS},0,kLtAlu,"rev","0,1",1) +/* MOP_wrevrr */ +DEFINE_MOP(MOP_wrevrr, {mopdReg32ID,mopdReg32IS},0,kLtAlu,"rev","0,1",1) +/* MOP_wrevrr16 */ +DEFINE_MOP(MOP_wrevrr16, {mopdReg32ID,mopdReg32IS},0,kLtAlu,"rev16","0,1",1) + /* MOP_wfmaxrrr */ DEFINE_MOP(MOP_wfmaxrrr, {mopdReg32FD,mopdReg32FS,mopdReg32FS},0,kLtFpalu,"fmax","0,1,2",1) /* MOP_xfmaxrrr */ @@ -317,28 +325,28 @@ DEFINE_MOP(MOP_wmnegrrr, {mopdReg32ID,mopdReg32IS,mopdReg32IS},0,kLtMul,"mneg"," /* MOP_xmnegrrr */ DEFINE_MOP(MOP_xmnegrrr, {mopdReg64ID,mopdReg64IS,mopdReg64IS},0,kLtMul,"mneg","0,1,2",1) -/* MPO_wubfxrri5i5 */ +/* MOP_wubfxrri5i5 */ DEFINE_MOP(MOP_wubfxrri5i5, {mopdReg32ID,mopdReg32IS,mopdImm5,mopdImm5},0,kLtAluShift,"ubfx","0,1,2,3",1) -/* MPO_xubfxrri6i6 */ +/* MOP_xubfxrri6i6 */ DEFINE_MOP(MOP_xubfxrri6i6, {mopdReg64ID,mopdReg64IS,mopdImm6,mopdImm6},0,kLtAluShift,"ubfx","0,1,2,3",1) -/* MPO_wsbfxrri5i5 -- Signed Bitfield Extract */ +/* MOP_wsbfxrri5i5 -- Signed Bitfield Extract */ DEFINE_MOP(MOP_wsbfxrri5i5, {mopdReg32ID,mopdReg32IS,mopdImm5,mopdImm5},0,kLtAluShift,"sbfx","0,1,2,3",1) -/* MPO_xsbfxrri6i6 */ +/* MOP_xsbfxrri6i6 */ DEFINE_MOP(MOP_xsbfxrri6i6, {mopdReg64ID,mopdReg64IS,mopdImm6,mopdImm6},0,kLtAluShift,"sbfx","0,1,2,3",1) -/* MPO_wubfizrri5i5 -- Unsigned Bitfield Insert in Zero */ +/* MOP_wubfizrri5i5 -- Unsigned Bitfield Insert in Zero */ DEFINE_MOP(MOP_wubfizrri5i5, {mopdReg32ID,mopdReg32IS,mopdImm5,mopdImm5},0,kLtAluShift,"ubfiz","0,1,2,3",1) -/* MPO_xubfizrri6i6 */ +/* MOP_xubfizrri6i6 */ DEFINE_MOP(MOP_xubfizrri6i6, {mopdReg64ID,mopdReg64IS,mopdImm6,mopdImm6},0,kLtAluShift,"ubfiz","0,1,2,3",1) -/* MPO_xsbfizrri6i6 Signed Bitfield Insert in Zero */ +/* MOP_xsbfizrri6i6 Signed Bitfield Insert in Zero */ DEFINE_MOP(MOP_xsbfizrri6i6, {mopdReg64ID,mopdReg64IS,mopdImm6,mopdImm6},0,kLtAluShift,"sbfiz","0,1,2,3",1) -/* MPO_wbfirri5i5 -- Bitfield Insert */ -DEFINE_MOP(MPO_wbfirri5i5, {mopdReg32IDS,mopdReg32IS,mopdImm5,mopdImm5},ISMOVE|ISPARTDEF,kLtAluShift,"bfi","0,1,2,3",1) -/* MPO_xbfirri6i6 */ -DEFINE_MOP(MPO_xbfirri6i6, {mopdReg64IDS,mopdReg64IS,mopdImm6,mopdImm6},ISMOVE|ISPARTDEF,kLtAluShift,"bfi","0,1,2,3",1) +/* MOP_wbfirri5i5 -- Bitfield Insert */ +DEFINE_MOP(MOP_wbfirri5i5, {mopdReg32IDS,mopdReg32IS,mopdImm5,mopdImm5},ISMOVE|ISPARTDEF,kLtAluShift,"bfi","0,1,2,3",1) +/* MOP_xbfirri6i6 */ +DEFINE_MOP(MOP_xbfirri6i6, {mopdReg64IDS,mopdReg64IS,mopdImm6,mopdImm6},ISMOVE|ISPARTDEF,kLtAluShift,"bfi","0,1,2,3",1) /* MOP_xlslrri6,--- Logical Shift Left */ DEFINE_MOP(MOP_xlslrri6, {mopdReg64ID,mopdReg64IS,mopdImm6},0,kLtAluShift,"lsl","0,1,2",1) @@ -452,11 +460,50 @@ DEFINE_MOP(MOP_xbl, {mopdFuncName,mopdLISTS},ISCALL|CANTHROW,kLtBranch,"bl","0" /* MOP_xblr -- branch with link (call) to register; this is a special definition */ DEFINE_MOP(MOP_xblr, {mopdReg64IS,mopdLISTS},ISCALL|CANTHROW,kLtBranch,"blr","0",1) +/* Tls descriptor */ +/* + * add x0, #:tprel_hi12:symbol, lsl #12 + * add x0, #:tprel_lo12_nc:symbol + */ +DEFINE_MOP(MOP_tls_desc_rel, {mopdReg64ID,mopdReg64IS,mopdLiteral},0,kLtAlu,"tlsdescrel","0,1",2) + +/* + * .tlsdesccall symbol + * blr x0 + */ +DEFINE_MOP(MOP_tls_desc_call, {mopdReg64IS,mopdLiteral,mopdLISTS},ISCALL|CANTHROW,kLtBranch,"tlsdesccall","0",2) + +/* System register access */ +/* MOP_mrs */ +DEFINE_MOP(MOP_mrs, {mopdReg64ID,mopdSTRING},ISMOVE,kLtAlu,"mrs","0,1",1) + + /* Inline asm */ /* Number of instructions generated by inline asm is arbitrary. Use a large number here. */ /* asm string, output list, clobber list, input list, output constraint, input constraint, out reg prefix, in reg prefix */ DEFINE_MOP(MOP_asm, {mopdSTRING,mopdLISTD,mopdLISTD,mopdLISTS,mopdLISTS,mopdLISTS,mopdLISTS,mopdLISTS},CANTHROW|HASACQUIRE|HASRELEASE,kLtUndef,"asm","0,1,2,3",100) +/* c sync builtins */ +/* + * intrinsic_sync_lock_test_setI w0, w1, x2, w3, label1 + * label1: + * ldxr w0, [x2] + * stxr w1, w3, [x2] + * cbnz w1, label1 + * dmb ish + */ +DEFINE_MOP(MOP_sync_lock_test_setI, {mopdReg32ID,mopdReg32ID,mopdReg64IS,mopdReg32IS,mopdLabel},HASLOOP|CANTHROW,kLtBranch,"intrinsic_sync_lock_test_setI","0,1,2,3,4",5) + +/* + * intrinsic_sync_lock_test_setL x0, w1, x2, x3, label1 + * label1: + * ldxr x0, [x2] + * stxr w1, x3, [x2] + * cbnz w1, label1 + * dmb ish + */ +DEFINE_MOP(MOP_sync_lock_test_setL, {mopdReg64ID,mopdReg32ID,mopdReg64IS,mopdReg64IS,mopdLabel},HASLOOP|CANTHROW,kLtBranch,"intrinsic_sync_lock_test_setL","0,1,2,3,4",5) + /* AARCH64 LOADS */ /* MOP_wldrsb --- Load Register Signed Byte */ DEFINE_MOP(MOP_wldrsb, {mopdReg32ID,mopdMem8S},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1) @@ -503,13 +550,13 @@ DEFINE_MOP(MOP_qldp, {mopdReg128VD,mopdReg128VD,mopdMem128PS},ISLOAD|ISLOADPAIR| /* AARCH64 Load with Acquire semantics */ /* MOP_wldarb */ -DEFINE_MOP(MOP_wldarb, {mopdReg32ID,mopdMem8S},ISLOAD|HASACQUIRE|CANTHROW,kLtLoad1,"ldarb","0,1",1) +DEFINE_MOP(MOP_wldarb, {mopdReg32ID,mopdMem8S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldarb","0,1",1) /* MOP_wldarh */ -DEFINE_MOP(MOP_wldarh, {mopdReg32ID, mopdMem16S},ISLOAD|HASACQUIRE|CANTHROW,kLtLoad1,"ldarh","0,1",1) +DEFINE_MOP(MOP_wldarh, {mopdReg32ID, mopdMem16S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldarh","0,1",1) /* MOP_wldar */ -DEFINE_MOP(MOP_wldar, {mopdReg32ID,mopdMem32S},ISLOAD|HASACQUIRE|CANTHROW,kLtLoad1,"ldar","0,1",1) +DEFINE_MOP(MOP_wldar, {mopdReg32ID,mopdMem32S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldar","0,1",1) /* MOP_xldar */ -DEFINE_MOP(MOP_xldar, {mopdReg64ID,mopdMem64S},ISLOAD|HASACQUIRE|CANTHROW,kLtLoad1,"ldar","0,1",1) +DEFINE_MOP(MOP_xldar, {mopdReg64ID,mopdMem64S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldar","0,1",1) /* MOP_wmovkri16 */ DEFINE_MOP(MOP_wmovkri16, {mopdReg32IDS,mopdImm16,mopdLSL4},ISMOVE|ISPARTDEF,kLtShift,"movk","0,1,2",1) @@ -712,21 +759,21 @@ DEFINE_MOP(MOP_qstp, {mopdReg128VS,mopdReg128VS,mopdMem128PD},ISSTORE|ISSTOREPAI /* AARCH64 Store with Release semantics */ /* MOP_wstlrb -- Store-Release Register Byte */ -DEFINE_MOP(MOP_wstlrb, {mopdReg32IS,mopdMem8D},ISSTORE|HASRELEASE|CANTHROW,kLtStore1,"stlrb","0,1",1) +DEFINE_MOP(MOP_wstlrb, {mopdReg32IS,mopdMem8D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlrb","0,1",1) /* MOP_wstlrh -- Store-Release Register Halfword */ -DEFINE_MOP(MOP_wstlrh, {mopdReg32IS,mopdMem16D},ISSTORE|HASRELEASE|CANTHROW,kLtStore1,"stlrh","0,1",1) +DEFINE_MOP(MOP_wstlrh, {mopdReg32IS,mopdMem16D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlrh","0,1",1) /* MOP_wstlr -- Store-Release Register Word */ -DEFINE_MOP(MOP_wstlr, {mopdReg32IS,mopdMem32D},ISSTORE|HASRELEASE|CANTHROW,kLtStore1,"stlr","0,1",1) +DEFINE_MOP(MOP_wstlr, {mopdReg32IS,mopdMem32D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlr","0,1",1) /* MOP_xstlr -- Store-Release Register Double word */ -DEFINE_MOP(MOP_xstlr, {mopdReg64IS,mopdMem64D},ISSTORE|HASRELEASE|CANTHROW,kLtStore1,"stlr","0,1",1) +DEFINE_MOP(MOP_xstlr, {mopdReg64IS,mopdMem64D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlr","0,1",1)
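The expansion comments on the two MOP_sync_lock_test_set pseudo-ops spell out the whole contract: an ldxr/stxr retry loop, cbnz on the store-status register, then dmb ish, which is why atomicNum is 5. That is the shape of the GCC/Clang __sync_lock_test_and_set builtin, an atomic swap returning the old value; a hedged sketch of the source-level semantics these pseudo-ops lower (illustrative, not the compiler's actual lowering path):

#include <cstdint>

// Atomic swap: stores newVal to *addr and returns the previous value. The
// ldxr/stxr/cbnz loop in the .def comment implements exactly this, and the
// trailing dmb ish supplies the barrier.
uint32_t SyncLockTestSet32(uint32_t *addr, uint32_t newVal) {
  return __sync_lock_test_and_set(addr, newVal);  // GCC/Clang builtin
}
uint64_t SyncLockTestSet64(uint64_t *addr, uint64_t newVal) {
  return __sync_lock_test_and_set(addr, newVal);
}

/* AARCH64 Store exclusive with/without release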
semantics */ -DEFINE_MOP(MOP_wstxrb, {mopdReg32ID,mopdReg32IS,mopdMem8D, },ISSTORE|ISATOMIC|CANTHROW,kLtStore1,"stxrb","0,1,2",1) +DEFINE_MOP(MOP_wstxrb, {mopdReg32ID,mopdReg32IS,mopdMem8D},ISSTORE|ISATOMIC|CANTHROW,kLtStore1,"stxrb","0,1,2",1) DEFINE_MOP(MOP_wstxrh, {mopdReg32ID,mopdReg32IS,mopdMem16D},ISSTORE|ISATOMIC|CANTHROW,kLtStore1,"stxrh","0,1,2",1) DEFINE_MOP(MOP_wstxr, {mopdReg32ID,mopdReg32IS,mopdMem32D},ISSTORE|ISATOMIC|CANTHROW,kLtStore1,"stxr","0,1,2",1) DEFINE_MOP(MOP_xstxr, {mopdReg32ID,mopdReg64IS,mopdMem64D},ISSTORE|ISATOMIC|CANTHROW,kLtStore1,"stxr","0,1,2",1) -DEFINE_MOP(MOP_wstlxrb,{mopdReg32ID,mopdReg32IS,mopdMem8D, },ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlxrb","0,1,2",1) +DEFINE_MOP(MOP_wstlxrb,{mopdReg32ID,mopdReg32IS,mopdMem8D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlxrb","0,1,2",1) DEFINE_MOP(MOP_wstlxrh,{mopdReg32ID,mopdReg32IS,mopdMem16D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlxrh","0,1,2",1) DEFINE_MOP(MOP_wstlxr, {mopdReg32ID,mopdReg32IS,mopdMem32D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlxr","0,1,2",1) DEFINE_MOP(MOP_xstlxr, {mopdReg32ID,mopdReg64IS,mopdMem64D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlxr","0,1,2",1) @@ -1097,7 +1144,7 @@ DEFINE_MOP(MOP_pseudo_ret_float, {mopdReg64FS},0,kLtUndef,"//MOP_pseudo_ret_floa DEFINE_MOP(MOP_pseudo_eh_def_x, {mopdReg64ID},0,kLtUndef,"//MOP_pseudo_eh_def_x","0", 0) /*MOP_nop */ -DEFINE_MOP(MOP_nop, {},0,kLtAlu,"nop","", 1) +DEFINE_MOP(MOP_nop, {},ISNOP,kLtAlu,"nop","", 1) /* for comments */ /* MOP_comment */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_operand.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_operand.h index 3eda525e2a16fa7ead67e76335970a72622ff303..52d29b2eb37b460d24c500ea2e04566bdb0e7967 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_operand.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_operand.h @@ -22,135 +22,12 @@ #include "aarch64_isa.h" #include "operand.h" #include "cg.h" -#include "aarch64_immediate.h" #include "emit.h" #include "common_utils.h" namespace maplebe { using namespace maple; -class AArch64RegOperand : public RegOperand { - public: - AArch64RegOperand(regno_t regNO, uint32 size, RegType kind, uint32 flg = 0) - : RegOperand(regNO, size, kind), flag(flg) { - ASSERT(kind != kRegTyUndef, "Reg type must be specified"); - } - - ~AArch64RegOperand() override = default; - - void SetRefField(bool newIsRefField) { - isRefField = newIsRefField; - } - - bool IsInvalidRegister() const override { - return (GetRegisterNumber() == AArch64reg::kRinvalid); - } - - bool IsPhysicalRegister() const override { - return AArch64isa::IsPhysicalRegister(GetRegisterNumber()); - } - - bool IsVirtualRegister() const override { - return !IsPhysicalRegister(); - } - - bool IsBBLocalVReg() const override { - return IsVirtualRegister() && RegOperand::IsBBLocalVReg(); - } - - bool IsSaveReg(MIRType &ty, BECommon &beCommon) const override; - - static AArch64RegOperand &Get32bitZeroRegister() { - return zero32; - } - - static AArch64RegOperand &Get64bitZeroRegister() { - return zero64; - } - - static AArch64RegOperand &GetZeroRegister(uint32 bitLen) { - /* - * It is possible to have a bitLen < 32, eg stb. - * Set it to 32 if it is less than 32. - */ - if (bitLen < k32BitSize) { - bitLen = k32BitSize; - } - ASSERT((bitLen == k32BitSize || bitLen == k64BitSize), "illegal bit length = %d", bitLen); - return (bitLen == k32BitSize) ? 
Get32bitZeroRegister() : Get64bitZeroRegister(); - } - - bool IsZeroRegister() const override { - return GetRegisterNumber() == RZR; - } - - Operand *Clone(MemPool &memPool) const override { - return memPool.Clone(*this); - } - - void SetIF64Vec() { - if64Vec = true; - } - - bool GetIF64Vec() const { - return if64Vec; - } - - void SetVecLanePosition(int32 pos) { - vecLane = static_cast(pos); - } - - int32 GetVecLanePosition() const { - return vecLane; - } - - void SetVecLaneSize(uint32 size) { - vecLaneSize = static_cast(size); - } - - uint32 GetVecLaneSize() const { - return vecLaneSize; - } - - void SetVecElementSize(uint32 size) { - vecElementSize = size; - } - - uint64 GetVecElementSize() const { - return vecElementSize; - } - - bool operator==(const AArch64RegOperand &opnd) const; - - bool operator<(const AArch64RegOperand &opnd) const; - - void EmitVectorOpnd(Emitter &emitter) const; - void Emit(Emitter &emitter, const OpndProp *opndProp) const override; - void Dump() const override { - std::array prims = { "U", "R", "V", "C", "X", "Vra" }; - std::array classes = { "[U]", "[I]", "[F]", "[CC]", "[X87]", "[Vra]" }; - bool isVirtual = IsVirtualRegister(); - ASSERT(regType < kRegTyLast, "unexpected regType"); - regno_t reg = isVirtual ? regNO : (regNO - 1); - LogInfo::MapleLogger() << (isVirtual ? "vreg:" : " reg:") << prims[regType] << reg - << " Ty: " << classes[regType] << " Vb: [" << static_cast(validBitsNum) << "]" - << " Sz: [" << size << "]" ; - } - - bool IsSPOrFP() const override; - - private: - static AArch64RegOperand zero64; - static AArch64RegOperand zero32; - - bool isRefField = false; - uint32 flag; - int16 vecLane = -1; /* -1 for whole reg, 0 to 15 to specify each lane one at a time */ - uint16 vecLaneSize = 0; /* Number of lanes */ - uint64 vecElementSize = 0; /* size of vector element in each lane */ - bool if64Vec = false; /* operand returning 64x1's int value in FP/Simd register */ -}; - /* * http://stackoverflow.com/questions/30904718/range-of-immediate-values-in-armv8-a64-assembly * @@ -175,50 +52,6 @@ class AArch64RegOperand : public RegOperand { * rotated by 0 to e-1 bits. This mechanism can generate 5,334 unique * 64-bit patterns (as 2,667 pairs of pattern and their bitwise inverse)." 
*/ -class AArch64ImmOperand : public ImmOperand { - public: - AArch64ImmOperand(int64 val, uint32 size, bool isSigned, VaryType varyType = kNotVary, bool isFmov = false) - : ImmOperand(val, size, isSigned, varyType), isFmov(isFmov) {} - - ~AArch64ImmOperand() override = default; - - Operand *Clone(MemPool &memPool) const override { - return memPool.Clone(*this); - } - - bool IsInBitSize(uint8 size, uint8 nLowerZeroBits) const override { - return maplebe::IsBitSizeImmediate(static_cast(value), size, nLowerZeroBits); - } - - bool IsBitmaskImmediate() const { - ASSERT(!IsZero(), " 0 is reserved for bitmask immediate"); - ASSERT(!IsAllOnes(), " -1 is reserved for bitmask immediate"); - return maplebe::IsBitmaskImmediate(static_cast(value), static_cast(size)); - } - - bool IsBitmaskImmediate(uint32 destSize) const { - ASSERT(!IsZero(), " 0 is reserved for bitmask immediate"); - ASSERT(!IsAllOnes(), " -1 is reserved for bitmask immediate"); - return maplebe::IsBitmaskImmediate(static_cast(value), static_cast(destSize)); - } - - bool IsSingleInstructionMovable() const override { - return (IsMoveWidableImmediate(static_cast(value), static_cast(size)) || - IsMoveWidableImmediate(~static_cast(value), static_cast(size)) || - IsBitmaskImmediate()); - } - - bool IsSingleInstructionMovable(uint32 destSize) const { - return (IsMoveWidableImmediate(static_cast(value), static_cast(destSize)) || - IsMoveWidableImmediate(~static_cast(value), static_cast(destSize)) || - IsBitmaskImmediate(destSize)); - } - - void Emit(Emitter &emitter, const OpndProp *prop) const override; - - private: - bool isFmov; -}; class ImmFPZeroOperand : public OperandVisitable { public: @@ -253,114 +86,6 @@ class ImmFPZeroOperand : public OperandVisitable { } }; -class AArch64OfstOperand : public OfstOperand { - public: - enum OfstType : uint8 { - kSymbolOffset, - kImmediateOffset, - kSymbolImmediateOffset, - }; - - /* only for symbol offset */ - AArch64OfstOperand(const MIRSymbol &mirSymbol, uint32 size, int32 relocs) - : OfstOperand(kOpdOffset, 0, size, true), - offsetType(kSymbolOffset), symbol(&mirSymbol), relocs(relocs) {} - /* only for Immediate offset */ - AArch64OfstOperand(int64 val, uint32 size, VaryType isVar = kNotVary) - : OfstOperand(kOpdOffset, static_cast(val), size, true, isVar), - offsetType(kImmediateOffset), symbol(nullptr), relocs(0) {} - /* for symbol and Immediate offset */ - AArch64OfstOperand(const MIRSymbol &mirSymbol, int64 val, uint32 size, int32 relocs, VaryType isVar = kNotVary) - : OfstOperand(kOpdOffset, val, size, true, isVar), - offsetType(kSymbolImmediateOffset), - symbol(&mirSymbol), - relocs(relocs) {} - - ~AArch64OfstOperand() override = default; - - Operand *Clone(MemPool &memPool) const override { - return memPool.Clone(*this); - } - - bool IsInBitSize(uint8 size, uint8 nLowerZeroBits) const override { - /* mask1 is a 64bits number that is all 1 shifts left size bits */ - const uint64 mask1 = 0xffffffffffffffffUL << size; - /* mask2 is a 64 bits number that nlowerZeroBits are all 1, higher bits aro all 0 */ - uint64 mask2 = (static_cast(1) << static_cast(nLowerZeroBits)) - 1UL; - return (mask2 & static_cast(value)) == 0UL && - (mask1 & ((static_cast(value)) >> nLowerZeroBits)) == 0UL; - } - - bool IsSymOffset() const { - return offsetType == kSymbolOffset; - } - bool IsImmOffset() const { - return offsetType == kImmediateOffset; - } - bool IsSymAndImmOffset() const { - return offsetType == kSymbolImmediateOffset; - } - - const MIRSymbol *GetSymbol() const { - return symbol; - } - - const 
std::string &GetSymbolName() const { - return symbol->GetName(); - } - - int64 GetOffsetValue() const { - return GetValue(); - } - - void SetOffsetValue(int32 offVal) { - SetValue(static_cast(offVal)); - } - - void AdjustOffset(int32 delta) { - Add(static_cast(delta)); - } - - bool operator==(const AArch64OfstOperand &opnd) const { - return (offsetType == opnd.offsetType && symbol == opnd.symbol && - OfstOperand::operator==(opnd) && relocs == opnd.relocs); - } - - bool operator<(const AArch64OfstOperand &opnd) const { - return (offsetType < opnd.offsetType || - (offsetType == opnd.offsetType && symbol < opnd.symbol) || - (offsetType == opnd.offsetType && symbol == opnd.symbol && GetValue() < opnd.GetValue())); - } - - void Emit(Emitter &emitter, const OpndProp *prop) const override; - - void Dump() const override { - if (IsImmOffset()) { - LogInfo::MapleLogger() << "ofst:" << GetValue(); - } else { - LogInfo::MapleLogger() << GetSymbolName(); - LogInfo::MapleLogger() << "+offset:" << GetValue(); - } - } - - bool IsBitmaskImmediate() const { - ASSERT(!IsZero(), "0 is reserved for bitmask immediate"); - ASSERT(!IsAllOnes(), "-1 is reserved for bitmask immediate"); - return maplebe::IsBitmaskImmediate(static_cast(value), static_cast(size)); - } - - bool IsSingleInstructionMovable() const override { - return (IsMoveWidableImmediate(static_cast(value), static_cast(size)) || - IsMoveWidableImmediate(~static_cast(value), static_cast(size)) || - IsBitmaskImmediate()); - } - - private: - OfstType offsetType; - const MIRSymbol *symbol; - int32 relocs; -}; - /* representing for global variables address */ class StImmOperand : public OperandVisitable { public: @@ -408,8 +133,7 @@ class StImmOperand : public OperandVisitable { void Emit(Emitter &emitter, const OpndProp *opndProp) const override; void Dump() const override { - LogInfo::MapleLogger() << GetName(); - LogInfo::MapleLogger() << "+offset:" << offset; + CHECK_FATAL(false, "dont run here"); } private: @@ -418,26 +142,6 @@ class StImmOperand : public OperandVisitable { int32 relocs; }; -class FunctionLabelOperand : public LabelOperand { - public: - explicit FunctionLabelOperand(const char *func) : LabelOperand(func, 0) {} - - ~FunctionLabelOperand() override = default; - - Operand *Clone(MemPool &memPool) const override { - return memPool.Clone(*this); - } - - void Emit(Emitter &emitter, const OpndProp *opndProp) const override { - (void)opndProp; - emitter.Emit(parentFunc); - } - - void Dump() const override { - LogInfo::MapleLogger() << "func :" << parentFunc; - } -}; - /* Use StImmOperand instead? */ class FuncNameOperand : public OperandVisitable { public: @@ -493,422 +197,6 @@ class FuncNameOperand : public OperandVisitable { const MIRSymbol *symbol; }; -class AArch64CGFunc; - -/* - * Table C1-6 A64 Load/Store addressing modes - * | Offset - * Addressing Mode | Immediate | Register | Extended Register - * - * Base register only | [base{,#0}] | - | - - * (no offset) | B_OI_NONE | | - * imm=0 - * - * Base plus offset | [base{,#imm}] | [base,Xm{,LSL #imm}] | [base,Wm,(S|U)XTW {#imm}] - * B_OI_NONE | B_OR_X | B_OR_X - * imm=0,1 (0,3) | imm=00,01,10,11 (0/2,s/u) - * - * Pre-indexed | [base, #imm]! | - | - - * - * Post-indexed | [base], #imm | [base], Xm(a) | - - * - * Literal | label | - | - - * (PC-relative) - * - * a) The post-indexed by register offset mode can be used with the SIMD Load/Store - * structure instructions described in Load/Store Vector on page C3-154. Otherwise - * the post-indexed by register offset mode is not available. 
- */ -class AArch64MemOperand : public MemOperand { - public: - enum AArch64AddressingMode : uint8 { - kAddrModeUndef, - /* AddrMode_BO, base, offset. EA = [base] + offset; */ - kAddrModeBOi, /* INTACT: EA = [base]+immediate */ - /* - * PRE: base += immediate, EA = [base] - * POST: EA = [base], base += immediate - */ - kAddrModeBOrX, /* EA = [base]+Extend([offreg/idxreg]), OR=Wn/Xn */ - kAddrModeLiteral, /* AArch64 insruction LDR takes literal and */ - /* - * "calculates an address from the PC value and an immediate offset, - * loads a word from memory, and writes it to a register." - */ - kAddrModeLo12Li // EA = [base] + #:lo12:Label+immediate. (Example: [x0, #:lo12:__Label300+456] - }; - /* - * ARMv8-A A64 ISA Overview by Matteo Franchin @ ARM - * (presented at 64-bit Android on ARM. Sep. 2015) p.14 - * o Address to load from/store to is a 64-bit base register + an optional offset - * LDR X0, [X1] ; Load from address held in X1 - * STR X0, [X1] ; Store to address held in X1 - * - * o Offset can be an immediate or a register - * LDR X0, [X1, #8] ; Load from address [X1 + 8 bytes] - * LDR X0, [X1, #-8] ; Load with negative offset - * LDR X0, [X1, X2] ; Load from address [X1 + X2] - * - * o A Wn register offset needs to be extended to 64 bits - * LDR X0, [X1, W2, SXTW] ; Sign-extend offset in W2 - * LDR X0, [X1, W2, UXTW] ; Zero-extend offset in W2 - * - * o Both Xn and Wn register offsets can include an optional left-shift - * LDR X0, [X1, W2, UXTW #2] ; Zero-extend offset in W2 & left-shift by 2 - * LDR X0, [X1, X2, LSL #2] ; Left-shift offset in X2 by 2 - * - * p.15 - * Addressing Modes Analogous C Code - * int *intptr = ... // X1 - * int out; // W0 - * o Simple: X1 is not changed - * LDR W0, [X1] out = *intptr; - * o Offset: X1 is not changed - * LDR W0, [X1, #4] out = intptr[1]; - * o Pre-indexed: X1 changed before load - * LDR W0, [X1, #4]! 
=|ADD X1,X1,#4 out = *(++intptr); - * |LDR W0,[X1] - * o Post-indexed: X1 changed after load - * LDR W0, [X1], #4 =|LDR W0,[X1] out = *(intptr++); - * |ADD X1,X1,#4 - */ - enum ExtendInfo : uint8 { - kShiftZero = 0x1, - kShiftOne = 0x2, - kShiftTwo = 0x4, - kShiftThree = 0x8, - kUnsignedExtend = 0x10, - kSignExtend = 0x20 - }; - - enum IndexingOption : uint8 { - kIntact, /* base register stays the same */ - kPreIndex, /* base register gets changed before load */ - kPostIndex, /* base register gets changed after load */ - }; - - AArch64MemOperand(AArch64reg reg, int32 offset, uint32 size, IndexingOption idxOpt = kIntact) - : MemOperand(size, - CG::GetCurCGFuncNoConst()->GetMemoryPool()->New<AArch64RegOperand>(reg, k64BitSize, kRegTyInt), - nullptr, - CG::GetCurCGFuncNoConst()->GetMemoryPool()->New<AArch64OfstOperand>(offset, k32BitSize), nullptr), - addrMode(kAddrModeBOi), - extend(0), - idxOpt(idxOpt), - noExtend(false), - isStackMem(false) { - if (reg == RSP || reg == RFP) { - isStackMem = true; - } - } - - AArch64MemOperand(AArch64AddressingMode mode, uint32 size, RegOperand &base, RegOperand *index, - OfstOperand *offset, const MIRSymbol *symbol) - : MemOperand(size, &base, index, offset, symbol), - addrMode(mode), - extend(0), - idxOpt(kIntact), - noExtend(false), - isStackMem(false) { - if (base.GetRegisterNumber() == RSP || base.GetRegisterNumber() == RFP) { - isStackMem = true; - } - } - - AArch64MemOperand(AArch64AddressingMode mode, uint32 size, RegOperand &base, RegOperand &index, - OfstOperand *offset, const MIRSymbol &symbol, bool noExtend) - : MemOperand(size, &base, &index, offset, &symbol), - addrMode(mode), - extend(0), - idxOpt(kIntact), - noExtend(noExtend), - isStackMem(false) { - if (base.GetRegisterNumber() == RSP || base.GetRegisterNumber() == RFP) { - isStackMem = true; - } - } - - AArch64MemOperand(AArch64AddressingMode mode, uint32 dSize, RegOperand &baseOpnd, RegOperand &indexOpnd, - uint32 shift, bool isSigned = false) - : MemOperand(dSize, &baseOpnd, &indexOpnd, nullptr, nullptr), - addrMode(mode), - extend((isSigned ?
kSignExtend : kUnsignedExtend) | (1U << shift)), - idxOpt(kIntact), - noExtend(false), - isStackMem(false) { - if (baseOpnd.GetRegisterNumber() == RSP || baseOpnd.GetRegisterNumber() == RFP) { - isStackMem = true; - } - } - - AArch64MemOperand(AArch64AddressingMode mode, uint32 dSize, const MIRSymbol &sym) - : MemOperand(dSize, nullptr, nullptr, nullptr, &sym), - addrMode(mode), - extend(0), - idxOpt(kIntact), - noExtend(false), - isStackMem(false) { - ASSERT(mode == kAddrModeLiteral, "This constructor version is supposed to be used with AddrMode_Literal only"); - } - - ~AArch64MemOperand() override = default; - - /* - Copy constructor - */ - explicit AArch64MemOperand(const AArch64MemOperand &memOpnd) - : MemOperand(memOpnd), addrMode(memOpnd.addrMode), extend(memOpnd.extend), idxOpt(memOpnd.idxOpt), - noExtend(memOpnd.noExtend), isStackMem(memOpnd.isStackMem) {} - AArch64MemOperand &operator=(const AArch64MemOperand &memOpnd) = default; - - Operand *Clone(MemPool &memPool) const override { - return memPool.Clone<AArch64MemOperand>(*this); - } - - AArch64AddressingMode GetAddrMode() const { - return addrMode; - } - - const std::string &GetSymbolName() const { - return GetSymbol()->GetName(); - } - - void SetBaseRegister(AArch64RegOperand &baseRegOpnd) { - MemOperand::SetBaseRegister(baseRegOpnd); - } - - bool IsStackMem() const { - return isStackMem; - } - - void SetStackMem(bool isStack) { - isStackMem = isStack; - } - - RegOperand *GetOffsetRegister() const { - return MemOperand::GetIndexRegister(); - } - - Operand *GetOffset() const override; - - void SetOffsetRegister(AArch64RegOperand &osr) { - MemOperand::SetIndexRegister(osr); - } - - AArch64OfstOperand *GetOffsetImmediate() const { - return static_cast<AArch64OfstOperand*>(GetOffsetOperand()); - } - - void SetOffsetImmediate(OfstOperand &ofstOpnd) { - MemOperand::SetOffsetOperand(ofstOpnd); - } - - /* Returns N where alignment == 2^N */ - static int32 GetImmediateOffsetAlignment(uint32 dSize) { - ASSERT(dSize >= k8BitSize, "error val:dSize"); - ASSERT(dSize <= k128BitSize, "error val:dSize"); - ASSERT((dSize & (dSize - 1)) == 0, "error val:dSize"); - /* dSize==8: 0, dSize==16 : 1, dSize==32: 2, dSize==64: 3 */ - return __builtin_ctz(dSize) - static_cast<int32>(kBaseOffsetAlignment); - } - - static int32 GetMaxPIMM(uint32 dSize) { - dSize = dSize > k64BitSize ?
k64BitSize : dSize; - ASSERT(dSize >= k8BitSize, "error val:dSize"); - ASSERT(dSize <= k128BitSize, "error val:dSize"); - ASSERT((dSize & (dSize - 1)) == 0, "error val:dSize"); - int32 alignment = GetImmediateOffsetAlignment(dSize); - /* alignment is between kAlignmentOf8Bit and kAlignmentOf64Bit */ - ASSERT(alignment >= kOffsetAlignmentOf8Bit, "error val:alignment"); - ASSERT(alignment <= kOffsetAlignmentOf128Bit, "error val:alignment"); - return (kMaxPimm[alignment]); - } - - static int32 GetMaxPairPIMM(uint32 dSize) { - ASSERT(dSize >= k32BitSize, "error val:dSize"); - ASSERT(dSize <= k128BitSize, "error val:dSize"); - ASSERT((dSize & (dSize - 1)) == 0, "error val:dSize"); - int32 alignment = GetImmediateOffsetAlignment(dSize); - /* alignment is between kAlignmentOf8Bit and kAlignmentOf64Bit */ - ASSERT(alignment >= kOffsetAlignmentOf32Bit, "error val:alignment"); - ASSERT(alignment <= kOffsetAlignmentOf128Bit, "error val:alignment"); - return (kMaxPairPimm[static_cast<uint32>(alignment) - k2BitSize]); - } - - bool IsOffsetMisaligned(uint32 dSize) const { - ASSERT(dSize >= k8BitSize, "error val:dSize"); - ASSERT(dSize <= k128BitSize, "error val:dSize"); - ASSERT((dSize & (dSize - 1)) == 0, "error val:dSize"); - if (dSize == k8BitSize || addrMode != kAddrModeBOi) { - return false; - } - AArch64OfstOperand *ofstOpnd = GetOffsetImmediate(); - return ((static_cast<uint64>(ofstOpnd->GetOffsetValue()) & - static_cast<uint64>((1U << static_cast<uint32>(GetImmediateOffsetAlignment(dSize))) - 1)) != 0); - } - - static bool IsSIMMOffsetOutOfRange(int64 offset, bool is64bit, bool isLDSTPair) { - if (!isLDSTPair) { - return (offset < kMinSimm32 || offset > kMaxSimm32); - } - if (is64bit) { - return (offset < kMinSimm64 || offset > kMaxSimm64Pair) || (static_cast<uint64>(offset) & k7BitSize) ; - } - return (offset < kMinSimm32 || offset > kMaxSimm32Pair) || (static_cast<uint64>(offset) & k3BitSize); - } - - static bool IsPIMMOffsetOutOfRange(int32 offset, uint32 dSize) { - ASSERT(dSize >= k8BitSize, "error val:dSize"); - ASSERT(dSize <= k128BitSize, "error val:dSize"); - ASSERT((dSize & (dSize - 1)) == 0, "error val:dSize"); - return (offset < 0 || offset > GetMaxPIMM(dSize)); - } - - bool operator<(const AArch64MemOperand &opnd) const { - return addrMode < opnd.addrMode || - (addrMode == opnd.addrMode && GetBaseRegister() < opnd.GetBaseRegister()) || - (addrMode == opnd.addrMode && GetBaseRegister() == opnd.GetBaseRegister() && - GetIndexRegister() < opnd.GetIndexRegister()) || - (addrMode == opnd.addrMode && GetBaseRegister() == opnd.GetBaseRegister() && - GetIndexRegister() == opnd.GetIndexRegister() && GetOffsetOperand() < opnd.GetOffsetOperand()) || - (addrMode == opnd.addrMode && GetBaseRegister() == opnd.GetBaseRegister() && - GetIndexRegister() == opnd.GetIndexRegister() && GetOffsetOperand() == opnd.GetOffsetOperand() && - GetSymbol() < opnd.GetSymbol()) || - (addrMode == opnd.addrMode && GetBaseRegister() == opnd.GetBaseRegister() && - GetIndexRegister() == opnd.GetIndexRegister() && GetOffsetOperand() == opnd.GetOffsetOperand() && - GetSymbol() == opnd.GetSymbol() && GetSize() < opnd.GetSize()) || - (addrMode == opnd.addrMode && GetBaseRegister() == opnd.GetBaseRegister() && - GetIndexRegister() == opnd.GetIndexRegister() && GetOffsetOperand() == opnd.GetOffsetOperand() && - GetSymbol() == opnd.GetSymbol() && GetSize() == opnd.GetSize() && extend < opnd.extend); - } - - bool operator==(const AArch64MemOperand &opnd) const { - return (GetSize() == opnd.GetSize()) && (addrMode == opnd.addrMode) && (extend == opnd.extend) && -
(GetBaseRegister() == opnd.GetBaseRegister()) && - (GetIndexRegister() == opnd.GetIndexRegister()) && - (GetSymbol() == opnd.GetSymbol()) && - (GetOffsetOperand() == opnd.GetOffsetOperand()) ; - } - - bool Less(const Operand &right) const override; - - bool NoAlias(const AArch64MemOperand &rightOpnd) const; - - bool NoOverlap(const AArch64MemOperand &rightOpnd) const; - - VaryType GetMemVaryType() override { - Operand *ofstOpnd = GetOffsetOperand(); - if (ofstOpnd != nullptr) { - auto *opnd = static_cast<OfstOperand*>(ofstOpnd); - return opnd->GetVary(); - } - return kNotVary; - } - - bool IsExtendedRegisterMode() const { - return addrMode == kAddrModeBOrX; - } - - void UpdateExtend(ExtendInfo flag) { - extend = flag | (1U << ShiftAmount()); - } - - bool SignedExtend() const { - return IsExtendedRegisterMode() && ((extend & kSignExtend) != 0); - } - - bool UnsignedExtend() const { - return IsExtendedRegisterMode() && !SignedExtend(); - } - - uint32 ShiftAmount() const { - uint32 scale = extend & 0xF; - /* 8 is 1 << 3, 4 is 1 << 2, 2 is 1 << 1, 1 is 1 << 0; */ - return (scale == 8) ? 3 : ((scale == 4) ? 2 : ((scale == 2) ? 1 : 0)); - } - - bool ShouldEmitExtend() const { - return !noExtend && ((extend & 0x3F) != 0); - } - - IndexingOption GetIndexOpt() const { - return idxOpt; - } - - void SetIndexOpt(IndexingOption newidxOpt) { - idxOpt = newidxOpt; - } - - bool IsIntactIndexed() const { - return idxOpt == kIntact; - } - - bool IsPostIndexed() const { - return idxOpt == kPostIndex; - } - - bool IsPreIndexed() const { - return idxOpt == kPreIndex; - } - - std::string GetExtendAsString() const { - if (GetOffsetRegister()->GetSize() == k64BitSize) { - return std::string("LSL"); - } - return ((extend & kSignExtend) != 0) ? std::string("SXTW") : std::string("UXTW"); - } - - void Emit(Emitter &emitter, const OpndProp *opndProp) const override; - - void Dump() const override; - - /* Return true if given operand has the same base reg and offset with this.
*/ - bool Equals(Operand &operand) const override; - bool Equals(const AArch64MemOperand &opnd) const; - - private: - static constexpr int32 kLdpStp32SimmUpperBound = 252; - - static constexpr int32 kLdpStp64SimmUpperBound = 504; - - AArch64AddressingMode addrMode; - - uint32 extend; /* used with offset register ; AddrMode_B_OR_X */ - - IndexingOption idxOpt; /* used with offset immediate ; AddrMode_B_OI */ - - bool noExtend; - - bool isStackMem; -}; - -class AArch64ListOperand : public ListOperand { - public: - explicit AArch64ListOperand(MapleAllocator &allocator) : ListOperand(allocator) {} - - ~AArch64ListOperand() override = default; - - Operand *Clone(MemPool &memPool) const override { - return memPool.Clone<AArch64ListOperand>(*this); - } - - void Emit(Emitter &emitter, const OpndProp *opndProp) const override; -}; - -class AArch64PhiOperand : public PhiOperand { - public: - explicit AArch64PhiOperand(MapleAllocator &allocator) : PhiOperand(allocator) {} - - ~AArch64PhiOperand() override = default; - - Operand *Clone(MemPool &memPool) const override { - return memPool.Clone<AArch64PhiOperand>(*this); - } - - void Emit(Emitter &emitter, const OpndProp *opndProp) const override; - void Dump() const override; -}; - class CondOperand : public OperandVisitable<CondOperand> { public: explicit CondOperand(AArch64CC_t cc) : OperandVisitable(Operand::kOpdCond, k4ByteSize), cc(cc) {} @@ -932,11 +220,12 @@ class CondOperand : public OperandVisitable<CondOperand> { bool Less(const Operand &right) const override; void Dump() const override { - LogInfo::MapleLogger() << "CC: " << ccStrs[cc]; + CHECK_FATAL(false, "dont run here"); } - private: static const char *ccStrs[kCcLast]; + + private: AArch64CC_t cc; }; @@ -984,7 +273,7 @@ class LogicalShiftLeftOperand : public OperandVisitable<LogicalShiftLeftOperand> } void Dump() const override { - LogInfo::MapleLogger() << "LSL: " << shiftAmount; + CHECK_FATAL(false, "dont run here"); } private: @@ -1028,7 +317,17 @@ class ExtendShiftOperand : public OperandVisitable<ExtendShiftOperand> { bool Less(const Operand &right) const override; - void Dump() const override; + void Dump() const override { + CHECK_FATAL(false, "dont run here"); + } + + bool Equals(Operand &operand) const override { + if (!operand.IsOpdExtend()) { + return false; + } + auto &op = static_cast<ExtendShiftOperand&>(operand); + return ((&op == this) || (op.GetExtendOp() == extendOp && op.GetShiftAmount() == shiftAmount)); + } private: ExtendOp extendOp; @@ -1071,9 +370,15 @@ class BitShiftOperand : public OperandVisitable<BitShiftOperand> { } void Dump() const override { - CHECK_FATAL((shiftOp != kUndef), "shift is undef!"); - LogInfo::MapleLogger() << ((shiftOp == kLSL) ? "LSL: " : ((shiftOp == kLSR) ?
"LSR: " : "ASR: ")); - LogInfo::MapleLogger() << shiftAmount; + CHECK_FATAL(false, "dont run here"); + } + + bool Equals(Operand &operand) const override { + if (!operand.IsOpdShift()) { + return false; + } + auto &op = static_cast<BitShiftOperand&>(operand); + return ((&op == this) || (op.GetShiftOp() == shiftOp && op.GetShiftAmount() == shiftAmount)); } private: @@ -1163,9 +468,9 @@ class ListConstraintOperand : public OperandVisitable<ListConstraintOperand> { } /* namespace maplebe */ namespace std { template<> /* function-template-specialization */ -class std::hash<maplebe::AArch64MemOperand> { +class std::hash<maplebe::MemOperand> { public: - size_t operator()(const maplebe::AArch64MemOperand &x) const { + size_t operator()(const maplebe::MemOperand &x) const { std::size_t seed = 0; hash_combine(seed, x.GetAddrMode()); hash_combine(seed, x.GetSize()); diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h index 6498d893e918d03ceef1d555fbbc6368b618ab18..6903bee4f58a86dc548d47097d92379aa18406b4 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h @@ -111,7 +111,9 @@ class ContinuousCmpCsetPattern : public CGPeepPattern { ~ContinuousCmpCsetPattern() override = default; void Run(BB &bb, Insn &insn) override; bool CheckCondition(Insn &insn) override; - std::string GetPatternName() override; + std::string GetPatternName() override { + return "ContinuousCmpCsetPattern"; + } private: bool CheckCondCode(const CondOperand &condOpnd) const; @@ -145,7 +147,9 @@ class CselToCsetPattern : public CGPeepPattern { ~CselToCsetPattern() override = default; void Run(BB &bb, Insn &insn) override; bool CheckCondition(Insn &insn) override; - std::string GetPatternName() override; + std::string GetPatternName() override { + return "CselToCsetPattern"; + } private: bool IsOpndDefByZero(const Insn &insn); @@ -176,7 +180,9 @@ class CsetCbzToBeqPattern : public CGPeepPattern { CsetCbzToBeqPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) : CGPeepPattern(cgFunc, currBB, currInsn, info) {} ~CsetCbzToBeqPattern() override = default; - std::string GetPatternName() override; + std::string GetPatternName() override { + return "CsetCbzToBeqPattern"; + } bool CheckCondition(Insn &insn) override; void Run(BB &bb, Insn &insn) override; @@ -207,7 +213,9 @@ class NegCmpToCmnPattern : public CGPeepPattern { ~NegCmpToCmnPattern() override = default; void Run(BB &bb, Insn &insn) override; bool CheckCondition(Insn &insn) override; - std::string GetPatternName() override; + std::string GetPatternName() override { + return "NegCmpToCmnPattern"; + } private: Insn *prevInsn = nullptr; @@ -226,7 +234,9 @@ class ExtLslToBitFieldInsertPattern : public CGPeepPattern { ExtLslToBitFieldInsertPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) : CGPeepPattern(cgFunc, currBB, currInsn, info) {} ~ExtLslToBitFieldInsertPattern() override = default; - std::string GetPatternName() override; + std::string GetPatternName() override { + return "ExtLslToBitFieldInsertPattern"; + } bool CheckCondition(Insn &insn) override; void Run(BB &bb, Insn &insn) override; @@ -273,7 +283,9 @@ class AndCmpBranchesToTbzPattern : public CGPeepPattern { ~AndCmpBranchesToTbzPattern() override = default; void Run(BB &bb, Insn &insn) override; bool CheckCondition(Insn &insn) override; - std::string GetPatternName() override; + std::string GetPatternName() override { + return "AndCmpBranchesToTbzPattern"; + } private: bool CheckAndSelectPattern(const Insn &currInsn); @@
-313,7 +325,9 @@ class ZeroCmpBranchesToTbzPattern : public CGPeepPattern { ~ZeroCmpBranchesToTbzPattern() override = default; void Run(BB &bb, Insn &insn) override; bool CheckCondition(Insn &insn) override; - std::string GetPatternName() override; + std::string GetPatternName() override { + return "ZeroCmpBranchesToTbzPattern"; + } private: bool CheckAndSelectPattern(const Insn &currInsn); @@ -336,7 +350,9 @@ class MvnAndToBicPattern : public CGPeepPattern { ~MvnAndToBicPattern() override = default; void Run(BB &bb, Insn &insn) override; bool CheckCondition(Insn &insn) override; - std::string GetPatternName() override; + std::string GetPatternName() override { + return "MvnAndToBicPattern"; + } private: Insn *prevInsn1 = nullptr; @@ -363,13 +379,14 @@ class AndCbzToTbzPattern : public CGPeepPattern { ~AndCbzToTbzPattern() override = default; void Run(BB &bb, Insn &insn) override; bool CheckCondition(Insn &insn) override; - std::string GetPatternName() override; + std::string GetPatternName() override { + return "AndCbzToTbzPattern"; + } private: Insn *prevInsn = nullptr; }; - /* * Specific Extension Elimination, includes sxt[b|h|w] & uxt[b|h|w]. There are scenes: * 1. PrevInsn is mov @@ -465,12 +482,13 @@ class ElimSpecificExtensionPattern : public CGPeepPattern { ~ElimSpecificExtensionPattern() override = default; void Run(BB &bb, Insn &insn) override; bool CheckCondition(Insn &insn) override; - std::string GetPatternName() override; + std::string GetPatternName() override { + return "ElimSpecificExtensionPattern"; + } protected: enum SpecificExtType : uint8 { EXTUNDEF = 0, - AND, SXTB, SXTH, SXTW, @@ -494,7 +512,6 @@ class ElimSpecificExtensionPattern : public CGPeepPattern { uint64 extValueRangeTable[SpecificExtTypeSize][kValueTypeNum] = { /* {minValue, maxValue} */ {kInvalidValue, kInvalidValue}, /* UNDEF */ - {kInvalidValue, kInvalidValue}, /* AND */ {0xFFFFFFFFFFFFFF80, 0x7F}, /* SXTB */ {0xFFFFFFFFFFFF8000, 0x7FFF}, /* SXTH */ {0xFFFFFFFF80000000, kInvalidValue}, /* SXTW */ @@ -506,8 +523,6 @@ class ElimSpecificExtensionPattern : public CGPeepPattern { /* {prevOrigMop, prevNewMop} */ {{MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}}, /* UNDEF */ - {{MOP_wldrb, MOP_wldrb}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, - {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}}, /* AND */ {{MOP_wldrb, MOP_wldrsb}, {MOP_wldrsb, MOP_wldrsb}, {MOP_wldr, MOP_wldrsb}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}}, /* SXTB */ {{MOP_wldrh, MOP_wldrsh}, {MOP_wldrb, MOP_wldrb}, {MOP_wldrsb, MOP_wldrsb}, {MOP_wldrsh, MOP_wldrsh}, @@ -525,8 +540,6 @@ class ElimSpecificExtensionPattern : public CGPeepPattern { /* {prevMop, currMop} */ {{MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}}, /* UNDEF */ - {{MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, - {MOP_undef, MOP_undef}}, /* AND */ {{MOP_xsxtb32, MOP_xsxtb32}, {MOP_xsxtb64, MOP_xsxtb64}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}}, /* SXTB */ {{MOP_xsxtb32, MOP_xsxth32}, {MOP_xsxtb64, MOP_xsxth64}, {MOP_xsxth32, MOP_xsxth32}, @@ -586,48 +599,6 @@ class OneHoleBranchPattern : public CGPeepPattern { MOperator newOp = MOP_undef; }; -/* - * cmp w0, #0 - * cset w1, NE --> mov w1, w0 - * - * cmp w0, #0 - * cset w1, EQ --> eor w1, w0, 1 - * - * cmp w0, #1 - * cset w1, NE --> eor w1, w0, 1 - * - * cmp w0, #1 - * cset w1, EQ --> mov w1, w0 - * - 
* cmp w0, #0 - * cset w0, NE -->null - * - * cmp w0, #1 - * cset w0, EQ -->null - * - * condition: - * 1. the first operand of cmp instruction must has only one valid bit - * 2. the second operand of cmp instruction must be 0 or 1 - * 3. flag register of cmp isntruction must not be used later - */ -class CmpCsetOpt : public CGPeepPattern { - public: - CmpCsetOpt(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) - : CGPeepPattern(cgFunc, currBB, currInsn, info) {} - ~CmpCsetOpt() override = default; - void Run(BB &bb, Insn &csetInsn) override; - bool CheckCondition(Insn &csetInsn) override; - std::string GetPatternName() override { - return "CmpCsetOpt Pattern"; - }; - - private: - bool IsContinuousCmpCset(const Insn &curInsn); - bool OpndDefByOneValidBit(const Insn &defInsn); - Insn *cmpInsn = nullptr; - int64 cmpConstVal = -1; -}; - /* * Combine logical shift and orr to [extr wd, wn, wm, #lsb / extr xd, xn, xm, #lsb] * Example 1) @@ -659,7 +630,9 @@ class LogicShiftAndOrrToExtrPattern : public CGPeepPattern { ~LogicShiftAndOrrToExtrPattern() override = default; void Run(BB &bb, Insn &insn) override; bool CheckCondition(Insn &insn) override; - std::string GetPatternName() override; + std::string GetPatternName() override { + return "LogicShiftAndOrrToExtrPattern"; + } private: Insn *prevLsrInsn = nullptr; @@ -707,7 +680,9 @@ class SimplifyMulArithmeticPattern : public CGPeepPattern { ~SimplifyMulArithmeticPattern() override = default; void Run(BB &bb, Insn &insn) override; bool CheckCondition(Insn &insn) override; - std::string GetPatternName() override; + std::string GetPatternName() override { + return "SimplifyMulArithmeticPattern"; + } protected: enum ArithmeticType : uint8 { @@ -759,7 +734,9 @@ class LsrAndToUbfxPattern : public CGPeepPattern { ~LsrAndToUbfxPattern() override = default; void Run(BB &bb, Insn &insn) override; bool CheckCondition(Insn &insn) override; - std::string GetPatternName() override; + std::string GetPatternName() override { + return "LsrAndToUbfxPattern"; + } private: Insn *prevInsn = nullptr; @@ -783,7 +760,7 @@ class OrrToMovPattern : public CGPeepPattern { private: MOperator newMop = MOP_undef; - AArch64RegOperand *reg2 = nullptr; + RegOperand *reg2 = nullptr; }; /* @@ -902,9 +879,9 @@ class CombineContiLoadAndStorePattern : public CGPeepPattern { bool IsRegNotSameMemUseInInsn(const Insn &insn, regno_t regNO, bool isStore, int64 baseOfst); void RemoveInsnAndKeepComment(BB &bb, Insn &insn, Insn &prevInsn); MOperator GetMopHigherByte(MOperator mop) const; - bool SplitOfstWithAddToCombine(Insn &insn, const AArch64MemOperand &memOpnd); + bool SplitOfstWithAddToCombine(Insn &insn, const MemOperand &memOpnd); bool doAggressiveCombine = false; - AArch64MemOperand *memOpnd = nullptr; + MemOperand *memOpnd = nullptr; }; /* @@ -971,6 +948,28 @@ class FmovRegPattern : public CGPeepPattern { Insn *nextInsn = nullptr; }; +/* sbfx ireg1, ireg2, 0, 32 + * use ireg1.32 + * => + * sbfx ireg1, ireg2, 0, 32 + * use ireg2.32 + */ +class SbfxOptPattern : public CGPeepPattern { +public: + SbfxOptPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~SbfxOptPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "SbfxOptPattern"; + } + +private: + Insn *nextInsn = nullptr; + bool toRemove = false; + std::vector cands; +}; + /* cbnz x0, labelA * mov x0, 0 * b return-bb @@ -1019,11 +1018,21 @@ class 
CsetCbzToBeqOptAArch64 : public PeepPattern { /* When exist load after load or load after store, and [MEM] is * totally same. Then optimize them. */ -class ContiLDRorSTRToSameMEMAArch64 : public PeepPattern { +class ContiLDRorSTRToSameMEMPattern : public CGPeepPattern { public: - explicit ContiLDRorSTRToSameMEMAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} - ~ContiLDRorSTRToSameMEMAArch64() override = default; + ContiLDRorSTRToSameMEMPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~ContiLDRorSTRToSameMEMPattern() override = default; void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "ContiLDRorSTRToSameMEMPattern"; + } + + private: + Insn *prevInsn = nullptr; + bool loadAfterStore = false; + bool loadAfterLoad = false; }; /* @@ -1031,11 +1040,19 @@ class ContiLDRorSTRToSameMEMAArch64 : public PeepPattern { * mov x1, x0 * bl MCC_IncDecRef_NaiveRCFast */ -class RemoveIncDecRefAArch64 : public PeepPattern { +class RemoveIncDecRefPattern : public CGPeepPattern { public: - explicit RemoveIncDecRefAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} - ~RemoveIncDecRefAArch64() override = default; + RemoveIncDecRefPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~RemoveIncDecRefPattern() override = default; void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "RemoveIncDecRefPattern"; + } + + private: + Insn *prevInsn = nullptr; }; /* @@ -1062,11 +1079,16 @@ class RemoveIncDecRefAArch64 : public PeepPattern { * ldr w0, [x1] * ret */ -class InlineReadBarriersAArch64 : public PeepPattern { +class InlineReadBarriersPattern : public CGPeepPattern { public: - explicit InlineReadBarriersAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} - ~InlineReadBarriersAArch64() override = default; + InlineReadBarriersPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~InlineReadBarriersPattern() override = default; void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "InlineReadBarriersPattern"; + } }; /* @@ -1084,11 +1106,19 @@ class InlineReadBarriersAArch64 : public PeepPattern { * asr x16, x16, #17 * add x2, x16, x0, LSR #31 */ -class ReplaceDivToMultiAArch64 : public PeepPattern { +class ReplaceDivToMultiPattern : public CGPeepPattern { public: - explicit ReplaceDivToMultiAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} - ~ReplaceDivToMultiAArch64() override = default; + ReplaceDivToMultiPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~ReplaceDivToMultiPattern() override = default; void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "ReplaceDivToMultiPattern"; + } + + private: + Insn *prevInsn = nullptr; + Insn *prePrevInsn = nullptr; }; /* @@ -1375,11 +1405,20 @@ class ReplaceCmpToCmnAArch64 : public PeepPattern { * mov x1, XX * bl MCC_IncDecRef_NaiveRCFast */ -class RemoveIncRefAArch64 : public PeepPattern { +class RemoveIncRefPattern : public CGPeepPattern { public: - explicit RemoveIncRefAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} - ~RemoveIncRefAArch64() override = default; + RemoveIncRefPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, 
currInsn) {} + ~RemoveIncRefPattern() override = default; void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "RemoveIncRefPattern"; + } + + private: + Insn *insnMov2 = nullptr; + Insn *insnMov1 = nullptr; }; /* @@ -1472,7 +1511,7 @@ class ComplexMemOperandLabelAArch64 : public PeepPattern { * mov R1, vreg4 * mov R2, vreg5 */ -class WriteFieldCallAArch64 : public PeepPattern { +class WriteFieldCallPattern : public CGPeepPattern { public: struct WriteRefFieldParam { Operand *objOpnd = nullptr; @@ -1480,18 +1519,22 @@ class WriteFieldCallAArch64 : public PeepPattern { int64 fieldOffset = 0; Operand *fieldValue = nullptr; }; - explicit WriteFieldCallAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} - ~WriteFieldCallAArch64() override = default; + WriteFieldCallPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~WriteFieldCallPattern() override = default; void Run(BB &bb, Insn &insn) override; - void Reset() { - hasWriteFieldCall = false; - prevCallInsn = nullptr; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "WriteFieldCallPattern"; } private: bool hasWriteFieldCall = false; Insn *prevCallInsn = nullptr; + Insn *nextInsn = nullptr; WriteRefFieldParam firstCallParam; + WriteRefFieldParam currentCallParam; + std::vector<Insn*> paramDefInsns; bool WriteFieldCallOptPatternMatch(const Insn &writeFieldCallInsn, WriteRefFieldParam &param, std::vector<Insn*> &paramDefInsns); bool IsWriteRefFieldCallInsn(const Insn &insn); @@ -1502,11 +1545,19 @@ class WriteFieldCallAArch64 : public PeepPattern { * mov x0, xzr/#0 * bl MCC_DecRef_NaiveRCFast */ -class RemoveDecRefAArch64 : public PeepPattern { public: - explicit RemoveDecRefAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} - ~RemoveDecRefAArch64() override = default; + RemoveDecRefPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~RemoveDecRefPattern() override = default; void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "RemoveDecRefPattern"; + } + + private: + Insn *prevInsn = nullptr; }; /* @@ -1531,11 +1582,20 @@ class OneHoleBranchesAArch64 : public PeepPattern { * => * bl MCC_IncRef_NaiveRCFast */ -class ReplaceIncDecWithIncAArch64 : public PeepPattern { public: - explicit ReplaceIncDecWithIncAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} - ~ReplaceIncDecWithIncAArch64() override = default; + ReplaceIncDecWithIncPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~ReplaceIncDecWithIncPattern() override = default; void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "ReplaceIncDecWithIncPattern"; + } + + private: + Insn *prevInsn = nullptr; + FuncNameOperand *target = nullptr; }; /* diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def b/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def index 082e6319b30ed41ffa58f72fa506e40e997666d9..5243ce26bb5c101322e85946d296a781ec114fd1 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def @@ -16,37 +16,44 @@ ADDTARGETPHASE("createstartendlabel", true);
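
For context on the file edited in this hunk: aarch64_phases.def is a .def-style X-macro list, so each ADDTARGETPHASE(name, condition) row only acquires meaning from the macro definition in scope at the include site; the hunk changes which rows appear and under which conditions, not the mechanism itself. A minimal self-contained sketch of that convention — PhaseEntry and BuildPhaseList below are illustrative stand-ins, not the compiler's actual phase manager:

#include <string>
#include <vector>

struct PhaseEntry {
  std::string name;  // phase name as registered
  bool enabled;      // the registration condition, evaluated at list-build time
};

static std::vector<PhaseEntry> BuildPhaseList(bool doSSA, bool doPeephole) {
  std::vector<PhaseEntry> phases;
  // Each row of the .def file expands to one push_back.
#define ADDTARGETPHASE(NAME, COND) phases.push_back({NAME, (COND)});
  ADDTARGETPHASE("cgssaconstruct", doSSA)
  ADDTARGETPHASE("cgpeephole", doPeephole)
  ADDTARGETPHASE("cgemit", true)
#undef ADDTARGETPHASE
  return phases;
}

Read this way, the hunk below amounts to: the SSA-based rows (and later storeloadopt/globalopt) are emitted only when the module flavor is not kFlavorLmbc, and cgprepeephole/cgpostpeephole become conditional on the corresponding CGOptions switches.
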
ADDTARGETPHASE("buildehfunc", true); ADDTARGETPHASE("handlefunction", true); - ADDTARGETPHASE("moveargs", true); - /* SSA PHASES */ - ADDTARGETPHASE("cgssaconstruct", CGOptions::DoCGSSA()); - ADDTARGETPHASE("cgcopyprop", CGOptions::DoCGSSA()); - ADDTARGETPHASE("cgpeephole", CGOptions::DoCGSSA()); - ADDTARGETPHASE("cgtargetprop", CGOptions::DoCGSSA()); - ADDTARGETPHASE("cgdeadcodeelimination", CGOptions::DoCGSSA()); - ADDTARGETPHASE("cgsplitcriticaledge", CGOptions::DoCGSSA()); - ADDTARGETPHASE("cgphielimination", CGOptions::DoCGSSA()); - ADDTARGETPHASE("cgregcoalesce", CGOptions::DoCGSSA()); + if (GetMIRModule()->GetFlavor() != MIRFlavor::kFlavorLmbc) { + ADDTARGETPHASE("moveargs", true); + /* SSA PHASES */ + ADDTARGETPHASE("cgssaconstruct", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgcopyprop", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgpeephole", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgvalidbitopt", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgtargetprop", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgdeadcodeelimination", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgsplitcriticaledge", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgphielimination", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgregcoalesce", CGOptions::DoCGSSA()); + } /* Normal OPT PHASES */ - ADDTARGETPHASE("cgprepeephole", true); + ADDTARGETPHASE("cgprepeephole", CGOptions::DoPrePeephole()); ADDTARGETPHASE("ebo", CGOptions::DoEBO()); ADDTARGETPHASE("prepeephole", CGOptions::DoPrePeephole()) ADDTARGETPHASE("ico", CGOptions::DoICO()) ADDTARGETPHASE("cfgo", !GetMIRModule()->IsCModule() && CGOptions::DoCFGO()); - ADDTARGETPHASE("storeloadopt", CGOptions::DoStoreLoadOpt() && !CGOptions::DoCGSSA()) - ADDTARGETPHASE("globalopt", CGOptions::DoGlobalOpt()) - ADDTARGETPHASE("clearrdinfo", (CGOptions::DoStoreLoadOpt()) || CGOptions::DoGlobalOpt()) + if (GetMIRModule()->GetFlavor() != MIRFlavor::kFlavorLmbc) { + ADDTARGETPHASE("storeloadopt", CGOptions::DoStoreLoadOpt() && !CGOptions::DoCGSSA()); + ADDTARGETPHASE("globalopt", CGOptions::DoGlobalOpt()); + } + ADDTARGETPHASE("clearrdinfo", (CGOptions::DoStoreLoadOpt()) || CGOptions::DoGlobalOpt()); - ADDTARGETPHASE("prepeephole1", CGOptions::DoPrePeephole()) + ADDTARGETPHASE("prepeephole1", CGOptions::DoPrePeephole()); ADDTARGETPHASE("ebo1", CGOptions::DoEBO()); ADDTARGETPHASE("prescheduling", !GetMIRModule()->IsJavaModule() && CGOptions::DoPreSchedule()); ADDTARGETPHASE("raopt", CGOptions::DoPreLSRAOpt()); ADDTARGETPHASE("cgsplitcriticaledge", GetMIRModule()->IsCModule()); ADDTARGETPHASE("regalloc", true); - ADDTARGETPHASE("regsaves", GetMIRModule()->IsCModule() && CGOptions::DoRegSavesOpt()) - ADDTARGETPHASE("storeloadopt", GetMIRModule()->IsCModule() && CGOptions::DoStoreLoadOpt()) - ADDTARGETPHASE("globalopt", CGOptions::DoCGSSA()); - ADDTARGETPHASE("clearrdinfo", GetMIRModule()->IsCModule() && (CGOptions::DoStoreLoadOpt() || CGOptions::DoGlobalOpt())) + ADDTARGETPHASE("regsaves", GetMIRModule()->IsCModule() && CGOptions::DoRegSavesOpt()); + if (GetMIRModule()->GetFlavor() != MIRFlavor::kFlavorLmbc) { + ADDTARGETPHASE("storeloadopt", GetMIRModule()->IsCModule() && CGOptions::DoStoreLoadOpt()); + ADDTARGETPHASE("globalopt", CGOptions::DoCGSSA()); + } + ADDTARGETPHASE("clearrdinfo", GetMIRModule()->IsCModule() && (CGOptions::DoStoreLoadOpt() || CGOptions::DoGlobalOpt())); ADDTARGETPHASE("generateproepilog", true); ADDTARGETPHASE("framefinalize", true); ADDTARGETPHASE("dbgfixcallframeoffsets", true); @@ -54,11 +61,11 @@ ADDTARGETPHASE("peephole0", CGOptions::DoPeephole()) 
ADDTARGETPHASE("postebo", CGOptions::DoEBO()); ADDTARGETPHASE("postcfgo", CGOptions::DoCFGO()); - ADDTARGETPHASE("cgpostpeephole", true) + ADDTARGETPHASE("cgpostpeephole", CGOptions::DoPeephole()) ADDTARGETPHASE("peephole", CGOptions::DoPeephole()) ADDTARGETPHASE("gencfi", !GetMIRModule()->IsCModule()); ADDTARGETPHASE("yieldpoint", GetMIRModule()->IsJavaModule() && CGOptions::IsInsertYieldPoint()); ADDTARGETPHASE("scheduling", CGOptions::DoSchedule()); ADDTARGETPHASE("alignanalysis", GetMIRModule()->IsCModule() && CGOptions::DoAlignAnalysis()); ADDTARGETPHASE("fixshortbranch", true); - ADDTARGETPHASE("cgemit", true); \ No newline at end of file + ADDTARGETPHASE("cgemit", true); diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_proepilog.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_proepilog.h index a6efce01885e5092eb0d614aafdd1c6708016473..cd24e2bafa445bec1305fb64a9631e9700e22397 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_proepilog.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_proepilog.h @@ -29,15 +29,19 @@ class AArch64GenProEpilog : public GenProEpilog { public: explicit AArch64GenProEpilog(CGFunc &func) : GenProEpilog(func) { useFP = func.UseFP(); - stackBaseReg = useFP ? R29 : RSP; + if (func.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + stackBaseReg = RFP; + } else { + stackBaseReg = useFP ? R29 : RSP; + } exitBB2CallSitesMap.clear(); } ~AArch64GenProEpilog() override = default; bool TailCallOpt() override; bool NeedProEpilog() override; - static AArch64MemOperand *SplitStpLdpOffsetForCalleeSavedWithAddInstruction( - CGFunc &cgFunc, const AArch64MemOperand &mo, uint32 bitLen, AArch64reg baseReg = AArch64reg::kRinvalid); + static MemOperand *SplitStpLdpOffsetForCalleeSavedWithAddInstruction( + CGFunc &cgFunc, const MemOperand &mo, uint32 bitLen, AArch64reg baseReg = AArch64reg::kRinvalid); static void AppendInstructionPushPair(CGFunc &cgFunc, AArch64reg reg0, AArch64reg reg1, RegType rty, int offset); static void AppendInstructionPushSingle(CGFunc &cgFunc, AArch64reg reg, RegType rty, int offset); static void AppendInstructionPopSingle(CGFunc &cgFunc, AArch64reg reg, RegType rty, int offset); diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_prop.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_prop.h index fd6f461d5b0410ca04d8276d9f8a03e1368620ba..ec60d7d0bb7c16b0ac4aa893d480c38f4b5b149f 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_prop.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_prop.h @@ -22,8 +22,8 @@ namespace maplebe{ class AArch64Prop : public CGProp { public: - AArch64Prop(MemPool &mp, CGFunc &f, CGSSAInfo &sInfo) - : CGProp(mp, f, sInfo){} + AArch64Prop(MemPool &mp, CGFunc &f, CGSSAInfo &sInfo, LiveIntervalAnalysis &ll) + : CGProp(mp, f, sInfo, ll){} ~AArch64Prop() override = default; /* do not extend life range */ @@ -52,19 +52,19 @@ class A64StrLdrProp { cgDce(&dce) {} void DoOpt(); private: - AArch64MemOperand *StrLdrPropPreCheck(const Insn &insn, MemPropMode prevMod = kUndef); - static MemPropMode SelectStrLdrPropMode(const AArch64MemOperand &currMemOpnd); - bool ReplaceMemOpnd(const AArch64MemOperand &currMemOpnd, const Insn *defInsn); - AArch64MemOperand *SelectReplaceMem(const Insn &defInsn, const AArch64MemOperand &currMemOpnd); - AArch64RegOperand *GetReplaceReg(AArch64RegOperand &a64Reg); - AArch64MemOperand *HandleArithImmDef(AArch64RegOperand &replace, Operand *oldOffset, int64 defVal, uint32 memSize); - AArch64MemOperand *SelectReplaceExt(const Insn 
&defInsn, RegOperand &base, uint32 amount, - bool isSigned, uint32 memSize); - bool CheckNewMemOffset(const Insn &insn, AArch64MemOperand *newMemOpnd, uint32 opndIdx); - void DoMemReplace(const RegOperand &replacedReg, AArch64MemOperand &newMem, Insn &useInsn); - uint32 GetMemOpndIdx(AArch64MemOperand *newMemOpnd, const Insn &insn); - - bool CheckSameReplace(const RegOperand &replacedReg, const AArch64MemOperand *memOpnd); + MemOperand *StrLdrPropPreCheck(const Insn &insn, MemPropMode prevMod = kUndef); + static MemPropMode SelectStrLdrPropMode(const MemOperand &currMemOpnd); + bool ReplaceMemOpnd(const MemOperand &currMemOpnd, const Insn *defInsn); + MemOperand *SelectReplaceMem(const Insn &defInsn, const MemOperand &currMemOpnd); + RegOperand *GetReplaceReg(RegOperand &a64Reg); + MemOperand *HandleArithImmDef(RegOperand &replace, Operand *oldOffset, int64 defVal, uint32 memSize); + MemOperand *SelectReplaceExt(const Insn &defInsn, RegOperand &base, uint32 amount, + bool isSigned, uint32 memSize); + bool CheckNewMemOffset(const Insn &insn, MemOperand *newMemOpnd, uint32 opndIdx); + void DoMemReplace(const RegOperand &replacedReg, MemOperand &newMem, Insn &useInsn); + uint32 GetMemOpndIdx(MemOperand *newMemOpnd, const Insn &insn); + + bool CheckSameReplace(const RegOperand &replacedReg, const MemOperand *memOpnd); CGFunc *cgFunc; CGSSAInfo *ssaInfo; @@ -94,23 +94,23 @@ class A64ConstProp { /* false : default lsl #0 true: lsl #12 (only support 12 bit left shift in aarch64) */ static MOperator GetRegImmMOP(MOperator regregMop, bool withLeftShift); static MOperator GetReversalMOP(MOperator arithMop); - + static MOperator GetFoldMopAndVal(int64 &newVal, int64 constVal, Insn &arithInsn); private: - bool ConstProp(DUInsnInfo &useDUInfo, AArch64ImmOperand &constOpnd); + bool ConstProp(DUInsnInfo &useDUInfo, ImmOperand &constOpnd); /* use xzr/wzr in aarch64 to shrink register live range */ void ZeroRegProp(DUInsnInfo &useDUInfo, RegOperand &toReplaceReg); /* replace old Insn with new Insn, update ssa info automatically */ void ReplaceInsnAndUpdateSSA(Insn &oriInsn, Insn &newInsn); - AArch64ImmOperand *CanDoConstFold(const AArch64ImmOperand &value1, const AArch64ImmOperand &value2, - ArithmeticType aT, bool is64Bit); + ImmOperand *CanDoConstFold(const ImmOperand &value1, const ImmOperand &value2, ArithmeticType aT, bool is64Bit); /* optimization */ - bool MovConstReplace(DUInsnInfo &useDUInfo, AArch64ImmOperand &constOpnd); - bool ArithmeticConstReplace(DUInsnInfo &useDUInfo, AArch64ImmOperand &constOpnd, ArithmeticType aT); - bool ArithmeticConstFold(DUInsnInfo &useDUInfo, const AArch64ImmOperand &constOpnd, ArithmeticType aT); - bool ShiftConstReplace(DUInsnInfo &useDUInfo, const AArch64ImmOperand &constOpnd); + bool MovConstReplace(DUInsnInfo &useDUInfo, ImmOperand &constOpnd); + bool ArithmeticConstReplace(DUInsnInfo &useDUInfo, ImmOperand &constOpnd, ArithmeticType aT); + bool ArithmeticConstFold(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd, ArithmeticType aT); + bool ShiftConstReplace(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd); + bool BitInsertReplace(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd); MemPool *constPropMp; CGFunc *cgFunc; @@ -120,7 +120,8 @@ class A64ConstProp { class CopyRegProp : public PropOptimizePattern { public: - CopyRegProp(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) : PropOptimizePattern(cgFunc, cgssaInfo) {} + CopyRegProp(CGFunc &cgFunc, CGSSAInfo *cgssaInfo, LiveIntervalAnalysis *ll) + : PropOptimizePattern(cgFunc, cgssaInfo, ll) {} ~CopyRegProp() 
override = default; bool CheckCondition(Insn &insn) final; void Optimize(Insn &insn) final; @@ -132,6 +133,7 @@ class CopyRegProp : public PropOptimizePattern { srcVersion = nullptr; } private: + bool IsValidCopyProp(RegOperand &dstReg, RegOperand &srcReg); void VaildateImplicitCvt(RegOperand &destReg, const RegOperand &srcReg, Insn &movInsn); VRegVersion *destVersion = nullptr; VRegVersion *srcVersion = nullptr; @@ -202,7 +204,7 @@ class FpSpConstProp : public PropOptimizePattern { int64 ArithmeticFold(int64 valInUse, ArithmeticType useAT) const; RegOperand *fpSpBase = nullptr; - AArch64ImmOperand *shiftOpnd = nullptr; + ImmOperand *shiftOpnd = nullptr; ArithmeticType aT = kUndefArith; VRegVersion *replaced = nullptr; }; @@ -292,7 +294,6 @@ protected: private: void SelectExtendOrShift(const Insn &def); - bool CheckDefUseInfo(uint32 size); SuffixType CheckOpType(const Operand &lastOpnd) const; void ReplaceUseInsn(Insn &use, const Insn &def, uint32 amount); void SetExMOpType(const Insn &use); @@ -309,6 +310,42 @@ private: LsMOpType lsMOpType; }; +/* + * optimization for call convention + */ +class A64PregCopyPattern : public PropOptimizePattern { + public: + A64PregCopyPattern(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) : PropOptimizePattern(cgFunc, cgssaInfo) {} + ~A64PregCopyPattern() override = default; + bool CheckCondition(Insn &insn) override; + void Optimize(Insn &insn) override; + void Run() override; + + protected: + void Init() override { + validDefInsns.clear(); + firstPhiInsn = nullptr; + differIdx = -1; + differOrigNO = 0; + isCrossPhi = false; + } + + private: + bool CheckValidDefInsn(Insn *defInsn); + bool CheckMultiUsePoints(VRegVersion *version); + bool CheckPhiCaseCondition(Insn &curInsn, Insn &defInsn); + bool DFSFindValidDefInsns(Insn *curDefInsn, std::unordered_map &visited); + Insn &CreateNewPhiInsn(std::unordered_map &newPhiList, Insn *curInsn); + RegOperand &DFSBuildPhiInsn(Insn *curInsn, std::unordered_map &visited); + std::unordered_map FindDifferPhiDefOpnds(); + RegOperand *GetDifferPhiDef(); + std::vector validDefInsns; + Insn *firstPhiInsn = nullptr; + int differIdx = -1; + regno_t differOrigNO = 0; + bool isCrossPhi = false; +}; + class A64ReplaceRegOpndVisitor : public ReplaceRegOpndVisitor { public: A64ReplaceRegOpndVisitor(CGFunc &f, Insn &cInsn, uint32 cIdx, RegOperand &oldRegister ,RegOperand &newRegister) diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_reg_coalesce.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_reg_coalesce.h index 8bd959c69f2e94ab253433aa23e2700832bacd3c..18f8b0821844c2cbbb4189ab2c31b7e0c1701938 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_reg_coalesce.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_reg_coalesce.h @@ -19,14 +19,14 @@ #include "live.h" namespace maplebe { -class AArch64RegisterCoalesce : public RegisterCoalesce { +class AArch64LiveIntervalAnalysis : public LiveIntervalAnalysis { public: - AArch64RegisterCoalesce(CGFunc &func, MemPool &memPool) - : RegisterCoalesce(func, memPool), + AArch64LiveIntervalAnalysis(CGFunc &func, MemPool &memPool) + : LiveIntervalAnalysis(func, memPool), vregLive(alloc.Adapter()), candidates(alloc.Adapter()) {} - ~AArch64RegisterCoalesce() override = default; + ~AArch64LiveIntervalAnalysis() override = default; void ComputeLiveIntervals() override; bool IsUnconcernedReg(const RegOperand ®Opnd) const; diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_regsaves.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_regsaves.h index 
f2d8cb448251aa9a7c5d8074b4377919e8e0e1bc..f14cee004b92c59adb146a4f475553a62f4fc09e 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_regsaves.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_regsaves.h @@ -59,10 +59,6 @@ class SavedRegInfo { saveSet.insert(r); } - void RemoveSaveReg(regno_t r) { - saveSet.erase(r); - } - void InsertEntryReg(regno_t r) { restoreEntrySet.insert(r); } @@ -83,6 +79,10 @@ class SavedRegInfo { return restoreExitSet; } + void RemoveSaveReg(regno_t r) { + saveSet.erase(r); + } + private: MapleSet saveSet; MapleSet restoreEntrySet; @@ -117,7 +117,8 @@ class AArch64RegSavesOpt : public RegSavesOpt { pDomInfo(&pdom), bbSavedRegs(alloc.Adapter()), regSavedBBs(alloc.Adapter()), - regOffset(alloc.Adapter()) { + regOffset(alloc.Adapter()), + id2bb(alloc.Adapter()) { bbSavedRegs.resize(func.NumBBs()); regSavedBBs.resize(sizeof(CalleeBitsType)<<3); for (int i = 0; i < bbSavedRegs.size(); ++i) { @@ -145,7 +146,7 @@ class AArch64RegSavesOpt : public RegSavesOpt { bool AlreadySavedInDominatorList(BB *bb, regno_t reg) const; void DetermineCalleeSaveLocationsDoms(); void DetermineCalleeSaveLocationsPre(); - void DetermineCalleeRestoreLocations(); + bool DetermineCalleeRestoreLocations(); int32 FindNextOffsetForCalleeSave(); void InsertCalleeSaveCode(); void InsertCalleeRestoreCode(); @@ -221,6 +222,14 @@ class AArch64RegSavesOpt : public RegSavesOpt { return bbSavedRegs[bid]; } + void SetId2bb(BB *bb) { + id2bb[bb->GetId()] = bb; + } + + BB *GetId2bb(uint32 bid) { + return id2bb[bid]; + } + private: DomAnalysis *domInfo; PostDomAnalysis *pDomInfo; @@ -230,6 +239,7 @@ class AArch64RegSavesOpt : public RegSavesOpt { MapleVector bbSavedRegs; /* set of regs to be saved in a BB */ MapleVector regSavedBBs; /* set of BBs to be saved for a reg */ MapleMap regOffset; /* save offset of each register */ + MapleMap id2bb; /* bbid to bb* mapping */ bool oneAtaTime = false; regno_t oneAtaTimeReg = 0; }; diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_ssa.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_ssa.h index d04e8eaf1424b62bb70d727331e67b66922180d2..a24a8106044e5b431d9d8800c27d08861765cdb8 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_ssa.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_ssa.h @@ -25,7 +25,7 @@ class AArch64CGSSAInfo : public CGSSAInfo { ~AArch64CGSSAInfo() override = default; void DumpInsnInSSAForm(const Insn &insn) const override; RegOperand *GetRenamedOperand(RegOperand &vRegOpnd, bool isDef, Insn &curInsn, uint32 idx) override; - AArch64MemOperand *CreateMemOperand(AArch64MemOperand &memOpnd, bool isOnSSA /* false = on cgfunc */); + MemOperand *CreateMemOperand(MemOperand &memOpnd, bool isOnSSA /* false = on cgfunc */); void ReplaceInsn(Insn &oriInsn, Insn &newInsn) override; void ReplaceAllUse(VRegVersion *toBeReplaced, VRegVersion *newVersion) override; void CreateNewInsnSSAInfo(Insn &newInsn) override; @@ -34,6 +34,7 @@ class AArch64CGSSAInfo : public CGSSAInfo { void RenameInsn(Insn &insn) override; VRegVersion *RenamedOperandSpecialCase(RegOperand &vRegOpnd, Insn &curInsn, uint32 idx); RegOperand *CreateSSAOperand(RegOperand &virtualOpnd) override; + void CheckAsmDUbinding(Insn &insn, VRegVersion *toBeReplaced, VRegVersion *newVersion); }; class A64SSAOperandRenameVisitor : public SSAOperandVisitor { @@ -49,7 +50,8 @@ class A64SSAOperandRenameVisitor : public SSAOperandVisitor { AArch64CGSSAInfo *ssaInfo; }; -class A64OpndSSAUpdateVsitor : public SSAOperandVisitor { +class A64OpndSSAUpdateVsitor : 
public SSAOperandVisitor, + public OperandVisitor { public: explicit A64OpndSSAUpdateVsitor(AArch64CGSSAInfo &cssaInfo) : ssaInfo(&cssaInfo) {} ~A64OpndSSAUpdateVsitor() override = default; @@ -65,6 +67,15 @@ class A64OpndSSAUpdateVsitor : public SSAOperandVisitor { void Visit(RegOperand *v) final; void Visit(ListOperand *v) final; void Visit(MemOperand *v) final; + void Visit(PhiOperand *v) final; + + bool IsPhi() { + return isPhi; + } + + void SetPhi(bool flag) { + isPhi = flag; + } private: void UpdateRegUse(uint32 ssaIdx); @@ -72,6 +83,7 @@ class A64OpndSSAUpdateVsitor : public SSAOperandVisitor { AArch64CGSSAInfo *ssaInfo; bool isDecrease = false; std::set deletedDef; + bool isPhi = false; }; class A64SSAOperandDumpVisitor : public SSAOperandDumpVisitor { diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_strldr.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_strldr.h index 897b6f7799e8cbf44b1c847726eda96a241f4591..4d63cf8797aafb1d4d5a43df5a15db154d4331d9 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_strldr.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_strldr.h @@ -46,16 +46,16 @@ class AArch64StoreLoadOpt : public StoreLoadOpt { void StrLdrIndexModeOpt(Insn &currInsn); bool CheckReplaceReg(Insn &defInsn, Insn &currInsn, InsnSet &replaceRegDefSet, regno_t replaceRegNo); bool CheckDefInsn(Insn &defInsn, Insn &currInsn); - bool CheckNewMemOffset(const Insn &insn, AArch64MemOperand *newMemOpnd, uint32 opndIdx); - AArch64MemOperand *HandleArithImmDef(AArch64RegOperand &replace, Operand *oldOffset, int64 defVal); - AArch64MemOperand *SelectReplaceMem(Insn &defInsn, Insn &curInsn, RegOperand &base, Operand *offset); - AArch64MemOperand *SelectReplaceExt(const Insn &defInsn, RegOperand &base, bool isSigned); + bool CheckNewMemOffset(const Insn &insn, MemOperand *newMemOpnd, uint32 opndIdx); + MemOperand *HandleArithImmDef(RegOperand &replace, Operand *oldOffset, int64 defVal); + MemOperand *SelectReplaceMem(Insn &defInsn, Insn &curInsn, RegOperand &base, Operand *offset); + MemOperand *SelectReplaceExt(const Insn &defInsn, RegOperand &base, bool isSigned); bool CanDoMemProp(const Insn *insn); - bool CanDoIndexOpt(const AArch64MemOperand &MemOpnd); + bool CanDoIndexOpt(const MemOperand &MemOpnd); void MemPropInit(); - void SelectPropMode(const AArch64MemOperand &currMemOpnd); + void SelectPropMode(const MemOperand &currMemOpnd); int64 GetOffsetForNewIndex(Insn &defInsn, Insn &insn, regno_t baseRegNO, uint32 memOpndSize); - AArch64MemOperand *SelectIndexOptMode(Insn &insn, const AArch64MemOperand &curMemOpnd); + MemOperand *SelectIndexOptMode(Insn &insn, const MemOperand &curMemOpnd); bool ReplaceMemOpnd(Insn &insn, regno_t regNo, RegOperand &base, Operand *offset); void MemProp(Insn &insn); void ProcessStrPair(Insn &insn); @@ -76,4 +76,4 @@ class AArch64StoreLoadOpt : public StoreLoadOpt { }; } /* namespace maplebe */ -#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_STRLDR_H */ \ No newline at end of file +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_STRLDR_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_utils.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_utils.h index cb8300a2e77492da7372e8fae83c09d453e3b114..3379b9be1d3ac2a734185c7d632bfaccdf8eb80d 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_utils.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_utils.h @@ -31,9 +31,8 @@ namespace maplebe { * @return memory operand for new load machine opcode * or nullptr if memory operand can't be obtained */ 
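
The new aarch64_validbit_opt.h introduced just below re-homes the cmp/cset rewrites (formerly CmpCsetOpt in aarch64_peep.h) under valid-bit analysis. The arithmetic behind its rewrite table is easy to verify at the C level: if the first cmp operand has a single valid bit it can only be 0 or 1, so a compare against 0 or 1 followed by cset collapses to a move or an exclusive-or; the remaining precondition (the flags defined by cmp are never read afterwards) is what allows the pair to be dropped outright when source and destination registers coincide. A small self-checking sketch, with illustrative names:

#include <cassert>

// Assumption: w0 has one valid bit, i.e. w0 is 0 or 1.
unsigned CsetNeZero(unsigned w0) { return w0 != 0; }  // == w0      -> mov
unsigned CsetEqZero(unsigned w0) { return w0 == 0; }  // == w0 ^ 1  -> eor
unsigned CsetNeOne(unsigned w0)  { return w0 != 1; }  // == w0 ^ 1  -> eor
unsigned CsetEqOne(unsigned w0)  { return w0 == 1; }  // == w0      -> mov

int main() {
  for (unsigned w0 = 0; w0 <= 1; ++w0) {
    assert(CsetNeZero(w0) == w0);
    assert(CsetEqZero(w0) == (w0 ^ 1U));
    assert(CsetNeOne(w0) == (w0 ^ 1U));
    assert(CsetEqOne(w0) == w0);
  }
  return 0;
}
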
-AArch64MemOperand *GetOrCreateMemOperandForNewMOP(CGFunc &cgFunc, - const Insn &loadIns, - MOperator newLoadMop); +MemOperand *GetOrCreateMemOperandForNewMOP(CGFunc &cgFunc, + const Insn &loadIns, MOperator newLoadMop); } // namespace maplebe #endif // MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_UTILS_H diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_validbit_opt.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_validbit_opt.h new file mode 100644 index 0000000000000000000000000000000000000000..04ac025055c40ef2b335ac47f2fa9c0f5adb3dad --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_validbit_opt.h @@ -0,0 +1,153 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_VALIDBIT_OPT_H +#define MAPLEBE_INCLUDE_CG_AARCH64_VALIDBIT_OPT_H + +#include "cg_validbit_opt.h" +#include "operand.h" +#include "aarch64_cgfunc.h" + +namespace maplebe { +class AArch64ValidBitOpt : public ValidBitOpt { + public: + AArch64ValidBitOpt(CGFunc &f, CGSSAInfo &info) : ValidBitOpt(f, info) {} + ~AArch64ValidBitOpt() override = default; + + void DoOpt(BB &bb, Insn &insn) override; + void SetValidBits(Insn &insn) override; + bool SetPhiValidBits(Insn &insn) override; +}; + +/* + * Example 1) + * def w9 def w9 + * ... ... + * and w4, w9, #255 ===> mov w4, w9 + * + * Example 2) + * and w6[16], w0[16], #FF00[16] mov w6, w0 + * asr w6, w6[16], #8[4] ===> asr w6, w6 + */ +class AndValidBitPattern : public ValidBitPattern { + public: + AndValidBitPattern(CGFunc &cgFunc, CGSSAInfo &info) : ValidBitPattern(cgFunc, info) {} + ~AndValidBitPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "AndValidBitPattern"; + } + + private: + bool CheckImmValidBit(int64 andImm, uint32 andImmVB, int64 shiftImm) const; + MOperator newMop = MOP_undef; + RegOperand *desReg = nullptr; + RegOperand *srcReg = nullptr; +}; + +/* + * Example 1) + * uxth w1[16], w2[16] / uxtb w1[8], w2[8] + * ===> + * mov w1, w2 + * + * Example 2) + * ubfx w1, w2[16], #0, #16 + * ===> + * mov w1, w2 + */ +class ZxtValidBitPattern : public ValidBitPattern { + public: + ZxtValidBitPattern(CGFunc &cgFunc, CGSSAInfo &info) : ValidBitPattern(cgFunc, info) {} + ~ZxtValidBitPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "ZxtValidBitPattern"; + } + + private: + RegOperand *newDstOpnd = nullptr; + RegOperand *newSrcOpnd = nullptr; + MOperator newMop = MOP_undef; +}; + +/* + * cmp w0, #0 + * cset w1, NE --> mov w1, w0 + * + * cmp w0, #0 + * cset w1, EQ --> eor w1, w0, 1 + * + * cmp w0, #1 + * cset w1, NE --> eor w1, w0, 1 + * + * cmp w0, #1 + * cset w1, EQ --> mov w1, w0 + * + * cmp w0, #0 + * cset w0, NE -->null + * + * cmp w0, #1 + * cset w0, EQ -->null + * + * condition: + * 1. the first operand of cmp instruction must have only one valid bit + * 2.
the second operand of cmp instruction must be 0 or 1 + * 3. flag register of cmp instruction must not be used later + */ +class CmpCsetVBPattern : public ValidBitPattern { + public: + CmpCsetVBPattern(CGFunc &cgFunc, CGSSAInfo &info) : ValidBitPattern(cgFunc, info) {} + ~CmpCsetVBPattern() override = default; + void Run(BB &bb, Insn &csetInsn) override; + bool CheckCondition(Insn &csetInsn) override; + std::string GetPatternName() override { + return "CmpCsetPattern"; + }; + + private: + bool IsContinuousCmpCset(const Insn &curInsn); + bool OpndDefByOneValidBit(const Insn &defInsn); + Insn *cmpInsn = nullptr; + int64 cmpConstVal = -1; +}; + +/* + * cmp w0[16], #32768 + * bge label ===> tbnz w0, #15, label + * + * bge / blt + */ +class CmpBranchesPattern : public ValidBitPattern { + public: + CmpBranchesPattern(CGFunc &cgFunc, CGSSAInfo &info) : ValidBitPattern(cgFunc, info) {} + ~CmpBranchesPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "CmpBranchesPattern"; + }; + + private: + void SelectNewMop(MOperator mop); + Insn *prevCmpInsn = nullptr; + int64 newImmVal = -1; + MOperator newMop = MOP_undef; + bool is64Bit = false; +}; +} /* namespace maplebe */ +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_VALIDBIT_OPT_H */ + diff --git a/src/mapleall/maple_be/src/cg/Isel.cpp b/src/mapleall/maple_be/include/cg/abi.h similarity index 52% rename from src/mapleall/maple_be/src/cg/Isel.cpp rename to src/mapleall/maple_be/include/cg/abi.h index 5063aa9c9de1e1b0cf414fe7d12e0b3c55195160..57c4ad728c7d19b09d0c3be87db131df3fb0fc3d 100644 --- a/src/mapleall/maple_be/src/cg/Isel.cpp +++ b/src/mapleall/maple_be/include/cg/abi.h @@ -12,12 +12,28 @@ * FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. */ +#ifndef MAPLEBE_INCLUDE_CG_ABI_H +#define MAPLEBE_INCLUDE_CG_ABI_H + +#include +#include "types_def.h" +#include "operand.h" -#include "Isel.h" namespace maplebe { +enum ArgumentClass : uint8 { + kNoClass, + kIntegerClass, + kFloatClass, + kPointerClass, + kVectorClass, + kMemoryClass, + kShortVectorClass, + kCompositeTypeHFAClass, /* Homogeneous Floating-point Aggregates for AArch64 */ + kCompositeTypeHVAClass, /* Homogeneous Short-Vector Aggregates for AArch64 */ +}; + +using regno_t = uint32_t; + +} /* namespace maplebe */ -bool InstructionSelector::PhaseRun(maplebe::CGFunc &f) { - return true; -} -MAPLE_TRANSFORM_PHASE_REGISTER(InstructionSelector, instructionselector) -} \ No newline at end of file +#endif /* MAPLEBE_INCLUDE_CG_ABI_H */ diff --git a/src/mapleall/maple_be/include/cg/abstract_mmir.def b/src/mapleall/maple_be/include/cg/abstract_mmir.def new file mode 100644 index 0000000000000000000000000000000000000000..376d0ae6ae43fb3edd75238876706f39eedec3a3 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/abstract_mmir.def @@ -0,0 +1,140 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details.
+ */ + + /* Abstract Maple Machine IR */ + /* {mop, opnds, prop, latency, name, format, length} */ + DEFINE_MOP(MOP_undef, {}, ISABSTRACT,0,"","",0) + + /* conversion between all types and registers */ + DEFINE_MOP(MOP_copy_ri_8, {&OpndDescription::Reg8ID,&OpndDescription::Imm32},ISABSTRACT|ISMOVE,0,"copy_ri_8","",1) + DEFINE_MOP(MOP_copy_rr_8, {&OpndDescription::Reg8ID,&OpndDescription::Reg8IS},ISABSTRACT|ISMOVE,0,"copy_rr_8","",1) + DEFINE_MOP(MOP_copy_ri_16, {&OpndDescription::Reg16ID,&OpndDescription::Imm32},ISABSTRACT|ISMOVE,0,"copy_ri_16","",1) + DEFINE_MOP(MOP_copy_rr_16, {&OpndDescription::Reg16ID,&OpndDescription::Reg16IS},ISABSTRACT|ISMOVE,0,"copy_rr_16","",1) + DEFINE_MOP(MOP_copy_ri_32, {&OpndDescription::Reg32ID,&OpndDescription::Imm32},ISABSTRACT|ISMOVE,0,"copy_ri_32","",1) + DEFINE_MOP(MOP_copy_rr_32, {&OpndDescription::Reg32ID,&OpndDescription::Reg32IS},ISABSTRACT|ISMOVE,0,"copy_rr_32","",1) + DEFINE_MOP(MOP_copy_ri_64, {&OpndDescription::Reg64ID,&OpndDescription::Imm64},ISABSTRACT|ISMOVE,0,"copy_ri_64","",1) + DEFINE_MOP(MOP_copy_rr_64, {&OpndDescription::Reg64ID, &OpndDescription::Reg64IS},ISABSTRACT|ISMOVE,0,"copy_rr_64","",1) + + DEFINE_MOP(MOP_copy_fi_8, {&OpndDescription::Reg8FD,&OpndDescription::Imm32},ISABSTRACT|ISMOVE,0,"copy_fi_8","",1) + DEFINE_MOP(MOP_copy_ff_8, {&OpndDescription::Reg8FD,&OpndDescription::Reg8FS},ISABSTRACT|ISMOVE,0,"copy_ff_8","",1) + DEFINE_MOP(MOP_copy_fi_16, {&OpndDescription::Reg16FD,&OpndDescription::Imm32},ISABSTRACT|ISMOVE,0,"copy_fi_16","",1) + DEFINE_MOP(MOP_copy_ff_16, {&OpndDescription::Reg16FD,&OpndDescription::Reg16FS},ISABSTRACT|ISMOVE,0,"copy_ff_16","",1) + DEFINE_MOP(MOP_copy_fi_32, {&OpndDescription::Reg32FD,&OpndDescription::Imm32},ISABSTRACT|ISMOVE,0,"copy_fi_32","",1) + DEFINE_MOP(MOP_copy_ff_32, {&OpndDescription::Reg32FD,&OpndDescription::Reg32FS},ISABSTRACT|ISMOVE,0,"copy_ff_32","",1) + DEFINE_MOP(MOP_copy_fi_64, {&OpndDescription::Reg64FD,&OpndDescription::Imm64},ISABSTRACT|ISMOVE,0,"copy_fi_64","",1) + DEFINE_MOP(MOP_copy_ff_64, {&OpndDescription::Reg64FD, &OpndDescription::Reg64FS},ISABSTRACT|ISMOVE,0,"copy_ff_64","",1) + + /* register trunc */ + DEFINE_MOP(MOP_trunc_rr_8_16, {&OpndDescription::Reg8ID,&OpndDescription::Reg16IS},ISABSTRACT|ISCONVERSION,0,"trunc_r8_r16","",1) + DEFINE_MOP(MOP_trunc_rr_8_32, {&OpndDescription::Reg8ID,&OpndDescription::Reg32IS},ISABSTRACT|ISCONVERSION,0,"trunc_r8_r32","",1) + DEFINE_MOP(MOP_trunc_rr_8_64, {&OpndDescription::Reg8ID,&OpndDescription::Reg64IS},ISABSTRACT|ISCONVERSION,0,"trunc_r8_r64","",1) + DEFINE_MOP(MOP_trunc_ri_8_16, {&OpndDescription::Reg8ID,&OpndDescription::Imm16},ISABSTRACT|ISCONVERSION,0,"trunc_r8_i16","",1) + DEFINE_MOP(MOP_trunc_ri_8_32, {&OpndDescription::Reg8ID,&OpndDescription::Imm32},ISABSTRACT|ISCONVERSION,0,"trunc_r8_i32","",1) + DEFINE_MOP(MOP_trunc_ri_8_64, {&OpndDescription::Reg8ID,&OpndDescription::Imm64},ISABSTRACT|ISCONVERSION,0,"trunc_r8_i64","",1) + DEFINE_MOP(MOP_trunc_rr_16_32, {&OpndDescription::Reg16ID,&OpndDescription::Reg32IS},ISABSTRACT|ISCONVERSION,0,"trunc_r16_r32","",1) + DEFINE_MOP(MOP_trunc_ri_16_32, {&OpndDescription::Reg16ID,&OpndDescription::Imm32},ISABSTRACT|ISCONVERSION,0,"trunc_r16_i32","",1) + DEFINE_MOP(MOP_trunc_rr_16_64, {&OpndDescription::Reg16ID,&OpndDescription::Reg64IS},ISABSTRACT|ISCONVERSION,0,"trunc_r16_r64","",1) + DEFINE_MOP(MOP_trunc_ri_16_64, {&OpndDescription::Reg16ID,&OpndDescription::Imm64},ISABSTRACT|ISCONVERSION,0,"trunc_r16_i64","",1) + DEFINE_MOP(MOP_trunc_rr_32_64, 
{&OpndDescription::Reg32ID,&OpndDescription::Reg64IS},ISABSTRACT|ISCONVERSION,0,"trunc_r32_r64","",1) + DEFINE_MOP(MOP_trunc_ri_32_64, {&OpndDescription::Reg32ID,&OpndDescription::Imm64},ISABSTRACT|ISCONVERSION,0,"trunc_r32_i64","",1) + + /* register extend */ + DEFINE_MOP(MOP_zext_rr_16_8, {&OpndDescription::Reg16ID,&OpndDescription::Reg8IS},ISABSTRACT|ISCONVERSION,0,"zext_r16_r8","",1) + DEFINE_MOP(MOP_sext_rr_16_8, {&OpndDescription::Reg16ID,&OpndDescription::Reg8IS},ISABSTRACT|ISCONVERSION,0,"sext_r16_r8","",1) + DEFINE_MOP(MOP_zext_rr_32_8, {&OpndDescription::Reg32ID,&OpndDescription::Reg8IS},ISABSTRACT|ISCONVERSION,0,"zext_r32_r8","",1) + DEFINE_MOP(MOP_sext_rr_32_8, {&OpndDescription::Reg32ID,&OpndDescription::Reg8IS},ISABSTRACT|ISCONVERSION,0,"sext_r32_r8","",1) + DEFINE_MOP(MOP_zext_rr_32_16, {&OpndDescription::Reg32ID,&OpndDescription::Reg16IS},ISABSTRACT|ISCONVERSION,0,"zext_r32_r16","",1) + DEFINE_MOP(MOP_sext_rr_32_16, {&OpndDescription::Reg32ID,&OpndDescription::Reg16IS},ISABSTRACT|ISCONVERSION,0,"sext_r32_r16","",1) + DEFINE_MOP(MOP_zext_ri_16_8, {&OpndDescription::Reg16ID,&OpndDescription::Imm8},ISABSTRACT|ISCONVERSION,0,"zext_r16_i8","",1) + DEFINE_MOP(MOP_sext_ri_16_8, {&OpndDescription::Reg16ID,&OpndDescription::Imm8},ISABSTRACT|ISCONVERSION,0,"sext_r16_i8","",1) + DEFINE_MOP(MOP_zext_ri_32_8, {&OpndDescription::Reg32ID,&OpndDescription::Imm8},ISABSTRACT|ISCONVERSION,0,"zext_r32_i8","",1) + DEFINE_MOP(MOP_sext_ri_32_8, {&OpndDescription::Reg32ID,&OpndDescription::Imm8},ISABSTRACT|ISCONVERSION,0,"sext_r32_i8","",1) + DEFINE_MOP(MOP_zext_ri_32_16, {&OpndDescription::Reg32ID,&OpndDescription::Imm16},ISABSTRACT|ISCONVERSION,0,"zext_r32_i16","",1) + DEFINE_MOP(MOP_sext_ri_32_16, {&OpndDescription::Reg32ID,&OpndDescription::Imm16},ISABSTRACT|ISCONVERSION,0,"sext_r32_i16","",1) + + DEFINE_MOP(MOP_zext_rr_64_8, {&OpndDescription::Reg64ID,&OpndDescription::Reg8IS},ISABSTRACT|ISCONVERSION,0,"zext_r64_r8","",1) + DEFINE_MOP(MOP_sext_rr_64_8, {&OpndDescription::Reg64ID,&OpndDescription::Reg8IS},ISABSTRACT|ISCONVERSION,0,"sext_r64_r8","",1) + DEFINE_MOP(MOP_zext_rr_64_16, {&OpndDescription::Reg64ID,&OpndDescription::Reg16IS},ISABSTRACT|ISCONVERSION,0,"zext_r64_r16","",1) + DEFINE_MOP(MOP_sext_rr_64_16, {&OpndDescription::Reg64ID,&OpndDescription::Reg16IS},ISABSTRACT|ISCONVERSION,0,"sext_r64_r16","",1) + DEFINE_MOP(MOP_zext_rr_64_32, {&OpndDescription::Reg64ID,&OpndDescription::Reg32IS},ISABSTRACT|ISCONVERSION,0,"zext_r64_r32","",1) + DEFINE_MOP(MOP_sext_rr_64_32, {&OpndDescription::Reg64ID,&OpndDescription::Reg32IS},ISABSTRACT|ISCONVERSION,0,"sext_r64_r32","",1) + DEFINE_MOP(MOP_zext_ri_64_8, {&OpndDescription::Reg64ID,&OpndDescription::Imm8},ISABSTRACT|ISCONVERSION,0,"zext_r64_i8","",1) + DEFINE_MOP(MOP_sext_ri_64_8, {&OpndDescription::Reg64ID,&OpndDescription::Imm8},ISABSTRACT|ISCONVERSION,0,"sext_r64_i8","",1) + DEFINE_MOP(MOP_zext_ri_64_16, {&OpndDescription::Reg64ID,&OpndDescription::Imm16},ISABSTRACT|ISCONVERSION,0,"zext_r64_i16","",1) + DEFINE_MOP(MOP_sext_ri_64_16, {&OpndDescription::Reg64ID,&OpndDescription::Imm16},ISABSTRACT|ISCONVERSION,0,"sext_r64_i16","",1) + DEFINE_MOP(MOP_zext_ri_64_32, {&OpndDescription::Reg64ID,&OpndDescription::Imm32},ISABSTRACT|ISCONVERSION,0,"zext_r64_i32","",1) + DEFINE_MOP(MOP_sext_ri_64_32, {&OpndDescription::Reg64ID,&OpndDescription::Imm32},ISABSTRACT|ISCONVERSION,0,"sext_r64_i32","",1) + + /* conversion between different kinds of registers */ + DEFINE_MOP(MOP_cvt_rf_32, 
{&OpndDescription::Reg32ID,&OpndDescription::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_rf_32","",1) + + /* Support transformation between memory and registers */ + DEFINE_MOP(MOP_str_8, {&OpndDescription::Reg8IS,&OpndDescription::Mem8D},ISABSTRACT|ISSTORE,0,"str_8","",1) + DEFINE_MOP(MOP_str_16, {&OpndDescription::Reg16IS,&OpndDescription::Mem16D},ISABSTRACT|ISSTORE,0,"str_16","",1) + DEFINE_MOP(MOP_str_32, {&OpndDescription::Reg32IS,&OpndDescription::Mem32D},ISABSTRACT|ISSTORE,0,"str_32","",1) + DEFINE_MOP(MOP_str_64, {&OpndDescription::Reg64IS,&OpndDescription::Mem64D},ISABSTRACT|ISSTORE,0,"str_64","",1) + DEFINE_MOP(MOP_load_8, {&OpndDescription::Reg8ID,&OpndDescription::Mem8S},ISABSTRACT|ISLOAD,0,"load_8","",1) + DEFINE_MOP(MOP_load_16, {&OpndDescription::Reg16ID,&OpndDescription::Mem16S},ISABSTRACT|ISLOAD,0,"load_16","",1) + DEFINE_MOP(MOP_load_32, {&OpndDescription::Reg32ID,&OpndDescription::Mem32S},ISABSTRACT|ISLOAD,0,"load_32","",1) + DEFINE_MOP(MOP_load_64, {&OpndDescription::Reg64ID,&OpndDescription::Mem64S},ISABSTRACT|ISLOAD,0,"load_64","",1) + DEFINE_MOP(MOP_str_f_8, {&OpndDescription::Reg8FS,&OpndDescription::Mem8D},ISABSTRACT|ISSTORE,0,"str_f_8","",1) + DEFINE_MOP(MOP_str_f_16, {&OpndDescription::Reg16FS,&OpndDescription::Mem16D},ISABSTRACT|ISSTORE,0,"str_f_16","",1) + DEFINE_MOP(MOP_str_f_32, {&OpndDescription::Reg32FS,&OpndDescription::Mem32D},ISABSTRACT|ISSTORE,0,"str_f_32","",1) + DEFINE_MOP(MOP_str_f_64, {&OpndDescription::Reg64FS,&OpndDescription::Mem64D},ISABSTRACT|ISSTORE,0,"str_f_64","",1) + DEFINE_MOP(MOP_load_f_8, {&OpndDescription::Reg8FD,&OpndDescription::Mem8S},ISABSTRACT|ISLOAD,0,"load_f_8","",1) + DEFINE_MOP(MOP_load_f_16, {&OpndDescription::Reg16FD,&OpndDescription::Mem16S},ISABSTRACT|ISLOAD,0,"load_f_16","",1) + DEFINE_MOP(MOP_load_f_32, {&OpndDescription::Reg32FD,&OpndDescription::Mem32S},ISABSTRACT|ISLOAD,0,"load_f_32","",1) + DEFINE_MOP(MOP_load_f_64, {&OpndDescription::Reg64FD,&OpndDescription::Mem64S},ISABSTRACT|ISLOAD,0,"load_f_64","",1) + + /* Support three address basic operations */ + DEFINE_MOP(MOP_add_8, {&OpndDescription::Reg8ID,&OpndDescription::Reg8IS, &OpndDescription::Reg8IS},ISABSTRACT|ISBASICOP,0,"add_8","",1) + DEFINE_MOP(MOP_add_16, {&OpndDescription::Reg16ID,&OpndDescription::Reg16IS, &OpndDescription::Reg16IS},ISABSTRACT|ISBASICOP,0,"add_16","",1) + DEFINE_MOP(MOP_add_32, {&OpndDescription::Reg32ID,&OpndDescription::Reg32IS, &OpndDescription::Reg32IS},ISABSTRACT|ISBASICOP,0,"add_32","",1) + DEFINE_MOP(MOP_add_64, {&OpndDescription::Reg64ID,&OpndDescription::Reg64IS, &OpndDescription::Reg64IS},ISABSTRACT|ISBASICOP,0,"add_64","",1) + DEFINE_MOP(MOP_sub_8, {&OpndDescription::Reg8ID,&OpndDescription::Reg8IS, &OpndDescription::Reg8IS},ISABSTRACT|ISBASICOP,0,"sub_8","",1) + DEFINE_MOP(MOP_sub_16, {&OpndDescription::Reg16ID,&OpndDescription::Reg16IS, &OpndDescription::Reg16IS},ISABSTRACT|ISBASICOP,0,"sub_16","",1) + DEFINE_MOP(MOP_sub_32, {&OpndDescription::Reg32ID,&OpndDescription::Reg32IS, &OpndDescription::Reg32IS},ISABSTRACT|ISBASICOP,0,"sub_32","",1) + DEFINE_MOP(MOP_sub_64, {&OpndDescription::Reg64ID,&OpndDescription::Reg64IS, &OpndDescription::Reg64IS},ISABSTRACT|ISBASICOP,0,"sub_64","",1) + DEFINE_MOP(MOP_or_8, {&OpndDescription::Reg8ID,&OpndDescription::Reg8IS, &OpndDescription::Reg8IS},ISABSTRACT|ISBASICOP,0,"or_8","",1) + DEFINE_MOP(MOP_or_16, {&OpndDescription::Reg16ID,&OpndDescription::Reg16IS, &OpndDescription::Reg16IS},ISABSTRACT|ISBASICOP,0,"or_16","",1) + DEFINE_MOP(MOP_or_32, 
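
/*
 * A sketch of how instruction selection can materialize one of the
 * three-address MOPs above with the InsnBuilder/OperandBuilder interfaces
 * added in cg_irbuilder.h later in this patch. The abstractMds lookup reuses
 * the hypothetical table from the note above, and GetCurBB()/AppendInsn() are
 * assumed CGFunc/BB helpers:
 *
 *   CGRegOperand &dst = cgFunc.GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt);
 *   Insn &add = cgFunc.GetInsnBuilder()->BuildInsn(abstract::MOP_add_64,
 *                                                  abstractMds[abstract::MOP_add_64]);
 *   add.AddOperand(dst);  /* destination vreg */
 *   add.AddOperand(lhs);  /* two previously selected source registers */
 *   add.AddOperand(rhs);
 *   cgFunc.GetCurBB()->AppendInsn(add);
 */
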
{&OpndDescription::Reg32ID,&OpndDescription::Reg32IS, &OpndDescription::Reg32IS},ISABSTRACT|ISBASICOP,0,"or_32","",1) + DEFINE_MOP(MOP_or_64, {&OpndDescription::Reg64ID,&OpndDescription::Reg64IS, &OpndDescription::Reg64IS},ISABSTRACT|ISBASICOP,0,"or_64","",1) + DEFINE_MOP(MOP_xor_8, {&OpndDescription::Reg8ID,&OpndDescription::Reg8IS, &OpndDescription::Reg8IS},ISABSTRACT|ISBASICOP,0,"xor_8","",1) + DEFINE_MOP(MOP_xor_16, {&OpndDescription::Reg16ID,&OpndDescription::Reg16IS, &OpndDescription::Reg16IS},ISABSTRACT|ISBASICOP,0,"xor_16","",1) + DEFINE_MOP(MOP_xor_32, {&OpndDescription::Reg32ID,&OpndDescription::Reg32IS, &OpndDescription::Reg32IS},ISABSTRACT|ISBASICOP,0,"xor_32","",1) + DEFINE_MOP(MOP_xor_64, {&OpndDescription::Reg64ID,&OpndDescription::Reg64IS, &OpndDescription::Reg64IS},ISABSTRACT|ISBASICOP,0,"xor_64","",1) + DEFINE_MOP(MOP_and_8, {&OpndDescription::Reg8ID,&OpndDescription::Reg8IS, &OpndDescription::Reg8IS},ISABSTRACT|ISBASICOP,0,"and_8","",1) + DEFINE_MOP(MOP_and_16, {&OpndDescription::Reg16ID,&OpndDescription::Reg16IS, &OpndDescription::Reg16IS},ISABSTRACT|ISBASICOP,0,"and_16","",1) + DEFINE_MOP(MOP_and_32, {&OpndDescription::Reg32ID,&OpndDescription::Reg32IS, &OpndDescription::Reg32IS},ISABSTRACT|ISBASICOP,0,"and_32","",1) + DEFINE_MOP(MOP_and_64, {&OpndDescription::Reg64ID,&OpndDescription::Reg64IS, &OpndDescription::Reg64IS},ISABSTRACT|ISBASICOP,0,"and_64","",1) + + /* shift -- shl/ashr/lshr */ + DEFINE_MOP(MOP_shl_8, {&OpndDescription::Reg8ID,&OpndDescription::Reg8IS, &OpndDescription::Reg32IS},ISABSTRACT|ISSHIFT,0,"shl_8","",1) + DEFINE_MOP(MOP_shl_16, {&OpndDescription::Reg16ID,&OpndDescription::Reg16IS, &OpndDescription::Reg32IS},ISABSTRACT|ISSHIFT,0,"shl_16","",1) + DEFINE_MOP(MOP_shl_32, {&OpndDescription::Reg32ID,&OpndDescription::Reg32IS, &OpndDescription::Reg32IS},ISABSTRACT|ISSHIFT,0,"shl_32","",1) + DEFINE_MOP(MOP_shl_64, {&OpndDescription::Reg64ID,&OpndDescription::Reg64IS, &OpndDescription::Reg32IS},ISABSTRACT|ISSHIFT,0,"shl_64","",1) + DEFINE_MOP(MOP_ashr_8, {&OpndDescription::Reg8ID,&OpndDescription::Reg8IS, &OpndDescription::Reg32IS},ISABSTRACT|ISSHIFT,0,"ashr_8","",1) + DEFINE_MOP(MOP_ashr_16, {&OpndDescription::Reg16ID,&OpndDescription::Reg16IS, &OpndDescription::Reg32IS},ISABSTRACT|ISSHIFT,0,"ashr_16","",1) + DEFINE_MOP(MOP_ashr_32, {&OpndDescription::Reg32ID,&OpndDescription::Reg32IS, &OpndDescription::Reg32IS},ISABSTRACT|ISSHIFT,0,"ashr_32","",1) + DEFINE_MOP(MOP_ashr_64, {&OpndDescription::Reg64ID,&OpndDescription::Reg64IS, &OpndDescription::Reg32IS},ISABSTRACT|ISSHIFT,0,"ashr_64","",1) + DEFINE_MOP(MOP_lshr_8, {&OpndDescription::Reg8ID,&OpndDescription::Reg8IS, &OpndDescription::Reg32IS},ISABSTRACT|ISSHIFT,0,"lshr_8","",1) + DEFINE_MOP(MOP_lshr_16, {&OpndDescription::Reg16ID,&OpndDescription::Reg16IS, &OpndDescription::Reg32IS},ISABSTRACT|ISSHIFT,0,"lshr_16","",1) + DEFINE_MOP(MOP_lshr_32, {&OpndDescription::Reg32ID,&OpndDescription::Reg32IS, &OpndDescription::Reg32IS},ISABSTRACT|ISSHIFT,0,"lshr_32","",1) + DEFINE_MOP(MOP_lshr_64, {&OpndDescription::Reg64ID,&OpndDescription::Reg64IS, &OpndDescription::Reg32IS},ISABSTRACT|ISSHIFT,0,"lshr_64","",1) + + /* Support two address basic operations */ + DEFINE_MOP(MOP_neg_32, {&OpndDescription::Reg32ID,&OpndDescription::Mem32S},ISABSTRACT|ISUNARYOP,0,"neg_32","",1) + DEFINE_MOP(MOP_not_64, {&OpndDescription::Reg64ID,&OpndDescription::Mem64S},ISABSTRACT|ISUNARYOP,0,"not_64","",1) + DEFINE_MOP(MOP_not_32, {&OpndDescription::Reg32ID,&OpndDescription::Mem32S},ISABSTRACT|ISUNARYOP,0,"not_32","",1) \ No newline at 
end of file diff --git a/src/mapleall/maple_be/include/cg/alignment.h b/src/mapleall/maple_be/include/cg/alignment.h index 9caf471173ddfeb2c23ab25c08a9a50326c256ce..8c588039992dbb776a8444eaf221544b96965d21 100644 --- a/src/mapleall/maple_be/include/cg/alignment.h +++ b/src/mapleall/maple_be/include/cg/alignment.h @@ -24,9 +24,9 @@ namespace maplebe { class AlignAnalysis { public: - AlignAnalysis(CGFunc &func, MemPool &memPool) + AlignAnalysis(CGFunc &func, MemPool &memP) : cgFunc(&func), - alignAllocator(&memPool), + alignAllocator(&memP), loopHeaderBBs(alignAllocator.Adapter()), jumpTargetBBs(alignAllocator.Adapter()), alignInfos(alignAllocator.Adapter()), diff --git a/src/mapleall/maple_be/include/cg/asm_info.h b/src/mapleall/maple_be/include/cg/asm_info.h index 26848d93ec63b342b302d203cabf3a3c9f6d2c0f..bfce53ec0a8e59ac827c4b59d9d2130c544d67cc 100644 --- a/src/mapleall/maple_be/include/cg/asm_info.h +++ b/src/mapleall/maple_be/include/cg/asm_info.h @@ -35,6 +35,7 @@ enum AsmLabel : uint8 { kAsmQuad, kAsmSize, kAsmType, + kAsmText, kAsmHidden }; @@ -124,6 +125,10 @@ class AsmInfo { return asmHidden; } + const MapleString &GetText() const { + return asmText; + } + const MapleString &GetSet() const { return asmSet; } @@ -134,18 +139,14 @@ class AsmInfo { explicit AsmInfo(MemPool &memPool) #if TARGX86 || TARGX86_64 - : asmCmnt("\t#\t", &memPool), + : asmCmnt("\t//\t", &memPool), #elif TARGARM32 : asmCmnt("\t@\t", &memPool), #else - : asmCmnt("\t//\t", &memPool), + : asmCmnt("\t#\t", &memPool), #endif -#if TARGX86 || TARGX86_64 - asmAtObt("\t@object\t", &memPool), -#else asmAtObt("\t%object\t", &memPool), -#endif asmFile("\t.file\t", &memPool), asmSection("\t.section\t", &memPool), asmRodata(".rodata\t", &memPool), @@ -173,6 +174,7 @@ class AsmInfo { asmSize("\t.size\t", &memPool), asmType("\t.type\t", &memPool), asmHidden("\t.hidden\t", &memPool), + asmText("\t.text\t", &memPool), asmSet("\t.set\t", &memPool), asmWeakref("\t.weakref\t", &memPool){} @@ -200,6 +202,7 @@ class AsmInfo { MapleString asmSize; MapleString asmType; MapleString asmHidden; + MapleString asmText; MapleString asmSet; MapleString asmWeakref; }; diff --git a/src/mapleall/maple_be/include/cg/call_conv.h b/src/mapleall/maple_be/include/cg/call_conv.h index aed5b2b9aaadfd1fd96d4d3803bbeed67d471983..d7a194022c5d054baef0b5c74807e86a186514fa 100644 --- a/src/mapleall/maple_be/include/cg/call_conv.h +++ b/src/mapleall/maple_be/include/cg/call_conv.h @@ -59,6 +59,97 @@ struct CCLocInfo { return reg3; } }; + +class LmbcFormalParamInfo { + public: + LmbcFormalParamInfo(PrimType pType, uint32 ofst, uint32 sz) : + type(nullptr), primType(pType), offset(ofst), size(sz), regNO(0), vregNO(0), numRegs(0), + fpSize(0), isReturn(false), isPureFloat(false), isOnStack(false) {} + + ~LmbcFormalParamInfo() = default; + + MIRStructType *GetType() { + return type; + } + void SetType(MIRStructType *ty) { + type = ty; + } + PrimType GetPrimType() { + return primType; + } + void SetPrimType(PrimType pType) { + primType = pType; + } + uint32 GetOffset() { + return offset; + } + void SetOffset(uint32 ofs) { + offset = ofs; + } + uint32 GetSize() { + return size; + } + void SetSize(uint32 sz) { + size = sz; + } + regno_t GetRegNO() { + return regNO; + } + void SetRegNO(regno_t reg) { + regNO = reg; + } + regno_t GetVregNO() { + return vregNO; + } + void SetVregNO(regno_t reg) { + vregNO = reg; + } + uint32 GetNumRegs() { + return numRegs; + } + void SetNumRegs(uint32 num) { + numRegs = num; + } + uint32 GetFpSize() { + return fpSize; + } + void 
SetFpSize(uint32 sz) {
+    fpSize = sz;
+  }
+  bool IsReturn() {
+    return isReturn;
+  }
+  void SetIsReturn() {
+    isReturn = true;
+  }
+  bool IsPureFloat() {
+    return isPureFloat;
+  }
+  void SetIsPureFloat() {
+    isPureFloat = true;
+  }
+  bool IsInReg() {
+    return (isOnStack == false);
+  }
+  bool IsOnStack() {
+    return isOnStack;
+  }
+  void SetIsOnStack() {
+    isOnStack = true;
+  }
+ private:
+  MIRStructType *type;
+  PrimType primType;
+  uint32 offset;
+  uint32 size;        /* size of the prim type or struct */
+  regno_t regNO = 0;  /* param reg num or starting reg num if numRegs > 0 */
+  regno_t vregNO = 0; /* if there is no explicit reg assignment from IR, create a move from the param reg */
+  uint32 numRegs = 0; /* number of regs for struct param */
+  uint32 fpSize = 0;  /* size of fp param if isPureFloat */
+  bool isReturn;
+  bool isPureFloat = false;
+  bool isOnStack;     /* small structs with arrays need to be saved onto the stack */
+};
 } /* namespace maplebe */
 #endif /* MAPLEBE_INCLUDE_CG_CALL_CONV_H */
diff --git a/src/mapleall/maple_be/include/cg/cfi.h b/src/mapleall/maple_be/include/cg/cfi.h
index 4cf111764bb1f7f139f6f2abf60e1816ab0b2504..c646660f4f8a247d014670d67262ce8a35dd4add 100644
--- a/src/mapleall/maple_be/include/cg/cfi.h
+++ b/src/mapleall/maple_be/include/cg/cfi.h
@@ -19,6 +19,8 @@
 #include "mempool_allocator.h"
 #include "mir_symbol.h"
+#include "operand.h"
+
 /*
  * Reference:
  * GNU Binutils. AS documentation
@@ -76,9 +78,7 @@ class CfiInsn : public maplebe::Insn {
   bool IsMachineInstruction() const override {
     return false;
   }
-
-  void Emit(const maplebe::CG &cg, maplebe::Emitter &emitter) const override;
-
+#if TARGAARCH64 || TARGRISCV64
   void Dump() const override;
   bool Check() const override;
@@ -100,6 +100,7 @@ class CfiInsn : public maplebe::Insn {
     CHECK_FATAL(false, "cfi do not def regs");
     return std::set();
   }
+#endif
 private:
   CfiInsn &operator=(const CfiInsn&);
diff --git a/src/mapleall/maple_be/include/cg/cg.h b/src/mapleall/maple_be/include/cg/cg.h
index 678de0bfe096a2fd8a01ce9e168763beaffb0f2a..ec70be66be3d8a7bbb91257a240cd287cc4c66c9 100644
--- a/src/mapleall/maple_be/include/cg/cg.h
+++ b/src/mapleall/maple_be/include/cg/cg.h
@@ -42,6 +42,10 @@ class CGProp;
 class CGDce;
 class AlignAnalysis;
 class MoveRegArgs;
+class MPISel;
+class Standardize;
+class LiveIntervalAnalysis;
+class ValidBitOpt;
 class Globals {
  public:
@@ -383,14 +387,27 @@ class CG {
   virtual AlignAnalysis *CreateAlignAnalysis(MemPool &mp, CGFunc &f) const {
     return nullptr;
   };
+  virtual MPISel *CreateMPIsel(MemPool &mp, CGFunc &f) const {
+    return nullptr;
+  }
+  virtual Standardize *CreateStandardize(MemPool &mp, CGFunc &f) const {
+    return nullptr;
+  }
+  virtual ValidBitOpt *CreateValidBitOpt(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const {
+    return nullptr;
+  }
+
   /* Init SubTarget optimization */
   virtual CGSSAInfo *CreateCGSSAInfo(MemPool &mp, CGFunc &f, DomAnalysis &da, MemPool &tmp) const {
     return nullptr;
   };
+  virtual LiveIntervalAnalysis *CreateLLAnalysis(MemPool &mp, CGFunc &f) const {
+    return nullptr;
+  };
   virtual PhiEliminate *CreatePhiElimintor(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const {
     return nullptr;
   };
-  virtual CGProp *CreateCGProp(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const {
+  virtual CGProp *CreateCGProp(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo, LiveIntervalAnalysis &ll) const {
     return nullptr;
   };
   virtual CGDce *CreateCGDce(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const {
     return nullptr;
   };
diff --git a/src/mapleall/maple_be/include/cg/cg_cfg.h b/src/mapleall/maple_be/include/cg/cg_cfg.h
index
2cfcc2202a8a637d469e7d08a259c51c18956076..8e2b538e6064bc9bb9b8d042d526002aa4ad81de 100644 --- a/src/mapleall/maple_be/include/cg/cg_cfg.h +++ b/src/mapleall/maple_be/include/cg/cg_cfg.h @@ -71,6 +71,7 @@ class CGCFG { void BuildCFG(); void CheckCFG(); + void CheckCFGFreq(); void InitInsnVisitor(CGFunc &func); InsnVisitor *GetInsnModifier() const { diff --git a/src/mapleall/maple_be/include/cg/cg_irbuilder.h b/src/mapleall/maple_be/include/cg/cg_irbuilder.h index ff975c5eddd5a8849dc324dbf4ac3a0d61cf8146..3be1251bb728f5c983f174aabcae434c3e0c814f 100644 --- a/src/mapleall/maple_be/include/cg/cg_irbuilder.h +++ b/src/mapleall/maple_be/include/cg/cg_irbuilder.h @@ -20,71 +20,51 @@ #include "operand.h" namespace maplebe { - -#if 0 class InsnBuilder { public: explicit InsnBuilder(MemPool &memPool) : mp(&memPool) {} virtual ~InsnBuilder() = default; - virtual Insn &BuildInsn(MOperator opCode) = 0; +#ifdef TARGX86_64 + Insn &BuildInsn(MOperator opCode, const InsnDescription &idesc); +#else + Insn &BuildInsn(MOperator opCode, const InsnDescription &idesc) { + (void)idesc; + Insn *a = nullptr; + return *a; + } + Insn &BuildInsn(MOperator opCode) { + Insn *a = nullptr; + return *a; + } +#endif protected: MemPool *mp; - -}; - - - - private: - -}; - - - -class OperandDescription { - public: - OperandDescription(Operand::OperandType ot,uint32 size, uint64 flag) - : opndType(), - size(), - flag() {} - private: - Operand::OperandType opndType; - uint32 size; - uint64 flag; }; -namespace X64 { - OperandDescription opnd32RegSrc(Operand::kOpdRegister, 8, 12); -} - - +constexpr uint32 baseVirtualRegNO = 200; /* avoid conflicts between virtual and physical */ class OperandBuilder { public: - explicit OperandBuilder(MemPool *mp) : Alloc(mp), pRegPool(), vRegPool() {} - virtual const RegOperand &GetorCreateVReg(OperandDescription &omd) = 0; - virtual const RegOperand &GetorCreatePReg(OpndProp &opndDesc) = 0; - virtual const RegOperand &GetorCreateImm(OpndProp &opndDesc) = 0; - virtual const RegOperand &GetorCreateMem(OpndProp &opndDesc) = 0; + explicit OperandBuilder(MemPool &mp) : alloc(&mp) {} + + /* create an operand in cgfunc when no mempool is supplied */ + CGImmOperand &CreateImm(uint32 size, int64 value, MemPool *mp = nullptr); + CGImmOperand &CreateImm(const MIRSymbol &symbol, int64 offset, int32 relocs, MemPool *mp = nullptr); + CGMemOperand &CreateMem(uint32 size, MemPool *mp = nullptr); + CGMemOperand &CreateMem(CGRegOperand &baseOpnd, int64 offset, uint32 size); + CGRegOperand &CreateVReg(uint32 size, RegType type, MemPool *mp = nullptr); + CGRegOperand &CreateVReg(regno_t vRegNO, uint32 size, RegType type, MemPool *mp = nullptr); + CGRegOperand &CreatePReg(regno_t pRegNO, uint32 size, RegType type, MemPool *mp = nullptr); + CGListOperand &CreateList(MemPool *mp = nullptr); + CGFuncNameOperand &CreateFuncNameOpnd(MIRSymbol &symbol, MemPool *mp = nullptr); + CGLabelOperand &CreateLabel(const char *parent, LabelIdx idx, MemPool *mp = nullptr); protected: - MapleAllocator Alloc; + MapleAllocator alloc; private: + uint32 virtualRegNum = 0; /* reg bank for multiple use */ - MapleUnorderedMap pRegPool; - MapleUnorderedMap vRegPool; - }; - -class AArch64OpndBuilder : public OperandBuilder { - public: - explicit AArch64OpndBuilder(MemPool *createMP) : OperandBuilder(createMP) { - - } - const RegOperand &GetorCreateVReg(OperandDescription &omd) override { - Alloc.New(); - } -}; -#endif } #endif //MAPLEBE_INCLUDE_CG_IRBUILDER_H diff --git a/src/mapleall/maple_be/include/cg/cg_option.h 
b/src/mapleall/maple_be/include/cg/cg_option.h index 1fcd99d705f56e3f47bb3d123b62ffeb0eeee6a7..5bf9ddf9b11d1926c9409752e50bdb2d531ba2ff 100644 --- a/src/mapleall/maple_be/include/cg/cg_option.h +++ b/src/mapleall/maple_be/include/cg/cg_option.h @@ -547,6 +547,10 @@ class CGOptions : public MapleDriverOptionBase { return rematLevel; } + static bool OptimizeForSize() { + return optForSize; + } + static void SetRematLevel(uint8 level) { rematLevel = level; } @@ -713,6 +717,18 @@ class CGOptions : public MapleDriverOptionBase { doGlobalOpt = false; } + static void EnableHotColdSplit() { + enableHotColdSplit = true; + } + + static void DisableHotColdSplit() { + enableHotColdSplit = false; + } + + static bool DoEnableHotColdSplit() { + return enableHotColdSplit; + } + static bool DoGlobalOpt() { return doGlobalOpt; } @@ -729,6 +745,18 @@ class CGOptions : public MapleDriverOptionBase { return doAlignAnalysis; } + static void EnableCondBrAlign() { + doCondBrAlign = true; + } + + static void DisableCondBrAlign() { + doCondBrAlign = false; + } + + static bool DoCondBrAlign() { + return doCondBrAlign; + } + static void EnableBigEndianInCG() { cgBigEndian = true; } @@ -1195,6 +1223,58 @@ class CGOptions : public MapleDriverOptionBase { return fastMath; } + static void EnableCommon() { + noCommon = false; + } + + static void DisableCommon() { + noCommon = true; + } + + static bool IsNoCommon() { + return noCommon; + } + + static void SetAlignMinBBSize(uint32 minBBSize) { + alignMinBBSize = minBBSize; + } + + static uint32 GetAlignMinBBSize() { + return alignMinBBSize; + } + + static void SetAlignMaxBBSize(uint32 maxBBSize) { + alignMaxBBSize = maxBBSize; + } + + static uint32 GetAlignMaxBBSize() { + return alignMaxBBSize; + } + + static void SetLoopAlignPow(uint32 loopPow) { + loopAlignPow = loopPow; + } + + static uint32 GetLoopAlignPow() { + return loopAlignPow; + } + + static void SetJumpAlignPow(uint32 jumpPow) { + jumpAlignPow = jumpPow; + } + + static uint32 GetJumpAlignPow() { + return jumpAlignPow; + } + + static void SetFuncAlignPow(uint32 funcPow) { + funcAlignPow = funcPow; + } + + static uint32 GetFuncAlignPow() { + return funcAlignPow; + } + private: std::vector phaseSequence; @@ -1223,6 +1303,8 @@ class CGOptions : public MapleDriverOptionBase { static std::string skipAfter; static std::string dumpFunc; static std::string duplicateAsmFile; + static bool optForSize; + static bool enableHotColdSplit; static bool useBarriersForVolatile; static bool timePhases; static bool cgBigEndian; @@ -1240,6 +1322,7 @@ class CGOptions : public MapleDriverOptionBase { static bool doRetMerge; static bool doSchedule; static bool doAlignAnalysis; + static bool doCondBrAlign; static bool doWriteRefFieldOpt; static bool doRegSavesOpt; static bool useSsaPreSave; @@ -1295,6 +1378,12 @@ class CGOptions : public MapleDriverOptionBase { static bool generalRegOnly; static std::string literalProfile; static bool fastMath; + static bool noCommon; + static uint32 alignMinBBSize; + static uint32 alignMaxBBSize; + static uint32 loopAlignPow; + static uint32 jumpAlignPow; + static uint32 funcAlignPow; }; } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/include/cg/cg_phasemanager.h b/src/mapleall/maple_be/include/cg/cg_phasemanager.h index 2b3d9b80ddebb080e525f3161e530cc931bd90fc..a5c40fcb50aeea6f2a71ed0341612e305014cded 100644 --- a/src/mapleall/maple_be/include/cg/cg_phasemanager.h +++ b/src/mapleall/maple_be/include/cg/cg_phasemanager.h @@ -71,6 +71,7 @@ class CgFuncPM : public FunctionPM { void 
EmitDebugInfo(const MIRModule &m) const; void EmitFastFuncs(const MIRModule &m) const; bool IsFramework(MIRModule &m) const; + void SweepUnusedStaticSymbol(MIRModule &m); CG *cg = nullptr; BECommon *beCommon = nullptr; diff --git a/src/mapleall/maple_be/include/cg/cg_prop.h b/src/mapleall/maple_be/include/cg/cg_prop.h index fd621e77203f6d647c7728182086755418d7e96b..9d0015da74eec8b9f9ca46d6b20749ff2cb0bfe5 100644 --- a/src/mapleall/maple_be/include/cg/cg_prop.h +++ b/src/mapleall/maple_be/include/cg/cg_prop.h @@ -20,15 +20,17 @@ #include "cg_ssa.h" #include "cg_dce.h" #include "cg.h" +#include "reg_coalesce.h" namespace maplebe { class CGProp { public: - CGProp(MemPool &mp, CGFunc &f, CGSSAInfo &sInfo) + CGProp(MemPool &mp, CGFunc &f, CGSSAInfo &sInfo, LiveIntervalAnalysis &ll) : memPool(&mp), cgFunc(&f), propAlloc(&mp), - ssaInfo(&sInfo) { + ssaInfo(&sInfo), + regll(&ll) { cgDce = f.GetCG()->CreateCGDce(mp, f, sInfo); } virtual ~CGProp() = default; @@ -46,6 +48,9 @@ class CGProp { CGDce *GetDce() { return cgDce; } + LiveIntervalAnalysis *GetRegll() { + return regll; + } private: virtual void CopyProp() = 0; @@ -53,26 +58,31 @@ class CGProp { virtual void PropPatternOpt() = 0; CGSSAInfo *ssaInfo; CGDce *cgDce = nullptr; + LiveIntervalAnalysis *regll; }; class PropOptimizeManager { public: - PropOptimizeManager(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) - : cgFunc(cgFunc), - optSsaInfo(cgssaInfo) {} ~PropOptimizeManager() = default; template - void Optimize() { - PropOptimizePattern optPattern(cgFunc, optSsaInfo); + void Optimize(CGFunc &cgFunc, CGSSAInfo *cgssaInfo, LiveIntervalAnalysis *ll) { + PropOptimizePattern optPattern(cgFunc, cgssaInfo, ll); + optPattern.Run(); + } + template + void Optimize(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) { + PropOptimizePattern optPattern(cgFunc, cgssaInfo); optPattern.Run(); } - private: - CGFunc &cgFunc; - CGSSAInfo *optSsaInfo; }; class PropOptimizePattern { public: + PropOptimizePattern(CGFunc &cgFunc, CGSSAInfo *cgssaInfo, LiveIntervalAnalysis *ll) + : cgFunc(cgFunc), + optSsaInfo(cgssaInfo), + regll(ll) {} + PropOptimizePattern(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) : cgFunc(cgFunc), optSsaInfo(cgssaInfo) {} @@ -80,13 +90,17 @@ class PropOptimizePattern { virtual bool CheckCondition(Insn &insn) = 0; virtual void Optimize(Insn &insn) = 0; virtual void Run() = 0; + protected: std::string PhaseName() const { return "propopt"; } virtual void Init() = 0; + Insn *FindDefInsn(VRegVersion *useVersion); + CGFunc &cgFunc; CGSSAInfo *optSsaInfo = nullptr; + LiveIntervalAnalysis *regll = nullptr; }; class ReplaceRegOpndVisitor : public OperandVisitorBase, diff --git a/src/mapleall/maple_be/include/cg/cg_ssa.h b/src/mapleall/maple_be/include/cg/cg_ssa.h index fa790f7ea8717148e6efcec284807f15b87b16dd..6cd28a182c3138c4f35d75044990f4d49d001b71 100644 --- a/src/mapleall/maple_be/include/cg/cg_ssa.h +++ b/src/mapleall/maple_be/include/cg/cg_ssa.h @@ -156,7 +156,8 @@ class CGSSAInfo { vRegDefCount(ssaAlloc.Adapter()), vRegStk(ssaAlloc.Adapter()), allSSAOperands(ssaAlloc.Adapter()), - noDefVRegs(ssaAlloc.Adapter()) {} + noDefVRegs(ssaAlloc.Adapter()), + reversePostOrder(ssaAlloc.Adapter()) {} virtual ~CGSSAInfo() = default; void ConstructSSA(); VRegVersion *FindSSAVersion(regno_t ssaRegNO); /* Get specific ssa info */ @@ -164,11 +165,11 @@ class CGSSAInfo { virtual void ReplaceInsn(Insn &oriInsn, Insn &newInsn) = 0; virtual void ReplaceAllUse(VRegVersion *toBeReplaced, VRegVersion *newVersion) = 0; virtual void CreateNewInsnSSAInfo(Insn &newInsn) = 0; + PhiOperand 
&CreatePhiOperand(); DUInsnInfo *CreateDUInsnInfo(Insn *cInsn, uint32 idx) { return memPool->New(cInsn, idx, ssaAlloc); } - const MapleUnorderedMap &GetAllSSAOperands() const { return allSSAOperands; } @@ -182,7 +183,11 @@ class CGSSAInfo { ASSERT(false, " original vreg is not existed"); return 0; } + MapleVector &GetReversePostOrder() { + return reversePostOrder; + } void DumpFuncCGIRinSSAForm() const; + virtual void DumpInsnInSSAForm(const Insn &insn) const = 0; static uint32 SSARegNObase; protected: @@ -214,7 +219,6 @@ class CGSSAInfo { virtual RegOperand *GetRenamedOperand(RegOperand &vRegOpnd, bool isDef, Insn &curInsn, uint32 idx) = 0; void RenameSuccPhiUse(const BB &bb); void PrunedPhiInsertion(const BB &bb, RegOperand &virtualOpnd); - virtual void DumpInsnInSSAForm(const Insn &insn) const = 0; void AddRenamedBB(uint32 bbID) { ASSERT(!renamedBBs.count(bbID), "cgbb has been renamed already"); @@ -223,6 +227,7 @@ class CGSSAInfo { bool IsBBRenamed(uint32 bbID) const { return renamedBBs.count(bbID); } + void SetReversePostOrder(); DomAnalysis *domInfo = nullptr; MapleSet renamedBBs; @@ -234,6 +239,8 @@ class CGSSAInfo { MapleUnorderedMap allSSAOperands; /* For virtual registers which do not have definition */ MapleSet noDefVRegs; + /* only save bb_id to reduce space */ + MapleVector reversePostOrder; int32 insnCount = 0; }; diff --git a/src/mapleall/maple_be/include/cg/cg_validbit_opt.h b/src/mapleall/maple_be/include/cg/cg_validbit_opt.h new file mode 100644 index 0000000000000000000000000000000000000000..e5465c57aab94210a3e53d494c28faaa8e1938b4 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/cg_validbit_opt.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_VALIDBIT_OPT_H +#define MAPLEBE_INCLUDE_CG_VALIDBIT_OPT_H + +#include "cg.h" +#include "cgfunc.h" +#include "bb.h" +#include "insn.h" +#include "cg_ssa.h" + +namespace maplebe { +#define CG_VALIDBIT_OPT_DUMP CG_DEBUG_FUNC(*cgFunc) +class ValidBitPattern { + public: + ValidBitPattern(CGFunc &f, CGSSAInfo &info) : + cgFunc(&f), ssaInfo(&info) {} + virtual ~ValidBitPattern() = default; + std::string PhaseName() const { + return "cgvalidbitopt"; + } + + protected: + virtual std::string GetPatternName() = 0; + virtual bool CheckCondition(Insn &insn) = 0; + virtual void Run(BB &bb, Insn &insn) = 0; + Insn *GetDefInsn(const RegOperand &useReg); + InsnSet GetAllUseInsn(const RegOperand &defReg); + void DumpAfterPattern(std::vector &prevInsns, const Insn *replacedInsn, const Insn *newInsn); + + CGFunc *cgFunc; + CGSSAInfo *ssaInfo; +}; + +class ValidBitOpt { + public: + ValidBitOpt(CGFunc &f, CGSSAInfo &info) : cgFunc(&f), ssaInfo(&info) {} + virtual ~ValidBitOpt() = default; + void Run(); + static uint32 GetImmValidBit(int64 value, uint32 size) { + if (value < 0) { + return size; + } else if (value == 0) { + return k1BitSize; + } + uint32 pos = 0; + constexpr int64 mask = 1; + for (uint32 i = 0; i <= k8BitSize * sizeof(int64); ++i, value >>= 1) { + if ((value & mask) == mask) { + pos = i + 1; + } + } + return pos; + } + + static int64 GetLogValueAtBase2(int64 val) { + return (__builtin_popcountll(static_cast(val)) == 1) ? (__builtin_ffsll(val) - 1) : -1; + } + + protected: + template + void Optimize(BB &bb, Insn &insn) { + VBOpt opt(*cgFunc, *ssaInfo); + opt.Run(bb, insn); + } + virtual void DoOpt(BB &bb, Insn &insn) = 0; + void RectifyValidBitNum(); + void RecoverValidBitNum(); + virtual void SetValidBits(Insn &insn) = 0; + virtual bool SetPhiValidBits(Insn &insn) = 0; + CGFunc *cgFunc; + CGSSAInfo *ssaInfo; +}; +MAPLE_FUNC_PHASE_DECLARE(CgValidBitOpt, maplebe::CGFunc) +} /* namespace maplebe */ +#endif /* MAPLEBE_INCLUDE_CG_VALIDBIT_OPT_H */ diff --git a/src/mapleall/maple_be/include/cg/cgbb.h b/src/mapleall/maple_be/include/cg/cgbb.h index 060f0aab59772d3a862df44c1347c2814499d536..50938a3907131268a9380044d2ed3a15db2e1a5d 100644 --- a/src/mapleall/maple_be/include/cg/cgbb.h +++ b/src/mapleall/maple_be/include/cg/cgbb.h @@ -40,6 +40,7 @@ namespace maplebe { #define FOR_ALL_BB(BASE, FUNC) FOR_BB_BETWEEN(BASE, FIRST_BB_OF_FUNC(FUNC), nullptr, GetNext) #define FOR_ALL_BB_REV(BASE, FUNC) FOR_BB_BETWEEN(BASE, LAST_BB_OF_FUNC(FUNC), nullptr, GetPrev) + /* For get insn */ #define FIRST_INSN(BLOCK) (BLOCK)->GetFirstInsn() #define LAST_INSN(BLOCK) (BLOCK)->GetLastInsn() @@ -321,6 +322,7 @@ class BB { const Insn *GetFirstInsn() const { return firstInsn; } + void SetFirstInsn(Insn *arg) { firstInsn = arg; } @@ -514,6 +516,7 @@ class BB { void SetCritical(bool arg) { isCritical = arg; } + bool HasCriticalEdge(); bool GetInsertUse() const { return insertUse; } @@ -621,6 +624,8 @@ class BB { MapleMap &GetPhiInsns() { return phiInsnList; } + bool IsInPhiList(regno_t regNO); + bool IsInPhiDef(regno_t regNO); const Insn *GetFirstLoc() const { return firstLoc; } diff --git a/src/mapleall/maple_be/include/cg/cgfunc.h b/src/mapleall/maple_be/include/cg/cgfunc.h index 674807bdba2cb9e108980ba616ba78ccac82be43..f99eafc57b0f1b3db10c1681582642d4dcbb2720 100644 --- a/src/mapleall/maple_be/include/cg/cgfunc.h +++ b/src/mapleall/maple_be/include/cg/cgfunc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. 
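
The two static helpers added in cg_validbit_opt.h above are pure value computations, so their contract can be pinned down with a few spot checks (a standalone sketch, not part of the patch; it assumes the header can be included on its own):

#include "cg_validbit_opt.h"
#include <cassert>

int main() {
  /* GetImmValidBit: how many low bits of an immediate are significant */
  assert(maplebe::ValidBitOpt::GetImmValidBit(0, 32) == 1);    /* zero still occupies one bit */
  assert(maplebe::ValidBitOpt::GetImmValidBit(5, 32) == 3);    /* 0b101 -> bits 0..2 are significant */
  assert(maplebe::ValidBitOpt::GetImmValidBit(-1, 32) == 32);  /* negative values keep the full width */
  /* GetLogValueAtBase2: log2(val) for powers of two, otherwise -1 */
  assert(maplebe::ValidBitOpt::GetLogValueAtBase2(8) == 3);
  assert(maplebe::ValidBitOpt::GetLogValueAtBase2(6) == -1);
  return 0;
}
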
+ * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -26,6 +26,8 @@ #include "dbg.h" #include "reaching.h" #include "cg_cfg.h" +#include "cg_irbuilder.h" +#include "call_conv.h" /* MapleIR headers. */ #include "mir_parser.h" #include "mir_function.h" @@ -35,8 +37,8 @@ #include "mempool_allocator.h" namespace maplebe { -constexpr int32 kBBLimit = 10000; -constexpr int32 kFreqBase = 10000; +constexpr int32 kBBLimit = 100000; +constexpr int32 kFreqBase = 100000; struct MemOpndCmp { bool operator()(const MemOperand *lhs, const MemOperand *rhs) const { CHECK_FATAL(lhs != nullptr, "null ptr check"); @@ -125,6 +127,13 @@ class CGFunc { reachingDef = paramRd; } + InsnBuilder *GetInsnBuilder() { + return insnBuilder; + } + OperandBuilder *GetOpndBuilder() { + return opndBuilder; + } + bool GetRDStatus() const { return (reachingDef != nullptr); } @@ -139,7 +148,12 @@ class CGFunc { virtual bool NeedCleanup() = 0; virtual void GenerateCleanupCodeForExtEpilog(BB &bb) = 0; + void CreateLmbcFormalParamInfo(); + virtual uint32 FloatParamRegRequired(MIRStructType *structType, uint32 &fpSize) = 0; + virtual void AssignLmbcFormalParams() = 0; + LmbcFormalParamInfo *GetLmbcFormalParamInfo(uint32 offset); void GenerateLoc(StmtNode *stmt, unsigned &lastSrcLoc, unsigned &lastMplLoc); + int32 GetFreqFromStmt(uint32 stmtId); void GenerateInstruction(); bool MemBarOpt(const StmtNode &membar); void UpdateCallBBFrequency(); @@ -156,7 +170,8 @@ class CGFunc { return maxRegCount; }; void DumpCFG() const; - void DumpCGIR() const; + void DumpCGIR(bool withTargetInfo = false) const; + virtual void DumpTargetIR(const Insn &insn) const {}; void DumpLoop() const; void ClearLoopInfo(); Operand *HandleExpr(const BaseNode &parent, BaseNode &expr); @@ -174,6 +189,9 @@ class CGFunc { virtual void SelectAggDassign(DassignNode &stmt) = 0; virtual void SelectIassign(IassignNode &stmt) = 0; virtual void SelectIassignoff(IassignoffNode &stmt) = 0; + virtual void SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) = 0; + virtual void SelectIassignspoff(PrimType pTy, int32 offset, Operand &opnd) = 0; + virtual void SelectBlkassignoff(BlkassignoffNode &bNode, Operand *src) = 0; virtual void SelectAggIassign(IassignNode &stmt, Operand &lhsAddrOpnd) = 0; virtual void SelectReturn(Operand *opnd) = 0; virtual void SelectIgoto(Operand *opnd0) = 0; @@ -184,25 +202,25 @@ class CGFunc { virtual void SelectCall(CallNode &callNode) = 0; virtual void SelectIcall(IcallNode &icallNode, Operand &fptrOpnd) = 0; virtual void SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) = 0; - virtual Operand *SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrinopNode, std::string name) = 0; - virtual Operand *SelectIntrinsicOpWithNParams(IntrinsicopNode &intrinopNode, PrimType retType, std::string &name) = 0; - virtual Operand *SelectCclz(IntrinsicopNode &intrinopNode) = 0; - virtual Operand *SelectCctz(IntrinsicopNode &intrinopNode) = 0; - virtual Operand *SelectCpopcount(IntrinsicopNode &intrinopNode) = 0; - virtual Operand *SelectCparity(IntrinsicopNode &intrinopNode) = 0; - virtual Operand *SelectCclrsb(IntrinsicopNode &intrinopNode) = 0; - virtual Operand *SelectCisaligned(IntrinsicopNode &intrinopNode) = 0; - virtual Operand *SelectCalignup(IntrinsicopNode &intrinopNode) = 0; - virtual Operand *SelectCaligndown(IntrinsicopNode &intrinopNode) = 0; - virtual Operand 
*SelectCSyncAddFetch(IntrinsicopNode &intrinopNode, PrimType pty) = 0; - virtual Operand *SelectCSyncFetchAdd(IntrinsicopNode &intrinopNode, PrimType pty) = 0; - virtual Operand *SelectCSyncSubFetch(IntrinsicopNode &intrinopNode, PrimType pty) = 0; - virtual Operand *SelectCSyncFetchSub(IntrinsicopNode &intrinopNode, PrimType pty) = 0; - virtual Operand *SelectCSyncBoolCmpSwap(IntrinsicopNode &intrinopNode, PrimType pty) = 0; - virtual Operand *SelectCSyncValCmpSwap(IntrinsicopNode &intrinopNode, PrimType pty) = 0; - virtual Operand *SelectCSyncLockTestSet(IntrinsicopNode &intrinopNode, PrimType pty) = 0; - virtual Operand *SelectCSyncLockRelease(IntrinsicopNode &intrinopNode, PrimType pty) = 0; - virtual Operand *SelectCReturnAddress(IntrinsicopNode &intrinopNode) = 0; + virtual Operand *SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrinsicopNode, std::string name) = 0; + virtual Operand *SelectIntrinsicOpWithNParams(IntrinsicopNode &intrinsicopNode, PrimType retType, + const std::string &name) = 0; + virtual Operand *SelectCclz(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCctz(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCpopcount(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCparity(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCclrsb(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCisaligned(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCalignup(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCaligndown(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCSyncFetch(IntrinsicopNode &intrinsicopNode, Opcode op, bool fetchBefore) = 0; + virtual Operand *SelectCSyncBoolCmpSwap(IntrinsicopNode &intrinsicopNode, PrimType pty) = 0; + virtual Operand *SelectCSyncValCmpSwap(IntrinsicopNode &intrinsicopNode, PrimType pty) = 0; + virtual Operand *SelectCSyncLockTestSet(IntrinsicopNode &intrinsicopNode, PrimType pty) = 0; + virtual Operand *SelectCSyncSynchronize(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCAtomicLoadN(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCAtomicExchangeN(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCReturnAddress(IntrinsicopNode &intrinsicopNode) = 0; virtual void SelectMembar(StmtNode &membar) = 0; virtual void SelectComment(CommentNode &comment) = 0; virtual void HandleCatch() = 0; @@ -210,11 +228,14 @@ class CGFunc { /* select expr */ virtual Operand *SelectDread(const BaseNode &parent, AddrofNode &expr) = 0; virtual RegOperand *SelectRegread(RegreadNode &expr) = 0; - virtual Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent) = 0; + virtual Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent, bool isAddrofoff = false) = 0; + virtual Operand *SelectAddrofoff(AddrofoffNode &expr, const BaseNode &parent) = 0; virtual Operand &SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) = 0; virtual Operand &SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) = 0; virtual Operand *SelectIread(const BaseNode &parent, IreadNode &expr, int extraOffset = 0, PrimType finalBitFieldDestType = kPtyInvalid) = 0; + virtual Operand *SelectIreadoff(const BaseNode &parent, IreadoffNode &ireadoff) = 0; + virtual Operand *SelectIreadfpoff(const BaseNode &parent, IreadFPoffNode &ireadoff) = 0; virtual Operand *SelectIntConst(MIRIntConst &intConst) = 0; virtual Operand *SelectFloatConst(MIRFloatConst &floatConst, const BaseNode &parent) = 0; virtual Operand 
*SelectDoubleConst(MIRDoubleConst &doubleConst, const BaseNode &parent) = 0; @@ -251,6 +272,7 @@ class CGFunc { virtual void SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) = 0; virtual Operand *SelectAbs(UnaryNode &node, Operand &opnd0) = 0; virtual Operand *SelectBnot(UnaryNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; virtual Operand *SelectExtractbits(ExtractbitsNode &node, Operand &opnd0, const BaseNode &parent) = 0; virtual Operand *SelectDepositBits(DepositbitsNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; virtual Operand *SelectRegularBitFieldLoad(ExtractbitsNode &node, const BaseNode &parent) = 0; @@ -290,16 +312,24 @@ class CGFunc { virtual RegOperand &GetOrCreateFramePointerRegOperand() = 0; virtual RegOperand &GetOrCreateStackBaseRegOperand() = 0; virtual int32 GetBaseOffset(const SymbolAlloc &symbolAlloc) = 0; - virtual Operand &GetZeroOpnd(uint32 size) = 0; + virtual RegOperand &GetZeroOpnd(uint32 size) = 0; virtual Operand &CreateCfiRegOperand(uint32 reg, uint32 size) = 0; virtual Operand &GetTargetRetOperand(PrimType primType, int32 sReg) = 0; virtual Operand &CreateImmOperand(PrimType primType, int64 val) = 0; - virtual Operand *CreateZeroOperand(PrimType primType) = 0; - virtual void ReplaceOpndInInsn(RegOperand ®Dest, RegOperand ®Src, Insn &insn) = 0; + virtual void ReplaceOpndInInsn(RegOperand ®Dest, RegOperand ®Src, Insn &insn, regno_t destNO) = 0; virtual void CleanupDeadMov(bool dump = false) = 0; virtual void GetRealCallerSaveRegs(const Insn &insn, std::set &realCallerSave) = 0; virtual bool IsFrameReg(const RegOperand &opnd) const = 0; + virtual bool IsSPOrFP(const RegOperand &opnd) const { + return false; + }; + virtual bool IsReturnReg(const RegOperand &opnd) const { + return false; + }; + virtual bool IsSaveReg(const RegOperand ®, MIRType &mirType, BECommon &beCommon) const { + return false; + } /* For Neon intrinsics */ virtual RegOperand *SelectVectorAddLong(PrimType rTy, Operand *o1, Operand *o2, PrimType oty, bool isLow) = 0; @@ -456,6 +486,14 @@ class CGFunc { return vRegCount; } + uint32 GetSSAvRegCount() const { + return ssaVRegCount; + } + + void SetSSAvRegCount(uint32 count) { + ssaVRegCount = count; + } + uint32 GetVRegSize(regno_t vregNum) { CHECK(vregNum < vRegTable.size(), "index out of range in GetVRegSize"); return GetOrCreateVirtualRegisterOperand(vregNum).GetSize() / kBitsPerByte; @@ -529,6 +567,10 @@ class CGFunc { return cg; } + MIRModule &GetMirModule(){ + return mirModule; + } + const MIRModule &GetMirModule() const { return mirModule; } @@ -717,7 +759,6 @@ class CGFunc { } bool IsExtendReg(regno_t vregNum) { - CHECK(vregNum < vRegTable.size(), "index out of range in GetVRegSize"); return extendSet.find(vregNum) != extendSet.end(); } @@ -725,6 +766,10 @@ class CGFunc { extendSet.insert(vregNum); } + void RemoveFromExtendSet(regno_t vregNum) { + extendSet.erase(vregNum); + } + bool IsExitBB(const BB ¤tBB) { for (BB *exitBB : exitBBVec) { if (exitBB == ¤tBB) { @@ -816,6 +861,10 @@ class CGFunc { emitStVec[id] = &symbol; } + void DeleteEmitSt(uint32 id) { + emitStVec.erase(id); + } + MapleVector &GetLoops() { return loops; } @@ -828,6 +877,39 @@ class CGFunc { loops.emplace_back(&loop); } + MapleVector &GetLmbcParamVec() { + return lmbcParamVec; + } + + void IncLmbcArgsInRegs(RegType ty) { + if (ty == kRegTyInt) { + lmbcIntArgs++; + } else { + lmbcFpArgs++; + } + } + + int16 
GetLmbcArgsInRegs(RegType ty) { + return ty == kRegTyInt ? lmbcIntArgs : lmbcFpArgs; + } + + void ResetLmbcArgsInRegs() { + lmbcIntArgs = 0; + lmbcFpArgs = 0; + } + + void IncLmbcTotalArgs() { + lmbcTotalArgs++; + } + + int16 GetLmbcTotalArgs() { + return lmbcTotalArgs; + } + + void ResetLmbcTotalArgs() { + lmbcTotalArgs = 0; + } + MapleVector &GetAllBBs() { return bbVec; } @@ -915,12 +997,12 @@ class CGFunc { } void UpdateFrequency(const StmtNode &stmt) { - bool withFreqInfo = func.HasFreqMap() && !func.GetFreqMap().empty(); + bool withFreqInfo = func.HasFreqMap() && !func.GetLastFreqMap().empty(); if (!withFreqInfo) { return; } - auto it = func.GetFreqMap().find(stmt.GetStmtID()); - if (it != func.GetFreqMap().end()) { + auto it = func.GetLastFreqMap().find(stmt.GetStmtID()); + if (it != func.GetLastFreqMap().end()) { frequency = it->second; } } @@ -943,13 +1025,6 @@ class CGFunc { curBB->AppendBB(*newBB); } } - /* used for handle function, frequency is the laststmt->frequency. */ - if (curBB != nullptr) { - curBB->SetFrequency(frequency); - } else { - newBB->SetFrequency(frequency); - } - ASSERT(newBB->GetLastStmt() == nullptr, "newBB's lastStmt must be nullptr"); return newBB; } @@ -1036,6 +1111,7 @@ class CGFunc { uint32 firstMapleIrVRegNO = 200; /* positioned after physical regs */ uint32 firstNonPregVRegNO; uint32 vRegCount; /* for assigning a number for each CG virtual register */ + uint32 ssaVRegCount = 0; /* vreg count in ssa */ uint32 maxRegCount; /* for the current virtual register number limit */ size_t lSymSize; /* size of local symbol table imported */ MapleVector vRegTable; /* table of CG's virtual registers indexed by v_reg no */ @@ -1134,7 +1210,6 @@ class CGFunc { void SetHasAsm() { hasAsm = true; } - private: CGFunc &operator=(const CGFunc &cgFunc); CGFunc(const CGFunc&); @@ -1142,6 +1217,10 @@ class CGFunc { bool CheckSkipMembarOp(const StmtNode &stmt); MIRFunction &func; EHFunc *ehFunc = nullptr; + + InsnBuilder *insnBuilder = nullptr; + OperandBuilder *opndBuilder = nullptr; + uint32 bbCnt = 0; uint32 labelIdx = 0; /* local label index number */ LabelNode *startLabel = nullptr; /* start label of the function */ @@ -1168,6 +1247,10 @@ class CGFunc { MapleVector lrVec; #endif /* TARGARM32 */ MapleVector loops; + MapleVector lmbcParamVec; + int32 lmbcIntArgs = 0; + int32 lmbcFpArgs = 0; + int32 lmbcTotalArgs = 0; CGCFG *theCFG = nullptr; uint32 nextSpillLocation = 0; @@ -1186,5 +1269,7 @@ MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgGenCfi, maplebe::CGFunc) MAPLE_FUNC_PHASE_DECLARE_END MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgEmission, maplebe::CGFunc) MAPLE_FUNC_PHASE_DECLARE_END +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgGenProEpiLog, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END } /* namespace maplebe */ #endif /* MAPLEBE_INCLUDE_CG_CGFUNC_H */ diff --git a/src/mapleall/maple_be/include/cg/dbg.h b/src/mapleall/maple_be/include/cg/dbg.h index 3a3b3ba68c9d29a0edabf509d96ad3b429e4ace0..ab16485f72271702a90c03164794a55c954be1ac 100644 --- a/src/mapleall/maple_be/include/cg/dbg.h +++ b/src/mapleall/maple_be/include/cg/dbg.h @@ -58,8 +58,7 @@ class DbgInsn : public maplebe::Insn { return false; } - void Emit(const maplebe::CG &cg, maplebe::Emitter &emitter) const override; - +#if TARGAARCH64 || TARGRISCV64 void Dump() const override; bool Check() const override; @@ -81,6 +80,7 @@ class DbgInsn : public maplebe::Insn { CHECK_FATAL(false, "dbg insn do not def regs"); return std::set(); } +#endif uint32 GetLoc() const; diff --git a/src/mapleall/maple_be/include/cg/ebo.h 
b/src/mapleall/maple_be/include/cg/ebo.h index 17a2e5dd707f6edac7607e9a55add348c25a7f83..73ae2fc691d51355d3fea69597c4b35f3c08ba48 100644 --- a/src/mapleall/maple_be/include/cg/ebo.h +++ b/src/mapleall/maple_be/include/cg/ebo.h @@ -215,6 +215,9 @@ class Ebo { virtual bool IsSameRedefine(BB &bb, Insn &insn, OpndInfo &opndInfo) const = 0; virtual bool ResIsNotDefAndUse(Insn &insn) const = 0; virtual bool LiveOutOfBB(const Operand &opnd, const BB &bb) const = 0; + virtual bool IsInvalidReg(const RegOperand &opnd) const = 0; + virtual bool IsZeroRegister(const Operand &opnd) const = 0; + virtual bool IsConstantImmOrReg(const Operand &opnd) const = 0; OpndInfo *BuildMemOpndInfo(BB &bb, Insn &insn, Operand &opnd, uint32 opndIndex); OpndInfo *BuildOperandInfo(BB &bb, Insn &insn, Operand &opnd, uint32 opndIndex, MapleVector &origInfos); bool ForwardPropagateOpnd(Insn &insn, Operand *&opnd, uint32 opndIndex, OpndInfo *&opndInfo, diff --git a/src/mapleall/maple_be/include/cg/emit.h b/src/mapleall/maple_be/include/cg/emit.h index a8ff853b227f0093883a7ec6f517e9639c1d5ea1..c7c11fb7593fab3ac5104e9b88262a60de37290a 100644 --- a/src/mapleall/maple_be/include/cg/emit.h +++ b/src/mapleall/maple_be/include/cg/emit.h @@ -162,7 +162,7 @@ class Emitter { currentMop = mOp; } - std::vector &GetStringPtr() { + MapleVector &GetStringPtr() { return stringPtr; } @@ -211,7 +211,7 @@ class Emitter { void EmitStructConstant(MIRConst &mirConst); void EmitVectorConstant(MIRConst &mirConst); void EmitLocalVariable(const CGFunc &cgFunc); - void EmitSymbolsWithPrefixSection(const MIRSymbol &symbol); + void EmitUninitializedSymbolsWithPrefixSection(const MIRSymbol &symbol, const std::string §ionName); void EmitGlobalVariable(); void EmitGlobalRootList(const MIRSymbol &mirSymbol); void EmitMuidTable(const std::vector &vec, const std::map &strIdx2Type, @@ -335,6 +335,7 @@ class Emitter { rangeIdx2PrefixStr(cg.GetMIRModule()->GetMPAllocator().Adapter()), arraySize(0), isFlexibleArray(false), + stringPtr(cg.GetMIRModule()->GetMPAllocator().Adapter()), hugeSoTargets(cg.GetMIRModule()->GetMPAllocator().Adapter()), labdie2labidxTable(std::less(), cg.GetMIRModule()->GetMPAllocator().Adapter()), fileMap(std::less(), cg.GetMIRModule()->GetMPAllocator().Adapter()) { @@ -362,7 +363,7 @@ class Emitter { MemPool *memPool; uint32 arraySize; bool isFlexibleArray; - std::vector stringPtr; + MapleVector stringPtr; #if 1/* REQUIRE TO SEPERATE TARGAARCH64 TARGARM32 */ /* Following code is under TARGAARCH64 condition */ uint64 javaInsnCount = 0; diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_immediate.cpp b/src/mapleall/maple_be/include/cg/immvalid.def similarity index 51% rename from src/mapleall/maple_be/src/cg/aarch64/aarch64_immediate.cpp rename to src/mapleall/maple_be/include/cg/immvalid.def index 28157d213833589076a5b3fed4ccc9344cb8941a..052c7694448cdb88063b3cd5c88d23e3ef478d65 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_immediate.cpp +++ b/src/mapleall/maple_be/include/cg/immvalid.def @@ -1,39 +1,10 @@ -/* - * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. - * - * OpenArkCompiler is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. 
- * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR - * FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. - */ -#include "aarch64_immediate.h" -#include "common_utils.h" -#include "mpl_logging.h" - -#include -#include -#include - -namespace maplebe { static std::set ValidBitmaskImmSet = { #include "valid_bitmask_imm.txt" }; - -namespace { constexpr uint32 kMaxBitTableSize = 5; -#if DEBUG -constexpr uint32 kN16ChunksCheck = 2; -#endif constexpr std::array bitmaskImmMultTable = { 0x0000000100000001UL, 0x0001000100010001UL, 0x0101010101010101UL, 0x1111111111111111UL, 0x5555555555555555UL, }; -}; bool IsBitSizeImmediate(uint64 val, uint32 bitLen, uint32 nLowerZeroBits) { /* mask1 is a 64bits number that is all 1 shifts left size bits */ @@ -106,42 +77,100 @@ bool IsBitmaskImmediate(uint64 val, uint32 bitLen) { #endif } -bool IsMoveWidableImmediate(uint64 val, uint32 bitLen) { - if (bitLen == k64BitSize) { - /* 0xHHHH000000000000 or 0x0000HHHH00000000, return true */ - if (((val & ((static_cast(0xffff)) << k48BitSize)) == val) || - ((val & ((static_cast(0xffff)) << k32BitSize)) == val)) { - return true; - } - } else { - /* get lower 32 bits */ - val &= static_cast(0xffffffff); +bool Imm12BitValid(int64 value) { + bool result = maplebe::IsBitSizeImmediate(static_cast(value), kMaxImmVal12Bits, 0); + // for target linux-aarch64-gnu + result = result || maplebe::IsBitSizeImmediate(static_cast(value), kMaxImmVal12Bits, kMaxImmVal12Bits); + return result; +} + +bool Imm12BitMaskValid(int64 value) { + if (value == 0 || static_cast(value) == -1) { + return true; } - /* 0x00000000HHHH0000 or 0x000000000000HHHH, return true */ - return ((val & ((static_cast(0xffff)) << k16BitSize)) == val || - (val & ((static_cast(0xffff)) << 0)) == val); + return maplebe::IsBitmaskImmediate(static_cast(value), k32BitSize); } -bool BetterUseMOVZ(uint64 val) { - int32 n16zerosChunks = 0; - int32 n16onesChunks = 0; - uint64 sa = 0; - /* a 64 bits number is split 4 chunks, each chunk has 16 bits. 
check each chunk whether is all 1 or is all 0 */ - for (uint64 i = 0; i < k4BitSize; ++i, sa += k16BitSize) { - uint64 chunkVal = (val >> (static_cast(sa))) & 0x0000FFFFUL; - if (chunkVal == 0) { - ++n16zerosChunks; - } else if (chunkVal == 0xFFFFUL) { - ++n16onesChunks; - } +bool Imm13BitValid(int64 value) { + bool result = maplebe::IsBitSizeImmediate(static_cast(value), kMaxImmVal13Bits, 0); + // for target linux-aarch64-gnu + result = result || maplebe::IsBitSizeImmediate(static_cast(value), kMaxImmVal13Bits, kMaxImmVal13Bits); + return result; +} + +bool Imm13BitMaskValid(int64 value) { + if (value == 0 || static_cast(value) == -1) { + return true; + } + return maplebe::IsBitmaskImmediate(static_cast(value), k64BitSize); +} + +bool Imm16BitValid(int64 value) { + bool result = maplebe::IsBitSizeImmediate(static_cast(value), kMaxImmVal16Bits, 0); + /* + * for target linux-aarch64-gnu + * aarch64 assembly takes up to 24-bits immediate, generating + * either cmp or cmp with shift 12 encoding + */ + result = result || maplebe::IsBitSizeImmediate(static_cast(value), kMaxImmVal12Bits, kMaxImmVal12Bits); + return result; +} + +/* + * 8bit : 0 + * halfword : 1 + * 32bit - word : 2 + * 64bit - word : 3 + * 128bit- word : 4 + */ +bool StrLdrSignedOfstValid(int64 value, uint wordSize) { + if (value <= k256BitSize && value >= kNegative256BitSize) { + return true; + } else if ((value > k256BitSize) && (value <= kMaxPimm[wordSize])) { + uint64 mask = (1U << wordSize) - 1U; + return (static_cast(value) & mask) ? false : true; + } + return false; +} + + +bool StrLdr8ImmValid(int64 value) { + return StrLdrSignedOfstValid(value, 0); +} + +bool StrLdr16ImmValid(int64 value) { + return StrLdrSignedOfstValid(value, k1ByteSize); +} + +bool StrLdr32ImmValid(int64 value) { + return StrLdrSignedOfstValid(value, k2ByteSize); +} + +bool StrLdr32PairImmValid(int64 value) { + if ((value <= kMaxSimm32Pair) && (value >= kMinSimm32)) { + return (static_cast(value) & 3) ? false : true; + } + return false; +} + +bool StrLdr64ImmValid(int64 value) { + return StrLdrSignedOfstValid(value, k3ByteSize); +} + +bool StrLdr64PairImmValid(int64 value) { + if (value <= kMaxSimm64Pair && (value >= kMinSimm64)) { + return (static_cast(value) & 7) ? false : true; + } + return false; +} + +bool StrLdr128ImmValid(int64 value) { + return StrLdrSignedOfstValid(value, k4ByteSize); +} + +bool StrLdr128PairImmValid(int64 value) { + if (value < k1024BitSize && (value >= kNegative1024BitSize)) { + return (static_cast(value) & 0xf) ? 
false : true;
+  }
-  /*
-   * note that since we already check if the value
-   * can be movable with as a single mov instruction,
-   * we should not exepct either n_16zeros_chunks>=3 or n_16ones_chunks>=3
-   */
-  ASSERT(n16zerosChunks <= kN16ChunksCheck, "n16zerosChunks ERR");
-  ASSERT(n16onesChunks <= kN16ChunksCheck, "n16onesChunks ERR");
-  return (n16zerosChunks >= n16onesChunks);
+  return false;
 }
-} /* namespace maplebe */
diff --git a/src/mapleall/maple_be/include/cg/insn.h b/src/mapleall/maple_be/include/cg/insn.h
index 4d7db94151bb3f3ad2a9d389fdac000bed1c3f76..c15f17b25c34c82bc9bae1a5bf27410b130fcb40 100644
--- a/src/mapleall/maple_be/include/cg/insn.h
+++ b/src/mapleall/maple_be/include/cg/insn.h
@@ -27,15 +27,12 @@
 #include "common_utils.h"
 namespace maplebe {
-using MOperator = uint32;
-
 /* forward declaration */
 class BB;
 class CG;
 class Emitter;
 class DepNode;
-
-
+struct InsnDescription;
 class Insn {
  public:
  enum RetType : uint8 {
@@ -47,7 +44,11 @@
  static constexpr uint8 kMaxStackOffsetSize = 2;
  Insn(MemPool &memPool, MOperator opc)
-    : mOp(opc), localAlloc(&memPool), opnds(localAlloc.Adapter()), comment(&memPool) {}
+    : mOp(opc),
+      localAlloc(&memPool),
+      opnds(localAlloc.Adapter()),
+      registerBinding(localAlloc.Adapter()),
+      comment(&memPool) {}
  Insn(MemPool &memPool, MOperator opc, Operand &opnd0) : Insn(memPool, opc) {
    opnds.emplace_back(&opnd0);
  }
  Insn(MemPool &memPool, MOperator opc, Operand &opnd0, Operand &opnd1) : Insn(memPool, opc) {
    opnds.emplace_back(&opnd0);
@@ -78,30 +79,45 @@
  MOperator GetMachineOpcode() const {
    return mOp;
  }
-
+#ifdef TARGX86_64
+  void SetMOP(const InsnDescription &idesc);
+#else
  void SetMOP(MOperator mOp) {
    this->mOp = mOp;
  }
+#endif
  void AddOperand(Operand &opnd) {
    opnds.emplace_back(&opnd);
  }
  Insn &AddOperandChain(Operand &opnd) {
-    opnds.emplace_back(&opnd);
+    AddOperand(opnd);
    return *this;
  }
+  /* use with care: this may leave the insn in an illegal form */
+  void CommuteOperands(uint32 dIndex, uint32 sIndex);
+  void CleanAllOperand() {
+    opnds.clear();
+  }
  void PopBackOperand() {
    opnds.pop_back();
  }
+
  Operand &GetOperand(uint32 index) const {
    ASSERT(index < opnds.size(), "index out of range");
    return *opnds[index];
  }
+
+  void ResizeOpnds(uint32 newSize) {
+    opnds.resize(static_cast(newSize));
+  }
+
  uint32 GetOperandSize() const {
    return static_cast(opnds.size());
  }
+
  void SetOperand(uint32 index, Operand &opnd) {
    ASSERT(index <= opnds.size(), "index out of range");
    opnds[index] = &opnd;
@@ -122,7 +138,7 @@
    return retSize;
  }
-  virtual bool IsMachineInstruction() const = 0;
+  virtual bool IsMachineInstruction() const;
  virtual bool IsPseudoInstruction() const {
    return false;
@@ -144,10 +160,6 @@
    return false;
  }
-  virtual bool IsUseSpecReg() const {
-    return false;
-  }
-
  virtual bool IsEffectiveCopy() const {
    return false;
  }
@@ -203,9 +215,7 @@
    return false;
  }
-  virtual bool IsCall() const {
-    return false;
-  }
+  virtual bool IsCall() const;
  virtual bool IsAsmInsn() const {
    return false;
@@ -263,33 +273,33 @@
    return false;
  }
-  virtual bool IsCondBranch() const {
-    return false;
-  }
+  virtual bool IsCondBranch() const;
  virtual bool IsUnCondBranch() const {
    return false;
  }
-  virtual bool IsMove() const{
-    return false;
-  }
+  virtual bool IsMove() const;
-  virtual bool IsMoveRegReg() const{
+  virtual bool IsMoveRegReg() const {
    return false;
  }
+  virtual bool IsBasicOp() const;
+
+  virtual bool IsUnaryOp() const;
+
+  virtual bool IsShift() const;
+
  virtual bool IsPhi() const{
    return
diff --git a/src/mapleall/maple_be/include/cg/insn.h b/src/mapleall/maple_be/include/cg/insn.h
index 4d7db94151bb3f3ad2a9d389fdac000bed1c3f76..c15f17b25c34c82bc9bae1a5bf27410b130fcb40 100644
--- a/src/mapleall/maple_be/include/cg/insn.h
+++ b/src/mapleall/maple_be/include/cg/insn.h
@@ -27,15 +27,12 @@
 #include "common_utils.h"
 namespace maplebe {
-using MOperator = uint32;
-
 /* forward declaration */
 class BB;
 class CG;
 class Emitter;
 class DepNode;
-
-
+struct InsnDescription;
 class Insn {
  public:
  enum RetType : uint8 {
@@ -47,7 +44,11 @@
  static constexpr uint8 kMaxStackOffsetSize = 2;
  Insn(MemPool &memPool, MOperator opc)
-      : mOp(opc), localAlloc(&memPool), opnds(localAlloc.Adapter()), comment(&memPool) {}
+      : mOp(opc),
+        localAlloc(&memPool),
+        opnds(localAlloc.Adapter()),
+        registerBinding(localAlloc.Adapter()),
+        comment(&memPool) {}
  Insn(MemPool &memPool, MOperator opc, Operand &opnd0) : Insn(memPool, opc) { opnds.emplace_back(&opnd0); }
  Insn(MemPool &memPool, MOperator opc, Operand &opnd0, Operand &opnd1) : Insn(memPool, opc) {
    opnds.emplace_back(&opnd0);
@@ -78,30 +79,45 @@
  MOperator GetMachineOpcode() const {
    return mOp;
  }
-
+#ifdef TARGX86_64
+  void SetMOP(const InsnDescription &idesc);
+#else
  void SetMOP(MOperator mOp) {
    this->mOp = mOp;
  }
+#endif
  void AddOperand(Operand &opnd) {
    opnds.emplace_back(&opnd);
  }
  Insn &AddOperandChain(Operand &opnd) {
-    opnds.emplace_back(&opnd);
+    AddOperand(opnd);
    return *this;
  }
+  /* use with care: commuting operands may leave the insn in an illegal form */
+  void CommuteOperands(uint32 dIndex, uint32 sIndex);
+  void CleanAllOperand() {
+    opnds.clear();
+  }
  void PopBackOperand() {
    opnds.pop_back();
  }
+
  Operand &GetOperand(uint32 index) const {
    ASSERT(index < opnds.size(), "index out of range");
    return *opnds[index];
  }
+
+  void ResizeOpnds(uint32 newSize) {
+    opnds.resize(static_cast<std::size_t>(newSize));
+  }
+
  uint32 GetOperandSize() const {
    return static_cast<uint32>(opnds.size());
  }
+
  void SetOperand(uint32 index, Operand &opnd) {
    ASSERT(index <= opnds.size(), "index out of range");
    opnds[index] = &opnd;
@@ -122,7 +138,7 @@
    return retSize;
  }
-  virtual bool IsMachineInstruction() const = 0;
+  virtual bool IsMachineInstruction() const;
  virtual bool IsPseudoInstruction() const {
    return false;
  }
@@ -144,10 +160,6 @@
    return false;
  }
-  virtual bool IsUseSpecReg() const {
-    return false;
-  }
-
  virtual bool IsEffectiveCopy() const {
    return false;
  }
@@ -203,9 +215,7 @@
    return false;
  }
-  virtual bool IsCall() const {
-    return false;
-  }
+  virtual bool IsCall() const;
  virtual bool IsAsmInsn() const {
    return false;
  }
@@ -263,33 +273,33 @@
    return false;
  }
-  virtual bool IsCondBranch() const {
-    return false;
-  }
+  virtual bool IsCondBranch() const;
  virtual bool IsUnCondBranch() const {
    return false;
  }
-  virtual bool IsMove() const {
-    return false;
-  }
+  virtual bool IsMove() const;
-  virtual bool IsMoveRegReg() const {
+  virtual bool IsMoveRegReg() const {
    return false;
  }
+  virtual bool IsBasicOp() const;
+
+  virtual bool IsUnaryOp() const;
+
+  virtual bool IsShift() const;
+
  virtual bool IsPhi() const {
    return false;
  }
-  virtual bool IsLoad() const {
-    return false;
-  }
+  virtual bool IsLoad() const;
-  virtual bool IsStore() const {
-    return false;
-  }
+  virtual bool IsStore() const;
+
+  virtual bool IsConversion() const;
  virtual bool IsLoadPair() const {
    return false;
@@ -449,9 +459,11 @@
    return 0;
  }
-  virtual void Emit(const CG&, Emitter&) const = 0;
-
+#if TARGAARCH64 || TARGRISCV64
  virtual void Dump() const = 0;
+#else
+  virtual void Dump() const;
+#endif
 #if !RELEASE
  virtual bool Check() const {
@@ -496,6 +508,7 @@
    return ((flags & kOpAccessRefField) != 0);
  }
+#if TARGAARCH64 || TARGRISCV64
  virtual bool IsRegDefined(regno_t regNO) const = 0;
  virtual std::set<uint32> GetDefRegs() const = 0;
@@ -505,6 +518,7 @@
  };
  virtual bool IsDefinition() const = 0;
+#endif
  virtual bool IsDestRegAlsoSrcReg() const {
    return false;
  }
@@ -549,10 +563,6 @@
    return 0;
  }
-  void SetMOperator(MOperator mOp) {
-    this->mOp = mOp;
-  }
-
  void SetPrev(Insn *prev) {
    this->prev = prev;
  }
@@ -733,6 +743,22 @@
    return labelOpnds;
  }
+  void SetInsnDescrption(const InsnDescription &newMD) {
+    md = &newMD;
+  }
+
+  const InsnDescription *GetInsnDescrption() const {
+    return md;
+  }
+
+  void AddRegBinding(uint32 regA, uint32 regB) {
+    registerBinding.emplace(regA, regB);
+  }
+
+  const MapleMap<uint32, uint32> &GetRegBinding() const {
+    return registerBinding;
+  }
+
 protected:
  MOperator mOp;
  MapleAllocator localAlloc;
@@ -744,6 +770,7 @@
  bool isPhiMovInsn = false;
 private:
+  MapleMap<uint32, uint32> registerBinding;  /* used for inline asm only */
  enum OpKind : uint32 {
    kOpUnknown = 0,
    kOpCondDef = 0x1,
@@ -769,6 +796,9 @@
  bool asmDefCondCode = false;
  bool asmModMem = false;
  bool needSplit = false;
+
+  /* for multiple architectures */
+  const InsnDescription *md = nullptr;
};

struct InsnIdCmp {
@@ -779,6 +809,7 @@
  }
};
 using InsnSet = std::set<Insn*, InsnIdCmp>;
+using InsnMapleSet = MapleSet<Insn*, InsnIdCmp>;
}  /* namespace maplebe */
#endif  /* MAPLEBE_INCLUDE_CG_INSN_H */
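For readers new to this header: Insn stores its operands positionally by pointer, and accessors assert on index bounds. A toy model of that container discipline (plain std::vector instead of the pool-backed MapleVector, names hypothetical, not maplebe code):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct ToyOperand { uint32_t id; };

    class ToyInsn {
     public:
      explicit ToyInsn(uint32_t op) : mOp(op) {}
      void AddOperand(ToyOperand &opnd) { opnds.push_back(&opnd); }
      ToyOperand &GetOperand(uint32_t i) const {
        assert(i < opnds.size() && "index out of range");
        return *opnds[i];
      }
      void SetOperand(uint32_t i, ToyOperand &opnd) { opnds[i] = &opnd; }
      uint32_t GetOperandSize() const { return static_cast<uint32_t>(opnds.size()); }
     private:
      uint32_t mOp;                     // machine opcode number
      std::vector<ToyOperand*> opnds;   // Insn uses a MapleVector with a pool allocator
    };

    int main() {
      ToyOperand dst{0}, src{1};
      ToyInsn insn(1);                  // e.g. some "mov"-like opcode number
      insn.AddOperand(dst);
      insn.AddOperand(src);
      assert(insn.GetOperandSize() == 2);
      assert(&insn.GetOperand(1) == &src);
      return 0;
    }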
diff --git a/src/mapleall/maple_be/include/cg/isa.h b/src/mapleall/maple_be/include/cg/isa.h
index d9977fa4d09970b3baba6e6d467bc2ab45011c62..91747594e74b114d17bce42f479f72bd56912fb0 100644
--- a/src/mapleall/maple_be/include/cg/isa.h
+++ b/src/mapleall/maple_be/include/cg/isa.h
@@ -17,77 +17,73 @@
 #include <functional>
 #include "types_def.h"
+#include "operand.h"
 namespace maplebe {
-enum RegType : maple::uint8 {
-  kRegTyUndef,
-  kRegTyInt,
-  kRegTyFloat,
-  kRegTyCc,
-  kRegTyX87,
-  kRegTyVary,
-  kRegTyFpsc,
-  kRegTyIndex,
-  kRegTyLast,
-};
-
 enum MopProperty : maple::uint8 {
-  kPropIsMove,
-  kPropIsLoad,
-  kPropIsLoadPair,
-  kPropIsStore,
-  kPropIsStorePair,
-  kPropIsLoadAddress,
-  kPropIsAtomic,
-  kPropIsCall,
-  kPropIsConversion,
-  kPropIsConditionalSet,
-  kPropUseSpecReg,
-  kPropIsCondDef,
-  kPropHasAcqure,
-  kPropHasAcqureRCpc,
-  kPropHasLOAcqure,
-  kPropHasRelease,
-  kPropHasLORelease,
-  kPropCanThrow,
-  kPropIsPartDefine,
-  kPropIsDMB,
-  kPropIsUnCondBr,
-  kPropIsCondBr,
-  kPropHasLoop,
-  kPropIsVectorOp,
-  kPropIsPhi,
+  kInsnIsAbstract,
+  kInsnIsMove,
+  kInsnIsLoad,
+  kInsnIsLoadPair,
+  kInsnIsStore,
+  kInsnIsStorePair,
+  kInsnIsLoadAddress,
+  kInsnIsAtomic,
+  kInsnIsCall,
+  kInsnIsConversion,
+  kInsnIsConditionalSet,
+  kInsnUseSpecReg,
+  kInsnIsCondDef,
+  kInsnHasAcqure,
+  kInsnHasAcqureRCpc,
+  kInsnHasLOAcqure,
+  kInsnHasRelease,
+  kInsnHasLORelease,
+  kInsnCanThrow,
+  kInsnIsPartDefine,
+  kInsnIsDMB,
+  kInsnIsUnCondBr,
+  kInsnIsCondBr,
+  kInsnHasLoop,
+  kInsnIsVectorOp,
+  kInsnIsBinaryOp,
+  kInsnIsPhi,
+  kInsnIsUnaryOp,
+  kInsnIsShift,
+  kInsnIsNop,
};
-
-#define ISMOVE (1ULL << kPropIsMove)
-#define ISLOAD (1ULL << kPropIsLoad)
-#define ISLOADPAIR (1ULL << kPropIsLoadPair)
-#define ISSTORE (1ULL << kPropIsStore)
-#define ISSTOREPAIR (1ULL << kPropIsStorePair)
-#define ISLOADADDR (1ULL << kPropIsLoadAddress)
-#define ISATOMIC (1ULL << kPropIsAtomic)
-#define ISCALL (1ULL << kPropIsCall)
-#define ISCONVERSION (1ULL << kPropIsConversion)
-#define ISCONDSET (1ULL << kPropIsConditionalSet)
-#define USESPECREG (1ULL << kPropUseSpecReg)
-#define ISCONDDEF (1ULL << kPropIsCondDef)
-#define HASACQUIRE (1ULL << kPropHasAcqure)
-#define HASACQUIRERCPC (1ULL << kPropHasAcqureRCpc)
-#define HASLOACQUIRE (1ULL << kPropHasLOAcqure)
-#define HASRELEASE (1ULL << kPropHasRelease)
-#define HASLORELEASE (1ULL << kPropHasLORelease)
-#define CANTHROW (1ULL << kPropCanThrow)
-#define ISPARTDEF (1ULL << kPropIsPartDefine)
-#define ISDMB (1ULL << kPropIsDMB)
-#define ISUNCONDBRANCH (1ULL << kPropIsUnCondBr)
-#define ISCONDBRANCH (1ULL << kPropIsCondBr)
-#define HASLOOP (1ULL << kPropHasLoop)
-#define ISVECTOR (1ULL << kPropIsVectorOp)
-#define ISPHI (1ULL << kPropIsPhi)
-
 using regno_t = uint32_t;
-
-constexpr regno_t kInvalidRegNO = 0;
+#define ISABSTRACT (1ULL << kInsnIsAbstract)
+#define ISMOVE (1ULL << kInsnIsMove)
+#define ISLOAD (1ULL << kInsnIsLoad)
+#define ISLOADPAIR (1ULL << kInsnIsLoadPair)
+#define ISSTORE (1ULL << kInsnIsStore)
+#define ISSTOREPAIR (1ULL << kInsnIsStorePair)
+#define ISLOADADDR (1ULL << kInsnIsLoadAddress)
+#define ISATOMIC (1ULL << kInsnIsAtomic)
+#define ISCALL (1ULL << kInsnIsCall)
+#define ISCONVERSION (1ULL << kInsnIsConversion)
+#define ISCONDSET (1ULL << kInsnIsConditionalSet)
+#define USESPECREG (1ULL << kInsnUseSpecReg)
+#define ISCONDDEF (1ULL << kInsnIsCondDef)
+#define HASACQUIRE (1ULL << kInsnHasAcqure)
+#define HASACQUIRERCPC (1ULL << kInsnHasAcqureRCpc)
+#define HASLOACQUIRE (1ULL << kInsnHasLOAcqure)
+#define HASRELEASE (1ULL << kInsnHasRelease)
+#define HASLORELEASE (1ULL << kInsnHasLORelease)
+#define CANTHROW (1ULL << kInsnCanThrow)
+#define ISPARTDEF (1ULL << kInsnIsPartDefine)
+#define ISDMB (1ULL << kInsnIsDMB)
+#define ISUNCONDBRANCH (1ULL << kInsnIsUnCondBr)
+#define ISCONDBRANCH (1ULL << kInsnIsCondBr)
+#define HASLOOP (1ULL << kInsnHasLoop)
+#define ISVECTOR (1ULL << kInsnIsVectorOp)
+#define ISBASICOP (1ULL << kInsnIsBinaryOp)
+#define ISPHI (1ULL << kInsnIsPhi)
+#define ISUNARYOP (1ULL << kInsnIsUnaryOp)
+#define ISSHIFT (1ULL << kInsnIsShift)
+#define ISNOP (1ULL << kInsnIsNop)
+constexpr maplebe::regno_t kInvalidRegNO = 0;
 /*
  * ARM64 has 32 int registers and 32 FP registers.
@@ -107,9 +103,272 @@
  }
};

-/* empty class; just for parameter passing */
-class OpndProp {};
+/*
+ * abstract machine instruction:
+ * a lower-level Maple IR aimed at representing general machine instructions across target CPUs
+ * 1. supports conversion between all types and registers
+ * 2. supports conversion between memory and registers
+ * 3. supports three-address basic operations
+ */
+namespace abstract {
+#define DEFINE_MOP(op, ...) op,
+enum AbstractMOP_t : maple::uint32 {
+#include "abstract_mmir.def"
+  kMopLast
+};
+#undef DEFINE_MOP
+}
+
+struct InsnDescription {
+  MOperator opc;
+  std::vector<const OpndDescription*> opndMD;
+  uint64 properties;
+  uint32 latencyType;
+  const std::string &name;
+  const std::string &format;
+  uint32 atomicNum;  /* indicates how many asm instructions this will emit */
+  std::function<bool(int64)> validFunc = nullptr;  /* if the insn has an immediate operand, this function must be implemented */
+
+  bool IsSame(const InsnDescription &left,
+              std::function<bool(const InsnDescription&, const InsnDescription&)> cmp) const;
+
+  bool IsCall() const {
+    return properties & ISCALL;
+  }
+  bool IsPhi() const {
+    return properties & ISPHI;
+  }
+  bool IsPhysicalInsn() const {
+    return !(properties & ISABSTRACT);
+  }
+  bool IsStore() const {
+    return (properties & ISSTORE);
+  }
+  bool IsLoad() const {
+    return (properties & ISLOAD);
+  }
+  bool IsConversion() const {
+    return (properties & ISCONVERSION);
+  }
+  bool IsLoadPair() const {
+    return (properties & (ISLOADPAIR)) != 0;
+  }
+  bool IsStorePair() const {
+    return (properties & (ISSTOREPAIR)) != 0;
+  }
+  bool IsLoadStorePair() const {
+    return (properties & (ISLOADPAIR | ISSTOREPAIR)) != 0;
+  }
+  bool IsMove() const {
+    return (properties & ISMOVE);
+  }
+  bool IsDMB() const {
+    return (properties & (ISDMB)) != 0;
+  }
+  bool IsBasicOp() const {
+    return (properties & ISBASICOP);
+  }
+  bool IsCondBranch() const {
+    return (properties & (ISCONDBRANCH)) != 0;
+  }
+  bool IsUnCondBranch() const {
+    return (properties & (ISUNCONDBRANCH)) != 0;
+  }
+  bool IsLoadAddress() const {
+    return (properties & (ISLOADADDR)) != 0;
+  }
+  bool IsAtomic() const {
+    return (properties & ISATOMIC) != 0;
+  }
+  bool IsCondDef() const {
+    return properties & ISCONDDEF;
+  }
+  bool IsPartDef() const {
+    return properties & ISPARTDEF;
+  }
+  bool IsVectorOp() const {
+    return properties & ISVECTOR;
+  }
+  bool IsVolatile() const {
+    return ((properties & HASRELEASE) != 0) || ((properties & HASACQUIRE) != 0);
+  }
+  bool IsMemAccessBar() const {
+    return (properties & (HASRELEASE | HASACQUIRE | HASACQUIRERCPC | HASLOACQUIRE | HASLORELEASE)) != 0;
+  }
+  bool IsMemAccess() const {
+    return (properties & (ISLOAD | ISSTORE | ISLOADPAIR | ISSTOREPAIR)) != 0;
+  }
+  bool IsBranch() const {
+    return (properties & (ISCONDBRANCH | ISUNCONDBRANCH)) != 0;
+  }
+  bool HasLoop() const {
+    return properties & HASLOOP;
+  }
+  bool CanThrow() const {
+    return properties & CANTHROW;
+  }
+  MOperator GetOpc() const {
+    return opc;
+  }
+  const OpndDescription *GetOpndDes(size_t index) const {
+    return opndMD[index];
+  }
+  uint32 GetLatencyType() const {
+    return latencyType;
+  }
+  bool IsUnaryOp() const {
+    return (properties & ISUNARYOP);
+  }
+  bool IsShift() const {
+    return (properties & ISSHIFT);
+  }
+  const std::string &GetName() const {
+    return name;
+  }
+  const std::string &GetFormat() const {
+    return format;
+  }
+  uint32 GetAtomicNum() {
+    return atomicNum;
+  }
+  static const InsnDescription &GetAbstractId(MOperator opc) {
+    return abstractId[opc];
+  }
+  static const InsnDescription abstractId[abstract::kMopLast];
+};
+
+enum RegPropState : uint32 {
+  kRegPropUndef = 0,
+  kRegPropDef = 0x1,
+  kRegPropUse = 0x2
+};
+enum RegAddress : uint32 {
+  kRegHigh = 0x4,
+  kRegLow = 0x8
+};
+constexpr uint32 kMemLow12 = 0x10;
+constexpr uint32 kLiteralLow12 = kMemLow12;
+constexpr uint32 kPreInc = 0x20;
+constexpr uint32 kPostInc = 0x40;
+constexpr uint32 kLoadLiteral = 0x80;
+constexpr uint32 kVector = 0x100;
+
+class RegProp {
+ public:
+  RegProp(RegType t, regno_t r, uint32 d) : regType(t), physicalReg(r), defUse(d) {}
+  virtual ~RegProp() = default;
+  const RegType &GetRegType() const {
+    return regType;
+  }
+  const regno_t &GetPhysicalReg() const {
+    return physicalReg;
+  }
+  uint32 GetDefUse() const {
+    return defUse;
+  }
+
+ private:
+  RegType regType;
+  regno_t physicalReg;
+  uint32 defUse;  /* used for register def/use and other operand properties */
+};
+
+class OpndProp {
+ public:
+  OpndProp(Operand::OperandType t, RegProp p, uint8 s) : opndType(t), regProp(p), size(s) {}
+  virtual ~OpndProp() = default;
+  Operand::OperandType GetOperandType() const {
+    return opndType;
+  }
+
+  const RegProp &GetRegProp() const {
+    return regProp;
+  }
+
+  bool IsRegister() const {
+    return opndType == Operand::kOpdRegister;
+  }
+
+  bool IsRegDef() const {
+    return opndType == Operand::kOpdRegister && (regProp.GetDefUse() & kRegPropDef);
+  }
+
+  bool IsRegUse() const {
+    return opndType == Operand::kOpdRegister && (regProp.GetDefUse() & kRegPropUse);
+  }
+
+  bool IsMemLow12() const {
+    return opndType == Operand::kOpdMem && (regProp.GetDefUse() & kMemLow12);
+  }
+
+  bool IsLiteralLow12() const {
+    return opndType == Operand::kOpdStImmediate && (regProp.GetDefUse() & kLiteralLow12);
+  }
+
+  bool IsDef() const {
+    return regProp.GetDefUse() & kRegPropDef;
+  }
+
+  bool IsUse() const {
+    return regProp.GetDefUse() & kRegPropUse;
+  }
+
+  bool IsLoadLiteral() const {
+    return regProp.GetDefUse() & kLoadLiteral;
+  }
+
+  uint8 GetSize() const {
+    return size;
+  }
+
+  uint32 GetOperandSize() const {
+    return static_cast<uint32>(size);
+  }
+
+  bool IsVectorOperand() const {
+    return regProp.GetDefUse() & kVector;
+  }
+
+  void SetContainImm() {
+    isContainImm = true;
+  }
+
+  bool IsContainImm() const {
+    return isContainImm;
+  }
+
+ protected:
+  bool isContainImm = false;
+
+ private:
+  Operand::OperandType opndType;
+  RegProp regProp;
+  uint8 size;
+};
+
+/*
+ * Operand which might include an immediate value.
+ * The function pointer returns whether an immediate is legal on the specific target.
+ */
+class ImmOpndProp : public OpndProp {
+ public:
+  ImmOpndProp(Operand::OperandType t, const RegProp &p, uint8 s, const std::function<bool(int64)> f)
+      : OpndProp(t, p, s),
+        validFunc(f) {
+    SetContainImm();
+  }
+  virtual ~ImmOpndProp() = default;
+
+  bool IsValidImmOpnd(int64 value) const {
+    CHECK_FATAL(validFunc, " Have not set valid function yet in ImmOpndProp");
+    return validFunc(value);
+  }
+
+ private:
+  std::function<bool(int64)> validFunc;
+};
-}  /* namespace maplebe */
+}  /* namespace maplebe */
-#endif  /* MAPLEBE_INCLUDE_CG_ISA_H */
+#endif  /* MAPLEBE_INCLUDE_CG_ISA_H */
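The property-mask scheme above is a plain bit-flag design: each MopProperty enumerator names a bit position, each ISxxx macro turns it into a mask, and queries AND the instruction's property word against one or more masks. A self-contained sketch with hypothetical names (not the maplebe types):

    #include <cassert>
    #include <cstdint>

    enum ToyProperty : uint8_t { kToyIsLoad, kToyIsStore, kToyIsLoadPair, kToyIsStorePair };
    constexpr uint64_t TOYISLOAD = 1ULL << kToyIsLoad;
    constexpr uint64_t TOYISLOADPAIR = 1ULL << kToyIsLoadPair;
    constexpr uint64_t TOYISSTOREPAIR = 1ULL << kToyIsStorePair;

    struct ToyDesc {
      uint64_t properties;
      // mirrors InsnDescription::IsLoadStorePair: one test covers both flags
      bool IsLoadStorePair() const {
        return (properties & (TOYISLOADPAIR | TOYISSTOREPAIR)) != 0;
      }
    };

    int main() {
      ToyDesc ldp{TOYISLOAD | TOYISLOADPAIR};
      assert(ldp.IsLoadStorePair());
      ToyDesc mov{0};
      assert(!mov.IsLoadStorePair());
      return 0;
    }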
diff --git a/src/mapleall/maple_be/include/cg/isel.h b/src/mapleall/maple_be/include/cg/isel.h
new file mode 100644
index 0000000000000000000000000000000000000000..f5d17cc8e1ecd90d28c6d37b3582373f752b545e
--- /dev/null
+++ b/src/mapleall/maple_be/include/cg/isel.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+#ifndef MAPLEBE_INCLUDE_CG_ISEL_H
+#define MAPLEBE_INCLUDE_CG_ISEL_H
+
+#include "cgfunc.h"
+
+namespace maplebe {
+/* macro expansion instruction selection */
+class MPISel {
+ public:
+  MPISel(MemPool &mp, CGFunc &f) : isMp(&mp), cgFunc(&f) {}
+
+  virtual ~MPISel() = default;
+
+  void doMPIS();
+
+  CGFunc *GetCurFunc() {
+    return cgFunc;
+  }
+
+  Operand *HandleExpr(const BaseNode &parent, BaseNode &expr);
+
+  void SelectDassign(DassignNode &stmt, Operand &opndRhs);
+  void SelectIassign(IassignNode &stmt, MPISel &iSel, BaseNode &addr, BaseNode &rhs);
+  void SelectIassignoff(IassignoffNode &stmt);
+  Operand *SelectDread(const BaseNode &parent, AddrofNode &expr);
+  Operand *SelectBand(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent);
+  Operand *SelectAdd(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent);
+  Operand *SelectSub(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent);
+  Operand *SelectNeg(UnaryNode &node, Operand &opnd0, const BaseNode &parent);
+  Operand *SelectCvt(const BaseNode &parent, TypeCvtNode &node, Operand &opnd0);
+  Operand *SelectExtractbits(const BaseNode &parent, ExtractbitsNode &node, Operand &opnd0);
+  Operand *SelectDepositBits(DepositbitsNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent);
+  CGImmOperand *SelectIntConst(MIRIntConst &intConst);
+  CGRegOperand *SelectRegread(RegreadNode &expr);
+  void SelectAdd(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType);
+  void SelectSub(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType);
+  Operand *SelectShift(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent);
+  void SelectShift(Operand &resOpnd, Operand &o0, Operand &o1, Opcode shiftDirect, PrimType primType);
+  void SelectBand(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType);
+  void SelectDiv(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType);
+
+  virtual void SelectReturn(Operand &opnd) = 0;
+  virtual void SelectGoto(GotoNode &stmt) = 0;
+  virtual void SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) = 0;
+  virtual void SelectCall(CallNode &callNode) = 0;
+  virtual Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent) = 0;
+  virtual Operand &ProcessReturnReg(PrimType primType, int32 sReg) = 0;
+  virtual void SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode, Operand &opnd0) = 0;
+  Operand *SelectBior(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent);
+  Operand *SelectBxor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent);
+  Operand *SelectIread(const BaseNode &parent, IreadNode &expr, int extraOffset = 0);
+  Operand *SelectIreadoff(const BaseNode &parent, IreadoffNode &ireadoff);
+  virtual Operand *SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0;
+  virtual Operand *SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0;
+  virtual Operand *SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0;
+  virtual Operand *SelectStrLiteral(ConststrNode &constStr) = 0;
+  Operand *SelectBnot(UnaryNode &node, Operand &opnd0, const BaseNode &parent);
+ protected:
+  MemPool *isMp;
+  CGFunc *cgFunc;
+  void SelectCopy(Operand &dest, Operand &src, PrimType toType, PrimType fromType);
+  void SelectCopy(Operand &dest, Operand &src, PrimType type);
+  void SelectIntCvt(maplebe::CGRegOperand &resOpnd, maplebe::Operand &opnd0, maple::PrimType toType);
+  CGRegOperand &SelectCopy2Reg(Operand &src, PrimType dtype);
+ private:
+  StmtNode *HandleFuncEntry();
+  void HandleFuncExit();
+  void SelectDassign(StIdx stIdx, FieldID fieldId, PrimType rhsPType, Operand &opndRhs);
+  virtual CGMemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId = 0) = 0;
+  virtual Operand &GetTargetRetOperand(PrimType primType, int32 sReg) = 0;
+  void SelectBasicOp(Operand &resOpnd, Operand &opnd0, Operand &opnd1, MOperator mOp, PrimType primType);
+  /*
+   * Supports conversion between all types and registers,
+   * but only conversion between registers and memory:
+   * alltypes -> reg -> mem
+   */
+  template <typename destTy, typename srcTy>
+  void SelectCopyInsn(destTy &dest, srcTy &src, PrimType type);
+  void SelectNeg(Operand &resOpnd, Operand &opnd0, PrimType primType);
+  void SelectBior(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType);
+  void SelectExtractbits(CGRegOperand &resOpnd, CGRegOperand &opnd0, uint8 bitOffset, uint8 bitSize, PrimType primType);
+};
+MAPLE_FUNC_PHASE_DECLARE_BEGIN(InstructionSelector, maplebe::CGFunc)
+MAPLE_FUNC_PHASE_DECLARE_END
+}
+#endif  /* MAPLEBE_INCLUDE_CG_ISEL_H */
diff --git a/src/mapleall/maple_be/include/cg/memlayout.h b/src/mapleall/maple_be/include/cg/memlayout.h
index f2bdf5130cd7c444267ec5a1238ae52e342b27df..6e27c0e7e26894911d0a43b2eb30baefc4f6689d 100644
--- a/src/mapleall/maple_be/include/cg/memlayout.h
+++ b/src/mapleall/maple_be/include/cg/memlayout.h
@@ -66,6 +66,8 @@
   * responsibility to allocate space for those arguments in memory.
   */
  kMsArgsToStkPass,
+  /* The red-zone stack area will not be modified by the exception signal handler. */
+  kMsRedZone,
};
 class CGFunc;
@@ -219,7 +221,7 @@
    return segArgsRegPassed;
  }
-  const MemSegment &GetSegArgsStkPass() const {
+  const MemSegment &GetSegArgsToStkPass() const {
    return segArgsToStkPass;
  }
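The SelectCopyInsn comment in isel.h above states the lowering rule "alltypes -> reg -> mem": anything can be copied into a register, but only a register can be copied into memory, so an immediate-to-memory copy needs an intermediate register. A toy illustration of that rule (hypothetical names, not the MPISel implementation):

    #include <cassert>
    #include <string>
    #include <vector>

    enum class Loc { kImm, kReg, kMem };

    // Returns the pseudo-instruction sequence for copying src into dst.
    static std::vector<std::string> LowerCopy(Loc src, Loc dst) {
      std::vector<std::string> insns;
      if (dst == Loc::kMem && src != Loc::kReg) {
        insns.push_back("copy src -> tmp_reg");   // materialize into a register first
        insns.push_back("store tmp_reg -> mem");
        return insns;
      }
      insns.push_back("copy src -> dst");
      return insns;
    }

    int main() {
      assert(LowerCopy(Loc::kImm, Loc::kMem).size() == 2);  // needs the extra hop
      assert(LowerCopy(Loc::kReg, Loc::kMem).size() == 1);
      return 0;
    }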
diff --git a/src/mapleall/maple_be/include/cg/operand.def b/src/mapleall/maple_be/include/cg/operand.def
new file mode 100644
index 0000000000000000000000000000000000000000..b70b14a48ea12b22770ae3e580689c9b64eb7c5e
--- /dev/null
+++ b/src/mapleall/maple_be/include/cg/operand.def
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */
+
+DEFINE_MOP(Mem8S, {Operand::kOpdMem, operand::kIsUse, 8})
+DEFINE_MOP(Mem8D, {Operand::kOpdMem, operand::kIsDef, 8})
+DEFINE_MOP(Mem16S, {Operand::kOpdMem, operand::kIsUse, 16})
+DEFINE_MOP(Mem16D, {Operand::kOpdMem, operand::kIsDef, 16})
+DEFINE_MOP(Mem32D, {Operand::kOpdMem, operand::kIsDef, 32})
+DEFINE_MOP(Mem32S, {Operand::kOpdMem, operand::kIsUse, 32})
+DEFINE_MOP(Mem64D, {Operand::kOpdMem, operand::kIsDef, 64})
+DEFINE_MOP(Mem64S, {Operand::kOpdMem, operand::kIsUse, 64})
+
+DEFINE_MOP(Reg8IS, {Operand::kOpdRegister, operand::kIsUse | operand::kInt, 8})
+DEFINE_MOP(Reg8ID, {Operand::kOpdRegister, operand::kIsDef | operand::kInt, 8})
+DEFINE_MOP(Reg8IDS, {Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kInt, 8})
+DEFINE_MOP(Reg16ID, {Operand::kOpdRegister, operand::kIsDef | operand::kInt, 16})
+DEFINE_MOP(Reg16IS, {Operand::kOpdRegister, operand::kIsUse | operand::kInt, 16})
+DEFINE_MOP(Reg16IDS, {Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kInt, 16})
+DEFINE_MOP(Reg32ID, {Operand::kOpdRegister, operand::kIsDef | operand::kInt, 32})
+DEFINE_MOP(Reg32IS, {Operand::kOpdRegister, operand::kIsUse | operand::kInt, 32})
+DEFINE_MOP(Reg32IDS, {Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kInt, 32})
+DEFINE_MOP(Reg64ID, {Operand::kOpdRegister, operand::kIsDef | operand::kInt, 64})
+DEFINE_MOP(Reg64IS, {Operand::kOpdRegister, operand::kIsUse | operand::kInt, 64})
+DEFINE_MOP(Reg64IDS, {Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kInt, 64})
+
+DEFINE_MOP(Reg8FS, {Operand::kOpdRegister, operand::kIsUse | operand::kFloat, 8})
+DEFINE_MOP(Reg8FD, {Operand::kOpdRegister, operand::kIsDef | operand::kFloat, 8})
+DEFINE_MOP(Reg16FD, {Operand::kOpdRegister, operand::kIsDef | operand::kFloat, 16})
+DEFINE_MOP(Reg16FS, {Operand::kOpdRegister, operand::kIsUse | operand::kFloat, 16})
+DEFINE_MOP(Reg16FDS, {Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kFloat, 16})
+DEFINE_MOP(Reg32FD, {Operand::kOpdRegister, operand::kIsDef | operand::kFloat, 32})
+DEFINE_MOP(Reg32FS, {Operand::kOpdRegister, operand::kIsUse | operand::kFloat, 32})
+DEFINE_MOP(Reg32FDS, {Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kFloat, 32})
+DEFINE_MOP(Reg64FD, {Operand::kOpdRegister, operand::kIsDef | operand::kFloat, 64})
+DEFINE_MOP(Reg64FS, {Operand::kOpdRegister, operand::kIsUse | operand::kFloat, 64})
+DEFINE_MOP(Reg64FDS, {Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kFloat, 64})
+DEFINE_MOP(Reg128ID, {Operand::kOpdRegister, operand::kIsDef | operand::kFloat, 128})
+DEFINE_MOP(Reg128IS, {Operand::kOpdRegister, operand::kIsUse | operand::kFloat, 128})
+DEFINE_MOP(Reg128IDS, {Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kFloat, 128})
+
+DEFINE_MOP(Imm4, {Operand::kOpdImmediate, operand::kIsUse, 4})
+DEFINE_MOP(Imm5, {Operand::kOpdImmediate, operand::kIsUse, 5})
+DEFINE_MOP(Imm6, {Operand::kOpdImmediate, operand::kIsUse, 6})
+DEFINE_MOP(Imm8, {Operand::kOpdImmediate, operand::kIsUse, 8})
+DEFINE_MOP(Imm16, {Operand::kOpdImmediate, operand::kIsUse, 16})
+DEFINE_MOP(Imm32, {Operand::kOpdImmediate, operand::kIsUse, 32})
+DEFINE_MOP(Imm64, {Operand::kOpdImmediate, operand::kIsUse, 64})
+DEFINE_MOP(FpzeroImm8, {Operand::kOpdFPZeroImmediate, operand::kIsUse, 8})
+
+/* for movk */
+DEFINE_MOP(Lsl4, {Operand::kOpdShift, operand::kIsUse, 4})
+DEFINE_MOP(Lsl6, {Operand::kOpdShift, operand::kIsUse, 6})
+DEFINE_MOP(Lsl12, {Operand::kOpdShift, operand::kIsUse, 12})
+/* for shift */
+DEFINE_MOP(Bitshift32, {Operand::kOpdShift, operand::kIsUse, 5})
+DEFINE_MOP(Bitshift64, {Operand::kOpdShift, operand::kIsUse, 6})
+DEFINE_MOP(Extendshift64, {Operand::kOpdExtend, operand::kIsUse, 3})
+
+DEFINE_MOP(String0S, {Operand::kOpdString, operand::kIsUse, 0})
+
+DEFINE_MOP(Lbl64, {Operand::kOpdUndef, operand::kIsUse, 64})
\ No newline at end of file
diff --git a/src/mapleall/maple_be/include/cg/operand.h b/src/mapleall/maple_be/include/cg/operand.h
index c8a644f8aa277227f5ed255d62df77e368f54469..fb42c16f93ac5d3698cba98f4fc638f917dcc7c7 100644
--- a/src/mapleall/maple_be/include/cg/operand.h
+++ b/src/mapleall/maple_be/include/cg/operand.h
@@ -16,23 +16,38 @@
 #define MAPLEBE_INCLUDE_CG_OPERAND_H
 #include "becommon.h"
-#include "isa.h"
 #include "cg_option.h"
 #include "visitor_common.h"
 /* maple_ir */
-#include "types_def.h"   /* need uint8 etc */
-#include "prim_types.h"  /* for PrimType */
+#include "types_def.h"   /* need uint8 etc */
+#include "prim_types.h"  /* for PrimType */
 #include "mir_symbol.h"
 /* Mempool */
-#include "mempool_allocator.h"  /* MapleList */
+#include "mempool_allocator.h"  /* MapleList */
 namespace maplebe {
+class OpndProp;
 class Emitter;
-namespace {
-constexpr int32 kOffsetImmediateOpndSpace = 4;  /* offset and immediate operand space is 4 */
+bool IsBitSizeImmediate(maple::uint64 val, maple::uint32 bitLen, maple::uint32 nLowerZeroBits);
+bool IsBitmaskImmediate(maple::uint64 val, maple::uint32 bitLen);
+bool IsMoveWidableImmediate(maple::uint64 val, maple::uint32 bitLen);
+bool BetterUseMOVZ(maple::uint64 val);
+
+using regno_t = uint32_t;
+using MOperator = uint32;
+enum RegType : maple::uint8 {
+  kRegTyUndef,
+  kRegTyInt,
+  kRegTyFloat,
+  kRegTyCc,
+  kRegTyX87,
+  kRegTyVary,
+  kRegTyFpsc,
+  kRegTyIndex,
+  kRegTyLast,
};

 class Operand {
@@ -40,19 +55,19 @@
  enum OperandType : uint8 {
    kOpdRegister,
    kOpdImmediate,
+    kOpdMem,
+    kOpdCond,         /* for condition code */
+    kOpdPhi,          /* for phi operand */
    kOpdFPImmediate,
    kOpdFPZeroImmediate,
-    kOpdStImmediate,  /* use the symbol name as the offset */
-    kOpdOffset,       /* for the offset operand in MemOperand */
-    kOpdMem,
+    kOpdStImmediate,  /* use the symbol name as the offset */
+    kOpdOffset,       /* for the offset operand in MemOperand */
    kOpdBBAddress,
-    kOpdList,      /* for list operand */
-    kOpdPhi,       /* for phi operand */
-    kOpdCond,      /* for condition code */
-    kOpdShift,     /* for imm shift operand */
-    kOpdRegShift,  /* for reg shift operand */
-    kOpdExtend,    /* for extend operand */
-    kOpdString,    /* for comments */
+    kOpdList,      /* for list operand */
+    kOpdShift,     /* for imm shift operand */
+    kOpdRegShift,  /* for reg shift operand */
+    kOpdExtend,    /* for extend operand */
+    kOpdString,    /* for comments */
    kOpdUndef
  };
@@ -76,8 +91,8 @@
  }
  bool IsConstImmediate() const {
-    return opndKind == kOpdImmediate || opndKind == kOpdOffset ||
-           opndKind == kOpdFPImmediate || opndKind == kOpdFPZeroImmediate;
+    return opndKind == kOpdImmediate || opndKind == kOpdOffset || opndKind == kOpdFPImmediate ||
+           opndKind == kOpdFPZeroImmediate;
  }
  bool IsOfstImmediate() const {
@@ -89,8 +104,7 @@
  }
  bool IsImmediate() const {
-    ASSERT(kOpdOffset - kOpdImmediate == kOffsetImmediateOpndSpace, "offset and immediate operand space should be 4");
-    return (kOpdImmediate <= opndKind && opndKind <= kOpdOffset);
+    return (kOpdFPImmediate <= opndKind && opndKind <= kOpdOffset) || opndKind == kOpdImmediate;
  }
  bool IsRegister() const {
@@ -109,21 +123,6 @@
    return opndKind == kOpdMem;
  }
-  bool IsConstant() const {
-    return IsConstImmediate() || IsConstReg();
-  }
-
-  bool IsConstReg() const {
-    if (!IsRegister()) {
-      return false;
-    }
-    return IsZeroRegister();
-  };
-
-  virtual bool IsZeroRegister() const {
-    return false;
-  };
-
  bool IsLabel() const {
    return opndKind == kOpdBBAddress;
  }
@@ -174,13 +173,15 @@
  virtual void Dump() const = 0;
+
  virtual bool Less(const Operand &right) const = 0;

  virtual void Accept(OperandVisitorBase &v) = 0;

 protected:
-  OperandType opndKind;  /* operand type */
-  uint32 size;           /* size in bits */
+  OperandType opndKind;  /* operand type */
+  uint32 size;           /* size in bits */
+  uint64 flag = 0;       /* operand property */
};

/* RegOperand */
@@ -190,7 +191,6 @@
  kRegOpndSetHigh32 = 0x2
};
-
 template <typename VisitableTy>
 class OperandVisitable : public Operand {
 public:
@@ -206,15 +206,20 @@
 class RegOperand : public OperandVisitable<RegOperand> {
 public:
-  RegOperand(regno_t regNum, uint32 size, RegType type)
+  RegOperand(regno_t regNum, uint32 size, RegType type, uint32 flg = 0)
    : OperandVisitable(kOpdRegister, size),
      regNO(regNum),
      regType(type),
-      validBitsNum(size) {}
+      validBitsNum(size),
+      flag(flg) {}
  ~RegOperand() override = default;
  using OperandVisitable::OperandVisitable;
+  Operand *Clone(MemPool &memPool) const override {
+    return memPool.Clone<RegOperand>(*this);
+  }
+
  void SetValidBitsNum(uint32 validNum) {
    validBitsNum = validNum;
  }
@@ -247,11 +252,7 @@
    regType = newTy;
  }
-  virtual bool IsVirtualRegister() const {
-    return false;
-  }
-
-  virtual bool IsBBLocalVReg() const {
+  virtual bool IsBBLocalReg() const {
    return isBBLocal;
  }
@@ -267,12 +268,12 @@
    regNO = regNum;
  }
-  virtual bool IsInvalidRegister() const = 0;
-  virtual bool IsSaveReg(MIRType &mirType, BECommon &beCommon) const = 0;
-  virtual bool IsPhysicalRegister() const = 0;
-  virtual bool IsSPOrFP() const = 0;
-  void Emit(Emitter &emitter, const OpndProp *opndProp) const override = 0;
-  void Dump() const override = 0;
+  void Emit(Emitter &emitter, const OpndProp *opndProp) const override {
+    CHECK_FATAL(false, "do not run here");
+  };
+  void Dump() const override {
+    CHECK_FATAL(false, "do not run here");
+  };
  bool Less(const Operand &right) const override {
    if (&right == this) {
@@ -311,7 +312,7 @@
      return true;
    }
    return (BasicEquals(op) && regNO == op.GetRegisterNumber() && regType == op.GetRegisterType() &&
-            IsBBLocalVReg() == op.IsBBLocalVReg());
+            IsBBLocalReg() == op.IsBBLocalReg());
  }
  static bool IsSameRegNO(const Operand &firstOpnd, const Operand &secondOpnd) {
@@ -329,16 +330,71 @@
    }
    return IsSameRegNO(firstOpnd, secondOpnd);
  }
+
  void SetOpndSSAForm() {
    isSSAForm = true;
  }
+
  void SetOpndOutOfSSAForm() {
    isSSAForm = false;
  }
+
  bool IsSSAForm() const {
    return isSSAForm;
  }
+
+  void SetRefField(bool newIsRefField) {
+    isRefField = newIsRefField;
+  }
+
+  bool IsPhysicalRegister() const {
+    return GetRegisterNumber() < 100 && !IsOfCC();
+  }
+
+  bool IsVirtualRegister() const {
+    return !IsPhysicalRegister();
+  }
+
+  bool IsBBLocalVReg() const {
+    return IsVirtualRegister() && IsBBLocalReg();
+  }
+
+  void SetIF64Vec() {
+    if64Vec = true;
+  }
+
+  bool GetIF64Vec() const {
+    return if64Vec;
+  }
+
+  void SetVecLanePosition(int32 pos) {
+    vecLane = static_cast<int16>(pos);
+  }
+
+  int32 GetVecLanePosition() const {
+    return vecLane;
+  }
+
+  void SetVecLaneSize(uint32 size) {
+    vecLaneSize = static_cast<uint16>(size);
+  }
+
+  uint32 GetVecLaneSize() const {
+    return vecLaneSize;
+  }
+
+  void SetVecElementSize(uint32 size) {
+    vecElementSize = size;
+  }
+
+  uint64 GetVecElementSize() const {
+    return vecElementSize;
+  }
+
+  bool operator==(const RegOperand &opnd) const;
+
+  bool operator<(const RegOperand &opnd) const;
+
 protected:
  regno_t regNO;
  RegType regType;
@@ -352,7 +408,13 @@
  uint32 validBitsNum;
  /* use for SSA analysis */
  bool isSSAForm = false;
-};  /* class RegOperand */
+  bool isRefField = false;
+  uint32 flag = 0;
+  int16 vecLane = -1;         /* -1 for whole reg, 0 to 15 to specify each lane one at a time */
+  uint16 vecLaneSize = 0;     /* number of lanes */
+  uint64 vecElementSize = 0;  /* size of vector element in each lane */
+  bool if64Vec = false;       /* operand returning 64x1's int value in FP/SIMD register */
+};  /* class RegOperand */

enum VaryType : uint8 {
  kNotVary = 0,
@@ -362,16 +424,45 @@
 class ImmOperand : public OperandVisitable<ImmOperand> {
 public:
-  ImmOperand(int64 val, uint32 size, bool isSigned, VaryType isVar = kNotVary)
-    : OperandVisitable(kOpdImmediate, size), value(val), isSigned(isSigned), isVary(isVar) {}
-  ImmOperand(OperandType type, int64 val, uint32 size, bool isSigned, VaryType isVar = kNotVary)
-    : OperandVisitable(type, size), value(val), isSigned(isSigned), isVary(isVar) {}
+  ImmOperand(int64 val, uint32 size, bool isSigned, VaryType isVar = kNotVary, bool isFloat = false)
+    : OperandVisitable(kOpdImmediate, size), value(val), isSigned(isSigned), isVary(isVar), isFmov(isFloat) {}
+  ImmOperand(OperandType type, int64 val, uint32 size, bool isSigned, VaryType isVar = kNotVary, bool isFloat = false)
+    : OperandVisitable(type, size), value(val), isSigned(isSigned), isVary(isVar), isFmov(isFloat) {}
  ~ImmOperand() override = default;
  using OperandVisitable::OperandVisitable;
-  virtual bool IsSingleInstructionMovable() const = 0;
-  virtual bool IsInBitSize(uint8 size, uint8 nLowerZeroBits) const = 0;
+  Operand *Clone(MemPool &memPool) const override {
+    return memPool.Clone<ImmOperand>(*this);
+  }
+
+  bool IsInBitSize(uint8 size, uint8 nLowerZeroBits) const {
+    return maplebe::IsBitSizeImmediate(static_cast<uint64>(value), size, nLowerZeroBits);
+  }
+
+  bool IsBitmaskImmediate() const {
+    ASSERT(!IsZero(), " 0 is reserved for bitmask immediate");
+    ASSERT(!IsAllOnes(), " -1 is reserved for bitmask immediate");
+    return maplebe::IsBitmaskImmediate(static_cast<uint64>(value), static_cast<uint32>(size));
+  }
+
+  bool IsBitmaskImmediate(uint32 destSize) const {
+    ASSERT(!IsZero(), " 0 is reserved for bitmask immediate");
+    ASSERT(!IsAllOnes(), " -1 is reserved for bitmask immediate");
+    return maplebe::IsBitmaskImmediate(static_cast<uint64>(value), static_cast<uint32>(destSize));
+  }
+
+  bool IsSingleInstructionMovable() const {
+    return (IsMoveWidableImmediate(static_cast<uint64>(value), static_cast<uint32>(size)) ||
+            IsMoveWidableImmediate(~static_cast<uint64>(value), static_cast<uint32>(size)) ||
+            IsBitmaskImmediate());
+  }
+
+  bool IsSingleInstructionMovable(uint32 destSize) const {
+    return (IsMoveWidableImmediate(static_cast<uint64>(value), static_cast<uint32>(destSize)) ||
+            IsMoveWidableImmediate(~static_cast<uint64>(value), static_cast<uint32>(destSize)) ||
+            IsBitmaskImmediate(destSize));
+  }
  int64 GetValue() const {
    return value;
@@ -405,6 +496,10 @@
    isSigned = true;
  }
+  void SetSigned(bool flag) {
+    isSigned = flag;
+  }
+
  bool IsInBitSizeRot(uint8 size) const {
    return IsInBitSizeRot(size, value);
  }
@@ -484,7 +579,9 @@ class ImmOperand : public OperandVisitable<ImmOperand> {
    return (value == iOpnd.value && isSigned == iOpnd.isSigned && size == iOpnd.GetSize());
  }
-  void Emit(Emitter &emitter, const OpndProp *prop) const override = 0;
+  void Emit(Emitter &emitter, const OpndProp *prop) const override {
+    CHECK_FATAL(false, "do not run here");
+  }
  void Dump() const override;
@@ -529,17 +626,302 @@
  }
    return (value == op.GetValue() && isSigned == op.IsSignedValue());
  }
+  bool IsFmov() {
+    return isFmov;
+  }

 protected:
  int64 value;
  bool isSigned;
  VaryType isVary;
+  bool isFmov = false;
};

-using OfstOperand = ImmOperand;
+class OfstOperand : public ImmOperand {
+ public:
+  enum OfstType : uint8 {
+    kSymbolOffset,
+    kImmediateOffset,
+    kSymbolImmediateOffset,
+  };
+
+  /* only for symbol offset */
+  OfstOperand(const MIRSymbol &mirSymbol, uint32 size, int32 relocs)
+      : ImmOperand(kOpdOffset, 0, size, true, kNotVary, false),
+        offsetType(kSymbolOffset), symbol(&mirSymbol), relocs(relocs) {}
+  /* only for immediate offset */
+  OfstOperand(int64 val, uint32 size, VaryType isVar = kNotVary)
+      : ImmOperand(kOpdOffset, static_cast<int64>(val), size, true, isVar, false),
+        offsetType(kImmediateOffset), symbol(nullptr), relocs(0) {}
+  /* for symbol and immediate offset */
+  OfstOperand(const MIRSymbol &mirSymbol, int64 val, uint32 size, int32 relocs, VaryType isVar = kNotVary)
+      : ImmOperand(kOpdOffset, val, size, true, isVar, false),
+        offsetType(kSymbolImmediateOffset),
+        symbol(&mirSymbol),
+        relocs(relocs) {}
+
+  ~OfstOperand() override = default;
+
+  Operand *Clone(MemPool &memPool) const override {
+    return memPool.Clone<OfstOperand>(*this);
+  }
+
+  bool IsSymOffset() const {
+    return offsetType == kSymbolOffset;
+  }
+  bool IsImmOffset() const {
+    return offsetType == kImmediateOffset;
+  }
+  bool IsSymAndImmOffset() const {
+    return offsetType == kSymbolImmediateOffset;
+  }
+
+  const MIRSymbol *GetSymbol() const {
+    return symbol;
+  }
+
+  const std::string &GetSymbolName() const {
+    return symbol->GetName();
+  }
+
+  int64 GetOffsetValue() const {
+    return GetValue();
+  }
+
+  void SetOffsetValue(int32 offVal) {
+    SetValue(static_cast<int64>(offVal));
+  }
+
+  void AdjustOffset(int32 delta) {
+    Add(static_cast<int64>(delta));
+  }
+
+  bool operator==(const OfstOperand &opnd) const {
+    return (offsetType == opnd.offsetType && symbol == opnd.symbol &&
+            ImmOperand::operator==(opnd) && relocs == opnd.relocs);
+  }
+
+  bool operator<(const OfstOperand &opnd) const {
+    return (offsetType < opnd.offsetType ||
+            (offsetType == opnd.offsetType && symbol < opnd.symbol) ||
+            (offsetType == opnd.offsetType && symbol == opnd.symbol && GetValue() < opnd.GetValue()));
+  }
+
+  void Emit(Emitter &emitter, const OpndProp *prop) const override {
+    CHECK_FATAL(false, "don't run here");
+  };
+
+  void Dump() const override {
+    if (IsImmOffset()) {
+      LogInfo::MapleLogger() << "ofst:" << GetValue();
+    } else {
+      LogInfo::MapleLogger() << GetSymbolName();
+      LogInfo::MapleLogger() << "+offset:" << GetValue();
+    }
+  }
+
+ private:
+  OfstType offsetType;
+  const MIRSymbol *symbol;
+  int32 relocs;
+};
+
+/*
+ * Table C1-6 A64 Load/Store addressing modes
+ *                    |                         Offset
+ * Addressing Mode    | Immediate     | Register             | Extended Register
+ *
+ * Base register only | [base{,#0}]   | -                    | -
+ * (no offset)        | B_OI_NONE     |                      |
+ *                      imm=0
+ *
+ * Base plus offset   | [base{,#imm}] | [base,Xm{,LSL #imm}] | [base,Wm,(S|U)XTW {#imm}]
+ *                      B_OI_NONE     | B_OR_X               | B_OR_X
+ *                      imm=0,1 (0,3) | imm=00,01,10,11 (0/2,s/u)
+ *
+ * Pre-indexed        | [base, #imm]! | -                    | -
+ *
+ * Post-indexed       | [base], #imm  | [base], Xm(a)        | -
+ *
+ * Literal            | label         | -                    | -
+ * (PC-relative)
+ *
+ * a) The post-indexed by register offset mode can be used with the SIMD Load/Store
+ * structure instructions described in Load/Store Vector on page C3-154. Otherwise
+ * the post-indexed by register offset mode is not available.
+ */
 class MemOperand : public OperandVisitable<MemOperand> {
 public:
+  enum AArch64AddressingMode : uint8 {
+    kAddrModeUndef,
+    /* AddrMode_BO, base, offset. EA = [base] + offset; */
+    kAddrModeBOi,  /* INTACT: EA = [base]+immediate */
+    /*
+     * PRE: base += immediate, EA = [base]
+     * POST: EA = [base], base += immediate
+     */
+    kAddrModeBOrX,  /* EA = [base]+Extend([offreg/idxreg]), OR=Wn/Xn */
+    kAddrModeLiteral,  /* the AArch64 instruction LDR takes a literal and */
+    /*
+     * "calculates an address from the PC value and an immediate offset,
+     * loads a word from memory, and writes it to a register."
+     */
+    kAddrModeLo12Li  // EA = [base] + #:lo12:Label+immediate. (Example: [x0, #:lo12:__Label300+456])
+  };
+  /*
+   * ARMv8-A A64 ISA Overview by Matteo Franchin @ ARM
+   * (presented at 64-bit Android on ARM. Sep. 2015) p.14
+   * o Address to load from/store to is a 64-bit base register + an optional offset
+   *   LDR X0, [X1]  ; Load from address held in X1
+   *   STR X0, [X1]  ; Store to address held in X1
+   *
+   * o Offset can be an immediate or a register
+   *   LDR X0, [X1, #8]   ; Load from address [X1 + 8 bytes]
+   *   LDR X0, [X1, #-8]  ; Load with negative offset
+   *   LDR X0, [X1, X2]   ; Load from address [X1 + X2]
+   *
+   * o A Wn register offset needs to be extended to 64 bits
+   *   LDR X0, [X1, W2, SXTW]  ; Sign-extend offset in W2
+   *   LDR X0, [X1, W2, UXTW]  ; Zero-extend offset in W2
+   *
+   * o Both Xn and Wn register offsets can include an optional left-shift
+   *   LDR X0, [X1, W2, UXTW #2]  ; Zero-extend offset in W2 & left-shift by 2
+   *   LDR X0, [X1, X2, LSL #2]   ; Left-shift offset in X2 by 2
+   *
+   * p.15
+   * Addressing Modes      Analogous C Code
+   *                       int *intptr = ...  // X1
+   *                       int out;           // W0
+   * o Simple: X1 is not changed
+   *   LDR W0, [X1]        out = *intptr;
+   * o Offset: X1 is not changed
+   *   LDR W0, [X1, #4]    out = intptr[1];
+   * o Pre-indexed: X1 changed before load
+   *   LDR W0, [X1, #4]!   =|ADD X1,X1,#4    out = *(++intptr);
+   *                        |LDR W0,[X1]
+   * o Post-indexed: X1 changed after load
+   *   LDR W0, [X1], #4    =|LDR W0,[X1]     out = *(intptr++);
+   *                        |ADD X1,X1,#4
+   */
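The pre/post-index analogy in the comment above is easy to verify with plain pointer arithmetic; a short compile-and-run check of both behaviors (ordinary C++, not maplebe code):

    #include <cassert>

    int main() {
      int buf[4] = {10, 20, 30, 40};
      int *p = buf;

      int pre = *(++p);   // like LDR W0, [X1, #4]!  (base updated before the load)
      assert(pre == 20 && p == buf + 1);

      int post = *(p++);  // like LDR W0, [X1], #4   (base updated after the load)
      assert(post == 20 && p == buf + 2);
      return 0;
    }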
+  enum ExtendInfo : uint8 {
+    kShiftZero = 0x1,
+    kShiftOne = 0x2,
+    kShiftTwo = 0x4,
+    kShiftThree = 0x8,
+    kUnsignedExtend = 0x10,
+    kSignExtend = 0x20
+  };
+
+  enum IndexingOption : uint8 {
+    kIntact,     /* base register stays the same */
+    kPreIndex,   /* base register gets changed before load */
+    kPostIndex,  /* base register gets changed after load */
+  };
+
+  MemOperand(uint32 size, const MIRSymbol &mirSymbol) :
+      OperandVisitable(Operand::kOpdMem, size), symbol(&mirSymbol) {}
+
+  MemOperand(uint32 size, RegOperand *baseOp, RegOperand *indexOp, ImmOperand *ofstOp, const MIRSymbol *mirSymbol,
+             Operand *scaleOp = nullptr)
+      : OperandVisitable(Operand::kOpdMem, size),
+        baseOpnd(baseOp),
+        indexOpnd(indexOp),
+        offsetOpnd(ofstOp),
+        scaleOpnd(scaleOp),
+        symbol(mirSymbol) {}
+
+  MemOperand(RegOperand *base, OfstOperand *offset, uint32 size, IndexingOption idxOpt = kIntact)
+      : OperandVisitable(Operand::kOpdMem, size),
+        baseOpnd(base),
+        indexOpnd(nullptr),
+        offsetOpnd(offset),
+        addrMode(kAddrModeBOi),
+        extend(0),
+        idxOpt(idxOpt),
+        noExtend(false),
+        isStackMem(false) {}
+
+  MemOperand(AArch64AddressingMode mode, uint32 size, RegOperand &base, RegOperand *index,
+             ImmOperand *offset, const MIRSymbol *sym)
+      : OperandVisitable(Operand::kOpdMem, size),
+        baseOpnd(&base),
+        indexOpnd(index),
+        offsetOpnd(offset),
+        symbol(sym),
+        addrMode(mode),
+        extend(0),
+        idxOpt(kIntact),
+        noExtend(false),
+        isStackMem(false) {}
+
+  MemOperand(AArch64AddressingMode mode, uint32 size, RegOperand &base, RegOperand &index,
+             ImmOperand *offset, const MIRSymbol &sym, bool noExtend)
+      : OperandVisitable(Operand::kOpdMem, size),
+        baseOpnd(&base),
+        indexOpnd(&index),
+        offsetOpnd(offset),
+        symbol(&sym),
+        addrMode(mode),
+        extend(0),
+        idxOpt(kIntact),
+        noExtend(noExtend),
+        isStackMem(false) {}
+
+  MemOperand(AArch64AddressingMode mode, uint32 dSize, RegOperand &baseOpnd, RegOperand &indexOpnd,
+             uint32 shift, bool isSigned = false)
+      : OperandVisitable(Operand::kOpdMem, dSize),
+        baseOpnd(&baseOpnd),
+        indexOpnd(&indexOpnd),
+        offsetOpnd(nullptr),
+        symbol(nullptr),
+        addrMode(mode),
+        extend((isSigned ? kSignExtend : kUnsignedExtend) | (1U << shift)),
+        idxOpt(kIntact),
+        noExtend(false),
+        isStackMem(false) {}
+
+  MemOperand(AArch64AddressingMode mode, uint32 dSize, const MIRSymbol &sym)
+      : OperandVisitable(Operand::kOpdMem, dSize),
+        baseOpnd(nullptr),
+        indexOpnd(nullptr),
+        offsetOpnd(nullptr),
+        symbol(&sym),
+        addrMode(mode),
+        extend(0),
+        idxOpt(kIntact),
+        noExtend(false),
+        isStackMem(false) {
+    ASSERT(mode == kAddrModeLiteral, "This constructor version is supposed to be used with AddrMode_Literal only");
+  }
+
+  /* Copy constructor */
+  MemOperand(const MemOperand &memOpnd)
+      : OperandVisitable(Operand::kOpdMem, memOpnd.GetSize()),
+        baseOpnd(memOpnd.baseOpnd),
+        indexOpnd(memOpnd.indexOpnd),
+        offsetOpnd(memOpnd.offsetOpnd),
+        scaleOpnd(memOpnd.scaleOpnd),
+        symbol(memOpnd.symbol),
+        memoryOrder(memOpnd.memoryOrder),
+        addrMode(memOpnd.addrMode),
+        extend(memOpnd.extend),
+        idxOpt(memOpnd.idxOpt),
+        noExtend(memOpnd.noExtend),
+        isStackMem(memOpnd.isStackMem),
+        isStackArgMem(memOpnd.isStackArgMem) {}
+
+  MemOperand &operator=(const MemOperand &memOpnd) = default;
+
+  ~MemOperand() override = default;
+  using OperandVisitable::OperandVisitable;
+
+  MemOperand *Clone(MemPool &memPool) const override {
+    return memPool.Clone<MemOperand>(*this);
+  }
+
+  void Emit(Emitter &emitter, const OpndProp *opndProp) const override {};
+  void Dump() const override {};
+
  RegOperand *GetBaseRegister() const {
    return baseOpnd;
  }
@@ -556,11 +938,11 @@
    indexOpnd = &regOpnd;
  }
-  OfstOperand *GetOffsetOperand() const {
+  ImmOperand *GetOffsetOperand() const {
    return offsetOpnd;
  }
-  void SetOffsetOperand(OfstOperand &oftOpnd) {
+  void SetOffsetOperand(ImmOperand &oftOpnd) {
    offsetOpnd = &oftOpnd;
  }
@@ -572,20 +954,6 @@
    return symbol;
  }
-  bool Equals(Operand &operand) const override {
-    if (!operand.IsMemoryAccessOperand()) {
-      return false;
-    }
-    auto &op = static_cast<MemOperand&>(operand);
-    if (&op == this) {
-      return true;
-    }
-    CHECK_FATAL(baseOpnd != nullptr, "baseOpnd is null in Equals");
-    CHECK_FATAL(indexOpnd != nullptr, "indexOpnd is null in Equals");
-    ASSERT(op.GetBaseRegister() != nullptr, "nullptr check");
-    return (baseOpnd->Equals(*op.GetBaseRegister()) && indexOpnd->Equals(*op.GetIndexRegister()));
-  }
-
  void SetMemoryOrdering(uint32 memOrder) {
    memoryOrder |= memOrder;
  }
@@ -594,60 +962,236 @@
    return (memoryOrder & memOrder) != 0;
  }
-  virtual Operand *GetOffset() const {
-    return nullptr;
+  void SetAccessSize(uint8 size) {
+    accessSize = size;
+  }
+
+  uint8 GetAccessSize() const {
+    return accessSize;
+  }
+
+  AArch64AddressingMode GetAddrMode() const {
+    return addrMode;
+  }
+
+  const std::string &GetSymbolName() const {
+    return GetSymbol()->GetName();
+  }
+
+  bool IsStackMem() const {
+    return isStackMem;
  }
-  virtual VaryType GetMemVaryType() {
+  void SetStackMem(bool isStack) {
+    isStackMem = isStack;
+  }
+
+  bool IsStackArgMem() const {
+    return isStackArgMem;
+  }
+
+  void SetStackArgMem(bool isStackArg) {
+    isStackArgMem = isStackArg;
+  }
+
+  Operand *GetOffset() const;
+
+  OfstOperand *GetOffsetImmediate() const {
+    return static_cast<OfstOperand*>(GetOffsetOperand());
+  }
+
+  /* Returns N where alignment == 2^N */
+  static int32 GetImmediateOffsetAlignment(uint32 dSize) {
+    ASSERT(dSize >= k8BitSize, "error val:dSize");
+    ASSERT(dSize <= k128BitSize, "error val:dSize");
+    ASSERT((dSize & (dSize - 1)) == 0, "error val:dSize");
+    /* dSize==8: 0, dSize==16: 1, dSize==32: 2, dSize==64: 3 */
+    return __builtin_ctz(dSize) - static_cast<int32>(kBaseOffsetAlignment);
+  }
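A quick worked check of the alignment formula above, under the assumption that kBaseOffsetAlignment corresponds to the 8-bit (one byte) case and therefore equals ctz(8) == 3; the helper name here is hypothetical:

    #include <cassert>
    #include <cstdint>

    // For an access of dSizeBits, returns N such that scaled offsets must be
    // multiples of 2^N bytes (assumes the base constant is 3, i.e. ctz(8)).
    static int AlignmentExponent(uint32_t dSizeBits) {
      return __builtin_ctz(dSizeBits) - 3;
    }

    int main() {
      assert(AlignmentExponent(8) == 0);    // byte: any offset
      assert(AlignmentExponent(16) == 1);   // halfword: multiple of 2
      assert(AlignmentExponent(32) == 2);   // word: multiple of 4
      assert(AlignmentExponent(64) == 3);   // doubleword: multiple of 8
      assert(AlignmentExponent(128) == 4);  // quadword: multiple of 16
      return 0;
    }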
+  static int32 GetMaxPIMM(uint32 dSize) {
+    dSize = dSize > k64BitSize ? k64BitSize : dSize;
+    ASSERT(dSize >= k8BitSize, "error val:dSize");
+    ASSERT(dSize <= k128BitSize, "error val:dSize");
+    ASSERT((dSize & (dSize - 1)) == 0, "error val:dSize");
+    int32 alignment = GetImmediateOffsetAlignment(dSize);
+    /* alignment is between kAlignmentOf8Bit and kAlignmentOf64Bit */
+    ASSERT(alignment >= kOffsetAlignmentOf8Bit, "error val:alignment");
+    ASSERT(alignment <= kOffsetAlignmentOf128Bit, "error val:alignment");
+    return (kMaxPimm[alignment]);
+  }
+
+  static int32 GetMaxPairPIMM(uint32 dSize) {
+    ASSERT(dSize >= k32BitSize, "error val:dSize");
+    ASSERT(dSize <= k128BitSize, "error val:dSize");
+    ASSERT((dSize & (dSize - 1)) == 0, "error val:dSize");
+    int32 alignment = GetImmediateOffsetAlignment(dSize);
+    /* alignment is between kAlignmentOf8Bit and kAlignmentOf64Bit */
+    ASSERT(alignment >= kOffsetAlignmentOf32Bit, "error val:alignment");
+    ASSERT(alignment <= kOffsetAlignmentOf128Bit, "error val:alignment");
+    return (kMaxPairPimm[static_cast<uint32>(alignment) - k2BitSize]);
+  }
+
+  bool IsOffsetMisaligned(uint32 dSize) const {
+    ASSERT(dSize >= k8BitSize, "error val:dSize");
+    ASSERT(dSize <= k128BitSize, "error val:dSize");
+    ASSERT((dSize & (dSize - 1)) == 0, "error val:dSize");
+    if (dSize == k8BitSize || addrMode != kAddrModeBOi) {
+      return false;
+    }
+    OfstOperand *ofstOpnd = GetOffsetImmediate();
+    return ((static_cast<uint64>(ofstOpnd->GetOffsetValue()) &
+             static_cast<uint64>((1U << static_cast<uint32>(GetImmediateOffsetAlignment(dSize))) - 1)) != 0);
+  }
+
+  static bool IsSIMMOffsetOutOfRange(int64 offset, bool is64bit, bool isLDSTPair) {
+    if (!isLDSTPair) {
+      return (offset < kMinSimm32 || offset > kMaxSimm32);
+    }
+    if (is64bit) {
+      return (offset < kMinSimm64 || offset > kMaxSimm64Pair) || (static_cast<uint64>(offset) & k7BitSize);
+    }
+    return (offset < kMinSimm32 || offset > kMaxSimm32Pair) || (static_cast<uint64>(offset) & k3BitSize);
+  }
+
+  static bool IsPIMMOffsetOutOfRange(int32 offset, uint32 dSize) {
+    ASSERT(dSize >= k8BitSize, "error val:dSize");
+    ASSERT(dSize <= k128BitSize, "error val:dSize");
+    ASSERT((dSize & (dSize - 1)) == 0, "error val:dSize");
+    return (offset < 0 || offset > GetMaxPIMM(dSize));
+  }
+
+  bool operator<(const MemOperand &opnd) const {
+    return addrMode < opnd.addrMode ||
+           (addrMode == opnd.addrMode && GetBaseRegister() < opnd.GetBaseRegister()) ||
+           (addrMode == opnd.addrMode && GetBaseRegister() == opnd.GetBaseRegister() &&
+            GetIndexRegister() < opnd.GetIndexRegister()) ||
+           (addrMode == opnd.addrMode && GetBaseRegister() == opnd.GetBaseRegister() &&
+            GetIndexRegister() == opnd.GetIndexRegister() && GetOffsetOperand() < opnd.GetOffsetOperand()) ||
+           (addrMode == opnd.addrMode && GetBaseRegister() == opnd.GetBaseRegister() &&
+            GetIndexRegister() == opnd.GetIndexRegister() && GetOffsetOperand() == opnd.GetOffsetOperand() &&
+            GetSymbol() < opnd.GetSymbol()) ||
+           (addrMode == opnd.addrMode && GetBaseRegister() == opnd.GetBaseRegister() &&
+            GetIndexRegister() == opnd.GetIndexRegister() && GetOffsetOperand() == opnd.GetOffsetOperand() &&
+            GetSymbol() == opnd.GetSymbol() && GetSize() < opnd.GetSize()) ||
+           (addrMode == opnd.addrMode && GetBaseRegister() == opnd.GetBaseRegister() &&
+            GetIndexRegister() == opnd.GetIndexRegister() && GetOffsetOperand() == opnd.GetOffsetOperand() &&
+            GetSymbol() == opnd.GetSymbol() && GetSize() == opnd.GetSize() && extend < opnd.extend);
+  }
+
+  bool operator==(const MemOperand &opnd) const {
+    return (GetSize() == opnd.GetSize()) && (addrMode == opnd.addrMode) && (extend == opnd.extend) &&
+           (GetBaseRegister() == opnd.GetBaseRegister()) &&
+           (GetIndexRegister() == opnd.GetIndexRegister()) &&
+           (GetSymbol() == opnd.GetSymbol()) &&
+           (GetOffsetOperand() == opnd.GetOffsetOperand());
+  }
+
+  VaryType GetMemVaryType() {
+    Operand *ofstOpnd = GetOffsetOperand();
+    if (ofstOpnd != nullptr) {
+      auto *opnd = static_cast<ImmOperand*>(ofstOpnd);
+      return opnd->GetVary();
+    }
    return kNotVary;
  }
-  void SetAccessSize(uint8 size) {
-    accessSize = size;
+  AArch64AddressingMode GetAddrMode() {
+    return addrMode;
  }
-  uint8 GetAccessSize() const {
-    return accessSize;
+  void SetAddrMode(AArch64AddressingMode val) {
+    addrMode = val;
  }
-  bool Less(const Operand &right) const override = 0;
+  bool IsExtendedRegisterMode() const {
+    return addrMode == kAddrModeBOrX;
+  }
-  MemOperand(uint32 size, const MIRSymbol &mirSymbol)
-      : OperandVisitable(Operand::kOpdMem, size),
-        symbol(&mirSymbol) {}
+  void UpdateExtend(ExtendInfo flag) {
+    extend = flag | (1U << ShiftAmount());
+  }
-  MemOperand(uint32 size, RegOperand *baseOp, RegOperand *indexOp, OfstOperand *ofstOp, const MIRSymbol *mirSymbol,
-             Operand *scaleOp = nullptr)
-      : OperandVisitable(Operand::kOpdMem, size),
-        baseOpnd(baseOp),
-        indexOpnd(indexOp),
-        offsetOpnd(ofstOp),
-        scaleOpnd(scaleOp),
-        symbol(mirSymbol) {}
+  bool SignedExtend() const {
+    return IsExtendedRegisterMode() && ((extend & kSignExtend) != 0);
+  }
-  /* Copy constructor */
-  MemOperand(const MemOperand &memOpnd)
-      : OperandVisitable(Operand::kOpdMem, memOpnd.GetSize()),
-        baseOpnd(memOpnd.baseOpnd),
-        indexOpnd(memOpnd.indexOpnd),
-        offsetOpnd(memOpnd.offsetOpnd),
-        scaleOpnd(memOpnd.scaleOpnd),
-        symbol(memOpnd.symbol),
-        memoryOrder(memOpnd.memoryOrder) {}
+  bool UnsignedExtend() const {
+    return IsExtendedRegisterMode() && !SignedExtend();
+  }
-  MemOperand &operator=(const MemOperand &memOpnd) = default;
+  uint32 ShiftAmount() const {
+    uint32 scale = extend & 0xF;
+    /* 8 is 1 << 3, 4 is 1 << 2, 2 is 1 << 1, 1 is 1 << 0 */
+    return (scale == 8) ? 3 : ((scale == 4) ? 2 : ((scale == 2) ? 1 : 0));
+  }
-  ~MemOperand() override = default;
-  using OperandVisitable::OperandVisitable;
+  bool ShouldEmitExtend() const {
+    return !noExtend && ((extend & 0x3F) != 0);
+  }
+
+  IndexingOption GetIndexOpt() const {
+    return idxOpt;
+  }
+
+  void SetIndexOpt(IndexingOption newidxOpt) {
+    idxOpt = newidxOpt;
+  }
+
+  bool GetNoExtend() const {
+    return noExtend;
+  }
+
+  void SetNoExtend(bool val) {
+    noExtend = val;
+  }
+
+  uint32 GetExtend() const {
+    return extend;
+  }
+
+  void SetExtend(uint32 val) {
+    extend = val;
+  }
+
+  bool IsIntactIndexed() const {
+    return idxOpt == kIntact;
+  }
+
+  bool IsPostIndexed() const {
+    return idxOpt == kPostIndex;
+  }
+
+  bool IsPreIndexed() const {
+    return idxOpt == kPreIndex;
+  }
+
+  std::string GetExtendAsString() const {
+    if (GetIndexRegister()->GetSize() == k64BitSize) {
+      return std::string("LSL");
+    }
+    return ((extend & kSignExtend) != 0) ? std::string("SXTW") : std::string("UXTW");
+  }
+
+  /* Return true if the given operand has the same base register and offset as this one. */
+  bool Equals(Operand &opnd) const override;
+  bool Equals(const MemOperand &opnd) const;
+  bool Less(const Operand &right) const override;
 private:
-  RegOperand *baseOpnd = nullptr;    /* base register */
-  RegOperand *indexOpnd = nullptr;   /* index register */
-  OfstOperand *offsetOpnd = nullptr; /* offset immediate */
+  RegOperand *baseOpnd = nullptr;    /* base register */
+  RegOperand *indexOpnd = nullptr;   /* index register */
+  ImmOperand *offsetOpnd = nullptr;  /* offset immediate */
  Operand *scaleOpnd = nullptr;
-  const MIRSymbol *symbol;  /* AddrMode_Literal */
+  const MIRSymbol *symbol;  /* AddrMode_Literal */
  uint32 memoryOrder = 0;
-  uint8 accessSize = 0;  /* temp, must be set right before use every time */
+  uint8 accessSize = 0;             /* temp, must be set right before use every time */
+  AArch64AddressingMode addrMode = kAddrModeBOi;
+  uint32 extend = 0;                /* used with offset register; AddrMode_B_OR_X */
+  IndexingOption idxOpt = kIntact;  /* used with offset immediate; AddrMode_B_OI */
+  bool noExtend = false;
+  bool isStackMem = false;
+  bool isStackArgMem = false;
};

 class LabelOperand : public OperandVisitable<LabelOperand> {
@@ -682,7 +1226,9 @@
    orderID = idx;
  }
-  void Emit(Emitter &emitter, const OpndProp *opndProp) const override;
+  void Emit(Emitter &emitter, const OpndProp *opndProp) const override {
+    CHECK_FATAL(false, "do not run here");
+  }
  void Dump() const override;
@@ -733,6 +1279,10 @@
  using OperandVisitable::OperandVisitable;

+  Operand *Clone(MemPool &memPool) const override {
+    return memPool.Clone<ListOperand>(*this);
+  }
+
  void PushOpnd(RegOperand &opnd) {
    opndList.push_back(&opnd);
  }
@@ -741,7 +1291,9 @@
    return opndList;
  }
-  void Emit(Emitter &emitter, const OpndProp *opndProp) const override = 0;
+  void Emit(Emitter &emitter, const OpndProp *opndProp) const override {
+    CHECK_FATAL(false, "do not run here");
+  }
  void Dump() const override {
    for (auto it = opndList.begin(); it != opndList.end();) {
@@ -782,9 +1334,17 @@
  ~PhiOperand() override = default;
  using OperandVisitable::OperandVisitable;
-  void Emit(Emitter &emitter, const OpndProp *opndProp) const override = 0;
+  Operand *Clone(MemPool &memPool) const override {
+    return memPool.Clone<PhiOperand>(*this);
+  }
+
+  void Emit(Emitter &emitter, const OpndProp *opndProp) const override {
+    CHECK_FATAL(false, "can a CPU support phi?");
+  }
-  void Dump() const override = 0;
+  void Dump() const override {
+    CHECK_FATAL(false, "NIY");
+  }
  void InsertOpnd(uint32 bbId, RegOperand &phiParam) {
    ASSERT(!phiList.count(bbId), "cannot insert duplicate operand");
@@ -844,6 +1404,423 @@
 protected:
  MapleMap<uint32, RegOperand*> phiList;  /* ssa-operand && BBId */
};
-}  /* namespace maplebe */
-#endif  /* MAPLEBE_INCLUDE_CG_OPERAND_H */
+class CGRegOperand : public OperandVisitable<CGRegOperand> {
+ public:
+  CGRegOperand(regno_t regId, uint32 sz, RegType type) : OperandVisitable(kOpdRegister, sz),
+                                                         regNO(regId),
+                                                         regType(type) {}
+  ~CGRegOperand() override = default;
+  using OperandVisitable::OperandVisitable;
+
+  regno_t GetRegisterNumber() const {
+    return regNO;
+  }
+  bool IsOfIntClass() const {
+    return regType == kRegTyInt;
+  }
+
+  bool IsOfFloatOrSIMDClass() const {
+    return regType == kRegTyFloat;
+  }
+
+  bool IsOfCC() const {
+    return regType == kRegTyCc;
+  }
+
+  bool IsOfVary() const {
+    return regType == kRegTyVary;
+  }
+
+  RegType GetRegisterType() const {
+    return regType;
+  }
+
+  void SetRegisterType(RegType newTy) {
+    regType = newTy;
+  }
+
+  Operand *Clone(MemPool &memPool) const override {
+    return memPool.New<CGRegOperand>(*this);
+  }
+  bool Less(const Operand &right) const override {
+    return GetKind() < right.GetKind();
+  }
+  /* delete soon */
+  void Emit(Emitter&, const OpndProp*) const override {}
+
+  void Dump() const override {
+    LogInfo::MapleLogger() << "reg ";
+    LogInfo::MapleLogger() << "size : " << GetSize();
+    LogInfo::MapleLogger() << " NO_" << GetRegisterNumber();
+  }
+ private:
+  regno_t regNO;
+  RegType regType;
+};
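The CG* operands above are the target-independent counterparts of the earlier classes: each carries just an operand kind (used by Less for ordering), a bit width, and an id. A toy model of that triple (hypothetical names, not maplebe code):

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    enum ToyKind : uint8_t { kReg = 0, kImm = 1, kMem = 2 };

    struct ToyRegOpnd {
      ToyKind kind = kReg;
      uint32_t sizeBits;
      uint32_t regNO;
      // mirrors CGRegOperand::Less: ordering considers only the operand kind
      bool Less(const ToyRegOpnd &rhs) const { return kind < rhs.kind; }
      void Dump() const {
        std::cout << "reg size : " << sizeBits << " NO_" << regNO << '\n';
      }
    };

    int main() {
      ToyRegOpnd r0{kReg, 64, 0};
      r0.Dump();             // prints: reg size : 64 NO_0
      assert(!r0.Less(r0));  // same kind: not less
      return 0;
    }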
+ + MapleList<CGRegOperand*> &GetOperands() { + return opndList; + } + + Operand *Clone(MemPool &memPool) const override { + return memPool.New<CGListOperand>(*this); + } + + bool Less(const Operand &right) const override { + return GetKind() < right.GetKind(); + } + /* delete soon */ + void Emit(Emitter&, const OpndProp*) const override {} + + void Dump() const override { + for (auto it = opndList.begin(); it != opndList.end();) { + (*it)->Dump(); + LogInfo::MapleLogger() << (++it == opndList.end() ? "" : " ,"); + } + } + + protected: + MapleList<CGRegOperand*> opndList; +}; + +class CGFuncNameOperand : public OperandVisitable<CGFuncNameOperand> { + public: + explicit CGFuncNameOperand(const MIRSymbol &fsym) : OperandVisitable(kOpdBBAddress, 0), + symbol(&fsym) {} + + ~CGFuncNameOperand() override = default; + using OperandVisitable::OperandVisitable; + + const std::string &GetName() const { + return symbol->GetName(); + } + + const MIRSymbol *GetFunctionSymbol() const { + return symbol; + } + + void SetFunctionSymbol(const MIRSymbol &fsym) { + symbol = &fsym; + } + + Operand *Clone(MemPool &memPool) const override { + return memPool.New<CGFuncNameOperand>(*this); + } + + bool Less(const Operand &right) const override { + return GetKind() < right.GetKind(); + } + + /* delete soon */ + void Emit(Emitter&, const OpndProp*) const override {} + + void Dump() const override { + LogInfo::MapleLogger() << GetName(); + } + + private: + const MIRSymbol *symbol; +}; + +class CGLabelOperand : public OperandVisitable<CGLabelOperand> { + public: + CGLabelOperand(const char *parent, LabelIdx labIdx) + : OperandVisitable(kOpdBBAddress, 0), labelIndex(labIdx), parentFunc(parent) {} + + ~CGLabelOperand() override = default; + using OperandVisitable::OperandVisitable; + + Operand *Clone(MemPool &memPool) const override { + return memPool.Clone(*this); + } + + LabelIdx GetLabelIndex() const { + return labelIndex; + } + + const std::string &GetParentFunc() const { + return parentFunc; + } + + void Emit(Emitter &emitter, const OpndProp *opndProp) const override {} + + void Dump() const override { + LogInfo::MapleLogger() << "label "; + LogInfo::MapleLogger() << "name : " << GetParentFunc(); + LogInfo::MapleLogger() << " Idx: " << GetLabelIndex(); + } + + bool Less(const Operand &right) const override { + if (&right == this) { + return false; + } + + /* For different type.
*/ + if (opndKind != right.GetKind()) { + return opndKind < right.GetKind(); + } + + auto *rightOpnd = static_cast<const CGLabelOperand*>(&right); + + int32 nRes = strcmp(parentFunc.c_str(), rightOpnd->parentFunc.c_str()); + if (nRes == 0) { + return labelIndex < rightOpnd->labelIndex; + } else { + return nRes < 0; + } + } + + protected: + LabelIdx labelIndex; + const std::string parentFunc; + +}; + +namespace operand { +/* bit 0-7 for common */ +enum CommOpndDescProp : maple::uint64 { + kIsDef = (1ULL << 0), + kIsUse = (1ULL << 1), + kIsVector = (1ULL << 2) + +}; + +/* bit 8-15 for reg */ +enum RegOpndDescProp : maple::uint64 { + kInt = (1ULL << 8), + kFloat = (1ULL << 9), + kRegTyCc = (1ULL << 10), + kRegTyVary = (1ULL << 11), +}; + +/* bit 16-23 for imm */ +enum ImmOpndDescProp : maple::uint64 { + +}; + +/* bit 24-31 for mem */ +enum MemOpndDescProp : maple::uint64 { + kMemLow12 = (1ULL << 24), + kLiteralLow12 = kMemLow12, + kIsLoadLiteral = (1ULL << 25) + +}; +} + +class OpndDescription { + public: + OpndDescription(Operand::OperandType t, maple::uint64 p, maple::uint32 s) : + opndType(t), property(p), size(s) {} + virtual ~OpndDescription() = default; + + Operand::OperandType GetOperandType() const { + return opndType; + } + + maple::uint32 GetSize() const { + return size; + } + + bool IsImm() const { + return opndType == Operand::kOpdImmediate; + } + + bool IsRegister() const { + return opndType == Operand::kOpdRegister; + } + + bool IsMem() const { + return opndType == Operand::kOpdMem; + } + + bool IsRegDef() const { + return opndType == Operand::kOpdRegister && (property & operand::kIsDef); + } + + bool IsRegUse() const { + return opndType == Operand::kOpdRegister && (property & operand::kIsUse); + } + + bool IsDef() const { + return (property & operand::kIsDef); + } + + bool IsUse() const { + return (property & operand::kIsUse); + } + + bool IsMemLow12() const { + return IsMem() && (property & operand::kMemLow12); + } + + bool IsLiteralLow12() const { + return opndType == Operand::kOpdStImmediate && (property & operand::kLiteralLow12); + } + + bool IsLoadLiteral() const { + return property & operand::kIsLoadLiteral; + } + +#define DEFINE_MOP(op, ...) static const OpndDescription op; +#include "operand.def" +#undef DEFINE_MOP + + private: + Operand::OperandType opndType; + maple::uint64 property; + maple::uint32 size; +}; + +class OpndDumpVisitor : public OperandVisitorBase, + public OperandVisitors<CGRegOperand, CGImmOperand, CGMemOperand, CGFuncNameOperand, CGListOperand, CGLabelOperand> { + public: + explicit OpndDumpVisitor(const OpndDescription &operandDesc) : opndDesc(&operandDesc) {} + virtual ~OpndDumpVisitor() = default; + + protected: + virtual void DumpOpndPrefix() { + LogInfo::MapleLogger() << " (opnd:"; + } + virtual void DumpOpndSuffix() { + LogInfo::MapleLogger() << " )"; + } + void DumpSize(Operand &opnd) { + LogInfo::MapleLogger() << " [size:" << opnd.GetSize() << "]"; + } + const OpndDescription *GetOpndDesc() const { + return opndDesc; + } + + private: + const OpndDescription *opndDesc; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_OPERAND_H */ diff --git a/src/mapleall/maple_be/include/cg/peep.h b/src/mapleall/maple_be/include/cg/peep.h index ca7eb045d4599e1a0784c1c4b4dbcbbc46b578aa..7753c218702eb6a5e0b89b8e772bb689ebba0a27 100644 --- a/src/mapleall/maple_be/include/cg/peep.h +++ b/src/mapleall/maple_be/include/cg/peep.h @@ -70,6 +70,12 @@ class CGPeepPattern { int64 GetLogValueAtBase2(int64 val) const; /* The CC reg is unique and cannot cross-version props.
*/ bool IsCCRegCrossVersion(Insn &startInsn, Insn &endInsn, const RegOperand &ccReg); + /* optimization support function */ + bool IfOperandIsLiveAfterInsn(const RegOperand ®Opnd, Insn &insn); + bool FindRegLiveOut(const RegOperand ®Opnd, const BB &bb); + bool CheckOpndLiveinSuccs(const RegOperand ®Opnd, const BB &bb) const; + bool CheckRegLiveinReturnBB(const RegOperand ®Opnd, const BB &bb) const; + ReturnType IsOpndLiveinBB(const RegOperand ®Opnd, const BB &bb) const; bool GetPatternRes() const { return optSuccess; } diff --git a/src/mapleall/maple_be/include/cg/proepilog.h b/src/mapleall/maple_be/include/cg/proepilog.h index 7f46bdac1242a66170044863eade2bd2523fad2e..254602eab7e57c3a7cb88e23c2987329d17b8331 100644 --- a/src/mapleall/maple_be/include/cg/proepilog.h +++ b/src/mapleall/maple_be/include/cg/proepilog.h @@ -56,9 +56,6 @@ class GenProEpilog { CGFunc &cgFunc; int64 offsetFromCfa = 0; /* SP offset from Call Frame Address */ }; - -MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgGenProEpiLog, maplebe::CGFunc) -MAPLE_FUNC_PHASE_DECLARE_END } /* namespace maplebe */ #endif /* MAPLEBE_INCLUDE_CG_PROEPILOG_H */ diff --git a/src/mapleall/maple_be/include/cg/reg_alloc_basic.h b/src/mapleall/maple_be/include/cg/reg_alloc_basic.h index d1d7cca142412edd37d1368595aff2abcb18a62c..661ed59ec0901a8bfbc34b17a4d8b05d1f2a118d 100644 --- a/src/mapleall/maple_be/include/cg/reg_alloc_basic.h +++ b/src/mapleall/maple_be/include/cg/reg_alloc_basic.h @@ -12,8 +12,8 @@ * FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. */ -#ifndef MAPLEBE_INCLUDE_CG_REG_ALLOC_DEFAULT_H -#define MAPLEBE_INCLUDE_CG_REG_ALLOC_DEFAULT_H +#ifndef MAPLEBE_INCLUDE_CG_REG_ALLOC_BASIC_H +#define MAPLEBE_INCLUDE_CG_REG_ALLOC_BASIC_H #include "reg_alloc.h" #include "operand.h" #include "cgfunc.h" @@ -28,7 +28,7 @@ class DefaultO0RegAllocator : public RegAllocator { regMap(std::less(), alloc.Adapter()), liveReg(std::less(), alloc.Adapter()), allocatedSet(std::less(), alloc.Adapter()), - regLiveness(std::less(), alloc.Adapter()), + regLiveness(alloc.Adapter()), rememberRegs(alloc.Adapter()) { regInfo = cgFunc.GetTargetRegInfo(); availRegSet.resize(regInfo->GetAllRegNum(), false); @@ -39,17 +39,32 @@ class DefaultO0RegAllocator : public RegAllocator { bool AllocateRegisters() override; void InitAvailReg(); + +#ifdef TARGX86_64 + bool AllocatePhysicalRegister(const CGRegOperand &opnd); +#else bool AllocatePhysicalRegister(const RegOperand &opnd); +#endif void ReleaseReg(regno_t reg); +#ifdef TARGX86_64 + void ReleaseReg(const CGRegOperand ®Opnd); +#else void ReleaseReg(const RegOperand ®Opnd); +#endif void GetPhysicalRegisterBank(RegType regType, uint8 &start, uint8 &end) const; void AllocHandleDestList(Insn &insn, Operand &opnd, uint32 idx); void AllocHandleDest(Insn &insn, Operand &opnd, uint32 idx); void AllocHandleSrcList(Insn &insn, Operand &opnd, uint32 idx); void AllocHandleSrc(Insn &insn, Operand &opnd, uint32 idx); +#ifndef TARGX86_64 void AllocHandleCallee(Insn &insn); +#endif bool IsSpecialReg(regno_t reg) const; +#ifdef TARGX86_64 + void SaveCalleeSavedReg(const CGRegOperand &opnd); +#else void SaveCalleeSavedReg(const RegOperand &opnd); +#endif protected: Operand *HandleRegOpnd(Operand &opnd); @@ -57,7 +72,11 @@ class DefaultO0RegAllocator : public RegAllocator { Operand *AllocSrcOpnd(Operand &opnd); Operand *AllocDestOpnd(Operand &opnd, const Insn &insn); +#ifdef TARGX86_64 + uint32 GetRegLivenessId(regno_t regNo); +#else uint32 GetRegLivenessId(Operand *opnd); +#endif void SetupRegLiveness(BB *bb); RegisterInfo 
*regInfo = nullptr; @@ -66,9 +85,13 @@ class DefaultO0RegAllocator : public RegAllocator { MapleMap regMap; /* virtual-register-to-physical-register map */ MapleSet liveReg; /* a set of currently live physical registers */ MapleSet allocatedSet; /* already allocated */ +#ifdef TARGX86_64 + MapleMap> regLiveness; +#else MapleMap regLiveness; +#endif MapleVector rememberRegs; }; } /* namespace maplebe */ -#endif /* MAPLEBE_INCLUDE_CG_REG_ALLOC_DEFAULT_H */ +#endif /* MAPLEBE_INCLUDE_CG_REG_ALLOC_BASIC_H */ diff --git a/src/mapleall/maple_be/include/cg/reg_coalesce.h b/src/mapleall/maple_be/include/cg/reg_coalesce.h index d403ef172410d43944f564b855473fe036fcf287..0465ee42103dcb3dbed8c496b15a0413d6b4204a 100644 --- a/src/mapleall/maple_be/include/cg/reg_coalesce.h +++ b/src/mapleall/maple_be/include/cg/reg_coalesce.h @@ -122,11 +122,11 @@ public: } } - MapleSet &GetDefPoint() { + InsnMapleSet &GetDefPoint() { return defPoints; } - MapleSet &GetUsePoint() { + InsnMapleSet &GetUsePoint() { return usePoints; } @@ -180,28 +180,32 @@ public: private: MapleMap> ranges; MapleSet conflict; - MapleSet defPoints; - MapleSet usePoints; + InsnMapleSet defPoints; + InsnMapleSet usePoints; uint32 numCall = 0; RegType regType = kRegTyUndef; regno_t regno = 0; MapleAllocator &alloc; }; -class RegisterCoalesce { +class LiveIntervalAnalysis { public: - RegisterCoalesce(CGFunc &func, MemPool &memPool) + LiveIntervalAnalysis(CGFunc &func, MemPool &memPool) : cgFunc(&func), memPool(&memPool), alloc(&memPool), vregIntervals(alloc.Adapter()) {} - virtual ~RegisterCoalesce() = default; + virtual ~LiveIntervalAnalysis() = default; virtual void ComputeLiveIntervals() = 0; virtual void CoalesceRegisters() = 0; void Run(); + void Analysis(); + void DoAnalysis(); + void ClearBFS(); void Dump(); + void CoalesceLiveIntervals(LiveInterval &lrDest, LiveInterval &lrSrc); LiveInterval *GetLiveInterval(regno_t regno) { auto it = vregIntervals.find(regno); if (it == vregIntervals.end()) { @@ -217,10 +221,18 @@ class RegisterCoalesce { MapleAllocator alloc; MapleMap vregIntervals; Bfs *bfs = nullptr; + bool runAnalysis = false; }; MAPLE_FUNC_PHASE_DECLARE(CgRegCoalesce, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CGliveIntervalAnalysis, maplebe::CGFunc) + LiveIntervalAnalysis *GetResult() { + return liveInterval; + } + LiveIntervalAnalysis *liveInterval = nullptr; + OVERRIDE_DEPENDENCE +MAPLE_FUNC_PHASE_DECLARE_END } /* namespace maplebe */ #endif /* MAPLEBE_INCLUDE_CG_REGCOALESCE_H */ diff --git a/src/mapleall/maple_be/include/cg/reg_info.h b/src/mapleall/maple_be/include/cg/reg_info.h index 9a775ee9b04b37bac270a30933cbed3f04f3059d..d3bdd51736632759fab3625d6b97e2aba62e2e35 100644 --- a/src/mapleall/maple_be/include/cg/reg_info.h +++ b/src/mapleall/maple_be/include/cg/reg_info.h @@ -69,7 +69,12 @@ class RegisterInfo { virtual bool IsUnconcernedReg(regno_t regNO) const = 0; virtual bool IsUnconcernedReg(const RegOperand ®Opnd) const = 0; virtual void SaveCalleeSavedReg(MapleSet savedRegs) = 0; - + virtual bool IsVirtualRegister(const CGRegOperand ®Opnd) { + return false; + } + virtual bool IsUnconcernedReg(const CGRegOperand ®Opnd) const { + return false; + } private: MapleAllocator *memAllocator; MapleSet allIntRegs; diff --git a/src/mapleall/maple_be/include/cg/schedule.h b/src/mapleall/maple_be/include/cg/schedule.h index 7cd8da9768440feb23a0525bcfd9d6df85bbddd8..199b75cdea5039d1add81aedfb5b44176d43626b 100644 --- a/src/mapleall/maple_be/include/cg/schedule.h +++ b/src/mapleall/maple_be/include/cg/schedule.h @@ -31,7 +31,7 
@@ class RegPressureSchedule { originalNodeSeries(alloc.Adapter()), readyList(alloc.Adapter()), partialList(alloc.Adapter()), partialSet(alloc.Adapter()), partialScheduledNode(alloc.Adapter()), optimisticScheduledNodes(alloc.Adapter()), - liveInRegNO(alloc.Adapter()), liveOutRegNO(alloc.Adapter()) {} + splitterIndexes(alloc.Adapter()), liveInRegNO(alloc.Adapter()), liveOutRegNO(alloc.Adapter()) {} virtual ~RegPressureSchedule() = default; void InitBBInfo(BB &b, MemPool &memPool, const MapleVector &nodes); @@ -88,7 +88,7 @@ class RegPressureSchedule { /* optimistic schedule series with minimum register pressure */ MapleVector optimisticScheduledNodes; /* save split points */ - std::vector splitterIndexes; + MapleVector splitterIndexes; /* save integer register pressure */ std::vector integerRegisterPressureList; /* save the amount of every type register. */ diff --git a/src/mapleall/maple_be/include/cg/standardize.h b/src/mapleall/maple_be/include/cg/standardize.h new file mode 100644 index 0000000000000000000000000000000000000000..71d12e599b53c2388ed9fe904b5ec977a26fa62b --- /dev/null +++ b/src/mapleall/maple_be/include/cg/standardize.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef MAPLEBE_INCLUDE_STANDARDIZE_H +#define MAPLEBE_INCLUDE_STANDARDIZE_H + +#include "cgfunc.h" +namespace maplebe { +class Standardize { + public: + explicit Standardize(CGFunc &f) : cgFunc(&f) {} + + virtual ~Standardize() = default; + + /* + * For CPU instructions that take a different number of operands than maple IR does, + * maple provides a default lowering from three-address form to two-address form. + * The conversion rule is: + * mop(dest, src1, src2) -> mov(dest, src1) + * mop(dest, src2) + * e.g. an abstract add_32 d, s1, s2 becomes movl s1, d; addl s2, d on x64. + * Maple also provides a default lowering from two-address form to one-address form + * for unary ops. The conversion rule is: + * mop(dest, src) -> mov(dest, src) + * mop(dest) + */ + void AddressMapping(Insn &insn); + + void DoStandardize(); + + protected: + void SetAddressMapping(bool needMapping) { + needAddrMapping = needMapping; + } + bool NeedAddressMapping(Insn &insn) { + /* Operand number is 2 for the two-address mode and 3 for the three-address mode */ + needAddrMapping = (insn.GetOperandSize() > 2) || (insn.IsUnaryOp()); + return needAddrMapping; + } + private: + /* + * For a target ISA which closely approximates the maple backend machine IR (for example ARM), + * two instructions that have the same operands and properties are considered the same, + * so a single-instruction to single-instruction (SISO) mapping is possible. + * Usage guide: + * 1. The order of entries in target_md.def and abstract_mmir.def must be the same. + * 2. Write the target InsnDescription comparison rule for InsnDescription.IsSame(); + * otherwise it will return false by default.
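+ * For example, with matching entry order, abstract MOP_and_32 maps one-to-one onto + * x64 MOP_andl_r_r (see x64_abstract_mapping.def later in this patch).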
+ */ + virtual bool TryFastTargetIRMapping(Insn &insn) = 0; + + virtual void StdzMov(Insn &insn) = 0; + virtual void StdzStrLdr(Insn &insn) = 0; + virtual void StdzBasicOp(Insn &insn) = 0; + virtual void StdzUnaryOp(Insn &insn) = 0; + virtual void StdzCvtOp(Insn &insn, CGFunc &cgFunc) = 0; + virtual void StdzShiftOp(Insn &insn, CGFunc &cgFunc) = 0; + CGFunc *cgFunc; + bool needAddrMapping = false; +}; +} +#endif /* MAPLEBE_INCLUDE_STANDARDIZE_H */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/valid_bitmask_imm.txt b/src/mapleall/maple_be/include/cg/valid_bitmask_imm.txt similarity index 100% rename from src/mapleall/maple_be/src/cg/aarch64/valid_bitmask_imm.txt rename to src/mapleall/maple_be/include/cg/valid_bitmask_imm.txt diff --git a/src/mapleall/maple_be/include/cg/visitor_common.h b/src/mapleall/maple_be/include/cg/visitor_common.h index b61838e7152a288088ddfc095e136ce18b1e3a6c..9572dc1b9b18d76a82d1aa00d162862037083279 100644 --- a/src/mapleall/maple_be/include/cg/visitor_common.h +++ b/src/mapleall/maple_be/include/cg/visitor_common.h @@ -38,8 +38,7 @@ template class OperandVisitors : public OperandVisitor, public OperandVisitor, - public OperandVisitor + public OperandVisitor ... {}; - } /* namespace maplebe */ #endif /* MAPLEBE_INCLUDE_CG_VISITOR_COMMON_H */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_MPISel.h b/src/mapleall/maple_be/include/cg/x86_64/x64_MPISel.h index 72ba534f1b21bbd023375a8f6b493d9338cbcc20..e0a019ccbc7222f3a341315f14a177f814762cb1 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_MPISel.h +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_MPISel.h @@ -11,4 +11,41 @@ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR * FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. 
- */ \ No newline at end of file + */ + +#ifndef MAPLEBE_INCLUDE_X64_MPISEL_H +#define MAPLEBE_INCLUDE_X64_MPISEL_H + +#include "isel.h" + +namespace maplebe { +class X64MPIsel : public MPISel { + public: + X64MPIsel(MemPool &mp, CGFunc &f) : MPISel(mp, f) {} + ~X64MPIsel() override = default; + void SelectReturn(Operand &opnd) override; + void SelectCall(CallNode &callNode) override; + Operand &ProcessReturnReg(PrimType primType, int32 sReg) override; + Operand &GetTargetRetOperand(PrimType primType, int32 sReg) override; + Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent) override; + void SelectGoto(GotoNode &stmt) override; + void SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) override; + void SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode, Operand &opnd0) override; + Operand* SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + CGMemOperand &CreateMemOpndOrNull(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr, int64 offset = 0); + Operand *SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectStrLiteral(ConststrNode &constStr) override; + + private: + CGMemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId = 0) override; + void SelectParmList(StmtNode &naryNode, CGListOperand &srcOpnds); + Insn &AppendCall(MIRSymbol &sym, CGListOperand &srcOpnds); + MOperator PickJmpInsn(Opcode brOp, Opcode cmpOp, bool isSigned) const; + void SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); + void SelectCmpOp(CGRegOperand &resOpnd, Operand &opnd0, Operand &opnd1, Opcode opCode, PrimType primType, + PrimType primOpndType, const BaseNode &parent); +}; +} + +#endif /* MAPLEBE_INCLUDE_X64_MPISEL_H */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_abi.h b/src/mapleall/maple_be/include/cg/x86_64/x64_abi.h new file mode 100644 index 0000000000000000000000000000000000000000..5e66ac71edba7387a13f6303fd4982719b9134ba --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_abi.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_X64_X64_ABI_H +#define MAPLEBE_INCLUDE_CG_X64_X64_ABI_H + +#include "x64_isa.h" +#include "types_def.h" +#include "becommon.h" + +namespace maplebe { +using namespace maple; + +namespace x64 { +constexpr int32 kNumIntParmRegs = 6; +constexpr int32 kNumIntReturnRegs = 2; +constexpr int32 kNumFloatParmRegs = 8; +constexpr int32 kNumFloatReturnRegs = 2; + +constexpr uint32 kNormalUseOperandNum = 3; +constexpr uint32 kMaxInstrForCondBr = 260000; + +constexpr X64reg intParmRegs[kNumIntParmRegs] = { R7, R6, R3, R2, R8, R9 }; +constexpr X64reg intReturnRegs[kNumIntReturnRegs] = { R0, R3 }; +constexpr X64reg floatParmRegs[kNumFloatParmRegs] = { V8, V9, V10, V11, V12, V13, V14, V15 }; +constexpr X64reg floatReturnRegs[kNumFloatReturnRegs] = { V8, V9 }; + +/* + * Refer to: + * x64-bit Architecture. + */ +bool IsAvailableReg(X64reg reg); +bool IsCalleeSavedReg(X64reg reg); +bool IsCallerSaveReg(X64reg reg); +bool IsParamReg(X64reg reg); +bool IsSpillReg(X64reg reg); +bool IsExtraSpillReg(X64reg reg); +bool IsSpillRegInRA(X64reg regNO, bool has3RegOpnd); +PrimType IsVectorArrayType(MIRType *ty, uint32 &arraySize); +} /* namespace x64 */ + +/* + * X64-bit Architecture. + * After the argument values have been computed, they are placed either in registers + * or pushed on the stack. The way how values are passed is described in the + * following sections. + * - INTEGER This class consists of integral types that fit into one of the general + purpose registers. + - SSE The class consists of types that fit into a vector register. + - SSEUP The class consists of types that fit into a vector register and can be passed + and returned in the upper bytes of it. + - X87, X87UP These classes consists of types that will be returned via the x87 FPU. + - COMPLEX_X87 This class consists of types that will be returned via the x87 FPU. + - NO_CLASS This class is used as initializer in the algorithms. It will be used for + padding and empty structures and unions. + - MEMORY This class consists of types that will be passed and returned in memory via the stack. + * + */ +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_X64_X64_ABI_H */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_abstract_mapping.def b/src/mapleall/maple_be/include/cg/x86_64/x64_abstract_mapping.def new file mode 100644 index 0000000000000000000000000000000000000000..fb81862ba5c651940a5fff1f7b93c75637e9eea8 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_abstract_mapping.def @@ -0,0 +1,74 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
+ */ + +/* Mapping between abstract maple machine IR and machine operation code of X86_64*/ +/* {mmir, mop} */ +DEFINE_MAPPING(abstract::MOP_undef, x64::MOP_begin) + +/* Mov */ +DEFINE_MAPPING(abstract::MOP_copy_ri_8, x64::MOP_movb_i_r) +DEFINE_MAPPING(abstract::MOP_copy_ri_16, x64::MOP_movw_i_r) +DEFINE_MAPPING(abstract::MOP_copy_ri_32, x64::MOP_movl_i_r) +DEFINE_MAPPING(abstract::MOP_copy_ri_64, x64::MOP_movq_i_r) +DEFINE_MAPPING(abstract::MOP_copy_rr_8, x64::MOP_movb_r_r) +DEFINE_MAPPING(abstract::MOP_copy_rr_16, x64::MOP_movw_r_r) +DEFINE_MAPPING(abstract::MOP_copy_rr_32, x64::MOP_movl_r_r) +DEFINE_MAPPING(abstract::MOP_copy_rr_64, x64::MOP_movq_r_r) + +/* str/load */ +DEFINE_MAPPING(abstract::MOP_str_8, x64::MOP_movb_r_m) +DEFINE_MAPPING(abstract::MOP_str_16, x64::MOP_movw_r_m) +DEFINE_MAPPING(abstract::MOP_str_32, x64::MOP_movl_r_m) +DEFINE_MAPPING(abstract::MOP_str_64, x64::MOP_movq_r_m) +DEFINE_MAPPING(abstract::MOP_load_8, x64::MOP_movb_m_r) +DEFINE_MAPPING(abstract::MOP_load_16, x64::MOP_movw_m_r) +DEFINE_MAPPING(abstract::MOP_load_32, x64::MOP_movl_m_r) +DEFINE_MAPPING(abstract::MOP_load_64, x64::MOP_movq_m_r) + +/* shift -- shl/ashr/lshr */ +DEFINE_MAPPING(abstract::MOP_shl_8, x64::MOP_shlb_r_r) +DEFINE_MAPPING(abstract::MOP_shl_16, x64::MOP_shlw_r_r) +DEFINE_MAPPING(abstract::MOP_shl_32, x64::MOP_shll_r_r) +DEFINE_MAPPING(abstract::MOP_shl_64, x64::MOP_shlq_r_r) +DEFINE_MAPPING(abstract::MOP_ashr_8, x64::MOP_sarb_r_r) +DEFINE_MAPPING(abstract::MOP_ashr_16, x64::MOP_sarw_r_r) +DEFINE_MAPPING(abstract::MOP_ashr_32, x64::MOP_sarl_r_r) +DEFINE_MAPPING(abstract::MOP_ashr_64, x64::MOP_sarq_r_r) +DEFINE_MAPPING(abstract::MOP_lshr_8, x64::MOP_shrb_r_r) +DEFINE_MAPPING(abstract::MOP_lshr_16, x64::MOP_shrw_r_r) +DEFINE_MAPPING(abstract::MOP_lshr_32, x64::MOP_shrl_r_r) +DEFINE_MAPPING(abstract::MOP_lshr_64, x64::MOP_shrq_r_r) + +/* BasicOp */ +DEFINE_MAPPING(abstract::MOP_and_8, x64::MOP_andb_r_r) +DEFINE_MAPPING(abstract::MOP_and_16, x64::MOP_andw_r_r) +DEFINE_MAPPING(abstract::MOP_and_32, x64::MOP_andl_r_r) +DEFINE_MAPPING(abstract::MOP_and_64, x64::MOP_andq_r_r) +DEFINE_MAPPING(abstract::MOP_or_8, x64::MOP_orb_r_r) +DEFINE_MAPPING(abstract::MOP_or_16, x64::MOP_orw_r_r) +DEFINE_MAPPING(abstract::MOP_or_32, x64::MOP_orl_r_r) +DEFINE_MAPPING(abstract::MOP_or_64, x64::MOP_orq_r_r) +DEFINE_MAPPING(abstract::MOP_xor_8, x64::MOP_xorb_r_r) +DEFINE_MAPPING(abstract::MOP_xor_16, x64::MOP_xorw_r_r) +DEFINE_MAPPING(abstract::MOP_xor_32, x64::MOP_xorl_r_r) +DEFINE_MAPPING(abstract::MOP_xor_64, x64::MOP_xorq_r_r) +DEFINE_MAPPING(abstract::MOP_add_8, x64::MOP_addb_r_r) +DEFINE_MAPPING(abstract::MOP_add_16, x64::MOP_addw_r_r) +DEFINE_MAPPING(abstract::MOP_add_32, x64::MOP_addl_r_r) +DEFINE_MAPPING(abstract::MOP_add_64, x64::MOP_addq_r_r) +DEFINE_MAPPING(abstract::MOP_sub_8, x64::MOP_subb_r_r) +DEFINE_MAPPING(abstract::MOP_sub_16, x64::MOP_subw_r_r) +DEFINE_MAPPING(abstract::MOP_sub_32, x64::MOP_subl_r_r) +DEFINE_MAPPING(abstract::MOP_sub_64, x64::MOP_subq_r_r) diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_args.h b/src/mapleall/maple_be/include/cg/x86_64/x64_args.h new file mode 100644 index 0000000000000000000000000000000000000000..9f4d8db1d11c763fa0a0b7cb1b99495cb44957a3 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_args.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_X64_X64_ARGS_H +#define MAPLEBE_INCLUDE_CG_X64_X64_ARGS_H + +#include "args.h" +#include "x64_isa.h" +#include "x64_cgfunc.h" +#include "x64_call_conv.h" + +namespace maplebe { +using namespace maple; +using namespace x64; + +struct ArgInfo { + X64reg reg; + MIRType *mirTy; + uint32 symSize; + uint32 stkSize; + RegType regType; + MIRSymbol *sym; + const X64SymbolAlloc *symLoc; + uint8 memPairSecondRegSize; /* struct arg requiring two regs, size of 2nd reg */ + bool doMemPairOpt; + bool createTwoStores; + bool isTwoRegParm; +}; + +class X64MoveRegArgs : public MoveRegArgs { + public: + explicit X64MoveRegArgs(CGFunc &func) : MoveRegArgs(func) {} + ~X64MoveRegArgs() override = default; + void Run() override; + + private: + void CollectRegisterArgs(std::map &argsList, std::vector &indexList, + std::map &pairReg, std::vector &numFpRegs, + std::vector &fpSize) const; + ArgInfo GetArgInfo(std::map &argsList, std::vector &numFpRegs, + std::vector &fpSize, uint32 argIndex) const; + bool IsInSameSegment(const ArgInfo &firstArgInfo, const ArgInfo &secondArgInfo) const; + void GenerateStrInsn(ArgInfo &argInfo, X64reg reg2, uint32 numFpRegs, uint32 fpSize); + void MoveRegisterArgs(); +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_X64_X64_ARGS_H */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_call_conv.h b/src/mapleall/maple_be/include/cg/x86_64/x64_call_conv.h new file mode 100644 index 0000000000000000000000000000000000000000..02266c07675098d75afb4428918961e7ebd5982f --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_call_conv.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_X64_X64_CALL_CONV_H +#define MAPLEBE_INCLUDE_CG_X64_X64_CALL_CONV_H + +#include "types_def.h" +#include "becommon.h" +#include "call_conv.h" +#include "abi.h" +#include "x64_abi.h" + +namespace maplebe { +using namespace maple; +using namespace x64; + +class X64CallConvImpl { + public: + explicit X64CallConvImpl(BECommon &be) : beCommon(be) {} + + ~X64CallConvImpl() = default; + + void InitCCLocInfo(CCLocInfo &pLoc) const; + + /* Passing value related */ + int32 LocateNextParm(MIRType &mirType, CCLocInfo &pLoc, bool isFirst = false, MIRFunction *func = nullptr); + + /* return value related */ + int32 LocateRetVal(MIRType &retType, CCLocInfo &ploc); + + private: + X64reg AllocateGPParmRegister() { + return (nextGeneralParmRegNO < kNumIntParmRegs) ? 
+ intParmRegs[nextGeneralParmRegNO++] : kRinvalid; + } + + void AllocateTwoGPParmRegisters(CCLocInfo &pLoc) { + if ((nextGeneralParmRegNO + 1) < kNumIntParmRegs) { + pLoc.reg0 = intParmRegs[nextGeneralParmRegNO++]; + pLoc.reg1 = intParmRegs[nextGeneralParmRegNO++]; + } else { + pLoc.reg0 = kRinvalid; + } + } + + X64reg AllocateGPReturnRegister() { + return (nextGeneralReturnRegNO < kNumIntReturnRegs) ? + intReturnRegs[nextGeneralReturnRegNO++] : kRinvalid; + } + + void AllocateTwoGPReturnRegisters(CCLocInfo &pLoc) { + if ((nextGeneralReturnRegNO + 1) < kNumIntReturnRegs) { + pLoc.reg0 = intReturnRegs[nextGeneralReturnRegNO++]; + pLoc.reg1 = intReturnRegs[nextGeneralReturnRegNO++]; + } else { + pLoc.reg0 = kRinvalid; + } + } + + BECommon &beCommon; + uint64 paramNum = 0; /* number of all types of parameters processed so far */ + int32 nextGeneralParmRegNO = 0; /* number of integer parameters processed so far */ + int32 nextGeneralReturnRegNO = 0; /* number of integer return processed so far */ + int32 nextStackArgAdress = 0; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_X64_X64_CALL_CONV_H */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_cg.h b/src/mapleall/maple_be/include/cg/x86_64/x64_cg.h index e43470dc70b8b1da893f47d76caca7332bdf00a6..74b67890da38f70094a8d61876ab51e7558a3fd0 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_cg.h +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_cg.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -18,16 +18,35 @@ #define MAPLEBE_INCLUDE_CG_X86_64_CG_H #include "cg.h" +#include "x64_isa.h" +#include "x64_MPISel.h" +#include "x64_standardize.h" +#include "x64_args.h" + namespace maplebe { +constexpr int32 kIntRegTypeNum = 5; + class X64CG : public CG { public: X64CG(MIRModule &mod, const CGOptions &opts) : CG(mod, opts) {} + static const InsnDescription kMd[x64::kMopLast]; void EnrollTargetPhases(MaplePhaseManager *pm) const override; /* Init SubTarget phase */ /*LiveAnalysis *CreateLiveAnalysis(MemPool &mp, CGFunc &f) const override; - MoveRegArgs *CreateMoveRegArgs(MemPool &mp, CGFunc &f) const override; AlignAnalysis *CreateAlignAnalysis(MemPool &mp, CGFunc &f) const override;*/ + MoveRegArgs *CreateMoveRegArgs(MemPool &mp, CGFunc &f) const override { + return mp.New(f); + } + + MPISel *CreateMPIsel(MemPool &mp, CGFunc &f) const override { + return mp.New(mp, f); + } + + Standardize *CreateStandardize(MemPool &mp, CGFunc &f) const override { + return mp.New(f); + } + /* Init SubTarget optimization */ Insn &BuildPhiInsn(RegOperand &defOpnd, Operand &listParam) override; @@ -35,7 +54,7 @@ class X64CG : public CG { PhiOperand &CreatePhiOperand(MemPool &mp, MapleAllocator &mAllocator) override; CGFunc *CreateCGFunc(MIRModule &mod, MIRFunction &mirFunc, BECommon &bec, MemPool &memPool, - StackMemPool &stackMp, MapleAllocator &mallocator, uint32 funcId) override; + StackMemPool &stackMp, MapleAllocator &mallocator, uint32 funcId) override; bool IsExclusiveFunc(MIRFunction &mirFunc) override; @@ -44,6 +63,14 @@ class X64CG : public CG { /* Used for GCTIB pattern merging */ std::string FindGCTIBPatternName(const std::string &name) const override; + static std::array, kIntRegTypeNum> intRegNames; + enum : uint8 { + kR8LowList, + kR8HighList, + kR16List, + kR32List, + 
kR64List + }; }; -} -#endif /* MAPLEBE_INCLUDE_CG_X86_64_CG_H */ +} // namespace maplebe +#endif /* MAPLEBE_INCLUDE_CG_X86_64_CG_H */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_cgfunc.h b/src/mapleall/maple_be/include/cg/x86_64/x64_cgfunc.h index caad5c3c3d277e7bfc286c199ddf96a73fdc5a03..6f03d56091edc7ab95ebb52b3263027a9ac29b48 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_cgfunc.h +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_cgfunc.h @@ -17,15 +17,21 @@ #include "cgfunc.h" #include "x64_memlayout.h" +#include "x64_isa.h" +#include "x64_reg_info.h" namespace maplebe { class X64CGFunc : public CGFunc { public: X64CGFunc(MIRModule &mod, CG &c, MIRFunction &f, BECommon &b, MemPool &memPool, StackMemPool &stackMp, MapleAllocator &mallocator, uint32 funcId) - : CGFunc(mod, c, f, b, memPool, stackMp, mallocator, funcId) { + : CGFunc(mod, c, f, b, memPool, stackMp, mallocator, funcId), + calleeSavedRegs(mallocator.Adapter()), + formalRegList(mallocator.Adapter()) { CGFunc::SetMemlayout(*memPool.New(b, f, mallocator)); CGFunc::GetMemlayout()->SetCurrFunction(*this); + CGFunc::SetTargetRegInfo(*memPool.New(mallocator)); + CGFunc::GetTargetRegInfo()->SetCurrFunction(*this); } /* null implementation yet */ InsnVisitor *NewInsnModifier() override; @@ -65,10 +71,10 @@ class X64CGFunc : public CGFunc { Operand *SelectCisaligned(IntrinsicopNode &intrinopNode) override; Operand *SelectCalignup(IntrinsicopNode &intrinopNode) override; Operand *SelectCaligndown(IntrinsicopNode &intrinopNode) override; - Operand *SelectCSyncAddFetch(IntrinsicopNode &intrinopNode, PrimType pty) override; - Operand *SelectCSyncFetchAdd(IntrinsicopNode &intrinopNode, PrimType pty) override; - Operand *SelectCSyncSubFetch(IntrinsicopNode &intrinopNode, PrimType pty) override; - Operand *SelectCSyncFetchSub(IntrinsicopNode &intrinopNode, PrimType pty) override; + Operand *SelectCSyncFetch(IntrinsicopNode &intrinsicopNode, Opcode op, bool fetchBefore) override; + Operand *SelectCSyncSynchronize(IntrinsicopNode &intrinsicopNode) override; + Operand *SelectCAtomicLoadN(IntrinsicopNode &intrinsicopNode) override; + Operand *SelectCAtomicExchangeN(IntrinsicopNode &intrinsicopNode) override; Operand *SelectCSyncBoolCmpSwap(IntrinsicopNode &intrinopNode, PrimType pty) override; Operand *SelectCSyncValCmpSwap(IntrinsicopNode &intrinopNode, PrimType pty) override; Operand *SelectCSyncLockTestSet(IntrinsicopNode &intrinopNode, PrimType pty) override; @@ -84,6 +90,7 @@ class X64CGFunc : public CGFunc { Operand &SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) override; Operand *SelectIread(const BaseNode &parent, IreadNode &expr, int extraOffset = 0, PrimType finalBitFieldDestType = kPtyInvalid) override; + Operand *SelectIreadoff(const BaseNode &parent, IreadoffNode &ireadoff) override; Operand *SelectIntConst(MIRIntConst &intConst) override; Operand *SelectFloatConst(MIRFloatConst &floatConst, const BaseNode &parent) override; Operand *SelectDoubleConst(MIRDoubleConst &doubleConst, const BaseNode &parent) override; @@ -132,6 +139,7 @@ class X64CGFunc : public CGFunc { Operand *SelectRetype(TypeCvtNode &node, Operand &opnd0) override; Operand *SelectRound(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) override; Operand *SelectCvt(const BaseNode &parent, TypeCvtNode &node, Operand &opnd0) override; + Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; Operand *SelectTrunc(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) override; 
Operand *SelectSelect(TernaryNode &node, Operand &cond, Operand &opnd0, Operand &opnd1, const BaseNode &parent, bool hasCompare = false) override; @@ -157,13 +165,11 @@ class X64CGFunc : public CGFunc { RegOperand &GetOrCreateVirtualRegisterOperand(RegOperand ®Opnd) override; RegOperand &GetOrCreateFramePointerRegOperand() override; RegOperand &GetOrCreateStackBaseRegOperand() override; - int32 GetBaseOffset(const SymbolAlloc &symbolAlloc) override; - Operand &GetZeroOpnd(uint32 size) override; + RegOperand &GetZeroOpnd(uint32 size) override; Operand &CreateCfiRegOperand(uint32 reg, uint32 size) override; Operand &GetTargetRetOperand(PrimType primType, int32 sReg) override; Operand &CreateImmOperand(PrimType primType, int64 val) override; - Operand *CreateZeroOperand(PrimType primType) override; - void ReplaceOpndInInsn(RegOperand ®Dest, RegOperand ®Src, Insn &insn) override; + void ReplaceOpndInInsn(RegOperand ®Dest, RegOperand ®Src, Insn &insn, regno_t regno) override; void CleanupDeadMov(bool dump = false) override; void GetRealCallerSaveRegs(const Insn &insn, std::set &realCallerSave) override; bool IsFrameReg(const RegOperand &opnd) const override; @@ -204,10 +210,47 @@ class X64CGFunc : public CGFunc { RegOperand *SelectVectorSum(PrimType rtype, Operand *o1, PrimType oType) override; RegOperand *SelectVectorTableLookup(PrimType rType, Operand *o1, Operand *o2) override; RegOperand *SelectVectorWiden(PrimType rType, Operand *o1, PrimType otyp, bool isLow) override; + Operand *SelectIntrinsicOpWithNParams(IntrinsicopNode &intrinopNode, PrimType retType, + const std::string &name) override; Operand &CreateFPImmZero(PrimType primType) override; void ProcessLazyBinding() override; void DBGFixCallFrameLocationOffsets() override; MemOperand *GetPseudoRegisterSpillMemoryOperand(PregIdx idx) override; + + int32 GetBaseOffset(const SymbolAlloc &symbolAlloc) override; + CGRegOperand *GetBaseReg(const SymbolAlloc &symAlloc); + + const MapleVector &GetFormalRegList() const { + return formalRegList; + } + + void PushElemIntoFormalRegList(x64::X64reg reg) { + formalRegList.emplace_back(reg); + } + void AddtoCalleeSaved(x64::X64reg reg) { + return; + } + + private: + MapleVector calleeSavedRegs; + MapleVector formalRegList; /* store the parameters register used by this function */ + void DumpTargetIR(const Insn &insn) const override; +}; + +class X64OpndDumpVistor : public OpndDumpVisitor { + public: + explicit X64OpndDumpVistor(const OpndDescription &operandDesc) : OpndDumpVisitor(operandDesc) {}; + ~X64OpndDumpVistor() override = default; + + void Visit(CGRegOperand *v) final; + void Visit(CGImmOperand *v) final; + void Visit(CGMemOperand *v) final; + void Visit(CGFuncNameOperand *v) final; + void Visit(CGListOperand *v) final; + void Visit(CGLabelOperand *v) final; + + private: + void DumpRegInfo(CGRegOperand &v); }; } /* namespace maplebe */ #endif /* MAPLEBE_INCLUDE_CG_X86_64_CGFUNC_H */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_emitter.h b/src/mapleall/maple_be/include/cg/x86_64/x64_emitter.h index d8123f7d7ea85a8cb56dccc1713bda3aef2a93fa..2ca732111294b1a581c893637011c69dc6ad0e6f 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_emitter.h +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_emitter.h @@ -16,6 +16,8 @@ #define MAPLEBE_INCLUDE_CG_X86_64_EMITTER_H #include "asm_emit.h" +#include "visitor_common.h" +#include "operand.h" namespace maplebe { @@ -34,6 +36,29 @@ class X64Emitter : public AsmEmitter { void Run(FuncEmitInfo &funcEmitInfo) override; }; +class 
CGOpndEmitVisitor : public OperandVisitorBase, + public OperandVisitors { + public: + CGOpndEmitVisitor(Emitter &asmEmitter): emitter(asmEmitter) {} + virtual ~CGOpndEmitVisitor() = default; + + protected: + Emitter &emitter; +}; + +class X64OpndEmitVisitor : public CGOpndEmitVisitor { + public: + X64OpndEmitVisitor(Emitter &emitter) : CGOpndEmitVisitor(emitter) {} + ~X64OpndEmitVisitor() override = default; + + void Visit(CGRegOperand *v) final; + void Visit(CGImmOperand *v) final; + void Visit(CGMemOperand *v) final; + void Visit(CGFuncNameOperand *v) final; + void Visit(CGLabelOperand *v) final; +}; + } /* namespace maplebe */ #endif /* MAPLEBE_INCLUDE_CG_X86_64_EMITTER_H */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_fp_simd_regs.def b/src/mapleall/maple_be/include/cg/x86_64/x64_fp_simd_regs.def new file mode 100644 index 0000000000000000000000000000000000000000..6a2880e283f224177fa8c7654737e2e81a6abf43 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_fp_simd_regs.def @@ -0,0 +1,49 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +/* + * - %xmm0–%xmm1 used to pass and return floating point arguments + - %xmm2–%xmm7 used to pass floating point arguments + * + */ + +/* + * ID, prefixes: 8-bit, 16-bit, 32-bit, 64-bit, 128-bit, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill + */ +/*ST0 ~ ST7*/ +FP_SIMD_REG(0 , "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(1 , "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(2 , "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(3 , "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(4 , "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(5 , "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(6 , "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(7 , "B", "H", "S", "D", "Q", true, false, false, false, false) +/*XMM0 ~ XMM15*/ +FP_SIMD_REG(8 , "B", "H", "S", "D", "Q", true, false, true, false, false) +FP_SIMD_REG(9 , "B", "H", "S", "D", "Q", true, false, true, false, false) +FP_SIMD_REG(10, "B", "H", "S", "D", "Q", true, false, true, false, false) +FP_SIMD_REG(11, "B", "H", "S", "D", "Q", true, false, true, false, false) +FP_SIMD_REG(12, "B", "H", "S", "D", "Q", true, false, true, false, false) +FP_SIMD_REG(13, "B", "H", "S", "D", "Q", true, false, true, false, false) +FP_SIMD_REG(14, "B", "H", "S", "D", "Q", true, false, true, false, false) +FP_SIMD_REG(15, "B", "H", "S", "D", "Q", true, false, true, false, false) +FP_SIMD_REG(16, "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(17, "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(18, "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(19, "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(20, "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(21, "B", "H", "S", "D", "Q", true, false, 
false, false, false) +FP_SIMD_REG(22, "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(23, "B", "H", "S", "D", "Q", true, false, false, false, false) \ No newline at end of file diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_int_regs.def b/src/mapleall/maple_be/include/cg/x86_64/x64_int_regs.def new file mode 100644 index 0000000000000000000000000000000000000000..65711f96b9bd1ac1e7838833831f0222380dedce --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_int_regs.def @@ -0,0 +1,62 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +/* + * Registers in x86-64 + * + * - caller-save registers: %rax, %rcx, %rdx, %rdi, %rsi, and %r8-r11 + * - callee-saved registers: %r12, %r13, %r14, %r15, %rbx, %rsp, %rbp. + * - In contrast to the Intel386 ABI, %rdi and %rsi in x86-64 belong to the called function, not + * the caller, so they are caller-save registers. + * - User-level applications pass integer arguments in the sequence %rdi, %rsi, %rdx, %rcx, + * %r8 and %r9. The kernel interface uses %rdi, %rsi, %rdx, %r10, %r8 and %r9. + * - the sequence %rax, %rdx is used to return INTEGER values. + * - %rdx is used to pass the 3rd argument to functions and serves as the 2nd return register. + * - %r11 is neither required to be preserved, nor is it used to pass arguments + */ +/* ID, 8-bit prefix, 8-16 bit prefix, 16-bit prefix, 32-bit prefix, 64-bit prefix, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill */ +INT_REG(0 , "BL", "BH", "W", "L", "Q", true, false, false, false, false) +INT_REG(1 , "BL", "BH", "W", "L", "Q", true, true, false, false, false) +INT_REG(2 , "BL", "BH", "W", "L", "Q", true, false, true, false, false) +INT_REG(3 , "BL", "BH", "W", "L", "Q", true, false, true, false, false) +INT_REG(4 , "B", "", "W", "L", "Q", false, false, false, false, false) +INT_REG(5 , "B", "", "W", "L", "Q", false, true, false, false, false) +INT_REG(6 , "B", "", "W", "L", "Q", true, false, true, false, false) +INT_REG(7 , "B", "", "W", "L", "Q", true, false, true, false, false) +INT_REG(8 , "B", "", "W", "L", "Q", true, false, true, false, false) +INT_REG(9 , "B", "", "W", "L", "Q", true, false, true, false, false) +INT_REG(10, "B", "", "W", "L", "Q", true, false, false, false, false) +INT_REG(11, "B", "", "W", "L", "Q", true, false, false, false, false) +INT_REG(12, "B", "", "W", "L", "Q", true, true, false, false, false) +INT_REG(13, "B", "", "W", "L", "Q", true, true, false, false, false) +INT_REG(14, "B", "", "W", "L", "Q", true, true, false, false, false) +INT_REG(15, "B", "", "W", "L", "Q", true, true, false, false, false) +/* instruction pointer */ +INT_REG(16, "B", "", "W", "L", "Q", false, false, false, false, false) + +/* Alias */ +INT_REG_ALIAS(AX, 0) +INT_REG_ALIAS(BX, 1) +INT_REG_ALIAS(CX, 2) +INT_REG_ALIAS(DX, 3) +INT_REG_ALIAS(SP, 4) +INT_REG_ALIAS(BP, 5) +INT_REG_ALIAS(SI, 6) +INT_REG_ALIAS(DI, 7) + +INT_REG_ALIAS(FP, 5) +INT_REG_ALIAS(YP, 12) +INT_REG_ALIAS(IP, 16) +INT_REG_ALIAS(LAST_GP_REG, 16) + diff --git
a/src/mapleall/maple_be/include/cg/x86_64/x64_irbuilder.h b/src/mapleall/maple_be/include/cg/x86_64/x64_irbuilder.h deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_isa.h b/src/mapleall/maple_be/include/cg/x86_64/x64_isa.h new file mode 100644 index 0000000000000000000000000000000000000000..590d3bb39ec92d17c790400b652624b23f65621e --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_isa.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_X64_X64_ISA_H +#define MAPLEBE_INCLUDE_CG_X64_X64_ISA_H + +#include "operand.h" +#include "mad.h" + +namespace maplebe { +/* + * X64 Architecture Reference Manual + */ +constexpr int kX64StackPtrAlignment = 16; + +constexpr int32 kOffsetAlign = 8; +constexpr uint32 kIntregBytelen = 8; /* 64-bit */ +constexpr uint32 kFpregBytelen = 8; /* only lower 64 bits are used */ +constexpr int kSizeOfFplr = 16; + +namespace x64 { +/* machine instruction description */ +#define DEFINE_MOP(op, ...) op, +enum X64MOP_t : maple::uint32 { +#include "x64_md.def" + kMopLast +}; +#undef DEFINE_MOP + +/* Registers in x64 state */ +enum X64reg : uint32 { + kRinvalid = kInvalidRegNO, +/* integer registers */ +#define INT_REG(ID, PREF8, PREF8_16, PREF16, PREF32, PREF64, canBeAssigned, isCalleeSave, \ + isParam, isSpill, isExtraSpill) R##ID, +#define INT_REG_ALIAS(ALIAS, ID) +#include "x64_int_regs.def" +#undef INT_REG +#undef INT_REG_ALIAS +/* fp-simd registers */ +#define FP_SIMD_REG(ID, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, \ + isParam, isSpill, isExtraSpill) V##ID, +#include "x64_fp_simd_regs.def" +#undef FP_SIMD_REG + kMaxRegNum, + kRFLAG, + kAllRegNum, +/* integer registers alias */ +#define INT_REG(ID, PREF8, PREF8_16, PREF16, PREF32, PREF64, canBeAssigned, isCalleeSave, \ + isParam, isSpill, isExtraSpill) +#define INT_REG_ALIAS(ALIAS, ID) R##ALIAS = R##ID, +#include "x64_int_regs.def" +#undef INT_REG +#undef INT_REG_ALIAS +}; + +static inline bool IsGPRegister(X64reg r) { + return R0 <= r && r <= RLAST_GP_REG; +} + +static inline bool IsFPSIMDRegister(X64reg r) { + return V0 <= r && r <= V23; +} + +static inline bool IsFPRegister(X64reg r) { + return V0 <= r && r <= V7; +} + +static inline bool IsSIMDRegister(X64reg r) { + return V8 <= r && r <= V23; +} + +static inline bool IsPhysicalRegister(regno_t r) { + return r < kMaxRegNum; +} + +static inline RegType GetRegType(X64reg r) { + if (IsGPRegister(r)) { + return kRegTyInt; + } + if (IsFPSIMDRegister(r)) { + return kRegTyFloat; + } + ASSERT(false, "No suitable register type to return?"); + return kRegTyUndef; +} +} /* namespace x64 */ + +/* + * We save callee-saved registers from lower stack area to upper stack area. + * If possible, we store a pair of registers (int/int and fp/fp) in the stack. + * The Stack Pointer has to be aligned at 16-byte boundary. 
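+ * Each saved int/int pair then occupies two 8-byte slots (kIntregBytelen << 1 == 16 bytes), + * which is why GetNextOffsetCalleeSaved below advances the offset by exactly that amount, + * keeping the 16-byte alignment.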
+ * On X64, kIntregBytelen == 8 (see the above) + */ +inline void GetNextOffsetCalleeSaved(int &offset) { + offset += (kIntregBytelen << 1); +} +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_X64_X64_ISA_H */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_isa_tbl.h b/src/mapleall/maple_be/include/cg/x86_64/x64_isa_tbl.h new file mode 100644 index 0000000000000000000000000000000000000000..f4a780596b3349ece95798d8b40cda7bd0d56321 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_isa_tbl.h @@ -0,0 +1,127 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_X64_X64_ISA_TBL_H +#define MAPLEBE_INCLUDE_CG_X64_X64_ISA_TBL_H + +#include "x64_isa.h" +#include "operand.h" + +namespace maplebe { + +namespace x64 { + /* register, imm, memory, cond */ +#define DEF_X64_MOV_MAPPING_INT(SIZE) \ +X64MOP_t movIselMap##SIZE[Operand::OperandType::kOpdPhi][Operand::OperandType::kOpdPhi] = { \ + {MOP_mov##SIZE##_r_r, MOP_begin, MOP_mov##SIZE##_r_m, MOP_begin}, \ + {MOP_mov##SIZE##_i_r, MOP_begin, MOP_mov##SIZE##_i_m, MOP_begin}, \ + {MOP_mov##SIZE##_m_r, MOP_begin, MOP_begin, MOP_begin}, \ + {MOP_begin, MOP_begin, MOP_begin, MOP_begin}, \ +}; + + /* register, imm, memory, cond */ +#define DEF_X64_CMP_MAPPING_INT(SIZE) \ +X64MOP_t cmpIselMap##SIZE[Operand::OperandType::kOpdPhi][Operand::OperandType::kOpdPhi] = { \ + {MOP_cmp##SIZE##_r_r, MOP_cmp##SIZE##_r_i, MOP_cmp##SIZE##_r_m, MOP_begin}, \ + {MOP_begin, MOP_begin, MOP_begin, MOP_begin}, \ + {MOP_cmp##SIZE##_m_r, MOP_cmp##SIZE##_m_i, MOP_begin, MOP_begin}, \ + {MOP_begin, MOP_begin, MOP_begin, MOP_begin}, \ +}; + +DEF_X64_MOV_MAPPING_INT(b) +DEF_X64_MOV_MAPPING_INT(l) +DEF_X64_MOV_MAPPING_INT(q) +DEF_X64_CMP_MAPPING_INT(b) +DEF_X64_CMP_MAPPING_INT(l) +DEF_X64_CMP_MAPPING_INT(q) + +static inline X64MOP_t GetMovMop(Operand::OperandType dTy, PrimType dType, + Operand::OperandType sTy, PrimType sType) { + X64MOP_t movOp = MOP_begin; + switch (GetPrimTypeSize(dType)) { + case k1ByteSize: + movOp = movIselMapb[sTy][dTy]; + break; + case k4ByteSize: + movOp = movIselMapl[sTy][dTy]; + break; + case k8ByteSize: + movOp = movIselMapq[sTy][dTy]; + break; + default: + movOp = MOP_begin; + } + return movOp; +} + +static inline X64MOP_t GetCmpMop(Operand::OperandType dTy, PrimType dType, + Operand::OperandType sTy, PrimType sType) { + X64MOP_t cmpOp = MOP_begin; + switch (GetPrimTypeSize(dType)) { + case k1ByteSize: + cmpOp = cmpIselMapb[sTy][dTy]; + break; + case k4ByteSize: + cmpOp = cmpIselMapl[sTy][dTy]; + break; + case k8ByteSize: + cmpOp = cmpIselMapq[sTy][dTy]; + break; + default: + cmpOp = MOP_begin; + break; + } + return cmpOp; +} + + /* {OPCODE, {register, imm, memory, cond}} */ +#define DEF_X64_SET_MAPPING_INT(OPCODE, TYPE) \ +{OPCODE, {x64::MOP_##TYPE##_r, x64::MOP_begin, x64::MOP_##TYPE##_m, x64::MOP_begin}} + +using SetIselMappingType = std::unordered_map>; +SetIselMappingType setUnsignedIselMapping = { + DEF_X64_SET_MAPPING_INT(OP_le, setbe), + DEF_X64_SET_MAPPING_INT(OP_ge, 
setae), + DEF_X64_SET_MAPPING_INT(OP_gt, seta), + DEF_X64_SET_MAPPING_INT(OP_lt, setb), + DEF_X64_SET_MAPPING_INT(OP_ne, setne), + DEF_X64_SET_MAPPING_INT(OP_eq, sete), +}; +SetIselMappingType setSignedIselMapping = { + DEF_X64_SET_MAPPING_INT(OP_le, setle), + DEF_X64_SET_MAPPING_INT(OP_ge, setge), + DEF_X64_SET_MAPPING_INT(OP_gt, setg), + DEF_X64_SET_MAPPING_INT(OP_lt, setl), + DEF_X64_SET_MAPPING_INT(OP_ne, setne), + DEF_X64_SET_MAPPING_INT(OP_eq, sete), +}; +#undef DEF_X64_SET_MAPPING_INT + +static inline X64MOP_t GetSetCCMop(maple::Opcode opcode, Operand::OperandType dTy, bool isSigned) { + ASSERT(dTy < Operand::OperandType::kOpdPhi, "illegal operand type"); + SetIselMappingType &setIselMapping = isSigned ? setSignedIselMapping : + setUnsignedIselMapping; + auto iter = setIselMapping.find(opcode); + if (iter == setIselMapping.end()) { + return x64::MOP_begin; + } + return iter->second[dTy]; +} + +} + +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_X64_X64_ISA_TBL_H */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_md.def b/src/mapleall/maple_be/include/cg/x86_64/x64_md.def new file mode 100644 index 0000000000000000000000000000000000000000..19f961986e13a375ad298d862c1b6129b4d81308 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_md.def @@ -0,0 +1,234 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +/* {mop, opnds, prop, latency, name, format, length} */ +/* begin machine operation codes of X86_64 instructions */ +DEFINE_MOP(MOP_begin, {},0,0,"","",0) + +/* # Definitions + * use x64-style b/w/l/q suffixes for 8b/16b/32b/64b operations + * and AT&T-style assembly + */ + +/* X64 MOVES */ +// TODO: fix instruction opnds, prop, latency, format and length +// TODO: the encoding and enumeration seem too verbose +// TODO: understand how other systems represent these MOPs (especially for x86-64) +// TODO: this is still an experiment +// TODO: make sure the convention stays consistent with AT&T style +// TODO: decide how the general machine instruction description should be designed
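+ +/* + * How to read a row (field semantics inferred from their usage in this file; not normative): + * DEFINE_MOP(mop, {operand descriptions}, prop, latency, name, format, length). + * For example, MOP_movl_r_r below pairs a 32-bit source register (Reg32IS) with a 32-bit + * destination register (Reg32ID), carries the ISMOVE property, uses latency class kLtAlu, + * emits the AT&T mnemonic "movl", and prints its operands in the order "0,1". + */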
+ +/* mov */ +DEFINE_MOP(MOP_movb_r_r, {&OpndDescription::Reg8IS,&OpndDescription::Reg8ID},0,kLtAlu,"movb","0,1",1) +DEFINE_MOP(MOP_movb_i_r, {&OpndDescription::Imm8,&OpndDescription::Reg8ID},0,kLtAlu,"movb","0,1",1) +DEFINE_MOP(MOP_movb_m_r, {&OpndDescription::Mem8S,&OpndDescription::Reg8ID},ISLOAD,kLtAlu,"movb","0,1",1) +DEFINE_MOP(MOP_movb_r_m, {&OpndDescription::Reg8IS,&OpndDescription::Mem8D},ISSTORE,kLtAlu,"movb","0,1",1) +DEFINE_MOP(MOP_movb_i_m, {&OpndDescription::Imm8,&OpndDescription::Mem8D},ISSTORE,kLtAlu,"movb","0,1",1) + +DEFINE_MOP(MOP_movw_r_r, {&OpndDescription::Reg16IS,&OpndDescription::Reg16ID},0,kLtAlu,"movw","0,1",1) +DEFINE_MOP(MOP_movw_m_r, {&OpndDescription::Mem16S,&OpndDescription::Reg16ID},ISLOAD,kLtAlu,"movw","0,1",1) +DEFINE_MOP(MOP_movw_i_r, {&OpndDescription::Imm16,&OpndDescription::Reg16ID},ISLOAD,kLtAlu,"movw","0,1",1) +DEFINE_MOP(MOP_movw_r_m, {&OpndDescription::Reg16IS,&OpndDescription::Mem16D},ISSTORE,kLtAlu,"movw","0,1",1) + +DEFINE_MOP(MOP_movl_i_r, {&OpndDescription::Imm32,&OpndDescription::Reg32ID},ISMOVE,kLtAlu,"movl","0,1",1) +DEFINE_MOP(MOP_movl_r_r, {&OpndDescription::Reg32IS,&OpndDescription::Reg32ID},ISMOVE,kLtAlu,"movl","0,1",1) +DEFINE_MOP(MOP_movl_m_r, {&OpndDescription::Mem32S,&OpndDescription::Reg32ID},ISLOAD,kLtAlu,"movl","0,1",1) +DEFINE_MOP(MOP_movl_i_m, {&OpndDescription::Imm32,&OpndDescription::Mem32D},ISSTORE,kLtAlu,"movl","0,1",1) +DEFINE_MOP(MOP_movl_r_m, {&OpndDescription::Reg32IS,&OpndDescription::Mem32D},ISSTORE,kLtAlu,"movl","0,1",1) + +DEFINE_MOP(MOP_movq_r_m, {&OpndDescription::Reg64IS,&OpndDescription::Mem64D},ISSTORE,kLtAlu,"movq","0,1",1) +DEFINE_MOP(MOP_movq_i_r, {&OpndDescription::Imm64,&OpndDescription::Reg64ID},ISMOVE,kLtAlu,"movq","0,1",1) +DEFINE_MOP(MOP_movq_r_r, {&OpndDescription::Reg64IS,&OpndDescription::Reg64ID},ISMOVE,kLtAlu,"movq","0,1",1) +DEFINE_MOP(MOP_movq_m_r, {&OpndDescription::Mem64S,&OpndDescription::Reg64ID},ISLOAD,kLtAlu,"movq","0,1",1) + +/* Moving from a smaller data size to 32 bits */ +/* zero extension */ +DEFINE_MOP(MOP_movzbl_m_r, {&OpndDescription::Mem8S,&OpndDescription::Reg32ID},0,kLtAlu,"movzbl","0,1",1) +DEFINE_MOP(MOP_movzbl_r_r, {&OpndDescription::Reg8IS,&OpndDescription::Reg32ID},0,kLtAlu,"movzbl","0,1",1) +DEFINE_MOP(MOP_movzwl_m_r, {&OpndDescription::Mem16S,&OpndDescription::Reg32ID},0,kLtAlu,"movzwl","0,1",1) +DEFINE_MOP(MOP_movzwl_r_r, {&OpndDescription::Reg16IS,&OpndDescription::Reg32ID},0,kLtAlu,"movzwl","0,1",1) +/* sign extension */ +DEFINE_MOP(MOP_movsbl_m_r, {&OpndDescription::Mem8S,&OpndDescription::Reg32ID},0,kLtAlu,"movsbl","0,1",1) +DEFINE_MOP(MOP_movsbl_r_r, {&OpndDescription::Reg8IS,&OpndDescription::Reg32ID},0,kLtAlu,"movsbl","0,1",1) +DEFINE_MOP(MOP_movswl_m_r, {&OpndDescription::Mem16S,&OpndDescription::Reg32ID},0,kLtAlu,"movswl","0,1",1) +DEFINE_MOP(MOP_movswl_r_r, {&OpndDescription::Reg16IS,&OpndDescription::Reg32ID},0,kLtAlu,"movswl","0,1",1) + +/* Moving from a smaller data size to 64 bits */ +/* zero extension */ +/* Perhaps unexpectedly, instructions that move or generate 32-bit register values also set the upper 32 bits of the register to zero. */ +/* Consequently, there is no need for an instruction movzlq. 
Similarly, the instruction movzbq has the exact same behavior as movzbl when */ +/* the destination is a register */ +/* sign extension */ +DEFINE_MOP(MOP_movsbq_m_r, {&OpndDescription::Mem8S,&OpndDescription::Reg64ID},0,kLtAlu,"movsbq","0,1",1) +DEFINE_MOP(MOP_movsbq_r_r, {&OpndDescription::Reg8IS,&OpndDescription::Reg64ID},0,kLtAlu,"movsbq","0,1",1) +DEFINE_MOP(MOP_movswq_m_r, {&OpndDescription::Mem16S,&OpndDescription::Reg64ID},0,kLtAlu,"movswq","0,1",1) +DEFINE_MOP(MOP_movswq_r_r, {&OpndDescription::Reg16IS,&OpndDescription::Reg64ID},0,kLtAlu,"movswq","0,1",1) +DEFINE_MOP(MOP_movslq_r_r, {&OpndDescription::Reg32IS,&OpndDescription::Reg64ID},0,kLtAlu,"movslq","0,1",1) +DEFINE_MOP(MOP_movslq_m_r, {&OpndDescription::Mem32S,&OpndDescription::Reg64ID},0,kLtAlu,"movslq","0,1",1) +DEFINE_MOP(MOP_movslq_i_r, {&OpndDescription::Imm32,&OpndDescription::Reg64ID},0,kLtAlu,"movslq","0,1",1) + +/* add */ +DEFINE_MOP(MOP_addb_r_r, {&OpndDescription::Reg8IS, &OpndDescription::Reg8IDS},0,kLtAlu,"addb","0,1",1) +DEFINE_MOP(MOP_addw_r_r, {&OpndDescription::Reg16IS, &OpndDescription::Reg16IDS},0,kLtAlu,"addw","0,1",1) +DEFINE_MOP(MOP_addl_r_r, {&OpndDescription::Reg32IS, &OpndDescription::Reg32IDS},0,kLtAlu,"addl","0,1",1) +DEFINE_MOP(MOP_addq_r_r, {&OpndDescription::Reg64IS, &OpndDescription::Reg64IDS},0,kLtAlu,"addq","0,1",1) +DEFINE_MOP(MOP_addq_i_r, {&OpndDescription::Imm32,&OpndDescription::Reg64IDS},0,kLtAlu,"addq","0,1",1) + +DEFINE_MOP(MOP_movabs_i_r, {&OpndDescription::Imm64,&OpndDescription::Reg64ID},0,kLtAlu,"movabs","0,1",1) +DEFINE_MOP(MOP_movabs_l_r, {&OpndDescription::Lbl64,&OpndDescription::Reg64ID},0,kLtAlu,"movabs","0,1",1) +//The movabs instruction to load arbitrary 64-bit constant into register and to load/store integer register from/to arbitrary constant 64-bit address is available + +/* push & pop & lea */ +DEFINE_MOP(MOP_pushq_r, {&OpndDescription::Reg64IS},0,kLtAlu,"pushq","0",1) +DEFINE_MOP(MOP_popq_r, {&OpndDescription::Reg32IS},0,kLtAlu,"popq","0",1) + +DEFINE_MOP(MOP_leaq_m_r, {&OpndDescription::Mem64S,&OpndDescription::Reg64ID},0,kLtAlu,"leaq","0,1",1) +DEFINE_MOP(MOP_leal_m_r, {&OpndDescription::Mem32S,&OpndDescription::Reg64ID},0,kLtAlu,"leaq","0,1",1) +DEFINE_MOP(MOP_leaw_m_r, {&OpndDescription::Mem16S,&OpndDescription::Reg64ID},0,kLtAlu,"leaq","0,1",1) + +/* sub & sbb */ +DEFINE_MOP(MOP_subb_r_r, {&OpndDescription::Reg8IS, &OpndDescription::Reg8IDS},0,kLtAlu,"subb","0,1",1) +DEFINE_MOP(MOP_subw_r_r, {&OpndDescription::Reg16IS, &OpndDescription::Reg16IDS},0,kLtAlu,"subw","0,1",1) +DEFINE_MOP(MOP_subl_r_r, {&OpndDescription::Reg32IS, &OpndDescription::Reg32IDS},0,kLtAlu,"subl","0,1",1) +DEFINE_MOP(MOP_subq_r_r, {&OpndDescription::Reg64IS, &OpndDescription::Reg64IDS},0,kLtAlu,"subq","0,1",1) +DEFINE_MOP(MOP_subq_i_r, {&OpndDescription::Imm32, &OpndDescription::Reg64ID},0,kLtAlu,"subq","0,1",1) + +/* and, or, xor, not, neg */ +DEFINE_MOP(MOP_andb_r_r, {&OpndDescription::Reg8IS, &OpndDescription::Reg8IDS},0,kLtAlu,"andb","0,1",1) +DEFINE_MOP(MOP_andw_r_r, {&OpndDescription::Reg16IS, &OpndDescription::Reg16IDS},0,kLtAlu,"andw","0,1",1) +DEFINE_MOP(MOP_andl_r_r, {&OpndDescription::Reg32IS, &OpndDescription::Reg32IDS},0,kLtAlu,"andl","0,1",1) +DEFINE_MOP(MOP_andq_r_r, {&OpndDescription::Reg64IS, &OpndDescription::Reg64IDS},0,kLtAlu,"andq","0,1",1) +DEFINE_MOP(MOP_orb_r_r, {&OpndDescription::Reg8IS, &OpndDescription::Reg8IDS},0,kLtAlu,"orb","0,1",1) +DEFINE_MOP(MOP_orw_r_r, {&OpndDescription::Reg16IS, &OpndDescription::Reg16IDS},0,kLtAlu,"orw","0,1",1) +DEFINE_MOP(MOP_orl_r_r, 
{&OpndDescription::Reg32IS, &OpndDescription::Reg32IDS},0,kLtAlu,"orl","0,1",1) +DEFINE_MOP(MOP_orq_r_r, {&OpndDescription::Reg64IS, &OpndDescription::Reg64IDS},0,kLtAlu,"orq","0,1",1) + +DEFINE_MOP(MOP_xorb_i_r, {&OpndDescription::Imm8, &OpndDescription::Reg8IDS},0,kLtAlu,"xorb","0,1",1) +DEFINE_MOP(MOP_xorb_r_r, {&OpndDescription::Reg8IS, &OpndDescription::Reg8IDS},0,kLtAlu,"xorb","0,1",1) +DEFINE_MOP(MOP_xorw_r_r, {&OpndDescription::Reg16IS, &OpndDescription::Reg16IDS},0,kLtAlu,"xorw","0,1",1) +DEFINE_MOP(MOP_xorl_r_r, {&OpndDescription::Reg32IS, &OpndDescription::Reg32IDS},0,kLtAlu,"xorl","0,1",1) +DEFINE_MOP(MOP_xorq_r_r, {&OpndDescription::Reg64IS, &OpndDescription::Reg64IDS},0,kLtAlu,"xorq","0,1",1) + +DEFINE_MOP(MOP_notw_r, {&OpndDescription::Reg16IDS},0,kLtAlu,"notw","0",1) +DEFINE_MOP(MOP_notl_r, {&OpndDescription::Reg32IDS},0,kLtAlu,"notl","0",1) +DEFINE_MOP(MOP_notq_r, {&OpndDescription::Reg64IDS},0,kLtAlu,"notq","0",1) + +DEFINE_MOP(MOP_negw_r, {&OpndDescription::Reg16IDS},0,kLtAlu,"negw","0",1) +DEFINE_MOP(MOP_negl_r, {&OpndDescription::Reg32IDS},0,kLtAlu,"negl","0",1) + +/* div, cdq */ +DEFINE_MOP(MOP_idiv_r, {&OpndDescription::Reg32IDS, &OpndDescription::Reg32ID},0,kLtAlu,"idiv","0",1) + +DEFINE_MOP(MOP_cdq, {&OpndDescription::Reg32IDS, &OpndDescription::Reg32ID},0,0,"cdq","",1) + +/* shift -- shl/sar/shr reg8, use cl */ +DEFINE_MOP(MOP_shlb_r_r, {&OpndDescription::Reg8IS, &OpndDescription::Reg8IDS},0,kLtAlu,"shlb","0,1",1) +DEFINE_MOP(MOP_shlw_r_r, {&OpndDescription::Reg8IS, &OpndDescription::Reg16IDS},0,kLtAlu,"shlw","0,1",1) +DEFINE_MOP(MOP_shll_r_r, {&OpndDescription::Reg8IS, &OpndDescription::Reg32IDS},0,kLtAlu,"shll","0,1",1) +DEFINE_MOP(MOP_shlq_r_r, {&OpndDescription::Reg8IS, &OpndDescription::Reg64IDS},0,kLtAlu,"shlq","0,1",1) +DEFINE_MOP(MOP_sarb_r_r, {&OpndDescription::Reg8IS, &OpndDescription::Reg8IDS},0,kLtAlu,"sarb","0,1",1) +DEFINE_MOP(MOP_sarw_r_r, {&OpndDescription::Reg8IS, &OpndDescription::Reg16IDS},0,kLtAlu,"sarw","0,1",1) +DEFINE_MOP(MOP_sarl_r_r, {&OpndDescription::Reg8IS, &OpndDescription::Reg32IDS},0,kLtAlu,"sarl","0,1",1) +DEFINE_MOP(MOP_sarq_r_r, {&OpndDescription::Reg8IS, &OpndDescription::Reg64IDS},0,kLtAlu,"sarq","0,1",1) +DEFINE_MOP(MOP_shrb_r_r, {&OpndDescription::Reg8IS, &OpndDescription::Reg8IDS},0,kLtAlu,"shrb","0,1",1) +DEFINE_MOP(MOP_shrw_r_r, {&OpndDescription::Reg8IS, &OpndDescription::Reg16IDS},0,kLtAlu,"shrw","0,1",1) +DEFINE_MOP(MOP_shrl_r_r, {&OpndDescription::Reg8IS, &OpndDescription::Reg32IDS},0,kLtAlu,"shrl","0,1",1) +DEFINE_MOP(MOP_shrq_r_r, {&OpndDescription::Reg8IS, &OpndDescription::Reg64IDS},0,kLtAlu,"shrq","0,1",1) + +/* jmp, je, jne */ +DEFINE_MOP(MOP_jmpq_r, {&OpndDescription::Reg64IDS},ISCONDBRANCH,kLtAlu,"jmp","0",1) +DEFINE_MOP(MOP_jmpq_m, {&OpndDescription::Mem64S},ISCONDBRANCH,kLtAlu,"jmp","0",1) +DEFINE_MOP(MOP_jmpq_l, {&OpndDescription::Lbl64},ISCONDBRANCH,kLtAlu,"jmp","0",1) // ip relative + +DEFINE_MOP(MOP_je_l, {&OpndDescription::Lbl64},ISCONDBRANCH,kLtAlu,"je","0",1) +DEFINE_MOP(MOP_ja_l, {&OpndDescription::Lbl64},ISCONDBRANCH,kLtAlu,"ja","0",1) // unsigned > +DEFINE_MOP(MOP_jae_l, {&OpndDescription::Lbl64},ISCONDBRANCH,kLtAlu,"jae","0",1) // unsigned >= +DEFINE_MOP(MOP_jne_l, {&OpndDescription::Lbl64},ISCONDBRANCH,kLtAlu,"jne","0",1) +DEFINE_MOP(MOP_jb_l, {&OpndDescription::Lbl64},ISCONDBRANCH,kLtAlu,"jb","0",1) // unsigned < +DEFINE_MOP(MOP_jbe_l, {&OpndDescription::Lbl64},ISCONDBRANCH,kLtAlu,"jbe","0",1) // unsigned <= +DEFINE_MOP(MOP_jg_l, {&OpndDescription::Lbl64},ISCONDBRANCH,kLtAlu,"jg","0",1) // signed 
> +DEFINE_MOP(MOP_jge_l, {&OpndDescription::Lbl64},ISCONDBRANCH,kLtAlu,"jge","0",1) // signed >= +DEFINE_MOP(MOP_jl_l, {&OpndDescription::Lbl64},ISCONDBRANCH,kLtAlu,"jl","0",1) // signed < +DEFINE_MOP(MOP_jle_l, {&OpndDescription::Lbl64},ISCONDBRANCH,kLtAlu,"jle","0",1) // signed <= + +/* cmp, test */ +DEFINE_MOP(MOP_cmpq_r_r, {&OpndDescription::Reg64IS, &OpndDescription::Reg64IS},0,kLtAlu,"cmpq","0,1",1) +DEFINE_MOP(MOP_cmpq_r_m, {&OpndDescription::Reg64IS, &OpndDescription::Mem64S},0,kLtAlu,"cmpq","0,1",1) +DEFINE_MOP(MOP_cmpq_m_r, {&OpndDescription::Mem64S, &OpndDescription::Reg64IS},0,kLtAlu,"cmpq","0,1",1) +DEFINE_MOP(MOP_cmpl_r_r, {&OpndDescription::Reg32IS, &OpndDescription::Reg32IS},0,kLtAlu,"cmpl","0,1",1) +DEFINE_MOP(MOP_cmpl_r_m, {&OpndDescription::Reg32IS, &OpndDescription::Mem32S},0,kLtAlu,"cmpl","0,1",1) +DEFINE_MOP(MOP_cmpl_m_r, {&OpndDescription::Mem32S, &OpndDescription::Reg32IS},0,kLtAlu,"cmpl","0,1",1) +DEFINE_MOP(MOP_cmpl_r_i, {&OpndDescription::Reg32IS, &OpndDescription::Imm32},0,kLtAlu,"cmpl","0,1",1) +DEFINE_MOP(MOP_cmpl_m_i, {&OpndDescription::Mem32S, &OpndDescription::Imm32},0,kLtAlu,"cmpl","0,1",1) +DEFINE_MOP(MOP_cmpb_r_r, {&OpndDescription::Reg8IS, &OpndDescription::Reg8IS},0,kLtAlu,"cmpb","0,1",1) +DEFINE_MOP(MOP_cmpb_r_m, {&OpndDescription::Reg8IS, &OpndDescription::Mem8S},0,kLtAlu,"cmpb","0,1",1) +DEFINE_MOP(MOP_cmpb_m_r, {&OpndDescription::Mem8S, &OpndDescription::Reg8IS},0,kLtAlu,"cmpb","0,1",1) +DEFINE_MOP(MOP_cmpb_r_i, {&OpndDescription::Reg8IS, &OpndDescription::Imm8},0,kLtAlu,"cmpb","0,1",1) +DEFINE_MOP(MOP_cmpb_m_i, {&OpndDescription::Mem8S, &OpndDescription::Imm8},0,kLtAlu,"cmpb","0,1",1) +DEFINE_MOP(MOP_testq_r_r, {&OpndDescription::Reg64IS, &OpndDescription::Reg64IS},0,kLtAlu,"testq","0,1",1) + +/* setcc -- use ccreg(CF/ZF/SF/OF) */ +DEFINE_MOP(MOP_setbe_r, {&OpndDescription::Reg8ID},0,kLtAlu,"setbe","0",1) +DEFINE_MOP(MOP_setle_r, {&OpndDescription::Reg8ID},0,kLtAlu,"setle","0",1) +DEFINE_MOP(MOP_setae_r, {&OpndDescription::Reg8ID},0,kLtAlu,"setae","0",1) +DEFINE_MOP(MOP_setge_r, {&OpndDescription::Reg8ID},0,kLtAlu,"setge","0",1) +DEFINE_MOP(MOP_setne_r, {&OpndDescription::Reg8ID},0,kLtAlu,"setne","0",1) +DEFINE_MOP(MOP_setb_r, {&OpndDescription::Reg8ID},0,kLtAlu,"setb","0",1) +DEFINE_MOP(MOP_setl_r, {&OpndDescription::Reg8ID},0,kLtAlu,"setl","0",1) +DEFINE_MOP(MOP_seta_r, {&OpndDescription::Reg8ID},0,kLtAlu,"seta","0",1) +DEFINE_MOP(MOP_setg_r, {&OpndDescription::Reg8ID},0,kLtAlu,"setg","0",1) +DEFINE_MOP(MOP_sete_r, {&OpndDescription::Reg8ID},0,kLtAlu,"sete","0",1) +DEFINE_MOP(MOP_setbe_m, {&OpndDescription::Mem8D},0,kLtAlu,"setbe","0",1) +DEFINE_MOP(MOP_setle_m, {&OpndDescription::Mem8D},0,kLtAlu,"setle","0",1) +DEFINE_MOP(MOP_setae_m, {&OpndDescription::Mem8D},0,kLtAlu,"setae","0",1) +DEFINE_MOP(MOP_setge_m, {&OpndDescription::Mem8D},0,kLtAlu,"setge","0",1) +DEFINE_MOP(MOP_setne_m, {&OpndDescription::Mem8D},0,kLtAlu,"setne","0",1) +DEFINE_MOP(MOP_setb_m, {&OpndDescription::Mem8D},0,kLtAlu,"setb","0",1) +DEFINE_MOP(MOP_setl_m, {&OpndDescription::Mem8D},0,kLtAlu,"setl","0",1) +DEFINE_MOP(MOP_seta_m, {&OpndDescription::Mem8D},0,kLtAlu,"seta","0",1) +DEFINE_MOP(MOP_setg_m, {&OpndDescription::Mem8D},0,kLtAlu,"setg","0",1) +DEFINE_MOP(MOP_sete_m, {&OpndDescription::Mem8D},0,kLtAlu,"sete","0",1) + +/* cmov */ +/* condition move below */ +DEFINE_MOP(MOP_cmovbe_r_r, {&OpndDescription::Reg32IS,&OpndDescription::Reg32ID},0,kLtAlu,"cmovbe","0,1",1) +/* condition move equal */ +DEFINE_MOP(MOP_cmoveq_r_r, 
{&OpndDescription::Reg64IS,&OpndDescription::Reg64ID},0,kLtAlu,"cmoveq","0,1",1) +/* condition move not equal */ +DEFINE_MOP(MOP_cmovneq_r_r, {&OpndDescription::Reg64IS,&OpndDescription::Reg64ID},0,kLtAlu,"cmovneq","0,1",1) + +/* call, ret */ +DEFINE_MOP(MOP_callq_l, {&OpndDescription::Lbl64},ISCALL,kLtAlu,"callq","0",1) +DEFINE_MOP(MOP_callq_m, {&OpndDescription::Mem64S},ISCALL,kLtAlu,"callq","0",1) +DEFINE_MOP(MOP_callq_r, {&OpndDescription::Reg64IS},ISCALL,kLtAlu,"callq","0",1) + +DEFINE_MOP(MOP_retq, {},CANTHROW,kLtBranch,"ret","",1) + +/* imul */ +DEFINE_MOP(MOP_imulw_r_r, {&OpndDescription::Reg16IS,&OpndDescription::Reg16IDS},0,kLtAlu,"imulw","0,1",1) +DEFINE_MOP(MOP_imull_r_r, {&OpndDescription::Reg32IS,&OpndDescription::Reg32IDS},0,kLtAlu,"imull","0,1",1) +DEFINE_MOP(MOP_imulq_r_r, {&OpndDescription::Reg64IS,&OpndDescription::Reg64IDS},0,kLtAlu,"imulq","0,1",1) + +/* nop */ +// TODO: still not clear why we need so many forms of nop (except for patch) +DEFINE_MOP(MOP_nopb, {&OpndDescription::Mem8S},0,kLtAlu,"nopb","",1) +DEFINE_MOP(MOP_nopw, {&OpndDescription::Mem16S},0,kLtAlu,"nopw","",1) +DEFINE_MOP(MOP_nopl, {&OpndDescription::Mem32S},0,kLtAlu,"nopl","",1) +DEFINE_MOP(MOP_nop, {},0,0,"nop","",1) +/* end of X64 instructions */ + +/* invalid operation */ +DEFINE_MOP(MOP_movq_i_m, {&OpndDescription::Imm64,&OpndDescription::Mem64D},0,kLtAlu,"invalid","0,1",1) +DEFINE_MOP(MOP_cmpq_r_i, {&OpndDescription::Reg64IS, &OpndDescription::Imm64},0,kLtAlu,"invalid","0,1",1) +DEFINE_MOP(MOP_cmpq_m_i, {&OpndDescription::Mem64S, &OpndDescription::Imm64},0,kLtAlu,"invalid","0,1",1) diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_memlayout.h b/src/mapleall/maple_be/include/cg/x86_64/x64_memlayout.h index fdd137cd8250140509d525447ab144b6a7301acb..0919fa14abf3c7964b441648b25ebd4a4eee358c 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_memlayout.h +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_memlayout.h @@ -19,6 +19,69 @@ #include "memlayout.h" namespace maplebe { +class X64SymbolAlloc : public SymbolAlloc { + public: + X64SymbolAlloc() = default; + + ~X64SymbolAlloc() = default; + + void SetRegisters(bool isR) { + isRegister = isR; + } + + inline bool IsRegister() const { + return isRegister; + } + + private: + bool isRegister = false; +}; +/* + * On X64, stack frames are structured as follows: + * + * The stack grows downward -- full descending (SP points + * to a filled slot). + * + * Any of the parts of a frame is optional, i.e., it is + * possible to write a caller-callee pair in such a way + * that the particular part is absent in the frame. + * + * Before a call is made, the frame looks like: + * | | + * ||----------------------------| + * | args passed on the stack | (we call them up-formals) + * ||----------------------------|<- Stack Pointer + * | | + * + * Right after a call is made + * | | + * ||----------------------------| + * | args passed on the stack | + * ||----------------------------|<- Stack Pointer + * | PREV_FP, PREV_LR | + * ||----------------------------|<- Frame Pointer + * + * After the prologue has run, + * | | + * ||----------------------------| + * | args passed on the stack | + * ||----------------------------| + * | PREV_FP, PREV_LR | + * ||----------------------------|<- Frame Pointer + * | callee-saved registers | + * ||----------------------------| + * | empty space. 
should have | + * | at least 16-byte alignment | + * ||----------------------------| + * | local variables | + * ||----------------------------|<- Stack Pointer + * | red zone | + * + * callee-saved registers include + * 1. rbx rbp r12 r14 r14 r15 + * 2. XMM0-XMM7 + */ + class X64MemLayout : public MemLayout { public: X64MemLayout(BECommon &b, MIRFunction &f, MapleAllocator &mallocator) @@ -29,8 +92,13 @@ class X64MemLayout : public MemLayout { uint32 ComputeStackSpaceRequirementForCall(StmtNode &stmtNode, int32 &aggCopySize, bool isIcall) override { return 0; } - void LayoutStackFrame(int32 &structCopySize, int32 &maxParmStackSize) override {} + void LayoutStackFrame(int32 &structCopySize, int32 &maxParmStackSize) override; + uint64 StackFrameSize() const; + + const MemSegment &locals() const { + return segLocals; + } /* * "Pseudo-registers can be regarded as local variables of a * primitive type whose addresses are never taken" @@ -41,6 +109,18 @@ class X64MemLayout : public MemLayout { return nullptr; } + uint32 GetSizeOfLocals() const { + return segLocals.GetSize(); + } + private: + // Layout function + void LayoutFormalParams(); + void LayoutLocalVariables(); + + // util function + void SetSizeAlignForTypeIdx(uint32 typeIdx, uint32 &size, uint32 &align) const; + + MemSegment segLocals = MemSegment(kMsLocals); /* these are accessed via Frame Pointer */ }; } #endif // MAPLEBE_INCLUDE_CG_X86_64_MEMLAYOUT_H diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_phases.def b/src/mapleall/maple_be/include/cg/x86_64/x64_phases.def index 992b2b5f9921f2a0804956ebb7cb942d3cb9041e..9d4d7df2150f9b4a7d6ae46d2e240f50a7e2bd22 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_phases.def +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_phases.def @@ -14,4 +14,10 @@ */ ADDTARGETPHASE("layoutstackframe", true); - ADDTARGETPHASE("createstartendlabel", true); \ No newline at end of file + ADDTARGETPHASE("createstartendlabel", true); + ADDTARGETPHASE("instructionselector", true); + ADDTARGETPHASE("moveargs", true); + ADDTARGETPHASE("regalloc", true); + ADDTARGETPHASE("generateproepilog", true); + /* ASM EMIT */ + ADDTARGETPHASE("cgemit", true); diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_proepilog.h b/src/mapleall/maple_be/include/cg/x86_64/x64_proepilog.h new file mode 100644 index 0000000000000000000000000000000000000000..7f24e4d586451423c29efbca267c3ae97aa709d5 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_proepilog.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_X64_X64_PROEPILOG_H +#define MAPLEBE_INCLUDE_CG_X64_X64_PROEPILOG_H + +#include "proepilog.h" +#include "x64_cgfunc.h" + +namespace maplebe { +using namespace maple; + +class X64GenProEpilog : public GenProEpilog { + public: + explicit X64GenProEpilog(CGFunc &func) : GenProEpilog(func) { + } + ~X64GenProEpilog() override = default; + + bool TailCallOpt() override; + bool NeedProEpilog() override; + void Run() override; + private: + void GenerateProlog(BB &bb); + void GenerateEpilog(BB &bb); + + void GeneratePushRbpInsn(); + void GenerateMovRspToRbpInsn(); + void GenerateSubFrameSizeFromRspInsn(); + void GenerateAddFrameSizeToRspInsn(); + void GeneratePopInsn(); + void GenerateRetInsn(); +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_X64_X64_PROEPILOG_H */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_reg_info.h b/src/mapleall/maple_be/include/cg/x86_64/x64_reg_info.h new file mode 100644 index 0000000000000000000000000000000000000000..ca97a412e0208a6d6b34604f50793d6b4958a354 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_reg_info.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_X64_X64_REG_INFO_H +#define MAPLEBE_INCLUDE_CG_X64_X64_REG_INFO_H +#include "reg_info.h" +#include "x64_abi.h" + +namespace maplebe { + +class X64RegInfo : public RegisterInfo { + public: + X64RegInfo(MapleAllocator &mallocator): RegisterInfo(mallocator) { + } + + ~X64RegInfo() override = default; + + uint32 GetAllRegNum() override { + return x64::kAllRegNum; + }; + uint32 GetInvalidReg() override { + return x64::kRinvalid; + }; + bool IsVirtualRegister(const CGRegOperand ®Opnd) override { + return regOpnd.GetRegisterNumber() > x64::kAllRegNum; + } + + void Init() override; + void Fini() override; + void SaveCalleeSavedReg(MapleSet savedRegs) override; + bool IsSpecialReg(regno_t regno) const override; + bool IsCalleeSavedReg(regno_t regno) const override; + bool IsYieldPointReg(regno_t regNO) const override; + bool IsUnconcernedReg(regno_t regNO) const override; + bool IsUnconcernedReg(const RegOperand ®Opnd) const override { + return false; + } + bool IsUnconcernedReg(const CGRegOperand ®Opnd) const override; + RegOperand &GetOrCreatePhyRegOperand(regno_t regNO, uint32 size, RegType kind, uint32 flag = 0) override; + ListOperand *CreateListOperand() override; + Insn *BuildMovInstruction(Operand &opnd0, Operand &opnd1) override; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_X64_X64_REG_INFO_H */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_standardize.h b/src/mapleall/maple_be/include/cg/x86_64/x64_standardize.h new file mode 100644 index 0000000000000000000000000000000000000000..a4e6c04a4c53ca624c101ab0c9fdcb0de245b1b4 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_standardize.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. 
+ * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef MAPLEBE_INCLUDE_X64_STANDARDIZE_H +#define MAPLEBE_INCLUDE_X64_STANDARDIZE_H + +#include "standardize.h" + +namespace maplebe { +class X64Standardize : public Standardize { + public: + explicit X64Standardize(CGFunc &f) : Standardize(f) { + SetAddressMapping(true); + } + + ~X64Standardize() override = default; + + private: + bool TryFastTargetIRMapping(Insn &insn) override; + + void StdzMov(Insn &insn) override; + void StdzStrLdr(Insn &insn) override; + void StdzBasicOp(Insn &insn) override; + void StdzUnaryOp(Insn &insn) override; + void StdzCvtOp(Insn &insn, CGFunc &cgFunc) override; + void StdzShiftOp(Insn &insn, CGFunc &cgFunc) override; +}; +} +#endif /* MAPLEBE_INCLUDEX_64_STANDARDIZE_H */ diff --git a/src/mapleall/maple_be/mdgen/gendef.py b/src/mapleall/maple_be/mdgen/gendef.py index 773fe0e81c3be8eb3c663170edd0b29ab891f710..a49862a0b8de538f732651ca65edf3d8c8c8d7fb 100755 --- a/src/mapleall/maple_be/mdgen/gendef.py +++ b/src/mapleall/maple_be/mdgen/gendef.py @@ -14,8 +14,8 @@ # FIT FOR A PARTICULAR PURPOSE. # See the Mulan PSL v2 for more details. # -import os, sys, subprocess, shlex, re -def Gendef(execTool, mdFiles, outputDir): +import os, sys, subprocess, shlex, re, argparse +def Gendef(execTool, mdFiles, outputDir, asanLib=None): tdList = [] for mdFile in mdFiles: if mdFile.find('sched') >= 0: @@ -26,12 +26,17 @@ def Gendef(execTool, mdFiles, outputDir): print("Command Injection !") return print("[*] %s" % (mdCmd)) - subprocess.check_call(shlex.split(mdCmd), shell = False) + localEnv = os.environ + if asanLib is not None: + asanEnv = asanLib.split("=") + localEnv[asanEnv[0]] = asanEnv[1] + print("env :" + str(asanEnv)) + subprocess.check_call(shlex.split(mdCmd), shell = False, env = localEnv) else: tdList.append(i) return -def Process(execTool, mdFileDir, outputDir): +def Process(execTool, mdFileDir, outputDir, asanLib=None): if not (os.path.exists(execTool)): print("maplegen is required before generating def files automatically") return @@ -48,24 +53,39 @@ def Process(execTool, mdFileDir, outputDir): if not (os.path.exists(outputDir)): print("Create the " + outputDir) os.makedirs(outputDir) - Gendef(execTool, mdFiles, outputDir) + Gendef(execTool, mdFiles, outputDir, asanLib) defFile = "%s/mplad_arch_define.def" % (outputDir) if not (os.path.exists(defFile)): - Gendef(execTool, mdFiles, outputDir) + Gendef(execTool, mdFiles, outputDir, asanLib) for mdfile in mdFiles: if (os.stat(mdfile).st_mtime > os.stat(defFile).st_mtime): - Gendef(execTool, mdFiles, outputDir) + Gendef(execTool, mdFiles, outputDir, asanLib) if (os.stat(execTool).st_mtime > os.stat(defFile).st_mtime): - Gendef(execTool, mdFiles, outputDir) + Gendef(execTool, mdFiles, outputDir, asanLib) -def help(): - print("Usage: %s maplegen_exe_directory mdfiles_directory output_defiless_directory" % (sys.argv[0])); +def get_arg_parser(): + parser = argparse.ArgumentParser( + description="maplegen") + parser.add_argument('-e', '--exe', + help='maplegen_exe_directory') + parser.add_argument('-m', '--md', + 
help='mdfiles_directory') + parser.add_argument('-o', '--out', + help='output_defiless_directory') + parser.add_argument('-a', '--asan', + help='enabled asan and followed env LD_PRELOAD=xxxx') + return parser def main(): - if len(sys.argv) != 4: - help(); - return - Process(sys.argv[1], sys.argv[2], sys.argv[3]) + parser = get_arg_parser() + args = parser.parse_args() + if (args.exe is None or args.md is None or args.out is None): + print(str(args)) + parser.print_help() + exit(-1) + + Process(args.exe, args.md, args.out, args.asan) + if __name__ == "__main__": main() diff --git a/src/mapleall/maple_be/src/ad/mad.cpp b/src/mapleall/maple_be/src/ad/mad.cpp index 59aa7c74f0b487a381592c00236bee1fde452bea..7cf6518526600fc1080ba5d6e5d9bd27e6ab03f1 100644 --- a/src/mapleall/maple_be/src/ad/mad.cpp +++ b/src/mapleall/maple_be/src/ad/mad.cpp @@ -303,13 +303,13 @@ bool StoreBypass::CanBypass(const Insn &defInsn, const Insn &useInsn) const { case MOP_xstr: case MOP_sstr: case MOP_dstr: { - auto &useMemOpnd = static_cast(useInsn.GetOperand(kInsnSecondOpnd)); + auto &useMemOpnd = static_cast(useInsn.GetOperand(kInsnSecondOpnd)); return (&(defInsn.GetOperand(kInsnFirstOpnd)) != useMemOpnd.GetOffset() && &(defInsn.GetOperand(kInsnFirstOpnd)) != useMemOpnd.GetBaseRegister()); } case MOP_wstp: case MOP_xstp: { - auto &useMemOpnd = static_cast(useInsn.GetOperand(kInsnThirdOpnd)); + auto &useMemOpnd = static_cast(useInsn.GetOperand(kInsnThirdOpnd)); return (&(defInsn.GetOperand(kInsnFirstOpnd)) != useMemOpnd.GetOffset() && &(defInsn.GetOperand(kInsnFirstOpnd)) != useMemOpnd.GetBaseRegister()); } diff --git a/src/mapleall/maple_be/src/be/becommon.cpp b/src/mapleall/maple_be/src/be/becommon.cpp index cad074371aa8c853fb6b08258489c5a1329679e1..3f11d00f43577bfee2e04da6675e6bdd6d761cc4 100644 --- a/src/mapleall/maple_be/src/be/becommon.cpp +++ b/src/mapleall/maple_be/src/be/becommon.cpp @@ -138,17 +138,21 @@ void BECommon::ComputeStructTypeSizesAligns(MIRType &ty, const TyIdx &tyIdx) { } return; } + auto structAttr = structType.GetTypeAttrs(); + auto structPack = static_cast(structAttr.GetPack()); for (uint32 j = 0; j < fields.size(); ++j) { TyIdx fieldTyIdx = fields[j].second.first; + auto fieldAttr = fields[j].second.second; MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTyIdx); uint32 fieldTypeSize = GetTypeSize(fieldTyIdx); if (fieldTypeSize == 0) { ComputeTypeSizesAligns(*fieldType); fieldTypeSize = GetTypeSize(fieldTyIdx); } - uint8 fieldAlign = GetTypeAlign(fieldTyIdx); - uint8 attrAlign = structType.GetTyidxFieldAttrPair(j).second.GetAlign(); - fieldAlign = std::max(fieldAlign, attrAlign); + uint64 fieldSizeBits = fieldTypeSize * kBitsPerByte; + auto attrAlign = static_cast(fieldAttr.GetAlign()); + auto originAlign = std::max(attrAlign, GetTypeAlign(fieldTyIdx)); + uint8 fieldAlign = fieldAttr.IsPacked() ? 1 : std::min(originAlign, structPack); uint64 fieldAlignBits = fieldAlign * kBitsPerByte; CHECK_FATAL(fieldAlign != 0, "expect fieldAlign not equal 0"); MIRStructType *subStructType = fieldType->EmbeddedStructType(); @@ -159,34 +163,29 @@ void BECommon::ComputeStructTypeSizesAligns(MIRType &ty, const TyIdx &tyIdx) { if (fieldType->GetKind() == kTypeBitField) { uint32 fieldSize = static_cast(fieldType)->GetFieldSize(); /* is this field is crossing the align boundary of its base type? 
*/ - if ((allocedSizeInBits / (fieldAlign * 8u)) != ((allocedSizeInBits + fieldSize - 1u) / (fieldAlign * 8u))) { - /* the field is crossing the align boundary of its base type; */ - /* align alloced_size_in_bits to fieldAlign */ - allocedSizeInBits = RoundUp(allocedSizeInBits, fieldAlign * kBitsPerByte); + if ((!structAttr.IsPacked() && + ((allocedSizeInBits / fieldSizeBits) != ((allocedSizeInBits + fieldSize - 1u) / fieldSizeBits))) || + fieldSize == 0) { + allocedSizeInBits = RoundUp(allocedSizeInBits, fieldSizeBits); } /* allocate the bitfield */ allocedSizeInBits += fieldSize; - allocedSize = std::max(allocedSize, RoundUp(allocedSizeInBits, fieldAlign * kBitsPerByte) / - kBitsPerByte); - if (fieldSize == 0) { - allocedSizeInBits = allocedSize * kBitsPerByte; - } + allocedSize = std::max(allocedSize, RoundUp(allocedSizeInBits, fieldAlignBits) / kBitsPerByte); } else { - uint32 fldsizeinbits = fieldTypeSize * kBitsPerByte; bool leftoverbits = false; if (allocedSizeInBits == allocedSize * kBitsPerByte) { allocedSize = RoundUp(allocedSize, fieldAlign); } else { /* still some leftover bits on allocated words, we calculate things based on bits then. */ - if (allocedSizeInBits / fieldAlignBits != (allocedSizeInBits + fldsizeinbits - 1) / fieldAlignBits) { + if (allocedSizeInBits / fieldAlignBits != (allocedSizeInBits + fieldSizeBits - 1) / fieldAlignBits) { /* the field is crossing the align boundary of its base type */ allocedSizeInBits = RoundUp(allocedSizeInBits, fieldAlignBits); } leftoverbits = true; } if (leftoverbits) { - allocedSizeInBits += fldsizeinbits; + allocedSizeInBits += fieldSizeBits; allocedSize = std::max(allocedSize, RoundUp(allocedSizeInBits, fieldAlignBits) / kBitsPerByte); } else { /* pad alloced_size according to the field alignment */ @@ -571,11 +570,15 @@ std::pair BECommon::GetFieldOffset(MIRStructType &structType, Fiel /* process the struct fields */ FieldVector fields = structType.GetFields(); + auto structPack = static_cast(structType.GetTypeAttrs().GetPack()); for (uint32 j = 0; j < fields.size(); ++j) { TyIdx fieldTyIdx = fields[j].second.first; + auto fieldAttr = fields[j].second.second; MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTyIdx); uint32 fieldTypeSize = GetTypeSize(fieldTyIdx); - uint8 fieldAlign = GetTypeAlign(fieldTyIdx); + uint64 fieldSizeBits = fieldTypeSize * kBitsPerByte; + auto originAlign = GetTypeAlign(fieldTyIdx); + auto fieldAlign = fieldAttr.IsPacked() ? 1 : std::min(originAlign, structPack); uint64 fieldAlignBits = fieldAlign * kBitsPerByte; CHECK_FATAL(fieldAlign != 0, "fieldAlign should not equal 0"); if (structType.GetKind() != kTypeUnion) { @@ -591,25 +594,25 @@ std::pair BECommon::GetFieldOffset(MIRStructType &structType, Fiel * We know that A zero-width bit field can cause the next field to be aligned on the next container * boundary where the container is the same size as the underlying type of the bit field. 
*/ - if (((allocedSizeInBits / (fieldAlign * 8u)) != ((allocedSizeInBits + fieldSize - 1u) / (fieldAlign * 8u))) || + if ((!structType.GetTypeAttrs().IsPacked() && + ((allocedSizeInBits / fieldSizeBits) != ((allocedSizeInBits + fieldSize - 1u) / fieldSizeBits))) || fieldSize == 0) { /* * the field is crossing the align boundary of its base type; * align alloced_size_in_bits to fieldAlign */ - allocedSizeInBits = RoundUp(allocedSizeInBits, fieldAlign * kBitsPerByte); + allocedSizeInBits = RoundUp(allocedSizeInBits, fieldSizeBits); } /* allocate the bitfield */ if (curFieldID == fieldID) { - return std::pair((allocedSizeInBits / (fieldAlign * 8u)) * fieldAlign, - allocedSizeInBits % (fieldAlign * 8u)); + return std::pair((allocedSizeInBits / fieldAlignBits) * fieldAlign, + allocedSizeInBits % fieldAlignBits); } else { ++curFieldID; } allocedSizeInBits += fieldSize; - allocedSize = std::max(allocedSize, RoundUp(allocedSizeInBits, fieldAlign * kBitsPerByte) / kBitsPerByte); + allocedSize = std::max(allocedSize, RoundUp(allocedSizeInBits, fieldAlignBits) / kBitsPerByte); } else { - uint32 fldSizeInBits = fieldTypeSize * k8BitSize; bool leftOverBits = false; uint64 offset = 0; @@ -618,7 +621,7 @@ std::pair BECommon::GetFieldOffset(MIRStructType &structType, Fiel offset = allocedSize; } else { /* still some leftover bits on allocated words, we calculate things based on bits then. */ - if (allocedSizeInBits / fieldAlignBits != (allocedSizeInBits + fldSizeInBits - k1BitSize) / fieldAlignBits) { + if (allocedSizeInBits / fieldAlignBits != (allocedSizeInBits + fieldSizeBits - k1BitSize) / fieldAlignBits) { /* the field is crossing the align boundary of its base type */ allocedSizeInBits = RoundUp(allocedSizeInBits, fieldAlignBits); } @@ -644,7 +647,7 @@ std::pair BECommon::GetFieldOffset(MIRStructType &structType, Fiel } if (leftOverBits) { - allocedSizeInBits += fldSizeInBits; + allocedSizeInBits += fieldSizeBits; allocedSize = std::max(allocedSize, RoundUp(allocedSizeInBits, fieldAlignBits) / kBitsPerByte); } else { allocedSize += fieldTypeSize; @@ -669,7 +672,6 @@ std::pair BECommon::GetFieldOffset(MIRStructType &structType, Fiel } } CHECK_FATAL(false, "GetFieldOffset() fails to find field"); - return std::pair(0, 0); } diff --git a/src/mapleall/maple_be/src/be/lower.cpp b/src/mapleall/maple_be/src/be/lower.cpp index 6160f4db70b2036585485709374f88e322266ae4..ecf6a59ef12a40c0f4b360d7dffc10167a8ea06e 100644 --- a/src/mapleall/maple_be/src/be/lower.cpp +++ b/src/mapleall/maple_be/src/be/lower.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -238,29 +238,47 @@ BaseNode *CGLowerer::SplitTernaryNodeResult(TernaryNode &tNode, BaseNode &parent * functionality or performance reason so we need to lower it to if-then-else. */ bool CGLowerer::IsComplexSelect(const TernaryNode &tNode) const { - if (tNode.GetPrimType() == PTY_agg) { - return true; - } - - /* Iread may have side effect which may cause correctness issue. */ - if (HasIreadExpr(tNode.Opnd(1)) || HasIreadExpr(tNode.Opnd(2))) { - return true; - } + if (tNode.GetPrimType() == PTY_agg) { + return true; + } + /* Iread may have side effect which may cause correctness issue. 
*/ + if (HasIreadExpr(tNode.Opnd(1)) || HasIreadExpr(tNode.Opnd(2))) { + return true; + } + // it will be generated many insn for complex expr, leading to + // worse performance than punishment of branch prediction error + constexpr size_t maxDepth = 3; + if (MaxDepth(tNode.Opnd(1)) > maxDepth || MaxDepth(tNode.Opnd(1)) > maxDepth) { + return true; + } + return false; +} - // it will be generated many insn for complex expr, leading to - // worse performance than punishment of branch prediction error - constexpr size_t maxDepth = 3; - if (MaxDepth(tNode.Opnd(1)) > maxDepth || MaxDepth(tNode.Opnd(1)) > maxDepth) { - return true; +int32 CGLowerer::FindTheCurrentStmtFreq(StmtNode *stmt) const { + while (stmt != nullptr) { + int32 freq = mirModule.CurFunction()->GetFreqFromLastStmt(stmt->GetStmtID()); + if (freq != -1) { + return freq; } - return false; + stmt = stmt->GetPrev(); + } + return -1; } /* Lower agg select node back to if-then-else stmt. */ +/* + 0(brfalse) + | \ + 1 2 + \ | + \ | + 3 +*/ BaseNode *CGLowerer::LowerComplexSelect(const TernaryNode &tNode, BaseNode &parent, BlockNode &blkNode) { MIRBuilder *mirbuilder = mirModule.GetMIRBuilder(); MIRType *resultTy = 0; + MIRFunction *func = mirModule.CurFunction(); if (tNode.GetPrimType() == PTY_agg) { if (tNode.Opnd(1)->op == OP_dread) { DreadNode *trueNode = static_cast(tNode.Opnd(1)); @@ -286,22 +304,34 @@ BaseNode *CGLowerer::LowerComplexSelect(const TernaryNode &tNode, BaseNode &pare LabelIdx targetIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel(); mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(targetIdx); brTargetStmt->SetOffset(targetIdx); + // Update the current stmt frequence + int32 currentStmtFreq = 0; + if (kOpcodeInfo.IsStmt(parent.GetOpCode())) { + currentStmtFreq = FindTheCurrentStmtFreq(static_cast(&parent)); + } + currentStmtFreq = currentStmtFreq == -1 ? 
0 : currentStmtFreq; + func->SetLastFreqMap(brTargetStmt->GetStmtID(), currentStmtFreq); blkNode.InsertAfter(blkNode.GetLast(), brTargetStmt); union { MIRSymbol *resSym; PregIdx resPreg; } cplxSelRes; // complex select result + uint32 fallthruStmtFreq = (currentStmtFreq + 1) / 2; if (tNode.GetPrimType() == PTY_agg) { static uint32 val = 0; std::string name("ComplexSelectTmp"); name.append(std::to_string(val++)); cplxSelRes.resSym = mirbuilder->GetOrCreateLocalDecl(const_cast(name), *resultTy); DassignNode *dassignTrue = mirbuilder->CreateStmtDassign(*cplxSelRes.resSym, 0, tNode.Opnd(1)); + // Fallthru: update the frequence 1 + func->SetFirstFreqMap(dassignTrue->GetStmtID(), fallthruStmtFreq); blkNode.InsertAfter(blkNode.GetLast(), dassignTrue); } else { cplxSelRes.resPreg = mirbuilder->GetCurrentFunction()->GetPregTab()->CreatePreg(tNode.GetPrimType()); RegassignNode *regassignTrue = mirbuilder->CreateStmtRegassign(tNode.GetPrimType(), cplxSelRes.resPreg, tNode.Opnd(1)); + // Update the frequence first opnd + func->SetFirstFreqMap(regassignTrue->GetStmtID(), fallthruStmtFreq); blkNode.InsertAfter(blkNode.GetLast(), regassignTrue); } @@ -309,23 +339,33 @@ BaseNode *CGLowerer::LowerComplexSelect(const TernaryNode &tNode, BaseNode &pare LabelIdx EndIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel(); mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(EndIdx); gotoStmt->SetOffset(EndIdx); + // Update the frequence first opnd + func->SetLastFreqMap(gotoStmt->GetStmtID(), fallthruStmtFreq); blkNode.InsertAfter(blkNode.GetLast(), gotoStmt); + uint32 targetStmtFreq = currentStmtFreq / 2; LabelNode *lableStmt = mirModule.CurFuncCodeMemPool()->New(); lableStmt->SetLabelIdx(targetIdx); + func->SetFirstFreqMap(lableStmt->GetStmtID(), targetStmtFreq); blkNode.InsertAfter(blkNode.GetLast(), lableStmt); if (tNode.GetPrimType() == PTY_agg) { DassignNode *dassignFalse = mirbuilder->CreateStmtDassign(*cplxSelRes.resSym, 0, tNode.Opnd(2)); + // Update the frequence second opnd + func->SetLastFreqMap(dassignFalse->GetStmtID(), targetStmtFreq); blkNode.InsertAfter(blkNode.GetLast(), dassignFalse); } else { RegassignNode *regassignFalse = mirbuilder->CreateStmtRegassign(tNode.GetPrimType(), cplxSelRes.resPreg, tNode.Opnd(2)); + // Update the frequence 2 + func->SetLastFreqMap(regassignFalse->GetStmtID(), targetStmtFreq); blkNode.InsertAfter(blkNode.GetLast(), regassignFalse); } lableStmt = mirModule.CurFuncCodeMemPool()->New(); lableStmt->SetLabelIdx(EndIdx); + // Update the frequence third opnd + func->SetFirstFreqMap(lableStmt->GetStmtID(), currentStmtFreq); blkNode.InsertAfter(blkNode.GetLast(), lableStmt); BaseNode *exprNode = (tNode.GetPrimType() == PTY_agg) ? 
@@ -690,68 +730,93 @@ BaseNode *CGLowerer::LowerCArray(ArrayNode &array) { return rAdd; } -BaseNode *CGLowerer::LowerDreadBitfield(DreadNode &dread) { - MIRSymbol *symbol = mirModule.CurFunction()->GetLocalOrGlobalSymbol(dread.GetStIdx()); - MIRStructType *structTy = static_cast(symbol->GetType()); - CHECK_FATAL(structTy != nullptr, "LowerDreadBitfield: non-zero fieldID for non-structure"); - TyIdx fTyIdx = structTy->GetFieldTyIdx(dread.GetFieldID()); - CHECK_FATAL(fTyIdx != 0u, "LoweDreadBitField: field id out of range for the structure"); - MIRType *fType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(fTyIdx)); - if (fType->GetKind() != kTypeBitField) { - return &dread; +StmtNode *CGLowerer::WriteBitField(std::pair byteBitOffsets, MIRBitFieldType *fieldType, + BaseNode *baseAddr, BaseNode *rhs, BlockNode *block) { + auto bitSize = fieldType->GetFieldSize(); + auto primType = fieldType->GetPrimType(); + auto byteOffset = byteBitOffsets.first; + auto bitOffset = byteBitOffsets.second; + + auto *builder = mirModule.GetMIRBuilder(); + auto *bitField = builder->CreateExprIreadoff(primType, byteOffset, baseAddr); + auto primTypeBitSize = GetPrimTypeBitSize(primType); + + if ((bitOffset + bitSize) <= primTypeBitSize) { + if (CGOptions::IsBigEndian()) { + bitOffset = beCommon.GetTypeSize(fieldType->GetTypeIndex()) * kBitsPerByte - bitOffset - bitSize; + } + auto depositBits = builder->CreateExprDepositbits(OP_depositbits, primType, bitOffset, bitSize, bitField, rhs); + return builder->CreateStmtIassignoff(primType, byteOffset, baseAddr, depositBits); } - uint8 fieldAlign = beCommon.GetTypeAlign(fTyIdx); - std::pair byteBitOffsets = beCommon.GetFieldOffset(*structTy, dread.GetFieldID()); - CHECK_FATAL((static_cast(byteBitOffsets.first) % fieldAlign) == 0, - "LowerDreadBitfield: bitfield offset not multiple of its alignment"); - AddrofNode *addrofNode = mirModule.CurFuncCodeMemPool()->New(OP_addrof); - addrofNode->SetPrimType(LOWERED_PTR_TYPE); - addrofNode->SetStIdx(dread.GetStIdx()); + // if space not enough in the unit with size of primType, we would make an extra assignment from next bound + auto bitsRemained = (bitOffset + bitSize) - primTypeBitSize; + auto bitsExtracted = primTypeBitSize - bitOffset; + if(CGOptions::IsBigEndian()) { + bitOffset = 0; + } + auto *depositedLowerBits = + builder->CreateExprDepositbits(OP_depositbits, primType, bitOffset, bitsExtracted, bitField, rhs); + auto *assignedLowerBits = builder->CreateStmtIassignoff(primType, byteOffset, baseAddr, depositedLowerBits); + block->AddStatement(assignedLowerBits); + auto *extractedHigherBits = + builder->CreateExprExtractbits(OP_extractbits, primType, bitsExtracted, bitsRemained, rhs); + auto *bitFieldRemained = builder->CreateExprIreadoff(primType, byteOffset + GetPrimTypeSize(primType), baseAddr); + auto *depositedHigherBits = + builder->CreateExprDepositbits(OP_depositbits, primType, 0, bitsRemained, bitFieldRemained, extractedHigherBits); + auto *assignedHigherBits = + builder->CreateStmtIassignoff(primType, byteOffset + GetPrimTypeSize(primType), baseAddr, depositedHigherBits); + return assignedHigherBits; +} - ConstvalNode *constNode = mirModule.CurFuncCodeMemPool()->New(); - constNode->SetPrimType(LOWERED_PTR_TYPE); - uint32 loweredPtrType = static_cast(LOWERED_PTR_TYPE); - CHECK_FATAL(loweredPtrType < GlobalTables::GetTypeTable().GetTypeTable().size(), - "LowerIassignBitField: subscript out of range"); - MIRType &type = *GlobalTables::GetTypeTable().GetTypeFromTyIdx(loweredPtrType); - constNode->SetConstVal( - 
GlobalTables::GetIntConstTable().GetOrCreateIntConst(byteBitOffsets.first, type)); +BaseNode *CGLowerer::ReadBitField(std::pair byteBitOffsets, MIRBitFieldType *fieldType, + BaseNode *baseAddr) { + auto bitSize = fieldType->GetFieldSize(); + auto primType = fieldType->GetPrimType(); + auto byteOffset = byteBitOffsets.first; + auto bitOffset = byteBitOffsets.second; - BinaryNode *addNode = mirModule.CurFuncCodeMemPool()->New(OP_add); - addNode->SetPrimType(LOWERED_PTR_TYPE); - addNode->SetBOpnd(addrofNode, 0); - addNode->SetBOpnd(constNode, 1); - - IreadNode *ireadNode = mirModule.CurFuncCodeMemPool()->New(OP_iread); - ireadNode->SetPrimType(GetRegPrimType(fType->GetPrimType())); - ireadNode->SetOpnd(addNode, 0); - MIRType pointedType(kTypeScalar, fType->GetPrimType()); - TyIdx pointedTyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&pointedType); - const MIRType *pointToType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointedTyIdx); - MIRType *pointType = beCommon.BeGetOrCreatePointerType(*pointToType); - ireadNode->SetTyIdx(pointType->GetTypeIndex()); - - ExtractbitsNode *extrBitsNode = mirModule.CurFuncCodeMemPool()->New(OP_extractbits); - extrBitsNode->SetPrimType(GetRegPrimType(fType->GetPrimType())); + auto *builder = mirModule.GetMIRBuilder(); + auto *bitField = builder->CreateExprIreadoff(primType, byteOffset, baseAddr); + auto primTypeBitSize = GetPrimTypeBitSize(primType); + + if ((bitOffset + bitSize) <= primTypeBitSize) { + if (CGOptions::IsBigEndian()) { + bitOffset = beCommon.GetTypeSize(fieldType->GetTypeIndex()) * kBitsPerByte - bitOffset - bitSize; + } + return builder->CreateExprExtractbits(OP_extractbits, primType, bitOffset, bitSize, bitField); + } + + // if space not enough in the unit with size of primType, the result would be binding of two exprs of load + auto bitsRemained = (bitOffset + bitSize) - primTypeBitSize; if (CGOptions::IsBigEndian()) { - uint8 bitSize = static_cast(fType)->GetFieldSize(); - extrBitsNode->SetBitsOffset(static_cast(fieldAlign * kBitsPerByte - byteBitOffsets.second - bitSize)); - } else { - extrBitsNode->SetBitsOffset(static_cast(byteBitOffsets.second)); + bitOffset = 0; } - extrBitsNode->SetBitsSize(static_cast(fType)->GetFieldSize()); - extrBitsNode->SetOpnd(ireadNode, 0); + auto *extractedLowerBits = + builder->CreateExprExtractbits(OP_extractbits, primType, bitOffset, bitSize - bitsRemained, bitField); + auto *bitFieldRemained = builder->CreateExprIreadoff(primType, byteOffset + GetPrimTypeSize(primType), baseAddr); + auto *result = builder->CreateExprDepositbits(OP_depositbits, primType, bitSize - bitsRemained, bitsRemained, + extractedLowerBits, bitFieldRemained); + return result; +} - return extrBitsNode; +BaseNode *CGLowerer::LowerDreadBitfield(DreadNode &dread) { + auto *symbol = mirModule.CurFunction()->GetLocalOrGlobalSymbol(dread.GetStIdx()); + auto *structTy = static_cast(symbol->GetType()); + auto fTyIdx = structTy->GetFieldTyIdx(dread.GetFieldID()); + auto *fType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(fTyIdx)); + if (fType->GetKind() != kTypeBitField) { + return &dread; + } + auto *builder = mirModule.GetMIRBuilder(); + auto *baseAddr = builder->CreateExprAddrof(0, dread.GetStIdx()); + auto byteBitOffsets = beCommon.GetFieldOffset(*structTy, dread.GetFieldID()); + return ReadBitField(byteBitOffsets, static_cast(fType), baseAddr); } BaseNode *CGLowerer::LowerIreadBitfield(IreadNode &iread) { - CHECK_FATAL(iread.GetTyIdx() < GlobalTables::GetTypeTable().GetTypeTable().size(), - "LowerIassignBitField: 
subscript out of range"); uint32 index = iread.GetTyIdx(); MIRPtrType *pointerTy = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(index)); - CHECK_FATAL(pointerTy != nullptr, "LowerIreadBitField: type in iread should be pointer type"); MIRType *pointedTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerTy->GetPointedTyIdx()); /* Here pointed type can be Struct or JArray */ MIRStructType *structTy = nullptr; @@ -761,51 +826,13 @@ BaseNode *CGLowerer::LowerIreadBitfield(IreadNode &iread) { /* it's a Jarray type. using it's parent's field info: java.lang.Object */ structTy = static_cast(pointedTy)->GetParentType(); } - CHECK_FATAL(structTy != nullptr, "LowerIreadBitField: type in iread does not point to a struct"); TyIdx fTyIdx = structTy->GetFieldTyIdx(iread.GetFieldID()); - CHECK_FATAL(fTyIdx != 0u, "LowerIreadBitField: field id out of range for the structure"); MIRType *fType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(fTyIdx)); if (fType->GetKind() != kTypeBitField) { return &iread; } - uint8 fieldAlign = beCommon.GetTypeAlign(fTyIdx); - std::pair byteBitOffsets = beCommon.GetFieldOffset(*structTy, iread.GetFieldID()); - CHECK_FATAL((static_cast(byteBitOffsets.first) % fieldAlign) == 0, - "LowerIreadBitfield: bitfield offset not multiple of its alignment"); - - ConstvalNode *constNode = mirModule.CurFuncCodeMemPool()->New(); - constNode->SetPrimType(LOWERED_PTR_TYPE); - uint32 loweredPtrType = static_cast(LOWERED_PTR_TYPE); - MIRType &mirType = *GlobalTables::GetTypeTable().GetTypeFromTyIdx(loweredPtrType); - constNode->SetConstVal( - GlobalTables::GetIntConstTable().GetOrCreateIntConst(byteBitOffsets.first, mirType)); - - BinaryNode *addNode = mirModule.CurFuncCodeMemPool()->New(OP_add); - addNode->SetPrimType(LOWERED_PTR_TYPE); - addNode->SetBOpnd(iread.Opnd(0), 0); - addNode->SetBOpnd(constNode, 1); - - IreadNode *ireadNode = mirModule.CurFuncCodeMemPool()->New(OP_iread); - ireadNode->SetPrimType(GetRegPrimType(fType->GetPrimType())); - ireadNode->SetOpnd(addNode, 0); - MIRType pointedType(kTypeScalar, fType->GetPrimType()); - TyIdx pointedTyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&pointedType); - const MIRType *pointToType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointedTyIdx); - MIRType *pointType = beCommon.BeGetOrCreatePointerType(*pointToType); - ireadNode->SetTyIdx(pointType->GetTypeIndex()); - - ExtractbitsNode *extrBitsNode = mirModule.CurFuncCodeMemPool()->New(OP_extractbits); - extrBitsNode->SetPrimType(GetRegPrimType(fType->GetPrimType())); - if (CGOptions::IsBigEndian()) { - uint8 bitSize = static_cast(fType)->GetFieldSize(); - extrBitsNode->SetBitsOffset(static_cast(fieldAlign * kBitsPerByte - byteBitOffsets.second - bitSize)); - } else { - extrBitsNode->SetBitsOffset(static_cast(byteBitOffsets.second)); - } - extrBitsNode->SetBitsSize(static_cast(fType)->GetFieldSize()); - extrBitsNode->SetOpnd(ireadNode, 0); - - return extrBitsNode; + auto byteBitOffsets = beCommon.GetFieldOffset(*structTy, iread.GetFieldID()); + return ReadBitField(byteBitOffsets, static_cast(fType), iread.Opnd(0)); } // input node must be cvt, retype, zext or sext @@ -852,11 +879,10 @@ BlockNode *CGLowerer::LowerReturnStruct(NaryStmtNode &retNode) { MIRSymbol *retSt = curFunc->GetFormal(0); MIRPtrType *retTy = static_cast(retSt->GetType()); IassignNode *iassign = mirModule.CurFuncCodeMemPool()->New(); - if ((beCommon.GetTypeSize(retTy->GetPointedTyIdx().GetIdx()) > k16ByteSize) || (opnd0->GetPrimType() != PTY_agg)) { - 
iassign->SetTyIdx(retTy->GetTypeIndex()); - } else { + iassign->SetTyIdx(retTy->GetTypeIndex()); + if ((beCommon.GetTypeSize(retTy->GetPointedTyIdx().GetIdx()) <= k16ByteSize) && (opnd0->GetPrimType() == PTY_agg)) { /* struct goes into register. */ - iassign->SetTyIdx(retTy->GetPointedTyIdx()); + curFunc->SetStructReturnedInRegs(); } iassign->SetFieldID(0); iassign->SetRHS(opnd0); @@ -912,56 +938,10 @@ StmtNode *CGLowerer::LowerDassignBitfield(DassignNode &dassign, BlockNode &newBl if (fType->GetKind() != kTypeBitField) { return &dassign; } - uint8 fieldAlign = beCommon.GetTypeAlign(fTyIdx); - std::pair byteBitOffsets = beCommon.GetFieldOffset(*structTy, dassign.GetFieldID()); - CHECK_FATAL((static_cast(byteBitOffsets.first) % fieldAlign) == 0, - "LowerDassignBitfield: bitfield offset not multiple of its alignment"); - - AddrofNode *addrofNode = mirModule.CurFuncCodeMemPool()->New(OP_addrof); - addrofNode->SetPrimType(LOWERED_PTR_TYPE); - addrofNode->SetStIdx(dassign.GetStIdx()); - - ConstvalNode *constNode = mirModule.CurFuncCodeMemPool()->New(); - constNode->SetPrimType(LOWERED_PTR_TYPE); - uint32 loweredPtrType = static_cast(LOWERED_PTR_TYPE); - CHECK_FATAL(loweredPtrType < GlobalTables::GetTypeTable().GetTypeTable().size(), - "LowerIassignBitField: subscript out of range"); - MIRType &mirType = *GlobalTables::GetTypeTable().GetTypeFromTyIdx(loweredPtrType); - constNode->SetConstVal( - GlobalTables::GetIntConstTable().GetOrCreateIntConst(byteBitOffsets.first, mirType)); - - BinaryNode *addNode = mirModule.CurFuncCodeMemPool()->New(OP_add); - addNode->SetPrimType(LOWERED_PTR_TYPE); - addNode->SetBOpnd(addrofNode, 0); - addNode->SetBOpnd(constNode, 1); - - IreadNode *ireadNode = mirModule.CurFuncCodeMemPool()->New(OP_iread); - ireadNode->SetPrimType(GetRegPrimType(fType->GetPrimType())); - ireadNode->SetOpnd(addNode, 0); - MIRType pointedType(kTypeScalar, fType->GetPrimType()); - TyIdx pointedTyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&pointedType); - const MIRType *pointToType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointedTyIdx); - MIRType *pointType = beCommon.BeGetOrCreatePointerType(*pointToType); - ireadNode->SetTyIdx(pointType->GetTypeIndex()); - - DepositbitsNode *depositBits = mirModule.CurFuncCodeMemPool()->New(); - depositBits->SetPrimType(GetRegPrimType(fType->GetPrimType())); - if (CGOptions::IsBigEndian()) { - uint8 bitSize = static_cast(fType)->GetFieldSize(); - depositBits->SetBitsOffset(static_cast(fieldAlign * kBitsPerByte - byteBitOffsets.second - bitSize)); - } else { - depositBits->SetBitsOffset(static_cast(byteBitOffsets.second)); - } - depositBits->SetBitsSize(static_cast(fType)->GetFieldSize()); - depositBits->SetBOpnd(ireadNode, 0); - depositBits->SetBOpnd(dassign.GetRHS(), 1); - - IassignNode *iassignStmt = mirModule.CurFuncCodeMemPool()->New(); - iassignStmt->SetTyIdx(pointType->GetTypeIndex()); - iassignStmt->SetOpnd(addNode->CloneTree(mirModule.GetCurFuncCodeMPAllocator()), 0); - iassignStmt->SetRHS(depositBits); - - return iassignStmt; + auto *builder = mirModule.GetMIRBuilder(); + auto *baseAddr = builder->CreateExprAddrof(0, dassign.GetStIdx()); + auto byteBitOffsets = beCommon.GetFieldOffset(*structTy, dassign.GetFieldID()); + return WriteBitField(byteBitOffsets, static_cast(fType), baseAddr, dassign.GetRHS(), &newBlk); } StmtNode *CGLowerer::LowerIassignBitfield(IassignNode &iassign, BlockNode &newBlk) { @@ -987,82 +967,14 @@ StmtNode *CGLowerer::LowerIassignBitfield(IassignNode &iassign, BlockNode &newBl structTy = 
static_cast(pointedTy)->GetParentType(); } - CHECK_FATAL(structTy != nullptr, "LowerIassignBitField: type in iassign does not point to a struct"); TyIdx fTyIdx = structTy->GetFieldTyIdx(iassign.GetFieldID()); - CHECK_FATAL(fTyIdx != 0u, "LowerIassignBitField: field id out of range for the structure"); MIRType *fType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(fTyIdx)); if (fType->GetKind() != kTypeBitField) { return &iassign; } - uint8 fieldAlign = beCommon.GetTypeAlign(fTyIdx); - std::pair byteBitOffsets = beCommon.GetFieldOffset(*structTy, iassign.GetFieldID()); - CHECK_FATAL((static_cast(byteBitOffsets.first) % fieldAlign) == 0, - "LowerIassignBitfield: bitfield offset not multiple of its alignment"); - - ConstvalNode *constNode = mirModule.CurFuncCodeMemPool()->New(); - constNode->SetPrimType(LOWERED_PTR_TYPE); - uint32 loweredPtrType = static_cast(LOWERED_PTR_TYPE); - MIRType &mirType = *GlobalTables::GetTypeTable().GetTypeFromTyIdx(loweredPtrType); - constNode->SetConstVal( - GlobalTables::GetIntConstTable().GetOrCreateIntConst(byteBitOffsets.first, mirType)); - - BinaryNode *addNode = mirModule.CurFuncCodeMemPool()->New(OP_add); - addNode->SetPrimType(LOWERED_PTR_TYPE); - addNode->SetBOpnd(iassign.Opnd(0), 0); - addNode->SetBOpnd(constNode, 1); - /* - * If big endian, using LDR/STR will get the wrong value because the storage - * of value is different in little and big endian. - * So use bfx/bfi in big endian - */ - if (!CGOptions::IsBigEndian()) { - uint8 bitSize = static_cast(fType)->GetFieldSize(); - auto bitOffset = static_cast(byteBitOffsets.second); - if (((bitOffset == k8BitSize || bitOffset == k16BitSize || bitOffset == 0) && - (bitSize == k8BitSize || bitSize == k16BitSize)) || - (bitOffset == k24BitSize && bitSize == k8BitSize)) { - constNode->SetConstVal( - GlobalTables::GetIntConstTable().GetOrCreateIntConst( - static_cast(byteBitOffsets.first) + (bitOffset / k8BitSize), mirType)); - IassignNode *iassignStmt = mirModule.CurFuncCodeMemPool()->New(); - MIRType pointedType( - kTypeScalar, GetIntegerPrimTypeBySizeAndSign(bitSize, IsSignedInteger(fType->GetPrimType()))); - TyIdx pointedTyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&pointedType); - const MIRType *pointToType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointedTyIdx); - MIRType *pointType = beCommon.BeGetOrCreatePointerType(*pointToType); - iassignStmt->SetTyIdx(pointType->GetTypeIndex()); - iassignStmt->SetOpnd(addNode->CloneTree(mirModule.GetCurFuncCodeMPAllocator()), 0); - iassignStmt->SetRHS(iassign.GetRHS()); - return iassignStmt; - } - } - IreadNode *ireadNode = mirModule.CurFuncCodeMemPool()->New(OP_iread); - ireadNode->SetPrimType(GetRegPrimType(fType->GetPrimType())); - ireadNode->SetOpnd(addNode, 0); - MIRType pointedType(kTypeScalar, fType->GetPrimType()); - TyIdx pointedTyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&pointedType); - const MIRType *pointToType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointedTyIdx); - MIRType *pointType = beCommon.BeGetOrCreatePointerType(*pointToType); - ireadNode->SetTyIdx(pointType->GetTypeIndex()); - - DepositbitsNode *depositBits = mirModule.CurFuncCodeMemPool()->New(); - depositBits->SetPrimType(GetRegPrimType(fType->GetPrimType())); - if (CGOptions::IsBigEndian()) { - uint8 bitSize = static_cast(fType)->GetFieldSize(); - depositBits->SetBitsOffset(static_cast(fieldAlign * kBitsPerByte - byteBitOffsets.second - bitSize)); - } else { - depositBits->SetBitsOffset(static_cast(byteBitOffsets.second)); - } - 
depositBits->SetBitsSize(static_cast(fType)->GetFieldSize()); - depositBits->SetBOpnd(ireadNode, 0); - depositBits->SetBOpnd(iassign.GetRHS(), 1); - - IassignNode *iassignStmt = mirModule.CurFuncCodeMemPool()->New(); - iassignStmt->SetTyIdx(pointType->GetTypeIndex()); - iassignStmt->SetOpnd(addNode->CloneTree(mirModule.GetCurFuncCodeMPAllocator()), 0); - iassignStmt->SetRHS(depositBits); - - return iassignStmt; + auto byteBitOffsets = beCommon.GetFieldOffset(*structTy, iassign.GetFieldID()); + auto *bitFieldType = static_cast(fType); + return WriteBitField(byteBitOffsets, bitFieldType, iassign.Opnd(0), iassign.GetRHS(), &newBlk); } void CGLowerer::LowerIassign(IassignNode &iassign, BlockNode &newBlk) { @@ -1120,7 +1032,7 @@ void CGLowerer::LowerAsmStmt(AsmNode *asmNode, BlockNode *newBlk) { DassignNode *CGLowerer::SaveReturnValueInLocal(StIdx stIdx, uint16 fieldID) { MIRSymbol *var; if (stIdx.IsGlobal()) { - var = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()); + var = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx(), true); } else { var = GetCurrentFunc()->GetSymbolTabItem(stIdx.Idx()); } @@ -1267,7 +1179,7 @@ BlockNode *CGLowerer::GenBlockNode(StmtNode &newCall, const CallReturnVector &p2 MIRSymbol *sym = nullptr; StIdx stIdx = p2nRets[0].first; if (stIdx.IsGlobal()) { - sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()); + sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx(), true); } else { sym = GetCurrentFunc()->GetSymbolTabItem(stIdx.Idx()); } @@ -1371,6 +1283,30 @@ BlockNode *CGLowerer::LowerMemop(StmtNode &stmt) { return blk; } +BlockNode *CGLowerer::LowerIntrinsiccallAassignedToAssignStmt(IntrinsiccallNode &intrinsicCall) { + auto *builder = mirModule.GetMIRBuilder(); + auto *block = mirModule.CurFuncCodeMemPool()->New(); + auto intrinsicID = intrinsicCall.GetIntrinsic(); + auto &opndVector = intrinsicCall.GetNopnd(); + auto returnPair = intrinsicCall.GetReturnVec().begin(); + auto regFieldPair = returnPair->second; + if (regFieldPair.IsReg()) { + auto regIdx = regFieldPair.GetPregIdx(); + auto primType = mirModule.CurFunction()->GetPregItem(static_cast(regIdx))->GetPrimType(); + auto intrinsicOp = builder->CreateExprIntrinsicop(intrinsicID, OP_intrinsicop, primType, TyIdx(0), opndVector); + auto regAssign = builder->CreateStmtRegassign(primType, regIdx, intrinsicOp); + block->AddStatement(regAssign); + } else { + auto fieldID = regFieldPair.GetFieldID(); + auto stIdx = returnPair->first; + auto *type = mirModule.CurFunction()->GetLocalOrGlobalSymbol(stIdx)->GetType(); + auto intrinsicOp = builder->CreateExprIntrinsicop(intrinsicID, OP_intrinsicop, *type, opndVector); + auto dAssign = builder->CreateStmtDassign(stIdx, fieldID, intrinsicOp); + block->AddStatement(dAssign); + } + return LowerBlock(*block); +} + BlockNode *CGLowerer::LowerCallAssignedStmt(StmtNode &stmt, bool uselvar) { StmtNode *newCall = nullptr; CallReturnVector *p2nRets = nullptr; @@ -1391,12 +1327,18 @@ BlockNode *CGLowerer::LowerCallAssignedStmt(StmtNode &stmt, bool uselvar) { newCall = GenCallNode(stmt, funcCalled, origCall); p2nRets = &origCall.GetReturnVec(); static_cast(newCall)->SetReturnVec(*p2nRets); + MIRFunction *curFunc = mirModule.CurFunction(); + curFunc->SetLastFreqMap(newCall->GetStmtID(), curFunc->GetFreqFromLastStmt(stmt.GetStmtID())); break; } case OP_intrinsiccallassigned: case OP_xintrinsiccallassigned: { IntrinsiccallNode &intrincall = static_cast(stmt); - if (intrincall.GetIntrinsic() == INTRN_JAVA_POLYMORPHIC_CALL) { + auto intrinsicID 
= intrincall.GetIntrinsic(); + if (IntrinDesc::intrinTable[intrinsicID].IsAtomic()) { + return LowerIntrinsiccallAassignedToAssignStmt(intrincall); + } + if (intrinsicID == INTRN_JAVA_POLYMORPHIC_CALL) { BaseNode *contextClassArg = GetBaseNodeFromCurFunc(*mirModule.CurFunction(), false); constexpr int kContextIdx = 4; /* stable index in MCC_DexPolymorphicCall, never out of range */ intrincall.InsertOpnd(contextClassArg, kContextIdx); @@ -1664,6 +1606,7 @@ void CGLowerer::LowerSwitchOpnd(StmtNode &stmt, BlockNode &newBlk) { PregIdx pIdx = GetCurrentFunc()->GetPregTab()->CreatePreg(ptyp); RegassignNode *regAss = mirBuilder->CreateStmtRegassign(ptyp, pIdx, opnd); newBlk.AddStatement(regAss); + GetCurrentFunc()->SetLastFreqMap(regAss->GetStmtID(), GetCurrentFunc()->GetFreqFromLastStmt(stmt.GetStmtID())); stmt.SetOpnd(mirBuilder->CreateExprRegread(ptyp, pIdx), 0); } else { stmt.SetOpnd(LowerExpr(stmt, *stmt.Opnd(0), newBlk), 0); @@ -1792,7 +1735,7 @@ BlockNode *CGLowerer::LowerBlock(BlockNode &block) { case OP_intrinsiccall: case OP_call: case OP_icall: -#if TARGARM32 || TARGAARCH64 || TARGRISCV64 +#if TARGARM32 || TARGAARCH64 || TARGRISCV64 || TARGX86_64 LowerCallStmt(*stmt, nextStmt, *newBlk); #else LowerStmt(*stmt, *newBlk); @@ -2567,7 +2510,7 @@ BaseNode *CGLowerer::LowerExpr(BaseNode &parent, BaseNode &expr, BlockNode &blkN } case OP_dread: - return LowerDread(static_cast(expr)); + return LowerDread(static_cast(expr), blkNode); case OP_addrof: return LowerAddrof(static_cast(expr)); @@ -2581,8 +2524,10 @@ BaseNode *CGLowerer::LowerExpr(BaseNode &parent, BaseNode &expr, BlockNode &blkN case OP_select: if (IsComplexSelect(static_cast(expr))) { return LowerComplexSelect(static_cast(expr), parent, blkNode); - } else { + } else if (mirModule.GetFlavor() != kFlavorLmbc) { return SplitTernaryNodeResult(static_cast(expr), parent, blkNode); + } else { + return &expr; } case OP_sizeoftype: { @@ -2636,12 +2581,12 @@ BaseNode *CGLowerer::LowerExpr(BaseNode &parent, BaseNode &expr, BlockNode &blkN } } -BaseNode *CGLowerer::LowerDread(DreadNode &dread) { +BaseNode *CGLowerer::LowerDread(DreadNode &dread, BlockNode &block) { /* use PTY_u8 for boolean type in dread/iread */ if (dread.GetPrimType() == PTY_u1) { dread.SetPrimType(PTY_u8); } - return (dread.GetFieldID() == 0 ? &dread : LowerDreadBitfield(dread)); + return (dread.GetFieldID() == 0 ? 
LowerDreadToThreadLocal(dread, block) : LowerDreadBitfield(dread)); } void CGLowerer::LowerRegassign(RegassignNode ®Node, BlockNode &newBlk) { @@ -2661,6 +2606,69 @@ void CGLowerer::LowerRegassign(RegassignNode ®Node, BlockNode &newBlk) { } } +BaseNode *CGLowerer::ExtractSymbolAddress(StIdx &stIdx, BlockNode &block) { + auto builder = mirModule.GetMIRBuilder(); + auto regIdx = mirModule.CurFunction()->GetPregTab()->CreatePreg(PTY_a64); + auto regAssign = builder->CreateStmtRegassign(PTY_a64, regIdx, builder->CreateExprAddrof(0, stIdx)); + block.AddStatement(regAssign); + // iassign <* u32> 0 (regread u64 %addr, dread u32 $x) + auto addr = builder->CreateExprRegread(PTY_a64, regIdx); + return addr; +} + +BaseNode *CGLowerer::LowerDreadToThreadLocal(BaseNode &expr, BlockNode &block) { + uint32 oldTypeTableSize = GlobalTables::GetTypeTable().GetTypeTableSize(); + auto *result = &expr; + if (expr.GetOpCode() != maple::OP_dread) { + return result; + } + auto dread = static_cast(expr); + StIdx stIdx = dread.GetStIdx(); + if (!stIdx.IsGlobal()) { + return result; + } + MIRSymbol *symbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx(), true); + + if (symbol->IsThreadLocal()) { + // iread <* u32> 0 (regread u64 %addr) + auto addr = ExtractSymbolAddress(stIdx, block); + auto ptrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*symbol->GetType()); + auto iread = mirModule.GetMIRBuilder()->CreateExprIread(*symbol->GetType(), *ptrType, dread.GetFieldID(), addr); + result = iread; + } + uint32 newTypeTableSize = GlobalTables::GetTypeTable().GetTypeTableSize(); + if (newTypeTableSize != oldTypeTableSize) { + beCommon.AddNewTypeAfterBecommon(oldTypeTableSize, newTypeTableSize); + } + return result; +} + +StmtNode *CGLowerer::LowerDassignToThreadLocal(StmtNode &stmt, BlockNode &block) { + uint32 oldTypeTableSize = GlobalTables::GetTypeTable().GetTypeTableSize(); + StmtNode *result = &stmt; + if (stmt.GetOpCode() != maple::OP_dassign) { + return result; + } + auto dAssign = static_cast(stmt); + StIdx stIdx = dAssign.GetStIdx(); + if (!stIdx.IsGlobal()) { + return result; + } + MIRSymbol *symbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx(), true); + if (symbol->IsThreadLocal()) { + // iassign <* u32> 0 (regread u64 %addr, dread u32 $x) + auto addr = ExtractSymbolAddress(stIdx, block); + auto ptrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*symbol->GetType()); + auto iassign = mirModule.GetMIRBuilder()->CreateStmtIassign(*ptrType, dAssign.GetFieldID(), addr, dAssign.GetRHS()); + result = iassign; + } + uint32 newTypeTableSize = GlobalTables::GetTypeTable().GetTypeTableSize(); + if (newTypeTableSize != oldTypeTableSize) { + beCommon.AddNewTypeAfterBecommon(oldTypeTableSize, newTypeTableSize); + } + return result; +} + void CGLowerer::LowerDassign(DassignNode &dsNode, BlockNode &newBlk) { StmtNode *newStmt = nullptr; BaseNode *rhs = nullptr; @@ -2694,7 +2702,7 @@ void CGLowerer::LowerDassign(DassignNode &dsNode, BlockNode &newBlk) { } if (newStmt != nullptr) { - newBlk.AddStatement(newStmt); + newBlk.AddStatement(LowerDassignToThreadLocal(*newStmt, newBlk)); } } @@ -3263,7 +3271,7 @@ BaseNode *CGLowerer::LowerIntrinsicop(const BaseNode &parent, IntrinsicopNode &i if (intrnID == INTRN_C___builtin_expect) { return intrinNode.Opnd(0); } - if (intrinDesc.IsVectorOp()) { + if (intrinDesc.IsVectorOp() || intrinDesc.IsAtomic()) { return &intrinNode; } CHECK_FATAL(false, "unexpected intrinsic type in CGLowerer::LowerIntrinsicop"); @@ -3443,7 +3451,7 @@ StmtNode 
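Note on the WriteBitField consolidation further above: both bitfield lowerings now funnel into one helper that emits the same read-modify-write shape (iread the container word, depositbits, iassign it back). A minimal C++ model of the deposit step, with hypothetical names (this is not Maple's helper):

    #include <cstdint>

    // Deposit 'size' bits of 'val' into 'word' at bit offset 'off' -- the
    // arithmetic a depositbits node performs on the loaded container.
    uint32_t DepositBits(uint32_t word, uint32_t val, uint32_t off, uint32_t size) {
      uint32_t mask = (size >= 32u) ? ~0u : (((1u << size) - 1u) << off);
      return (word & ~mask) | ((val << off) & mask);
    }

On big-endian targets the offset is mirrored within the container (fieldAlign * 8 - bitOffset - bitSize), which is what the CGOptions::IsBigEndian() branches in the deleted bodies computed.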
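The thread-local rewrites above (LowerDreadToThreadLocal / LowerDassignToThreadLocal) replace a direct dread/dassign of a TLS global with an explicit address materialization followed by an indirect access. That is the same shape a C++ thread_local access lowers to; a small illustration (gCounter is an assumed example name, not Maple code):

    #include <cstdint>

    thread_local uint32_t gCounter = 0;

    // Reading gCounter is lowered the way the patch lowers a TLS dread:
    //   regassign a64 %addr (addrof a64 $gCounter)  // materialize the per-thread address
    //   iread <* u32> 0 (regread u64 %addr)         // then load through it
    uint32_t ReadCounter() { return gCounter; }

    // A store likewise becomes address materialization plus an iassign.
    void WriteCounter(uint32_t v) { gCounter = v; }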
*CGLowerer::LowerIntrinsiccall(IntrinsiccallNode &intrincall, BlockNode return &intrincall; } IntrinDesc *intrinDesc = &IntrinDesc::intrinTable[intrnID]; - if (intrinDesc->IsSpecial()) { + if (intrinDesc->IsSpecial() || intrinDesc->IsAtomic()) { /* For special intrinsics we leave them to CGFunc::SelectIntrinCall() */ return &intrincall; } @@ -3721,28 +3729,85 @@ bool CGLowerer::IsIntrinsicOpHandledAtLowerLevel(MIRIntrinsicID intrinsic) const case INTRN_C_isaligned: case INTRN_C_alignup: case INTRN_C_aligndown: + case INTRN_C___sync_add_and_fetch_1: case INTRN_C___sync_add_and_fetch_2: case INTRN_C___sync_add_and_fetch_4: case INTRN_C___sync_add_and_fetch_8: + case INTRN_C___sync_sub_and_fetch_1: case INTRN_C___sync_sub_and_fetch_2: case INTRN_C___sync_sub_and_fetch_4: case INTRN_C___sync_sub_and_fetch_8: + case INTRN_C___sync_fetch_and_add_1: case INTRN_C___sync_fetch_and_add_2: case INTRN_C___sync_fetch_and_add_4: case INTRN_C___sync_fetch_and_add_8: + case INTRN_C___sync_fetch_and_sub_1: case INTRN_C___sync_fetch_and_sub_2: case INTRN_C___sync_fetch_and_sub_4: case INTRN_C___sync_fetch_and_sub_8: + case INTRN_C___sync_bool_compare_and_swap_1: + case INTRN_C___sync_bool_compare_and_swap_2: case INTRN_C___sync_bool_compare_and_swap_4: case INTRN_C___sync_bool_compare_and_swap_8: + case INTRN_C___sync_val_compare_and_swap_1: + case INTRN_C___sync_val_compare_and_swap_2: case INTRN_C___sync_val_compare_and_swap_4: case INTRN_C___sync_val_compare_and_swap_8: + case INTRN_C___sync_lock_test_and_set_1: + case INTRN_C___sync_lock_test_and_set_2: case INTRN_C___sync_lock_test_and_set_4: case INTRN_C___sync_lock_test_and_set_8: case INTRN_C___sync_lock_release_8: case INTRN_C___sync_lock_release_4: + case INTRN_C___sync_lock_release_2: + case INTRN_C___sync_lock_release_1: + case INTRN_C___sync_fetch_and_and_1: + case INTRN_C___sync_fetch_and_and_2: + case INTRN_C___sync_fetch_and_and_4: + case INTRN_C___sync_fetch_and_and_8: + case INTRN_C___sync_fetch_and_or_1: + case INTRN_C___sync_fetch_and_or_2: + case INTRN_C___sync_fetch_and_or_4: + case INTRN_C___sync_fetch_and_or_8: + case INTRN_C___sync_fetch_and_xor_1: + case INTRN_C___sync_fetch_and_xor_2: + case INTRN_C___sync_fetch_and_xor_4: + case INTRN_C___sync_fetch_and_xor_8: + case INTRN_C___sync_fetch_and_nand_1: + case INTRN_C___sync_fetch_and_nand_2: + case INTRN_C___sync_fetch_and_nand_4: + case INTRN_C___sync_fetch_and_nand_8: + case INTRN_C___sync_and_and_fetch_1: + case INTRN_C___sync_and_and_fetch_2: + case INTRN_C___sync_and_and_fetch_4: + case INTRN_C___sync_and_and_fetch_8: + case INTRN_C___sync_or_and_fetch_1: + case INTRN_C___sync_or_and_fetch_2: + case INTRN_C___sync_or_and_fetch_4: + case INTRN_C___sync_or_and_fetch_8: + case INTRN_C___sync_xor_and_fetch_1: + case INTRN_C___sync_xor_and_fetch_2: + case INTRN_C___sync_xor_and_fetch_4: + case INTRN_C___sync_xor_and_fetch_8: + case INTRN_C___sync_nand_and_fetch_1: + case INTRN_C___sync_nand_and_fetch_2: + case INTRN_C___sync_nand_and_fetch_4: + case INTRN_C___sync_nand_and_fetch_8: + case INTRN_C___sync_synchronize: case INTRN_C__builtin_return_address: case INTRN_C__builtin_extract_return_addr: + case INTRN_C_memcmp: + case INTRN_C_strlen: + case INTRN_C_strcmp: + case INTRN_C_strncmp: + case INTRN_C_strchr: + case INTRN_C_strrchr: + case INTRN_C_rev16_2: + case INTRN_C_rev_4: + case INTRN_C_rev_8: + + + return true; #endif default: diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_alignment.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_alignment.cpp index 
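With the IsAtomic() routing and the expanded IsIntrinsicOpHandledAtLowerLevel list above, the whole __sync_* family is now left to CGFunc::SelectIntrinCall to expand. For reference, the semantics the backend must preserve, stated in std::atomic terms (a sketch, not Maple code; the __sync_* builtins imply a full barrier, hence seq_cst here):

    #include <atomic>

    template <typename T>
    T SyncFetchAndAdd(std::atomic<T> &obj, T v) {         // __sync_fetch_and_add
      return obj.fetch_add(v, std::memory_order_seq_cst);  // returns the old value
    }

    template <typename T>
    T SyncValCompareAndSwap(std::atomic<T> &obj, T expected, T desired) {
      // __sync_val_compare_and_swap: store 'desired' iff the current value is
      // 'expected'; always return the value observed before the operation.
      obj.compare_exchange_strong(expected, desired, std::memory_order_seq_cst);
      return expected;  // updated to the observed value on failure
    }

On AArch64 these typically expand to an ldaxr/stlxr retry loop, or to single LSE instructions when those are available.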
4b5299d962f310541f2f82d3f841eaccf0bcd36c..9008f02903dc86ff582e20b2d74246c895b69415 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_alignment.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_alignment.cpp @@ -16,6 +16,7 @@ #include "insn.h" #include "loop.h" #include "aarch64_cg.h" +#include "cg_option.h" #include namespace maplebe { @@ -69,7 +70,13 @@ bool AArch64AlignAnalysis::IsInSizeRange(BB &bb) { } curBB = curBB->GetNext(); } - if (size <= kAlignMinBBSize || size >= kAlignMaxBBSize) { + AArch64AlignInfo targetInfo; + if (CGOptions::GetAlignMinBBSize() == 0 || CGOptions::GetAlignMaxBBSize() == 0) { + return false; + } + targetInfo.alignMinBBSize = (CGOptions::OptimizeForSize()) ? 16 : CGOptions::GetAlignMinBBSize(); + targetInfo.alignMaxBBSize = (CGOptions::OptimizeForSize()) ? 44 : CGOptions::GetAlignMaxBBSize(); + if (size <= targetInfo.alignMinBBSize || size >= targetInfo.alignMaxBBSize) { return false; } return true; @@ -93,7 +100,11 @@ void AArch64AlignAnalysis::ComputeLoopAlign() { continue; } bb->SetNeedAlign(true); - AlignInfo targetInfo; + if (CGOptions::GetLoopAlignPow() == 0) { + return; + } + AArch64AlignInfo targetInfo; + targetInfo.loopAlign = CGOptions::GetLoopAlignPow(); if (alignInfos.find(bb) == alignInfos.end()) { alignInfos[bb] = targetInfo.loopAlign; } else { @@ -113,7 +124,11 @@ void AArch64AlignAnalysis::ComputeJumpAlign() { continue; } bb->SetNeedAlign(true); - AlignInfo targetInfo; + if (CGOptions::GetJumpAlignPow() == 0) { + return; + } + AArch64AlignInfo targetInfo; + targetInfo.jumpAlign = (CGOptions::OptimizeForSize()) ? 3 : CGOptions::GetJumpAlignPow(); if (alignInfos.find(bb) == alignInfos.end()) { alignInfos[bb] = targetInfo.jumpAlign; } else { diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_args.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_args.cpp index de57700d175589640d5eda905659813779d838ec..0e972a4bf734bea8abb82d0ef383e54ce9afb970 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_args.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_args.cpp @@ -38,8 +38,8 @@ void AArch64MoveRegArgs::CollectRegisterArgs(std::map &argsL uint32 start = 0; if (numFormal) { MIRFunction *func = const_cast(aarchCGFunc->GetBecommon().GetMIRModule().CurFunction()); - if (aarchCGFunc->GetBecommon().HasFuncReturnType(*func)) { - TyIdx tyIdx = aarchCGFunc->GetBecommon().GetFuncReturnType(*func); + if (func->IsReturnStruct()) { + TyIdx tyIdx = func->GetFuncRetStructTyIdx(); if (aarchCGFunc->GetBecommon().GetTypeSize(tyIdx) <= k16ByteSize) { start = 1; } @@ -172,7 +172,7 @@ bool AArch64MoveRegArgs::IsInSameSegment(const ArgInfo &firstArgInfo, const ArgI void AArch64MoveRegArgs::GenerateStpInsn(const ArgInfo &firstArgInfo, const ArgInfo &secondArgInfo) { AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); - AArch64RegOperand *baseOpnd = static_cast(aarchCGFunc->GetBaseReg(*firstArgInfo.symLoc)); + RegOperand *baseOpnd = static_cast(aarchCGFunc->GetBaseReg(*firstArgInfo.symLoc)); RegOperand ®Opnd = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(firstArgInfo.reg, firstArgInfo.stkSize * kBitsPerByte, firstArgInfo.regType); @@ -193,27 +193,27 @@ void AArch64MoveRegArgs::GenerateStpInsn(const ArgInfo &firstArgInfo, const ArgI MemOperand *memOpnd = nullptr; if (stOffset > limit || baseReg != nullptr) { if (baseReg == nullptr || lastSegment != firstArgInfo.symLoc->GetMemSegment()) { - AArch64ImmOperand &immOpnd = + ImmOperand &immOpnd = aarchCGFunc->CreateImmOperand(stOffset - firstArgInfo.symLoc->GetOffset(), k64BitSize, false); baseReg = 
&aarchCGFunc->CreateRegisterOperandOfType(kRegTyInt, k8ByteSize); lastSegment = firstArgInfo.symLoc->GetMemSegment(); aarchCGFunc->SelectAdd(*baseReg, *baseOpnd, immOpnd, LOWERED_PTR_TYPE); } - AArch64OfstOperand &offsetOpnd = aarchCGFunc->CreateOfstOpnd(firstArgInfo.symLoc->GetOffset(), k32BitSize); + OfstOperand &offsetOpnd = aarchCGFunc->CreateOfstOpnd(firstArgInfo.symLoc->GetOffset(), k32BitSize); if (firstArgInfo.symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { offsetOpnd.SetVary(kUnAdjustVary); } - memOpnd = aarchCGFunc->GetMemoryPool()->New(AArch64MemOperand::kAddrModeBOi, - firstArgInfo.stkSize * kBitsPerByte, - *baseReg, nullptr, &offsetOpnd, firstArgInfo.sym); + memOpnd = aarchCGFunc->CreateMemOperand(MemOperand::kAddrModeBOi, + firstArgInfo.stkSize * kBitsPerByte, + *baseReg, nullptr, &offsetOpnd, firstArgInfo.sym); } else { - AArch64OfstOperand &offsetOpnd = aarchCGFunc->CreateOfstOpnd(stOffset, k32BitSize); + OfstOperand &offsetOpnd = aarchCGFunc->CreateOfstOpnd(stOffset, k32BitSize); if (firstArgInfo.symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { offsetOpnd.SetVary(kUnAdjustVary); } - memOpnd = aarchCGFunc->GetMemoryPool()->New(AArch64MemOperand::kAddrModeBOi, - firstArgInfo.stkSize * kBitsPerByte, - *baseOpnd, nullptr, &offsetOpnd, firstArgInfo.sym); + memOpnd = aarchCGFunc->CreateMemOperand(MemOperand::kAddrModeBOi, + firstArgInfo.stkSize * kBitsPerByte, + *baseOpnd, nullptr, &offsetOpnd, firstArgInfo.sym); } Insn &pushInsn = aarchCGFunc->GetCG()->BuildInstruction(mOp, regOpnd, *regOpnd2, *memOpnd); if (aarchCGFunc->GetCG()->GenerateVerboseCG()) { @@ -223,18 +223,18 @@ void AArch64MoveRegArgs::GenerateStpInsn(const ArgInfo &firstArgInfo, const ArgI aarchCGFunc->GetCurBB()->AppendInsn(pushInsn); } -void AArch64MoveRegArgs::GenOneInsn(ArgInfo &argInfo, AArch64RegOperand &baseOpnd, uint32 stBitSize, AArch64reg dest, +void AArch64MoveRegArgs::GenOneInsn(ArgInfo &argInfo, RegOperand &baseOpnd, uint32 stBitSize, AArch64reg dest, int32 offset) { AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); MOperator mOp = aarchCGFunc->PickStInsn(stBitSize, argInfo.mirTy->GetPrimType()); RegOperand ®Opnd = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(dest, stBitSize, argInfo.regType); - AArch64OfstOperand &offsetOpnd = aarchCGFunc->CreateOfstOpnd(static_cast(offset), k32BitSize); + OfstOperand &offsetOpnd = aarchCGFunc->CreateOfstOpnd(static_cast(offset), k32BitSize); if (argInfo.symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { offsetOpnd.SetVary(kUnAdjustVary); } - MemOperand *memOpnd = aarchCGFunc->GetMemoryPool()->New(AArch64MemOperand::kAddrModeBOi, - stBitSize, baseOpnd, nullptr, &offsetOpnd, argInfo.sym); + MemOperand *memOpnd = aarchCGFunc->CreateMemOperand(MemOperand::kAddrModeBOi, + stBitSize, baseOpnd, nullptr, &offsetOpnd, argInfo.sym); Insn &insn = aarchCGFunc->GetCG()->BuildInstruction(mOp, regOpnd, *memOpnd); if (aarchCGFunc->GetCG()->GenerateVerboseCG()) { insn.SetComment(std::string("store param: ").append(argInfo.sym->GetName())); @@ -245,34 +245,34 @@ void AArch64MoveRegArgs::GenOneInsn(ArgInfo &argInfo, AArch64RegOperand &baseOpn void AArch64MoveRegArgs::GenerateStrInsn(ArgInfo &argInfo, AArch64reg reg2, uint32 numFpRegs, uint32 fpSize) { AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); int32 stOffset = aarchCGFunc->GetBaseOffset(*argInfo.symLoc); - AArch64RegOperand *baseOpnd = static_cast(aarchCGFunc->GetBaseReg(*argInfo.symLoc)); + RegOperand *baseOpnd = static_cast(aarchCGFunc->GetBaseReg(*argInfo.symLoc)); 
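The limit checks and the add-scratch-register fallback in GenerateStpInsn/GenerateStrInsn above guard the A64 immediate-offset encodings. A rough model of those ranges (the real checks live in IsPIMMOffsetOutOfRange and related helpers; this is per the A64 ISA, not Maple's API):

    #include <cstdint>

    // LDR/STR take an unsigned 12-bit immediate scaled by the access size;
    // LDP/STP take a signed 7-bit immediate scaled by the access size.
    bool StrOffsetFits(int64_t off, uint32_t accessBits) {
      int64_t scale = accessBits / 8;
      return off >= 0 && off % scale == 0 && (off / scale) <= 4095;
    }

    bool StpOffsetFits(int64_t off, uint32_t accessBits) {
      int64_t scale = accessBits / 8;
      return off % scale == 0 && (off / scale) >= -64 && (off / scale) <= 63;
    }

When an offset fails these checks, the code first emits add tmp, base, #excess and addresses the slot as [tmp, #rest]. The ConstraintOffsetToSafeRegion change later in this patch applies the same idea to misaligned offsets: for example, offset 770 is rebased by 1024, leaving the in-range remainder -254.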
RegOperand ®Opnd = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(argInfo.reg, argInfo.stkSize * kBitsPerByte, argInfo.regType); MemOperand *memOpnd = nullptr; - if (AArch64MemOperand::IsPIMMOffsetOutOfRange(stOffset, argInfo.symSize * kBitsPerByte) || + if (MemOperand::IsPIMMOffsetOutOfRange(stOffset, argInfo.symSize * kBitsPerByte) || (baseReg != nullptr && (lastSegment == argInfo.symLoc->GetMemSegment()))) { if (baseReg == nullptr || lastSegment != argInfo.symLoc->GetMemSegment()) { - AArch64ImmOperand &immOpnd = aarchCGFunc->CreateImmOperand(stOffset - argInfo.symLoc->GetOffset(), k64BitSize, - false); + ImmOperand &immOpnd = aarchCGFunc->CreateImmOperand(stOffset - argInfo.symLoc->GetOffset(), k64BitSize, + false); baseReg = &aarchCGFunc->CreateRegisterOperandOfType(kRegTyInt, k8ByteSize); lastSegment = argInfo.symLoc->GetMemSegment(); aarchCGFunc->SelectAdd(*baseReg, *baseOpnd, immOpnd, PTY_a64); } - AArch64OfstOperand &offsetOpnd = aarchCGFunc->CreateOfstOpnd(argInfo.symLoc->GetOffset(), k32BitSize); + OfstOperand &offsetOpnd = aarchCGFunc->CreateOfstOpnd(argInfo.symLoc->GetOffset(), k32BitSize); if (argInfo.symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { offsetOpnd.SetVary(kUnAdjustVary); } - memOpnd = aarchCGFunc->GetMemoryPool()->New(AArch64MemOperand::kAddrModeBOi, - argInfo.symSize * kBitsPerByte, *baseReg, - nullptr, &offsetOpnd, argInfo.sym); + memOpnd = aarchCGFunc->CreateMemOperand(MemOperand::kAddrModeBOi, + argInfo.symSize * kBitsPerByte, *baseReg, + nullptr, &offsetOpnd, argInfo.sym); } else { - AArch64OfstOperand &offsetOpnd = aarchCGFunc->CreateOfstOpnd(stOffset, k32BitSize); + OfstOperand &offsetOpnd = aarchCGFunc->CreateOfstOpnd(stOffset, k32BitSize); if (argInfo.symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { offsetOpnd.SetVary(kUnAdjustVary); } - memOpnd = aarchCGFunc->GetMemoryPool()->New(AArch64MemOperand::kAddrModeBOi, - argInfo.symSize * kBitsPerByte, *baseOpnd, - nullptr, &offsetOpnd, argInfo.sym); + memOpnd = aarchCGFunc->CreateMemOperand(MemOperand::kAddrModeBOi, + argInfo.symSize * kBitsPerByte, *baseOpnd, + nullptr, &offsetOpnd, argInfo.sym); } MOperator mOp = aarchCGFunc->PickStInsn(argInfo.symSize * kBitsPerByte, argInfo.mirTy->GetPrimType()); @@ -436,9 +436,9 @@ void AArch64MoveRegArgs::MoveVRegisterArgs() { uint32 start = 0; if (formalCount) { MIRFunction *func = const_cast(aarchCGFunc->GetBecommon().GetMIRModule().CurFunction()); - if (aarchCGFunc->GetBecommon().HasFuncReturnType(*func)) { - TyIdx idx = aarchCGFunc->GetBecommon().GetFuncReturnType(*func); - if (aarchCGFunc->GetBecommon().GetTypeSize(idx) <= k16BitSize) { + if (func->IsReturnStruct()) { + TyIdx tyIdx = func->GetFuncRetStructTyIdx(); + if (aarchCGFunc->GetBecommon().GetTypeSize(tyIdx) <= k16BitSize) { start = 1; } } diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_call_conv.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_call_conv.cpp index 8201883bd3442b32a070162c5c66949ab33fc28d..0be11cdb40dfb6cc1b6e063f216d3506b2bff5c8 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_call_conv.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_call_conv.cpp @@ -141,6 +141,7 @@ uint32 ProcessStructWhenClassifyAggregate(const BECommon &be, MIRStructType &str } } if (isF32 || isF64) { + CHECK_FATAL(numRegs <= classesLength, "ClassifyAggregate: num regs exceed limit"); for (uint32 i = 0; i < numRegs; ++i) { classes[i] = kAArch64FloatClass; } @@ -204,6 +205,30 @@ int32 ClassifyAggregate(const BECommon &be, MIRType &mirType, 
AArch64ArgumentCla
   }
 }
 
+/* external interface to look for pure float struct */
+uint32 AArch64CallConvImpl::FloatParamRegRequired(MIRStructType &structType, uint32 &fpSize) {
+  if (structType.GetSize() > k32ByteSize) {
+    return 0;
+  }
+  AArch64ArgumentClass classes[kMaxRegCount];
+  uint32 numRegs = ProcessStructWhenClassifyAggregate(beCommon, structType, classes, kMaxRegCount, fpSize);
+  if (numRegs == 0) {
+    return 0;
+  }
+
+  bool isPure = true;
+  for (uint32 i = 0; i < numRegs; ++i) {
+    if (classes[i] != kAArch64FloatClass) {
+      isPure = false;
+      break;
+    }
+  }
+  if (isPure) {
+    return numRegs;
+  }
+  return 0;
+}
+
 void AArch64CallConvImpl::InitCCLocInfo(CCLocInfo &pLoc) const {
   pLoc.reg0 = kRinvalid;
   pLoc.reg1 = kRinvalid;
@@ -278,8 +303,9 @@ int32 AArch64CallConvImpl::LocateNextParm(MIRType &mirType, CCLocInfo &pLoc, boo
   if (isFirst) {
     MIRFunction *func = tFunc != nullptr ? tFunc : const_cast<MIRFunction*>(beCommon.GetMIRModule().CurFunction());
-    if (beCommon.HasFuncReturnType(*func)) {
-      size_t size = beCommon.GetTypeSize(beCommon.GetFuncReturnType(*func));
+    if (func->IsReturnStruct()) {
+      TyIdx tyIdx = func->GetFuncRetStructTyIdx();
+      size_t size = beCommon.GetTypeSize(tyIdx);
       if (size == 0) {
         /* For return struct size 0 there is no return value. */
         return 0;
@@ -642,7 +668,7 @@ void AArch64CallConvImpl::InitReturnInfo(MIRType &retTy, CCLocInfo &ccLocInfo) {
       ccLocInfo.primTypeOfReg0 = PTY_i64;
     }
   } else {
-    ASSERT(ccLocInfo.regCount == kMaxRegCount, "reg count from ClassifyAggregate() should be 0, 1, or 2");
+    ASSERT(ccLocInfo.regCount <= k2ByteSize, "reg count from ClassifyAggregate() should be 0, 1, or 2");
     ASSERT(classes[0] == kAArch64IntegerClass, "error val :classes[0]");
     ASSERT(classes[1] == kAArch64IntegerClass, "error val :classes[1]");
     ccLocInfo.reg0 = AArch64Abi::intReturnRegs[0];
diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cg.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cg.cpp
index 694040de9999912c604073a7c4a29586472c181b..5db4f1265889c94b749e879775b5b697ef86771d 100644
--- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cg.cpp
+++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cg.cpp
@@ -19,6 +19,7 @@
 #include "alignment.h"
 
 namespace maplebe {
+#include "immvalid.def"
 #include "aarch64_opnd.def"
 #define DEFINE_MOP(...) {__VA_ARGS__},
 const AArch64MD AArch64CG::kMd[kMopLast] = {
@@ -321,6 +322,6 @@ Insn &AArch64CG::BuildPhiInsn(RegOperand &defOpnd, Operand &listParam) {
 }
 
 PhiOperand &AArch64CG::CreatePhiOperand(MemPool &mp, MapleAllocator &mAllocator) {
-  return *mp.New<AArch64PhiOperand>(mAllocator);
+  return *mp.New<PhiOperand>(mAllocator);
 }
 }  /* namespace maplebe */
diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
index 754c00e723c43cce7bd7556fefb0e742d028d731..43ced5ce52f02ce914493d7dc4d53c91edac44b7 100644
--- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
+++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved.
 *
 * OpenArkCompiler is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
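FloatParamRegRequired, added above, reports how many FP registers a "pure float" struct needs, i.e. an AAPCS64 homogeneous floating-point aggregate (HFA). A simplified standalone classifier; Maple's version works on the AArch64ArgumentClass array filled in by ProcessStructWhenClassifyAggregate, and this sketch omits the all-members-same-type requirement it inherits from that classification:

    #include <cstdint>

    enum class FieldClass { kFloat, kInt };

    // An HFA has one to four members, all floating-point; it is passed in
    // consecutive FP/SIMD registers instead of integer registers.
    uint32_t FloatRegsRequired(const FieldClass *fields, uint32_t n) {
      if (n == 0 || n > 4) {
        return 0;  // AAPCS64 caps an HFA at four members
      }
      for (uint32_t i = 0; i < n; ++i) {
        if (fields[i] != FieldClass::kFloat) {
          return 0;  // any integer-class member disqualifies the aggregate
        }
      }
      return n;  // one FP register per member
    }

The k32ByteSize gate in the real function corresponds to the worst case of four doubles.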
@@ -17,6 +17,7 @@ #include #include #include +#include #include "cfi.h" #include "mpl_logging.h" #include "rt.h" @@ -27,6 +28,7 @@ #include "metadata_layout.h" #include "emit.h" #include "simplify.h" +#include namespace maplebe { using namespace maple; @@ -147,6 +149,18 @@ MOperator PickLdStInsn(bool isLoad, uint32 bitSize, PrimType primType, AArch64is } } +bool IsBlkassignForPush(BlkassignoffNode &bNode) { + BaseNode *dest = bNode.Opnd(0); + bool spBased = false; + if (dest->GetOpCode() == OP_regread) { + RegreadNode &node = static_cast(*dest); + if (-node.GetRegIdx() == kSregSp) { + spBased = true; + } + } + return spBased; +} + RegOperand &AArch64CGFunc::GetOrCreateResOperand(const BaseNode &parent, PrimType primType) { RegOperand *resOpnd = nullptr; if (parent.GetOpCode() == OP_regassign) { @@ -154,8 +168,7 @@ RegOperand &AArch64CGFunc::GetOrCreateResOperand(const BaseNode &parent, PrimTyp PregIdx pregIdx = regAssignNode.GetRegIdx(); if (IsSpecialPseudoRegister(pregIdx)) { /* if it is one of special registers */ - ASSERT(-pregIdx != kSregRetval0, "the dest of RegAssign node must not be kSregRetval0"); - resOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx); + resOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, primType); } else { resOpnd = &GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx)); } @@ -248,8 +261,8 @@ void AArch64CGFunc::SelectLoadAcquire(Operand &dest, PrimType dtype, Operand &sr MOperator mOp = PickLdInsn(ssize, stype, memOrd); Operand *newSrc = &src; - auto &memOpnd = static_cast(src); - AArch64OfstOperand *immOpnd = memOpnd.GetOffsetImmediate(); + auto &memOpnd = static_cast(src); + OfstOperand *immOpnd = memOpnd.GetOffsetImmediate(); int32 offset = static_cast(immOpnd->GetOffsetValue()); RegOperand *origBaseReg = memOpnd.GetBaseRegister(); if (offset != 0) { @@ -308,8 +321,8 @@ void AArch64CGFunc::SelectStoreRelease(Operand &dest, PrimType dtype, Operand &s MOperator mOp = PickStInsn(dsize, stype, memOrd); Operand *newDest = &dest; - AArch64MemOperand *memOpnd = static_cast(&dest); - AArch64OfstOperand *immOpnd = memOpnd->GetOffsetImmediate(); + MemOperand *memOpnd = static_cast(&dest); + OfstOperand *immOpnd = memOpnd->GetOffsetImmediate(); int32 offset = static_cast(immOpnd->GetOffsetValue()); RegOperand *origBaseReg = memOpnd->GetBaseRegister(); if (offset != 0) { @@ -455,15 +468,15 @@ void AArch64CGFunc::SelectCopyImm(Operand &dest, ImmOperand &src, PrimType dtype if (maxLoopTime == 2) { /* copy lower 32 bits to higher 32 bits */ - AArch64ImmOperand &immOpnd = CreateImmOperand(k32BitSize, k8BitSize, false); - GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MPO_xbfirri6i6, dest, dest, immOpnd, immOpnd)); + ImmOperand &immOpnd = CreateImmOperand(k32BitSize, k8BitSize, false); + GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_xbfirri6i6, dest, dest, immOpnd, immOpnd)); } } } std::string AArch64CGFunc::GenerateMemOpndVerbose(const Operand &src) { ASSERT(src.GetKind() == Operand::kOpdMem, "Just checking"); - const MIRSymbol *symSecond = static_cast(&src)->GetSymbol(); + const MIRSymbol *symSecond = static_cast(&src)->GetSymbol(); if (symSecond != nullptr) { std::string key; MIRStorageClass sc = symSecond->GetStorageClass(); @@ -482,7 +495,7 @@ std::string AArch64CGFunc::GenerateMemOpndVerbose(const Operand &src) { void AArch64CGFunc::SelectCopyMemOpnd(Operand &dest, PrimType dtype, uint32 dsize, Operand &src, PrimType stype) { AArch64isa::MemoryOrdering memOrd = AArch64isa::kMoNone; - const MIRSymbol *sym = 
static_cast(&src)->GetSymbol(); + const MIRSymbol *sym = static_cast(&src)->GetSymbol(); if ((sym != nullptr) && (sym->GetStorageClass() == kScGlobal) && sym->GetAttr(ATTR_memory_order_acquire)) { memOrd = AArch64isa::kMoAcquire; } @@ -615,8 +628,8 @@ void AArch64CGFunc::SplitMovImmOpndInstruction(int64 immVal, RegOperand &destReg if (maxLoopTime == 2) { /* copy lower 32 bits to higher 32 bits */ - AArch64ImmOperand &immOpnd = CreateImmOperand(k32BitSize, k8BitSize, false); - Insn &insn = GetCG()->BuildInstruction(MPO_xbfirri6i6, destReg, destReg, immOpnd, immOpnd); + ImmOperand &immOpnd = CreateImmOperand(k32BitSize, k8BitSize, false); + Insn &insn = GetCG()->BuildInstruction(MOP_xbfirri6i6, destReg, destReg, immOpnd, immOpnd); if (curInsn != nullptr) { bb->InsertInsnBefore(*curInsn, insn); } else { @@ -635,7 +648,7 @@ void AArch64CGFunc::SelectCopyRegOpnd(Operand &dest, PrimType dtype, Operand::Op return; } AArch64isa::MemoryOrdering memOrd = AArch64isa::kMoNone; - const MIRSymbol *sym = static_cast(&dest)->GetSymbol(); + const MIRSymbol *sym = static_cast(&dest)->GetSymbol(); if ((sym != nullptr) && (sym->GetStorageClass() == kScGlobal) && sym->GetAttr(ATTR_memory_order_release)) { memOrd = AArch64isa::kMoRelease; } @@ -652,9 +665,9 @@ void AArch64CGFunc::SelectCopyRegOpnd(Operand &dest, PrimType dtype, Operand::Op return; } - AArch64MemOperand *memOpnd = static_cast(&dest); + MemOperand *memOpnd = static_cast(&dest); ASSERT(memOpnd != nullptr, "memOpnd should not be nullptr"); - if (memOpnd->GetAddrMode() == AArch64MemOperand::kAddrModeLo12Li) { + if (memOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li) { GetCurBB()->AppendInsn(GetCG()->BuildInstruction(strMop, src, dest)); return; } @@ -691,7 +704,7 @@ void AArch64CGFunc::SelectCopyRegOpnd(Operand &dest, PrimType dtype, Operand::Op GetCurBB()->AppendInsn(GetCG()->BuildInstruction(mopMov, reg, *immOpnd)); MOperator mopAdd = MOP_xaddrrr; MemOperand &newDest = - GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, GetPrimTypeBitSize(dtype), memOpnd->GetBaseRegister(), + GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, GetPrimTypeBitSize(dtype), memOpnd->GetBaseRegister(), nullptr, &GetOrCreateOfstOpnd(0, k32BitSize), nullptr); Insn &insn1 = GetCG()->BuildInstruction(strMop, src, newDest); Insn &insn2 = GetCG()->BuildInstruction(mopAdd, *newDest.GetBaseRegister(), @@ -729,9 +742,9 @@ void AArch64CGFunc::SelectCopy(Operand &dest, PrimType dtype, Operand &src, Prim break; case Operand::kOpdFPZeroImmediate: GetCurBB()->AppendInsn(GetCG()->BuildInstruction((dsize == k32BitSize) ? MOP_xvmovsr : MOP_xvmovdr, - dest, AArch64RegOperand::GetZeroRegister(dsize))); + dest, GetZeroOpnd(dsize))); break; - case Operand::kOpdRegister: + case Operand::kOpdRegister: { if (opnd0Type == Operand::kOpdRegister && IsPrimitiveVector(stype)) { /* check vector reg to vector reg move */ CHECK_FATAL(IsPrimitiveVector(dtype), "invalid vectreg to vectreg move"); @@ -744,8 +757,14 @@ void AArch64CGFunc::SelectCopy(Operand &dest, PrimType dtype, Operand &src, Prim GetCurBB()->AppendInsn(*insn); break; } + RegOperand &desReg = static_cast(dest); + RegOperand &srcReg = static_cast(src); + if (desReg.GetRegisterNumber() == srcReg.GetRegisterNumber()) { + break; + } SelectCopyRegOpnd(dest, dtype, opnd0Type, dsize, src, stype); break; + } default: CHECK_FATAL(false, "NYI"); } @@ -771,7 +790,7 @@ RegOperand &AArch64CGFunc::SelectCopy(Operand &src, PrimType stype, PrimType dty * postfix), if UsedStpSubPairForCallFrameAllocation(), we don't need to * adjust the offsets. 
*/ -bool AArch64CGFunc::IsImmediateOffsetOutOfRange(const AArch64MemOperand &memOpnd, uint32 bitLen) { +bool AArch64CGFunc::IsImmediateOffsetOutOfRange(const MemOperand &memOpnd, uint32 bitLen) { ASSERT(bitLen >= k8BitSize, "bitlen error"); ASSERT(bitLen <= k128BitSize, "bitlen error"); @@ -780,14 +799,14 @@ bool AArch64CGFunc::IsImmediateOffsetOutOfRange(const AArch64MemOperand &memOpnd } ASSERT((bitLen & (bitLen - 1)) == 0, "bitlen error"); - AArch64MemOperand::AArch64AddressingMode mode = memOpnd.GetAddrMode(); - if ((mode == AArch64MemOperand::kAddrModeBOi) && memOpnd.IsIntactIndexed()) { + MemOperand::AArch64AddressingMode mode = memOpnd.GetAddrMode(); + if ((mode == MemOperand::kAddrModeBOi) && memOpnd.IsIntactIndexed()) { int32 offsetValue = static_cast(memOpnd.GetOffsetImmediate()->GetOffsetValue()); if (memOpnd.GetOffsetImmediate()->GetVary() == kUnAdjustVary) { offsetValue += static_cast(static_cast(GetMemlayout())->RealStackFrameSize() + 0xff); } offsetValue += 2 * kIntregBytelen; /* Refer to the above comment */ - return AArch64MemOperand::IsPIMMOffsetOutOfRange(offsetValue, bitLen); + return MemOperand::IsPIMMOffsetOutOfRange(offsetValue, bitLen); } else { return false; } @@ -795,24 +814,24 @@ bool AArch64CGFunc::IsImmediateOffsetOutOfRange(const AArch64MemOperand &memOpnd bool AArch64CGFunc::IsOperandImmValid(MOperator mOp, Operand *o, uint32 opndIdx) { const AArch64MD *md = &AArch64CG::kMd[mOp]; - auto *opndProp = static_cast(md->operand[opndIdx]); + auto *opndProp = md->operand[opndIdx]; if (!opndProp->IsContainImm()) { return true; } Operand::OperandType opndTy = opndProp->GetOperandType(); if (opndTy == Operand::kOpdMem) { - auto *memOpnd = static_cast(o); - if (memOpnd->GetAddrMode() == AArch64MemOperand::kAddrModeBOrX) { + auto *memOpnd = static_cast(o); + if (memOpnd->GetAddrMode() == MemOperand::kAddrModeBOrX) { return true; } if (md->IsLoadStorePair() || - (memOpnd->GetAddrMode() == AArch64MemOperand::kAddrModeBOi && memOpnd->IsIntactIndexed())) { + (memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi && memOpnd->IsIntactIndexed())) { int64 offsetValue = memOpnd->GetOffsetImmediate()->GetOffsetValue(); if (memOpnd->GetOffsetImmediate()->GetVary() == kUnAdjustVary) { offsetValue += static_cast(GetMemlayout())->RealStackFrameSize() + 0xffL; } - return static_cast(opndProp)->IsValidImmOpnd(offsetValue); - } else if (memOpnd->GetAddrMode() == AArch64MemOperand::kAddrModeLo12Li) { + return static_cast(opndProp)->IsValidImmOpnd(offsetValue); + } else if (memOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li) { int32 offsetValue = static_cast(memOpnd->GetOffsetImmediate()->GetOffsetValue()); return offsetValue == 0; } else { @@ -821,23 +840,23 @@ bool AArch64CGFunc::IsOperandImmValid(MOperator mOp, Operand *o, uint32 opndIdx) return (offsetValue <= static_cast(k256BitSize) && offsetValue >= kNegative256BitSize); } } else if (opndTy == Operand::kOpdImmediate) { - return static_cast(opndProp)->IsValidImmOpnd(static_cast(o)->GetValue()); + return static_cast(opndProp)->IsValidImmOpnd(static_cast(o)->GetValue()); } else { CHECK_FATAL(false, "This Operand does not contain immediate"); } return true; } -AArch64MemOperand &AArch64CGFunc::CreateReplacementMemOperand(uint32 bitLen, - RegOperand &baseReg, int64 offset) { - return static_cast(CreateMemOpnd(baseReg, offset, bitLen)); +MemOperand &AArch64CGFunc::CreateReplacementMemOperand(uint32 bitLen, + RegOperand &baseReg, int64 offset) { + return CreateMemOpnd(baseReg, offset, bitLen); } -bool 
AArch64CGFunc::CheckIfSplitOffsetWithAdd(const AArch64MemOperand &memOpnd, uint32 bitLen) {
-  if (memOpnd.GetAddrMode() != AArch64MemOperand::kAddrModeBOi || !memOpnd.IsIntactIndexed()) {
+bool AArch64CGFunc::CheckIfSplitOffsetWithAdd(const MemOperand &memOpnd, uint32 bitLen) {
+  if (memOpnd.GetAddrMode() != MemOperand::kAddrModeBOi || !memOpnd.IsIntactIndexed()) {
     return false;
   }
-  AArch64OfstOperand *ofstOpnd = memOpnd.GetOffsetImmediate();
+  OfstOperand *ofstOpnd = memOpnd.GetOffsetImmediate();
   int32 opndVal = static_cast<int32>(ofstOpnd->GetOffsetValue());
   int32 maxPimm = memOpnd.GetMaxPIMM(bitLen);
   int32 q0 = opndVal / maxPimm;
@@ -861,12 +880,39 @@ RegOperand *AArch64CGFunc::GetBaseRegForSplit(uint32 baseRegNum) {
   return resOpnd;
 }
 
-AArch64MemOperand &AArch64CGFunc::SplitOffsetWithAddInstruction(const AArch64MemOperand &memOpnd, uint32 bitLen,
-                                                                uint32 baseRegNum, bool isDest,
-                                                                Insn *insn, bool forPair) {
-  ASSERT((memOpnd.GetAddrMode() == AArch64MemOperand::kAddrModeBOi), "expect kAddrModeBOi memOpnd");
+/*
+ * When the immediate offset of a str/ldr goes beyond 256, it must be aligned to the register byte size.
+ * Here we split the offset into (512 * n) plus/minus a new offset when it is misaligned, to make sure
+ * that the new offset always stays within 256.
+ */
+MemOperand &AArch64CGFunc::ConstraintOffsetToSafeRegion(uint32 bitLen, MemOperand &memOpnd) {
+  auto it = hashMemOpndTable.find(memOpnd);
+  if (it != hashMemOpndTable.end()) {
+    hashMemOpndTable.erase(memOpnd);
+  }
+  int32 offsetValue = static_cast<int32>(memOpnd.GetOffsetImmediate()->GetOffsetValue());
+  int32 multiplier = (offsetValue / k512BitSize) + (offsetValue % k512BitSize > k256BitSize);
+  int32 addMount = multiplier * k512BitSize;
+  int32 newOffset = offsetValue - addMount;
+  RegOperand *baseReg = memOpnd.GetBaseRegister();
+  ImmOperand &immAddMount = CreateImmOperand(addMount, k64BitSize, true);
+  if (memOpnd.GetOffsetImmediate()->GetVary() == kUnAdjustVary) {
+    immAddMount.SetVary(kUnAdjustVary);
+  }
+
+  RegOperand *resOpnd = GetBaseRegForSplit(kRinvalid);
+  SelectAdd(*resOpnd, *baseReg, immAddMount, PTY_i64);
+  MemOperand &newMemOpnd = CreateReplacementMemOperand(bitLen, *resOpnd, newOffset);
+  newMemOpnd.SetStackMem(memOpnd.IsStackMem());
+  return newMemOpnd;
+}
+
+MemOperand &AArch64CGFunc::SplitOffsetWithAddInstruction(const MemOperand &memOpnd, uint32 bitLen,
+                                                         uint32 baseRegNum, bool isDest,
+                                                         Insn *insn, bool forPair) {
+  ASSERT((memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi), "expect kAddrModeBOi memOpnd");
   ASSERT(memOpnd.IsIntactIndexed(), "expect intactIndexed memOpnd");
-  AArch64OfstOperand *ofstOpnd = memOpnd.GetOffsetImmediate();
+  OfstOperand *ofstOpnd = memOpnd.GetOffsetImmediate();
   int64 opndVal = ofstOpnd->GetOffsetValue();
 
   auto it = hashMemOpndTable.find(memOpnd);
@@ -884,16 +930,16 @@ AArch64MemOperand &AArch64CGFunc::SplitOffsetWithAddInstruction(const AArch64Mem
   */
   int32 maxPimm = 0;
   if (!forPair) {
-    maxPimm = AArch64MemOperand::GetMaxPIMM(bitLen);
+    maxPimm = MemOperand::GetMaxPIMM(bitLen);
   } else {
-    maxPimm = AArch64MemOperand::GetMaxPairPIMM(bitLen);
+    maxPimm = MemOperand::GetMaxPairPIMM(bitLen);
   }
   ASSERT(maxPimm != 0, "get max pimm failed");
 
   int64 q0 = opndVal / maxPimm + (opndVal < 0 ?
-1 : 0); int64 addend = q0 * maxPimm; int64 r0 = opndVal - addend; - int64 alignment = AArch64MemOperand::GetImmediateOffsetAlignment(bitLen); + int64 alignment = MemOperand::GetImmediateOffsetAlignment(bitLen); int64 q1 = static_cast(static_cast(r0) >> static_cast(alignment)); int64 r1 = static_cast(static_cast(r0) & ((1u << static_cast(alignment)) - 1)); int64 remained = static_cast(static_cast(q1) << static_cast(alignment)); @@ -904,7 +950,7 @@ AArch64MemOperand &AArch64CGFunc::SplitOffsetWithAddInstruction(const AArch64Mem suffixClear = 0xff; } int64 remainedTmp = remained + (addend & suffixClear); - if (!AArch64MemOperand::IsPIMMOffsetOutOfRange(static_cast(remainedTmp), bitLen) && + if (!MemOperand::IsPIMMOffsetOutOfRange(static_cast(remainedTmp), bitLen) && ((static_cast(remainedTmp) & ((1u << static_cast(alignment)) - 1)) == 0)) { remained = remainedTmp; addend = (addend & ~suffixClear); @@ -924,7 +970,7 @@ AArch64MemOperand &AArch64CGFunc::SplitOffsetWithAddInstruction(const AArch64Mem } else { SelectAddAfterInsn(*resOpnd, *origBaseReg, immAddend, PTY_i64, isDest, *insn); } - AArch64MemOperand &newMemOpnd = CreateReplacementMemOperand(bitLen, *resOpnd, remained); + MemOperand &newMemOpnd = CreateReplacementMemOperand(bitLen, *resOpnd, remained); newMemOpnd.SetStackMem(memOpnd.IsStackMem()); return newMemOpnd; } @@ -942,21 +988,21 @@ void AArch64CGFunc::SelectDassign(DassignNode &stmt, Operand &opnd0) { */ RegOperand *AArch64CGFunc::ExtractNewMemBase(const MemOperand &memOpnd) { const MIRSymbol *sym = memOpnd.GetSymbol(); - AArch64MemOperand::AArch64AddressingMode mode = static_cast(&memOpnd)->GetAddrMode(); - if (mode == AArch64MemOperand::kAddrModeLiteral) { + MemOperand::AArch64AddressingMode mode = memOpnd.GetAddrMode(); + if (mode == MemOperand::kAddrModeLiteral) { return nullptr; } RegOperand *baseOpnd = memOpnd.GetBaseRegister(); ASSERT(baseOpnd != nullptr, "nullptr check"); RegOperand &resultOpnd = CreateRegisterOperandOfType(baseOpnd->GetRegisterType(), baseOpnd->GetSize() / kBitsPerByte); bool is64Bits = (baseOpnd->GetSize() == k64BitSize); - if (mode == AArch64MemOperand::kAddrModeLo12Li) { + if (mode == MemOperand::kAddrModeLo12Li) { StImmOperand &stImm = CreateStImmOperand(*sym, 0, 0); Insn &addInsn = GetCG()->BuildInstruction(MOP_xadrpl12, resultOpnd, *baseOpnd, stImm); addInsn.SetComment("new add insn"); GetCurBB()->AppendInsn(addInsn); - } else if (mode == AArch64MemOperand::kAddrModeBOi) { - AArch64OfstOperand *offsetOpnd = static_cast(&memOpnd)->GetOffsetImmediate(); + } else if (mode == MemOperand::kAddrModeBOi) { + OfstOperand *offsetOpnd = memOpnd.GetOffsetImmediate(); if (offsetOpnd->GetOffsetValue() != 0) { MOperator mOp = is64Bits ? MOP_xaddrri12 : MOP_waddrri12; GetCurBB()->AppendInsn(GetCG()->BuildInstruction(mOp, resultOpnd, *baseOpnd, *offsetOpnd)); @@ -964,8 +1010,8 @@ RegOperand *AArch64CGFunc::ExtractNewMemBase(const MemOperand &memOpnd) { return baseOpnd; } } else { - CHECK_FATAL(mode == AArch64MemOperand::kAddrModeBOrX, "unexpect addressing mode."); - RegOperand *regOpnd = static_cast(&memOpnd)->GetOffsetRegister(); + CHECK_FATAL(mode == MemOperand::kAddrModeBOrX, "unexpect addressing mode."); + RegOperand *regOpnd = static_cast(&memOpnd)->GetIndexRegister(); MOperator mOp = is64Bits ? 
MOP_xaddrrr : MOP_waddrrr; GetCurBB()->AppendInsn(GetCG()->BuildInstruction(mOp, resultOpnd, *baseOpnd, *regOpnd)); } @@ -1015,9 +1061,8 @@ void AArch64CGFunc::SelectDassign(StIdx stIdx, FieldID fieldId, PrimType rhsPTyp } else { memOpnd = &GetOrCreateMemOpnd(*symbol, offset, dataSize); } - AArch64MemOperand &archMemOperand = *static_cast(memOpnd); - if ((memOpnd->GetMemVaryType() == kNotVary) && IsImmediateOffsetOutOfRange(archMemOperand, dataSize)) { - memOpnd = &SplitOffsetWithAddInstruction(archMemOperand, dataSize); + if ((memOpnd->GetMemVaryType() == kNotVary) && IsImmediateOffsetOutOfRange(*memOpnd, dataSize)) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dataSize); } /* In bpl mode, a func symbol's type is represented as a MIRFuncType instead of a MIRPtrType (pointing to @@ -1038,9 +1083,11 @@ void AArch64CGFunc::SelectDassign(StIdx stIdx, FieldID fieldId, PrimType rhsPTyp isVolStore = false; } } + + memOpnd = memOpnd->IsOffsetMisaligned(dataSize) ? &ConstraintOffsetToSafeRegion(dataSize, *memOpnd) : memOpnd; if (symbol->GetAsmAttr() != UStrIdx(0)) { std::string regDesp = GlobalTables::GetUStrTable().GetStringFromStrIdx(symbol->GetAsmAttr()); - AArch64RegOperand &specifiedOpnd = GetOrCreatePhysicalRegisterOperand(regDesp); + RegOperand &specifiedOpnd = GetOrCreatePhysicalRegisterOperand(regDesp); SelectCopy(specifiedOpnd, type->GetPrimType(), opnd0, rhsPType); } else if (memOrd == AArch64isa::kMoNone) { mOp = PickStInsn(GetPrimTypeBitSize(ptyp), ptyp); @@ -1063,12 +1110,12 @@ void AArch64CGFunc::SelectDassignoff(DassignoffNode &stmt, Operand &opnd0) { ((size == k64BitSize) ? MOP_xstr : MOP_undef)); CHECK_FATAL(mOp != MOP_undef, "illegal size for dassignoff"); MemOperand *memOpnd = &GetOrCreateMemOpnd(*symbol, offset, size); - AArch64MemOperand &archMemOperand = *static_cast(memOpnd); if ((memOpnd->GetMemVaryType() == kNotVary) && - (IsImmediateOffsetOutOfRange(archMemOperand, size) || (offset % 8 != 0))) { - memOpnd = &SplitOffsetWithAddInstruction(archMemOperand, size); + (IsImmediateOffsetOutOfRange(*memOpnd, size) || (offset % 8 != 0))) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, size); } Operand &stOpnd = LoadIntoRegister(opnd0, true, size, false); + memOpnd = memOpnd->IsOffsetMisaligned(size) ? 
&ConstraintOffsetToSafeRegion(size, *memOpnd) : memOpnd; Insn &insn = GetCG()->BuildInstruction(mOp, stOpnd, *memOpnd); GetCurBB()->AppendInsn(insn); } @@ -1076,7 +1123,7 @@ void AArch64CGFunc::SelectDassignoff(DassignoffNode &stmt, Operand &opnd0) { void AArch64CGFunc::SelectAssertNull(UnaryStmtNode &stmt) { Operand *opnd0 = HandleExpr(stmt, *stmt.Opnd(0)); RegOperand &baseReg = LoadIntoRegister(*opnd0, PTY_a64); - auto &zwr = AArch64RegOperand::Get32bitZeroRegister(); + auto &zwr = GetZeroOpnd(k32BitSize); auto &mem = CreateMemOpnd(baseReg, 0, k32BitSize); Insn &loadRef = GetCG()->BuildInstruction(MOP_wldr, zwr, mem); loadRef.SetDoNotRemove(true); @@ -1087,10 +1134,10 @@ void AArch64CGFunc::SelectAssertNull(UnaryStmtNode &stmt) { } void AArch64CGFunc::SelectAbort() { - AArch64RegOperand &inOpnd = GetOrCreatePhysicalRegisterOperand(R16, k64BitSize, kRegTyInt); + RegOperand &inOpnd = GetOrCreatePhysicalRegisterOperand(R16, k64BitSize, kRegTyInt); auto &mem = CreateMemOpnd(inOpnd, 0, k64BitSize); Insn &movXzr = GetCG()->BuildInstruction(MOP_xmovri64, inOpnd, CreateImmOperand(0, k64BitSize,false)); - Insn &loadRef = GetCG()->BuildInstruction(MOP_wldr, AArch64RegOperand::Get64bitZeroRegister(), mem); + Insn &loadRef = GetCG()->BuildInstruction(MOP_wldr, GetZeroOpnd(k64BitSize), mem); loadRef.SetDoNotRemove(true); movXzr.SetDoNotRemove(true); GetCurBB()->AppendInsn(movXzr); @@ -1131,9 +1178,9 @@ void AArch64CGFunc::SelectAsm(AsmNode &node) { } } Operand *asmString = &CreateStringOperand(node.asmString); - AArch64ListOperand *listInputOpnd = memPool->New(*GetFuncScopeAllocator()); - AArch64ListOperand *listOutputOpnd = memPool->New(*GetFuncScopeAllocator()); - AArch64ListOperand *listClobber = memPool->New(*GetFuncScopeAllocator()); + ListOperand *listInputOpnd = CreateListOpnd(*GetFuncScopeAllocator()); + ListOperand *listOutputOpnd = CreateListOpnd(*GetFuncScopeAllocator()); + ListOperand *listClobber = CreateListOpnd(*GetFuncScopeAllocator()); ListConstraintOperand *listInConstraint = memPool->New(*GetFuncScopeAllocator()); ListConstraintOperand *listOutConstraint = memPool->New(*GetFuncScopeAllocator()); ListConstraintOperand *listInRegPrefix = memPool->New(*GetFuncScopeAllocator()); @@ -1188,7 +1235,7 @@ void AArch64CGFunc::SelectAsm(AsmNode &node) { int64 scale = mirIntConst->GetValue(); if (str.find("r") != std::string::npos) { bool isSigned = scale < 0; - AArch64ImmOperand &immOpnd = CreateImmOperand(scale, k64BitSize, isSigned); + ImmOperand &immOpnd = CreateImmOperand(scale, k64BitSize, isSigned); /* set default type as a 64 bit reg */ PrimType pty = isSigned ? 
PTY_i64 : PTY_u64; auto &tempReg = static_cast(CreateRegisterOperandOfType(pty)); @@ -1197,7 +1244,7 @@ void AArch64CGFunc::SelectAsm(AsmNode &node) { listInRegPrefix->stringList.push_back( static_cast(&CreateStringOperand(GetRegPrefixFromPrimType(pty, tempReg.GetSize(), str)))); } else { - AArch64RegOperand &inOpnd = GetOrCreatePhysicalRegisterOperand(RZR, k64BitSize, kRegTyInt); + RegOperand &inOpnd = GetOrCreatePhysicalRegisterOperand(RZR, k64BitSize, kRegTyInt); listInputOpnd->PushOpnd(static_cast(inOpnd)); listInRegPrefix->stringList.push_back( @@ -1358,16 +1405,13 @@ void AArch64CGFunc::SelectAsm(AsmNode &node) { CHECK_FATAL(0, "Inline asm clobber list not handled"); } } - return; } void AArch64CGFunc::SelectRegassign(RegassignNode &stmt, Operand &opnd0) { RegOperand *regOpnd = nullptr; PregIdx pregIdx = stmt.GetRegIdx(); if (IsSpecialPseudoRegister(pregIdx)) { - /* if it is one of special registers */ - ASSERT(-pregIdx != kSregRetval0, "the dest of RegAssign node must not be kSregRetval0"); - regOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx); + regOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, stmt.GetPrimType()); } else { regOpnd = &GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx)); } @@ -1380,6 +1424,13 @@ void AArch64CGFunc::SelectRegassign(RegassignNode &stmt, Operand &opnd0) { } ASSERT(regOpnd != nullptr, "null ptr check!"); SelectCopy(*regOpnd, dtype, opnd0, rhsType); + if (GetCG()->GenerateVerboseCG()) { + if (GetCurBB()->GetLastInsn()) { + GetCurBB()->GetLastInsn()->AppendComment(" regassign %" + std::to_string(pregIdx) + "; "); + } else if (GetCurBB()->GetPrev()->GetLastInsn()) { + GetCurBB()->GetPrev()->GetLastInsn()->AppendComment(" regassign %" + std::to_string(pregIdx) + "; "); + } + } if ((Globals::GetInstance()->GetOptimLevel() == 0) && (pregIdx >= 0)) { MemOperand *dest = GetPseudoRegisterSpillMemoryOperand(pregIdx); @@ -1390,8 +1441,8 @@ void AArch64CGFunc::SelectRegassign(RegassignNode &stmt, Operand &opnd0) { } } -AArch64MemOperand *AArch64CGFunc::FixLargeMemOpnd(MemOperand &memOpnd, uint32 align) { - AArch64MemOperand *lhsMemOpnd = static_cast(&memOpnd); +MemOperand *AArch64CGFunc::FixLargeMemOpnd(MemOperand &memOpnd, uint32 align) { + MemOperand *lhsMemOpnd = &memOpnd; if ((lhsMemOpnd->GetMemVaryType() == kNotVary) && IsImmediateOffsetOutOfRange(*lhsMemOpnd, align * kBitsPerByte)) { RegOperand *addReg = &CreateRegisterOperandOfType(PTY_i64); @@ -1400,8 +1451,8 @@ AArch64MemOperand *AArch64CGFunc::FixLargeMemOpnd(MemOperand &memOpnd, uint32 al return lhsMemOpnd; } -AArch64MemOperand *AArch64CGFunc::FixLargeMemOpnd(MOperator mOp, MemOperand &memOpnd, uint32 dSize, uint32 opndIdx) { - auto *a64MemOpnd = static_cast(&memOpnd); +MemOperand *AArch64CGFunc::FixLargeMemOpnd(MOperator mOp, MemOperand &memOpnd, uint32 dSize, uint32 opndIdx) { + auto *a64MemOpnd = &memOpnd; if ((a64MemOpnd->GetMemVaryType() == kNotVary) && !IsOperandImmValid(mOp, &memOpnd, opndIdx)) { if (opndIdx == kInsnSecondOpnd) { a64MemOpnd = &SplitOffsetWithAddInstruction(*a64MemOpnd, dSize); @@ -1415,8 +1466,8 @@ AArch64MemOperand *AArch64CGFunc::FixLargeMemOpnd(MOperator mOp, MemOperand &mem return a64MemOpnd; } -AArch64MemOperand *AArch64CGFunc::GenLargeAggFormalMemOpnd(const MIRSymbol &sym, uint32 align, int64 offset, - bool needLow12) { +MemOperand *AArch64CGFunc::GenLargeAggFormalMemOpnd(const MIRSymbol &sym, uint32 align, int64 offset, + bool needLow12) { MemOperand *memOpnd; if (sym.GetStorageClass() == kScFormal && GetBecommon().GetTypeSize(sym.GetTyIdx()) 
> k16ByteSize) { /* formal of size of greater than 16 is copied by the caller and the pointer to it is passed. */ @@ -1425,7 +1476,7 @@ AArch64MemOperand *AArch64CGFunc::GenLargeAggFormalMemOpnd(const MIRSymbol &sym, RegOperand *vreg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); Insn &ldInsn = GetCG()->BuildInstruction(PickLdInsn(k64BitSize, PTY_i64), *vreg, *memOpnd); GetCurBB()->AppendInsn(ldInsn); - memOpnd = &GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, k64BitSize, vreg, nullptr, + memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, vreg, nullptr, &GetOrCreateOfstOpnd(static_cast(offset), k32BitSize), nullptr); } else { memOpnd = &GetOrCreateMemOpnd(sym, offset, align * kBitsPerByte, false, needLow12); @@ -1448,14 +1499,14 @@ RegOperand *AArch64CGFunc::PrepareMemcpyParamOpnd(bool isLo12, const MIRSymbol & RegOperand *AArch64CGFunc::PrepareMemcpyParamOpnd(int64 offset, Operand &exprOpnd) { RegOperand *tgtAddr = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); - AArch64OfstOperand *ofstOpnd = &GetOrCreateOfstOpnd(static_cast(offset), k32BitSize); + OfstOperand *ofstOpnd = &GetOrCreateOfstOpnd(static_cast(offset), k32BitSize); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_xaddrri12, *tgtAddr, exprOpnd, *ofstOpnd)); return tgtAddr; } RegOperand *AArch64CGFunc::PrepareMemcpyParamOpnd(uint64 copySize) { RegOperand *vregMemcpySize = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); - AArch64ImmOperand *sizeOpnd = &CreateImmOperand(static_cast(copySize), k64BitSize, false); + ImmOperand *sizeOpnd = &CreateImmOperand(static_cast(copySize), k64BitSize, false); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_xmovri32, *vregMemcpySize, *sizeOpnd)); return vregMemcpySize; } @@ -1518,13 +1569,11 @@ void AArch64CGFunc::SelectAggDassign(DassignNode &stmt) { } RegOperand *rhsBaseReg = rhsBaseMemOpnd->GetBaseRegister(); int64 rhsOffsetVal = rhsBaseMemOpnd->GetOffsetOperand()->GetValue(); - AArch64MemOperand *lhsBaseMemOpnd = GenLargeAggFormalMemOpnd(*lhsSymbol, copySize, lhsOffset, true); + MemOperand *lhsBaseMemOpnd = GenLargeAggFormalMemOpnd(*lhsSymbol, copySize, lhsOffset, true); RegOperand *lhsBaseReg = lhsBaseMemOpnd->GetBaseRegister(); int64 lhsOffsetVal = lhsBaseMemOpnd->GetOffsetOperand()->GetValue(); - bool rhsIsLo12 = (static_cast(rhsBaseMemOpnd)->GetAddrMode() - == AArch64MemOperand::kAddrModeLo12Li); - bool lhsIsLo12 = (static_cast(lhsBaseMemOpnd)->GetAddrMode() - == AArch64MemOperand::kAddrModeLo12Li); + bool rhsIsLo12 = (rhsBaseMemOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li); + bool lhsIsLo12 = (lhsBaseMemOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li); if (lhsSize > kParmMemcpySize) { std::vector opndVec; RegOperand *regResult = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); @@ -1545,10 +1594,10 @@ void AArch64CGFunc::SelectAggDassign(DassignNode &stmt) { for (uint32 i = 0; i < (lhsSize / copySize); i++) { uint64 rhsBaseOffset = i * copySize + static_cast(rhsOffsetVal); uint64 lhsBaseOffset = i * copySize + static_cast(lhsOffsetVal); - AArch64MemOperand::AArch64AddressingMode addrMode = - rhsIsLo12 ? AArch64MemOperand::kAddrModeLo12Li : AArch64MemOperand::kAddrModeBOi; + MemOperand::AArch64AddressingMode addrMode = + rhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi; MIRSymbol *sym = rhsIsLo12 ? 
rhsSymbol : nullptr; - AArch64OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(rhsBaseOffset, k32BitSize); + OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(rhsBaseOffset, k32BitSize); /* generate the load */ MemOperand *rhsMemOpnd = &GetOrCreateMemOpnd(addrMode, copySize * k8BitSize, rhsBaseReg, nullptr, &rhsOfstOpnd, sym); @@ -1570,8 +1619,8 @@ void AArch64CGFunc::SelectAggDassign(DassignNode &stmt) { ASSERT(newLoadInsn != nullptr, "build load instruction failed in SelectAggDassign"); lastLdr = AggtStrLdrInsert(bothUnion, lastLdr, *newLoadInsn); /* generate the store */ - AArch64OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsBaseOffset, k32BitSize); - addrMode = lhsIsLo12 ? AArch64MemOperand::kAddrModeLo12Li : AArch64MemOperand::kAddrModeBOi; + OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsBaseOffset, k32BitSize); + addrMode = lhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi; sym = lhsIsLo12 ? lhsSymbol : nullptr; Insn *newStoreInsn = nullptr; MemOperand *lhsMemOpnd = @@ -1600,10 +1649,10 @@ void AArch64CGFunc::SelectAggDassign(DassignNode &stmt) { } /* generate the load */ MemOperand *rhsMemOpnd; - AArch64MemOperand::AArch64AddressingMode addrMode = - rhsIsLo12 ? AArch64MemOperand::kAddrModeLo12Li : AArch64MemOperand::kAddrModeBOi; + MemOperand::AArch64AddressingMode addrMode = + rhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi; MIRSymbol *sym = rhsIsLo12 ? rhsSymbol : nullptr; - AArch64OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(lhsSizeCovered + static_cast(rhsOffsetVal), + OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(lhsSizeCovered + static_cast(rhsOffsetVal), k32BitSize); rhsMemOpnd = &GetOrCreateMemOpnd(addrMode, newAlignUsed * k8BitSize, rhsBaseReg, nullptr, &rhsOfstOpnd, sym); rhsMemOpnd = FixLargeMemOpnd(*rhsMemOpnd, newAlignUsed); @@ -1612,9 +1661,9 @@ void AArch64CGFunc::SelectAggDassign(DassignNode &stmt) { MOperator mOp = PickLdInsn(newAlignUsed * k8BitSize, PTY_u32); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(mOp, result, *rhsMemOpnd)); /* generate the store */ - addrMode = lhsIsLo12 ? AArch64MemOperand::kAddrModeLo12Li : AArch64MemOperand::kAddrModeBOi; + addrMode = lhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi; sym = lhsIsLo12 ? 
lhsSymbol : nullptr; - AArch64OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsSizeCovered + static_cast(lhsOffsetVal), + OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsSizeCovered + static_cast(lhsOffsetVal), k32BitSize); MemOperand *lhsMemOpnd; lhsMemOpnd = &GetOrCreateMemOpnd(addrMode, newAlignUsed * k8BitSize, lhsBaseReg, nullptr, &lhsOfstOpnd, sym); @@ -1643,11 +1692,10 @@ void AArch64CGFunc::SelectAggDassign(DassignNode &stmt) { alignUsed = std::min(lhsAlign, rhsAlign); ASSERT(alignUsed != 0, "expect non-zero"); uint32 copySize = GetAggCopySize(rhsOffset, lhsOffset, alignUsed); - AArch64MemOperand *lhsBaseMemOpnd = GenLargeAggFormalMemOpnd(*lhsSymbol, copySize, lhsOffset, true); + MemOperand *lhsBaseMemOpnd = GenLargeAggFormalMemOpnd(*lhsSymbol, copySize, lhsOffset, true); RegOperand *lhsBaseReg = lhsBaseMemOpnd->GetBaseRegister(); int64 lhsOffsetVal = lhsBaseMemOpnd->GetOffsetOperand()->GetValue(); - bool lhsIsLo12 = (static_cast(lhsBaseMemOpnd)->GetAddrMode() - == AArch64MemOperand::kAddrModeLo12Li); + bool lhsIsLo12 = (lhsBaseMemOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li); if (lhsSize > kParmMemcpySize) { std::vector opndVec; RegOperand *regResult = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); @@ -1667,8 +1715,8 @@ void AArch64CGFunc::SelectAggDassign(DassignNode &stmt) { uint64 rhsBaseOffset = rhsOffset + i * copySize; uint64 lhsBaseOffset = static_cast(lhsOffsetVal) + i * copySize; /* generate the load */ - AArch64OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(rhsBaseOffset, k32BitSize); - MemOperand *rhsMemOpnd = &GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, copySize * k8BitSize, + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(rhsBaseOffset, k32BitSize); + MemOperand *rhsMemOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, copySize * k8BitSize, addrOpnd, nullptr, &ofstOpnd, nullptr); regno_t vRegNO = NewVReg(kRegTyInt, std::max(4u, copySize)); RegOperand &result = CreateVirtualRegisterOperand(vRegNO); @@ -1689,10 +1737,10 @@ void AArch64CGFunc::SelectAggDassign(DassignNode &stmt) { insn->MarkAsAccessRefField(isRefField); GetCurBB()->AppendInsn(*insn); /* generate the store */ - AArch64MemOperand::AArch64AddressingMode addrMode = - lhsIsLo12 ? AArch64MemOperand::kAddrModeLo12Li : AArch64MemOperand::kAddrModeBOi; + MemOperand::AArch64AddressingMode addrMode = + lhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi; MIRSymbol *sym = lhsIsLo12 ? 
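/* Sketch of the doPair decision visible in these copy loops: two adjacent
 * chunks can be fused into one ldp/stp only when a full second chunk still
 * remains. The 4-byte lower bound mirrors the copySize >= k4BitSize test. */
static bool CanUseLoadStorePairSketch(unsigned i, unsigned chunkBytes, unsigned totalBytes) {
  bool fullSecondChunk = (i + 1) < (totalBytes / chunkBytes);
  return chunkBytes >= 4 && fullSecondChunk;  /* wldp/wstp for 4, xldp/xstp for 8 */
}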
lhsSymbol : nullptr; - AArch64OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsBaseOffset, k32BitSize); + OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsBaseOffset, k32BitSize); MemOperand *lhsMemOpnd = &GetOrCreateMemOpnd(addrMode, copySize * k8BitSize, lhsBaseReg, nullptr, &lhsOfstOpnd, sym); if (doPair) { @@ -1716,8 +1764,8 @@ void AArch64CGFunc::SelectAggDassign(DassignNode &stmt) { continue; } /* generate the load */ - AArch64OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(rhsOffset + lhsSizeCovered, k32BitSize); - MemOperand *rhsMemOpnd = &GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, newAlignUsed * k8BitSize, + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(rhsOffset + lhsSizeCovered, k32BitSize); + MemOperand *rhsMemOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, newAlignUsed * k8BitSize, addrOpnd, nullptr, &ofstOpnd, nullptr); rhsMemOpnd = FixLargeMemOpnd(*rhsMemOpnd, newAlignUsed); RegOperand &result = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, std::max(4u, newAlignUsed))); @@ -1726,10 +1774,10 @@ void AArch64CGFunc::SelectAggDassign(DassignNode &stmt) { insn.MarkAsAccessRefField(isRefField); GetCurBB()->AppendInsn(insn); /* generate the store */ - AArch64MemOperand::AArch64AddressingMode addrMode = - lhsIsLo12 ? AArch64MemOperand::kAddrModeLo12Li : AArch64MemOperand::kAddrModeBOi; + MemOperand::AArch64AddressingMode addrMode = + lhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi; MIRSymbol *sym = lhsIsLo12 ? lhsSymbol : nullptr; - AArch64OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsSizeCovered + static_cast(lhsOffsetVal), + OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsSizeCovered + static_cast(lhsOffsetVal), k32BitSize); MemOperand *lhsMemOpnd; lhsMemOpnd = &GetOrCreateMemOpnd(addrMode, newAlignUsed * k8BitSize, lhsBaseReg, nullptr, &lhsOfstOpnd, sym); @@ -1845,7 +1893,9 @@ void AArch64CGFunc::SelectIassign(IassignNode &stmt) { } ASSERT(stmt.Opnd(0) != nullptr, "null ptr check"); MemOperand &memOpnd = CreateMemOpnd(destType, stmt, *stmt.Opnd(0), offset); - if (isVolStore && static_cast(memOpnd).GetAddrMode() == AArch64MemOperand::kAddrModeBOi) { + auto dataSize = GetPrimTypeBitSize(destType); + memOpnd = memOpnd.IsOffsetMisaligned(dataSize) ? ConstraintOffsetToSafeRegion(dataSize, memOpnd) : memOpnd; + if (isVolStore && memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi) { memOrd = AArch64isa::kMoRelease; isVolStore = false; } @@ -1863,26 +1913,275 @@ void AArch64CGFunc::SelectIassignoff(IassignoffNode &stmt) { PrimType destType = stmt.GetPrimType(); MemOperand &memOpnd = CreateMemOpnd(destType, stmt, *stmt.GetBOpnd(0), offset); + auto dataSize = GetPrimTypeBitSize(destType); + memOpnd = memOpnd.IsOffsetMisaligned(dataSize) ? 
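/* Why the new IsOffsetMisaligned check matters: the common AArch64 ldr/str
 * immediate form is unsigned and scaled by the access size, so the offset must
 * be non-negative, a multiple of the size, and fit in 12 scaled bits. A sketch
 * of that legality test (the constants are architectural): */
static bool OffsetFitsScaledImmediateSketch(long long offset, unsigned byteSize) {
  long long maxScaled = 4095LL * byteSize;  /* 12-bit unsigned immediate, scaled */
  return offset >= 0 && offset <= maxScaled && (offset % byteSize) == 0;
}
/* Offsets failing this are rebased onto a scratch register, which is what
 * ConstraintOffsetToSafeRegion does in this patch. */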
ConstraintOffsetToSafeRegion(dataSize, memOpnd) : memOpnd; Operand *valOpnd = HandleExpr(stmt, *stmt.GetBOpnd(1)); Operand &srcOpnd = LoadIntoRegister(*valOpnd, true, GetPrimTypeBitSize(destType)); SelectCopy(memOpnd, destType, srcOpnd, destType); } +void AArch64CGFunc::SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) { + int32 offset = stmt.GetOffset(); + PrimType primType = stmt.GetPrimType(); + uint32 bitlen = GetPrimTypeSize(primType) * kBitsPerByte; + + Operand &srcOpnd = LoadIntoRegister(opnd, primType); + MemOperand *memOpnd; + RegOperand *rfp = &GetOrCreatePhysicalRegisterOperand(RFP, k64BitSize, kRegTyInt); + if (offset < 0) { + RegOperand *baseOpnd = &CreateRegisterOperandOfType(PTY_a64); + ImmOperand &immOpnd = CreateImmOperand(offset, k32BitSize, true); + Insn &addInsn = GetCG()->BuildInstruction(MOP_xaddrri12, *baseOpnd, *rfp, immOpnd); + GetCurBB()->AppendInsn(addInsn); + OfstOperand *offsetOpnd = &CreateOfstOpnd(0, k32BitSize); + memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, bitlen, baseOpnd, nullptr, offsetOpnd, nullptr); + } else { + OfstOperand *offsetOpnd = &CreateOfstOpnd(offset, k32BitSize); + memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, bitlen, rfp, nullptr, offsetOpnd, nullptr); + } + memOpnd->SetStackMem(true); + MOperator mOp = PickStInsn(bitlen, primType); + Insn &store = GetCG()->BuildInstruction(mOp, srcOpnd, *memOpnd); + GetCurBB()->AppendInsn(store); +} + +void AArch64CGFunc::SelectIassignspoff(PrimType pTy, int32 offset, Operand &opnd) { + if (GetLmbcArgInfo() == nullptr) { + LmbcArgInfo *p = memPool->New(*GetFuncScopeAllocator()); + SetLmbcArgInfo(p); + } + uint32 byteLen = GetPrimTypeSize(pTy); + uint32 bitLen = byteLen * kBitsPerByte; + RegType regTy = GetRegTyFromPrimTy(pTy); + int32 curRegArgs = GetLmbcArgsInRegs(regTy); + if (curRegArgs < k8ByteSize) { + RegOperand *res = &CreateVirtualRegisterOperand(NewVReg(regTy, byteLen)); + SelectCopy(*res, pTy, opnd, pTy); + SetLmbcArgInfo(res, pTy, offset, 1); + } + else { + /* Move into allocated space */ + Operand &memOpd = CreateMemOpnd(RSP, offset, byteLen); + Operand ® = LoadIntoRegister(opnd, pTy); + GetCurBB()->AppendInsn(GetCG()->BuildInstruction(PickStInsn(bitLen, pTy), + reg, memOpd)); + } + IncLmbcArgsInRegs(regTy); /* num of args in registers */ + IncLmbcTotalArgs(); /* num of args */ +} + +/* Search for CALL/ICALL/ICALLPROTO node, must be called from a blkassignoff node */ +MIRType *AArch64CGFunc::GetAggTyFromCallSite(StmtNode *stmt) { + for ( ; stmt != nullptr; stmt = stmt->GetNext()) { + if (stmt->GetOpCode() == OP_call || stmt->GetOpCode() == OP_icallproto) { + break; + } + } + CHECK_FATAL(stmt && (stmt->GetOpCode() == OP_call || stmt->GetOpCode() == OP_icallproto), + "blkassign sp not followed by call"); + int32 nargs = GetLmbcTotalArgs(); + MIRType *ty = nullptr; + if (stmt->GetOpCode() == OP_call) { + CallNode *callNode = static_cast(stmt); + MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode->GetPUIdx()); + if (fn->IsReturnStruct()) { + ++nargs; + } + if (fn->GetFormalCount() > 0) { + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fn->GetNthParamTyIdx(nargs)); + } + // would return null if the actual parameter is bogus + } else if (stmt->GetOpCode() == OP_icallproto) { + IcallNode *icallproto = static_cast(stmt); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(icallproto->GetRetTyIdx()); + MIRFuncType *fType = static_cast(type); + MIRType *retType = 
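/* SelectIassignspoff above stages outgoing lmbc arguments: the first eight
 * values of a register class travel in registers (x0-x7 or v0-v7 under
 * AAPCS64), later ones are stored into the pre-allocated stack area; note the
 * code reuses k8ByteSize as that count. Compact sketch of the decision: */
static bool LmbcArgGoesInRegisterSketch(int argsAlreadyInRegsOfClass) {
  const int kMaxRegArgsPerClass = 8;  /* assumption: AAPCS64 register budget */
  return argsAlreadyInRegsOfClass < kMaxRegArgsPerClass;
}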
GlobalTables::GetTypeTable().GetTypeFromTyIdx(fType->GetRetTyIdx()); + if (retType->GetKind() == kTypeStruct) { + ++nargs; + } + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fType->GetNthParamType(nargs)); + } else { + CHECK_FATAL(stmt->GetOpCode() == OP_icallproto, + "GetAggTyFromCallSite:: unexpected call operator"); + } + return ty; +} + +/* return true if blkassignoff for return, false otherwise */ +bool AArch64CGFunc::LmbcSmallAggForRet(BlkassignoffNode &bNode, Operand *src) { + PrimType pTy; + uint32 size = 0; + AArch64reg regno = static_cast(static_cast(src)->GetRegisterNumber()); + MIRFunction *func = &GetFunction(); + + if (func->IsReturnStruct()) { + /* This blkassignoff is for struct return? */ + int32 loadSize; + int32 numRegs = 0; + if (bNode.GetNext()->GetOpCode() == OP_return) { + MIRStructType *ty = static_cast( + GlobalTables::GetTypeTable().GetTypeFromTyIdx(func->GetFuncRetStructTyIdx())); + uint32 fpregs = FloatParamRegRequired(ty, size); + if (fpregs > 0) { + /* pure floating point in agg */ + numRegs = fpregs; + pTy = (size == k4ByteSize) ? PTY_f32 : PTY_f64; + loadSize = GetPrimTypeSize(pTy) * kBitsPerByte; + for (uint32 i = 0; i < fpregs; i++) { + int s = (i == 0) ? 0 : (i * size); + MemOperand &mem = CreateMemOpnd(regno, s, size * kBitsPerByte); + AArch64reg reg = static_cast(V0 + i); + RegOperand *res = &GetOrCreatePhysicalRegisterOperand(reg, loadSize, kRegTyFloat); + SelectCopy(*res, pTy, mem, pTy); + } + } else { + /* int/float mixed */ + numRegs = 2; + pTy = PTY_i64; + size = k4ByteSize; + switch (bNode.blockSize) { + case 1: + pTy = PTY_i8; + break; + case 2: + pTy = PTY_i16; + break; + case 4: + pTy = PTY_i32; + break; + default: + size = k8ByteSize; /* pTy remains i64 */ + break; + } + loadSize = GetPrimTypeSize(pTy) * kBitsPerByte; + MemOperand &mem = CreateMemOpnd(regno, 0, size * kBitsPerByte); + RegOperand *res = &GetOrCreatePhysicalRegisterOperand(R0, loadSize, kRegTyInt); + SelectCopy(*res, pTy, mem, pTy); + if (bNode.blockSize > k8ByteSize) { + MemOperand &mem = CreateMemOpnd(regno, k8ByteSize, size * kBitsPerByte); + res = &GetOrCreatePhysicalRegisterOperand(R1, loadSize, kRegTyInt); + SelectCopy(*res, pTy, mem, pTy); + } + } + bool intReg = fpregs == 0; + for (uint32 i = 0; i < numRegs; i++) { + AArch64reg preg = static_cast((intReg ? R0 : V0) + i); + MOperator mop = intReg ? MOP_pseudo_ret_int: MOP_pseudo_ret_float; + RegOperand &dest = GetOrCreatePhysicalRegisterOperand(preg, loadSize, + intReg ? kRegTyInt : kRegTyFloat); + Insn &pseudo = GetCG()->BuildInstruction(mop, dest); + GetCurBB()->AppendInsn(pseudo); + } + return true; + } + } + return false; +} + +/* return true if blkassignoff for return, false otherwise */ +bool AArch64CGFunc::LmbcSmallAggForCall(BlkassignoffNode &bNode, Operand *src) { + AArch64reg regno = static_cast(static_cast(src)->GetRegisterNumber()); + if (IsBlkassignForPush(bNode)) { + PrimType pTy = PTY_i64; + MIRStructType *ty = static_cast(GetAggTyFromCallSite(&bNode)); + uint32 size = 0; + uint32 fpregs = ty ? FloatParamRegRequired(ty, size) : 0; /* fp size determined */ + if (fpregs > 0) { + /* pure floating point in agg */ + pTy = (size == k4ByteSize) ? PTY_f32 : PTY_f64; + for (uint32 i = 0; i < fpregs; i++) { + int s = (i == 0) ? 
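/* The fpregs > 0 path above is the AAPCS64 homogeneous floating-point
 * aggregate (HFA) case: all members share one FP type, so the value travels
 * in up to four SIMD registers; anything else falls back to integer registers
 * or memory. Illustrative C++ type examples of the classification: */
struct HfaTwoDoubles { double x, y; };      /* HFA: 2 x f64 -> v0, v1 */
struct HfaFourFloats { float a, b, c, d; }; /* HFA: 4 x f32 -> v0..v3 */
struct MixedAgg { double x; long i; };      /* not an HFA -> integer regs/memory */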
0 : (i * size); + MemOperand &mem = CreateMemOpnd(regno, s, size * kBitsPerByte); + RegOperand *res = &CreateVirtualRegisterOperand(NewVReg(kRegTyFloat, size)); + SelectCopy(*res, pTy, mem, pTy); + SetLmbcArgInfo(res, pTy, bNode.offset + s, fpregs); + IncLmbcArgsInRegs(kRegTyFloat); + } + IncLmbcTotalArgs(); + return true; + } else if (bNode.blockSize <= k16ByteSize) { + /* integer/mixed types in register(s) */ + size = k4ByteSize; + switch (bNode.blockSize) { + case 1: + pTy = PTY_i8; + break; + case 2: + pTy = PTY_i16; + break; + case 4: + pTy = PTY_i32; + break; + default: + size = k8ByteSize; /* pTy remains i64 */ + break; + } + MemOperand &mem = CreateMemOpnd(regno, 0, size * kBitsPerByte); + RegOperand *res = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, size)); + SelectCopy(*res, pTy, mem, pTy); + SetLmbcArgInfo(res, pTy, bNode.offset, bNode.blockSize > k8ByteSize ? 2 : 1); + IncLmbcArgsInRegs(kRegTyInt); + if (bNode.blockSize > k8ByteSize) { + MemOperand &mem = CreateMemOpnd(regno, k8ByteSize, size * kBitsPerByte); + RegOperand *res = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, size)); + SelectCopy(*res, pTy, mem, pTy); + SetLmbcArgInfo(res, pTy, bNode.offset + k8ByteSize, 2); + IncLmbcArgsInRegs(kRegTyInt); + } + IncLmbcTotalArgs(); + return true; + } + } + return false; +} + +/* If the blkassignoff is for an argument, this function loads the agg arguments into + virtual registers, disregarding whether there are sufficient physical call + registers. Arguments > 16 bytes are copied to preset space and the pointer + result is loaded into a virtual register. + If the blkassignoff is not for an argument, this function simply emits a memcpy */ +void AArch64CGFunc::SelectBlkassignoff(BlkassignoffNode &bNode, Operand *src) +{ + CHECK_FATAL(src->GetKind() == Operand::kOpdRegister, "blkassign src type not in register"); + if (GetLmbcArgInfo() == nullptr) { + LmbcArgInfo *p = memPool->New(*GetFuncScopeAllocator()); + SetLmbcArgInfo(p); + } + if (LmbcSmallAggForRet(bNode, src)) { + return; + } else if (LmbcSmallAggForCall(bNode, src)) { + return; + } + /* memcpy for agg assign OR large agg for arg/ret */ + Operand *dest = HandleExpr(bNode, *bNode.Opnd(0)); + RegOperand *regResult = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + std::vector opndVec; + opndVec.push_back(regResult); /* result */ + opndVec.push_back(PrepareMemcpyParamOpnd(bNode.offset, *dest));/* param 0 */ + opndVec.push_back(src); /* param 1 */ + opndVec.push_back(PrepareMemcpyParamOpnd(bNode.blockSize));/* param 2 */ + SelectLibCall("memcpy", opndVec, PTY_a64, PTY_a64); + if (IsBlkassignForPush(bNode)) { + SetLmbcArgInfo(static_cast(src), PTY_i64, (int32)bNode.offset, 1); + IncLmbcArgsInRegs(kRegTyInt); + IncLmbcTotalArgs(); + } +} + void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) { ASSERT(stmt.Opnd(0) != nullptr, "null ptr check"); Operand &lhsAddrOpnd = LoadIntoRegister(AddrOpnd, stmt.Opnd(0)->GetPrimType()); uint32 lhsOffset = 0; MIRType *stmtType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(stmt.GetTyIdx()); - MIRSymbol *addrSym = nullptr; - MIRPtrType *lhsPointerType = nullptr; - if (stmtType->GetPrimType() == PTY_agg) { - /* Move into regs */ - AddrofNode &addrofnode = static_cast(stmt.GetAddrExprBase()); - addrSym = mirModule.CurFunction()->GetLocalOrGlobalSymbol(addrofnode.GetStIdx()); - MIRType *addrty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(addrSym->GetTyIdx()); - lhsPointerType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(addrty->GetTypeIndex())); - } else { - lhsPointerType =
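/* The blockSize switch above picks the narrowest integer type covering the
 * block; anything past 4 bytes moves as 8-byte words (two of them for a
 * 9..16-byte block). Standalone mapping, with the enum as an illustrative
 * stand-in for PTY_i8..PTY_i64: */
enum BlockCopyTypeSketch { kSketchI8, kSketchI16, kSketchI32, kSketchI64 };
static BlockCopyTypeSketch BlockCopyType(unsigned blockSize) {
  switch (blockSize) {
    case 1: return kSketchI8;
    case 2: return kSketchI16;
    case 4: return kSketchI32;
    default: return kSketchI64;  /* 8- and 16-byte blocks use 8-byte words */
  }
}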
static_cast(stmtType); + MIRPtrType *lhsPointerType = static_cast(stmtType); + bool loadToRegs4StructReturn = false; + if (mirModule.CurFunction()->StructReturnedInRegs()) { + MIRSymbol *retSt = mirModule.CurFunction()->GetFormal(0); + if (stmt.Opnd(0)->GetOpCode() == OP_dread) { + DreadNode *dread = static_cast(stmt.Opnd(0)); + MIRSymbol *addrSym = mirModule.CurFunction()->GetLocalOrGlobalSymbol(dread->GetStIdx()); + loadToRegs4StructReturn = (retSt == addrSym); + } } MIRType *lhsType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(lhsPointerType->GetPointedTyIdx()); if (stmt.GetFieldID() != 0) { @@ -1922,23 +2221,17 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) { MIRSymbol *rhsSymbol = GetFunction().GetLocalOrGlobalSymbol(rhsDread->GetStIdx()); MIRType *rhsType = rhsSymbol->GetType(); if (rhsDread->GetFieldID() != 0) { - MIRStructType *structType = static_cast(rhsSymbol->GetType()); + MIRStructType *structType = static_cast(rhsSymbol->GetType()); ASSERT(structType != nullptr, "SelectAggIassign: non-zero fieldID for non-structure"); rhsType = structType->GetFieldType(rhsDread->GetFieldID()); rhsOffset = static_cast(GetBecommon().GetFieldOffset(*structType, rhsDread->GetFieldID()).first); } - if (stmtType->GetPrimType() == PTY_agg) { + if (loadToRegs4StructReturn) { /* generate move to regs for agg return */ CHECK_FATAL(lhsSize <= k16ByteSize, "SelectAggIassign: illegal struct size"); AArch64CallConvImpl parmlocator(GetBecommon()); CCLocInfo pLoc; - MIRSymbol *retSt = GetBecommon().GetMIRModule().CurFunction()->GetFormal(0); - if (retSt == addrSym) { - /* return value */ - parmlocator.LocateNextParm(*lhsType, pLoc, true, GetBecommon().GetMIRModule().CurFunction()); - } else { - parmlocator.InitCCLocInfo(pLoc); - } + parmlocator.LocateNextParm(*lhsType, pLoc, true, GetBecommon().GetMIRModule().CurFunction()); /* aggregates are 8 byte aligned. */ Operand *rhsmemopnd = nullptr; RegOperand *result[kFourRegister]; /* up to 2 int or 4 fp */ @@ -2031,8 +2324,7 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) { } RegOperand *rhsBaseReg = rhsBaseMemOpnd->GetBaseRegister(); int64 rhsOffsetVal = rhsBaseMemOpnd->GetOffsetOperand()->GetValue(); - bool rhsIsLo12 = (static_cast(rhsBaseMemOpnd)->GetAddrMode() - == AArch64MemOperand::kAddrModeLo12Li); + bool rhsIsLo12 = (rhsBaseMemOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li); if (lhsSize > kParmMemcpySize) { std::vector opndVec; RegOperand *regResult = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); @@ -2051,10 +2343,10 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) { for (uint32 i = 0; i < (lhsSize / copySize); ++i) { uint32 rhsBaseOffset = static_cast(rhsOffsetVal + i * copySize); uint32 lhsBaseOffset = lhsOffset + i * copySize; - AArch64MemOperand::AArch64AddressingMode addrMode = - rhsIsLo12 ? AArch64MemOperand::kAddrModeLo12Li : AArch64MemOperand::kAddrModeBOi; + MemOperand::AArch64AddressingMode addrMode = + rhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi; MIRSymbol *sym = rhsIsLo12 ? 
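/* loadToRegs4StructReturn above separates "store into the object being
 * returned in registers" from an ordinary aggregate store. In C terms: */
struct RetInRegs { long a, b; };   /* 16 bytes: returned in x0/x1 */
struct RetInMem { long a, b, c; }; /* 24 bytes: caller passes a hidden result pointer */
/* Only stores whose target is the returned RetInRegs object must become
 * register moves; stores into RetInMem stay plain memory writes. */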
rhsSymbol : nullptr; - AArch64OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(rhsBaseOffset, k32BitSize); + OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(rhsBaseOffset, k32BitSize); MemOperand *rhsMemOpnd = &GetOrCreateMemOpnd(addrMode, copySize * k8BitSize, rhsBaseReg, nullptr, &rhsOfstOpnd, sym); rhsMemOpnd = FixLargeMemOpnd(*rhsMemOpnd, copySize); @@ -2074,9 +2366,9 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) { GetCurBB()->AppendInsn(GetCG()->BuildInstruction(mOp, result, *rhsMemOpnd)); } /* generate the store */ - AArch64OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(lhsBaseOffset, k32BitSize); - MemOperand *lhsMemOpnd = &GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, copySize * k8BitSize, - static_cast(&lhsAddrOpnd), nullptr, &ofstOpnd, nullptr); + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(lhsBaseOffset, k32BitSize); + MemOperand *lhsMemOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, copySize * k8BitSize, + static_cast(&lhsAddrOpnd), nullptr, &ofstOpnd, nullptr); if (doPair) { MOperator mOpSTP = (copySize == k4BitSize) ? MOP_wstp : MOP_xstp; lhsMemOpnd = FixLargeMemOpnd(mOpSTP, *lhsMemOpnd, result.GetSize(), kInsnThirdOpnd); @@ -2097,10 +2389,10 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) { if ((lhsSizeCovered + newAlignUsed) > lhsSize) { continue; } - AArch64MemOperand::AArch64AddressingMode addrMode = - rhsIsLo12 ? AArch64MemOperand::kAddrModeLo12Li : AArch64MemOperand::kAddrModeBOi; + MemOperand::AArch64AddressingMode addrMode = + rhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi; MIRSymbol *sym = rhsIsLo12 ? rhsSymbol : nullptr; - AArch64OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(lhsSizeCovered + static_cast(rhsOffsetVal), + OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(lhsSizeCovered + static_cast(rhsOffsetVal), k32BitSize); MemOperand *rhsMemOpnd = &GetOrCreateMemOpnd(addrMode, newAlignUsed * k8BitSize, rhsBaseReg, nullptr, &rhsOfstOpnd, sym); @@ -2110,9 +2402,9 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) { rhsMemOpnd = FixLargeMemOpnd(mOp, *rhsMemOpnd, newAlignUsed * k8BitSize, kInsnSecondOpnd); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(mOp, result, *rhsMemOpnd)); /* generate the store */ - AArch64OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(lhsOffset + lhsSizeCovered, k32BitSize); - MemOperand &lhsMemOpnd = GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, newAlignUsed * k8BitSize, - static_cast(&lhsAddrOpnd), nullptr, &ofstOpnd, static_cast(nullptr)); + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(lhsOffset + lhsSizeCovered, k32BitSize); + MemOperand &lhsMemOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, newAlignUsed * k8BitSize, + static_cast(&lhsAddrOpnd), nullptr, &ofstOpnd, static_cast(nullptr)); mOp = PickStInsn(newAlignUsed * k8BitSize, PTY_u32); lhsMemOpnd = *FixLargeMemOpnd(mOp, lhsMemOpnd, newAlignUsed * k8BitSize, kInsnSecondOpnd); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(mOp, result, lhsMemOpnd)); @@ -2135,7 +2427,7 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) { rhsOffset = static_cast(GetBecommon().GetFieldOffset(*rhsStructType, rhsIread->GetFieldID()).first); isRefField = GetBecommon().IsRefField(*rhsStructType, rhsIread->GetFieldID()); } - if (stmtType->GetPrimType() == PTY_agg) { + if (loadToRegs4StructReturn) { /* generate move to regs. 
*/ CHECK_FATAL(lhsSize <= k16ByteSize, "SelectAggIassign: illegal struct size"); RegOperand *result[kTwoRegister]; /* maximum 16 bytes, 2 registers */ @@ -2147,8 +2439,8 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) { } uint32 numRegs = (lhsSize <= k8ByteSize) ? kOneRegister : kTwoRegister; for (uint32 i = 0; i < numRegs; i++) { - AArch64OfstOperand *rhsOffOpnd = &GetOrCreateOfstOpnd(rhsOffset + i * loadSize, loadSize * kBitsPerByte); - Operand &rhsmemopnd = GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, loadSize * kBitsPerByte, + OfstOperand *rhsOffOpnd = &GetOrCreateOfstOpnd(rhsOffset + i * loadSize, loadSize * kBitsPerByte); + Operand &rhsmemopnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, loadSize * kBitsPerByte, rhsAddrOpnd, nullptr, rhsOffOpnd, nullptr); result[i] = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, loadSize)); MOperator mop1 = PickLdInsn(loadSize * kBitsPerByte, PTY_u32); @@ -2193,9 +2485,9 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) { for (uint32 i = 0; i < (lhsSize / copySize); i++) { /* generate the load */ uint32 operandSize = copySize * k8BitSize; - AArch64OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(rhsOffset + i * copySize, k32BitSize); - MemOperand *rhsMemOpnd = &GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, operandSize, - static_cast(rhsAddrOpnd), nullptr, &rhsOfstOpnd, nullptr); + OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(rhsOffset + i * copySize, k32BitSize); + MemOperand *rhsMemOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, operandSize, + static_cast(rhsAddrOpnd), nullptr, &rhsOfstOpnd, nullptr); RegOperand &result = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, std::max(4u, copySize))); bool doPair = ((copySize >= k4BitSize) && ((i + 1) < (lhsSize / copySize))); Insn *insn = nullptr; @@ -2213,9 +2505,9 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) { insn->MarkAsAccessRefField(isRefField); GetCurBB()->AppendInsn(*insn); /* generate the store */ - AArch64OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsOffset + i * copySize, k32BitSize); - MemOperand *lhsMemOpnd = &GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, operandSize, - static_cast(&lhsAddrOpnd), nullptr, &lhsOfstOpnd, nullptr); + OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsOffset + i * copySize, k32BitSize); + MemOperand *lhsMemOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, operandSize, + static_cast(&lhsAddrOpnd), nullptr, &lhsOfstOpnd, nullptr); if (doPair) { MOperator mOpSTP = (copySize == k4BitSize) ? 
MOP_wstp : MOP_xstp; lhsMemOpnd = FixLargeMemOpnd(mOpSTP, *static_cast(lhsMemOpnd), operandSize, kInsnThirdOpnd); @@ -2237,10 +2529,10 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) { continue; } /* generate the load */ - AArch64OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(rhsOffset + lhsSizeCovered, k32BitSize); + OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(rhsOffset + lhsSizeCovered, k32BitSize); uint32 memOpndSize = newAlignUsed * k8BitSize; - MemOperand *rhsMemOpnd = &GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, memOpndSize, - static_cast(rhsAddrOpnd), nullptr, &rhsOfstOpnd, nullptr); + MemOperand *rhsMemOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, memOpndSize, + static_cast(rhsAddrOpnd), nullptr, &rhsOfstOpnd, nullptr); regno_t vRegNO = NewVReg(kRegTyInt, std::max(4u, newAlignUsed)); RegOperand &result = CreateVirtualRegisterOperand(vRegNO); MOperator mOpLD = PickLdInsn(memOpndSize, PTY_u32); @@ -2249,9 +2541,9 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) { insn.MarkAsAccessRefField(isRefField); GetCurBB()->AppendInsn(insn); /* generate the store */ - AArch64OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsOffset + lhsSizeCovered, k32BitSize); - MemOperand *lhsMemOpnd = &GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, memOpndSize, - static_cast(&lhsAddrOpnd), nullptr, &lhsOfstOpnd, nullptr); + OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsOffset + lhsSizeCovered, k32BitSize); + MemOperand *lhsMemOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, memOpndSize, + static_cast(&lhsAddrOpnd), nullptr, &lhsOfstOpnd, nullptr); MOperator mOpST = PickStInsn(memOpndSize, PTY_u32); lhsMemOpnd = FixLargeMemOpnd(mOpST, *lhsMemOpnd, memOpndSize, static_cast(kInsnSecondOpnd)); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(mOpST, result, *lhsMemOpnd)); @@ -2298,7 +2590,7 @@ Operand *AArch64CGFunc::SelectDread(const BaseNode &parent, DreadNode &expr) { if (parent.op == OP_eval) { if (symbol->GetAttr(ATTR_volatile)) { /* Need to generate loads for the upper parts of the struct. 
*/ - Operand &dest = AArch64RegOperand::GetZeroRegister(k64BitSize); + Operand &dest = GetZeroOpnd(k64BitSize); uint32 numLoads = static_cast(RoundUp(aggSize, k64BitSize) / k64BitSize); for (uint32 o = 0; o < numLoads; ++o) { if (parmCopy) { @@ -2306,8 +2598,8 @@ Operand *AArch64CGFunc::SelectDread(const BaseNode &parent, DreadNode &expr) { } else { memOpnd = &GetOrCreateMemOpnd(*symbol, offset + o * kSizeOfPtr, kSizeOfPtr); } - if (IsImmediateOffsetOutOfRange(*static_cast(memOpnd), kSizeOfPtr)) { - memOpnd = &SplitOffsetWithAddInstruction(*static_cast(memOpnd), kSizeOfPtr); + if (IsImmediateOffsetOutOfRange(*memOpnd, kSizeOfPtr)) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, kSizeOfPtr); } SelectCopy(dest, PTY_u64, *memOpnd, PTY_u64); } @@ -2326,17 +2618,18 @@ Operand *AArch64CGFunc::SelectDread(const BaseNode &parent, DreadNode &expr) { memOpnd = &GetOrCreateMemOpnd(*symbol, offset, dataSize); } if ((memOpnd->GetMemVaryType() == kNotVary) && - IsImmediateOffsetOutOfRange(*static_cast(memOpnd), dataSize)) { - memOpnd = &SplitOffsetWithAddInstruction(*static_cast(memOpnd), dataSize); + IsImmediateOffsetOutOfRange(*memOpnd, dataSize)) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dataSize); } /* a local register variable defined with a specified register */ if (symbol->GetAsmAttr() != UStrIdx(0)) { std::string regDesp = GlobalTables::GetUStrTable().GetStringFromStrIdx(symbol->GetAsmAttr()); - AArch64RegOperand &specifiedOpnd = GetOrCreatePhysicalRegisterOperand(regDesp); + RegOperand &specifiedOpnd = GetOrCreatePhysicalRegisterOperand(regDesp); return &specifiedOpnd; } PrimType resultType = expr.GetPrimType(); RegOperand &resOpnd = GetOrCreateResOperand(parent, symType); + memOpnd = memOpnd->IsOffsetMisaligned(dataSize) ? &ConstraintOffsetToSafeRegion(dataSize, *memOpnd) : memOpnd; SelectCopy(resOpnd, resultType, *memOpnd, symType); return &resOpnd; } @@ -2367,7 +2660,7 @@ void AArch64CGFunc::SelectAddrof(Operand &result, StImmOperand &stImm, FieldID f } AArch64SymbolAlloc *symLoc = static_cast(GetMemlayout()->GetSymAllocInfo(symbol->GetStIndex())); - AArch64ImmOperand *offset = nullptr; + ImmOperand *offset = nullptr; if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { offset = &CreateImmOperand(GetBaseOffset(*symLoc) + stImm.GetOffset(), k64BitSize, false, kUnAdjustVary); } else if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsRefLocals) { @@ -2401,13 +2694,16 @@ void AArch64CGFunc::SelectAddrof(Operand &result, StImmOperand &stImm, FieldID f comm.append(symbol->GetName()); insn->SetComment(comm); } + } else if (symbol->IsThreadLocal()) { + SelectAddrofThreadLocal(result, stImm); + return; } else { Operand *srcOpnd = &result; if (!IsAfterRegAlloc()) { // Create a new vreg/preg for the upper bits of the address - PregIdx pregIdx = GetFunction().GetPregTab()->CreatePreg(LOWERED_PTR_TYPE); + PregIdx pregIdx = GetFunction().GetPregTab()->CreatePreg(PTY_a64); MIRPreg *tmpPreg = GetFunction().GetPregTab()->PregFromPregIdx(pregIdx); - regno_t vRegNO = NewVReg(kRegTyInt, GetPrimTypeSize(LOWERED_PTR_TYPE)); + regno_t vRegNO = NewVReg(kRegTyInt, GetPrimTypeSize(PTY_a64)); RegOperand &tmpreg = GetOrCreateVirtualRegisterOperand(vRegNO); // Register this vreg mapping @@ -2426,15 +2722,15 @@ void AArch64CGFunc::SelectAddrof(Operand &result, StImmOperand &stImm, FieldID f } if (CGOptions::IsPIC() && symbol->NeedPIC()) { /* ldr x0, [x0, #:got_lo12:Ljava_2Flang_2FSystem_3B_7Cout] */ - AArch64OfstOperand &offset = CreateOfstOpnd(*stImm.GetSymbol(), 
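/* The surrounding block is the standard PIC sequence: adrp forms the page
 * address of the GOT slot and the ldr with :got_lo12: loads the symbol's real
 * address out of it. What the two instructions compute, in C++ terms: */
static void *LoadThroughGotSketch(void **gotSlot) {
  /* adrp xN, :got:sym             -> page base of the GOT slot  */
  /* ldr  xN, [xN, #:got_lo12:sym] -> address stored in the slot */
  return *gotSlot;
}
/* Any non-zero stImm offset is applied afterwards, which is why the patch
 * emits a separate SelectAdd when stImm.GetOffset() > 0. */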
stImm.GetOffset(), stImm.GetRelocs()); - AArch64MemOperand &memOpnd = GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, kSizeOfPtr * kBitsPerByte, - static_cast(srcOpnd), nullptr, &offset, nullptr); + OfstOperand &offset = CreateOfstOpnd(*stImm.GetSymbol(), stImm.GetOffset(), stImm.GetRelocs()); + MemOperand &memOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, kSizeOfPtr * kBitsPerByte, + static_cast(srcOpnd), nullptr, &offset, nullptr); GetCurBB()->AppendInsn( GetCG()->BuildInstruction(memOpnd.GetSize() == k64BitSize ? MOP_xldr : MOP_wldr, result, memOpnd)); if (stImm.GetOffset() > 0) { - AArch64ImmOperand &immOpnd = CreateImmOperand(stImm.GetOffset(), result.GetSize(), false); + ImmOperand &immOpnd = CreateImmOperand(stImm.GetOffset(), result.GetSize(), false); SelectAdd(result, result, immOpnd, PTY_u64); } } else { @@ -2443,10 +2739,10 @@ void AArch64CGFunc::SelectAddrof(Operand &result, StImmOperand &stImm, FieldID f } } -void AArch64CGFunc::SelectAddrof(Operand &result, AArch64MemOperand &memOpnd, FieldID field) { +void AArch64CGFunc::SelectAddrof(Operand &result, MemOperand &memOpnd, FieldID field) { const MIRSymbol *symbol = memOpnd.GetSymbol(); if (symbol->GetStorageClass() == kScAuto) { - auto *offsetOpnd = static_cast(memOpnd.GetOffsetImmediate()); + auto *offsetOpnd = static_cast(memOpnd.GetOffsetImmediate()); Operand &immOpnd = CreateImmOperand(offsetOpnd->GetOffsetValue(), PTY_u32, false); ASSERT(memOpnd.GetBaseRegister() != nullptr, "nullptr check"); SelectAdd(result, *memOpnd.GetBaseRegister(), immOpnd, PTY_u32); @@ -2474,18 +2770,23 @@ void AArch64CGFunc::SelectAddrof(Operand &result, AArch64MemOperand &memOpnd, Fi } } -Operand *AArch64CGFunc::SelectAddrof(AddrofNode &expr, const BaseNode &parent) { +Operand *AArch64CGFunc::SelectAddrof(AddrofNode &expr, const BaseNode &parent, bool isAddrofoff) { MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(expr.GetStIdx()); int32 offset = 0; - if (expr.GetFieldID() != 0) { - MIRStructType *structType = static_cast(symbol->GetType()); - /* with array of structs, it is possible to have nullptr */ - if (structType != nullptr) { - offset = GetBecommon().GetFieldOffset(*structType, expr.GetFieldID()).first; + AddrofoffNode &addrofoffExpr = static_cast(static_cast(expr)); + if (isAddrofoff) { + offset = addrofoffExpr.offset; + } else { + if (expr.GetFieldID() != 0) { + MIRStructType *structType = static_cast(symbol->GetType()); + /* with array of structs, it is possible to have nullptr */ + if (structType != nullptr) { + offset = GetBecommon().GetFieldOffset(*structType, expr.GetFieldID()).first; + } } } if ((symbol->GetStorageClass() == kScFormal) && (symbol->GetSKind() == kStVar) && - ((expr.GetFieldID() != 0) || + ((isAddrofoff == false && expr.GetFieldID() != 0) || (GetBecommon().GetTypeSize(symbol->GetType()->GetTypeIndex().GetIdx()) > k16ByteSize))) { /* * Struct param is copied on the stack by caller if struct size > 16. 
@@ -2499,9 +2800,9 @@ Operand *AArch64CGFunc::SelectAddrof(AddrofNode &expr, const BaseNode &parent) { isAggParamInReg = true; structAddr = stackAddr; } else { - AArch64OfstOperand *offopnd = &CreateOfstOpnd(0, k32BitSize); - AArch64MemOperand *mo = &GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, kSizeOfPtr * kBitsPerByte, - stackAddr, nullptr, offopnd, nullptr); + OfstOperand *offopnd = &CreateOfstOpnd(0, k32BitSize); + MemOperand *mo = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, kSizeOfPtr * kBitsPerByte, + stackAddr, nullptr, offopnd, nullptr); structAddr = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_xldr, *structAddr, *mo)); } @@ -2538,10 +2839,14 @@ Operand *AArch64CGFunc::SelectAddrof(AddrofNode &expr, const BaseNode &parent) { return &result; } - SelectAddrof(result, CreateStImmOperand(*symbol, offset, 0), expr.GetFieldID()); + SelectAddrof(result, CreateStImmOperand(*symbol, offset, 0), isAddrofoff ? 0 : expr.GetFieldID()); return &result; } +Operand *AArch64CGFunc::SelectAddrofoff(AddrofoffNode &expr, const BaseNode &parent) { + return SelectAddrof(static_cast(static_cast(expr)), parent, true); +} + Operand &AArch64CGFunc::SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) { uint32 instrSize = static_cast(expr.SizeOfInstr()); PrimType primType = (instrSize == k8ByteSize) ? PTY_u64 : @@ -2591,6 +2896,71 @@ Operand &AArch64CGFunc::SelectAddrofLabel(AddroflabelNode &expr, const BaseNode return dst; } +Operand *AArch64CGFunc::SelectIreadoff(const BaseNode &parent, IreadoffNode &ireadoff) { + auto offset = ireadoff.GetOffset(); + auto primType = ireadoff.GetPrimType(); + auto bitSize = GetPrimTypeBitSize(primType); + auto *baseAddr = ireadoff.Opnd(0); + auto *result = &CreateRegisterOperandOfType(primType); + auto *addrOpnd = HandleExpr(ireadoff, *baseAddr); + auto &memOpnd = CreateMemOpnd(LoadIntoRegister(*addrOpnd, PTY_a64), offset, bitSize); + auto mop = PickLdInsn(bitSize, primType); + GetCurBB()->AppendInsn(GetCG()->BuildInstruction(mop, *result, memOpnd)); + return result; +} + +RegOperand *AArch64CGFunc::GenLmbcParamLoad(int32 offset, uint32 byteSize, RegType regType, PrimType primType) { + MemOperand *memOpnd; + RegOperand *rfp = &GetOrCreatePhysicalRegisterOperand(RFP, k64BitSize, kRegTyInt); + uint32 bitlen = byteSize * kBitsPerByte; + if (offset < 0) { + RegOperand *baseOpnd = &CreateRegisterOperandOfType(PTY_a64); + ImmOperand &immOpnd = CreateImmOperand(offset, k32BitSize, true); + Insn &addInsn = GetCG()->BuildInstruction(MOP_xaddrri12, *baseOpnd, *rfp, immOpnd); + GetCurBB()->AppendInsn(addInsn); + OfstOperand *offsetOpnd = &CreateOfstOpnd(0, k32BitSize); + memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, bitlen, baseOpnd, + nullptr, offsetOpnd, nullptr); + } else { + OfstOperand *offsetOpnd = &CreateOfstOpnd(offset, k32BitSize); + memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, bitlen, rfp, + nullptr, offsetOpnd, nullptr); + } + memOpnd->SetStackMem(true); + RegOperand *result = &GetOrCreateVirtualRegisterOperand(NewVReg(regType, byteSize)); + MOperator mOp = PickLdInsn(bitlen, primType); + Insn &load = GetCG()->BuildInstruction(mOp, *result, *memOpnd); + GetCurBB()->AppendInsn(load); + return result; +} + +Operand *AArch64CGFunc::SelectIreadfpoff(const BaseNode &parent, IreadFPoffNode &ireadoff) { + int32 offset = ireadoff.GetOffset(); + PrimType primType = ireadoff.GetPrimType(); + uint32 bytelen = GetPrimTypeSize(primType); + uint32 bitlen = bytelen * 
kBitsPerByte; + RegType regty = GetRegTyFromPrimTy(primType); + RegOperand *result = nullptr; + if (offset >= 0) { + LmbcFormalParamInfo *info = GetLmbcFormalParamInfo(offset); + if (info->GetPrimType() == PTY_agg) { + result = GenLmbcParamLoad(offset, bytelen, regty, primType); + } else { + CHECK_FATAL(primType == info->GetPrimType(), "Incorrect primtype"); + CHECK_FATAL(offset == info->GetOffset(), "Incorrect offset"); + if (info->GetRegNO() == 0) { + /* TODO : follow lmbc sp offset for now */ + result = GenLmbcParamLoad(offset, bytelen, regty, primType); + } else { + result = &GetOrCreatePhysicalRegisterOperand((AArch64reg)(info->GetRegNO()), bitlen, regty); + } + } + } else { + result = GenLmbcParamLoad(offset, bytelen, regty, primType); + } + return result; +} + Operand *AArch64CGFunc::SelectIread(const BaseNode &parent, IreadNode &expr, int extraOffset, PrimType finalBitFieldDestType) { int32 offset = 0; @@ -2642,7 +3012,7 @@ Operand *AArch64CGFunc::SelectIread(const BaseNode &parent, IreadNode &expr, Operand *result = nullptr; if (parent.GetOpCode() == OP_eval) { /* regSize << 3, that is regSize * 8, change bytes to bits */ - result = &AArch64RegOperand::GetZeroRegister(regSize << 3); + result = &GetZeroOpnd(regSize << 3); } else { result = &GetOrCreateResOperand(parent, expr.GetPrimType()); } @@ -2692,11 +3062,12 @@ Operand *AArch64CGFunc::SelectIread(const BaseNode &parent, IreadNode &expr, isAggParamInReg = false; return aggParamReg; } - if (isVolLoad && (static_cast(memOpnd)->GetAddrMode() == AArch64MemOperand::kAddrModeBOi)) { + if (isVolLoad && (memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi)) { memOrd = AArch64isa::kMoAcquire; isVolLoad = false; } + memOpnd = memOpnd->IsOffsetMisaligned(bitSize) ? &ConstraintOffsetToSafeRegion(bitSize, *memOpnd) : memOpnd; if (memOrd == AArch64isa::kMoNone) { MOperator mOp = 0; if (finalBitFieldDestType == kPtyInvalid) { @@ -2705,11 +3076,11 @@ Operand *AArch64CGFunc::SelectIread(const BaseNode &parent, IreadNode &expr, mOp = PickLdInsn(GetPrimTypeBitSize(finalBitFieldDestType), finalBitFieldDestType); } if ((memOpnd->GetMemVaryType() == kNotVary) && !IsOperandImmValid(mOp, memOpnd, 1)) { - memOpnd = &SplitOffsetWithAddInstruction(*static_cast(memOpnd), bitSize); + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, bitSize); } Insn &insn = GetCG()->BuildInstruction(mOp, *result, *memOpnd); if (parent.GetOpCode() == OP_eval && result->IsRegister() && - static_cast(result)->IsZeroRegister()) { + static_cast(result)->GetRegisterNumber() == RZR) { insn.SetComment("null-check"); } GetCurBB()->AppendInsn(insn); @@ -2717,7 +3088,7 @@ Operand *AArch64CGFunc::SelectIread(const BaseNode &parent, IreadNode &expr, if (parent.op != OP_eval) { const AArch64MD *md = &AArch64CG::kMd[insn.GetMachineOpcode()]; OpndProp *prop = md->GetOperand(0); - if ((static_cast(prop)->GetSize()) < insn.GetOperand(0).GetSize()) { + if ((prop->GetSize()) < insn.GetOperand(0).GetSize()) { switch (destType) { case PTY_i8: mOp = MOP_xsxtb64; @@ -2745,10 +3116,9 @@ Operand *AArch64CGFunc::SelectIread(const BaseNode &parent, IreadNode &expr, } } } else { - if ((memOpnd->GetMemVaryType() == kNotVary) && - IsImmediateOffsetOutOfRange(*static_cast(memOpnd), bitSize)) { - memOpnd = &SplitOffsetWithAddInstruction(*static_cast(memOpnd), bitSize); - } + if ((memOpnd->GetMemVaryType() == kNotVary) && IsImmediateOffsetOutOfRange(*memOpnd, bitSize)) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, bitSize); + } AArch64CGFunc::SelectLoadAcquire(*result, destType, *memOpnd, 
destType, memOrd, false); } GetCurBB()->GetLastInsn()->MarkAsAccessRefField(isRefField); @@ -3054,7 +3424,7 @@ void AArch64CGFunc::SelectCondGoto(LabelOperand &targetOpnd, Opcode jmpOp, Opcod /* Special cases, i.e., comparing with zero * Do not perform optimization for C, unlike Java which has no unsigned int. */ - if (static_cast(opnd1)->IsZero() && (Globals::GetInstance()->GetOptimLevel() > 0)) { + if (static_cast(opnd1)->IsZero() && (Globals::GetInstance()->GetOptimLevel() > 0)) { bool finish = GenerateCompareWithZeroInstruction(jmpOp, cmpOp, is64Bits, primType, targetOpnd, *opnd0); if (finish) { return; @@ -3097,7 +3467,7 @@ void AArch64CGFunc::SelectCondSpecialCase1(CondGotoNode &stmt, BaseNode &expr) { bool isFloat = IsPrimitiveFloat(node->GetOpndType()); opnd0 = &LoadIntoRegister(*opnd0, node->GetOpndType()); /* - * most of FP constants are passed as AArch64MemOperand + * most of FP constants are passed as MemOperand * except 0.0 which is passed as kOpdFPZeroImmediate */ Operand::OperandType opnd1Type = opnd1->GetKind(); @@ -3209,9 +3579,7 @@ Operand *AArch64CGFunc::SelectAdd(BinaryNode &node, Operand &opnd0, Operand &opn auto ®AssignNode = static_cast(parent); PregIdx pregIdx = regAssignNode.GetRegIdx(); if (IsSpecialPseudoRegister(pregIdx)) { - /* if it is one of special registers */ - ASSERT(-pregIdx != kSregRetval0, "the dest of RegAssign node must not be kSregRetval0"); - resOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx); + resOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, dtype); } else { resOpnd = &GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx)); } @@ -3255,7 +3623,7 @@ void AArch64CGFunc::SelectAdd(Operand &resOpnd, Operand &opnd0, Operand &opnd1, return; } else { /* add reg, #imm */ - AArch64ImmOperand *immOpnd = static_cast(&opnd1); + ImmOperand *immOpnd = static_cast(&opnd1); if (immOpnd->IsNegative()) { immOpnd->Negate(); SelectSub(resOpnd, opnd0, *immOpnd, primType); @@ -3303,7 +3671,7 @@ void AArch64CGFunc::SelectAdd(Operand &resOpnd, Operand &opnd0, Operand &opnd1, if (bitNum <= k16ValidBit) { int64 newImm = (static_cast(immVal) >> static_cast(tail0bitNum)) & 0xFFFF; - AArch64ImmOperand &immOpnd1 = CreateImmOperand(newImm, k16BitSize, false); + ImmOperand &immOpnd1 = CreateImmOperand(newImm, k16BitSize, false); SelectCopyImm(regOpnd, immOpnd1, primType); uint32 mopBadd = is64Bits ? MOP_xaddrrrs : MOP_waddrrrs; int32 bitLen = is64Bits ? 
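/* This SelectAdd path handles immediates of the form chunk16 << shift: the
 * 16-bit chunk is materialised with one mov and then combined with an
 * add-with-shifted-register. Sketch of the eligibility test: */
static bool FitsMovPlusShiftedAdd(unsigned long long imm) {
  if (imm == 0) {
    return false;
  }
  unsigned tz = static_cast<unsigned>(__builtin_ctzll(imm));  /* trailing zeros */
  return (imm >> tz) <= 0xFFFFULL;  /* remaining significant bits fit in 16 */
}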
kBitLenOfShift64Bits : kBitLenOfShift32Bits; @@ -3372,8 +3740,7 @@ Operand &AArch64CGFunc::SelectCGArrayElemAdd(BinaryNode &node, const BaseNode &p MIRSymbol &symbol = *mirModule.CurFunction()->GetLocalOrGlobalSymbol(addrofNode->GetStIdx()); ASSERT(addrofNode->GetFieldID() == 0, "For debug SelectCGArrayElemAdd."); - PrimType primType = addrofNode->GetPrimType(); - Operand &result = GetOrCreateResOperand(parent, primType); + Operand &result = GetOrCreateResOperand(parent, PTY_a64); /* OP_constval */ ConstvalNode *constvalNode = static_cast(opnd1); @@ -3405,14 +3772,15 @@ void AArch64CGFunc::SelectSub(Operand &resOpnd, Operand &opnd0, Operand &opnd1, return; } - AArch64ImmOperand *immOpnd = static_cast(&opnd1); + ImmOperand *immOpnd = static_cast(&opnd1); if (immOpnd->IsNegative()) { immOpnd->Negate(); SelectAdd(resOpnd, *opnd0Bak, *immOpnd, primType); return; } - if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0)) { + int64 higher12BitVal = static_cast(static_cast(immOpnd->GetValue()) >> kMaxImmVal12Bits); + if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0) && higher12BitVal + 1 <= kMaxPimm8) { /* * SUB Wd|WSP, Wn|WSP, #imm{, shift} ; 32-bit general registers * SUB Xd|SP, Xn|SP, #imm{, shift} ; 64-bit general registers @@ -3426,7 +3794,6 @@ void AArch64CGFunc::SelectSub(Operand &resOpnd, Operand &opnd0, Operand &opnd1, if (!(immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) { isSplitSub = true; - int64 higher12BitVal = static_cast(static_cast(immOpnd->GetValue()) >> kMaxImmVal12Bits); /* process higher 12 bits */ ImmOperand &immOpnd2 = CreateImmOperand(higher12BitVal + 1, immOpnd->GetSize(), immOpnd->IsSignedValue()); @@ -3459,7 +3826,7 @@ void AArch64CGFunc::SelectSub(Operand &resOpnd, Operand &opnd0, Operand &opnd1, if (bitNum <= k16ValidBit) { int64 newImm = (static_cast(immVal) >> static_cast(tail0bitNum)) & 0xFFFF; - AArch64ImmOperand &immOpnd1 = CreateImmOperand(newImm, k16BitSize, false); + ImmOperand &immOpnd1 = CreateImmOperand(newImm, k16BitSize, false); SelectCopyImm(regOpnd, immOpnd1, primType); uint32 mopBsub = is64Bits ? MOP_xsubrrrs : MOP_wsubrrrs; int32 bitLen = is64Bits ? 
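/* The new guard in SelectSub: splitting a wide immediate into two 12-bit
 * subtractions rounds the upper half up by one, so that rounded value must
 * itself remain encodable (kMaxPimm8 stands for the backend's limit). Sketch: */
static bool SplitSubHighHalfEncodable(unsigned long long imm, unsigned long long maxPimm8) {
  unsigned long long hi12 = imm >> 12;  /* value of the upper 12-bit half */
  return hi12 + 1 <= maxPimm8;
}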
kBitLenOfShift64Bits : kBitLenOfShift32Bits; @@ -3536,7 +3903,7 @@ void AArch64CGFunc::SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, if (otherOp->GetKind() != Operand::kOpdRegister) { otherOp = &SelectCopy(*otherOp, primType, primType); } - AArch64ImmOperand &shiftNum = CreateImmOperand(__builtin_ffsll(immValue) - 1, dsize, false); + ImmOperand &shiftNum = CreateImmOperand(__builtin_ffsll(immValue) - 1, dsize, false); SelectShift(resOpnd, *otherOp, shiftNum, kShiftLeft, primType); if (imm->GetValue() < 0) { SelectNeg(resOpnd, resOpnd, primType); @@ -3555,11 +3922,11 @@ void AArch64CGFunc::SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, if (otherOp->GetKind() != Operand::kOpdRegister) { otherOp = &SelectCopy(*otherOp, primType, primType); } - AArch64ImmOperand &shiftNum1 = CreateImmOperand(__builtin_ffsll(headVal - 1) - 1, dsize, false); + ImmOperand &shiftNum1 = CreateImmOperand(__builtin_ffsll(headVal - 1) - 1, dsize, false); RegOperand &tmpOpnd = CreateRegisterOperandOfType(primType); SelectShift(tmpOpnd, *otherOp, shiftNum1, kShiftLeft, primType); SelectAdd(resOpnd, *otherOp, tmpOpnd, primType); - AArch64ImmOperand &shiftNum2 = CreateImmOperand(zeroNum, dsize, false); + ImmOperand &shiftNum2 = CreateImmOperand(zeroNum, dsize, false); SelectShift(resOpnd, resOpnd, shiftNum2, kShiftLeft, primType); if (imm->GetValue() < 0) { SelectNeg(resOpnd, resOpnd, primType); @@ -3607,7 +3974,7 @@ void AArch64CGFunc::SelectDiv(Operand &resOpnd, Operand &origOpnd0, Operand &opn return; } int32 shiftNumber = __builtin_ffsll(immValue) - 1; - AArch64ImmOperand &shiftNum = CreateImmOperand(shiftNumber, dsize, false); + ImmOperand &shiftNum = CreateImmOperand(shiftNumber, dsize, false); Operand &tmpOpnd = CreateRegisterOperandOfType(primType); SelectShift(tmpOpnd, opnd0, CreateImmOperand(dsize - 1, dsize, false), kShiftAright, primType); uint32 mopBadd = is64Bits ? MOP_xaddrrrs : MOP_waddrrrs; @@ -3627,7 +3994,7 @@ void AArch64CGFunc::SelectDiv(Operand &resOpnd, Operand &origOpnd0, Operand &opn if (imm->GetValue() != 0) { if ((imm->GetValue() > 0) && ((static_cast(imm->GetValue()) & (static_cast(imm->GetValue()) - 1)) == 0)) { - AArch64ImmOperand &shiftNum = CreateImmOperand(__builtin_ffsll(imm->GetValue()) - 1, dsize, false); + ImmOperand &shiftNum = CreateImmOperand(__builtin_ffsll(imm->GetValue()) - 1, dsize, false); SelectShift(resOpnd, opnd0, shiftNum, kShiftLright, primType); return; @@ -3717,7 +4084,7 @@ void AArch64CGFunc::SelectRem(Operand &resOpnd, Operand &lhsOpnd, Operand &rhsOp * for unsigned rem op, just use and */ if ((Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel2)) { - AArch64ImmOperand *imm = nullptr; + ImmOperand *imm = nullptr; Insn *movImmInsn = GetCurBB()->GetLastInsn(); if (movImmInsn && ((movImmInsn->GetMachineOpcode() == MOP_xmovri32) || (movImmInsn->GetMachineOpcode() == MOP_xmovri64)) && @@ -3726,13 +4093,12 @@ void AArch64CGFunc::SelectRem(Operand &resOpnd, Operand &lhsOpnd, Operand &rhsOp * mov w1, #2 * rem res, w0, w1 */ - imm = static_cast(&movImmInsn->GetOperand(kInsnSecondOpnd)); + imm = static_cast(&movImmInsn->GetOperand(kInsnSecondOpnd)); } else if (opnd1.IsImmediate()) { /* * rem res, w0, #2 */ - imm = opnd1.IsZeroRegister() ? &CreateImmOperand(0, opnd1.GetSize(), false) - : static_cast(&opnd1); + imm = static_cast(&opnd1); } /* positive or negative do not have effect on the result */ const int64 dividor = (imm != nullptr) ? ((imm->GetValue() >= 0) ? 
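/* The signed x % 2^k lowering that follows (asr, lsr, add, and, sub) is the
 * branch-free bias trick. Equivalent C++, assuming arithmetic right shift on
 * signed values and 1 <= k <= 63: */
static long long RemPow2SignedSketch(long long x, unsigned k) {
  unsigned long long ux = static_cast<unsigned long long>(x);
  long long sign = x >> 63;                                   /* asr: all sign bits  */
  unsigned long long bias =
      static_cast<unsigned long long>(sign) >> (64 - k);      /* lsr: 2^k-1 if x < 0 */
  unsigned long long r = (ux + bias) & ((1ULL << k) - 1);     /* and: divisor - 1    */
  return static_cast<long long>(r - bias);                    /* sub the bias back   */
}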
imm->GetValue() : ((-1) * imm->GetValue())) @@ -3742,10 +4108,10 @@ void AArch64CGFunc::SelectRem(Operand &resOpnd, Operand &lhsOpnd, Operand &rhsOp if (is64Bits) { CHECK_FATAL(Log2OfDividor < k64BitSize, "imm out of bound"); if (isSigned) { - AArch64ImmOperand &rightShiftValue = CreateImmOperand(k64BitSize - Log2OfDividor, k64BitSize, isSigned); + ImmOperand &rightShiftValue = CreateImmOperand(k64BitSize - Log2OfDividor, k64BitSize, isSigned); if (Log2OfDividor != 1) { /* 63->shift ALL , 32 ->32bit register */ - AArch64ImmOperand &rightShiftAll = CreateImmOperand(63, k64BitSize, isSigned); + ImmOperand &rightShiftAll = CreateImmOperand(63, k64BitSize, isSigned); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_xasrrri6, temp, opnd0, rightShiftAll)); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_xlsrrri6, temp, temp, rightShiftValue)); @@ -3753,21 +4119,21 @@ void AArch64CGFunc::SelectRem(Operand &resOpnd, Operand &lhsOpnd, Operand &rhsOp GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_xlsrrri6, temp, opnd0, rightShiftValue)); } GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_xaddrrr, resOpnd, opnd0, temp)); - AArch64ImmOperand &remBits = CreateImmOperand(dividor - 1, k64BitSize, isSigned); + ImmOperand &remBits = CreateImmOperand(dividor - 1, k64BitSize, isSigned); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_xandrri13, resOpnd, resOpnd, remBits)); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_xsubrrr, resOpnd, resOpnd, temp)); } else { - AArch64ImmOperand &remBits = CreateImmOperand(dividor - 1, k64BitSize, isSigned); + ImmOperand &remBits = CreateImmOperand(dividor - 1, k64BitSize, isSigned); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_xandrri13, resOpnd, opnd0, remBits)); } return; } else { CHECK_FATAL(Log2OfDividor < k32BitSize, "imm out of bound"); if (isSigned) { - AArch64ImmOperand &rightShiftValue = CreateImmOperand(k32BitSize - Log2OfDividor, k32BitSize, isSigned); + ImmOperand &rightShiftValue = CreateImmOperand(k32BitSize - Log2OfDividor, k32BitSize, isSigned); if (Log2OfDividor != 1) { /* 31->shift ALL , 32 ->32bit register */ - AArch64ImmOperand &rightShiftAll = CreateImmOperand(31, k32BitSize, isSigned); + ImmOperand &rightShiftAll = CreateImmOperand(31, k32BitSize, isSigned); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_wasrrri5, temp, opnd0, rightShiftAll)); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_wlsrrri5, temp, temp, rightShiftValue)); @@ -3776,12 +4142,12 @@ void AArch64CGFunc::SelectRem(Operand &resOpnd, Operand &lhsOpnd, Operand &rhsOp } GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_waddrrr, resOpnd, opnd0, temp)); - AArch64ImmOperand &remBits = CreateImmOperand(dividor - 1, k32BitSize, isSigned); + ImmOperand &remBits = CreateImmOperand(dividor - 1, k32BitSize, isSigned); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_wandrri12, resOpnd, resOpnd, remBits)); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_wsubrrr, resOpnd, resOpnd, temp)); } else { - AArch64ImmOperand &remBits = CreateImmOperand(dividor - 1, k32BitSize, isSigned); + ImmOperand &remBits = CreateImmOperand(dividor - 1, k32BitSize, isSigned); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_wandrri12, resOpnd, opnd0, remBits)); } @@ -3864,7 +4230,7 @@ void AArch64CGFunc::SelectCmpOp(Operand &resOpnd, Operand &lhsOpnd, Operand &rhs Operand &opnd0 = LoadIntoRegister(lhsOpnd, primType); /* - * most of FP constants are passed as AArch64MemOperand + * most of FP constants are passed as MemOperand * except 0.0 which is 
passed as kOpdFPZeroImmediate */ Operand::OperandType opnd1Type = rhsOpnd.GetKind(); @@ -3883,7 +4249,7 @@ void AArch64CGFunc::SelectCmpOp(Operand &resOpnd, Operand &lhsOpnd, Operand &rhs * if OP_cmpl, CSINV RES, RES, WZR, VC (no overflow) * if OP_cmpg, CSINC RES, RES, WZR, VC (no overflow) */ - AArch64RegOperand &xzr = AArch64RegOperand::GetZeroRegister(dsize); + RegOperand &xzr = GetZeroOpnd(dsize); if ((opcode == OP_cmpl) || (opcode == OP_cmpg)) { ASSERT(isFloat, "incorrect operand types"); SelectTargetFPCmpQuiet(opnd0, *opnd1, GetPrimTypeBitSize(primType)); @@ -3909,7 +4275,6 @@ void AArch64CGFunc::SelectCmpOp(Operand &resOpnd, Operand &lhsOpnd, Operand &rhs return; } - static_cast(&resOpnd)->SetValidBitsNum(1); // lt u8 i32 ( xxx, 0 ) => get sign bit if ((opcode == OP_lt) && opnd0.IsRegister() && opnd1->IsImmediate() && (static_cast(opnd1)->GetValue() == 0) && parent.GetOpCode() != OP_select) { @@ -4162,12 +4527,12 @@ void AArch64CGFunc::SelectRelationOperator(RelationOperator operatorCode, Operan return; } - AArch64ImmOperand *immOpnd = static_cast(&opnd1); + ImmOperand *immOpnd = static_cast(&opnd1); if (immOpnd->IsZero()) { if (operatorCode == kAND) { uint32 mopMv = is64Bits ? MOP_xmovrr : MOP_wmovrr; GetCurBB()->AppendInsn(GetCG()->BuildInstruction(mopMv, resOpnd, - AArch64RegOperand::GetZeroRegister(dsize))); + GetZeroOpnd(dsize))); } else if ((operatorCode == kIOR) || (operatorCode == kEOR)) { SelectCopy(resOpnd, primType, opnd0, primType); } @@ -4195,7 +4560,7 @@ void AArch64CGFunc::SelectRelationOperator(RelationOperator operatorCode, Operan if (bitNum <= k16ValidBit) { int64 newImm = (static_cast(immVal) >> static_cast(tail0BitNum)) & 0xFFFF; - AArch64ImmOperand &immOpnd1 = CreateImmOperand(newImm, k16BitSize, false); + ImmOperand &immOpnd1 = CreateImmOperand(newImm, k16BitSize, false); SelectCopyImm(regOpnd, immOpnd1, primType); MOperator mOp = SelectRelationMop(operatorCode, kRegImm, is64Bits, false, true); int32 bitLen = is64Bits ? kBitLenOfShift64Bits : kBitLenOfShift32Bits; @@ -4302,11 +4667,18 @@ Operand *AArch64CGFunc::SelectShift(BinaryNode &node, Operand &opnd0, Operand &o isOneElemVector = symbol->GetAttr(ATTR_oneelem_simd); } + Operand *opd0 = &opnd0; + PrimType otyp0 = expr->GetPrimType(); + if (IsPrimitiveVector(dtype) && opnd0.IsConstImmediate()) { + opd0 = SelectVectorFromScalar(dtype, opd0, node.Opnd(0)->GetPrimType()); + otyp0 = dtype; + } + if (IsPrimitiveVector(dtype) && opnd1.IsConstImmediate()) { int64 sConst = static_cast(opnd1).GetValue(); - resOpnd = SelectVectorShiftImm(dtype, &opnd0, &opnd1, static_cast(sConst), opcode); + resOpnd = SelectVectorShiftImm(dtype, opd0, &opnd1, static_cast(sConst), opcode); } else if ((IsPrimitiveVector(dtype) || isOneElemVector) && !opnd1.IsConstImmediate()) { - resOpnd = SelectVectorShift(dtype, &opnd0, expr->GetPrimType(), &opnd1, node.Opnd(1)->GetPrimType(), opcode); + resOpnd = SelectVectorShift(dtype, opd0, otyp0, &opnd1, node.Opnd(1)->GetPrimType(), opcode); } else { PrimType primType = isFloat ? dtype : (is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32)); resOpnd = &GetOrCreateResOperand(parent, primType); @@ -4354,7 +4726,7 @@ void AArch64CGFunc::SelectShift(Operand &resOpnd, Operand &opnd0, Operand &opnd1 MOperator mopShift; if ((opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset)) { - AArch64ImmOperand *immOpnd1 = static_cast(&opnd1); + ImmOperand *immOpnd1 = static_cast(&opnd1); const int64 kVal = immOpnd1->GetValue(); const uint32 kShiftamt = is64Bits ? 
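/* kShiftamt below reflects that AArch64 register shifts consume only the low
 * log2(width) bits of the amount (amount & 31 for 32-bit, amount & 63 for
 * 64-bit). An illustrative sketch of the masking this implies: */
static unsigned NormaliseShiftAmountSketch(unsigned amount, bool is64Bits) {
  return amount & (is64Bits ? 63u : 31u);
}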
kHighestBitOf64Bits : kHighestBitOf32Bits; if (kVal == 0) { @@ -4476,6 +4848,17 @@ Operand *AArch64CGFunc::SelectBnot(UnaryNode &node, Operand &opnd0, const BaseNo return resOpnd; } +Operand *AArch64CGFunc::SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + auto bitWidth = (GetPrimTypeBitSize(dtype)); + RegOperand *resOpnd = nullptr; + resOpnd = &GetOrCreateResOperand(parent, dtype); + Operand &newOpnd0 = LoadIntoRegister(opnd0, dtype); + uint32 mopBswap = bitWidth == 64 ? MOP_xrevrr : (bitWidth == 32 ? MOP_wrevrr : MOP_wrevrr16); + GetCurBB()->AppendInsn(GetCG()->BuildInstruction(mopBswap, *resOpnd, newOpnd0)); + return resOpnd; +} + Operand *AArch64CGFunc::SelectRegularBitFieldLoad(ExtractbitsNode &node, const BaseNode &parent) { PrimType dtype = node.GetPrimType(); bool isSigned = IsSignedInteger(dtype); @@ -4528,9 +4911,11 @@ Operand *AArch64CGFunc::SelectExtractbits(ExtractbitsNode &node, Operand &srcOpn } else { MOperator mOp = MOP_undef; if (bitSize == k8BitSize) { - mOp = is64Bits ? (isSigned ? MOP_xsxtb64 : MOP_xuxtb32) : (isSigned ? MOP_xsxtb32 : MOP_xuxtb32); + mOp = is64Bits ? (isSigned ? MOP_xsxtb64 : MOP_undef) : + (isSigned ? MOP_xsxtb32 : (opnd0.GetSize() == k32BitSize ? MOP_xuxtb32 : MOP_undef)); } else if (bitSize == k16BitSize) { - mOp = is64Bits ? (isSigned ? MOP_xsxth64 : MOP_xuxth32) : (isSigned ? MOP_xsxth32 : MOP_xuxth32); + mOp = is64Bits ? (isSigned ? MOP_xsxth64 : MOP_undef) : + (isSigned ? MOP_xsxth32 : (opnd0.GetSize() == k32BitSize ? MOP_xuxth32 : MOP_undef)); } else if (bitSize == k32BitSize) { mOp = is64Bits ? (isSigned ? MOP_xsxtw64 : MOP_xuxtw64) : MOP_wmovrr; } @@ -4542,8 +4927,8 @@ Operand *AArch64CGFunc::SelectExtractbits(ExtractbitsNode &node, Operand &srcOpn } uint32 mopBfx = is64Bits ? (isSigned ? MOP_xsbfxrri6i6 : MOP_xubfxrri6i6) : (isSigned ? MOP_wsbfxrri5i5 : MOP_wubfxrri5i5); - AArch64ImmOperand &immOpnd1 = CreateImmOperand(bitOffset, k8BitSize, false); - AArch64ImmOperand &immOpnd2 = CreateImmOperand(bitSize, k8BitSize, false); + ImmOperand &immOpnd1 = CreateImmOperand(bitOffset, k8BitSize, false); + ImmOperand &immOpnd2 = CreateImmOperand(bitSize, k8BitSize, false); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(mopBfx, resOpnd, opnd0, immOpnd1, immOpnd2)); return &resOpnd; } @@ -4588,9 +4973,9 @@ Operand *AArch64CGFunc::SelectDepositBits(DepositbitsNode &node, Operand &opnd0, return &resOpnd; } else { Operand &movOpnd = LoadIntoRegister(opnd1, regType); - uint32 mopBfi = is64Bits ? MPO_xbfirri6i6 : MPO_wbfirri5i5; - AArch64ImmOperand &immOpnd1 = CreateImmOperand(bitOffset, k8BitSize, false); - AArch64ImmOperand &immOpnd2 = CreateImmOperand(bitSize, k8BitSize, false); + uint32 mopBfi = is64Bits ? 
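/* The bfi emitted here inserts bitSize bits of the source at bitOffset in the
 * destination and leaves every other bit untouched. Equivalent C++: */
static unsigned long long DepositBitsSketch(unsigned long long dst, unsigned long long src,
                                            unsigned offset, unsigned size) {
  unsigned long long field = (size < 64) ? ((1ULL << size) - 1) : ~0ULL;
  unsigned long long mask = field << offset;
  return (dst & ~mask) | ((src << offset) & mask);
}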
MOP_xbfirri6i6 : MOP_wbfirri5i5; + ImmOperand &immOpnd1 = CreateImmOperand(bitOffset, k8BitSize, false); + ImmOperand &immOpnd2 = CreateImmOperand(bitSize, k8BitSize, false); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(mopBfi, opnd0, movOpnd, immOpnd1, immOpnd2)); return &opnd0; } @@ -4788,7 +5173,8 @@ Operand *AArch64CGFunc::SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrnNode return dst; } -Operand *AArch64CGFunc::SelectIntrinsicOpWithNParams(IntrinsicopNode &intrnNode, PrimType retType, std::string &name) { +Operand *AArch64CGFunc::SelectIntrinsicOpWithNParams(IntrinsicopNode &intrnNode, PrimType retType, + const std::string &name) { MapleVector argNodes = intrnNode.GetNopnd(); std::vector opndVec; std::vector opndTypes; @@ -4819,7 +5205,7 @@ Operand *AArch64CGFunc::SelectAArch64ffs(Operand &argOpnd, PrimType argType) { uint32 argSize = GetPrimTypeBitSize(argType); ASSERT((argSize == k64BitSize || argSize == k32BitSize), "Unexpect arg type"); /* cmp */ - AArch64ImmOperand &zeroOpnd = CreateImmOperand(0, argSize, false); + ImmOperand &zeroOpnd = CreateImmOperand(0, argSize, false); Operand &rflag = GetOrCreateRflag(); GetCurBB()->AppendInsn(GetCG()->BuildInstruction( argSize == k64BitSize ? MOP_xcmpri : MOP_wcmpri, rflag, destOpnd, zeroOpnd)); @@ -4833,7 +5219,7 @@ Operand *AArch64CGFunc::SelectAArch64ffs(Operand &argOpnd, PrimType argType) { /* csincc */ GetCurBB()->AppendInsn(GetCG()->BuildInstruction( argSize == k64BitSize ? MOP_xcsincrrrc : MOP_wcsincrrrc, - *tempResReg, AArch64RegOperand::Get32bitZeroRegister(), *tempResReg, GetCondOperand(CC_EQ), rflag)); + *tempResReg, GetZeroOpnd(k32BitSize), *tempResReg, GetCondOperand(CC_EQ), rflag)); return tempResReg; } @@ -5308,7 +5694,7 @@ void AArch64CGFunc::SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOp /* contains the index */ if (addOpnd->GetSize() != GetPrimTypeBitSize(PTY_u64)) { - addOpnd = static_cast(&SelectCopy(*addOpnd, PTY_u64, PTY_u64)); + addOpnd = static_cast(&SelectCopy(*addOpnd, PTY_u64, PTY_u64)); } RegOperand &baseOpnd = CreateRegisterOperandOfType(PTY_u64); @@ -5320,7 +5706,7 @@ void AArch64CGFunc::SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOp /* load the displacement into a register by accessing memory at base + index*8 */ Operand *disp = - memPool->New(AArch64MemOperand::kAddrModeBOrX, k64BitSize, baseOpnd, *addOpnd, k8BitShift); + CreateMemOperand(MemOperand::kAddrModeBOrX, k64BitSize, baseOpnd, *addOpnd, k8BitShift); RegOperand &tgt = CreateRegisterOperandOfType(PTY_a64); SelectAdd(tgt, baseOpnd, *disp, PTY_u64); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_xbr, tgt)); @@ -5506,48 +5892,68 @@ bool AArch64CGFunc::IsRegSameRematInfo(const RegOperand ®Dest, const RegOpera } } -void AArch64CGFunc::ReplaceOpndInInsn(RegOperand ®Dest, RegOperand ®Src, Insn &insn) { +void AArch64CGFunc::ReplaceOpndInInsn(RegOperand ®Dest, RegOperand ®Src, Insn &insn, regno_t destNO) { auto opndNum = static_cast(insn.GetOperandSize()); for (int i = opndNum - 1; i >= 0; --i) { Operand &opnd = insn.GetOperand(static_cast(i)); if (opnd.IsList()) { std::list tempRegStore; - auto& opndList = static_cast(opnd).GetOperands(); - - for (auto it = opndList.begin(), end = opndList.end(); it != end;) { + auto& opndList = static_cast(opnd).GetOperands(); + bool needReplace = false; + for (auto it = opndList.begin(), end = opndList.end(); it != end; ++it) { auto *regOpnd = *it; - - if (regOpnd->Equals(regDest)) { - tempRegStore.push_back(®Src); - it = opndList.erase(it); + if (regOpnd->GetRegisterNumber() == 
destNO) { + needReplace = true; + if (regDest.GetSize() != regSrc.GetSize()) { + regDest.SetRegisterNumber(regSrc.GetRegisterNumber()); + tempRegStore.push_back(®Dest); + } else { + tempRegStore.push_back(®Src); + } } else { - ++it; + tempRegStore.push_back(regOpnd); } } - - for (auto newOpnd : tempRegStore) { - static_cast(opnd).PushOpnd(*newOpnd); + if (needReplace) { + opndList.clear(); + for (auto newOpnd : tempRegStore) { + static_cast(opnd).PushOpnd(*newOpnd); + } } } else if (opnd.IsMemoryAccessOperand()) { - auto &memOpnd = static_cast(opnd); + auto &memOpnd = static_cast(opnd); RegOperand *baseRegOpnd = memOpnd.GetBaseRegister(); RegOperand *indexRegOpnd = memOpnd.GetIndexRegister(); - AArch64MemOperand *newMem = static_cast(memOpnd.Clone(*GetMemoryPool())); - if ((baseRegOpnd != nullptr && baseRegOpnd->Equals(regDest)) || - (indexRegOpnd != nullptr && indexRegOpnd->Equals(regDest))) { - if (baseRegOpnd != nullptr && baseRegOpnd->Equals(regDest)) { - newMem->SetBaseRegister(static_cast(regSrc)); + MemOperand *newMem = static_cast(memOpnd.Clone(*GetMemoryPool())); + if ((baseRegOpnd != nullptr && baseRegOpnd->GetRegisterNumber() == destNO) || + (indexRegOpnd != nullptr && indexRegOpnd->GetRegisterNumber() == destNO)) { + if (baseRegOpnd != nullptr && baseRegOpnd->GetRegisterNumber() == destNO) { + if (regDest.GetSize() != regSrc.GetSize()) { + regDest.SetRegisterNumber(regSrc.GetRegisterNumber()); + newMem->SetBaseRegister(regDest); + } else { + newMem->SetBaseRegister(regSrc); + } } - if (indexRegOpnd != nullptr && indexRegOpnd->Equals(regDest)) { - newMem->SetIndexRegister(regSrc); + if (indexRegOpnd != nullptr && indexRegOpnd->GetRegisterNumber() == destNO) { + if (regDest.GetSize() != regSrc.GetSize()) { + regDest.SetRegisterNumber(regSrc.GetRegisterNumber()); + newMem->SetIndexRegister(regDest); + } else { + newMem->SetIndexRegister(regSrc); + } } insn.SetMemOpnd(&GetOrCreateMemOpnd(*newMem)); } } else if (opnd.IsRegister()) { auto ®Opnd = static_cast(opnd); - if (regOpnd.Equals(regDest)) { + if (regOpnd.GetRegisterNumber() == destNO) { ASSERT(regOpnd.GetRegisterNumber() != kRFLAG, "both condi and reg"); - insn.SetOperand(static_cast(i), regSrc); + if (regDest.GetSize() != regSrc.GetSize()) { + regOpnd.SetRegisterNumber(regSrc.GetRegisterNumber()); + } else { + insn.SetOperand(static_cast(i), regSrc); + } } } } @@ -5603,8 +6009,17 @@ void AArch64CGFunc::GetRealCallerSaveRegs(const Insn &insn, std::set &r } } -Operand &AArch64CGFunc::GetZeroOpnd(uint32 size) { - return AArch64RegOperand::GetZeroRegister(size <= k32BitSize ? k32BitSize : k64BitSize); +RegOperand &AArch64CGFunc::GetZeroOpnd(uint32 bitLen) { + /* + * It is possible to have a bitLen < 32, eg stb. + * Set it to 32 if it is less than 32. + */ + if (bitLen < k32BitSize) { + bitLen = k32BitSize; + } + ASSERT((bitLen == k32BitSize || bitLen == k64BitSize), "illegal bit length = %d", bitLen); + return (bitLen == k32BitSize) ? 
GetOrCreatePhysicalRegisterOperand(RZR, k32BitSize, kRegTyInt) : + GetOrCreatePhysicalRegisterOperand(RZR, k64BitSize, kRegTyInt); } bool AArch64CGFunc::IsFrameReg(const RegOperand &opnd) const { @@ -5615,6 +6030,29 @@ bool AArch64CGFunc::IsFrameReg(const RegOperand &opnd) const { } } +bool AArch64CGFunc::IsSaveReg(const RegOperand ®, MIRType &mirType, BECommon &beCommon) const { + AArch64CallConvImpl retLocator(beCommon); + CCLocInfo retMechanism; + retLocator.InitReturnInfo(mirType, retMechanism); + if (retMechanism.GetRegCount() > 0) { + return reg.GetRegisterNumber() == retMechanism.GetReg0() || reg.GetRegisterNumber() == retMechanism.GetReg1() || + reg.GetRegisterNumber() == retMechanism.GetReg2() || reg.GetRegisterNumber() == retMechanism.GetReg3(); + } + return false; +} + +bool AArch64CGFunc::IsSPOrFP(const RegOperand &opnd) const { + const RegOperand ®Opnd = static_cast(opnd); + regno_t regNO = opnd.GetRegisterNumber(); + return (regOpnd.IsPhysicalRegister() && + (regNO == RSP || regNO == RFP || (regNO == R29 && CGOptions::UseFramePointer()))); +} + +bool AArch64CGFunc::IsReturnReg(const RegOperand &opnd) const { + regno_t regNO = opnd.GetRegisterNumber(); + return (regNO == R0) || (regNO == V0); +} + /* * This function returns true to indicate that the clean up code needs to be generated, * otherwise it does not need. In GCOnly mode, it always returns false. @@ -5651,12 +6089,12 @@ void AArch64CGFunc::GenerateCleanupCodeForExtEpilog(BB &bb) { /* this is necessary for code insertion. */ SetCurBB(bb); - AArch64RegOperand ®Opnd0 = + RegOperand ®Opnd0 = GetOrCreatePhysicalRegisterOperand(R0, kSizeOfPtr * kBitsPerByte, GetRegTyFromPrimTy(PTY_a64)); - AArch64RegOperand ®Opnd1 = + RegOperand ®Opnd1 = GetOrCreatePhysicalRegisterOperand(R1, kSizeOfPtr * kBitsPerByte, GetRegTyFromPrimTy(PTY_a64)); /* allocate 16 bytes to store reg0 and reg1 (each reg has 8 bytes) */ - AArch64MemOperand &frameAlloc = CreateCallFrameOperand(-16, kSizeOfPtr * kBitsPerByte); + MemOperand &frameAlloc = CreateCallFrameOperand(-16, kSizeOfPtr * kBitsPerByte); Insn &allocInsn = GetCG()->BuildInstruction(MOP_xstp, regOpnd0, regOpnd1, frameAlloc); allocInsn.SetDoNotRemove(true); AppendInstructionTo(allocInsn, *this); @@ -5664,7 +6102,7 @@ void AArch64CGFunc::GenerateCleanupCodeForExtEpilog(BB &bb) { /* invoke MCC_CleanupLocalStackRef(). */ HandleRCCall(false); /* deallocate 16 bytes which used to store reg0 and reg1 */ - AArch64MemOperand &frameDealloc = CreateCallFrameOperand(16, kSizeOfPtr * kBitsPerByte); + MemOperand &frameDealloc = CreateCallFrameOperand(16, kSizeOfPtr * kBitsPerByte); GenRetCleanup(cleanEANode, true); Insn &deallocInsn = GetCG()->BuildInstruction(MOP_xldp, regOpnd0, regOpnd1, frameDealloc); deallocInsn.SetDoNotRemove(true); @@ -5688,7 +6126,7 @@ void AArch64CGFunc::GenerateCleanupCode(BB &bb) { SetCurBB(bb); /* R0 is lived-in for clean-up code, save R0 before invocation */ - AArch64RegOperand &livein = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + RegOperand &livein = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); if (!GetCG()->GenLocalRC()) { /* by pass local RC operations. 
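GetZeroOpnd now returns a plain RegOperand for RZR, widening any sub-32-bit request (e.g. a byte store) to 32 bits before choosing between wzr and xzr. A standalone sketch of that selection rule; PickZeroReg is a hypothetical stand-in, not a function from the patch:

#include <cassert>
#include <cstdint>

/* Widths below 32 (e.g. an 8-bit store) are clamped up to 32, mirroring
 * the bitLen < k32BitSize check in GetZeroOpnd; only wzr/xzr remain. */
const char *PickZeroReg(uint32_t bitLen) {
  if (bitLen < 32) {
    bitLen = 32;
  }
  assert(bitLen == 32 || bitLen == 64);
  return (bitLen == 32) ? "wzr" : "xzr";
}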
*/ @@ -5720,7 +6158,7 @@ void AArch64CGFunc::GenerateCleanupCode(BB &bb) { sym->SetNameStrIdx(funcName); sym->SetStorageClass(kScText); sym->SetSKind(kStFunc); - AArch64ListOperand *srcOpnds = memPool->New(*GetFuncScopeAllocator()); + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); srcOpnds->PushOpnd(livein); AppendCall(*sym, *srcOpnds); /* @@ -5735,10 +6173,114 @@ void AArch64CGFunc::GenerateCleanupCode(BB &bb) { SetCleanupBB(*GetCurBB()); } +uint32 AArch64CGFunc::FloatParamRegRequired(MIRStructType *structType, uint32 &fpSize) { + AArch64CallConvImpl parmlocator(GetBecommon()); + return parmlocator.FloatParamRegRequired(*structType, fpSize); +} + +/* + * Map param registers to formals. For small structs passed in param registers, + * create a move to vreg since lmbc IR does not create a regassign for them. + */ +void AArch64CGFunc::AssignLmbcFormalParams() { + PrimType primType; + uint32 offset; + regno_t intReg = R0; + regno_t fpReg = V0; + for (auto param : GetLmbcParamVec()) { + primType = param->GetPrimType(); + offset = param->GetOffset(); + if (param->IsReturn()) { + param->SetRegNO(R8); + } else if (IsPrimitiveInteger(primType)) { + if (intReg > R7) { + param->SetRegNO(0); + } else { + param->SetRegNO(intReg); + intReg++; + } + } else if (IsPrimitiveFloat(primType)) { + if (fpReg > V7) { + param->SetRegNO(0); + } else { + param->SetRegNO(fpReg); + fpReg++; + } + } else if (primType == PTY_agg) { + if (param->IsPureFloat()) { + uint32 numFpRegs = param->GetNumRegs(); + if ((fpReg + numFpRegs - kOneRegister) > V7) { + param->SetRegNO(0); + } else { + param->SetRegNO(fpReg); + param->SetNumRegs(numFpRegs); + fpReg += numFpRegs; + } + } else if (param->GetSize() > k16ByteSize) { + if (intReg > R7) { + param->SetRegNO(0); + } else { + param->SetRegNO(intReg); + param->SetIsOnStack(); + intReg++; + } + } else if (param->GetSize() <= k8ByteSize) { + if (intReg > R7) { + param->SetRegNO(0); + } else { + param->SetRegNO(intReg); + param->SetNumRegs(kOneRegister); + intReg++; + } + } else { + /* size > 8 && size <= 16 */ + if ((intReg + kOneRegister) > R7) { + param->SetRegNO(0); + } else { + param->SetRegNO(intReg); + param->SetNumRegs(kTwoRegister); + intReg += kTwoRegister; + } + } + if (param->GetRegNO() != 0) { + for (int i = 0; i < param->GetNumRegs(); ++i) { + PrimType pType = PTY_i64; + RegType rType = kRegTyInt; + uint32 rSize = k8ByteSize; + if (param->IsPureFloat()) { + rType = kRegTyFloat; + if (param->GetFpSize() <= k4ByteSize) { + pType = PTY_f32; + rSize = k4ByteSize; + } else { + pType = PTY_f64; + } + } + regno_t vreg = NewVReg(rType, rSize); + RegOperand &dest = GetOrCreateVirtualRegisterOperand(vreg); + RegOperand &src = GetOrCreatePhysicalRegisterOperand(static_cast(param->GetRegNO() + i), + rSize * kBitsPerByte, rType); + SelectCopy(dest, pType, src, pType); + if (param->GetVregNO() == 0) { + param->SetVregNO(vreg); + } + Operand *memOpd = &CreateMemOpnd(RFP, offset + (i * rSize), rSize); + GetCurBB()->AppendInsn(GetCG()->BuildInstruction( + PickStInsn(rSize * kBitsPerByte, pType), dest, *memOpd)); + param->SetIsOnStack(); + } + } + } else { + CHECK_FATAL(false, "lmbc formal primtype not handled"); + } + } +} + /* if offset < 0, allocation; otherwise, deallocation */ -AArch64MemOperand &AArch64CGFunc::CreateCallFrameOperand(int32 offset, int32 size) { - return *memPool->New(RSP, offset, size, - (offset < 0) ? 
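AssignLmbcFormalParams walks two independent counters, x0-x7 for the integer class and v0-v7 for the float class; a class that runs out marks the param with regNO 0 (stack), and 9-16 byte aggregates consume a register pair. A compressed sketch of that bookkeeping; Param, Kind and AssignRegs are hypothetical names, not from the patch:

#include <cstdint>
#include <vector>

enum class Kind { kInt, kFloat };

struct Param {
  Kind kind;
  uint32_t numRegs;  /* 1 for scalars, 2 for 9..16-byte aggregates */
  uint32_t regNo;    /* 0 means not in a register (passed on stack) */
};

/* Integer params consume x0..x7 and float params v0..v7 independently,
 * as the intReg/fpReg counters do above. */
void AssignRegs(std::vector<Param> &params) {
  const uint32_t kMaxRegs = 8;
  uint32_t nextInt = 0;
  uint32_t nextFp = 0;
  for (Param &p : params) {
    uint32_t &next = (p.kind == Kind::kInt) ? nextInt : nextFp;
    if (next + p.numRegs > kMaxRegs) {
      p.regNo = 0;         /* class exhausted: pass on the stack */
    } else {
      p.regNo = next + 1;  /* 1-based so 0 can mean "no register" */
      next += p.numRegs;   /* a pair advances the counter by two */
    }
  }
}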
AArch64MemOperand::kPreIndex : AArch64MemOperand::kPostIndex); +MemOperand &AArch64CGFunc::CreateCallFrameOperand(int32 offset, int32 size) { + MemOperand *memOpnd = CreateStackMemOpnd(RSP, offset, size); + memOpnd->SetIndexOpt((offset < 0) ? MemOperand::kPreIndex : MemOperand::kPostIndex); + return *memOpnd; } AArch64CGFunc::MovkLslOperandArray AArch64CGFunc::movkLslOperands = { @@ -5752,8 +6294,65 @@ AArch64CGFunc::MovkLslOperandArray AArch64CGFunc::movkLslOperands = { /* kShiftAmount12 = 12, less than 16, use 4 bit to store, bitLen is 4 */ LogicalShiftLeftOperand AArch64CGFunc::addSubLslOperand(kShiftAmount12, 4); -AArch64MemOperand &AArch64CGFunc::CreateStkTopOpnd(uint32 offset, uint32 size) { - return *memPool->New(RFP, offset, size); +MemOperand &AArch64CGFunc::CreateStkTopOpnd(uint32 offset, uint32 size) { + AArch64reg reg; + if (GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + reg = RSP; + } else { + reg = RFP; + } + MemOperand *memOp = CreateStackMemOpnd(reg, offset, size); + return *memOp; +} + +MemOperand *AArch64CGFunc::CreateStackMemOpnd(regno_t preg, int32 offset, uint32 size) { + auto *memOp = memPool->New( + memPool->New(preg, k64BitSize, kRegTyInt), + &CreateOfstOpnd(offset, k32BitSize), + size); + if (preg == RFP || preg == RSP) { + memOp->SetStackMem(true); + } + return memOp; +} + +MemOperand *AArch64CGFunc::CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 size, + RegOperand &base, RegOperand *index, + ImmOperand *offset, const MIRSymbol *symbol) { + auto *memOp = memPool->New( + mode, size, base, index, offset, symbol); + if (base.GetRegisterNumber() == RFP || base.GetRegisterNumber() == RSP) { + memOp->SetStackMem(true); + } + return memOp; +} + +MemOperand *AArch64CGFunc::CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 size, + RegOperand &base, RegOperand &index, + ImmOperand *offset, const MIRSymbol &symbol, bool noExtend) { + auto *memOp = memPool->New( + mode, size, base, index, offset, symbol, noExtend); + if (base.GetRegisterNumber() == RFP || base.GetRegisterNumber() == RSP) { + memOp->SetStackMem(true); + } + return memOp; +} + +MemOperand *AArch64CGFunc::CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 dSize, + RegOperand &base, RegOperand &indexOpnd, + uint32 shift, bool isSigned) { + auto *memOp = memPool->New( + mode, dSize, base, indexOpnd, shift, isSigned); + if (base.GetRegisterNumber() == RFP || base.GetRegisterNumber() == RSP) { + memOp->SetStackMem(true); + } + return memOp; +} + +MemOperand *AArch64CGFunc::CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 dSize, + const MIRSymbol &sym) { + auto *memOp = memPool->New(mode, dSize, sym); + return memOp; } void AArch64CGFunc::GenSaveMethodInfoCode(BB &bb) { @@ -5769,12 +6368,12 @@ void AArch64CGFunc::GenSaveMethodInfoCode(BB &bb) { !GetFunction().GetAttr(FUNCATTR_critical_native) && !GetFunction().GetAttr(FUNCATTR_bridge)) { RegOperand &fpReg = GetOrCreatePhysicalRegisterOperand(RFP, kSizeOfPtr * kBitsPerByte, kRegTyInt); - AArch64ListOperand *srcOpnds = memPool->New(*GetFuncScopeAllocator()); - AArch64RegOperand &parmRegOpnd1 = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt); + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + RegOperand &parmRegOpnd1 = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt); srcOpnds->PushOpnd(parmRegOpnd1); Operand &immOpnd = CreateImmOperand(0, k64BitSize, false); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_xadri64, parmRegOpnd1, immOpnd)); - 
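CreateCallFrameOperand now derives the writeback mode from the sign of the offset: a negative offset is an allocation, emitted as a pre-indexed store pair (stp x0, x1, [sp, #-16]!), and a positive offset is a deallocation, emitted as a post-indexed load pair (ldp x0, x1, [sp], #16). A sketch of that rule with hypothetical names:

#include <cstdint>
#include <cstdio>

enum class IndexOpt { kPreIndex, kPostIndex };

/* Negative offset grows the frame before the access; positive offset
 * shrinks it after the access, matching SetIndexOpt above. */
IndexOpt FrameIndexOpt(int32_t offset) {
  return (offset < 0) ? IndexOpt::kPreIndex : IndexOpt::kPostIndex;
}

void PrintFrameOp(int32_t offset) {
  if (FrameIndexOpt(offset) == IndexOpt::kPreIndex) {
    std::printf("stp x0, x1, [sp, #%d]!\n", static_cast<int>(offset));  /* allocate */
  } else {
    std::printf("ldp x0, x1, [sp], #%d\n", static_cast<int>(offset));   /* deallocate */
  }
}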
AArch64RegOperand &parmRegOpnd2 = GetOrCreatePhysicalRegisterOperand(R1, k64BitSize, kRegTyInt); + RegOperand &parmRegOpnd2 = GetOrCreatePhysicalRegisterOperand(R1, k64BitSize, kRegTyInt); srcOpnds->PushOpnd(parmRegOpnd2); SelectCopy(parmRegOpnd2, PTY_a64, fpReg, PTY_a64); @@ -5820,7 +6419,7 @@ bool AArch64CGFunc::HasStackLoadStore() { void AArch64CGFunc::GenerateYieldpoint(BB &bb) { /* ldr wzr, [RYP] # RYP hold address of the polling page. */ - auto &wzr = AArch64RegOperand::Get32bitZeroRegister(); + auto &wzr = GetZeroOpnd(k32BitSize); auto &pollingPage = CreateMemOpnd(RYP, 0, k32BitSize); auto &yieldPoint = GetCG()->BuildInstruction(MOP_wldr, wzr, pollingPage); if (GetCG()->GenerateVerboseCG()) { @@ -6105,7 +6704,7 @@ bool AArch64CGFunc::GenRetCleanup(const IntrinsiccallNode *cleanupNode, bool for Insn &ldrInsn = GetCG()->BuildInstruction(PickLdInsn(k64BitSize, PTY_a64), phyOpnd, stackLoc); GetCurBB()->AppendInsn(ldrInsn); - AArch64ListOperand *srcOpnds = memPool->New(*GetFuncScopeAllocator()); + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); srcOpnds->PushOpnd(phyOpnd); MIRSymbol *callSym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); std::string funcName("MCC_DecRef_NaiveRCFast"); @@ -6120,23 +6719,23 @@ bool AArch64CGFunc::GenRetCleanup(const IntrinsiccallNode *cleanupNode, bool for return true; } - AArch64ListOperand *srcOpnds = memPool->New(*GetFuncScopeAllocator()); + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); - AArch64ImmOperand &beginOpnd = CreateImmOperand(realMin, k64BitSize, true); + ImmOperand &beginOpnd = CreateImmOperand(realMin, k64BitSize, true); regno_t vRegNO0 = NewVReg(GetRegTyFromPrimTy(PTY_a64), GetPrimTypeSize(PTY_a64)); RegOperand &vReg0 = CreateVirtualRegisterOperand(vRegNO0); RegOperand &fpOpnd = GetOrCreateStackBaseRegOperand(); SelectAdd(vReg0, fpOpnd, beginOpnd, PTY_i64); - AArch64RegOperand &parmRegOpnd1 = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + RegOperand &parmRegOpnd1 = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); srcOpnds->PushOpnd(parmRegOpnd1); SelectCopy(parmRegOpnd1, PTY_a64, vReg0, PTY_a64); uint32 realRefNum = (realMax - realMin) / kOffsetAlign + 1; - AArch64ImmOperand &countOpnd = CreateImmOperand(realRefNum, k64BitSize, true); + ImmOperand &countOpnd = CreateImmOperand(realRefNum, k64BitSize, true); - AArch64RegOperand &parmRegOpnd2 = GetOrCreatePhysicalRegisterOperand(R1, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + RegOperand &parmRegOpnd2 = GetOrCreatePhysicalRegisterOperand(R1, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); srcOpnds->PushOpnd(parmRegOpnd2); SelectCopyImm(parmRegOpnd2, countOpnd, PTY_i64); @@ -6144,9 +6743,9 @@ bool AArch64CGFunc::GenRetCleanup(const IntrinsiccallNode *cleanupNode, bool for if ((skipSym != nullptr) && (skipOffset >= realMin) && (skipOffset <= realMax)) { /* call cleanupskip */ uint32 stOffset = (skipOffset - realMin) / kOffsetAlign; - AArch64ImmOperand &retLoc = CreateImmOperand(stOffset, k64BitSize, true); + ImmOperand &retLoc = CreateImmOperand(stOffset, k64BitSize, true); - AArch64RegOperand &parmRegOpnd3 = GetOrCreatePhysicalRegisterOperand(R2, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + RegOperand &parmRegOpnd3 = GetOrCreatePhysicalRegisterOperand(R2, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); srcOpnds->PushOpnd(parmRegOpnd3); SelectCopyImm(parmRegOpnd3, retLoc, PTY_i64); @@ -6180,11 +6779,16 @@ bool AArch64CGFunc::GenRetCleanup(const IntrinsiccallNode *cleanupNode, bool for return 
true; } +RegOperand *AArch64CGFunc::CreateVirtualRegisterOperand(regno_t vRegNO, uint32 size, RegType kind, uint32 flg) { + RegOperand *res = memPool->New(vRegNO, size, kind, flg); + return res; +} + RegOperand &AArch64CGFunc::CreateVirtualRegisterOperand(regno_t vRegNO) { ASSERT((vRegOperandTable.find(vRegNO) == vRegOperandTable.end()), "already exist"); ASSERT(vRegNO < vRegTable.size() , "index out of range"); uint8 bitSize = static_cast((static_cast(vRegTable[vRegNO].GetSize())) * kBitsPerByte); - RegOperand *res = memPool->New(vRegNO, bitSize, vRegTable.at(vRegNO).GetType()); + RegOperand *res = CreateVirtualRegisterOperand(vRegNO, bitSize, vRegTable.at(vRegNO).GetType()); vRegOperandTable[vRegNO] = res; return *res; } @@ -6331,7 +6935,7 @@ void AArch64CGFunc::HandleRCCall(bool begin, const MIRSymbol *retRef) { Insn &ldrInsn = GetCG()->BuildInstruction(PickLdInsn(k64BitSize, PTY_a64), phyOpnd, stackLoc); GetCurBB()->AppendInsn(ldrInsn); - AArch64ListOperand *srcOpnds = memPool->New(*GetFuncScopeAllocator()); + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); srcOpnds->PushOpnd(phyOpnd); MIRSymbol *callSym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); std::string funcName("MCC_DecRef_NaiveRCFast"); @@ -6365,7 +6969,7 @@ void AArch64CGFunc::HandleRCCall(bool begin, const MIRSymbol *retRef) { Insn &ldrInsn = GetCG()->BuildInstruction(PickLdInsn(k64BitSize, PTY_a64), phyOpnd, *stackLoc); GetCurBB()->AppendInsn(ldrInsn); - AArch64ListOperand *srcOpnds = memPool->New(*GetFuncScopeAllocator()); + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); srcOpnds->PushOpnd(phyOpnd); MIRSymbol *callSym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); std::string funcName("MCC_DecRef_NaiveRCFast"); @@ -6382,11 +6986,11 @@ void AArch64CGFunc::HandleRCCall(bool begin, const MIRSymbol *retRef) { } bool needSkip = false; - AArch64ListOperand *srcOpnds = memPool->New(*GetFuncScopeAllocator()); + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); - AArch64ImmOperand *beginOpnd = + ImmOperand *beginOpnd = &CreateImmOperand(memLayout->GetRefLocBaseLoc() + kIntregBytelen * formalRef, k64BitSize, true); - AArch64ImmOperand *countOpnd = &CreateImmOperand(refNum, k64BitSize, true); + ImmOperand *countOpnd = &CreateImmOperand(refNum, k64BitSize, true); int32 refSkipIndex = -1; if (!begin && retRef != nullptr) { AArch64SymbolAlloc *symLoc = @@ -6410,7 +7014,7 @@ void AArch64CGFunc::HandleRCCall(bool begin, const MIRSymbol *retRef) { RegOperand &fpOpnd = GetOrCreateStackBaseRegOperand(); SelectAdd(vReg0, fpOpnd, *beginOpnd, PTY_i64); - AArch64RegOperand &parmRegOpnd1 = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + RegOperand &parmRegOpnd1 = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); srcOpnds->PushOpnd(parmRegOpnd1); SelectCopy(parmRegOpnd1, PTY_a64, vReg0, PTY_a64); @@ -6418,7 +7022,7 @@ void AArch64CGFunc::HandleRCCall(bool begin, const MIRSymbol *retRef) { RegOperand &vReg1 = CreateVirtualRegisterOperand(vRegNO1); SelectCopyImm(vReg1, *countOpnd, PTY_i64); - AArch64RegOperand &parmRegOpnd2 = GetOrCreatePhysicalRegisterOperand(R1, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + RegOperand &parmRegOpnd2 = GetOrCreatePhysicalRegisterOperand(R1, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); srcOpnds->PushOpnd(parmRegOpnd2); SelectCopy(parmRegOpnd2, PTY_a64, vReg1, PTY_a64); @@ -6439,13 +7043,13 @@ void AArch64CGFunc::HandleRCCall(bool begin, const MIRSymbol *retRef) { } 
AArch64SymbolAlloc *symLoc = static_cast(memLayout->GetSymAllocInfo(retRef->GetStIndex())); int32 stOffset = symLoc->GetOffset() / kOffsetAlign; - AArch64ImmOperand &retLoc = CreateImmOperand(stOffset, k64BitSize, true); + ImmOperand &retLoc = CreateImmOperand(stOffset, k64BitSize, true); regno_t vRegNO2 = NewVReg(GetRegTyFromPrimTy(PTY_a64), GetPrimTypeSize(PTY_a64)); RegOperand &vReg2 = CreateVirtualRegisterOperand(vRegNO2); SelectCopyImm(vReg2, retLoc, PTY_i64); - AArch64RegOperand &parmRegOpnd3 = GetOrCreatePhysicalRegisterOperand(R2, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + RegOperand &parmRegOpnd3 = GetOrCreatePhysicalRegisterOperand(R2, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); srcOpnds->PushOpnd(parmRegOpnd3); SelectCopy(parmRegOpnd3, PTY_a64, vReg2, PTY_a64); @@ -6469,7 +7073,7 @@ void AArch64CGFunc::HandleRCCall(bool begin, const MIRSymbol *retRef) { } void AArch64CGFunc::SelectParmListDreadSmallAggregate(const MIRSymbol &sym, MIRType &structType, - AArch64ListOperand &srcOpnds, + ListOperand &srcOpnds, int32 offset, AArch64CallConvImpl &parmLocator, FieldID fieldID) { /* * in two param regs if possible @@ -6502,25 +7106,25 @@ void AArch64CGFunc::SelectParmListDreadSmallAggregate(const MIRSymbol &sym, MIRT CreateCallStructParamPassByStack(symSize, &sym, nullptr, ploc.memOffset); } else { /* pass by param regs. */ - AArch64RegOperand *parmOpnd0 = SelectParmListDreadAccessField(sym, fieldID, ploc, offset, 0); + RegOperand *parmOpnd0 = SelectParmListDreadAccessField(sym, fieldID, ploc, offset, 0); srcOpnds.PushOpnd(*parmOpnd0); if (ploc.reg1) { - AArch64RegOperand *parmOpnd1 = SelectParmListDreadAccessField(sym, fieldID, ploc, offset, 1); + RegOperand *parmOpnd1 = SelectParmListDreadAccessField(sym, fieldID, ploc, offset, 1); srcOpnds.PushOpnd(*parmOpnd1); } if (ploc.reg2) { - AArch64RegOperand *parmOpnd2 = SelectParmListDreadAccessField(sym, fieldID, ploc, offset, 2); + RegOperand *parmOpnd2 = SelectParmListDreadAccessField(sym, fieldID, ploc, offset, 2); srcOpnds.PushOpnd(*parmOpnd2); } if (ploc.reg3) { - AArch64RegOperand *parmOpnd3 = SelectParmListDreadAccessField(sym, fieldID, ploc, offset, 3); + RegOperand *parmOpnd3 = SelectParmListDreadAccessField(sym, fieldID, ploc, offset, 3); srcOpnds.PushOpnd(*parmOpnd3); } } } void AArch64CGFunc::SelectParmListIreadSmallAggregate(const IreadNode &iread, MIRType &structType, - AArch64ListOperand &srcOpnds, int32 offset, + ListOperand &srcOpnds, int32 offset, AArch64CallConvImpl &parmLocator) { int32 symSize = GetBecommon().GetTypeSize(structType.GetTypeIndex().GetIdx()); RegOperand *addrOpnd0 = static_cast(HandleExpr(iread, *(iread.Opnd(0)))); @@ -6550,35 +7154,35 @@ void AArch64CGFunc::SelectParmListIreadSmallAggregate(const IreadNode &iread, MI default: break; } - AArch64OfstOperand *offOpnd0 = &GetOrCreateOfstOpnd(static_cast(offset), k32BitSize); + OfstOperand *offOpnd0 = &GetOrCreateOfstOpnd(static_cast(offset), k32BitSize); MemOperand *mopnd = - &GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, memSize, addrOpnd1, nullptr, offOpnd0, nullptr); + &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, memSize, addrOpnd1, nullptr, offOpnd0, nullptr); CreateCallStructParamPassByReg(ploc.reg0, *mopnd, srcOpnds, state); if (ploc.reg1) { - AArch64OfstOperand *offOpnd1 = + OfstOperand *offOpnd1 = &GetOrCreateOfstOpnd(((ploc.fpSize ? 
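Each successive register piece of a small aggregate is loaded at the base offset plus one step per piece, where the step is fpSize (4 or 8) for a homogeneous float aggregate and the pointer width otherwise. A simplified model of the offOpnd0..offOpnd3 series; PieceOffset is a hypothetical name, and the uniform-step reading of the fpSize arithmetic is my assumption:

#include <cstdint>

/* Offset of the k-th register piece: float pieces advance by fpSize,
 * integer pieces by the 8-byte pointer width. */
uint32_t PieceOffset(uint32_t base, uint32_t k, uint32_t fpSize /* 0 = int */) {
  uint32_t step = (fpSize != 0) ? fpSize : 8;
  return base + (k * step);
}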
ploc.fpSize : kSizeOfPtr) + static_cast(offset)), k32BitSize); - mopnd = &GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, memSize, addrOpnd1, nullptr, offOpnd1, nullptr); + mopnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, memSize, addrOpnd1, nullptr, offOpnd1, nullptr); CreateCallStructParamPassByReg(ploc.reg1, *mopnd, srcOpnds, state); } if (ploc.reg2) { - AArch64OfstOperand *offOpnd2 = + OfstOperand *offOpnd2 = &GetOrCreateOfstOpnd(((ploc.fpSize ? (ploc.fpSize * k4BitShift) : kSizeOfPtr) + static_cast(offset)), k32BitSize); - mopnd = &GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, memSize, addrOpnd1, nullptr, offOpnd2, nullptr); + mopnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, memSize, addrOpnd1, nullptr, offOpnd2, nullptr); CreateCallStructParamPassByReg(ploc.reg2, *mopnd, srcOpnds, state); } if (ploc.reg3) { - AArch64OfstOperand *offOpnd3 = + OfstOperand *offOpnd3 = &GetOrCreateOfstOpnd(((ploc.fpSize ? (ploc.fpSize * k8BitShift) : kSizeOfPtr) + static_cast(offset)), k32BitSize); - mopnd = &GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, memSize, addrOpnd1, nullptr, offOpnd3, nullptr); + mopnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, memSize, addrOpnd1, nullptr, offOpnd3, nullptr); CreateCallStructParamPassByReg(ploc.reg3, *mopnd, srcOpnds, state); } } } void AArch64CGFunc::SelectParmListDreadLargeAggregate(const MIRSymbol &sym, MIRType &structType, - AArch64ListOperand &srcOpnds, + ListOperand &srcOpnds, AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, int32 fromOffset) { /* @@ -6605,8 +7209,8 @@ void AArch64CGFunc::SelectParmListDreadLargeAggregate(const MIRSymbol &sym, MIRT parmLocator.LocateNextParm(structType, ploc); uint32 numMemOp = static_cast(RoundUp(symSize, kSizeOfPtr) / kSizeOfPtr); /* round up */ /* Create the struct copies. 
*/ - AArch64RegOperand *parmOpnd = CreateCallStructParamCopyToStack(numMemOp, &sym, nullptr, structCopyOffset, fromOffset, - ploc); + RegOperand *parmOpnd = CreateCallStructParamCopyToStack(numMemOp, &sym, nullptr, structCopyOffset, + fromOffset, ploc); if (parmOpnd) { srcOpnds.PushOpnd(*parmOpnd); } @@ -6614,7 +7218,7 @@ void AArch64CGFunc::SelectParmListDreadLargeAggregate(const MIRSymbol &sym, MIRT } void AArch64CGFunc::SelectParmListIreadLargeAggregate(const IreadNode &iread, MIRType &structType, - AArch64ListOperand &srcOpnds, AArch64CallConvImpl &parmLocator, + ListOperand &srcOpnds, AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, int32 fromOffset) { uint64 symSize = GetBecommon().GetTypeSize(structType.GetTypeIndex().GetIdx()); RegOperand *addrOpnd0 = static_cast(HandleExpr(iread, *(iread.Opnd(0)))); @@ -6622,7 +7226,7 @@ void AArch64CGFunc::SelectParmListIreadLargeAggregate(const IreadNode &iread, MI CCLocInfo ploc; parmLocator.LocateNextParm(structType, ploc); uint32 numMemOp = static_cast(RoundUp(symSize, kSizeOfPtr) / kSizeOfPtr); /* round up */ - AArch64RegOperand *parmOpnd = + RegOperand *parmOpnd = CreateCallStructParamCopyToStack(numMemOp, nullptr, addrOpnd1, structCopyOffset, fromOffset, ploc); structCopyOffset += static_cast(numMemOp * kSizeOfPtr); if (parmOpnd) { @@ -6644,10 +7248,10 @@ void AArch64CGFunc::CreateCallStructParamPassByStack(int32 symSize, const MIRSym } } else { if (CGOptions::IsArm64ilp32()) { - ldMopnd = &GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, k64BitSize, addrOpnd, nullptr, + ldMopnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, addrOpnd, nullptr, &GetOrCreateOfstOpnd(static_cast(j) * k8ByteSize, k32BitSize), nullptr); } else { - ldMopnd = &GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, k64BitSize, addrOpnd, nullptr, + ldMopnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, addrOpnd, nullptr, &GetOrCreateOfstOpnd(static_cast(j) * kSizeOfPtr, k32BitSize), nullptr); } } @@ -6662,11 +7266,11 @@ void AArch64CGFunc::CreateCallStructParamPassByStack(int32 symSize, const MIRSym } } -AArch64RegOperand *AArch64CGFunc::SelectParmListDreadAccessField(const MIRSymbol &sym, FieldID fieldID, - const CCLocInfo &ploc, int32 offset, uint32 parmNum) { +RegOperand *AArch64CGFunc::SelectParmListDreadAccessField(const MIRSymbol &sym, FieldID fieldID, + const CCLocInfo &ploc, int32 offset, uint32 parmNum) { uint32 memSize; PrimType primType; - AArch64RegOperand *parmOpnd; + RegOperand *parmOpnd; uint32 dataSizeBits; AArch64reg reg; switch (parmNum) { @@ -6719,18 +7323,18 @@ AArch64RegOperand *AArch64CGFunc::SelectParmListDreadAccessField(const MIRSymbol } } MOperator selectedMop = PickLdInsn(dataSizeBits, primType); - if ((static_cast(memOpnd)->GetAddrMode() == AArch64MemOperand::kAddrModeBOi) && + if ((memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi) && !IsOperandImmValid(selectedMop, memOpnd, kInsnSecondOpnd)) { - memOpnd = &SplitOffsetWithAddInstruction(*static_cast(memOpnd), dataSizeBits); + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dataSizeBits); } GetCurBB()->AppendInsn(cg->BuildInstruction(selectedMop, *parmOpnd, *memOpnd)); return parmOpnd; } -void AArch64CGFunc::CreateCallStructParamPassByReg(regno_t regno, MemOperand &memOpnd, AArch64ListOperand &srcOpnds, +void AArch64CGFunc::CreateCallStructParamPassByReg(regno_t regno, MemOperand &memOpnd, ListOperand &srcOpnds, fpParamState state) { - AArch64RegOperand *parmOpnd; + RegOperand *parmOpnd; uint32 dataSizeBits = 0; PrimType pType = PTY_void; parmOpnd 
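Both large-aggregate paths size the on-stack copy in pointer-width slots, rounding the type size up with RoundUp(symSize, kSizeOfPtr) / kSizeOfPtr. A sketch of that slot arithmetic (hypothetical helper name):

#include <cstdint>

/* A 20-byte struct occupies ceil(20 / 8) = 3 copy slots. */
uint32_t NumCopySlots(uint64_t symSize, uint64_t slotSize = 8) {
  return static_cast<uint32_t>((symSize + slotSize - 1) / slotSize);
}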
= nullptr; @@ -6753,7 +7357,7 @@ void AArch64CGFunc::CreateCallStructParamPassByReg(regno_t regno, MemOperand &me MOperator selectedMop = PickLdInsn(dataSizeBits, pType); if (!IsOperandImmValid(selectedMop, &memOpnd, kInsnSecondOpnd)) { - memOpnd = SplitOffsetWithAddInstruction(static_cast(memOpnd), dataSizeBits); + memOpnd = SplitOffsetWithAddInstruction(memOpnd, dataSizeBits); } GetCurBB()->AppendInsn(cg->BuildInstruction(selectedMop, *parmOpnd, memOpnd)); srcOpnds.PushOpnd(*parmOpnd); @@ -6768,26 +7372,26 @@ void AArch64CGFunc::CreateCallStructParamMemcpy(const MIRSymbol *sym, RegOperand RegOperand *parmOpnd = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8BitSize)); RegOperand *spReg = &GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); - AArch64ImmOperand *offsetOpnd0 = &CreateImmOperand(copyOffset, k64BitSize, false); + ImmOperand *offsetOpnd0 = &CreateImmOperand(copyOffset, k64BitSize, false); SelectAdd(*parmOpnd, *spReg, *offsetOpnd0, PTY_a64); opndVec.push_back(parmOpnd); /* param 0 */ if (sym != nullptr) { if (sym->GetStorageClass() == kScGlobal || sym->GetStorageClass() == kScExtern) { StImmOperand &stopnd = CreateStImmOperand(*sym, fromOffset, 0); - AArch64RegOperand &staddropnd = static_cast(CreateRegisterOperandOfType(PTY_u64)); + RegOperand &staddropnd = static_cast(CreateRegisterOperandOfType(PTY_u64)); SelectAddrof(staddropnd, stopnd); opndVec.push_back(&staddropnd); /* param 1 */ } else if (sym->GetStorageClass() == kScAuto || sym->GetStorageClass() == kScFormal) { RegOperand *parm1Reg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); AArch64SymbolAlloc *symloc = static_cast(GetMemlayout()->GetSymAllocInfo(sym->GetStIndex())); - AArch64RegOperand *baseOpnd = static_cast(GetBaseReg(*symloc)); + RegOperand *baseOpnd = static_cast(GetBaseReg(*symloc)); int32 stoffset = GetBaseOffset(*symloc); - AArch64ImmOperand *offsetOpnd1 = &CreateImmOperand(static_cast(stoffset) + fromOffset, k64BitSize, false); + ImmOperand *offsetOpnd1 = &CreateImmOperand(static_cast(stoffset), k64BitSize, false); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_xaddrri12, *parm1Reg, *baseOpnd, *offsetOpnd1)); if (sym->GetStorageClass() == kScFormal) { MemOperand *ldmopnd = - &GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, k64BitSize, parm1Reg, nullptr, + &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, parm1Reg, nullptr, &GetOrCreateOfstOpnd(0, k32BitSize), static_cast(nullptr)); RegOperand *tmpreg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); RegOperand *vreg2 = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); @@ -6801,7 +7405,7 @@ void AArch64CGFunc::CreateCallStructParamMemcpy(const MIRSymbol *sym, RegOperand } else if (sym->GetStorageClass() == kScPstatic || sym->GetStorageClass() == kScFstatic) { CHECK_FATAL(sym->GetSKind() != kStConst, "Unsupported sym const for struct param"); StImmOperand *stopnd = &CreateStImmOperand(*sym, 0, 0); - AArch64RegOperand &staddropnd = static_cast(CreateRegisterOperandOfType(PTY_u64)); + RegOperand &staddropnd = static_cast(CreateRegisterOperandOfType(PTY_u64)); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_xadrp, staddropnd, *stopnd)); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_xadrpl12, staddropnd, staddropnd, *stopnd)); opndVec.push_back(&staddropnd); /* param 1 */ @@ -6813,16 +7417,16 @@ void AArch64CGFunc::CreateCallStructParamMemcpy(const MIRSymbol *sym, RegOperand } RegOperand &vreg3 = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8BitSize)); - 
AArch64ImmOperand &sizeOpnd = CreateImmOperand(structSize, k64BitSize, false); + ImmOperand &sizeOpnd = CreateImmOperand(structSize, k64BitSize, false); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_xmovri32, vreg3, sizeOpnd)); opndVec.push_back(&vreg3); /* param 2 */ SelectLibCall("memcpy", opndVec, PTY_a64, PTY_a64); } -AArch64RegOperand *AArch64CGFunc::CreateCallStructParamCopyToStack(uint32 numMemOp, const MIRSymbol *sym, - RegOperand *addrOpd, int32 copyOffset, - int32 fromOffset, const CCLocInfo &ploc) { +RegOperand *AArch64CGFunc::CreateCallStructParamCopyToStack(uint32 numMemOp, const MIRSymbol *sym, + RegOperand *addrOpd, int32 copyOffset, + int32 fromOffset, const CCLocInfo &ploc) { /* Create the struct copies. */ MemOperand *ldMopnd = nullptr; MemOperand *stMopnd = nullptr; @@ -6833,7 +7437,7 @@ AArch64RegOperand *AArch64CGFunc::CreateCallStructParamCopyToStack(uint32 numMem RegOperand &vreg = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); Insn &ldInsn = cg->BuildInstruction(PickLdInsn(k64BitSize, PTY_i64), vreg, base); GetCurBB()->AppendInsn(ldInsn); - ldMopnd = &GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, k64BitSize, &vreg, nullptr, + ldMopnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, &vreg, nullptr, &GetOrCreateOfstOpnd((j * kSizeOfPtr + static_cast(fromOffset)), k32BitSize), nullptr); } else { if (CGOptions::IsArm64ilp32()) { @@ -6843,7 +7447,7 @@ AArch64RegOperand *AArch64CGFunc::CreateCallStructParamCopyToStack(uint32 numMem } } } else { - ldMopnd = &GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, k64BitSize, addrOpd, nullptr, + ldMopnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, addrOpd, nullptr, &GetOrCreateOfstOpnd((j * kSizeOfPtr + static_cast(fromOffset)), k32BitSize), nullptr); } if (CGOptions::IsArm64ilp32()) { @@ -6862,7 +7466,7 @@ AArch64RegOperand *AArch64CGFunc::CreateCallStructParamCopyToStack(uint32 numMem } /* Create the copy address parameter for the struct */ RegOperand *fpopnd = &GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); - AArch64ImmOperand *offset = &CreateImmOperand(copyOffset, k64BitSize, false); + ImmOperand *offset = &CreateImmOperand(copyOffset, k64BitSize, false); if (ploc.reg0 == kRinvalid) { RegOperand &res = CreateRegisterOperandOfType(PTY_u64); SelectAdd(res, *fpopnd, *offset, PTY_u64); @@ -6871,16 +7475,17 @@ AArch64RegOperand *AArch64CGFunc::CreateCallStructParamCopyToStack(uint32 numMem GetCG()->BuildInstruction(PickStInsn(k64BitSize, PTY_i64), res, stMopnd2)); return nullptr; } else { - AArch64RegOperand *parmOpnd = &GetOrCreatePhysicalRegisterOperand(static_cast(ploc.reg0), k64BitSize, kRegTyInt); + RegOperand *parmOpnd = &GetOrCreatePhysicalRegisterOperand(static_cast(ploc.reg0), + k64BitSize, kRegTyInt); SelectAdd(*parmOpnd, *fpopnd, *offset, PTY_a64); return parmOpnd; } } void AArch64CGFunc::CreateCallStructMemcpyToParamReg(MIRType &structType, int32 structCopyOffset, - AArch64CallConvImpl &parmLocator, AArch64ListOperand &srcOpnds) { + AArch64CallConvImpl &parmLocator, ListOperand &srcOpnds) { RegOperand &spReg = GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); - AArch64ImmOperand &offsetOpnd = CreateImmOperand(structCopyOffset, k64BitSize, false); + ImmOperand &offsetOpnd = CreateImmOperand(structCopyOffset, k64BitSize, false); CCLocInfo ploc; parmLocator.LocateNextParm(structType, ploc); @@ -6896,7 +7501,7 @@ void AArch64CGFunc::CreateCallStructMemcpyToParamReg(MIRType &structType, int32 } } -void 
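When the aggregate is too big to copy inline, the call is lowered to a memcpy libcall whose three parameters are the outgoing-argument slot (sp + copyOffset), the source address, and the struct size; the slot's address is then what the callee receives. A host-level model of that lowering, where stackArea stands in for sp + copyOffset:

#include <cstring>

/* Copy the aggregate into the outgoing-argument area, then pass the
 * area's address as the actual parameter, as SelectLibCall("memcpy", ...)
 * arranges above. */
void *PassStructByCopy(void *stackArea, const void *src, std::size_t structSize) {
  std::memcpy(stackArea, src, structSize);
  return stackArea;
}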
AArch64CGFunc::SelectParmListForAggregate(BaseNode &argExpr, AArch64ListOperand &srcOpnds, +void AArch64CGFunc::SelectParmListForAggregate(BaseNode &argExpr, ListOperand &srcOpnds, AArch64CallConvImpl &parmLocator, int32 &structCopyOffset) { uint64 symSize; int32 rhsOffset = 0; @@ -6954,8 +7559,9 @@ size_t AArch64CGFunc::SelectParmListGetStructReturnSize(StmtNode &naryNode) { MIRFunction *callFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode.GetPUIdx()); TyIdx retIdx = callFunc->GetReturnTyIdx(); size_t retSize = GetBecommon().GetTypeSize(retIdx.GetIdx()); - if ((retSize == 0) && GetBecommon().HasFuncReturnType(*callFunc)) { - return GetBecommon().GetTypeSize(GetBecommon().GetFuncReturnType(*callFunc)); + if ((retSize == 0) && callFunc->IsReturnStruct()) { + TyIdx tyIdx = callFunc->GetFuncRetStructTyIdx(); + return GetBecommon().GetTypeSize(tyIdx); } return retSize; } else if (naryNode.GetOpCode() == OP_icall) { @@ -7039,7 +7645,7 @@ void AArch64CGFunc::SelectParmListPreprocess(const StmtNode &naryNode, size_t st We return a list of registers to the call instruction because they may be needed in the register allocation phase. */ -void AArch64CGFunc::SelectParmList(StmtNode &naryNode, AArch64ListOperand &srcOpnds, bool isCallNative) { +void AArch64CGFunc::SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, bool isCallNative) { size_t i = 0; if ((naryNode.GetOpCode() == OP_icall) || isCallNative) { i++; @@ -7048,6 +7654,8 @@ void AArch64CGFunc::SelectParmList(StmtNode &naryNode, AArch64ListOperand &srcOp AArch64CallConvImpl parmLocator(GetBecommon()); CCLocInfo ploc; int32 structCopyOffset = GetMaxParamStackSize() - GetStructCopySize(); + std::vector insnForStackArgs; + uint32 stackArgsCount = 0; for (uint32 pnum = 0; i < naryNode.NumOpnds(); ++i, ++pnum) { bool is64x1vec = false; MIRType *ty = nullptr; @@ -7112,7 +7720,7 @@ void AArch64CGFunc::SelectParmList(StmtNode &naryNode, AArch64ListOperand &srcOp ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast(primType)]; RegOperand *expRegOpnd = nullptr; Operand *opnd = HandleExpr(naryNode, *argExpr); - if (opnd->GetKind() == Operand::kOpdRegister && static_cast(opnd)->GetIF64Vec()) { + if (opnd->GetKind() == Operand::kOpdRegister && static_cast(opnd)->GetIF64Vec()) { is64x1vec = true; } if (!opnd->IsRegister()) { @@ -7139,7 +7747,7 @@ void AArch64CGFunc::SelectParmList(StmtNode &naryNode, AArch64ListOperand &srcOp if (ploc.reg0 != kRinvalid) { /* load to the register. 
*/ CHECK_FATAL(expRegOpnd != nullptr, "null ptr check"); - AArch64RegOperand &parmRegOpnd = GetOrCreatePhysicalRegisterOperand( + RegOperand &parmRegOpnd = GetOrCreatePhysicalRegisterOperand( static_cast(ploc.reg0), expRegOpnd->GetSize(), GetRegTyFromPrimTy(destPrimType)); SelectCopy(parmRegOpnd, destPrimType, *expRegOpnd, primType); srcOpnds.PushOpnd(parmRegOpnd); @@ -7149,13 +7757,23 @@ void AArch64CGFunc::SelectParmList(StmtNode &naryNode, AArch64ListOperand &srcOp ploc.memOffset = ploc.memOffset + static_cast(k4BitSize); } } - Operand &actMemOpnd = CreateMemOpnd(RSP, ploc.memOffset, GetPrimTypeBitSize(primType)); - GetCurBB()->AppendInsn( + MemOperand &actMemOpnd = CreateMemOpnd(RSP, ploc.memOffset, GetPrimTypeBitSize(primType)); + Insn &strInsn = GetCG()->BuildInstruction(PickStInsn(GetPrimTypeBitSize(primType), primType), *expRegOpnd, - actMemOpnd)); + actMemOpnd); + actMemOpnd.SetStackArgMem(true); + if (Globals::GetInstance()->GetOptimLevel() == 2 && stackArgsCount < kShiftAmount12) { + insnForStackArgs.emplace_back(&strInsn); + stackArgsCount++; + } else { + GetCurBB()->AppendInsn(strInsn); + } } ASSERT(ploc.reg1 == 0, "SelectCall NYI"); } + for (auto &strInsn : insnForStackArgs) { + GetCurBB()->AppendInsn(*strInsn); + } } /* @@ -7176,7 +7794,7 @@ Operand *AArch64CGFunc::SelectClearStackCallParam(const AddrofNode &expr, int64 "Warning: we expect AddrOf with StImmOperand is not used for local variables"; } auto *symLoc = static_cast(GetMemlayout()->GetSymAllocInfo(symbol->GetStIndex())); - AArch64ImmOperand *offset = nullptr; + ImmOperand *offset = nullptr; if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { offset = &CreateImmOperand(GetBaseOffset(*symLoc), k64BitSize, false, kUnAdjustVary); } else if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsRefLocals) { @@ -7203,7 +7821,7 @@ Operand *AArch64CGFunc::SelectClearStackCallParam(const AddrofNode &expr, int64 } /* select paramters for MCC_DecRefResetPair and MCC_ClearLocalStackRef function */ -void AArch64CGFunc::SelectClearStackCallParmList(const StmtNode &naryNode, AArch64ListOperand &srcOpnds, +void AArch64CGFunc::SelectClearStackCallParmList(const StmtNode &naryNode, ListOperand &srcOpnds, std::vector &stackPostion) { AArch64CallConvImpl parmLocator(GetBecommon()); CCLocInfo ploc; @@ -7224,7 +7842,7 @@ void AArch64CGFunc::SelectClearStackCallParmList(const StmtNode &naryNode, AArch parmLocator.LocateNextParm(*ty, ploc); CHECK_FATAL(ploc.reg0 != 0, "the parameter of ClearStackCall must be passed by register"); CHECK_FATAL(expRegOpnd != nullptr, "null ptr check"); - AArch64RegOperand &parmRegOpnd = GetOrCreatePhysicalRegisterOperand( + RegOperand &parmRegOpnd = GetOrCreatePhysicalRegisterOperand( static_cast(ploc.reg0), expRegOpnd->GetSize(), GetRegTyFromPrimTy(primType)); SelectCopy(parmRegOpnd, primType, *expRegOpnd, primType); srcOpnds.PushOpnd(parmRegOpnd); @@ -7237,7 +7855,7 @@ void AArch64CGFunc::SelectClearStackCallParmList(const StmtNode &naryNode, AArch * generate an intrinsic instruction instead of a function call * intrinsic_get_add_int w0, xt, ws, ws, x1, x2, w3, label */ -void AArch64CGFunc::IntrinsifyGetAndAddInt(AArch64ListOperand &srcOpnds, PrimType pty) { +void AArch64CGFunc::IntrinsifyGetAndAddInt(ListOperand &srcOpnds, PrimType pty) { MapleList &opnds = srcOpnds.GetOperands(); /* Unsafe.getAndAddInt has more than 4 parameters */ ASSERT(opnds.size() >= 4, "ensure the operands number"); @@ -7268,7 +7886,7 @@ void AArch64CGFunc::IntrinsifyGetAndAddInt(AArch64ListOperand &srcOpnds, 
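At -O2, SelectParmList now defers up to kShiftAmount12 (12) stores into the outgoing-argument area and appends them only after the loop, so later parameter moves are not interleaved with stack traffic. A sketch of the buffering pattern; Insn here is a stand-in struct, not the real class:

#include <cstdint>
#include <string>
#include <vector>

struct Insn { std::string text; };

/* Mirror of insnForStackArgs/stackArgsCount: buffer the first few stack
 * stores, emit everything else in place, flush the buffer at the end. */
void EmitStackArgStores(std::vector<Insn> &bb, const std::vector<Insn> &stores,
                        bool isO2) {
  std::vector<Insn> deferred;
  uint32_t count = 0;
  for (const Insn &s : stores) {
    if (isO2 && count < 12) {
      deferred.push_back(s);  /* held back, appended after the loop */
      ++count;
    } else {
      bb.push_back(s);        /* cap reached: emit in place */
    }
  }
  for (const Insn &d : deferred) {
    bb.push_back(d);
  }
}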
PrimTyp * intrinsify Unsafe.getAndSetInt and Unsafe.getAndSetLong * generate an intrinsic instruction instead of a function call */ -void AArch64CGFunc::IntrinsifyGetAndSetInt(AArch64ListOperand &srcOpnds, PrimType pty) { +void AArch64CGFunc::IntrinsifyGetAndSetInt(ListOperand &srcOpnds, PrimType pty) { MapleList &opnds = srcOpnds.GetOperands(); /* Unsafe.getAndSetInt has 4 parameters */ ASSERT(opnds.size() == 4, "ensure the operands number"); @@ -7298,7 +7916,7 @@ void AArch64CGFunc::IntrinsifyGetAndSetInt(AArch64ListOperand &srcOpnds, PrimTyp * intrinsify Unsafe.compareAndSwapInt and Unsafe.compareAndSwapLong * generate an intrinsic instruction instead of a function call */ -void AArch64CGFunc::IntrinsifyCompareAndSwapInt(AArch64ListOperand &srcOpnds, PrimType pty) { +void AArch64CGFunc::IntrinsifyCompareAndSwapInt(ListOperand &srcOpnds, PrimType pty) { MapleList &opnds = srcOpnds.GetOperands(); /* Unsafe.compareAndSwapInt has more than 5 parameters */ ASSERT(opnds.size() >= 5, "ensure the operands number"); @@ -7343,7 +7961,7 @@ RegOperand *AArch64CGFunc::CheckStringIsCompressed(BB &bb, RegOperand &str, int3 RegOperand &countLowestBitOpnd = CreateRegisterOperandOfType(countPty); MOperator andOp = bitSize == k64BitSize ? MOP_xandrri13 : MOP_wandrri12; bb.AppendInsn(GetCG()->BuildInstruction(andOp, countLowestBitOpnd, countOpnd, immValueOne)); - AArch64RegOperand &wzr = AArch64RegOperand::GetZeroRegister(bitSize); + RegOperand &wzr = GetZeroOpnd(bitSize); MOperator cmpOp = (bitSize == k64BitSize) ? MOP_xcmprr : MOP_wcmprr; Operand &rflag = GetOrCreateRflag(); bb.AppendInsn(GetCG()->BuildInstruction(cmpOp, rflag, wzr, countLowestBitOpnd)); @@ -7446,7 +8064,7 @@ void AArch64CGFunc::GenerateIntrnInsnForStrIndexOf(BB &bb, RegOperand &srcString * bl String.indexOf, srcString, patternString * Label.joint: */ -void AArch64CGFunc::IntrinsifyStringIndexOf(AArch64ListOperand &srcOpnds, const MIRSymbol &funcSym) { +void AArch64CGFunc::IntrinsifyStringIndexOf(ListOperand &srcOpnds, const MIRSymbol &funcSym) { MapleList &opnds = srcOpnds.GetOperands(); /* String.indexOf opnd size must be more than 2 */ ASSERT(opnds.size() >= 2, "ensure the operands number"); @@ -7499,6 +8117,58 @@ void AArch64CGFunc::IntrinsifyStringIndexOf(AArch64ListOperand &srcOpnds, const callBB->AppendBB(*jointBB); SetCurBB(*jointBB); } + +/* Lmbc calls have no argument, they are all explicit iassignspoff or + blkassign. Info collected and to be emitted here */ +void AArch64CGFunc::LmbcSelectParmList(ListOperand *srcOpnds, bool isArgReturn) { + if (GetLmbcArgInfo() == nullptr) { + return; /* no arg */ + } + CHECK_FATAL(GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc, "To be called for Lmbc model only"); + MapleVector &args = GetLmbcCallArgs(); + MapleVector &types = GetLmbcCallArgTypes(); + MapleVector &offsets = GetLmbcCallArgOffsets(); + MapleVector ®s = GetLmbcCallArgNumOfRegs(); + int iCnt = 0; + int fCnt = 0; + for (int i = isArgReturn ? 
1 : 0; i < args.size(); i++) { + RegType ty = args[i]->GetRegisterType(); + PrimType pTy = types[i]; + AArch64reg reg; + if (args[i]->IsOfIntClass() && (iCnt + regs[i]) <= k8ByteSize) { + reg = static_cast(R0 + iCnt++); + RegOperand *res = &GetOrCreatePhysicalRegisterOperand( + reg, GetPrimTypeSize(pTy) * kBitsPerByte, ty); + SelectCopy(*res, pTy, *args[i], pTy); + srcOpnds->PushOpnd(*res); + } else if (!args[i]->IsOfIntClass() && (fCnt + regs[i]) <= k8ByteSize) { + reg = static_cast(V0 + fCnt++); + RegOperand *res = &GetOrCreatePhysicalRegisterOperand( + reg, GetPrimTypeSize(pTy) * kBitsPerByte, ty); + SelectCopy(*res, pTy, *args[i], pTy); + srcOpnds->PushOpnd(*res); + } else { + int32 pSize = GetPrimTypeSize(pTy); + Operand &memOpd = CreateMemOpnd(RSP, offsets[i], pSize); + GetCurBB()->AppendInsn( + GetCG()->BuildInstruction(PickStInsn(pSize * kBitsPerByte, pTy), + *args[i], memOpd)); + } + } + /* Load x8 if 1st arg is for agg return */ + if (isArgReturn) { + AArch64reg reg = static_cast(R8); + RegOperand *res = &GetOrCreatePhysicalRegisterOperand(reg, + GetPrimTypeSize(PTY_a64) * kBitsPerByte, + kRegTyInt); + SelectCopy(*res, PTY_a64, *args[0], PTY_a64); + srcOpnds->PushOpnd(*res); + } + ResetLmbcArgInfo(); /* reset */ + ResetLmbcArgsInRegs(); + ResetLmbcTotalArgs(); +} + void AArch64CGFunc::SelectCall(CallNode &callNode) { MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode.GetPUIdx()); MIRSymbol *fsym = GetFunction().GetLocalOrGlobalSymbol(fn->GetStIdx(), false); @@ -7509,7 +8179,10 @@ void AArch64CGFunc::SelectCall(CallNode &callNode) { GetCurBB()->AppendInsn(CreateCommentInsn(comment)); } - AArch64ListOperand *srcOpnds = memPool->New(*GetFuncScopeAllocator()); + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + if (GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + LmbcSelectParmList(srcOpnds, fn->IsFirstArgReturn()); + } bool callNative = false; if ((fsym->GetName() == "MCC_CallFastNative") || (fsym->GetName() == "MCC_CallFastNativeExt") || (fsym->GetName() == "MCC_CallSlowNative0") || (fsym->GetName() == "MCC_CallSlowNative1") || @@ -7532,11 +8205,11 @@ void AArch64CGFunc::SelectCall(CallNode &callNode) { BaseNode *funcArgExpr = callNode.Opnd(0); PrimType ptype = funcArgExpr->GetPrimType(); Operand *funcOpnd = HandleExpr(callNode, *funcArgExpr); - AArch64RegOperand &livein = GetOrCreatePhysicalRegisterOperand(R9, kSizeOfPtr * kBitsPerByte, - GetRegTyFromPrimTy(PTY_a64)); + RegOperand &livein = GetOrCreatePhysicalRegisterOperand(R9, kSizeOfPtr * kBitsPerByte, + GetRegTyFromPrimTy(PTY_a64)); SelectCopy(livein, ptype, *funcOpnd, ptype); - AArch64RegOperand &extraOpnd = GetOrCreatePhysicalRegisterOperand(R9, kSizeOfPtr * kBitsPerByte, kRegTyInt); + RegOperand &extraOpnd = GetOrCreatePhysicalRegisterOperand(R9, kSizeOfPtr * kBitsPerByte, kRegTyInt); srcOpnds->PushOpnd(extraOpnd); } const std::string &funcName = fsym->GetName(); @@ -7574,8 +8247,12 @@ void AArch64CGFunc::SelectCall(CallNode &callNode) { } void AArch64CGFunc::SelectIcall(IcallNode &icallNode, Operand &srcOpnd) { - AArch64ListOperand *srcOpnds = memPool->New(*GetFuncScopeAllocator()); - SelectParmList(icallNode, *srcOpnds); + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + if (GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + LmbcSelectParmList(srcOpnds, false /*fType->GetRetAttrs().GetAttr(ATTR_firstarg_return)*/); + } else { + SelectParmList(icallNode, *srcOpnds); + } Operand *fptrOpnd = &srcOpnd; if (fptrOpnd->GetKind() != 
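For the lmbc flavor, arguments are marshaled straight off the collected arg info using running int/fp counters, and when the first argument is really the aggregate-return pointer it bypasses the normal walk and lands in x8, the AAPCS64 indirect-result register. A reduced model of the x8 convention; CallRegs and MarshalLmbc are hypothetical names:

#include <cstdint>

struct CallRegs {
  uint64_t x[9];  /* x0..x8 in a toy caller-side model */
};

/* args[0] is the sret pointer when isArgReturn is set; it goes to x8
 * while the remaining integer-class args fill x0 upward. */
void MarshalLmbc(CallRegs &regs, const uint64_t *args, int nargs, bool isArgReturn) {
  int intReg = 0;
  for (int i = (isArgReturn ? 1 : 0); i < nargs && intReg < 8; ++i) {
    regs.x[intReg++] = args[i];
  }
  if (isArgReturn) {
    regs.x[8] = args[0];
  }
}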
Operand::kOpdRegister) { @@ -7641,35 +8318,34 @@ void AArch64CGFunc::SelectReturn(Operand *opnd0) { AArch64CallConvImpl retLocator(GetBecommon()); CCLocInfo retMech; retLocator.InitReturnInfo(*retTyp, retMech); - if (retMech.GetRegCount() > 0) { - CHECK_FATAL(opnd0 != nullptr, "opnd0 must not be nullptr"); + if ((retMech.GetRegCount() > 0) && (opnd0 != nullptr)) { RegType regTyp = is64x1vec ? kRegTyFloat : GetRegTyFromPrimTy(retMech.GetPrimTypeOfReg0()); PrimType oriPrimType = is64x1vec ? GetFunction().GetReturnType()->GetPrimType() : retMech.GetPrimTypeOfReg0(); AArch64reg retReg = static_cast(retMech.GetReg0()); if (opnd0->IsRegister()) { RegOperand *regOpnd = static_cast(opnd0); if (regOpnd->GetRegisterNumber() != retMech.GetReg0()) { - AArch64RegOperand &retOpnd = + RegOperand &retOpnd = GetOrCreatePhysicalRegisterOperand(retReg, regOpnd->GetSize(), regTyp); SelectCopy(retOpnd, retMech.GetPrimTypeOfReg0(), *regOpnd, oriPrimType); } } else if (opnd0->IsMemoryAccessOperand()) { - AArch64MemOperand *memopnd = static_cast(opnd0); - AArch64RegOperand &retOpnd = GetOrCreatePhysicalRegisterOperand(retReg, + auto *memopnd = static_cast(opnd0); + RegOperand &retOpnd = GetOrCreatePhysicalRegisterOperand(retReg, GetPrimTypeBitSize(retMech.GetPrimTypeOfReg0()), regTyp); MOperator mOp = PickLdInsn(memopnd->GetSize(), retMech.GetPrimTypeOfReg0()); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(mOp, retOpnd, *memopnd)); } else if (opnd0->IsConstImmediate()) { ImmOperand *immOpnd = static_cast(opnd0); if (!is64x1vec) { - AArch64RegOperand &retOpnd = GetOrCreatePhysicalRegisterOperand(retReg, + RegOperand &retOpnd = GetOrCreatePhysicalRegisterOperand(retReg, GetPrimTypeBitSize(retMech.GetPrimTypeOfReg0()), GetRegTyFromPrimTy(retMech.GetPrimTypeOfReg0())); SelectCopy(retOpnd, retMech.GetPrimTypeOfReg0(), *immOpnd, retMech.GetPrimTypeOfReg0()); } else { PrimType rType = GetFunction().GetReturnType()->GetPrimType(); RegOperand *reg = &CreateRegisterOperandOfType(rType); SelectCopy(*reg, rType, *immOpnd, rType); - AArch64RegOperand &retOpnd = GetOrCreatePhysicalRegisterOperand(retReg, + RegOperand &retOpnd = GetOrCreatePhysicalRegisterOperand(retReg, GetPrimTypeBitSize(PTY_f64), GetRegTyFromPrimTy(PTY_f64)); Insn &insn = GetCG()->BuildInstruction(MOP_xvmovdr, retOpnd, *reg); GetCurBB()->AppendInsn(insn); @@ -7677,8 +8353,6 @@ void AArch64CGFunc::SelectReturn(Operand *opnd0) { } else { CHECK_FATAL(false, "nyi"); } - } else if (opnd0 != nullptr) { /* pass in memory */ - CHECK_FATAL(false, "SelectReturn: return in memory NYI"); } GetExitBBsVec().emplace_back(GetCurBB()); } @@ -7692,6 +8366,9 @@ RegOperand &AArch64CGFunc::GetOrCreateSpecialRegisterOperand(PregIdx sregIdx, Pr case kSregFp: reg = RFP; break; + case kSregGp: + reg = RFP; + break; case kSregThrownval: { /* uses x0 == R0 */ ASSERT(uCatch.regNOCatch > 0, "regNOCatch should greater than 0."); if (Globals::GetInstance()->GetOptimLevel() == 0) { @@ -7720,7 +8397,7 @@ RegOperand &AArch64CGFunc::GetOrCreateSpecialRegisterOperand(PregIdx sregIdx, Pr return GetOrCreatePhysicalRegisterOperand(reg, k64BitSize, kRegTyInt); } -AArch64RegOperand &AArch64CGFunc::GetOrCreatePhysicalRegisterOperand(std::string &asmAttr) { +RegOperand &AArch64CGFunc::GetOrCreatePhysicalRegisterOperand(std::string &asmAttr) { ASSERT(!asmAttr.empty(), "Get inline asm string failed in GetOrCreatePhysicalRegisterOperand"); RegType rKind = kRegTyUndef; uint32 rSize = 0; @@ -7765,8 +8442,8 @@ AArch64RegOperand &AArch64CGFunc::GetOrCreatePhysicalRegisterOperand(std::string return 
GetOrCreatePhysicalRegisterOperand(rNO, rSize, rKind); } -AArch64RegOperand &AArch64CGFunc::GetOrCreatePhysicalRegisterOperand(AArch64reg regNO, uint32 size, - RegType kind, uint32 flag) { +RegOperand &AArch64CGFunc::GetOrCreatePhysicalRegisterOperand(AArch64reg regNO, uint32 size, + RegType kind, uint32 flag) { uint64 aarch64PhyRegIdx = regNO; ASSERT(flag == 0, "Do not expect flag here"); if (size <= k32BitSize) { @@ -7779,12 +8456,12 @@ AArch64RegOperand &AArch64CGFunc::GetOrCreatePhysicalRegisterOperand(AArch64reg size = (size == k128BitSize) ? k128BitSize : k64BitSize; aarch64PhyRegIdx = aarch64PhyRegIdx << 2; } - AArch64RegOperand *phyRegOpnd = nullptr; + RegOperand *phyRegOpnd = nullptr; auto phyRegIt = phyRegOperandTable.find(aarch64PhyRegIdx); if (phyRegIt != phyRegOperandTable.end()) { phyRegOpnd = phyRegOperandTable[aarch64PhyRegIdx]; } else { - phyRegOpnd = memPool->New(regNO, size, kind, flag); + phyRegOpnd = memPool->New(regNO, size, kind, flag); phyRegOperandTable.emplace(aarch64PhyRegIdx, phyRegOpnd); } return *phyRegOpnd; @@ -7818,11 +8495,6 @@ LabelOperand &AArch64CGFunc::GetOrCreateLabelOperand(BB &bb) { return GetOrCreateLabelOperand(labelIdx); } -LabelOperand &AArch64CGFunc::CreateFuncLabelOperand(const MIRSymbol &funcSymbol) { - const char *funcName = memPool->New(funcSymbol.GetName())->c_str(); - return *memPool->New(funcName); -} - uint32 AArch64CGFunc::GetAggCopySize(uint32 offset1, uint32 offset2, uint32 alignment) const { /* Generating a larger sized mem op than alignment if allowed by aggregate starting address */ uint32 offsetAlign1 = (offset1 == 0) ? k8ByteSize : offset1; @@ -7836,7 +8508,7 @@ uint32 AArch64CGFunc::GetAggCopySize(uint32 offset1, uint32 offset2, uint32 alig } } -AArch64OfstOperand &AArch64CGFunc::GetOrCreateOfstOpnd(uint64 offset, uint32 size) { +OfstOperand &AArch64CGFunc::GetOrCreateOfstOpnd(uint64 offset, uint32 size) { uint64 aarch64OfstRegIdx = offset; aarch64OfstRegIdx = (aarch64OfstRegIdx << 1); if (size == k64BitSize) { @@ -7847,7 +8519,7 @@ AArch64OfstOperand &AArch64CGFunc::GetOrCreateOfstOpnd(uint64 offset, uint32 siz if (it != hashOfstOpndTable.end()) { return *it->second; } - AArch64OfstOperand *res = memPool->New(offset, size); + OfstOperand *res = &CreateOfstOpnd(offset, size); hashOfstOpndTable[aarch64OfstRegIdx] = res; return *res; } @@ -7859,15 +8531,14 @@ void AArch64CGFunc::SelectAddrofAfterRa(Operand &result, StImmOperand &stImm, st rematInsns.emplace_back(&GetCG()->BuildInstruction(MOP_xadrp, result, stImm)); if (CGOptions::IsPIC() && symbol->NeedPIC()) { /* ldr x0, [x0, #:got_lo12:Ljava_2Flang_2FSystem_3B_7Cout] */ - AArch64OfstOperand &offset = CreateOfstOpnd(*stImm.GetSymbol(), stImm.GetOffset(), stImm.GetRelocs()); - AArch64MemOperand &memOpnd = GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, kSizeOfPtr * kBitsPerByte, - static_cast(srcOpnd), nullptr, - &offset, nullptr); + OfstOperand &offset = CreateOfstOpnd(*stImm.GetSymbol(), stImm.GetOffset(), stImm.GetRelocs()); + MemOperand &memOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, kSizeOfPtr * kBitsPerByte, + static_cast(srcOpnd), nullptr, &offset, nullptr); rematInsns.emplace_back(&GetCG()->BuildInstruction( memOpnd.GetSize() == k64BitSize ? 
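Physical-register and offset operands are interned: GetOrCreatePhysicalRegisterOperand and GetOrCreateOfstOpnd derive a hash key from the register number or offset plus encoded size bits, and repeated requests hand back the same object. A generic sketch of the interning pattern; the key encoding here is illustrative, not the exact shifts used above:

#include <cstdint>
#include <memory>
#include <unordered_map>

struct PhysReg {
  uint32_t regNo;
  uint32_t sizeBits;
};

/* One object per (regNo, size) pair, as phyRegOperandTable guarantees. */
class PhysRegCache {
 public:
  PhysReg &Get(uint32_t regNo, uint32_t sizeBits) {
    uint64_t key = (static_cast<uint64_t>(regNo) << 1u) | (sizeBits == 64 ? 1u : 0u);
    auto it = cache_.find(key);
    if (it == cache_.end()) {
      it = cache_.emplace(key, std::make_unique<PhysReg>(PhysReg{regNo, sizeBits})).first;
    }
    return *it->second;
  }

 private:
  std::unordered_map<uint64_t, std::unique_ptr<PhysReg>> cache_;
};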
MOP_xldr : MOP_wldr, result, memOpnd)); if (stImm.GetOffset() > 0) { - AArch64ImmOperand &immOpnd = CreateImmOperand(stImm.GetOffset(), result.GetSize(), false); + ImmOperand &immOpnd = CreateImmOperand(stImm.GetOffset(), result.GetSize(), false); rematInsns.emplace_back(&GetCG()->BuildInstruction(MOP_xaddrri12, result, result, immOpnd)); return; } @@ -7877,36 +8548,36 @@ void AArch64CGFunc::SelectAddrofAfterRa(Operand &result, StImmOperand &stImm, st } MemOperand &AArch64CGFunc::GetOrCreateMemOpndAfterRa(const MIRSymbol &symbol, int32 offset, uint32 size, - bool needLow12, AArch64RegOperand *regOp, + bool needLow12, RegOperand *regOp, std::vector& rematInsns) { MIRStorageClass storageClass = symbol.GetStorageClass(); if ((storageClass == kScGlobal) || (storageClass == kScExtern)) { StImmOperand &stOpnd = CreateStImmOperand(symbol, offset, 0); - AArch64RegOperand &stAddrOpnd = *regOp; + RegOperand &stAddrOpnd = *regOp; SelectAddrofAfterRa(stAddrOpnd, stOpnd, rematInsns); - /* AArch64MemOperand::AddrMode_B_OI */ - return *memPool->New(AArch64MemOperand::kAddrModeBOi, size, stAddrOpnd, - nullptr, &GetOrCreateOfstOpnd(0, k32BitSize), &symbol); + /* MemOperand::AddrMode_B_OI */ + return *CreateMemOperand(MemOperand::kAddrModeBOi, size, stAddrOpnd, + nullptr, &GetOrCreateOfstOpnd(0, k32BitSize), &symbol); } else if ((storageClass == kScPstatic) || (storageClass == kScFstatic)) { if (symbol.GetSKind() == kStConst) { ASSERT(offset == 0, "offset should be 0 for constant literals"); - return *memPool->New(AArch64MemOperand::kAddrModeLiteral, size, symbol); + return *CreateMemOperand(MemOperand::kAddrModeLiteral, size, symbol); } else { if (needLow12) { StImmOperand &stOpnd = CreateStImmOperand(symbol, offset, 0); - AArch64RegOperand &stAddrOpnd = *regOp; + RegOperand &stAddrOpnd = *regOp; SelectAddrofAfterRa(stAddrOpnd, stOpnd, rematInsns); - return *memPool->New(AArch64MemOperand::kAddrModeBOi, size, stAddrOpnd, - nullptr, &GetOrCreateOfstOpnd(0, k32BitSize), &symbol); + return *CreateMemOperand(MemOperand::kAddrModeBOi, size, stAddrOpnd, + nullptr, &GetOrCreateOfstOpnd(0, k32BitSize), &symbol); } else { StImmOperand &stOpnd = CreateStImmOperand(symbol, offset, 0); - AArch64RegOperand &stAddrOpnd = *regOp; + RegOperand &stAddrOpnd = *regOp; /* adrp x1, _PTR__cinf_Ljava_2Flang_2FSystem_3B */ Insn &insn = GetCG()->BuildInstruction(MOP_xadrp, stAddrOpnd, stOpnd); rematInsns.emplace_back(&insn); /* ldr x1, [x1, #:lo12:_PTR__cinf_Ljava_2Flang_2FSystem_3B] */ - return *memPool->New(AArch64MemOperand::kAddrModeLo12Li, size, stAddrOpnd, nullptr, - &GetOrCreateOfstOpnd(static_cast(offset), k32BitSize), &symbol); + return *CreateMemOperand(MemOperand::kAddrModeLo12Li, size, stAddrOpnd, nullptr, + &GetOrCreateOfstOpnd(static_cast(offset), k32BitSize), &symbol); } } } else { @@ -7915,7 +8586,7 @@ MemOperand &AArch64CGFunc::GetOrCreateMemOpndAfterRa(const MIRSymbol &symbol, in } MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(const MIRSymbol &symbol, int64 offset, uint32 size, bool forLocalRef, - bool needLow12, AArch64RegOperand *regOp) { + bool needLow12, RegOperand *regOp) { MIRStorageClass storageClass = symbol.GetStorageClass(); if ((storageClass == kScAuto) || (storageClass == kScFormal)) { AArch64SymbolAlloc *symLoc = @@ -7944,7 +8615,7 @@ MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(const MIRSymbol &symbol, int64 off return *(it->second); } else { Operand* offOpnd = (it->second)->GetOffset(); - if (((static_cast(offOpnd))->GetOffsetValue() == (stOffset + offset)) && + if 
(((static_cast(offOpnd))->GetOffsetValue() == (stOffset + offset)) && (it->second->GetSize() == size)) { return *(it->second); } @@ -7961,32 +8632,32 @@ MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(const MIRSymbol &symbol, int64 off } } - AArch64RegOperand *baseOpnd = static_cast(GetBaseReg(*symLoc)); + RegOperand *baseOpnd = static_cast(GetBaseReg(*symLoc)); int32 totalOffset = stOffset + static_cast(offset); - /* needs a fresh copy of OfstOperand as we may adjust its offset at a later stage. */ - AArch64OfstOperand *offsetOpnd = nullptr; + /* needs a fresh copy of ImmOperand as we may adjust its offset at a later stage. */ + OfstOperand *offsetOpnd = nullptr; if (CGOptions::IsBigEndian()) { if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed && size < k64BitSize) { - offsetOpnd = memPool->New(k4BitSize + static_cast(totalOffset), k64BitSize); + offsetOpnd = &CreateOfstOpnd(k4BitSize + static_cast(totalOffset), k64BitSize); } else { - offsetOpnd = memPool->New(totalOffset, k64BitSize); + offsetOpnd = &CreateOfstOpnd(totalOffset, k64BitSize); } } else { - offsetOpnd = memPool->New(totalOffset, k64BitSize); + offsetOpnd = &CreateOfstOpnd(totalOffset, k64BitSize); } if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed && - AArch64MemOperand::IsPIMMOffsetOutOfRange(totalOffset, size)) { - AArch64ImmOperand *offsetOprand; + MemOperand::IsPIMMOffsetOutOfRange(totalOffset, size)) { + ImmOperand *offsetOprand; offsetOprand = &CreateImmOperand(totalOffset, k64BitSize, true, kUnAdjustVary); Operand *resImmOpnd = &SelectCopy(*offsetOprand, PTY_i64, PTY_i64); - return *memPool->New(AArch64MemOperand::kAddrModeBOrX, size, *baseOpnd, - static_cast(*resImmOpnd), nullptr, symbol, true); + return *CreateMemOperand(MemOperand::kAddrModeBOrX, size, *baseOpnd, + static_cast(*resImmOpnd), nullptr, symbol, true); } else { if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { offsetOpnd->SetVary(kUnAdjustVary); } - AArch64MemOperand *res = memPool->New(AArch64MemOperand::kAddrModeBOi, size, *baseOpnd, - nullptr, offsetOpnd, &symbol); + MemOperand *res = CreateMemOperand(MemOperand::kAddrModeBOi, size, *baseOpnd, + nullptr, offsetOpnd, &symbol); if ((symbol.GetType()->GetKind() != kTypeClass) && !forLocalRef) { memOpndsRequiringOffsetAdjustment[idx] = res; } @@ -7995,39 +8666,39 @@ MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(const MIRSymbol &symbol, int64 off } else if ((storageClass == kScGlobal) || (storageClass == kScExtern)) { StImmOperand &stOpnd = CreateStImmOperand(symbol, offset, 0); if (!regOp) { - regOp = static_cast(&CreateRegisterOperandOfType(PTY_u64)); + regOp = static_cast(&CreateRegisterOperandOfType(PTY_u64)); } - AArch64RegOperand &stAddrOpnd = *regOp; + RegOperand &stAddrOpnd = *regOp; SelectAddrof(stAddrOpnd, stOpnd); - /* AArch64MemOperand::AddrMode_B_OI */ - return *memPool->New(AArch64MemOperand::kAddrModeBOi, size, stAddrOpnd, - nullptr, &GetOrCreateOfstOpnd(0, k32BitSize), &symbol); + /* MemOperand::AddrMode_B_OI */ + return *CreateMemOperand(MemOperand::kAddrModeBOi, size, stAddrOpnd, + nullptr, &GetOrCreateOfstOpnd(0, k32BitSize), &symbol); } else if ((storageClass == kScPstatic) || (storageClass == kScFstatic)) { if (symbol.GetSKind() == kStConst) { ASSERT(offset == 0, "offset should be 0 for constant literals"); - return *memPool->New(AArch64MemOperand::kAddrModeLiteral, size, symbol); + return *CreateMemOperand(MemOperand::kAddrModeLiteral, size, symbol); } else { if (needLow12) { StImmOperand &stOpnd = CreateStImmOperand(symbol, 
offset, 0); if (!regOp) { - regOp = static_cast(&CreateRegisterOperandOfType(PTY_u64)); + regOp = static_cast(&CreateRegisterOperandOfType(PTY_u64)); } - AArch64RegOperand &stAddrOpnd = *regOp; + RegOperand &stAddrOpnd = *regOp; SelectAddrof(stAddrOpnd, stOpnd); - return *memPool->New(AArch64MemOperand::kAddrModeBOi, size, stAddrOpnd, - nullptr, &GetOrCreateOfstOpnd(0, k32BitSize), &symbol); + return *CreateMemOperand(MemOperand::kAddrModeBOi, size, stAddrOpnd, + nullptr, &GetOrCreateOfstOpnd(0, k32BitSize), &symbol); } else { StImmOperand &stOpnd = CreateStImmOperand(symbol, offset, 0); if (!regOp) { - regOp = static_cast(&CreateRegisterOperandOfType(PTY_u64)); + regOp = static_cast(&CreateRegisterOperandOfType(PTY_u64)); } - AArch64RegOperand &stAddrOpnd = *regOp; + RegOperand &stAddrOpnd = *regOp; /* adrp x1, _PTR__cinf_Ljava_2Flang_2FSystem_3B */ Insn &insn = GetCG()->BuildInstruction(MOP_xadrp, stAddrOpnd, stOpnd); GetCurBB()->AppendInsn(insn); /* ldr x1, [x1, #:lo12:_PTR__cinf_Ljava_2Flang_2FSystem_3B] */ - return *memPool->New(AArch64MemOperand::kAddrModeLo12Li, size, stAddrOpnd, nullptr, - &GetOrCreateOfstOpnd(static_cast(offset), k32BitSize), &symbol); + return *CreateMemOperand(MemOperand::kAddrModeLo12Li, size, stAddrOpnd, nullptr, + &GetOrCreateOfstOpnd(static_cast(offset), k32BitSize), &symbol); } } } else { @@ -8035,64 +8706,62 @@ MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(const MIRSymbol &symbol, int64 off } } -AArch64MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(AArch64MemOperand::AArch64AddressingMode mode, uint32 size, - RegOperand *base, RegOperand *index, OfstOperand *offset, - const MIRSymbol *st) { - ASSERT(base != nullptr, "nullptr check"); - AArch64MemOperand tMemOpnd(mode, size, *base, index, offset, st); +MemOperand &AArch64CGFunc::HashMemOpnd(MemOperand &tMemOpnd) { auto it = hashMemOpndTable.find(tMemOpnd); if (it != hashMemOpndTable.end()) { return *(it->second); } - AArch64MemOperand *res = memPool->New(tMemOpnd); + auto *res = memPool->New(tMemOpnd); hashMemOpndTable[tMemOpnd] = res; return *res; } -AArch64MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(AArch64MemOperand::AArch64AddressingMode mode, uint32 size, - RegOperand *base, RegOperand *index, int32 shift, - bool isSigned) { +MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(MemOperand::AArch64AddressingMode mode, uint32 size, + RegOperand *base, RegOperand *index, ImmOperand *offset, + const MIRSymbol *st) { ASSERT(base != nullptr, "nullptr check"); - AArch64MemOperand tMemOpnd(mode, size, *base, *index, shift, isSigned); - auto it = hashMemOpndTable.find(tMemOpnd); - if (it != hashMemOpndTable.end()) { - return *(it->second); + MemOperand tMemOpnd(mode, size, *base, index, offset, st); + if (base->GetRegisterNumber() == RFP || base->GetRegisterNumber() == RSP) { + tMemOpnd.SetStackMem(true); } - AArch64MemOperand *res = memPool->New(tMemOpnd); - hashMemOpndTable[tMemOpnd] = res; - return *res; + return HashMemOpnd(tMemOpnd); } -AArch64MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(AArch64MemOperand &oldMem) { - auto it = hashMemOpndTable.find(oldMem); - if (it != hashMemOpndTable.end()) { - return *(it->second); +MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(MemOperand::AArch64AddressingMode mode, uint32 size, + RegOperand *base, RegOperand *index, int32 shift, + bool isSigned) { + ASSERT(base != nullptr, "nullptr check"); + MemOperand tMemOpnd(mode, size, *base, *index, shift, isSigned); + if (base->GetRegisterNumber() == RFP || base->GetRegisterNumber() == RSP) { + tMemOpnd.SetStackMem(true); } - 
AArch64MemOperand *res = memPool->New(oldMem); - hashMemOpndTable[oldMem] = res; - return *res; + return HashMemOpnd(tMemOpnd); +} + +MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(MemOperand &oldMem) { + return HashMemOpnd(oldMem); } /* offset: base offset from FP or SP */ MemOperand &AArch64CGFunc::CreateMemOpnd(RegOperand &baseOpnd, int64 offset, uint32 size) { - AArch64OfstOperand &offsetOpnd = CreateOfstOpnd(static_cast(offset), k32BitSize); + OfstOperand &offsetOpnd = CreateOfstOpnd(static_cast(offset), k32BitSize); /* do not need to check bit size rotate of sign immediate */ bool checkSimm = (offset > kMinSimm64 && offset < kMaxSimm64Pair); if (!checkSimm && !ImmOperand::IsInBitSizeRot(kMaxImmVal12Bits, offset)) { Operand *resImmOpnd = &SelectCopy(CreateImmOperand(offset, k32BitSize, true), PTY_i32, PTY_i32); - return *memPool->New(AArch64MemOperand::kAddrModeBOrX, size, baseOpnd, - static_cast(resImmOpnd), nullptr, nullptr); + return *CreateMemOperand(MemOperand::kAddrModeBOrX, size, baseOpnd, + static_cast(resImmOpnd), nullptr, nullptr); } else { - return *memPool->New(AArch64MemOperand::kAddrModeBOi, size, baseOpnd, - nullptr, &offsetOpnd, nullptr); + return *CreateMemOperand(MemOperand::kAddrModeBOi, size, baseOpnd, + nullptr, &offsetOpnd, nullptr); } } /* offset: base offset + #:lo12:Label+immediate */ MemOperand &AArch64CGFunc::CreateMemOpnd(RegOperand &baseOpnd, int64 offset, uint32 size, const MIRSymbol &sym) { - AArch64OfstOperand &offsetOpnd = CreateOfstOpnd(static_cast(offset), k32BitSize); + OfstOperand &offsetOpnd = CreateOfstOpnd(static_cast(offset), k32BitSize); ASSERT(ImmOperand::IsInBitSizeRot(kMaxImmVal12Bits, offset), ""); - return *memPool->New(AArch64MemOperand::kAddrModeBOi, size, baseOpnd, nullptr, &offsetOpnd, &sym); + return *CreateMemOperand(MemOperand::kAddrModeBOi, size, baseOpnd, nullptr, &offsetOpnd, &sym); } RegOperand &AArch64CGFunc::GenStructParamIndex(RegOperand &base, const BaseNode &indexExpr, int shift, @@ -8104,9 +8773,9 @@ RegOperand &AArch64CGFunc::GenStructParamIndex(RegOperand &base, const BaseNode RegOperand *result = &CreateRegisterOperandOfType(PTY_a64); SelectAdd(*result, base, *srcOpnd, PTY_a64); - AArch64OfstOperand *offopnd = &CreateOfstOpnd(0, k32BitSize); - AArch64MemOperand &mo = - GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, k64BitSize, result, nullptr, offopnd, nullptr); + OfstOperand *offopnd = &CreateOfstOpnd(0, k32BitSize); + MemOperand &mo = + GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, result, nullptr, offopnd, nullptr); RegOperand &structAddr = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); GetCurBB()->AppendInsn(cg->BuildInstruction(PickLdInsn(GetPrimTypeBitSize(baseType), targetType), structAddr, mo)); @@ -8146,9 +8815,9 @@ MemOperand *AArch64CGFunc::CheckAndCreateExtendMemOpnd(PrimType ptype, const Bas return nullptr; } uint32 scale = static_cast(intOfst->GetValue()); - AArch64OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(scale, k32BitSize); + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(scale, k32BitSize); uint32 dsize = GetPrimTypeBitSize(ptype); - MemOperand *memOpnd = &GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, GetPrimTypeBitSize(ptype), + MemOperand *memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, GetPrimTypeBitSize(ptype), SelectRegread(*static_cast(baseExpr)), nullptr, &ofstOpnd, nullptr); return IsOperandImmValid(PickLdInsn(dsize, ptype), memOpnd, kInsnSecondOpnd) ? 
memOpnd : nullptr; /* case 3 */ @@ -8159,7 +8828,7 @@ MemOperand *AArch64CGFunc::CheckAndCreateExtendMemOpnd(PrimType ptype, const Bas } MemOperand *memOpnd = &GetOrCreateMemOpnd( - AArch64MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype), + MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype), SelectRegread(*static_cast(baseExpr)), SelectRegread(*static_cast(addendExpr)), nullptr, nullptr); return memOpnd; @@ -8173,7 +8842,7 @@ MemOperand *AArch64CGFunc::CheckAndCreateExtendMemOpnd(PrimType ptype, const Bas bool isExpand = toSize > fromSize; CHECK_FATAL(toSize >= fromSize, "unKnown convert in ldr or store"); MemOperand *memOpnd = &GetOrCreateMemOpnd( - AArch64MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype), + MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype), SelectRegread(*static_cast(baseExpr)), SelectRegread(*static_cast(cvtRegreadNode)), shiftAmount, isExpand); return memOpnd; @@ -8215,12 +8884,12 @@ MemOperand *AArch64CGFunc::CheckAndCreateExtendMemOpnd(PrimType ptype, const Bas if ((fromType == PTY_i32) && (toType == PTY_a64)) { RegOperand &index = static_cast(LoadIntoRegister(*HandleExpr(*indexExpr, *indexExpr->Opnd(0)), PTY_i32)); - memOpnd = &GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype), &base, &index, + memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype), &base, &index, shift, true); } else if ((fromType == PTY_u32) && (toType == PTY_a64)) { RegOperand &index = static_cast(LoadIntoRegister(*HandleExpr(*indexExpr, *indexExpr->Opnd(0)), PTY_u32)); - memOpnd = &GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype), &base, &index, + memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype), &base, &index, shift, false); } return memOpnd; @@ -8236,7 +8905,7 @@ MemOperand &AArch64CGFunc::CreateNonExtendMemOpnd(PrimType ptype, const BaseNode ASSERT(constOfstNode->GetConstVal()->GetKind() == kConstInt, "expect MIRIntConst"); MIRIntConst *intOfst = safe_cast(constOfstNode->GetConstVal()); CHECK_FATAL(intOfst != nullptr, "just checking"); - offset = (addrExpr.GetOpCode() == OP_add) ? offset + intOfst->GetValue() : offset - intOfst->GetValue(); + offset = (addrExpr.GetOpCode() == OP_add) ? 
offset + intOfst->GetSXTValue() : offset - intOfst->GetSXTValue(); } else { addrOpnd = HandleExpr(parent, addrExpr); } @@ -8248,15 +8917,15 @@ MemOperand &AArch64CGFunc::CreateNonExtendMemOpnd(PrimType ptype, const BaseNode Operand &opnd = lastInsn->GetOperand(kInsnThirdOpnd); StImmOperand &stOpnd = static_cast(opnd); - AArch64OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(static_cast(stOpnd.GetOffset()), k32BitSize); - MemOperand &tmpMemOpnd = GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeLo12Li, GetPrimTypeBitSize(ptype), - static_cast(addrOpnd), nullptr, &ofstOpnd, stOpnd.GetSymbol()); + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(static_cast(stOpnd.GetOffset()), k32BitSize); + MemOperand &tmpMemOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeLo12Li, GetPrimTypeBitSize(ptype), + static_cast(addrOpnd), nullptr, &ofstOpnd, stOpnd.GetSymbol()); GetCurBB()->RemoveInsn(*GetCurBB()->GetLastInsn()); return tmpMemOpnd; } else { - AArch64OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(static_cast(offset), k64BitSize); - return GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, GetPrimTypeBitSize(ptype), - static_cast(addrOpnd), nullptr, &ofstOpnd, nullptr); + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(static_cast(offset), k64BitSize); + return GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, GetPrimTypeBitSize(ptype), + static_cast(addrOpnd), nullptr, &ofstOpnd, nullptr); } } @@ -8349,7 +9018,7 @@ void AArch64CGFunc::SelectLibCallNArg(const std::string &funcName, std::vectorNew(*GetFuncScopeAllocator()); + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); for (size_t i = 1; i < opndVec.size(); ++i) { ASSERT(pt[i] != PTY_void, "primType check"); MIRType *ty; @@ -8361,7 +9030,7 @@ void AArch64CGFunc::SelectLibCallNArg(const std::string &funcName, std::vector(stOpnd); parmLocator.LocateNextParm(*ty, ploc); if (ploc.reg0 != 0) { /* load to the register */ - AArch64RegOperand &parmRegOpnd = GetOrCreatePhysicalRegisterOperand( + RegOperand &parmRegOpnd = GetOrCreatePhysicalRegisterOperand( static_cast(ploc.reg0), expRegOpnd->GetSize(), GetRegTyFromPrimTy(pt[i])); SelectCopy(parmRegOpnd, pt[i], *expRegOpnd, pt[i]); srcOpnds->PushOpnd(parmRegOpnd); @@ -8390,8 +9059,8 @@ void AArch64CGFunc::SelectLibCallNArg(const std::string &funcName, std::vector(opnd0); AArch64reg regNum = static_cast(is2ndRet ? 
retMech.GetReg1() : retMech.GetReg0()); if (regOpnd->GetRegisterNumber() != regNum) { - AArch64RegOperand &retOpnd = GetOrCreatePhysicalRegisterOperand(regNum, regOpnd->GetSize(), - GetRegTyFromPrimTy(retPrimType)); + RegOperand &retOpnd = GetOrCreatePhysicalRegisterOperand(regNum, regOpnd->GetSize(), + GetRegTyFromPrimTy(retPrimType)); SelectCopy(*opnd0, retPrimType, retOpnd, retPrimType); } } @@ -8445,7 +9114,7 @@ int32 AArch64CGFunc::GetBaseOffset(const SymbolAlloc &sa) { } void AArch64CGFunc::AppendCall(const MIRSymbol &funcSymbol) { - AArch64ListOperand *srcOpnds = memPool->New(*GetFuncScopeAllocator()); + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); AppendCall(funcSymbol, *srcOpnds); } @@ -8467,8 +9136,7 @@ void AArch64CGFunc::SelectAddAfterInsn(Operand &resOpnd, Operand &opnd0, Operand ASSERT((opnd1.GetKind() == Operand::kOpdImmediate || opnd1.GetKind() == Operand::kOpdOffset), "Spill memory operand should be with a immediate offset."); - AArch64ImmOperand *immOpnd = static_cast(&opnd1); - ASSERT(!immOpnd->IsNegative(), "Spill offset should be positive number."); + ImmOperand *immOpnd = static_cast(&opnd1); MOperator mOpCode = MOP_undef; Insn *curInsn = &insn; @@ -8506,13 +9174,16 @@ void AArch64CGFunc::SelectAddAfterInsn(Operand &resOpnd, Operand &opnd0, Operand } } else { /* load into register */ - RegOperand ®Opnd = CreateRegisterOperandOfType(primType); - SelectCopyImm(regOpnd, *immOpnd, primType); + RegOperand &movOpnd = GetOrCreatePhysicalRegisterOperand(R16, dsize, kRegTyInt); + mOpCode = is64Bits ? MOP_xmovri64 : MOP_xmovri32; + Insn &movInsn = GetCG()->BuildInstruction(mOpCode, movOpnd, *immOpnd); mOpCode = is64Bits ? MOP_xaddrrr : MOP_waddrrr; - Insn &newInsn = GetCG()->BuildInstruction(mOpCode, resOpnd, opnd0, regOpnd); + Insn &newInsn = GetCG()->BuildInstruction(mOpCode, resOpnd, opnd0, movOpnd); if (isDest) { insn.GetBB()->InsertInsnAfter(insn, newInsn); + insn.GetBB()->InsertInsnAfter(insn, movInsn); } else { + insn.GetBB()->InsertInsnBefore(insn, movInsn); insn.GetBB()->InsertInsnBefore(insn, newInsn); } } @@ -8524,13 +9195,11 @@ MemOperand *AArch64CGFunc::AdjustMemOperandIfOffsetOutOfRange( CHECK_FATAL(false, "index out of range in AArch64CGFunc::AdjustMemOperandIfOffsetOutOfRange"); } uint32 dataSize = GetOrCreateVirtualRegisterOperand(vrNum).GetSize(); - auto *a64MemOpnd = static_cast(memOpnd); - if (IsImmediateOffsetOutOfRange(*a64MemOpnd, dataSize)) { - if (CheckIfSplitOffsetWithAdd(*a64MemOpnd, dataSize)) { + if (IsImmediateOffsetOutOfRange(*memOpnd, dataSize)) { + if (CheckIfSplitOffsetWithAdd(*memOpnd, dataSize)) { isOutOfRange = true; } - memOpnd = - &SplitOffsetWithAddInstruction(*a64MemOpnd, dataSize, regNum, isDest, &insn); + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dataSize, regNum, isDest, &insn); } else { isOutOfRange = false; } @@ -8593,9 +9262,9 @@ MemOperand *AArch64CGFunc::GetOrCreatSpillMem(regno_t vrNum) { RegOperand &baseOpnd = GetOrCreateStackBaseRegOperand(); int32 offset = GetOrCreatSpillRegLocation(vrNum); - AArch64OfstOperand *offsetOpnd = memPool->New(offset, k64BitSize); - MemOperand *memOpnd = memPool->New(AArch64MemOperand::kAddrModeBOi, memBitSize, baseOpnd, - nullptr, offsetOpnd, nullptr); + OfstOperand *offsetOpnd = &CreateOfstOpnd(offset, k64BitSize); + MemOperand *memOpnd = CreateMemOperand(MemOperand::kAddrModeBOi, memBitSize, baseOpnd, + nullptr, offsetOpnd, nullptr); (void)spillRegMemOperands.emplace(std::pair(vrNum, memOpnd)); return memOpnd; } else { @@ -8618,10 +9287,10 @@ MemOperand 
*AArch64CGFunc::GetPseudoRegisterSpillMemoryOperand(PregIdx i) { uint32 bitLen = GetPrimTypeSize(preg->GetPrimType()) * kBitsPerByte; RegOperand &base = GetOrCreateFramePointerRegOperand(); - AArch64OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(static_cast(offset), k32BitSize); - MemOperand &memOpnd = GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, bitLen, &base, nullptr, &ofstOpnd, nullptr); - if (IsImmediateOffsetOutOfRange(static_cast(memOpnd), bitLen)) { - MemOperand &newMemOpnd = SplitOffsetWithAddInstruction(static_cast(memOpnd), bitLen); + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(static_cast(offset), k32BitSize); + MemOperand &memOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, bitLen, &base, nullptr, &ofstOpnd, nullptr); + if (IsImmediateOffsetOutOfRange(memOpnd, bitLen)) { + MemOperand &newMemOpnd = SplitOffsetWithAddInstruction(memOpnd, bitLen); (void)pRegSpillMemOperands.emplace(std::pair(i, &newMemOpnd)); return &newMemOpnd; } @@ -8648,8 +9317,8 @@ AArch64reg AArch64CGFunc::GetReturnRegisterNumber() { bool AArch64CGFunc::CanLazyBinding(const Insn &ldrInsn) { Operand &memOpnd = ldrInsn.GetOperand(1); - auto &aarchMemOpnd = static_cast(memOpnd); - if (aarchMemOpnd.GetAddrMode() != AArch64MemOperand::kAddrModeLo12Li) { + auto &aarchMemOpnd = static_cast(memOpnd); + if (aarchMemOpnd.GetAddrMode() != MemOperand::kAddrModeLo12Li) { return false; } @@ -8692,11 +9361,11 @@ void AArch64CGFunc::ConvertAdrpl12LdrToLdr() { } /* Check if base register of nextInsn and the dest operand of insn are identical. */ - AArch64MemOperand *memOpnd = static_cast(nextInsn->GetMemOpnd()); + MemOperand *memOpnd = static_cast(nextInsn->GetMemOpnd()); CHECK_FATAL(memOpnd != nullptr, "memOpnd can't be nullptr"); /* Only for AddrMode_B_OI addressing mode. */ - if (memOpnd->GetAddrMode() != AArch64MemOperand::kAddrModeBOi) { + if (memOpnd->GetAddrMode() != MemOperand::kAddrModeBOi) { continue; } @@ -8715,11 +9384,11 @@ void AArch64CGFunc::ConvertAdrpl12LdrToLdr() { } StImmOperand &stImmOpnd = static_cast(insn->GetOperand(kInsnThirdOpnd)); - AArch64OfstOperand &ofstOpnd = GetOrCreateOfstOpnd( + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd( static_cast(stImmOpnd.GetOffset() + memOpnd->GetOffsetImmediate()->GetOffsetValue()), k32BitSize); RegOperand &newBaseOpnd = static_cast(insn->GetOperand(kInsnSecondOpnd)); - AArch64MemOperand &newMemOpnd = GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeLo12Li, memOpnd->GetSize(), - &newBaseOpnd, nullptr, &ofstOpnd, stImmOpnd.GetSymbol()); + MemOperand &newMemOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeLo12Li, memOpnd->GetSize(), + &newBaseOpnd, nullptr, &ofstOpnd, stImmOpnd.GetSymbol()); nextInsn->SetOperand(1, newMemOpnd); bb->RemoveInsn(*insn); } @@ -8776,18 +9445,18 @@ void AArch64CGFunc::ProcessLazyBinding() { * isCleanCall: when generate clean call insn, set isCleanCall as true * Return: the 'blr' instruction */ -Insn &AArch64CGFunc::GenerateGlobalLongCallAfterInsn(const MIRSymbol &func, AArch64ListOperand &srcOpnds, +Insn &AArch64CGFunc::GenerateGlobalLongCallAfterInsn(const MIRSymbol &func, ListOperand &srcOpnds, bool isCleanCall) { MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(func.GetStIdx()); symbol->SetStorageClass(kScGlobal); RegOperand &tmpReg = CreateRegisterOperandOfType(PTY_u64); StImmOperand &stOpnd = CreateStImmOperand(*symbol, 0, 0); - AArch64OfstOperand &offsetOpnd = CreateOfstOpnd(*symbol, 0); + OfstOperand &offsetOpnd = CreateOfstOpnd(*symbol, 0); Insn &adrpInsn = GetCG()->BuildInstruction(MOP_xadrp, tmpReg, stOpnd); 
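// Editor's sketch (illustrative, not part of the patch): the long-call code
// built here materializes the callee address and calls through a register:
//
//   adrp x9, _PTR_callee               // adrpInsn (MOP_xadrp), built just above
//   ldr  x9, [x9, #:lo12:_PTR_callee]  // ldrInsn (MOP_xldr/MOP_wldr) below
//   blr  x9                            // the returned call instruction
//
// x9 and _PTR_callee are placeholder names: the actual register is the
// virtual tmpReg created above, and the symbol comes from 'func'.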
GetCurBB()->AppendInsn(adrpInsn); - AArch64MemOperand &memOrd = GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeLo12Li, kSizeOfPtr * kBitsPerByte, - static_cast(&tmpReg), - nullptr, &offsetOpnd, symbol); + MemOperand &memOrd = GetOrCreateMemOpnd(MemOperand::kAddrModeLo12Li, kSizeOfPtr * kBitsPerByte, + static_cast(&tmpReg), + nullptr, &offsetOpnd, symbol); Insn &ldrInsn = GetCG()->BuildInstruction(memOrd.GetSize() == k64BitSize ? MOP_xldr : MOP_wldr, tmpReg, memOrd); GetCurBB()->AppendInsn(ldrInsn); @@ -8818,7 +9487,7 @@ Insn &AArch64CGFunc::GenerateGlobalLongCallAfterInsn(const MIRSymbol &func, AArc * isCleanCall: when generate clean call insn, set isCleanCall as true * Return: the 'blr' instruction */ -Insn &AArch64CGFunc::GenerateLocalLongCallAfterInsn(const MIRSymbol &func, AArch64ListOperand &srcOpnds, +Insn &AArch64CGFunc::GenerateLocalLongCallAfterInsn(const MIRSymbol &func, ListOperand &srcOpnds, bool isCleanCall) { RegOperand &tmpReg = CreateRegisterOperandOfType(PTY_u64); StImmOperand &stOpnd = CreateStImmOperand(func, 0, 0); @@ -8838,7 +9507,7 @@ Insn &AArch64CGFunc::GenerateLocalLongCallAfterInsn(const MIRSymbol &func, AArch return *callInsn; } -Insn &AArch64CGFunc::AppendCall(const MIRSymbol &sym, AArch64ListOperand &srcOpnds, bool isCleanCall) { +Insn &AArch64CGFunc::AppendCall(const MIRSymbol &sym, ListOperand &srcOpnds, bool isCleanCall) { Insn *callInsn = nullptr; if (CGOptions::IsLongCalls()) { MIRFunction *mirFunc = sym.GetFunction(); @@ -8880,6 +9549,48 @@ bool AArch64CGFunc::IsDuplicateAsmList(const MIRSymbol &sym) const { } void AArch64CGFunc::SelectMPLProfCounterInc(const IntrinsiccallNode &intrnNode) { + if (Options::profileGen) { + ASSERT(intrnNode.NumOpnds() == 1, "must be 1 operand"); + BaseNode *arg1 = intrnNode.Opnd(0); + ASSERT(arg1 != nullptr, "nullptr check"); + regno_t vRegNO1 = NewVReg(GetRegTyFromPrimTy(PTY_a64), GetPrimTypeSize(PTY_a64)); + RegOperand &vReg1 = CreateVirtualRegisterOperand(vRegNO1); + vReg1.SetRegNotBBLocal(); + static const MIRSymbol *bbProfileTab = nullptr; + + // Ref: MeProfGen::InstrumentFunc on ctrTbl naming + std::string ctrTblName = namemangler::kprefixProfCtrTbl + + GetMirModule().GetFileName() + "_" + GetName(); + std::replace(ctrTblName.begin(), ctrTblName.end(), '.', '_'); + std::replace(ctrTblName.begin(), ctrTblName.end(), '-', '_'); + std::replace(ctrTblName.begin(), ctrTblName.end(), '/', '_'); + + if (!bbProfileTab || bbProfileTab->GetName() != ctrTblName) { + bbProfileTab = GetMirModule().GetMIRBuilder()->GetGlobalDecl(ctrTblName); + CHECK_FATAL(bbProfileTab != nullptr, "expect counter table"); + } + + ConstvalNode *constvalNode = static_cast(arg1); + MIRConst *mirConst = constvalNode->GetConstVal(); + ASSERT(mirConst != nullptr, "nullptr check"); + CHECK_FATAL(mirConst->GetKind() == kConstInt, "expect MIRIntConst type"); + MIRIntConst *mirIntConst = safe_cast(mirConst); + int64 offset = GetPrimTypeSize(PTY_u64) * mirIntConst->GetValue(); + + if (!GetCG()->IsQuiet()) { + maple::LogInfo::MapleLogger(kLlInfo) << "At counter table offset: " << offset << std::endl; + } + MemOperand *memOpnd = &GetOrCreateMemOpnd(*bbProfileTab, offset, k64BitSize); + if (IsImmediateOffsetOutOfRange(*memOpnd, k64BitSize)) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, k64BitSize); + } + Operand *reg = &SelectCopy(*memOpnd, PTY_u64, PTY_u64); + ImmOperand &one = CreateImmOperand(1, k64BitSize, false); + SelectAdd(*reg, *reg, one, PTY_u64); + SelectCopy(*memOpnd, PTY_u64, *reg, PTY_u64); + return; + } + ASSERT(intrnNode.NumOpnds() == 1, 
"must be 1 operand"); BaseNode *arg1 = intrnNode.Opnd(0); ASSERT(arg1 != nullptr, "nullptr check"); @@ -8965,8 +9676,8 @@ void AArch64CGFunc::GenCVaStartIntrin(RegOperand &opnd, uint32 stkSize) { Operand &stkOpnd = GetOrCreatePhysicalRegisterOperand(RFP, k64BitSize, kRegTyInt); /* __stack */ - AArch64ImmOperand *offsOpnd = &CreateImmOperand(0, k64BitSize, true, kUnAdjustVary); /* isvary reset StackFrameSize */ - AArch64ImmOperand *offsOpnd2 = &CreateImmOperand(stkSize, k64BitSize, false); + ImmOperand *offsOpnd = &CreateImmOperand(0, k64BitSize, true, kUnAdjustVary); /* isvary reset StackFrameSize */ + ImmOperand *offsOpnd2 = &CreateImmOperand(stkSize, k64BitSize, false); RegOperand &vReg = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, GetPrimTypeSize(LOWERED_PTR_TYPE))); if (stkSize) { SelectAdd(vReg, *offsOpnd, *offsOpnd2, LOWERED_PTR_TYPE); @@ -8974,9 +9685,9 @@ void AArch64CGFunc::GenCVaStartIntrin(RegOperand &opnd, uint32 stkSize) { } else { SelectAdd(vReg, stkOpnd, *offsOpnd, LOWERED_PTR_TYPE); } - AArch64OfstOperand *offOpnd = &GetOrCreateOfstOpnd(0, k64BitSize); + OfstOperand *offOpnd = &GetOrCreateOfstOpnd(0, k64BitSize); /* mem operand in va_list struct (lhs) */ - MemOperand *strOpnd = &GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, k64BitSize, &opnd, nullptr, + MemOperand *strOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, &opnd, nullptr, offOpnd, static_cast(nullptr)); GetCurBB()->AppendInsn(GetCG()->BuildInstruction( vReg.GetSize() == k64BitSize ? MOP_xstr : MOP_wstr, vReg, *strOpnd)); @@ -8987,7 +9698,7 @@ void AArch64CGFunc::GenCVaStartIntrin(RegOperand &opnd, uint32 stkSize) { } else { offOpnd = &GetOrCreateOfstOpnd(k8BitSize, k64BitSize); } - strOpnd = &GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, k64BitSize, &opnd, nullptr, + strOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, &opnd, nullptr, offOpnd, static_cast(nullptr)); SelectAdd(vReg, stkOpnd, *offsOpnd, LOWERED_PTR_TYPE); GetCurBB()->AppendInsn(GetCG()->BuildInstruction( @@ -9005,7 +9716,7 @@ void AArch64CGFunc::GenCVaStartIntrin(RegOperand &opnd, uint32 stkSize) { SelectSub(vReg, *offsOpnd, *offsOpnd2, LOWERED_PTR_TYPE); /* if 1st opnd is register => sub */ SelectAdd(vReg, stkOpnd, vReg, LOWERED_PTR_TYPE); offOpnd = &GetOrCreateOfstOpnd(kSizeOfPtr * 2, k64BitSize); - strOpnd = &GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, k64BitSize, &opnd, nullptr, + strOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, &opnd, nullptr, offOpnd, static_cast(nullptr)); GetCurBB()->AppendInsn(GetCG()->BuildInstruction( vReg.GetSize() == k64BitSize ? 
MOP_xstr : MOP_wstr, vReg, *strOpnd)); @@ -9016,7 +9727,7 @@ void AArch64CGFunc::GenCVaStartIntrin(RegOperand &opnd, uint32 stkSize) { RegOperand *tmpReg = &CreateRegisterOperandOfType(PTY_i32); /* offs value to be assigned (rhs) */ SelectCopyImm(*tmpReg, *offsOpnd, PTY_i32); offOpnd = &GetOrCreateOfstOpnd(kSizeOfPtr * 3, k32BitSize); - strOpnd = &GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, k32BitSize, &opnd, nullptr, + strOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k32BitSize, &opnd, nullptr, offOpnd, static_cast(nullptr)); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_wstr, *tmpReg, *strOpnd)); @@ -9026,7 +9737,7 @@ void AArch64CGFunc::GenCVaStartIntrin(RegOperand &opnd, uint32 stkSize) { tmpReg = &CreateRegisterOperandOfType(PTY_i32); SelectCopyImm(*tmpReg, *offsOpnd, PTY_i32); offOpnd = &GetOrCreateOfstOpnd((kSizeOfPtr * 3 + sizeof(int32)), k32BitSize); - strOpnd = &GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, k32BitSize, &opnd, nullptr, + strOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k32BitSize, &opnd, nullptr, offOpnd, static_cast(nullptr)); GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_wstr, *tmpReg, *strOpnd)); } @@ -9067,6 +9778,84 @@ void AArch64CGFunc::SelectCVaStart(const IntrinsiccallNode &intrnNode) { return; } +/* + * intrinsiccall C___Atomic_store_N(ptr, val, memorder)) + * ====> *ptr = val + * let ptr -> x0 + * let val -> x1 + * implement to asm: str/stlr x1, [x0] + * a store-release would replace str if memorder is not 0 + */ +void AArch64CGFunc::SelectCAtomicStoreN(const IntrinsiccallNode &intrinsiccallNode) { + auto primType = intrinsiccallNode.Opnd(1)->GetPrimType(); + auto *addr = HandleExpr(intrinsiccallNode, *intrinsiccallNode.Opnd(0)); + auto *value = HandleExpr(intrinsiccallNode, *intrinsiccallNode.Opnd(1)); + auto *memOrderOpnd = intrinsiccallNode.Opnd(kInsnThirdOpnd); + auto *memOrderConst = static_cast(static_cast(memOrderOpnd)->GetConstVal()); + auto memOrder = static_cast(memOrderConst->GetValue()); + SelectAtomicStore(*value, *addr, primType, PickMemOrder(memOrder, false)); +} + +void AArch64CGFunc::SelectAtomicStore( + Operand &srcOpnd, Operand &addrOpnd, PrimType primType, AArch64isa::MemoryOrdering memOrder) { + auto &memOpnd = CreateMemOpnd(LoadIntoRegister(addrOpnd, PTY_a64), 0, k64BitSize); + auto mOp = PickStInsn(GetPrimTypeBitSize(primType), primType, memOrder); + GetCurBB()->AppendInsn(GetCG()->BuildInstruction(mOp, LoadIntoRegister(srcOpnd, primType), memOpnd)); +} + +void AArch64CGFunc::SelectAddrofThreadLocal(Operand &result, StImmOperand &stImm) { + if (CGOptions::IsPIC()) { + SelectCTlsGlobalDesc(result, stImm); + } else { + SelectCTlsLocalDesc(result, stImm); + } + if (stImm.GetOffset() > 0) { + auto &immOpnd = CreateImmOperand(stImm.GetOffset(), result.GetSize(), false); + SelectAdd(result, result, immOpnd, PTY_u64); + } +} + +void AArch64CGFunc::SelectCTlsLocalDesc(Operand &result, StImmOperand &stImm) { + auto tpidr = &CreateCommentOperand("tpidr_el0"); + GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_mrs, result, *tpidr)); + GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_tls_desc_rel, result, result, stImm)); +} + +void AArch64CGFunc::SelectCTlsGlobalDesc(Operand &result, StImmOperand &stImm) { + AArch64CallConvImpl parmLocator(GetBecommon()); + CCLocInfo ploc; + // adrp x0, :tlsdesc:symbol + auto symbol = stImm.GetSymbol(); + RegOperand *specialFuncPtr = &CreateRegisterOperandOfType(PTY_u64); + RegOperand *specialFunc = &CreateRegisterOperandOfType(PTY_u64); + 
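// Editor's sketch (illustrative, not part of the patch): the code below
// follows the standard AArch64 TLS descriptor (general-dynamic) sequence,
// matching the inline comments in this function:
//
//   adrp x0, :tlsdesc:sym
//   ldr  x1, [x0, #:tlsdesc_lo12:sym]
//   add  x0, x0, #:tlsdesc_lo12:sym
//   .tlsdesccall sym
//   blr  x1             // resolver returns the TP-relative offset in x0
//   mrs  xN, tpidr_el0  // thread pointer
//   add  result, x0, xN
//
// x0/x1 are fixed by the TLSDESC calling convention (hence the
// AArch64CallConvImpl query for reg0); xN stands for the virtual register
// 'readSystem' created below.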
GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_xadrp, *specialFuncPtr, stImm)); + // ldr x1, [x0, #tlsdesc_lo12:symbol] + // add x0 ,#tlsdesc_lo12:symbol + auto &offset = CreateOfstOpnd(*symbol, 0, 0); + MemOperand &memOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, kSizeOfPtr * kBitsPerByte, + static_cast(specialFuncPtr), nullptr, &offset, nullptr); + GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_xldr, *specialFunc, memOpnd)); + // .tlsdesccall $symbol + // blr x1 + auto ptrType = *GlobalTables::GetTypeTable().GetPtr(); + parmLocator.InitCCLocInfo(ploc); + parmLocator.LocateNextParm(ptrType, ploc); + auto &parmRegOpnd = GetOrCreatePhysicalRegisterOperand( + static_cast(ploc.reg0), specialFuncPtr->GetSize(), GetRegTyFromPrimTy(PTY_u64)); + GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_xadrpl12, *specialFuncPtr, *specialFuncPtr, stImm)); + SelectCopy(parmRegOpnd, PTY_u64, *specialFuncPtr, PTY_u64); + ListOperand *srcOpnds = memPool->New(*GetFuncScopeAllocator()); + srcOpnds->PushOpnd(parmRegOpnd); + GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_tls_desc_call, *specialFunc, stImm, *srcOpnds)); + auto retRegOpnd = &GetOrCreateSpecialRegisterOperand(kSregRetval0 ,PTY_u64); + // mrs x1, tpidr_el0 + // add x0, x0, x1 + auto tpidr = &CreateCommentOperand("tpidr_el0"); + RegOperand *readSystem = &CreateRegisterOperandOfType(PTY_u64); + GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_mrs, *readSystem, *tpidr)); + SelectAdd(result, *retRegOpnd, *readSystem, PTY_u64); +} + void AArch64CGFunc::SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) { MIRIntrinsicID intrinsic = intrinsiccallNode.GetIntrinsic(); @@ -9095,11 +9884,25 @@ void AArch64CGFunc::SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) { (intrinsic == INTRN_MPL_CLEANUP_NORETESCOBJS)) { return; } - if (intrinsic == INTRN_C_va_start) { - SelectCVaStart(intrinsiccallNode); - return; - } switch (intrinsic) { + case INTRN_C_va_start: + SelectCVaStart(intrinsiccallNode); + return; + case INTRN_C___sync_lock_release_1: + SelectCSyncLockRelease(intrinsiccallNode, PTY_u8); + return; + case INTRN_C___sync_lock_release_2: + SelectCSyncLockRelease(intrinsiccallNode, PTY_u16); + return; + case INTRN_C___sync_lock_release_4: + SelectCSyncLockRelease(intrinsiccallNode, PTY_u32); + return; + case INTRN_C___sync_lock_release_8: + SelectCSyncLockRelease(intrinsiccallNode, PTY_u64); + return; + case INTRN_C___atomic_store_n: + SelectCAtomicStoreN(intrinsiccallNode); + return; case INTRN_vector_zip_v8u8: case INTRN_vector_zip_v8i8: case INTRN_vector_zip_v4u16: case INTRN_vector_zip_v4i16: case INTRN_vector_zip_v2u32: case INTRN_vector_zip_v2i32: @@ -9111,7 +9914,7 @@ void AArch64CGFunc::SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) { break; } std::vector operands; /* Temporary. Deallocated on return. 
*/ - AArch64ListOperand *srcOpnds = memPool->New(*GetFuncScopeAllocator()); + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); for (size_t i = 0; i < intrinsiccallNode.NumOpnds(); i++) { BaseNode *argExpr = intrinsiccallNode.Opnd(i); Operand *opnd = HandleExpr(intrinsiccallNode, *argExpr); @@ -9142,7 +9945,7 @@ void AArch64CGFunc::SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) { if (Globals::GetInstance()->GetOptimLevel() == 0) { locReg.SetRegNotBBLocal(); } - AArch64MemOperand &locMem = GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, + MemOperand &locMem = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, &locReg, nullptr, &GetOrCreateOfstOpnd(0, k32BitSize), nullptr); RegOperand &newValReg = LoadIntoRegister(*newVal, PTY_a64); if (Globals::GetInstance()->GetOptimLevel() == 0) { @@ -9340,12 +10143,35 @@ Operand *AArch64CGFunc::SelectCisaligned(IntrinsicopNode &intrnNode) { return opnd0; } -Operand *AArch64CGFunc::SelectAArch64CSyncFetch(const IntrinsicopNode &intrinopNode, - PrimType pty, bool CalculBefore, bool isAdd) { +void AArch64CGFunc::SelectArithmeticAndLogical(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType, Opcode op) { + switch(op) { + case OP_add: + SelectAdd(resOpnd, opnd0, opnd1, primType); + break; + case OP_sub: + SelectSub(resOpnd, opnd0, opnd1, primType); + break; + case OP_band: + SelectBand(resOpnd, opnd0, opnd1, primType); + break; + case OP_bior: + SelectBior(resOpnd, opnd0, opnd1, primType); + break; + case OP_bxor: + SelectBxor(resOpnd, opnd0, opnd1, primType); + break; + default: + CHECK_FATAL(false, "unconcerned opcode for arithmetical and logical insns"); + break; + } +} + +Operand *AArch64CGFunc::SelectAArch64CSyncFetch(const IntrinsicopNode &intrinopNode, Opcode op, bool fetchBefore) { + auto primType = intrinopNode.GetPrimType(); Operand *addrOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnFirstOpnd)); - Operand *calculateEndOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnSecondOpnd)); + Operand *valueOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnSecondOpnd)); addrOpnd = &LoadIntoRegister(*addrOpnd, intrinopNode.GetNopndAt(kInsnFirstOpnd)->GetPrimType()); - calculateEndOpnd = &LoadIntoRegister(*calculateEndOpnd, intrinopNode.GetNopndAt(kInsnSecondOpnd)->GetPrimType()); + valueOpnd = &LoadIntoRegister(*valueOpnd, intrinopNode.GetNopndAt(kInsnSecondOpnd)->GetPrimType()); /* Create BB which includes atomic built_in function */ LabelIdx atomicBBLabIdx = CreateLabel(); @@ -9356,36 +10182,32 @@ Operand *AArch64CGFunc::SelectAArch64CSyncFetch(const IntrinsicopNode &intrinopN SetLab2BBMap(static_cast(atomicBBLabIdx), *atomicBB); GetCurBB()->AppendBB(*atomicBB); SetCurBB(*atomicBB); - uint32 typeByteSize = GetPrimTypeSize(pty); - auto stldColomu = static_cast(IsPowerOf2(static_cast(typeByteSize))); - uint32 regOpndByteSize = typeByteSize == k8ByteSize ? k8ByteSize : k4ByteSize; - /* atomic load */ - RegOperand *fetchVal = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, regOpndByteSize)); - auto &memOpnd = CreateMemOpnd(*static_cast(addrOpnd), 0, GetPrimTypeBitSize(pty)); - atomicBB->AppendInsn(GetCG()->BuildInstruction( - PickLoadStoreExclInsn(stldColomu, false, true), *fetchVal, memOpnd)); - /* select calculation */ - Operand *addResult = CalculBefore ? 
fetchVal : &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, regOpndByteSize)); - if (isAdd) { - SelectAdd(*addResult, *fetchVal, *calculateEndOpnd, pty); - } else { - SelectSub(*addResult, *fetchVal, *calculateEndOpnd, pty); - } - /* upload value in memory */ - RegOperand *resVal = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, regOpndByteSize)); - atomicBB->AppendInsn(GetCG()->BuildInstruction( - PickLoadStoreExclInsn(stldColomu, true, true), *resVal, *addResult, memOpnd)); - /* check the result of atomic store */ - atomicBB->AppendInsn( - GetCG()->BuildInstruction(MOP_wcbnz, *resVal, GetOrCreateLabelOperand(*atomicBB))); - + /* load from pointed address */ + auto primTypeP2Size = GetPrimTypeP2Size(primType); + auto *regLoaded = &CreateRegisterOperandOfType(primType); + auto &memOpnd = CreateMemOpnd(*static_cast(addrOpnd), 0, GetPrimTypeBitSize(primType)); + auto mOpLoad = PickLoadStoreExclInsn(primTypeP2Size, false, false); + atomicBB->AppendInsn(GetCG()->BuildInstruction(mOpLoad, *regLoaded, memOpnd)); + /* update loaded value */ + auto *regOperated = &CreateRegisterOperandOfType(primType); + SelectArithmeticAndLogical(*regOperated, *regLoaded, *valueOpnd, primType, op); + /* store to pointed address */ + auto *accessStatus = &CreateRegisterOperandOfType(PTY_u32); + auto mOpStore = PickLoadStoreExclInsn(primTypeP2Size, true, true); + atomicBB->AppendInsn(GetCG()->BuildInstruction(mOpStore, *accessStatus, *regOperated, memOpnd)); + /* check the exclusive accsess status */ + auto &atomicBBOpnd = GetOrCreateLabelOperand(*atomicBB); + atomicBB->AppendInsn(GetCG()->BuildInstruction(MOP_wcbnz, *accessStatus, atomicBBOpnd)); + + /* Data Memory Barrier */ BB *nextBB = CreateNewBB(); atomicBB->AppendBB(*nextBB); SetCurBB(*nextBB); - return CalculBefore ? addResult : fetchVal; + nextBB->AppendInsn(GetCG()->BuildInstruction(MOP_dmb_ish)); + return fetchBefore ? regLoaded : regOperated; } -Operand *AArch64CGFunc::SelectCSyncCmpSwap(const IntrinsicopNode &intrinopNode, PrimType pty, bool retBool) { +Operand *AArch64CGFunc::SelectCSyncCmpSwap(const IntrinsicopNode &intrinopNode, PrimType primType, bool retBool) { Operand *addrOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnFirstOpnd)); Operand *oldVal = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnSecondOpnd)); Operand *newVal = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnThirdOpnd)); @@ -9397,86 +10219,63 @@ Operand *AArch64CGFunc::SelectCSyncCmpSwap(const IntrinsicopNode &intrinopNode, SetLab2BBMap(static_cast(atomicBBLabIdx), *atomicBB); GetCurBB()->AppendBB(*atomicBB); SetCurBB(*atomicBB); - uint32 typeByteSize = GetPrimTypeSize(pty); - uint32 byteP2Size = typeByteSize == k8ByteSize ? k3ByteSize : k2ByteSize; - uint32 regOpndByteSize = typeByteSize == k8ByteSize ? 
k8ByteSize : k4ByteSize; - /* ldaxr */ - RegOperand *fetchVal = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, regOpndByteSize)); - auto &memOpnd = CreateMemOpnd(*static_cast(addrOpnd), 0, GetPrimTypeBitSize(pty)); - atomicBB->AppendInsn(GetCG()->BuildInstruction( - PickLoadStoreExclInsn(byteP2Size, false, true), *fetchVal, memOpnd)); + uint32 primTypeP2Size = GetPrimTypeP2Size(primType); + /* ldxr */ + auto *regLoaded = &CreateRegisterOperandOfType(primType); + auto &memOpnd = CreateMemOpnd(LoadIntoRegister(*addrOpnd, primType), 0, GetPrimTypeBitSize(primType)); + auto mOpLoad = PickLoadStoreExclInsn(primTypeP2Size, false, false); + atomicBB->AppendInsn(GetCG()->BuildInstruction(mOpLoad, *regLoaded, memOpnd)); /* cmp */ - SelectAArch64Cmp(*fetchVal, *oldVal, true, oldVal->GetSize()); - /* creat clrex; mov w0, wzr; ret */ - LabelIdx clrexIdx = CreateLabel(); - BB *atomicCleanBB = CreateNewBB(); - atomicCleanBB->SetAtomicBuiltIn(); - atomicCleanBB->AddLabel(clrexIdx); - SetLab2BBMap(static_cast(clrexIdx), *atomicCleanBB); - GetCurBB()->AppendBB(*atomicCleanBB); - atomicCleanBB->AppendInsn(GetCG()->BuildInstruction(MOP_clrex)); - if (retBool) { - Operand &zero = AArch64RegOperand::Get32bitZeroRegister(); - Operand *operand0 = memPool->New(R0, k32BitSize, kRegTyInt); - atomicCleanBB->AppendInsn(GetCG()->BuildInstruction(MOP_wmovrr, *operand0, zero)); - } - BB *atomicRetBB = CreateNewBB(); - atomicRetBB->SetKind(BB::kBBReturn); - atomicCleanBB->AppendBB(*atomicRetBB); - atomicRetBB->AppendInsn(GetCG()->BuildInstruction(MOP_xret)); - GetExitBBsVec().emplace_back(atomicRetBB); - /* bne ret bb */ + SelectAArch64Cmp(*regLoaded, *oldVal, true, oldVal->GetSize()); + /* bne */ Operand &rflag = GetOrCreateRflag(); - LabelOperand &targetOpnd = GetOrCreateLabelOperand(clrexIdx); - atomicBB->AppendInsn( - GetCG()->BuildInstruction(MOP_bne, rflag, targetOpnd)); - - LabelIdx atomicBBLabIdx2 = CreateLabel(); - BB *atomicBB2 = CreateNewBB(); - atomicBB2->SetKind(BB::kBBIf); - atomicBB2->SetAtomicBuiltIn(); - atomicBB2->AddLabel(atomicBBLabIdx2); - SetLab2BBMap(static_cast(atomicBBLabIdx2), *atomicBB2); - GetCurBB()->AppendBB(*atomicBB2); - SetCurBB(*atomicBB2); + LabelIdx nextBBLableIdx = CreateLabel(); + LabelOperand &targetOpnd = GetOrCreateLabelOperand(nextBBLableIdx); + atomicBB->AppendInsn(GetCG()->BuildInstruction(MOP_bne, rflag, targetOpnd)); /* stlxr */ - RegOperand *resVal = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, regOpndByteSize)); - RegOperand &newRegVal = LoadIntoRegister(*newVal, pty); - atomicBB2->AppendInsn(GetCG()->BuildInstruction( - PickLoadStoreExclInsn(byteP2Size, true, true), *resVal, newRegVal, memOpnd)); - atomicBB2->AppendInsn( - GetCG()->BuildInstruction(MOP_wcbnz, *resVal, GetOrCreateLabelOperand(*atomicBB))); + auto *accessStatus = &CreateRegisterOperandOfType(PTY_u32); + auto &newRegVal = LoadIntoRegister(*newVal, primType); + auto mOpStore = PickLoadStoreExclInsn(primTypeP2Size, true, true); + atomicBB->AppendInsn(GetCG()->BuildInstruction(mOpStore, *accessStatus, newRegVal, memOpnd)); + /* cbnz ==> check the exclusive accsess status */ + auto &atomicBBOpnd = GetOrCreateLabelOperand(*atomicBB); + atomicBB->AppendInsn(GetCG()->BuildInstruction(MOP_wcbnz, *accessStatus, atomicBBOpnd)); + /* Data Memory Barrier */ BB *nextBB = CreateNewBB(); - atomicRetBB->AppendBB(*nextBB); + nextBB->AddLabel(nextBBLableIdx); + nextBB->AppendInsn(GetCG()->BuildInstruction(MOP_dmb_ish)); + SetLab2BBMap(static_cast(nextBBLableIdx), *nextBB); + atomicBB->AppendBB(*nextBB); 
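// Editor's sketch (illustrative, not part of the patch): the loop assembled
// above for __sync_*_compare_and_swap expands to, schematically:
//
// .Lretry:
//   ldxr  x1, [xp]         // exclusive load of *ptr (regLoaded)
//   cmp   x1, x_old        // compare with the expected value
//   b.ne  .Ldone           // mismatch: skip the store
//   stlxr w2, x_new, [xp]  // exclusive store-release of the new value
//   cbnz  w2, .Lretry      // w2 != 0: exclusive access lost, retry
// .Ldone:
//   dmb   ish              // barrier required by __sync full-barrier semantics
//
// Register names are placeholders; w2 corresponds to 'accessStatus', and the
// bool variant additionally materializes the result with 'cset w0, eq' via
// SelectAArch64CSet.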
SetCurBB(*nextBB); - return fetchVal; -} - -Operand *AArch64CGFunc::SelectCSyncAddFetch(maple::IntrinsicopNode &intrinopNode, PrimType pty) { - return SelectAArch64CSyncFetch(intrinopNode, pty, true, true); -} - -Operand *AArch64CGFunc::SelectCSyncFetchAdd(IntrinsicopNode &intrinopNode, PrimType pty) { - return SelectAArch64CSyncFetch(intrinopNode, pty, false, true); -} - -Operand *AArch64CGFunc::SelectCSyncSubFetch(IntrinsicopNode &intrinopNode, PrimType pty) { - return SelectAArch64CSyncFetch(intrinopNode, pty, true, false); + /* bool version return true if the comparison is successful and newval is written */ + if (retBool) { + auto *retOpnd = &CreateRegisterOperandOfType(PTY_u32); + SelectAArch64CSet(*retOpnd, GetCondOperand(CC_EQ), false); + return retOpnd; + } + /* type version return the contents of *addrOpnd before the operation */ + return regLoaded; } -Operand *AArch64CGFunc::SelectCSyncFetchSub(IntrinsicopNode &intrinopNode, PrimType pty) { - return SelectAArch64CSyncFetch(intrinopNode, pty, false, false); +Operand *AArch64CGFunc::SelectCSyncFetch(IntrinsicopNode &intrinopNode, Opcode op, bool fetchBefore) { + return SelectAArch64CSyncFetch(intrinopNode, op, fetchBefore); } Operand *AArch64CGFunc::SelectCSyncBoolCmpSwap(IntrinsicopNode &intrinopNode, PrimType pty) { return SelectCSyncCmpSwap(intrinopNode, pty, true); } + Operand *AArch64CGFunc::SelectCSyncValCmpSwap(IntrinsicopNode &intrinopNode, PrimType pty) { return SelectCSyncCmpSwap(intrinopNode, pty); } + Operand *AArch64CGFunc::SelectCSyncLockTestSet(IntrinsicopNode &intrinopNode, PrimType pty) { + auto primType = intrinopNode.GetPrimType(); Operand *addrOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnFirstOpnd)); - Operand *valOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnSecondOpnd)); + Operand *valueOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnSecondOpnd)); + addrOpnd = &LoadIntoRegister(*addrOpnd, intrinopNode.GetNopndAt(kInsnFirstOpnd)->GetPrimType()); + valueOpnd = &LoadIntoRegister(*valueOpnd, intrinopNode.GetNopndAt(kInsnSecondOpnd)->GetPrimType()); + /* Create BB which includes atomic built_in function */ LabelIdx atomicBBLabIdx = CreateLabel(); BB *atomicBB = CreateNewBB(); @@ -9486,38 +10285,108 @@ Operand *AArch64CGFunc::SelectCSyncLockTestSet(IntrinsicopNode &intrinopNode, Pr SetLab2BBMap(static_cast(atomicBBLabIdx), *atomicBB); GetCurBB()->AppendBB(*atomicBB); SetCurBB(*atomicBB); - - uint32 typeByteSize = GetPrimTypeSize(pty); - uint32 byteP2Size = typeByteSize == k8ByteSize ? k3ByteSize : k2ByteSize; - uint32 regOpndByteSize = typeByteSize == k8ByteSize ? 
k8ByteSize : k4ByteSize;
-  /* atomic load */
-  RegOperand *fetchVal = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, regOpndByteSize));
-  auto &memOpnd = CreateMemOpnd(*static_cast<RegOperand*>(addrOpnd), 0, GetPrimTypeBitSize(pty));
-  atomicBB->AppendInsn(
-      GetCG()->BuildInstruction<AArch64Insn>(PickLoadStoreExclInsn(byteP2Size, false, true), *fetchVal, memOpnd));
-  /* upload value in memory */
-  RegOperand *resVal = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, regOpndByteSize));
-  RegOperand &newRegVal = LoadIntoRegister(*valOpnd, pty);
-  atomicBB->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(
-      PickLoadStoreExclInsn(byteP2Size, true, true), *resVal, newRegVal, memOpnd));
-
-  /* check the result of atomic store */
-  atomicBB->AppendInsn(
-      GetCG()->BuildInstruction<AArch64Insn>(MOP_wcbnz, *resVal, GetOrCreateLabelOperand(*atomicBB)));
-
+  /* load from pointed address */
+  auto primTypeP2Size = GetPrimTypeP2Size(primType);
+  auto *regLoaded = &CreateRegisterOperandOfType(primType);
+  auto &memOpnd = CreateMemOpnd(*static_cast<RegOperand*>(addrOpnd), 0, GetPrimTypeBitSize(primType));
+  auto mOpLoad = PickLoadStoreExclInsn(primTypeP2Size, false, false);
+  atomicBB->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(mOpLoad, *regLoaded, memOpnd));
+  /* store to pointed address */
+  auto *accessStatus = &CreateRegisterOperandOfType(PTY_u32);
+  auto mOpStore = PickLoadStoreExclInsn(primTypeP2Size, true, false);
+  atomicBB->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(mOpStore, *accessStatus, *valueOpnd, memOpnd));
+  /* check the exclusive access status */
+  auto &atomicBBOpnd = GetOrCreateLabelOperand(*atomicBB);
+  atomicBB->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(MOP_wcbnz, *accessStatus, atomicBBOpnd));
+
+  /* Data Memory Barrier */
   BB *nextBB = CreateNewBB();
   atomicBB->AppendBB(*nextBB);
   SetCurBB(*nextBB);
-  return fetchVal;
+  nextBB->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(MOP_dmb_ish));
+  return regLoaded;
 }

-Operand *AArch64CGFunc::SelectCSyncLockRelease(IntrinsicopNode &intrinopNode, PrimType pty) {
-  Operand *addrOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnFirstOpnd));
-  MOperator mOp = (pty == PTY_u32) ? MOP_wstlr : MOP_xstlr;
-  AArch64MemOperand *memOperand = memPool->New<AArch64MemOperand>(R0, 0, k64BitSize);
-  Operand &zero =
-      (pty == PTY_u32) ? AArch64RegOperand::Get32bitZeroRegister() : AArch64RegOperand::Get64bitZeroRegister();
-  GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(mOp, zero, *memOperand));
-  return addrOpnd;
+
+void AArch64CGFunc::SelectCSyncLockRelease(const IntrinsiccallNode &intrinsiccall, PrimType primType) {
+  auto *addrOpnd = HandleExpr(intrinsiccall, *intrinsiccall.GetNopndAt(kInsnFirstOpnd));
+  auto primTypeBitSize = GetPrimTypeBitSize(primType);
+  auto mOp = PickStInsn(primTypeBitSize, primType, AArch64isa::kMoRelease);
+  auto &zero = GetZeroOpnd(primTypeBitSize);
+  auto &memOpnd = CreateMemOpnd(LoadIntoRegister(*addrOpnd, primType), 0, primTypeBitSize);
+  GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(mOp, zero, memOpnd));
+}
+
+Operand *AArch64CGFunc::SelectCSyncSynchronize(IntrinsicopNode &intrinopNode) {
+  (void)intrinopNode;
+  CHECK_FATAL(false, "SelectCSyncSynchronize is not implemented yet");
+  return nullptr;
+}
+
+AArch64isa::MemoryOrdering AArch64CGFunc::PickMemOrder(std::memory_order memOrder, bool isLdr) {
+  switch (memOrder) {
+    case std::memory_order_relaxed:
+      return AArch64isa::kMoNone;
+    case std::memory_order_consume:
+    case std::memory_order_acquire:
+      return isLdr ? AArch64isa::kMoAcquire : AArch64isa::kMoNone;
+    case std::memory_order_release:
+      return isLdr ? AArch64isa::kMoNone : AArch64isa::kMoRelease;
+    case std::memory_order_acq_rel:
+    case std::memory_order_seq_cst:
+      return isLdr ? AArch64isa::kMoAcquire : AArch64isa::kMoRelease;
+    default:
+      CHECK_FATAL(false, "unexpected memorder");
+      return AArch64isa::kMoNone;
+  }
+}
+
+/*
+ * regassign %1 (intrinsicop C___Atomic_Load_N(ptr, memorder))
+ * ====> %1 = *ptr
+ * let %1 -> x0
+ * let ptr -> x1
+ * implement to asm: ldr/ldar x0, [x1]
+ * a load-acquire would replace ldr if memorder is not 0
+ */
+Operand *AArch64CGFunc::SelectCAtomicLoadN(IntrinsicopNode &intrinsicopNode) {
+  auto *addrOpnd = HandleExpr(intrinsicopNode, *intrinsicopNode.Opnd(0));
+  auto *memOrderOpnd = intrinsicopNode.Opnd(1);
+  auto primType = intrinsicopNode.GetPrimType();
+  auto *memOrderConst = static_cast<MIRIntConst*>(static_cast<ConstvalNode*>(memOrderOpnd)->GetConstVal());
+  auto memOrder = static_cast<std::memory_order>(memOrderConst->GetValue());
+  return SelectAtomicLoad(*addrOpnd, primType, PickMemOrder(memOrder, true));
+}
+
+/*
+ * regassign %1 (intrinsicop C___Atomic_exchange_n(ptr, val, memorder))
+ * ====> %1 = *ptr; *ptr = val;
+ * let %1 -> x0
+ * let ptr -> x1
+ * let val -> x2
+ * implement to asm:
+ * ldr/ldar x0, [x1]
+ * str/stlr x2, [x1]
+ * a load-acquire would replace ldr if acquire needed
+ * a store-release would replace str if release needed
+ */
+Operand *AArch64CGFunc::SelectCAtomicExchangeN(IntrinsicopNode &intrinsicopNode) {
+  auto primType = intrinsicopNode.GetPrimType();
+  auto *addrOpnd = HandleExpr(intrinsicopNode, *intrinsicopNode.Opnd(0));
+  auto *valueOpnd = HandleExpr(intrinsicopNode, *intrinsicopNode.Opnd(1));
+  auto *memOrderOpnd = intrinsicopNode.Opnd(kInsnThirdOpnd);
+  auto *memOrderConst = static_cast<MIRIntConst*>(static_cast<ConstvalNode*>(memOrderOpnd)->GetConstVal());
+  auto memOrder = static_cast<std::memory_order>(memOrderConst->GetValue());
+  auto *result = SelectAtomicLoad(*addrOpnd, primType, PickMemOrder(memOrder, true));
+  SelectAtomicStore(*valueOpnd, *addrOpnd, primType, PickMemOrder(memOrder, false));
+  return result;
+}
+
+Operand *AArch64CGFunc::SelectAtomicLoad(Operand &addrOpnd, PrimType primType, AArch64isa::MemoryOrdering memOrder) {
+  auto mOp = PickLdInsn(GetPrimTypeBitSize(primType), primType, memOrder);
+  auto &memOpnd = CreateMemOpnd(LoadIntoRegister(addrOpnd, PTY_a64), 0, k64BitSize);
+  auto *resultOpnd = &CreateRegisterOperandOfType(primType);
+  GetCurBB()->AppendInsn(GetCG()->BuildInstruction<AArch64Insn>(mOp, *resultOpnd, memOpnd));
+  return resultOpnd;
 }

 Operand *AArch64CGFunc::SelectCReturnAddress(IntrinsicopNode &intrinopNode) {
@@ -9659,7 +10528,7 @@ MOperator AArch64CGFunc::PickLoadStoreExclInsn(uint32 byteP2Size, bool store, bo
   return optr;
 }

-RegOperand *AArch64CGFunc::SelectLoadExcl(PrimType valPrimType, AArch64MemOperand &loc, bool acquire) {
+RegOperand *AArch64CGFunc::SelectLoadExcl(PrimType valPrimType, MemOperand &loc, bool acquire) {
   uint32 p2size = GetPrimTypeP2Size(valPrimType);

   RegOperand &result = CreateRegisterOperandOfType(valPrimType);
@@ -9669,7 +10538,7 @@ RegOperand *AArch64CGFunc::SelectLoadExcl(PrimType valPrimType, AArch64MemOperan
   return &result;
 }

-RegOperand *AArch64CGFunc::SelectStoreExcl(PrimType valPty, AArch64MemOperand &loc, RegOperand &newVal, bool release) {
+RegOperand *AArch64CGFunc::SelectStoreExcl(PrimType valPty, MemOperand &loc, RegOperand &newVal, bool release) {
   uint32 p2size = GetPrimTypeP2Size(valPty);

   /* the result (success/fail) is to be stored in a 32-bit register */
@@ -9701,88 +10570,69 @@ MemOperand &AArch64CGFunc::LoadStructCopyBase(const MIRSymbol &symbol, int64 off
   return CreateMemOpnd(*vreg, offset,
static_cast<uint32>(dataSize));
 }
-/* For long reach branch, insert a branch in between.
- * convert
- *     condbr target_label
- *     fallthruBB
- * to
- *     condbr pad_label          bb
- *     uncondbr bypass_label     brBB
- * pad_label                     padBB
- *     uncondbr target_label
- * bypass_label                  bypassBB
- *     ...                       fallthruBB
- */
+ /* For long branch, insert an unconditional branch.
+  * From                       To
+  *   cond_br target_label       reverse_cond_br fallthru_label
+  *   fallthruBB                 unconditional br target_label
+  *                              fallthru_label:
+  *                                fallthruBB
+  */
 void AArch64CGFunc::InsertJumpPad(Insn *insn) {
   BB *bb = insn->GetBB();
   ASSERT(bb, "instruction has no bb");
-  ASSERT(bb->GetKind() == BB::kBBIf, "instruction is not in a if bb");
+  ASSERT(bb->GetKind() == BB::kBBIf || bb->GetKind() == BB::kBBGoto,
+         "instruction is in neither if bb nor goto bb");
+  if (bb->GetKind() == BB::kBBGoto) {
+    return;
+  }
+  ASSERT(bb->NumSuccs() == k2ByteSize, "if bb should have 2 successors");
-  LabelIdx padLabel = CreateLabel();
-  BB *brBB = CreateNewBB();
-  BB *padBB = CreateNewBB();
-  SetLab2BBMap(static_cast<int32>(padLabel), *padBB);
-  padBB->AddLabel(padLabel);
+  BB *longBrBB = CreateNewBB();
-  BB *targetBB;
   BB *fallthruBB = bb->GetNext();
-  ASSERT(bb->NumSuccs() == k2ByteSize, "if bb should have 2 successors");
+  LabelIdx fallthruLBL = fallthruBB->GetLabIdx();
+  if (fallthruLBL == 0) {
+    fallthruLBL = CreateLabel();
+    SetLab2BBMap(static_cast<int32>(fallthruLBL), *fallthruBB);
+    fallthruBB->AddLabel(fallthruLBL);
+  }
+
+  BB *targetBB;
   if (bb->GetSuccs().front() == fallthruBB) {
     targetBB = bb->GetSuccs().back();
   } else {
     targetBB = bb->GetSuccs().front();
   }
-  /* Regardless targetBB as is or an non-empty successor, it needs to be removed */
+  LabelIdx targetLBL = targetBB->GetLabIdx();
+  if (targetLBL == 0) {
+    targetLBL = CreateLabel();
+    SetLab2BBMap(static_cast<int32>(targetLBL), *targetBB);
+    targetBB->AddLabel(targetLBL);
+  }
+
+  // Adjustment on br and CFG
   bb->RemoveSuccs(*targetBB);
-  targetBB->RemovePreds(*bb);
-  while (targetBB->GetKind() == BB::kBBFallthru && targetBB->NumInsn() == 0) {
-    targetBB = targetBB->GetNext();
-  }
-  bb->SetNext(brBB);
-  brBB->SetNext(padBB);
-  padBB->SetNext(fallthruBB);
-  brBB->SetPrev(bb);
-  padBB->SetPrev(brBB);
-  fallthruBB->SetPrev(padBB);
-  /* adjust bb branch preds succs for jump to padBB */
-  LabelOperand &padLabelOpnd = GetOrCreateLabelOperand(padLabel);
-  uint32 idx = insn->GetJumpTargetIdx();
-  insn->SetOperand(idx, padLabelOpnd);
-  bb->RemoveSuccs(*fallthruBB);
-  bb->PushBackSuccs(*brBB);  /* new fallthru */
-  bb->PushBackSuccs(*padBB);  /* new target */
+  bb->PushBackSuccs(*longBrBB);
+  bb->SetNext(longBrBB);
+  // reverse cond br targeting fallthruBB
+  uint32 targetIdx = insn->GetJumpTargetIdx();
+  MOperator mOp = insn->FlipConditionOp(insn->GetMachineOpcode(), targetIdx);
+  insn->SetMOP(mOp);
+  LabelOperand &fallthruBBLBLOpnd = GetOrCreateLabelOperand(fallthruLBL);
+  insn->SetOperand(targetIdx, fallthruBBLBLOpnd);
+
+  longBrBB->PushBackPreds(*bb);
+  longBrBB->PushBackSuccs(*targetBB);
+  LabelOperand &targetLBLOpnd = GetOrCreateLabelOperand(targetLBL);
+  longBrBB->AppendInsn(cg->BuildInstruction<AArch64Insn>(MOP_xuncond, targetLBLOpnd));
+  longBrBB->SetPrev(bb);
+  longBrBB->SetNext(fallthruBB);
+  longBrBB->SetKind(BB::kBBGoto);
+
+  fallthruBB->SetPrev(longBrBB);
   targetBB->RemovePreds(*bb);
-  targetBB->PushBackPreds(*padBB);
-
-  LabelIdx bypassLabel = fallthruBB->GetLabIdx();
-  if (bypassLabel == 0) {
-    bypassLabel = CreateLabel();
-    SetLab2BBMap(static_cast<int32>(bypassLabel), *fallthruBB);
-    fallthruBB->AddLabel(bypassLabel);
-  }
-  LabelOperand &bypassLabelOpnd
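
The reason this pad is needed at all is branch reach: an A64 conditional branch encodes a 19-bit word offset, while an unconditional b encodes 26 bits. A small standalone sketch of that range check follows; the figures are the standard architectural encodings, not constants taken from this patch.

    #include <cassert>
    #include <cstdint>
    #include <cstdlib>

    // B.cond/CBZ-class branches: signed imm19 word offset -> +/-1MiB.
    // B: signed imm26 word offset -> +/-128MiB. (TBZ/TBNZ is only imm14, +/-32KiB.)
    constexpr int64_t kCondBrReach = int64_t{1} << 20;
    constexpr int64_t kUncondBrReach = int64_t{1} << 27;

    // True when a conditional branch must be rewritten as
    // "reversed cond-branch over an unconditional b", as InsertJumpPad does above.
    bool NeedsJumpPad(int64_t byteOffset) {
      return std::llabs(byteOffset) >= kCondBrReach;
    }

    int main() {
      assert(!NeedsJumpPad(512 * 1024));      // in range for b.cond
      assert(NeedsJumpPad(4 * 1024 * 1024));  // needs the pad; plain b still reaches
      return 0;
    }
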
= GetOrCreateLabelOperand(bypassLabel); - brBB->AppendInsn(cg->BuildInstruction(MOP_xuncond, bypassLabelOpnd)); - brBB->SetKind(BB::kBBGoto); - brBB->PushBackPreds(*bb); - brBB->PushBackSuccs(*fallthruBB); - - RegOperand &targetAddr = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); - LabelIdx targetLabel = targetBB->GetLabIdx(); - if (targetLabel == 0) { - targetLabel = CreateLabel(); - SetLab2BBMap(static_cast(targetLabel), *targetBB); - targetBB->AddLabel(targetLabel); - } - ImmOperand &targetLabelOpnd = CreateImmOperand(targetLabel, k32BitSize, false); - padBB->AppendInsn(cg->BuildInstruction(MOP_adrp_label, targetAddr, targetLabelOpnd)); - padBB->AppendInsn(cg->BuildInstruction(MOP_xbr, targetAddr, targetLabelOpnd)); - padBB->SetKind(BB::kBBIgoto); - padBB->PushBackPreds(*bb); - padBB->PushBackSuccs(*targetBB); - - fallthruBB->RemovePreds(*bb); - fallthruBB->PushBackPreds(*brBB); + targetBB->PushBackPreds(*longBrBB); } RegOperand *AArch64CGFunc::AdjustOneElementVectorOperand(PrimType oType, RegOperand *opnd) { @@ -9795,7 +10645,7 @@ RegOperand *AArch64CGFunc::AdjustOneElementVectorOperand(PrimType oType, RegOper RegOperand *AArch64CGFunc::SelectOneElementVectorCopy(Operand *src, PrimType sType) { RegOperand *res = &CreateRegisterOperandOfType(PTY_f64); SelectCopy(*res, PTY_f64, *src, sType); - static_cast(res)->SetIF64Vec(); + static_cast(res)->SetIF64Vec(); return res; } @@ -9861,7 +10711,9 @@ RegOperand *AArch64CGFunc::SelectVectorImmMov(PrimType rType, Operand *src, Prim int64 val = static_cast(src)->GetValue(); /* copy the src imm operand to a reg if out of range */ - if ((GetPrimTypeSize(sType) > k4ByteSize && val != 0) || val < kMinImmVal || val > kMaxImmVal) { + if ((GetVecEleSize(rType) >= k64BitSize) || + (GetPrimTypeSize(sType) > k4ByteSize && val != 0) || + (val < kMinImmVal || val > kMaxImmVal)) { Operand *reg = &CreateRegisterOperandOfType(sType); SelectCopy(*reg, sType, *src, sType); return SelectVectorRegMov(rType, reg, sType); @@ -9918,7 +10770,7 @@ RegOperand *AArch64CGFunc::SelectVectorGetHigh(PrimType rType, Operand *src) { GetCurBB()->AppendInsn(*insn); if (oType != rType) { res = AdjustOneElementVectorOperand(oType, res); - static_cast(res)->SetIF64Vec(); + static_cast(res)->SetIF64Vec(); } return res; } @@ -9934,7 +10786,7 @@ RegOperand *AArch64CGFunc::SelectVectorGetLow(PrimType rType, Operand *src) { GetCurBB()->AppendInsn(*insn); if (oType != rType) { res = AdjustOneElementVectorOperand(oType, res); - static_cast(res)->SetIF64Vec(); + static_cast(res)->SetIF64Vec(); } return res; } @@ -10071,7 +10923,7 @@ RegOperand *AArch64CGFunc::SelectVectorAbsSubL(PrimType rType, Operand *o1, Oper RegOperand *AArch64CGFunc::SelectVectorMerge(PrimType rType, Operand *o1, Operand *o2, int32 index) { if (!IsPrimitiveVector(rType)) { - static_cast(o1)->SetIF64Vec(); + static_cast(o1)->SetIF64Vec(); return static_cast(o1); /* 64x1_t, index equals 0 */ } RegOperand *res = &CreateRegisterOperandOfType(rType); @@ -10143,7 +10995,7 @@ void AArch64CGFunc::PrepareVectorOperands(Operand **o1, PrimType &oty1, Operand bool immOpnd = false; if (opd->IsConstImmediate()) { int64 val = static_cast(opd)->GetValue(); - if (val >= kMinImmVal && val <= kMaxImmVal) { + if (val >= kMinImmVal && val <= kMaxImmVal && GetVecEleSize(rType) < k64BitSize) { immOpnd = true; } else { RegOperand *regOpd = &CreateRegisterOperandOfType(origTyp); diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_color_ra.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_color_ra.cpp index 
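
On the SelectVectorImmMov guard folded in above: movi/mvni can materialize most 8/16/32-bit per-lane patterns directly, but only a restricted byte-mask form for 64-bit lanes, so the patch routes 64-bit-element immediates through a GPR copy. A standalone sketch of the predicate; the kMinImmVal/kMaxImmVal bounds used here are assumed placeholders, not values from this patch.

    #include <cassert>
    #include <cstdint>

    // Assumed single-instruction movi range for the narrow-lane case; the real
    // bounds live in kMinImmVal/kMaxImmVal in the AArch64 backend.
    constexpr int64_t kMinImmValAssumed = -128;
    constexpr int64_t kMaxImmValAssumed = 255;

    // Mirrors the patched condition: fall back to a register copy when the lane is
    // 64 bits wide, when a wide nonzero source can't be a movi payload, or when the
    // value is outside the immediate range.
    bool MustCopyThroughRegister(uint32_t laneBits, uint32_t srcByteSize, int64_t val) {
      return laneBits >= 64 || (srcByteSize > 4 && val != 0) ||
             val < kMinImmValAssumed || val > kMaxImmValAssumed;
    }

    int main() {
      assert(MustCopyThroughRegister(64, 8, 1));    // 64-bit lanes: always via GPR
      assert(!MustCopyThroughRegister(32, 4, 7));   // small imm, 32-bit lanes: movi
      assert(MustCopyThroughRegister(32, 4, 999));  // out of range: via GPR
      return 0;
    }
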
859e968f706b56ad47cfeabbd76bf3bfad80a942..aad5ad5db5c02d0cabae714c79b5600934e43673 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_color_ra.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_color_ra.cpp @@ -142,7 +142,7 @@ bool LiveRange::IsRematerializable(AArch64CGFunc &cgFunc, uint8 rematLev) const /* check stImm.GetOffset() is in addri12 */ StImmOperand &stOpnd = cgFunc.CreateStImmOperand(*symbol, offset, 0); uint32 dataSize = GetPrimTypeBitSize(symType); - AArch64ImmOperand &immOpnd = cgFunc.CreateImmOperand(stOpnd.GetOffset(), dataSize, false); + ImmOperand &immOpnd = cgFunc.CreateImmOperand(stOpnd.GetOffset(), dataSize, false); if (!immOpnd.IsInBitSize(kMaxImmVal12Bits, 0)) { return false; } @@ -157,7 +157,7 @@ bool LiveRange::IsRematerializable(AArch64CGFunc &cgFunc, uint8 rematLev) const } std::vector LiveRange::Rematerialize(AArch64CGFunc *cgFunc, - AArch64RegOperand ®Op) { + RegOperand ®Op) { std::vector insns; CG *cg = cgFunc->GetCG(); switch (op) { @@ -179,7 +179,7 @@ std::vector LiveRange::Rematerialize(AArch64CGFunc *cgFunc, case OP_dread: { const MIRSymbol *symbol = rematInfo.sym; PrimType symType = symbol->GetType()->GetPrimType(); - AArch64RegOperand *regOp64 = &cgFunc->GetOrCreatePhysicalRegisterOperand( + RegOperand *regOp64 = &cgFunc->GetOrCreatePhysicalRegisterOperand( static_cast(regOp.GetRegisterNumber()), k64BitSize, regOp.GetRegisterType()); int32 offset = 0; if (fieldID != 0) { @@ -214,7 +214,7 @@ std::vector LiveRange::Rematerialize(AArch64CGFunc *cgFunc, AArch64SymbolAlloc *symLoc = static_cast( cgFunc->GetMemlayout()->GetSymAllocInfo(symbol->GetStIndex())); - AArch64ImmOperand *offsetOp = nullptr; + ImmOperand *offsetOp = nullptr; offsetOp = &cgFunc->CreateImmOperand(cgFunc->GetBaseOffset(*symLoc) + offset, k64BitSize, false); @@ -235,17 +235,17 @@ std::vector LiveRange::Rematerialize(AArch64CGFunc *cgFunc, if (!addrUpper && CGOptions::IsPIC() && ((symbol->GetStorageClass() == kScGlobal) || (symbol->GetStorageClass() == kScExtern))) { /* ldr x0, [x0, #:got_lo12:Ljava_2Flang_2FSystem_3B_7Cout] */ - AArch64OfstOperand &offsetOp = cgFunc->CreateOfstOpnd(*symbol, offset, - 0); - AArch64MemOperand &memOpnd = - cgFunc->GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOi, + OfstOperand &offsetOp = cgFunc->CreateOfstOpnd(*symbol, offset, 0); + MemOperand &memOpnd = + cgFunc->GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, kSizeOfPtr * kBitsPerByte, - static_cast(®Op), + static_cast(®Op), nullptr, &offsetOp, nullptr); - insn = &cg->BuildInstruction(MOP_xldr, regOp, memOpnd); + MOperator ldOp = (memOpnd.GetSize() == k64BitSize) ? 
MOP_xldr : MOP_wldr; + insn = &cg->BuildInstruction(ldOp, regOp, memOpnd); insns.push_back(insn); if (offset > 0) { - AArch64OfstOperand &ofstOpnd = cgFunc->GetOrCreateOfstOpnd(static_cast(offset), k32BitSize); + OfstOperand &ofstOpnd = cgFunc->GetOrCreateOfstOpnd(static_cast(offset), k32BitSize); insns.push_back(&cg->BuildInstruction( MOP_xaddrri12, regOp, regOp, ofstOpnd)); } @@ -647,10 +647,10 @@ bool GraphColorRegAllocator::IsUnconcernedReg(const RegOperand ®Opnd) const { if (regType == kRegTyCc || regType == kRegTyVary) { return true; } - if (regOpnd.IsConstReg()) { + uint32 regNO = regOpnd.GetRegisterNumber(); + if (regNO == RZR) { return true; } - uint32 regNO = regOpnd.GetRegisterNumber(); return IsUnconcernedReg(regNO); } @@ -986,7 +986,7 @@ void GraphColorRegAllocator::SetOpndConflict(const Insn &insn, bool onlyDef) { ClassifyOperand(pregs, vregs, opnd); } if (opnd.IsMemoryAccessOperand()) { - auto &memOpnd = static_cast(opnd); + auto &memOpnd = static_cast(opnd); Operand *base = memOpnd.GetBaseRegister(); if (base != nullptr && !memOpnd.IsIntactIndexed()) { ClassifyOperand(pregs, vregs, *base); @@ -1037,7 +1037,7 @@ void GraphColorRegAllocator::ComputeLiveRangesForEachDefOperand(Insn &insn, bool } Operand &opnd = insn.GetOperand(i); if (opnd.IsMemoryAccessOperand()) { - auto &memOpnd = static_cast(opnd); + auto &memOpnd = static_cast(opnd); if (!memOpnd.IsIntactIndexed()) { SetupLiveRangeByOp(opnd, insn, true, numUses); ++numDefs; @@ -1077,7 +1077,7 @@ void GraphColorRegAllocator::ComputeLiveRangesForEachUseOperand(Insn &insn) { SetupLiveRangeByOp(*op, insn, false, numUses); } } else if (opnd.IsMemoryAccessOperand()) { - auto &memOpnd = static_cast(opnd); + auto &memOpnd = static_cast(opnd); Operand *base = memOpnd.GetBaseRegister(); Operand *offset = memOpnd.GetIndexRegister(); if (base != nullptr) { @@ -1107,7 +1107,7 @@ void GraphColorRegAllocator::ComputeLiveRangesUpdateIfInsnIsCall(const Insn &ins /* active the parametes */ Operand &opnd1 = insn.GetOperand(1); if (opnd1.IsList()) { - auto &srcOpnds = static_cast(opnd1); + auto &srcOpnds = static_cast(opnd1); for (auto regOpnd : srcOpnds.GetOperands()) { ASSERT(!regOpnd->IsVirtualRegister(), "not be a virtual register"); auto physicalReg = static_cast(regOpnd->GetRegisterNumber()); @@ -2702,7 +2702,7 @@ void GraphColorRegAllocator::LocalRaForEachUseOperand(const Insn &insn, LocalReg if (opnd.IsList()) { continue; } else if (opnd.IsMemoryAccessOperand()) { - auto &memOpnd = static_cast(opnd); + auto &memOpnd = static_cast(opnd); Operand *base = memOpnd.GetBaseRegister(); Operand *offset = memOpnd.GetIndexRegister(); if (base != nullptr) { @@ -2968,9 +2968,9 @@ void GraphColorRegAllocator::SpillOperandForSpillPre(Insn &insn, const Operand & stype = (regSize <= k32) ? 
PTY_f32 : PTY_f64; } - if (a64CGFunc->IsImmediateOffsetOutOfRange(*static_cast(spillMem), k64)) { + if (a64CGFunc->IsImmediateOffsetOutOfRange(*spillMem, k64)) { regno_t pregNO = R16; - spillMem = &a64CGFunc->SplitOffsetWithAddInstruction(*static_cast(spillMem), k64, + spillMem = &a64CGFunc->SplitOffsetWithAddInstruction(*spillMem, k64, static_cast(pregNO), false, &insn); } Insn &stInsn = @@ -2981,7 +2981,7 @@ void GraphColorRegAllocator::SpillOperandForSpillPre(Insn &insn, const Operand & insn.GetBB()->InsertInsnBefore(insn, stInsn); } -void GraphColorRegAllocator::SpillOperandForSpillPost(Insn &insn, const Operand &opnd, AArch64RegOperand &phyOpnd, +void GraphColorRegAllocator::SpillOperandForSpillPost(Insn &insn, const Operand &opnd, RegOperand &phyOpnd, uint32 spillIdx, bool needSpill) { if (!needSpill) { return; @@ -3032,9 +3032,9 @@ void GraphColorRegAllocator::SpillOperandForSpillPost(Insn &insn, const Operand bool isOutOfRange = false; Insn *nextInsn = insn.GetNextMachineInsn(); - if (a64CGFunc->IsImmediateOffsetOutOfRange(*static_cast(spillMem), k64)) { + if (a64CGFunc->IsImmediateOffsetOutOfRange(*spillMem, k64)) { regno_t pregNO = R16; - spillMem = &a64CGFunc->SplitOffsetWithAddInstruction(*static_cast(spillMem), k64, + spillMem = &a64CGFunc->SplitOffsetWithAddInstruction(*spillMem, k64, static_cast(pregNO), true, &insn); isOutOfRange = true; } @@ -3120,7 +3120,7 @@ MemOperand *GraphColorRegAllocator::GetSpillOrReuseMem(LiveRange &lr, uint32 reg * then use it for the current spill, then reload it again. */ Insn *GraphColorRegAllocator::SpillOperand(Insn &insn, const Operand &opnd, bool isDef, - AArch64RegOperand &phyOpnd, bool forCall) { + RegOperand &phyOpnd, bool forCall) { auto ®Opnd = static_cast(opnd); uint32 regNO = regOpnd.GetRegisterNumber(); uint32 pregNO = phyOpnd.GetRegisterNumber(); @@ -3399,7 +3399,7 @@ RegOperand *GraphColorRegAllocator::GetReplaceOpndForLRA(Insn &insn, const Opera LogInfo::MapleLogger() << "\tassigning lra spill reg " << spillReg << "\n"; } } - AArch64RegOperand &phyOpnd = static_cast(cgFunc)->GetOrCreatePhysicalRegisterOperand( + RegOperand &phyOpnd = static_cast(cgFunc)->GetOrCreatePhysicalRegisterOperand( static_cast(spillReg), regOpnd.GetSize(), regType); SpillOperandForSpillPre(insn, regOpnd, phyOpnd, spillIdx, needSpillLr); Insn *spill = SpillOperand(insn, regOpnd, isDef, phyOpnd); @@ -3640,7 +3640,7 @@ RegOperand *GraphColorRegAllocator::GetReplaceOpnd(Insn &insn, const Operand &op regNO = lr->GetAssignedRegNO(); } bool isCalleeReg = AArch64Abi::IsCalleeSavedReg(static_cast(regNO)); - AArch64RegOperand &phyOpnd = static_cast(cgFunc)->GetOrCreatePhysicalRegisterOperand( + RegOperand &phyOpnd = static_cast(cgFunc)->GetOrCreatePhysicalRegisterOperand( static_cast(regNO), opnd.GetSize(), regType); if (GCRA_DUMP) { LogInfo::MapleLogger() << "replace R" << vregNO << " with R" << (regNO - R0) << "\n"; @@ -3723,7 +3723,7 @@ uint64 GraphColorRegAllocator::FinalizeRegisterPreprocess(FinalizeRegisterInfo & fInfo.SetUseOperand(opnd, static_cast(i)); } } else if (opnd.IsMemoryAccessOperand()) { - auto &memOpnd = static_cast(opnd); + auto &memOpnd = static_cast(opnd); Operand *base = memOpnd.GetBaseRegister(); if (base != nullptr) { fInfo.SetBaseOperand(opnd, static_cast(i)); @@ -3797,12 +3797,12 @@ void GraphColorRegAllocator::GenerateSpillFillRegs(const Insn &insn) { if (opnd->IsList()) { // call parameters } else if (opnd->IsMemoryAccessOperand()) { - auto *memopnd = static_cast(opnd); - if (memopnd->GetIndexOpt() == AArch64MemOperand::kPreIndex || - 
memopnd->GetIndexOpt() == AArch64MemOperand::kPostIndex) { + auto *memopnd = static_cast(opnd); + if (memopnd->GetIndexOpt() == MemOperand::kPreIndex || + memopnd->GetIndexOpt() == MemOperand::kPostIndex) { isIndexedMemOp = true; } - auto *base = static_cast(memopnd->GetBaseRegister()); + auto *base = static_cast(memopnd->GetBaseRegister()); if (base != nullptr && !IsUnconcernedReg(*base)) { if (!memopnd->IsIntactIndexed()) { if (base->IsPhysicalRegister()) { @@ -3823,7 +3823,7 @@ void GraphColorRegAllocator::GenerateSpillFillRegs(const Insn &insn) { } } } - AArch64RegOperand *offset = static_cast(memopnd->GetIndexRegister()); + RegOperand *offset = static_cast(memopnd->GetIndexRegister()); if (offset != nullptr) { if (offset->IsPhysicalRegister()) { usePregs.insert(offset->GetRegisterNumber()); @@ -3932,7 +3932,7 @@ RegOperand *GraphColorRegAllocator::CreateSpillFillCode(const RegOperand &opnd, if (lr != nullptr && lr->IsSpilled()) { AArch64CGFunc *a64cgfunc = static_cast(cgFunc); CG *cg = a64cgfunc->GetCG(); - uint32 bits = opnd.GetValidBitsNum(); + uint32 bits = opnd.GetSize(); if (bits < k32BitSize) { bits = k32BitSize; } @@ -3943,14 +3943,14 @@ RegOperand *GraphColorRegAllocator::CreateSpillFillCode(const RegOperand &opnd, RegType rtype = lr->GetRegType(); spreg = lr->GetSpillReg(); ASSERT(lr->GetSpillReg() != 0, "no reg in CreateSpillFillCode"); - AArch64RegOperand *regopnd = + RegOperand *regopnd = &a64cgfunc->GetOrCreatePhysicalRegisterOperand(static_cast(spreg), opnd.GetSize(), rtype); if (lr->GetRematLevel() != rematOff) { if (isdef) { return nullptr; } else { - std::vector rematInsns = lr->Rematerialize(a64cgfunc, *static_cast(regopnd)); + std::vector rematInsns = lr->Rematerialize(a64cgfunc, *static_cast(regopnd)); for (auto &&remat : rematInsns) { std::string comment = " REMATERIALIZE color vreg: " + std::to_string(vregno); @@ -4016,7 +4016,7 @@ bool GraphColorRegAllocator::SpillLiveRangeForSpills() { // call parameters } else if (opnd->IsMemoryAccessOperand()) { MemOperand *newmemopnd = nullptr; - auto *memopnd = static_cast(opnd); + auto *memopnd = static_cast(opnd); auto *base = static_cast(memopnd->GetBaseRegister()); if (base != nullptr && base->IsVirtualRegister()) { RegOperand *replace = CreateSpillFillCode(*base, *insn, spillCnt); @@ -4025,7 +4025,7 @@ bool GraphColorRegAllocator::SpillLiveRangeForSpills() { } if (replace != nullptr) { spillCnt++; - newmemopnd = static_cast(static_cast(opnd)->Clone(*cgFunc->GetMemoryPool())); + newmemopnd = (static_cast(opnd)->Clone(*cgFunc->GetMemoryPool())); newmemopnd->SetBaseRegister(*replace); insn->SetOperand(i, *newmemopnd); done = true; @@ -4037,7 +4037,7 @@ bool GraphColorRegAllocator::SpillLiveRangeForSpills() { if (replace != nullptr) { spillCnt++; if (newmemopnd == nullptr) { - newmemopnd = static_cast(static_cast(opnd)->Clone(*cgFunc->GetMemoryPool())); + newmemopnd = (static_cast(opnd)->Clone(*cgFunc->GetMemoryPool())); } newmemopnd->SetIndexRegister(*replace); insn->SetOperand(i, *newmemopnd); @@ -4045,8 +4045,8 @@ bool GraphColorRegAllocator::SpillLiveRangeForSpills() { } } } else if (opnd->IsRegister()) { - bool isdef = static_cast(md->operand[i])->IsRegDef(); - bool isuse = static_cast(md->operand[i])->IsRegUse(); + bool isdef = md->operand[i]->IsRegDef(); + bool isuse = md->operand[i]->IsRegUse(); RegOperand *replace = CreateSpillFillCode(*static_cast(opnd), *insn, spillCnt, isdef); if (isuse && isdef) { (void)CreateSpillFillCode(*static_cast(opnd), *insn, spillCnt, false); @@ -4087,14 +4087,14 @@ void 
CallerSavePre::CodeMotion() { } if (occ->GetOccType() == kOccUse && (beyondLimit || (static_cast(occ)->Reload() && !ReloadAtCallee(occ)))) { - AArch64RegOperand &phyOpnd = static_cast(func)->GetOrCreatePhysicalRegisterOperand( + RegOperand &phyOpnd = static_cast(func)->GetOrCreatePhysicalRegisterOperand( static_cast(workLr->GetAssignedRegNO()), occ->GetOperand()->GetSize(), static_cast(occ->GetOperand())->GetRegisterType()); (void)regAllocator->SpillOperand(*occ->GetInsn(), *occ->GetOperand(), false, phyOpnd); continue; } if (occ->GetOccType() == kOccPhiopnd && static_cast(occ)->Reload() && !ReloadAtCallee(occ)) { - AArch64RegOperand &phyOpnd = static_cast(func)->GetOrCreatePhysicalRegisterOperand( + RegOperand &phyOpnd = static_cast(func)->GetOrCreatePhysicalRegisterOperand( static_cast(workLr->GetAssignedRegNO()), occ->GetOperand()->GetSize(), static_cast(occ->GetOperand())->GetRegisterType()); Insn *insn = occ->GetBB()->GetLastInsn(); @@ -4108,7 +4108,7 @@ void CallerSavePre::CodeMotion() { continue; } if (occ->GetOccType() == kOccStore && static_cast(occ)->Reload()) { - AArch64RegOperand &phyOpnd = static_cast(func)->GetOrCreatePhysicalRegisterOperand( + RegOperand &phyOpnd = static_cast(func)->GetOrCreatePhysicalRegisterOperand( static_cast(workLr->GetAssignedRegNO()), occ->GetOperand()->GetSize(), static_cast(occ->GetOperand())->GetRegisterType()); (void)regAllocator->SpillOperand(*occ->GetInsn(), *occ->GetOperand(), false, phyOpnd, true); @@ -4567,19 +4567,19 @@ void GraphColorRegAllocator::SplitVregAroundLoop(const CGFuncLoops &loop, const if (hasRef == false) { splitCount++; RegOperand *ropnd = &cgFunc->GetOrCreateVirtualRegisterOperand(lr->GetRegNO()); - AArch64RegOperand &phyOpnd = static_cast(cgFunc)->GetOrCreatePhysicalRegisterOperand( + RegOperand &phyOpnd = static_cast(cgFunc)->GetOrCreatePhysicalRegisterOperand( static_cast(lr->GetAssignedRegNO()), ropnd->GetSize(), (lr->GetRegType())); Insn *headerCom = &(static_cast(cgFunc)->CreateCommentInsn("split around loop begin")); headerPred.AppendInsn(*headerCom); Insn *last = headerPred.GetLastInsn(); - SpillOperand(*last, *ropnd, true, static_cast(phyOpnd)); + SpillOperand(*last, *ropnd, true, static_cast(phyOpnd)); Insn *exitCom = &(static_cast(cgFunc)->CreateCommentInsn("split around loop end")); exitSucc.InsertInsnBegin(*exitCom); Insn *first = exitSucc.GetFirstInsn(); - SpillOperand(*first, *ropnd, false, static_cast(phyOpnd)); + SpillOperand(*first, *ropnd, false, static_cast(phyOpnd)); LiveRange *replacedLr = lrMap[*it]; replacedLr->SetAssignedRegNO(lr->GetAssignedRegNO()); @@ -4818,8 +4818,7 @@ void GraphColorRegAllocator::FinalizeRegisters() { uint32 useSpillIdx = 0; MemOperand *memOpnd = nullptr; if (fInfo->GetBaseOperand()) { - memOpnd = static_cast( - static_cast(fInfo->GetBaseOperand())->Clone(*cgFunc->GetMemoryPool())); + memOpnd = static_cast(fInfo->GetBaseOperand())->Clone(*cgFunc->GetMemoryPool()); insn->SetOperand(fInfo->GetMemOperandIdx(), *memOpnd); Operand *base = memOpnd->GetBaseRegister(); ASSERT(base != nullptr, "nullptr check"); @@ -4828,7 +4827,7 @@ void GraphColorRegAllocator::FinalizeRegisters() { if (phyOpnd != nullptr) { memOpnd->SetBaseRegister(*phyOpnd); } - if (!static_cast(memOpnd)->IsIntactIndexed()) { + if (!memOpnd->IsIntactIndexed()) { (void)GetReplaceOpnd(*insn, *base, useSpillIdx, usedRegMask, true); } } @@ -4847,7 +4846,7 @@ void GraphColorRegAllocator::FinalizeRegisters() { ListOperand *outList = const_cast(static_cast(defOpnd)); auto *a64CGFunc = static_cast(cgFunc); auto *srcOpndsNew = 
- a64CGFunc->GetMemoryPool()->New(*a64CGFunc->GetFuncScopeAllocator()); + a64CGFunc->CreateListOpnd(*a64CGFunc->GetFuncScopeAllocator()); RegOperand *phyOpnd; for (auto opnd : outList->GetOperands()) { if (opnd->IsPhysicalRegister()) { @@ -4878,8 +4877,7 @@ void GraphColorRegAllocator::FinalizeRegisters() { if (useOpnd->IsList()) { ListOperand *inList = const_cast(static_cast(useOpnd)); auto *a64CGFunc = static_cast(cgFunc); - auto *srcOpndsNew = - a64CGFunc->GetMemoryPool()->New(*a64CGFunc->GetFuncScopeAllocator()); + auto *srcOpndsNew = a64CGFunc->CreateListOpnd(*a64CGFunc->GetFuncScopeAllocator()); for (auto opnd : inList->GetOperands()) { if ((static_cast(opnd))->GetRegisterNumber() < kAllRegNum) { srcOpndsNew->PushOpnd(*opnd); @@ -4899,8 +4897,8 @@ void GraphColorRegAllocator::FinalizeRegisters() { } } if (insn->GetMachineOpcode() == MOP_wmovrr || insn->GetMachineOpcode() == MOP_xmovrr) { - auto ®1 = static_cast(insn->GetOperand(kInsnFirstOpnd)); - auto ®2 = static_cast(insn->GetOperand(kInsnSecondOpnd)); + auto ®1 = static_cast(insn->GetOperand(kInsnFirstOpnd)); + auto ®2 = static_cast(insn->GetOperand(kInsnSecondOpnd)); /* remove mov x0,x0 when it cast i32 to i64 */ if ((reg1.GetRegisterNumber() == reg2.GetRegisterNumber()) && (reg1.GetSize() >= reg2.GetSize())) { bb->RemoveInsn(*insn); diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_dce.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_dce.cpp index d93db8a07c5468b3fdade406241d2efa1f1f1632..bd8e9b9e0e552917d7ab6e7b97cb1b21ddd6ec0c 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_dce.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_dce.cpp @@ -25,7 +25,7 @@ bool AArch64Dce::RemoveUnuseDef(VRegVersion &defVersion) { CHECK_FATAL(defInsnInfo->GetInsn() != nullptr, "Get def insn failed"); Insn *defInsn = defInsnInfo->GetInsn(); /* have not support asm/neon opt yet */ - if (defInsn->GetMachineOpcode() == MOP_asm || defInsn->IsVectorOp()) { + if (defInsn->GetMachineOpcode() == MOP_asm || defInsn->IsVectorOp() || defInsn->IsAtomic()) { return false; } std::set defRegs = defInsn->GetDefRegs(); @@ -66,8 +66,7 @@ void A64DeleteRegUseVisitor::Visit(ListOperand *v) { Visit(regOpnd); } } -void A64DeleteRegUseVisitor::Visit(MemOperand *v) { - auto *a64MemOpnd = static_cast(v); +void A64DeleteRegUseVisitor::Visit(MemOperand *a64MemOpnd) { RegOperand *baseRegOpnd = a64MemOpnd->GetBaseRegister(); RegOperand *indexRegOpnd = a64MemOpnd->GetIndexRegister(); if (baseRegOpnd != nullptr && baseRegOpnd->IsSSAForm()) { @@ -83,4 +82,4 @@ void A64DeleteRegUseVisitor::Visit(PhiOperand *v) { Visit(phiOpndIt.second); } } -} \ No newline at end of file +} diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_dependence.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_dependence.cpp index 7fc44319ad59cccd36f5ebe02a7859b99168e483..d440e0198e41584fb3cc5f812ef2e9ae00714f69 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_dependence.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_dependence.cpp @@ -338,24 +338,22 @@ bool AArch64DepAnalysis::IsFrameReg(const RegOperand &opnd) const { return (opnd.GetRegisterNumber() == RFP) || (opnd.GetRegisterNumber() == RSP); } -AArch64MemOperand *AArch64DepAnalysis::BuildNextMemOperandByByteSize(const AArch64MemOperand &aarchMemOpnd, - uint32 byteSize) const { - AArch64MemOperand *nextMemOpnd = nullptr; - Operand *nextOpnd = aarchMemOpnd.Clone(memPool); - nextMemOpnd = static_cast(nextOpnd); +MemOperand *AArch64DepAnalysis::BuildNextMemOperandByByteSize(const MemOperand &aarchMemOpnd, 
+ uint32 byteSize) const { + MemOperand *nextMemOpnd = aarchMemOpnd.Clone(memPool); Operand *nextOfstOpnd = nextMemOpnd->GetOffsetImmediate()->Clone(memPool); - AArch64OfstOperand *aarchNextOfstOpnd = static_cast(nextOfstOpnd); + OfstOperand *aarchNextOfstOpnd = static_cast(nextOfstOpnd); CHECK_NULL_FATAL(aarchNextOfstOpnd); int32 offsetVal = static_cast(aarchNextOfstOpnd->GetOffsetValue()); aarchNextOfstOpnd->SetOffsetValue(offsetVal + byteSize); - nextMemOpnd->SetOffsetImmediate(*aarchNextOfstOpnd); + nextMemOpnd->SetOffsetOperand(*aarchNextOfstOpnd); return nextMemOpnd; } /* Get the second memory access operand of stp/ldp instructions. */ -AArch64MemOperand *AArch64DepAnalysis::GetNextMemOperand( - const Insn &insn, const AArch64MemOperand &aarchMemOpnd) const { - AArch64MemOperand *nextMemOpnd = nullptr; +MemOperand *AArch64DepAnalysis::GetNextMemOperand( + const Insn &insn, const MemOperand &aarchMemOpnd) const { + MemOperand *nextMemOpnd = nullptr; switch (insn.GetMachineOpcode()) { case MOP_wldp: case MOP_sldp: @@ -404,12 +402,11 @@ void AArch64DepAnalysis::BuildDepsAccessStImmMem(Insn &insn, bool isDest) { } /* Build dependences of stack memory and heap memory uses. */ -void AArch64DepAnalysis::BuildDepsUseMem(Insn &insn, MemOperand &memOpnd) { - RegOperand *baseRegister = memOpnd.GetBaseRegister(); - AArch64MemOperand &aarchMemOpnd = static_cast(memOpnd); - AArch64MemOperand *nextMemOpnd = GetNextMemOperand(insn, aarchMemOpnd); +void AArch64DepAnalysis::BuildDepsUseMem(Insn &insn, MemOperand &aarchMemOpnd) { + RegOperand *baseRegister = aarchMemOpnd.GetBaseRegister(); + MemOperand *nextMemOpnd = GetNextMemOperand(insn, aarchMemOpnd); - memOpnd.SetAccessSize(static_cast(insn).GetLoadStoreSize()); + aarchMemOpnd.SetAccessSize(static_cast(insn).GetLoadStoreSize()); /* Stack memory address */ for (auto defInsn : stackDefs) { if (defInsn->IsCall() || NeedBuildDepsMem(aarchMemOpnd, nextMemOpnd, *defInsn)) { @@ -429,23 +426,66 @@ void AArch64DepAnalysis::BuildDepsUseMem(Insn &insn, MemOperand &memOpnd) { } } +static bool NoAlias(const MemOperand &leftOpnd, const MemOperand &rightOpnd) { + if (leftOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && + rightOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && leftOpnd.GetIndexOpt() == MemOperand::kIntact && + rightOpnd.GetIndexOpt() == MemOperand::kIntact) { + + if (leftOpnd.GetBaseRegister()->GetRegisterNumber() == RFP || + rightOpnd.GetBaseRegister()->GetRegisterNumber() == RFP) { + Operand *ofstOpnd = leftOpnd.GetOffsetOperand(); + Operand *rofstOpnd = rightOpnd.GetOffsetOperand(); + + ASSERT(ofstOpnd != nullptr, "offset operand should not be null."); + ASSERT(rofstOpnd != nullptr, "offset operand should not be null."); + ImmOperand *ofst = static_cast(ofstOpnd); + ImmOperand *rofst = static_cast(rofstOpnd); + ASSERT(ofst != nullptr, "CG internal error, invalid type."); + ASSERT(rofst != nullptr, "CG internal error, invalid type."); + + return (!ofst->ValueEquals(*rofst)); + } + } + return false; +} + +static bool NoOverlap(const MemOperand &leftOpnd, const MemOperand &rightOpnd) { + if (leftOpnd.GetAddrMode() != MemOperand::kAddrModeBOi || + rightOpnd.GetAddrMode() != MemOperand::kAddrModeBOi || + leftOpnd.GetIndexOpt() != MemOperand::kIntact || + rightOpnd.GetIndexOpt() != MemOperand::kIntact) { + return false; + } + if (leftOpnd.GetBaseRegister()->GetRegisterNumber() != RFP || + rightOpnd.GetBaseRegister()->GetRegisterNumber() != RFP) { + return false; + } + int64 ofset1 = leftOpnd.GetOffsetOperand()->GetValue(); + int64 ofset2 = 
rightOpnd.GetOffsetOperand()->GetValue(); + if (ofset1 < ofset2) { + return ((ofset1 + leftOpnd.GetAccessSize()) <= ofset2); + } else { + return ((ofset2 + rightOpnd.GetAccessSize()) <= ofset1); + } +} + /* Return true if memInsn's memOpnd no alias with memOpnd and nextMemOpnd */ -bool AArch64DepAnalysis::NeedBuildDepsMem(const AArch64MemOperand &memOpnd, - const AArch64MemOperand *nextMemOpnd, +bool AArch64DepAnalysis::NeedBuildDepsMem(const MemOperand &memOpnd, + const MemOperand *nextMemOpnd, const Insn &memInsn) const { - auto *memOpndOfmemInsn = static_cast(memInsn.GetMemOpnd()); - if (!memOpnd.NoAlias(*memOpndOfmemInsn) || ((nextMemOpnd != nullptr) && !nextMemOpnd->NoAlias(*memOpndOfmemInsn))) { + auto *memOpndOfmemInsn = static_cast(memInsn.GetMemOpnd()); + if (!NoAlias(memOpnd, *memOpndOfmemInsn) || ((nextMemOpnd != nullptr) && !NoAlias(*nextMemOpnd, *memOpndOfmemInsn))) { return true; } if (cgFunc.GetMirModule().GetSrcLang() == kSrcLangC && memInsn.IsCall() == false) { static_cast(memInsn.GetMemOpnd())->SetAccessSize( static_cast(memInsn).GetLoadStoreSize()); - return (memOpnd.NoOverlap(*memOpndOfmemInsn) == false); + return (NoOverlap(memOpnd, *memOpndOfmemInsn) == false); } - AArch64MemOperand *nextMemOpndOfmemInsn = GetNextMemOperand(memInsn, *memOpndOfmemInsn); + MemOperand *nextMemOpndOfmemInsn = GetNextMemOperand(memInsn, *memOpndOfmemInsn); if (nextMemOpndOfmemInsn != nullptr) { - if (!memOpnd.NoAlias(*nextMemOpndOfmemInsn) || - ((nextMemOpnd != nullptr) && !nextMemOpnd->NoAlias(*nextMemOpndOfmemInsn))) { + if (!NoAlias(memOpnd, *nextMemOpndOfmemInsn) || + ((nextMemOpnd != nullptr) && !NoAlias(*nextMemOpnd, *nextMemOpndOfmemInsn))) { return true; } } @@ -458,8 +498,8 @@ bool AArch64DepAnalysis::NeedBuildDepsMem(const AArch64MemOperand &memOpnd, * memOpnd : insn's memOpnd * nextMemOpnd : some memory pair operator instruction (like ldp/stp) defines two memory. */ -void AArch64DepAnalysis::BuildAntiDepsDefStackMem(Insn &insn, AArch64MemOperand &memOpnd, - const AArch64MemOperand *nextMemOpnd) { +void AArch64DepAnalysis::BuildAntiDepsDefStackMem(Insn &insn, MemOperand &memOpnd, + const MemOperand *nextMemOpnd) { memOpnd.SetAccessSize(static_cast(insn).GetLoadStoreSize()); for (auto *useInsn : stackUses) { if (NeedBuildDepsMem(memOpnd, nextMemOpnd, *useInsn)) { @@ -474,8 +514,8 @@ void AArch64DepAnalysis::BuildAntiDepsDefStackMem(Insn &insn, AArch64MemOperand * memOpnd : insn's memOpnd * nextMemOpnd : some memory pair operator instruction (like ldp/stp) defines two memory. */ -void AArch64DepAnalysis::BuildOutputDepsDefStackMem(Insn &insn, AArch64MemOperand &memOpnd, - const AArch64MemOperand *nextMemOpnd) { +void AArch64DepAnalysis::BuildOutputDepsDefStackMem(Insn &insn, MemOperand &memOpnd, + const MemOperand *nextMemOpnd) { memOpnd.SetAccessSize(static_cast(insn).GetLoadStoreSize()); for (auto defInsn : stackDefs) { if (defInsn->IsCall() || NeedBuildDepsMem(memOpnd, nextMemOpnd, *defInsn)) { @@ -485,10 +525,9 @@ void AArch64DepAnalysis::BuildOutputDepsDefStackMem(Insn &insn, AArch64MemOperan } /* Build dependences of stack memory and heap memory definitions. 
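
The NoOverlap helper added above is a plain disjoint-interval test over FP-relative stack slots: two intact base+offset accesses cannot alias when their [offset, offset+size) ranges do not intersect. A standalone mirror of that logic with a worked pair of slots (a sketch, valid only under the same kAddrModeBOi/kIntact/FP-base preconditions the real code checks first):

    #include <cassert>
    #include <cstdint>

    struct StackAccess {
      int64_t offset;       // FP-relative byte offset
      uint32_t accessSize;  // bytes read or written
    };

    // Same interval logic as NoOverlap: the lower access must end
    // at or before the higher one begins.
    bool NoOverlapSketch(const StackAccess &a, const StackAccess &b) {
      if (a.offset < b.offset) {
        return a.offset + a.accessSize <= b.offset;
      }
      return b.offset + b.accessSize <= a.offset;
    }

    int main() {
      assert(NoOverlapSketch({16, 8}, {24, 4}));   // [16,24) vs [24,28): disjoint
      assert(!NoOverlapSketch({16, 8}, {20, 8}));  // [16,24) vs [20,28): overlap
      return 0;
    }
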
*/ -void AArch64DepAnalysis::BuildDepsDefMem(Insn &insn, MemOperand &memOpnd) { - RegOperand *baseRegister = memOpnd.GetBaseRegister(); - AArch64MemOperand &aarchMemOpnd = static_cast(memOpnd); - AArch64MemOperand *nextMemOpnd = GetNextMemOperand(insn, aarchMemOpnd); +void AArch64DepAnalysis::BuildDepsDefMem(Insn &insn, MemOperand &aarchMemOpnd) { + RegOperand *baseRegister = aarchMemOpnd.GetBaseRegister(); + MemOperand *nextMemOpnd = GetNextMemOperand(insn, aarchMemOpnd); /* Build anti dependences. */ BuildAntiDepsDefStackMem(insn, aarchMemOpnd, nextMemOpnd); @@ -750,14 +789,14 @@ bool AArch64DepAnalysis::IfInAmbiRegs(regno_t regNO) const { * opnd : the memory access operand. * regProp : operand property of the memory access operandess operand. */ -void AArch64DepAnalysis::BuildMemOpndDependency(Insn &insn, Operand &opnd, const AArch64OpndProp ®Prop) { +void AArch64DepAnalysis::BuildMemOpndDependency(Insn &insn, Operand &opnd, const OpndProp ®Prop) { ASSERT(opnd.IsMemoryAccessOperand(), "opnd must be memory Operand"); - AArch64MemOperand *memOpnd = static_cast(&opnd); + MemOperand *memOpnd = static_cast(&opnd); RegOperand *baseRegister = memOpnd->GetBaseRegister(); if (baseRegister != nullptr) { regno_t regNO = baseRegister->GetRegisterNumber(); BuildDepsUseReg(insn, regNO); - if ((memOpnd->GetAddrMode() == AArch64MemOperand::kAddrModeBOi) && + if ((memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi) && (memOpnd->IsPostIndexed() || memOpnd->IsPreIndexed())) { /* Base operand has changed. */ BuildDepsDefReg(insn, regNO); @@ -787,7 +826,7 @@ void AArch64DepAnalysis::BuildOpndDependency(Insn &insn) { uint32 opndNum = insn.GetOperandSize(); for (uint32 i = 0; i < opndNum; ++i) { Operand &opnd = insn.GetOperand(i); - AArch64OpndProp *regProp = static_cast(md->operand[i]); + OpndProp *regProp = md->operand[i]; if (opnd.IsMemoryAccessOperand()) { BuildMemOpndDependency(insn, opnd, *regProp); } else if (opnd.IsStImmediate()) { diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_ebo.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_ebo.cpp index 4fdba210d41c000815f34536120b4c426fb88d55..a2ac9a6f67b748a6cc194c6ee988ca24656fbd9f 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_ebo.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_ebo.cpp @@ -78,12 +78,23 @@ bool AArch64Ebo::IsAdd(const Insn &insn) const { return ((MOP_xaddrrr <= insn.GetMachineOpcode()) && (insn.GetMachineOpcode() <= MOP_ssub)); } +bool AArch64Ebo::IsInvalidReg(const RegOperand &opnd) const { + return (opnd.GetRegisterNumber() == AArch64reg::kRinvalid); +} + bool AArch64Ebo::IsZeroRegister(const Operand &opnd) const { if (!opnd.IsRegister()) { return false; } - const AArch64RegOperand *regOpnd = static_cast(&opnd); - return regOpnd->IsZeroRegister(); + const RegOperand *regOpnd = static_cast(&opnd); + return regOpnd->GetRegisterNumber() == RZR; +} + +bool AArch64Ebo::IsConstantImmOrReg(const Operand &opnd) const { + if (opnd.IsConstImmediate()) { + return true; + } + return IsZeroRegister(opnd); } bool AArch64Ebo::IsClinitCheck(const Insn &insn) const { @@ -114,7 +125,8 @@ bool AArch64Ebo::IsGlobalNeeded(Insn &insn) const { } Operand *opnd = insn.GetResult(0); - if ((opnd != nullptr) && (opnd->IsConstReg() || (opnd->IsRegister() && static_cast(opnd)->IsSPOrFP()))) { + if ((opnd != nullptr) && + (IsZeroRegister(*opnd) || (opnd->IsRegister() && cgFunc->IsSPOrFP(static_cast(*opnd))))) { return true; } return false; @@ -158,8 +170,8 @@ bool AArch64Ebo::IsSameRedefine(BB &bb, Insn &insn, OpndInfo &opndInfo) const { 
if (!prevInsn->GetOperand(kInsnSecondOpnd).IsImmediate()) { return false; } - auto &sameOpnd = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); - auto &opnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &sameOpnd = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + auto &opnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); if (sameOpnd.GetValue() == opnd.GetValue()) { sameInfo->refCount += opndInfo.refCount; return true; @@ -344,8 +356,8 @@ void AArch64Ebo::DefineCallUseSpecialRegister(Insn &insn) { bool AArch64Ebo::OperandEqSpecial(const Operand &op1, const Operand &op2) const { switch (op1.GetKind()) { case Operand::kOpdRegister: { - const AArch64RegOperand ®1 = static_cast(op1); - const AArch64RegOperand ®2 = static_cast(op2); + const RegOperand ®1 = static_cast(op1); + const RegOperand ®2 = static_cast(op2); return reg1 == reg2; } case Operand::kOpdImmediate: { @@ -354,8 +366,8 @@ bool AArch64Ebo::OperandEqSpecial(const Operand &op1, const Operand &op2) const return imm1 == imm2; } case Operand::kOpdOffset: { - const AArch64OfstOperand &ofst1 = static_cast(op1); - const AArch64OfstOperand &ofst2 = static_cast(op2); + const OfstOperand &ofst1 = static_cast(op1); + const OfstOperand &ofst2 = static_cast(op2); return ofst1 == ofst2; } case Operand::kOpdStImmediate: { @@ -364,8 +376,8 @@ bool AArch64Ebo::OperandEqSpecial(const Operand &op1, const Operand &op2) const return stImm1 == stImm2; } case Operand::kOpdMem: { - const AArch64MemOperand &mem1 = static_cast(op1); - const AArch64MemOperand &mem2 = static_cast(op2); + const MemOperand &mem1 = static_cast(op1); + const MemOperand &mem2 = static_cast(op2); if (mem1.GetAddrMode() == mem2.GetAddrMode()) { ASSERT(mem1.GetBaseRegister() != nullptr, "nullptr check"); ASSERT(mem2.GetBaseRegister() != nullptr, "nullptr check"); @@ -382,9 +394,8 @@ bool AArch64Ebo::OperandEqSpecial(const Operand &op1, const Operand &op2) const } } -int32 AArch64Ebo::GetOffsetVal(const MemOperand &mem) const { - const AArch64MemOperand &memOpnd = static_cast(mem); - AArch64OfstOperand *offset = memOpnd.GetOffsetImmediate(); +int32 AArch64Ebo::GetOffsetVal(const MemOperand &memOpnd) const { + OfstOperand *offset = memOpnd.GetOffsetImmediate(); int32 val = 0; if (offset != nullptr) { val += static_cast(offset->GetOffsetValue()); @@ -405,7 +416,7 @@ int32 AArch64Ebo::GetOffsetVal(const MemOperand &mem) const { * return true if do simplify successfully. */ bool AArch64Ebo::DoConstProp(Insn &insn, uint32 idx, Operand &opnd) { - AArch64ImmOperand *src = static_cast(&opnd); + ImmOperand *src = static_cast(&opnd); const AArch64MD *md = &AArch64CG::kMd[(insn.GetMachineOpcode())]; /* avoid the invalid case "cmp wzr, #0"/"add w1, wzr, #100" */ Operand &destOpnd = insn.GetOperand(idx); @@ -422,7 +433,7 @@ bool AArch64Ebo::DoConstProp(Insn &insn, uint32 idx, Operand &opnd) { ASSERT(idx == kInsnSecondOpnd, "src const for move must be the second operand."); uint32 targetSize = insn.GetOperand(idx).GetSize(); if (src->GetSize() != targetSize) { - src = static_cast(src->Clone(*cgFunc->GetMemoryPool())); + src = static_cast(src->Clone(*cgFunc->GetMemoryPool())); CHECK_FATAL(src != nullptr, "pointer result is null"); src->SetSize(targetSize); } @@ -433,7 +444,7 @@ bool AArch64Ebo::DoConstProp(Insn &insn, uint32 idx, Operand &opnd) { } insn.SetOperand(kInsnSecondOpnd, *src); MOperator mOp = (mopCode == MOP_wmovrr) ? 
MOP_xmovri32 : MOP_xmovri64; - insn.SetMOperator(mOp); + insn.SetMOP(mOp); if (EBO_DUMP) { LogInfo::MapleLogger() << " after constprop the insn is:\n"; insn.Dump(); @@ -468,9 +479,9 @@ bool AArch64Ebo::DoConstProp(Insn &insn, uint32 idx, Operand &opnd) { } insn.SetOperand(kInsnThirdOpnd, *src); if ((mopCode == MOP_xaddrrr) || (mopCode == MOP_waddrrr)) { - is64Bits ? insn.SetMOperator(MOP_xaddrri12) : insn.SetMOperator(MOP_waddrri12); + is64Bits ? insn.SetMOP(MOP_xaddrri12) : insn.SetMOP(MOP_waddrri12); } else if ((mopCode == MOP_xsubrrr) || (mopCode == MOP_wsubrrr)) { - is64Bits ? insn.SetMOperator(MOP_xsubrri12) : insn.SetMOperator(MOP_wsubrri12); + is64Bits ? insn.SetMOP(MOP_xsubrri12) : insn.SetMOP(MOP_wsubrri12); } if (EBO_DUMP) { LogInfo::MapleLogger() << " after constprop the insn is:\n"; @@ -505,18 +516,18 @@ bool AArch64Ebo::Csel2Cset(Insn &insn, const MapleVector &opnds) { } Operand *op0 = opnds.at(kInsnSecondOpnd); Operand *op1 = opnds.at(kInsnThirdOpnd); - AArch64ImmOperand *imm0 = nullptr; - AArch64ImmOperand *imm1 = nullptr; + ImmOperand *imm0 = nullptr; + ImmOperand *imm1 = nullptr; if (op0->IsImmediate()) { - imm0 = static_cast(op0); + imm0 = static_cast(op0); } if (op1->IsImmediate()) { - imm1 = static_cast(op1); + imm1 = static_cast(op1); } bool reverse = (imm1 != nullptr) && imm1->IsOne() && - (((imm0 != nullptr) && imm0->IsZero()) || op0->IsZeroRegister()); - if (((imm0 != nullptr) && imm0->IsOne() && (((imm1 != nullptr) && imm1->IsZero()) || op1->IsZeroRegister())) || + (((imm0 != nullptr) && imm0->IsZero()) || IsZeroRegister(*op0)); + if (((imm0 != nullptr) && imm0->IsOne() && (((imm1 != nullptr) && imm1->IsZero()) || IsZeroRegister(*op1))) || reverse) { if (EBO_DUMP) { LogInfo::MapleLogger() << "change csel insn :\n"; @@ -569,16 +580,16 @@ bool AArch64Ebo::SimplifyConstOperand(Insn &insn, const MapleVector &o CHECK_FATAL(res != nullptr, "null ptr check"); const AArch64MD *md = &AArch64CG::kMd[static_cast(&insn)->GetMachineOpcode()]; uint32 opndSize = md->GetOperandSize(); - bool op0IsConstant = op0->IsConstant() && !op1->IsConstant(); - bool op1IsConstant = !op0->IsConstant() && op1->IsConstant(); - bool bothConstant = op0->IsConstant() && op1->IsConstant(); - AArch64ImmOperand *immOpnd = nullptr; + bool op0IsConstant = IsConstantImmOrReg(*op0) && !IsConstantImmOrReg(*op1); + bool op1IsConstant = !IsConstantImmOrReg(*op0) && IsConstantImmOrReg(*op1); + bool bothConstant = IsConstantImmOrReg(*op0) && IsConstantImmOrReg(*op1); + ImmOperand *immOpnd = nullptr; Operand *op = nullptr; int32 idx0 = kInsnSecondOpnd; if (op0IsConstant) { // cannot convert zero reg (r30) to a immOperand - immOpnd = op0->IsZeroRegister() ? &a64CGFunc->CreateImmOperand(0, op0->GetSize(), false) - : static_cast(op0); + immOpnd = IsZeroRegister(*op0) ? &a64CGFunc->CreateImmOperand(0, op0->GetSize(), false) + : static_cast(op0); op = op1; if (op->IsMemoryAccessOperand()) { op = &(insn.GetOperand(kInsnThirdOpnd)); @@ -586,17 +597,17 @@ bool AArch64Ebo::SimplifyConstOperand(Insn &insn, const MapleVector &o idx0 = kInsnThirdOpnd; } else if (op1IsConstant) { // cannot convert zero reg (r30) to a immOperand - immOpnd = op1->IsZeroRegister() ? &a64CGFunc->CreateImmOperand(0, op1->GetSize(), false) - : static_cast(op1); + immOpnd = IsZeroRegister(*op1) ? &a64CGFunc->CreateImmOperand(0, op1->GetSize(), false) + : static_cast(op1); op = op0; if (op->IsMemoryAccessOperand()) { op = &(insn.GetOperand(kInsnSecondOpnd)); } } else if (bothConstant) { - AArch64ImmOperand *immOpnd0 = op0->IsZeroRegister() ? 
&a64CGFunc->CreateImmOperand(0, op0->GetSize(), false) - : static_cast(op0); - AArch64ImmOperand *immOpnd1 = op1->IsZeroRegister() ? &a64CGFunc->CreateImmOperand(0, op1->GetSize(), false) - : static_cast(op1); + ImmOperand *immOpnd0 = IsZeroRegister(*op0) ? &a64CGFunc->CreateImmOperand(0, op0->GetSize(), false) + : static_cast(op0); + ImmOperand *immOpnd1 = IsZeroRegister(*op1) ? &a64CGFunc->CreateImmOperand(0, op1->GetSize(), false) + : static_cast(op1); return SimplifyBothConst(*insn.GetBB(), insn, *immOpnd0, *immOpnd1, opndSize); } CHECK_FATAL(immOpnd != nullptr, "constant operand required!"); @@ -648,9 +659,9 @@ bool AArch64Ebo::SimplifyConstOperand(Insn &insn, const MapleVector &o return result; } Operand &prevOpnd0 = prev->GetOperand(kInsnSecondOpnd); - AArch64ImmOperand &imm0 = static_cast(prev->GetOperand(kInsnThirdOpnd)); + ImmOperand &imm0 = static_cast(prev->GetOperand(kInsnThirdOpnd)); int64_t val = imm0.GetValue() + immOpnd->GetValue(); - AArch64ImmOperand &imm1 = a64CGFunc->CreateImmOperand(val, opndSize, imm0.IsSignedValue()); + ImmOperand &imm1 = a64CGFunc->CreateImmOperand(val, opndSize, imm0.IsSignedValue()); if (imm1.IsInBitSize(kMaxImmVal24Bits, 0) && (imm1.IsInBitSize(kMaxImmVal12Bits, 0) || imm1.IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) { MOperator mOp = (opndSize == k64BitSize ? MOP_xaddrri12 : MOP_waddrri12); @@ -697,8 +708,8 @@ bool AArch64Ebo::CheckCondCode(const CondOperand &cond) const { } } -bool AArch64Ebo::SimplifyBothConst(BB &bb, Insn &insn, const AArch64ImmOperand &immOperand0, - const AArch64ImmOperand &immOperand1, uint32 opndSize) { +bool AArch64Ebo::SimplifyBothConst(BB &bb, Insn &insn, const ImmOperand &immOperand0, + const ImmOperand &immOperand1, uint32 opndSize) { MOperator mOp = insn.GetMachineOpcode(); int64 val = 0; /* do not support negative const simplify yet */ @@ -731,7 +742,7 @@ bool AArch64Ebo::SimplifyBothConst(BB &bb, Insn &insn, const AArch64ImmOperand & return false; } Operand *res = insn.GetResult(0); - AArch64ImmOperand *immOperand = &a64CGFunc->CreateImmOperand(val, opndSize, false); + ImmOperand *immOperand = &a64CGFunc->CreateImmOperand(val, opndSize, false); if (!immOperand->IsSingleInstructionMovable()) { ASSERT(res->IsRegister(), " expect a register operand"); static_cast(cgFunc)->SplitMovImmOpndInstruction(val, *(static_cast(res)), &insn); @@ -780,7 +791,7 @@ bool AArch64Ebo::OperandLiveAfterInsn(const RegOperand ®Opnd, Insn &insn) { } #if TARGAARCH64 || TARGRISCV64 const AArch64MD *md = &AArch64CG::kMd[static_cast(nextInsn)->GetMachineOpcode()]; - auto *regProp = static_cast(md->operand[static_cast(i)]); + auto *regProp = md->operand[static_cast(i)]; #endif bool isUse = regProp->IsUse(); /* if noUse Redefined, no need to check live-out. 
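
The add-immediate merge earlier in this hunk (the kMaxImmVal24Bits/kMaxImmVal12Bits checks) is gated by the A64 ADD/SUB (immediate) encoding: a 12-bit immediate, optionally shifted left by 12, so a merged constant is emitted as one instruction only when it fits one of those two windows. A standalone sketch of that encodability test:

    #include <cassert>
    #include <cstdint>

    // A64 ADD/SUB (immediate) accepts imm12 or imm12 LSL #12.
    bool EncodableAddImm(uint64_t v) {
      return (v & ~uint64_t{0xFFF}) == 0 ||        // fits in bits [11:0]
             (v & ~(uint64_t{0xFFF} << 12)) == 0;  // fits in bits [23:12]
    }

    int main() {
      assert(EncodableAddImm(3136));    // 0xC40: low window
      assert(EncodableAddImm(36864));   // 0x9000 = 9 << 12: high window
      assert(!EncodableAddImm(40000));  // 0x9C40 spans both windows: needs two insns
      return 0;
    }
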
*/ @@ -802,10 +813,10 @@ bool AArch64Ebo::ValidPatternForCombineExtAndLoad(OpndInfo *prevOpndInfo, Insn * return false; } Insn *prevInsn = prevOpndInfo->insn; - AArch64MemOperand *memOpnd = static_cast(prevInsn->GetMemOpnd()); + MemOperand *memOpnd = static_cast(prevInsn->GetMemOpnd()); ASSERT(!prevInsn->IsStorePair(), "do not do this opt for str pair"); ASSERT(!prevInsn->IsLoadPair(), "do not do this opt for ldr pair"); - if (memOpnd->GetAddrMode() == AArch64MemOperand::kAddrModeBOi && + if (memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi && !a64CGFunc->IsOperandImmValid(newMop, prevInsn->GetMemOpnd(), kInsnSecondOpnd)) { return false; } @@ -876,7 +887,9 @@ bool AArch64Ebo::CombineExtensionAndLoad(Insn *insn, const MapleVector= SXTB) { newPreMop = ExtLoadSwitchBitSize(newPreMop); - prevInsn->GetOperand(kInsnFirstOpnd).SetSize(k64BitSize); + auto &prevDstOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + prevDstOpnd.SetSize(k64BitSize); + prevDstOpnd.SetValidBitsNum(k64BitSize); } prevInsn->SetMOP(newPreMop); @@ -965,6 +978,16 @@ bool AArch64Ebo::CombineMultiplySub(Insn *insn, OpndInfo *opndInfo, bool is64bit return false; } +bool CheckInsnRefField(Insn &insn, size_t opndIndex) { + if (insn.IsAccessRefField() && static_cast(insn).AccessMem()) { + Operand &opnd0 = insn.GetOperand(opndIndex); + if (opnd0.IsRegister()) { + return true; + } + } + return false; +} + bool AArch64Ebo::CombineMultiplyNeg(Insn *insn, OpndInfo *opndInfo, bool is64bits, bool isFp) { if ((opndInfo == nullptr) || (opndInfo->insn == nullptr)) { return false; @@ -1017,10 +1040,10 @@ bool AArch64Ebo::CombineLsrAnd(Insn &insn, const OpndInfo &opndInfo, bool is64bi } Operand &res = insn.GetOperand(kInsnFirstOpnd); Operand &opnd1 = prevInsn->GetOperand(kInsnSecondOpnd); - int64 immVal1 = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)).GetValue(); + int64 immVal1 = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)).GetValue(); Operand &immOpnd1 = is64bits ? 
aarchFunc->CreateImmOperand(immVal1, kMaxImmVal6Bits, false) : aarchFunc->CreateImmOperand(immVal1, kMaxImmVal5Bits, false); - int64 immVal2 = static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValue(); + int64 immVal2 = static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValue(); int64 immV2 = __builtin_ffsll(immVal2 + 1) - 1; if (immVal1 + immV2 < k1BitSize || (is64bits && immVal1 + immV2 > k64BitSize) || (!is64bits && immVal1 + immV2 > k32BitSize)) { @@ -1178,7 +1201,7 @@ bool AArch64Ebo::SpecialSequence(Insn &insn, const MapleVector &origI } Operand &res = insn.GetOperand(kInsnFirstOpnd); Operand &opnd1 = insn1->GetOperand(kInsnSecondOpnd); - auto &immOpnd = static_cast(insn1->GetOperand(kInsnThirdOpnd)); + auto &immOpnd = static_cast(insn1->GetOperand(kInsnThirdOpnd)); uint32 xLslrriBitLen = 6; uint32 wLslrriBitLen = 5; Operand &shiftOpnd = aarchFunc->CreateBitShiftOperand(BitShiftOperand::kLSL, @@ -1319,7 +1342,7 @@ bool AArch64Ebo::SpecialSequence(Insn &insn, const MapleVector &origI baseInfo = GetOpndInfo(*base, -1); } - if (static_cast(memOpnd)->GetAddrMode() != AArch64MemOperand::kAddrModeBOi) { + if (memOpnd->GetAddrMode() != MemOperand::kAddrModeBOi) { return false; } @@ -1335,14 +1358,14 @@ bool AArch64Ebo::SpecialSequence(Insn &insn, const MapleVector &origI if (memOpnd->GetOffset() == nullptr) { return false; } - AArch64ImmOperand *imm0 = static_cast(memOpnd->GetOffset()); + ImmOperand *imm0 = static_cast(memOpnd->GetOffset()); if (imm0 == nullptr) { return false; } int64 imm0Val = imm0->GetValue(); Operand &res = insn.GetOperand(kInsnFirstOpnd); RegOperand *op1 = &static_cast(insn1->GetOperand(kInsnSecondOpnd)); - AArch64ImmOperand &imm1 = static_cast(insn1->GetOperand(kInsnThirdOpnd)); + ImmOperand &imm1 = static_cast(insn1->GetOperand(kInsnThirdOpnd)); int64 immVal; /* don't use register if it was redefined. 
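
CombineLsrAnd above folds an lsr-then-and pair into a single ubfx when the and-mask is a contiguous low mask; the __builtin_ffsll(immVal2 + 1) - 1 expression recovers the field width from such a mask (ffsll is the GCC/Clang builtin the patch itself uses). A simplified standalone sketch of that width recovery and the range check, omitting the operand-liveness conditions the real code also verifies:

    #include <assert.h>
    #include <stdint.h>

    // For mask = (1 << w) - 1, mask + 1 is a power of two whose lowest set bit is
    // at position w, so ffsll(mask + 1) - 1 == w. This is how the width operand of
    // "ubfx dst, src, #shift, #width" is derived above.
    int64_t FieldWidthFromMask(int64_t mask) {
      return __builtin_ffsll(mask + 1) - 1;
    }

    bool FoldsToUbfx(int64_t shift, int64_t mask, bool is64bits) {
      int64_t w = FieldWidthFromMask(mask);
      int64_t limit = is64bits ? 64 : 32;
      return w >= 1 && shift + w <= limit;  // field must stay inside the register
    }

    int main() {
      assert(FieldWidthFromMask(0xFF) == 8);  // lsr #8; and #0xff -> ubfx #8, #8
      assert(FoldsToUbfx(8, 0xFF, false));
      assert(!FoldsToUbfx(28, 0xFF, false));  // 28 + 8 > 32: cannot fold
      return 0;
    }
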
*/ OpndInfo *opndInfo1 = insnInfo1->origOpnd[kInsnSecondOpnd]; @@ -1368,7 +1391,7 @@ bool AArch64Ebo::SpecialSequence(Insn &insn, const MapleVector &origI } auto &res2 = static_cast(insn2->GetOperand(kInsnFirstOpnd)); auto &base2 = static_cast(insn2->GetOperand(kInsnSecondOpnd)); - auto &immOpnd2 = static_cast(insn2->GetOperand(kInsnThirdOpnd)); + auto &immOpnd2 = static_cast(insn2->GetOperand(kInsnThirdOpnd)); auto &res1 = static_cast(insn1->GetOperand(kInsnFirstOpnd)); OpndInfo *base2DefOpndInfo = GetOpndInfo(base2, -1); if (base2DefOpndInfo == nullptr) { @@ -1409,8 +1432,7 @@ bool AArch64Ebo::SpecialSequence(Insn &insn, const MapleVector &origI /* multiple of 4 and 8 */ const int multiOfFour = 4; const int multiOfEight = 8; - is64bits = is64bits && - (!static_cast(insn).CheckRefField(static_cast(kInsnFirstOpnd), false)); + is64bits = is64bits && (!CheckInsnRefField(insn, kInsnFirstOpnd)); if ((!is64bits && (immVal < kStrLdrImm32UpperBound) && (immVal % multiOfFour == 0)) || (is64bits && (immVal < kStrLdrImm64UpperBound) && (immVal % multiOfEight == 0))) { /* Reserved physicalReg beforeRA */ @@ -1533,7 +1555,7 @@ bool AArch64Ebo::SpecialSequence(Insn &insn, const MapleVector &origI */ bool AArch64Ebo::IsMovToSIMDVmov(Insn &insn, const Insn &replaceInsn) const { if (insn.GetMachineOpcode() == MOP_wmovrr && replaceInsn.GetMachineOpcode() == MOP_xvmovrv) { - insn.SetMOperator(replaceInsn.GetMachineOpcode()); + insn.SetMOP(replaceInsn.GetMachineOpcode()); return true; } return false; @@ -1564,22 +1586,22 @@ bool AArch64Ebo::ChangeLdrMop(Insn &insn, const Operand &opnd) const { if (regOpnd->GetRegisterType() == kRegTyFloat) { switch (insn.GetMachineOpcode()) { case MOP_wldrb: - insn.SetMOperator(MOP_bldr); + insn.SetMOP(MOP_bldr); break; case MOP_wldrh: - insn.SetMOperator(MOP_hldr); + insn.SetMOP(MOP_hldr); break; case MOP_wldr: - insn.SetMOperator(MOP_sldr); + insn.SetMOP(MOP_sldr); break; case MOP_xldr: - insn.SetMOperator(MOP_dldr); + insn.SetMOP(MOP_dldr); break; case MOP_wldli: - insn.SetMOperator(MOP_sldli); + insn.SetMOP(MOP_sldli); break; case MOP_xldli: - insn.SetMOperator(MOP_dldli); + insn.SetMOP(MOP_dldli); break; case MOP_wldrsb: case MOP_wldrsh: @@ -1590,22 +1612,22 @@ bool AArch64Ebo::ChangeLdrMop(Insn &insn, const Operand &opnd) const { } else if (regOpnd->GetRegisterType() == kRegTyInt) { switch (insn.GetMachineOpcode()) { case MOP_bldr: - insn.SetMOperator(MOP_wldrb); + insn.SetMOP(MOP_wldrb); break; case MOP_hldr: - insn.SetMOperator(MOP_wldrh); + insn.SetMOP(MOP_wldrh); break; case MOP_sldr: - insn.SetMOperator(MOP_wldr); + insn.SetMOP(MOP_wldr); break; case MOP_dldr: - insn.SetMOperator(MOP_xldr); + insn.SetMOP(MOP_xldr); break; case MOP_sldli: - insn.SetMOperator(MOP_wldli); + insn.SetMOP(MOP_wldli); break; case MOP_dldli: - insn.SetMOperator(MOP_xldli); + insn.SetMOP(MOP_xldli); break; default: bRet = false; diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_emitter.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_emitter.cpp index 9941ff7e43dc6a37d6c7f26370c07ca3b9d154db..900b441e9db36e9994abd4981264a1b38c484bc2 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_emitter.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_emitter.cpp @@ -16,6 +16,9 @@ #include #include "aarch64_cgfunc.h" #include "aarch64_cg.h" +#include "metadata_layout.h" +#include "cfi.h" +#include "dbg.h" namespace { using namespace maple; @@ -297,6 +300,7 @@ void AArch64AsmEmitter::EmitBBHeaderLabel(FuncEmitInfo &funcEmitInfo, const std: } else { 
emitter.Emit(".L.").Emit(puIdx).Emit("__").Emit(labIdx).Emit(":\n"); } + delete puIdx; } void AArch64AsmEmitter::EmitJavaInsnAddr(FuncEmitInfo &funcEmitInfo) { @@ -349,9 +353,10 @@ void AArch64AsmEmitter::RecordRegInfo(FuncEmitInfo &funcEmitInfo) { break; } uint32 opndNum = insn->GetOperandSize(); + const AArch64MD *md = &AArch64CG::kMd[insn->GetMachineOpcode()]; for (uint32 i = 0; i < opndNum; ++i) { if (insn->GetMachineOpcode() == MOP_asm) { - if (i == kAsmInputListOpnd || i == kAsmOutputListOpnd || i == kAsmClobberListOpnd) { + if (i == kAsmOutputListOpnd || i == kAsmClobberListOpnd) { for (auto opnd : static_cast(insn->GetOperand(i)).GetOperands()) { if (opnd->IsRegister()) { referedRegs.insert(static_cast(opnd)->GetRegisterNumber()); @@ -362,26 +367,22 @@ void AArch64AsmEmitter::RecordRegInfo(FuncEmitInfo &funcEmitInfo) { } Operand &opnd = insn->GetOperand(i); if (opnd.IsList()) { - auto &listOpnd = static_cast(opnd); - for (auto op : listOpnd.GetOperands()) { - referedRegs.insert(op->GetRegisterNumber()); - } + /* all use, skip it */ } else if (opnd.IsMemoryAccessOperand()) { - auto &memOpnd = static_cast(opnd); + auto &memOpnd = static_cast(opnd); RegOperand *base = memOpnd.GetBaseRegister(); - RegOperand *offset = memOpnd.GetIndexRegister(); - if (base != nullptr) { + if (!memOpnd.IsIntactIndexed()) { referedRegs.insert(base->GetRegisterNumber()); } - if (offset != nullptr) { - referedRegs.insert(offset->GetRegisterNumber()); - } } else if (opnd.IsRegister()) { RegType regType = static_cast(opnd).GetRegisterType(); if (regType == kRegTyCc || regType == kRegTyVary) { continue; } - referedRegs.insert(static_cast(opnd).GetRegisterNumber()); + bool isDef = md->GetOperand(static_cast(i))->IsRegDef(); + if (isDef) { + referedRegs.insert(static_cast(opnd).GetRegisterNumber()); + } } } } @@ -408,6 +409,14 @@ void AArch64AsmEmitter::Run(FuncEmitInfo &funcEmitInfo) { (void)emitter.Emit("\t.section\t.init_array,\"aw\"\n"); (void)emitter.Emit("\t.quad\t").Emit(cgFunc.GetName()).Emit("\n"); } + if (cgFunc.GetFunction().GetAttr(FUNCATTR_initialization)) { + (void)emitter.Emit("\t.section\t.init_array,\"aw\"\n"); + (void)emitter.Emit("\t.quad\t").Emit(cgFunc.GetName()).Emit("\n"); + } + if (cgFunc.GetFunction().GetAttr(FUNCATTR_termination)) { + (void)emitter.Emit("\t.section\t.fini_array,\"aw\"\n"); + (void)emitter.Emit("\t.quad\t").Emit(cgFunc.GetName()).Emit("\n"); + } emitter.Emit("\n"); EmitMethodDesc(funcEmitInfo, emitter); /* emit java code to the java section. 
*/ @@ -419,10 +428,14 @@ void AArch64AsmEmitter::Run(FuncEmitInfo &funcEmitInfo) { (void)emitter.Emit("\t.section " + sectionName).Emit(",\"ax\",@progbits\n"); } else if (CGOptions::IsFunctionSections()) { (void)emitter.Emit("\t.section .text.").Emit(cgFunc.GetName()).Emit(",\"ax\",@progbits\n"); + } else if (cgFunc.GetFunction().GetAttr(FUNCATTR_constructor_priority)) { + (void)emitter.Emit("\t.section\t.text.startup").Emit(",\"ax\",@progbits\n"); } else { (void)emitter.Emit("\t.text\n"); } - (void)emitter.Emit("\t.align 5\n"); + if (CGOptions::GetFuncAlignPow() != 0) { + (void)emitter.Emit("\t.align ").Emit(CGOptions::GetFuncAlignPow()).Emit("\n"); + } MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(cgFunc.GetFunction().GetStIdx().Idx()); const std::string &funcName = std::string(cgFunc.GetShortFuncName().c_str()); @@ -511,7 +524,13 @@ void AArch64AsmEmitter::Run(FuncEmitInfo &funcEmitInfo) { } FOR_BB_INSNS(insn, bb) { - insn->Emit(*currCG, emitter); + if (insn->IsCfiInsn()) { + EmitAArch64CfiInsn(emitter, *insn); + } else if (insn->IsDbgInsn()) { + EmitAArch64DbgInsn(emitter, *insn); + } else { + EmitAArch64Insn(emitter, *insn); + } } } if (CGOptions::IsMapleLinker()) { @@ -520,9 +539,16 @@ void AArch64AsmEmitter::Run(FuncEmitInfo &funcEmitInfo) { } (void)emitter.Emit("\t.size\t" + funcStName + ", .-").Emit(funcStName + "\n"); + auto constructorAttr = funcSt->GetFunction()->GetAttrs().GetConstructorPriority(); + if (constructorAttr != -1) { + (void)emitter.Emit("\t.section\t.init_array." + std::to_string(constructorAttr) + ",\"aw\"\n"); + (void)emitter.Emit("\t.align 3\n"); + (void)emitter.Emit("\t.xword\t" + funcStName + "\n"); + } + EHFunc *ehFunc = cgFunc.GetEHFunc(); /* emit LSDA */ - if (ehFunc != nullptr) { + if (cgFunc.GetFunction().IsJava() && (ehFunc != nullptr)) { if (!cgFunc.GetHasProEpilogue()) { emitter.Emit("\t.word 0x55555555\n"); emitter.IncreaseJavaInsnCount(); @@ -636,7 +662,8 @@ void AArch64AsmEmitter::Run(FuncEmitInfo &funcEmitInfo) { for (const auto &mpPair : cgFunc.GetLabelAndValueMap()) { LabelOperand &labelOpnd = aarchCGFunc.GetOrCreateLabelOperand(mpPair.first); - labelOpnd.Emit(emitter, nullptr); + A64OpndEmitVisitor visitor(emitter, nullptr); + labelOpnd.Accept(visitor); emitter.Emit(":\n"); emitter.Emit("\t.quad ").Emit(mpPair.second).Emit("\n"); emitter.IncreaseJavaInsnCount(kQuadInsnCount); @@ -652,6 +679,1428 @@ void AArch64AsmEmitter::Run(FuncEmitInfo &funcEmitInfo) { #endif /* ~EMIT_INSN_COUNT */ } +void AArch64AsmEmitter::EmitAArch64Insn(maplebe::Emitter &emitter, Insn &insn) { + MOperator mOp = insn.GetMachineOpcode(); + emitter.SetCurrentMOP(mOp); + const AArch64MD *md = &AArch64CG::kMd[mOp]; + + if (!GetCG()->GenerateVerboseAsm() && !GetCG()->GenerateVerboseCG() && mOp == MOP_comment) { + return; + } + + switch (mOp) { + case MOP_clinit: { + EmitClinit(emitter, insn); + emitter.IncreaseJavaInsnCount(md->GetAtomicNum()); + return; + } + case MOP_adrp_ldr: { + uint32 adrpldrInsnCount = md->GetAtomicNum(); + emitter.IncreaseJavaInsnCount(adrpldrInsnCount); + EmitAdrpLdr(emitter, insn); + if (CGOptions::IsLazyBinding() && !GetCG()->IsLibcore()) { + EmitLazyBindingRoutine(emitter, insn); + emitter.IncreaseJavaInsnCount(adrpldrInsnCount + 1); + } + return; + } + case MOP_counter: { + EmitCounter(emitter, insn); + return; + } + case MOP_asm: { + EmitInlineAsm(emitter, insn); + return; + } + case MOP_clinit_tail: { + EmitClinitTail(emitter, insn); + emitter.IncreaseJavaInsnCount(md->GetAtomicNum()); + return; + } + case MOP_lazy_ldr: { + 
EmitLazyLoad(emitter, insn);
+      emitter.IncreaseJavaInsnCount(md->GetAtomicNum());
+      return;
+    }
+    case MOP_adrp_label: {
+      EmitAdrpLabel(emitter, insn);
+      return;
+    }
+    case MOP_lazy_tail: {
+      /* No need to emit this pseudo instruction. */
+      return;
+    }
+    case MOP_lazy_ldr_static: {
+      EmitLazyLoadStatic(emitter, insn);
+      emitter.IncreaseJavaInsnCount(md->GetAtomicNum());
+      return;
+    }
+    case MOP_arrayclass_cache_ldr: {
+      EmitArrayClassCacheLoad(emitter, insn);
+      emitter.IncreaseJavaInsnCount(md->GetAtomicNum());
+      return;
+    }
+    case MOP_get_and_addI:
+    case MOP_get_and_addL: {
+      EmitGetAndAddInt(emitter, insn);
+      return;
+    }
+    case MOP_get_and_setI:
+    case MOP_get_and_setL: {
+      EmitGetAndSetInt(emitter, insn);
+      return;
+    }
+    case MOP_compare_and_swapI:
+    case MOP_compare_and_swapL: {
+      EmitCompareAndSwapInt(emitter, insn);
+      return;
+    }
+    case MOP_string_indexof: {
+      EmitStringIndexOf(emitter, insn);
+      return;
+    }
+    case MOP_pseudo_none: {
+      return;
+    }
+    case MOP_tls_desc_call: {
+      EmitCTlsDescCall(emitter, insn);
+      return;
+    }
+    case MOP_tls_desc_rel: {
+      EmitCTlsDescRel(emitter, insn);
+      return;
+    }
+    case MOP_sync_lock_test_setI:
+    case MOP_sync_lock_test_setL: {
+      EmitSyncLockTestSet(emitter, insn);
+      return;
+    }
+    default:
+      break;
+  }
+
+  if (CGOptions::IsNativeOpt() && mOp == MOP_xbl) {
+    auto *nameOpnd = static_cast<FuncNameOperand*>(&insn.GetOperand(kInsnFirstOpnd));
+    if (nameOpnd->GetName() == "MCC_CheckThrowPendingException") {
+      EmitCheckThrowPendingException(emitter, insn);
+      emitter.IncreaseJavaInsnCount(md->GetAtomicNum());
+      return;
+    }
+  }
+
+  std::string format(md->format);
+  emitter.Emit("\t").Emit(md->name).Emit("\t");
+  size_t opndSize = insn.GetOperandSize();
+  std::vector<int32> seq(opndSize, -1);
+  std::vector<std::string> prefix(opndSize); /* used for print prefix like "*" in icall *rax */
+  uint32 index = 0;
+  uint32 commaNum = 0;
+  for (uint32 i = 0; i < format.length(); ++i) {
+    char c = format[i];
+    if (c >= '0' && c <= '5') {
+      seq[index++] = c - '0';
+      ++commaNum;
+    } else if (c != ',') {
+      prefix[index].push_back(c);
+    }
+  }
+
+  bool isRefField = (opndSize == 0) ? false : CheckInsnRefField(insn, static_cast<size_t>(static_cast<uint32>(seq[0])));
+  if (mOp != MOP_comment) {
+    emitter.IncreaseJavaInsnCount();
+  }
+  uint32 compositeOpnds = 0;
+  for (uint32 i = 0; i < commaNum; ++i) {
+    if (seq[i] == -1) {
+      continue;
+    }
+    if (prefix[i].length() > 0) {
+      emitter.Emit(prefix[i]);
+    }
+    if (emitter.NeedToDealWithHugeSo() && (mOp == MOP_xbl || mOp == MOP_tail_call_opt_xbl)) {
+      auto *nameOpnd = static_cast<FuncNameOperand*>(&insn.GetOperand(kInsnFirstOpnd));
+      /* Support huge .so files here.
+       * As the PLT section is placed just before the java_text section, when the java_text
+       * section grows larger than 128M, "b" and "bl" instructions can no longer reach the
+       * PLT stub functions. To save instruction space, we redirect the branch to a local
+       * target within the 120M addressing range, and add a non-PLT call to the target
+       * function there.
+       */
+      emitter.InsertHugeSoTarget(nameOpnd->GetName());
+      emitter.Emit(nameOpnd->GetName() + emitter.HugeSoPostFix());
+      break;
+    }
+    auto *opnd = &insn.GetOperand(static_cast<uint32>(seq[i]));
+    if (opnd && opnd->IsRegister()) {
+      auto *regOpnd = static_cast<RegOperand*>(opnd);
+      if ((md->operand[static_cast<uint32>(seq[i])])->IsVectorOperand()) {
+        regOpnd->SetVecLanePosition(-1);
+        regOpnd->SetVecLaneSize(0);
+        regOpnd->SetVecElementSize(0);
+        if (insn.IsVectorOp()) {
+          PrepareVectorOperand(regOpnd, compositeOpnds, insn);
+          if (compositeOpnds != 0) {
+            emitter.Emit("{");
+          }
+        }
+      }
+    }
+    A64OpndEmitVisitor visitor(emitter, md->operand[seq[i]]);
+
+    insn.GetOperand(seq[i]).Accept(visitor);
+    if (compositeOpnds == 1) {
+      emitter.Emit("}");
+    }
+    if (compositeOpnds > 0) {
+      --compositeOpnds;
+    }
+    /* reset opnd0 ref-field flag, so following instruction has correct register */
+    if (isRefField && (i == 0)) {
+      static_cast<RegOperand*>(&insn.GetOperand(seq[0]))->SetRefField(false);
+    }
+    /* The label .Label.debug.callee is temporarily not emitted here. */
+    if (i != (commaNum - 1)) {
+      emitter.Emit(", ");
+    }
+    const uint32 commaNumForEmitLazy = 2;
+    if (!CGOptions::IsLazyBinding() || GetCG()->IsLibcore() || (mOp != MOP_wldr && mOp != MOP_xldr) ||
+        commaNum != commaNumForEmitLazy || i != 1 || !insn.GetOperand(seq[1]).IsMemoryAccessOperand()) {
+      continue;
+    }
+    /*
+     * Only check the last operand of ldr in lo12 mode.
+     * Check the second operand, if it's [MemOperand::kAddrModeLo12Li]
+     */
+    auto *memOpnd = static_cast<MemOperand*>(&insn.GetOperand(seq[1]));
+    if (memOpnd == nullptr || memOpnd->GetAddrMode() != MemOperand::kAddrModeLo12Li) {
+      continue;
+    }
+    const MIRSymbol *sym = memOpnd->GetSymbol();
+    if (sym->IsMuidFuncDefTab() || sym->IsMuidFuncUndefTab() ||
+        sym->IsMuidDataDefTab() || sym->IsMuidDataUndefTab()) {
+      emitter.Emit("\n");
+      EmitLazyBindingRoutine(emitter, insn);
+      emitter.IncreaseJavaInsnCount(1);
+    }
+  }
+  if (GetCG()->GenerateVerboseCG() || (GetCG()->GenerateVerboseAsm() && mOp == MOP_comment)) {
+    const char *comment = insn.GetComment().c_str();
+    if (comment != nullptr && strlen(comment) > 0) {
+      (void)emitter.Emit("\t\t// ").Emit(comment);
+    }
+  }
+
+  emitter.Emit("\n");
+}
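+
+/*
+ * Illustrative note added for clarity (the format string named here is hypothetical, not
+ * from this patch): for an MD entry whose format is "0,1,2", the scan in EmitAArch64Insn
+ * above fills seq = {0, 1, 2} and commaNum = 3, so the emit loop prints operands 0, 1 and 2
+ * separated by ", ". Any non-digit, non-comma character in the format (e.g. '*' or '[')
+ * is collected into prefix[] and printed immediately before its operand.
+ */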
emitter.Emit("#"); + emitter.Emit(":lo12:").Emit(stImmOpnd->GetName()); + emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + emitter.Emit("]"); + emitter.Emit("\n"); + } else { + /* adrp x3, _PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B */ + emitter.Emit("\tadrp\t"); + opnd0->Accept(visitor); + emitter.Emit(","); + (void)emitter.Emit(namemangler::kPtrPrefixStr + stImmOpnd->GetName()); + emitter.Emit("\n"); + + /* ldr x3, [x3, #:lo12:_PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B] */ + emitter.Emit("\tldr\t"); + opnd0->Accept(visitor); + emitter.Emit(", ["); + opnd0->Accept(visitor); + emitter.Emit(", #:lo12:"); + (void)emitter.Emit(namemangler::kPtrPrefixStr + stImmOpnd->GetName()); + emitter.Emit("]\n"); + } + /* emit "ldr x0,[x0,#48]" */ + emitter.Emit("\t").Emit("ldr").Emit("\t"); + opnd0->Accept(visitor); + emitter.Emit(","); + emitter.Emit("["); + opnd0->Accept(visitor); + emitter.Emit(",#"); + emitter.Emit(static_cast(ClassMetadata::OffsetOfInitState())); + emitter.Emit("]"); + emitter.Emit("\n"); + + /* emit "ldr xzr, [x0]" */ + emitter.Emit("\t").Emit("ldr\txzr, ["); + opnd0->Accept(visitor); + emitter.Emit("]\n"); +} + +static void AsmStringOutputRegNum( + bool isInt, uint32 regno, uint32 intBase, uint32 fpBase, std::string &strToEmit) { + regno_t newRegno; + if (isInt) { + newRegno = regno - intBase; + } else { + newRegno = regno - fpBase; + } + if (newRegno > (kDecimalMax - 1)) { + uint32 tenth = newRegno / kDecimalMax; + strToEmit += '0' + static_cast(tenth); + newRegno -= (kDecimalMax * tenth); + } + strToEmit += newRegno + '0'; +} + +void AArch64AsmEmitter::EmitInlineAsm(Emitter &emitter, Insn &insn) const { + emitter.Emit("\t//Inline asm begin\n\t"); + auto &list1 = static_cast(insn.GetOperand(kAsmOutputListOpnd)); + std::vector outOpnds; + for (auto *regOpnd : list1.GetOperands()) { + outOpnds.push_back(regOpnd); + } + auto &list2 = static_cast(insn.GetOperand(kAsmInputListOpnd)); + std::vector inOpnds; + for (auto *regOpnd : list2.GetOperands()) { + inOpnds.push_back(regOpnd); + } + auto &list6 = static_cast(insn.GetOperand(kAsmOutputRegPrefixOpnd)); + auto &list7 = static_cast(insn.GetOperand(kAsmInputRegPrefixOpnd)); + MapleString asmStr = static_cast(insn.GetOperand(kAsmStringOpnd)).GetComment(); + std::string stringToEmit; + size_t sidx = 0; + auto IsMemAccess = [](char c)->bool { + return c == '['; + }; + auto EmitRegister = [&](const char *p, bool isInt, uint32 regNO, bool unDefRegSize)->void { + if (IsMemAccess(p[0])) { + stringToEmit += "[x"; + AsmStringOutputRegNum(isInt, regNO, R0, V0, stringToEmit); + stringToEmit += "]"; + } else { + ASSERT((p[0] == 'w' || p[0] == 'x' || p[0] == 's' || p[0] == 'd' || p[0] == 'v'), "Asm invalid register type"); + if ((p[0] == 'w' || p[0] == 'x') && unDefRegSize) { + stringToEmit += 'x'; + } else { + stringToEmit += p[0]; + } + if (!unDefRegSize) { + isInt = (p[0] == 'w' || p[0] == 'x'); + } + AsmStringOutputRegNum(isInt, regNO, R0, V0, stringToEmit); + } + }; + for (size_t i = 0; i < asmStr.length(); ++i) { + switch (asmStr[i]) { + case '$': { + char c = asmStr[++i]; + if ((c >= '0') && (c <= '9')) { + auto val = static_cast(c - '0'); + if (asmStr[i + 1] >= '0' && asmStr[i + 1] <= '9') { + val = val * kDecimalMax + static_cast(asmStr[++i] - '0'); + } + if (val < outOpnds.size()) { + const char *prefix = list6.stringList[val]->GetComment().c_str(); + RegOperand *opnd = outOpnds[val]; + EmitRegister(prefix, opnd->IsOfIntClass(), opnd->GetRegisterNumber(), true); + } else { + val -= 
static_cast<uint32>(outOpnds.size());
+          CHECK_FATAL(val < inOpnds.size(), "Inline asm : invalid register constraint number");
+          RegOperand *opnd = inOpnds[val];
+          /* the input is an immediate */
+          const char *prefix = list7.stringList[val]->GetComment().c_str();
+          if (prefix[0] == 'i') {
+            stringToEmit += '#';
+            for (size_t k = 1; k < list7.stringList[val]->GetComment().length(); ++k) {
+              stringToEmit += prefix[k];
+            }
+          } else {
+            EmitRegister(prefix, opnd->IsOfIntClass(), opnd->GetRegisterNumber(), true);
+          }
+        }
+      } else if (c == '{') {
+        c = asmStr[++i];
+        CHECK_FATAL(((c >= '0') && (c <= '9')), "Inline asm : invalid register constraint number");
+        auto val = static_cast<uint32>(c - '0');
+        if (asmStr[i + 1] >= '0' && asmStr[i + 1] <= '9') {
+          val = val * kDecimalMax + static_cast<uint32>(asmStr[++i] - '0');
+        }
+        regno_t regno;
+        bool isAddr = false;
+        if (val < outOpnds.size()) {
+          RegOperand *opnd = outOpnds[val];
+          regno = opnd->GetRegisterNumber();
+          isAddr = IsMemAccess(list6.stringList[val]->GetComment().c_str()[0]);
+        } else {
+          val -= static_cast<uint32>(outOpnds.size());
+          CHECK_FATAL(val < inOpnds.size(), "Inline asm : invalid register constraint number");
+          RegOperand *opnd = inOpnds[val];
+          regno = opnd->GetRegisterNumber();
+          isAddr = IsMemAccess(list7.stringList[val]->GetComment().c_str()[0]);
+        }
+        c = asmStr[++i];
+        CHECK_FATAL(c == ':', "Parsing error in inline asm string during emit");
+        c = asmStr[++i];
+        std::string prefix(1, c);
+        if (c == 'a' || isAddr) {
+          prefix = "[x";
+        }
+        EmitRegister(prefix.c_str(), true, regno, false);
+        c = asmStr[++i];
+        CHECK_FATAL(c == '}', "Parsing error in inline asm string during emit");
+      }
+      break;
+    }
+    case '\n': {
+      stringToEmit += "\n\t";
+      break;
+    }
+    default:
+      stringToEmit += asmStr[i];
+      sidx++;
+    }
+  }
+  emitter.Emit(stringToEmit);
+  emitter.Emit("\n\t//Inline asm end\n");
+}
+
+void AArch64AsmEmitter::EmitClinitTail(Emitter &emitter, Insn &insn) const {
+  /*
+   * ldr x17, [xs, #112]
+   * ldr wzr, [x17]
+   */
+  const AArch64MD *md = &AArch64CG::kMd[MOP_clinit_tail];
+
+  Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd);
+
+  OpndProp *prop0 = md->operand[0];
+  A64OpndEmitVisitor visitor(emitter, prop0);
+
+  /* emit "ldr x17,[xs,#112]" */
+  emitter.Emit("\t").Emit("ldr").Emit("\tx17, [");
+  opnd0->Accept(visitor);
+  emitter.Emit(", #");
+  emitter.Emit(static_cast<int64>(ClassMetadata::OffsetOfInitState()));
+  emitter.Emit("]");
+  emitter.Emit("\n");
+
+  /* emit "ldr xzr, [x17]" */
+  emitter.Emit("\t").Emit("ldr\txzr, [x17]\n");
+}
+
+void AArch64AsmEmitter::EmitLazyLoad(Emitter &emitter, Insn &insn) const {
+  /*
+   * ldr wd, [xs] # xd and xs should be different registers
+   * ldr wd, [xd]
+   */
+  const AArch64MD *md = &AArch64CG::kMd[MOP_lazy_ldr];
+
+  Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd);
+  Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd);
+  OpndProp *prop0 = md->operand[0];
+  OpndProp *prop1 = md->operand[1];
+
+  /* emit "ldr wd, [xs]" */
+  emitter.Emit("\t").Emit("ldr\t");
+#ifdef USE_32BIT_REF
+  opnd0->Emit(emitter, prop0);
+#else
+  opnd0->Emit(emitter, prop1);
+#endif
+  emitter.Emit(", [");
+  opnd1->Emit(emitter, prop1);
+  emitter.Emit("]\t// lazy load.\n");
+
+  /* emit "ldr wd, [xd]" */
+  emitter.Emit("\t").Emit("ldr\t");
+  opnd0->Emit(emitter, prop0);
+  emitter.Emit(", [");
+  opnd0->Emit(emitter, prop1);
+  emitter.Emit("]\t// lazy load.\n");
+}
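+
+/*
+ * Note added for clarity (not part of the original change): MOP_lazy_ldr above expands to
+ * a double indirection; the first ldr reads the lazy-binding slot and the second ldr
+ * dereferences the loaded address. The #ifdef only switches the width of the first
+ * destination register (32-bit wd via prop0 under USE_32BIT_REF, 64-bit xd via prop1
+ * otherwise).
+ */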
+
+void AArch64AsmEmitter::EmitCounter(Emitter &emitter, Insn &insn) const {
+  /*
+   * adrp x1, __profile_bb_table$$GetBoolean_dex+4
+   * ldr w17, [x1, #:lo12:__profile_bb_table$$GetBoolean_dex+4]
+   * add w17, w17, #1
+   * str w17, [x1, #:lo12:__profile_bb_table$$GetBoolean_dex+4]
+   */
+  const AArch64MD *md = &AArch64CG::kMd[MOP_counter];
+
+  Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd);
+  Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd);
+  OpndProp *prop0 = md->operand[kInsnFirstOpnd];
+  A64OpndEmitVisitor visitor(emitter, prop0);
+  StImmOperand *stImmOpnd = static_cast<StImmOperand*>(opnd1);
+  CHECK_FATAL(stImmOpnd != nullptr, "stImmOpnd is null in AArch64Insn::EmitCounter");
+  /* emit nop for breakpoint */
+  if (GetCG()->GetCGOptions().WithDwarf()) {
+    emitter.Emit("\t").Emit("nop").Emit("\n");
+  }
+
+  /* emit adrp */
+  emitter.Emit("\t").Emit("adrp").Emit("\t");
+  opnd0->Accept(visitor);
+  emitter.Emit(",");
+  emitter.Emit(stImmOpnd->GetName());
+  emitter.Emit("+").Emit(stImmOpnd->GetOffset());
+  emitter.Emit("\n");
+  /* emit ldr */
+  emitter.Emit("\t").Emit("ldr").Emit("\tw17, [");
+  opnd0->Accept(visitor);
+  emitter.Emit(",");
+  emitter.Emit("#");
+  emitter.Emit(":lo12:").Emit(stImmOpnd->GetName());
+  emitter.Emit("+").Emit(stImmOpnd->GetOffset());
+  emitter.Emit("]");
+  emitter.Emit("\n");
+  /* emit add */
+  emitter.Emit("\t").Emit("add").Emit("\tw17, w17, #1");
+  emitter.Emit("\n");
+  /* emit str */
+  emitter.Emit("\t").Emit("str").Emit("\tw17, [");
+  opnd0->Accept(visitor);
+  emitter.Emit(",");
+  emitter.Emit("#");
+  emitter.Emit(":lo12:").Emit(stImmOpnd->GetName());
+  emitter.Emit("+").Emit(stImmOpnd->GetOffset());
+  emitter.Emit("]");
+  emitter.Emit("\n");
+}
+
+void AArch64AsmEmitter::EmitAdrpLabel(Emitter &emitter, Insn &insn) const {
+  /* adrp xd, label
+   * add xd, xd, #lo12:label
+   */
+  const AArch64MD *md = &AArch64CG::kMd[MOP_adrp_label];
+
+  Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd);
+  Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd);
+  OpndProp *prop0 = md->operand[0];
+  A64OpndEmitVisitor visitor(emitter, prop0);
+  auto lidx = static_cast<ImmOperand*>(opnd1)->GetValue();
+
+  /* adrp xd, label */
+  emitter.Emit("\t").Emit("adrp").Emit("\t");
+  opnd0->Accept(visitor);
+  emitter.Emit(", ");
+  const char *idx;
+  idx = strdup(std::to_string(Globals::GetInstance()->GetBECommon()->GetMIRModule().CurFunction()->GetPuidx()).c_str());
+  emitter.Emit(".L.").Emit(idx).Emit("__").Emit(lidx).Emit("\n");
+
+  /* add xd, xd, #lo12:label */
+  emitter.Emit("\tadd\t");
+  opnd0->Accept(visitor);
+  emitter.Emit(", ");
+  opnd0->Accept(visitor);
+  emitter.Emit(", ");
+  emitter.Emit(":lo12:").Emit(".L.").Emit(idx).Emit("__").Emit(lidx).Emit("\n");
+  emitter.Emit("\n");
+}
+
+void AArch64AsmEmitter::EmitAdrpLdr(Emitter &emitter, Insn &insn) const {
+  /*
+   * adrp xd, _PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B
+   * ldr xd, [xd, #:lo12:_PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B]
+   */
+  const AArch64MD *md = &AArch64CG::kMd[MOP_adrp_ldr];
+  Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd);
+  Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd);
+  OpndProp *prop0 = md->operand[0];
+  A64OpndEmitVisitor visitor(emitter, prop0);
+  auto *stImmOpnd = static_cast<StImmOperand*>(opnd1);
+  CHECK_FATAL(stImmOpnd != nullptr, "stImmOpnd is null in AArch64Insn::EmitAdrpLdr");
+  /* emit nop for breakpoint */
+  if (GetCG()->GetCGOptions().WithDwarf()) {
+    emitter.Emit("\t").Emit("nop").Emit("\n");
+  }
+
+  /* adrp xd, _PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B */
+  emitter.Emit("\t").Emit("adrp").Emit("\t");
+  opnd0->Accept(visitor);
+  emitter.Emit(", ");
+  emitter.Emit(stImmOpnd->GetName());
+  if (stImmOpnd->GetOffset() != 0) {
+    emitter.Emit("+").Emit(stImmOpnd->GetOffset());
+  }
+  emitter.Emit("\n");
+
+  /* ldr xd, [xd, #:lo12:_PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B] */
+  emitter.Emit("\tldr\t");
+  static_cast<RegOperand*>(opnd0)->SetRefField(true);
+  opnd0->Accept(visitor);
+  static_cast<RegOperand*>(opnd0)->SetRefField(false);
+  emitter.Emit(", ");
+  emitter.Emit("[");
+  opnd0->Accept(visitor);
+  emitter.Emit(",");
+  emitter.Emit("#");
+  emitter.Emit(":lo12:").Emit(stImmOpnd->GetName());
+  if (stImmOpnd->GetOffset() != 0) {
+    emitter.Emit("+").Emit(stImmOpnd->GetOffset());
+  }
+  emitter.Emit("]\n");
+}
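+
+/*
+ * Note added for clarity (not part of the original change): the SetRefField(true) /
+ * SetRefField(false) pair in EmitAdrpLdr above marks the destination register as holding
+ * a heap reference only while that single ldr is printed; for ordinary instructions
+ * EmitAArch64Insn achieves the same effect through CheckInsnRefField and the isRefField
+ * reset after operand 0.
+ */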
+
+void AArch64AsmEmitter::EmitLazyLoadStatic(Emitter &emitter, Insn &insn) const {
+  /* adrp xd, :got:__staticDecoupleValueOffset$$xxx+offset
+   * ldr wd, [xd, #:got_lo12:__staticDecoupleValueOffset$$xxx+offset]
+   * ldr wzr, [xd]
+   */
+  const AArch64MD *md = &AArch64CG::kMd[MOP_lazy_ldr_static];
+
+  Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd);
+  Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd);
+  OpndProp *prop0 = md->GetOperand(0);
+  auto *stImmOpnd = static_cast<StImmOperand*>(opnd1);
+  CHECK_FATAL(stImmOpnd != nullptr, "stImmOpnd is null in AArch64Insn::EmitLazyLoadStatic");
+
+  /* emit "adrp xd, :got:__staticDecoupleValueOffset$$xxx+offset" */
+  emitter.Emit("\t").Emit("adrp").Emit("\t");
+  opnd0->Emit(emitter, prop0);
+  emitter.Emit(", ");
+  emitter.Emit(stImmOpnd->GetName());
+  if (stImmOpnd->GetOffset() != 0) {
+    emitter.Emit("+").Emit(stImmOpnd->GetOffset());
+  }
+  emitter.Emit("\t// lazy load static.\n");
+
+  /* emit "ldr wd, [xd, #:got_lo12:__staticDecoupleValueOffset$$xxx+offset]" */
+  emitter.Emit("\tldr\t");
+  static_cast<RegOperand*>(opnd0)->SetRefField(true);
+#ifdef USE_32BIT_REF
+  OpndProp prop2(prop0->GetOperandType(), prop0->GetRegProp(), prop0->GetSize() / 2);
+  opnd0->Emit(emitter, &prop2); /* ldr wd, ... for emui */
+#else
+  opnd0->Emit(emitter, prop0); /* ldr xd, ... for qemu */
+#endif /* USE_32BIT_REF */
+  static_cast<RegOperand*>(opnd0)->SetRefField(false);
+  emitter.Emit(", ");
+  emitter.Emit("[");
+  opnd0->Emit(emitter, prop0);
+  emitter.Emit(",");
+  emitter.Emit("#");
+  emitter.Emit(":lo12:").Emit(stImmOpnd->GetName());
+  if (stImmOpnd->GetOffset() != 0) {
+    emitter.Emit("+").Emit(stImmOpnd->GetOffset());
+  }
+  emitter.Emit("]\t// lazy load static.\n");
+
+  /* emit "ldr wzr, [xd]" */
+  emitter.Emit("\t").Emit("ldr\twzr, [");
+  opnd0->Emit(emitter, prop0);
+  emitter.Emit("]\t// lazy load static.\n");
+}
+
+void AArch64AsmEmitter::EmitArrayClassCacheLoad(Emitter &emitter, Insn &insn) const {
+  /* adrp xd, :got:__arrayClassCacheTable$$xxx+offset
+   * ldr wd, [xd, #:got_lo12:__arrayClassCacheTable$$xxx+offset]
+   * ldr wzr, [xd]
+   */
+  const AArch64MD *md = &AArch64CG::kMd[MOP_arrayclass_cache_ldr];
+  Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd);
+  Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd);
+  OpndProp *prop0 = md->GetOperand(kInsnFirstOpnd);
+  A64OpndEmitVisitor visitor(emitter, prop0);
+  auto *stImmOpnd = static_cast<StImmOperand*>(opnd1);
+  CHECK_FATAL(stImmOpnd != nullptr, "stImmOpnd is null in AArch64Insn::EmitArrayClassCacheLoad");
+
+  /* emit "adrp xd, :got:__arrayClassCacheTable$$xxx+offset" */
+  emitter.Emit("\t").Emit("adrp").Emit("\t");
+  opnd0->Accept(visitor);
+  emitter.Emit(", ");
+  emitter.Emit(stImmOpnd->GetName());
+  if (stImmOpnd->GetOffset() != 0) {
+    emitter.Emit("+").Emit(stImmOpnd->GetOffset());
+  }
+  emitter.Emit("\t// load array class.\n");
+
+  /* emit "ldr wd, [xd, #:got_lo12:__arrayClassCacheTable$$xxx+offset]" */
+  emitter.Emit("\tldr\t");
+  static_cast<RegOperand*>(opnd0)->SetRefField(true);
+#ifdef USE_32BIT_REF
+  OpndProp prop2(prop0->GetOperandType(), prop0->GetRegProp(), prop0->GetSize() / 2);
+  A64OpndEmitVisitor visitor2(emitter, &prop2);
+  opnd0->Accept(visitor2); /* ldr wd, ... for emui */
+#else
+  opnd0->Accept(visitor); /* ldr xd, ... for qemu */
+#endif /* USE_32BIT_REF */
+  static_cast<RegOperand*>(opnd0)->SetRefField(false);
+  emitter.Emit(", ");
+  emitter.Emit("[");
+  opnd0->Accept(visitor);
+  emitter.Emit(",");
+  emitter.Emit("#");
+  emitter.Emit(":lo12:").Emit(stImmOpnd->GetName());
+  if (stImmOpnd->GetOffset() != 0) {
+    emitter.Emit("+").Emit(stImmOpnd->GetOffset());
+  }
+  emitter.Emit("]\t// load array class.\n");
+
+  /* emit "ldr wzr, [xd]" */
+  emitter.Emit("\t").Emit("ldr\twzr, [");
+  opnd0->Accept(visitor);
+  emitter.Emit("]\t// check resolve array class.\n");
+}
+
+/*
+ * intrinsic_get_add_int w0, xt, wt, ws, x1, x2, w3, label
+ * add xt, x1, x2
+ * label:
+ * ldaxr w0, [xt]
+ * add wt, w0, w3
+ * stlxr ws, wt, [xt]
+ * cbnz ws, label
+ */
+void AArch64AsmEmitter::EmitGetAndAddInt(Emitter &emitter, Insn &insn) const {
+  ASSERT(insn.GetOperandSize() > kInsnEighthOpnd, "ensure the operands number");
+  emitter.Emit("\t//\tstart of Unsafe.getAndAddInt.\n");
+  Operand *tempOpnd0 = &insn.GetOperand(kInsnSecondOpnd);
+  Operand *tempOpnd1 = &insn.GetOperand(kInsnThirdOpnd);
+  Operand *tempOpnd2 = &insn.GetOperand(kInsnFourthOpnd);
+  Operand *objOpnd = &insn.GetOperand(kInsnFifthOpnd);
+  Operand *offsetOpnd = &insn.GetOperand(kInsnSixthOpnd);
+  Operand *deltaOpnd = &insn.GetOperand(kInsnSeventhOpnd);
+  Operand *labelOpnd = &insn.GetOperand(kInsnEighthOpnd);
+  A64OpndEmitVisitor visitor(emitter, nullptr);
+  /* emit add. */
+  emitter.Emit("\t").Emit("add").Emit("\t");
+  tempOpnd0->Accept(visitor);
+  emitter.Emit(", ");
+  objOpnd->Accept(visitor);
+  emitter.Emit(", ");
+  offsetOpnd->Accept(visitor);
+  emitter.Emit("\n");
+  /* emit label.
*/ + labelOpnd->Accept(visitor); + emitter.Emit(":\n"); + Operand *retVal = &insn.GetOperand(kInsnFirstOpnd); + const MOperator mOp = insn.GetMachineOpcode(); + const AArch64MD *md = &AArch64CG::kMd[mOp]; + OpndProp *retProp = md->operand[kInsnFirstOpnd]; + A64OpndEmitVisitor retVisitor(emitter, retProp); + /* emit ldaxr */ + emitter.Emit("\t").Emit("ldaxr").Emit("\t"); + retVal->Accept(retVisitor); + emitter.Emit(", ["); + tempOpnd0->Accept(visitor); + emitter.Emit("]\n"); + /* emit add. */ + emitter.Emit("\t").Emit("add").Emit("\t"); + tempOpnd1->Accept(retVisitor); + emitter.Emit(", "); + retVal->Accept(retVisitor); + emitter.Emit(", "); + deltaOpnd->Accept(retVisitor); + emitter.Emit("\n"); + /* emit stlxr. */ + emitter.Emit("\t").Emit("stlxr").Emit("\t"); + tempOpnd2->Accept(visitor); + emitter.Emit(", "); + tempOpnd1->Accept(retVisitor); + emitter.Emit(", ["); + tempOpnd0->Accept(visitor); + emitter.Emit("]\n"); + /* emit cbnz. */ + emitter.Emit("\t").Emit("cbnz").Emit("\t"); + tempOpnd2->Accept(visitor); + emitter.Emit(", "); + labelOpnd->Accept(visitor); + emitter.Emit("\n"); + emitter.Emit("\t//\tend of Unsafe.getAndAddInt.\n"); +} + +/* + * intrinsic_get_set_int w0, xt, ws, x1, x2, w3, label + * add xt, x1, x2 + * label: + * ldaxr w0, [xt] + * stlxr ws, w3, [xt] + * cbnz ws, label + */ +void AArch64AsmEmitter::EmitGetAndSetInt(Emitter &emitter, Insn &insn) const { + /* MOP_get_and_setI and MOP_get_and_setL have 7 operands */ + ASSERT(insn.GetOperandSize() > kInsnSeventhOpnd, "ensure the operands number"); + Operand *tempOpnd0 = &insn.GetOperand(kInsnSecondOpnd); + Operand *tempOpnd1 = &insn.GetOperand(kInsnThirdOpnd); + Operand *objOpnd = &insn.GetOperand(kInsnFourthOpnd); + Operand *offsetOpnd = &insn.GetOperand(kInsnFifthOpnd); + A64OpndEmitVisitor visitor(emitter, nullptr); + /* add x1, x1, x2 */ + emitter.Emit("\tadd\t"); + tempOpnd0->Accept(visitor); + emitter.Emit(", "); + objOpnd->Accept(visitor); + emitter.Emit(", "); + offsetOpnd->Accept(visitor); + emitter.Emit("\n"); + Operand *labelOpnd = &insn.GetOperand(kInsnSeventhOpnd); + /* label: */ + labelOpnd->Accept(visitor); + emitter.Emit(":\n"); + Operand *retVal = &insn.GetOperand(kInsnFirstOpnd); + /* ldaxr w0, [xt] */ + emitter.Emit("\tldaxr\t"); + retVal->Accept(visitor); + emitter.Emit(", ["); + tempOpnd0->Accept(visitor); + emitter.Emit("]\n"); + Operand *newValueOpnd = &insn.GetOperand(kInsnSixthOpnd); + /* stlxr ws, w3, [xt] */ + emitter.Emit("\tstlxr\t"); + tempOpnd1->Accept(visitor); + emitter.Emit(", "); + newValueOpnd->Accept(visitor); + emitter.Emit(", ["); + tempOpnd0->Accept(visitor); + emitter.Emit("]\n"); + /* cbnz w2, label */ + emitter.Emit("\tcbnz\t"); + tempOpnd1->Accept(visitor); + emitter.Emit(", "); + labelOpnd->Accept(visitor); + emitter.Emit("\n"); +} + +/* + * intrinsic_string_indexof w0, x1, w2, x3, w4, x5, x6, x7, x8, x9, w10, + * Label.FIRST_LOOP, Label.STR2_NEXT, Label.STR1_LOOP, + * Label.STR1_NEXT, Label.LAST_WORD, Label.NOMATCH, Label.RET + * cmp w4, w2 + * b.gt .Label.NOMATCH + * sub w2, w2, w4 + * sub w4, w4, #8 + * mov w10, w2 + * uxtw x4, w4 + * uxtw x2, w2 + * add x3, x3, x4 + * add x1, x1, x2 + * neg x4, x4 + * neg x2, x2 + * ldr x5, [x3,x4] + * .Label.FIRST_LOOP: + * ldr x7, [x1,x2] + * cmp x5, x7 + * b.eq .Label.STR1_LOOP + * .Label.STR2_NEXT: + * adds x2, x2, #1 + * b.le .Label.FIRST_LOOP + * b .Label.NOMATCH + * .Label.STR1_LOOP: + * adds x8, x4, #8 + * add x9, x2, #8 + * b.ge .Label.LAST_WORD + * .Label.STR1_NEXT: + * ldr x6, [x3,x8] + * ldr x7, [x1,x9] + * cmp x6, x7 + * b.ne 
.Label.STR2_NEXT + * adds x8, x8, #8 + * add x9, x9, #8 + * b.lt .Label.STR1_NEXT + * .Label.LAST_WORD: + * ldr x6, [x3] + * sub x9, x1, x4 + * ldr x7, [x9,x2] + * cmp x6, x7 + * b.ne .Label.STR2_NEXT + * add w0, w10, w2 + * b .Label.RET + * .Label.NOMATCH: + * mov w0, #-1 + * .Label.RET: + */ +void AArch64AsmEmitter::EmitStringIndexOf(Emitter &emitter, Insn &insn) const { + /* MOP_string_indexof has 18 operands */ + ASSERT(insn.GetOperandSize() == 18, "ensure the operands number"); + Operand *patternLengthOpnd = &insn.GetOperand(kInsnFifthOpnd); + Operand *srcLengthOpnd = &insn.GetOperand(kInsnThirdOpnd); + const std::string patternLengthReg = + AArch64CG::intRegNames[AArch64CG::kR64List][static_cast(patternLengthOpnd)->GetRegisterNumber()]; + const std::string srcLengthReg = + AArch64CG::intRegNames[AArch64CG::kR64List][static_cast(srcLengthOpnd)->GetRegisterNumber()]; + A64OpndEmitVisitor visitor(emitter, nullptr); + /* cmp w4, w2 */ + emitter.Emit("\tcmp\t"); + patternLengthOpnd->Accept(visitor); + emitter.Emit(", "); + srcLengthOpnd->Accept(visitor); + emitter.Emit("\n"); + /* the 16th operand of MOP_string_indexof is Label.NOMATCH */ + Operand *labelNoMatch = &insn.GetOperand(16); + /* b.gt Label.NOMATCH */ + emitter.Emit("\tb.gt\t"); + labelNoMatch->Accept(visitor); + emitter.Emit("\n"); + /* sub w2, w2, w4 */ + emitter.Emit("\tsub\t"); + srcLengthOpnd->Accept(visitor); + emitter.Emit(", "); + srcLengthOpnd->Accept(visitor); + emitter.Emit(", "); + patternLengthOpnd->Accept(visitor); + emitter.Emit("\n"); + /* sub w4, w4, #8 */ + emitter.Emit("\tsub\t"); + patternLengthOpnd->Accept(visitor); + emitter.Emit(", "); + patternLengthOpnd->Accept(visitor); + emitter.Emit(", #8\n"); + /* the 10th operand of MOP_string_indexof is w10 */ + Operand *resultTmp = &insn.GetOperand(10); + /* mov w10, w2 */ + emitter.Emit("\tmov\t"); + resultTmp->Accept(visitor); + emitter.Emit(", "); + srcLengthOpnd->Accept(visitor); + emitter.Emit("\n"); + /* uxtw x4, w4 */ + emitter.Emit("\tuxtw\t").Emit(patternLengthReg); + emitter.Emit(", "); + patternLengthOpnd->Accept(visitor); + emitter.Emit("\n"); + /* uxtw x2, w2 */ + emitter.Emit("\tuxtw\t").Emit(srcLengthReg); + emitter.Emit(", "); + srcLengthOpnd->Accept(visitor); + emitter.Emit("\n"); + Operand *patternStringBaseOpnd = &insn.GetOperand(kInsnFourthOpnd); + /* add x3, x3, x4 */ + emitter.Emit("\tadd\t"); + patternStringBaseOpnd->Accept(visitor); + emitter.Emit(", "); + patternStringBaseOpnd->Accept(visitor); + emitter.Emit(", ").Emit(patternLengthReg); + emitter.Emit("\n"); + Operand *srcStringBaseOpnd = &insn.GetOperand(kInsnSecondOpnd); + /* add x1, x1, x2 */ + emitter.Emit("\tadd\t"); + srcStringBaseOpnd->Accept(visitor); + emitter.Emit(", "); + srcStringBaseOpnd->Accept(visitor); + emitter.Emit(", ").Emit(srcLengthReg); + emitter.Emit("\n"); + /* neg x4, x4 */ + emitter.Emit("\tneg\t").Emit(patternLengthReg); + emitter.Emit(", ").Emit(patternLengthReg); + emitter.Emit("\n"); + /* neg x2, x2 */ + emitter.Emit("\tneg\t").Emit(srcLengthReg); + emitter.Emit(", ").Emit(srcLengthReg); + emitter.Emit("\n"); + Operand *first = &insn.GetOperand(kInsnSixthOpnd); + /* ldr x5, [x3,x4] */ + emitter.Emit("\tldr\t"); + first->Accept(visitor); + emitter.Emit(", ["); + patternStringBaseOpnd->Accept(visitor); + emitter.Emit(",").Emit(patternLengthReg); + emitter.Emit("]\n"); + /* the 11th operand of MOP_string_indexof is Label.FIRST_LOOP */ + Operand *labelFirstLoop = &insn.GetOperand(11); + /* .Label.FIRST_LOOP: */ + labelFirstLoop->Accept(visitor); + 
emitter.Emit(":\n"); + /* the 7th operand of MOP_string_indexof is x7 */ + Operand *ch2 = &insn.GetOperand(7); + /* ldr x7, [x1,x2] */ + emitter.Emit("\tldr\t"); + ch2->Accept(visitor); + emitter.Emit(", ["); + srcStringBaseOpnd->Accept(visitor); + emitter.Emit(",").Emit(srcLengthReg); + emitter.Emit("]\n"); + /* cmp x5, x7 */ + emitter.Emit("\tcmp\t"); + first->Accept(visitor); + emitter.Emit(", "); + ch2->Accept(visitor); + emitter.Emit("\n"); + /* the 13th operand of MOP_string_indexof is Label.STR1_LOOP */ + Operand *labelStr1Loop = &insn.GetOperand(13); + /* b.eq .Label.STR1_LOOP */ + emitter.Emit("\tb.eq\t"); + labelStr1Loop->Accept(visitor); + emitter.Emit("\n"); + /* the 12th operand of MOP_string_indexof is Label.STR2_NEXT */ + Operand *labelStr2Next = &insn.GetOperand(12); + /* .Label.STR2_NEXT: */ + labelStr2Next->Accept(visitor); + emitter.Emit(":\n"); + /* adds x2, x2, #1 */ + emitter.Emit("\tadds\t").Emit(srcLengthReg); + emitter.Emit(", ").Emit(srcLengthReg); + emitter.Emit(", #1\n"); + /* b.le .Label.FIRST_LOOP */ + emitter.Emit("\tb.le\t"); + labelFirstLoop->Accept(visitor); + emitter.Emit("\n"); + /* b .Label.NOMATCH */ + emitter.Emit("\tb\t"); + labelNoMatch->Accept(visitor); + emitter.Emit("\n"); + /* .Label.STR1_LOOP: */ + labelStr1Loop->Accept(visitor); + emitter.Emit(":\n"); + /* the 8th operand of MOP_string_indexof is x8 */ + Operand *tmp1 = &insn.GetOperand(kInsnEighthOpnd); + /* adds x8, x4, #8 */ + emitter.Emit("\tadds\t"); + tmp1->Accept(visitor); + emitter.Emit(", ").Emit(patternLengthReg); + emitter.Emit(", #8\n"); + /* the 9th operand of MOP_string_indexof is x9 */ + Operand *tmp2 = &insn.GetOperand(9); + /* add x9, x2, #8 */ + emitter.Emit("\tadd\t"); + tmp2->Accept(visitor); + emitter.Emit(", ").Emit(srcLengthReg); + emitter.Emit(", #8\n"); + /* the 15th operand of MOP_string_indexof is Label.LAST_WORD */ + Operand *labelLastWord = &insn.GetOperand(15); + /* b.ge .Label.LAST_WORD */ + emitter.Emit("\tb.ge\t"); + labelLastWord->Accept(visitor); + emitter.Emit("\n"); + /* the 14th operand of MOP_string_indexof is Label.STR1_NEXT */ + Operand *labelStr1Next = &insn.GetOperand(14); + /* .Label.STR1_NEXT: */ + labelStr1Next->Accept(visitor); + emitter.Emit(":\n"); + /* the 6th operand of MOP_string_indexof is x6 */ + Operand *ch1 = &insn.GetOperand(6); + /* ldr x6, [x3,x8] */ + emitter.Emit("\tldr\t"); + ch1->Accept(visitor); + emitter.Emit(", ["); + patternStringBaseOpnd->Accept(visitor); + emitter.Emit(","); + tmp1->Accept(visitor); + emitter.Emit("]\n"); + /* ldr x7, [x1,x9] */ + emitter.Emit("\tldr\t"); + ch2->Accept(visitor); + emitter.Emit(", ["); + srcStringBaseOpnd->Accept(visitor); + emitter.Emit(","); + tmp2->Accept(visitor); + emitter.Emit("]\n"); + /* cmp x6, x7 */ + emitter.Emit("\tcmp\t"); + ch1->Accept(visitor); + emitter.Emit(", "); + ch2->Accept(visitor); + emitter.Emit("\n"); + /* b.ne .Label.STR2_NEXT */ + emitter.Emit("\tb.ne\t"); + labelStr2Next->Accept(visitor); + emitter.Emit("\n"); + /* adds x8, x8, #8 */ + emitter.Emit("\tadds\t"); + tmp1->Accept(visitor); + emitter.Emit(", "); + tmp1->Accept(visitor); + emitter.Emit(", #8\n"); + /* add x9, x9, #8 */ + emitter.Emit("\tadd\t"); + tmp2->Accept(visitor); + emitter.Emit(", "); + tmp2->Accept(visitor); + emitter.Emit(", #8\n"); + /* b.lt .Label.STR1_NEXT */ + emitter.Emit("\tb.lt\t"); + labelStr1Next->Accept(visitor); + emitter.Emit("\n"); + /* .Label.LAST_WORD: */ + labelLastWord->Accept(visitor); + emitter.Emit(":\n"); + /* ldr x6, [x3] */ + emitter.Emit("\tldr\t"); + ch1->Accept(visitor); 
;
+  emitter.Emit(", [");
+  patternStringBaseOpnd->Accept(visitor);
+  emitter.Emit("]\n");
+  /* sub x9, x1, x4 */
+  emitter.Emit("\tsub\t");
+  tmp2->Accept(visitor);
+  emitter.Emit(", ");
+  srcStringBaseOpnd->Accept(visitor);
+  emitter.Emit(", ").Emit(patternLengthReg);
+  emitter.Emit("\n");
+  /* ldr x7, [x9,x2] */
+  emitter.Emit("\tldr\t");
+  ch2->Accept(visitor);
+  emitter.Emit(", [");
+  tmp2->Accept(visitor);
+  emitter.Emit(", ").Emit(srcLengthReg);
+  emitter.Emit("]\n");
+  /* cmp x6, x7 */
+  emitter.Emit("\tcmp\t");
+  ch1->Accept(visitor);
+  emitter.Emit(", ");
+  ch2->Accept(visitor);
+  emitter.Emit("\n");
+  /* b.ne .Label.STR2_NEXT */
+  emitter.Emit("\tb.ne\t");
+  labelStr2Next->Accept(visitor);
+  emitter.Emit("\n");
+  Operand *retVal = &insn.GetOperand(kInsnFirstOpnd);
+  /* add w0, w10, w2 */
+  emitter.Emit("\tadd\t");
+  retVal->Accept(visitor);
+  emitter.Emit(", ");
+  resultTmp->Accept(visitor);
+  emitter.Emit(", ");
+  srcLengthOpnd->Accept(visitor);
+  emitter.Emit("\n");
+  /* the 17th operand of MOP_string_indexof is Label.ret */
+  Operand *labelRet = &insn.GetOperand(17);
+  /* b .Label.ret */
+  emitter.Emit("\tb\t");
+  labelRet->Accept(visitor);
+  emitter.Emit("\n");
+  /* .Label.NOMATCH: */
+  labelNoMatch->Accept(visitor);
+  emitter.Emit(":\n");
+  /* mov w0, #-1 */
+  emitter.Emit("\tmov\t");
+  retVal->Accept(visitor);
+  emitter.Emit(", #-1\n");
+  /* .Label.ret: */
+  labelRet->Accept(visitor);
+  emitter.Emit(":\n");
+}
+
+/*
+ * intrinsic_compare_swap_int x0, xt, xs, x1, x2, w3, w4, label1, label2
+ * add xt, x1, x2
+ * label1:
+ * ldaxr ws, [xt]
+ * cmp ws, w3
+ * b.ne label2
+ * stlxr ws, w4, [xt]
+ * cbnz ws, label1
+ * label2:
+ * cset x0, eq
+ */
+void AArch64AsmEmitter::EmitCompareAndSwapInt(Emitter &emitter, Insn &insn) const {
+  /* MOP_compare_and_swapI and MOP_compare_and_swapL have 8 operands */
+  ASSERT(insn.GetOperandSize() > kInsnEighthOpnd, "ensure the operands number");
+  const MOperator mOp = insn.GetMachineOpcode();
+  const AArch64MD *md = &AArch64CG::kMd[mOp];
+  Operand *temp0 = &insn.GetOperand(kInsnSecondOpnd);
+  Operand *temp1 = &insn.GetOperand(kInsnThirdOpnd);
+  Operand *obj = &insn.GetOperand(kInsnFourthOpnd);
+  Operand *offset = &insn.GetOperand(kInsnFifthOpnd);
+  A64OpndEmitVisitor visitor(emitter, nullptr);
+  /* add xt, x1, x2 */
+  emitter.Emit("\tadd\t");
+  temp0->Accept(visitor);
+  emitter.Emit(", ");
+  obj->Accept(visitor);
+  emitter.Emit(", ");
+  offset->Accept(visitor);
+  emitter.Emit("\n");
+  Operand *label1 = &insn.GetOperand(kInsnEighthOpnd);
+  /* label1: */
+  label1->Accept(visitor);
+  emitter.Emit(":\n");
+  /* ldaxr ws, [xt] */
+  emitter.Emit("\tldaxr\t");
+  temp1->Accept(visitor);
+  emitter.Emit(", [");
+  temp0->Accept(visitor);
+  emitter.Emit("]\n");
+  Operand *expectedValue = &insn.GetOperand(kInsnSixthOpnd);
+  OpndProp *expectedValueProp = md->operand[kInsnSixthOpnd];
+  /* cmp ws, w3 */
+  emitter.Emit("\tcmp\t");
+  temp1->Accept(visitor);
+  emitter.Emit(", ");
+  A64OpndEmitVisitor visitorExpect(emitter, expectedValueProp);
+  expectedValue->Accept(visitorExpect);
+  emitter.Emit("\n");
+  constexpr uint32 kInsnNinethOpnd = 8;
+  Operand *label2 = &insn.GetOperand(kInsnNinethOpnd);
+  /* b.ne label2 */
+  emitter.Emit("\tb.ne\t");
+  label2->Accept(visitor);
+  emitter.Emit("\n");
+  Operand *newValue = &insn.GetOperand(kInsnSeventhOpnd);
+  /* stlxr ws, w4, [xt] */
+  emitter.Emit("\tstlxr\t");
+  emitter.Emit(AArch64CG::intRegNames[AArch64CG::kR32List][static_cast<RegOperand*>(temp1)->GetRegisterNumber()]);
+  emitter.Emit(", ");
+  newValue->Accept(visitor);
+
emitter.Emit(", ["); + temp0->Accept(visitor); + emitter.Emit("]\n"); + /* cbnz ws, label1 */ + emitter.Emit("\tcbnz\t"); + emitter.Emit(AArch64CG::intRegNames[AArch64CG::kR32List][static_cast(temp1)->GetRegisterNumber()]); + emitter.Emit(", "); + label1->Accept(visitor); + emitter.Emit("\n"); + /* label2: */ + label2->Accept(visitor); + emitter.Emit(":\n"); + Operand *retVal = &insn.GetOperand(kInsnFirstOpnd); + /* cset x0, eq */ + emitter.Emit("\tcset\t"); + retVal->Accept(visitor); + emitter.Emit(", EQ\n"); +} + +void AArch64AsmEmitter::EmitCTlsDescRel(Emitter &emitter, Insn &insn) const { + const AArch64MD *md = &AArch64CG::kMd[MOP_tls_desc_rel]; + Operand *result = &insn.GetOperand(kInsnFirstOpnd); + Operand *src = &insn.GetOperand(kInsnSecondOpnd); + Operand *symbol = &insn.GetOperand(kInsnThirdOpnd); + auto stImmOpnd = static_cast(symbol); + A64OpndEmitVisitor resultVisitor(emitter, md->operand[0]); + A64OpndEmitVisitor srcVisitor(emitter, md->operand[1]); + emitter.Emit("\t").Emit("add").Emit("\t"); + result->Accept(resultVisitor); + emitter.Emit(", "); + src->Accept(srcVisitor); + emitter.Emit(", #:tprel_hi12:").Emit(stImmOpnd->GetName()).Emit(", lsl #12\n"); + emitter.Emit("\t").Emit("add").Emit("\t"); + result->Accept(resultVisitor); + emitter.Emit(", "); + result->Accept(resultVisitor); + emitter.Emit(", #:tprel_lo12_nc:").Emit(stImmOpnd->GetName()).Emit("\n"); +} +void AArch64AsmEmitter::EmitCTlsDescCall(Emitter &emitter, Insn &insn) const { + const AArch64MD *md = &AArch64CG::kMd[MOP_tls_desc_call]; + Operand *func = &insn.GetOperand(kInsnFirstOpnd); + Operand *symbol = &insn.GetOperand(kInsnSecondOpnd); + OpndProp *prop = md->operand[0]; + auto stImmOpnd = static_cast(symbol); + A64OpndEmitVisitor funcVisitor(emitter, prop); + /* .tlsdesccall */ + emitter.Emit("\t").Emit(".tlsdesccall").Emit("\t").Emit(stImmOpnd->GetName()).Emit("\n"); + /* blr xd*/ + emitter.Emit("\t").Emit("blr").Emit("\t"); + func->Accept(funcVisitor); + emitter.Emit("\n"); +} +void AArch64AsmEmitter::EmitSyncLockTestSet(Emitter &emitter, Insn &insn) const { + const AArch64MD *md = &AArch64CG::kMd[insn.GetMachineOpcode()]; + auto *result = &insn.GetOperand(kInsnFirstOpnd); + auto *temp = &insn.GetOperand(kInsnSecondOpnd); + auto *addr = &insn.GetOperand(kInsnThirdOpnd); + auto *value = &insn.GetOperand(kInsnFourthOpnd); + auto *label = &insn.GetOperand(kInsnFifthOpnd); + A64OpndEmitVisitor resultVisitor(emitter, md->operand[kInsnFirstOpnd]); + A64OpndEmitVisitor tempVisitor(emitter, md->operand[kInsnSecondOpnd]); + A64OpndEmitVisitor addrVisitor(emitter, md->operand[kInsnThirdOpnd]); + A64OpndEmitVisitor valueVisitor(emitter, md->operand[kInsnFourthOpnd]); + A64OpndEmitVisitor labelVisitor(emitter, md->operand[kInsnFifthOpnd]); + /* label: */ + label->Accept(labelVisitor); + emitter.Emit(":\n"); + /* ldxr x0, [x2] */ + emitter.Emit("\t").Emit("ldxr").Emit("\t"); + result->Accept(resultVisitor); + emitter.Emit(", ["); + addr->Accept(addrVisitor); + emitter.Emit("]\n"); + /* stxr w1, x3, [x2]*/ + emitter.Emit("\t").Emit("stxr").Emit("\t"); + temp->Accept(tempVisitor); + emitter.Emit(", "); + value->Accept(valueVisitor); + emitter.Emit(", ["); + addr->Accept(addrVisitor); + emitter.Emit("]\n"); + /* cbnz w1, label */ + emitter.Emit("\t").Emit("cbnz").Emit("\t"); + temp->Accept(tempVisitor); + emitter.Emit(", "); + label->Accept(labelVisitor); + emitter.Emit("\n"); + /* dmb ish*/ + emitter.Emit("\t").Emit("dmb").Emit("\t").Emit("ish").Emit("\n"); +} + +void 
AArch64AsmEmitter::EmitCheckThrowPendingException(Emitter &emitter, Insn &insn) const { + /* + * mrs x16, TPIDR_EL0 + * ldr x16, [x16, #64] + * ldr x16, [x16, #8] + * cbz x16, .lnoexception + * bl MCC_ThrowPendingException + * .lnoexception: + */ + emitter.Emit("\t").Emit("mrs").Emit("\tx16, TPIDR_EL0"); + emitter.Emit("\n"); + emitter.Emit("\t").Emit("ldr").Emit("\tx16, [x16, #64]"); + emitter.Emit("\n"); + emitter.Emit("\t").Emit("ldr").Emit("\tx16, [x16, #8]"); + emitter.Emit("\n"); + emitter.Emit("\t").Emit("cbz").Emit("\tx16, .lnoeh.").Emit(GetCG()->GetCurCGFunc()->GetName()); + emitter.Emit("\n"); + emitter.Emit("\t").Emit("bl").Emit("\tMCC_ThrowPendingException"); + emitter.Emit("\n"); + emitter.Emit(".lnoeh.").Emit(GetCG()->GetCurCGFunc()->GetName()).Emit(":"); + emitter.Emit("\n"); +} + +void AArch64AsmEmitter::EmitLazyBindingRoutine(Emitter &emitter, Insn &insn) const { + /* ldr xzr, [xs] */ + const AArch64MD *md = &AArch64CG::kMd[MOP_adrp_ldr]; + + Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd); + OpndProp *prop0 = md->operand[0]; + + /* emit "ldr xzr,[xs]" */ +#ifdef USE_32BIT_REF + emitter.Emit("\t").Emit("ldr").Emit("\twzr, ["); +#else + emitter.Emit("\t").Emit("ldr").Emit("\txzr, ["); +#endif /* USE_32BIT_REF */ + opnd0->Emit(emitter, prop0); + emitter.Emit("]"); + emitter.Emit("\t// Lazy binding\n"); +} + +void AArch64AsmEmitter::PrepareVectorOperand(RegOperand *regOpnd, uint32 &compositeOpnds, Insn &insn) const { + VectorRegSpec* vecSpec = static_cast(insn).GetAndRemoveRegSpecFromList(); + compositeOpnds = vecSpec->compositeOpnds ? vecSpec->compositeOpnds : compositeOpnds; + regOpnd->SetVecLanePosition(vecSpec->vecLane); + switch (insn.GetMachineOpcode()) { + case MOP_vanduuu: + case MOP_vxoruuu: + case MOP_voruuu: + case MOP_vnotuu: + case MOP_vextuuui: { + regOpnd->SetVecLaneSize(k8ByteSize); + regOpnd->SetVecElementSize(k8BitSize); + break; + } + case MOP_vandvvv: + case MOP_vxorvvv: + case MOP_vorvvv: + case MOP_vnotvv: + case MOP_vextvvvi: { + regOpnd->SetVecLaneSize(k16ByteSize); + regOpnd->SetVecElementSize(k8BitSize); + break; + } + default: { + regOpnd->SetVecLaneSize(vecSpec->vecLaneMax); + regOpnd->SetVecElementSize(vecSpec->vecElementSize); + break; + } + } +} + +struct CfiDescr { + const std::string name; + uint32 opndCount; + /* create 3 OperandType array to store cfi instruction's operand type */ + std::array opndTypes; +}; + +static CfiDescr cfiDescrTable[cfi::kOpCfiLast + 1] = { +#define CFI_DEFINE(k, sub, n, o0, o1, o2) \ + { ".cfi_" #k, n, { Operand::kOpd##o0, Operand::kOpd##o1, Operand::kOpd##o2 } }, + #define ARM_DIRECTIVES_DEFINE(k, sub, n, o0, o1, o2) \ + { "." 
#k, n, { Operand::kOpd##o0, Operand::kOpd##o1, Operand::kOpd##o2 } },
+#include "cfi.def"
+#undef CFI_DEFINE
+#undef ARM_DIRECTIVES_DEFINE
+  { ".cfi_undef", 0, { Operand::kOpdUndef, Operand::kOpdUndef, Operand::kOpdUndef } }
+};
+
+void AArch64AsmEmitter::EmitAArch64CfiInsn(Emitter &emitter, Insn &insn) {
+  MOperator mOp = insn.GetMachineOpcode();
+  CfiDescr &cfiDescr = cfiDescrTable[mOp];
+  emitter.Emit("\t").Emit(cfiDescr.name);
+  for (uint32 i = 0; i < cfiDescr.opndCount; ++i) {
+    emitter.Emit(" ");
+    Operand &curOperand = insn.GetOperand(i);
+    curOperand.Emit(emitter, nullptr);
+    if (i < (cfiDescr.opndCount - 1)) {
+      emitter.Emit(",");
+    }
+  }
+  emitter.Emit("\n");
+}
+
+struct DbgDescr {
+  const std::string name;
+  uint32 opndCount;
+  /* create 3 OperandType array to store dbg instruction's operand type */
+  std::array<Operand::OperandType, 3> opndTypes;
+};
+
+static DbgDescr dbgDescrTable[mpldbg::kOpDbgLast + 1] = {
+#define DBG_DEFINE(k, sub, n, o0, o1, o2) \
+  { #k, n, { Operand::kOpd##o0, Operand::kOpd##o1, Operand::kOpd##o2 } },
+#include "dbg.def"
+#undef DBG_DEFINE
+  { "undef", 0, { Operand::kOpdUndef, Operand::kOpdUndef, Operand::kOpdUndef } }
+};
+
+void AArch64AsmEmitter::EmitAArch64DbgInsn(Emitter &emitter, Insn &insn) {
+  MOperator mOp = insn.GetMachineOpcode();
+  DbgDescr &dbgDescr = dbgDescrTable[mOp];
+  emitter.Emit("\t.").Emit(dbgDescr.name);
+  for (uint32 i = 0; i < dbgDescr.opndCount; ++i) {
+    emitter.Emit(" ");
+    Operand &curOperand = insn.GetOperand(i);
+    curOperand.Emit(emitter, nullptr);
+  }
+  emitter.Emit("\n");
+}
+
+bool AArch64AsmEmitter::CheckInsnRefField(Insn &insn, size_t opndIndex) const {
+  if (insn.IsAccessRefField() && static_cast<AArch64Insn&>(insn).AccessMem()) {
+    Operand &opnd0 = insn.GetOperand(opndIndex);
+    if (opnd0.IsRegister()) {
+      static_cast<RegOperand&>(opnd0).SetRefField(true);
+      return true;
+    }
+  }
+  return false;
+}
+
 /* new phase manager */
 bool CgEmission::PhaseRun(maplebe::CGFunc &f) {
   Emitter *emitter = f.GetCG()->GetEmitter();
diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_fixshortbranch.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_fixshortbranch.cpp
index 71284d54f6e4528e2bcef0418dc8c06cffc6602f..12ba9a209c096b7e31f632d62410b7c6ad86bc54 100644
--- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_fixshortbranch.cpp
+++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_fixshortbranch.cpp
@@ -91,7 +91,7 @@ void AArch64FixShortBranch::FixShortBranches() {
       if (aarch64CGFunc->DistanceCheck(*bb, label.GetLabelIndex(), insn->GetId())) {
         continue;
       }
-      auto &reg = static_cast<AArch64RegOperand&>(insn->GetOperand(kInsnFirstOpnd));
+      auto &reg = static_cast<RegOperand&>(insn->GetOperand(kInsnFirstOpnd));
       ImmOperand &bitSize = aarch64CGFunc->CreateImmOperand(1, k8BitSize, false);
       auto &bitPos = static_cast<ImmOperand&>(insn->GetOperand(kInsnSecondOpnd));
       MOperator ubfxOp = MOP_undef;
@@ -116,7 +116,7 @@ void AArch64FixShortBranch::FixShortBranches() {
         default:
           break;
       }
-      AArch64RegOperand &tmp = aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(
+      RegOperand &tmp = aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(
          R16, (ubfxOp == MOP_wubfxrri5i5) ?
k32BitSize : k64BitSize, kRegTyInt); (void)bb->InsertInsnAfter(*insn, cg->BuildInstruction(cbOp, tmp, label)); (void)bb->InsertInsnAfter(*insn, cg->BuildInstruction(ubfxOp, tmp, reg, bitPos, bitSize)); diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_global.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_global.cpp index 72125cb5448259dbf2525e8f54f9b84872e58f38..a719bd6bf1529f0ff177af26f22617412874569c 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_global.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_global.cpp @@ -59,10 +59,19 @@ ExtendShiftOptPattern::SuffixType doOptimize[kExtenAddShift][kExtenAddShift] = { ExtendShiftOptPattern::kNoSuffix, ExtendShiftOptPattern::kExten } }; +static bool IsZeroRegister(const Operand &opnd) { + if (!opnd.IsRegister()) { + return false; + } + const RegOperand *regOpnd = static_cast(&opnd); + return regOpnd->GetRegisterNumber() == RZR; +} + void AArch64GlobalOpt::Run() { OptimizeManager optManager(cgFunc); bool hasSpillBarrier = (cgFunc.NumBBs() > kMaxBBNum) || (cgFunc.GetRD()->GetMaxInsnNO() > kMaxInsnNum); if (cgFunc.IsAfterRegAlloc()) { + optManager.Optimize(); optManager.Optimize(); return; } @@ -85,7 +94,7 @@ void AArch64GlobalOpt::Run() { bool OptimizePattern::OpndDefByZero(Insn &insn, int32 useIdx) const { ASSERT(insn.GetOperand(useIdx).IsRegister(), "the used Operand must be Register"); /* Zero Register don't need be defined */ - if (insn.GetOperand(static_cast(useIdx)).IsZeroRegister()) { + if (IsZeroRegister(insn.GetOperand(static_cast(useIdx)))) { return true; } @@ -105,7 +114,7 @@ bool OptimizePattern::OpndDefByZero(Insn &insn, int32 useIdx) const { bool OptimizePattern::OpndDefByOne(Insn &insn, int32 useIdx) const { ASSERT(insn.GetOperand(useIdx).IsRegister(), "the used Operand must be Register"); /* Zero Register don't need be defined */ - if (insn.GetOperand(static_cast(useIdx)).IsZeroRegister()) { + if (IsZeroRegister(insn.GetOperand(static_cast(useIdx)))) { return false; } InsnSet defInsns = cgFunc.GetRD()->FindDefForRegOpnd(insn, useIdx); @@ -120,9 +129,9 @@ bool OptimizePattern::OpndDefByOne(Insn &insn, int32 useIdx) const { return true; } -/* if used Operand in insn is defined by one valid bit in all define insn, return true */ + /* if used Operand in insn is defined by one valid bit in all define insn, return true */ bool OptimizePattern::OpndDefByOneOrZero(Insn &insn, int32 useIdx) const { - if (insn.GetOperand(static_cast(useIdx)).IsZeroRegister()) { + if (IsZeroRegister(insn.GetOperand(static_cast(useIdx)))) { return true; } @@ -176,7 +185,7 @@ bool OptimizePattern::InsnDefZero(const Insn &insn) { } case MOP_xmovrr: case MOP_wmovrr: - return insn.GetOperand(kInsnSecondOpnd).IsZeroRegister(); + return IsZeroRegister(insn.GetOperand(kInsnSecondOpnd)); default: return false; } @@ -203,7 +212,7 @@ bool OptimizePattern::InsnDefOneOrZero(const Insn &insn) { } case MOP_xmovrr: case MOP_wmovrr: { - return insn.GetOperand(kInsnSecondOpnd).IsZeroRegister(); + return IsZeroRegister(insn.GetOperand(kInsnSecondOpnd)); } case MOP_wlsrrri5: case MOP_xlsrrri6: { @@ -224,7 +233,7 @@ bool OptimizePattern::InsnDefOneOrZero(const Insn &insn) { } void ReplaceAsmListReg(const Insn *insn, uint32 index, uint32 regNO, Operand *newOpnd) { - MapleList *list = &static_cast(insn->GetOperand(index)).GetOperands(); + MapleList *list = &static_cast(insn->GetOperand(index)).GetOperands(); int32 size = static_cast(list->size()); for (int i = 0; i < size; ++i) { RegOperand *opnd = static_cast(*(list->begin())); @@ -247,7 +256,7 @@ void 
OptimizePattern::ReplaceAllUsedOpndWithNewOpnd(const InsnSet &useInsnSet, u uint32 opndNum = useInsn->GetOperandSize(); for (uint32 i = 0; i < opndNum; ++i) { Operand &opnd = useInsn->GetOperand(i); - AArch64OpndProp *regProp = static_cast(md->operand[i]); + OpndProp *regProp = md->operand[i]; if (!regProp->IsRegUse() && !opnd.IsMemoryAccessOperand()) { continue; } @@ -258,7 +267,7 @@ void OptimizePattern::ReplaceAllUsedOpndWithNewOpnd(const InsnSet &useInsnSet, u cgFunc.GetRD()->InitGenUse(*useInsn->GetBB(), false); } } else if (opnd.IsMemoryAccessOperand()) { - AArch64MemOperand &memOpnd = static_cast(opnd); + MemOperand &memOpnd = static_cast(opnd); RegOperand *base = memOpnd.GetBaseRegister(); RegOperand *index = memOpnd.GetIndexRegister(); MemOperand *newMem = nullptr; @@ -276,7 +285,7 @@ void OptimizePattern::ReplaceAllUsedOpndWithNewOpnd(const InsnSet &useInsnSet, u CHECK_FATAL(newMem != nullptr, "null ptr check"); newMem->SetIndexRegister(*static_cast(&newOpnd)); if (static_cast(newOpnd).GetValidBitsNum() != index->GetValidBitsNum()) { - static_cast(newMem)->UpdateExtend(AArch64MemOperand::kSignExtend); + newMem->UpdateExtend(MemOperand::kSignExtend); } useInsn->SetOperand(i, *newMem); if (updateInfo) { @@ -305,7 +314,7 @@ bool ForwardPropPattern::CheckCondition(Insn &insn) { RegOperand &secondRegOpnd = static_cast(secondOpnd); uint32 firstRegNO = firstRegOpnd.GetRegisterNumber(); uint32 secondRegNO = secondRegOpnd.GetRegisterNumber(); - if (firstRegOpnd.IsZeroRegister() || !firstRegOpnd.IsVirtualRegister() || !secondRegOpnd.IsVirtualRegister()) { + if (IsZeroRegister(firstRegOpnd) || !firstRegOpnd.IsVirtualRegister() || !secondRegOpnd.IsVirtualRegister()) { return false; } firstRegUseInsnSet = cgFunc.GetRD()->FindUseForRegOpnd(insn, firstRegNO, true); @@ -322,6 +331,12 @@ bool ForwardPropPattern::CheckCondition(Insn &insn) { toDoOpt = false; break; } + /* part defined */ + if ((useInsn->GetMachineOpcode() == MOP_xmovkri16) || + (useInsn->GetMachineOpcode() == MOP_wmovkri16)) { + toDoOpt = false; + break; + } if (useInsn->GetMachineOpcode() == MOP_asm) { toDoOpt = false; break; @@ -330,6 +345,9 @@ bool ForwardPropPattern::CheckCondition(Insn &insn) { if (defInsnSet.size() > 1) { toDoOpt = false; break; + } else if (defInsnSet.size() == 1 && *defInsnSet.begin() != &insn) { + toDoOpt = false; + break; } } return toDoOpt; @@ -350,7 +368,7 @@ void ForwardPropPattern::Optimize(Insn &insn) { uint32 opndNum = useInsn->GetOperandSize(); for (uint32 i = 0; i < opndNum; ++i) { Operand &opnd = useInsn->GetOperand(i); - const AArch64OpndProp *regProp = md->GetOperand(i); + const OpndProp *regProp = md->GetOperand(i); if (!regProp->IsRegUse() && !opnd.IsMemoryAccessOperand()) { continue; } @@ -364,7 +382,7 @@ void ForwardPropPattern::Optimize(Insn &insn) { } cgFunc.GetRD()->InitGenUse(*useInsn->GetBB(), false); } else if (opnd.IsMemoryAccessOperand()) { - AArch64MemOperand &memOpnd = static_cast(opnd); + MemOperand &memOpnd = static_cast(opnd); RegOperand *base = memOpnd.GetBaseRegister(); RegOperand *index = memOpnd.GetIndexRegister(); MemOperand *newMem = nullptr; @@ -380,7 +398,7 @@ void ForwardPropPattern::Optimize(Insn &insn) { CHECK_FATAL(newMem != nullptr, "null ptr check"); newMem->SetIndexRegister(static_cast(secondOpnd)); if (static_cast(secondOpnd).GetValidBitsNum() != index->GetValidBitsNum()) { - static_cast(newMem)->UpdateExtend(AArch64MemOperand::kSignExtend); + newMem->UpdateExtend(MemOperand::kSignExtend); } useInsn->SetOperand(i, *newMem); 
cgFunc.GetRD()->InitGenUse(*useInsn->GetBB(), false); @@ -438,7 +456,7 @@ void ForwardPropPattern::Run() { Init(); if (!CheckCondition(*insn)) { if (insn->GetMachineOpcode() == MOP_xmovrr_uxtw) { - insn->SetMOperator(MOP_xuxtw64); + insn->SetMOP(MOP_xuxtw64); } continue; } @@ -475,7 +493,7 @@ bool BackPropPattern::CheckAndGetOpnd(const Insn &insn) { } firstRegOpnd = &static_cast(firstOpnd); secondRegOpnd = &static_cast(secondOpnd); - if (firstRegOpnd->IsZeroRegister()) { + if (IsZeroRegister(*firstRegOpnd)) { return false; } if (!cgFunc.IsAfterRegAlloc() && (!secondRegOpnd->IsVirtualRegister() || !firstRegOpnd->IsVirtualRegister())) { @@ -530,7 +548,7 @@ bool BackPropPattern::CheckSrcOpndDefAndUseInsns(Insn &insn) { return false; } if (defInsnForSecondOpnd->IsStore() || defInsnForSecondOpnd->IsLoad()) { - auto *memOpnd = static_cast(defInsnForSecondOpnd->GetMemOpnd()); + auto *memOpnd = static_cast(defInsnForSecondOpnd->GetMemOpnd()); if (memOpnd != nullptr && !memOpnd->IsIntactIndexed()) { return false; } @@ -545,6 +563,13 @@ bool BackPropPattern::CheckSrcOpndDefAndUseInsns(Insn &insn) { /* use later before killed. */ return false; } + if (cgFunc.IsAfterRegAlloc()) { + for (auto *usePoint : srcOpndUseInsnSet) { + if (usePoint->IsCall()) { + return false; + } + } + } return true; } @@ -575,7 +600,7 @@ bool BackPropPattern::CheckSrcOpndDefAndUseInsnsGlobal(Insn &insn) { } if (defInsnForSecondOpnd->IsStore() || defInsnForSecondOpnd->IsLoad()) { - auto *memOpnd = static_cast(defInsnForSecondOpnd->GetMemOpnd()); + auto *memOpnd = static_cast(defInsnForSecondOpnd->GetMemOpnd()); if (memOpnd != nullptr && !memOpnd->IsIntactIndexed()) { return false; } @@ -604,7 +629,7 @@ bool BackPropPattern::CheckPredefineInsn(Insn &insn) { bool BackPropPattern::CheckReplacedUseInsn(Insn &insn) { for (auto *useInsn : srcOpndUseInsnSet) { if (useInsn->GetMemOpnd() != nullptr) { - auto *a64MemOpnd = static_cast(useInsn->GetMemOpnd()); + auto *a64MemOpnd = static_cast(useInsn->GetMemOpnd()); if (!a64MemOpnd->IsIntactIndexed() ){ if (a64MemOpnd->GetBaseRegister() != nullptr && a64MemOpnd->GetBaseRegister()->GetRegisterNumber() == secondRegNO) { @@ -635,8 +660,8 @@ bool BackPropPattern::CheckReplacedUseInsn(Insn &insn) { return true; }; /* ensure that the use insns to be replaced is defined by defInsnForSecondOpnd only */ - if (useInsn->IsMemAccess() && static_cast( - static_cast(useInsn)->GetMemOpnd())->GetIndexOpt() != AArch64MemOperand::kIntact) { + if (useInsn->IsMemAccess() && static_cast( + static_cast(useInsn)->GetMemOpnd())->GetIndexOpt() != MemOperand::kIntact) { return false; } InsnSet defInsnVecOfSrcOpnd = cgFunc.GetRD()->FindDefForRegOpnd(*useInsn, secondRegNO, true); @@ -723,7 +748,7 @@ void BackPropPattern::Optimize(Insn &insn) { uint32 opndNum = defInsnForSecondOpnd->GetOperandSize(); for (uint32 i = 0; i < opndNum; ++i) { Operand &opnd = defInsnForSecondOpnd->GetOperand(i); - AArch64OpndProp *regProp = static_cast(md->operand[i]); + OpndProp *regProp = md->operand[i]; if (!regProp->IsRegDef() && !opnd.IsMemoryAccessOperand()) { continue; } @@ -741,9 +766,9 @@ void BackPropPattern::Optimize(Insn &insn) { defInsnForSecondOpnd->SetOperand(i, firstOpnd); cgFunc.GetRD()->UpdateInOut(*defInsnForSecondOpnd->GetBB()); } else if (opnd.IsMemoryAccessOperand()) { - AArch64MemOperand &memOpnd = static_cast(opnd); + MemOperand &memOpnd = static_cast(opnd); RegOperand *base = memOpnd.GetBaseRegister(); - if (base != nullptr && memOpnd.GetAddrMode() == AArch64MemOperand::kAddrModeBOi && + if (base != nullptr && 
memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi &&
         (memOpnd.IsPostIndexed() || memOpnd.IsPreIndexed()) && base->GetRegisterNumber() == secondRegNO) {
       MemOperand *newMem = static_cast<MemOperand*>(opnd.Clone(*cgFunc.GetMemoryPool()));
       CHECK_FATAL(newMem != nullptr, "null ptr check");
@@ -1141,7 +1166,7 @@ bool LocalVarSaveInsnPattern::CheckSecondInsn() {
   }
   /* check memOperand is stack memOperand, and x0 is stored in localref var region */
   secondInsnDestOpnd = &(secondInsn->GetOperand(kInsnSecondOpnd));
-  AArch64MemOperand *secondInsnDestMem = static_cast<AArch64MemOperand*>(secondInsnDestOpnd);
+  MemOperand *secondInsnDestMem = static_cast<MemOperand*>(secondInsnDestOpnd);
   RegOperand *baseReg = secondInsnDestMem->GetBaseRegister();
   RegOperand *indexReg = secondInsnDestMem->GetIndexRegister();
   if ((baseReg == nullptr) || !(cgFunc.IsFrameReg(*baseReg)) || (indexReg != nullptr)) {
@@ -1208,7 +1233,7 @@ bool LocalVarSaveInsnPattern::CheckCondition(Insn &firstInsn) {
   }
   ASSERT((*(defInsnSet.begin()))->GetId() == firstInsn.GetId(), "useInsn has only one define Insn : firstInsn");
   /* check whether the stack mem is changed or not */
-  AArch64MemOperand *secondInsnDestMem = static_cast<AArch64MemOperand*>(secondInsnDestOpnd);
+  MemOperand *secondInsnDestMem = static_cast<MemOperand*>(secondInsnDestOpnd);
   int64 memOffset = secondInsnDestMem->GetOffsetImmediate()->GetOffsetValue();
   InsnSet memDefInsnSet = cgFunc.GetRD()->FindDefForMemOpnd(*useInsn, memOffset, true);
   if (memDefInsnSet.size() != 1) {
@@ -1441,16 +1466,31 @@ void ExtendShiftOptPattern::SelectExtendOrShift(const Insn &def) {
 
 /* first use must match SelectExtendOrShift */
 bool ExtendShiftOptPattern::CheckDefUseInfo(Insn &use, uint32 size) {
-  auto &regOperand = static_cast<AArch64RegOperand&>(defInsn->GetOperand(kInsnFirstOpnd));
+  auto &regOperand = static_cast<RegOperand&>(defInsn->GetOperand(kInsnFirstOpnd));
   Operand &defSrcOpnd = defInsn->GetOperand(kInsnSecondOpnd);
   CHECK_FATAL(defSrcOpnd.IsRegister(), "defSrcOpnd must be register!");
-  auto &regDefSrc = static_cast<AArch64RegOperand&>(defSrcOpnd);
+  auto &regDefSrc = static_cast<RegOperand&>(defSrcOpnd);
   if (regDefSrc.IsPhysicalRegister()) {
     return false;
   }
-  /* has Implict cvt */
+  /*
+   * has an implicit cvt
+   *
+   * avoid cases like the following:
+   *                              lsr x2, x2, #8
+   *   ubfx w2, x2, #0, #32       lsr x2, x2, #8
+   *   eor  w0, w0, w2     ===>   eor w0, w0, x2   ==\=>   eor w0, w0, w2, LSR #8
+   *
+   * a right shift truncates and yields the wrong value;
+   * a left shift does not matter
+   */
+  auto &useDefOpnd = static_cast<RegOperand&>(use.GetOperand(kInsnFirstOpnd));
   if ((shiftOp != BitShiftOperand::kUndef || extendOp != ExtendShiftOperand::kUndef) &&
-      (regDefSrc.GetSize() > regOperand.GetSize())) {
+      (regDefSrc.GetSize() > regOperand.GetSize() || useDefOpnd.GetSize() != size)) {
+    return false;
+  }
+  if ((shiftOp == BitShiftOperand::kLSR || shiftOp == BitShiftOperand::kASR) &&
+      (defSrcOpnd.GetSize() > size)) {
     return false;
   }
   regno_t defSrcRegNo = regDefSrc.GetRegisterNumber();
@@ -1617,7 +1657,7 @@ void ExtendShiftOptPattern::Optimize(Insn &insn) {
     amount = lastExtendOpnd.GetShiftAmount();
   }
   if (shiftOp != BitShiftOperand::kUndef) {
-    AArch64ImmOperand &immOpnd = static_cast<AArch64ImmOperand&>(defInsn->GetOperand(kInsnThirdOpnd));
+    ImmOperand &immOpnd = static_cast<ImmOperand&>(defInsn->GetOperand(kInsnThirdOpnd));
     offset = static_cast<uint32>(immOpnd.GetValue());
   }
   amount += offset;
@@ -1645,7 +1685,7 @@ bool ExtendShiftOptPattern::CheckCondition(Insn &insn) {
   if ((exMOpType == kExUndef) && (lsMOpType == kLsUndef)) {
    return false;
  }
-  AArch64RegOperand &regOperand = static_cast<AArch64RegOperand&>(insn.GetOperand(replaceIdx));
+  RegOperand &regOperand = static_cast<RegOperand&>(insn.GetOperand(replaceIdx));
   if (regOperand.IsPhysicalRegister()) {
     return false;
false; } @@ -1718,7 +1758,7 @@ bool ExtenToMovPattern::CheckHideUxtw(const Insn &insn, regno_t regno) { uint32 optSize = insn.GetOperandSize(); for (uint32 i = 0; i < optSize; ++i) { if (regno == static_cast(insn.GetOperand(i)).GetRegisterNumber()) { - AArch64OpndProp *curOpndProp = md->GetOperand(static_cast(i)); + OpndProp *curOpndProp = md->GetOperand(static_cast(i)); if (curOpndProp->IsDef() && curOpndProp->GetSize() == k32BitSize) { return true; } @@ -1757,7 +1797,7 @@ bool ExtenToMovPattern::CheckSrcReg(Insn &insn, regno_t srcRegNo, uint32 validNu case MOP_wiorrri12: case MOP_weorrri12: { /* check immVal if mop is OR */ - AArch64ImmOperand &imm = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); + ImmOperand &imm = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); auto bitNum = static_cast(imm.GetValue()); if ((bitNum >> validNum) != 0) { return false; @@ -1847,7 +1887,7 @@ void ExtenToMovPattern::Init() { } void ExtenToMovPattern::Optimize(Insn &insn) { - insn.SetMOperator(replaceMop); + insn.SetMOP(replaceMop); } void SameDefPattern::Run() { @@ -1861,7 +1901,10 @@ void SameDefPattern::Run() { } } -void SameDefPattern::Init() {} +void SameDefPattern::Init() { + currInsn = nullptr; + sameInsn = nullptr; +} bool SameDefPattern::CheckCondition(Insn &insn) { MOperator mOp = insn.GetMachineOpcode(); @@ -1871,7 +1914,7 @@ bool SameDefPattern::CheckCondition(Insn &insn) { if (insn.GetBB()->HasCall()) { return false; } - return (mOp == MOP_wcmprr) || (mOp == MOP_xcmprr); + return (mOp == MOP_wcmprr) || (mOp == MOP_xcmprr) || (mOp == MOP_xwcmprre) || (mOp == MOP_xcmprrs); } void SameDefPattern::Optimize(Insn &insn) { @@ -1883,7 +1926,9 @@ void SameDefPattern::Optimize(Insn &insn) { if (sameDefInsn == nullptr) { return; } - if (!IsSameDef(insn, *sameDefInsn)) { + currInsn = &insn; + sameInsn = sameDefInsn; + if (!IsSameDef()) { return; } if (GLOBAL_DUMP) { @@ -1896,20 +1941,28 @@ void SameDefPattern::Optimize(Insn &insn) { insn.GetBB()->RemoveInsn(insn); } -bool SameDefPattern::IsSameDef(Insn &currInsn, Insn &sameInsn) { - if (!CheckCondition(sameInsn)) { +bool SameDefPattern::IsSameDef() { + if (!CheckCondition(*sameInsn)) { return false; } - if (&currInsn == &sameInsn) { + if (currInsn == sameInsn) { return false; } - if (currInsn.GetMachineOpcode() != sameInsn.GetMachineOpcode()) { + if (currInsn->GetMachineOpcode() != sameInsn->GetMachineOpcode()) { return false; } - for (uint32 i = k1BitSize; i < currInsn.GetOperandSize(); ++i) { - Operand &opnd0 = currInsn.GetOperand(i); - Operand &opnd1 = sameInsn.GetOperand(i); - CHECK_FATAL(opnd0.IsRegister(), "must be RegOperand!"); + for (uint32 i = k1BitSize; i < currInsn->GetOperandSize(); ++i) { + Operand &opnd0 = currInsn->GetOperand(i); + Operand &opnd1 = sameInsn->GetOperand(i); + if (!IsSameOperand(opnd0, opnd1)) { + return false; + } + } + return true; +} + +bool SameDefPattern::IsSameOperand(Operand &opnd0, Operand &opnd1) { + if (opnd0.IsRegister()) { CHECK_FATAL(opnd1.IsRegister(), "must be RegOperand!"); RegOperand ®Opnd0 = static_cast(opnd0); RegOperand ®Opnd1 = static_cast(opnd1); @@ -1918,25 +1971,41 @@ bool SameDefPattern::IsSameDef(Insn &currInsn, Insn &sameInsn) { } regno_t regNo = regOpnd0.GetRegisterNumber(); /* src reg not redefined between sameInsn and currInsn */ - if (SrcRegIsRedefined(currInsn, sameInsn, regNo)) { + if (SrcRegIsRedefined(regNo)) { + return false; + } + } else if (opnd0.IsOpdShift()) { + CHECK_FATAL(opnd1.IsOpdShift(), "must be ShiftOperand!"); + BitShiftOperand &shiftOpnd0 = static_cast(opnd0); + 
BitShiftOperand &shiftOpnd1 = static_cast(opnd1); + if (shiftOpnd0.GetShiftAmount() != shiftOpnd1.GetShiftAmount()) { + return false; + } + } else if (opnd0.IsOpdExtend()) { + CHECK_FATAL(opnd1.IsOpdExtend(), "must be ExtendOperand!"); + ExtendShiftOperand &extendOpnd0 = static_cast(opnd0); + ExtendShiftOperand &extendOpnd1 = static_cast(opnd1); + if (extendOpnd0.GetShiftAmount() != extendOpnd1.GetShiftAmount()) { return false; } + } else { + return false; } return true; } -bool SameDefPattern::SrcRegIsRedefined(Insn &currInsn, Insn &sameInsn, regno_t regNo) { +bool SameDefPattern::SrcRegIsRedefined(regno_t regNo) { AArch64ReachingDefinition *a64RD = static_cast(cgFunc.GetRD()); - if (currInsn.GetBB() == sameInsn.GetBB()) { - FOR_BB_INSNS(insn, currInsn.GetBB()) { + if (currInsn->GetBB() == sameInsn->GetBB()) { + FOR_BB_INSNS(insn, currInsn->GetBB()) { if (insn->GetMachineOpcode() == MOP_xbl) { return true; } } - if (!a64RD->FindRegDefBetweenInsn(regNo, &sameInsn, &currInsn).empty()) { + if (!a64RD->FindRegDefBetweenInsn(regNo, sameInsn, currInsn).empty()) { return true; } - } else if (a64RD->HasRegDefBetweenInsnGlobal(regNo, sameInsn, currInsn)) { + } else if (a64RD->HasRegDefBetweenInsnGlobal(regNo, *sameInsn, *currInsn)) { return true; } return false; @@ -2051,7 +2120,8 @@ void AndCbzPattern::Run() { void SameRHSPropPattern::Init() { prevInsn = nullptr; - candidates = {MOP_waddrri12, MOP_xaddrri12, MOP_wsubrri12, MOP_xsubrri12, MOP_xmovri32, MOP_xmovri64}; + candidates = {MOP_waddrri12, MOP_xaddrri12, MOP_wsubrri12, MOP_xsubrri12, + MOP_xmovri32, MOP_xmovri64, MOP_wmovrr, MOP_xmovrr}; } bool SameRHSPropPattern::IsSameOperand(Operand *opnd1, Operand *opnd2) const { @@ -2080,19 +2150,16 @@ bool SameRHSPropPattern::FindSameRHSInsnInBB(Insn &insn) { } Operand &opnd = insn.GetOperand(i); if (opnd.IsRegister()) { - if (static_cast(opnd).IsSPOrFP() || !(static_cast(opnd).IsVirtualRegister())) { - return false; - } curRegOpnd = &opnd; } else if (opnd.IsImmediate()) { - auto &immOpnd = static_cast(opnd); + auto &immOpnd = static_cast(opnd); if (immOpnd.GetVary() == kUnAdjustVary) { return false; } curImmOpnd = &opnd; } } - if (curRegOpnd == nullptr && curImmOpnd != nullptr && static_cast(curImmOpnd)->IsZero()) { + if (curRegOpnd == nullptr && curImmOpnd != nullptr && static_cast(curImmOpnd)->IsZero()) { return false; } BB *bb = insn.GetBB(); @@ -2100,7 +2167,7 @@ bool SameRHSPropPattern::FindSameRHSInsnInBB(Insn &insn) { if (!cursor->IsMachineInstruction()) { continue; } - if (cursor->IsCall()) { + if (cursor->IsCall() && !cgFunc.IsAfterRegAlloc()) { return false; } if (cursor->GetMachineOpcode() != insn.GetMachineOpcode()) { @@ -2115,12 +2182,9 @@ bool SameRHSPropPattern::FindSameRHSInsnInBB(Insn &insn) { continue; } if (opnd.IsRegister()) { - if (static_cast(opnd).IsSPOrFP() || !(static_cast(opnd).IsVirtualRegister())) { - return false; - } candRegOpnd = &opnd; } else if (opnd.IsImmediate()) { - auto &immOpnd = static_cast(opnd); + auto &immOpnd = static_cast(opnd); if (immOpnd.GetVary() == kUnAdjustVary) { return false; } @@ -2143,9 +2207,6 @@ bool SameRHSPropPattern::CheckCondition(Insn &insn) { if (std::find(candidates.begin(), candidates.end(), mOp) == candidates.end()) { return false; } - if (insn.GetBB()->HasCall()) { - return false; - } if (!FindSameRHSInsnInBB(insn)) { return false; } @@ -2171,9 +2232,10 @@ bool SameRHSPropPattern::CheckCondition(Insn &insn) { void SameRHSPropPattern::Optimize(Insn &insn) { BB *bb = insn.GetBB(); Operand &destOpnd = insn.GetOperand(kInsnFirstOpnd); - 
-  uint32 bitSize = static_cast<RegOperand&>(destOpnd).GetValidBitsNum();
+  uint32 bitSize = static_cast<RegOperand&>(destOpnd).GetSize();
   MOperator mOp = (bitSize == k64BitSize ? MOP_xmovrr : MOP_wmovrr);
   Insn &newInsn = cgFunc.GetCG()->BuildInstruction<AArch64Insn>(mOp, destOpnd, prevInsn->GetOperand(kInsnFirstOpnd));
+  newInsn.SetId(insn.GetId());
   bb->ReplaceInsn(insn, newInsn);
   if (GLOBAL_DUMP) {
     LogInfo::MapleLogger() << ">>>>>>> In SameRHSPropPattern : <<<<<<<\n";
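The SameRHSPropPattern hunks above reuse a previously computed right-hand side by rewriting the later instruction into a plain register move (and now tag the new move with the replaced instruction's id so downstream bookkeeping stays consistent). A minimal standalone sketch of that rewrite, using hypothetical `MiniInsn`/`PropagateSameRHS` types rather than the real Insn/Operand API:

```cpp
#include <cstdint>
#include <optional>
#include <vector>

// Hypothetical miniature instruction: opcode, destination vreg, and the
// flattened right-hand-side operands (registers/immediates).
struct MiniInsn {
  int mop;
  int dst;
  std::vector<int64_t> rhs;
};

// If an earlier instruction in the block computed the same RHS with the same
// opcode, the later instruction can become a copy of the earlier result.
std::optional<MiniInsn> PropagateSameRHS(const std::vector<MiniInsn> &bb, size_t idx) {
  const MiniInsn &cur = bb[idx];
  for (size_t j = idx; j-- > 0;) {
    const MiniInsn &prev = bb[j];
    if (prev.mop == cur.mop && prev.rhs == cur.rhs) {
      return MiniInsn{/*mop=*/0 /* a mov */, cur.dst, {prev.dst}};
    }
  }
  return std::nullopt;
}
```

The real pattern additionally rejects `kUnAdjustVary` immediates and, before register allocation, any intervening call that could clobber the previous result, as the hunks above show.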
diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_ico.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_ico.cpp
index 64c6bd05ddc52525f99aeb278bc10f1609fda14b..89b7654d65db0e59506a36171424cf1bf9eb5a64 100644
--- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_ico.cpp
+++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_ico.cpp
@@ -45,7 +45,7 @@ Insn *AArch64ICOPattern::BuildCcmpInsn(AArch64CC_t ccCode, const Insn *cmpInsn)
   if (nzcv == k16BitSize) {
     return nullptr;
   }
-  AArch64ImmOperand &opnd3 = func->CreateImmOperand(PTY_u8, nzcv);
+  ImmOperand &opnd3 = func->CreateImmOperand(PTY_u8, nzcv);
   CondOperand &cond = static_cast<AArch64CGFunc*>(cgFunc)->GetCondOperand(ccCode);
   uint32 dSize = opnd1.GetSize();
   bool isIntTy = opnd2.IsIntImmediate();
@@ -196,7 +196,7 @@ void AArch64ICOIfThenElsePattern::GenerateInsnForImm(const Insn &branchInsn, Ope
       ((destReg.GetSize() == k64BitSize ? MOP_xdfmovri : MOP_wsfmovri));
   RegOperand *tempTarIf = nullptr;
   if (imm1.IsZero()) {
-    tempTarIf = isD64 ? &AArch64RegOperand::Get64bitZeroRegister() : &AArch64RegOperand::Get32bitZeroRegister();
+    tempTarIf = &cgFunc->GetZeroOpnd(dSize);
   } else {
     tempTarIf = cgFunc->GetTheCFG()->CreateVregFromReg(destReg);
     Insn &tempInsnIf = cgFunc->GetTheCFG()->GetInsnModifier()->GetCGFunc()->GetCG()->BuildInstruction<AArch64Insn>(
@@ -206,7 +206,7 @@ void AArch64ICOIfThenElsePattern::GenerateInsnForImm(const Insn &branchInsn, Ope

   RegOperand *tempTarElse = nullptr;
   if (imm2.IsZero()) {
-    tempTarElse = isD64 ? &AArch64RegOperand::Get64bitZeroRegister() : &AArch64RegOperand::Get32bitZeroRegister();
+    tempTarElse = &cgFunc->GetZeroOpnd(dSize);
   } else {
     tempTarElse = cgFunc->GetTheCFG()->CreateVregFromReg(destReg);
     Insn &tempInsnElse = cgFunc->GetTheCFG()->GetInsnModifier()->GetCGFunc()->GetCG()->BuildInstruction<AArch64Insn>(
@@ -233,7 +233,7 @@ RegOperand *AArch64ICOIfThenElsePattern::GenerateRegAndTempInsn(Operand &dest, c
     reg = cgFunc->GetTheCFG()->CreateVregFromReg(destReg);
     ImmOperand &tempSrcElse = static_cast<ImmOperand&>(dest);
     if (tempSrcElse.IsZero()) {
-      return isDest64 ? &AArch64RegOperand::Get64bitZeroRegister() : &AArch64RegOperand::Get32bitZeroRegister();
+      return &cgFunc->GetZeroOpnd(destReg.GetSize());
     }
     Insn &tempInsn = cgFunc->GetTheCFG()->GetInsnModifier()->GetCGFunc()->GetCG()->BuildInstruction<AArch64Insn>(
         mOp, *reg, tempSrcElse);
@@ -377,7 +377,7 @@ bool AArch64ICOIfThenElsePattern::CheckModifiedRegister(Insn &insn, std::map
     auto &srcReg = static_cast<RegOperand&>(src);
-    if (destReg.IsOfFloatOrSIMDClass() && srcReg.IsZeroRegister()) {
+    if (destReg.IsOfFloatOrSIMDClass() && srcReg.GetRegisterNumber() == RZR) {
       return false;
     }
     for (Insn *tmpInsn = &insn; tmpInsn != nullptr; tmpInsn = tmpInsn->GetNext()) {
@@ -417,7 +417,7 @@ bool AArch64ICOIfThenElsePattern::CheckCondMoveBB(BB *bb, std::map
-    if (!src->IsConstant() && !src->IsRegister()) {
+    if (!(src->IsConstImmediate()) && !src->IsRegister()) {
       return false;
     }
@@ -447,11 +447,12 @@ bool AArch64ICOIfThenElsePattern::DoOpt(BB &cmpBB, BB *ifBB, BB *elseBB, BB &joi
   Operand *flagOpnd = nullptr;
   /* for cbnz and cbz instruction */
   if (cgFunc->GetTheCFG()->IsCompareAndBranchInsn(*condBr)) {
-    if (condBr->GetOperand(0).IsZeroRegister()) {
+    Operand &opnd0 = condBr->GetOperand(0);
+    if (opnd0.IsRegister() && static_cast<RegOperand&>(opnd0).GetRegisterNumber() == RZR) {
       return false;
     }
     cmpInsn = condBr;
-    flagOpnd = &(condBr->GetOperand(0));
+    flagOpnd = &(opnd0);
   }

   /* tbz will not be optimized */
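Several hunks above replace the static `Get64bitZeroRegister()`/`Get32bitZeroRegister()` accessors with a size-driven `cgFunc->GetZeroOpnd(dSize)` query and compare register numbers against `RZR` directly. A small sketch of the underlying AArch64 fact, with an illustrative helper name rather than the MapleCC signature:

```cpp
#include <cstdint>
#include <string>

// wzr/xzr are two views of the same always-zero register (number 31 in the
// integer file); only the operand width decides which name to print.
inline std::string ZeroRegName(uint32_t bitSize) {
  return bitSize == 64 ? "xzr" : "wzr";
}
// With that, "materialize 0" needs no temporary:  mov w0, wzr  /  mov x0, xzr.
```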
emitter.Emit(", ["); - temp0->Emit(emitter, nullptr); - emitter.Emit("]\n"); - Operand *expectedValue = opnds[kInsnSixthOpnd]; - OpndProp *expectedValueProp = md->operand[kInsnSixthOpnd]; - /* cmp ws, w3 */ - emitter.Emit("\tcmp\t"); - temp1->Emit(emitter, nullptr); - emitter.Emit(", "); - expectedValue->Emit(emitter, expectedValueProp); - emitter.Emit("\n"); - constexpr uint32 kInsnNinethOpnd = 8; - Operand *label2 = opnds[kInsnNinethOpnd]; - /* b.ne label2 */ - emitter.Emit("\tbne\t"); - label2->Emit(emitter, nullptr); - emitter.Emit("\n"); - Operand *newValue = opnds[kInsnSeventhOpnd]; - /* stlxr ws, w4, [xt] */ - emitter.Emit("\tstlxr\t"); - emitter.Emit(AArch64CG::intRegNames[AArch64CG::kR32List][static_cast(temp1)->GetRegisterNumber()]); - emitter.Emit(", "); - newValue->Emit(emitter, nullptr); - emitter.Emit(", ["); - temp0->Emit(emitter, nullptr); - emitter.Emit("]\n"); - /* cbnz ws, label1 */ - emitter.Emit("\tcbnz\t"); - emitter.Emit(AArch64CG::intRegNames[AArch64CG::kR32List][static_cast(temp1)->GetRegisterNumber()]); - emitter.Emit(", "); - label1->Emit(emitter, nullptr); - emitter.Emit("\n"); - /* label2: */ - label2->Emit(emitter, nullptr); - emitter.Emit(":\n"); - Operand *retVal = opnds[kInsnFirstOpnd]; - /* cset x0, eq */ - emitter.Emit("\tcset\t"); - retVal->Emit(emitter, nullptr); - emitter.Emit(", EQ\n"); -} - -/* - * intrinsic_string_indexof w0, x1, w2, x3, w4, x5, x6, x7, x8, x9, w10, - * Label.FIRST_LOOP, Label.STR2_NEXT, Label.STR1_LOOP, - * Label.STR1_NEXT, Label.LAST_WORD, Label.NOMATCH, Label.RET - * cmp w4, w2 - * b.gt .Label.NOMATCH - * sub w2, w2, w4 - * sub w4, w4, #8 - * mov w10, w2 - * uxtw x4, w4 - * uxtw x2, w2 - * add x3, x3, x4 - * add x1, x1, x2 - * neg x4, x4 - * neg x2, x2 - * ldr x5, [x3,x4] - * .Label.FIRST_LOOP: - * ldr x7, [x1,x2] - * cmp x5, x7 - * b.eq .Label.STR1_LOOP - * .Label.STR2_NEXT: - * adds x2, x2, #1 - * b.le .Label.FIRST_LOOP - * b .Label.NOMATCH - * .Label.STR1_LOOP: - * adds x8, x4, #8 - * add x9, x2, #8 - * b.ge .Label.LAST_WORD - * .Label.STR1_NEXT: - * ldr x6, [x3,x8] - * ldr x7, [x1,x9] - * cmp x6, x7 - * b.ne .Label.STR2_NEXT - * adds x8, x8, #8 - * add x9, x9, #8 - * b.lt .Label.STR1_NEXT - * .Label.LAST_WORD: - * ldr x6, [x3] - * sub x9, x1, x4 - * ldr x7, [x9,x2] - * cmp x6, x7 - * b.ne .Label.STR2_NEXT - * add w0, w10, w2 - * b .Label.RET - * .Label.NOMATCH: - * mov w0, #-1 - * .Label.RET: - */ -void AArch64Insn::EmitStringIndexOf(Emitter &emitter) const { - /* MOP_string_indexof has 18 operands */ - ASSERT(opnds.size() == 18, "ensure the operands number"); - Operand *patternLengthOpnd = opnds[kInsnFifthOpnd]; - Operand *srcLengthOpnd = opnds[kInsnThirdOpnd]; - const std::string patternLengthReg = - AArch64CG::intRegNames[AArch64CG::kR64List][static_cast(patternLengthOpnd)->GetRegisterNumber()]; - const std::string srcLengthReg = - AArch64CG::intRegNames[AArch64CG::kR64List][static_cast(srcLengthOpnd)->GetRegisterNumber()]; - /* cmp w4, w2 */ - emitter.Emit("\tcmp\t"); - patternLengthOpnd->Emit(emitter, nullptr); - emitter.Emit(", "); - srcLengthOpnd->Emit(emitter, nullptr); - emitter.Emit("\n"); - /* the 16th operand of MOP_string_indexof is Label.NOMATCH */ - Operand *labelNoMatch = opnds[16]; - /* b.gt Label.NOMATCH */ - emitter.Emit("\tb.gt\t"); - labelNoMatch->Emit(emitter, nullptr); - emitter.Emit("\n"); - /* sub w2, w2, w4 */ - emitter.Emit("\tsub\t"); - srcLengthOpnd->Emit(emitter, nullptr); - emitter.Emit(", "); - srcLengthOpnd->Emit(emitter, nullptr); - emitter.Emit(", "); - patternLengthOpnd->Emit(emitter, 
-
-/*
- * intrinsic_string_indexof w0, x1, w2, x3, w4, x5, x6, x7, x8, x9, w10,
- *                          Label.FIRST_LOOP, Label.STR2_NEXT, Label.STR1_LOOP,
- *                          Label.STR1_NEXT, Label.LAST_WORD, Label.NOMATCH, Label.RET
- * cmp w4, w2
- * b.gt .Label.NOMATCH
- * sub w2, w2, w4
- * sub w4, w4, #8
- * mov w10, w2
- * uxtw x4, w4
- * uxtw x2, w2
- * add x3, x3, x4
- * add x1, x1, x2
- * neg x4, x4
- * neg x2, x2
- * ldr x5, [x3,x4]
- * .Label.FIRST_LOOP:
- * ldr x7, [x1,x2]
- * cmp x5, x7
- * b.eq .Label.STR1_LOOP
- * .Label.STR2_NEXT:
- * adds x2, x2, #1
- * b.le .Label.FIRST_LOOP
- * b .Label.NOMATCH
- * .Label.STR1_LOOP:
- * adds x8, x4, #8
- * add x9, x2, #8
- * b.ge .Label.LAST_WORD
- * .Label.STR1_NEXT:
- * ldr x6, [x3,x8]
- * ldr x7, [x1,x9]
- * cmp x6, x7
- * b.ne .Label.STR2_NEXT
- * adds x8, x8, #8
- * add x9, x9, #8
- * b.lt .Label.STR1_NEXT
- * .Label.LAST_WORD:
- * ldr x6, [x3]
- * sub x9, x1, x4
- * ldr x7, [x9,x2]
- * cmp x6, x7
- * b.ne .Label.STR2_NEXT
- * add w0, w10, w2
- * b .Label.RET
- * .Label.NOMATCH:
- * mov w0, #-1
- * .Label.RET:
- */
-void AArch64Insn::EmitStringIndexOf(Emitter &emitter) const {
-  /* MOP_string_indexof has 18 operands */
-  ASSERT(opnds.size() == 18, "ensure the operands number");
-  Operand *patternLengthOpnd = opnds[kInsnFifthOpnd];
-  Operand *srcLengthOpnd = opnds[kInsnThirdOpnd];
-  const std::string patternLengthReg =
-      AArch64CG::intRegNames[AArch64CG::kR64List][static_cast<RegOperand*>(patternLengthOpnd)->GetRegisterNumber()];
-  const std::string srcLengthReg =
-      AArch64CG::intRegNames[AArch64CG::kR64List][static_cast<RegOperand*>(srcLengthOpnd)->GetRegisterNumber()];
-  /* cmp w4, w2 */
-  emitter.Emit("\tcmp\t");
-  patternLengthOpnd->Emit(emitter, nullptr);
-  emitter.Emit(", ");
-  srcLengthOpnd->Emit(emitter, nullptr);
-  emitter.Emit("\n");
-  /* the 16th operand of MOP_string_indexof is Label.NOMATCH */
-  Operand *labelNoMatch = opnds[16];
-  /* b.gt Label.NOMATCH */
-  emitter.Emit("\tb.gt\t");
-  labelNoMatch->Emit(emitter, nullptr);
-  emitter.Emit("\n");
-  /* sub w2, w2, w4 */
-  emitter.Emit("\tsub\t");
-  srcLengthOpnd->Emit(emitter, nullptr);
-  emitter.Emit(", ");
-  srcLengthOpnd->Emit(emitter, nullptr);
-  emitter.Emit(", ");
-  patternLengthOpnd->Emit(emitter, nullptr);
-  emitter.Emit("\n");
-  /* sub w4, w4, #8 */
-  emitter.Emit("\tsub\t");
-  patternLengthOpnd->Emit(emitter, nullptr);
-  emitter.Emit(", ");
-  patternLengthOpnd->Emit(emitter, nullptr);
-  emitter.Emit(", #8\n");
-  /* the 10th operand of MOP_string_indexof is w10 */
-  Operand *resultTmp = opnds[10];
-  /* mov w10, w2 */
-  emitter.Emit("\tmov\t");
-  resultTmp->Emit(emitter, nullptr);
-  emitter.Emit(", ");
-  srcLengthOpnd->Emit(emitter, nullptr);
-  emitter.Emit("\n");
-  /* uxtw x4, w4 */
-  emitter.Emit("\tuxtw\t").Emit(patternLengthReg);
-  emitter.Emit(", ");
-  patternLengthOpnd->Emit(emitter, nullptr);
-  emitter.Emit("\n");
-  /* uxtw x2, w2 */
-  emitter.Emit("\tuxtw\t").Emit(srcLengthReg);
-  emitter.Emit(", ");
-  srcLengthOpnd->Emit(emitter, nullptr);
-  emitter.Emit("\n");
-  Operand *patternStringBaseOpnd = opnds[kInsnFourthOpnd];
-  /* add x3, x3, x4 */
-  emitter.Emit("\tadd\t");
-  patternStringBaseOpnd->Emit(emitter, nullptr);
-  emitter.Emit(", ");
-  patternStringBaseOpnd->Emit(emitter, nullptr);
-  emitter.Emit(", ").Emit(patternLengthReg);
-  emitter.Emit("\n");
-  Operand *srcStringBaseOpnd = opnds[kInsnSecondOpnd];
-  /* add x1, x1, x2 */
-  emitter.Emit("\tadd\t");
-  srcStringBaseOpnd->Emit(emitter, nullptr);
-  emitter.Emit(", ");
-  srcStringBaseOpnd->Emit(emitter, nullptr);
-  emitter.Emit(", ").Emit(srcLengthReg);
-  emitter.Emit("\n");
-  /* neg x4, x4 */
-  emitter.Emit("\tneg\t").Emit(patternLengthReg);
-  emitter.Emit(", ").Emit(patternLengthReg);
-  emitter.Emit("\n");
-  /* neg x2, x2 */
-  emitter.Emit("\tneg\t").Emit(srcLengthReg);
-  emitter.Emit(", ").Emit(srcLengthReg);
-  emitter.Emit("\n");
-  Operand *first = opnds[kInsnSixthOpnd];
-  /* ldr x5, [x3,x4] */
-  emitter.Emit("\tldr\t");
-  first->Emit(emitter, nullptr);
-  emitter.Emit(", [");
-  patternStringBaseOpnd->Emit(emitter, nullptr);
-  emitter.Emit(",").Emit(patternLengthReg);
-  emitter.Emit("]\n");
-  /* the 11th operand of MOP_string_indexof is Label.FIRST_LOOP */
-  Operand *labelFirstLoop = opnds[11];
-  /* .Label.FIRST_LOOP: */
-  labelFirstLoop->Emit(emitter, nullptr);
-  emitter.Emit(":\n");
-  /* the 7th operand of MOP_string_indexof is x7 */
-  Operand *ch2 = opnds[7];
-  /* ldr x7, [x1,x2] */
-  emitter.Emit("\tldr\t");
-  ch2->Emit(emitter, nullptr);
-  emitter.Emit(", [");
-  srcStringBaseOpnd->Emit(emitter, nullptr);
-  emitter.Emit(",").Emit(srcLengthReg);
-  emitter.Emit("]\n");
-  /* cmp x5, x7 */
-  emitter.Emit("\tcmp\t");
-  first->Emit(emitter, nullptr);
-  emitter.Emit(", ");
-  ch2->Emit(emitter, nullptr);
-  emitter.Emit("\n");
-  /* the 13th operand of MOP_string_indexof is Label.STR1_LOOP */
-  Operand *labelStr1Loop = opnds[13];
-  /* b.eq .Label.STR1_LOOP */
-  emitter.Emit("\tb.eq\t");
-  labelStr1Loop->Emit(emitter, nullptr);
-  emitter.Emit("\n");
-  /* the 12th operand of MOP_string_indexof is Label.STR2_NEXT */
-  Operand *labelStr2Next = opnds[12];
-  /* .Label.STR2_NEXT: */
-  labelStr2Next->Emit(emitter, nullptr);
-  emitter.Emit(":\n");
-  /* adds x2, x2, #1 */
-  emitter.Emit("\tadds\t").Emit(srcLengthReg);
-  emitter.Emit(", ").Emit(srcLengthReg);
-  emitter.Emit(", #1\n");
-  /* b.le .Label.FIRST_LOOP */
-  emitter.Emit("\tb.le\t");
-  labelFirstLoop->Emit(emitter, nullptr);
-  emitter.Emit("\n");
-  /* b .Label.NOMATCH */
-  emitter.Emit("\tb\t");
-  labelNoMatch->Emit(emitter, nullptr);
-  emitter.Emit("\n");
-  /* .Label.STR1_LOOP: */
-  labelStr1Loop->Emit(emitter, nullptr);
-  emitter.Emit(":\n");
-  /* the 8th operand of MOP_string_indexof is x8 */
-  Operand *tmp1 = opnds[8];
-  /* adds x8, x4, #8 */
-  emitter.Emit("\tadds\t");
-  tmp1->Emit(emitter, nullptr);
-  emitter.Emit(", ").Emit(patternLengthReg);
-  emitter.Emit(", #8\n");
-  /* the 9th operand of MOP_string_indexof is x9 */
-  Operand *tmp2 = opnds[9];
-  /* add x9, x2, #8 */
-  emitter.Emit("\tadd\t");
-  tmp2->Emit(emitter, nullptr);
-  emitter.Emit(", ").Emit(srcLengthReg);
-  emitter.Emit(", #8\n");
-  /* the 15th operand of MOP_string_indexof is Label.LAST_WORD */
-  Operand *labelLastWord = opnds[15];
-  /* b.ge .Label.LAST_WORD */
-  emitter.Emit("\tb.ge\t");
-  labelLastWord->Emit(emitter, nullptr);
-  emitter.Emit("\n");
-  /* the 14th operand of MOP_string_indexof is Label.STR1_NEXT */
-  Operand *labelStr1Next = opnds[14];
-  /* .Label.STR1_NEXT: */
-  labelStr1Next->Emit(emitter, nullptr);
-  emitter.Emit(":\n");
-  /* the 6th operand of MOP_string_indexof is x6 */
-  Operand *ch1 = opnds[6];
-  /* ldr x6, [x3,x8] */
-  emitter.Emit("\tldr\t");
-  ch1->Emit(emitter, nullptr);
-  emitter.Emit(", [");
-  patternStringBaseOpnd->Emit(emitter, nullptr);
-  emitter.Emit(",");
-  tmp1->Emit(emitter, nullptr);
-  emitter.Emit("]\n");
-  /* ldr x7, [x1,x9] */
-  emitter.Emit("\tldr\t");
-  ch2->Emit(emitter, nullptr);
-  emitter.Emit(", [");
-  srcStringBaseOpnd->Emit(emitter, nullptr);
-  emitter.Emit(",");
-  tmp2->Emit(emitter, nullptr);
-  emitter.Emit("]\n");
-  /* cmp x6, x7 */
-  emitter.Emit("\tcmp\t");
-  ch1->Emit(emitter, nullptr);
-  emitter.Emit(", ");
-  ch2->Emit(emitter, nullptr);
-  emitter.Emit("\n");
-  /* b.ne .Label.STR2_NEXT */
-  emitter.Emit("\tb.ne\t");
-  labelStr2Next->Emit(emitter, nullptr);
-  emitter.Emit("\n");
-  /* adds x8, x8, #8 */
-  emitter.Emit("\tadds\t");
-  tmp1->Emit(emitter, nullptr);
-  emitter.Emit(", ");
-  tmp1->Emit(emitter, nullptr);
-  emitter.Emit(", #8\n");
-  /* add x9, x9, #8 */
-  emitter.Emit("\tadd\t");
-  tmp2->Emit(emitter, nullptr);
-  emitter.Emit(", ");
-  tmp2->Emit(emitter, nullptr);
-  emitter.Emit(", #8\n");
-  /* b.lt .Label.STR1_NEXT */
-  emitter.Emit("\tb.lt\t");
-  labelStr1Next->Emit(emitter, nullptr);
-  emitter.Emit("\n");
-  /* .Label.LAST_WORD: */
-  labelLastWord->Emit(emitter, nullptr);
-  emitter.Emit(":\n");
-  /* ldr x6, [x3] */
-  emitter.Emit("\tldr\t");
-  ch1->Emit(emitter, nullptr);
-  emitter.Emit(", [");
-  patternStringBaseOpnd->Emit(emitter, nullptr);
-  emitter.Emit("]\n");
-  /* sub x9, x1, x4 */
-  emitter.Emit("\tsub\t");
-  tmp2->Emit(emitter, nullptr);
-  emitter.Emit(", ");
-  srcStringBaseOpnd->Emit(emitter, nullptr);
-  emitter.Emit(", ").Emit(patternLengthReg);
-  emitter.Emit("\n");
-  /* ldr x7, [x9,x2] */
-  emitter.Emit("\tldr\t");
-  ch2->Emit(emitter, nullptr);
-  emitter.Emit(", [");
-  tmp2->Emit(emitter, nullptr);
-  emitter.Emit(", ").Emit(srcLengthReg);
-  emitter.Emit("]\n");
-  /* cmp x6, x7 */
-  emitter.Emit("\tcmp\t");
-  ch1->Emit(emitter, nullptr);
-  emitter.Emit(", ");
-  ch2->Emit(emitter, nullptr);
-  emitter.Emit("\n");
-  /* b.ne .Label.STR2_NEXT */
-  emitter.Emit("\tb.ne\t");
-  labelStr2Next->Emit(emitter, nullptr);
-  emitter.Emit("\n");
-  Operand *retVal = opnds[kInsnFirstOpnd];
-  /* add w0, w10, w2 */
-  emitter.Emit("\tadd\t");
-  retVal->Emit(emitter, nullptr);
-  emitter.Emit(", ");
-  resultTmp->Emit(emitter, nullptr);
-  emitter.Emit(", ");
-  srcLengthOpnd->Emit(emitter, nullptr);
-  emitter.Emit("\n");
-  /* the 17th operand of MOP_string_indexof Label.ret */
-  Operand *labelRet = opnds[17];
-  /* b .Label.ret */
-  emitter.Emit("\tb\t");
-  labelRet->Emit(emitter, nullptr);
-  emitter.Emit("\n");
-  /* .Label.NOMATCH: */
-  labelNoMatch->Emit(emitter, nullptr);
-  emitter.Emit(":\n");
-  /* mov w0, #-1 */
-  emitter.Emit("\tmov\t");
-  retVal->Emit(emitter, nullptr);
-  emitter.Emit(", #-1\n");
-  /* .Label.ret: */
-  labelRet->Emit(emitter, nullptr);
-  emitter.Emit(":\n");
-}
-
-/*
- * intrinsic_get_add_int w0, xt, wt, ws, x1, x2, w3, label
- * add xt, x1, x2
- * label:
- * ldaxr w0, [xt]
- * add wt, w0, w3
- * stlxr ws, wt, [xt]
- * cbnz ws, label
- */
-void AArch64Insn::EmitGetAndAddInt(Emitter &emitter) const {
-  ASSERT(opnds.size() > kInsnEighthOpnd, "ensure the oprands number");
-  emitter.Emit("\t//\tstart of Unsafe.getAndAddInt.\n");
-  Operand *tempOpnd0 = opnds[kInsnSecondOpnd];
-  Operand *tempOpnd1 = opnds[kInsnThirdOpnd];
-  Operand *tempOpnd2 = opnds[kInsnFourthOpnd];
-  Operand *objOpnd = opnds[kInsnFifthOpnd];
-  Operand *offsetOpnd = opnds[kInsnSixthOpnd];
-  Operand *deltaOpnd = opnds[kInsnSeventhOpnd];
-  Operand *labelOpnd = opnds[kInsnEighthOpnd];
-  /* emit add. */
-  emitter.Emit("\t").Emit("add").Emit("\t");
-  tempOpnd0->Emit(emitter, nullptr);
-  emitter.Emit(", ");
-  objOpnd->Emit(emitter, nullptr);
-  emitter.Emit(", ");
-  offsetOpnd->Emit(emitter, nullptr);
-  emitter.Emit("\n");
-  /* emit label. */
-  labelOpnd->Emit(emitter, nullptr);
-  emitter.Emit(":\n");
-  Operand *retVal = opnds[kInsnFirstOpnd];
-  const MOperator mOp = GetMachineOpcode();
-  const AArch64MD *md = &AArch64CG::kMd[mOp];
-  OpndProp *retProp = md->operand[kInsnFirstOpnd];
-  /* emit ldaxr */
-  emitter.Emit("\t").Emit("ldaxr").Emit("\t");
-  retVal->Emit(emitter, retProp);
-  emitter.Emit(", [");
-  tempOpnd0->Emit(emitter, nullptr);
-  emitter.Emit("]\n");
-  /* emit add. */
-  emitter.Emit("\t").Emit("add").Emit("\t");
-  tempOpnd1->Emit(emitter, retProp);
-  emitter.Emit(", ");
-  retVal->Emit(emitter, retProp);
-  emitter.Emit(", ");
-  deltaOpnd->Emit(emitter, retProp);
-  emitter.Emit("\n");
-  /* emit stlxr. */
-  emitter.Emit("\t").Emit("stlxr").Emit("\t");
-  tempOpnd2->Emit(emitter, nullptr);
-  emitter.Emit(", ");
-  tempOpnd1->Emit(emitter, retProp);
-  emitter.Emit(", [");
-  tempOpnd0->Emit(emitter, nullptr);
-  emitter.Emit("]\n");
-  /* emit cbnz. */
-  emitter.Emit("\t").Emit("cbnz").Emit("\t");
-  tempOpnd2->Emit(emitter, nullptr);
-  emitter.Emit(", ");
-  labelOpnd->Emit(emitter, nullptr);
-  emitter.Emit("\n");
-  emitter.Emit("\t//\tend of Unsafe.getAndAddInt.\n");
-}
-
-/*
- * intrinsic_get_set_int w0, xt, ws, x1, x2, w3, label
- * add xt, x1, x2
- * label:
- * ldaxr w0, [xt]
- * stlxr ws, w3, [xt]
- * cbnz ws, label
- */
-void AArch64Insn::EmitGetAndSetInt(Emitter &emitter) const {
-  /* MOP_get_and_setI and MOP_get_and_setL have 7 operands */
-  ASSERT(opnds.size() > kInsnSeventhOpnd, "ensure the operands number");
-  Operand *tempOpnd0 = opnds[kInsnSecondOpnd];
-  Operand *tempOpnd1 = opnds[kInsnThirdOpnd];
-  Operand *objOpnd = opnds[kInsnFourthOpnd];
-  Operand *offsetOpnd = opnds[kInsnFifthOpnd];
-  /* add x1, x1, x2 */
-  emitter.Emit("\tadd\t");
-  tempOpnd0->Emit(emitter, nullptr);
-  emitter.Emit(", ");
-  objOpnd->Emit(emitter, nullptr);
-  emitter.Emit(", ");
-  offsetOpnd->Emit(emitter, nullptr);
-  emitter.Emit("\n");
-  Operand *labelOpnd = opnds[kInsnSeventhOpnd];
-  /* label: */
-  labelOpnd->Emit(emitter, nullptr);
-  emitter.Emit(":\n");
-  Operand *retVal = opnds[kInsnFirstOpnd];
-  /* ldaxr w0, [xt] */
-  emitter.Emit("\tldaxr\t");
-  retVal->Emit(emitter, nullptr);
-  emitter.Emit(", [");
-  tempOpnd0->Emit(emitter, nullptr);
-  emitter.Emit("]\n");
-  Operand *newValueOpnd = opnds[kInsnSixthOpnd];
-  /* stlxr ws, w3, [xt] */
-  emitter.Emit("\tstlxr\t");
-  tempOpnd1->Emit(emitter, nullptr);
-  emitter.Emit(", ");
-  newValueOpnd->Emit(emitter, nullptr);
-  emitter.Emit(", [");
-  tempOpnd0->Emit(emitter, nullptr);
-  emitter.Emit("]\n");
-  /* cbnz w2, label */
-  emitter.Emit("\tcbnz\t");
-  tempOpnd1->Emit(emitter, nullptr);
-  emitter.Emit(", ");
-  labelOpnd->Emit(emitter, nullptr);
-  emitter.Emit("\n");
-}
-
-void AArch64Insn::EmitCounter(const CG &cg, Emitter &emitter) const {
-  /*
-   * adrp x1, __profile_bb_table$$GetBoolean_dex+4
-   * ldr w17, [x1, #:lo12:__profile_bb_table$$GetBoolean_dex+4]
-   * add w17, w17, #1
-   * str w17, [x1, #:lo12:__profile_bb_table$$GetBoolean_dex+4]
-   */
-  const AArch64MD *md = &AArch64CG::kMd[MOP_counter];
-
-  Operand *opnd0 = opnds[kInsnFirstOpnd];
-  Operand *opnd1 = opnds[kInsnSecondOpnd];
-  OpndProp *prop0 = md->operand[kInsnFirstOpnd];
-  StImmOperand *stImmOpnd = static_cast<StImmOperand*>(opnd1);
-  CHECK_FATAL(stImmOpnd != nullptr, "stImmOpnd is null in AArch64Insn::EmitCounter");
-  /* emit nop for breakpoint */
-  if (cg.GetCGOptions().WithDwarf()) {
-    emitter.Emit("\t").Emit("nop").Emit("\n");
-  }
-
-  /* emit adrp */
-  emitter.Emit("\t").Emit("adrp").Emit("\t");
-  opnd0->Emit(emitter, prop0);
-  emitter.Emit(",");
-  emitter.Emit(stImmOpnd->GetName());
-  emitter.Emit("+").Emit(stImmOpnd->GetOffset());
-  emitter.Emit("\n");
-  /* emit ldr */
-  emitter.Emit("\t").Emit("ldr").Emit("\tw17, [");
-  opnd0->Emit(emitter, prop0);
-  emitter.Emit(",");
-  emitter.Emit("#");
-  emitter.Emit(":lo12:").Emit(stImmOpnd->GetName());
-  emitter.Emit("+").Emit(stImmOpnd->GetOffset());
-  emitter.Emit("]");
-  emitter.Emit("\n");
-  /* emit add */
-  emitter.Emit("\t").Emit("add").Emit("\tw17, w17, #1");
-  emitter.Emit("\n");
-  /* emit str */
-  emitter.Emit("\t").Emit("str").Emit("\tw17, [");
-  opnd0->Emit(emitter, prop0);
-  emitter.Emit(",");
-  emitter.Emit("#");
-  emitter.Emit(":lo12:").Emit(stImmOpnd->GetName());
-  emitter.Emit("+").Emit(stImmOpnd->GetOffset());
-  emitter.Emit("]");
-  emitter.Emit("\n");
-}
-
-static void AsmStringOutputRegNum(
-    bool isInt, uint32 regno, uint32 intBase, uint32 fpBase, std::string &strToEmit) {
-  regno_t newRegno;
-  if (isInt) {
-    newRegno = regno - intBase;
-  } else {
-    newRegno = regno - fpBase;
-  }
-  if (newRegno > (kDecimalMax - 1)) {
-    uint32 tenth = newRegno / kDecimalMax;
-    strToEmit += '0' + static_cast<char>(tenth);
-    newRegno -= (kDecimalMax * tenth);
-  }
-  strToEmit += newRegno + '0';
-}
-
-void AArch64Insn::EmitInlineAsm(const CG &cg, Emitter &emitter) const {
-  (void)cg;
-  emitter.Emit("\t//Inline asm begin\n\t");
-  auto &list1 = static_cast<AArch64ListOperand&>(GetOperand(kAsmOutputListOpnd));
-  std::vector<RegOperand*> outOpnds;
-  for (auto *regOpnd : list1.GetOperands()) {
-    outOpnds.push_back(regOpnd);
-  }
-  auto &list2 = static_cast<AArch64ListOperand&>(GetOperand(kAsmInputListOpnd));
-  std::vector<RegOperand*> inOpnds;
-  for (auto *regOpnd : list2.GetOperands()) {
-    inOpnds.push_back(regOpnd);
-  }
-  auto &list6 = static_cast<ListConstraintOperand&>(GetOperand(kAsmOutputRegPrefixOpnd));
-  auto &list7 = static_cast<ListConstraintOperand&>(GetOperand(kAsmInputRegPrefixOpnd));
-  MapleString asmStr = static_cast<StringOperand&>(this->GetOperand(kAsmStringOpnd)).GetComment();
-  std::string stringToEmit;
-  size_t sidx = 0;
-  auto IsMemAccess = [](char c)->bool {
-    return c == '[';
-  };
-  auto EmitRegister = [&](const char *p, bool isInt, uint32 regNO, bool unDefRegSize)->void {
-    if (IsMemAccess(p[0])) {
-      stringToEmit += "[x";
-      AsmStringOutputRegNum(isInt, regNO, R0, V0, stringToEmit);
-      stringToEmit += "]";
-    } else {
-      ASSERT((p[0] == 'w' || p[0] == 'x' || p[0] == 's' || p[0] == 'd' || p[0] == 'v'), "Asm invalid register type");
-      if ((p[0] == 'w' || p[0] == 'x') && unDefRegSize) {
-        stringToEmit += 'x';
-      } else {
-        stringToEmit += p[0];
-      }
-      if (!unDefRegSize) {
-        isInt = (p[0] == 'w' || p[0] == 'x');
-      }
-      AsmStringOutputRegNum(isInt, regNO, R0, V0, stringToEmit);
-    }
-  };
-  for (size_t i = 0; i < asmStr.length(); ++i) {
-    switch (asmStr[i]) {
-      case '$': {
-        char c = asmStr[++i];
-        if ((c >= '0') && (c <= '9')) {
-          auto val = static_cast<uint32>(c - '0');
-          if (asmStr[i + 1] >= '0' && asmStr[i + 1] <= '9') {
-            val = val * kDecimalMax + static_cast<uint32>(asmStr[++i] - '0');
-          }
-          if (val < outOpnds.size()) {
-            const char *prefix = list6.stringList[val]->GetComment().c_str();
-            RegOperand *opnd = outOpnds[val];
-            EmitRegister(prefix, opnd->IsOfIntClass(), opnd->GetRegisterNumber(), true);
-          } else {
-            val -= static_cast<uint32>(outOpnds.size());
-            CHECK_FATAL(val < inOpnds.size(), "Inline asm : invalid register constraint number");
-            RegOperand *opnd = inOpnds[val];
-            /* input is a immediate */
-            const char *prefix = list7.stringList[val]->GetComment().c_str();
-            if (prefix[0] == 'i') {
-              stringToEmit += '#';
-              for (size_t k = 1; k < list7.stringList[val]->GetComment().length(); ++k) {
-                stringToEmit += prefix[k];
-              }
-            } else {
-              EmitRegister(prefix, opnd->IsOfIntClass(), opnd->GetRegisterNumber(), true);
-            }
-          }
-        } else if (c == '{') {
-          c = asmStr[++i];
-          CHECK_FATAL(((c >= '0') && (c <= '9')), "Inline asm : invalid register constraint number");
-          auto val = static_cast<uint32>(c - '0');
-          if (asmStr[i + 1] >= '0' && asmStr[i + 1] <= '9') {
-            val = val * kDecimalMax + static_cast<uint32>(asmStr[++i] - '0');
-          }
-          regno_t regno;
-          bool isAddr = false;
-          if (val < outOpnds.size()) {
-            RegOperand *opnd = outOpnds[val];
-            regno = opnd->GetRegisterNumber();
-            isAddr = IsMemAccess(list6.stringList[val]->GetComment().c_str()[0]);
-          } else {
-            val -= static_cast<uint32>(outOpnds.size());
-            CHECK_FATAL(val < inOpnds.size(), "Inline asm : invalid register constraint number");
-            RegOperand *opnd = inOpnds[val];
-            regno = opnd->GetRegisterNumber();
-            isAddr = IsMemAccess(list7.stringList[val]->GetComment().c_str()[0]);
-          }
-          c = asmStr[++i];
-          CHECK_FATAL(c == ':', "Parsing error in inline asm string during emit");
-          c = asmStr[++i];
-          std::string prefix(1, c);
-          if (c == 'a' || isAddr) {
-            prefix = "[x";
-          }
-          EmitRegister(prefix.c_str(), true, regno, false);
-          c = asmStr[++i];
-          CHECK_FATAL(c == '}', "Parsing error in inline asm string during emit");
-        }
-        break;
-      }
-      case '\n': {
-        stringToEmit += "\n\t";
-        break;
-      }
-      default:
-        stringToEmit += asmStr[i];
-        sidx++;
-    }
-  }
-  emitter.Emit(stringToEmit);
-  emitter.Emit("\n\t//Inline asm end\n");
-}
-
-void AArch64Insn::EmitClinit(const CG &cg, Emitter &emitter) const {
-  /*
-   * adrp x3, __muid_data_undef_tab$$GetBoolean_dex+144
-   * ldr x3, [x3, #:lo12:__muid_data_undef_tab$$GetBoolean_dex+144]
-   * or,
-   * adrp x3, _PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B
-   * ldr x3, [x3, #:lo12:_PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B]
-   *
-   * ldr x3, [x3,#112]
-   * ldr wzr, [x3]
-   */
-  const AArch64MD *md = &AArch64CG::kMd[MOP_clinit];
-
-  Operand *opnd0 = opnds[0];
-  Operand *opnd1 = opnds[1];
-  OpndProp *prop0 = md->operand[0];
-  auto *stImmOpnd = static_cast<StImmOperand*>(opnd1);
-  CHECK_FATAL(stImmOpnd != nullptr, "stImmOpnd is null in AArch64Insn::EmitClinit");
-  /* emit nop for breakpoint */
-  if (cg.GetCGOptions().WithDwarf()) {
-    emitter.Emit("\t").Emit("nop").Emit("\n");
-  }
-
-  if (stImmOpnd->GetSymbol()->IsMuidDataUndefTab()) {
-    /* emit adrp */
-    emitter.Emit("\t").Emit("adrp").Emit("\t");
-    opnd0->Emit(emitter, prop0);
-    emitter.Emit(",");
-    emitter.Emit(stImmOpnd->GetName());
-    emitter.Emit("+").Emit(stImmOpnd->GetOffset());
-    emitter.Emit("\n");
-    /* emit ldr */
-    emitter.Emit("\t").Emit("ldr").Emit("\t");
-    opnd0->Emit(emitter, prop0);
-    emitter.Emit(",");
-    emitter.Emit("[");
-    opnd0->Emit(emitter, prop0);
-    emitter.Emit(",");
-    emitter.Emit("#");
-    emitter.Emit(":lo12:").Emit(stImmOpnd->GetName());
-    emitter.Emit("+").Emit(stImmOpnd->GetOffset());
-    emitter.Emit("]");
-    emitter.Emit("\n");
-  } else {
-    /* adrp x3, _PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B */
-    emitter.Emit("\tadrp\t");
-    opnd0->Emit(emitter, prop0);
-    emitter.Emit(",");
-    (void)emitter.Emit(namemangler::kPtrPrefixStr + stImmOpnd->GetName());
-    emitter.Emit("\n");
-
-    /* ldr x3, [x3, #:lo12:_PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B] */
-    emitter.Emit("\tldr\t");
-    opnd0->Emit(emitter, prop0);
-    emitter.Emit(", [");
-    opnd0->Emit(emitter, prop0);
-    emitter.Emit(", #:lo12:");
-    (void)emitter.Emit(namemangler::kPtrPrefixStr + stImmOpnd->GetName());
-    emitter.Emit("]\n");
-  }
-  /* emit "ldr x0,[x0,#48]" */
-  emitter.Emit("\t").Emit("ldr").Emit("\t");
-  opnd0->Emit(emitter, prop0);
-  emitter.Emit(",");
-  emitter.Emit("[");
-  opnd0->Emit(emitter, prop0);
-  emitter.Emit(",#");
-  emitter.Emit(static_cast<int64>(ClassMetadata::OffsetOfInitState()));
-  emitter.Emit("]");
-  emitter.Emit("\n");
-
-  /* emit "ldr xzr, [x0]" */
-  emitter.Emit("\t").Emit("ldr\txzr, [");
-  opnd0->Emit(emitter, prop0);
-  emitter.Emit("]\n");
-}
-
-void AArch64Insn::EmitAdrpLdr(const CG &cg, Emitter &emitter) const {
-  /*
-   * adrp xd, _PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B
-   * ldr xd, [xd, #:lo12:_PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B]
-   */
-  const AArch64MD *md = &AArch64CG::kMd[MOP_adrp_ldr];
-
-  Operand *opnd0 = opnds[0];
-  Operand *opnd1 = opnds[1];
-  OpndProp *prop0 = md->operand[0];
-  auto *stImmOpnd = static_cast<StImmOperand*>(opnd1);
-  CHECK_FATAL(stImmOpnd != nullptr, "stImmOpnd is null in AArch64Insn::EmitAdrpLdr");
-  /* emit nop for breakpoint */
-  if (cg.GetCGOptions().WithDwarf()) {
-    emitter.Emit("\t").Emit("nop").Emit("\n");
-  }
-
-  /* adrp xd, _PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B */
-  emitter.Emit("\t").Emit("adrp").Emit("\t");
-  opnd0->Emit(emitter, prop0);
-  emitter.Emit(", ");
-  emitter.Emit(stImmOpnd->GetName());
-  if (stImmOpnd->GetOffset() != 0) {
-    emitter.Emit("+").Emit(stImmOpnd->GetOffset());
-  }
-  emitter.Emit("\n");
-
-  /* ldr xd, [xd, #:lo12:_PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B] */
-  emitter.Emit("\tldr\t");
-  static_cast<AArch64RegOperand*>(opnd0)->SetRefField(true);
-  opnd0->Emit(emitter, prop0);
-  static_cast<AArch64RegOperand*>(opnd0)->SetRefField(false);
-  emitter.Emit(", ");
-  emitter.Emit("[");
-  opnd0->Emit(emitter, prop0);
-  emitter.Emit(",");
-  emitter.Emit("#");
-  emitter.Emit(":lo12:").Emit(stImmOpnd->GetName());
-  if (stImmOpnd->GetOffset() != 0) {
-    emitter.Emit("+").Emit(stImmOpnd->GetOffset());
-  }
-  emitter.Emit("]\n");
-}
-
-void AArch64Insn::EmitAdrpLabel(Emitter &emitter) const {
-  /* adrp xd, label
-   * add xd, xd, #lo12:label
-   */
-  const AArch64MD *md = &AArch64CG::kMd[MOP_adrp_label];
-
-  Operand *opnd0 = opnds[0];
-  Operand *opnd1 = opnds[1];
-  OpndProp *prop0 = static_cast<OpndProp*>(md->operand[0]);
-  auto lidx = static_cast<ImmOperand*>(opnd1)->GetValue();
-
-  /* adrp xd, label */
-  emitter.Emit("\t").Emit("adrp").Emit("\t");
-  opnd0->Emit(emitter, prop0);
-  emitter.Emit(", ");
-  const char *idx;
-  idx = strdup(std::to_string(Globals::GetInstance()->GetBECommon()->GetMIRModule().CurFunction()->GetPuidx()).c_str());
-  emitter.Emit(".L.").Emit(idx).Emit("__").Emit(lidx).Emit("\n");
-
-  /* add xd, xd, #lo12:label */
-  emitter.Emit("\tadd\t");
-  opnd0->Emit(emitter, prop0);
-  emitter.Emit(", ");
-  opnd0->Emit(emitter, prop0);
-  emitter.Emit(", ");
-  emitter.Emit(":lo12:").Emit(".L.").Emit(idx).Emit("__").Emit(lidx).Emit("\n");
-  emitter.Emit("\n");
-}
-
-void AArch64Insn::EmitLazyBindingRoutine(Emitter &emitter) const {
-  /* ldr xzr, [xs] */
-  const AArch64MD *md = &AArch64CG::kMd[MOP_adrp_ldr];
-
-  Operand *opnd0 = opnds[0];
-  OpndProp *prop0 = md->operand[0];
-
-  /* emit "ldr xzr,[xs]" */
-#ifdef USE_32BIT_REF
-  emitter.Emit("\t").Emit("ldr").Emit("\twzr, [");
-#else
-  emitter.Emit("\t").Emit("ldr").Emit("\txzr, [");
-#endif /* USE_32BIT_REF */
-  opnd0->Emit(emitter, prop0);
-  emitter.Emit("]");
-  emitter.Emit("\t// Lazy binding\n");
-}
-
-void AArch64Insn::EmitClinitTail(Emitter &emitter) const {
-  /*
-   * ldr x17, [xs, #112]
-   * ldr wzr, [x17]
-   */
-  const AArch64MD *md = &AArch64CG::kMd[MOP_clinit_tail];
-
-  Operand *opnd0 = opnds[0];
-  OpndProp *prop0 = md->operand[0];
-
-  /* emit "ldr x17,[xs,#112]" */
-  emitter.Emit("\t").Emit("ldr").Emit("\tx17, [");
-  opnd0->Emit(emitter, prop0);
-  emitter.Emit(", #");
-  emitter.Emit(static_cast<int64>(ClassMetadata::OffsetOfInitState()));
-  emitter.Emit("]");
-  emitter.Emit("\n");
-
-  /* emit "ldr xzr, [x17]" */
-  emitter.Emit("\t").Emit("ldr\txzr, [x17]\n");
-}
wd, [xd]" */ - emitter.Emit("\t").Emit("ldr\t"); - opnd0->Emit(emitter, prop0); - emitter.Emit(", ["); - opnd0->Emit(emitter, prop1); - emitter.Emit("]\t// lazy load.\n"); -} - -void AArch64Insn::EmitLazyLoadStatic(Emitter &emitter) const { - /* adrp xd, :got:__staticDecoupleValueOffset$$xxx+offset - * ldr wd, [xd, #:got_lo12:__staticDecoupleValueOffset$$xxx+offset] - * ldr wzr, [xd] - */ - const AArch64MD *md = &AArch64CG::kMd[MOP_lazy_ldr_static]; - - Operand *opnd0 = opnds[0]; - Operand *opnd1 = opnds[1]; - AArch64OpndProp *prop0 = md->GetOperand(0); - auto *stImmOpnd = static_cast(opnd1); - CHECK_FATAL(stImmOpnd != nullptr, "stImmOpnd is null in AArch64Insn::EmitLazyLoadStatic"); - - /* emit "adrp xd, :got:__staticDecoupleValueOffset$$xxx+offset" */ - emitter.Emit("\t").Emit("adrp").Emit("\t"); - opnd0->Emit(emitter, prop0); - emitter.Emit(", "); - emitter.Emit(stImmOpnd->GetName()); - if (stImmOpnd->GetOffset() != 0) { - emitter.Emit("+").Emit(stImmOpnd->GetOffset()); - } - emitter.Emit("\t// lazy load static.\n"); - - /* emit "ldr wd, [xd, #:got_lo12:__staticDecoupleValueOffset$$xxx+offset]" */ - emitter.Emit("\tldr\t"); - static_cast(opnd0)->SetRefField(true); -#ifdef USE_32BIT_REF - AArch64OpndProp prop2(prop0->GetOperandType(), prop0->GetRegProp(), prop0->GetSize() / 2); - opnd0->Emit(emitter, &prop2); /* ldr wd, ... for emui */ -#else - opnd0->Emit(emitter, prop0); /* ldr xd, ... for qemu */ -#endif /* USE_32BIT_REF */ - static_cast(opnd0)->SetRefField(false); - emitter.Emit(", "); - emitter.Emit("["); - opnd0->Emit(emitter, prop0); - emitter.Emit(","); - emitter.Emit("#"); - emitter.Emit(":lo12:").Emit(stImmOpnd->GetName()); - if (stImmOpnd->GetOffset() != 0) { - emitter.Emit("+").Emit(stImmOpnd->GetOffset()); - } - emitter.Emit("]\t// lazy load static.\n"); - - /* emit "ldr wzr, [xd]" */ - emitter.Emit("\t").Emit("ldr\twzr, ["); - opnd0->Emit(emitter, prop0); - emitter.Emit("]\t// lazy load static.\n"); -} - -void AArch64Insn::EmitArrayClassCacheLoad(Emitter &emitter) const { - /* adrp xd, :got:__arrayClassCacheTable$$xxx+offset - * ldr wd, [xd, #:got_lo12:__arrayClassCacheTable$$xxx+offset] - * ldr wzr, [xd] - */ - const AArch64MD *md = &AArch64CG::kMd[MOP_arrayclass_cache_ldr]; - uint32 opndIndex = 0; - uint32 propIndex = 0; - Operand *opnd0 = opnds[opndIndex++]; - Operand *opnd1 = opnds[opndIndex++]; - AArch64OpndProp *prop0 = md->GetOperand(static_cast(propIndex++)); - auto *stImmOpnd = static_cast(opnd1); - CHECK_FATAL(stImmOpnd != nullptr, "stImmOpnd is null in AArch64Insn::EmitLazyLoadStatic"); - - /* emit "adrp xd, :got:__arrayClassCacheTable$$xxx+offset" */ - emitter.Emit("\t").Emit("adrp").Emit("\t"); - opnd0->Emit(emitter, prop0); - emitter.Emit(", "); - emitter.Emit(stImmOpnd->GetName()); - if (stImmOpnd->GetOffset() != 0) { - emitter.Emit("+").Emit(stImmOpnd->GetOffset()); - } - emitter.Emit("\t// load array class.\n"); - - /* emit "ldr wd, [xd, #:got_lo12:__arrayClassCacheTable$$xxx+offset]" */ - emitter.Emit("\tldr\t"); - static_cast(opnd0)->SetRefField(true); -#ifdef USE_32BIT_REF - AArch64OpndProp prop2(prop0->GetOperandType(), prop0->GetRegProp(), prop0->GetSize() / 2); - opnd0->Emit(emitter, &prop2); /* ldr wd, ... for emui */ -#else - opnd0->Emit(emitter, prop0); /* ldr xd, ... 
for qemu */ -#endif /* USE_32BIT_REF */ - static_cast(opnd0)->SetRefField(false); - emitter.Emit(", "); - emitter.Emit("["); - opnd0->Emit(emitter, prop0); - emitter.Emit(","); - emitter.Emit("#"); - emitter.Emit(":lo12:").Emit(stImmOpnd->GetName()); - if (stImmOpnd->GetOffset() != 0) { - emitter.Emit("+").Emit(stImmOpnd->GetOffset()); - } - emitter.Emit("]\t// load array class.\n"); - - /* emit "ldr wzr, [xd]" */ - emitter.Emit("\t").Emit("ldr\twzr, ["); - opnd0->Emit(emitter, prop0); - emitter.Emit("]\t// check resolve array class.\n"); -} - -void AArch64Insn::EmitCheckThrowPendingException(const CG& cg, Emitter &emitter) const { - /* - * mrs x16, TPIDR_EL0 - * ldr x16, [x16, #64] - * ldr x16, [x16, #8] - * cbz x16, .lnoexception - * bl MCC_ThrowPendingException - * .lnoexception: - */ - emitter.Emit("\t").Emit("mrs").Emit("\tx16, TPIDR_EL0"); - emitter.Emit("\n"); - emitter.Emit("\t").Emit("ldr").Emit("\tx16, [x16, #64]"); - emitter.Emit("\n"); - emitter.Emit("\t").Emit("ldr").Emit("\tx16, [x16, #8]"); - emitter.Emit("\n"); - emitter.Emit("\t").Emit("cbz").Emit("\tx16, .lnoeh.").Emit(cg.GetCurCGFunc()->GetName()); - emitter.Emit("\n"); - emitter.Emit("\t").Emit("bl").Emit("\tMCC_ThrowPendingException"); - emitter.Emit("\n"); - emitter.Emit(".lnoeh.").Emit(cg.GetCurCGFunc()->GetName()).Emit(":"); - emitter.Emit("\n"); -} - -void AArch64Insn::PrepareVectorOperand(AArch64RegOperand *regOpnd, uint32 &compositeOpnds) const { +void AArch64Insn::PrepareVectorOperand(RegOperand *regOpnd, uint32 &compositeOpnds) const { AArch64Insn *insn = const_cast(this); VectorRegSpec* vecSpec = static_cast(insn)->GetAndRemoveRegSpecFromList(); compositeOpnds = vecSpec->compositeOpnds ? vecSpec->compositeOpnds : compositeOpnds; @@ -1109,214 +74,6 @@ void AArch64Insn::PrepareVectorOperand(AArch64RegOperand *regOpnd, uint32 &compo } } -void AArch64Insn::Emit(const CG &cg, Emitter &emitter) const { - emitter.SetCurrentMOP(mOp); - const AArch64MD *md = &AArch64CG::kMd[mOp]; - - if (!cg.GenerateVerboseAsm() && !cg.GenerateVerboseCG() && mOp == MOP_comment) { - return; - } - - switch (mOp) { - case MOP_clinit: { - EmitClinit(cg, emitter); - emitter.IncreaseJavaInsnCount(kClinitInsnCount); - return; - } - case MOP_adrp_ldr: { - emitter.IncreaseJavaInsnCount(kAdrpLdrInsnCount); - EmitAdrpLdr(cg, emitter); - if (CGOptions::IsLazyBinding() && !cg.IsLibcore()) { - EmitLazyBindingRoutine(emitter); - emitter.IncreaseJavaInsnCount(kLazyBindingRoutineInsnCount + kAdrpLdrInsnCount); - } - return; - } - case MOP_counter: { - EmitCounter(cg, emitter); - return; - } - case MOP_asm: { - EmitInlineAsm(cg, emitter); - return; - } - case MOP_clinit_tail: { - EmitClinitTail(emitter); - emitter.IncreaseJavaInsnCount(kClinitTailInsnCount); - return; - } - case MOP_lazy_ldr: { - EmitLazyLoad(emitter); - emitter.IncreaseJavaInsnCount(kLazyLdrInsnCount); - return; - } - case MOP_adrp_label: { - EmitAdrpLabel(emitter); - return; - } - case MOP_lazy_tail: { - /* No need to emit this pseudo instruction. 
*/ - return; - } - case MOP_lazy_ldr_static: { - EmitLazyLoadStatic(emitter); - emitter.IncreaseJavaInsnCount(kLazyLdrStaticInsnCount); - return; - } - case MOP_arrayclass_cache_ldr: { - EmitArrayClassCacheLoad(emitter); - emitter.IncreaseJavaInsnCount(kArrayClassCacheLoadCount); - return; - } - case MOP_get_and_addI: - case MOP_get_and_addL: { - EmitGetAndAddInt(emitter); - return; - } - case MOP_get_and_setI: - case MOP_get_and_setL: { - EmitGetAndSetInt(emitter); - return; - } - case MOP_compare_and_swapI: - case MOP_compare_and_swapL: { - EmitCompareAndSwapInt(emitter); - return; - } - case MOP_string_indexof: { - EmitStringIndexOf(emitter); - return; - } - case MOP_pseudo_none: { - return; - } - default: - break; - } - - if (CGOptions::IsNativeOpt() && mOp == MOP_xbl) { - auto *nameOpnd = static_cast(opnds[0]); - if (nameOpnd->GetName() == "MCC_CheckThrowPendingException") { - EmitCheckThrowPendingException(cg, emitter); - emitter.IncreaseJavaInsnCount(kCheckThrowPendingExceptionInsnCount); - return; - } - } - - std::string format(md->format); - emitter.Emit("\t").Emit(md->name).Emit("\t"); - size_t opndSize = GetOperandSize(); - std::vector seq(opndSize, -1); - std::vector prefix(opndSize); /* used for print prefix like "*" in icall *rax */ - uint32 index = 0; - uint32 commaNum = 0; - for (uint32 i = 0; i < format.length(); ++i) { - char c = format[i]; - if (c >= '0' && c <= '5') { - seq[index++] = c - '0'; - ++commaNum; - } else if (c != ',') { - prefix[index].push_back(c); - } - } - - bool isRefField = (opndSize == 0) ? false : CheckRefField(static_cast(static_cast(seq[0])), true); - if (mOp != MOP_comment) { - emitter.IncreaseJavaInsnCount(); - } - uint32 compositeOpnds = 0; - for (uint32 i = 0; i < commaNum; ++i) { - if (seq[i] == -1) { - continue; - } - if (prefix[i].length() > 0) { - emitter.Emit(prefix[i]); - } - if (emitter.NeedToDealWithHugeSo() && (mOp == MOP_xbl || mOp == MOP_tail_call_opt_xbl)) { - auto *nameOpnd = static_cast(opnds[0]); - /* Suport huge so here - * As the PLT section is just before java_text section, when java_text section is larger - * then 128M, instrunction of "b" and "bl" would fault to branch to PLT stub functions. Here, to save - * instuctions space, we change the branch target to a local target within 120M address, and add non-plt - * call to the target function. 
- */ - emitter.InsertHugeSoTarget(nameOpnd->GetName()); - emitter.Emit(nameOpnd->GetName() + emitter.HugeSoPostFix()); - break; - } - AArch64RegOperand *regOpnd = static_cast(opnds[static_cast(seq[i])]); - if (regOpnd != nullptr && - static_cast(md->operand[static_cast(seq[i])])->IsVectorOperand()) { - regOpnd->SetVecLanePosition(-1); - regOpnd->SetVecLaneSize(0); - regOpnd->SetVecElementSize(0); - if (IsVectorOp()) { - PrepareVectorOperand(regOpnd, compositeOpnds); - if (compositeOpnds != 0) { - emitter.Emit("{"); - } - } - } - opnds[seq[i]]->Emit(emitter, md->operand[seq[i]]); - if (compositeOpnds == 1) { - emitter.Emit("}"); - } - if (compositeOpnds > 0) { - --compositeOpnds; - } - /* reset opnd0 ref-field flag, so following instruction has correct register */ - if (isRefField && (i == 0)) { - static_cast(opnds[seq[0]])->SetRefField(false); - } - /* Temporary comment the label:.Label.debug.callee */ - if (i != (commaNum - 1)) { - emitter.Emit(", "); - } - const uint32 commaNumForEmitLazy = 2; - if (!CGOptions::IsLazyBinding() || cg.IsLibcore() || (mOp != MOP_wldr && mOp != MOP_xldr) || - commaNum != commaNumForEmitLazy || i != 1 || !opnds[seq[1]]->IsMemoryAccessOperand()) { - continue; - } - /* - * Only check the last operand of ldr in lo12 mode. - * Check the second operand, if it's [AArch64MemOperand::kAddrModeLo12Li] - */ - auto *memOpnd = static_cast(opnds[seq[1]]); - if (memOpnd == nullptr || memOpnd->GetAddrMode() != AArch64MemOperand::kAddrModeLo12Li) { - continue; - } - const MIRSymbol *sym = memOpnd->GetSymbol(); - if (sym->IsMuidFuncDefTab() || sym->IsMuidFuncUndefTab() || - sym->IsMuidDataDefTab() || sym->IsMuidDataUndefTab()) { - emitter.Emit("\n"); - EmitLazyBindingRoutine(emitter); - emitter.IncreaseJavaInsnCount(kLazyBindingRoutineInsnCount); - } - } - if (cg.GenerateVerboseCG() || (cg.GenerateVerboseAsm() && mOp == MOP_comment)) { - const char *comment = GetComment().c_str(); - if (comment != nullptr && strlen(comment) > 0) { - (void)emitter.Emit("\t\t// ").Emit(comment); - } - } - - emitter.Emit("\n"); -} - -/* set opnd0 ref-field flag, so we can emit the right register */ -bool AArch64Insn::CheckRefField(size_t opndIndex, bool isEmit) const { - if (IsAccessRefField() && AccessMem()) { - Operand *opnd0 = opnds[opndIndex]; - if (opnd0->IsRegister()) { - if (isEmit) { - static_cast(opnd0)->SetRefField(true); - } - return true; - } - } - return false; -} - uint8 AArch64Insn::GetLoadStoreSize() const { if (IsLoadStorePair()) { return k16ByteSize; @@ -1324,41 +81,49 @@ uint8 AArch64Insn::GetLoadStoreSize() const { /* These are the loads and stores possible from PickLdStInsn() */ switch (mOp) { case MOP_wldarb: + case MOP_wldxrb: case MOP_wldaxrb: case MOP_wldrb: case MOP_wldrsb: case MOP_xldrsb: case MOP_wstrb: case MOP_wstlrb: + case MOP_wstxrb: case MOP_wstlxrb: return k1ByteSize; case MOP_wldrh: case MOP_wldarh: + case MOP_wldxrh: case MOP_wldaxrh: case MOP_wldrsh: case MOP_xldrsh: case MOP_wstrh: case MOP_wstlrh: + case MOP_wstxrh: case MOP_wstlxrh: return k2ByteSize; case MOP_sldr: case MOP_wldr: + case MOP_wldxr: case MOP_wldar: case MOP_wldaxr: case MOP_sstr: case MOP_wstr: + case MOP_wstxr: case MOP_wstlr: case MOP_wstlxr: case MOP_xldrsw: return k4ByteSize; case MOP_dstr: case MOP_xstr: + case MOP_xstxr: case MOP_xstlr: case MOP_xstlxr: case MOP_wstp: case MOP_sstp: case MOP_dldr: case MOP_xldr: + case MOP_xldxr: case MOP_xldar: case MOP_xldaxr: case MOP_wldp: @@ -1482,7 +247,7 @@ bool AArch64Insn::IsRegDefOrUse(regno_t regNO) const { } } } else if 
@@ -1482,7 +247,7 @@ bool AArch64Insn::IsRegDefOrUse(regno_t regNO) const {
         }
       }
     } else if (opnd.IsMemoryAccessOperand()) {
-      auto &memOpnd = static_cast<AArch64MemOperand&>(opnd);
+      auto &memOpnd = static_cast<MemOperand&>(opnd);
       RegOperand *base = memOpnd.GetBaseRegister();
       RegOperand *index = memOpnd.GetIndexRegister();
       if ((base != nullptr && base->GetRegisterNumber() == regNO) ||
@@ -1512,21 +277,21 @@ std::set<regno_t> AArch64Insn::GetDefRegs() const {
   const AArch64MD *md = &AArch64CG::kMd[mOp];
   for (uint32 i = 0; i < opndNum; ++i) {
     Operand &opnd = GetOperand(i);
-    AArch64OpndProp *regProp = static_cast<AArch64OpndProp*>(md->operand[i]);
+    OpndProp *regProp = md->operand[i];
     bool isDef = regProp->IsDef();
     if (!isDef && !opnd.IsMemoryAccessOperand()) {
       continue;
     }
     if (opnd.IsList()) {
-      for (auto *op : static_cast<AArch64ListOperand&>(opnd).GetOperands()) {
+      for (auto *op : static_cast<ListOperand&>(opnd).GetOperands()) {
         ASSERT(op != nullptr, "invalid operand in list operand");
         defRegNOs.emplace(op->GetRegisterNumber());
       }
     } else if (opnd.IsMemoryAccessOperand()) {
-      auto &memOpnd = static_cast<AArch64MemOperand&>(opnd);
+      auto &memOpnd = static_cast<MemOperand&>(opnd);
       RegOperand *base = memOpnd.GetBaseRegister();
       if (base != nullptr) {
-        if (memOpnd.GetAddrMode() == AArch64MemOperand::kAddrModeBOi &&
+        if (memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi &&
             (memOpnd.IsPostIndexed() || memOpnd.IsPreIndexed())) {
           ASSERT(!defRegNOs.count(base->GetRegisterNumber()), "duplicate def in one insn");
           defRegNOs.emplace(base->GetRegisterNumber());
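GetDefRegs above counts the base register of a pre/post-indexed access as a definition, because writeback addressing updates the base. An illustrative summary of the rule (hypothetical helper, not the MapleCC API):

```cpp
// Writeback addressing on AArch64:
//   ldr x0, [x1], #8    // post-indexed: defines x0 and x1 (x1 += 8 afterwards)
//   ldr x0, [x1, #8]!   // pre-indexed:  defines x0 and x1 (x1 += 8 first)
//   ldr x0, [x1, #8]    // base-offset:  defines x0 only
struct MemRef {
  int baseRegNo;
  bool preIndexed;
  bool postIndexed;
};

inline bool BaseIsAlsoDef(const MemRef &mem) {
  return mem.preIndexed || mem.postIndexed;  // writeback modifies the base register
}
```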
<< ")"; } if (IsVectorOp()) { - const AArch64VectorInsn *vInsn = static_cast(this); + auto *vInsn = static_cast(this); if (vInsn->GetNumOfRegSpec() != 0) { LogInfo::MapleLogger() << " (vecSpec: " << vInsn->GetNumOfRegSpec() << ")"; } @@ -1998,8 +763,526 @@ bool AArch64Insn::IsDefinition() const { } bool AArch64Insn::IsDestRegAlsoSrcReg() const { - auto *prop0 = static_cast(AArch64CG::kMd[mOp].GetOperand(0)); - ASSERT(prop0 != nullptr, "expect a AArch64OpndProp"); + auto *prop0 = (AArch64CG::kMd[mOp].GetOperand(0)); + ASSERT(prop0 != nullptr, "expect a OpndProp"); return prop0->IsRegDef() && prop0->IsRegUse(); } + +void A64OpndEmitVisitor::EmitIntReg(RegOperand &v, uint8 opndSz) { + CHECK_FATAL(v.GetRegisterType() == kRegTyInt, "wrong Type"); + uint8 opndSize = (opndSz == kMaxSimm32) ? v.GetSize() : opndSz; + ASSERT((opndSize == k32BitSize || opndSize == k64BitSize), "illegal register size"); +#ifdef USE_32BIT_REF + bool r32 = (opndSize == k32BitSize) || isRefField; +#else + bool r32 = (opndSize == k32BitSize); +#endif /* USE_32BIT_REF */ + emitter.Emit(AArch64CG::intRegNames[(r32 ? AArch64CG::kR32List : AArch64CG::kR64List)][v.GetRegisterNumber()]); +} + +void A64OpndEmitVisitor::Visit(maplebe::RegOperand *v) { + ASSERT(opndProp == nullptr || opndProp->IsRegister(), + "operand type doesn't match"); + uint32 size = v->GetSize(); + regno_t regNO = v->GetRegisterNumber(); + uint8 opndSize = (opndProp != nullptr) ? opndProp->GetSize() : size; + switch (v->GetRegisterType()) { + case kRegTyInt: { + EmitIntReg(*v, opndSize); + break; + } + case kRegTyFloat: { + ASSERT((opndSize == k8BitSize || opndSize == k16BitSize || opndSize == k32BitSize || + opndSize == k64BitSize || opndSize == k128BitSize), "illegal register size"); + if (opndProp->IsVectorOperand() && v->GetVecLaneSize() != 0) { + EmitVectorOperand(*v); + } else { + /* FP reg cannot be reffield. 8~0, 16~1, 32~2, 64~3. 8 is 1000b, has 3 zero. */ + uint32 regSet = __builtin_ctz(opndSize) - 3; + emitter.Emit(AArch64CG::intRegNames[regSet][regNO]); + } + break; + } + default: + ASSERT(false, "NYI"); + break; + } +} + +void A64OpndEmitVisitor::Visit(maplebe::ImmOperand *v) { + if (v->IsOfstImmediate()) { + return Visit(static_cast(v)); + } + + int64 value = v->GetValue(); + if (!v->IsFmov()) { + emitter.Emit((opndProp != nullptr && opndProp->IsLoadLiteral()) ? "=" : "#") + .Emit((v->GetSize() == k64BitSize) ? value : static_cast(static_cast(value))); + return; + } + /* + * compute float value + * use top 4 bits expect MSB of value . then calculate its fourth power + */ + int32 exp = (((static_cast(value) & 0x70) >> 4) ^ 0x4) - 3; + /* use the lower four bits of value in this expression */ + const float mantissa = 1.0 + (static_cast(static_cast(value) & 0xf) / 16.0); + float result = std::pow(2, exp) * mantissa; + + std::stringstream ss; + ss << std::setprecision(10) << result; + std::string res; + ss >> res; + size_t dot = res.find('.'); + if (dot == std::string::npos) { + res += ".0"; + dot = res.find('.'); + CHECK_FATAL(dot != std::string::npos, "cannot find in string"); + } + res.erase(dot, 1); + std::string integer(res, 0, 1); + std::string fraction(res, 1); + while (fraction.size() != 1 && fraction[fraction.size() - 1] == '0') { + fraction.pop_back(); + } + /* fetch the sign bit of this value */ + std::string sign = static_cast(value) & 0x80 ? "-" : ""; + emitter.Emit(sign + integer + "." 
+ fraction + "e+").Emit(dot - 1); +} + +void A64OpndEmitVisitor::Visit(maplebe::MemOperand *v) { + auto a64v = static_cast(v); + MemOperand::AArch64AddressingMode addressMode = a64v->GetAddrMode(); +#if DEBUG + const AArch64MD *md = &AArch64CG::kMd[emitter.GetCurrentMOP()]; + bool isLDSTpair = md->IsLoadStorePair(); + ASSERT(md->Is64Bit() || md->GetOperandSize() <= k32BitSize || md->GetOperandSize() == k128BitSize, + "unexpected opnd size"); +#endif + if (addressMode == MemOperand::kAddrModeBOi) { + emitter.Emit("["); + auto *baseReg = v->GetBaseRegister(); + ASSERT(baseReg != nullptr, "expect an RegOperand here"); + uint32 baseSize = baseReg->GetSize(); + if (baseSize != k64BitSize) { + baseReg->SetSize(k64BitSize); + } + EmitIntReg(*baseReg); + baseReg->SetSize(baseSize); + OfstOperand *offset = a64v->GetOffsetImmediate(); + if (offset != nullptr) { +#ifndef USE_32BIT_REF /* can be load a ref here */ + /* + * Cortex-A57 Software Optimization Guide: + * The ARMv8-A architecture allows many types of load and store accesses to be arbitrarily aligned. + * The Cortex- A57 processor handles most unaligned accesses without performance penalties. + */ +#if DEBUG + if (a64v->IsOffsetMisaligned(md->GetOperandSize())) { + INFO(kLncInfo, "The Memory operand's offset is misaligned:", ""); + LogInfo::MapleLogger() << "==="; + A64OpndDumpVisitor visitor; + v->Accept(visitor); + LogInfo::MapleLogger() << "===\n"; + } +#endif +#endif /* USE_32BIT_REF */ + if (a64v->IsPostIndexed()) { + ASSERT(!a64v->IsSIMMOffsetOutOfRange(offset->GetOffsetValue(), md->Is64Bit(), isLDSTpair), + "should not be SIMMOffsetOutOfRange"); + emitter.Emit("]"); + if (!offset->IsZero()) { + emitter.Emit(", "); + Visit(offset); + } + } else if (a64v->IsPreIndexed()) { + ASSERT(!a64v->IsSIMMOffsetOutOfRange(offset->GetOffsetValue(), md->Is64Bit(), isLDSTpair), + "should not be SIMMOffsetOutOfRange"); + if (!offset->IsZero()) { + emitter.Emit(","); + Visit(offset); + } + emitter.Emit("]!"); + } else { + if (CGOptions::IsPIC() && (offset->IsSymOffset() || offset->IsSymAndImmOffset()) && + (offset->GetSymbol()->NeedPIC() || offset->GetSymbol()->IsThreadLocal())) { + std::string gotEntry = offset->GetSymbol()->IsThreadLocal() ? 
", #:tlsdesc_lo12:" : ", #:got_lo12:"; + emitter.Emit(gotEntry + offset->GetSymbolName()); + } else { + uint32 dsize = v->GetSize(); + if (v->GetSize() > k8BitSize) { + dsize = static_cast(RoundUp(v->GetSize(), k8BitSize)); + } + if (!offset->IsZero()) { + emitter.Emit(","); + Visit(offset); + } + } + emitter.Emit("]"); + } + } else { + emitter.Emit("]"); + } + } else if (addressMode == MemOperand::kAddrModeBOrX) { + /* + * Base plus offset | [base{, #imm}] [base, Xm{, LSL #imm}] [base, Wm, (S|U)XTW {#imm}] + * offset_opnds=nullptr + * offset_opnds=64 offset_opnds=32 + * imm=0 or 3 imm=0 or 2, s/u + */ + emitter.Emit("["); + auto *baseReg = v->GetBaseRegister(); + // After ssa version support different size, the value is changed back + baseReg->SetSize(k64BitSize); + + EmitIntReg(*baseReg); + emitter.Emit(","); + EmitIntReg(*a64v->GetIndexRegister()); + if (a64v->ShouldEmitExtend() || v->GetBaseRegister()->GetSize() > a64v->GetIndexRegister()->GetSize()) { + emitter.Emit(","); + /* extend, #0, of #3/#2 */ + emitter.Emit(a64v->GetExtendAsString()); + if (a64v->GetExtendAsString() == "LSL" || a64v->ShiftAmount() != 0) { + emitter.Emit(" #"); + emitter.Emit(a64v->ShiftAmount()); + } + } + emitter.Emit("]"); + } else if (addressMode == MemOperand::kAddrModeLiteral) { + CHECK_FATAL(opndProp != nullptr, "prop is nullptr in MemOperand::Emit"); + if (opndProp->IsMemLow12()) { + emitter.Emit("#:lo12:"); + } + emitter.Emit(v->GetSymbol()->GetName()); + } else if (addressMode == MemOperand::kAddrModeLo12Li) { + emitter.Emit("["); + EmitIntReg(*v->GetBaseRegister()); + + OfstOperand *offset = a64v->GetOffsetImmediate(); + ASSERT(offset != nullptr, "nullptr check"); + + emitter.Emit(", #:lo12:"); + if (v->GetSymbol()->GetStorageClass() == kScPstatic && v->GetSymbol()->IsLocal()) { + PUIdx pIdx = emitter.GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + emitter.Emit(a64v->GetSymbolName() + std::to_string(pIdx)); + } else { + emitter.Emit(a64v->GetSymbolName()); + } + if (!offset->IsZero()) { + emitter.Emit("+"); + emitter.Emit(std::to_string(offset->GetOffsetValue())); + } + emitter.Emit("]"); + } else { + ASSERT(false, "nyi"); + } +} + +void A64OpndEmitVisitor::Visit(LabelOperand *v) { + emitter.EmitLabelRef(v->GetLabelIndex()); +} + +void A64OpndEmitVisitor::Visit(CondOperand *v) { + emitter.Emit(CondOperand::ccStrs[v->GetCode()]); +} + +void A64OpndEmitVisitor::Visit(ExtendShiftOperand *v) { + ASSERT(v->GetShiftAmount() <= k4BitSize && v->GetShiftAmount() >= 0, + "shift amount out of range in ExtendShiftOperand"); + auto emitExtendShift = [this, v](const std::string &extendKind)->void { + emitter.Emit(extendKind); + if (v->GetShiftAmount()!= 0) { + emitter.Emit(" #").Emit(v->GetShiftAmount()); + } + }; + switch (v->GetExtendOp()) { + case ExtendShiftOperand::kUXTB: + emitExtendShift("UXTB"); + break; + case ExtendShiftOperand::kUXTH: + emitExtendShift("UXTH"); + break; + case ExtendShiftOperand::kUXTW: + emitExtendShift("UXTW"); + break; + case ExtendShiftOperand::kUXTX: + emitExtendShift("UXTX"); + break; + case ExtendShiftOperand::kSXTB: + emitExtendShift("SXTB"); + break; + case ExtendShiftOperand::kSXTH: + emitExtendShift("SXTH"); + break; + case ExtendShiftOperand::kSXTW: + emitExtendShift("SXTW"); + break; + case ExtendShiftOperand::kSXTX: + emitExtendShift("SXTX"); + break; + default: + ASSERT(false, "should not be here"); + break; + } +} + +void A64OpndEmitVisitor::Visit(BitShiftOperand *v) { + emitter.Emit((v->GetShiftOp() == BitShiftOperand::kLSL) ? 
"LSL #" : + ((v->GetShiftOp() == BitShiftOperand::kLSR) ? "LSR #" : "ASR #")).Emit(v->GetShiftAmount()); +} + +void A64OpndEmitVisitor::Visit(StImmOperand *v) { + CHECK_FATAL(opndProp != nullptr, "opndProp is nullptr in StImmOperand::Emit"); + const MIRSymbol *symbol = v->GetSymbol(); + const bool isThreadLocal = symbol->IsThreadLocal(); + const bool isLiteralLow12 = opndProp->IsLiteralLow12(); + const bool hasGotEntry = CGOptions::IsPIC() && symbol->NeedPIC(); + bool hasPrefix = false; + if (isThreadLocal) { + emitter.Emit(":tlsdesc"); + hasPrefix = true; + } + if (!hasPrefix && hasGotEntry) { + emitter.Emit(":got"); + hasPrefix = true; + } + if (isLiteralLow12) { + std::string lo12String = hasPrefix ? "_lo12" : ":lo12"; + emitter.Emit(lo12String); + hasPrefix = true; + } + if (hasPrefix) { + emitter.Emit(":"); + } + if (symbol->GetStorageClass() == kScPstatic && symbol->GetSKind() != kStConst && symbol->IsLocal()) { + emitter.Emit(symbol->GetName() + std::to_string(emitter.GetCG()->GetMIRModule()->CurFunction()->GetPuidx())); + } else { + emitter.Emit(v->GetName()); + } + if (!hasGotEntry && v->GetOffset() != 0) { + emitter.Emit("+" + std::to_string(v->GetOffset())); + } +} + +void A64OpndEmitVisitor::Visit(FuncNameOperand *v) { + emitter.Emit(v->GetName()); +} + +void A64OpndEmitVisitor::Visit(LogicalShiftLeftOperand *v) { + emitter.Emit(" LSL #").Emit(v->GetShiftAmount()); +} + +void A64OpndEmitVisitor::Visit(CommentOperand *v) { + emitter.Emit(v->GetComment()); +} + +void A64OpndEmitVisitor::Visit(ListOperand *v) { + (void)opndProp; + size_t nLeft = v->GetOperands().size(); + if (nLeft == 0) { + return; + } + + for (auto it = v->GetOperands().begin(); it != v->GetOperands().end(); ++it) { + Visit(*it); + if (--nLeft >= 1) { + emitter.Emit(", "); + } + } +} + +void A64OpndEmitVisitor::Visit(OfstOperand *v) { + int64 value = v->GetValue(); + if (v->IsImmOffset()) { + emitter.Emit((opndProp != nullptr && opndProp->IsLoadLiteral()) ? "=" : "#") + .Emit((v->GetSize() == k64BitSize) ? value : static_cast(static_cast(value))); + return; + } + const MIRSymbol *symbol = v->GetSymbol(); + if (CGOptions::IsPIC() && symbol->NeedPIC()) { + emitter.Emit(":got:" + symbol->GetName()); + } else if (symbol->GetStorageClass() == kScPstatic && symbol->GetSKind() != kStConst && symbol->IsLocal()) { + emitter.Emit(symbol->GetName() + std::to_string(emitter.GetCG()->GetMIRModule()->CurFunction()->GetPuidx())); + } else { + emitter.Emit(symbol->GetName()); + } + if (value != 0) { + emitter.Emit("+" + std::to_string(value)); + } +} + +void A64OpndEmitVisitor::EmitVectorOperand(RegOperand &v) { + std::string width; + switch (v.GetVecElementSize()) { + case k8BitSize: + width = "b"; + break; + case k16BitSize: + width = "h"; + break; + case k32BitSize: + width = "s"; + break; + case k64BitSize: + width = "d"; + break; + default: + CHECK_FATAL(false, "unexpected value size for vector element"); + break; + } + emitter.Emit(AArch64CG::vectorRegNames[v.GetRegisterNumber()]); + int32 lanePos = v.GetVecLanePosition(); + if (lanePos == -1) { + emitter.Emit("." + std::to_string(v.GetVecLaneSize()) + width); + } else { + emitter.Emit("." 
+ width + "[" + std::to_string(lanePos) + "]"); + } +} + +void A64OpndDumpVisitor::Visit(RegOperand *v) { + std::array prims = { "U", "R", "V", "C", "X", "Vra" }; + std::array classes = { "[U]", "[I]", "[F]", "[CC]", "[X87]", "[Vra]" }; + uint32 regType = v->GetRegisterType(); + ASSERT(regType < kRegTyLast, "unexpected regType"); + + regno_t reg = v->GetRegisterNumber(); + reg = v->IsVirtualRegister() ? reg : (reg - 1); + uint32 vb = v->GetValidBitsNum(); + LogInfo::MapleLogger() << (v->IsVirtualRegister() ? "vreg:" : " reg:") << prims[regType] << reg << " " << classes[regType]; + if (v->GetValidBitsNum() != v->GetSize()) { + LogInfo::MapleLogger() << " Vb: [" << vb << "]"; + } + LogInfo::MapleLogger() << " Sz: [" << v->GetSize() << "]" ; +} + +void A64OpndDumpVisitor::Visit(ImmOperand *v) { + LogInfo::MapleLogger() << "imm:" << v->GetValue(); +} +void A64OpndDumpVisitor::Visit(MemOperand *a64v) { + LogInfo::MapleLogger() << "Mem:"; + LogInfo::MapleLogger() << " size:" << a64v->GetSize() << " "; + LogInfo::MapleLogger() << " isStack:" << a64v->IsStackMem() << "-" << a64v->IsStackArgMem() << " "; + switch (a64v->GetAddrMode()) { + case MemOperand::kAddrModeBOi: { + LogInfo::MapleLogger() << "base:"; + Visit(a64v->GetBaseRegister()); + LogInfo::MapleLogger() << "offset:"; + Visit(a64v->GetOffsetOperand()); + switch (a64v->GetIndexOpt()) { + case MemOperand::kIntact: + LogInfo::MapleLogger() << " intact"; + break; + case MemOperand::kPreIndex: + LogInfo::MapleLogger() << " pre-index"; + break; + case MemOperand::kPostIndex: + LogInfo::MapleLogger() << " post-index"; + break; + default: + break; + } + break; + } + case MemOperand::kAddrModeBOrX: { + LogInfo::MapleLogger() << "base:"; + Visit(a64v->GetBaseRegister()); + LogInfo::MapleLogger() << "offset:"; + Visit(a64v->GetIndexRegister()); + LogInfo::MapleLogger() << " " << a64v->GetExtendAsString(); + LogInfo::MapleLogger() << " shift: " << a64v->ShiftAmount(); + LogInfo::MapleLogger() << " extend: " << a64v->GetExtendAsString(); + break; + } + case MemOperand::kAddrModeLiteral: + LogInfo::MapleLogger() << "literal: " << a64v->GetSymbolName(); + break; + case MemOperand::kAddrModeLo12Li: { + LogInfo::MapleLogger() << "base:"; + Visit(a64v->GetBaseRegister()); + LogInfo::MapleLogger() << "offset:"; + OfstOperand *offOpnd = a64v->GetOffsetImmediate(); + LogInfo::MapleLogger() << "#:lo12:"; + if (a64v->GetSymbol()->GetStorageClass() == kScPstatic && a64v->GetSymbol()->IsLocal()) { + PUIdx pIdx = CG::GetCurCGFunc()->GetMirModule().CurFunction()->GetPuidx(); + LogInfo::MapleLogger() << a64v->GetSymbolName() << std::to_string(pIdx); + } else { + LogInfo::MapleLogger() << a64v->GetSymbolName(); + } + LogInfo::MapleLogger() << "+" << std::to_string(offOpnd->GetOffsetValue()); + break; + } + default: + ASSERT(false, "error memoperand dump"); + break; + } +} + +void A64OpndDumpVisitor::Visit(CondOperand *v) { + LogInfo::MapleLogger() << "CC: " << CondOperand::ccStrs[v->GetCode()]; +} +void A64OpndDumpVisitor::Visit(StImmOperand *v) { + LogInfo::MapleLogger() << v->GetName(); + LogInfo::MapleLogger() << "+offset:" << v->GetOffset(); +} +void A64OpndDumpVisitor::Visit(BitShiftOperand *v) { + BitShiftOperand::ShiftOp shiftOp = v->GetShiftOp(); + uint32 shiftAmount = v->GetShiftAmount(); + LogInfo::MapleLogger() << ((shiftOp == BitShiftOperand::kLSL) ? "LSL: " : + ((shiftOp == BitShiftOperand::kLSR) ? 
"LSR: " : "ASR: ")); + LogInfo::MapleLogger() << shiftAmount; +} +void A64OpndDumpVisitor::Visit(ExtendShiftOperand *v) { + auto dumpExtendShift = [v](const std::string &extendKind)->void { + LogInfo::MapleLogger() << extendKind; + if (v->GetShiftAmount() != 0) { + LogInfo::MapleLogger() << " : " << v->GetShiftAmount(); + } + }; + switch (v->GetExtendOp()) { + case ExtendShiftOperand::kUXTB: + dumpExtendShift("UXTB"); + break; + case ExtendShiftOperand::kUXTH: + dumpExtendShift("UXTH"); + break; + case ExtendShiftOperand::kUXTW: + dumpExtendShift("UXTW"); + break; + case ExtendShiftOperand::kUXTX: + dumpExtendShift("UXTX"); + break; + case ExtendShiftOperand::kSXTB: + dumpExtendShift("SXTB"); + break; + case ExtendShiftOperand::kSXTH: + dumpExtendShift("SXTH"); + break; + case ExtendShiftOperand::kSXTW: + dumpExtendShift("SXTW"); + break; + case ExtendShiftOperand::kSXTX: + dumpExtendShift("SXTX"); + break; + default: + ASSERT(false, "should not be here"); + break; + } +} +void A64OpndDumpVisitor::Visit(LabelOperand *v) { + LogInfo::MapleLogger() << "label:" << v->GetLabelIndex(); +} +void A64OpndDumpVisitor::Visit(FuncNameOperand *v) { + LogInfo::MapleLogger() << "func :" << v->GetName(); +} +void A64OpndDumpVisitor::Visit(LogicalShiftLeftOperand *v) { + LogInfo::MapleLogger() << "LSL: " << v->GetShiftAmount(); +} +void A64OpndDumpVisitor::Visit(PhiOperand *v) { + auto &phiList = v->GetOperands(); + for (auto it = phiList.begin(); it != phiList.end();) { + Visit(it->second); + LogInfo::MapleLogger() << " fBB<" << it->first << ">"; + LogInfo::MapleLogger() << (++it == phiList.end() ? "" : " ,"); + } +} } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_live.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_live.cpp index 6966ba98d8a76d7bb4a3d8280fff8ff103b27eb7..7c6ffb973d945328484048086cb03a6defae2913 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_live.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_live.cpp @@ -68,7 +68,7 @@ void AArch64LiveAnalysis::GetBBDefUse(BB &bb) { bb.UseResetAllBit(); FOR_BB_INSNS_REV(insn, &bb) { - if (!insn->IsMachineInstruction()) { + if (!insn->IsMachineInstruction() && !insn->IsPhi()) { continue; } @@ -80,7 +80,7 @@ void AArch64LiveAnalysis::GetBBDefUse(BB &bb) { uint32 opndNum = insn->GetOperandSize(); for (uint32 i = 0; i < opndNum; ++i) { Operand &opnd = insn->GetOperand(i); - AArch64OpndProp *regProp = static_cast(md->operand[i]); + OpndProp *regProp = md->operand[i]; bool isDef = regProp->IsRegDef(); bool isUse = regProp->IsRegUse(); if (opnd.IsList()) { @@ -93,6 +93,11 @@ void AArch64LiveAnalysis::GetBBDefUse(BB &bb) { ProcessMemOpnd(bb, opnd); } else if (opnd.IsConditionCode()) { ProcessCondOpnd(bb); + } else if (opnd.IsPhi()) { + auto &phiOpnd = static_cast(opnd); + for (auto opIt : phiOpnd.GetOperands()) { + CollectLiveInfo(bb, *opIt.second, false, true); + } } else { CollectLiveInfo(bb, opnd, isDef, isUse); } @@ -184,7 +189,7 @@ void AArch64LiveAnalysis::ProcessListOpnd(BB &bb, Operand &opnd) const { } void AArch64LiveAnalysis::ProcessMemOpnd(BB &bb, Operand &opnd) const { - auto &memOpnd = static_cast(opnd); + auto &memOpnd = static_cast(opnd); Operand *base = memOpnd.GetBaseRegister(); Operand *offset = memOpnd.GetIndexRegister(); if (base != nullptr) { diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_lsra.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_lsra.cpp index 26a73546f275e7a1f9eb48438fbf0ef4bd641653..3fb0c99cc5a1fa073731a2487687931917a0da56 100644 --- 
a/src/mapleall/maple_be/src/cg/aarch64/aarch64_lsra.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_lsra.cpp @@ -155,7 +155,7 @@ void LSRALinearScanRegAllocator::PrintLiveRanges() const { continue; } } else { - bool isDef = static_cast(md->operand[iSecond])->IsRegDef(); + bool isDef = (md->operand[iSecond])->IsRegDef(); (void)CheckForReg(opnd, *insn, *li, regNO, isDef); } } @@ -401,7 +401,7 @@ void LSRALinearScanRegAllocator::RecordPhysRegs(const RegOperand ®Opnd, uint3 return; } - if (IsUntouchableReg(regNO) || regOpnd.IsConstReg()) { + if (IsUntouchableReg(regNO) || regNO == RZR) { return; } @@ -647,8 +647,8 @@ void LSRALinearScanRegAllocator::BuildIntervalRangesForEachOperand(const Insn &i SetupIntervalRangesByOperand(*offset, insn, blockFrom, false, true); } } else if (opnd.IsRegister()) { - bool isDef = static_cast(md->operand[i])->IsRegDef(); - bool isUse = static_cast(md->operand[i])->IsRegUse(); + bool isDef = (md->operand[i])->IsRegDef(); + bool isUse = (md->operand[i])->IsRegUse(); SetupIntervalRangesByOperand(opnd, insn, blockFrom, isDef, isUse); } } @@ -850,7 +850,7 @@ void LSRALinearScanRegAllocator::ComputeLiveIntervalForEachOperand(Insn &insn) { int32 lastOpndId = static_cast(insn.GetOperandSize() - 1); for (int32 i = lastOpndId; i >= 0; --i) { Operand &opnd = insn.GetOperand(static_cast(i)); - bool isDef = static_cast(md->operand[i])->IsRegDef(); + bool isDef = (md->operand[i])->IsRegDef(); if (opnd.IsList()) { auto &listOpnd = static_cast(opnd); for (auto op : listOpnd.GetOperands()) { @@ -1244,7 +1244,7 @@ uint32 LSRALinearScanRegAllocator::AssignSpecialPhysRegPattern(const Insn &insn, auto ®Src = static_cast(src); uint32 srcRegNO = regSrc.GetRegisterNumber(); if (li.GetRegNO() == srcRegNO) { - bool srcIsDef = static_cast(md->operand[i])->IsRegDef(); + bool srcIsDef = (md->operand[i])->IsRegDef(); if (srcIsDef) { break; } @@ -1444,13 +1444,13 @@ void LSRALinearScanRegAllocator::InsertCallerSave(Insn &insn, Operand &opnd, boo MOperator opCode = insn.GetMachineOpcode(); if (opCode == MOP_xmovri64 || opCode == MOP_xmovri32) { Operand &opnd1 = insn.GetOperand(1); - auto &imm = static_cast(opnd1); + auto &imm = static_cast(opnd1); if (imm.IsZero()) { isSpillZero = true; } } else if (opCode == MOP_wmovrr || opCode == MOP_xmovrr) { auto &opnd1 = static_cast(insn.GetOperand(1)); - if (opnd1.IsZeroRegister()) { + if (opnd1.GetRegisterNumber() == RZR) { isSpillZero = true; } } @@ -1466,7 +1466,7 @@ void LSRALinearScanRegAllocator::InsertCallerSave(Insn &insn, Operand &opnd, boo RegOperand *phyOpnd = nullptr; if (isSpillZero) { - phyOpnd = &AArch64RegOperand::GetZeroRegister(regSize); + phyOpnd = &cgFunc->GetZeroOpnd(regSize); } else { phyOpnd = &a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(rli->GetAssignedReg()), regSize, regType); @@ -1663,13 +1663,13 @@ void LSRALinearScanRegAllocator::SpillOperand(Insn &insn, Operand &opnd, bool is MOperator opCode = insn.GetMachineOpcode(); if (opCode == MOP_xmovri64 || opCode == MOP_xmovri32) { Operand &opnd1 = insn.GetOperand(1); - auto &imm = static_cast(opnd1); + auto &imm = static_cast(opnd1); if (imm.IsZero()) { isSpillZero = true; } } else if (opCode == MOP_wmovrr || opCode == MOP_xmovrr) { auto &opnd1 = static_cast(insn.GetOperand(1)); - if (opnd1.IsZeroRegister()) { + if (opnd1.GetRegisterNumber() == RZR) { isSpillZero = true; } } @@ -1690,7 +1690,7 @@ void LSRALinearScanRegAllocator::SpillOperand(Insn &insn, Operand &opnd, bool is bool isOutOfRange = false; RegOperand *phyOpnd = nullptr; if (isSpillZero) { - 
phyOpnd = &AArch64RegOperand::GetZeroRegister(regSize); + phyOpnd = &cgFunc->GetZeroOpnd(regSize); } else { phyOpnd = &a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(spReg), regSize, regType); } @@ -1799,7 +1799,7 @@ bool LSRALinearScanRegAllocator::OpndNeedAllocation(const Insn &insn, Operand &o if (regType == kRegTyCc || regType == kRegTyVary) { return false; } - if (IsUntouchableReg(regNO) || regOpnd.IsConstReg()) { + if (IsUntouchableReg(regNO) || regNO == RZR) { return false; } if (regOpnd.IsPhysicalRegister()) { @@ -1975,7 +1975,7 @@ void LSRALinearScanRegAllocator::LiveIntervalAnalysis() { uint32 opndNum = insn->GetOperandSize(); for (uint32 i = 0; i < opndNum; ++i) { Operand &opnd = insn->GetOperand(i); - bool isDef = static_cast(md->operand[i])->IsRegDef(); + bool isDef = (md->operand[i])->IsRegDef(); if (isDef) { auto ®Opnd = static_cast(opnd); if (regOpnd.IsVirtualRegister() && regOpnd.GetRegisterType() != kRegTyCc) { @@ -2047,7 +2047,7 @@ void LSRALinearScanRegAllocator::AssignPhysRegsForInsn(Insn &insn) { uint32 opndNum = insn.GetOperandSize(); for (uint32 i = 0; i < opndNum; ++i) { Operand &opnd = insn.GetOperand(i); - bool isDef = static_cast(md->operand[i])->IsRegDef(); + bool isDef = (md->operand[i])->IsRegDef(); RegOperand *newOpnd = nullptr; if (opnd.IsList()) { /* For arm32, not arm64 */ @@ -2120,7 +2120,7 @@ RegOperand *LSRALinearScanRegAllocator::GetReplaceOpnd(Insn &insn, Operand &opnd if (regType == kRegTyCc || regType == kRegTyVary) { return nullptr; } - if (IsUntouchableReg(vRegNO) || regOpnd->IsConstReg()) { + if (IsUntouchableReg(vRegNO) || vRegNO == RZR) { return nullptr; } if (regOpnd->IsPhysicalRegister()) { @@ -2244,7 +2244,7 @@ void LSRALinearScanRegAllocator::FinalizeRegisters() { for (uint32 i = 0; i < opndNum; ++i) { Operand &opnd = insn->GetOperand(i); ASSERT(md->operand[i] != nullptr, "pointer is null in LSRALinearScanRegAllocator::FinalizeRegisters"); - bool isDef = static_cast(md->operand[i])->IsRegDef(); + bool isDef = (md->operand[i])->IsRegDef(); if (isDef) { continue; } @@ -2280,7 +2280,7 @@ void LSRALinearScanRegAllocator::FinalizeRegisters() { /* Handle dest opernads last */ for (uint32 i = 0; i < opndNum; ++i) { Operand &opnd = insn->GetOperand(i); - bool isDef = static_cast(md->operand[i])->IsRegDef(); + bool isDef = (md->operand[i])->IsRegDef(); if (!isDef) { continue; } diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_memlayout.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_memlayout.cpp index 703d9211f320ea7f5b1957b6d87f4da1be5589b7..be7c72ae086e7250fcfdf58e2599829e074285d9 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_memlayout.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_memlayout.cpp @@ -133,9 +133,9 @@ void AArch64MemLayout::LayoutVarargParams() { if (be.GetMIRModule().IsCModule() && func->GetAttr(FUNCATTR_varargs)) { for (uint32 i = 0; i < func->GetFormalCount(); i++) { if (i == 0) { - if (be.HasFuncReturnType(*func)) { - TyIdx tidx = be.GetFuncReturnType(*func); - if (be.GetTypeSize(tidx.GetIdx()) <= k16ByteSize) { + if (func->IsReturnStruct()) { + TyIdx tyIdx = func->GetFuncRetStructTyIdx(); + if (be.GetTypeSize(tyIdx.GetIdx()) <= k16ByteSize) { continue; } } @@ -189,6 +189,18 @@ void AArch64MemLayout::LayoutVarargParams() { } void AArch64MemLayout::LayoutFormalParams() { + bool isLmbc = (be.GetMIRModule().GetFlavor() == kFlavorLmbc); + if (isLmbc && mirFunction->GetFormalCount() == 0) { + /* + * lmbc : upformalsize - size of formals passed from caller's frame into current function + * 
framesize - total frame size of current function used by Maple IR + * outparmsize - portion of frame size of current function used by call parameters + */ + segArgsStkPassed.SetSize(mirFunction->GetOutParmSize()); + segArgsRegPassed.SetSize(mirFunction->GetOutParmSize() + kTwoRegister * k8ByteSize); + return; + } + AArch64CallConvImpl parmLocator(be); CCLocInfo ploc; for (size_t i = 0; i < mirFunction->GetFormalCount(); ++i) { @@ -197,11 +209,11 @@ void AArch64MemLayout::LayoutFormalParams() { AArch64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New(); SetSymAllocInfo(stIndex, *symLoc); if (i == 0) { - if (be.HasFuncReturnType(*mirFunction)) { + if (mirFunction->IsReturnStruct()) { symLoc->SetMemSegment(GetSegArgsRegPassed()); symLoc->SetOffset(GetSegArgsRegPassed().GetSize()); - TyIdx tidx = be.GetFuncReturnType(*mirFunction); - if (be.GetTypeSize(tidx.GetIdx()) > k16ByteSize) { + TyIdx tyIdx = mirFunction->GetFuncRetStructTyIdx(); + if (be.GetTypeSize(tyIdx.GetIdx()) > k16ByteSize) { if (CGOptions::IsArm64ilp32()) { segArgsRegPassed.SetSize(segArgsRegPassed.GetSize() + k8ByteSize); } else { @@ -243,6 +255,8 @@ void AArch64MemLayout::LayoutFormalParams() { segArgsRegPassed.SetSize(static_cast(RoundUp(segArgsRegPassed.GetSize(), align))); symLoc->SetOffset(segArgsRegPassed.GetSize()); segArgsRegPassed.SetSize(segArgsRegPassed.GetSize() + size); + } else if (isLmbc) { + segArgsRegPassed.SetSize(segArgsRegPassed.GetSize() + k8ByteSize); } noStackPara = true; } else { /* stack */ @@ -274,6 +288,11 @@ void AArch64MemLayout::LayoutFormalParams() { } void AArch64MemLayout::LayoutLocalVariables(std::vector &tempVar, std::vector &returnDelays) { + if (be.GetMIRModule().GetFlavor() == kFlavorLmbc && mirFunction->GetFormalCount() == 0) { + segLocals.SetSize(mirFunction->GetFrameSize() - mirFunction->GetOutParmSize()); + return; + } + uint32 symTabSize = mirFunction->GetSymTab()->GetSymbolTableSize(); for (uint32 i = 0; i < symTabSize; ++i) { MIRSymbol *sym = mirFunction->GetSymTab()->GetSymbolFromStIdx(i); @@ -353,7 +372,11 @@ void AArch64MemLayout::LayoutReturnRef(std::vector &returnDelays, symLoc->SetOffset(segRefLocals.GetSize()); segRefLocals.SetSize(segRefLocals.GetSize() + be.GetTypeSize(tyIdx)); } - segArgsToStkPass.SetSize(FindLargestActualArea(structCopySize)); + if (be.GetMIRModule().GetFlavor() == kFlavorLmbc) { + segArgsToStkPass.SetSize(mirFunction->GetOutParmSize()); + } else { + segArgsToStkPass.SetSize(FindLargestActualArea(structCopySize)); + } maxParmStackSize = static_cast(segArgsToStkPass.GetSize()); if (Globals::GetInstance()->GetOptimLevel() == 0) { AssignSpillLocationsToPseudoRegisters(); @@ -373,7 +396,7 @@ void AArch64MemLayout::LayoutReturnRef(std::vector &returnDelays, void AArch64MemLayout::LayoutActualParams() { for (size_t i = 0; i < mirFunction->GetFormalCount(); ++i) { if (i == 0) { - if (be.HasFuncReturnType(*mirFunction)) { + if (mirFunction->IsReturnStruct()) { continue; } } @@ -477,10 +500,10 @@ void AArch64MemLayout::AssignSpillLocationsToPseudoRegisters() { RegOperand &baseOpnd = aarchCGFunc->GetOrCreateStackBaseRegOperand(); int32 offset = static_cast(segLocals.GetSize()); - AArch64OfstOperand *offsetOpnd = - aarchCGFunc->GetMemoryPool()->New(offset + k16BitSize, k64BitSize); - AArch64MemOperand *throwMem = aarchCGFunc->GetMemoryPool()->New( - AArch64MemOperand::kAddrModeBOi, k64BitSize, baseOpnd, static_cast(nullptr), offsetOpnd, + OfstOperand *offsetOpnd = + &aarchCGFunc->CreateOfstOpnd(offset + k16BitSize, k64BitSize); + MemOperand *throwMem = 
aarchCGFunc->CreateMemOperand( + MemOperand::kAddrModeBOi, k64BitSize, baseOpnd, static_cast(nullptr), offsetOpnd, nullptr); aarchCGFunc->SetCatchOpnd(*throwMem); if (CGOptions::IsArm64ilp32()) { @@ -493,7 +516,7 @@ void AArch64MemLayout::AssignSpillLocationsToPseudoRegisters() { SymbolAlloc *AArch64MemLayout::AssignLocationToSpillReg(regno_t vrNum) { AArch64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New(); symLoc->SetMemSegment(segSpillReg); - uint32 regSize = cgFunc->IsExtendReg(vrNum) ? k64BitSize : cgFunc->GetVRegSize(vrNum); + uint32 regSize = cgFunc->IsExtendReg(vrNum) ? k8ByteSize : cgFunc->GetVRegSize(vrNum); segSpillReg.SetSize(RoundUp(segSpillReg.GetSize(), regSize)); symLoc->SetOffset(segSpillReg.GetSize()); segSpillReg.SetSize(segSpillReg.GetSize() + regSize); diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp index a689705b7fc4915463e0be41b762c5ebe210fe6c..eaa5726d435a939ffd0706b8ec4e0b37ede60e38 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp @@ -22,10 +22,11 @@ void AArch64FPLROffsetAdjustment::Run() { } void AArch64FPLROffsetAdjustment::AdjustmentOffsetForOpnd(Insn &insn, AArch64CGFunc &aarchCGFunc) { + bool isLmbc = (aarchCGFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc); uint32 opndNum = insn.GetOperandSize(); MemLayout *memLayout = aarchCGFunc.GetMemlayout(); bool stackBaseOpnd = false; - AArch64reg stackBaseReg = aarchCGFunc.UseFP() ? R29 : RSP; + AArch64reg stackBaseReg = isLmbc ? R29 : (aarchCGFunc.UseFP() ? R29 : RSP); for (uint32 i = 0; i < opndNum; ++i) { Operand &opnd = insn.GetOperand(i); if (opnd.IsRegister()) { @@ -38,27 +39,27 @@ void AArch64FPLROffsetAdjustment::AdjustmentOffsetForOpnd(Insn &insn, AArch64CGF stackBaseOpnd = true; } } else if (opnd.IsMemoryAccessOperand()) { - auto &memOpnd = static_cast(opnd); - if (((memOpnd.GetAddrMode() == AArch64MemOperand::kAddrModeBOi) || - (memOpnd.GetAddrMode() == AArch64MemOperand::kAddrModeBOrX)) && + auto &memOpnd = static_cast(opnd); + if (((memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi) || + (memOpnd.GetAddrMode() == MemOperand::kAddrModeBOrX)) && memOpnd.GetBaseRegister() != nullptr) { if (memOpnd.GetBaseRegister()->IsOfVary()) { - memOpnd.SetBaseRegister(static_cast(aarchCGFunc.GetOrCreateStackBaseRegOperand())); + memOpnd.SetBaseRegister(static_cast(aarchCGFunc.GetOrCreateStackBaseRegOperand())); } RegOperand *memBaseReg = memOpnd.GetBaseRegister(); if (memBaseReg->GetRegisterNumber() == RFP) { RegOperand &newBaseOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(stackBaseReg, k64BitSize, kRegTyInt); - AArch64MemOperand &newMemOpnd = aarchCGFunc.GetOrCreateMemOpnd( - AArch64MemOperand::kAddrModeBOi, memOpnd.GetSize(), &newBaseOpnd, memOpnd.GetIndexRegister(), + MemOperand &newMemOpnd = aarchCGFunc.GetOrCreateMemOpnd( + MemOperand::kAddrModeBOi, memOpnd.GetSize(), &newBaseOpnd, memOpnd.GetIndexRegister(), memOpnd.GetOffsetImmediate(), memOpnd.GetSymbol()); insn.SetOperand(i, newMemOpnd); stackBaseOpnd = true; } } - if ((memOpnd.GetAddrMode() != AArch64MemOperand::kAddrModeBOi) || !memOpnd.IsIntactIndexed()) { + if ((memOpnd.GetAddrMode() != MemOperand::kAddrModeBOi) || !memOpnd.IsIntactIndexed()) { continue; } - AArch64OfstOperand *ofstOpnd = memOpnd.GetOffsetImmediate(); + OfstOperand *ofstOpnd = memOpnd.GetOffsetImmediate(); if (ofstOpnd == nullptr) { continue; } @@ -70,7 +71,7 @@ void 
AArch64FPLROffsetAdjustment::AdjustmentOffsetForOpnd(Insn &insn, AArch64CGF if (!stackBaseOpnd && (ofstOpnd->GetVary() == kAdjustVary || ofstOpnd->GetVary() == kNotVary)) { bool condition = aarchCGFunc.IsOperandImmValid(insn.GetMachineOpcode(), &memOpnd, i); if (!condition) { - AArch64MemOperand &newMemOpnd = aarchCGFunc.SplitOffsetWithAddInstruction( + MemOperand &newMemOpnd = aarchCGFunc.SplitOffsetWithAddInstruction( memOpnd, memOpnd.GetSize(), static_cast(R16), false, &insn); insn.SetOperand(i, newMemOpnd); } @@ -94,7 +95,7 @@ void AArch64FPLROffsetAdjustment::AdjustmentOffsetForImmOpnd(Insn &insn, uint32 if (immOpnd.GetValue() < 0) { immOpnd.Negate(); } - insn.SetMOperator(A64ConstProp::GetReversalMOP(insn.GetMachineOpcode())); + insn.SetMOP(A64ConstProp::GetReversalMOP(insn.GetMachineOpcode())); } else { immOpnd.Add(ofst); } @@ -104,18 +105,18 @@ void AArch64FPLROffsetAdjustment::AdjustmentOffsetForImmOpnd(Insn &insn, uint32 PrimType destTy = static_cast(insn.GetOperand(kInsnFirstOpnd)).GetSize() == k64BitSize ? PTY_i64 : PTY_i32; RegOperand *resOpnd = &static_cast(insn.GetOperand(kInsnFirstOpnd)); - AArch64ImmOperand ©ImmOpnd = aarchCGFunc.CreateImmOperand( + ImmOperand ©ImmOpnd = aarchCGFunc.CreateImmOperand( immOpnd.GetValue(), immOpnd.GetSize(), immOpnd.IsSignedValue()); aarchCGFunc.SelectAddAfterInsn(*resOpnd, insn.GetOperand(kInsnSecondOpnd), copyImmOpnd, destTy, false, insn); insn.GetBB()->RemoveInsn(insn); } else if (insn.GetMachineOpcode() == MOP_xsubrri12 || insn.GetMachineOpcode() == MOP_wsubrri12) { if (immOpnd.IsSingleInstructionMovable()) { - AArch64RegOperand &tempReg = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R16, k64BitSize, kRegTyInt); + RegOperand &tempReg = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R16, k64BitSize, kRegTyInt); bool is64bit = insn.GetOperand(kInsnFirstOpnd).GetSize() == k64BitSize; MOperator tempMovOp = is64bit ? MOP_xmovri64 : MOP_xmovri32; Insn &tempMov = cgFunc->GetCG()->BuildInstruction(tempMovOp, tempReg, immOpnd); insn.SetOperand(index, tempReg); - insn.SetMOperator(is64bit ? MOP_xsubrrr : MOP_wsubrrr); + insn.SetMOP(is64bit ? 
MOP_xsubrrr : MOP_wsubrrr); (void)insn.GetBB()->InsertInsnBefore(insn, tempMov); } } else { @@ -136,25 +137,25 @@ void AArch64FPLROffsetAdjustment::AdjustmentStackPointer(Insn &insn, AArch64CGFu for (uint32 i = 0; i < opndNum; ++i) { Operand &opnd = insn.GetOperand(i); if (opnd.IsMemoryAccessOperand()) { - auto &memOpnd = static_cast(opnd); + auto &memOpnd = static_cast(opnd); ASSERT(memOpnd.GetBaseRegister() != nullptr, "Unexpect, need check"); CHECK_FATAL(memOpnd.IsIntactIndexed(), "unsupport yet"); - if (memOpnd.GetAddrMode() == AArch64MemOperand::kAddrModeBOi) { - OfstOperand *ofstOpnd = memOpnd.GetOffsetOperand(); - OfstOperand *newOfstOpnd = &aarchCGFunc.GetOrCreateOfstOpnd( + if (memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi) { + ImmOperand *ofstOpnd = memOpnd.GetOffsetOperand(); + ImmOperand *newOfstOpnd = &aarchCGFunc.GetOrCreateOfstOpnd( static_cast(ofstOpnd->GetValue() + offset), ofstOpnd->GetSize()); - AArch64MemOperand &newOfstMemOpnd = aarchCGFunc.GetOrCreateMemOpnd( - AArch64MemOperand::kAddrModeBOi, memOpnd.GetSize(), memOpnd.GetBaseRegister(), memOpnd.GetIndexRegister(), + MemOperand &newOfstMemOpnd = aarchCGFunc.GetOrCreateMemOpnd( + MemOperand::kAddrModeBOi, memOpnd.GetSize(), memOpnd.GetBaseRegister(), memOpnd.GetIndexRegister(), newOfstOpnd, memOpnd.GetSymbol()); insn.SetOperand(i, newOfstMemOpnd); if (!aarchCGFunc.IsOperandImmValid(insn.GetMachineOpcode(), &newOfstMemOpnd, i)) { bool isPair = (i == kInsnThirdOpnd); - AArch64MemOperand &newMemOpnd = aarchCGFunc.SplitOffsetWithAddInstruction( + MemOperand &newMemOpnd = aarchCGFunc.SplitOffsetWithAddInstruction( newOfstMemOpnd, newOfstMemOpnd.GetSize(), static_cast(R16), false, &insn, isPair); insn.SetOperand(i, newMemOpnd); } continue; - } else if (memOpnd.GetAddrMode() == AArch64MemOperand::kAddrModeBOrX) { + } else if (memOpnd.GetAddrMode() == MemOperand::kAddrModeBOrX) { CHECK_FATAL(false, "Unexpect adjust insn"); } else { insn.Dump(); @@ -175,8 +176,8 @@ void AArch64FPLROffsetAdjustment::AdjustmentStackPointer(Insn &insn, AArch64CGFu case MOP_xaddrri24: { ASSERT(static_cast(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == RSP, "regNumber should be changed in AdjustmentOffsetForOpnd"); - AArch64RegOperand &tempReg = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R16, k64BitSize, kRegTyInt); - AArch64ImmOperand &offsetReg = aarchCGFunc.CreateImmOperand(offset, k64BitSize, false); + RegOperand &tempReg = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R16, k64BitSize, kRegTyInt); + ImmOperand &offsetReg = aarchCGFunc.CreateImmOperand(offset, k64BitSize, false); aarchCGFunc.SelectAddAfterInsn(tempReg, insn.GetOperand(kInsnSecondOpnd), offsetReg, PTY_i64, false, insn); insn.SetOperand(kInsnSecondOpnd, tempReg); break; @@ -191,12 +192,25 @@ void AArch64FPLROffsetAdjustment::AdjustmentStackPointer(Insn &insn, AArch64CGFu case MOP_xsubrri24: { ASSERT(static_cast(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == RSP, "regNumber should be changed in AdjustmentOffsetForOpnd"); - AArch64RegOperand &tempReg = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R16, k64BitSize, kRegTyInt); - AArch64ImmOperand &offsetReg = aarchCGFunc.CreateImmOperand(offset, k64BitSize, false); + RegOperand &tempReg = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R16, k64BitSize, kRegTyInt); + ImmOperand &offsetReg = aarchCGFunc.CreateImmOperand(offset, k64BitSize, false); aarchCGFunc.SelectAddAfterInsn(tempReg, insn.GetOperand(kInsnSecondOpnd), offsetReg, PTY_i64, false, insn); insn.SetOperand(kInsnSecondOpnd, tempReg); break; } + 
case MOP_waddrri12: { + if(!CGOptions::IsArm64ilp32()) { + insn.Dump(); + CHECK_FATAL(false, "Unexpect offset adjustment insn"); + } else { + ASSERT(static_cast(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == RSP, + "regNumber should be changed in AdjustmentOffsetForOpnd"); + ImmOperand &addend = static_cast(insn.GetOperand(kInsnThirdOpnd)); + addend.SetValue(addend.GetValue() + offset); + AdjustmentOffsetForImmOpnd(insn, kInsnThirdOpnd, aarchCGFunc); /* legalize imm opnd */ + } + break; + } default: insn.Dump(); CHECK_FATAL(false, "Unexpect offset adjustment insn"); diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_operand.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_operand.cpp index be6f5aaaccdcacd9586791e78ea674d182bc041b..fea00fee26404f3868f678b05ddfd138bec75bbb 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_operand.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_operand.cpp @@ -20,8 +20,6 @@ #include "aarch64_cg.h" namespace maplebe { -AArch64RegOperand AArch64RegOperand::zero64(RZR, k64BitSize, kRegTyInt); -AArch64RegOperand AArch64RegOperand::zero32(RZR, k32BitSize, kRegTyInt); const char *CondOperand::ccStrs[kCcLast] = { #define CONDCODE(a) #a, @@ -29,162 +27,6 @@ const char *CondOperand::ccStrs[kCcLast] = { #undef CONDCODE }; -bool AArch64RegOperand::IsSaveReg(MIRType &type, BECommon &beCommon) const { - AArch64CallConvImpl retLocator(beCommon); - CCLocInfo retMechanism; - retLocator.InitReturnInfo(type, retMechanism); - if (retMechanism.GetRegCount() > 0) { - return GetRegisterNumber() == retMechanism.GetReg0() || GetRegisterNumber() == retMechanism.GetReg1() || - GetRegisterNumber() == retMechanism.GetReg2() || GetRegisterNumber() == retMechanism.GetReg3(); - } - return false; -} - -bool AArch64RegOperand::IsSPOrFP() const { - return (IsPhysicalRegister() && (regNO == RSP || regNO == RFP || (regNO == R29 && CGOptions::UseFramePointer()))); -} - -bool AArch64RegOperand::operator==(const AArch64RegOperand &o) const { - regno_t myRn = GetRegisterNumber(); - uint32 mySz = GetSize(); - uint32 myFl = flag; - regno_t otherRn = o.GetRegisterNumber(); - uint32 otherSz = o.GetSize(); - uint32 otherFl = o.flag; - - if (IsPhysicalRegister()) { - return (myRn == otherRn && mySz == otherSz && myFl == otherFl); - } - return (myRn == otherRn && mySz == otherSz); -} - -bool AArch64RegOperand::operator<(const AArch64RegOperand &o) const { - regno_t myRn = GetRegisterNumber(); - uint32 mySz = GetSize(); - uint32 myFl = flag; - regno_t otherRn = o.GetRegisterNumber(); - uint32 otherSz = o.GetSize(); - uint32 otherFl = o.flag; - return myRn < otherRn || (myRn == otherRn && mySz < otherSz) || - (myRn == otherRn && mySz == otherSz && myFl < otherFl); -} - -void AArch64RegOperand::EmitVectorOpnd(Emitter &emitter) const { - std::string width; - switch (GetVecElementSize()) { - case k8BitSize: - width = "b"; - break; - case k16BitSize: - width = "h"; - break; - case k32BitSize: - width = "s"; - break; - case k64BitSize: - width = "d"; - break; - default: - CHECK_FATAL(false, "unexpected value size for vector element"); - break; - } - emitter.Emit(AArch64CG::vectorRegNames[regNO]); - int32 lanePos = GetVecLanePosition(); - if (lanePos == -1) { - emitter.Emit("." + std::to_string(GetVecLaneSize()) + width); - } else { - emitter.Emit("." 
+ width + "[" + std::to_string(lanePos) + "]"); - } -} - -void AArch64RegOperand::Emit(Emitter &emitter, const OpndProp *opndProp) const { - ASSERT((opndProp == nullptr || (static_cast(opndProp)->IsRegister())), - "operand type doesn't match"); - /* opndProp null means a sub emit, i.e from MemOperand */ - uint8 opndSize = (opndProp != nullptr) ? static_cast(opndProp)->GetSize() : size; - switch (GetRegisterType()) { - case kRegTyInt: { - ASSERT((opndSize == k32BitSize || opndSize == k64BitSize), "illegal register size"); -#ifdef USE_32BIT_REF - bool r32 = (opndSize == k32BitSize) || isRefField; -#else - bool r32 = (opndSize == k32BitSize); -#endif /* USE_32BIT_REF */ - emitter.Emit(AArch64CG::intRegNames[(r32 ? AArch64CG::kR32List : AArch64CG::kR64List)][regNO]); - break; - } - case kRegTyFloat: { - ASSERT((opndSize == k8BitSize || opndSize == k16BitSize || opndSize == k32BitSize || - opndSize == k64BitSize || opndSize == k128BitSize), "illegal register size"); - if (static_cast(opndProp)->IsVectorOperand() && GetVecLaneSize() != 0) { - EmitVectorOpnd(emitter); - } else { - /* FP reg cannot be reffield. 8~0, 16~1, 32~2, 64~3. 8 is 1000b, has 3 zero. */ - uint32 regSet = __builtin_ctz(opndSize) - 3; - emitter.Emit(AArch64CG::intRegNames[regSet][regNO]); - } - break; - } - default: - ASSERT(false, "NYI"); - break; - } -} - -void AArch64ImmOperand::Emit(Emitter &emitter, const OpndProp *opndProp) const { - if (!isFmov) { - emitter.Emit((opndProp != nullptr && static_cast(opndProp)->IsLoadLiteral()) ? "=" : "#") - .Emit((size == k64BitSize) ? value : static_cast(static_cast(value))); - return; - } - /* - * compute float value - * use top 4 bits expect MSB of value . then calculate its fourth power - */ - int32 exp = (((static_cast(value) & 0x70) >> 4) ^ 0x4) - 3; - /* use the lower four bits of value in this expression */ - const float mantissa = 1.0 + (static_cast(static_cast(value) & 0xf) / 16.0); - float result = std::pow(2, exp) * mantissa; - - std::stringstream ss; - ss << std::setprecision(10) << result; - std::string res; - ss >> res; - size_t dot = res.find('.'); - if (dot == std::string::npos) { - res += ".0"; - dot = res.find('.'); - CHECK_FATAL(dot != std::string::npos, "cannot find in string"); - } - res.erase(dot, 1); - std::string integer(res, 0, 1); - std::string fraction(res, 1); - while (fraction.size() != 1 && fraction[fraction.size() - 1] == '0') { - fraction.pop_back(); - } - /* fetch the sign bit of this value */ - std::string sign = static_cast(value) & 0x80 ? "-" : ""; - emitter.Emit(sign + integer + "." + fraction + "e+").Emit(dot - 1); -} - -void AArch64OfstOperand::Emit(Emitter &emitter, const OpndProp *opndProp) const { - if (IsImmOffset()) { - emitter.Emit((opndProp != nullptr && static_cast(opndProp)->IsLoadLiteral()) ? "=" : "#") - .Emit((size == k64BitSize) ? 
GetValue() : static_cast(static_cast(GetValue()))); - return; - } - if (CGOptions::IsPIC() && symbol->NeedPIC()) { - emitter.Emit(":got:" + symbol->GetName()); - } else if (symbol->GetStorageClass() == kScPstatic && symbol->GetSKind() != kStConst && symbol->IsLocal()) { - emitter.Emit(symbol->GetName() + std::to_string(emitter.GetCG()->GetMIRModule()->CurFunction()->GetPuidx())); - } else { - emitter.Emit(symbol->GetName()); - } - if (GetValue() != 0) { - emitter.Emit("+" + std::to_string(GetValue())); - } -} - bool StImmOperand::Less(const Operand &right) const{ if (&right == this) { return false; @@ -207,10 +49,11 @@ bool StImmOperand::Less(const Operand &right) const{ void StImmOperand::Emit(Emitter &emitter, const OpndProp *opndProp) const { CHECK_FATAL(opndProp != nullptr, "opndProp is nullptr in StImmOperand::Emit"); - bool isLiteralLow12 = static_cast(opndProp)->IsLiteralLow12(); + bool isLiteralLow12 = opndProp->IsLiteralLow12(); if (CGOptions::IsPIC() && symbol->NeedPIC()) { - auto picPreFix = isLiteralLow12 ? "#:got_lo12:" : ":got:"; - emitter.Emit(picPreFix + GetName()); + std::string gotEntry = symbol->IsThreadLocal() ? ":tlsdesc" : ":got"; + gotEntry += isLiteralLow12 ? "_lo12:" : ":"; + emitter.Emit(gotEntry + GetName()); return; } if (isLiteralLow12) { @@ -226,371 +69,6 @@ void StImmOperand::Emit(Emitter &emitter, const OpndProp *opndProp) const { } } -Operand *AArch64MemOperand::GetOffset() const { - switch (addrMode) { - case kAddrModeBOi: - return GetOffsetOperand(); - case kAddrModeBOrX: - return GetOffsetRegister(); - case kAddrModeLiteral: - break; - case kAddrModeLo12Li: - break; - default: - ASSERT(false, "error memoperand dump"); - break; - } - return nullptr; -} - -void AArch64MemOperand::Emit(Emitter &emitter, const OpndProp *opndProp) const { - AArch64MemOperand::AArch64AddressingMode addressMode = GetAddrMode(); -#if DEBUG - const AArch64MD *md = &AArch64CG::kMd[emitter.GetCurrentMOP()]; - bool isLDSTpair = md->IsLoadStorePair(); - ASSERT(md->Is64Bit() || md->GetOperandSize() <= k32BitSize || md->GetOperandSize() == k128BitSize, - "unexpected opnd size"); -#endif - if (addressMode == AArch64MemOperand::kAddrModeBOi) { - emitter.Emit("["); - auto *baseReg = static_cast(GetBaseRegister()); - ASSERT(baseReg != nullptr, "expect an AArch64RegOperand here"); - uint32 baseSize = baseReg->GetSize(); - if (CGOptions::IsPIC() && (baseSize != k64BitSize)) { - baseReg->SetSize(k64BitSize); - } - baseReg->Emit(emitter, nullptr); - baseReg->SetSize(baseSize); - AArch64OfstOperand *offset = GetOffsetImmediate(); - if (offset != nullptr) { -#ifndef USE_32BIT_REF /* can be load a ref here */ - /* - * Cortex-A57 Software Optimization Guide: - * The ARMv8-A architecture allows many types of load and store accesses to be arbitrarily aligned. - * The Cortex- A57 processor handles most unaligned accesses without performance penalties. 
- */ -#if DEBUG - if (IsOffsetMisaligned(md->GetOperandSize())) { - INFO(kLncInfo, "The Memory operand's offset is misaligned:", ""); - LogInfo::MapleLogger() << "==="; - Dump(); - LogInfo::MapleLogger() << "===\n"; - } -#endif -#endif /* USE_32BIT_REF */ - if (IsPostIndexed()) { - ASSERT(!IsSIMMOffsetOutOfRange(offset->GetOffsetValue(), md->Is64Bit(), isLDSTpair), - "should not be SIMMOffsetOutOfRange"); - emitter.Emit("]"); - if (!offset->IsZero()) { - emitter.Emit(", "); - offset->Emit(emitter, nullptr); - } - } else if (IsPreIndexed()) { - ASSERT(!IsSIMMOffsetOutOfRange(offset->GetOffsetValue(), md->Is64Bit(), isLDSTpair), - "should not be SIMMOffsetOutOfRange"); - if (!offset->IsZero()) { - emitter.Emit(","); - offset->Emit(emitter, nullptr); - } - emitter.Emit("]!"); - } else { - if (CGOptions::IsPIC() && (offset->IsSymOffset() || offset->IsSymAndImmOffset()) && - offset->GetSymbol()->NeedPIC()) { - emitter.Emit(",#:got_lo12:"); - emitter.Emit(offset->GetSymbolName()); - } else { - uint32 dsize = size; - if (size > k8BitSize) { - dsize = static_cast(RoundUp(size, k8BitSize)); - } - if (!offset->IsZero()) { - emitter.Emit(","); - offset->Emit(emitter, nullptr); - } - } - emitter.Emit("]"); - } - } else { - emitter.Emit("]"); - } - } else if (addressMode == AArch64MemOperand::kAddrModeBOrX) { - /* - * Base plus offset | [base{, #imm}] [base, Xm{, LSL #imm}] [base, Wm, (S|U)XTW {#imm}] - * offset_opnds=nullptr - * offset_opnds=64 offset_opnds=32 - * imm=0 or 3 imm=0 or 2, s/u - */ - emitter.Emit("["); - auto *baseReg = static_cast(GetBaseRegister()); - // After ssa version support different size, the value is changed back - baseReg->SetSize(k64BitSize); - baseReg->Emit(emitter, nullptr); - emitter.Emit(","); - GetOffsetRegister()->Emit(emitter, nullptr); - if (ShouldEmitExtend() || GetBaseRegister()->GetSize() > GetOffsetRegister()->GetSize()) { - emitter.Emit(","); - /* extend, #0, of #3/#2 */ - emitter.Emit(GetExtendAsString()); - if (GetExtendAsString() == "LSL" || ShiftAmount() != 0) { - emitter.Emit(" #"); - emitter.Emit(ShiftAmount()); - } - } - emitter.Emit("]"); - } else if (addressMode == AArch64MemOperand::kAddrModeLiteral) { - auto *prop = static_cast(opndProp); - CHECK_FATAL(prop != nullptr, "prop is nullptr in AArch64MemOperand::Emit"); - if (prop->IsMemLow12()) { - emitter.Emit("#:lo12:"); - } - emitter.Emit(GetSymbol()->GetName()); - } else if (addressMode == AArch64MemOperand::kAddrModeLo12Li) { - emitter.Emit("["); - GetBaseRegister()->Emit(emitter, nullptr); - - AArch64OfstOperand *offset = GetOffsetImmediate(); - ASSERT(offset != nullptr, "nullptr check"); - - emitter.Emit(", #:lo12:"); - if (GetSymbol()->GetStorageClass() == kScPstatic && GetSymbol()->IsLocal()) { - PUIdx pIdx = emitter.GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); - emitter.Emit(GetSymbolName() + std::to_string(pIdx)); - } else { - emitter.Emit(GetSymbolName()); - } - if (!offset->IsZero()) { - emitter.Emit("+"); - emitter.Emit(std::to_string(offset->GetOffsetValue())); - } - emitter.Emit("]"); - } else { - ASSERT(false, "nyi"); - } -} - -void AArch64MemOperand::Dump() const { - LogInfo::MapleLogger() << "Mem:"; - LogInfo::MapleLogger() << " size:" << GetSize() << " "; - switch (addrMode) { - case kAddrModeBOi: { - LogInfo::MapleLogger() << "base:"; - GetBaseRegister()->Dump(); - LogInfo::MapleLogger() << "offset:"; - GetOffsetOperand()->Dump(); - switch (idxOpt) { - case kIntact: - LogInfo::MapleLogger() << " intact"; - break; - case kPreIndex: - LogInfo::MapleLogger() << " pre-index"; - 
break; - case kPostIndex: - LogInfo::MapleLogger() << " post-index"; - break; - default: - break; - } - break; - } - case kAddrModeBOrX: { - LogInfo::MapleLogger() << "base:"; - GetBaseRegister()->Dump(); - LogInfo::MapleLogger() << "offset:"; - GetOffsetRegister()->Dump(); - LogInfo::MapleLogger() << " " << GetExtendAsString(); - LogInfo::MapleLogger() << " shift: " << ShiftAmount(); - LogInfo::MapleLogger() << " extend: " << extend; - break; - } - case kAddrModeLiteral: - LogInfo::MapleLogger() << "literal: " << GetSymbolName(); - break; - case kAddrModeLo12Li: { - LogInfo::MapleLogger() << "base:"; - GetBaseRegister()->Dump(); - LogInfo::MapleLogger() << "offset:"; - AArch64OfstOperand *offOpnd = GetOffsetImmediate(); - LogInfo::MapleLogger() << "#:lo12:"; - if (GetSymbol()->GetStorageClass() == kScPstatic && GetSymbol()->IsLocal()) { - PUIdx pIdx = CG::GetCurCGFunc()->GetMirModule().CurFunction()->GetPuidx(); - LogInfo::MapleLogger() << GetSymbolName() << std::to_string(pIdx); - } else { - LogInfo::MapleLogger() << GetSymbolName(); - } - LogInfo::MapleLogger() << "+" << std::to_string(offOpnd->GetOffsetValue()); - break; - } - default: - ASSERT(false, "error memoperand dump"); - break; - } -} - -bool AArch64MemOperand::Equals(Operand &operand) const { - if (!operand.IsMemoryAccessOperand()) { - return false; - } - return Equals(static_cast(operand)); -} - -bool AArch64MemOperand::Equals(const AArch64MemOperand &op) const { - if (&op == this) { - return true; - } - - if (addrMode == op.GetAddrMode()) { - switch (addrMode) { - case kAddrModeBOi: - return (GetBaseRegister()->Equals(*op.GetBaseRegister()) && - GetOffsetImmediate()->Equals(*op.GetOffsetImmediate())); - case kAddrModeBOrX: - return (GetBaseRegister()->Equals(*op.GetBaseRegister()) && - GetOffsetRegister()->Equals(*op.GetOffsetRegister()) && - GetExtendAsString() == op.GetExtendAsString() && - ShiftAmount() == op.ShiftAmount()); - case kAddrModeLiteral: - return GetSymbolName() == op.GetSymbolName(); - case kAddrModeLo12Li: - return (GetBaseRegister()->Equals(*op.GetBaseRegister()) && - GetSymbolName() == op.GetSymbolName() && - GetOffsetImmediate()->Equals(*op.GetOffsetImmediate())); - default: - ASSERT(false, "error memoperand"); - break; - } - } - return false; -} - -bool AArch64MemOperand::Less(const Operand &right) const { - if (&right == this) { - return false; - } - - /* For different type. 
*/ - if (GetKind() != right.GetKind()) { - return GetKind() < right.GetKind(); - } - - const AArch64MemOperand *rightOpnd = static_cast(&right); - if (addrMode != rightOpnd->addrMode) { - return addrMode < rightOpnd->addrMode; - } - - switch (addrMode) { - case kAddrModeBOi: { - ASSERT(idxOpt == kIntact, "Should not compare pre/post index addressing."); - - RegOperand *baseReg = GetBaseRegister(); - RegOperand *rbaseReg = rightOpnd->GetBaseRegister(); - int32 nRet = baseReg->RegCompare(*rbaseReg); - if (nRet == 0) { - Operand *ofstOpnd = GetOffsetOperand(); - const Operand *rofstOpnd = rightOpnd->GetOffsetOperand(); - return ofstOpnd->Less(*rofstOpnd); - } - return nRet < 0; - } - case kAddrModeBOrX: { - if (noExtend != rightOpnd->noExtend) { - return noExtend; - } - if (!noExtend && extend != rightOpnd->extend) { - return extend < rightOpnd->extend; - } - RegOperand *indexReg = GetIndexRegister(); - const RegOperand *rindexReg = rightOpnd->GetIndexRegister(); - return indexReg->Less(*rindexReg); - } - case kAddrModeLiteral: { - return static_cast(GetSymbol()) < static_cast(rightOpnd->GetSymbol()); - } - case kAddrModeLo12Li: { - if (GetSymbol() != rightOpnd->GetSymbol()) { - return static_cast(GetSymbol()) < static_cast(rightOpnd->GetSymbol()); - } - Operand *ofstOpnd = GetOffsetOperand(); - const Operand *rofstOpnd = rightOpnd->GetOffsetOperand(); - return ofstOpnd->Less(*rofstOpnd); - } - default: - ASSERT(false, "Internal error."); - return false; - } -} - -bool AArch64MemOperand::NoAlias(const AArch64MemOperand &rightOpnd) const { - if (addrMode == kAddrModeBOi && rightOpnd.addrMode == kAddrModeBOi && idxOpt == kIntact && - rightOpnd.idxOpt == kIntact) { - RegOperand *baseReg = GetBaseRegister(); - RegOperand *rbaseReg = rightOpnd.GetBaseRegister(); - - if (baseReg->GetRegisterNumber() == RFP || rbaseReg->GetRegisterNumber() == RFP) { - Operand *ofstOpnd = GetOffsetOperand(); - Operand *rofstOpnd = rightOpnd.GetOffsetOperand(); - - ASSERT(ofstOpnd != nullptr, "offset operand should not be null."); - ASSERT(rofstOpnd != nullptr, "offset operand should not be null."); - OfstOperand *ofst = static_cast(ofstOpnd); - OfstOperand *rofst = static_cast(rofstOpnd); - ASSERT(ofst != nullptr, "CG internal error, invalid type."); - ASSERT(rofst != nullptr, "CG internal error, invalid type."); - - return (!ofst->ValueEquals(*rofst)); - } - } - - return false; -} - -bool AArch64MemOperand::NoOverlap(const AArch64MemOperand &rightOpnd) const { - if (addrMode != kAddrModeBOi || rightOpnd.addrMode != kAddrModeBOi || idxOpt != kIntact || - rightOpnd.idxOpt != kIntact) { - return false; - } - if (GetBaseRegister()->GetRegisterNumber() != RFP || rightOpnd.GetBaseRegister()->GetRegisterNumber() != RFP) { - return false; - } - int64 ofset1 = GetOffsetOperand()->GetValue(); - int64 ofset2 = rightOpnd.GetOffsetOperand()->GetValue(); - if (ofset1 < ofset2) { - return ((ofset1 + GetAccessSize()) <= ofset2); - } else { - return ((ofset2 + rightOpnd.GetAccessSize()) <= ofset1); - } -} - -/* sort the register operand according to their number */ -void AArch64ListOperand::Emit(Emitter &emitter, const OpndProp *opndProp) const { - (void)opndProp; - size_t nLeft = opndList.size(); - if (nLeft == 0) { - return; - } - - for (auto it = opndList.begin(); it != opndList.end(); ++it) { - (*it)->Emit(emitter, nullptr); - if (--nLeft >= 1) { - emitter.Emit(", "); - } - } -} - -void AArch64PhiOperand::Emit(Emitter &emitter, const OpndProp *opndProp) const { - (void)emitter; - (void)opndProp; - CHECK_FATAL(false, "phi is a 
pseudo-operand. cannot emit"); -} - -void AArch64PhiOperand::Dump() const { - for (auto it = phiList.begin(); it != phiList.end();) { - it->second->Dump(); - LogInfo::MapleLogger() << " fBB<" << it->first << ">"; - LogInfo::MapleLogger() << (++it == phiList.end() ? "" : " ,"); - } -} - void ListConstraintOperand::Emit(Emitter &emitter, const OpndProp *opndProp) const { /* nothing emitted for inline asm constraints */ (void)emitter; @@ -675,44 +153,6 @@ void ExtendShiftOperand::Emit(Emitter &emitter, const OpndProp *prop) const { } } -void ExtendShiftOperand::Dump() const { - auto dumpExtendShift = [this](const std::string &extendKind)->void { - LogInfo::MapleLogger() << extendKind; - if (shiftAmount != 0) { - LogInfo::MapleLogger() << " : " << shiftAmount; - } - }; - switch (extendOp) { - case kUXTB: - dumpExtendShift("UXTB"); - break; - case kUXTH: - dumpExtendShift("UXTH"); - break; - case kUXTW: - dumpExtendShift("UXTW"); - break; - case kUXTX: - dumpExtendShift("UXTX"); - break; - case kSXTB: - dumpExtendShift("SXTB"); - break; - case kSXTH: - dumpExtendShift("SXTH"); - break; - case kSXTW: - dumpExtendShift("SXTW"); - break; - case kSXTX: - dumpExtendShift("SXTX"); - break; - default: - ASSERT(false, "should not be here"); - break; - } -} - bool BitShiftOperand::Less(const Operand &right) const { if (&right == this) { return false; diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_opnd.def b/src/mapleall/maple_be/src/cg/aarch64/aarch64_opnd.def index 5e5be5b353a6701aba6ba6aa1117ffb1d2790ccc..42d639ea0ade31c22c04ff95c9d922f83f4709b2 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_opnd.def +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_opnd.def @@ -12,279 +12,177 @@ * FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v1 for more details. 
*/ -AArch64OpndProp mopdInt32RegSrc = {Operand::kOpdRegister, {kRegTyInt, kAllRegNum, kRegPropUse }, 32}; -AArch64OpndProp mopdInt32RegDest = {Operand::kOpdRegister, {kRegTyInt, kAllRegNum, kRegPropDef}, 32}; -AArch64OpndProp mopdInt32RegDestSrc = {Operand::kOpdRegister, {kRegTyInt, kAllRegNum, kRegPropDef | kRegPropUse}, 32}; -AArch64OpndProp mopdInt64RegSrc = {Operand::kOpdRegister, {kRegTyInt, kAllRegNum, kRegPropUse}, 64}; -AArch64OpndProp mopdInt64RegDest = {Operand::kOpdRegister, {kRegTyInt, kAllRegNum, kRegPropDef}, 64}; -AArch64OpndProp mopdInt64RegDestSrc = {Operand::kOpdRegister, {kRegTyInt, kAllRegNum, kRegPropDef | kRegPropUse}, 64}; -AArch64OpndProp mopdF8RegSrc = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropUse}, 8}; -AArch64OpndProp mopdF8RegDest = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropDef}, 8}; -AArch64OpndProp mopdF16RegSrc = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropUse}, 16}; -AArch64OpndProp mopdF16RegDest = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropDef}, 16}; -AArch64OpndProp mopdF32RegSrc = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropUse}, 32}; -AArch64OpndProp mopdF32RegDest = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropDef}, 32}; -AArch64OpndProp mopdF32RegDestSrc = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropDef | kRegPropUse}, 32}; -AArch64OpndProp mopdF64RegSrc = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropUse}, 64}; -AArch64OpndProp mopdF64RegDest = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropDef}, 64}; -AArch64OpndProp mopdF64RegDestSrc = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropDef | kRegPropUse}, 64}; -AArch64OpndProp mopdV128RegSrc = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropUse | kVector}, 128}; -AArch64OpndProp mopdV128RegDest = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropDef | kVector}, 128}; -AArch64OpndProp mopdV128RegDestSrc = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropDef | kRegPropUse | kVector}, 128}; -AArch64OpndProp mopdV64RegSrc = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropUse | kVector}, 64}; -AArch64OpndProp mopdV64RegDest = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropDef | kVector}, 64}; -AArch64OpndProp mopdV64RegDestSrc = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropDef | kRegPropUse | kVector}, 64}; -AArch64OpndProp mopdIntImm4Src = {Operand::kOpdImmediate, {kRegTyUndef, kAllRegNum, kRegPropUse}, 4}; -AArch64OpndProp mopdIntImm5Src = {Operand::kOpdImmediate, {kRegTyUndef, kAllRegNum, kRegPropUse}, 5}; -AArch64OpndProp mopdIntImm6Src = {Operand::kOpdImmediate, {kRegTyUndef, kAllRegNum, kRegPropUse}, 6}; -AArch64OpndProp mopdIntImm8Src = {Operand::kOpdImmediate, {kRegTyUndef, kAllRegNum, kRegPropUse}, 8}; - -bool Imm12BitValid(int64 value) { - bool result = maplebe::IsBitSizeImmediate(static_cast(value), kMaxImmVal12Bits, 0); - // for target linux-aarch64-gnu - result = result || maplebe::IsBitSizeImmediate(static_cast(value), kMaxImmVal12Bits, kMaxImmVal12Bits); - return result; -} - -bool Imm12BitMaskValid(int64 value) { - if (value == 0 || static_cast(value) == -1) { - return true; - } - return maplebe::IsBitmaskImmediate(static_cast(value), k32BitSize); -} - -bool Imm13BitValid(int64 value) { - bool result = maplebe::IsBitSizeImmediate(static_cast(value), kMaxImmVal13Bits, 0); - // for target linux-aarch64-gnu - result = result || maplebe::IsBitSizeImmediate(static_cast(value), kMaxImmVal13Bits, 
kMaxImmVal13Bits); - return result; -} - -bool Imm13BitMaskValid(int64 value) { - if (value == 0 || static_cast(value) == -1) { - return true; - } - return maplebe::IsBitmaskImmediate(static_cast(value), k64BitSize); -} - -bool Imm16BitValid(int64 value) { - bool result = maplebe::IsBitSizeImmediate(static_cast(value), kMaxImmVal16Bits, 0); - /* - * for target linux-aarch64-gnu - * aarch64 assembly takes up to 24-bits immediate, generating - * either cmp or cmp with shift 12 encoding - */ - result = result || maplebe::IsBitSizeImmediate(static_cast(value), kMaxImmVal12Bits, kMaxImmVal12Bits); - return result; -} - -AArch64ImmOpndProp mopdIntImm12Src = {Operand::kOpdImmediate, {kRegTyUndef, kAllRegNum, kRegPropUse}, 12, Imm12BitValid}; -AArch64ImmOpndProp mopdIntImm13Src = {Operand::kOpdImmediate, {kRegTyUndef, kAllRegNum, kRegPropUse}, 13, Imm13BitValid}; -AArch64ImmOpndProp mopdIntBitMaskImm12Src = {Operand::kOpdImmediate, {kRegTyUndef, kAllRegNum, kRegPropUse}, 12, Imm12BitMaskValid}; -AArch64ImmOpndProp mopdIntBitMaskImm13Src = {Operand::kOpdImmediate, {kRegTyUndef, kAllRegNum, kRegPropUse}, 13, Imm13BitMaskValid}; -AArch64ImmOpndProp mopdIntImm16Src = {Operand::kOpdImmediate, {kRegTyUndef, kAllRegNum, kRegPropUse}, 16, Imm16BitValid}; -AArch64OpndProp mopdIntImm24Src = {Operand::kOpdImmediate, {kRegTyUndef, kAllRegNum, kRegPropUse}, 24}; -AArch64OpndProp mopdIntImm32Src = {Operand::kOpdImmediate, {kRegTyUndef, kAllRegNum, kRegPropUse}, 32}; -AArch64OpndProp mopdIntImm32Literal = {Operand::kOpdImmediate, {kRegTyUndef, kAllRegNum, kRegPropUse | kLoadLiteral}, 32}; -AArch64OpndProp mopdIntImm64Src = {Operand::kOpdImmediate, {kRegTyUndef, kAllRegNum, kRegPropUse}, 64}; -AArch64OpndProp mopdIntImm64Literal = {Operand::kOpdImmediate, {kRegTyUndef, kAllRegNum, kRegPropUse | kLoadLiteral}, 64}; -AArch64OpndProp mopdFpzeroImm8Src = {Operand::kOpdFPZeroImmediate, {kRegTyUndef, kAllRegNum, kRegPropUse}, 8}; - -/* - * 8bit : 0 - * halfword : 1 - * 32bit - word : 2 - * 64bit - word : 3 - * 128bit- word : 4 - */ -bool StrLdrSignedOfstValid(int64 value, uint wordSize) { - if (value <= k256BitSize && value >= kNegative256BitSize) { - return true; - } else if ((value > k256BitSize) && (value <= kMaxPimm[wordSize])) { - uint64 mask = (1U << wordSize) - 1U; - return (static_cast(value) & mask) ? false : true; - } - return false; -} - - -bool StrLdr8ImmValid(int64 value) { - return StrLdrSignedOfstValid(value, 0); -} - -bool StrLdr16ImmValid(int64 value) { - return StrLdrSignedOfstValid(value, k1ByteSize); -} - -bool StrLdr32ImmValid(int64 value) { - return StrLdrSignedOfstValid(value, k2ByteSize); -} - -bool StrLdr32PairImmValid(int64 value) { - if ((value <= kMaxSimm32Pair) && (value >= kMinSimm32)) { - return (static_cast(value) & 3) ? false : true; - } - return false; -} - -bool StrLdr64ImmValid(int64 value) { - return StrLdrSignedOfstValid(value, k3ByteSize); -} - -bool StrLdr64PairImmValid(int64 value) { - if (value <= kMaxSimm64Pair && (value >= kMinSimm64)) { - return (static_cast(value) & 7) ? false : true; - } - return false; -} - -bool StrLdr128ImmValid(int64 value) { - return StrLdrSignedOfstValid(value, k4ByteSize); -} - -bool StrLdr128PairImmValid(int64 value) { - if (value < k1024BitSize && (value >= kNegative1024BitSize)) { - return (static_cast(value) & 0xf) ? 
false : true; - } - return false; -} - -AArch64ImmOpndProp mopdMem8Src = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse}, 8, StrLdr8ImmValid}; -AArch64ImmOpndProp mopdMem16Src = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse}, 16, StrLdr16ImmValid}; -AArch64ImmOpndProp mopdMem32Src = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse}, 32, StrLdr32ImmValid}; -AArch64ImmOpndProp mopdMemPair32Src = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse}, 32, StrLdr32PairImmValid}; -AArch64OpndProp mopdMem32SrcH = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse | kMemLow12}, 16}; -AArch64OpndProp mopdMem32SrcL = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse | kMemLow12}, 16}; -AArch64ImmOpndProp mopdMem64Src = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse}, 64, StrLdr64ImmValid}; -AArch64ImmOpndProp mopdMemPair64Src = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse}, 64, StrLdr64PairImmValid}; -AArch64OpndProp mopdMem64SrcL = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse | kMemLow12}, 12}; -AArch64ImmOpndProp mopdMem128Src = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse}, 128, StrLdr128ImmValid}; -AArch64ImmOpndProp mopdMemPair128Src = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse}, 128, StrLdr128PairImmValid}; - -AArch64ImmOpndProp mopdMem8Dest = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropDef }, 8, StrLdr8ImmValid}; -AArch64ImmOpndProp mopdMem16Dest = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropDef }, 16, StrLdr16ImmValid}; -AArch64ImmOpndProp mopdMem32Dest = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropDef }, 32, StrLdr32ImmValid}; -AArch64ImmOpndProp mopdMem64Dest = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropDef }, 64, StrLdr64ImmValid}; -AArch64ImmOpndProp mopdMem128Dest = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropDef}, 128, StrLdr128ImmValid}; -AArch64ImmOpndProp mopdMemPair32Dest = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropDef }, 32, StrLdr32PairImmValid}; -AArch64ImmOpndProp mopdMemPair64Dest = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropDef }, 64, StrLdr64PairImmValid}; -AArch64ImmOpndProp mopdMemPair128Dest = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropDef}, 128, StrLdr128PairImmValid}; - -AArch64OpndProp mopdLbl64Src = {Operand::kOpdBBAddress, {kRegTyUndef,kAllRegNum, kRegPropUse}, 64}; -AArch64OpndProp mopdLiteralSrc = {Operand::kOpdStImmediate, {kRegTyUndef,kAllRegNum, kRegPropUse}, 64}; -AArch64OpndProp mopdLiteralL12Src = {Operand::kOpdStImmediate, {kRegTyUndef, kAllRegNum, kLiteralLow12}, 12}; -AArch64OpndProp mopdListSrc = {Operand::kOpdList, {kRegTyUndef, kAllRegNum, kRegPropUse}, 1}; -AArch64OpndProp mopdListDest = {Operand::kOpdList, {kRegTyUndef, kAllRegNum, kRegPropDef}, 1}; -AArch64OpndProp mopdCcRegSrc = {Operand::kOpdRegister, {kRegTyCc, kAllRegNum, kRegPropUse}, 1}; -AArch64OpndProp mopdCcRegDest = {Operand::kOpdRegister, {kRegTyCc, kAllRegNum, kRegPropDef}, 1}; -AArch64OpndProp mopdCcRegDestSrc = {Operand::kOpdRegister, {kRegTyCc, kAllRegNum, kRegPropDef | kRegPropUse}, 1}; -AArch64OpndProp mopdSpRegDest = {Operand::kOpdRegister, {kRegTyInt, RSP, kRegPropDef}, 32}; -AArch64OpndProp mopdMem32SrcPre = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse | kPreInc}, 32}; -AArch64OpndProp mopdMem32SrcPost = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse | kPostInc}, 32}; -AArch64OpndProp mopdMem64SrcPre = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse | kPreInc}, 64}; 
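// A minimal, self-contained sketch of the rule the StrLdr*ImmValid helpers
// above encode: an offset in [-256, 256] fits the 9-bit signed "unscaled"
// ldur/stur form, and anything larger must use the 12-bit unsigned "scaled"
// form, i.e. be a multiple of the access size and within its range. The name
// SignedOfstValidSketch and the 4095 bound are illustrative stand-ins; the
// backend's exact limits live in its kMaxPimm table, not reproduced here.
#include <cstdint>

// wordSize is log2 of the access size in bytes: 0 = 8-bit, 1 = halfword,
// 2 = 32-bit word, 3 = 64-bit, 4 = 128-bit (matching the comment above the
// original StrLdrSignedOfstValid).
bool SignedOfstValidSketch(int64_t value, unsigned wordSize) {
  if (value >= -256 && value <= 256) {  // signed unscaled encoding
    return true;
  }
  const int64_t maxScaled = int64_t(4095) << wordSize;  // illustrative bound
  if (value > 256 && value <= maxScaled) {              // unsigned scaled encoding
    const uint64_t mask = (uint64_t(1) << wordSize) - 1;
    return (static_cast<uint64_t>(value) & mask) == 0;  // aligned to access size
  }
  return false;
}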
-AArch64OpndProp mopdMem64SrcPost = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse | kPostInc}, 64}; -AArch64OpndProp mopdMem32LiteralSrc = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse}, 32}; -AArch64OpndProp mopdMem64LiteralSrc = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse}, 64}; - -AArch64OpndProp mopdCondSrc = {Operand::kOpdCond, {kRegTyCc, kAllRegNum, kRegPropUse}, 4}; - -AArch64OpndProp mopdBitshift32Src = {Operand::kOpdShift, {kRegTyUndef, kAllRegNum, kRegPropUse}, 5}; -AArch64OpndProp mopdBitshift64Src = {Operand::kOpdShift, {kRegTyUndef, kAllRegNum, kRegPropUse}, 6}; -AArch64OpndProp mopdExtendshift64Src = {Operand::kOpdExtend, {kRegTyUndef, kAllRegNum, kRegPropUse}, 3}; -AArch64OpndProp mopdLsl4Src = {Operand::kOpdShift, {kRegTyUndef, kAllRegNum, kRegPropUse}, 4}; -AArch64OpndProp mopdLsl6Src = {Operand::kOpdShift, {kRegTyUndef, kAllRegNum, kRegPropUse}, 6}; -AArch64OpndProp mopdLsl12Src = {Operand::kOpdShift, {kRegTyUndef, kAllRegNum, kRegPropUse}, 12}; - -AArch64OpndProp mopdString = {Operand::kOpdString, {kRegTyUndef, kAllRegNum, kRegPropUse}, 0}; +OpndProp mopdInt32RegSrc = {Operand::kOpdRegister, {kRegTyInt, kAllRegNum, kRegPropUse }, 32}; +OpndProp mopdInt32RegDest = {Operand::kOpdRegister, {kRegTyInt, kAllRegNum, kRegPropDef}, 32}; +OpndProp mopdInt32RegDestSrc = {Operand::kOpdRegister, {kRegTyInt, kAllRegNum, kRegPropDef | kRegPropUse}, 32}; +OpndProp mopdInt64RegSrc = {Operand::kOpdRegister, {kRegTyInt, kAllRegNum, kRegPropUse}, 64}; +OpndProp mopdInt64RegDest = {Operand::kOpdRegister, {kRegTyInt, kAllRegNum, kRegPropDef}, 64}; +OpndProp mopdInt64RegDestSrc = {Operand::kOpdRegister, {kRegTyInt, kAllRegNum, kRegPropDef | kRegPropUse}, 64}; +OpndProp mopdF8RegSrc = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropUse}, 8}; +OpndProp mopdF8RegDest = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropDef}, 8}; +OpndProp mopdF16RegSrc = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropUse}, 16}; +OpndProp mopdF16RegDest = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropDef}, 16}; +OpndProp mopdF32RegSrc = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropUse}, 32}; +OpndProp mopdF32RegDest = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropDef}, 32}; +OpndProp mopdF32RegDestSrc = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropDef | kRegPropUse}, 32}; +OpndProp mopdF64RegSrc = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropUse}, 64}; +OpndProp mopdF64RegDest = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropDef}, 64}; +OpndProp mopdF64RegDestSrc = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropDef | kRegPropUse}, 64}; +OpndProp mopdV128RegSrc = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropUse | kVector}, 128}; +OpndProp mopdV128RegDest = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropDef | kVector}, 128}; +OpndProp mopdV128RegDestSrc = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropDef | kRegPropUse | kVector}, 128}; +OpndProp mopdV64RegSrc = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropUse | kVector}, 64}; +OpndProp mopdV64RegDest = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropDef | kVector}, 64}; +OpndProp mopdV64RegDestSrc = {Operand::kOpdRegister, {kRegTyFloat, kAllRegNum, kRegPropDef | kRegPropUse | kVector}, 64}; +OpndProp mopdIntImm4Src = {Operand::kOpdImmediate, {kRegTyUndef, kAllRegNum, kRegPropUse}, 4}; +OpndProp mopdIntImm5Src = {Operand::kOpdImmediate, {kRegTyUndef, kAllRegNum, 
kRegPropUse}, 5}; +OpndProp mopdIntImm6Src = {Operand::kOpdImmediate, {kRegTyUndef, kAllRegNum, kRegPropUse}, 6}; +OpndProp mopdIntImm8Src = {Operand::kOpdImmediate, {kRegTyUndef, kAllRegNum, kRegPropUse}, 8}; + +ImmOpndProp mopdIntImm12Src = {Operand::kOpdImmediate, {kRegTyUndef, kAllRegNum, kRegPropUse}, 12, Imm12BitValid}; +ImmOpndProp mopdIntImm13Src = {Operand::kOpdImmediate, {kRegTyUndef, kAllRegNum, kRegPropUse}, 13, Imm13BitValid}; +ImmOpndProp mopdIntBitMaskImm12Src = {Operand::kOpdImmediate, {kRegTyUndef, kAllRegNum, kRegPropUse}, 12, Imm12BitMaskValid}; +ImmOpndProp mopdIntBitMaskImm13Src = {Operand::kOpdImmediate, {kRegTyUndef, kAllRegNum, kRegPropUse}, 13, Imm13BitMaskValid}; +ImmOpndProp mopdIntImm16Src = {Operand::kOpdImmediate, {kRegTyUndef, kAllRegNum, kRegPropUse}, 16, Imm16BitValid}; +OpndProp mopdIntImm24Src = {Operand::kOpdImmediate, {kRegTyUndef, kAllRegNum, kRegPropUse}, 24}; +OpndProp mopdIntImm32Src = {Operand::kOpdImmediate, {kRegTyUndef, kAllRegNum, kRegPropUse}, 32}; +OpndProp mopdIntImm64Src = {Operand::kOpdImmediate, {kRegTyUndef, kAllRegNum, kRegPropUse}, 64}; +OpndProp mopdFpzeroImm8Src = {Operand::kOpdFPZeroImmediate, {kRegTyUndef, kAllRegNum, kRegPropUse}, 8}; + +ImmOpndProp mopdMem8Src = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse}, 8, StrLdr8ImmValid}; +ImmOpndProp mopdMem16Src = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse}, 16, StrLdr16ImmValid}; +ImmOpndProp mopdMem32Src = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse}, 32, StrLdr32ImmValid}; +ImmOpndProp mopdMemPair32Src = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse}, 32, StrLdr32PairImmValid}; +OpndProp mopdMem32SrcH = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse | kMemLow12}, 16}; +OpndProp mopdMem32SrcL = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse | kMemLow12}, 16}; +ImmOpndProp mopdMem64Src = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse}, 64, StrLdr64ImmValid}; +ImmOpndProp mopdMemPair64Src = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse}, 64, StrLdr64PairImmValid}; +OpndProp mopdMem64SrcL = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse | kMemLow12}, 12}; +ImmOpndProp mopdMem128Src = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse}, 128, StrLdr128ImmValid}; +ImmOpndProp mopdMemPair128Src = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse}, 128, StrLdr128PairImmValid}; + +ImmOpndProp mopdMem8Dest = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropDef }, 8, StrLdr8ImmValid}; +ImmOpndProp mopdMem16Dest = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropDef }, 16, StrLdr16ImmValid}; +ImmOpndProp mopdMem32Dest = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropDef }, 32, StrLdr32ImmValid}; +ImmOpndProp mopdMem64Dest = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropDef }, 64, StrLdr64ImmValid}; +ImmOpndProp mopdMem128Dest = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropDef}, 128, StrLdr128ImmValid}; +ImmOpndProp mopdMemPair32Dest = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropDef }, 32, StrLdr32PairImmValid}; +ImmOpndProp mopdMemPair64Dest = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropDef }, 64, StrLdr64PairImmValid}; +ImmOpndProp mopdMemPair128Dest = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropDef}, 128, StrLdr128PairImmValid}; + +OpndProp mopdLbl64Src = {Operand::kOpdBBAddress, {kRegTyUndef,kAllRegNum, kRegPropUse}, 64}; +OpndProp mopdLiteralSrc = {Operand::kOpdStImmediate, {kRegTyUndef,kAllRegNum, kRegPropUse}, 64}; +OpndProp 
mopdLiteralL12Src = {Operand::kOpdStImmediate, {kRegTyUndef, kAllRegNum, kLiteralLow12}, 12};
+OpndProp mopdListSrc = {Operand::kOpdList, {kRegTyUndef, kAllRegNum, kRegPropUse}, 1};
+OpndProp mopdListDest = {Operand::kOpdList, {kRegTyUndef, kAllRegNum, kRegPropDef}, 1};
+OpndProp mopdCcRegSrc = {Operand::kOpdRegister, {kRegTyCc, kAllRegNum, kRegPropUse}, 1};
+OpndProp mopdCcRegDest = {Operand::kOpdRegister, {kRegTyCc, kAllRegNum, kRegPropDef}, 1};
+OpndProp mopdCcRegDestSrc = {Operand::kOpdRegister, {kRegTyCc, kAllRegNum, kRegPropDef | kRegPropUse}, 1};
+OpndProp mopdSpRegDest = {Operand::kOpdRegister, {kRegTyInt, RSP, kRegPropDef}, 32};
+OpndProp mopdMem32SrcPre = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse | kPreInc}, 32};
+OpndProp mopdMem32SrcPost = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse | kPostInc}, 32};
+OpndProp mopdMem64SrcPre = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse | kPreInc}, 64};
+OpndProp mopdMem64SrcPost = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse | kPostInc}, 64};
+OpndProp mopdMem32LiteralSrc = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse}, 32};
+OpndProp mopdMem64LiteralSrc = {Operand::kOpdMem, {kRegTyUndef, kAllRegNum, kRegPropUse}, 64};
+
+OpndProp mopdCondSrc = {Operand::kOpdCond, {kRegTyCc, kAllRegNum, kRegPropUse}, 4};
+
+OpndProp mopdBitshift32Src = {Operand::kOpdShift, {kRegTyUndef, kAllRegNum, kRegPropUse}, 5};
+OpndProp mopdBitshift64Src = {Operand::kOpdShift, {kRegTyUndef, kAllRegNum, kRegPropUse}, 6};
+OpndProp mopdExtendshift64Src = {Operand::kOpdExtend, {kRegTyUndef, kAllRegNum, kRegPropUse}, 3};
+OpndProp mopdLsl4Src = {Operand::kOpdShift, {kRegTyUndef, kAllRegNum, kRegPropUse}, 4};
+OpndProp mopdLsl6Src = {Operand::kOpdShift, {kRegTyUndef, kAllRegNum, kRegPropUse}, 6};
+OpndProp mopdLsl12Src = {Operand::kOpdShift, {kRegTyUndef, kAllRegNum, kRegPropUse}, 12};
+
+OpndProp mopdString = {Operand::kOpdString, {kRegTyUndef, kAllRegNum, kRegPropUse}, 0};
 // physical register
-AArch64OpndProp *MOPDReg = &mopdInt32RegSrc;
+OpndProp *MOPDReg = &mopdInt32RegSrc;
 // in mopdReg32IS, Reg means register, 32 means 32-bits, I means integer(F means float),
 // S means source, D means dest, H means high half bits, L means low half bits
-AArch64OpndProp *mopdReg32IS = &mopdInt32RegSrc;
-AArch64OpndProp *mopdReg32ID = &mopdInt32RegDest;
-AArch64OpndProp *mopdReg32IDS = &mopdInt32RegDestSrc;
-AArch64OpndProp *mopdReg64IS = &mopdInt64RegSrc;
-AArch64OpndProp *mopdReg64ID = &mopdInt64RegDest;
-AArch64OpndProp *mopdReg64IDS = &mopdInt64RegDestSrc;
-AArch64OpndProp *mopdReg8FS = &mopdF8RegSrc;
-AArch64OpndProp *mopdReg8FD = &mopdF8RegDest;
-AArch64OpndProp *mopdReg16FS = &mopdF16RegSrc;
-AArch64OpndProp *mopdReg16FD = &mopdF16RegDest;
-AArch64OpndProp *mopdReg32FS = &mopdF32RegSrc;
-AArch64OpndProp *mopdReg32FD = &mopdF32RegDest;
-AArch64OpndProp *mopdReg32FDS = &mopdF32RegDestSrc;
-AArch64OpndProp *mopdReg64FS = &mopdF64RegSrc;
-AArch64OpndProp *mopdReg64FD = &mopdF64RegDest;
-AArch64OpndProp *mopdReg64FDS = &mopdF64RegDestSrc;
-AArch64OpndProp *mopdReg128VS = &mopdV128RegSrc;
-AArch64OpndProp *mopdReg128VD = &mopdV128RegDest;
-AArch64OpndProp *mopdReg128VDS = &mopdV128RegDestSrc;
-AArch64OpndProp *mopdReg64VS = &mopdV64RegSrc;
-AArch64OpndProp *mopdReg64VD = &mopdV64RegDest;
-AArch64OpndProp *mopdReg64VDS = &mopdV64RegDestSrc;
-AArch64OpndProp *mopdMem = &mopdMem32Src;
-AArch64OpndProp *mopdMem8S = &mopdMem8Src;
-AArch64OpndProp *mopdMem16S = &mopdMem16Src;
-AArch64OpndProp *mopdMem32S = &mopdMem32Src;
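// Worked decoding of the alias convention described in the comment above,
// using names from this list (the meaning of P is inferred from the
// mopdMemPair* targets; everything else is the comment's own key):
//   mopdReg32IS -> Reg, 32-bit, Integer, Source:  32-bit int register use
//   mopdReg64FD -> Reg, 64-bit, Float,   Dest:    64-bit FP register def
//   mopdMem64PS -> Mem, 64-bit, Pair,    Source:  paired access, as in ldp/stp
//   mopdMem32SL -> Mem, 32-bit, Source,  Low:     the kMemLow12 half of a split address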
-AArch64OpndProp *mopdMem32PS = &mopdMemPair32Src; -AArch64OpndProp *mopdMem32SL = &mopdMem32SrcL; -AArch64OpndProp *mopdMem32SH = &mopdMem32SrcH; -AArch64OpndProp *mopdMem64PS = &mopdMemPair64Src; -AArch64OpndProp *mopdMem64S = &mopdMem64Src; -AArch64OpndProp *mopdMem64SL = &mopdMem64SrcL; -AArch64OpndProp *mopdMem128S = &mopdMem128Src; -AArch64OpndProp *mopdMem128PS = &mopdMemPair128Src; -AArch64OpndProp *mopdMem8D = &mopdMem8Dest; -AArch64OpndProp *mopdMem16D = &mopdMem16Dest; -AArch64OpndProp *mopdMem32D = &mopdMem32Dest; -AArch64OpndProp *mopdMem32PD = &mopdMemPair32Dest; -AArch64OpndProp *mopdMem64D = &mopdMem64Dest; -AArch64OpndProp *mopdMem64PD = &mopdMemPair64Dest; -AArch64OpndProp *mopdMem128D = &mopdMem128Dest; -AArch64OpndProp *mopdMem128PD = &mopdMemPair128Dest; -AArch64OpndProp *mopdMem32SPRE = &mopdMem32SrcPre; -AArch64OpndProp *mopdMem32SPOST = &mopdMem32SrcPost; -AArch64OpndProp *mopdMem64SPRE = &mopdMem64SrcPre; -AArch64OpndProp *mopdMem64SPOST = &mopdMem64SrcPost; -AArch64OpndProp *mopdMem32LiteralS = &mopdMem32LiteralSrc; -AArch64OpndProp *mopdMem64LiteralS = &mopdMem64LiteralSrc; -AArch64OpndProp *mopdImm4 = &mopdIntImm4Src; -AArch64OpndProp *mopdImm5 = &mopdIntImm5Src; -AArch64OpndProp *mopdImm6 = &mopdIntImm6Src; -AArch64OpndProp *mopdImm8 = &mopdIntImm8Src; -AArch64OpndProp *mopdImm12 = &mopdIntImm12Src; -AArch64OpndProp *mopdImmBm12 = &mopdIntBitMaskImm12Src; -AArch64OpndProp *mopdImm13 = &mopdIntImm13Src; -AArch64OpndProp *mopdImmBm13 = &mopdIntBitMaskImm13Src; -AArch64OpndProp *mopdImm16 = &mopdIntImm16Src; -AArch64OpndProp *mopdImm24 = &mopdIntImm24Src; -AArch64OpndProp *mopdImm32 = &mopdIntImm32Src; -AArch64OpndProp* mopdImm32LI = &mopdIntImm32Literal; -AArch64OpndProp *mopdImm64 = &mopdIntImm64Src; -AArch64OpndProp* mopdImm64LI = &mopdIntImm64Literal; -AArch64OpndProp *mopdFPZeroImm8 = &mopdFpzeroImm8Src; -AArch64OpndProp *mopdFuncName = &mopdLbl64Src; -AArch64OpndProp *mopdLabel = &mopdLbl64Src; -AArch64OpndProp *mopdLiteral = &mopdLiteralSrc; -AArch64OpndProp *mopdLiteralL12 = &mopdLiteralL12Src; - -AArch64OpndProp *mopdRegCCS = &mopdCcRegSrc; -AArch64OpndProp *mopdRegCCD = &mopdCcRegDest; -AArch64OpndProp *mopdRegCCDS = &mopdCcRegDestSrc; - -AArch64OpndProp *mopdCond = &mopdCondSrc; - -AArch64OpndProp *mopdBitShift32 = &mopdBitshift32Src; -AArch64OpndProp *mopdBitShift64 = &mopdBitshift64Src; -AArch64OpndProp *mopdExtendShift64 = &mopdExtendshift64Src; -AArch64OpndProp *mopdLSL4 = &mopdLsl4Src; -AArch64OpndProp *mopdLSL6 = &mopdLsl6Src; -AArch64OpndProp *mopdLSL12 = &mopdLsl12Src; - -AArch64OpndProp *mopdRSPD = &mopdSpRegDest; -AArch64OpndProp *mopdLISTS = &mopdListSrc; -AArch64OpndProp *mopdLISTD = &mopdListDest; -AArch64OpndProp *mopdSTRING = &mopdString; -AArch64OpndProp *mopdUndef = nullptr; +OpndProp *mopdReg32IS = &mopdInt32RegSrc; +OpndProp *mopdReg32ID = &mopdInt32RegDest; +OpndProp *mopdReg32IDS = &mopdInt32RegDestSrc; +OpndProp *mopdReg64IS = &mopdInt64RegSrc; +OpndProp *mopdReg64ID = &mopdInt64RegDest; +OpndProp *mopdReg64IDS = &mopdInt64RegDestSrc; +OpndProp *mopdReg8FS = &mopdF8RegSrc; +OpndProp *mopdReg8FD = &mopdF8RegDest; +OpndProp *mopdReg16FS = &mopdF16RegSrc; +OpndProp *mopdReg16FD = &mopdF16RegDest; +OpndProp *mopdReg32FS = &mopdF32RegSrc; +OpndProp *mopdReg32FD = &mopdF32RegDest; +OpndProp *mopdReg32FDS = &mopdF32RegDestSrc; +OpndProp *mopdReg64FS = &mopdF64RegSrc; +OpndProp *mopdReg64FD = &mopdF64RegDest; +OpndProp *mopdReg64FDS = &mopdF64RegDestSrc; +OpndProp *mopdReg128VS = &mopdV128RegSrc; +OpndProp *mopdReg128VD = &mopdV128RegDest; 
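// A hedged sketch of how these shared descriptors are typically consumed:
// per-instruction operand lists point back into the table above, so one
// descriptor can describe many machine ops. MopDescSketch and kAdd64Sketch
// are invented names for illustration; the backend's real machine-op table
// and its macros are defined elsewhere and may differ.
struct MopDescSketch {
  const char *name;    // mnemonic
  OpndProp *opnds[4];  // up to four operand descriptors per instruction
};
// "add xd, xn, xm": one 64-bit int def followed by two 64-bit int uses.
static MopDescSketch kAdd64Sketch = {
  "add", {mopdReg64ID, mopdReg64IS, mopdReg64IS, nullptr}
};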
+OpndProp *mopdReg128VDS = &mopdV128RegDestSrc; +OpndProp *mopdReg64VS = &mopdV64RegSrc; +OpndProp *mopdReg64VD = &mopdV64RegDest; +OpndProp *mopdReg64VDS = &mopdV64RegDestSrc; +OpndProp *mopdMem = &mopdMem32Src; +OpndProp *mopdMem8S = &mopdMem8Src; +OpndProp *mopdMem16S = &mopdMem16Src; +OpndProp *mopdMem32S = &mopdMem32Src; +OpndProp *mopdMem32PS = &mopdMemPair32Src; +OpndProp *mopdMem32SL = &mopdMem32SrcL; +OpndProp *mopdMem32SH = &mopdMem32SrcH; +OpndProp *mopdMem64PS = &mopdMemPair64Src; +OpndProp *mopdMem64S = &mopdMem64Src; +OpndProp *mopdMem64SL = &mopdMem64SrcL; +OpndProp *mopdMem128S = &mopdMem128Src; +OpndProp *mopdMem128PS = &mopdMemPair128Src; +OpndProp *mopdMem8D = &mopdMem8Dest; +OpndProp *mopdMem16D = &mopdMem16Dest; +OpndProp *mopdMem32D = &mopdMem32Dest; +OpndProp *mopdMem32PD = &mopdMemPair32Dest; +OpndProp *mopdMem64D = &mopdMem64Dest; +OpndProp *mopdMem64PD = &mopdMemPair64Dest; +OpndProp *mopdMem128D = &mopdMem128Dest; +OpndProp *mopdMem128PD = &mopdMemPair128Dest; +OpndProp *mopdMem32SPRE = &mopdMem32SrcPre; +OpndProp *mopdMem32SPOST = &mopdMem32SrcPost; +OpndProp *mopdMem64SPRE = &mopdMem64SrcPre; +OpndProp *mopdMem64SPOST = &mopdMem64SrcPost; +OpndProp *mopdMem32LiteralS = &mopdMem32LiteralSrc; +OpndProp *mopdMem64LiteralS = &mopdMem64LiteralSrc; +OpndProp *mopdImm4 = &mopdIntImm4Src; +OpndProp *mopdImm5 = &mopdIntImm5Src; +OpndProp *mopdImm6 = &mopdIntImm6Src; +OpndProp *mopdImm8 = &mopdIntImm8Src; +OpndProp *mopdImm12 = &mopdIntImm12Src; +OpndProp *mopdImmBm12 = &mopdIntBitMaskImm12Src; +OpndProp *mopdImm13 = &mopdIntImm13Src; +OpndProp *mopdImmBm13 = &mopdIntBitMaskImm13Src; +OpndProp *mopdImm16 = &mopdIntImm16Src; +OpndProp *mopdImm24 = &mopdIntImm24Src; +OpndProp *mopdImm32 = &mopdIntImm32Src; +OpndProp *mopdImm64 = &mopdIntImm64Src; +OpndProp *mopdFPZeroImm8 = &mopdFpzeroImm8Src; +OpndProp *mopdFuncName = &mopdLbl64Src; +OpndProp *mopdLabel = &mopdLbl64Src; +OpndProp *mopdLiteral = &mopdLiteralSrc; +OpndProp *mopdLiteralL12 = &mopdLiteralL12Src; + +OpndProp *mopdRegCCS = &mopdCcRegSrc; +OpndProp *mopdRegCCD = &mopdCcRegDest; +OpndProp *mopdRegCCDS = &mopdCcRegDestSrc; + +OpndProp *mopdCond = &mopdCondSrc; + +OpndProp *mopdBitShift32 = &mopdBitshift32Src; +OpndProp *mopdBitShift64 = &mopdBitshift64Src; +OpndProp *mopdExtendShift64 = &mopdExtendshift64Src; +OpndProp *mopdLSL4 = &mopdLsl4Src; +OpndProp *mopdLSL6 = &mopdLsl6Src; +OpndProp *mopdLSL12 = &mopdLsl12Src; + +OpndProp *mopdRSPD = &mopdSpRegDest; +OpndProp *mopdLISTS = &mopdListSrc; +OpndProp *mopdLISTD = &mopdListDest; +OpndProp *mopdSTRING = &mopdString; +OpndProp *mopdUndef = nullptr; diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp index ace828b7d6414742ef997dfe8d20b9abd797f474..b52e2bfb151682d57e429af8e30c75064a4c55ce 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp @@ -20,7 +20,7 @@ #include "aarch64_utils.h" namespace maplebe { -#define JAVALANG (cgFunc.GetMirModule().IsJavaModule()) +#define JAVALANG (cgFunc->GetMirModule().IsJavaModule()) #define CG_PEEP_DUMP CG_DEBUG_FUNC(*cgFunc) namespace { const std::string kMccLoadRef = "MCC_LoadRefField"; @@ -66,6 +66,14 @@ MOperator GetLoadOperator(uint32 refSize, bool isVolatile) { } } +static bool IsZeroRegister(const Operand &opnd) { + if (!opnd.IsRegister()) { + return false; + } + const RegOperand *regOpnd = static_cast(&opnd); + return regOpnd->GetRegisterNumber() == RZR; +} + void 
AArch64CGPeepHole::Run() { bool optSuccess = false; FOR_ALL_BB(bb, cgFunc) { @@ -116,7 +124,6 @@ bool AArch64CGPeepHole::DoSSAOptimize(BB &bb, Insn &insn) { case MOP_wcsetrc: case MOP_xcsetrc: { manager->Optimize(true); - manager->Optimize(true); break; } case MOP_waddrrr: @@ -131,13 +138,12 @@ bool AArch64CGPeepHole::DoSSAOptimize(BB &bb, Insn &insn) { case MOP_winegrr: case MOP_wfnegrr: case MOP_xfnegrr: { - manager->Optimize(CGOptions::IsFastMath()); + manager->Optimize(true); break; } case MOP_wandrri12: case MOP_xandrri13: { manager->Optimize(true); - manager->Optimize(true); break; } case MOP_wcselrrrc: @@ -185,10 +191,6 @@ bool AArch64CGPeepHole::DoSSAOptimize(BB &bb, Insn &insn) { return manager->OptSuccess(); } -std::string ContinuousCmpCsetPattern::GetPatternName() { - return "ContinuousCmpCsetPattern"; -} - bool ContinuousCmpCsetPattern::CheckCondCode(const CondOperand &condOpnd) const { switch (condOpnd.GetCode()) { case CC_NE: @@ -222,7 +224,7 @@ bool ContinuousCmpCsetPattern::CheckCondition(Insn &insn) { if (prevCmpMop != MOP_wcmpri && prevCmpMop != MOP_xcmpri) { return false; } - if (!static_cast(prevCmpInsn->GetOperand(kInsnThirdOpnd)).IsZero()) { + if (!static_cast(prevCmpInsn->GetOperand(kInsnThirdOpnd)).IsZero()) { return false; } auto &cmpCCReg = static_cast(prevCmpInsn->GetOperand(kInsnFirstOpnd)); @@ -287,9 +289,9 @@ void ContinuousCmpCsetPattern::Run(BB &bb, Insn &insn) { auto &prevCsetCondOpnd = static_cast(prevCsetInsn1->GetOperand(kInsnSecondOpnd)); CondOperand &newCondOpnd = aarFunc->GetCondOperand(GetReverseCondCode(prevCsetCondOpnd)); regno_t tmpRegNO = 0; - auto *tmpDefOpnd = - cgFunc->GetMemoryPool()->New(tmpRegNO, resOpnd.GetSize(), - static_cast(resOpnd).GetRegisterType()); + auto *tmpDefOpnd = aarFunc->CreateVirtualRegisterOperand(tmpRegNO, + resOpnd.GetSize(), static_cast(resOpnd).GetRegisterType()); + tmpDefOpnd->SetValidBitsNum(k1BitSize); newCsetInsn = &cgFunc->GetCG()->BuildInstruction( prevCsetMop, *tmpDefOpnd, newCondOpnd, prevCsetInsn1->GetOperand(kInsnThirdOpnd)); BB *prevCsetBB = prevCsetInsn1->GetBB(); @@ -336,10 +338,6 @@ void ContinuousCmpCsetPattern::Run(BB &bb, Insn &insn) { } } -std::string NegCmpToCmnPattern::GetPatternName() { - return "NegCmpToCmnPattern"; -} - bool NegCmpToCmnPattern::CheckCondition(Insn &insn) { MOperator curMop = insn.GetMachineOpcode(); if (curMop != MOP_wcmprr && curMop != MOP_xcmprr) { @@ -391,10 +389,6 @@ void NegCmpToCmnPattern::Run(BB &bb, Insn &insn) { } } -std::string CsetCbzToBeqPattern::GetPatternName() { - return "CsetCbzToBeqPattern"; -} - bool CsetCbzToBeqPattern::CheckCondition(Insn &insn) { MOperator curMop = insn.GetMachineOpcode(); if (curMop != MOP_wcbz && curMop != MOP_xcbz && curMop != MOP_wcbnz && curMop != MOP_xcbnz) { @@ -475,10 +469,6 @@ void CsetCbzToBeqPattern::Run(BB &bb, Insn &insn) { } } -std::string ExtLslToBitFieldInsertPattern::GetPatternName() { - return "ExtLslToBitFieldInsertPattern"; -} - bool ExtLslToBitFieldInsertPattern::CheckCondition(Insn &insn) { auto &useReg = static_cast(insn.GetOperand(kInsnSecondOpnd)); prevInsn = GetDefInsn(useReg); @@ -520,16 +510,12 @@ void ExtLslToBitFieldInsertPattern::Run(BB &bb, Insn &insn) { } } -std::string CselToCsetPattern::GetPatternName() { - return "CselToCsetPattern"; -} - bool CselToCsetPattern::IsOpndDefByZero(const Insn &insn) { MOperator movMop = insn.GetMachineOpcode(); switch (movMop) { case MOP_xmovrr: case MOP_wmovrr: { - return insn.GetOperand(kInsnSecondOpnd).IsZeroRegister(); + return 
IsZeroRegister(insn.GetOperand(kInsnSecondOpnd)); } case MOP_xmovri32: case MOP_xmovri64: { @@ -621,7 +607,7 @@ void CselToCsetPattern::Run(BB &bb, Insn &insn) { Operand &dstOpnd = insn.GetOperand(kInsnFirstOpnd); MOperator newMop = (dstOpnd.GetSize() == k64BitSize ? MOP_xcsetrc : MOP_wcsetrc); Operand &condOpnd = insn.GetOperand(kInsnFourthOpnd); - Operand &rflag = cgFunc->GetOrCreateRflag(); + Operand &rflag = insn.GetOperand(kInsnFifthOpnd); Insn *newInsn = nullptr; if (IsOpndDefByOne(*prevMovInsn1) && IsOpndDefByZero(*prevMovInsn2)) { newInsn = &(cgFunc->GetCG()->BuildInstruction(newMop, dstOpnd, condOpnd, rflag)); @@ -652,10 +638,6 @@ void CselToCsetPattern::Run(BB &bb, Insn &insn) { } } -std::string AndCmpBranchesToTbzPattern::GetPatternName() { - return "AndCmpBranchesToTbzPattern"; -} - bool AndCmpBranchesToTbzPattern::CheckAndSelectPattern(const Insn &currInsn) { MOperator curMop = currInsn.GetMachineOpcode(); MOperator prevAndMop = prevAndInsn->GetMachineOpcode(); @@ -750,10 +732,6 @@ void AndCmpBranchesToTbzPattern::Run(BB &bb, Insn &insn) { } } -std::string ZeroCmpBranchesToTbzPattern::GetPatternName() { - return "ZeroCmpBranchesToTbzPattern"; -} - bool ZeroCmpBranchesToTbzPattern::CheckAndSelectPattern(const Insn &currInsn) { MOperator currMop = currInsn.GetMachineOpcode(); MOperator prevMop = prevInsn->GetMachineOpcode(); @@ -780,12 +758,12 @@ bool ZeroCmpBranchesToTbzPattern::CheckAndSelectPattern(const Insn &currInsn) { case MOP_xcmprr: { auto ®Opnd0 = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); auto ®Opnd1 = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); - if (!regOpnd0.IsZeroRegister() && !regOpnd1.IsZeroRegister()) { + if (!IsZeroRegister(regOpnd0) && !IsZeroRegister(regOpnd1)) { return false; } switch (currMop) { case MOP_bge: - if (regOpnd1.IsZeroRegister()) { + if (IsZeroRegister(regOpnd1)) { regOpnd = &static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); newMop = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbz : MOP_xtbz; } else { @@ -793,7 +771,7 @@ bool ZeroCmpBranchesToTbzPattern::CheckAndSelectPattern(const Insn &currInsn) { } break; case MOP_ble: - if (regOpnd0.IsZeroRegister()) { + if (IsZeroRegister(regOpnd0)) { regOpnd = &static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); newMop = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbz : MOP_xtbz; } else { @@ -801,7 +779,7 @@ bool ZeroCmpBranchesToTbzPattern::CheckAndSelectPattern(const Insn &currInsn) { } break; case MOP_blt: - if (regOpnd1.IsZeroRegister()) { + if (IsZeroRegister(regOpnd1)) { regOpnd = &static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); newMop = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbnz : MOP_xtbnz; } else { @@ -809,7 +787,7 @@ bool ZeroCmpBranchesToTbzPattern::CheckAndSelectPattern(const Insn &currInsn) { } break; case MOP_bgt: - if (regOpnd0.IsZeroRegister()) { + if (IsZeroRegister(regOpnd0)) { regOpnd = &static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); newMop = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbnz : MOP_xtbnz; } else { @@ -820,8 +798,8 @@ bool ZeroCmpBranchesToTbzPattern::CheckAndSelectPattern(const Insn &currInsn) { return false; } } - // fall through - [[clang::fallthrough]]; + // fall through + [[clang::fallthrough]]; default: return false; } @@ -858,7 +836,7 @@ void ZeroCmpBranchesToTbzPattern::Run(BB &bb, Insn &insn) { ImmOperand &bitOpnd = aarFunc->CreateImmOperand( (regOpnd->GetSize() <= k32BitSize) ? 
(k32BitSize - 1) : (k64BitSize - 1), k8BitSize, false); auto &labelOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); - Insn &newInsn = cgFunc->GetCG()->BuildInstruction(newMop, *static_cast(regOpnd), + Insn &newInsn = cgFunc->GetCG()->BuildInstruction(newMop, *static_cast(regOpnd), bitOpnd, labelOpnd); bb.ReplaceInsn(insn, newInsn); /* update ssa info */ @@ -873,10 +851,6 @@ void ZeroCmpBranchesToTbzPattern::Run(BB &bb, Insn &insn) { } } -std::string LsrAndToUbfxPattern::GetPatternName() { - return "LsrAndToUbfxPattern"; -} - bool LsrAndToUbfxPattern::CheckCondition(Insn &insn) { MOperator curMop = insn.GetMachineOpcode(); if (curMop != MOP_wandrri12 && curMop != MOP_xandrri13) { @@ -929,10 +903,10 @@ void LsrAndToUbfxPattern::Run(BB &bb, Insn &insn) { bool is64Bits = (static_cast(insn.GetOperand(kInsnFirstOpnd)).GetSize() == k64BitSize); Operand &resOpnd = insn.GetOperand(kInsnFirstOpnd); Operand &srcOpnd = prevInsn->GetOperand(kInsnSecondOpnd); - int64 immVal1 = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)).GetValue(); + int64 immVal1 = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)).GetValue(); Operand &immOpnd1 = is64Bits ? aarFunc->CreateImmOperand(immVal1, kMaxImmVal6Bits, false) : aarFunc->CreateImmOperand(immVal1, kMaxImmVal5Bits, false); - int64 tmpVal = static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValue(); + int64 tmpVal = static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValue(); int64 immVal2 = __builtin_ffsll(tmpVal + 1) - 1; if ((immVal2 < k1BitSize) || (is64Bits && (immVal1 + immVal2) > k64BitSize) || (!is64Bits && (immVal1 + immVal2) > k32BitSize)) { @@ -956,156 +930,6 @@ void LsrAndToUbfxPattern::Run(BB &bb, Insn &insn) { } } -void CmpCsetOpt::Run(BB &bb, Insn &csetInsn) { - if (!CheckCondition(csetInsn)) { - return; - } - Operand &csetFirstOpnd = csetInsn.GetOperand(kInsnFirstOpnd); - Operand &cmpFirstOpnd = cmpInsn->GetOperand(kInsnSecondOpnd); - auto &cond = static_cast(csetInsn.GetOperand(kInsnSecondOpnd)); - Insn *newInsn = nullptr; - - /* cmpFirstOpnd == 1 */ - if ((cmpConstVal == 0 && cond.GetCode() == CC_NE) || (cmpConstVal == 1 && cond.GetCode() == CC_EQ)) { - MOperator mopCode = (cmpFirstOpnd.GetSize() == k64BitSize) ? MOP_xmovrr : MOP_wmovrr; - newInsn = &cgFunc->GetCG()->BuildInstruction(mopCode, csetFirstOpnd, cmpFirstOpnd); - bb.ReplaceInsn(csetInsn, *newInsn); - ssaInfo->ReplaceInsn(csetInsn, *newInsn); - optSuccess = true; - SetCurrInsn(newInsn); - } else if ((cmpConstVal == 1 && cond.GetCode() == CC_NE) || (cmpConstVal == 0 && cond.GetCode() == CC_EQ)) { - /* cmpFirstOpnd == 0 */ - MOperator mopCode = (cmpFirstOpnd.GetSize() == k64BitSize) ? 
MOP_xeorrri13 : MOP_weorrri12; - ImmOperand &one = static_cast(cgFunc)->CreateImmOperand(1, k8BitSize, false); - newInsn = &cgFunc->GetCG()->BuildInstruction(mopCode, csetFirstOpnd, cmpFirstOpnd, one); - bb.ReplaceInsn(csetInsn, *newInsn); - ssaInfo->ReplaceInsn(csetInsn, *newInsn); - optSuccess = true; - SetCurrInsn(newInsn); - } - if (CG_PEEP_DUMP && (newInsn != nullptr)) { - std::vector prevInsns; - prevInsns.emplace_back(cmpInsn); - prevInsns.emplace_back(&csetInsn); - DumpAfterPattern(prevInsns, newInsn, nullptr); - } -} - -bool CmpCsetOpt::IsContinuousCmpCset(const Insn &curInsn) { - auto &csetDstReg = static_cast(curInsn.GetOperand(kInsnFirstOpnd)); - CHECK_FATAL(csetDstReg.IsSSAForm(), "dstOpnd must be ssa form"); - VRegVersion *dstVersion = ssaInfo->FindSSAVersion(csetDstReg.GetRegisterNumber()); - ASSERT(dstVersion != nullptr, "find vRegVersion failed"); - for (auto useDUInfoIt : dstVersion->GetAllUseInsns()) { - if (useDUInfoIt.second == nullptr) { - continue; - } - Insn *useInsn = useDUInfoIt.second->GetInsn(); - if (useInsn == nullptr) { - continue; - } - MOperator useMop = useInsn->GetMachineOpcode(); - if (useMop == MOP_wcmpri || useMop == MOP_xcmpri) { - auto &ccDstReg = static_cast(useInsn->GetOperand(kInsnFirstOpnd)); - CHECK_FATAL(ccDstReg.IsSSAForm(), "dstOpnd must be ssa form"); - VRegVersion *ccDstVersion = ssaInfo->FindSSAVersion(ccDstReg.GetRegisterNumber()); - ASSERT(ccDstVersion != nullptr, "find vRegVersion failed"); - for (auto ccUseDUInfoIt : ccDstVersion->GetAllUseInsns()) { - if (ccUseDUInfoIt.second == nullptr) { - continue; - } - Insn *ccUseInsn = ccUseDUInfoIt.second->GetInsn(); - if (ccUseInsn == nullptr) { - continue; - } - MOperator ccUseMop = ccUseInsn->GetMachineOpcode(); - if (ccUseMop == MOP_wcsetrc || ccUseMop == MOP_xcsetrc) { - return true; - } - } - } - } - return false; -} - -bool CmpCsetOpt::CheckCondition(Insn &csetInsn) { - MOperator curMop = csetInsn.GetMachineOpcode(); - if (curMop != MOP_wcsetrc && curMop != MOP_xcsetrc) { - return false; - } - /* combine [continuous cmp & cset] first, to eliminate more insns */ - if (IsContinuousCmpCset(csetInsn)) { - return false; - } - RegOperand &ccReg = static_cast(csetInsn.GetOperand(kInsnThirdOpnd)); - regno_t ccRegNo = ccReg.GetRegisterNumber(); - cmpInsn = GetDefInsn(ccReg); - CHECK_NULL_FATAL(cmpInsn); - MOperator mop = cmpInsn->GetMachineOpcode(); - if ((mop != MOP_wcmpri) && (mop != MOP_xcmpri)) { - return false; - } - VRegVersion *ccRegVersion = ssaInfo->FindSSAVersion(ccRegNo); - if (ccRegVersion->GetAllUseInsns().size() > k1BitSize) { - return false; - } - Operand &cmpSecondOpnd = cmpInsn->GetOperand(kInsnThirdOpnd); - CHECK_FATAL(cmpSecondOpnd.IsIntImmediate(), "expects ImmOperand"); - auto &cmpConst = static_cast(cmpSecondOpnd); - cmpConstVal = cmpConst.GetValue(); - /* get ImmOperand, must be 0 or 1 */ - if ((cmpConstVal != 0) && (cmpConstVal != k1BitSize)) { - return false; - } - Operand &csetFirstOpnd = csetInsn.GetOperand(kInsnFirstOpnd); - Operand &cmpFirstOpnd = cmpInsn->GetOperand(kInsnSecondOpnd); - if (cmpFirstOpnd.GetSize() != csetFirstOpnd.GetSize()) { - return false; - } - CHECK_FATAL(cmpFirstOpnd.IsRegister(), "cmpFirstOpnd must be register!"); - RegOperand &cmpReg = static_cast(cmpFirstOpnd); - Insn *defInsn = GetDefInsn(cmpReg); - if (defInsn == nullptr) { - return false; - } - return OpndDefByOneValidBit(*defInsn); -} - -bool CmpCsetOpt::OpndDefByOneValidBit(const Insn &defInsn) { - MOperator defMop = defInsn.GetMachineOpcode(); - switch (defMop) { - case MOP_wcsetrc: - 
case MOP_xcsetrc: - return true; - case MOP_xmovri32: - case MOP_xmovri64: { - Operand &defOpnd = defInsn.GetOperand(kInsnSecondOpnd); - ASSERT(defOpnd.IsIntImmediate(), "expects ImmOperand"); - auto &defConst = static_cast(defOpnd); - int64 defConstValue = defConst.GetValue(); - return (defConstValue == 0 || defConstValue == 1); - } - case MOP_xmovrr: - case MOP_wmovrr: - return defInsn.GetOperand(kInsnSecondOpnd).IsZeroRegister(); - case MOP_wlsrrri5: - case MOP_xlsrrri6: { - Operand &opnd2 = defInsn.GetOperand(kInsnThirdOpnd); - ASSERT(opnd2.IsIntImmediate(), "expects ImmOperand"); - auto &opndImm = static_cast(opnd2); - int64 shiftBits = opndImm.GetValue(); - return ((defMop == MOP_wlsrrri5 && shiftBits == (k32BitSize - 1)) || - (defMop == MOP_xlsrrri6 && shiftBits == (k64BitSize - 1))); - } - default: - return false; - } -} - -std::string MvnAndToBicPattern::GetPatternName() { - return "MvnAndToBicPattern"; -} - bool MvnAndToBicPattern::CheckCondition(Insn &insn) { MOperator curMop = insn.GetMachineOpcode(); if (curMop != MOP_wandrrr && curMop != MOP_xandrrr) { @@ -1150,10 +974,6 @@ void MvnAndToBicPattern::Run(BB &bb , Insn &insn) { } } -std::string AndCbzToTbzPattern::GetPatternName() { - return "AndCbzToTbzPattern"; -} - bool AndCbzToTbzPattern::CheckCondition(Insn &insn) { MOperator curMop = insn.GetMachineOpcode(); if (curMop != MOP_wcbz && curMop != MOP_xcbz && curMop != MOP_wcbnz && curMop != MOP_xcbnz) { @@ -1217,10 +1037,6 @@ void AndCbzToTbzPattern::Run(BB &bb, Insn &insn) { } } -std::string LogicShiftAndOrrToExtrPattern::GetPatternName() { - return "LogicShiftAndOrrToExtrPattern"; -} - bool LogicShiftAndOrrToExtrPattern::CheckCondition(Insn &insn) { MOperator curMop = insn.GetMachineOpcode(); if (curMop != MOP_wiorrrr && curMop != MOP_xiorrrr && curMop != MOP_wiorrrrs && curMop != MOP_xiorrrrs) { @@ -1249,8 +1065,8 @@ bool LogicShiftAndOrrToExtrPattern::CheckCondition(Insn &insn) { } else { return false; } - int64 prevLsrImmValue = static_cast(prevLsrInsn->GetOperand(kInsnThirdOpnd)).GetValue(); - int64 prevLslImmValue = static_cast(prevLslInsn->GetOperand(kInsnThirdOpnd)).GetValue(); + int64 prevLsrImmValue = static_cast(prevLsrInsn->GetOperand(kInsnThirdOpnd)).GetValue(); + int64 prevLslImmValue = static_cast(prevLslInsn->GetOperand(kInsnThirdOpnd)).GetValue(); if ((prevLsrImmValue + prevLslImmValue) < 0) { return false; } @@ -1269,7 +1085,7 @@ bool LogicShiftAndOrrToExtrPattern::CheckCondition(Insn &insn) { if (prevMop != MOP_wlsrrri5 && prevMop != MOP_xlsrrri6 && prevMop != MOP_wlslrri5 && prevMop != MOP_xlslrri6) { return false; } - int64 prevImm = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)).GetValue(); + int64 prevImm = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)).GetValue(); auto &shiftOpnd = static_cast(insn.GetOperand(kInsnFourthOpnd)); uint32 shiftAmount = shiftOpnd.GetShiftAmount(); if (shiftOpnd.GetShiftOp() == BitShiftOperand::kLSL && (prevMop == MOP_wlsrrri5 || prevMop == MOP_xlsrrri6)) { @@ -1285,8 +1101,8 @@ bool LogicShiftAndOrrToExtrPattern::CheckCondition(Insn &insn) { if (prevImm + static_cast(shiftAmount) < 0) { return false; } - if ((is64Bits && (prevImm + static_cast(shiftAmount)) >= k64BitSize) || - (!is64Bits && (prevImm + static_cast(shiftAmount)) >= k32BitSize)) { + if ((is64Bits && (prevImm + static_cast(shiftAmount)) != k64BitSize) || + (!is64Bits && (prevImm + static_cast(shiftAmount)) != k32BitSize)) { return false; } } else { @@ -1324,10 +1140,6 @@ void LogicShiftAndOrrToExtrPattern::Run(BB &bb, Insn &insn) { } } -std::string 
SimplifyMulArithmeticPattern::GetPatternName() { - return "SimplifyMulArithmeticPattern"; -} - void SimplifyMulArithmeticPattern::SetArithType(const Insn &currInsn) { MOperator mOp = currInsn.GetMachineOpcode(); switch (mOp) { @@ -1387,6 +1199,11 @@ bool SimplifyMulArithmeticPattern::CheckCondition(Insn &insn) { if (prevInsn == nullptr) { return false; } + regno_t useRegNO = useReg.GetRegisterNumber(); + VRegVersion *useVersion = ssaInfo->FindSSAVersion(useRegNO); + if (useVersion->GetAllUseInsns().size() > 1) { + return false; + } MOperator currMop = insn.GetMachineOpcode(); if (currMop == MOP_dadd || currMop == MOP_sadd || currMop == MOP_dsub || currMop == MOP_ssub || currMop == MOP_wfnegrr || currMop == MOP_xfnegrr) { @@ -1402,7 +1219,10 @@ bool SimplifyMulArithmeticPattern::CheckCondition(Insn &insn) { if (!isFloat && (prevMop == MOP_xvmuld || prevMop == MOP_xvmuls)) { return false; } - return true; + if ((currMop == MOP_xaddrrr) || (currMop == MOP_waddrrr)) { + return true; + } + return CGOptions::IsFastMath(); } void SimplifyMulArithmeticPattern::DoOptimize(BB &currBB, Insn &currInsn) { @@ -1450,23 +1270,9 @@ void SimplifyMulArithmeticPattern::Run(BB &bb, Insn &insn) { DoOptimize(bb, insn); } -std::string ElimSpecificExtensionPattern::GetPatternName() { - return "ElimSpecificExtensionPattern"; -} - void ElimSpecificExtensionPattern::SetSpecificExtType(const Insn &currInsn) { MOperator mOp = currInsn.GetMachineOpcode(); switch (mOp) { - case MOP_wandrri12: { - is64Bits = false; - extTypeIdx = AND; - break; - } - case MOP_xandrri13: { - is64Bits = true; - extTypeIdx = AND; - break; - } case MOP_xsxtb32: { is64Bits = false; extTypeIdx = SXTB; @@ -1594,7 +1400,7 @@ void ElimSpecificExtensionPattern::ElimExtensionAfterMov(Insn &insn) { if (prevInsn->IsCall() && prevInsn->GetIsCallReturnSigned()) { return; } - auto &immMovOpnd = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + auto &immMovOpnd = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); int64 value = immMovOpnd.GetValue(); uint64 minRange = extValueRangeTable[extTypeIdx][0]; uint64 maxRange = extValueRangeTable[extTypeIdx][1]; @@ -1623,10 +1429,10 @@ bool ElimSpecificExtensionPattern::IsValidLoadExtPattern(Insn &currInsn, MOperat return true; } auto *aarFunc = static_cast(cgFunc); - auto *memOpnd = static_cast(prevInsn->GetMemOpnd()); + auto *memOpnd = static_cast(prevInsn->GetMemOpnd()); ASSERT(!prevInsn->IsStorePair(), "do not do ElimSpecificExtensionPattern for str pair"); ASSERT(!prevInsn->IsLoadPair(), "do not do ElimSpecificExtensionPattern for ldr pair"); - if (memOpnd->GetAddrMode() == AArch64MemOperand::kAddrModeBOi && + if (memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi && !aarFunc->IsOperandImmValid(newMop, memOpnd, kInsnSecondOpnd)) { return false; } @@ -1667,12 +1473,6 @@ void ElimSpecificExtensionPattern::ElimExtensionAfterLoad(Insn &insn) { if (extTypeIdx == EXTUNDEF) { return; } - if (extTypeIdx == AND) { - auto &immOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); - if (immOpnd.GetValue() != 0xff) { - return; - } - } MOperator prevOrigMop = prevInsn->GetMachineOpcode(); for (uint8 i = 0; i < kPrevLoadPatternNum; i++) { if (prevOrigMop != loadMappingTable[extTypeIdx][i][0]) { @@ -1738,7 +1538,7 @@ void ElimSpecificExtensionPattern::ElimExtensionAfterLoad(Insn &insn) { } void ElimSpecificExtensionPattern::ElimExtensionAfterSameExt(Insn &insn) { - if (extTypeIdx == EXTUNDEF || extTypeIdx == AND) { + if (extTypeIdx == EXTUNDEF) { return; } auto &prevDstOpnd = 
static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); @@ -1777,7 +1577,7 @@ void ElimSpecificExtensionPattern::Run(BB &bb, Insn &insn) { if (!CheckCondition(insn)) { return; } - if ((sceneType == kSceneMov) && (extTypeIdx != AND)) { + if (sceneType == kSceneMov) { ElimExtensionAfterMov(insn); } else if (sceneType == kSceneLoad) { ElimExtensionAfterLoad(insn); @@ -1853,7 +1653,7 @@ void OneHoleBranchPattern::Run(BB &bb, Insn &insn) { } AArch64CGFunc *aarch64CGFunc = static_cast(cgFunc); ImmOperand &oneHoleOpnd = aarch64CGFunc->CreateImmOperand(0, k8BitSize, false); - auto ®Operand = static_cast(prePrevInsn->GetOperand(kInsnSecondOpnd)); + auto ®Operand = static_cast(prePrevInsn->GetOperand(kInsnSecondOpnd)); Insn &newTbzInsn = cgFunc->GetCG()->BuildInstruction(newOp, regOperand, oneHoleOpnd, label); bb.ReplaceInsn(insn, newTbzInsn); ssaInfo->ReplaceInsn(insn, newTbzInsn); @@ -1907,7 +1707,7 @@ void OrrToMovPattern::Run(BB &bb, Insn &insn) { if (!CheckCondition(insn)) { return; } - AArch64RegOperand *reg1 = &static_cast(insn.GetOperand(kInsnFirstOpnd)); + RegOperand *reg1 = &static_cast(insn.GetOperand(kInsnFirstOpnd)); Insn &newInsn = cgFunc->GetCG()->BuildInstruction(newMop, *reg1, *reg2); bb.ReplaceInsn(insn, newInsn); ssaInfo->ReplaceInsn(insn, newInsn); @@ -1930,13 +1730,13 @@ bool OrrToMovPattern::CheckCondition(Insn &insn) { switch (thisMop) { case MOP_wiorrri12: { /* opnd1 is reg32 and opnd3 is immediate. */ opndOfOrr = &(insn.GetOperand(kInsnThirdOpnd)); - reg2 = &static_cast(insn.GetOperand(kInsnSecondOpnd)); + reg2 = &static_cast(insn.GetOperand(kInsnSecondOpnd)); newMop = MOP_wmovrr; break; } case MOP_xiorrri13: { /* opnd1 is reg64 and opnd3 is immediate. */ opndOfOrr = &(insn.GetOperand(kInsnThirdOpnd)); - reg2 = &static_cast(insn.GetOperand(kInsnSecondOpnd)); + reg2 = &static_cast(insn.GetOperand(kInsnSecondOpnd)); newMop = MOP_xmovrr; break; } @@ -1997,6 +1797,7 @@ void AArch64CGPeepHole::DoNormalOptimize(BB &bb, Insn &insn) { case MOP_qldr: case MOP_qstr: { manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); break; } @@ -2005,14 +1806,38 @@ void AArch64CGPeepHole::DoNormalOptimize(BB &bb, Insn &insn) { manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); break; } + case MOP_xsbfxrri6i6: { + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + break; + } case MOP_wcbnz: case MOP_xcbnz: { manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); break; } + case MOP_wsdivrrr: { + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_xbl: { + if (JAVALANG) { + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + } + if (CGOptions::IsGCOnly() && CGOptions::DoWriteRefFieldOpt()) { + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + } + break; + } default: break; } + /* skip if it is not a read barrier call. 
*/ + if (GetReadBarrierName(insn) != "") { + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + } } /* ======== CGPeepPattern End ======== */ @@ -2021,10 +1846,6 @@ void AArch64PeepHole::InitOpts() { optimizations[kEliminateSpecifcSXTOpt] = optOwnMemPool->New(cgFunc); optimizations[kEliminateSpecifcUXTOpt] = optOwnMemPool->New(cgFunc); optimizations[kCsetCbzToBeqOpt] = optOwnMemPool->New(cgFunc); - optimizations[kContiLDRorSTRToSameMEMOpt] = optOwnMemPool->New(cgFunc); - optimizations[kRemoveIncDecRefOpt] = optOwnMemPool->New(cgFunc); - optimizations[kInlineReadBarriersOpt] = optOwnMemPool->New(cgFunc); - optimizations[kReplaceDivToMultiOpt] = optOwnMemPool->New(cgFunc); optimizations[kAndCmpBranchesToCsetOpt] = optOwnMemPool->New(cgFunc); optimizations[kAndCmpBranchesToTstOpt] = optOwnMemPool->New(cgFunc); optimizations[kAndCbzBranchesToTstOpt] = optOwnMemPool->New(cgFunc); @@ -2035,23 +1856,6 @@ void AArch64PeepHole::InitOpts() { void AArch64PeepHole::Run(BB &bb, Insn &insn) { MOperator thisMop = insn.GetMachineOpcode(); switch (thisMop) { - case MOP_wstrb: - case MOP_wldrb: - case MOP_wstrh: - case MOP_wldrh: - case MOP_xldr: - case MOP_xstr: - case MOP_wldr: - case MOP_wstr: - case MOP_dldr: - case MOP_dstr: - case MOP_sldr: - case MOP_sstr: - case MOP_qldr: - case MOP_qstr: { - (static_cast(optimizations[kContiLDRorSTRToSameMEMOpt]))->Run(bb, insn); - break; - } case MOP_xsxtb32: case MOP_xsxth32: case MOP_xsxtb64: @@ -2076,14 +1880,6 @@ void AArch64PeepHole::Run(BB &bb, Insn &insn) { (static_cast(optimizations[kCsetCbzToBeqOpt]))->Run(bb, insn); break; } - case MOP_xbl: { - (static_cast(optimizations[kRemoveIncDecRefOpt]))->Run(bb, insn); - break; - } - case MOP_wsdivrrr: { - (static_cast(optimizations[kReplaceDivToMultiOpt]))->Run(bb, insn); - break; - } case MOP_wcsetrc: case MOP_xcsetrc: { (static_cast(optimizations[kAndCmpBranchesToCsetOpt]))->Run(bb, insn); @@ -2105,9 +1901,6 @@ void AArch64PeepHole::Run(BB &bb, Insn &insn) { default: break; } - if (GetReadBarrierName(insn) != "") { /* skip if it is not a read barrier call. 
*/ - (static_cast(optimizations[kInlineReadBarriersOpt]))->Run(bb, insn); - } if (&insn == bb.GetLastInsn()) { (static_cast(optimizations[kZeroCmpBranchesOpt]))->Run(bb, insn); } @@ -2171,12 +1964,10 @@ void AArch64PrePeepHole::InitOpts() { optimizations[kOneHoleBranchesPreOpt] = optOwnMemPool->New(cgFunc); optimizations[kReplaceOrrToMovOpt] = optOwnMemPool->New(cgFunc); optimizations[kReplaceCmpToCmnOpt] = optOwnMemPool->New(cgFunc); - optimizations[kRemoveIncRefOpt] = optOwnMemPool->New(cgFunc); optimizations[kComplexMemOperandOpt] = optOwnMemPool->New(cgFunc); optimizations[kComplexMemOperandPreOptAdd] = optOwnMemPool->New(cgFunc); optimizations[kComplexMemOperandOptLSL] = optOwnMemPool->New(cgFunc); optimizations[kComplexMemOperandOptLabel] = optOwnMemPool->New(cgFunc); - optimizations[kWriteFieldCallOpt] = optOwnMemPool->New(cgFunc); optimizations[kDuplicateExtensionOpt] = optOwnMemPool->New(cgFunc); optimizations[kEnhanceStrLdrAArch64Opt] = optOwnMemPool->New(cgFunc); } @@ -2194,13 +1985,6 @@ void AArch64PrePeepHole::Run(BB &bb, Insn &insn) { (static_cast(optimizations[kReplaceCmpToCmnOpt]))->Run(bb, insn); break; } - case MOP_xbl: { - (static_cast(optimizations[kRemoveIncRefOpt]))->Run(bb, insn); - if (CGOptions::IsGCOnly() && CGOptions::DoWriteRefFieldOpt()) { - (static_cast(optimizations[kWriteFieldCallOpt]))->Run(bb, insn); - } - break; - } case MOP_xadrpl12: { (static_cast(optimizations[kComplexMemOperandOpt]))->Run(bb, insn); break; @@ -2244,17 +2028,12 @@ void AArch64PrePeepHole::Run(BB &bb, Insn &insn) { } if (&insn == bb.GetLastInsn()) { (static_cast(optimizations[kOneHoleBranchesPreOpt]))->Run(bb, insn); - if (CGOptions::IsGCOnly() && CGOptions::DoWriteRefFieldOpt()) { - (static_cast(optimizations[kWriteFieldCallOpt]))->Reset(); - } } } void AArch64PrePeepHole1::InitOpts() { optimizations.resize(kPeepholeOptsNum); - optimizations[kRemoveDecRefOpt] = optOwnMemPool->New(cgFunc); optimizations[kOneHoleBranchesOpt] = optOwnMemPool->New(cgFunc); - optimizations[kReplaceIncDecWithIncOpt] = optOwnMemPool->New(cgFunc); optimizations[kAndCmpBranchesToTbzOpt] = optOwnMemPool->New(cgFunc); optimizations[kComplexExtendWordLslOpt] = optOwnMemPool->New(cgFunc); } @@ -2262,13 +2041,6 @@ void AArch64PrePeepHole1::InitOpts() { void AArch64PrePeepHole1::Run(BB &bb, Insn &insn) { MOperator thisMop = insn.GetMachineOpcode(); switch (thisMop) { - case MOP_xbl: { - if (JAVALANG) { - (static_cast(optimizations[kRemoveDecRefOpt]))->Run(bb, insn); - (static_cast(optimizations[kReplaceIncDecWithIncOpt]))->Run(bb, insn); - } - break; - } case MOP_xsxtw64: case MOP_xuxtw64: { (static_cast(optimizations[kComplexExtendWordLslOpt]))->Run(bb, insn); @@ -2329,12 +2101,12 @@ bool RemoveIdenticalLoadAndStorePattern::IsMemOperandsIdentical(const Insn &insn return false; } /* Match only [base + offset] */ - auto &memOpnd1 = static_cast(insn1.GetOperand(kInsnSecondOpnd)); - if (memOpnd1.GetAddrMode() != AArch64MemOperand::kAddrModeBOi || !memOpnd1.IsIntactIndexed()) { + auto &memOpnd1 = static_cast(insn1.GetOperand(kInsnSecondOpnd)); + if (memOpnd1.GetAddrMode() != MemOperand::kAddrModeBOi || !memOpnd1.IsIntactIndexed()) { return false; } - auto &memOpnd2 = static_cast(insn2.GetOperand(kInsnSecondOpnd)); - if (memOpnd2.GetAddrMode() != AArch64MemOperand::kAddrModeBOi || !memOpnd1.IsIntactIndexed()) { + auto &memOpnd2 = static_cast(insn2.GetOperand(kInsnSecondOpnd)); + if (memOpnd2.GetAddrMode() != MemOperand::kAddrModeBOi || !memOpnd1.IsIntactIndexed()) { return false; } Operand *base1 = 
memOpnd1.GetBaseRegister(); @@ -2382,12 +2154,12 @@ bool RemoveIdenticalLoadAndStoreAArch64::IsMemOperandsIdentical(const Insn &insn return false; } /* Match only [base + offset] */ - auto &memOpnd1 = static_cast(insn1.GetOperand(kInsnSecondOpnd)); - if (memOpnd1.GetAddrMode() != AArch64MemOperand::kAddrModeBOi || !memOpnd1.IsIntactIndexed()) { + auto &memOpnd1 = static_cast(insn1.GetOperand(kInsnSecondOpnd)); + if (memOpnd1.GetAddrMode() != MemOperand::kAddrModeBOi || !memOpnd1.IsIntactIndexed()) { return false; } - auto &memOpnd2 = static_cast(insn2.GetOperand(kInsnSecondOpnd)); - if (memOpnd2.GetAddrMode() != AArch64MemOperand::kAddrModeBOi || !memOpnd1.IsIntactIndexed()) { + auto &memOpnd2 = static_cast(insn2.GetOperand(kInsnSecondOpnd)); + if (memOpnd2.GetAddrMode() != MemOperand::kAddrModeBOi || !memOpnd1.IsIntactIndexed()) { return false; } Operand *base1 = memOpnd1.GetBaseRegister(); @@ -2413,8 +2185,8 @@ bool RemoveIdenticalLoadAndStoreAArch64::IsMemOperandsIdentical(const Insn &insn bool RemoveMovingtoSameRegPattern::CheckCondition(Insn &insn) { ASSERT(insn.GetOperand(kInsnFirstOpnd).IsRegister(), "expects registers"); ASSERT(insn.GetOperand(kInsnSecondOpnd).IsRegister(), "expects registers"); - auto ®1 = static_cast(insn.GetOperand(kInsnFirstOpnd)); - auto ®2 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto ®1 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto ®2 = static_cast(insn.GetOperand(kInsnSecondOpnd)); /* remove mov x0,x0 when it cast i32 to i64 */ if ((reg1.GetRegisterNumber() == reg2.GetRegisterNumber()) && (reg1.GetSize() >= reg2.GetSize())) { return true; @@ -2432,8 +2204,8 @@ void RemoveMovingtoSameRegPattern::Run(BB &bb, Insn &insn) { void RemoveMovingtoSameRegAArch64::Run(BB &bb, Insn &insn) { ASSERT(insn.GetOperand(kInsnFirstOpnd).IsRegister(), "expects registers"); ASSERT(insn.GetOperand(kInsnSecondOpnd).IsRegister(), "expects registers"); - auto ®1 = static_cast(insn.GetOperand(kInsnFirstOpnd)); - auto ®2 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto ®1 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto ®2 = static_cast(insn.GetOperand(kInsnSecondOpnd)); /* remove mov x0,x0 when it cast i32 to i64 */ if ((reg1.GetRegisterNumber() == reg2.GetRegisterNumber()) && (reg1.GetSize() >= reg2.GetSize())) { bb.RemoveInsn(insn); @@ -2451,26 +2223,26 @@ void EnhanceStrLdrAArch64::Run(BB &bb, Insn &insn) { } Operand &memOpnd = insn.GetOperand(kInsnSecondOpnd); CHECK_FATAL(memOpnd.GetKind() == Operand::kOpdMem, "Unexpected operand in EnhanceStrLdrAArch64"); - auto &a64MemOpnd = static_cast(memOpnd); + auto &a64MemOpnd = static_cast(memOpnd); RegOperand *baseOpnd = a64MemOpnd.GetBaseRegister(); MOperator prevMop = prevInsn->GetMachineOpcode(); - if (IsEnhanceAddImm(prevMop) && a64MemOpnd.GetAddrMode() == AArch64MemOperand::kAddrModeBOi && + if (IsEnhanceAddImm(prevMop) && a64MemOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && a64MemOpnd.GetOffsetImmediate()->GetValue() == 0) { auto &addDestOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); if (baseOpnd == &addDestOpnd && !IfOperandIsLiveAfterInsn(addDestOpnd, insn)) { - auto &concreteMemOpnd = static_cast(memOpnd); + auto &concreteMemOpnd = static_cast(memOpnd); auto *origBaseReg = concreteMemOpnd.GetBaseRegister(); concreteMemOpnd.SetBaseRegister( - static_cast(prevInsn->GetOperand(kInsnSecondOpnd))); + static_cast(prevInsn->GetOperand(kInsnSecondOpnd))); auto &ofstOpnd = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); - AArch64OfstOperand &offOpnd = 
static_cast(cgFunc).GetOrCreateOfstOpnd( + OfstOperand &offOpnd = static_cast(cgFunc).GetOrCreateOfstOpnd( static_cast(ofstOpnd.GetValue()), k32BitSize); auto *origOffOpnd = concreteMemOpnd.GetOffsetImmediate(); - concreteMemOpnd.SetOffsetImmediate(offOpnd); + concreteMemOpnd.SetOffsetOperand(offOpnd); if (!static_cast(cgFunc).IsOperandImmValid(insn.GetMachineOpcode(), &memOpnd, kInsnSecondOpnd)) { // If new offset is invalid, undo it - concreteMemOpnd.SetBaseRegister(*static_cast(origBaseReg)); - concreteMemOpnd.SetOffsetImmediate(*origOffOpnd); + concreteMemOpnd.SetBaseRegister(*static_cast(origBaseReg)); + concreteMemOpnd.SetOffsetOperand(*origOffOpnd); return; } bb.RemoveInsn(*prevInsn); @@ -2508,14 +2280,14 @@ bool CombineContiLoadAndStorePattern::IsRegNotSameMemUseInInsn(const Insn &insn, } } } else if (opnd.IsMemoryAccessOperand()) { - auto &memOpnd = static_cast(opnd); + auto &memOpnd = static_cast(opnd); RegOperand *base = memOpnd.GetBaseRegister(); /* need check offset as well */ regno_t stackBaseRegNO = cgFunc->UseFP() ? R29 : RSP; if (!sameMemAccess && base != nullptr) { regno_t curBaseRegNO = base->GetRegisterNumber(); int64 memBarrierRange = static_cast(insn.IsLoadStorePair() ? k16BitSize : k8BitSize); - if (!(curBaseRegNO == regNO && memOpnd.GetAddrMode() == AArch64MemOperand::kAddrModeBOi && + if (!(curBaseRegNO == regNO && memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && memOpnd.GetOffsetImmediate() != nullptr && (memOpnd.GetOffsetImmediate()->GetOffsetValue() <= (baseOfst - memBarrierRange) || memOpnd.GetOffsetImmediate()->GetOffsetValue() >= (baseOfst + memBarrierRange)))) { @@ -2532,7 +2304,7 @@ bool CombineContiLoadAndStorePattern::IsRegNotSameMemUseInInsn(const Insn &insn, return true; } if (isStore && base != nullptr && base->GetRegisterNumber() == regNO) { - if (memOpnd.GetAddrMode() == AArch64MemOperand::kAddrModeBOi && memOpnd.GetOffsetImmediate() != nullptr) { + if (memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && memOpnd.GetOffsetImmediate() != nullptr) { int64 curOffset = memOpnd.GetOffsetImmediate()->GetOffsetValue(); if (memOpnd.GetSize() == k64BitSize) { uint32 memBarrierRange = insn.IsLoadStorePair() ? 
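The EnhanceStrLdrAArch64 hunk above folds a preceding add-immediate into the load/store's base register and offset, and the new SetBaseRegister/SetOffsetOperand calls undo the fold when IsOperandImmValid rejects the combined immediate. A hedged sketch of the fold decision; the encoding ranges and the registers in the comment are illustrative assumptions, not taken from the patch:

```cpp
// before:                      after (when #16 is encodable and x1 is dead):
//   add x1, x2, #16              ldr x0, [x2, #16]
//   ldr x0, [x1]
#include <cstdint>

// Stand-in for the encodability check of a 64-bit ldr/str immediate:
// signed 9-bit unscaled, or unsigned 12-bit scaled by the access size.
static bool Ldr64OffsetEncodable(int64_t off) {
  return (off >= -256 && off < 256) || (off >= 0 && off <= 4095 * 8 && off % 8 == 0);
}

// Fold `add base2, base, #imm; ldr rd, [base2]` into `ldr rd, [base, #imm+old]`.
// Returns false when the caller must restore the original base and offset.
static bool TryFoldAdd(int64_t addImm, int64_t oldOff, int64_t &newOff) {
  newOff = oldOff + addImm;
  return Ldr64OffsetEncodable(newOff);
}
```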
k16BitSize : k8BitSize; @@ -2590,13 +2362,13 @@ std::vector CombineContiLoadAndStorePattern::FindPrevStrLdr(Insn &insn, r } /* return continuous STD/LDR insn */ if (((isStr && curInsn->IsStore()) || (!isStr && curInsn->IsLoad())) && !curInsn->IsLoadStorePair()) { - auto *memOpnd = static_cast(curInsn->GetMemOpnd()); + auto *memOpnd = static_cast(curInsn->GetMemOpnd()); /* do not combine ldr r0, label */ if (memOpnd != nullptr) { - auto *BaseRegOpnd = static_cast(memOpnd->GetBaseRegister()); + auto *BaseRegOpnd = static_cast(memOpnd->GetBaseRegister()); ASSERT(BaseRegOpnd == nullptr || !BaseRegOpnd->IsVirtualRegister(), "physical register has not been allocated?"); - if (memOpnd->GetAddrMode() == AArch64MemOperand::kAddrModeBOi && + if (memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi && BaseRegOpnd->GetRegisterNumber() == memBaseRegNO) { prevContiInsns.emplace_back(curInsn); } @@ -2613,6 +2385,10 @@ std::vector CombineContiLoadAndStorePattern::FindPrevStrLdr(Insn &insn, r memBaseRegNO != stackBaseRegNO)) { return prevContiInsns; } + /*store opt should not cross call due to stack args */ + if (curInsn->IsCall() && isStr) { + return prevContiInsns; + } if (curInsn->GetMachineOpcode() == MOP_asm) { return prevContiInsns; } @@ -2623,12 +2399,15 @@ std::vector CombineContiLoadAndStorePattern::FindPrevStrLdr(Insn &insn, r return prevContiInsns; } -bool CombineContiLoadAndStorePattern::SplitOfstWithAddToCombine(Insn &insn, const AArch64MemOperand &memOpnd) { - auto *baseRegOpnd = static_cast(memOpnd.GetBaseRegister()); - auto *ofstOpnd = static_cast(memOpnd.GetOffsetImmediate()); +bool CombineContiLoadAndStorePattern::SplitOfstWithAddToCombine(Insn &insn, const MemOperand &memOpnd) { + auto *baseRegOpnd = static_cast(memOpnd.GetBaseRegister()); + auto *ofstOpnd = static_cast(memOpnd.GetOffsetImmediate()); CHECK_FATAL(insn.GetOperand(kInsnFirstOpnd).GetSize() == insn.GetOperand(kInsnSecondOpnd).GetSize(), "the size must equal"); Insn *splitAdd = nullptr; + if (baseRegOpnd->GetRegisterNumber() == R16) { + return false; + } for (Insn *cursor = insn.GetPrev(); cursor != nullptr; cursor = cursor->GetPrev()) { if (!cursor->IsMachineInstruction()) { continue; @@ -2643,11 +2422,11 @@ bool CombineContiLoadAndStorePattern::SplitOfstWithAddToCombine(Insn &insn, cons if (mOp != MOP_xaddrri12 && mOp != MOP_waddrri12) { continue; } - auto &destOpnd = static_cast(cursor->GetOperand(kInsnFirstOpnd)); + auto &destOpnd = static_cast(cursor->GetOperand(kInsnFirstOpnd)); if (destOpnd.GetRegisterNumber() != R16 || destOpnd.GetSize() != baseRegOpnd->GetSize()) { continue; } - auto &useOpnd = static_cast(cursor->GetOperand(kInsnSecondOpnd)); + auto &useOpnd = static_cast(cursor->GetOperand(kInsnSecondOpnd)); if (useOpnd.GetRegisterNumber() != baseRegOpnd->GetRegisterNumber() || useOpnd.GetSize() != baseRegOpnd->GetSize()) { break; @@ -2657,7 +2436,7 @@ bool CombineContiLoadAndStorePattern::SplitOfstWithAddToCombine(Insn &insn, cons } } const AArch64MD *md = &AArch64CG::kMd[insn.GetMachineOpcode()]; - auto *opndProp = static_cast(md->operand[kInsnFirstOpnd]); + auto *opndProp = md->operand[kInsnFirstOpnd]; auto &aarFunc = static_cast(*cgFunc); if (splitAdd == nullptr) { if (insn.IsStorePair() || insn.IsLoadPair()) { @@ -2666,18 +2445,17 @@ bool CombineContiLoadAndStorePattern::SplitOfstWithAddToCombine(Insn &insn, cons } } regno_t pregNO = R16; - AArch64MemOperand &newMemOpnd = aarFunc.SplitOffsetWithAddInstruction(memOpnd, opndProp->GetSize(), - static_cast(pregNO), - false, &insn, true); + MemOperand &newMemOpnd = 
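The early-out added to FindPrevStrLdr above ("store opt should not cross call due to stack args") abandons the backward scan at the first call when combining stores: stores that materialize outgoing stack arguments must not be merged across the call that consumes them. A minimal model of that scan, with stand-in types:

```cpp
#include <vector>

struct MiniInsn {
  bool isCall = false;
  bool isStore = false;
  MiniInsn *prev = nullptr;
};

// Walk backwards collecting candidate stores; abandon the walk at a call.
static std::vector<MiniInsn*> CollectPrevStores(MiniInsn &from) {
  std::vector<MiniInsn*> found;
  for (MiniInsn *cur = from.prev; cur != nullptr; cur = cur->prev) {
    if (cur->isCall) {
      return found; // do not move stack-argument stores across a call
    }
    if (cur->isStore) {
      found.push_back(cur);
    }
  }
  return found;
}
```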
aarFunc.SplitOffsetWithAddInstruction(memOpnd, opndProp->GetSize(), + static_cast(pregNO), false, &insn, true); insn.SetOperand(kInsnThirdOpnd, newMemOpnd); return true; } else { - auto &newBaseReg = static_cast(splitAdd->GetOperand(kInsnFirstOpnd)); - auto &addImmOpnd = static_cast(splitAdd->GetOperand(kInsnThirdOpnd)); - auto *newOfstOpnd = aarFunc.GetMemoryPool()->New( + auto &newBaseReg = static_cast(splitAdd->GetOperand(kInsnFirstOpnd)); + auto &addImmOpnd = static_cast(splitAdd->GetOperand(kInsnThirdOpnd)); + auto *newOfstOpnd = &aarFunc.CreateOfstOpnd( (ofstOpnd->GetOffsetValue() - addImmOpnd.GetValue()), ofstOpnd->GetSize()); - auto *newMemOpnd = aarFunc.GetMemoryPool()->New( - AArch64MemOperand::kAddrModeBOi, opndProp->GetSize(), newBaseReg, nullptr, newOfstOpnd, memOpnd.GetSymbol()); + auto *newMemOpnd = aarFunc.CreateMemOperand( + MemOperand::kAddrModeBOi, opndProp->GetSize(), newBaseReg, nullptr, newOfstOpnd, memOpnd.GetSymbol()); if (!(static_cast(*cgFunc).IsOperandImmValid( insn.GetMachineOpcode(), newMemOpnd, kInsnThirdOpnd))) { return false; @@ -2688,9 +2466,9 @@ bool CombineContiLoadAndStorePattern::SplitOfstWithAddToCombine(Insn &insn, cons } bool CombineContiLoadAndStorePattern::CheckCondition(Insn &insn) { - memOpnd = static_cast(insn.GetMemOpnd()); + memOpnd = static_cast(insn.GetMemOpnd()); ASSERT(memOpnd != nullptr, "get mem operand failed"); - if (memOpnd->GetAddrMode() != AArch64MemOperand::kAddrModeBOi) { + if (memOpnd->GetAddrMode() != MemOperand::kAddrModeBOi) { return false; } if (!doAggressiveCombine) { @@ -2707,40 +2485,66 @@ void CombineContiLoadAndStorePattern::Run(BB &bb, Insn &insn) { MOperator thisMop = insn.GetMachineOpcode(); ASSERT(insn.GetOperand(kInsnFirstOpnd).IsRegister(), "unexpect operand"); auto &destOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); - auto *baseRegOpnd = static_cast(memOpnd->GetBaseRegister()); - AArch64OfstOperand *offsetOpnd = memOpnd->GetOffsetImmediate(); + auto *baseRegOpnd = static_cast(memOpnd->GetBaseRegister()); + OfstOperand *offsetOpnd = memOpnd->GetOffsetImmediate(); CHECK_FATAL(offsetOpnd != nullptr, "offset opnd lost"); ASSERT(baseRegOpnd == nullptr || !baseRegOpnd->IsVirtualRegister(), "physical register has not been allocated?"); std::vector prevContiInsnVec = FindPrevStrLdr( insn, destOpnd.GetRegisterNumber(), baseRegOpnd->GetRegisterNumber(), offsetOpnd->GetOffsetValue()); for (auto prevContiInsn : prevContiInsnVec) { ASSERT(prevContiInsn != nullptr, "get previous consecutive instructions failed"); - auto *prevMemOpnd = static_cast(prevContiInsn->GetMemOpnd()); + auto *prevMemOpnd = static_cast(prevContiInsn->GetMemOpnd()); if (memOpnd->GetIndexOpt() != prevMemOpnd->GetIndexOpt()) { continue; } - AArch64OfstOperand *prevOffsetOpnd = prevMemOpnd->GetOffsetImmediate(); + OfstOperand *prevOffsetOpnd = prevMemOpnd->GetOffsetImmediate(); CHECK_FATAL(offsetOpnd != nullptr && prevOffsetOpnd != nullptr, "both conti str/ldr have no offset"); - auto &prevDestOpnd = static_cast(prevContiInsn->GetOperand(kInsnFirstOpnd)); + auto &prevDestOpnd = static_cast(prevContiInsn->GetOperand(kInsnFirstOpnd)); uint32 memSize = static_cast(insn).GetLoadStoreSize(); uint32 prevMemSize = static_cast(*prevContiInsn).GetLoadStoreSize(); - if (memSize != prevMemSize || prevDestOpnd.GetRegisterType() != destOpnd.GetRegisterType() || - thisMop != prevContiInsn->GetMachineOpcode() || prevDestOpnd.GetSize() != destOpnd.GetSize()) { + if (prevDestOpnd.GetRegisterType() != destOpnd.GetRegisterType()) { continue; } int64 offsetVal = 
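SplitOfstWithAddToCombine (above) makes an out-of-range offset encodable for the fused pair by pushing most of it into the scratch register R16, either reusing an earlier `add x16, base, #imm` found by the backward walk or emitting a fresh one via SplitOffsetWithAddInstruction; the new guard also refuses the rewrite when the base register is itself R16. A rough sketch of the split; rounding the add part to a 4 KiB multiple is an assumption about the helper, not lifted from it, and the real code re-validates the residual with IsOperandImmValid:

```cpp
// before:                          after:
//   str x0, [x2, #4104]              add x16, x2, #4096
//   str x1, [x2, #4112]              stp x0, x1, [x16, #8]
#include <cstdint>

static void SplitPairOffset(int64_t off, int64_t &addPart, int64_t &residual) {
  // Keep the low bits that stp/ldp can encode directly and push the rest
  // into an add; a 4 KiB multiple keeps the add immediate encodable too.
  addPart = off & ~static_cast<int64_t>(0xFFF);
  residual = off - addPart;
}
```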
offsetOpnd->GetOffsetValue(); int64 prevOffsetVal = prevOffsetOpnd->GetOffsetValue(); auto diffVal = std::abs(offsetVal - prevOffsetVal); + regno_t destRegNO = destOpnd.GetRegisterNumber(); + regno_t prevDestRegNO = prevDestOpnd.GetRegisterNumber(); + if (insn.IsStore() && memOpnd->IsStackArgMem() && prevMemOpnd->IsStackArgMem() && + (memSize == k4ByteSize || memSize == k8ByteSize) && diffVal == k8BitSize && + (prevMemSize == k4ByteSize || prevMemSize == k8ByteSize) && + (destOpnd.GetValidBitsNum() == memSize * k8BitSize) && + (prevDestOpnd.GetValidBitsNum() == prevMemSize * k8BitSize)) { + RegOperand &newDest = static_cast(cgFunc)->GetOrCreatePhysicalRegisterOperand( + static_cast(destRegNO), k64BitSize, destOpnd.GetRegisterType()); + RegOperand &newPrevDest = static_cast(cgFunc)->GetOrCreatePhysicalRegisterOperand( + static_cast(prevDestRegNO), k64BitSize, prevDestOpnd.GetRegisterType()); + MemOperand *combineMemOpnd = (offsetVal < prevOffsetVal) ? memOpnd : prevMemOpnd; + CG *cg = cgFunc->GetCG(); + MOperator mopPair = (destOpnd.GetRegisterType() == kRegTyInt) ? MOP_xstp : MOP_dstp; + if ((static_cast(*cgFunc).IsOperandImmValid(mopPair, combineMemOpnd, kInsnThirdOpnd))) { + Insn &combineInsn = (offsetVal < prevOffsetVal) ? + cg->BuildInstruction(mopPair, newDest, newPrevDest, *combineMemOpnd) : + cg->BuildInstruction(mopPair, newPrevDest, newDest, *combineMemOpnd); + bb.InsertInsnAfter(*prevContiInsn, combineInsn); + RemoveInsnAndKeepComment(bb, insn, *prevContiInsn); + return; + } + } + if (memSize != prevMemSize || + thisMop != prevContiInsn->GetMachineOpcode() || prevDestOpnd.GetSize() != destOpnd.GetSize()) { + continue; + } /* do combination str/ldr -> stp/ldp */ - if ((insn.IsStore() || destOpnd.GetRegisterNumber() != prevDestOpnd.GetRegisterNumber()) || - (destOpnd.GetRegisterNumber() == RZR && prevDestOpnd.GetRegisterNumber() == RZR)) { + if ((insn.IsStore() || destRegNO != prevDestRegNO) || + (destRegNO == RZR && prevDestRegNO == RZR)) { if ((memSize == k8ByteSize && diffVal == k8BitSize) || (memSize == k4ByteSize && diffVal == k4BitSize) || (memSize == k16ByteSize && diffVal == k16BitSize)) { CG *cg = cgFunc->GetCG(); MOperator mopPair = GetMopPair(thisMop); - AArch64MemOperand *combineMemOpnd = (offsetVal < prevOffsetVal) ? memOpnd : prevMemOpnd; + MemOperand *combineMemOpnd = (offsetVal < prevOffsetVal) ? memOpnd : prevMemOpnd; Insn &combineInsn = (offsetVal < prevOffsetVal) ? 
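The block added above is a new combine case: two stack-argument stores of 4- or 8-byte values sitting exactly 8 bytes apart are widened to 64-bit registers and fused into a single stp. The widening is only sound because GetValidBitsNum() proves the upper bits of each source register carry no live data. A sketch of the admissibility test (simplified signature, sizes in bytes):

```cpp
#include <cstdint>

// before:                     after:
//   str w0, [sp, #0]            stp x0, x1, [sp]
//   str w1, [sp, #8]
static bool CanPairStackArgStores(uint32_t bytesA, uint32_t bytesB,
                                  int64_t offA, int64_t offB,
                                  uint32_t validBitsA, uint32_t validBitsB) {
  auto sizeOk = [](uint32_t b) { return b == 4 || b == 8; };
  if (!sizeOk(bytesA) || !sizeOk(bytesB)) {
    return false;
  }
  if (offA - offB != 8 && offB - offA != 8) {
    return false; // slots must be exactly 8 bytes apart
  }
  // Widening to 64-bit registers must not smuggle in live upper bits.
  return validBitsA == bytesA * 8 && validBitsB == bytesB * 8;
}
```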
cg->BuildInstruction(mopPair, destOpnd, prevDestOpnd, *combineMemOpnd) : cg->BuildInstruction(mopPair, prevDestOpnd, destOpnd, *combineMemOpnd); @@ -2755,8 +2559,8 @@ void CombineContiLoadAndStorePattern::Run(BB &bb, Insn &insn) { } } /* do combination strb/ldrb -> strh/ldrh -> str/ldr */ - if (destOpnd.GetRegisterNumber() == prevDestOpnd.GetRegisterNumber() && - destOpnd.GetRegisterNumber() == RZR && prevDestOpnd.GetRegisterNumber() == RZR) { + if (destRegNO == prevDestRegNO && + destRegNO == RZR && prevDestRegNO == RZR) { if ((memSize == k1ByteSize && diffVal == k1BitSize) || (memSize == k2ByteSize && diffVal == k2ByteSize)) { CG *cg = cgFunc->GetCG(); MOperator mopPair = GetMopHigherByte(thisMop); @@ -2833,7 +2637,7 @@ void EliminateSpecifcSXTAArch64::Run(BB &bb, Insn &insn) { } Operand &opnd = prevInsn->GetOperand(kInsnSecondOpnd); if (opnd.IsIntImmediate()) { - auto &immOpnd = static_cast(opnd); + auto &immOpnd = static_cast(opnd); int64 value = immOpnd.GetValue(); if (thisMop == MOP_xsxtb32) { /* value should in range between -127 and 127 */ @@ -2859,7 +2663,7 @@ void EliminateSpecifcSXTAArch64::Run(BB &bb, Insn &insn) { RegOperand &dstOpnd = aarch64CGFunc->GetOrCreatePhysicalRegisterOperand( static_cast(dstMovOpnd.GetRegisterNumber()), k64BitSize, dstMovOpnd.GetRegisterType()); prevInsn->SetOperand(kInsnFirstOpnd, dstOpnd); - prevInsn->SetMOperator(MOP_xmovri64); + prevInsn->SetMOP(MOP_xmovri64); bb.RemoveInsn(insn); } } @@ -3047,7 +2851,7 @@ void FmovRegPattern::Run(BB &bb, Insn &insn) { } else if (opnd.IsRegister()) { /* Check if it is a source operand. */ const AArch64MD *md = &AArch64CG::kMd[static_cast(nextInsn)->GetMachineOpcode()]; - auto *regProp = static_cast(md->operand[opndIdx]); + auto *regProp = md->operand[opndIdx]; if (regProp->IsUse()) { auto ® = static_cast(opnd); if (reg.GetRegisterNumber() == curDstReg) { @@ -3058,6 +2862,53 @@ void FmovRegPattern::Run(BB &bb, Insn &insn) { } } +bool SbfxOptPattern::CheckCondition(Insn &insn) { + nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr) { + return false; + } + auto &curDstRegOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + uint32 opndNum = nextInsn->GetOperandSize(); + const AArch64MD *md = &AArch64CG::kMd[static_cast(nextInsn)->GetMachineOpcode()]; + for (uint32 opndIdx = 0; opndIdx < opndNum; ++opndIdx) { + Operand &opnd = nextInsn->GetOperand(opndIdx); + /* Check if it is a source operand. 
*/ + if (opnd.IsMemoryAccessOperand() || opnd.IsList()) { + return false; + } else if (opnd.IsRegister()) { + auto ® = static_cast(opnd); + auto *regProp = md->operand[opndIdx]; + if (reg.GetRegisterNumber() == curDstRegOpnd.GetRegisterNumber()) { + if (reg.GetSize() != k32BitSize) { + return false; + } + if (regProp->IsDef()) { + toRemove = true; + } else { + cands.emplace_back(opndIdx); + } + } + } + } + return cands.size() != 0; +} + +void SbfxOptPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + auto &srcRegOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + RegOperand &newReg = static_cast(cgFunc)->GetOrCreatePhysicalRegisterOperand( + static_cast(srcRegOpnd.GetRegisterNumber()), k32BitSize, srcRegOpnd.GetRegisterType()); + // replace use point of opnd in nextInsn + for (auto i: cands) { + nextInsn->SetOperand(i, newReg); + } + if (toRemove) { + bb.RemoveInsn(insn); + } +} + bool CbnzToCbzPattern::CheckCondition(Insn &insn) { /* reg has to be R0, since return value is in R0 */ auto ®Opnd0 = static_cast(insn.GetOperand(kInsnFirstOpnd)); @@ -3235,16 +3086,14 @@ MOperator CsetCbzToBeqOptAArch64::SelectMOperator(AArch64CC_t condCode, bool inv } } -void ContiLDRorSTRToSameMEMAArch64::Run(BB &bb, Insn &insn) { - Insn *prevInsn = insn.GetPrev(); - while (prevInsn != nullptr && !prevInsn->GetMachineOpcode() && prevInsn != bb.GetFirstInsn()) { +bool ContiLDRorSTRToSameMEMPattern::CheckCondition(Insn &insn) { + prevInsn = insn.GetPrev(); + while (prevInsn != nullptr && !prevInsn->GetMachineOpcode() && prevInsn != insn.GetBB()->GetFirstInsn()) { prevInsn = prevInsn->GetPrev(); } if (!insn.IsMachineInstruction() || prevInsn == nullptr) { - return; + return false; } - bool loadAfterStore = false; - bool loadAfterLoad = false; MOperator thisMop = insn.GetMachineOpcode(); MOperator prevMop = prevInsn->GetMachineOpcode(); /* @@ -3264,37 +3113,44 @@ void ContiLDRorSTRToSameMEMAArch64::Run(BB &bb, Insn &insn) { loadAfterLoad = true; } if (!loadAfterStore && !loadAfterLoad) { - return; + return false; } ASSERT(insn.GetOperand(kInsnSecondOpnd).IsMemoryAccessOperand(), "expects mem operands"); ASSERT(prevInsn->GetOperand(kInsnSecondOpnd).IsMemoryAccessOperand(), "expects mem operands"); + return true; +} - auto &memOpnd1 = static_cast(insn.GetOperand(kInsnSecondOpnd)); - AArch64MemOperand::AArch64AddressingMode addrMode1 = memOpnd1.GetAddrMode(); - if (addrMode1 != AArch64MemOperand::kAddrModeBOi || (!memOpnd1.IsIntactIndexed())) { +void ContiLDRorSTRToSameMEMPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + MOperator thisMop = insn.GetMachineOpcode(); + auto &memOpnd1 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + MemOperand::AArch64AddressingMode addrMode1 = memOpnd1.GetAddrMode(); + if (addrMode1 != MemOperand::kAddrModeBOi || (!memOpnd1.IsIntactIndexed())) { return; } - auto *base1 = static_cast(memOpnd1.GetBaseRegister()); + auto *base1 = static_cast(memOpnd1.GetBaseRegister()); ASSERT(base1 == nullptr || !base1->IsVirtualRegister(), "physical register has not been allocated?"); - AArch64OfstOperand *offset1 = memOpnd1.GetOffsetImmediate(); + OfstOperand *offset1 = memOpnd1.GetOffsetImmediate(); - auto &memOpnd2 = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); - AArch64MemOperand::AArch64AddressingMode addrMode2 = memOpnd2.GetAddrMode(); - if (addrMode2 != AArch64MemOperand::kAddrModeBOi || (!memOpnd2.IsIntactIndexed())) { + auto &memOpnd2 = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + 
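The new SbfxOptPattern above forwards the source of a sign-extract whose result is consumed only as a 32-bit register: each such use is rewritten to read the source directly, and when the consumer also redefines the destination the sbfx itself becomes dead and is removed. A stand-in model of the scan over the next instruction's operands (the asm is an assumed example):

```cpp
// before:                         after:
//   sbfx x1, x2, #0, #32            add w1, w2, w3
//   add  w1, w1, w3                 (the sbfx is removed: w1 is redefined)
#include <cstddef>
#include <vector>

struct UseSite {
  std::size_t opndIdx;
  bool isDef;    // the consumer writes this operand
  bool is32Bit;  // the consumer reads it as a w-register
};

// Collect forwardable 32-bit reads; any 64-bit read blocks the rewrite
// because it would observe the dropped sign extension.
static bool CollectForwardSites(const std::vector<UseSite> &sites,
                                std::vector<std::size_t> &cands, bool &removeSbfx) {
  removeSbfx = false;
  for (const UseSite &s : sites) {
    if (!s.is32Bit) {
      return false;
    }
    if (s.isDef) {
      removeSbfx = true;
    } else {
      cands.push_back(s.opndIdx);
    }
  }
  return !cands.empty();
}
```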
MemOperand::AArch64AddressingMode addrMode2 = memOpnd2.GetAddrMode(); + if (addrMode2 != MemOperand::kAddrModeBOi || (!memOpnd2.IsIntactIndexed())) { return; } - auto *base2 = static_cast(memOpnd2.GetBaseRegister()); + auto *base2 = static_cast(memOpnd2.GetBaseRegister()); ASSERT(base2 == nullptr || !base2->IsVirtualRegister(), "physical register has not been allocated?"); - AArch64OfstOperand *offset2 = memOpnd2.GetOffsetImmediate(); + OfstOperand *offset2 = memOpnd2.GetOffsetImmediate(); if (base1 == nullptr || base2 == nullptr || offset1 == nullptr || offset2 == nullptr) { return; } - auto ®1 = static_cast(insn.GetOperand(kInsnFirstOpnd)); - auto ®2 = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + auto ®1 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto ®2 = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); int64 offsetVal1 = offset1->GetOffsetValue(); int64 offsetVal2 = offset2->GetOffsetValue(); if (base1->GetRegisterNumber() != base2->GetRegisterNumber() || @@ -3324,7 +3180,7 @@ void ContiLDRorSTRToSameMEMAArch64::Run(BB &bb, Insn &insn) { } } if (moveSameReg == false) { - CG *cg = cgFunc.GetCG(); + CG *cg = cgFunc->GetCG(); bb.InsertInsnAfter(*prevInsn, cg->BuildInstruction(newOp, reg1, reg2)); } bb.RemoveInsn(insn); @@ -3334,20 +3190,35 @@ void ContiLDRorSTRToSameMEMAArch64::Run(BB &bb, Insn &insn) { } } -void RemoveIncDecRefAArch64::Run(BB &bb, Insn &insn) { - ASSERT(insn.GetMachineOpcode() == MOP_xbl, "expect a xbl MOP at RemoveIncDecRef optimization"); +bool RemoveIncDecRefPattern::CheckCondition(Insn &insn) { + if (insn.GetMachineOpcode() != MOP_xbl) { + return false; + } + prevInsn = insn.GetPreviousMachineInsn(); + if (prevInsn == nullptr) { + return false; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (prevMop != MOP_xmovrr) { + return false; + } auto &target = static_cast(insn.GetOperand(kInsnFirstOpnd)); - Insn *insnMov = insn.GetPreviousMachineInsn(); - if (insnMov == nullptr) { - return; + if (target.GetName() != "MCC_IncDecRef_NaiveRCFast") { + return false; } - MOperator mopMov = insnMov->GetMachineOpcode(); - if (target.GetName() == "MCC_IncDecRef_NaiveRCFast" && mopMov == MOP_xmovrr && - static_cast(insnMov->GetOperand(kInsnFirstOpnd)).GetRegisterNumber() == R1 && - static_cast(insnMov->GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == R0) { - bb.RemoveInsn(*insnMov); - bb.RemoveInsn(insn); + if (static_cast(prevInsn->GetOperand(kInsnFirstOpnd)).GetRegisterNumber() != R1 || + static_cast(prevInsn->GetOperand(kInsnSecondOpnd)).GetRegisterNumber() != R0) { + return false; } + return true; +} + +void RemoveIncDecRefPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + bb.RemoveInsn(*prevInsn); + bb.RemoveInsn(insn); } #ifdef USE_32BIT_REF @@ -3371,12 +3242,12 @@ void CselZeroOneToCsetOpt::Run(BB &bb, Insn &insn) { } /* csel to cset */ - if ((trueTempOp->IsIntImmediate() || trueTempOp->IsZeroRegister()) && - (falseTempOp->IsIntImmediate() || falseTempOp->IsZeroRegister())){ + if ((trueTempOp->IsIntImmediate() || IsZeroRegister(*trueTempOp)) && + (falseTempOp->IsIntImmediate() || IsZeroRegister(*falseTempOp))){ ImmOperand *imm1 = static_cast(trueTempOp); ImmOperand *imm2 = static_cast(falseTempOp); - bool inverse = imm1->IsOne() && (imm2->IsZero() || imm2->IsZeroRegister()); - if (inverse || ((imm1->IsZero() || imm1->IsZeroRegister()) && imm2->IsOne())) { + bool inverse = imm1->IsOne() && (imm2->IsZero() || IsZeroRegister(*imm2)); + if (inverse || ((imm1->IsZero() || IsZeroRegister(*imm1)) && imm2->IsOne())) { 
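The ContiLDRorSTRToSameMEMPattern refactor above keeps the original rewrite while splitting it into CheckCondition/Run: a load from exactly the [base + offset] location just stored (or just loaded) becomes a register move, or disappears entirely when source and destination coincide. A simplified model covering the store-then-load case; the real pattern also checks matching access sizes and register classes:

```cpp
#include <cstdint>

// before:                       after:
//   str x0, [sp, #8]              str x0, [sp, #8]
//   ldr x1, [sp, #8]              mov x1, x0   (dropped when x1 == x0)
struct Access {
  int base;
  int64_t off;
  int reg;
  bool intactBaseOffset; // kAddrModeBOi with no writeback
};

enum class Forward { kNone, kMov, kDelete };

static Forward ForwardStoreToLoad(const Access &store, const Access &load) {
  if (!store.intactBaseOffset || !load.intactBaseOffset) {
    return Forward::kNone;
  }
  if (store.base != load.base || store.off != load.off) {
    return Forward::kNone;
  }
  return (store.reg == load.reg) ? Forward::kDelete : Forward::kMov;
}
```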
Operand ® = insn.GetOperand(kInsnFirstOpnd); CondOperand &condOperand = static_cast(insn.GetOperand(kInsnFourthOpnd)); MOperator mopCode = (reg.GetSize() == k64BitSize) ? MOP_xcsetrc : MOP_wcsetrc; @@ -3393,12 +3264,6 @@ void CselZeroOneToCsetOpt::Run(BB &bb, Insn &insn) { CHECK_FATAL(false, "check this case in ssa opt"); } insn.GetBB()->ReplaceInsn(insn, csetInsn); - if (trueMovInsn != nullptr) { - insn.GetBB()->RemoveInsn(*trueMovInsn); - } - if (falseMovInsn != nullptr) { - insn.GetBB()->RemoveInsn(*falseMovInsn); - } } } } @@ -3472,12 +3337,20 @@ AArch64CC_t CselZeroOneToCsetOpt::GetReverseCond(const CondOperand &cond) const return kCcLast; } -void InlineReadBarriersAArch64::Run(BB &bb, Insn &insn) { - if (!CGOptions::IsGCOnly()) { /* Inline read barriers only enabled for GCONLY. */ +bool InlineReadBarriersPattern::CheckCondition(Insn &insn) { + /* Inline read barriers only enabled for GCONLY. */ + if (!CGOptions::IsGCOnly()) { + return false; + } + return true; +} + +void InlineReadBarriersPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { return; } const std::string &barrierName = GetReadBarrierName(insn); - CG *cg = cgFunc.GetCG(); + CG *cg = cgFunc->GetCG(); if (barrierName == kMccDummy) { /* remove dummy call. */ bb.RemoveInsn(insn); @@ -3487,7 +3360,7 @@ void InlineReadBarriersAArch64::Run(BB &bb, Insn &insn) { bool isStatic = (barrierName == kMccLoadRefS || barrierName == kMccLoadRefVS); /* refSize is 32 if USE_32BIT_REF defined, otherwise 64. */ const uint32 refSize = kRefSize; - auto *aarch64CGFunc = static_cast(&cgFunc); + auto *aarch64CGFunc = static_cast(cgFunc); MOperator loadOp = GetLoadOperator(refSize, isVolatile); RegOperand ®Op = aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(R0, refSize, kRegTyInt); AArch64reg addrReg = isStatic ? R0 : R1; @@ -3504,21 +3377,30 @@ void InlineReadBarriersAArch64::Run(BB &bb, Insn &insn) { } } -void ReplaceDivToMultiAArch64::Run(BB &bb, Insn &insn) { - Insn *prevInsn = insn.GetPreviousMachineInsn(); +bool ReplaceDivToMultiPattern::CheckCondition(Insn &insn) { + prevInsn = insn.GetPreviousMachineInsn(); if (prevInsn == nullptr) { - return; + return false; } - Insn *prePrevInsn = prevInsn->GetPreviousMachineInsn(); + prePrevInsn = prevInsn->GetPreviousMachineInsn(); auto &sdivOpnd1 = static_cast(insn.GetOperand(kInsnSecondOpnd)); auto &sdivOpnd2 = static_cast(insn.GetOperand(kInsnThirdOpnd)); if (sdivOpnd1.GetRegisterNumber() == sdivOpnd2.GetRegisterNumber() || sdivOpnd1.GetRegisterNumber() == R16 || sdivOpnd2.GetRegisterNumber() == R16 || prePrevInsn == nullptr) { - return; + return false; } MOperator prevMop = prevInsn->GetMachineOpcode(); MOperator prePrevMop = prePrevInsn->GetMachineOpcode(); if (prevMop && (prevMop == MOP_wmovkri16) && prePrevMop && (prePrevMop == MOP_xmovri32)) { + return true; + } + return false; +} + +void ReplaceDivToMultiPattern::Run(BB &bb, Insn &insn) { + if (CheckCondition(insn)) { + auto &sdivOpnd1 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &sdivOpnd2 = static_cast(insn.GetOperand(kInsnThirdOpnd)); /* Check if dest operand of insn is idential with register of prevInsn and prePrevInsn. 
*/ if ((&(prevInsn->GetOperand(kInsnFirstOpnd)) != &sdivOpnd2) || (&(prePrevInsn->GetOperand(kInsnFirstOpnd)) != &sdivOpnd2)) { @@ -3537,8 +3419,8 @@ void ReplaceDivToMultiAArch64::Run(BB &bb, Insn &insn) { if ((prevImmOpnd.GetValue() != 1) || (prePrevImmOpnd.GetValue() != 34464)) { return; } - auto *aarch64CGFunc = static_cast(&cgFunc); - CG *cg = cgFunc.GetCG(); + auto *aarch64CGFunc = static_cast(cgFunc); + CG *cg = cgFunc->GetCG(); /* mov w16, #0x588f */ RegOperand &tempOpnd = aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(R16), k64BitSize, kRegTyInt); @@ -3694,8 +3576,8 @@ void AndCmpBranchesToCsetAArch64::Run(BB &bb, Insn &insn) { if (ubfxOp == MOP_wubfxrri5i5 && static_cast(n) >= k32BitSize) { return; } - auto &dstReg = static_cast(csetReg); - auto &srcReg = static_cast(prevInsnSecondReg); + auto &dstReg = static_cast(csetReg); + auto &srcReg = static_cast(prevInsnSecondReg); CG *cg = cgFunc.GetCG(); auto *aarch64CGFunc = static_cast(&cgFunc); ImmOperand &bitPos = aarch64CGFunc->CreateImmOperand(n, k8BitSize, false); @@ -3833,12 +3715,12 @@ void ZeroCmpBranchesAArch64::Run(BB &bb, Insn &insn) { case MOP_xcmprr: { reg0 = &static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); reg1 = &static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); - if (!reg0->IsZeroRegister() && !reg1->IsZeroRegister()) { + if (!IsZeroRegister(*reg0) && !IsZeroRegister(*reg1)) { return; } switch (insn.GetMachineOpcode()) { case MOP_bge: - if (reg1->IsZeroRegister()) { + if (IsZeroRegister(*reg1)) { regOpnd = &static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); newOp = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbz : MOP_xtbz; } else { @@ -3846,7 +3728,7 @@ void ZeroCmpBranchesAArch64::Run(BB &bb, Insn &insn) { } break; case MOP_ble: - if (reg0->IsZeroRegister()) { + if (IsZeroRegister(*reg0)) { regOpnd = &static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); newOp = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbz : MOP_xtbz; } else { @@ -3854,7 +3736,7 @@ void ZeroCmpBranchesAArch64::Run(BB &bb, Insn &insn) { } break; case MOP_blt: - if (reg1->IsZeroRegister()) { + if (IsZeroRegister(*reg1)) { regOpnd = &static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); newOp = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbnz : MOP_xtbnz; } else { @@ -3862,7 +3744,7 @@ void ZeroCmpBranchesAArch64::Run(BB &bb, Insn &insn) { } break; case MOP_bgt: - if (reg0->IsZeroRegister()) { + if (IsZeroRegister(*reg0)) { regOpnd = &static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); newOp = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbnz : MOP_xtbnz; } else { @@ -3882,7 +3764,7 @@ void ZeroCmpBranchesAArch64::Run(BB &bb, Insn &insn) { ImmOperand &bitp = aarch64CGFunc->CreateImmOperand( (regOpnd->GetSize() <= k32BitSize) ? 
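For the ReplaceDivToMultiPattern hunk above, the matched immediates decode as follows: the `movz` contributes 34464 (0x86A0) into bits 0-15 and the `movk` writes 1 into bits 16-31, so the reconstructed divisor is 34464 + (1 << 16) = 100000, and the pattern strength-reduces a signed divide by 100000 into a multiply by a precomputed reciprocal constant staged in R16 (the `mov w16, #0x588f` sequence). A quick check of the constant arithmetic:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const uint64_t movzImm = 34464; // 0x86A0, occupies bits 0-15
  const uint64_t movkImm = 1;     // movk ..., lsl #16 fills bits 16-31
  const uint64_t divisor = movzImm | (movkImm << 16);
  assert(divisor == 100000);      // the divide being strength-reduced
  return 0;
}
```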
(k32BitSize - 1) : (k64BitSize - 1), k8BitSize, false); bb.InsertInsnAfter( - insn, cg->BuildInstruction(newOp, *static_cast(regOpnd), bitp, *label)); + insn, cg->BuildInstruction(newOp, *static_cast(regOpnd), bitp, *label)); bb.RemoveInsn(insn); bb.RemoveInsn(*prevInsn); } @@ -3984,18 +3866,18 @@ const Insn *CmpCsetAArch64::DefInsnOfOperandInBB(const Insn &startInsn, const In uint32 opndNum = insn->GetOperandSize(); for (uint32 i = 0; i < opndNum; ++i) { Operand &opnd = insn->GetOperand(i); - AArch64OpndProp *regProp = static_cast(md->operand[i]); + OpndProp *regProp = md->operand[i]; if (!regProp->IsDef()) { continue; } /* Operand is base reg of Memory, defined by str */ if (opnd.IsMemoryAccessOperand()) { - auto &memOpnd = static_cast(opnd); + auto &memOpnd = static_cast(opnd); RegOperand *base = memOpnd.GetBaseRegister(); ASSERT(base != nullptr, "nullptr check"); ASSERT(base->IsRegister(), "expects RegOperand"); if (RegOperand::IsSameRegNO(*base, checkInsn.GetOperand(static_cast(opndIdx))) && - memOpnd.GetAddrMode() == AArch64MemOperand::kAddrModeBOi && + memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && (memOpnd.IsPostIndexed() || memOpnd.IsPreIndexed())) { return insn; } @@ -4026,7 +3908,7 @@ bool CmpCsetAArch64::OpndDefByOneValidBit(const Insn &defInsn) { } case MOP_xmovrr: case MOP_wmovrr: - return defInsn.GetOperand(kInsnSecondOpnd).IsZeroRegister(); + return IsZeroRegister(defInsn.GetOperand(kInsnSecondOpnd)); case MOP_wlsrrri5: case MOP_xlsrrri6: { Operand &opnd2 = defInsn.GetOperand(kInsnThirdOpnd); @@ -4092,7 +3974,7 @@ bool CmpCsetAArch64::FlagUsedLaterInCurBB(const BB &bb, Insn &startInsn) const { if (!opnd.IsConditionCode()) { continue; } - AArch64OpndProp *regProp = static_cast(md->operand[i]); + OpndProp *regProp = md->operand[i]; bool isUse = regProp->IsUse(); if (isUse) { return true; @@ -4206,8 +4088,8 @@ bool DeleteMovAfterCbzOrCbnzAArch64::OpndDefByMovZero(const Insn &insn) const { case MOP_wmovrr: { Operand &secondOpnd = insn.GetOperand(kInsnSecondOpnd); ASSERT(secondOpnd.IsRegister(), "expects RegOperand here"); - auto ®Opnd = static_cast(secondOpnd); - return regOpnd.IsZeroRegister(); + auto ®Opnd = static_cast(secondOpnd); + return IsZeroRegister(regOpnd); } default: return false; @@ -4227,17 +4109,17 @@ bool DeleteMovAfterCbzOrCbnzAArch64::NoPreDefine(Insn &testInsn) const { uint32 opndNum = insn->GetOperandSize(); for (uint32 i = 0; i < opndNum; ++i) { Operand &opnd = insn->GetOperand(i); - AArch64OpndProp *regProp = static_cast(md->operand[i]); + OpndProp *regProp = md->operand[i]; if (!regProp->IsDef()) { continue; } if (opnd.IsMemoryAccessOperand()) { - auto &memOpnd = static_cast(opnd); + auto &memOpnd = static_cast(opnd); RegOperand *base = memOpnd.GetBaseRegister(); ASSERT(base != nullptr, "nullptr check"); ASSERT(base->IsRegister(), "expects RegOperand"); if (RegOperand::IsSameRegNO(*base, testInsn.GetOperand(kInsnFirstOpnd)) && - memOpnd.GetAddrMode() == AArch64MemOperand::kAddrModeBOi && + memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && (memOpnd.IsPostIndexed() || memOpnd.IsPreIndexed())) { return false; } @@ -4334,7 +4216,7 @@ void ComplexMemOperandAddAArch64::Run(BB &bb, Insn &insn) { if (!IsMemOperandOptPattern(insn, *nextInsn)) { return; } - AArch64MemOperand *memOpnd = static_cast(nextInsn->GetMemOpnd()); + MemOperand *memOpnd = static_cast(nextInsn->GetMemOpnd()); auto newBaseOpnd = static_cast(&insn.GetOperand(kInsnSecondOpnd)); auto newIndexOpnd = static_cast(&insn.GetOperand(kInsnThirdOpnd)); regno_t memBaseOpndRegNO = 
newBaseOpnd->GetRegisterNumber(); @@ -4346,13 +4228,13 @@ void ComplexMemOperandAddAArch64::Run(BB &bb, Insn &insn) { return; } if (newIndexOpnd->GetSize() <= k32BitSize) { - AArch64MemOperand &newMemOpnd = - aarch64CGFunc->GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOrX, memOpnd->GetSize(), newBaseOpnd, + MemOperand &newMemOpnd = + aarch64CGFunc->GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, memOpnd->GetSize(), newBaseOpnd, newIndexOpnd, 0, false); nextInsn->SetOperand(kInsnSecondOpnd, newMemOpnd); } else { - AArch64MemOperand &newMemOpnd = - aarch64CGFunc->GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOrX, memOpnd->GetSize(), newBaseOpnd, + MemOperand &newMemOpnd = + aarch64CGFunc->GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, memOpnd->GetSize(), newBaseOpnd, newIndexOpnd, nullptr, nullptr); nextInsn->SetOperand(kInsnSecondOpnd, newMemOpnd); } @@ -4456,7 +4338,7 @@ void OneHoleBranchesPreAArch64::Run(BB &bb, Insn &insn) { return; } ImmOperand &oneHoleOpnd = aarch64CGFunc->CreateImmOperand(0, k8BitSize, false); - auto ®Operand = static_cast(prevPrevInsn->GetOperand(kInsnSecondOpnd)); + auto ®Operand = static_cast(prevPrevInsn->GetOperand(kInsnSecondOpnd)); if (CGOptions::DoCGSSA()) { CHECK_FATAL(false, "check this case in ssa opt"); } @@ -4545,10 +4427,10 @@ void LoadFloatPointPattern::Run(BB &bb, Insn &insn) { Insn *insn2 = optInsn[++insnNum]; Insn *insn3 = optInsn[++insnNum]; Insn *insn4 = optInsn[++insnNum]; - auto &movConst1 = static_cast(insn1->GetOperand(kInsnSecondOpnd)); - auto &movConst2 = static_cast(insn2->GetOperand(kInsnSecondOpnd)); - auto &movConst3 = static_cast(insn3->GetOperand(kInsnSecondOpnd)); - auto &movConst4 = static_cast(insn4->GetOperand(kInsnSecondOpnd)); + auto &movConst1 = static_cast(insn1->GetOperand(kInsnSecondOpnd)); + auto &movConst2 = static_cast(insn2->GetOperand(kInsnSecondOpnd)); + auto &movConst3 = static_cast(insn3->GetOperand(kInsnSecondOpnd)); + auto &movConst4 = static_cast(insn4->GetOperand(kInsnSecondOpnd)); /* movk/movz's immOpnd is 16-bit unsigned immediate */ uint64 value = static_cast(movConst1.GetValue()) + (static_cast(movConst2.GetValue()) << k16BitSize) + @@ -4572,20 +4454,20 @@ void LoadFloatPointPattern::Run(BB &bb, Insn &insn) { void ReplaceOrrToMovAArch64::Run(BB &bb, Insn &insn){ Operand *opndOfOrr = nullptr; ImmOperand *immOpnd = nullptr; - AArch64RegOperand *reg1 = nullptr; - AArch64RegOperand *reg2 = nullptr; + RegOperand *reg1 = nullptr; + RegOperand *reg2 = nullptr; MOperator thisMop = insn.GetMachineOpcode(); MOperator newMop = MOP_undef; switch (thisMop) { case MOP_wiorrri12: { /* opnd1 is reg32 and opnd3 is immediate. */ opndOfOrr = &(insn.GetOperand(kInsnThirdOpnd)); - reg2 = &static_cast(insn.GetOperand(kInsnSecondOpnd)); + reg2 = &static_cast(insn.GetOperand(kInsnSecondOpnd)); newMop = MOP_wmovrr; break; } case MOP_xiorrri13: { /* opnd1 is reg64 and opnd3 is immediate. 
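LoadFloatPointPattern (above) recognizes a movz/movk/movk/movk chain that builds a 64-bit bit pattern 16 bits at a time and replaces it with a direct materialization of the equivalent floating-point literal. The reassembly of the constant, as a self-contained sketch:

```cpp
#include <cstdint>
#include <cstring>

// Rebuild the 64-bit pattern from four 16-bit chunks, least significant first,
// then reinterpret it as the double the movz/movk chain was materializing.
static double RebuildFmovLiteral(uint16_t c1, uint16_t c2, uint16_t c3, uint16_t c4) {
  uint64_t bits = static_cast<uint64_t>(c1) |
                  (static_cast<uint64_t>(c2) << 16) |
                  (static_cast<uint64_t>(c3) << 32) |
                  (static_cast<uint64_t>(c4) << 48);
  double d;
  std::memcpy(&d, &bits, sizeof d); // bit-exact reinterpretation
  return d;
}
```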
*/ opndOfOrr = &(insn.GetOperand(kInsnThirdOpnd)); - reg2 = &static_cast(insn.GetOperand(kInsnSecondOpnd)); + reg2 = &static_cast(insn.GetOperand(kInsnSecondOpnd)); newMop = MOP_xmovrr; break; } @@ -4595,7 +4477,7 @@ void ReplaceOrrToMovAArch64::Run(BB &bb, Insn &insn){ ASSERT(opndOfOrr->IsIntImmediate(), "expects immediate operand"); immOpnd = static_cast(opndOfOrr); if (immOpnd->GetValue() == 0) { - reg1 = &static_cast(insn.GetOperand(kInsnFirstOpnd)); + reg1 = &static_cast(insn.GetOperand(kInsnFirstOpnd)); if (CGOptions::DoCGSSA()) { CHECK_FATAL(false, "check this case in ssa opt"); } @@ -4646,39 +4528,46 @@ void ReplaceCmpToCmnAArch64::Run(BB &bb, Insn &insn) { } } -void RemoveIncRefAArch64::Run(BB &bb, Insn &insn) { +bool RemoveIncRefPattern::CheckCondition(Insn &insn) { MOperator mOp = insn.GetMachineOpcode(); if (mOp != MOP_xbl) { - return; + return false; } auto &target = static_cast(insn.GetOperand(kInsnFirstOpnd)); if (target.GetName() != "MCC_IncDecRef_NaiveRCFast") { - return; + return false; } - Insn *insnMov2 = insn.GetPreviousMachineInsn(); + insnMov2 = insn.GetPreviousMachineInsn(); if (insnMov2 == nullptr) { - return; + return false; } MOperator mopMov2 = insnMov2->GetMachineOpcode(); if (mopMov2 != MOP_xmovrr) { - return; + return false; } - Insn *insnMov1 = insnMov2->GetPreviousMachineInsn(); + insnMov1 = insnMov2->GetPreviousMachineInsn(); if (insnMov1 == nullptr) { - return; + return false; } MOperator mopMov1 = insnMov1->GetMachineOpcode(); if (mopMov1 != MOP_xmovrr) { - return; + return false; } if (static_cast(insnMov1->GetOperand(kInsnSecondOpnd)).GetRegisterNumber() != static_cast(insnMov2->GetOperand(kInsnSecondOpnd)).GetRegisterNumber()) { - return; + return false; } auto &mov2Dest = static_cast(insnMov2->GetOperand(kInsnFirstOpnd)); auto &mov1Dest = static_cast(insnMov1->GetOperand(kInsnFirstOpnd)); if (mov1Dest.IsVirtualRegister() || mov2Dest.IsVirtualRegister() || mov1Dest.GetRegisterNumber() != R0 || mov2Dest.GetRegisterNumber() != R1) { + return false; + } + return true; +} + +void RemoveIncRefPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { return; } bb.RemoveInsn(insn); @@ -4741,8 +4630,8 @@ bool LongIntCompareWithZPattern::IsPatternMatch(const std::vector &optIns Insn *insn3 = optInsn[++insnNum]; Insn *insn4 = optInsn[++insnNum]; ASSERT(insnNum == 3, " this specific case has three insns"); - if (insn3->GetOperand(kInsnSecondOpnd).IsZeroRegister() && insn3->GetOperand(kInsnThirdOpnd).IsZeroRegister() && - insn2->GetOperand(kInsnThirdOpnd).IsZeroRegister() && + if (IsZeroRegister(insn3->GetOperand(kInsnSecondOpnd)) && IsZeroRegister(insn3->GetOperand(kInsnThirdOpnd)) && + IsZeroRegister(insn2->GetOperand(kInsnThirdOpnd)) && &(insn2->GetOperand(kInsnFirstOpnd)) == &(insn2->GetOperand(kInsnSecondOpnd)) && static_cast(insn3->GetOperand(kInsnFourthOpnd)).GetCode() == CC_GE && static_cast(insn2->GetOperand(kInsnFourthOpnd)).GetCode() == CC_LE && @@ -4788,11 +4677,11 @@ void ComplexMemOperandAArch64::Run(BB &bb, Insn &insn) { if (nextMop && ((nextMop >= MOP_wldrsb && nextMop <= MOP_dldp) || (nextMop >= MOP_wstrb && nextMop <= MOP_dstp))) { /* Check if base register of nextInsn and the dest operand of insn are identical. */ - AArch64MemOperand *memOpnd = static_cast(nextInsn->GetMemOpnd()); + MemOperand *memOpnd = static_cast(nextInsn->GetMemOpnd()); ASSERT(memOpnd != nullptr, "memOpnd is null in AArch64Peep::ComplexMemOperandAArch64"); /* Only for AddrMode_B_OI addressing mode. 
*/ - if (memOpnd->GetAddrMode() != AArch64MemOperand::kAddrModeBOi) { + if (memOpnd->GetAddrMode() != MemOperand::kAddrModeBOi) { return; } @@ -4824,8 +4713,14 @@ void ComplexMemOperandAArch64::Run(BB &bb, Insn &insn) { } auto &stImmOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); - AArch64OfstOperand &offOpnd = aarch64CGFunc->GetOrCreateOfstOpnd( + OfstOperand &offOpnd = aarch64CGFunc->GetOrCreateOfstOpnd( stImmOpnd.GetOffset() + memOpnd->GetOffsetImmediate()->GetOffsetValue(), k32BitSize); + + /* avoid relocation */ + if ((offOpnd.GetValue() % kBitsPerByte) != 0) { + return; + } + if (cgFunc.GetMirModule().IsCModule()) { Insn *prevInsn = insn.GetPrev(); MOperator prevMop = prevInsn->GetMachineOpcode(); @@ -4837,11 +4732,11 @@ void ComplexMemOperandAArch64::Run(BB &bb, Insn &insn) { } } auto &newBaseOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); - AArch64MemOperand &newMemOpnd = - aarch64CGFunc->GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeLo12Li, memOpnd->GetSize(), + MemOperand &newMemOpnd = + aarch64CGFunc->GetOrCreateMemOpnd(MemOperand::kAddrModeLo12Li, memOpnd->GetSize(), &newBaseOpnd, nullptr, &offOpnd, stImmOpnd.GetSymbol()); - nextInsn->SetMemOpnd(static_cast(&newMemOpnd)); + nextInsn->SetMemOpnd(&newMemOpnd); bb.RemoveInsn(insn); CHECK_FATAL(!CGOptions::IsLazyBinding() || cgFunc.GetCG()->IsLibcore(), "this pattern can't be found in this phase"); @@ -4864,21 +4759,22 @@ void ComplexMemOperandPreAddAArch64::Run(BB &bb, Insn &insn) { if (!IsMemOperandOptPattern(insn, *nextInsn)) { return; } - AArch64MemOperand *memOpnd = static_cast(nextInsn->GetMemOpnd()); + MemOperand *memOpnd = static_cast(nextInsn->GetMemOpnd()); auto &newBaseOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); - auto &newIndexOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); if (newBaseOpnd.GetSize() != k64BitSize) { return; } + auto &newIndexOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); if (newIndexOpnd.GetSize() <= k32BitSize) { - AArch64MemOperand &newMemOpnd = - aarch64CGFunc->GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOrX, memOpnd->GetSize(), &newBaseOpnd, + MemOperand &newMemOpnd = + aarch64CGFunc->GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, memOpnd->GetSize(), &newBaseOpnd, &newIndexOpnd, 0, false); nextInsn->SetOperand(kInsnSecondOpnd, newMemOpnd); } else { - AArch64MemOperand &newMemOpnd = - aarch64CGFunc->GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOrX, memOpnd->GetSize(), &newBaseOpnd, - &newIndexOpnd, nullptr, nullptr); + auto *newOfstOpnd = &aarch64CGFunc->GetOrCreateOfstOpnd(0, k32BitSize); + MemOperand &newMemOpnd = + aarch64CGFunc->GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, memOpnd->GetSize(), &newBaseOpnd, + &newIndexOpnd, newOfstOpnd, nullptr); nextInsn->SetOperand(kInsnSecondOpnd, newMemOpnd); } bb.RemoveInsn(insn); @@ -4916,11 +4812,11 @@ void ComplexMemOperandLSLAArch64::Run(BB &bb, Insn &insn) { if (nextMop && ((nextMop >= MOP_wldrsb && nextMop <= MOP_dldr) || (nextMop >= MOP_wstrb && nextMop <= MOP_dstr))) { /* Check if base register of nextInsn and the dest operand of insn are identical. */ - AArch64MemOperand *memOpnd = static_cast(nextInsn->GetMemOpnd()); + MemOperand *memOpnd = static_cast(nextInsn->GetMemOpnd()); ASSERT(memOpnd != nullptr, "null ptr check"); /* Only for AddrMode_B_OI addressing mode. 
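The ComplexMemOperandAArch64 hunk above folds an `adrp`-based address computation into a :lo12: addressing mode, and the newly added "avoid relocation" guard skips the fold when the combined offset is not a multiple of kBitsPerByte (8), presumably because the scaled :lo12: relocation cannot represent an unaligned immediate for this access. The shape of the rewrite (symbol and registers illustrative) and a model of the guard:

```cpp
// before:                              after:
//   adrp x0, sym                         adrp x0, sym
//   add  x0, x0, :lo12:sym               ldr  w1, [x0, :lo12:sym]
//   ldr  w1, [x0]
#include <cstdint>

// Only fold when the final immediate keeps the alignment the relocation
// requires; 8 mirrors kBitsPerByte in the patch.
static bool Lo12FoldAllowed(int64_t symbolOffset, int64_t memOffset) {
  return ((symbolOffset + memOffset) % 8) == 0;
}
```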
*/ - if (memOpnd->GetAddrMode() != AArch64MemOperand::kAddrModeBOi) { + if (memOpnd->GetAddrMode() != MemOperand::kAddrModeBOi) { return; } @@ -4957,8 +4853,8 @@ void ComplexMemOperandLSLAArch64::Run(BB &bb, Insn &insn) { } auto &newBaseOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); auto &newIndexOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); - AArch64MemOperand &newMemOpnd = - aarch64CGFunc->GetOrCreateMemOpnd(AArch64MemOperand::kAddrModeBOrX, memOpnd->GetSize(), &newBaseOpnd, + MemOperand &newMemOpnd = + aarch64CGFunc->GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, memOpnd->GetSize(), &newBaseOpnd, &newIndexOpnd, static_cast(lsl.GetShiftAmount()), false); nextInsn->SetOperand(kInsnSecondOpnd, newMemOpnd); @@ -5000,13 +4896,22 @@ void ComplexMemOperandLabelAArch64::Run(BB &bb, Insn &insn) { bb.RemoveInsn(*nextInsn); } +static bool MayThrowBetweenInsn(const Insn &prevCallInsn, const Insn &currCallInsn) { + for (Insn *insn = prevCallInsn.GetNext(); insn != nullptr && insn != &currCallInsn; insn = insn->GetNext()) { + if (insn->MayThrow()) { + return true; + } + } + return false; +} + /* * mov R0, vreg1 / R0 -> objDesignateInsn * add vreg2, vreg1, #imm -> fieldDesignateInsn * mov R1, vreg2 -> fieldParamDefInsn * mov R2, vreg3 -> fieldValueDefInsn */ -bool WriteFieldCallAArch64::WriteFieldCallOptPatternMatch(const Insn &writeFieldCallInsn, WriteRefFieldParam ¶m, +bool WriteFieldCallPattern::WriteFieldCallOptPatternMatch(const Insn &writeFieldCallInsn, WriteRefFieldParam ¶m, std::vector ¶mDefInsns) { Insn *fieldValueDefInsn = writeFieldCallInsn.GetPreviousMachineInsn(); if (fieldValueDefInsn == nullptr || fieldValueDefInsn->GetMachineOpcode() != MOP_xmovrr) { @@ -5040,7 +4945,7 @@ bool WriteFieldCallAArch64::WriteFieldCallOptPatternMatch(const Insn &writeField } Operand &fieldDesignateBaseOpnd = fieldDesignateInsn->GetOperand(kInsnSecondOpnd); param.fieldBaseOpnd = &(static_cast(fieldDesignateBaseOpnd)); - auto &immOpnd = static_cast(fieldDesignateInsn->GetOperand(kInsnThirdOpnd)); + auto &immOpnd = static_cast(fieldDesignateInsn->GetOperand(kInsnThirdOpnd)); param.fieldOffset = immOpnd.GetValue(); paramDefInsns.emplace_back(fieldDesignateInsn); Insn *objDesignateInsn = fieldDesignateInsn->GetPreviousMachineInsn(); @@ -5062,7 +4967,7 @@ bool WriteFieldCallAArch64::WriteFieldCallOptPatternMatch(const Insn &writeField return true; } -bool WriteFieldCallAArch64::IsWriteRefFieldCallInsn(const Insn &insn) { +bool WriteFieldCallPattern::IsWriteRefFieldCallInsn(const Insn &insn) { if (!insn.IsCall() || insn.IsIndirectCall()) { return false; } @@ -5071,54 +4976,54 @@ bool WriteFieldCallAArch64::IsWriteRefFieldCallInsn(const Insn &insn) { if (!targetOpnd->IsFuncNameOpnd()) { return false; } - FuncNameOperand *target = static_cast(targetOpnd); + auto *target = static_cast(targetOpnd); const MIRSymbol *funcSt = target->GetFunctionSymbol(); ASSERT(funcSt->GetSKind() == kStFunc, "the kind of funcSt is unreasonable"); const std::string &funcName = funcSt->GetName(); return funcName == "MCC_WriteRefField" || funcName == "MCC_WriteVolatileField"; } -static bool MayThrowBetweenInsn(const Insn &prevCallInsn, const Insn &currCallInsn) { - for (Insn *insn = prevCallInsn.GetNext(); insn != nullptr && insn != &currCallInsn; insn = insn->GetNext()) { - if (insn->MayThrow()) { - return true; - } +bool WriteFieldCallPattern::CheckCondition(Insn &insn) { + nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr) { + return false; } - return false; -} - -void WriteFieldCallAArch64::Run(BB &bb, Insn 
&insn) { - AArch64CGFunc *aarch64CGFunc = static_cast(&cgFunc); - std::vector paramDefInsns; - Insn *nextInsn = insn.GetNextMachineInsn(); if (!IsWriteRefFieldCallInsn(insn)) { - return; + return false; } if (!hasWriteFieldCall) { if (!WriteFieldCallOptPatternMatch(insn, firstCallParam, paramDefInsns)) { - return; + return false; } prevCallInsn = &insn; hasWriteFieldCall = true; - return; + return false; } - WriteRefFieldParam currentCallParam; if (!WriteFieldCallOptPatternMatch(insn, currentCallParam, paramDefInsns)) { - return; + return false; } if (prevCallInsn == nullptr || MayThrowBetweenInsn(*prevCallInsn, insn)) { - return; + return false; } if (firstCallParam.objOpnd == nullptr || currentCallParam.objOpnd == nullptr || currentCallParam.fieldBaseOpnd == nullptr) { - return; + return false; } if (!RegOperand::IsSameReg(*firstCallParam.objOpnd, *currentCallParam.objOpnd)) { + return false; + } + return true; +} + +void WriteFieldCallPattern::Run(BB &bb, Insn &insn) { + paramDefInsns.clear(); + if (!CheckCondition(insn)) { return; } + auto *aarCGFunc = static_cast(cgFunc); MemOperand &addr = - aarch64CGFunc->CreateMemOpnd(*currentCallParam.fieldBaseOpnd, currentCallParam.fieldOffset, k64BitSize); - Insn &strInsn = cgFunc.GetCG()->BuildInstruction(MOP_xstr, *currentCallParam.fieldValue, addr); + aarCGFunc->CreateMemOpnd(*currentCallParam.fieldBaseOpnd, currentCallParam.fieldOffset, k64BitSize); + Insn &strInsn = cgFunc->GetCG()->BuildInstruction(MOP_xstr, *currentCallParam.fieldValue, addr); strInsn.AppendComment("store reference field"); strInsn.MarkAsAccessRefField(true); bb.InsertInsnAfter(insn, strInsn); @@ -5130,29 +5035,36 @@ void WriteFieldCallAArch64::Run(BB &bb, Insn &insn) { nextInsn = strInsn.GetNextMachineInsn(); } -void RemoveDecRefAArch64::Run(BB &bb, Insn &insn) { +bool RemoveDecRefPattern::CheckCondition(Insn &insn) { if (insn.GetMachineOpcode() != MOP_xbl) { - return; + return false; } auto &target = static_cast(insn.GetOperand(kInsnFirstOpnd)); if (target.GetName() != "MCC_DecRef_NaiveRCFast") { - return; + return false; } - Insn *insnMov = insn.GetPreviousMachineInsn(); - if (insnMov == nullptr) { - return; + prevInsn = insn.GetPreviousMachineInsn(); + if (prevInsn == nullptr) { + return false; } - MOperator mopMov = insnMov->GetMachineOpcode(); + MOperator mopMov = prevInsn->GetMachineOpcode(); if ((mopMov != MOP_xmovrr && mopMov != MOP_xmovri64) || - static_cast(insnMov->GetOperand(kInsnFirstOpnd)).GetRegisterNumber() != R0) { - return; + static_cast(prevInsn->GetOperand(kInsnFirstOpnd)).GetRegisterNumber() != R0) { + return false; } - Operand &srcOpndOfMov = insnMov->GetOperand(kInsnSecondOpnd); - if (!srcOpndOfMov.IsZeroRegister() && + Operand &srcOpndOfMov = prevInsn->GetOperand(kInsnSecondOpnd); + if (!IsZeroRegister(srcOpndOfMov) && !(srcOpndOfMov.IsImmediate() && static_cast(srcOpndOfMov).GetValue() == 0)) { + return false; + } + return true; +} + +void RemoveDecRefPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { return; } - bb.RemoveInsn(*insnMov); + bb.RemoveInsn(*prevInsn); bb.RemoveInsn(insn); } @@ -5219,24 +5131,31 @@ void OneHoleBranchesAArch64::Run(BB &bb, Insn &insn) { bb.RemoveInsn(insn); } -void ReplaceIncDecWithIncAArch64::Run(BB &bb, Insn &insn) { +bool ReplaceIncDecWithIncPattern::CheckCondition(Insn &insn) { if (insn.GetMachineOpcode() != MOP_xbl) { - return; + return false; } - auto &target = static_cast(insn.GetOperand(kInsnFirstOpnd)); - if (target.GetName() != "MCC_IncDecRef_NaiveRCFast") { - return; + target = 
&static_cast(insn.GetOperand(kInsnFirstOpnd)); + if (target->GetName() != "MCC_IncDecRef_NaiveRCFast") { + return false; } - Insn *insnMov = insn.GetPreviousMachineInsn(); - if (insnMov == nullptr) { - return; + prevInsn = insn.GetPreviousMachineInsn(); + if (prevInsn == nullptr) { + return false; } - MOperator mopMov = insnMov->GetMachineOpcode(); + MOperator mopMov = prevInsn->GetMachineOpcode(); if (mopMov != MOP_xmovrr) { - return; + return false; + } + if (static_cast(prevInsn->GetOperand(kInsnFirstOpnd)).GetRegisterNumber() != R1 || + !IsZeroRegister(prevInsn->GetOperand(kInsnSecondOpnd))) { + return false; } - if (static_cast(insnMov->GetOperand(kInsnFirstOpnd)).GetRegisterNumber() != R1 || - !insnMov->GetOperand(kInsnSecondOpnd).IsZeroRegister()) { + return true; +} + +void ReplaceIncDecWithIncPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { return; } std::string funcName = "MCC_IncRef_NaiveRCFast"; @@ -5246,8 +5165,8 @@ void ReplaceIncDecWithIncAArch64::Run(BB &bb, Insn &insn) { LogInfo::MapleLogger() << "WARNING: Replace IncDec With Inc fail due to no MCC_IncRef_NaiveRCFast func\n"; return; } - bb.RemoveInsn(*insnMov); - target.SetFunctionSymbol(*st); + bb.RemoveInsn(*prevInsn); + target->SetFunctionSymbol(*st); } @@ -5418,8 +5337,8 @@ void UbfxToUxtwPattern::Run(BB &bb , Insn &insn) { } bool UbfxToUxtwPattern::CheckCondition(Insn &insn) { - AArch64ImmOperand &imm0 = static_cast(insn.GetOperand(kInsnThirdOpnd)); - AArch64ImmOperand &imm1 = static_cast(insn.GetOperand(kInsnFourthOpnd)); + ImmOperand &imm0 = static_cast(insn.GetOperand(kInsnThirdOpnd)); + ImmOperand &imm1 = static_cast(insn.GetOperand(kInsnFourthOpnd)); if ((imm0.GetValue() != 0) || (imm1.GetValue() != k32BitSize)) { return false; } diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_phi_elimination.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_phi_elimination.cpp index f3c4d3805eae4a1998e62e3f7f528ded36a5f5d6..5851b9075ad8f76cd3c7fcad23d0799d429bc306 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_phi_elimination.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_phi_elimination.cpp @@ -17,7 +17,7 @@ namespace maplebe { RegOperand &AArch64PhiEliminate::CreateTempRegForCSSA(RegOperand &oriOpnd) { - return *phiEliAlloc.New(GetAndIncreaseTempRegNO(), oriOpnd.GetSize(), oriOpnd.GetRegisterType()); + return *phiEliAlloc.New(GetAndIncreaseTempRegNO(), oriOpnd.GetSize(), oriOpnd.GetRegisterType()); } Insn &AArch64PhiEliminate::CreateMov(RegOperand &destOpnd, RegOperand &fromOpnd) { @@ -36,6 +36,13 @@ Insn &AArch64PhiEliminate::CreateMov(RegOperand &destOpnd, RegOperand &fromOpnd) insn = &cgFunc->GetCG()->BuildInstruction( is64bit ? isFloat ? MOP_xvmovd : MOP_xmovrr : isFloat ? 
MOP_xvmovs : MOP_wmovrr, destOpnd, fromOpnd); } + /* restore validBitNum */ + if (destOpnd.GetValidBitsNum() != k64BitSize && destOpnd.GetValidBitsNum() != k32BitSize) { + destOpnd.SetValidBitsNum(destOpnd.GetSize()); + } + if (fromOpnd.GetValidBitsNum() != k64BitSize && fromOpnd.GetValidBitsNum() != k32BitSize) { + fromOpnd.SetValidBitsNum(fromOpnd.GetSize()); + } /* copy remat info */ MaintainRematInfo(destOpnd, fromOpnd, true); ASSERT(insn != nullptr, "create move insn failed"); @@ -48,7 +55,11 @@ RegOperand &AArch64PhiEliminate::GetCGVirtualOpearnd(RegOperand &ssaOpnd, const ASSERT(ssaVersion != nullptr, "find ssaVersion failed"); ASSERT(!ssaVersion->IsDeleted(), "ssaVersion has been deleted"); RegOperand *regForRecreate = &ssaOpnd; - if (curInsn.GetMachineOpcode() != MOP_asm && !curInsn.IsVectorOp() && ssaVersion->GetAllUseInsns().empty()) { + if (curInsn.GetMachineOpcode() != MOP_asm && + !curInsn.IsVectorOp() && + !curInsn.IsSpecialIntrinsic() && + ssaVersion->GetAllUseInsns().empty() && + !curInsn.IsAtomic()) { CHECK_FATAL(false, "plz delete dead version"); } if (GetSSAInfo()->IsNoDefVReg(ssaOpnd.GetRegisterNumber())) { @@ -89,6 +100,12 @@ RegOperand &AArch64PhiEliminate::GetCGVirtualOpearnd(RegOperand &ssaOpnd, const } if (LastVersion != nullptr) { newReg.SetRegisterNumber(LastVersion->GetSSAvRegOpnd()->GetRegisterNumber()); + } else { + const MapleMap& bindingMap = defInsn->GetRegBinding(); + auto pairIt = bindingMap.find(ssaVersion->GetOriginalRegNO()); + if (pairIt != bindingMap.end()) { + newReg.SetRegisterNumber(pairIt->second); + } } } /* case 3 */ @@ -98,7 +115,6 @@ RegOperand &AArch64PhiEliminate::GetCGVirtualOpearnd(RegOperand &ssaOpnd, const } else { newReg.SetRegisterNumber(ssaVersion->GetOriginalRegNO()); } - MaintainRematInfo(newReg, ssaOpnd, true); newReg.SetOpndOutOfSSAForm(); return newReg; @@ -161,10 +177,9 @@ void AArch64PhiEliminate::ReCreateRegOperand(Insn &insn) { } void A64OperandPhiElmVisitor::Visit(RegOperand *v) { - auto *a64RegOpnd = static_cast(v); - if (a64RegOpnd->IsSSAForm()) { - ASSERT(a64RegOpnd->GetRegisterNumber() != kRFLAG, "both condi and reg"); - insn->SetOperand(idx, a64PhiEliminator->GetCGVirtualOpearnd(*a64RegOpnd, *insn)); + if (v->IsSSAForm()) { + ASSERT(v->GetRegisterNumber() != kRFLAG, "both condi and reg"); + insn->SetOperand(idx, a64PhiEliminator->GetCGVirtualOpearnd(*v, *insn)); } } @@ -187,15 +202,13 @@ void A64OperandPhiElmVisitor::Visit(ListOperand *v) { v->GetOperands().assign(tempRegStore.begin(), tempRegStore.end()); } -void A64OperandPhiElmVisitor::Visit(MemOperand *v) { - auto *a64MemOpnd = static_cast(v); +void A64OperandPhiElmVisitor::Visit(MemOperand *a64MemOpnd) { RegOperand *baseRegOpnd = a64MemOpnd->GetBaseRegister(); RegOperand *indexRegOpnd = a64MemOpnd->GetIndexRegister(); if ((baseRegOpnd != nullptr && baseRegOpnd->IsSSAForm()) || (indexRegOpnd != nullptr && indexRegOpnd->IsSSAForm())) { if (baseRegOpnd != nullptr && baseRegOpnd->IsSSAForm()) { - a64MemOpnd->SetBaseRegister( - static_cast(a64PhiEliminator->GetCGVirtualOpearnd(*baseRegOpnd, *insn))); + a64MemOpnd->SetBaseRegister(a64PhiEliminator->GetCGVirtualOpearnd(*baseRegOpnd, *insn)); } if (indexRegOpnd != nullptr && indexRegOpnd->IsSSAForm()) { a64MemOpnd->SetIndexRegister(a64PhiEliminator->GetCGVirtualOpearnd(*indexRegOpnd, *insn)); diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_proepilog.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_proepilog.cpp index b70edf4edea06a8a9361e3a8cb42ce302b429339..59e44f6e28366eae54c7768baa466529f8dbbee5 100644 --- 
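UbfxToUxtwPattern (earlier in this hunk) fires only for an unsigned bit-field extract starting at bit 0 with width 32, which is exactly a 32-bit zero-extend and can therefore be expressed as a `uxtw`. The condition, as a trivial sketch:

```cpp
#include <cstdint>

// before:  ubfx x0, x1, #0, #32        after:  uxtw x0, w1
static bool UbfxIsUxtw(uint64_t lsb, uint64_t width) {
  return lsb == 0 && width == 32; // any other field extract keeps the ubfx
}
```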
a/src/mapleall/maple_be/src/cg/aarch64/aarch64_proepilog.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_proepilog.cpp @@ -104,12 +104,24 @@ bool AArch64GenProEpilog::OptimizeTailBB(BB &bb, std::set &callInsns, con } MOperator insnMop = insn->GetMachineOpcode(); switch (insnMop) { + case MOP_xldr: + case MOP_xldp: + case MOP_dldr: + case MOP_dldp: { + if (bb.GetKind() == BB::kBBReturn) { + RegOperand ® = static_cast(insn->GetOperand(0)); + if (AArch64Abi::IsCalleeSavedReg(static_cast(reg.GetRegisterNumber()))) { + break; /* inserted restore from calleeregs-placement, ignore */ + } + } + return false; + } case MOP_wmovrr: case MOP_xmovrr: { CHECK_FATAL(insn->GetOperand(0).IsRegister(), "operand0 is not register"); CHECK_FATAL(insn->GetOperand(1).IsRegister(), "operand1 is not register"); - auto ®1 = static_cast(insn->GetOperand(0)); - auto ®2 = static_cast(insn->GetOperand(1)); + auto ®1 = static_cast(insn->GetOperand(0)); + auto ®2 = static_cast(insn->GetOperand(1)); if (reg1.GetRegisterNumber() != R0 || reg2.GetRegisterNumber() != R0) { return false; @@ -173,6 +185,9 @@ void AArch64GenProEpilog::TailCallBBOpt(BB &bb, std::set &callInsns, BB & * Return value: true if function do not need Prologue/Epilogue. false otherwise. */ bool AArch64GenProEpilog::TailCallOpt() { + if (cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + return false; + } /* Count how many call insns in the whole function. */ uint32 nCount = 0; bool hasGetStackClass = false; @@ -317,12 +332,12 @@ void AArch64GenProEpilog::GenStackGuard(BB &bb) { MIRSymbol *stkGuardSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( GlobalTables::GetStrTable().GetStrIdxFromName(std::string("__stack_chk_guard"))); StImmOperand &stOpnd = aarchCGFunc.CreateStImmOperand(*stkGuardSym, 0, 0); - AArch64RegOperand &stAddrOpnd = + RegOperand &stAddrOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R9, kSizeOfPtr * kBitsPerByte, kRegTyInt); aarchCGFunc.SelectAddrof(stAddrOpnd, stOpnd); - AArch64MemOperand *guardMemOp = - aarchCGFunc.GetMemoryPool()->New(AArch64MemOperand::kAddrModeBOi, kSizeOfPtr * kBitsPerByte, + MemOperand *guardMemOp = + aarchCGFunc.CreateMemOperand(MemOperand::kAddrModeBOi, kSizeOfPtr * kBitsPerByte, stAddrOpnd, nullptr, &aarchCGFunc.GetOrCreateOfstOpnd(0, k32BitSize), stkGuardSym); MOperator mOp = aarchCGFunc.PickLdInsn(k64BitSize, PTY_u64); Insn &insn = currCG->BuildInstruction(mOp, stAddrOpnd, *guardMemOp); @@ -344,8 +359,8 @@ void AArch64GenProEpilog::GenStackGuard(BB &bb) { if (useFP) { stkSize -= static_cast(static_cast(cgFunc.GetMemlayout())->SizeOfArgsToStackPass()); } - AArch64MemOperand *downStk = aarchCGFunc.GetMemoryPool()->New(stackBaseReg, - stkSize - kOffset8MemPos - static_cast(vArea), kSizeOfPtr * kBitsPerByte); + int32 memSize = stkSize - kOffset8MemPos - static_cast(vArea); + MemOperand *downStk = aarchCGFunc.CreateStackMemOpnd(stackBaseReg, memSize, kSizeOfPtr * kBitsPerByte); if (downStk->GetMemVaryType() == kNotVary && aarchCGFunc.IsImmediateOffsetOutOfRange(*downStk, k64BitSize)) { downStk = &aarchCGFunc.SplitOffsetWithAddInstruction(*downStk, k64BitSize, R10); @@ -375,15 +390,14 @@ BB &AArch64GenProEpilog::GenStackGuardCheckInsn(BB &bb) { const MIRSymbol *stkGuardSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( GlobalTables::GetStrTable().GetStrIdxFromName(std::string("__stack_chk_guard"))); StImmOperand &stOpnd = aarchCGFunc.CreateStImmOperand(*stkGuardSym, 0, 0); - AArch64RegOperand &stAddrOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R9, kSizeOfPtr * 
kBitsPerByte, + RegOperand &stAddrOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R9, kSizeOfPtr * kBitsPerByte, kRegTyInt); aarchCGFunc.SelectAddrof(stAddrOpnd, stOpnd); - AArch64MemOperand *guardMemOp = - cgFunc.GetMemoryPool()->New(AArch64MemOperand::kAddrModeBOi, - kSizeOfPtr * kBitsPerByte, stAddrOpnd, nullptr, - &aarchCGFunc.GetOrCreateOfstOpnd(0, k32BitSize), - stkGuardSym); + MemOperand *guardMemOp = aarchCGFunc.CreateMemOperand(MemOperand::kAddrModeBOi, + kSizeOfPtr * kBitsPerByte, stAddrOpnd, nullptr, + &aarchCGFunc.GetOrCreateOfstOpnd(0, k32BitSize), + stkGuardSym); MOperator mOp = aarchCGFunc.PickLdInsn(k64BitSize, PTY_u64); Insn &insn = currCG->BuildInstruction(mOp, stAddrOpnd, *guardMemOp); insn.SetDoNotRemove(true); @@ -400,16 +414,16 @@ BB &AArch64GenProEpilog::GenStackGuardCheckInsn(BB &bb) { } } - AArch64RegOperand &checkOp = + RegOperand &checkOp = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R10, kSizeOfPtr * kBitsPerByte, kRegTyInt); int32 stkSize = static_cast(static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); if (useFP) { stkSize -= static_cast(static_cast(cgFunc.GetMemlayout())->SizeOfArgsToStackPass()); } - AArch64MemOperand *downStk = aarchCGFunc.GetMemoryPool()->New(stackBaseReg, - stkSize - kOffset8MemPos - static_cast(vArea), kSizeOfPtr * kBitsPerByte); + uint32 memSize = stkSize - kOffset8MemPos - static_cast(vArea); + MemOperand *downStk = aarchCGFunc.CreateStackMemOpnd(stackBaseReg, memSize, kSizeOfPtr * kBitsPerByte); if (downStk->GetMemVaryType() == kNotVary && aarchCGFunc.IsImmediateOffsetOutOfRange(*downStk, k64BitSize)) { - downStk = &aarchCGFunc.SplitOffsetWithAddInstruction(*static_cast(downStk), k64BitSize, R10); + downStk = &aarchCGFunc.SplitOffsetWithAddInstruction(*downStk, k64BitSize, R10); } mOp = aarchCGFunc.PickLdInsn(kSizeOfPtr * kBitsPerByte, PTY_u64); Insn &newInsn = currCG->BuildInstruction(mOp, checkOp, *downStk); @@ -423,8 +437,7 @@ BB &AArch64GenProEpilog::GenStackGuardCheckInsn(BB &bb) { MIRSymbol *failFunc = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( GlobalTables::GetStrTable().GetStrIdxFromName(std::string("__stack_chk_fail"))); - AArch64ListOperand *srcOpnds = - cgFunc.GetMemoryPool()->New(*cgFunc.GetFuncScopeAllocator()); + ListOperand *srcOpnds = aarchCGFunc.CreateListOpnd(*cgFunc.GetFuncScopeAllocator()); Insn &callInsn = aarchCGFunc.AppendCall(*failFunc, *srcOpnds); callInsn.SetDoNotRemove(true); @@ -823,13 +836,11 @@ BB *AArch64GenProEpilog::IsolateFastPath(BB &bb) { return coldBB; } -AArch64MemOperand *AArch64GenProEpilog::SplitStpLdpOffsetForCalleeSavedWithAddInstruction(CGFunc &cgFunc, - const AArch64MemOperand &mo, - uint32 bitLen, - AArch64reg baseRegNum) { +MemOperand *AArch64GenProEpilog::SplitStpLdpOffsetForCalleeSavedWithAddInstruction(CGFunc &cgFunc, + const MemOperand &mo, uint32 bitLen, AArch64reg baseRegNum) { auto &aarchCGFunc = static_cast(cgFunc); - CHECK_FATAL(mo.GetAddrMode() == AArch64MemOperand::kAddrModeBOi, "mode should be kAddrModeBOi"); - AArch64OfstOperand *ofstOp = mo.GetOffsetImmediate(); + CHECK_FATAL(mo.GetAddrMode() == MemOperand::kAddrModeBOi, "mode should be kAddrModeBOi"); + OfstOperand *ofstOp = mo.GetOffsetImmediate(); int32 offsetVal = static_cast(ofstOp->GetOffsetValue()); CHECK_FATAL(offsetVal > 0, "offsetVal should be greater than 0"); CHECK_FATAL((static_cast(offsetVal) & 0x7) == 0, "(offsetVal & 0x7) should be equal to 0"); @@ -837,7 +848,7 @@ AArch64MemOperand *AArch64GenProEpilog::SplitStpLdpOffsetForCalleeSavedWithAddIn * Offset adjustment due to FP/SP has 
already been done * in AArch64GenProEpilog::GeneratePushRegs() and AArch64GenProEpilog::GeneratePopRegs() */ - AArch64RegOperand &br = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(baseRegNum, bitLen, kRegTyInt); + RegOperand &br = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(baseRegNum, bitLen, kRegTyInt); if (aarchCGFunc.GetSplitBaseOffset() == 0) { aarchCGFunc.SetSplitBaseOffset(offsetVal); /* remember the offset; don't forget to clear it */ ImmOperand &immAddEnd = aarchCGFunc.CreateImmOperand(offsetVal, k64BitSize, true); @@ -860,7 +871,7 @@ void AArch64GenProEpilog::AppendInstructionPushPair(CGFunc &cgFunc, uint32 dataSize = kSizeOfPtr * kBitsPerByte; CHECK_FATAL(offset >= 0, "offset must >= 0"); if (offset > kStpLdpImm64UpperBound) { - o2 = SplitStpLdpOffsetForCalleeSavedWithAddInstruction(cgFunc, *static_cast(o2), dataSize, R16); + o2 = SplitStpLdpOffsetForCalleeSavedWithAddInstruction(cgFunc, *static_cast(o2), dataSize, R16); } Insn &pushInsn = currCG->BuildInstruction(mOp, o0, o1, *o2); std::string comment = "SAVE CALLEE REGISTER PAIR"; @@ -887,7 +898,7 @@ void AArch64GenProEpilog::AppendInstructionPushSingle(CGFunc &cgFunc, Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg, kSizeOfPtr * kBitsPerByte, rty); Operand *o1 = &aarchCGFunc.CreateStkTopOpnd(static_cast(offset), kSizeOfPtr * kBitsPerByte); - AArch64MemOperand *aarchMemO1 = static_cast(o1); + MemOperand *aarchMemO1 = static_cast(o1); uint32 dataSize = kSizeOfPtr * kBitsPerByte; if (aarchMemO1->GetMemVaryType() == kNotVary && aarchCGFunc.IsImmediateOffsetOutOfRange(*aarchMemO1, dataSize)) { @@ -924,32 +935,31 @@ Insn &AArch64GenProEpilog::AppendInstructionForAllocateOrDeallocateCallFrame(int } if (argsToStkPassSize <= kStrLdrImm64UpperBound - kOffset8MemPos) { mOp = isAllocate ? 
pushPopOps[kRegsPushOp][rty][kPushPopSingle] : pushPopOps[kRegsPopOp][rty][kPushPopSingle]; - AArch64RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, size * kBitsPerByte, rty); - AArch64MemOperand *o2 = aarchCGFunc.GetMemoryPool()->New(RSP, argsToStkPassSize, - size * kBitsPerByte); + RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, size * kBitsPerByte, rty); + MemOperand *o2 = aarchCGFunc.CreateStackMemOpnd(RSP, argsToStkPassSize, size * kBitsPerByte); Insn &insn1 = currCG->BuildInstruction(mOp, o0, *o2); AppendInstructionTo(insn1, cgFunc); - AArch64RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, size * kBitsPerByte, rty); - o2 = aarchCGFunc.GetMemoryPool()->New(RSP, argsToStkPassSize + size, - size * kBitsPerByte); + RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, size * kBitsPerByte, rty); + o2 = aarchCGFunc.CreateStackMemOpnd(RSP, argsToStkPassSize + size, + size * kBitsPerByte); Insn &insn2 = currCG->BuildInstruction(mOp, o1, *o2); AppendInstructionTo(insn2, cgFunc); return insn2; } else { - AArch64RegOperand &oo = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R9, size * kBitsPerByte, kRegTyInt); - AArch64ImmOperand &io1 = aarchCGFunc.CreateImmOperand(argsToStkPassSize, k64BitSize, true); + RegOperand &oo = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R9, size * kBitsPerByte, kRegTyInt); + ImmOperand &io1 = aarchCGFunc.CreateImmOperand(argsToStkPassSize, k64BitSize, true); aarchCGFunc.SelectCopyImm(oo, io1, PTY_i64); - AArch64RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, size * kBitsPerByte, rty); - AArch64RegOperand &rsp = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, size * kBitsPerByte, kRegTyInt); - AArch64MemOperand *mo = aarchCGFunc.GetMemoryPool()->New( - AArch64MemOperand::kAddrModeBOrX, size * kBitsPerByte, rsp, oo, 0); + RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, size * kBitsPerByte, rty); + RegOperand &rsp = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, size * kBitsPerByte, kRegTyInt); + MemOperand *mo = aarchCGFunc.CreateMemOperand( + MemOperand::kAddrModeBOrX, size * kBitsPerByte, rsp, oo, 0); Insn &insn1 = currCG->BuildInstruction(isAllocate ? MOP_xstr : MOP_xldr, o0, *mo); AppendInstructionTo(insn1, cgFunc); - AArch64ImmOperand &io2 = aarchCGFunc.CreateImmOperand(size, k64BitSize, true); + ImmOperand &io2 = aarchCGFunc.CreateImmOperand(size, k64BitSize, true); aarchCGFunc.SelectAdd(oo, oo, io2, PTY_i64); - AArch64RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, size * kBitsPerByte, rty); - mo = aarchCGFunc.GetMemoryPool()->New(AArch64MemOperand::kAddrModeBOrX, - size * kBitsPerByte, rsp, oo, 0); + RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, size * kBitsPerByte, rty); + mo = aarchCGFunc.CreateMemOperand(MemOperand::kAddrModeBOrX, + size * kBitsPerByte, rsp, oo, 0); Insn &insn2 = currCG->BuildInstruction(isAllocate ? 
MOP_xstr : MOP_xldr, o1, *mo); AppendInstructionTo(insn2, cgFunc); return insn2; @@ -968,8 +978,8 @@ Insn &AArch64GenProEpilog::CreateAndAppendInstructionForAllocateCallFrame(int64 } else { Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, kSizeOfPtr * kBitsPerByte, rty); Operand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, kSizeOfPtr * kBitsPerByte, rty); - Operand *o2 = aarchCGFunc.GetMemoryPool()->New(RSP, argsToStkPassSize, - kSizeOfPtr * kBitsPerByte); + Operand *o2 = aarchCGFunc.CreateStackMemOpnd(RSP, argsToStkPassSize, + kSizeOfPtr * kBitsPerByte); allocInsn = &currCG->BuildInstruction(mOp, o0, o1, *o2); AppendInstructionTo(*allocInsn, cgFunc); } @@ -1027,9 +1037,9 @@ void AArch64GenProEpilog::AppendInstructionAllocateCallFrame(AArch64reg reg0, AA offset = stackFrameSize; } MOperator mOp = pushPopOps[kRegsPushOp][rty][kPushPopPair]; - AArch64RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, kSizeOfPtr * kBitsPerByte, rty); - AArch64RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, kSizeOfPtr * kBitsPerByte, rty); - AArch64MemOperand &o2 = aarchCGFunc.CreateCallFrameOperand(static_cast(-offset), kSizeOfPtr * kBitsPerByte); + RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, kSizeOfPtr * kBitsPerByte, rty); + RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, kSizeOfPtr * kBitsPerByte, rty); + MemOperand &o2 = aarchCGFunc.CreateCallFrameOperand(static_cast(-offset), kSizeOfPtr * kBitsPerByte); ipoint = &currCG->BuildInstruction(mOp, o0, o1, o2); AppendInstructionTo(*ipoint, cgFunc); cfiOffset = offset; @@ -1108,7 +1118,7 @@ void AArch64GenProEpilog::AppendInstructionAllocateCallFrameDebug(AArch64reg reg if (stackFrameSize > kStpLdpImm64UpperBound) { useStpSub = true; - AArch64RegOperand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + RegOperand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); ImmOperand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); aarchCGFunc.SelectSub(spOpnd, spOpnd, immOpnd, PTY_u64); ipoint = cgFunc.GetCurBB()->GetLastInsn(); @@ -1116,9 +1126,9 @@ void AArch64GenProEpilog::AppendInstructionAllocateCallFrameDebug(AArch64reg reg ipoint = InsertCFIDefCfaOffset(cfiOffset, *ipoint); } else { MOperator mOp = pushPopOps[kRegsPushOp][rty][kPushPopPair]; - AArch64RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, kSizeOfPtr * kBitsPerByte, rty); - AArch64RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, kSizeOfPtr * kBitsPerByte, rty); - AArch64MemOperand &o2 = aarchCGFunc.CreateCallFrameOperand(-stackFrameSize, kSizeOfPtr * kBitsPerByte); + RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, kSizeOfPtr * kBitsPerByte, rty); + RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, kSizeOfPtr * kBitsPerByte, rty); + MemOperand &o2 = aarchCGFunc.CreateCallFrameOperand(-stackFrameSize, kSizeOfPtr * kBitsPerByte); ipoint = &currCG->BuildInstruction(mOp, o0, o1, o2); AppendInstructionTo(*ipoint, cgFunc); cfiOffset = stackFrameSize; @@ -1127,9 +1137,9 @@ void AArch64GenProEpilog::AppendInstructionAllocateCallFrameDebug(AArch64reg reg if (useStpSub) { MOperator mOp = pushPopOps[kRegsPushOp][rty][kPushPopPair]; - AArch64RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, kSizeOfPtr * kBitsPerByte, rty); - AArch64RegOperand &o1 = 
aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, kSizeOfPtr * kBitsPerByte, rty); - AArch64MemOperand *o2 = aarchCGFunc.GetMemoryPool()->New(RSP, 0, kSizeOfPtr * kBitsPerByte); + RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, kSizeOfPtr * kBitsPerByte, rty); + RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, kSizeOfPtr * kBitsPerByte, rty); + MemOperand *o2 = aarchCGFunc.CreateStackMemOpnd(RSP, 0, kSizeOfPtr * kBitsPerByte); ipoint = &currCG->BuildInstruction(mOp, o0, o1, *o2); AppendInstructionTo(*ipoint, cgFunc); } @@ -1198,9 +1208,16 @@ void AArch64GenProEpilog::GeneratePushRegs() { Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); Operand &fpOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(stackBaseReg, k64BitSize, kRegTyInt); int64 argsToStkPassSize = cgFunc.GetMemlayout()->SizeOfArgsToStackPass(); - if (argsToStkPassSize > 0) { - Operand &immOpnd = aarchCGFunc.CreateImmOperand(argsToStkPassSize, k32BitSize, true); - aarchCGFunc.SelectAdd(fpOpnd, spOpnd, immOpnd, PTY_u64); + bool isLmbc = cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc; + if ((argsToStkPassSize > 0) || isLmbc) { + Operand *immOpnd; + if (isLmbc) { + int32 size = static_cast(static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + immOpnd = &aarchCGFunc.CreateImmOperand(size, k32BitSize, true); + } else { + immOpnd = &aarchCGFunc.CreateImmOperand(argsToStkPassSize, k32BitSize, true); + } + aarchCGFunc.SelectAdd(fpOpnd, spOpnd, *immOpnd, PTY_u64); cgFunc.GetCurBB()->GetLastInsn()->SetFrameDef(true); if (cgFunc.GenCfi()) { cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiDefCfaInsn(stackBaseReg, @@ -1337,7 +1354,7 @@ void AArch64GenProEpilog::AppendInstructionStackCheck(AArch64reg reg, RegType rt aarchCGFunc.SelectSub(x16Opnd, spOpnd, imm1, PTY_u64); /* ldr wzr, [x16] */ - auto &wzr = AArch64RegOperand::Get32bitZeroRegister(); + auto &wzr = cgFunc.GetZeroOpnd(k32BitSize); auto &refX16 = aarchCGFunc.CreateMemOpnd(reg, 0, k64BitSize); auto &soeInstr = currCG->BuildInstruction(MOP_wldr, wzr, refX16); if (currCG->GenerateVerboseCG()) { @@ -1416,10 +1433,17 @@ void AArch64GenProEpilog::GenerateProlog(BB &bb) { } if (useFP) { Operand &fpOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(stackBaseReg, k64BitSize, kRegTyInt); + bool isLmbc = cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc; int64 argsToStkPassSize = cgFunc.GetMemlayout()->SizeOfArgsToStackPass(); - if (argsToStkPassSize > 0) { - Operand &immOpnd = aarchCGFunc.CreateImmOperand(argsToStkPassSize, k32BitSize, true); - aarchCGFunc.SelectAdd(fpOpnd, spOpnd, immOpnd, PTY_u64); + if ((argsToStkPassSize > 0) || isLmbc) { + Operand *immOpnd; + if (isLmbc) { + int32 size = static_cast(static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + immOpnd = &aarchCGFunc.CreateImmOperand(size, k32BitSize, true); + } else { + immOpnd = &aarchCGFunc.CreateImmOperand(argsToStkPassSize, k32BitSize, true); + } + aarchCGFunc.SelectAdd(fpOpnd, spOpnd, *immOpnd, PTY_u64); cgFunc.GetCurBB()->GetLastInsn()->SetFrameDef(true); if (cgFunc.GenCfi()) { cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiDefCfaInsn( @@ -1489,7 +1513,7 @@ void AArch64GenProEpilog::AppendInstructionPopSingle(CGFunc &cgFunc, AArch64reg MOperator mOp = pushPopOps[kRegsPopOp][rty][kPushPopSingle]; Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg, kSizeOfPtr * kBitsPerByte, rty); Operand *o1 = &aarchCGFunc.CreateStkTopOpnd(static_cast(offset), kSizeOfPtr * kBitsPerByte); 
- AArch64MemOperand *aarchMemO1 = static_cast(o1); + MemOperand *aarchMemO1 = static_cast(o1); uint32 dataSize = kSizeOfPtr * kBitsPerByte; if (aarchMemO1->GetMemVaryType() == kNotVary && aarchCGFunc.IsImmediateOffsetOutOfRange(*aarchMemO1, dataSize)) { o1 = &aarchCGFunc.SplitOffsetWithAddInstruction(*aarchMemO1, dataSize, R9); @@ -1518,7 +1542,7 @@ void AArch64GenProEpilog::AppendInstructionPopPair(CGFunc &cgFunc, CHECK_FATAL(offset >= 0, "offset must >= 0"); if (offset > kStpLdpImm64UpperBound) { o2 = SplitStpLdpOffsetForCalleeSavedWithAddInstruction(cgFunc, - *static_cast(o2), dataSize, R16); + static_cast(*o2), dataSize, R16); } Insn &popInsn = currCG->BuildInstruction(mOp, o0, o1, *o2); popInsn.SetComment("RESTORE RESTORE"); @@ -1550,7 +1574,7 @@ void AArch64GenProEpilog::AppendInstructionDeallocateCallFrame(AArch64reg reg0, Operand *o2 = nullptr; if (!cgFunc.HasVLAOrAlloca() && argsToStkPassSize > 0) { - o2 = aarchCGFunc.GetMemoryPool()->New(RSP, argsToStkPassSize, kSizeOfPtr * kBitsPerByte); + o2 = aarchCGFunc.CreateStackMemOpnd(RSP, argsToStkPassSize, kSizeOfPtr * kBitsPerByte); } else { if (stackFrameSize > kStpLdpImm64UpperBound) { useLdpAdd = true; @@ -1616,7 +1640,7 @@ void AArch64GenProEpilog::AppendInstructionDeallocateCallFrameDebug(AArch64reg r stackFrameSize -= argsToStkPassSize; if (stackFrameSize > kStpLdpImm64UpperBound) { Operand *o2; - o2 = aarchCGFunc.GetMemoryPool()->New(RSP, 0, kSizeOfPtr * kBitsPerByte); + o2 = aarchCGFunc.CreateStackMemOpnd(RSP, 0, kSizeOfPtr * kBitsPerByte); Insn &deallocInsn = currCG->BuildInstruction(mOp, o0, o1, *o2); cgFunc.GetCurBB()->AppendInsn(deallocInsn); if (cgFunc.GenCfi()) { @@ -1630,7 +1654,7 @@ void AArch64GenProEpilog::AppendInstructionDeallocateCallFrameDebug(AArch64reg r Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); aarchCGFunc.SelectAdd(spOpnd, spOpnd, immOpnd, PTY_u64); } else { - AArch64MemOperand &o2 = aarchCGFunc.CreateCallFrameOperand(stackFrameSize, kSizeOfPtr * kBitsPerByte); + MemOperand &o2 = aarchCGFunc.CreateCallFrameOperand(stackFrameSize, kSizeOfPtr * kBitsPerByte); Insn &deallocInsn = currCG->BuildInstruction(mOp, o0, o1, o2); cgFunc.GetCurBB()->AppendInsn(deallocInsn); if (cgFunc.GenCfi()) { @@ -1642,7 +1666,7 @@ void AArch64GenProEpilog::AppendInstructionDeallocateCallFrameDebug(AArch64reg r } } else { Operand *o2; - o2 = aarchCGFunc.GetMemoryPool()->New(RSP, argsToStkPassSize, kSizeOfPtr * kBitsPerByte); + o2 = aarchCGFunc.CreateStackMemOpnd(RSP, argsToStkPassSize, kSizeOfPtr * kBitsPerByte); if (argsToStkPassSize > kStpLdpImm64UpperBound) { (void)AppendInstructionForAllocateOrDeallocateCallFrame(argsToStkPassSize, reg0, reg1, rty, false); } else { @@ -1755,7 +1779,7 @@ void AArch64GenProEpilog::GeneratePopRegs() { void AArch64GenProEpilog::AppendJump(const MIRSymbol &funcSymbol) { auto &aarchCGFunc = static_cast(cgFunc); CG *currCG = cgFunc.GetCG(); - Operand &targetOpnd = aarchCGFunc.CreateFuncLabelOperand(funcSymbol); + Operand &targetOpnd = aarchCGFunc.GetOrCreateFuncNameOpnd(funcSymbol); cgFunc.GetCurBB()->AppendInsn(currCG->BuildInstruction(MOP_xuncond, targetOpnd)); } @@ -1814,7 +1838,7 @@ void AArch64GenProEpilog::GenerateEpilog(BB &bb) { } if (cgFunc.HasVLAOrAlloca()) { - auto size = static_cast(cgFunc.GetMemlayout())->GetSegArgsStkPass().GetSize(); + auto size = static_cast(cgFunc.GetMemlayout())->GetSegArgsToStkPass().GetSize(); stackFrameSize = stackFrameSize < size ? 
0 : stackFrameSize - size; } diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_prop.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_prop.cpp index 4f9f2a1b5fd46a66a9ba0ccccb900495efa54aaa..ce942aa535b4b8cb46cc41b91acb08f9cb4bdbf1 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_prop.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_prop.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -15,6 +15,7 @@ #include "aarch64_prop.h" #include "aarch64_isa.h" #include "aarch64_cg.h" +#include "aarch64_reg_coalesce.h" #include namespace maplebe { @@ -33,10 +34,10 @@ bool AArch64Prop::IsInLimitCopyRange(VRegVersion *toBeReplaced) { } void AArch64Prop::CopyProp() { - PropOptimizeManager optManager(*cgFunc, GetSSAInfo()); - optManager.Optimize(); - optManager.Optimize(); - optManager.Optimize(); + PropOptimizeManager optManager; + optManager.Optimize(*cgFunc, GetSSAInfo(), GetRegll()); + optManager.Optimize(*cgFunc, GetSSAInfo()); + optManager.Optimize(*cgFunc, GetSSAInfo()); } void AArch64Prop::TargetProp(Insn &insn) { @@ -56,7 +57,7 @@ void A64ConstProp::DoOpt() { ASSERT(destVersion != nullptr, "find Version failed"); Operand &constOpnd = curInsn->GetOperand(kInsnSecondOpnd); CHECK_FATAL(constOpnd.IsImmediate(), "must be imm operand"); - auto &immOperand = static_cast(constOpnd); + auto &immOperand = static_cast(constOpnd); bool isZero = immOperand.IsZero(); for (auto useDUInfoIt : destVersion->GetAllUseInsns()) { if (isZero) { @@ -73,11 +74,10 @@ void A64ConstProp::ZeroRegProp(DUInsnInfo &useDUInfo, RegOperand &toReplaceReg) auto *useInsn = static_cast(useDUInfo.GetInsn()); const AArch64MD *md = &AArch64CG::kMd[(useInsn->GetMachineOpcode())]; /* special case */ - bool isSpecficCase = useInsn->GetMachineOpcode() == MPO_wbfirri5i5 || useInsn->GetMachineOpcode() == MPO_xbfirri6i6; + bool isSpecficCase = useInsn->GetMachineOpcode() == MOP_wbfirri5i5 || useInsn->GetMachineOpcode() == MOP_xbfirri6i6; isSpecficCase &= (useDUInfo.GetOperands().size() == 1) && (useDUInfo.GetOperands().begin()->first == kInsnSecondOpnd); if (useInsn->IsStore() || md->IsCondDef() || isSpecficCase) { - AArch64RegOperand &zeroOpnd = toReplaceReg.GetSize() == k64BitSize ? 
- AArch64RegOperand::Get64bitZeroRegister() : AArch64RegOperand::Get32bitZeroRegister(); + RegOperand &zeroOpnd = cgFunc->GetZeroOpnd(toReplaceReg.GetSize()); for (auto &opndIt : useDUInfo.GetOperands()) { if (useInsn->IsStore() && opndIt.first != 0) { return; @@ -99,6 +99,8 @@ MOperator A64ConstProp::GetReversalMOP(MOperator arithMop) { return MOP_xsubrri12; case MOP_xsubrri12: return MOP_xaddrri12; + case MOP_wsubrri12: + return MOP_waddrri12; default: CHECK_FATAL(false, "NYI"); break; @@ -133,8 +135,10 @@ MOperator A64ConstProp::GetRegImmMOP(MOperator regregMop, bool withLeftShift) { case MOP_weorrrrs: return MOP_weorrri12; case MOP_xiorrrrs: + case MOP_xbfirri6i6: return MOP_xiorrri13; case MOP_wiorrrrs: + case MOP_wbfirri5i5: return MOP_wiorrri12; case MOP_xmovrr: { return MOP_xmovri64; @@ -149,13 +153,86 @@ MOperator A64ConstProp::GetRegImmMOP(MOperator regregMop, bool withLeftShift) { return MOP_undef; } +MOperator A64ConstProp::GetFoldMopAndVal(int64 &newVal, int64 constVal, Insn &arithInsn) { + MOperator arithMop = arithInsn.GetMachineOpcode(); + MOperator newMop = MOP_undef; + switch(arithMop) { + case MOP_waddrrr: + case MOP_xaddrrr: { + newVal = constVal + constVal; + newMop = (arithMop == MOP_waddrrr) ? MOP_xmovri32 : MOP_xmovri64; + break; + } + case MOP_waddrrrs: + case MOP_xaddrrrs: { + auto &shiftOpnd = static_cast(arithInsn.GetOperand(kInsnFourthOpnd)); + uint32 amount = shiftOpnd.GetShiftAmount(); + BitShiftOperand::ShiftOp sOp = shiftOpnd.GetShiftOp(); + switch(sOp) { + case BitShiftOperand::kLSL: { + newVal = constVal + ((unsigned)constVal << amount); + break; + } + case BitShiftOperand::kLSR: { + newVal = constVal + ((unsigned)constVal >> amount); + break; + } + case BitShiftOperand::kASR: { + newVal = constVal + (constVal >> amount); + break; + } + default: + CHECK_FATAL(false, "NYI"); + break; + } + newMop = (arithMop == MOP_waddrrrs) ? MOP_xmovri32 : MOP_xmovri64; + break; + } + case MOP_wsubrrr: + case MOP_xsubrrr: { + newVal = 0; + newMop = (arithMop == MOP_wsubrrr) ? MOP_xmovri32 : MOP_xmovri64; + break; + } + case MOP_wsubrrrs: + case MOP_xsubrrrs: { + auto &shiftOpnd = static_cast(arithInsn.GetOperand(kInsnFourthOpnd)); + uint32 amount = shiftOpnd.GetShiftAmount(); + BitShiftOperand::ShiftOp sOp = shiftOpnd.GetShiftOp(); + switch(sOp) { + case BitShiftOperand::kLSL: { + newVal = constVal - ((unsigned)constVal << amount); + break; + } + case BitShiftOperand::kLSR: { + newVal = constVal - ((unsigned)constVal >> amount); + break; + } + case BitShiftOperand::kASR: { + newVal = constVal - (constVal >> amount); + break; + } + default: + CHECK_FATAL(false, "NYI"); + break; + } + newMop = (arithMop == MOP_wsubrrrs) ? 
MOP_xmovri32 : MOP_xmovri64; + break; + } + default: + ASSERT(false, "this case is not supported currently"); + break; + } + return newMop; +} + void A64ConstProp::ReplaceInsnAndUpdateSSA(Insn &oriInsn, Insn &newInsn) { ssaInfo->ReplaceInsn(oriInsn, newInsn); oriInsn.GetBB()->ReplaceInsn(oriInsn, newInsn); /* dump insn replacement here */ } -bool A64ConstProp::MovConstReplace(DUInsnInfo &useDUInfo, AArch64ImmOperand &constOpnd) { +bool A64ConstProp::MovConstReplace(DUInsnInfo &useDUInfo, ImmOperand &constOpnd) { Insn *useInsn = useDUInfo.GetInsn(); MOperator curMop = useInsn->GetMachineOpcode(); if (useDUInfo.GetOperands().size() == 1) { @@ -178,7 +255,7 @@ bool A64ConstProp::MovConstReplace(DUInsnInfo &useDUInfo, AArch64ImmOperand &con } /* support add now */ -bool A64ConstProp::ArithmeticConstReplace(DUInsnInfo &useDUInfo, AArch64ImmOperand &constOpnd, ArithmeticType aT) { +bool A64ConstProp::ArithmeticConstReplace(DUInsnInfo &useDUInfo, ImmOperand &constOpnd, ArithmeticType aT) { Insn *useInsn = useDUInfo.GetInsn(); MOperator curMop = useInsn->GetMachineOpcode(); if (useDUInfo.GetOperands().size() == 1) { @@ -196,17 +273,18 @@ bool A64ConstProp::ArithmeticConstReplace(DUInsnInfo &useDUInfo, AArch64ImmOpera } /* try aggressive opt in aarch64 add and sub */ if (newInsn == nullptr && (aT == kAArch64Add || aT == kAArch64Sub)) { - auto *tempImm = static_cast(constOpnd.Clone(*constPropMp)); + auto *tempImm = static_cast(constOpnd.Clone(*constPropMp)); /* try aarch64 imm shift mode */ tempImm->SetValue(tempImm->GetValue() >> 12); - if (static_cast(cgFunc)->IsOperandImmValid(newMop, tempImm, kInsnThirdOpnd)) { + if (static_cast(cgFunc)->IsOperandImmValid(newMop, tempImm, kInsnThirdOpnd) && + CGOptions::GetInstance().GetOptimizeLevel() < 0) { ASSERT(false, "NIY"); } /* Addition and subtraction reversal */ tempImm->SetValue(-constOpnd.GetValue()); newMop = GetReversalMOP(newMop); if (static_cast(cgFunc)->IsOperandImmValid(newMop, tempImm, kInsnThirdOpnd)) { - auto *cgImm = static_cast(tempImm->Clone(*cgFunc->GetMemoryPool())); + auto *cgImm = static_cast(tempImm->Clone(*cgFunc->GetMemoryPool())); newInsn = &cgFunc->GetCG()->BuildInstruction( newMop, useInsn->GetOperand(kInsnFirstOpnd), useInsn->GetOperand(kInsnSecondOpnd), *cgImm); if (useOpndIdx == kInsnSecondOpnd) { /* swap operand due to legality in aarch */ @@ -219,15 +297,29 @@ bool A64ConstProp::ArithmeticConstReplace(DUInsnInfo &useDUInfo, AArch64ImmOpera return true; } } else if (useDUInfo.GetOperands().size() == 2) { - /* no case in SPEC 2017 */ - ASSERT(false, "should be optimized by other phase"); + /* only support add & sub now */ + int64 newValue = 0; + MOperator newMop = GetFoldMopAndVal(newValue, constOpnd.GetValue(), *useInsn); + bool isSigned = (newValue < 0); + auto *tempImm = static_cast(constOpnd.Clone(*constPropMp)); + tempImm->SetValue(newValue); + tempImm->SetSigned(isSigned); + if (tempImm->IsSingleInstructionMovable()) { + auto *newImmOpnd = static_cast(tempImm->Clone(*cgFunc->GetMemoryPool())); + auto &newInsn = cgFunc->GetCG()->BuildInstruction( + newMop, useInsn->GetOperand(kInsnFirstOpnd), *newImmOpnd); + ReplaceInsnAndUpdateSSA(*useInsn, newInsn); + return true; + } else { + CHECK_FATAL(false, "invalid immediate"); + } } else { ASSERT(false, "invalid instruction in ssa form"); } return false; } -bool A64ConstProp::ArithmeticConstFold(DUInsnInfo &useDUInfo, const AArch64ImmOperand &constOpnd, +bool A64ConstProp::ArithmeticConstFold(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd, ArithmeticType aT) { Insn *useInsn 
= useDUInfo.GetInsn(); if (useDUInfo.GetOperands().size() == 1) { @@ -235,7 +327,7 @@ bool A64ConstProp::ArithmeticConstFold(DUInsnInfo &useDUInfo, const AArch64ImmOp ASSERT(existedImm.IsImmediate(), "must be"); Operand &destOpnd = useInsn->GetOperand(kInsnFirstOpnd); bool is64Bit = destOpnd.GetSize() == k64BitSize; - AArch64ImmOperand *foldConst = CanDoConstFold(constOpnd, static_cast(existedImm), aT, is64Bit); + ImmOperand *foldConst = CanDoConstFold(constOpnd, static_cast(existedImm), aT, is64Bit); if (foldConst != nullptr) { MOperator newMop = is64Bit ? MOP_xmovri64 : MOP_xmovri32; Insn &newInsn = cgFunc->GetCG()->BuildInstruction(newMop, destOpnd, *foldConst); @@ -246,7 +338,7 @@ bool A64ConstProp::ArithmeticConstFold(DUInsnInfo &useDUInfo, const AArch64ImmOp return false; } -bool A64ConstProp::ShiftConstReplace(DUInsnInfo &useDUInfo, const AArch64ImmOperand &constOpnd) { +bool A64ConstProp::ShiftConstReplace(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd) { Insn *useInsn = useDUInfo.GetInsn(); MOperator curMop = useInsn->GetMachineOpcode(); if (useDUInfo.GetOperands().size() == 1) { @@ -264,11 +356,11 @@ bool A64ConstProp::ShiftConstReplace(DUInsnInfo &useDUInfo, const AArch64ImmOper } else { CHECK_FATAL(false, "shift type is not defined"); } - auto *newImm = static_cast(constOpnd.Clone(*constPropMp)); + auto *newImm = static_cast(constOpnd.Clone(*constPropMp)); newImm->SetValue(val); MOperator newMop = GetRegImmMOP(curMop, false); if (static_cast(cgFunc)->IsOperandImmValid(newMop, newImm, kInsnThirdOpnd)) { - auto *cgNewImm = static_cast(constOpnd.Clone(*cgFunc->GetMemoryPool())); + auto *cgNewImm = static_cast(constOpnd.Clone(*cgFunc->GetMemoryPool())); Insn &newInsn = cgFunc->GetCG()->BuildInstruction( newMop, useInsn->GetOperand(kInsnFirstOpnd), useInsn->GetOperand(kInsnSecondOpnd), *cgNewImm); ReplaceInsnAndUpdateSSA(*useInsn, newInsn); @@ -279,7 +371,7 @@ bool A64ConstProp::ShiftConstReplace(DUInsnInfo &useDUInfo, const AArch64ImmOper return false; } -bool A64ConstProp::ConstProp(DUInsnInfo &useDUInfo, AArch64ImmOperand &constOpnd) { +bool A64ConstProp::ConstProp(DUInsnInfo &useDUInfo, ImmOperand &constOpnd) { MOperator curMop = useDUInfo.GetInsn()->GetMachineOpcode(); switch (curMop) { case MOP_xmovrr: @@ -297,7 +389,6 @@ bool A64ConstProp::ConstProp(DUInsnInfo &useDUInfo, AArch64ImmOperand &constOpnd case MOP_waddrri12: case MOP_xaddrri12: { return ArithmeticConstFold(useDUInfo, constOpnd, kAArch64Add); - } case MOP_xsubrri12: case MOP_wsubrri12: { @@ -315,15 +406,49 @@ bool A64ConstProp::ConstProp(DUInsnInfo &useDUInfo, AArch64ImmOperand &constOpnd case MOP_xsubrrrs: { return ShiftConstReplace(useDUInfo, constOpnd); } + case MOP_wbfirri5i5: + case MOP_xbfirri6i6: { + return BitInsertReplace(useDUInfo, constOpnd); + } default: break; } return false; } -AArch64ImmOperand *A64ConstProp::CanDoConstFold( - const AArch64ImmOperand &value1, const AArch64ImmOperand &value2, ArithmeticType aT, bool is64Bit) { - auto *tempImm = static_cast(value1.Clone(*constPropMp)); +bool A64ConstProp::BitInsertReplace(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd) { + Insn *useInsn = useDUInfo.GetInsn(); + MOperator curMop = useInsn->GetMachineOpcode(); + if (useDUInfo.GetOperands().size() == 1) { + auto useOpndInfoIt = useDUInfo.GetOperands().begin(); + uint32 useOpndIdx = useOpndInfoIt->first; + if (useOpndIdx == kInsnSecondOpnd) { + auto &lsbOpnd = static_cast(useInsn->GetOperand(kInsnThirdOpnd)); + auto &widthOpnd = static_cast(useInsn->GetOperand(kInsnFourthOpnd)); + auto val = 
static_cast(constOpnd.GetValue()); + /* bfi width in the range [1 -64] */ + auto width = static_cast(widthOpnd.GetValue()); + /* bit number of the lsb of the destination bitfield */ + auto lsb = static_cast(lsbOpnd.GetValue()); + val = val & ((1U << width) - 1U); + if (__builtin_popcountl(val) == width) { + val = val << lsb; + MOperator newMop = GetRegImmMOP(curMop, false); + Operand &newOpnd = cgFunc->CreateImmOperand(PTY_i64, val); + if (static_cast(cgFunc)->IsOperandImmValid(newMop, &newOpnd, kInsnThirdOpnd)) { + Insn &newInsn = cgFunc->GetCG()->BuildInstruction(newMop, useInsn->GetOperand(kInsnFirstOpnd), useInsn->GetOperand(kInsnFirstOpnd), newOpnd); + ReplaceInsnAndUpdateSSA(*useInsn, newInsn); + return true; + } + } + } + } + return false; +} + +ImmOperand *A64ConstProp::CanDoConstFold( + const ImmOperand &value1, const ImmOperand &value2, ArithmeticType aT, bool is64Bit) { + auto *tempImm = static_cast(value1.Clone(*constPropMp)); int64 newVal = 0; switch (aT) { case kAArch64Add : { @@ -348,7 +473,7 @@ AArch64ImmOperand *A64ConstProp::CanDoConstFold( tempImm->SetVary(kUnAdjustVary); } bool canBeMove = tempImm->IsSingleInstructionMovable(k64BitSize); - return canBeMove ? static_cast(tempImm->Clone(*cgFunc->GetMemoryPool())): nullptr; + return canBeMove ? static_cast(tempImm->Clone(*cgFunc->GetMemoryPool())): nullptr; } void A64StrLdrProp::DoOpt() { @@ -356,7 +481,7 @@ void A64StrLdrProp::DoOpt() { bool tryOptAgain = false; do { tryOptAgain = false; - AArch64MemOperand *currMemOpnd = StrLdrPropPreCheck(*curInsn); + MemOperand *currMemOpnd = StrLdrPropPreCheck(*curInsn); if (currMemOpnd != nullptr && memPropMode != kUndef) { /* can be changed to recursive propagation */ if (ReplaceMemOpnd(*currMemOpnd, nullptr)) { @@ -367,7 +492,7 @@ void A64StrLdrProp::DoOpt() { } while (tryOptAgain); } -bool A64StrLdrProp::ReplaceMemOpnd(const AArch64MemOperand &currMemOpnd, const Insn *defInsn) { +bool A64StrLdrProp::ReplaceMemOpnd(const MemOperand &currMemOpnd, const Insn *defInsn) { auto GetDefInsn = [&defInsn, this](const RegOperand ®Opnd, std::vector &allUseInsns)->void{ if (regOpnd.IsSSAForm() && defInsn == nullptr) { @@ -382,22 +507,22 @@ bool A64StrLdrProp::ReplaceMemOpnd(const AArch64MemOperand &currMemOpnd, const I }; RegOperand *replacedReg = nullptr; std::vector allUseInsns; - std::vector newMemOpnds; + std::vector newMemOpnds; bool doFullReplaceProp = true; /* due to register pressure, do not do partial prop */ if (memPropMode == kPropBase) { replacedReg = currMemOpnd.GetBaseRegister(); } else { Operand *offset = currMemOpnd.GetOffset(); ASSERT(offset->IsRegister(), "must be"); - replacedReg = static_cast(offset); + replacedReg = static_cast(offset); } CHECK_FATAL(replacedReg != nullptr, "check this insn"); GetDefInsn(*replacedReg, allUseInsns); if (defInsn != nullptr) { for (auto useInsn : allUseInsns) { - AArch64MemOperand *oldMemOpnd = StrLdrPropPreCheck(*useInsn, memPropMode); + MemOperand *oldMemOpnd = StrLdrPropPreCheck(*useInsn, memPropMode); if (CheckSameReplace(*replacedReg, oldMemOpnd)) { - AArch64MemOperand *newMemOpnd = SelectReplaceMem(*defInsn, *oldMemOpnd); + MemOperand *newMemOpnd = SelectReplaceMem(*defInsn, *oldMemOpnd); if (newMemOpnd != nullptr) { uint32 opndIdx = GetMemOpndIdx(oldMemOpnd, *useInsn); if (CheckNewMemOffset(*useInsn, newMemOpnd, opndIdx)) { @@ -421,20 +546,20 @@ bool A64StrLdrProp::ReplaceMemOpnd(const AArch64MemOperand &currMemOpnd, const I return false; } -bool A64StrLdrProp::CheckSameReplace(const RegOperand &replacedReg, const AArch64MemOperand 
*memOpnd) { +bool A64StrLdrProp::CheckSameReplace(const RegOperand &replacedReg, const MemOperand *memOpnd) { if (memOpnd != nullptr && memPropMode != kUndef) { if (memPropMode == kPropBase) { return replacedReg.GetRegisterNumber() == memOpnd->GetBaseRegister()->GetRegisterNumber(); } else { Operand *offset = memOpnd->GetOffset(); ASSERT(offset->IsRegister(), "must be"); - return replacedReg.GetRegisterNumber() == static_cast(offset)->GetRegisterNumber(); + return replacedReg.GetRegisterNumber() == static_cast(offset)->GetRegisterNumber(); } } return false; } -uint32 A64StrLdrProp::GetMemOpndIdx(AArch64MemOperand *newMemOpnd, const Insn &insn) { +uint32 A64StrLdrProp::GetMemOpndIdx(MemOperand *newMemOpnd, const Insn &insn) { uint32 opndIdx = kInsnMaxOpnd; if (insn.IsLoadPair() || insn.IsStorePair()) { ASSERT(newMemOpnd->GetOffsetImmediate() != nullptr, "unexpect insn"); @@ -445,7 +570,7 @@ uint32 A64StrLdrProp::GetMemOpndIdx(AArch64MemOperand *newMemOpnd, const Insn &i return opndIdx; } -void A64StrLdrProp::DoMemReplace(const RegOperand &replacedReg, AArch64MemOperand &newMem, Insn &useInsn) { +void A64StrLdrProp::DoMemReplace(const RegOperand &replacedReg, MemOperand &newMem, Insn &useInsn) { VRegVersion *replacedV = ssaInfo->FindSSAVersion(replacedReg.GetRegisterNumber()); ASSERT(replacedV != nullptr, "must in ssa form"); uint32 opndIdx = GetMemOpndIdx(&newMem, useInsn); @@ -459,13 +584,13 @@ void A64StrLdrProp::DoMemReplace(const RegOperand &replacedReg, AArch64MemOperan useInsn.SetOperand(opndIdx, newMem); } -AArch64MemOperand *A64StrLdrProp::StrLdrPropPreCheck(const Insn &insn, MemPropMode prevMod) { +MemOperand *A64StrLdrProp::StrLdrPropPreCheck(const Insn &insn, MemPropMode prevMod) { memPropMode = kUndef; if (insn.IsLoad() || insn.IsStore()) { if (insn.IsAtomic() || insn.GetOperand(0).GetSize() == k128BitSize) { return nullptr; } - auto *currMemOpnd = static_cast(insn.GetMemOpnd()); + auto *currMemOpnd = static_cast(insn.GetMemOpnd()); if (currMemOpnd != nullptr) { memPropMode = SelectStrLdrPropMode(*currMemOpnd); if (prevMod != kUndef) { @@ -480,17 +605,17 @@ AArch64MemOperand *A64StrLdrProp::StrLdrPropPreCheck(const Insn &insn, MemPropMo return nullptr; } -MemPropMode A64StrLdrProp::SelectStrLdrPropMode(const AArch64MemOperand &currMemOpnd) { - AArch64MemOperand::AArch64AddressingMode currAddrMode = currMemOpnd.GetAddrMode(); +MemPropMode A64StrLdrProp::SelectStrLdrPropMode(const MemOperand &currMemOpnd) { + MemOperand::AArch64AddressingMode currAddrMode = currMemOpnd.GetAddrMode(); MemPropMode innerMemPropMode = kUndef; switch (currAddrMode) { - case AArch64MemOperand::kAddrModeBOi: { + case MemOperand::kAddrModeBOi: { if (!currMemOpnd.IsPreIndexed() && !currMemOpnd.IsPostIndexed()) { innerMemPropMode = kPropBase; } break; } - case AArch64MemOperand::kAddrModeBOrX: { + case MemOperand::kAddrModeBOrX: { innerMemPropMode = kPropOffset; auto amount = currMemOpnd.ShiftAmount(); if (currMemOpnd.GetExtendAsString() == "LSL") { @@ -511,17 +636,17 @@ MemPropMode A64StrLdrProp::SelectStrLdrPropMode(const AArch64MemOperand &currMem return innerMemPropMode; } -AArch64MemOperand *A64StrLdrProp::SelectReplaceMem(const Insn &defInsn, const AArch64MemOperand &currMemOpnd) { - AArch64MemOperand *newMemOpnd = nullptr; +MemOperand *A64StrLdrProp::SelectReplaceMem(const Insn &defInsn, const MemOperand &currMemOpnd) { + MemOperand *newMemOpnd = nullptr; Operand *offset = currMemOpnd.GetOffset(); RegOperand *base = currMemOpnd.GetBaseRegister(); MOperator opCode = defInsn.GetMachineOpcode(); switch 
(opCode) { case MOP_xsubrri12: case MOP_wsubrri12: { - AArch64RegOperand *replace = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnSecondOpnd))); + RegOperand *replace = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnSecondOpnd))); if (replace != nullptr) { - auto &immOpnd = static_cast(defInsn.GetOperand(kInsnThirdOpnd)); + auto &immOpnd = static_cast(defInsn.GetOperand(kInsnThirdOpnd)); int64 defVal = -(immOpnd.GetValue()); newMemOpnd = HandleArithImmDef(*replace, offset, defVal, currMemOpnd.GetSize()); } @@ -529,9 +654,9 @@ AArch64MemOperand *A64StrLdrProp::SelectReplaceMem(const Insn &defInsn, const A } case MOP_xaddrri12: case MOP_waddrri12: { - AArch64RegOperand *replace = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnSecondOpnd))); + RegOperand *replace = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnSecondOpnd))); if (replace != nullptr) { - auto &immOpnd = static_cast(defInsn.GetOperand(kInsnThirdOpnd)); + auto &immOpnd = static_cast(defInsn.GetOperand(kInsnThirdOpnd)); int64 defVal = immOpnd.GetValue(); newMemOpnd = HandleArithImmDef(*replace, offset, defVal, currMemOpnd.GetSize()); } @@ -542,36 +667,59 @@ AArch64MemOperand *A64StrLdrProp::SelectReplaceMem(const Insn &defInsn, const A case MOP_dadd: case MOP_sadd: { if (memPropMode == kPropBase) { - auto *ofstOpnd = static_cast(offset); + auto *ofstOpnd = static_cast(offset); if (!ofstOpnd->IsZero()) { break; } - AArch64RegOperand *replace = GetReplaceReg( - static_cast(defInsn.GetOperand(kInsnSecondOpnd))); - AArch64RegOperand *newOfst = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnThirdOpnd))); + RegOperand *replace = GetReplaceReg( + static_cast(defInsn.GetOperand(kInsnSecondOpnd))); + RegOperand *newOfst = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnThirdOpnd))); if (replace != nullptr && newOfst != nullptr) { - newMemOpnd = cgFunc->GetMemoryPool()->New( - AArch64MemOperand::kAddrModeBOrX, currMemOpnd.GetSize(), *replace, newOfst, nullptr, nullptr); + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeBOrX, currMemOpnd.GetSize(), *replace, newOfst, nullptr, nullptr); + } + } + break; + } + case MOP_xaddrrrs: + case MOP_waddrrrs: { + if (memPropMode == kPropBase) { + auto *ofstOpnd = static_cast(offset); + if (!ofstOpnd->IsZero()) { + break; + } + RegOperand *newBaseOpnd = GetReplaceReg( + static_cast(defInsn.GetOperand(kInsnSecondOpnd))); + RegOperand *newIndexOpnd = GetReplaceReg( + static_cast(defInsn.GetOperand(kInsnThirdOpnd))); + auto &shift = static_cast(defInsn.GetOperand(kInsnFourthOpnd)); + if (shift.GetShiftOp() != BitShiftOperand::kLSL) { + break; + } + if (newBaseOpnd != nullptr && newIndexOpnd != nullptr) { + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeBOrX, currMemOpnd.GetSize(), *newBaseOpnd, *newIndexOpnd, + shift.GetShiftAmount(), false); } } break; } case MOP_xadrpl12: { if (memPropMode == kPropBase) { - auto *ofstOpnd = static_cast(offset); + auto *ofstOpnd = static_cast(offset); CHECK_FATAL(ofstOpnd != nullptr, "oldOffset is null!"); int64 val = ofstOpnd->GetValue(); auto *offset1 = static_cast(&defInsn.GetOperand(kInsnThirdOpnd)); CHECK_FATAL(offset1 != nullptr, "offset1 is null!"); val += offset1->GetOffset(); - AArch64OfstOperand *newOfsetOpnd = cgFunc->GetMemoryPool()->New(val, k32BitSize); + OfstOperand *newOfsetOpnd = &static_cast(cgFunc)->CreateOfstOpnd(val, k32BitSize); CHECK_FATAL(newOfsetOpnd != nullptr, "newOfsetOpnd is null!"); const MIRSymbol *addr = offset1->GetSymbol(); - AArch64RegOperand *replace = 
GetReplaceReg( - static_cast(defInsn.GetOperand(kInsnSecondOpnd))); + RegOperand *replace = GetReplaceReg( + static_cast(defInsn.GetOperand(kInsnSecondOpnd))); if (replace != nullptr) { - newMemOpnd = cgFunc->GetMemoryPool()->New( - AArch64MemOperand::kAddrModeLo12Li, currMemOpnd.GetSize(), *replace, nullptr, newOfsetOpnd, addr); + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeLo12Li, currMemOpnd.GetSize(), *replace, nullptr, newOfsetOpnd, addr); } } break; @@ -580,30 +728,30 @@ AArch64MemOperand *A64StrLdrProp::SelectReplaceMem(const Insn &defInsn, const A case MOP_xmovri32: case MOP_xmovri64: { if (memPropMode == kPropOffset) { - auto *imm = static_cast(&defInsn.GetOperand(kInsnSecondOpnd)); - AArch64OfstOperand *newOffset = cgFunc->GetMemoryPool()->New(imm->GetValue(), k32BitSize); + auto *imm = static_cast(&defInsn.GetOperand(kInsnSecondOpnd)); + OfstOperand *newOffset = &static_cast(cgFunc)->CreateOfstOpnd(imm->GetValue(), k32BitSize); CHECK_FATAL(newOffset != nullptr, "newOffset is null!"); - newMemOpnd = cgFunc->GetMemoryPool()->New( - AArch64MemOperand::kAddrModeBOi, currMemOpnd.GetSize(), *base, nullptr, newOffset, nullptr); + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeBOi, currMemOpnd.GetSize(), *base, nullptr, newOffset, nullptr); } break; } case MOP_xlslrri6: case MOP_wlslrri5: { - auto *imm = static_cast(&defInsn.GetOperand(kInsnThirdOpnd)); - AArch64RegOperand *newOfst = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnSecondOpnd))); + auto *imm = static_cast(&defInsn.GetOperand(kInsnThirdOpnd)); + RegOperand *newOfst = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnSecondOpnd))); if (newOfst != nullptr) { int64 shift = imm->GetValue(); if (memPropMode == kPropOffset) { if ((shift < k4ByteSize) && (shift >= 0)) { - newMemOpnd = cgFunc->GetMemoryPool()->New( - AArch64MemOperand::kAddrModeBOrX, currMemOpnd.GetSize(), *base, *newOfst, shift); + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeBOrX, currMemOpnd.GetSize(), *base, *newOfst, shift); } } else if (memPropMode == kPropShift) { shift += currMemOpnd.ShiftAmount(); if ((shift < k4ByteSize) && (shift >= 0)) { - newMemOpnd = cgFunc->GetMemoryPool()->New( - AArch64MemOperand::kAddrModeBOrX, currMemOpnd.GetSize(), *base, *newOfst, shift); + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeBOrX, currMemOpnd.GetSize(), *base, *newOfst, shift); } } } @@ -625,7 +773,7 @@ AArch64MemOperand *A64StrLdrProp::SelectReplaceMem(const Insn &defInsn, const A return newMemOpnd; } -AArch64RegOperand *A64StrLdrProp::GetReplaceReg(AArch64RegOperand &a64Reg) { +RegOperand *A64StrLdrProp::GetReplaceReg(RegOperand &a64Reg) { if (a64Reg.IsSSAForm()) { regno_t ssaIndex = a64Reg.GetRegisterNumber(); replaceVersions[ssaIndex] = ssaInfo->FindSSAVersion(ssaIndex); @@ -635,28 +783,28 @@ AArch64RegOperand *A64StrLdrProp::GetReplaceReg(AArch64RegOperand &a64Reg) { return nullptr; } -AArch64MemOperand *A64StrLdrProp::HandleArithImmDef(AArch64RegOperand &replace, Operand *oldOffset, - int64 defVal, uint32 memSize) { +MemOperand *A64StrLdrProp::HandleArithImmDef(RegOperand &replace, Operand *oldOffset, + int64 defVal, uint32 memSize) { if (memPropMode != kPropBase) { return nullptr; } - AArch64OfstOperand *newOfstImm = nullptr; + OfstOperand *newOfstImm = nullptr; if (oldOffset == nullptr) { - newOfstImm = cgFunc->GetMemoryPool()->New(defVal, k32BitSize); + newOfstImm = &static_cast(cgFunc)->CreateOfstOpnd(defVal, k32BitSize); } else { - auto 
*ofstOpnd = static_cast(oldOffset); + auto *ofstOpnd = static_cast(oldOffset); CHECK_FATAL(ofstOpnd != nullptr, "oldOffsetOpnd is null"); - newOfstImm = cgFunc->GetMemoryPool()->New(defVal + ofstOpnd->GetValue(), k32BitSize); + newOfstImm = &static_cast(cgFunc)->CreateOfstOpnd(defVal + ofstOpnd->GetValue(), k32BitSize); } CHECK_FATAL(newOfstImm != nullptr, "newOffset is null!"); - return cgFunc->GetMemoryPool()->New(AArch64MemOperand::kAddrModeBOi, memSize, - replace, nullptr, newOfstImm, nullptr); + return static_cast(cgFunc)->CreateMemOperand(MemOperand::kAddrModeBOi, memSize, + replace, nullptr, newOfstImm, nullptr); } -AArch64MemOperand *A64StrLdrProp::SelectReplaceExt(const Insn &defInsn, RegOperand &base, uint32 amount, - bool isSigned, uint32 memSize) { - AArch64MemOperand *newMemOpnd = nullptr; - AArch64RegOperand *newOfst = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnSecondOpnd))); +MemOperand *A64StrLdrProp::SelectReplaceExt(const Insn &defInsn, RegOperand &base, uint32 amount, + bool isSigned, uint32 memSize) { + MemOperand *newMemOpnd = nullptr; + RegOperand *newOfst = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnSecondOpnd))); if (newOfst == nullptr) { return nullptr; } @@ -664,18 +812,18 @@ AArch64MemOperand *A64StrLdrProp::SelectReplaceExt(const Insn &defInsn, RegOpera bool propExtend = (memPropMode == kPropShift) || ((memPropMode == kPropSignedExtend) && isSigned) || ((memPropMode == kPropUnsignedExtend) && !isSigned); if (memPropMode == kPropOffset) { - newMemOpnd = cgFunc->GetMemoryPool()->New( - AArch64MemOperand::kAddrModeBOrX, memSize, base, *newOfst, 0, isSigned); + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeBOrX, memSize, base, *newOfst, 0, isSigned); } else if (propExtend) { - newMemOpnd = cgFunc->GetMemoryPool()->New( - AArch64MemOperand::kAddrModeBOrX, memSize, base, *newOfst, amount, isSigned); + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeBOrX, memSize, base, *newOfst, amount, isSigned); } else { return nullptr; } return newMemOpnd; } -bool A64StrLdrProp::CheckNewMemOffset(const Insn &insn, AArch64MemOperand *newMemOpnd, uint32 opndIdx) { +bool A64StrLdrProp::CheckNewMemOffset(const Insn &insn, MemOperand *newMemOpnd, uint32 opndIdx) { auto *a64CgFunc = static_cast(cgFunc); if ((newMemOpnd->GetOffsetImmediate() != nullptr) && !a64CgFunc->IsOperandImmValid(insn.GetMachineOpcode(), newMemOpnd, opndIdx)) { @@ -686,20 +834,18 @@ bool A64StrLdrProp::CheckNewMemOffset(const Insn &insn, AArch64MemOperand *newMe return false; } /* is ldp or stp, addrMode must be BOI */ - if ((opndIdx == kInsnThirdOpnd) && (newMemOpnd->GetAddrMode() != AArch64MemOperand::kAddrModeBOi)) { + if ((opndIdx == kInsnThirdOpnd) && (newMemOpnd->GetAddrMode() != MemOperand::kAddrModeBOi)) { return false; } return true; } void AArch64Prop::PropPatternOpt() { - PropOptimizeManager optManager(*cgFunc, GetSSAInfo()); - optManager.Optimize(); - /* need peephole optimize */ - if (CGOptions::GetInstance().GetOptimizeLevel() < 0) { - optManager.Optimize(); - } - optManager.Optimize(); + PropOptimizeManager optManager; + optManager.Optimize(*cgFunc, GetSSAInfo()); + optManager.Optimize(*cgFunc, GetSSAInfo()); + optManager.Optimize(*cgFunc, GetSSAInfo()); + optManager.Optimize(*cgFunc, GetSSAInfo()); } void ExtendShiftPattern::SetExMOpType(const Insn &use) { @@ -873,33 +1019,6 @@ void ExtendShiftPattern::SelectExtendOrShift(const Insn &def) { } } -/* first use must match SelectExtendOrShift */ -bool 
ExtendShiftPattern::CheckDefUseInfo(uint32 size) { - auto ®Operand = static_cast(defInsn->GetOperand(kInsnFirstOpnd)); - Operand &defSrcOpnd = defInsn->GetOperand(kInsnSecondOpnd); - CHECK_FATAL(defSrcOpnd.IsRegister(), "defSrcOpnd must be register!"); - auto ®DefSrc = static_cast(defSrcOpnd); - if (regDefSrc.IsPhysicalRegister()) { - return false; - } - regno_t defSrcRegNo = regDefSrc.GetRegisterNumber(); - /* check regDefSrc */ - Insn *defSrcInsn = nullptr; - VRegVersion *useVersion = optSsaInfo->FindSSAVersion(defSrcRegNo); - CHECK_FATAL(useVersion != nullptr, "useVRegVersion must not be null based on ssa"); - DUInsnInfo *defInfo = useVersion->GetDefInsnInfo(); - if (defInfo == nullptr) { - return false; - } - defSrcInsn = defInfo->GetInsn(); - - const AArch64MD *md = &AArch64CG::kMd[static_cast(defSrcInsn)->GetMachineOpcode()]; - if ((size != regOperand.GetSize()) && md->IsMove()) { - return false; - } - return true; -} - /* Optimize ExtendShiftPattern: * ========================================================== * nosuffix LSL LSR ASR extrn (def) @@ -1048,7 +1167,7 @@ void ExtendShiftPattern::Optimize(Insn &insn) { amount = lastExtendOpnd.GetShiftAmount(); } if (shiftOp != BitShiftOperand::kUndef) { - auto &immOpnd = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); + auto &immOpnd = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); offset = static_cast(immOpnd.GetValue()); } amount += offset; @@ -1076,26 +1195,50 @@ bool ExtendShiftPattern::CheckCondition(Insn &insn) { if ((exMOpType == kExUndef) && (lsMOpType == kLsUndef)) { return false; } - auto ®Operand = static_cast(insn.GetOperand(replaceIdx)); + auto ®Operand = static_cast(insn.GetOperand(replaceIdx)); regno_t regNo = regOperand.GetRegisterNumber(); VRegVersion *useVersion = optSsaInfo->FindSSAVersion(regNo); - if (useVersion == nullptr) { + defInsn = FindDefInsn(useVersion); + if (!defInsn || (useVersion->GetAllUseInsns().size() > 1)) { return false; } - DUInsnInfo *defInfo = useVersion->GetDefInsnInfo(); - if (defInfo == nullptr) { + SelectExtendOrShift(*defInsn); + /* defInsn must be shift or extend */ + if ((extendOp == ExtendShiftOperand::kUndef) && (shiftOp == BitShiftOperand::kUndef)) { return false; } - defInsn = defInfo->GetInsn(); - SelectExtendOrShift(*defInsn); + Operand &defSrcOpnd = defInsn->GetOperand(kInsnSecondOpnd); + CHECK_FATAL(defSrcOpnd.IsRegister(), "defSrcOpnd must be register!"); + auto ®DefSrc = static_cast(defSrcOpnd); + if (regDefSrc.IsPhysicalRegister()) { + return false; + } + /* + * has Implict cvt + * + * avoid cases as following: + * lsr x2, x2, #8 + * ubfx w2, x2, #0, #32 lsr x2, x2, #8 + * eor w0, w0, w2 ===> eor w0, w0, x2 ==\=> eor w0, w0, w2, LSR #8 + * + * the truncation causes the wrong value by shift right + * shift left does not matter + */ if (useVersion->HasImplicitCvt() && shiftOp != BitShiftOperand::kUndef) { return false; } - /* defInsn must be shift or extend */ - if ((extendOp == ExtendShiftOperand::kUndef) && (shiftOp == BitShiftOperand::kUndef)) { + if ((shiftOp == BitShiftOperand::kLSR || shiftOp == BitShiftOperand::kASR) && + (defSrcOpnd.GetSize() > regOperand.GetSize())) { + return false; + } + regno_t defSrcRegNo = regDefSrc.GetRegisterNumber(); + /* check regDefSrc */ + VRegVersion *replaceUseV = optSsaInfo->FindSSAVersion(defSrcRegNo); + CHECK_FATAL(replaceUseV != nullptr, "useVRegVersion must not be null based on ssa"); + if (replaceUseV->GetAllUseInsns().size() > 1) { return false; } - return CheckDefUseInfo(regOperand.GetSize()); + return true; } void 
ExtendShiftPattern::Init() { @@ -1103,8 +1246,8 @@ void ExtendShiftPattern::Init() { extendOp = ExtendShiftOperand::kUndef; shiftOp = BitShiftOperand::kUndef; defInsn = nullptr; - replaceIdx = kInsnThirdOpnd; newInsn = nullptr; + replaceIdx = kInsnThirdOpnd; optSuccess = false; exMOpType = kExUndef; lsMOpType = kLsUndef; @@ -1159,7 +1302,7 @@ bool ExtendMovPattern::CheckSrcReg(regno_t srcRegNo, uint32 validNum) { case MOP_wiorrri12: case MOP_weorrri12: { /* check immVal if mop is OR */ - AArch64ImmOperand &imm = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); + ImmOperand &imm = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); uint32 bitNum = static_cast(imm.GetValue()); if ((bitNum >> validNum) != 0) { return false; @@ -1249,7 +1392,7 @@ void ExtendMovPattern::Init() { } void ExtendMovPattern::Optimize(Insn &insn) { - insn.SetMOperator(replaceMop); + insn.SetMOP(replaceMop); } void CopyRegProp::Run() { @@ -1267,6 +1410,42 @@ void CopyRegProp::Run() { } } +bool CopyRegProp::IsValidCopyProp(RegOperand &dstReg, RegOperand &srcReg) { + ASSERT(destVersion != nullptr, "find destVersion failed"); + ASSERT(srcVersion != nullptr, "find srcVersion failed"); + LiveInterval *dstll = nullptr; + LiveInterval *srcll = nullptr; + if (destVersion->GetOriginalRegNO() == srcVersion->GetOriginalRegNO()) { + return true; + } + regno_t dstRegNO = dstReg.GetRegisterNumber(); + regno_t srcRegNO = srcReg.GetRegisterNumber(); + for (auto useDUInfoIt : destVersion->GetAllUseInsns()) { + if (useDUInfoIt.second == nullptr) { + continue; + } + Insn *useInsn = (useDUInfoIt.second)->GetInsn(); + if (useInsn == nullptr) { + continue; + } + + dstll = regll->GetLiveInterval(dstRegNO); + srcll = regll->GetLiveInterval(srcRegNO); + static_cast(regll)->CheckInterference(*dstll, *srcll); + BB *useBB = useInsn->GetBB(); + if (dstll->IsConflictWith(srcRegNO) && + /* support override value when the version is not transphi */ + (((useBB->IsInPhiDef(srcRegNO) || useBB->IsInPhiList(srcRegNO)) && useBB->HasCriticalEdge()) || + useBB->IsInPhiList(dstRegNO))) { + return false; + } + } + if (dstll && srcll) { + regll->CoalesceLiveIntervals(*dstll, *srcll); + } + return true; +} + bool CopyRegProp::CheckCondition(Insn &insn) { if (insn.IsEffectiveCopy()) { MOperator mOp = insn.GetMachineOpcode(); @@ -1293,6 +1472,9 @@ bool CopyRegProp::CheckCondition(Insn &insn) { ASSERT(destVersion != nullptr, "find Version failed"); srcVersion = optSsaInfo->FindSSAVersion(srcReg.GetRegisterNumber()); ASSERT(srcVersion != nullptr, "find Version failed"); + if (!IsValidCopyProp(destReg, srcReg)) { + return false; + } return true; } else { /* should be eliminated by ssa peep */ @@ -1303,19 +1485,18 @@ bool CopyRegProp::CheckCondition(Insn &insn) { } void CopyRegProp::Optimize(Insn &insn) { - /* do not extend register live range at current stage */ - if (destVersion->GetOriginalRegNO() == srcVersion->GetOriginalRegNO() || - AArch64Prop::IsInLimitCopyRange(destVersion)) { - optSsaInfo->ReplaceAllUse(destVersion, srcVersion); + optSsaInfo->ReplaceAllUse(destVersion, srcVersion); + if (cgFunc.IsExtendReg(destVersion->GetSSAvRegOpnd()->GetRegisterNumber())) { + cgFunc.InsertExtendSet(srcVersion->GetSSAvRegOpnd()->GetRegisterNumber()); } } void CopyRegProp::VaildateImplicitCvt(RegOperand &destReg, const RegOperand &srcReg, Insn &movInsn) { ASSERT(movInsn.GetMachineOpcode() == MOP_xmovrr || movInsn.GetMachineOpcode() == MOP_wmovrr, "NIY explicit CVT"); if (destReg.GetSize() == k64BitSize && srcReg.GetSize() == k32BitSize) { - 
movInsn.SetMOperator(MOP_xuxtw64); + movInsn.SetMOP(MOP_xuxtw64); } else if (destReg.GetSize() == k32BitSize && srcReg.GetSize() == k64BitSize) { - movInsn.SetMOperator(MOP_xubfxrri6i6); + movInsn.SetMOP(MOP_xubfxrri6i6); movInsn.AddOperand(cgFunc.CreateImmOperand(PTY_i64, 0)); movInsn.AddOperand(cgFunc.CreateImmOperand(PTY_i64, k32BitSize)); } else { @@ -1366,8 +1547,8 @@ bool ValidBitNumberProp::CheckCondition(Insn &insn) { if (insn.GetMachineOpcode() == MOP_xubfxrri6i6) { destOpnd= &static_cast(insn.GetOperand(kInsnFirstOpnd)); srcOpnd= &static_cast(insn.GetOperand(kInsnSecondOpnd)); - auto &lsb = static_cast(insn.GetOperand(kInsnThirdOpnd)); - auto &width = static_cast(insn.GetOperand(kInsnFourthOpnd)); + auto &lsb = static_cast(insn.GetOperand(kInsnThirdOpnd)); + auto &width = static_cast(insn.GetOperand(kInsnFourthOpnd)); if ((lsb.GetValue() != 0) || (width.GetValue() != k32BitSize)) { return false; } @@ -1450,13 +1631,13 @@ bool FpSpConstProp::CheckCondition(Insn &insn) { if (insn.GetMachineOpcode() == MOP_xaddrri12) { aT = kAArch64Add; if (GetValidSSAInfo(insn.GetOperand(kInsnFirstOpnd))) { - shiftOpnd = &static_cast(insn.GetOperand(kInsnThirdOpnd)); + shiftOpnd = &static_cast(insn.GetOperand(kInsnThirdOpnd)); return true; } } else if (insn.GetMachineOpcode() == MOP_xsubrri12) { aT = kAArch64Sub; if (GetValidSSAInfo(insn.GetOperand(kInsnFirstOpnd))) { - shiftOpnd = &static_cast(insn.GetOperand(kInsnThirdOpnd)); + shiftOpnd = &static_cast(insn.GetOperand(kInsnThirdOpnd)); return true; } } @@ -1490,22 +1671,25 @@ int64 FpSpConstProp::ArithmeticFold(int64 valInUse, ArithmeticType useAT) const void FpSpConstProp::PropInMem(DUInsnInfo &useDUInfo, Insn &useInsn) { MOperator useMop = useInsn.GetMachineOpcode(); + if (useInsn.IsAtomic()) { + return; + } if (useInsn.IsStore() || useInsn.IsLoad()) { if (useDUInfo.GetOperands().size() == 1) { auto useOpndIt = useDUInfo.GetOperands().begin(); if (useOpndIt->first == kInsnSecondOpnd || useOpndIt->first == kInsnThirdOpnd) { ASSERT(useOpndIt->second == 1, "multiple use in memory opnd"); - auto *a64memOpnd = static_cast(useInsn.GetMemOpnd()); - if (a64memOpnd->IsIntactIndexed() && a64memOpnd->GetAddrMode() == AArch64MemOperand::kAddrModeBOi) { - auto *ofstOpnd = static_cast(a64memOpnd->GetOffsetImmediate()); + auto *a64memOpnd = static_cast(useInsn.GetMemOpnd()); + if (a64memOpnd->IsIntactIndexed() && a64memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi) { + auto *ofstOpnd = static_cast(a64memOpnd->GetOffsetImmediate()); CHECK_FATAL(ofstOpnd != nullptr, "oldOffsetOpnd is null"); int64 newVal = ArithmeticFold(ofstOpnd->GetValue(), kAArch64Add); - auto *newOfstImm = cgFunc.GetMemoryPool()->New(newVal, k64BitSize); + auto *newOfstImm = &static_cast(cgFunc).CreateOfstOpnd(newVal, k64BitSize); if (ofstOpnd->GetVary() == kUnAdjustVary || shiftOpnd->GetVary() == kUnAdjustVary) { newOfstImm->SetVary(kUnAdjustVary); } - auto *newMem = cgFunc.GetMemoryPool()->New( - AArch64MemOperand::kAddrModeBOi, a64memOpnd->GetSize(), *fpSpBase, + auto *newMem = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOi, a64memOpnd->GetSize(), *fpSpBase, nullptr, newOfstImm, nullptr); if (static_cast(cgFunc).IsOperandImmValid(useMop, newMem, useOpndIt->first)) { useInsn.SetMemOpnd(newMem); @@ -1515,7 +1699,14 @@ void FpSpConstProp::PropInMem(DUInsnInfo &useDUInfo, Insn &useInsn) { } } } else { - CHECK_FATAL(false, "NYI"); + /* + * case : store stack location on stack + * add x1, sp, #8 + * ... 
+ * store x1 [x1, #16] + * not prop , not benefit to live range yet + */ + return; } } } @@ -1526,9 +1717,9 @@ void FpSpConstProp::PropInArith(DUInsnInfo &useDUInfo, Insn &useInsn, Arithmetic MOperator useMop = useInsn.GetMachineOpcode(); ASSERT(useDUInfo.GetOperands().begin()->first == kInsnSecondOpnd, "NIY"); ASSERT(useDUInfo.GetOperands().begin()->second == 1, "multiple use in add/sub"); - auto &curVal = static_cast(useInsn.GetOperand(kInsnThirdOpnd)); - AArch64ImmOperand &newVal = a64cgFunc.CreateImmOperand(ArithmeticFold(curVal.GetValue(), curAT), - curVal.GetSize(), false); + auto &curVal = static_cast(useInsn.GetOperand(kInsnThirdOpnd)); + ImmOperand &newVal = a64cgFunc.CreateImmOperand(ArithmeticFold(curVal.GetValue(), curAT), + curVal.GetSize(), false); if (newVal.GetValue() < 0) { newVal.Negate(); useMop = A64ConstProp::GetReversalMOP(useMop); @@ -1551,7 +1742,7 @@ void FpSpConstProp::PropInCopy(DUInsnInfo &useDUInfo, Insn &useInsn, MOperator o if (useDUInfo.GetOperands().size() == 1) { ASSERT(useDUInfo.GetOperands().begin()->first == kInsnSecondOpnd, "NIY"); ASSERT(useDUInfo.GetOperands().begin()->second == 1, "multiple use in add/sub"); - auto &newVal = *static_cast(shiftOpnd->Clone(*cgFunc.GetMemoryPool())); + auto &newVal = *static_cast(shiftOpnd->Clone(*cgFunc.GetMemoryPool())); Insn &newInsn = cgFunc.GetCG()->BuildInstruction( oriMop, useInsn.GetOperand(kInsnFirstOpnd), *fpSpBase, newVal); useInsn.GetBB()->ReplaceInsn(useInsn, newInsn); @@ -1568,10 +1759,9 @@ void FpSpConstProp::Optimize(Insn &insn) { PropInMem(*useInsnInfo.second, *useInsn); switch (useMop) { case MOP_xmovrr: + case MOP_wmovrr: PropInCopy(*useInsnInfo.second, *useInsn, insn.GetMachineOpcode()); break; - case MOP_wmovrr: - CHECK_FATAL(false, "NIY mov 32"); case MOP_xaddrri12: PropInArith(*useInsnInfo.second, *useInsn, kAArch64Add); break; @@ -1584,19 +1774,329 @@ void FpSpConstProp::Optimize(Insn &insn) { } } +bool A64PregCopyPattern::DFSFindValidDefInsns(Insn *curDefInsn, std::unordered_map &visited) { + if (curDefInsn == nullptr) { + return false; + } + if (visited[curDefInsn->GetId()]) { + return true; + } + visited[curDefInsn->GetId()] = true; + if (!curDefInsn->IsPhi()) { + CHECK_FATAL(curDefInsn->IsMachineInstruction(), "expect valid insn"); + validDefInsns.emplace_back(curDefInsn); + return true; + } + auto &phiOpnd = static_cast(curDefInsn->GetOperand(kInsnSecondOpnd)); + for (auto &phiListIt : phiOpnd.GetOperands()) { + auto &useOpnd = static_cast(*phiListIt.second); + VRegVersion *useVersion = optSsaInfo->FindSSAVersion(useOpnd.GetRegisterNumber()); + Insn *defInsn = FindDefInsn(useVersion); + if (!DFSFindValidDefInsns(defInsn, visited)) { + return false; + } + } + return true; +} + +bool A64PregCopyPattern::CheckMultiUsePoints(VRegVersion *version) { + for (auto &useInfoIt : version->GetAllUseInsns()) { + DUInsnInfo *useInfo = useInfoIt.second; + CHECK_FATAL(useInfo, "get useDUInfo failed"); + Insn *useInsn = useInfo->GetInsn(); + if (!useInsn->IsPhi() && useInsn->GetMachineOpcode() != MOP_wmovrr && useInsn->GetMachineOpcode() != MOP_xmovrr) { + return false; + } + if ((useInsn->GetMachineOpcode() == MOP_wmovrr || useInsn->GetMachineOpcode() == MOP_xmovrr) && + !static_cast(useInsn->GetOperand(kInsnFirstOpnd)).IsPhysicalRegister()) { + return false; + } + } + return true; +} + +bool A64PregCopyPattern::CheckPhiCaseCondition(Insn &curInsn, Insn &defInsn) { + std::unordered_map visited; + if (!DFSFindValidDefInsns(&defInsn, visited)) { + return false; + } + if (!CheckValidDefInsn(validDefInsns[0])) 
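Editor's note on the FpSpConstProp hunks above: the pass folds an `add xN, sp, #imm` into each user, re-deriving the constant with ArithmeticFold and bailing out when the folded offset no longer encodes. A compact sketch under assumed names; the real pass defers the encodability question to IsOperandImmValid, so the flat 12-bit bound here is only a stand-in:

```cpp
#include <cstdint>
#include <optional>

enum class Arith { kAdd, kSub };

// Role of ArithmeticFold: combine the propagated SP/FP displacement with
// the constant already present in the use instruction.
int64_t Fold(int64_t spDisp, int64_t valInUse, Arith useOp) {
  return useOp == Arith::kAdd ? valInUse + spDisp : valInUse - spDisp;
}

// PropInMem in essence: given x1 = sp + 8, "ldr x0, [x1, #16]" can become
// "ldr x0, [sp, #24]" when the folded offset is still encodable.
std::optional<int64_t> FoldIntoMemOffset(int64_t spDisp, int64_t ofst) {
  int64_t folded = Fold(spDisp, ofst, Arith::kAdd);
  if (folded < 0 || folded > 4095) {
    return std::nullopt;  // keep the original add; offset not encodable
  }
  return folded;
}
```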
{ + return false; + } + MOperator defMop = validDefInsns[0]->GetMachineOpcode(); + uint32 defOpndNum = validDefInsns[0]->GetOperandSize(); + for (int i = 1; i < validDefInsns.size(); ++i) { + if (defMop != validDefInsns[i]->GetMachineOpcode()) { + return false; + } + Operand &dstOpnd = validDefInsns[i]->GetOperand(kInsnFirstOpnd); + CHECK_FATAL(dstOpnd.IsRegister(), "dstOpnd must be register"); + VRegVersion *defVersion = optSsaInfo->FindSSAVersion(static_cast(dstOpnd).GetRegisterNumber()); + /* use: (phi) or (mov preg) */ + if (defVersion->GetAllUseInsns().size() > 1 && !CheckMultiUsePoints(defVersion)) { + return false; + } + for (uint32 idx = 0; idx < defOpndNum; ++idx) { + if (validDefInsns[0]->OpndIsDef(idx) && validDefInsns[i]->OpndIsDef(idx)) { + continue; + } + Operand &opnd1 = validDefInsns[0]->GetOperand(idx); + Operand &opnd2 = validDefInsns[i]->GetOperand(idx); + if (!opnd1.Equals(opnd2) && differIdx == -1) { + differIdx = static_cast(idx); + if (!validDefInsns[0]->GetOperand(static_cast(differIdx)).IsRegister() || + !validDefInsns[i]->GetOperand(static_cast(differIdx)).IsRegister()) { + return false; + } + auto &differOpnd1 = static_cast(validDefInsns[0]->GetOperand(static_cast(differIdx))); + auto &differOpnd2 = static_cast(validDefInsns[1]->GetOperand(static_cast(differIdx))); + /* avoid cc reg */ + if (!differOpnd1.IsOfIntClass() || !differOpnd2.IsOfIntClass() || + differOpnd1.IsPhysicalRegister() || differOpnd2.IsPhysicalRegister()) { + return false; + } + VRegVersion *differVersion1 = optSsaInfo->FindSSAVersion(differOpnd1.GetRegisterNumber()); + VRegVersion *differVersion2 = optSsaInfo->FindSSAVersion(differOpnd2.GetRegisterNumber()); + if (!differVersion1 || !differVersion2) { + return false; + } + if (differVersion1->GetOriginalRegNO() != differVersion2->GetOriginalRegNO()) { + return false; + } + differOrigNO = differVersion1->GetOriginalRegNO(); + } else if (!opnd1.Equals(opnd2) && idx != differIdx) { + return false; + } + } + if (differIdx <= 0) { + return false; + } + } + return true; +} + +bool A64PregCopyPattern::CheckValidDefInsn(Insn *defInsn) { + const AArch64MD *md = &AArch64CG::kMd[defInsn->GetMachineOpcode()]; + CHECK_FATAL(md != nullptr, "expect valid AArch64MD"); + /* this pattern applies to all basicOps */ + if (md->IsMove() || md->IsStore() || md->IsLoad() || md->IsLoadStorePair() || md->IsLoadAddress() || md->IsCall() || + md->IsDMB() || md->IsVectorOp() || md->IsCondDef() || md->IsCondBranch() || md->IsUnCondBranch()) { + return false; + } + uint32 opndNum = defInsn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = defInsn->GetOperand(i); + if (!opnd.IsRegister() && !opnd.IsImmediate() && !opnd.IsOpdShift() && !opnd.IsOpdExtend()) { + return false; + } + if (opnd.IsRegister()) { + auto ®Opnd = static_cast(opnd); + if (cgFunc.IsSPOrFP(regOpnd) || regOpnd.IsPhysicalRegister() || + (!regOpnd.IsOfIntClass() && !regOpnd.IsOfFloatOrSIMDClass())) { + return false; + } + } + } + return true; +} + +bool A64PregCopyPattern::CheckCondition(Insn &insn) { + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_xmovrr && curMop != MOP_wmovrr) { + return false; + } + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + if (!dstOpnd.IsPhysicalRegister()) { + return false; + } + regno_t useRegNO = static_cast(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber(); + VRegVersion *useVersion = optSsaInfo->FindSSAVersion(useRegNO); + Insn *defInsn = FindDefInsn(useVersion); + if (defInsn == nullptr) { + return false; + } + 
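Editor's note: CheckPhiCaseCondition relies on DFSFindValidDefInsns to walk through arbitrarily nested phis and gather the concrete defining instructions, tolerating loop-carried cycles via the visited map. A self-contained model of that walk; MiniInsn and CollectDefs are stand-ins, not Maple's types:

```cpp
#include <cstdint>
#include <unordered_map>
#include <vector>

// Phi nodes fan out to several incoming defs; real defs are the leaves.
struct MiniInsn {
  uint32_t id = 0;
  bool isPhi = false;
  std::vector<MiniInsn*> incoming;  // only meaningful for phis
};

bool CollectDefs(MiniInsn *def, std::unordered_map<uint32_t, bool> &visited,
                 std::vector<MiniInsn*> &out) {
  if (def == nullptr) {
    return false;  // an unresolved def aborts the whole pattern
  }
  if (visited[def->id]) {
    return true;   // loop-carried phi: already handled, skip the cycle
  }
  visited[def->id] = true;
  if (!def->isPhi) {
    out.push_back(def);  // a concrete defining instruction
    return true;
  }
  for (MiniInsn *in : def->incoming) {
    if (!CollectDefs(in, visited, out)) {
      return false;
    }
  }
  return true;
}
```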
Operand &defDstOpnd = defInsn->GetOperand(kInsnFirstOpnd); + /* avoid inline-asm */ + if (!defDstOpnd.IsRegister()) { + return false; + } + VRegVersion *defVersion = optSsaInfo->FindSSAVersion(static_cast(defDstOpnd).GetRegisterNumber()); + /* use: (phi) or (mov preg) */ + if (defVersion->GetAllUseInsns().size() > 1 && !CheckMultiUsePoints(defVersion)) { + return false; + } + if (defInsn->IsPhi()) { + isCrossPhi = true; + firstPhiInsn = defInsn; + return CheckPhiCaseCondition(insn, *defInsn); + } else { + if (!CheckValidDefInsn(defInsn)) { + return false; + } + validDefInsns.emplace_back(defInsn); + } + return true; +} + +Insn &A64PregCopyPattern::CreateNewPhiInsn(std::unordered_map &newPhiList, Insn *curInsn) { + CHECK_FATAL(!newPhiList.empty(), "empty newPhiList"); + RegOperand *differOrigOpnd = cgFunc.GetVirtualRegisterOperand(differOrigNO); + CHECK_FATAL(differOrigOpnd != nullptr, "get original opnd default"); + PhiOperand &phiList = optSsaInfo->CreatePhiOperand(); + for (auto &it : newPhiList) { + phiList.InsertOpnd(it.first, *it.second); + } + Insn &phiInsn = cgFunc.GetCG()->BuildPhiInsn(*differOrigOpnd, phiList); + optSsaInfo->CreateNewInsnSSAInfo(phiInsn); + BB *bb = curInsn->GetBB(); + bb->InsertInsnBefore(*curInsn, phiInsn); + bb->AddPhiInsn(differOrigOpnd->GetRegisterNumber(), phiInsn); + return phiInsn; +} + +RegOperand &A64PregCopyPattern::DFSBuildPhiInsn(Insn *curInsn, std::unordered_map &visited) { + CHECK_FATAL(curInsn, "curInsn must not be null"); + if (visited[curInsn->GetId()] != nullptr) { + return *visited[curInsn->GetId()]; + } + if (!curInsn->IsPhi()) { + return static_cast(curInsn->GetOperand(static_cast(differIdx))); + } + std::unordered_map differPhiList; + auto &phiOpnd = static_cast(curInsn->GetOperand(kInsnSecondOpnd)); + for (auto &phiListIt : phiOpnd.GetOperands()) { + auto &useOpnd = static_cast(*phiListIt.second); + VRegVersion *useVersion = optSsaInfo->FindSSAVersion(useOpnd.GetRegisterNumber()); + Insn *defInsn = FindDefInsn(useVersion); + CHECK_FATAL(defInsn != nullptr, "get defInsn failed"); + RegOperand &phiDefOpnd = DFSBuildPhiInsn(defInsn, visited); + differPhiList.emplace(phiListIt.first, &phiDefOpnd); + } + Insn &phiInsn = CreateNewPhiInsn(differPhiList, curInsn); + visited[curInsn->GetId()] = &static_cast(phiInsn.GetOperand(kInsnFirstOpnd)); + return static_cast(phiInsn.GetOperand(kInsnFirstOpnd)); +} + +/* + * Find if the new phi insn has been created at preds + */ +std::unordered_map A64PregCopyPattern::FindDifferPhiDefOpnds() { + std::unordered_map differPhiOpnds; + auto &phiOpnd = static_cast(firstPhiInsn->GetOperand(kInsnSecondOpnd)); + for (auto &phiListIt : phiOpnd.GetOperands()) { + auto &useOpnd = static_cast(*phiListIt.second); + VRegVersion *useVersion = optSsaInfo->FindSSAVersion(useOpnd.GetRegisterNumber()); + Insn *defInsn = FindDefInsn(useVersion); + CHECK_FATAL(defInsn != nullptr, "get defInsn failed"); + if (defInsn->IsPhi()) { + MapleMap &phiInsns = defInsn->GetBB()->GetPhiInsns(); + for (auto &phiIt : phiInsns) { + auto &def = static_cast(phiIt.second->GetOperand(kInsnFirstOpnd)); + VRegVersion *defVersion = optSsaInfo->FindSSAVersion(def.GetRegisterNumber()); + if (defVersion->GetOriginalRegNO() == differOrigNO) { + differPhiOpnds.emplace(phiIt.second->GetBB()->GetId(), &def); + } + } + } else { + differPhiOpnds.emplace(defInsn->GetBB()->GetId(), + &static_cast(defInsn->GetOperand(static_cast(differIdx)))); + } + } + return differPhiOpnds; +} + +RegOperand *A64PregCopyPattern::GetDifferPhiDef() { + MapleMap &phiInsns = 
firstPhiInsn->GetBB()->GetPhiInsns(); + for (auto &phiIt : phiInsns) { + auto &def = static_cast(phiIt.second->GetOperand(kInsnFirstOpnd)); + VRegVersion *defVersion = optSsaInfo->FindSSAVersion(def.GetRegisterNumber()); + if (defVersion->GetOriginalRegNO() == differOrigNO) { + return &static_cast(phiIt.second->GetOperand(kInsnFirstOpnd)); + } + } + std::unordered_map differPhiOpnds = FindDifferPhiDefOpnds(); + auto &firstPhiOpnd = static_cast(firstPhiInsn->GetOperand(kInsnSecondOpnd)); + if (differPhiOpnds.size() == firstPhiOpnd.GetOperands().size()) { + Insn &phiInsn = CreateNewPhiInsn(differPhiOpnds, firstPhiInsn); + return &static_cast(phiInsn.GetOperand(kInsnFirstOpnd)); + } + return nullptr; +} + +void A64PregCopyPattern::Optimize(Insn &insn) { + Insn *defInsn = *validDefInsns.begin(); + MOperator newMop = defInsn->GetMachineOpcode(); + Operand &dstOpnd = insn.GetOperand(kInsnFirstOpnd); + Insn &newInsn = cgFunc.GetCG()->BuildInstruction(newMop); + uint32 opndNum = defInsn->GetOperandSize(); + newInsn.ResizeOpnds(opndNum); + if (!isCrossPhi) { + for (uint32 i = 0; i < opndNum; ++i) { + if (defInsn->OpndIsDef(i)) { + newInsn.SetOperand(i, dstOpnd); + } else { + newInsn.SetOperand(i, defInsn->GetOperand(i)); + } + } + } else { + RegOperand *differPhiDefOpnd = GetDifferPhiDef(); + if (differPhiDefOpnd == nullptr) { + std::unordered_map visited; + differPhiDefOpnd = &DFSBuildPhiInsn(firstPhiInsn, visited); + } + CHECK_FATAL(differPhiDefOpnd, "get differPhiDefOpnd failed"); + for (uint32 i = 0; i< opndNum; ++i) { + if (defInsn->OpndIsDef(i)) { + newInsn.SetOperand(i, dstOpnd); + } else if (i == static_cast(differIdx)) { + newInsn.SetOperand(i, *differPhiDefOpnd); + } else { + newInsn.SetOperand(i, defInsn->GetOperand(i)); + } + } + } + insn.GetBB()->ReplaceInsn(insn, newInsn); + /* update ssa info */ + optSsaInfo->ReplaceInsn(insn, newInsn); + + if (PROP_DUMP) { + LogInfo::MapleLogger() << ">>>>>>> In A64PregCopyPattern : <<<<<<<\n"; + LogInfo::MapleLogger() << "======= ReplaceInsn :\n"; + insn.Dump(); + LogInfo::MapleLogger() << "======= NewInsn :\n"; + newInsn.Dump(); + } +} + +void A64PregCopyPattern::Run() { + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + Init(); + if (!CheckCondition(*insn)) { + continue; + } + Optimize(*insn); + } + } + validDefInsns.clear(); + validDefInsns.shrink_to_fit(); +} + void A64ReplaceRegOpndVisitor::Visit(RegOperand *v) { (void)v; insn->SetOperand(idx, *newReg); } -void A64ReplaceRegOpndVisitor::Visit(MemOperand *v) { - auto *a64memOpnd = static_cast(v); +void A64ReplaceRegOpndVisitor::Visit(MemOperand *a64memOpnd) { bool changed = false; CHECK_FATAL(a64memOpnd->IsIntactIndexed(), "NYI post/pre index model"); StackMemPool tempMemPool(memPoolCtrler, "temp mempool for A64ReplaceRegOpndVisitor"); - auto *cpyMem = static_cast(a64memOpnd->Clone(tempMemPool)); + auto *cpyMem = a64memOpnd->Clone(tempMemPool); if (cpyMem->GetBaseRegister() != nullptr && cpyMem->GetBaseRegister()->GetRegisterNumber() == oldReg->GetRegisterNumber()) { - cpyMem->SetBaseRegister(*static_cast(newReg)); + cpyMem->SetBaseRegister(*static_cast(newReg)); changed = true; } if (cpyMem->GetIndexRegister() != nullptr && diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_ra_opt.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_ra_opt.cpp index 9271da591252264b29dc17bfce2cdfc145c3c58d..56ec37bedc2cf3aa3f3d5a93a2d98e87568b651f 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_ra_opt.cpp +++ 
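Editor's note on A64PregCopyPattern::Optimize above: once all collected defs are identical except for a single register operand (differIdx), the def is re-issued directly into the physical register and the differing operand is replaced by a freshly built phi. A toy version of the rewrite; Opnd, MiniInsn and RebuildIntoPreg are illustrative only:

```cpp
#include <cstddef>
#include <vector>

struct Opnd { int id; };
struct MiniInsn {
  int mop;                 // machine opcode
  std::vector<Opnd> ops;   // ops[0] is the def position
};

// The transformation in essence:
//   mov p0, v3   with  v3 = phi(v1, v2), v1 = add a, c, v2 = add b, c
// becomes
//   vphi = phi(a, b);  p0 = add vphi, c
MiniInsn RebuildIntoPreg(const MiniInsn &def, Opnd preg, size_t differIdx,
                         Opnd differPhiDef) {
  MiniInsn res{def.mop, def.ops};
  res.ops[0] = preg;                    // write the preg directly
  if (differIdx != 0) {                 // differIdx > 0 is guaranteed upstream
    res.ops[differIdx] = differPhiDef;  // merged operand from the new phi
  }
  return res;
}
```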
b/src/mapleall/maple_be/src/cg/aarch64/aarch64_ra_opt.cpp @@ -54,7 +54,7 @@ bool RaX0Opt::PropagateRenameReg(Insn *nInsn, const X0OptInfo &optVal) { memopnd.SetIndexRegister(*renameOpnd); } } else if (opnd.IsRegister()) { - bool isdef = static_cast(md->GetOperand(i))->IsRegDef(); + bool isdef = (md->GetOperand(i))->IsRegDef(); RegOperand ®opnd = static_cast(opnd); regno_t regCandidate = regopnd.GetRegisterNumber(); if (isdef) { @@ -91,7 +91,7 @@ bool RaX0Opt::PropagateX0DetectX0(const Insn *insn, X0OptInfo &optVal) { bool RaX0Opt::PropagateX0DetectRedefine(const AArch64MD *md, const Insn *ninsn, const X0OptInfo &optVal, uint32 index) { - bool isdef = static_cast(md->GetOperand(static_cast(index)))->IsRegDef(); + bool isdef = (md->GetOperand(static_cast(index)))->IsRegDef(); if (isdef) { RegOperand &opnd = static_cast(ninsn->GetOperand(index)); if (opnd.GetRegisterNumber() == optVal.GetReplaceReg()) { @@ -487,7 +487,7 @@ void VregRename::RenameGetFuncVregInfo() { } } else if (opnd->IsRegister() && static_cast(opnd)->IsVirtualRegister() && static_cast(opnd)->GetRegisterNumber() != ccRegno) { - bool isdef = static_cast(md->operand[i])->IsRegDef(); + bool isdef = (md->operand[i])->IsRegDef(); regno_t vreg = static_cast(opnd)->GetRegisterNumber(); UpdateVregInfo(vreg, bb, isInner, isdef); } diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp index 9bb7f1bfc24522c2e5a0154fc2d579b95c9775d6..a4b203372e930e96c3119234cf38f58c55099cb6 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp @@ -109,8 +109,7 @@ void AArch64ReachingDefinition::InitStartGen() { uint32 firstSymSize = cgFunc->GetBecommon().GetTypeSize(firstType->GetTypeIndex()); uint32 firstStackSize = firstSymSize < k4ByteSize ? k4ByteSize : firstSymSize; - AArch64MemOperand *memOpnd = cgFunc->GetMemoryPool()->New(RFP, stOffset, - firstStackSize * kBitsPerByte); + MemOperand *memOpnd = aarchCGFunc->CreateStackMemOpnd(RFP, stOffset, firstStackSize * kBitsPerByte); MOperator mopTemp = firstStackSize <= k4ByteSize ? 
MOP_pseudo_param_store_w : MOP_pseudo_param_store_x; Insn &pseudoInsnTemp = cgFunc->GetCG()->BuildInstruction(mopTemp, *memOpnd); bb->InsertInsnBegin(pseudoInsnTemp); @@ -123,7 +122,7 @@ void AArch64ReachingDefinition::InitStartGen() { AArch64CGFunc *a64CGFunc = static_cast(cgFunc); for (uint32 i = 0; i < a64CGFunc->GetRefCount(); ++i) { - AArch64MemOperand *memOpnd = cgFunc->GetMemoryPool()->New( + MemOperand *memOpnd = a64CGFunc->CreateStackMemOpnd( RFP, a64CGFunc->GetBeginOffset() + i * k8BitSize, k64BitSize); Insn &pseudoInsn = cgFunc->GetCG()->BuildInstruction(MOP_pseudo_ref_init_x, *memOpnd); @@ -189,13 +188,13 @@ void AArch64ReachingDefinition::AddRetPseudoInsns() { } void AArch64ReachingDefinition::GenAllAsmDefRegs(BB &bb, Insn &insn, uint32 index) { - for (auto reg : static_cast(insn.GetOperand(index)).GetOperands()) { + for (auto reg : static_cast(insn.GetOperand(index)).GetOperands()) { regGen[bb.GetId()]->SetBit(static_cast(reg)->GetRegisterNumber()); } } void AArch64ReachingDefinition::GenAllAsmUseRegs(BB &bb, Insn &insn, uint32 index) { - for (auto reg : static_cast(insn.GetOperand(index)).GetOperands()) { + for (auto reg : static_cast(insn.GetOperand(index)).GetOperands()) { regUse[bb.GetId()]->SetBit(static_cast(reg)->GetRegisterNumber()); } } @@ -244,7 +243,7 @@ bool AArch64ReachingDefinition::KilledByCallBetweenInsnInSameBB(const Insn &star } static bool SetDefInsnVecForAsm(Insn *insn, uint32 index, uint32 regNO, std::vector &defInsnVec) { - for (auto reg : static_cast(insn->GetOperand(index)).GetOperands()) { + for (auto reg : static_cast(insn->GetOperand(index)).GetOperands()) { if (static_cast(reg)->GetRegisterNumber() == regNO) { defInsnVec.emplace_back(insn); return true; @@ -358,7 +357,7 @@ std::vector AArch64ReachingDefinition::FindRegDefBetweenInsn( } static bool IsRegInAsmList(Insn *insn, uint32 index, uint32 regNO, InsnSet &insnSet) { - for (auto reg : static_cast(insn->GetOperand(index)).GetOperands()) { + for (auto reg : static_cast(insn->GetOperand(index)).GetOperands()) { if (static_cast(reg)->GetRegisterNumber() == regNO) { insnSet.insert(insn); return true; @@ -451,7 +450,7 @@ std::vector AArch64ReachingDefinition::FindMemDefBetweenInsn( Operand &opnd = insn->GetOperand(i); if (opnd.IsMemoryAccessOperand()) { - auto &memOpnd = static_cast(opnd); + auto &memOpnd = static_cast(opnd); RegOperand *base = memOpnd.GetBaseRegister(); RegOperand *index = memOpnd.GetIndexRegister(); @@ -507,7 +506,7 @@ void AArch64ReachingDefinition::FindMemDefInBB(uint32 offset, BB &bb, InsnSet &d for (uint32 i = 0; i < opndNum; ++i) { Operand &opnd = insn->GetOperand(i); if (opnd.IsMemoryAccessOperand()) { - auto &memOpnd = static_cast(opnd); + auto &memOpnd = static_cast(opnd); RegOperand *base = memOpnd.GetBaseRegister(); RegOperand *index = memOpnd.GetIndexRegister(); @@ -838,13 +837,13 @@ bool AArch64ReachingDefinition::FindRegUsingBetweenInsn(uint32 regNO, Insn *star continue; } - AArch64OpndProp *regProp = static_cast(md->operand[i]); + OpndProp *regProp = md->operand[i]; if (!regProp->IsUse() && !opnd.IsMemoryAccessOperand()) { continue; } if (opnd.IsMemoryAccessOperand()) { - auto &memOpnd = static_cast(opnd); + auto &memOpnd = static_cast(opnd); RegOperand *base = memOpnd.GetBaseRegister(); RegOperand *index = memOpnd.GetIndexRegister(); if ((base != nullptr && base->GetRegisterNumber() == regNO) || @@ -908,17 +907,17 @@ bool AArch64ReachingDefinition::FindRegUseBetweenInsn(uint32 regNO, Insn *startI } continue; } else if (opnd.IsMemoryAccessOperand()) { - auto &memOpnd 
= static_cast(opnd); + auto &memOpnd = static_cast(opnd); RegOperand *baseOpnd = memOpnd.GetBaseRegister(); if (baseOpnd != nullptr && - (memOpnd.GetAddrMode() == AArch64MemOperand::kAddrModeBOi) && + (memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi) && (memOpnd.IsPostIndexed() || memOpnd.IsPreIndexed()) && baseOpnd->GetRegisterNumber() == regNO) { findFinish = true; } } - AArch64OpndProp *regProp = static_cast(md->operand[i]); + OpndProp *regProp = md->operand[i]; if (regProp->IsDef() && (opnd.IsConditionCode() || opnd.IsRegister()) && (static_cast(opnd).GetRegisterNumber() == regNO)) { @@ -930,7 +929,7 @@ bool AArch64ReachingDefinition::FindRegUseBetweenInsn(uint32 regNO, Insn *startI } if (opnd.IsMemoryAccessOperand()) { - auto &memOpnd = static_cast(opnd); + auto &memOpnd = static_cast(opnd); RegOperand *base = memOpnd.GetBaseRegister(); RegOperand *index = memOpnd.GetIndexRegister(); if ((base != nullptr && base->GetRegisterNumber() == regNO) || @@ -993,7 +992,7 @@ bool AArch64ReachingDefinition::FindMemUseBetweenInsn(uint32 offset, Insn *start continue; } - auto &memOpnd = static_cast(opnd); + auto &memOpnd = static_cast(opnd); RegOperand *base = memOpnd.GetBaseRegister(); if (base == nullptr || !IsFrameReg(*base)) { continue; @@ -1014,7 +1013,7 @@ bool AArch64ReachingDefinition::FindMemUseBetweenInsn(uint32 offset, Insn *start } } - AArch64OpndProp *regProp = static_cast(md->operand[i]); + OpndProp *regProp = md->operand[i]; bool isUse = regProp->IsUse(); if (!isUse) { continue; @@ -1042,7 +1041,7 @@ InsnSet AArch64ReachingDefinition::FindDefForMemOpnd(Insn &insn, uint32 indexOrO Operand &opnd = insn.GetOperand(indexOrOffset); ASSERT(opnd.IsMemoryAccessOperand(), "opnd must be MemOperand"); - auto &memOpnd = static_cast(opnd); + auto &memOpnd = static_cast(opnd); RegOperand *base = memOpnd.GetBaseRegister(); RegOperand *indexReg = memOpnd.GetIndexRegister(); @@ -1091,7 +1090,7 @@ InsnSet AArch64ReachingDefinition::FindDefForMemOpnd(Insn &insn, uint32 indexOrO InsnSet AArch64ReachingDefinition::FindUseForMemOpnd(Insn &insn, uint8 index, bool secondMem) const { Operand &opnd = insn.GetOperand(index); ASSERT(opnd.IsMemoryAccessOperand(), "opnd must be MemOperand"); - auto &memOpnd = static_cast(opnd); + auto &memOpnd = static_cast(opnd); RegOperand *base = memOpnd.GetBaseRegister(); InsnSet useInsnSet; @@ -1167,7 +1166,7 @@ void AArch64ReachingDefinition::InitGenUse(BB &bb, bool firstTime) { uint32 opndNum = insn->GetOperandSize(); for (uint32 i = 0; i < opndNum; ++i) { Operand &opnd = insn->GetOperand(i); - AArch64OpndProp *regProp = static_cast(md->operand[i]); + OpndProp *regProp = md->operand[i]; if (opnd.IsList() && (mode & kRDRegAnalysis)) { ASSERT(regProp->IsUse(), "ListOperand is used in insn"); InitInfoForListOpnd(bb, opnd); @@ -1200,7 +1199,7 @@ void AArch64ReachingDefinition::InitMemInfoForClearStackCall(Insn &callInsn) { void AArch64ReachingDefinition::InitInfoForMemOperand(Insn &insn, Operand &opnd, bool isDef) { ASSERT(opnd.IsMemoryAccessOperand(), "opnd must be MemOperand"); - AArch64MemOperand &memOpnd = static_cast(opnd); + MemOperand &memOpnd = static_cast(opnd); RegOperand *base = memOpnd.GetBaseRegister(); RegOperand *index = memOpnd.GetIndexRegister(); @@ -1239,7 +1238,7 @@ void AArch64ReachingDefinition::InitInfoForMemOperand(Insn &insn, Operand &opnd, if (index != nullptr) { regUse[insn.GetBB()->GetId()]->SetBit(index->GetRegisterNumber()); } - if (memOpnd.GetAddrMode() == AArch64MemOperand::kAddrModeBOi && + if (memOpnd.GetAddrMode() == 
MemOperand::kAddrModeBOi && (memOpnd.IsPostIndexed() || memOpnd.IsPreIndexed())) { /* Base operand has changed. */ regGen[insn.GetBB()->GetId()]->SetBit(base->GetRegisterNumber()); diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reg_coalesce.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reg_coalesce.cpp index 0abd0cf8f3c9bfe24271a9324ecc241a00fa6a65..246315542e1d670bbfe545dfa8f3f6d030999ab6 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reg_coalesce.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reg_coalesce.cpp @@ -28,12 +28,12 @@ namespace maplebe { #define REGCOAL_DUMP CG_DEBUG_FUNC(*cgFunc) -bool AArch64RegisterCoalesce::IsUnconcernedReg(const RegOperand ®Opnd) const { +bool AArch64LiveIntervalAnalysis::IsUnconcernedReg(const RegOperand ®Opnd) const { RegType regType = regOpnd.GetRegisterType(); if (regType == kRegTyCc || regType == kRegTyVary) { return true; } - if (regOpnd.IsConstReg()) { + if (regOpnd.GetRegisterNumber() == RZR) { return true; } if (!regOpnd.IsVirtualRegister()) { @@ -42,7 +42,7 @@ bool AArch64RegisterCoalesce::IsUnconcernedReg(const RegOperand ®Opnd) const return false; } -LiveInterval *AArch64RegisterCoalesce::GetOrCreateLiveInterval(regno_t regNO) { +LiveInterval *AArch64LiveIntervalAnalysis::GetOrCreateLiveInterval(regno_t regNO) { LiveInterval *lr = GetLiveInterval(regNO); if (lr == nullptr) { lr = memPool->New(alloc); @@ -52,14 +52,14 @@ LiveInterval *AArch64RegisterCoalesce::GetOrCreateLiveInterval(regno_t regNO) { return lr; } -void AArch64RegisterCoalesce::UpdateCallInfo() { +void AArch64LiveIntervalAnalysis::UpdateCallInfo() { for (auto vregNO : vregLive) { LiveInterval *lr = GetLiveInterval(vregNO); lr->IncNumCall(); } } -void AArch64RegisterCoalesce::SetupLiveIntervalByOp(Operand &op, Insn &insn, bool isDef) { +void AArch64LiveIntervalAnalysis::SetupLiveIntervalByOp(Operand &op, Insn &insn, bool isDef) { if (!op.IsRegister()) { return; } @@ -84,7 +84,7 @@ void AArch64RegisterCoalesce::SetupLiveIntervalByOp(Operand &op, Insn &insn, boo } } -void AArch64RegisterCoalesce::ComputeLiveIntervalsForEachDefOperand(Insn &insn) { +void AArch64LiveIntervalAnalysis::ComputeLiveIntervalsForEachDefOperand(Insn &insn) { const AArch64MD *md = &AArch64CG::kMd[static_cast(insn).GetMachineOpcode()]; uint32 opndNum = insn.GetOperandSize(); for (uint32 i = 0; i < opndNum; ++i) { @@ -96,7 +96,7 @@ void AArch64RegisterCoalesce::ComputeLiveIntervalsForEachDefOperand(Insn &insn) } Operand &opnd = insn.GetOperand(i); if (opnd.IsMemoryAccessOperand()) { - auto &memOpnd = static_cast(opnd); + auto &memOpnd = static_cast(opnd); if (!memOpnd.IsIntactIndexed()) { SetupLiveIntervalByOp(opnd, insn, true); } @@ -108,7 +108,7 @@ void AArch64RegisterCoalesce::ComputeLiveIntervalsForEachDefOperand(Insn &insn) } } -void AArch64RegisterCoalesce::ComputeLiveIntervalsForEachUseOperand(Insn &insn) { +void AArch64LiveIntervalAnalysis::ComputeLiveIntervalsForEachUseOperand(Insn &insn) { const AArch64MD *md = &AArch64CG::kMd[static_cast(insn).GetMachineOpcode()]; uint32 opndNum = insn.GetOperandSize(); for (uint32 i = 0; i < opndNum; ++i) { @@ -128,7 +128,7 @@ void AArch64RegisterCoalesce::ComputeLiveIntervalsForEachUseOperand(Insn &insn) SetupLiveIntervalByOp(*op, insn, false); } } else if (opnd.IsMemoryAccessOperand()) { - auto &memOpnd = static_cast(opnd); + auto &memOpnd = static_cast(opnd); Operand *base = memOpnd.GetBaseRegister(); Operand *offset = memOpnd.GetIndexRegister(); if (base != nullptr) { @@ -137,6 +137,11 @@ void 
AArch64RegisterCoalesce::ComputeLiveIntervalsForEachUseOperand(Insn &insn) if (offset != nullptr) { SetupLiveIntervalByOp(*offset, insn, false); } + } else if (opnd.IsPhi()) { + auto &phiOpnd = static_cast(opnd); + for (auto opIt : phiOpnd.GetOperands()) { + SetupLiveIntervalByOp(*opIt.second, insn, false); + } } else { SetupLiveIntervalByOp(opnd, insn, false); } @@ -144,7 +149,7 @@ void AArch64RegisterCoalesce::ComputeLiveIntervalsForEachUseOperand(Insn &insn) } /* handle live range for bb->live_out */ -void AArch64RegisterCoalesce::SetupLiveIntervalInLiveOut(regno_t liveOut, const BB &bb, uint32 currPoint) { +void AArch64LiveIntervalAnalysis::SetupLiveIntervalInLiveOut(regno_t liveOut, const BB &bb, uint32 currPoint) { --currPoint; if (liveOut >= kAllRegNum) { @@ -155,7 +160,7 @@ void AArch64RegisterCoalesce::SetupLiveIntervalInLiveOut(regno_t liveOut, const } } -void AArch64RegisterCoalesce::CollectCandidate() { +void AArch64LiveIntervalAnalysis::CollectCandidate() { for (size_t bbIdx = bfs->sortedBBs.size(); bbIdx > 0; --bbIdx) { BB *bb = bfs->sortedBBs[bbIdx - 1]; @@ -180,7 +185,7 @@ void AArch64RegisterCoalesce::CollectCandidate() { } } -void AArch64RegisterCoalesce::ComputeLiveIntervals() { +void AArch64LiveIntervalAnalysis::ComputeLiveIntervals() { /* collect refpoints and build interference only for cands. */ CollectCandidate(); @@ -203,8 +208,10 @@ void AArch64RegisterCoalesce::ComputeLiveIntervals() { } FOR_BB_INSNS_REV_SAFE(insn, bb, ninsn) { - insn->SetId(currPoint); - if (!insn->IsMachineInstruction()) { + if (!runAnalysis) { + insn->SetId(currPoint); + } + if (!insn->IsMachineInstruction() && !insn->IsPhi()) { --currPoint; if (ninsn != nullptr && ninsn->IsCall()) { UpdateCallInfo(); @@ -240,7 +247,7 @@ void AArch64RegisterCoalesce::ComputeLiveIntervals() { } } -void AArch64RegisterCoalesce::CheckInterference(LiveInterval &li1, LiveInterval &li2) { +void AArch64LiveIntervalAnalysis::CheckInterference(LiveInterval &li1, LiveInterval &li2) { auto ranges1 = li1.GetRanges(); auto ranges2 = li2.GetRanges(); bool conflict = false; @@ -272,45 +279,37 @@ void AArch64RegisterCoalesce::CheckInterference(LiveInterval &li1, LiveInterval } /* replace regDest with regSrc. 
*/ -void AArch64RegisterCoalesce::CoalesceRegPair(RegOperand ®Dest, RegOperand ®Src) { +void AArch64LiveIntervalAnalysis::CoalesceRegPair(RegOperand ®Dest, RegOperand ®Src) { LiveInterval *lrDest = GetLiveInterval(regDest.GetRegisterNumber()); LiveInterval *lrSrc = GetLiveInterval(regSrc.GetRegisterNumber()); /* replace dest with src */ + if (regDest.GetSize() != regSrc.GetSize()) { + CHECK_FATAL(cgFunc->IsExtendReg(regDest.GetRegisterNumber()) || + cgFunc->IsExtendReg(regSrc.GetRegisterNumber()), "expect equal size in reg coalesce"); + cgFunc->InsertExtendSet(regSrc.GetRegisterNumber()); + } + regno_t destNO = regDest.GetRegisterNumber(); /* replace all refPoints */ for (auto insn : lrDest->GetDefPoint()) { - cgFunc->ReplaceOpndInInsn(regDest, regSrc, *insn); + cgFunc->ReplaceOpndInInsn(regDest, regSrc, *insn, destNO); } for (auto insn : lrDest->GetUsePoint()) { - cgFunc->ReplaceOpndInInsn(regDest, regSrc, *insn); + cgFunc->ReplaceOpndInInsn(regDest, regSrc, *insn, destNO); } - /* merge destlr to srclr */ - lrSrc->MergeRanges(*lrDest); - - /* update conflicts */ - lrSrc->MergeConflict(*lrDest); - for (auto reg : lrDest->GetConflict()) { - LiveInterval *conf = GetLiveInterval(reg); - if (conf) { - conf->AddConflict(lrSrc->GetRegNO()); - } - } - - /* merge refpoints */ - lrSrc->MergeRefPoints(*lrDest); - - vregIntervals.erase(lrDest->GetRegNO()); + ASSERT(lrDest && lrSrc, "get live interval failed"); + CoalesceLiveIntervals(*lrDest, *lrSrc); } -void AArch64RegisterCoalesce::CollectMoveForEachBB(BB &bb, std::vector &movInsns) { +void AArch64LiveIntervalAnalysis::CollectMoveForEachBB(BB &bb, std::vector &movInsns) { FOR_BB_INSNS_SAFE(insn, &bb, ninsn) { if (!insn->IsMachineInstruction()) { continue; } if (insn->IsMoveRegReg()) { - RegOperand ®Dest = static_cast(insn->GetOperand(kInsnFirstOpnd)); - RegOperand ®Src = static_cast(insn->GetOperand(kInsnSecondOpnd)); + auto ®Dest = static_cast(insn->GetOperand(kInsnFirstOpnd)); + auto ®Src = static_cast(insn->GetOperand(kInsnSecondOpnd)); if (!regSrc.IsVirtualRegister() || !regDest.IsVirtualRegister()) { continue; } @@ -322,7 +321,7 @@ void AArch64RegisterCoalesce::CollectMoveForEachBB(BB &bb, std::vector &m } } -void AArch64RegisterCoalesce::CoalesceMoves(std::vector &movInsns, bool phiOnly) { +void AArch64LiveIntervalAnalysis::CoalesceMoves(std::vector &movInsns, bool phiOnly) { AArch64CGFunc *a64CGFunc = static_cast(cgFunc); bool changed = false; do { @@ -374,7 +373,7 @@ void AArch64RegisterCoalesce::CoalesceMoves(std::vector &movInsns, bool p } while (changed); } -void AArch64RegisterCoalesce::CoalesceRegisters() { +void AArch64LiveIntervalAnalysis::CoalesceRegisters() { std::vector movInsns; AArch64CGFunc *a64CGFunc = static_cast(cgFunc); if (REGCOAL_DUMP) { @@ -392,7 +391,7 @@ void AArch64RegisterCoalesce::CoalesceRegisters() { for (size_t bbIdx = bfs->sortedBBs.size(); bbIdx > 0; --bbIdx) { BB *bb = bfs->sortedBBs[bbIdx - 1]; - if (bb->GetCritical() == true) { + if (bb->GetCritical()) { continue; } CollectMoveForEachBB(*bb, movInsns); diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reg_info.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reg_info.cpp index 581af8b9a04b2b2ad6db123ebb0ef17c379ac0c4..65d567493ff3415a39b52cceb7296b3e5d39bc5c 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reg_info.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reg_info.cpp @@ -105,10 +105,10 @@ bool AArch64RegInfo::IsUnconcernedReg(const RegOperand ®Opnd) const { if (regType == kRegTyCc || regType == kRegTyVary) { return true; } - 
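Editor's note on CoalesceRegPair above: after dst's reference points have been rewritten to src, dst's interval is folded into src's by CoalesceLiveIntervals. A sketch of what that merge must do, with simplified types:

```cpp
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

struct LI {
  uint32_t regNO = 0;
  std::vector<std::pair<uint32_t, uint32_t>> ranges;
  std::vector<uint32_t> conflicts;  // reg numbers this interval clashes with
};

void Coalesce(LI &dst, LI &src, std::vector<LI> &all) {
  // Merge ranges and conflicts of dst into src.
  src.ranges.insert(src.ranges.end(), dst.ranges.begin(), dst.ranges.end());
  std::sort(src.ranges.begin(), src.ranges.end());
  src.conflicts.insert(src.conflicts.end(), dst.conflicts.begin(),
                       dst.conflicts.end());
  // Everything that conflicted with dst now conflicts with src instead.
  for (LI &li : all) {
    for (uint32_t &c : li.conflicts) {
      if (c == dst.regNO) {
        c = src.regNO;
      }
    }
  }
  dst.ranges.clear();  // dst's interval is retired (erased from the map)
}
```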
if (regOpnd.IsConstReg()) { + uint32 regNO = regOpnd.GetRegisterNumber(); + if (regNO == RZR) { return true; } - uint32 regNO = regOpnd.GetRegisterNumber(); return IsUnconcernedReg(regNO); } @@ -119,8 +119,7 @@ RegOperand& AArch64RegInfo::GetOrCreatePhyRegOperand(regno_t regNO, uint32 size, ListOperand* AArch64RegInfo::CreateListOperand() { AArch64CGFunc *aarch64CgFunc = static_cast(GetCurrFunction()); - return static_cast(aarch64CgFunc->GetMemoryPool()->New - (*aarch64CgFunc->GetFuncScopeAllocator())); + return (aarch64CgFunc->CreateListOpnd(*aarch64CgFunc->GetFuncScopeAllocator())); } Insn *AArch64RegInfo::BuildMovInstruction(Operand &opnd0, Operand &opnd1) { diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_regsaves.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_regsaves.cpp index 8348ada6aee5c040183aca60ad2b42d426270f83..f50bea9c19501f1a2ac34bda945a66bf3b2fe9bf 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_regsaves.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_regsaves.cpp @@ -49,6 +49,10 @@ void AArch64RegSavesOpt::InitData() { aarchCGFunc->GetProEpilogSavedRegs().push_back(RLR); } } + + for (auto bb : bfs->sortedBBs) { + SetId2bb(bb); + } } @@ -122,7 +126,7 @@ void AArch64RegSavesOpt::ProcessListOpnd(BB &bb, Operand &opnd) { } void AArch64RegSavesOpt::ProcessMemOpnd(BB &bb, Operand &opnd) { - auto &memOpnd = static_cast(opnd); + auto &memOpnd = static_cast(opnd); Operand *base = memOpnd.GetBaseRegister(); Operand *offset = memOpnd.GetIndexRegister(); if (base != nullptr) { @@ -160,7 +164,7 @@ void AArch64RegSavesOpt::GetLocalDefUse() { uint32 opndNum = insn->GetOperandSize(); for (uint32 i = 0; i < opndNum; ++i) { Operand &opnd = insn->GetOperand(i); - AArch64OpndProp *regProp = static_cast(md->operand[i]); + OpndProp *regProp = md->operand[i]; bool isDef = regProp->IsRegDef(); bool isUse = regProp->IsRegUse(); if (opnd.IsList()) { @@ -405,7 +409,7 @@ void AArch64RegSavesOpt::DetermineCalleeSaveLocationsPre() { /* Determine calleesave regs restore locations by calling ssu-pre, previous bbSavedRegs memory is cleared and restore locs recorded in it */ -void AArch64RegSavesOpt::DetermineCalleeRestoreLocations() { +bool AArch64RegSavesOpt::DetermineCalleeRestoreLocations() { AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); MapleAllocator sprealloc(memPool); if (RS_DUMP) { @@ -444,8 +448,24 @@ void AArch64RegSavesOpt::DetermineCalleeRestoreLocations() { /* something gone wrong, skip this reg */ wkCand.restoreAtEpilog = true; } + /* split empty block for critical edge present, skip function */ + MapleSet rset = wkCand.restoreAtEntryBBs; + for (auto bbid : wkCand.restoreAtExitBBs) { + rset.insert(bbid); + } + for (auto bbid : rset) { + BB *bb = GetId2bb(bbid); + if (bb->GetKind() == BB::kBBGoto && bb->NumInsn() == 1) { + aarchCGFunc->GetProEpilogSavedRegs().clear(); + const MapleVector &callees = aarchCGFunc->GetCalleeSavedRegs(); + for (auto areg : callees) { + aarchCGFunc->GetProEpilogSavedRegs().push_back(areg); + } + return false; + } + } if (wkCand.restoreAtEpilog) { - /* Restore cannot be applied, skip this reg and place save/restore + in prolog/epilog */ for (int bid = 1; bid < bbSavedRegs.size(); bid++) { SavedRegInfo *sp = bbSavedRegs[bid]; @@ -473,36 +493,31 @@ void AArch64RegSavesOpt::DetermineCalleeRestoreLocations() { GetbbSavedRegsEntry(entBB)->InsertEntryReg(reg); } for (uint32 exitBB : wkCand.restoreAtExitBBs) { - for (BB *bb : bfs->sortedBBs) { - if (bb->GetId() == 
exitBB) { - if (bb->GetKind() == BB::kBBIgoto) { - CHECK_FATAL(false, "igoto detected"); - } - Insn *lastInsn = bb->GetLastInsn(); - if (lastInsn != nullptr && lastInsn->IsBranch() && - (!lastInsn->GetOperand(0).IsRegister() || /* not a reg OR */ - (!AArch64Abi::IsCalleeSavedReg( /* reg but not cs */ - static_cast(static_cast( - lastInsn->GetOperand(0)).GetRegisterNumber()))))) { - /* To insert in this block - 1 instr */ - SavedRegInfo *sp = GetbbSavedRegsEntry(exitBB); - sp->InsertExitReg(reg); - sp->insertAtLastMinusOne = true; - } else if (bb->GetSuccs().size() > 1) { - for (BB *sbb : bb->GetSuccs()) { - if (sbb->GetPreds().size() > 1) { - CHECK_FATAL(false, "critical edge detected"); - } - } - for (BB *sbb : bb->GetSuccs()) { - /* To insert at all succs */ - GetbbSavedRegsEntry(sbb->GetId())->InsertEntryReg(reg); - } - } else { - /* otherwise, BB_FT etc */ - GetbbSavedRegsEntry(exitBB)->InsertExitReg(reg); + BB *bb = GetId2bb(exitBB); + if (bb->GetKind() == BB::kBBIgoto) { + CHECK_FATAL(false, "igoto detected"); + } + Insn *lastInsn = bb->GetLastInsn(); + if (lastInsn != nullptr && lastInsn->IsBranch() && + (!lastInsn->GetOperand(0).IsRegister() || /* not a reg OR */ + (!AArch64Abi::IsCalleeSavedReg( /* reg but not cs */ + static_cast(static_cast( + lastInsn->GetOperand(0)).GetRegisterNumber()))))) { + /* To insert in this block - 1 instr */ + SavedRegInfo *sp = GetbbSavedRegsEntry(exitBB); + sp->InsertExitReg(reg); + sp->insertAtLastMinusOne = true; + } else if (bb->GetSuccs().size() > 1) { + for (BB *sbb : bb->GetSuccs()) { + if (sbb->GetPreds().size() > 1) { + CHECK_FATAL(false, "critical edge detected"); } + /* To insert at all succs */ + GetbbSavedRegsEntry(sbb->GetId())->InsertEntryReg(reg); } + } else { + /* otherwise, BB_FT etc */ + GetbbSavedRegsEntry(exitBB)->InsertExitReg(reg); } if (RS_DUMP) { std::string r = reg <= R28 ? 
"R" : "V"; @@ -511,6 +526,7 @@ void AArch64RegSavesOpt::DetermineCalleeRestoreLocations() { } } } + return true; } int32 AArch64RegSavesOpt::FindNextOffsetForCalleeSave() { @@ -806,8 +822,11 @@ void AArch64RegSavesOpt::Run() { } /* Determine restore sites */ - DetermineCalleeRestoreLocations(); + if (!DetermineCalleeRestoreLocations()) { + return; + } +#ifdef VERIFY /* Verify saves/restores are in pair */ if (RS_DUMP) { std::vector rlist = { R19, R20, R21, R22, R23, R24, R25, R26, R27, R28 }; @@ -820,6 +839,7 @@ void AArch64RegSavesOpt::Run() { mLog << "\nVerify Done\n"; } } +#endif /* Generate callee save instrs at found sites */ InsertCalleeSaveCode(); diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_schedule.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_schedule.cpp index 4d5fb83577e71629fc34c23662c16cef30617705..b118299ff3bcc6b65c747c4d12bbc69554d478a0 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_schedule.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_schedule.cpp @@ -90,13 +90,13 @@ bool AArch64Schedule::CanCombine(const Insn &insn) const { } ASSERT(insn.GetOperand(1).IsMemoryAccessOperand(), "expects mem operands"); - auto &memOpnd = static_cast(insn.GetOperand(1)); - AArch64MemOperand::AArch64AddressingMode addrMode = memOpnd.GetAddrMode(); - if ((addrMode != AArch64MemOperand::kAddrModeBOi) || !memOpnd.IsIntactIndexed()) { + auto &memOpnd = static_cast(insn.GetOperand(1)); + MemOperand::AArch64AddressingMode addrMode = memOpnd.GetAddrMode(); + if ((addrMode != MemOperand::kAddrModeBOi) || !memOpnd.IsIntactIndexed()) { return false; } - auto ®Opnd = static_cast(insn.GetOperand(0)); + auto ®Opnd = static_cast(insn.GetOperand(0)); if (regOpnd.GetSize() != memOpnd.GetSize()) { return false; } @@ -108,7 +108,7 @@ bool AArch64Schedule::CanCombine(const Insn &insn) const { } #endif /* USE_32BIT_REF */ - AArch64OfstOperand *offset = memOpnd.GetOffsetImmediate(); + OfstOperand *offset = memOpnd.GetOffsetImmediate(); if (offset == nullptr) { return false; } @@ -179,7 +179,7 @@ void AArch64Schedule::MemoryAccessPairOpt() { void AArch64Schedule::FindAndCombineMemoryAccessPair(const std::vector &memList) { ASSERT(!memList.empty(), "memList should not be empty"); CHECK_FATAL(memList[0]->GetInsn() != nullptr, "memList[0]'s insn should not be nullptr"); - AArch64MemOperand *currMemOpnd = static_cast(memList[0]->GetInsn()->GetMemOpnd()); + MemOperand *currMemOpnd = static_cast(memList[0]->GetInsn()->GetMemOpnd()); ASSERT(currMemOpnd != nullptr, "opnd should not be nullptr"); ASSERT(currMemOpnd->IsMemoryAccessOperand(), "opnd should be memOpnd"); int32 currOffsetVal = static_cast(currMemOpnd->GetOffsetImmediate()->GetOffsetValue()); @@ -189,7 +189,7 @@ void AArch64Schedule::FindAndCombineMemoryAccessPair(const std::vector ASSERT((*it)->GetInsn() != nullptr, "null ptr check"); if (currMop == (*it)->GetInsn()->GetMachineOpcode()) { - AArch64MemOperand *nextMemOpnd = static_cast((*it)->GetInsn()->GetMemOpnd()); + MemOperand *nextMemOpnd = static_cast((*it)->GetInsn()->GetMemOpnd()); CHECK_FATAL(nextMemOpnd != nullptr, "opnd should not be nullptr"); CHECK_FATAL(nextMemOpnd->IsMemoryAccessOperand(), "opnd should be MemOperand"); int32 nextOffsetVal = static_cast(nextMemOpnd->GetOffsetImmediate()->GetOffsetValue()); diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_ssa.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_ssa.cpp index 13981655a460fd9e1a28cc7244788f6965de6fab..91c147f6dc9f31603c3cfcba189e060ad4df9040 100644 --- 
a/src/mapleall/maple_be/src/cg/aarch64/aarch64_ssa.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_ssa.cpp @@ -25,14 +25,14 @@ void AArch64CGSSAInfo::RenameInsn(Insn &insn) { } for (int i = opndNum - 1; i >= 0; --i) { Operand &opnd = insn.GetOperand(static_cast(i)); - auto *opndProp = static_cast(md->operand[static_cast(i)]); + auto *opndProp = (md->operand[static_cast(i)]); A64SSAOperandRenameVisitor renameVisitor(*this, insn, *opndProp, i); opnd.Accept(renameVisitor); } } -AArch64MemOperand *AArch64CGSSAInfo::CreateMemOperand(AArch64MemOperand &memOpnd, bool isOnSSA) { - return isOnSSA ? static_cast(memOpnd.Clone(*memPool)) : +MemOperand *AArch64CGSSAInfo::CreateMemOperand(MemOperand &memOpnd, bool isOnSSA) { + return isOnSSA ? memOpnd.Clone(*memPool) : &static_cast(cgFunc)->GetOrCreateMemOpnd(memOpnd); } @@ -75,7 +75,9 @@ RegOperand *AArch64CGSSAInfo::CreateSSAOperand(RegOperand &virtualOpnd) { ssaRegNO++; SSARegNObase++; } - RegOperand *newVreg = memPool->New(ssaRegNO, virtualOpnd.GetSize(), virtualOpnd.GetRegisterType()); + RegOperand *newVreg = memPool->New(ssaRegNO, + virtualOpnd.GetSize(), virtualOpnd.GetRegisterType()); + newVreg->SetValidBitsNum(virtualOpnd.GetValidBitsNum()); newVreg->SetOpndSSAForm(); return newVreg; } @@ -83,10 +85,10 @@ RegOperand *AArch64CGSSAInfo::CreateSSAOperand(RegOperand &virtualOpnd) { void AArch64CGSSAInfo::ReplaceInsn(Insn &oriInsn, Insn &newInsn) { A64OpndSSAUpdateVsitor ssaUpdator(*this); auto UpdateInsnSSAInfo = [&ssaUpdator](Insn &curInsn, bool isDelete) { - const AArch64MD *md = &AArch64CG::kMd[static_cast(curInsn).GetMachineOpcode()]; + const AArch64MD *md = &AArch64CG::kMd[curInsn.GetMachineOpcode()]; for (uint32 i = 0; i < curInsn.GetOperandSize(); ++i) { Operand &opnd = curInsn.GetOperand(i); - auto *opndProp = static_cast(md->operand[i]); + auto *opndProp = md->operand[i]; if (isDelete) { ssaUpdator.MarkDecrease(); } else { @@ -102,14 +104,26 @@ void AArch64CGSSAInfo::ReplaceInsn(Insn &oriInsn, Insn &newInsn) { CHECK_FATAL(!ssaUpdator.HasDeleteDef(), "delete def point in replace insn, please check"); } +/* do not break binding between input and output operands in asm */ +void AArch64CGSSAInfo::CheckAsmDUbinding(Insn &insn, VRegVersion *toBeReplaced, VRegVersion *newVersion) { + if (insn.GetMachineOpcode() == MOP_asm) { + newVersion->GetOriginalRegNO(); + for (auto &opndIt : static_cast(insn.GetOperand(kAsmOutputListOpnd)).GetOperands()) { + if (opndIt->IsSSAForm()) { + VRegVersion *defVersion = FindSSAVersion(opndIt->GetRegisterNumber()); + if (defVersion && defVersion->GetOriginalRegNO() == toBeReplaced->GetOriginalRegNO()) { + insn.AddRegBinding(defVersion->GetOriginalRegNO(), newVersion->GetSSAvRegOpnd()->GetRegisterNumber()); + } + } + } + } +} + void AArch64CGSSAInfo::ReplaceAllUse(VRegVersion *toBeReplaced, VRegVersion *newVersion) { MapleUnorderedMap &useList = toBeReplaced->GetAllUseInsns(); for (auto it = useList.begin(); it != useList.end();) { Insn *useInsn = it->second->GetInsn(); - if (useInsn->GetMachineOpcode() == MOP_asm) { - ++it; - continue; - } + CheckAsmDUbinding(*useInsn, toBeReplaced, newVersion); for (auto &opndIt : it->second->GetOperands()) { Operand &opnd = useInsn->GetOperand(opndIt.first); A64ReplaceRegOpndVisitor replaceRegOpndVisitor( @@ -128,13 +142,13 @@ void AArch64CGSSAInfo::CreateNewInsnSSAInfo(Insn &newInsn) { for (uint32 i = 0; i < opndNum; i++) { Operand &opnd = newInsn.GetOperand(i); const AArch64MD *md = &AArch64CG::kMd[static_cast(newInsn).GetMachineOpcode()]; - auto *opndProp = 
static_cast(md->operand[i]); + auto *opndProp = md->operand[i]; if (opndProp->IsDef() && opndProp->IsUse()) { CHECK_FATAL(false, "do not support both def and use"); } if (opndProp->IsDef()) { CHECK_FATAL(opnd.IsRegister(), "defOpnd must be reg"); - RegOperand &defRegOpnd = static_cast(opnd); + auto &defRegOpnd = static_cast(opnd); regno_t defRegNO = defRegOpnd.GetRegisterNumber(); uint32 defVIdx = IncreaseVregCount(defRegNO); RegOperand *defSSAOpnd = CreateSSAOperand(defRegOpnd); @@ -183,29 +197,26 @@ void AArch64CGSSAInfo::DumpInsnInSSAForm(const Insn &insn) const { } void A64SSAOperandRenameVisitor::Visit(RegOperand *v) { - auto *regOpnd = static_cast(v); - auto *a64OpndProp = static_cast(opndProp); + auto *regOpnd = static_cast(v); if (regOpnd->IsVirtualRegister()) { - if (a64OpndProp->IsRegDef() && a64OpndProp->IsRegUse()) { /* both def use */ + if (opndProp->IsRegDef() && opndProp->IsRegUse()) { /* both def use */ insn->SetOperand(idx, *ssaInfo->GetRenamedOperand(*regOpnd, false, *insn, idx)); (void)ssaInfo->GetRenamedOperand(*regOpnd, true, *insn, idx); } else { - insn->SetOperand(idx, *ssaInfo->GetRenamedOperand(*regOpnd, a64OpndProp->IsRegDef(), *insn, idx)); + insn->SetOperand(idx, *ssaInfo->GetRenamedOperand(*regOpnd, opndProp->IsRegDef(), *insn, idx)); } } } -void A64SSAOperandRenameVisitor::Visit(MemOperand*v) { - auto *a64MemOpnd = static_cast(v); +void A64SSAOperandRenameVisitor::Visit(MemOperand *a64MemOpnd) { RegOperand *base = a64MemOpnd->GetBaseRegister(); RegOperand *index = a64MemOpnd->GetIndexRegister(); bool needCopy = (base != nullptr && base->IsVirtualRegister()) || (index != nullptr && index->IsVirtualRegister()); if (needCopy) { - AArch64MemOperand *cpyMem = ssaInfo->CreateMemOperand(*a64MemOpnd, true); + MemOperand *cpyMem = ssaInfo->CreateMemOperand(*a64MemOpnd, true); if (base != nullptr && base->IsVirtualRegister()) { bool isDef = !a64MemOpnd->IsIntactIndexed(); - cpyMem->SetBaseRegister( - *static_cast(ssaInfo->GetRenamedOperand(*base, isDef, *insn, idx))); + cpyMem->SetBaseRegister(*ssaInfo->GetRenamedOperand(*base, isDef, *insn, idx)); } if (index != nullptr && index->IsVirtualRegister()) { cpyMem->SetIndexRegister(*ssaInfo->GetRenamedOperand(*index, false, *insn, idx)); @@ -215,12 +226,10 @@ void A64SSAOperandRenameVisitor::Visit(MemOperand*v) { } void A64SSAOperandRenameVisitor::Visit(ListOperand *v) { - auto *a64ListOpnd = static_cast(v); bool isAsm = insn->GetMachineOpcode() == MOP_asm; /* record the orignal list order */ std::list tempList; - auto& opndList = a64ListOpnd->GetOperands(); - + auto& opndList = v->GetOperands(); while (!opndList.empty()) { auto* op = opndList.front(); opndList.pop_front(); @@ -235,27 +244,30 @@ void A64SSAOperandRenameVisitor::Visit(ListOperand *v) { RegOperand *renameOpnd = ssaInfo->GetRenamedOperand(*op, isDef, *insn, idx); tempList.push_back(renameOpnd); } - ASSERT(a64ListOpnd->GetOperands().empty(), "need to clean list"); - a64ListOpnd->GetOperands().assign(tempList.begin(), tempList.end()); + ASSERT(v->GetOperands().empty(), "need to clean list"); + v->GetOperands().assign(tempList.begin(), tempList.end()); } -void A64OpndSSAUpdateVsitor::Visit(RegOperand *v) { - auto *regOpnd = static_cast(v); - auto *a64OpndProp = static_cast(opndProp); +void A64OpndSSAUpdateVsitor::Visit(RegOperand *regOpnd) { if (regOpnd->IsSSAForm()) { - CHECK_FATAL(!(a64OpndProp->IsRegDef() && a64OpndProp->IsRegUse()), "do not support yet"); - if (a64OpndProp->IsRegDef()){ - UpdateRegDef(regOpnd->GetRegisterNumber()); - } else if 
(a64OpndProp->IsRegUse()) { + if (opndProp->IsRegDef() && opndProp->IsRegUse()) { UpdateRegUse(regOpnd->GetRegisterNumber()); + UpdateRegDef(regOpnd->GetRegisterNumber()); } else { - ASSERT(false, "invalid opnd"); + if (opndProp->IsRegDef()){ + UpdateRegDef(regOpnd->GetRegisterNumber()); + } else if (opndProp->IsRegUse()) { + UpdateRegUse(regOpnd->GetRegisterNumber()); + } else if (IsPhi()) { + UpdateRegUse(regOpnd->GetRegisterNumber()); + } else { + ASSERT(false, "invalid opnd"); + } } } } -void A64OpndSSAUpdateVsitor::Visit(maplebe::MemOperand *v) { - auto *a64MemOpnd = static_cast(v); +void A64OpndSSAUpdateVsitor::Visit(maplebe::MemOperand *a64MemOpnd) { RegOperand *base = a64MemOpnd->GetBaseRegister(); RegOperand *index = a64MemOpnd->GetIndexRegister(); if (base != nullptr && base->IsSSAForm()) { @@ -270,14 +282,21 @@ void A64OpndSSAUpdateVsitor::Visit(maplebe::MemOperand *v) { } } +void A64OpndSSAUpdateVsitor::Visit(PhiOperand *phiOpnd) { + SetPhi(true); + for (auto phiListIt = phiOpnd->GetOperands().begin(); phiListIt != phiOpnd->GetOperands().end(); ++phiListIt) { + Visit(phiListIt->second); + } + SetPhi(false); +} + void A64OpndSSAUpdateVsitor::Visit(ListOperand *v) { - auto *a64ListOpnd = static_cast(v); /* do not handle asm here, so there is no list def */ if (insn->GetMachineOpcode() == MOP_asm) { ASSERT(false, "do not support asm yet"); return; } - for (auto *op : a64ListOpnd->GetOperands()) { + for (auto *op : v->GetOperands()) { if (op->IsSSAForm()) { UpdateRegUse(op->GetRegisterNumber()); } @@ -310,8 +329,7 @@ void A64OpndSSAUpdateVsitor::UpdateRegDef(uint32 ssaIdx) { } } -void A64SSAOperandDumpVisitor::Visit(RegOperand *v) { - auto *a64RegOpnd = static_cast(v); +void A64SSAOperandDumpVisitor::Visit(RegOperand *a64RegOpnd) { ASSERT(!a64RegOpnd->IsConditionCode(), "both condi and reg"); if (a64RegOpnd->IsSSAForm()) { std::array prims = { "U", "R", "V", "C", "X", "Vra" }; @@ -330,8 +348,7 @@ void A64SSAOperandDumpVisitor::Visit(RegOperand *v) { } void A64SSAOperandDumpVisitor::Visit(ListOperand *v) { - auto *a64listOpnd = static_cast(v); - for (auto regOpnd : a64listOpnd->GetOperands()) { + for (auto regOpnd : v->GetOperands()) { if (regOpnd->IsSSAForm()) { Visit(regOpnd); continue; @@ -339,25 +356,23 @@ void A64SSAOperandDumpVisitor::Visit(ListOperand *v) { } } -void A64SSAOperandDumpVisitor::Visit(MemOperand *v) { - auto *a64MemOpnd = static_cast(v); +void A64SSAOperandDumpVisitor::Visit(MemOperand *a64MemOpnd) { if (a64MemOpnd->GetBaseRegister() != nullptr && a64MemOpnd->GetBaseRegister()->IsSSAForm()) { LogInfo::MapleLogger() << "Mem: "; Visit(a64MemOpnd->GetBaseRegister()); - if (a64MemOpnd->GetAddrMode() == AArch64MemOperand::kAddrModeBOi) { + if (a64MemOpnd->GetAddrMode() == MemOperand::kAddrModeBOi) { LogInfo::MapleLogger() << "offset:"; a64MemOpnd->GetOffsetOperand()->Dump(); } } if (a64MemOpnd->GetIndexRegister() != nullptr && a64MemOpnd->GetIndexRegister()->IsSSAForm() ) { - ASSERT(a64MemOpnd->GetAddrMode() == AArch64MemOperand::kAddrModeBOrX, "mem mode false"); + ASSERT(a64MemOpnd->GetAddrMode() == MemOperand::kAddrModeBOrX, "mem mode false"); LogInfo::MapleLogger() << "offset:"; Visit(a64MemOpnd->GetIndexRegister()); } } -void A64SSAOperandDumpVisitor::Visit(PhiOperand *v) { - auto *phi = static_cast(v); +void A64SSAOperandDumpVisitor::Visit(PhiOperand *phi) { for (auto phiListIt = phi->GetOperands().begin(); phiListIt != phi->GetOperands().end();) { Visit(phiListIt->second); LogInfo::MapleLogger() << " fBB<" << phiListIt->first << ">"; diff --git 
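Editor's note on the aarch64_ssa.cpp hunks above: the rename, update and dump passes all go through an operand visitor, so each pass implements only the operand kinds it cares about while Accept does the dispatch. A stripped-down model of that double dispatch; the real visitors carry insn/opndProp context as fields:

```cpp
struct RegOp;
struct MemOp;

struct OpndVisitor {
  virtual ~OpndVisitor() = default;
  virtual void Visit(RegOp &r) = 0;
  virtual void Visit(MemOp &m) = 0;
};

struct Opnd {
  virtual ~Opnd() = default;
  virtual void Accept(OpndVisitor &v) = 0;
};

struct RegOp : Opnd {
  int regNO = 0;
  void Accept(OpndVisitor &v) override { v.Visit(*this); }
};

struct MemOp : Opnd {
  RegOp *base = nullptr;
  void Accept(OpndVisitor &v) override { v.Visit(*this); }
};

// Analogous to A64OpndSSAUpdateVsitor's use tracking:
struct UseCounter : OpndVisitor {
  int uses = 0;
  void Visit(RegOp &) override { ++uses; }
  void Visit(MemOp &m) override {
    if (m.base != nullptr) {
      Visit(*m.base);  // a memory operand uses its base register
    }
  }
};
```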
a/src/mapleall/maple_be/src/cg/aarch64/aarch64_strldr.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_strldr.cpp
index c20bf784a575858d598c0894cfb5595ef6f568c8..ab84561b93e459b04b8f7f5cad04381590963253 100644
--- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_strldr.cpp
+++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_strldr.cpp
@@ -355,7 +355,7 @@ bool AArch64StoreLoadOpt::CheckReplaceReg(Insn &defInsn, Insn &currInsn, InsnSet
       tmpInsn = tmpInsn->GetNext();
     }
   } else {
-    regno_t defRegno = static_cast<AArch64RegOperand&>(defInsn.GetOperand(kInsnFirstOpnd)).GetRegisterNumber();
+    regno_t defRegno = static_cast<RegOperand&>(defInsn.GetOperand(kInsnFirstOpnd)).GetRegisterNumber();
     if (defRegno == replaceRegNo) {
       uint32 defLoopId = 0;
       uint32 curLoopId = 0;
@@ -394,11 +394,11 @@ bool AArch64StoreLoadOpt::CheckDefInsn(Insn &defInsn, Insn &currInsn) {
   }
   for (uint32 i = kInsnSecondOpnd; i < defInsn.GetOperandSize(); i++) {
     Operand &opnd = defInsn.GetOperand(i);
-    if (defInsn.IsMove() && opnd.IsRegister() && !static_cast<AArch64RegOperand&>(opnd).IsSPOrFP()) {
+    if (defInsn.IsMove() && opnd.IsRegister() && !cgFunc.IsSPOrFP(static_cast<RegOperand&>(opnd))) {
       return false;
     }
     if (opnd.IsRegister()) {
-      AArch64RegOperand &a64OpndTmp = static_cast<AArch64RegOperand&>(opnd);
+      RegOperand &a64OpndTmp = static_cast<RegOperand&>(opnd);
       regno_t replaceRegNo = a64OpndTmp.GetRegisterNumber();
       InsnSet newRegDefSet = cgFunc.GetRD()->FindDefForRegOpnd(currInsn, replaceRegNo, true);
       if (!CheckReplaceReg(defInsn, currInsn, newRegDefSet, replaceRegNo)) {
@@ -441,7 +441,7 @@ bool AArch64StoreLoadOpt::CheckNewAmount(const Insn &insn, uint32 newAmount) {
   }
 }
 
-bool AArch64StoreLoadOpt::CheckNewMemOffset(const Insn &insn, AArch64MemOperand *newMemOpnd, uint32 opndIdx) {
+bool AArch64StoreLoadOpt::CheckNewMemOffset(const Insn &insn, MemOperand *newMemOpnd, uint32 opndIdx) {
   AArch64CGFunc &a64CgFunc = static_cast<AArch64CGFunc&>(cgFunc);
   if ((newMemOpnd->GetOffsetImmediate() != nullptr) &&
       !a64CgFunc.IsOperandImmValid(insn.GetMachineOpcode(), newMemOpnd, opndIdx)) {
@@ -452,47 +452,47 @@ bool AArch64StoreLoadOpt::CheckNewMemOffset(const Insn &insn, AArch64MemOperand
     return false;
   }
   /* is ldp or stp, addrMode must be BOI */
-  if ((opndIdx == kInsnThirdOpnd) && (newMemOpnd->GetAddrMode() != AArch64MemOperand::kAddrModeBOi)) {
+  if ((opndIdx == kInsnThirdOpnd) && (newMemOpnd->GetAddrMode() != MemOperand::kAddrModeBOi)) {
     return false;
   }
   return true;
 }
 
-AArch64MemOperand *AArch64StoreLoadOpt::SelectReplaceExt(const Insn &defInsn, RegOperand &base, bool isSigned) {
-  AArch64MemOperand *newMemOpnd = nullptr;
-  AArch64RegOperand *newOffset = static_cast<AArch64RegOperand*>(&defInsn.GetOperand(kInsnSecondOpnd));
+MemOperand *AArch64StoreLoadOpt::SelectReplaceExt(const Insn &defInsn, RegOperand &base, bool isSigned) {
+  MemOperand *newMemOpnd = nullptr;
+  RegOperand *newOffset = static_cast<RegOperand*>(&defInsn.GetOperand(kInsnSecondOpnd));
   CHECK_FATAL(newOffset != nullptr, "newOffset is null!");
   /* defInsn is extend, currMemOpnd is same extend or shift */
   bool propExtend = (propMode == kPropShift) || ((propMode == kPropSignedExtend) && isSigned) ||
                     ((propMode == kPropUnsignedExtend) && !isSigned);
   if (propMode == kPropOffset) {
-    newMemOpnd = cgFunc.GetMemoryPool()->New<AArch64MemOperand>(
-        AArch64MemOperand::kAddrModeBOrX, k64BitSize, base, *newOffset, 0, isSigned);
+    newMemOpnd = static_cast<AArch64CGFunc&>(cgFunc).CreateMemOperand(
+        MemOperand::kAddrModeBOrX, k64BitSize, base, *newOffset, 0, isSigned);
   } else if (propExtend) {
-    newMemOpnd = cgFunc.GetMemoryPool()->New<AArch64MemOperand>(
-        AArch64MemOperand::kAddrModeBOrX, k64BitSize, base, *newOffset, amount, isSigned);
+    newMemOpnd = static_cast<AArch64CGFunc&>(cgFunc).CreateMemOperand(
+        MemOperand::kAddrModeBOrX, k64BitSize, base, *newOffset, amount, isSigned);
   } else {
     return nullptr;
   }
   return newMemOpnd;
 }
 
-AArch64MemOperand *AArch64StoreLoadOpt::HandleArithImmDef(AArch64RegOperand &replace,
-                                                          Operand *oldOffset, int64 defVal) {
+MemOperand *AArch64StoreLoadOpt::HandleArithImmDef(RegOperand &replace,
+                                                   Operand *oldOffset, int64 defVal) {
   if (propMode != kPropBase) {
     return nullptr;
   }
-  AArch64OfstOperand *newOfstImm = nullptr;
+  OfstOperand *newOfstImm = nullptr;
   if (oldOffset == nullptr) {
-    newOfstImm = cgFunc.GetMemoryPool()->New<AArch64OfstOperand>(defVal, k32BitSize);
+    newOfstImm = &static_cast<AArch64CGFunc&>(cgFunc).CreateOfstOpnd(defVal, k32BitSize);
   } else {
-    auto *ofstOpnd = static_cast<AArch64OfstOperand*>(oldOffset);
+    auto *ofstOpnd = static_cast<OfstOperand*>(oldOffset);
     CHECK_FATAL(ofstOpnd != nullptr, "oldOffsetOpnd is null");
-    newOfstImm = cgFunc.GetMemoryPool()->New<AArch64OfstOperand>(defVal + ofstOpnd->GetValue(), k32BitSize);
+    newOfstImm = &static_cast<AArch64CGFunc&>(cgFunc).CreateOfstOpnd(defVal + ofstOpnd->GetValue(), k32BitSize);
   }
   CHECK_FATAL(newOfstImm != nullptr, "newOffset is null!");
-  return cgFunc.GetMemoryPool()->New<AArch64MemOperand>(AArch64MemOperand::kAddrModeBOi, k64BitSize,
-                                                        replace, nullptr, newOfstImm, nullptr);
+  return static_cast<AArch64CGFunc&>(cgFunc).CreateMemOperand(MemOperand::kAddrModeBOi, k64BitSize,
+                                                              replace, nullptr, newOfstImm, nullptr);
 }
 
 /*
@@ -547,25 +547,25 @@ bool AArch64StoreLoadOpt::IsAdjacentBB(Insn &defInsn, Insn &curInsn) const {
  * | extend/lsl     | extend    | borx(with extend)
  * =============================================================================
  */
-AArch64MemOperand *AArch64StoreLoadOpt::SelectReplaceMem(Insn &defInsn, Insn &curInsn,
-                                                         RegOperand &base, Operand *offset) {
-  AArch64MemOperand *newMemOpnd = nullptr;
+MemOperand *AArch64StoreLoadOpt::SelectReplaceMem(Insn &defInsn, Insn &curInsn,
+                                                  RegOperand &base, Operand *offset) {
+  MemOperand *newMemOpnd = nullptr;
   MOperator opCode = defInsn.GetMachineOpcode();
-  AArch64RegOperand *replace = static_cast<AArch64RegOperand*>(&defInsn.GetOperand(kInsnSecondOpnd));
+  RegOperand *replace = static_cast<RegOperand*>(&defInsn.GetOperand(kInsnSecondOpnd));
   switch (opCode) {
     case MOP_xsubrri12:
     case MOP_wsubrri12: {
       if (!IsAdjacentBB(defInsn, curInsn)) {
         break;
       }
-      auto &immOpnd = static_cast<AArch64ImmOperand&>(defInsn.GetOperand(kInsnThirdOpnd));
+      auto &immOpnd = static_cast<ImmOperand&>(defInsn.GetOperand(kInsnThirdOpnd));
       int64 defVal = -(immOpnd.GetValue());
       newMemOpnd = HandleArithImmDef(*replace, offset, defVal);
       break;
     }
     case MOP_xaddrri12:
     case MOP_waddrri12: {
-      auto &immOpnd = static_cast<AArch64ImmOperand&>(defInsn.GetOperand(kInsnThirdOpnd));
+      auto &immOpnd = static_cast<ImmOperand&>(defInsn.GetOperand(kInsnThirdOpnd));
       int64 defVal = immOpnd.GetValue();
       newMemOpnd = HandleArithImmDef(*replace, offset, defVal);
       break;
@@ -575,84 +575,84 @@ AArch64MemOperand *AArch64StoreLoadOpt::SelectReplaceMem(Insn &defInsn, Insn &cu
     case MOP_dadd:
     case MOP_sadd: {
       if (propMode == kPropBase) {
-        OfstOperand *ofstOpnd = static_cast<OfstOperand*>(offset);
+        ImmOperand *ofstOpnd = static_cast<ImmOperand*>(offset);
         if (!ofstOpnd->IsZero()) {
           break;
         }
-        AArch64RegOperand *newOffset = static_cast<AArch64RegOperand*>(&defInsn.GetOperand(kInsnThirdOpnd));
+        RegOperand *newOffset = static_cast<RegOperand*>(&defInsn.GetOperand(kInsnThirdOpnd));
         CHECK_FATAL(newOffset != nullptr, "newOffset is null!");
-        newMemOpnd = cgFunc.GetMemoryPool()->New<AArch64MemOperand>(
-            AArch64MemOperand::kAddrModeBOrX, k64BitSize, *replace, newOffset, nullptr, nullptr);
+        newMemOpnd = static_cast<AArch64CGFunc&>(cgFunc).CreateMemOperand(
+            MemOperand::kAddrModeBOrX, k64BitSize, *replace, newOffset, nullptr, nullptr);
       }
       break;
     }
     case MOP_xadrpl12: {
       if (propMode == kPropBase) {
-        OfstOperand *ofstOpnd = static_cast<OfstOperand*>(offset);
+        ImmOperand *ofstOpnd = static_cast<ImmOperand*>(offset);
         CHECK_FATAL(ofstOpnd != nullptr, "oldOffset is null!");
         int64 val = ofstOpnd->GetValue();
         StImmOperand *offset1 = static_cast<StImmOperand*>(&defInsn.GetOperand(kInsnThirdOpnd));
         CHECK_FATAL(offset1 != nullptr, "offset1 is null!");
         val += offset1->GetOffset();
-        AArch64OfstOperand *newOfsetOpnd = cgFunc.GetMemoryPool()->New<AArch64OfstOperand>(val, k32BitSize);
+        OfstOperand *newOfsetOpnd = &static_cast<AArch64CGFunc&>(cgFunc).CreateOfstOpnd(val, k32BitSize);
         CHECK_FATAL(newOfsetOpnd != nullptr, "newOfsetOpnd is null!");
         const MIRSymbol *addr = offset1->GetSymbol();
-        newMemOpnd = cgFunc.GetMemoryPool()->New<AArch64MemOperand>(
-            AArch64MemOperand::kAddrModeLo12Li, k64BitSize, *replace, nullptr, newOfsetOpnd, addr);
+        newMemOpnd = static_cast<AArch64CGFunc&>(cgFunc).CreateMemOperand(
+            MemOperand::kAddrModeLo12Li, k64BitSize, *replace, nullptr, newOfsetOpnd, addr);
       }
       break;
     }
     case MOP_xmovrr:
     case MOP_wmovrr: {
       if (propMode == kPropBase) {
-        AArch64OfstOperand *offsetTmp = static_cast<AArch64OfstOperand*>(offset);
+        OfstOperand *offsetTmp = static_cast<OfstOperand*>(offset);
         CHECK_FATAL(offsetTmp != nullptr, "newOffset is null!");
-        newMemOpnd = cgFunc.GetMemoryPool()->New<AArch64MemOperand>(
-            AArch64MemOperand::kAddrModeBOi, k64BitSize, *replace, nullptr, offsetTmp, nullptr);
+        newMemOpnd = static_cast<AArch64CGFunc&>(cgFunc).CreateMemOperand(
+            MemOperand::kAddrModeBOi, k64BitSize, *replace, nullptr, offsetTmp, nullptr);
       } else if (propMode == kPropOffset) {
         /* if newOffset is SP, swap base and newOffset */
-        if (replace->IsSPOrFP()) {
-          newMemOpnd = cgFunc.GetMemoryPool()->New<AArch64MemOperand>(
-              AArch64MemOperand::kAddrModeBOrX, k64BitSize, *replace, &base, nullptr, nullptr);
+        if (cgFunc.IsSPOrFP(*replace)) {
+          newMemOpnd = static_cast<AArch64CGFunc&>(cgFunc).CreateMemOperand(
+              MemOperand::kAddrModeBOrX, k64BitSize, *replace, &base, nullptr, nullptr);
         } else {
-          newMemOpnd = cgFunc.GetMemoryPool()->New<AArch64MemOperand>(
-              AArch64MemOperand::kAddrModeBOrX, k64BitSize, base, replace, nullptr, nullptr);
+          newMemOpnd = static_cast<AArch64CGFunc&>(cgFunc).CreateMemOperand(
+              MemOperand::kAddrModeBOrX, k64BitSize, base, replace, nullptr, nullptr);
         }
       } else if (propMode == kPropSignedExtend) {
-        newMemOpnd = cgFunc.GetMemoryPool()->New<AArch64MemOperand>(
-            AArch64MemOperand::kAddrModeBOrX, k64BitSize, base, *replace, amount, true);
+        newMemOpnd = static_cast<AArch64CGFunc&>(cgFunc).CreateMemOperand(
+            MemOperand::kAddrModeBOrX, k64BitSize, base, *replace, amount, true);
       } else {
-        newMemOpnd = cgFunc.GetMemoryPool()->New<AArch64MemOperand>(
-            AArch64MemOperand::kAddrModeBOrX, k64BitSize, base, *replace, amount);
+        newMemOpnd = static_cast<AArch64CGFunc&>(cgFunc).CreateMemOperand(
+            MemOperand::kAddrModeBOrX, k64BitSize, base, *replace, amount);
       }
       break;
     }
     case MOP_xmovri32:
     case MOP_xmovri64: {
       if (propMode == kPropOffset) {
-        AArch64ImmOperand *imm = static_cast<AArch64ImmOperand*>(&defInsn.GetOperand(kInsnSecondOpnd));
-        AArch64OfstOperand *newOffset = cgFunc.GetMemoryPool()->New<AArch64OfstOperand>(imm->GetValue(), k32BitSize);
+        ImmOperand *imm = static_cast<ImmOperand*>(&defInsn.GetOperand(kInsnSecondOpnd));
+        OfstOperand *newOffset = &static_cast<AArch64CGFunc&>(cgFunc).CreateOfstOpnd(imm->GetValue(), k32BitSize);
         CHECK_FATAL(newOffset != nullptr, "newOffset is null!");
-        newMemOpnd = cgFunc.GetMemoryPool()->New<AArch64MemOperand>(
-            AArch64MemOperand::kAddrModeBOi, k64BitSize, base, nullptr, newOffset, nullptr);
+        newMemOpnd = static_cast<AArch64CGFunc&>(cgFunc).CreateMemOperand(
+            MemOperand::kAddrModeBOi, k64BitSize, base, nullptr, newOffset, nullptr);
       }
       break;
     }
     case MOP_xlslrri6:
    case MOP_wlslrri5: {
-      AArch64ImmOperand *imm = static_cast<AArch64ImmOperand*>(&defInsn.GetOperand(kInsnThirdOpnd));
-      AArch64RegOperand *newOffset = static_cast<AArch64RegOperand*>(&defInsn.GetOperand(kInsnSecondOpnd));
+      ImmOperand *imm = static_cast<ImmOperand*>(&defInsn.GetOperand(kInsnThirdOpnd));
+      RegOperand *newOffset = static_cast<RegOperand*>(&defInsn.GetOperand(kInsnSecondOpnd));
       CHECK_FATAL(newOffset != nullptr, "newOffset is null!");
       int64 shift = imm->GetValue();
       if (propMode == kPropOffset) {
         if ((shift < k4ByteSize) && (shift >= 0)) {
-          newMemOpnd = cgFunc.GetMemoryPool()->New<AArch64MemOperand>(
-              AArch64MemOperand::kAddrModeBOrX, k64BitSize, base, *newOffset, shift);
+          newMemOpnd = static_cast<AArch64CGFunc&>(cgFunc).CreateMemOperand(
+              MemOperand::kAddrModeBOrX, k64BitSize, base, *newOffset, shift);
         }
       } else if (propMode == kPropShift) {
         shift += amount;
         if ((shift < k4ByteSize) && (shift >= 0)) {
-          newMemOpnd = cgFunc.GetMemoryPool()->New<AArch64MemOperand>(
-              AArch64MemOperand::kAddrModeBOrX, k64BitSize, base, *newOffset, shift);
+          newMemOpnd = static_cast<AArch64CGFunc&>(cgFunc).CreateMemOperand(
+              MemOperand::kAddrModeBOrX, k64BitSize, base, *newOffset, shift);
         }
       }
       break;
@@ -682,7 +682,7 @@ bool AArch64StoreLoadOpt::ReplaceMemOpnd(Insn &insn, regno_t regNo, RegOperand &
   if (!CheckDefInsn(*regDefInsn, insn)) {
     return false;
   }
-  AArch64MemOperand *newMemOpnd = SelectReplaceMem(*regDefInsn, insn, base, offset);
+  MemOperand *newMemOpnd = SelectReplaceMem(*regDefInsn, insn, base, offset);
   if (newMemOpnd == nullptr) {
     return false;
   }
@@ -757,22 +757,22 @@ bool AArch64StoreLoadOpt::CanDoMemProp(const Insn *insn) {
     if (insn->IsAtomic() || insn->GetOperand(0).GetSize() == k128BitSize) {
       return false;
     }
-    AArch64MemOperand *currMemOpnd = static_cast<AArch64MemOperand*>(insn->GetMemOpnd());
+    MemOperand *currMemOpnd = static_cast<MemOperand*>(insn->GetMemOpnd());
     return currMemOpnd != nullptr;
   }
   return false;
 }
 
-void AArch64StoreLoadOpt::SelectPropMode(const AArch64MemOperand &currMemOpnd) {
-  AArch64MemOperand::AArch64AddressingMode currAddrMode = currMemOpnd.GetAddrMode();
+void AArch64StoreLoadOpt::SelectPropMode(const MemOperand &currMemOpnd) {
+  MemOperand::AArch64AddressingMode currAddrMode = currMemOpnd.GetAddrMode();
   switch (currAddrMode) {
-    case AArch64MemOperand::kAddrModeBOi: {
+    case MemOperand::kAddrModeBOi: {
       if (!currMemOpnd.IsPreIndexed() && !currMemOpnd.IsPostIndexed()) {
         propMode = kPropBase;
       }
       break;
     }
-    case AArch64MemOperand::kAddrModeBOrX: {
+    case MemOperand::kAddrModeBOrX: {
       propMode = kPropOffset;
       amount = currMemOpnd.ShiftAmount();
       if (currMemOpnd.GetExtendAsString() == "LSL") {
@@ -860,7 +860,7 @@ void AArch64StoreLoadOpt::DoStoreLoadOpt() {
  */
 void AArch64StoreLoadOpt::MemProp(Insn &insn) {
   MemPropInit();
-  AArch64MemOperand *currMemOpnd = static_cast<AArch64MemOperand*>(insn.GetMemOpnd());
+  MemOperand *currMemOpnd = static_cast<MemOperand*>(insn.GetMemOpnd());
   SelectPropMode(*currMemOpnd);
   RegOperand *base = currMemOpnd->GetBaseRegister();
   Operand *offset = currMemOpnd->GetOffset();
@@ -869,12 +869,12 @@ void AArch64StoreLoadOpt::MemProp(Insn &insn) {
   if (propMode == kUndef) {
     return;
   } else if (propMode == kPropBase) {
-    OfstOperand *immOffset = static_cast<OfstOperand*>(offset);
+    ImmOperand *immOffset = static_cast<ImmOperand*>(offset);
     CHECK_FATAL(immOffset != nullptr, "immOffset is nullptr!");
     regno_t baseRegNo = base->GetRegisterNumber();
     memReplaced = ReplaceMemOpnd(insn, baseRegNo, *base, immOffset);
   } else {
-    AArch64RegOperand *regOffset = static_cast<AArch64RegOperand*>(offset);
+    RegOperand *regOffset = static_cast<RegOperand*>(offset);
     if (regOffset == nullptr) {
       return;
     }
@@ -905,31 +905,31 @@ void AArch64StoreLoadOpt::MemProp(Insn &insn) {
  *   ldr/str x0, [x1], #immVal1
  */
 void AArch64StoreLoadOpt::StrLdrIndexModeOpt(Insn &currInsn) {
-  auto *curMemopnd = static_cast<AArch64MemOperand*>(currInsn.GetMemOpnd());
+  auto *curMemopnd = static_cast<MemOperand*>(currInsn.GetMemOpnd());
   ASSERT(curMemopnd != nullptr, " get memopnd failed");
   /* one instruction cannot define one register twice */
   if (!CanDoIndexOpt(*curMemopnd) ||
       static_cast<AArch64Insn&>(currInsn).IsRegDefined(curMemopnd->GetBaseRegister()->GetRegisterNumber())) {
     return;
   }
-  AArch64MemOperand *newMemopnd = SelectIndexOptMode(currInsn, *curMemopnd);
+  MemOperand *newMemopnd = SelectIndexOptMode(currInsn, *curMemopnd);
   if (newMemopnd != nullptr) {
     currInsn.SetMemOpnd(newMemopnd);
   }
 }
 
-bool AArch64StoreLoadOpt::CanDoIndexOpt(const AArch64MemOperand &MemOpnd) {
-  if (MemOpnd.GetAddrMode() != AArch64MemOperand::kAddrModeBOi || !MemOpnd.IsIntactIndexed()) {
+bool AArch64StoreLoadOpt::CanDoIndexOpt(const MemOperand &MemOpnd) {
+  if (MemOpnd.GetAddrMode() != MemOperand::kAddrModeBOi || !MemOpnd.IsIntactIndexed()) {
     return false;
   }
   ASSERT(MemOpnd.GetOffsetImmediate() != nullptr, " kAddrModeBOi memopnd have no offset imm");
   if (!MemOpnd.GetOffsetImmediate()->IsImmOffset()) {
     return false;
   }
-  if (MemOpnd.GetBaseRegister()->IsSPOrFP()) {
+  if (cgFunc.IsSPOrFP(*MemOpnd.GetBaseRegister())) {
     return false;
   }
-  AArch64OfstOperand *a64Ofst = MemOpnd.GetOffsetImmediate();
+  OfstOperand *a64Ofst = MemOpnd.GetOffsetImmediate();
   if (a64Ofst == nullptr) {
     return false;
   }
@@ -944,8 +944,8 @@ int64 AArch64StoreLoadOpt::GetOffsetForNewIndex(Insn &defInsn, Insn &insn, regno
          "check def opnd");
   auto &srcOpnd = static_cast<RegOperand&>(defInsn.GetOperand(kInsnSecondOpnd));
   if (srcOpnd.GetRegisterNumber() == baseRegNO && defInsn.GetBB() == insn.GetBB()) {
-    int64 offsetVal = static_cast<AArch64ImmOperand&>(defInsn.GetOperand(kInsnThirdOpnd)).GetValue();
-    if (!AArch64MemOperand::IsSIMMOffsetOutOfRange(offsetVal, memOpndSize == k64BitSize, insn.IsLoadStorePair())) {
+    int64 offsetVal = static_cast<ImmOperand&>(defInsn.GetOperand(kInsnThirdOpnd)).GetValue();
+    if (!MemOperand::IsSIMMOffsetOutOfRange(offsetVal, memOpndSize == k64BitSize, insn.IsLoadStorePair())) {
       return subMode ? -offsetVal : offsetVal;
     }
   }
@@ -954,7 +954,7 @@
 };
 
-AArch64MemOperand *AArch64StoreLoadOpt::SelectIndexOptMode(Insn &insn, const AArch64MemOperand &curMemOpnd) {
+MemOperand *AArch64StoreLoadOpt::SelectIndexOptMode(Insn &insn, const MemOperand &curMemOpnd) {
   AArch64ReachingDefinition *a64RD = static_cast<AArch64ReachingDefinition*>(cgFunc.GetRD());
   ASSERT((a64RD != nullptr), "check a64RD!");
   regno_t baseRegisterNO = curMemOpnd.GetBaseRegister()->GetRegisterNumber();
@@ -968,10 +968,10 @@ AArch64MemOperand *AArch64StoreLoadOpt::SelectIndexOptMode(Insn &insn, const AAr
       InsnSet tempCheck;
       (void)a64RD->FindRegUseBetweenInsn(baseRegisterNO, defInsn->GetNext(), insn.GetPrev(), tempCheck);
       if (tempCheck.empty() && (defInsn->GetBB() == insn.GetBB())) {
-        auto &newMem = static_cast<AArch64MemOperand&>(
-            a64cgFunc.CreateMemOpnd(*curMemOpnd.GetBaseRegister(), defOffset, curMemOpnd.GetSize()));
+        auto &newMem =
+            a64cgFunc.CreateMemOpnd(*curMemOpnd.GetBaseRegister(), defOffset, curMemOpnd.GetSize());
         ASSERT(newMem.GetOffsetImmediate() != nullptr, "need offset for memopnd in this case");
-        newMem.SetIndexOpt(AArch64MemOperand::kPreIndex);
+        newMem.SetIndexOpt(MemOperand::kPreIndex);
         insn.GetBB()->RemoveInsn(*defInsn);
         return &newMem;
       }
@@ -986,10 +986,10 @@ AArch64MemOperand *AArch64StoreLoadOpt::SelectIndexOptMode(Insn &insn, const AAr
       InsnSet tempCheck;
       (void)a64RD->FindRegUseBetweenInsn(baseRegisterNO, insn.GetNext(), defInsn->GetPrev(), tempCheck);
       if (tempCheck.empty() && (defInsn->GetBB() == insn.GetBB())) {
-        auto &newMem = static_cast<AArch64MemOperand&>(a64cgFunc.CreateMemOpnd(
-            *curMemOpnd.GetBaseRegister(), defOffset, curMemOpnd.GetSize()));
+        auto &newMem = a64cgFunc.CreateMemOpnd(
+            *curMemOpnd.GetBaseRegister(), defOffset, curMemOpnd.GetSize());
         ASSERT(newMem.GetOffsetImmediate() != nullptr, "need offset for memopnd in this case");
-        newMem.SetIndexOpt(AArch64MemOperand::kPostIndex);
+        newMem.SetIndexOpt(MemOperand::kPostIndex);
         insn.GetBB()->RemoveInsn(*defInsn);
         return &newMem;
       }
@@ -1002,7 +1002,7 @@ void AArch64StoreLoadOpt::ProcessStrPair(Insn &insn) {
   const short memIndex = 2;
   short regIndex = 0;
   Operand &opnd = insn.GetOperand(memIndex);
-  auto &memOpnd = static_cast<AArch64MemOperand&>(opnd);
+  auto &memOpnd = static_cast<MemOperand&>(opnd);
   RegOperand *base = memOpnd.GetBaseRegister();
   if ((base == nullptr) || !(cgFunc.GetRD()->IsFrameReg(*base))) {
     return;
@@ -1025,7 +1025,7 @@ void AArch64StoreLoadOpt::ProcessStrPair(Insn &insn) {
       return;
     }
     auto &regOpnd = static_cast<RegOperand&>(insn.GetOperand(static_cast<uint32>(regIndex)));
-    if (regOpnd.IsZeroRegister()) {
+    if (regOpnd.GetRegisterNumber() == RZR) {
       DoLoadZeroToMoveTransfer(insn, regIndex, memUseInsnSet);
     } else {
       DoLoadToMoveTransfer(insn, regIndex, i, memUseInsnSet);
@@ -1038,7 +1038,7 @@ void AArch64StoreLoadOpt::ProcessStr(Insn &insn) {
   const short memIndex = 1;
   const short regIndex = 0;
   Operand &opnd = insn.GetOperand(memIndex);
-  auto &memOpnd = static_cast<AArch64MemOperand&>(opnd);
+  auto &memOpnd = static_cast<MemOperand&>(opnd);
   RegOperand *base = memOpnd.GetBaseRegister();
   if ((base == nullptr) || !(cgFunc.GetRD()->IsFrameReg(*base))) {
     return;
@@ -1056,7 +1056,7 @@ void AArch64StoreLoadOpt::ProcessStr(Insn &insn) {
   auto *regOpnd = static_cast<RegOperand*>(insn.GetOpnd(regIndex));
   CHECK_NULL_FATAL(regOpnd);
 
-  if (regOpnd->IsZeroRegister()) {
+  if (regOpnd->GetRegisterNumber() == RZR) {
    DoLoadZeroToMoveTransfer(insn, regIndex, memUseInsnSet);
   } else {
     DoLoadToMoveTransfer(insn, regIndex, 0, memUseInsnSet);
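The hunks above rewrite a load/store's memory operand by folding the instruction that defines its base register into the addressing mode itself (HandleArithImmDef, for example, turns `add x1, x2, #8` + `ldr x0, [x1, #16]` into `ldr x0, [x2, #24]`). A minimal standalone sketch of that base-plus-immediate fold, using hypothetical operand structs rather than Maple's RegOperand/MemOperand classes:

#include <cstdint>
#include <iostream>
#include <optional>

struct MemOperand {
  int baseReg;     // register number used as the base
  int64_t offset;  // immediate offset
};

struct AddImmDef {
  int dstReg;      // register defined by the add
  int srcReg;      // register the immediate is added to
  int64_t imm;     // immediate value
};

// Returns the folded operand, or nothing if the def does not feed the base.
std::optional<MemOperand> FoldAddIntoOffset(const MemOperand &mem, const AddImmDef &def) {
  if (def.dstReg != mem.baseReg) {
    return std::nullopt;
  }
  return MemOperand{def.srcReg, mem.offset + def.imm};
}

int main() {
  MemOperand mem{1, 16};   // ldr x0, [x1, #16]
  AddImmDef def{1, 2, 8};  // add x1, x2, #8
  if (auto folded = FoldAddIntoOffset(mem, def)) {
    std::cout << "ldr x0, [x" << folded->baseReg << ", #" << folded->offset << "]\n";
  }
  return 0;
}

The real pass additionally has to prove, via reaching definitions, that neither register is redefined between the add and the load, which is what CheckDefInsn/CheckReplaceReg guard above.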
diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_utils.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_utils.cpp
index 6378799da1f1259f61186078cbc0cc375e21b654..5143fa82d8ffffdad05f7512a76750443f6311b6 100644
--- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_utils.cpp
+++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_utils.cpp
@@ -17,17 +17,17 @@
 
 namespace maplebe {
 
-AArch64MemOperand *GetOrCreateMemOperandForNewMOP(CGFunc &cgFunc,
-                                                  const Insn &loadIns,
-                                                  MOperator newLoadMop) {
+MemOperand *GetOrCreateMemOperandForNewMOP(CGFunc &cgFunc,
+                                           const Insn &loadIns,
+                                           MOperator newLoadMop) {
   MemPool &memPool = *cgFunc.GetMemoryPool();
-  auto *memOp = static_cast<AArch64MemOperand*>(loadIns.GetMemOpnd());
+  auto *memOp = static_cast<MemOperand*>(loadIns.GetMemOpnd());
   MOperator loadMop = loadIns.GetMachineOpcode();
 
   ASSERT(loadIns.IsLoad() && AArch64CG::kMd[newLoadMop].IsLoad(),
          "ins and Mop must be load");
 
-  AArch64MemOperand *newMemOp = memOp;
+  MemOperand *newMemOp = memOp;
   uint32 memSize = AArch64CG::kMd[loadMop].GetOperandSize();
   uint32 newMemSize = AArch64CG::kMd[newLoadMop].GetOperandSize();
@@ -37,7 +37,7 @@ AArch64MemOperand *GetOrCreateMemOperandForNewMOP(CGFunc &cgFunc,
     return newMemOp;
   }
 
-  newMemOp = static_cast<AArch64MemOperand*>(memOp->Clone(memPool));
+  newMemOp = memOp->Clone(memPool);
   newMemOp->SetSize(newMemSize);
 
   if (!CGOptions::IsBigEndian()) {
@@ -45,18 +45,18 @@ AArch64MemOperand *GetOrCreateMemOperandForNewMOP(CGFunc &cgFunc,
   }
 
   // for big-endian it's necessary to adjust offset if it's present
-  if (memOp->GetAddrMode() != AArch64MemOperand::kAddrModeBOi ||
+  if (memOp->GetAddrMode() != MemOperand::kAddrModeBOi ||
       newMemSize > memSize) {
     // currently, it's possible to adjust an offset only for immediate offset
     // operand if new size is less than the original one
     return nullptr;
   }
 
-  auto *newOffOp = static_cast<AArch64OfstOperand*>(
+  auto *newOffOp = static_cast<OfstOperand*>(
       memOp->GetOffsetImmediate()->Clone(memPool));
 
   newOffOp->AdjustOffset((memSize - newMemSize) >> kLog2BitsPerByte);
-  newMemOp->SetOffsetImmediate(*newOffOp);
+  newMemOp->SetOffsetOperand(*newOffOp);
 
   ASSERT(memOp->IsOffsetMisaligned(memSize) ||
          !newMemOp->IsOffsetMisaligned(newMemSize),
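GetOrCreateMemOperandForNewMOP clones a load's memory operand when the opcode is narrowed and, on big-endian targets, bumps the immediate offset so the narrowed access still reads the value's low-order bytes. A sketch of just that offset arithmetic (hypothetical helper, not the Maple API):

#include <cassert>
#include <cstdint>

constexpr uint32_t kLog2BitsPerByte = 3;

// On big-endian layouts, the low-order bytes of a wide value sit at the
// higher byte addresses, so narrowing the access moves the offset forward.
uint32_t NarrowedOffsetAdjust(uint32_t oldBits, uint32_t newBits) {
  assert(newBits <= oldBits);
  return (oldBits - newBits) >> kLog2BitsPerByte;  // bits -> bytes
}

int main() {
  // Narrowing a 64-bit access to 32 bits moves the offset by 4 bytes.
  return NarrowedOffsetAdjust(64, 32) == 4 ? 0 : 1;
}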
diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_validbit_opt.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_validbit_opt.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..865a8c6146f1789c0d58cc2f3e2080d6578c8db6
--- /dev/null
+++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_validbit_opt.cpp
@@ -0,0 +1,550 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "aarch64_validbit_opt.h"
+
+namespace maplebe {
+void AArch64ValidBitOpt::DoOpt(BB &bb, Insn &insn) {
+  MOperator curMop = insn.GetMachineOpcode();
+  switch (curMop) {
+    case MOP_wandrri12:
+    case MOP_xandrri13: {
+      Optimize<AndValidBitPattern>(bb, insn);
+      break;
+    }
+    case MOP_xuxtb32:
+    case MOP_xuxth32:
+    case MOP_wubfxrri5i5:
+    case MOP_xubfxrri6i6: {
+      Optimize<ZxtValidBitPattern>(bb, insn);
+      break;
+    }
+    case MOP_wcsetrc:
+    case MOP_xcsetrc: {
+      Optimize<CmpCsetVBPattern>(bb, insn);
+      break;
+    }
+    case MOP_bge:
+    case MOP_blt: {
+      Optimize<CmpBranchesPattern>(bb, insn);
+      break;
+    }
+    default:
+      break;
+  }
+}
+
+void AArch64ValidBitOpt::SetValidBits(Insn &insn) {
+  MOperator mop = insn.GetMachineOpcode();
+  switch (mop) {
+    case MOP_wcsetrc:
+    case MOP_xcsetrc: {
+      auto &dstOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+      dstOpnd.SetValidBitsNum(k1BitSize);
+      break;
+    }
+    case MOP_xmovri32:
+    case MOP_xmovri64: {
+      Operand &srcOpnd = insn.GetOperand(kInsnSecondOpnd);
+      ASSERT(srcOpnd.IsIntImmediate(), "must be ImmOperand");
+      auto &immOpnd = static_cast<ImmOperand&>(srcOpnd);
+      auto &dstOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+      dstOpnd.SetValidBitsNum(GetImmValidBit(immOpnd.GetValue(), dstOpnd.GetSize()));
+      break;
+    }
+    case MOP_xmovrr:
+    case MOP_wmovrr: {
+      auto &srcOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+      if (!srcOpnd.IsVirtualRegister()) {
+        break;
+      }
+      if (srcOpnd.GetRegisterNumber() == RZR) {
+        srcOpnd.SetValidBitsNum(k1BitSize);
+      }
+      auto &dstOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+      if (!(dstOpnd.GetSize() == k64BitSize && srcOpnd.GetSize() == k32BitSize) &&
+          !(dstOpnd.GetSize() == k32BitSize && srcOpnd.GetSize() == k64BitSize)) {
+        dstOpnd.SetValidBitsNum(srcOpnd.GetValidBitsNum());
+      }
+      break;
+    }
+    case MOP_wlsrrri5:
+    case MOP_xlsrrri6:
+    case MOP_wasrrri5:
+    case MOP_xasrrri6: {
+      Operand &opnd = insn.GetOperand(kInsnThirdOpnd);
+      ASSERT(opnd.IsIntImmediate(), "must be ImmOperand");
+      int64 shiftBits = static_cast<ImmOperand&>(opnd).GetValue();
+      auto &dstOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+      auto &srcOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+      if ((static_cast<int64>(srcOpnd.GetValidBitsNum()) - shiftBits) <= 0) {
+        dstOpnd.SetValidBitsNum(k1BitSize);
+      } else {
+        dstOpnd.SetValidBitsNum(srcOpnd.GetValidBitsNum() - shiftBits);
+      }
+      break;
+    }
+    case MOP_wlslrri5:
+    case MOP_xlslrri6: {
+      Operand &opnd = insn.GetOperand(kInsnThirdOpnd);
+      ASSERT(opnd.IsIntImmediate(), "must be ImmOperand");
+      int64 shiftBits = static_cast<ImmOperand&>(opnd).GetValue();
+      auto &dstOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+      auto &srcOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+      uint32 newVB = ((srcOpnd.GetValidBitsNum() + shiftBits) > srcOpnd.GetSize()) ?
+                     srcOpnd.GetSize() : (srcOpnd.GetValidBitsNum() + shiftBits);
+      dstOpnd.SetValidBitsNum(newVB);
+      break;
+    }
+    case MOP_xuxtb32:
+    case MOP_xuxth32: {
+      auto &dstOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+      auto &srcOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+      uint32 srcVB = srcOpnd.GetValidBitsNum();
+      uint32 newVB = dstOpnd.GetValidBitsNum();
+      newVB = (mop == MOP_xuxtb32) ? ((srcVB < k8BitSize) ? srcVB : k8BitSize) : newVB;
+      newVB = (mop == MOP_xuxth32) ? ((srcVB < k16BitSize) ? srcVB : k16BitSize) : newVB;
+      dstOpnd.SetValidBitsNum(newVB);
+      break;
+    }
+    case MOP_wldrb:
+    case MOP_wldrh: {
+      auto &dstOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+      uint32 newVB = (mop == MOP_wldrb) ? k8BitSize : k16BitSize;
+      dstOpnd.SetValidBitsNum(newVB);
+      break;
+    }
+    case MOP_wandrrr:
+    case MOP_xandrrr: {
+      auto &dstOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+      uint32 src1VB = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd)).GetValidBitsNum();
+      uint32 src2VB = static_cast<RegOperand&>(insn.GetOperand(kInsnThirdOpnd)).GetValidBitsNum();
+      uint32 newVB = (src1VB <= src2VB ? src1VB : src2VB);
+      dstOpnd.SetValidBitsNum(newVB);
+      break;
+    }
+    case MOP_wandrri12:
+    case MOP_xandrri13: {
+      auto &dstOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+      auto &immOpnd = static_cast<ImmOperand&>(insn.GetOperand(kInsnThirdOpnd));
+      uint32 src1VB = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd)).GetValidBitsNum();
+      uint32 src2VB = GetImmValidBit(immOpnd.GetValue(), dstOpnd.GetSize());
+      uint32 newVB = (src1VB <= src2VB ? src1VB : src2VB);
+      dstOpnd.SetValidBitsNum(newVB);
+      break;
+    }
+    case MOP_wiorrrr:
+    case MOP_xiorrrr: {
+      auto &dstOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+      uint32 src1VB = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd)).GetValidBitsNum();
+      uint32 src2VB = static_cast<RegOperand&>(insn.GetOperand(kInsnThirdOpnd)).GetValidBitsNum();
+      uint32 newVB = (src1VB >= src2VB ? src1VB : src2VB);
+      dstOpnd.SetValidBitsNum(newVB);
+      break;
+    }
+    case MOP_wiorrri12:
+    case MOP_xiorrri13: {
+      auto &dstOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+      auto &immOpnd = static_cast<ImmOperand&>(insn.GetOperand(kInsnThirdOpnd));
+      uint32 src1VB = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd)).GetValidBitsNum();
+      uint32 src2VB = GetImmValidBit(immOpnd.GetValue(), dstOpnd.GetSize());
+      uint32 newVB = (src1VB >= src2VB ? src1VB : src2VB);
+      dstOpnd.SetValidBitsNum(newVB);
+      break;
+    }
+    default:
+      break;
+  }
+}
+
+bool AArch64ValidBitOpt::SetPhiValidBits(Insn &insn) {
+  Operand &defOpnd = insn.GetOperand(kInsnFirstOpnd);
+  ASSERT(defOpnd.IsRegister(), "expect register");
+  auto &defRegOpnd = static_cast<RegOperand&>(defOpnd);
+  Operand &phiOpnd = insn.GetOperand(kInsnSecondOpnd);
+  ASSERT(phiOpnd.IsPhi(), "expect phiList");
+  auto &phiList = static_cast<PhiOperand&>(phiOpnd);
+  int32 maxVB = -1;
+  for (auto phiOpndIt : phiList.GetOperands()) {
+    if (phiOpndIt.second != nullptr) {
+      maxVB = (maxVB < static_cast<int32>(phiOpndIt.second->GetValidBitsNum())) ?
+              static_cast<int32>(phiOpndIt.second->GetValidBitsNum()) : maxVB;
+    }
+  }
+  if (maxVB >= k0BitSize && static_cast<uint32>(maxVB) != defRegOpnd.GetValidBitsNum()) {
+    defRegOpnd.SetValidBitsNum(static_cast<uint32>(maxVB));
+    return true;
+  }
+  return false;
+}
+
+static bool IsZeroRegister(const Operand &opnd) {
+  if (!opnd.IsRegister()) {
+    return false;
+  }
+  const RegOperand *regOpnd = static_cast<const RegOperand*>(&opnd);
+  return regOpnd->GetRegisterNumber() == RZR;
+}
+
+bool AndValidBitPattern::CheckImmValidBit(int64 andImm, uint32 andImmVB, int64 shiftImm) const {
+  if ((__builtin_ffs(static_cast<int>(andImm)) - 1 == shiftImm) &&
+      ((andImm >> shiftImm) == ((1 << (andImmVB - shiftImm)) - 1))) {
+    return true;
+  }
+  return false;
+}
+
+bool AndValidBitPattern::CheckCondition(Insn &insn) {
+  MOperator mOp = insn.GetMachineOpcode();
+  if (mOp == MOP_wandrri12) {
+    newMop = MOP_wmovrr;
+  } else if (mOp == MOP_xandrri13) {
+    newMop = MOP_xmovrr;
+  }
+  if (newMop == MOP_undef) {
+    return false;
+  }
+  CHECK_FATAL(insn.GetOperand(kInsnFirstOpnd).IsRegister(), "must be register!");
+  CHECK_FATAL(insn.GetOperand(kInsnSecondOpnd).IsRegister(), "must be register!");
+  CHECK_FATAL(insn.GetOperand(kInsnThirdOpnd).IsImmediate(), "must be imm!");
+  desReg = static_cast<RegOperand*>(&insn.GetOperand(kInsnFirstOpnd));
+  srcReg = static_cast<RegOperand*>(&insn.GetOperand(kInsnSecondOpnd));
+  auto &andImm = static_cast<ImmOperand&>(insn.GetOperand(kInsnThirdOpnd));
+  Insn *defInsn = GetDefInsn(*srcReg);
+  if (defInsn && !defInsn->IsPhi()) {
+    int32 immVal = andImm.GetValue();
+    uint32 validBit = srcReg->GetValidBitsNum();
+    if (validBit == k8BitSize && immVal == 0xFF) {
+      return true;
+    } else if (validBit == k16BitSize && immVal == 0xFFFF) {
+      return true;
+    }
+  }
+  InsnSet useInsns = GetAllUseInsn(*desReg);
+  if (useInsns.size() == 1) {
+    Insn *useInsn = *useInsns.begin();
+    MOperator useMop = useInsn->GetMachineOpcode();
+    if (useMop != MOP_wasrrri5 && useMop != MOP_xasrrri6 && useMop != MOP_wlsrrri5 && useMop != MOP_xlsrrri6) {
+      return false;
+    }
+    Operand &shiftOpnd = useInsn->GetOperand(kInsnThirdOpnd);
+    CHECK_FATAL(shiftOpnd.IsImmediate(), "must be immediate");
+    uint32 shiftImm = static_cast<ImmOperand&>(shiftOpnd).GetValue();
+    uint32 andImmVB = ValidBitOpt::GetImmValidBit(andImm.GetValue(), desReg->GetSize());
+    if ((srcReg->GetValidBitsNum() == andImmVB) && CheckImmValidBit(andImm.GetValue(), andImmVB, shiftImm)) {
+      return true;
+    }
+  }
+  return false;
+}
+
+void AndValidBitPattern::Run(BB &bb, Insn &insn) {
+  if (!CheckCondition(insn)) {
+    return;
+  }
+  Insn &newInsn = cgFunc->GetCG()->BuildInstruction<AArch64Insn>(newMop, *desReg, *srcReg);
+  bb.ReplaceInsn(insn, newInsn);
+  /* update ssa info */
+  ssaInfo->ReplaceInsn(insn, newInsn);
+  /* dump pattern info */
+  if (CG_VALIDBIT_OPT_DUMP) {
+    std::vector<Insn*> prevs;
+    prevs.emplace_back(&insn);
+    DumpAfterPattern(prevs, &insn, &newInsn);
+  }
+}
+
+bool ZxtValidBitPattern::CheckCondition(Insn &insn) {
+  Operand &dstOpnd = insn.GetOperand(kInsnFirstOpnd);
+  Operand &srcOpnd = insn.GetOperand(kInsnSecondOpnd);
+  MOperator mOp = insn.GetMachineOpcode();
+  switch (mOp) {
+    case MOP_xuxtb32:
+    case MOP_xuxth32: {
+      CHECK_FATAL(dstOpnd.IsRegister(), "must be register");
+      CHECK_FATAL(srcOpnd.IsRegister(), "must be register");
+      if (static_cast<RegOperand&>(dstOpnd).GetValidBitsNum() !=
+          static_cast<RegOperand&>(srcOpnd).GetValidBitsNum()) {
+        return false;
+      }
+      newMop = MOP_wmovrr;
+      break;
+    }
+    case MOP_wubfxrri5i5:
+    case MOP_xubfxrri6i6: {
+      Operand &immOpnd1 = insn.GetOperand(kInsnThirdOpnd);
+      Operand &immOpnd2 = insn.GetOperand(kInsnFourthOpnd);
+      CHECK_FATAL(immOpnd1.IsImmediate(), "must be immediate");
+      CHECK_FATAL(immOpnd2.IsImmediate(), "must be immediate");
+      int64 lsb = static_cast<ImmOperand&>(immOpnd1).GetValue();
+      int64 width = static_cast<ImmOperand&>(immOpnd2).GetValue();
+      if (lsb != 0 || width != static_cast<RegOperand&>(srcOpnd).GetValidBitsNum()) {
+        return false;
+      }
+      newMop = (mOp == MOP_wubfxrri5i5) ? MOP_wmovrr : MOP_xmovrr;
+      break;
+    }
+    default:
+      return false;
+  }
+  newDstOpnd = &static_cast<RegOperand&>(dstOpnd);
+  newSrcOpnd = &static_cast<RegOperand&>(srcOpnd);
+  return true;
+}
+
+void ZxtValidBitPattern::Run(BB &bb, Insn &insn) {
+  if (!CheckCondition(insn)) {
+    return;
+  }
+  MOperator mOp = insn.GetMachineOpcode();
+  switch (mOp) {
+    case MOP_xuxtb32:
+    case MOP_xuxth32: {
+      insn.SetMOP(newMop);
+      break;
+    }
+    case MOP_wubfxrri5i5:
+    case MOP_xubfxrri6i6: {
+      Insn &newInsn = cgFunc->GetCG()->BuildInstruction<AArch64Insn>(newMop, *newDstOpnd, *newSrcOpnd);
+      bb.ReplaceInsn(insn, newInsn);
+      /* update ssa info */
+      ssaInfo->ReplaceInsn(insn, newInsn);
+      /* dump pattern info */
+      if (CG_VALIDBIT_OPT_DUMP) {
+        std::vector<Insn*> prevs;
+        prevs.emplace_back(&insn);
+        DumpAfterPattern(prevs, &insn, &newInsn);
+      }
+      break;
+    }
+    default:
+      return;
+  }
+}
+
+bool CmpCsetVBPattern::IsContinuousCmpCset(const Insn &curInsn) {
+  auto &csetDstReg = static_cast<RegOperand&>(curInsn.GetOperand(kInsnFirstOpnd));
+  CHECK_FATAL(csetDstReg.IsSSAForm(), "dstOpnd must be ssa form");
+  VRegVersion *dstVersion = ssaInfo->FindSSAVersion(csetDstReg.GetRegisterNumber());
+  ASSERT(dstVersion != nullptr, "find vRegVersion failed");
+  for (auto useDUInfoIt : dstVersion->GetAllUseInsns()) {
+    if (useDUInfoIt.second == nullptr) {
+      continue;
+    }
+    Insn *useInsn = useDUInfoIt.second->GetInsn();
+    if (useInsn == nullptr) {
+      continue;
+    }
+    MOperator useMop = useInsn->GetMachineOpcode();
+    if (useMop == MOP_wcmpri || useMop == MOP_xcmpri) {
+      auto &ccDstReg = static_cast<RegOperand&>(useInsn->GetOperand(kInsnFirstOpnd));
+      CHECK_FATAL(ccDstReg.IsSSAForm(), "dstOpnd must be ssa form");
+      VRegVersion *ccDstVersion = ssaInfo->FindSSAVersion(ccDstReg.GetRegisterNumber());
+      ASSERT(ccDstVersion != nullptr, "find vRegVersion failed");
+      for (auto ccUseDUInfoIt : ccDstVersion->GetAllUseInsns()) {
+        if (ccUseDUInfoIt.second == nullptr) {
+          continue;
+        }
+        Insn *ccUseInsn = ccUseDUInfoIt.second->GetInsn();
+        if (ccUseInsn == nullptr) {
+          continue;
+        }
+        MOperator ccUseMop = ccUseInsn->GetMachineOpcode();
+        if (ccUseMop == MOP_wcsetrc || ccUseMop == MOP_xcsetrc) {
+          return true;
+        }
+      }
+    }
+  }
+  return false;
+}
+
+bool CmpCsetVBPattern::OpndDefByOneValidBit(const Insn &defInsn) {
+  if (defInsn.IsPhi()) {
+    return (static_cast<RegOperand&>(cmpInsn->GetOperand(kInsnSecondOpnd)).GetValidBitsNum() == k1BitSize) ||
+           (static_cast<RegOperand&>(cmpInsn->GetOperand(kInsnSecondOpnd)).GetValidBitsNum() == k0BitSize);
+  }
+  MOperator defMop = defInsn.GetMachineOpcode();
+  switch (defMop) {
+    case MOP_wcsetrc:
+    case MOP_xcsetrc:
+      return true;
+    case MOP_xmovri32:
+    case MOP_xmovri64: {
+      Operand &defOpnd = defInsn.GetOperand(kInsnSecondOpnd);
+      ASSERT(defOpnd.IsIntImmediate(), "expects ImmOperand");
+      auto &defConst = static_cast<ImmOperand&>(defOpnd);
+      int64 defConstValue = defConst.GetValue();
+      return (defConstValue == 0 || defConstValue == 1);
+    }
+    case MOP_xmovrr:
+    case MOP_wmovrr:
+      return IsZeroRegister(defInsn.GetOperand(kInsnSecondOpnd));
+    case MOP_wlsrrri5:
+    case MOP_xlsrrri6: {
+      Operand &opnd2 = defInsn.GetOperand(kInsnThirdOpnd);
+      ASSERT(opnd2.IsIntImmediate(), "expects ImmOperand");
+      auto &opndImm = static_cast<ImmOperand&>(opnd2);
+      int64 shiftBits = opndImm.GetValue();
+      return ((defMop == MOP_wlsrrri5 && shiftBits == (k32BitSize - 1)) ||
+              (defMop == MOP_xlsrrri6 && shiftBits == (k64BitSize - 1)));
+    }
+    default:
+      return false;
+  }
+}
+
+bool CmpCsetVBPattern::CheckCondition(Insn &csetInsn) {
+  MOperator curMop = csetInsn.GetMachineOpcode();
+  if (curMop != MOP_wcsetrc && curMop != MOP_xcsetrc) {
+    return false;
+  }
+  /* combine [continuous cmp & cset] first, to eliminate more insns */
+  if (IsContinuousCmpCset(csetInsn)) {
+    return false;
+  }
+  RegOperand &ccReg = static_cast<RegOperand&>(csetInsn.GetOperand(kInsnThirdOpnd));
+  regno_t ccRegNo = ccReg.GetRegisterNumber();
+  cmpInsn = GetDefInsn(ccReg);
+  CHECK_NULL_FATAL(cmpInsn);
+  MOperator mop = cmpInsn->GetMachineOpcode();
+  if ((mop != MOP_wcmpri) && (mop != MOP_xcmpri)) {
+    return false;
+  }
+  VRegVersion *ccRegVersion = ssaInfo->FindSSAVersion(ccRegNo);
+  if (ccRegVersion->GetAllUseInsns().size() > k1BitSize) {
+    return false;
+  }
+  Operand &cmpSecondOpnd = cmpInsn->GetOperand(kInsnThirdOpnd);
+  CHECK_FATAL(cmpSecondOpnd.IsIntImmediate(), "expects ImmOperand");
+  auto &cmpConst = static_cast<ImmOperand&>(cmpSecondOpnd);
+  cmpConstVal = cmpConst.GetValue();
+  /* get ImmOperand, must be 0 or 1 */
+  if ((cmpConstVal != 0) && (cmpConstVal != k1BitSize)) {
+    return false;
+  }
+  Operand &cmpFirstOpnd = cmpInsn->GetOperand(kInsnSecondOpnd);
+  CHECK_FATAL(cmpFirstOpnd.IsRegister(), "cmpFirstOpnd must be register!");
+  RegOperand &cmpReg = static_cast<RegOperand&>(cmpFirstOpnd);
+  Insn *defInsn = GetDefInsn(cmpReg);
+  if (defInsn == nullptr) {
+    return false;
+  }
+  if (defInsn->GetMachineOpcode() == MOP_wmovrr || defInsn->GetMachineOpcode() == MOP_xmovrr) {
+    auto &srcOpnd = static_cast<RegOperand&>(defInsn->GetOperand(kInsnSecondOpnd));
+    if (!srcOpnd.IsVirtualRegister()) {
+      return false;
+    }
+  }
+  return ((cmpReg.GetValidBitsNum() == k1BitSize) || (cmpReg.GetValidBitsNum() == k0BitSize) ||
+          OpndDefByOneValidBit(*defInsn));
+}
+
+void CmpCsetVBPattern::Run(BB &bb, Insn &csetInsn) {
+  if (!CheckCondition(csetInsn)) {
+    return;
+  }
+  Operand &csetFirstOpnd = csetInsn.GetOperand(kInsnFirstOpnd);
+  Operand &cmpFirstOpnd = cmpInsn->GetOperand(kInsnSecondOpnd);
+  auto &cond = static_cast<CondOperand&>(csetInsn.GetOperand(kInsnSecondOpnd));
+  Insn *newInsn = nullptr;
+
+  /* cmpFirstOpnd == 1 */
+  if ((cmpConstVal == 0 && cond.GetCode() == CC_NE) || (cmpConstVal == 1 && cond.GetCode() == CC_EQ)) {
+    MOperator mopCode = (cmpFirstOpnd.GetSize() == k64BitSize) ? MOP_xmovrr : MOP_wmovrr;
+    newInsn = &cgFunc->GetCG()->BuildInstruction<AArch64Insn>(mopCode, csetFirstOpnd, cmpFirstOpnd);
+  } else if ((cmpConstVal == 1 && cond.GetCode() == CC_NE) || (cmpConstVal == 0 && cond.GetCode() == CC_EQ)) {
+    /* cmpFirstOpnd == 0 */
+    MOperator mopCode = (cmpFirstOpnd.GetSize() == k64BitSize) ? MOP_xeorrri13 : MOP_weorrri12;
+    ImmOperand &one = static_cast<AArch64CGFunc*>(cgFunc)->CreateImmOperand(1, k8BitSize, false);
+    newInsn = &cgFunc->GetCG()->BuildInstruction<AArch64Insn>(mopCode, csetFirstOpnd, cmpFirstOpnd, one);
+  }
+  if (newInsn == nullptr) {
+    return;
+  }
+  bb.ReplaceInsn(csetInsn, *newInsn);
+  ssaInfo->ReplaceInsn(csetInsn, *newInsn);
+  if (CG_VALIDBIT_OPT_DUMP && (newInsn != nullptr)) {
+    std::vector<Insn*> prevInsns;
+    prevInsns.emplace_back(cmpInsn);
+    prevInsns.emplace_back(&csetInsn);
+    DumpAfterPattern(prevInsns, newInsn, nullptr);
+  }
+}
+
+void CmpBranchesPattern::SelectNewMop(MOperator mop) {
+  switch (mop) {
+    case MOP_bge: {
+      newMop = is64Bit ? MOP_xtbnz : MOP_wtbnz;
+      break;
+    }
+    case MOP_blt: {
+      newMop = is64Bit ? MOP_xtbz : MOP_wtbz;
+      break;
+    }
+    default:
+      break;
+  }
+}
+
+bool CmpBranchesPattern::CheckCondition(Insn &insn) {
+  MOperator curMop = insn.GetMachineOpcode();
+  if (curMop != MOP_bge && curMop != MOP_blt) {
+    return false;
+  }
+  auto &ccReg = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+  prevCmpInsn = GetDefInsn(ccReg);
+  if (prevCmpInsn == nullptr) {
+    return false;
+  }
+  MOperator cmpMop = prevCmpInsn->GetMachineOpcode();
+  if (cmpMop != MOP_wcmpri && cmpMop != MOP_xcmpri) {
+    return false;
+  }
+  is64Bit = (cmpMop == MOP_xcmpri);
+  auto &cmpUseOpnd = static_cast<RegOperand&>(prevCmpInsn->GetOperand(kInsnSecondOpnd));
+  auto &cmpImmOpnd = static_cast<ImmOperand&>(prevCmpInsn->GetOperand(kInsnThirdOpnd));
+  int64 cmpImmVal = cmpImmOpnd.GetValue();
+  newImmVal = ValidBitOpt::GetLogValueAtBase2(cmpImmVal);
+  if (newImmVal < 0 || cmpUseOpnd.GetValidBitsNum() != (newImmVal + 1)) {
+    return false;
+  }
+  SelectNewMop(curMop);
+  if (newMop == MOP_undef) {
+    return false;
+  }
+  return true;
+}
+
+void CmpBranchesPattern::Run(BB &bb, Insn &insn) {
+  if (!CheckCondition(insn)) {
+    return;
+  }
+  auto *aarFunc = static_cast<AArch64CGFunc*>(cgFunc);
+  auto &labelOpnd = static_cast<LabelOperand&>(insn.GetOperand(kInsnSecondOpnd));
+  ImmOperand &newImmOpnd = aarFunc->CreateImmOperand(newImmVal, k8BitSize, false);
+  Insn &newInsn = cgFunc->GetCG()->BuildInstruction<AArch64Insn>(newMop, prevCmpInsn->GetOperand(kInsnSecondOpnd),
+                                                                 newImmOpnd, labelOpnd);
+  bb.ReplaceInsn(insn, newInsn);
+  /* update ssa info */
+  ssaInfo->ReplaceInsn(insn, newInsn);
+  /* dump pattern info */
+  if (CG_VALIDBIT_OPT_DUMP) {
+    std::vector<Insn*> prevs;
+    prevs.emplace_back(prevCmpInsn);
+    DumpAfterPattern(prevs, &insn, &newInsn);
+  }
+}
+} /* namespace maplebe */
+
diff --git a/src/mapleall/maple_be/src/cg/alignment.cpp b/src/mapleall/maple_be/src/cg/alignment.cpp
index 3cc2b61cd4c2891eff4c33a086168ceef1b3a86b..6f2571d4b4f35ad1a7e538bb9f5f21b53fd4ca2c 100644
--- a/src/mapleall/maple_be/src/cg/alignment.cpp
+++ b/src/mapleall/maple_be/src/cg/alignment.cpp
@@ -16,6 +16,7 @@
 #include "optimize_common.h"
 #include "cgfunc.h"
 #include "cg.h"
+#include "cg_option.h"
 
 namespace maplebe {
 #define ALIGN_ANALYZE_DUMP_NEWPW CG_DEBUG_FUNC(func)
@@ -25,7 +26,9 @@ void AlignAnalysis::AnalysisAlignment() {
   FindJumpTarget();
   ComputeLoopAlign();
   ComputeJumpAlign();
-  ComputeCondBranchAlign();
+  if (CGOptions::DoCondBrAlign()) {
+    ComputeCondBranchAlign();
+  }
 }
 
 void AlignAnalysis::Dump() {
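The new CmpBranchesPattern leans on the valid-bit analysis: when the compared register is known to fit in k+1 bits, `cmp x, #2^k` followed by `bge`/`blt` is equivalent to testing bit k, so the pair can be rewritten to a single `tbnz`/`tbz`. A small exhaustive check of that equivalence (standalone sketch, not compiler code):

#include <cassert>
#include <cstdint>

int main() {
  const unsigned k = 3;  // cmp immediate is 2^3 == 8
  for (uint32_t x = 0; x < (1u << (k + 1)); ++x) {  // x has k+1 valid bits
    bool ge = (x >= (1u << k));          // branch taken by bge
    bool bitSet = ((x >> k) & 1u) != 0;  // branch taken by tbnz
    assert(ge == bitSet);                // the rewrite preserves behavior
  }
  return 0;
}

This is also why CheckCondition above rejects the rewrite unless GetLogValueAtBase2 finds a power of two and the operand's valid-bit count is exactly newImmVal + 1.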
diff --git a/src/mapleall/maple_be/src/cg/cfgo.cpp b/src/mapleall/maple_be/src/cg/cfgo.cpp
index 37d2c25f7e3f41c0a3ba9d9ae48ffd6e653bf4b5..e57e75f853bd006867e35eea5d6af983550bcbd0 100644
--- a/src/mapleall/maple_be/src/cg/cfgo.cpp
+++ b/src/mapleall/maple_be/src/cg/cfgo.cpp
@@ -517,7 +517,7 @@ void FlipBRPattern::RelocateThrowBB(BB &curBB) {
   uint32 targetIdx = 1;
   MOperator mOp = curBBBranchInsn->FlipConditionOp(curBBBranchInsn->GetMachineOpcode(), targetIdx);
   LabelOperand &brTarget = cgFunc->GetOrCreateLabelOperand(*ftBB);
-  curBBBranchInsn->SetMOperator(mOp);
+  curBBBranchInsn->SetMOP(mOp);
   curBBBranchInsn->SetOperand(targetIdx, brTarget);
 
   /* move ftBB after retBB */
@@ -585,7 +585,7 @@ bool FlipBRPattern::Optimize(BB &curBB) {
         (ftBB->IsSoloGoto() ||
          (!IsLabelInLSDAOrSwitchTable(tgtBB->GetLabIdx()) &&
           cgFunc->GetTheCFG()->CanMerge(*ftBB, *tgtBB)))) {
-      curBBBranchInsn->SetMOperator(mOp);
+      curBBBranchInsn->SetMOP(mOp);
       Operand &brTarget = brInsn->GetOperand(brInsn->GetJumpTargetIdx());
       curBBBranchInsn->SetOperand(targetIdx, brTarget);
       /* Insert ftBB's insn at the beginning of tgtBB. */
@@ -626,7 +626,7 @@ bool FlipBRPattern::Optimize(BB &curBB) {
       ftBB->SetKind(BB::kBBFallthru);
     } else if (!IsLabelInLSDAOrSwitchTable(ftBB->GetLabIdx()) &&
                !tgtBB->IsPredecessor(*tgtBB->GetPrev())) {
-      curBBBranchInsn->SetMOperator(mOp);
+      curBBBranchInsn->SetMOP(mOp);
       LabelIdx tgtLabIdx = ftBB->GetLabIdx();
       if (ftBB->GetLabIdx() == MIRLabelTable::GetDummyLabel()) {
         tgtLabIdx = cgFunc->CreateLabel();
@@ -766,7 +766,7 @@ bool DuplicateBBPattern::Optimize(BB &curBB) {
   if (curBB.IsUnreachable()) {
     return false;
   }
-  if (CGOptions::IsNoDupBB()) {
+  if (CGOptions::IsNoDupBB() || CGOptions::OptimizeForSize()) {
     return false;
   }
diff --git a/src/mapleall/maple_be/src/cg/cfi.cpp b/src/mapleall/maple_be/src/cg/cfi.cpp
index df074c5c598f64fe3b2dc2d3bede7e52ba7474fa..58323fcceb0728f416d255eedd8433ddf3031878 100644
--- a/src/mapleall/maple_be/src/cg/cfi.cpp
+++ b/src/mapleall/maple_be/src/cg/cfi.cpp
@@ -39,7 +39,7 @@ static CfiDescr cfiDescrTable[kOpCfiLast + 1] = {
 #undef ARM_DIRECTIVES_DEFINE
   { ".cfi_undef", 0, { Operand::kOpdUndef, Operand::kOpdUndef, Operand::kOpdUndef } }
 };
-
+#if TARGAARCH64 || TARGRISCV64
 void CfiInsn::Dump() const {
   MOperator mOp = GetMachineOpcode();
   CfiDescr &cfiDescr = cfiDescrTable[mOp];
@@ -65,21 +65,7 @@ bool CfiInsn::Check() const {
   return true;
 }
 
-void CfiInsn::Emit(const CG &cg, Emitter &emitter) const {
-  (void)cg;
-  MOperator mOp = GetMachineOpcode();
-  CfiDescr &cfiDescr = cfiDescrTable[mOp];
-  emitter.Emit("\t").Emit(cfiDescr.name);
-  for (uint32 i = 0; i < cfiDescr.opndCount; ++i) {
-    emitter.Emit(" ");
-    Operand &curOperand = GetOperand(i);
-    curOperand.Emit(emitter, nullptr);
-    if (i < (cfiDescr.opndCount - 1)) {
-      emitter.Emit(",");
-    }
-  }
-  emitter.Emit("\n");
-}
+#endif
 
 void RegOperand::Emit(Emitter &emitter, const OpndProp*) const {
   emitter.Emit(regNO);
diff --git a/src/mapleall/maple_be/src/cg/cg.cpp b/src/mapleall/maple_be/src/cg/cg.cpp
index 18e446783779f8200c631fc528f80adf895ed194..8675453b0e2de6cd1cc0303a88c638819d9ca04f 100644
--- a/src/mapleall/maple_be/src/cg/cg.cpp
+++ b/src/mapleall/maple_be/src/cg/cg.cpp
@@ -27,6 +27,7 @@ CG::~CG() {
   if (emitter != nullptr) {
     emitter->CloseOutput();
   }
+  delete memPool;
   memPool = nullptr;
   mirModule = nullptr;
   emitter = nullptr;
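FlipBRPattern above inverts a conditional branch (via FlipConditionOp, now installed with SetMOP) so the former jump target becomes the fallthrough and an unconditional goto can be removed or its block merged. A sketch of the inversion idea using a hypothetical condition-code enum (Maple's actual flipping is table-driven over MOperator values):

#include <cassert>

enum Cond { kEQ, kNE, kLT, kGE, kGT, kLE };

// Returns the condition that branches exactly when the input does not.
Cond Flip(Cond c) {
  switch (c) {
    case kEQ: return kNE;
    case kNE: return kEQ;
    case kLT: return kGE;
    case kGE: return kLT;
    case kGT: return kLE;
    case kLE: return kGT;
  }
  return c;
}

int main() {
  // beq L1; b L2  ==>  bne L2; fallthrough into L1
  assert(Flip(kEQ) == kNE);
  assert(Flip(Flip(kLT)) == kLT);  // flipping is an involution
  return 0;
}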
diff --git a/src/mapleall/maple_be/src/cg/cg_cfg.cpp b/src/mapleall/maple_be/src/cg/cg_cfg.cpp
index 159c4ca3b9153e0853e778a5c3d621d59ed85d1d..2ad9f227b4cfc96c959c8f387303e6775d239600 100644
--- a/src/mapleall/maple_be/src/cg/cg_cfg.cpp
+++ b/src/mapleall/maple_be/src/cg/cg_cfg.cpp
@@ -27,6 +27,7 @@
 #include "x64_cgfunc.h"
 #include "cg.h"
 #endif
+#include <cmath>
 
 namespace {
 using namespace maplebe;
@@ -185,6 +186,47 @@ void CGCFG::CheckCFG() {
   }
 }
 
+void CGCFG::CheckCFGFreq() {
+  auto verifyBBFreq = [this](BB *bb, uint32 succFreq) {
+    uint32 res = bb->GetFrequency();
+    if ((res != 0 && abs((int)(res - succFreq)) / res > 1.0) || (res == 0 && res != succFreq)) {
+      // Not included
+      if (bb->GetSuccs().size() > 1 && bb->GetPreds().size() > 1) {
+        return;
+      }
+      LogInfo::MapleLogger() << cgFunc->GetName() << " curBB: " << bb->GetId() << " freq: "
+                             << bb->GetFrequency() << std::endl;
+      CHECK_FATAL(false, "Verifyfreq failure BB frequency!");
+    }
+  };
+  FOR_ALL_BB(bb, cgFunc) {
+    if (bb->IsUnreachable() || bb->IsCleanup()) {
+      continue;
+    }
+    uint32 res = 0;
+    if (bb->GetSuccs().size() > 1) {
+      for (auto *succBB : bb->GetSuccs()) {
+        res += succBB->GetFrequency();
+        if (succBB->GetPreds().size() > 1) {
+          LogInfo::MapleLogger() << cgFunc->GetName() << " critical edges: curBB: " << bb->GetId() << std::endl;
+          CHECK_FATAL(false, "The CFG has critical edges!");
+        }
+      }
+      verifyBBFreq(bb, res);
+    } else if (bb->GetSuccs().size() == 1) {
+      auto *succBB = bb->GetSuccs().front();
+      if (succBB->GetPreds().size() == 1) {
+        verifyBBFreq(bb, succBB->GetFrequency());
+      } else if (succBB->GetPreds().size() > 1) {
+        for (auto *pred : succBB->GetPreds()) {
+          res += pred->GetFrequency();
+        }
+        verifyBBFreq(succBB, res);
+      }
+    }
+  }
+}
+
 InsnVisitor *CGCFG::insnVisitor;
 
 void CGCFG::InitInsnVisitor(CGFunc &func) {
@@ -266,6 +308,10 @@ void CGCFG::MergeBB(BB &merger, BB &mergee, CGFunc &func) {
     }
     func.PushBackExitBBsVec(merger);
   }
+  if (mergee.GetKind() == BB::kBBRangeGoto) {
+    func.AddEmitSt(merger.GetId(), *func.GetEmitSt(mergee.GetId()));
+    func.DeleteEmitSt(mergee.GetId());
+  }
 }
 
 void CGCFG::MergeBB(BB &merger, BB &mergee) {
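CheckCFGFreq asserts a standard profile-consistency invariant: in the absence of critical edges, a block's frequency should match the sum of its successors' frequencies (and a single-predecessor successor should match its predecessor). A standalone model of the invariant, with a hypothetical Block type rather than Maple's BB:

#include <cassert>
#include <vector>

struct Block {
  unsigned freq;
  std::vector<const Block*> succs;
};

// True when the block's profile count equals the flow leaving it.
bool FreqConsistent(const Block &b) {
  if (b.succs.empty()) {
    return true;  // exit blocks have nothing to check against
  }
  unsigned sum = 0;
  for (const Block *s : b.succs) {
    sum += s->freq;
  }
  return sum == b.freq;
}

int main() {
  Block taken{30, {}}, fallthru{70, {}};
  Block cond{100, {&taken, &fallthru}};  // 100 = 30 + 70
  assert(FreqConsistent(cond));
  return 0;
}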
diff --git a/src/mapleall/maple_be/src/cg/cg_irbuilder.cpp b/src/mapleall/maple_be/src/cg/cg_irbuilder.cpp
index 00afe61b4229d1faa37ab849802cfb388ec4e6ac..0b2626cdad2cd665b0d495b95c2c1ecd5a38eaec 100644
--- a/src/mapleall/maple_be/src/cg/cg_irbuilder.cpp
+++ b/src/mapleall/maple_be/src/cg/cg_irbuilder.cpp
@@ -14,7 +14,60 @@
  */
 
 #include "cg_irbuilder.h"
+#include "isa.h"
 
 namespace maplebe {
+#ifdef TARGX86_64
+Insn &InsnBuilder::BuildInsn(MOperator opCode, const InsnDescription &idesc) {
+  Insn *newInsn = mp->New<Insn>(*mp, opCode);
+  newInsn->SetInsnDescrption(idesc);
+  return *newInsn;
+}
+#endif
+
+CGImmOperand &OperandBuilder::CreateImm(uint32 size, int64 value, MemPool *mp) {
+  return mp ? *mp->New<CGImmOperand>(size, value) : *alloc.New<CGImmOperand>(size, value);
+}
+
+CGImmOperand &OperandBuilder::CreateImm(const MIRSymbol &symbol, int64 offset, int32 relocs, MemPool *mp) {
+  return mp ? *mp->New<CGImmOperand>(symbol, offset, relocs) : *alloc.New<CGImmOperand>(symbol, offset, relocs);
+}
+
+CGMemOperand &OperandBuilder::CreateMem(uint32 size, MemPool *mp) {
+  return mp ? *mp->New<CGMemOperand>(size) : *alloc.New<CGMemOperand>(size);
+}
+
+CGMemOperand &OperandBuilder::CreateMem(CGRegOperand &baseOpnd, int64 offset, uint32 size) {
+  CGMemOperand *memOprand = &CreateMem(size);
+  memOprand->SetBaseRegister(baseOpnd);
+  memOprand->SetBaseOfst(CreateImm(baseOpnd.GetSize(), offset));
+  return *memOprand;
+}
+
+CGRegOperand &OperandBuilder::CreateVReg(uint32 size, RegType type, MemPool *mp) {
+  virtualRegNum++;
+  regno_t vRegNO = baseVirtualRegNO + virtualRegNum;
+  return mp ? *mp->New<CGRegOperand>(vRegNO, size, type) : *alloc.New<CGRegOperand>(vRegNO, size, type);
+}
+
+CGRegOperand &OperandBuilder::CreateVReg(regno_t vRegNO, uint32 size, RegType type, MemPool *mp) {
+  return mp ? *mp->New<CGRegOperand>(vRegNO, size, type) : *alloc.New<CGRegOperand>(vRegNO, size, type);
+}
+
+CGRegOperand &OperandBuilder::CreatePReg(regno_t pRegNO, uint32 size, RegType type, MemPool *mp) {
+  return mp ? *mp->New<CGRegOperand>(pRegNO, size, type) : *alloc.New<CGRegOperand>(pRegNO, size, type);
+}
+
+CGListOperand &OperandBuilder::CreateList(MemPool *mp) {
+  return mp ? *mp->New<CGListOperand>(alloc) : *alloc.New<CGListOperand>(alloc);
+}
+
+CGFuncNameOperand &OperandBuilder::CreateFuncNameOpnd(MIRSymbol &symbol, MemPool *mp) {
+  return mp ? *mp->New<CGFuncNameOperand>(symbol) : *alloc.New<CGFuncNameOperand>(symbol);
+}
+
+CGLabelOperand &OperandBuilder::CreateLabel(const char *parent, LabelIdx idx, MemPool *mp) {
+  return mp ? *mp->New<CGLabelOperand>(parent, idx) : *alloc.New<CGLabelOperand>(parent, idx);
+}
 }
\ No newline at end of file
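Each OperandBuilder factory above takes an optional MemPool and falls back to the builder's own allocator when none is supplied, letting callers choose the lifetime of the operand. A sketch of that idiom with hypothetical minimal types (not Maple's MemPool interface):

#include <memory>
#include <vector>

struct Operand {
  explicit Operand(int v) : value(v) {}
  int value;
};

class Pool {
 public:
  // Owns every object it hands out; everything dies with the pool.
  template <typename T, typename... Args>
  T *New(Args&&... args) {
    storage.push_back(std::make_unique<T>(std::forward<Args>(args)...));
    return static_cast<T*>(storage.back().get());
  }
 private:
  std::vector<std::unique_ptr<Operand>> storage;
};

class Builder {
 public:
  // If mp is non-null the operand lives in the caller's pool, otherwise in
  // the builder's default pool (mirroring "mp ? *mp->New(...) : *alloc.New(...)").
  Operand &CreateImm(int v, Pool *mp = nullptr) {
    return mp ? *mp->New<Operand>(v) : *defaultPool.New<Operand>(v);
  }
 private:
  Pool defaultPool;
};

int main() {
  Builder b;
  Pool scratch;
  return (b.CreateImm(1).value + b.CreateImm(2, &scratch).value == 3) ? 0 : 1;
}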
diff --git a/src/mapleall/maple_be/src/cg/cg_option.cpp b/src/mapleall/maple_be/src/cg/cg_option.cpp
index f2c3edecd622966b81a76ad38e3e70b767cb3a0e..27cd52dd284a86938efab3a14914ed030a81936c 100644
--- a/src/mapleall/maple_be/src/cg/cg_option.cpp
+++ b/src/mapleall/maple_be/src/cg/cg_option.cpp
@@ -54,6 +54,13 @@ uint64 CGOptions::lsraBBOptSize = 150000;
 uint64 CGOptions::lsraInsnOptSize = 200000;
 uint64 CGOptions::overlapNum = 28;
 uint8 CGOptions::rematLevel = 2;
+bool CGOptions::optForSize = false;
+bool CGOptions::enableHotColdSplit = false;
+uint32 CGOptions::alignMinBBSize = 16;
+uint32 CGOptions::alignMaxBBSize = 96;
+uint32 CGOptions::loopAlignPow = 4;
+uint32 CGOptions::jumpAlignPow = 5;
+uint32 CGOptions::funcAlignPow = 5;
 #if TARGAARCH64 || TARGRISCV64
 bool CGOptions::useBarriersForVolatile = false;
 #else
@@ -110,8 +117,10 @@ bool CGOptions::replaceASM = false;
 bool CGOptions::generalRegOnly = false;
 bool CGOptions::fastMath = false;
 bool CGOptions::doAlignAnalysis = false;
+bool CGOptions::doCondBrAlign = false;
 bool CGOptions::cgBigEndian = false;
 bool CGOptions::arm64ilp32 = false;
+bool CGOptions::noCommon = false;
 
 enum OptionIndex : uint64 {
   kCGQuiet = kCommonOptionEnd + 1,
@@ -127,6 +136,7 @@ enum OptionIndex : uint64 {
   kIco,
   kSlo,
   kGo,
+  kEnableHotColdSplit,
   kPreLSRAOpt,
   kLocalrefSpill,
   kOptCallee,
@@ -172,6 +182,7 @@ enum OptionIndex : uint64 {
   kCGO0,
   kCGO1,
   kCGO2,
+  kCGOs,  // optimize for size
   kProepilogue,
   kYieldPoing,
   kLocalRc,
@@ -207,11 +218,18 @@ enum OptionIndex : uint64 {
   kFastMath,
   kTailCall,
   kAlignAnalysis,
+  kCondBrAlign,
   kRegSaves,
   kSsaPreSave,
   kSsuPreRestore,
   kArm64ilp32,
   kCGSSA,
+  kCommon,
+  kAlignMinBBSize,
+  kAlignMaxBBSize,
+  kLoopAlignPow,
+  kJumpAlignPow,
+  kFuncAlignPow,
 };
 
 const Descriptor kUsage[] = {
@@ -374,6 +392,16 @@ const Descriptor kUsage[] = {
     "  --no-globalopt\n",
     "mplcg",
     {} },
+  { kEnableHotColdSplit,
+    kEnable,
+    "",
+    "hotcoldsplit",
+    kBuildTypeExperimental,
+    kArgCheckPolicyBool,
+    "  --enableHotColdSplit        \tPerform HotColdSplit optimization\n"
+    "  --no-enableHotColdSplit\n",
+    "mplcg",
+    {} },
   { kPreLSRAOpt,
     kEnable,
     "",
@@ -772,6 +800,15 @@ const Descriptor kUsage[] = {
     "  -O2                         \tDo some optimization.\n",
     "mplcg",
     {} },
+  { kCGOs,
+    0,
+    "Os",
+    "",
+    kBuildTypeProduct,
+    kArgCheckPolicyOptional,
+    "  -Os                         \tOptimize for size, based on O2.\n",
+    "mplcg",
+    {} },
   { kLSRABB,
    0,
    "",
@@ -1145,6 +1182,16 @@ const Descriptor kUsage[] = {
     "  --no-align-analysis\n",
     "mplcg",
     {} },
+  { kCondBrAlign,
+    kEnable,
+    "",
+    "condbr-align",
+    kBuildTypeExperimental,
+    kArgCheckPolicyBool,
+    "  --condbr-align              \tPerform condbr align\n"
+    "  --no-condbr-align\n",
+    "mplcg",
+    {} },
   { kCGSSA,
     kEnable,
     "",
@@ -1165,7 +1212,61 @@ const Descriptor kUsage[] = {
     "  --no-arm64-ilp32\n",
     "mplcg",
     {} },
-
+  { kCommon,
+    kEnable,
+    "",
+    "common",
+    kBuildTypeProduct,
+    kArgCheckPolicyBool,
+    "  --common                    \t \n"
+    "  --no-common\n",
+    "mplcg",
+    {} },
+  { kAlignMinBBSize,
+    0,
+    "",
+    "align-min-bb-size",
+    kBuildTypeExperimental,
+    kArgCheckPolicyRequired,
+    "  --align-min-bb-size=NUM     \tO2 Minimum bb size for alignment unit:byte\n",
+    "mplcg",
+    {} },
+  { kAlignMaxBBSize,
+    0,
+    "",
+    "align-max-bb-size",
+    kBuildTypeExperimental,
+    kArgCheckPolicyRequired,
+    "  --align-max-bb-size=NUM     \tO2 Maximum bb size for alignment unit:byte\n",
+    "mplcg",
+    {} },
+  { kLoopAlignPow,
+    0,
+    "",
+    "loop-align-pow",
+    kBuildTypeExperimental,
+    kArgCheckPolicyRequired,
+    "  --loop-align-pow=NUM        \tO2 loop bb align pow (NUM == 0, no loop-align)\n",
+    "mplcg",
+    {} },
+  { kJumpAlignPow,
+    0,
+    "",
+    "jump-align-pow",
+    kBuildTypeExperimental,
+    kArgCheckPolicyRequired,
+    "  --jump-align-pow=NUM        \tO2 jump bb align pow (NUM == 0, no jump-align)\n",
+    "mplcg",
+    {} },
+  { kFuncAlignPow,
+    0,
+    "",
+    "func-align-pow",
+    kBuildTypeExperimental,
+    kArgCheckPolicyRequired,
+    "  --func-align-pow=NUM        \tO2 func bb align pow (NUM == 0, no func-align)\n",
+    "mplcg",
+    {} },
   // End
   { kUnknown,
     0,
@@ -1200,6 +1301,10 @@ void CGOptions::DecideMplcgRealLevel(const std::deque &inpu
       case kCGO2:
         realLevel = CGOptions::kLevel2;
         break;
+      case kCGOs:
+        optForSize = true;
+        realLevel = CGOptions::kLevel2;
+        break;
       default:
         break;
     }
@@ -1428,6 +1533,9 @@ bool CGOptions::SolveOptions(const std::deque